| repo_name (string, 6-112 chars) | path (string, 4-204 chars) | copies (string, 1-3 chars) | size (string, 4-6 chars) | content (string, 714-810k chars) | license (string, 15 classes) |
|---|---|---|---|---|---|
joelmpiper/bill_taxonomy | src/ingest/get_bills.py | 1 | 1312 | import psycopg2
import pandas as pd


def get_us_bills(dbname, username, us_subset):
    """Retrieve the subset of bills from the U.S. Congress for the 114th term."""
    con = psycopg2.connect(database=dbname, user=username)
    # query:
    sql_str = """
    SELECT bill_num, bill_name, bill_text FROM us_bills
    LIMIT {0}
    """
    sql_query = sql_str.format(us_subset)
    us_bills = pd.read_sql_query(sql_query, con)
    return us_bills


def get_ny_bills(dbname, username, ny_subset):
    """Retrieve the subset of bills from the NY legislature for 2015."""
    con = psycopg2.connect(database=dbname, user=username)
    # query:
    sql_str = """
    SELECT bill_num, bill_name, bill_text FROM ny_bills
    LIMIT {0}
    """
    sql_query = sql_str.format(ny_subset)
    ny_bills = pd.read_sql_query(sql_query, con)
    return ny_bills


def get_subjects(dbname, username, subjects):
    """Retrieve the subset of subjects associated with bills from the
    U.S. Congress for the 114th term.
    """
    con = psycopg2.connect(database=dbname, user=username)
    # query:
    sql_str = """
    SELECT bill_num, subject FROM bill_subject
    WHERE subject IN ('{0}')
    """
    sql_query = sql_str.format("','".join(subjects))
    subjects = pd.read_sql_query(sql_query, con)
    return subjects
| mit |
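The helpers above expect an existing PostgreSQL database containing `us_bills`, `ny_bills`, and `bill_subject` tables. A minimal usage sketch follows; the database name `bills`, the user `analyst`, and the subject labels are placeholders rather than values from the repository, and the import path simply mirrors the file's location.

```python
# Hypothetical driver for the query helpers above; adjust names to your setup.
from src.ingest.get_bills import get_us_bills, get_ny_bills, get_subjects

us_bills = get_us_bills('bills', 'analyst', us_subset=100)   # first 100 U.S. bills
ny_bills = get_ny_bills('bills', 'analyst', ny_subset=100)   # first 100 NY bills
subjects = get_subjects('bills', 'analyst', ['Health', 'Education'])

print(us_bills.shape, ny_bills.shape)
print(subjects['subject'].value_counts())
```

Because the queries are built with `str.format`, the subset sizes and subject labels should come from trusted code rather than user input.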
trachelr/mne-python | mne/report.py | 7 | 61251 | """Generate html report from MNE database
"""
# Authors: Alex Gramfort <[email protected]>
# Mainak Jas <[email protected]>
# Teon Brooks <[email protected]>
#
# License: BSD (3-clause)
import os
import os.path as op
import fnmatch
import re
import codecs
import time
from glob import glob
import warnings
import base64
from datetime import datetime as dt
import numpy as np
from . import read_evokeds, read_events, pick_types, read_cov
from .io import Raw, read_info
from .utils import _TempDir, logger, verbose, get_subjects_dir
from .viz import plot_events, plot_trans, plot_cov
from .viz._3d import _plot_mri_contours
from .forward import read_forward_solution
from .epochs import read_epochs
from .minimum_norm import read_inverse_operator
from .parallel import parallel_func, check_n_jobs
from .externals.tempita import HTMLTemplate, Template
from .externals.six import BytesIO
from .externals.six import moves
VALID_EXTENSIONS = ['raw.fif', 'raw.fif.gz', 'sss.fif', 'sss.fif.gz',
'-eve.fif', '-eve.fif.gz', '-cov.fif', '-cov.fif.gz',
'-trans.fif', '-trans.fif.gz', '-fwd.fif', '-fwd.fif.gz',
'-epo.fif', '-epo.fif.gz', '-inv.fif', '-inv.fif.gz',
'-ave.fif', '-ave.fif.gz', 'T1.mgz']
SECTION_ORDER = ['raw', 'events', 'epochs', 'evoked', 'covariance', 'trans',
'mri', 'forward', 'inverse']
###############################################################################
# PLOTTING FUNCTIONS
def _fig_to_img(function=None, fig=None, image_format='png',
scale=None, **kwargs):
"""Wrapper function to plot figure and create a binary image"""
import matplotlib.pyplot as plt
if function is not None:
plt.close('all')
fig = function(**kwargs)
output = BytesIO()
if scale is not None:
_scale_mpl_figure(fig, scale)
fig.savefig(output, format=image_format, bbox_inches='tight',
dpi=fig.get_dpi())
plt.close(fig)
output = output.getvalue()
return (output if image_format == 'svg' else
base64.b64encode(output).decode('ascii'))
def _scale_mpl_figure(fig, scale):
"""Magic scaling helper
Keeps font-size and artist sizes constant
0.5 : current font - 4pt
2.0 : current font + 4pt
XXX it's unclear why this works, but good to go for most cases
"""
fig.set_size_inches(fig.get_size_inches() * scale)
fig.set_dpi(fig.get_dpi() * scale)
import matplotlib as mpl
if scale >= 1:
sfactor = scale ** 2
elif scale < 1:
sfactor = -((1. / scale) ** 2)
for text in fig.findobj(mpl.text.Text):
fs = text.get_fontsize()
new_size = fs + sfactor
if new_size <= 0:
raise ValueError('could not rescale matplotlib fonts, consider '
'increasing "scale"')
text.set_fontsize(new_size)
fig.canvas.draw()
def _figs_to_mrislices(sl, n_jobs, **kwargs):
import matplotlib.pyplot as plt
plt.close('all')
use_jobs = min(n_jobs, max(1, len(sl)))
parallel, p_fun, _ = parallel_func(_plot_mri_contours, use_jobs)
outs = parallel(p_fun(slices=s, **kwargs)
for s in np.array_split(sl, use_jobs))
for o in outs[1:]:
outs[0] += o
return outs[0]
def _iterate_trans_views(function, **kwargs):
"""Auxiliary function to iterate over views in trans fig.
"""
from scipy.misc import imread
import matplotlib.pyplot as plt
import mayavi
fig = function(**kwargs)
assert isinstance(fig, mayavi.core.scene.Scene)
views = [(90, 90), (0, 90), (0, -90)]
fig2, axes = plt.subplots(1, len(views))
for view, ax in zip(views, axes):
mayavi.mlab.view(view[0], view[1])
# XXX: save_bmp / save_png / ...
tempdir = _TempDir()
temp_fname = op.join(tempdir, 'test.png')
if fig.scene is not None:
fig.scene.save_png(temp_fname)
im = imread(temp_fname)
else: # Testing mode
im = np.zeros((2, 2, 3))
ax.imshow(im)
ax.axis('off')
mayavi.mlab.close(fig)
img = _fig_to_img(fig=fig2)
return img
###############################################################################
# TOC FUNCTIONS
def _is_bad_fname(fname):
"""Auxiliary function for identifying bad file naming patterns
and highlighting them in red in the TOC.
"""
if fname.endswith('(whitened)'):
fname = fname[:-11]
if not fname.endswith(tuple(VALID_EXTENSIONS + ['bem', 'custom'])):
return 'red'
else:
return ''
def _get_toc_property(fname):
"""Auxiliary function to assign class names to TOC
list elements to allow toggling with buttons.
"""
if fname.endswith(('-eve.fif', '-eve.fif.gz')):
div_klass = 'events'
tooltip = fname
text = op.basename(fname)
elif fname.endswith(('-ave.fif', '-ave.fif.gz')):
div_klass = 'evoked'
tooltip = fname
text = op.basename(fname)
elif fname.endswith(('-cov.fif', '-cov.fif.gz')):
div_klass = 'covariance'
tooltip = fname
text = op.basename(fname)
elif fname.endswith(('raw.fif', 'raw.fif.gz',
'sss.fif', 'sss.fif.gz')):
div_klass = 'raw'
tooltip = fname
text = op.basename(fname)
elif fname.endswith(('-trans.fif', '-trans.fif.gz')):
div_klass = 'trans'
tooltip = fname
text = op.basename(fname)
elif fname.endswith(('-fwd.fif', '-fwd.fif.gz')):
div_klass = 'forward'
tooltip = fname
text = op.basename(fname)
elif fname.endswith(('-inv.fif', '-inv.fif.gz')):
div_klass = 'inverse'
tooltip = fname
text = op.basename(fname)
elif fname.endswith(('-epo.fif', '-epo.fif.gz')):
div_klass = 'epochs'
tooltip = fname
text = op.basename(fname)
elif fname.endswith(('.nii', '.nii.gz', '.mgh', '.mgz')):
div_klass = 'mri'
tooltip = 'MRI'
text = 'MRI'
elif fname.endswith(('bem')):
div_klass = 'mri'
tooltip = 'MRI'
text = 'MRI'
elif fname.endswith('(whitened)'):
div_klass = 'evoked'
tooltip = fname
text = op.basename(fname[:-11]) + '(whitened)'
else:
div_klass = fname.split('-#-')[1]
tooltip = fname.split('-#-')[0]
text = fname.split('-#-')[0]
return div_klass, tooltip, text
def _iterate_files(report, fnames, info, cov, baseline, sfreq, on_error):
"""Auxiliary function to parallel process in batch mode.
"""
htmls, report_fnames, report_sectionlabels = [], [], []
def _update_html(html, report_fname, report_sectionlabel):
"""Update the lists above."""
htmls.append(html)
report_fnames.append(report_fname)
report_sectionlabels.append(report_sectionlabel)
for fname in fnames:
logger.info("Rendering : %s"
% op.join('...' + report.data_path[-20:],
fname))
try:
if fname.endswith(('raw.fif', 'raw.fif.gz',
'sss.fif', 'sss.fif.gz')):
html = report._render_raw(fname)
report_fname = fname
report_sectionlabel = 'raw'
elif fname.endswith(('-fwd.fif', '-fwd.fif.gz')):
html = report._render_forward(fname)
report_fname = fname
report_sectionlabel = 'forward'
elif fname.endswith(('-inv.fif', '-inv.fif.gz')):
html = report._render_inverse(fname)
report_fname = fname
report_sectionlabel = 'inverse'
elif fname.endswith(('-ave.fif', '-ave.fif.gz')):
if cov is not None:
html = report._render_whitened_evoked(fname, cov, baseline)
report_fname = fname + ' (whitened)'
report_sectionlabel = 'evoked'
_update_html(html, report_fname, report_sectionlabel)
html = report._render_evoked(fname, baseline)
report_fname = fname
report_sectionlabel = 'evoked'
elif fname.endswith(('-eve.fif', '-eve.fif.gz')):
html = report._render_eve(fname, sfreq)
report_fname = fname
report_sectionlabel = 'events'
elif fname.endswith(('-epo.fif', '-epo.fif.gz')):
html = report._render_epochs(fname)
report_fname = fname
report_sectionlabel = 'epochs'
elif (fname.endswith(('-cov.fif', '-cov.fif.gz')) and
report.info_fname is not None):
html = report._render_cov(fname, info)
report_fname = fname
report_sectionlabel = 'covariance'
elif (fname.endswith(('-trans.fif', '-trans.fif.gz')) and
report.info_fname is not None and report.subjects_dir
is not None and report.subject is not None):
html = report._render_trans(fname, report.data_path, info,
report.subject,
report.subjects_dir)
report_fname = fname
report_sectionlabel = 'trans'
else:
html = None
report_fname = None
report_sectionlabel = None
except Exception as e:
if on_error == 'warn':
logger.warning('Failed to process file %s:\n"%s"' % (fname, e))
elif on_error == 'raise':
raise
html = None
report_fname = None
report_sectionlabel = None
_update_html(html, report_fname, report_sectionlabel)
return htmls, report_fnames, report_sectionlabels
###############################################################################
# IMAGE FUNCTIONS
def _build_image(data, cmap='gray'):
"""Build an image encoded in base64.
"""
import matplotlib.pyplot as plt
from matplotlib.figure import Figure
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
figsize = data.shape[::-1]
if figsize[0] == 1:
figsize = tuple(figsize[1:])
data = data[:, :, 0]
fig = Figure(figsize=figsize, dpi=1.0, frameon=False)
FigureCanvas(fig)
cmap = getattr(plt.cm, cmap, plt.cm.gray)
fig.figimage(data, cmap=cmap)
output = BytesIO()
fig.savefig(output, dpi=1.0, format='png')
return base64.b64encode(output.getvalue()).decode('ascii')
def _iterate_sagittal_slices(array, limits=None):
"""Iterate sagittal slice.
"""
shape = array.shape[0]
for ind in range(shape):
if limits and ind not in limits:
continue
yield ind, array[ind, :, :]
def _iterate_axial_slices(array, limits=None):
"""Iterate axial slice.
"""
shape = array.shape[1]
for ind in range(shape):
if limits and ind not in limits:
continue
yield ind, array[:, ind, :]
def _iterate_coronal_slices(array, limits=None):
"""Iterate coronal slice.
"""
shape = array.shape[2]
for ind in range(shape):
if limits and ind not in limits:
continue
yield ind, np.flipud(np.rot90(array[:, :, ind]))
def _iterate_mri_slices(name, ind, global_id, slides_klass, data, cmap,
image_format='png'):
"""Auxiliary function for parallel processing of mri slices.
"""
img_klass = 'slideimg-%s' % name
caption = u'Slice %s %s' % (name, ind)
slice_id = '%s-%s-%s' % (name, global_id, ind)
div_klass = 'span12 %s' % slides_klass
img = _build_image(data, cmap=cmap)
first = True if ind == 0 else False
html = _build_html_image(img, slice_id, div_klass,
img_klass, caption, first)
return ind, html
###############################################################################
# HTML functions
def _build_html_image(img, id, div_klass, img_klass, caption=None, show=True):
"""Build a html image from a slice array.
"""
html = []
add_style = u'' if show else u'style="display: none"'
html.append(u'<li class="%s" id="%s" %s>' % (div_klass, id, add_style))
html.append(u'<div class="thumbnail">')
html.append(u'<img class="%s" alt="" style="width:90%%;" '
'src="data:image/png;base64,%s">'
% (img_klass, img))
html.append(u'</div>')
if caption:
html.append(u'<h4>%s</h4>' % caption)
html.append(u'</li>')
return u'\n'.join(html)
slider_template = HTMLTemplate(u"""
<script>$("#{{slider_id}}").slider({
range: "min",
/*orientation: "vertical",*/
min: {{minvalue}},
max: {{maxvalue}},
step: {{step}},
value: {{startvalue}},
create: function(event, ui) {
$(".{{klass}}").hide();
$("#{{klass}}-{{startvalue}}").show();},
stop: function(event, ui) {
var list_value = $("#{{slider_id}}").slider("value");
$(".{{klass}}").hide();
$("#{{klass}}-"+list_value).show();}
})</script>
""")
def _build_html_slider(slices_range, slides_klass, slider_id):
"""Build an html slider for a given slices range and a slices klass.
"""
startvalue = slices_range[len(slices_range) // 2]
return slider_template.substitute(slider_id=slider_id,
klass=slides_klass,
step=slices_range[1] - slices_range[0],
minvalue=slices_range[0],
maxvalue=slices_range[-1],
startvalue=startvalue)
###############################################################################
# HTML scan renderer
header_template = Template(u"""
<!DOCTYPE html>
<html lang="fr">
<head>
{{include}}
<script type="text/javascript">
var toggle_state = false;
$(document).on('keydown', function (event) {
if (event.which == 84){
if (!toggle_state)
$('.has_toggle').trigger('click');
else if (toggle_state)
$('.has_toggle').trigger('click');
toggle_state = !toggle_state;
}
});
function togglebutton(class_name){
$(class_name).toggle();
if ($(class_name + '-btn').hasClass('active'))
$(class_name + '-btn').removeClass('active');
else
$(class_name + '-btn').addClass('active');
}
/* Scroll down on click to #id so that caption is not hidden
by navbar */
var shiftWindow = function() { scrollBy(0, -60) };
if (location.hash) shiftWindow();
window.addEventListener("hashchange", shiftWindow);
</script>
<style type="text/css">
body {
line-height: 1.5em;
font-family: arial, sans-serif;
}
h1 {
font-size: 30px;
text-align: center;
}
h4 {
text-align: center;
}
@link-color: @brand-primary;
@link-hover-color: darken(@link-color, 15%);
a{
color: @link-color;
&:hover {
color: @link-hover-color;
text-decoration: underline;
}
}
li{
list-style-type:none;
}
#wrapper {
text-align: left;
margin: 5em auto;
width: 700px;
}
#container{
position: relative;
}
#content{
margin-left: 22%;
margin-top: 60px;
width: 75%;
}
#toc {
margin-top: navbar-height;
position: fixed;
width: 20%;
height: 90%;
overflow: auto;
}
#toc li {
overflow: hidden;
padding-bottom: 2px;
margin-left: 20px;
}
#toc span {
float: left;
padding: 0 2px 3px 0;
}
div.footer {
background-color: #C0C0C0;
color: #000000;
padding: 3px 8px 3px 0;
clear: both;
font-size: 0.8em;
text-align: right;
}
</style>
</head>
<body>
<nav class="navbar navbar-inverse navbar-fixed-top" role="navigation">
<div class="container-fluid">
<div class="navbar-header navbar-left">
<ul class="nav nav-pills"><li class="active">
<a class="navbar-btn" data-toggle="collapse"
data-target="#viewnavbar" href="javascript:void(0)">
></a></li></ul>
</div>
<h3 class="navbar-text" style="color:white">{{title}}</h3>
<ul class="nav nav-pills navbar-right" style="margin-top: 7px;"
id="viewnavbar">
{{for section in sections}}
<li class="active {{sectionvars[section]}}-btn">
<a href="javascript:void(0)"
onclick="togglebutton('.{{sectionvars[section]}}')"
class="has_toggle">
{{section if section != 'mri' else 'MRI'}}
</a>
</li>
{{endfor}}
</ul>
</div>
</nav>
""")
footer_template = HTMLTemplate(u"""
</div></body>
<div class="footer">
© Copyright 2012-{{current_year}}, MNE Developers.
Created on {{date}}.
Powered by <a href="http://martinos.org/mne">MNE.
</div>
</html>
""")
html_template = Template(u"""
<li class="{{div_klass}}" id="{{id}}">
<h4>{{caption}}</h4>
<div class="thumbnail">{{html}}</div>
</li>
""")
image_template = Template(u"""
{{default interactive = False}}
{{default width = 50}}
{{default id = False}}
{{default image_format = 'png'}}
{{default scale = None}}
{{default comment = None}}
<li class="{{div_klass}}" {{if id}}id="{{id}}"{{endif}}
{{if not show}}style="display: none"{{endif}}>
{{if caption}}
<h4>{{caption}}</h4>
{{endif}}
<div class="thumbnail">
{{if not interactive}}
{{if image_format == 'png'}}
{{if scale is not None}}
<img alt="" style="width:{{width}}%;"
src="data:image/png;base64,{{img}}">
{{else}}
<img alt=""
src="data:image/png;base64,{{img}}">
{{endif}}
{{elif image_format == 'svg'}}
<div style="text-align:center;">
{{img}}
</div>
{{endif}}
{{if comment is not None}}
<br><br>
<div style="text-align:center;">
<style>
p.test {word-wrap: break-word;}
</style>
<p class="test">
{{comment}}
</p>
</div>
{{endif}}
{{else}}
<center>{{interactive}}</center>
{{endif}}
</div>
</li>
""")
repr_template = Template(u"""
<li class="{{div_klass}}" id="{{id}}">
<h4>{{caption}}</h4><hr>
{{repr}}
<hr></li>
""")
raw_template = Template(u"""
<li class="{{div_klass}}" id="{{id}}">
<h4>{{caption}}</h4>
<table class="table table-hover">
<tr>
<th>Measurement date</th>
{{if meas_date is not None}}
<td>{{meas_date}}</td>
{{else}}<td>Unknown</td>{{endif}}
</tr>
<tr>
<th>Experimenter</th>
{{if info['experimenter'] is not None}}
<td>{{info['experimenter']}}</td>
{{else}}<td>Unknown</td>{{endif}}
</tr>
<tr>
<th>Digitized points</th>
{{if info['dig'] is not None}}
<td>{{len(info['dig'])}} points</td>
{{else}}
<td>Not available</td>
{{endif}}
</tr>
<tr>
<th>Good channels</th>
<td>{{n_mag}} magnetometer, {{n_grad}} gradiometer,
and {{n_eeg}} EEG channels</td>
</tr>
<tr>
<th>Bad channels</th>
{{if info['bads'] is not None}}
<td>{{', '.join(info['bads'])}}</td>
{{else}}<td>None</td>{{endif}}
</tr>
<tr>
<th>EOG channels</th>
<td>{{eog}}</td>
</tr>
<tr>
<th>ECG channels</th>
<td>{{ecg}}</td>
<tr>
<th>Measurement time range</th>
<td>{{u'%0.2f' % tmin}} to {{u'%0.2f' % tmax}} sec.</td>
</tr>
<tr>
<th>Sampling frequency</th>
<td>{{u'%0.2f' % info['sfreq']}} Hz</td>
</tr>
<tr>
<th>Highpass</th>
<td>{{u'%0.2f' % info['highpass']}} Hz</td>
</tr>
<tr>
<th>Lowpass</th>
<td>{{u'%0.2f' % info['lowpass']}} Hz</td>
</tr>
</table>
</li>
""")
toc_list = Template(u"""
<li class="{{div_klass}}">
{{if id}}
<a href="javascript:void(0)" onclick="window.location.hash={{id}};">
{{endif}}
<span title="{{tooltip}}" style="color:{{color}}"> {{text}}</span>
{{if id}}</a>{{endif}}
</li>
""")
def _check_scale(scale):
"""Helper to ensure valid scale value is passed"""
if np.isscalar(scale) and scale <= 0:
raise ValueError('scale must be positive, not %s' % scale)
class Report(object):
"""Object for rendering HTML
Parameters
----------
info_fname : str
Name of the file containing the info dictionary.
subjects_dir : str | None
Path to the SUBJECTS_DIR. If None, the path is obtained by using
the environment variable SUBJECTS_DIR.
subject : str | None
Subject name.
title : str
Title of the report.
cov_fname : str
Name of the file containing the noise covariance.
baseline : None or tuple of length 2 (default (None, 0))
The time interval to apply baseline correction for evokeds.
If None do not apply it. If baseline is (a, b)
the interval is between "a (s)" and "b (s)".
If a is None the beginning of the data is used
and if b is None then b is set to the end of the interval.
If baseline is equal to (None, None) all the time
interval is used.
The baseline (a, b) includes both endpoints, i.e. all
timepoints t such that a <= t <= b.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Notes
-----
To toggle the show/hide state of all sections in the html report, press 't'
.. versionadded:: 0.8.0
"""
def __init__(self, info_fname=None, subjects_dir=None,
subject=None, title=None, cov_fname=None, baseline=None,
verbose=None):
self.info_fname = info_fname
self.cov_fname = cov_fname
self.baseline = baseline
self.subjects_dir = get_subjects_dir(subjects_dir, raise_error=False)
self.subject = subject
self.title = title
self.verbose = verbose
self.initial_id = 0
self.html = []
self.fnames = [] # List of file names rendered
self.sections = [] # List of sections
self._sectionlabels = [] # Section labels
self._sectionvars = {} # Section variable names in js
# boolean to specify if sections should be ordered in natural
# order of processing (raw -> events ... -> inverse)
self._sort_sections = False
self._init_render() # Initialize the renderer
def _get_id(self):
"""Get id of plot.
"""
self.initial_id += 1
return self.initial_id
def _validate_input(self, items, captions, section, comments=None):
"""Validate input.
"""
if not isinstance(items, (list, tuple)):
items = [items]
if not isinstance(captions, (list, tuple)):
captions = [captions]
if not isinstance(comments, (list, tuple)):
if comments is None:
comments = [comments] * len(captions)
else:
comments = [comments]
if len(comments) != len(items):
raise ValueError('Comments and report items must have the same '
'length or comments should be None.')
elif len(captions) != len(items):
raise ValueError('Captions and report items must have the same '
'length.')
# Book-keeping of section names
if section not in self.sections:
self.sections.append(section)
self._sectionvars[section] = _clean_varnames(section)
return items, captions, comments
def _add_figs_to_section(self, figs, captions, section='custom',
image_format='png', scale=None, comments=None):
"""Auxiliary method for `add_section` and `add_figs_to_section`.
"""
from scipy.misc import imread
import matplotlib.pyplot as plt
mayavi = None
try:
# on some version mayavi.core won't be exposed unless ...
from mayavi import mlab # noqa, mlab imported
import mayavi
except: # on some systems importing Mayavi raises SystemExit (!)
warnings.warn('Could not import mayavi. Trying to render '
'`mayavi.core.scene.Scene` figure instances'
' will throw an error.')
figs, captions, comments = self._validate_input(figs, captions,
section, comments)
_check_scale(scale)
for fig, caption, comment in zip(figs, captions, comments):
caption = 'custom plot' if caption == '' else caption
sectionvar = self._sectionvars[section]
global_id = self._get_id()
div_klass = self._sectionvars[section]
img_klass = self._sectionvars[section]
if mayavi is not None and isinstance(fig, mayavi.core.scene.Scene):
tempdir = _TempDir()
temp_fname = op.join(tempdir, 'test')
if fig.scene is not None:
fig.scene.save_png(temp_fname)
img = imread(temp_fname)
else: # Testing mode
img = np.zeros((2, 2, 3))
mayavi.mlab.close(fig)
fig = plt.figure()
plt.imshow(img)
plt.axis('off')
img = _fig_to_img(fig=fig, scale=scale,
image_format=image_format)
html = image_template.substitute(img=img, id=global_id,
div_klass=div_klass,
img_klass=img_klass,
caption=caption,
show=True,
image_format=image_format,
comment=comment)
self.fnames.append('%s-#-%s-#-custom' % (caption, sectionvar))
self._sectionlabels.append(sectionvar)
self.html.append(html)
def add_figs_to_section(self, figs, captions, section='custom',
scale=None, image_format='png', comments=None):
"""Append custom user-defined figures.
Parameters
----------
figs : list of figures.
Each figure in the list can be an instance of
matplotlib.pyplot.Figure, mayavi.core.scene.Scene,
or np.ndarray (images read in using scipy.imread).
captions : list of str
A list of captions to the figures.
section : str
Name of the section. If section already exists, the figures
will be appended to the end of the section
scale : float | None | callable
Scale the images maintaining the aspect ratio.
If None, no scaling is applied. If float, scale will determine
the relative scaling (might not work for scale <= 1 depending on
font sizes). If function, should take a figure object as input
parameter. Defaults to None.
image_format : {'png', 'svg'}
The image format to be used for the report. Defaults to 'png'.
comments : None | str | list of str
A string of text or a list of strings of text to be appended after
the figure.
"""
return self._add_figs_to_section(figs=figs, captions=captions,
section=section, scale=scale,
image_format=image_format,
comments=comments)
def add_images_to_section(self, fnames, captions, scale=None,
section='custom', comments=None):
"""Append custom user-defined images.
Parameters
----------
fnames : str | list of str
A filename or a list of filenames from which images are read.
captions : str | list of str
A caption or a list of captions to the images.
scale : float | None
Scale the images maintaining the aspect ratio.
Defaults to None. If None, no scaling will be applied.
section : str
Name of the section. If section already exists, the images
will be appended to the end of the section.
comments : None | str | list of str
A string of text or a list of strings of text to be appended after
the image.
"""
# Note: using scipy.misc is equivalent because scipy internally
# imports PIL anyway. It's not possible to redirect image output
# to binary string using scipy.misc.
from PIL import Image
fnames, captions, comments = self._validate_input(fnames, captions,
section, comments)
_check_scale(scale)
for fname, caption, comment in zip(fnames, captions, comments):
caption = 'custom plot' if caption == '' else caption
sectionvar = self._sectionvars[section]
global_id = self._get_id()
div_klass = self._sectionvars[section]
img_klass = self._sectionvars[section]
# Convert image to binary string.
im = Image.open(fname)
output = BytesIO()
im.save(output, format='png')
img = base64.b64encode(output.getvalue()).decode('ascii')
html = image_template.substitute(img=img, id=global_id,
div_klass=div_klass,
img_klass=img_klass,
caption=caption,
width=scale,
comment=comment,
show=True)
self.fnames.append('%s-#-%s-#-custom' % (caption, sectionvar))
self._sectionlabels.append(sectionvar)
self.html.append(html)
def add_htmls_to_section(self, htmls, captions, section='custom'):
"""Append htmls to the report.
Parameters
----------
htmls : str | list of str
An html str or a list of html str.
captions : str | list of str
A caption or a list of captions to the htmls.
section : str
Name of the section. If section already exists, the images
will be appended to the end of the section.
Notes
-----
.. versionadded:: 0.9.0
"""
htmls, captions, _ = self._validate_input(htmls, captions, section)
for html, caption in zip(htmls, captions):
caption = 'custom plot' if caption == '' else caption
sectionvar = self._sectionvars[section]
global_id = self._get_id()
div_klass = self._sectionvars[section]
self.fnames.append('%s-#-%s-#-custom' % (caption, sectionvar))
self._sectionlabels.append(sectionvar)
self.html.append(
html_template.substitute(div_klass=div_klass, id=global_id,
caption=caption, html=html))
def add_bem_to_section(self, subject, caption='BEM', section='bem',
decim=2, n_jobs=1, subjects_dir=None):
"""Renders a bem slider html str.
Parameters
----------
subject : str
Subject name.
caption : str
A caption for the bem.
section : str
Name of the section. If section already exists, the bem
will be appended to the end of the section.
decim : int
Use this decimation factor for generating MRI/BEM images
(since it can be time consuming).
n_jobs : int
Number of jobs to run in parallel.
subjects_dir : str | None
Path to the SUBJECTS_DIR. If None, the path is obtained by using
the environment variable SUBJECTS_DIR.
Notes
-----
.. versionadded:: 0.9.0
"""
caption = 'custom plot' if caption == '' else caption
html = self._render_bem(subject=subject, subjects_dir=subjects_dir,
decim=decim, n_jobs=n_jobs, section=section,
caption=caption)
html, caption, _ = self._validate_input(html, caption, section)
sectionvar = self._sectionvars[section]
self.fnames.append('%s-#-%s-#-custom' % (caption[0], sectionvar))
self._sectionlabels.append(sectionvar)
self.html.extend(html)
###########################################################################
# HTML rendering
def _render_one_axis(self, slices_iter, name, global_id, cmap,
n_elements, n_jobs):
"""Render one axis of the array.
"""
global_id = global_id or name
html = []
slices, slices_range = [], []
html.append(u'<div class="col-xs-6 col-md-4">')
slides_klass = '%s-%s' % (name, global_id)
use_jobs = min(n_jobs, max(1, n_elements))
parallel, p_fun, _ = parallel_func(_iterate_mri_slices, use_jobs)
r = parallel(p_fun(name, ind, global_id, slides_klass, data, cmap)
for ind, data in slices_iter)
slices_range, slices = zip(*r)
# Render the slider
slider_id = 'select-%s-%s' % (name, global_id)
html.append(u'<div id="%s"></div>' % slider_id)
html.append(u'<ul class="thumbnails">')
# Render the slices
html.append(u'\n'.join(slices))
html.append(u'</ul>')
html.append(_build_html_slider(slices_range, slides_klass, slider_id))
html.append(u'</div>')
return '\n'.join(html)
###########################################################################
# global rendering functions
@verbose
def _init_render(self, verbose=None):
"""Initialize the renderer.
"""
inc_fnames = ['jquery-1.10.2.min.js', 'jquery-ui.min.js',
'bootstrap.min.js', 'jquery-ui.min.css',
'bootstrap.min.css']
include = list()
for inc_fname in inc_fnames:
logger.info('Embedding : %s' % inc_fname)
f = open(op.join(op.dirname(__file__), 'html', inc_fname),
'r')
if inc_fname.endswith('.js'):
include.append(u'<script type="text/javascript">' +
f.read() + u'</script>')
elif inc_fname.endswith('.css'):
include.append(u'<style type="text/css">' +
f.read() + u'</style>')
f.close()
self.include = ''.join(include)
@verbose
def parse_folder(self, data_path, pattern='*.fif', n_jobs=1, mri_decim=2,
sort_sections=True, on_error='warn', verbose=None):
"""Renders all the files in the folder.
Parameters
----------
data_path : str
Path to the folder containing data whose HTML report will be
created.
pattern : str | list of str
Filename pattern(s) to include in the report.
Example: [\*raw.fif, \*ave.fif] will include Raw as well as Evoked
files.
n_jobs : int
Number of jobs to run in parallel.
mri_decim : int
Use this decimation factor for generating MRI/BEM images
(since it can be time consuming).
sort_sections : bool
If True, sort sections in the order: raw -> events -> epochs
-> evoked -> covariance -> trans -> mri -> forward -> inverse.
on_error : str
What to do if a file cannot be rendered. Can be 'ignore',
'warn' (default), or 'raise'.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
"""
valid_errors = ['ignore', 'warn', 'raise']
if on_error not in valid_errors:
raise ValueError('on_error must be one of %s, not %s'
% (valid_errors, on_error))
self._sort = sort_sections
n_jobs = check_n_jobs(n_jobs)
self.data_path = data_path
if self.title is None:
self.title = 'MNE Report for ...%s' % self.data_path[-20:]
if not isinstance(pattern, (list, tuple)):
pattern = [pattern]
# iterate through the possible patterns
fnames = list()
for p in pattern:
fnames.extend(_recursive_search(self.data_path, p))
if self.info_fname is not None:
info = read_info(self.info_fname)
sfreq = info['sfreq']
else:
warnings.warn('`info_fname` not provided. Cannot render '
'-cov.fif(.gz) and -trans.fif(.gz) files.')
info, sfreq = None, None
cov = None
if self.cov_fname is not None:
cov = read_cov(self.cov_fname)
baseline = self.baseline
# render plots in parallel; check that n_jobs <= # of files
logger.info('Iterating over %s potential files (this may take some '
'time)' % len(fnames))
use_jobs = min(n_jobs, max(1, len(fnames)))
parallel, p_fun, _ = parallel_func(_iterate_files, use_jobs)
r = parallel(p_fun(self, fname, info, cov, baseline, sfreq, on_error)
for fname in np.array_split(fnames, use_jobs))
htmls, report_fnames, report_sectionlabels = zip(*r)
# combine results from n_jobs discarding plots not rendered
self.html = [html for html in sum(htmls, []) if html is not None]
self.fnames = [fname for fname in sum(report_fnames, []) if
fname is not None]
self._sectionlabels = [slabel for slabel in
sum(report_sectionlabels, [])
if slabel is not None]
# find unique section labels
self.sections = sorted(set(self._sectionlabels))
self._sectionvars = dict(zip(self.sections, self.sections))
# render mri
if self.subjects_dir is not None and self.subject is not None:
logger.info('Rendering BEM')
self.html.append(self._render_bem(self.subject, self.subjects_dir,
mri_decim, n_jobs))
self.fnames.append('bem')
self._sectionlabels.append('mri')
else:
warnings.warn('`subjects_dir` and `subject` not provided.'
' Cannot render MRI and -trans.fif(.gz) files.')
def save(self, fname=None, open_browser=True, overwrite=False):
"""Save html report and open it in browser.
Parameters
----------
fname : str
File name of the report.
open_browser : bool
Open html browser after saving if True.
overwrite : bool
If True, overwrite report if it already exists.
"""
if fname is None:
if not hasattr(self, 'data_path'):
self.data_path = op.dirname(__file__)
warnings.warn('`data_path` not provided. Using %s instead'
% self.data_path)
fname = op.realpath(op.join(self.data_path, 'report.html'))
else:
fname = op.realpath(fname)
self._render_toc()
html = footer_template.substitute(date=time.strftime("%B %d, %Y"),
current_year=time.strftime("%Y"))
self.html.append(html)
if not overwrite and op.isfile(fname):
msg = ('Report already exists at location %s. '
'Overwrite it (y/[n])? '
% fname)
answer = moves.input(msg)
if answer.lower() == 'y':
overwrite = True
if overwrite or not op.isfile(fname):
logger.info('Saving report to location %s' % fname)
fobj = codecs.open(fname, 'w', 'utf-8')
fobj.write(_fix_global_ids(u''.join(self.html)))
fobj.close()
# remove header, TOC and footer to allow more saves
self.html.pop(0)
self.html.pop(0)
self.html.pop()
if open_browser:
import webbrowser
webbrowser.open_new_tab('file://' + fname)
return fname
@verbose
def _render_toc(self, verbose=None):
"""Render the Table of Contents.
"""
logger.info('Rendering : Table of Contents')
html_toc = u'<div id="container">'
html_toc += u'<div id="toc"><center><h4>CONTENTS</h4></center>'
global_id = 1
# Reorder self.sections to reflect natural ordering
if self._sort_sections:
sections = list(set(self.sections) & set(SECTION_ORDER))
custom = [section for section in self.sections if section
not in SECTION_ORDER]
order = [sections.index(section) for section in SECTION_ORDER if
section in sections]
self.sections = np.array(sections)[order].tolist() + custom
# Sort by section
html, fnames, sectionlabels = [], [], []
for section in self.sections:
logger.info('%s' % section)
for sectionlabel, this_html, fname in (zip(self._sectionlabels,
self.html, self.fnames)):
if self._sectionvars[section] == sectionlabel:
html.append(this_html)
fnames.append(fname)
sectionlabels.append(sectionlabel)
logger.info('\t... %s' % fname[-20:])
color = _is_bad_fname(fname)
div_klass, tooltip, text = _get_toc_property(fname)
# loop through conditions for evoked
if fname.endswith(('-ave.fif', '-ave.fif.gz',
'(whitened)')):
text = os.path.basename(fname)
if fname.endswith('(whitened)'):
fname = fname[:-11]
# XXX: remove redundant read_evokeds
evokeds = read_evokeds(fname, verbose=False)
html_toc += toc_list.substitute(
div_klass=div_klass, id=None, tooltip=fname,
color='#428bca', text=text)
html_toc += u'<li class="evoked"><ul>'
for ev in evokeds:
html_toc += toc_list.substitute(
div_klass=div_klass, id=global_id,
tooltip=fname, color=color, text=ev.comment)
global_id += 1
html_toc += u'</ul></li>'
elif fname.endswith(tuple(VALID_EXTENSIONS +
['bem', 'custom'])):
html_toc += toc_list.substitute(div_klass=div_klass,
id=global_id,
tooltip=tooltip,
color=color,
text=text)
global_id += 1
html_toc += u'\n</ul></div>'
html_toc += u'<div id="content">'
# The sorted html (according to section)
self.html = html
self.fnames = fnames
self._sectionlabels = sectionlabels
html_header = header_template.substitute(title=self.title,
include=self.include,
sections=self.sections,
sectionvars=self._sectionvars)
self.html.insert(0, html_header) # Insert header at position 0
self.html.insert(1, html_toc) # insert TOC
def _render_array(self, array, global_id=None, cmap='gray',
limits=None, n_jobs=1):
"""Render mri without bem contours.
"""
html = []
html.append(u'<div class="row">')
# Axial
limits = limits or {}
axial_limit = limits.get('axial')
axial_slices_gen = _iterate_axial_slices(array, axial_limit)
html.append(
self._render_one_axis(axial_slices_gen, 'axial',
global_id, cmap, array.shape[1], n_jobs))
# Sagittal
sagittal_limit = limits.get('sagittal')
sagittal_slices_gen = _iterate_sagittal_slices(array, sagittal_limit)
html.append(
self._render_one_axis(sagittal_slices_gen, 'sagittal',
global_id, cmap, array.shape[1], n_jobs))
html.append(u'</div>')
html.append(u'<div class="row">')
# Coronal
coronal_limit = limits.get('coronal')
coronal_slices_gen = _iterate_coronal_slices(array, coronal_limit)
html.append(
self._render_one_axis(coronal_slices_gen, 'coronal',
global_id, cmap, array.shape[1], n_jobs))
# Close section
html.append(u'</div>')
return '\n'.join(html)
def _render_one_bem_axis(self, mri_fname, surf_fnames, global_id,
shape, orientation='coronal', decim=2, n_jobs=1):
"""Render one axis of bem contours.
"""
orientation_name2axis = dict(sagittal=0, axial=1, coronal=2)
orientation_axis = orientation_name2axis[orientation]
n_slices = shape[orientation_axis]
orig_size = np.roll(shape, orientation_axis)[[1, 2]]
name = orientation
html = []
html.append(u'<div class="col-xs-6 col-md-4">')
slides_klass = '%s-%s' % (name, global_id)
sl = np.arange(0, n_slices, decim)
kwargs = dict(mri_fname=mri_fname, surf_fnames=surf_fnames, show=False,
orientation=orientation, img_output=orig_size)
imgs = _figs_to_mrislices(sl, n_jobs, **kwargs)
slices = []
img_klass = 'slideimg-%s' % name
div_klass = 'span12 %s' % slides_klass
for ii, img in enumerate(imgs):
slice_id = '%s-%s-%s' % (name, global_id, sl[ii])
caption = u'Slice %s %s' % (name, sl[ii])
first = True if ii == 0 else False
slices.append(_build_html_image(img, slice_id, div_klass,
img_klass, caption, first))
# Render the slider
slider_id = 'select-%s-%s' % (name, global_id)
html.append(u'<div id="%s"></div>' % slider_id)
html.append(u'<ul class="thumbnails">')
# Render the slices
html.append(u'\n'.join(slices))
html.append(u'</ul>')
html.append(_build_html_slider(sl, slides_klass, slider_id))
html.append(u'</div>')
return '\n'.join(html)
def _render_image(self, image, cmap='gray', n_jobs=1):
"""Render one slice of mri without bem.
"""
import nibabel as nib
global_id = self._get_id()
if 'mri' not in self.sections:
self.sections.append('mri')
self._sectionvars['mri'] = 'mri'
nim = nib.load(image)
data = nim.get_data()
shape = data.shape
limits = {'sagittal': range(0, shape[0], 2),
'axial': range(0, shape[1], 2),
'coronal': range(0, shape[2], 2)}
name = op.basename(image)
html = u'<li class="mri" id="%d">\n' % global_id
html += u'<h2>%s</h2>\n' % name
html += self._render_array(data, global_id=global_id,
cmap=cmap, limits=limits,
n_jobs=n_jobs)
html += u'</li>\n'
return html
def _render_raw(self, raw_fname):
"""Render raw.
"""
global_id = self._get_id()
div_klass = 'raw'
caption = u'Raw : %s' % raw_fname
raw = Raw(raw_fname)
n_eeg = len(pick_types(raw.info, meg=False, eeg=True))
n_grad = len(pick_types(raw.info, meg='grad'))
n_mag = len(pick_types(raw.info, meg='mag'))
pick_eog = pick_types(raw.info, meg=False, eog=True)
if len(pick_eog) > 0:
eog = ', '.join(np.array(raw.info['ch_names'])[pick_eog])
else:
eog = 'Not available'
pick_ecg = pick_types(raw.info, meg=False, ecg=True)
if len(pick_ecg) > 0:
ecg = ', '.join(np.array(raw.info['ch_names'])[pick_ecg])
else:
ecg = 'Not available'
meas_date = raw.info['meas_date']
if meas_date is not None:
meas_date = dt.fromtimestamp(meas_date[0]).strftime("%B %d, %Y")
tmin = raw.first_samp / raw.info['sfreq']
tmax = raw.last_samp / raw.info['sfreq']
html = raw_template.substitute(div_klass=div_klass,
id=global_id,
caption=caption,
info=raw.info,
meas_date=meas_date,
n_eeg=n_eeg, n_grad=n_grad,
n_mag=n_mag, eog=eog,
ecg=ecg, tmin=tmin, tmax=tmax)
return html
def _render_forward(self, fwd_fname):
"""Render forward.
"""
div_klass = 'forward'
caption = u'Forward: %s' % fwd_fname
fwd = read_forward_solution(fwd_fname)
repr_fwd = re.sub('>', '', re.sub('<', '', repr(fwd)))
global_id = self._get_id()
html = repr_template.substitute(div_klass=div_klass,
id=global_id,
caption=caption,
repr=repr_fwd)
return html
def _render_inverse(self, inv_fname):
"""Render inverse.
"""
div_klass = 'inverse'
caption = u'Inverse: %s' % inv_fname
inv = read_inverse_operator(inv_fname)
repr_inv = re.sub('>', '', re.sub('<', '', repr(inv)))
global_id = self._get_id()
html = repr_template.substitute(div_klass=div_klass,
id=global_id,
caption=caption,
repr=repr_inv)
return html
def _render_evoked(self, evoked_fname, baseline=None, figsize=None):
"""Render evoked.
"""
evokeds = read_evokeds(evoked_fname, baseline=baseline, verbose=False)
html = []
for ev in evokeds:
global_id = self._get_id()
kwargs = dict(show=False)
img = _fig_to_img(ev.plot, **kwargs)
caption = u'Evoked : %s (%s)' % (evoked_fname, ev.comment)
div_klass = 'evoked'
img_klass = 'evoked'
show = True
html.append(image_template.substitute(img=img, id=global_id,
div_klass=div_klass,
img_klass=img_klass,
caption=caption,
show=show))
has_types = []
if len(pick_types(ev.info, meg=False, eeg=True)) > 0:
has_types.append('eeg')
if len(pick_types(ev.info, meg='grad', eeg=False)) > 0:
has_types.append('grad')
if len(pick_types(ev.info, meg='mag', eeg=False)) > 0:
has_types.append('mag')
for ch_type in has_types:
kwargs.update(ch_type=ch_type)
img = _fig_to_img(ev.plot_topomap, **kwargs)
caption = u'Topomap (ch_type = %s)' % ch_type
html.append(image_template.substitute(img=img,
div_klass=div_klass,
img_klass=img_klass,
caption=caption,
show=show))
return '\n'.join(html)
def _render_eve(self, eve_fname, sfreq=None):
"""Render events.
"""
global_id = self._get_id()
events = read_events(eve_fname)
kwargs = dict(events=events, sfreq=sfreq, show=False)
img = _fig_to_img(plot_events, **kwargs)
caption = 'Events : ' + eve_fname
div_klass = 'events'
img_klass = 'events'
show = True
html = image_template.substitute(img=img, id=global_id,
div_klass=div_klass,
img_klass=img_klass,
caption=caption,
show=show)
return html
def _render_epochs(self, epo_fname):
"""Render epochs.
"""
global_id = self._get_id()
epochs = read_epochs(epo_fname)
kwargs = dict(subject=self.subject, show=False)
img = _fig_to_img(epochs.plot_drop_log, **kwargs)
caption = 'Epochs : ' + epo_fname
div_klass = 'epochs'
img_klass = 'epochs'
show = True
html = image_template.substitute(img=img, id=global_id,
div_klass=div_klass,
img_klass=img_klass,
caption=caption,
show=show)
return html
def _render_cov(self, cov_fname, info_fname):
"""Render cov.
"""
global_id = self._get_id()
cov = read_cov(cov_fname)
fig, _ = plot_cov(cov, info_fname, show=False)
img = _fig_to_img(fig=fig)
caption = 'Covariance : %s (n_samples: %s)' % (cov_fname, cov.nfree)
div_klass = 'covariance'
img_klass = 'covariance'
show = True
html = image_template.substitute(img=img, id=global_id,
div_klass=div_klass,
img_klass=img_klass,
caption=caption,
show=show)
return html
def _render_whitened_evoked(self, evoked_fname, noise_cov, baseline):
"""Show whitened evoked.
"""
global_id = self._get_id()
evokeds = read_evokeds(evoked_fname, verbose=False)
html = []
for ev in evokeds:
ev = read_evokeds(evoked_fname, ev.comment, baseline=baseline,
verbose=False)
global_id = self._get_id()
kwargs = dict(noise_cov=noise_cov, show=False)
img = _fig_to_img(ev.plot_white, **kwargs)
caption = u'Whitened evoked : %s (%s)' % (evoked_fname, ev.comment)
div_klass = 'evoked'
img_klass = 'evoked'
show = True
html.append(image_template.substitute(img=img, id=global_id,
div_klass=div_klass,
img_klass=img_klass,
caption=caption,
show=show))
return '\n'.join(html)
def _render_trans(self, trans, path, info, subject,
subjects_dir, image_format='png'):
"""Render trans.
"""
kwargs = dict(info=info, trans=trans, subject=subject,
subjects_dir=subjects_dir)
try:
img = _iterate_trans_views(function=plot_trans, **kwargs)
except IOError:
img = _iterate_trans_views(function=plot_trans, source='head',
**kwargs)
if img is not None:
global_id = self._get_id()
caption = 'Trans : ' + trans
div_klass = 'trans'
img_klass = 'trans'
show = True
html = image_template.substitute(img=img, id=global_id,
div_klass=div_klass,
img_klass=img_klass,
caption=caption,
width=75,
show=show)
return html
def _render_bem(self, subject, subjects_dir, decim, n_jobs,
section='mri', caption='BEM'):
"""Render mri+bem.
"""
import nibabel as nib
subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
# Get the MRI filename
mri_fname = op.join(subjects_dir, subject, 'mri', 'T1.mgz')
if not op.isfile(mri_fname):
warnings.warn('MRI file "%s" does not exist' % mri_fname)
# Get the BEM surface filenames
bem_path = op.join(subjects_dir, subject, 'bem')
if not op.isdir(bem_path):
warnings.warn('Subject bem directory "%s" does not exist' %
bem_path)
return self._render_image(mri_fname, cmap='gray', n_jobs=n_jobs)
surf_fnames = []
for surf_name in ['*inner_skull', '*outer_skull', '*outer_skin']:
surf_fname = glob(op.join(bem_path, surf_name + '.surf'))
if len(surf_fname) > 0:
surf_fname = surf_fname[0]
else:
warnings.warn('No surface found for %s.' % surf_name)
return self._render_image(mri_fname, cmap='gray')
surf_fnames.append(surf_fname)
# XXX : find a better way to get max range of slices
nim = nib.load(mri_fname)
data = nim.get_data()
shape = data.shape
del data # free up memory
html = []
global_id = self._get_id()
if section == 'mri' and 'mri' not in self.sections:
self.sections.append('mri')
self._sectionvars['mri'] = 'mri'
name = caption
html += u'<li class="mri" id="%d">\n' % global_id
html += u'<h2>%s</h2>\n' % name
html += u'<div class="row">'
html += self._render_one_bem_axis(mri_fname, surf_fnames, global_id,
shape, 'axial', decim, n_jobs)
html += self._render_one_bem_axis(mri_fname, surf_fnames, global_id,
shape, 'sagittal', decim, n_jobs)
html += u'</div><div class="row">'
html += self._render_one_bem_axis(mri_fname, surf_fnames, global_id,
shape, 'coronal', decim, n_jobs)
html += u'</div>'
html += u'</li>\n'
return ''.join(html)
def _clean_varnames(s):
# Remove invalid characters
s = re.sub('[^0-9a-zA-Z_]', '', s)
# add report_ at the beginning so that the javascript class names
# are valid ones
return 'report_' + s
def _recursive_search(path, pattern):
"""Auxiliary function for recursive_search of the directory.
"""
filtered_files = list()
for dirpath, dirnames, files in os.walk(path):
for f in fnmatch.filter(files, pattern):
# only the following file types are supported
# this ensures equitable distribution of jobs
if f.endswith(tuple(VALID_EXTENSIONS)):
filtered_files.append(op.realpath(op.join(dirpath, f)))
return filtered_files
def _fix_global_ids(html):
"""Auxiliary function for fixing the global_ids after reordering in
_render_toc().
"""
html = re.sub('id="\d+"', 'id="###"', html)
global_id = 1
while len(re.findall('id="###"', html)) > 0:
html = re.sub('id="###"', 'id="%s"' % global_id, html, count=1)
global_id += 1
return html
| bsd-3-clause |
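The `Report` class above exposes a small public API: `parse_folder`, `add_figs_to_section`, `add_images_to_section`, `add_htmls_to_section`, `add_bem_to_section`, and `save`. The sketch below drives it end to end, assuming the module is importable as `mne.report`; every path, the subject name, and the title are hypothetical placeholders.

```python
# Hedged usage sketch for the Report class above; all paths and names are made up.
import matplotlib.pyplot as plt
from mne.report import Report

report = Report(info_fname='/path/to/sample_audvis-ave.fif',
                subject='sample', subjects_dir='/path/to/subjects_dir',
                title='Demo report')
# Render every matching file found under data_path into sections.
report.parse_folder(data_path='/path/to/meg_data', pattern='*.fif',
                    n_jobs=1, on_error='warn')

# Append a custom matplotlib figure to its own section.
fig = plt.figure()
plt.plot([0, 1], [0, 1])
report.add_figs_to_section(fig, captions='Custom diagnostic', section='custom')

report.save('report.html', open_browser=False, overwrite=True)
```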
thekatiebr/NEMO_II | KnowledgeBase/KnowledgeBase.py | 1 | 9835 | import MySQLdb
import MySQLdb.cursors
import sys
import pandas
import json
import pandas
##############################################################################################################
# KnowledgeBase class #
# Prototype KnowledgeBase for NEMO #
##############################################################################################################
# ****** INSTANCE VARIABLES ****** #
# HOST - host for the database (typically localhost), STRING #
# PORT - Port number for database, MUST BE AN INTEGER!! #
# USER - User name for database, STRING #
# PASSWD - Password for database, STRING #
# DATABASE - Database name, STRING #
# db - object connected to the database, DATABASE CONNECTION OBJECT #
# cursor - cursor that executes MySQL commands, CURSOR OBJECT #
# X - attribute names, LIST of STRINGS #
# Y - target name, STRING #
# schema - list containing MySQL compatible schema, format: ATTR_NAME ATTR_TYPE, LIST OF STRINGS #
# #
###############################################################################################################
class KnowledgeBase:
#method to import data into the MySQL server
#Preconditions:
# * KnowledgeBase object is created with connection to db established
# * data_file - a text file containing the data to be added to the databases
# Assumptions: The file is one comma-delimited record per line.
# The first value for each line is the value to classify
# The remaining values on each line are the attributes
# * schema_file - a text file containing the MySQL schema for the table
# Assumptions: column_name data_type
# Assumptions: On separate lines, the file contains the MySQL schema for creation of the DATA table
def importData(self, data_file, schema_file):
self.cursor.execute("drop table if exists DATA;")
#db.commit()
#read in schema
self.readSchemaFile(schema_file)
stmt = "create table DATA ( "
while len(self.schema) > 1:
stmt = stmt + self.schema.pop() + ", "
stmt = stmt + self.schema.pop() + " );"
#create new data table
self.cursor.execute(stmt);
self.db.commit()
#add new records
f = open(data_file, 'r')
for line in f:
#print line
stmt = "insert into DATA values ( "
curr_ = line.split(',')
for i in range(0,len(curr_)):
curr_[i] = curr_[i].strip('\n')
curr = tuple(curr_)
#print curr
#print len(curr)
for i in range(0, len(curr)-1):
stmt = stmt + "%s, "
stmt = stmt + "%s )"
#print stmt
self.cursor.execute(stmt, curr)
self.db.commit()
#close the database
f.close()
#method to read schema file
#Preconditions
# * schema_file - a text file containing the MySQL schema for the table
# Assumptions: column_name data_type
# Assumptions: On separate lines, the file contains the MySQL schema for creation of the DATA table
#Postconditions: Returns list object with schema
def readSchemaFile(self, schema_file):
f = open(schema_file, 'r')
self.schema = []
for line in f:
self.schema.append(line.strip("\n"))
f.close()
self.schema.reverse()
self.getXYTokens()
#return schema
#method to get a list of names from the attributes and targets
#Preconditions:
# * schema has already been read from file (ie readSchemaFile has already been called)
#Postconditions: self.X has names of the attributes, self.Y has the names of the target
def getXYTokens(self):
self.X = []
for i in range(0,len(self.schema)):
tokens = self.schema[i].split(' ')
if(tokens[0] != self.Y):
self.X.append(tokens[0])
self.X.reverse()
#tokens = self.schema[len(self.schema)-1].split(' ')
#self.Y = tokens[0]
def updateDatabaseWithResults(self, algorithm):
# results = (algorithm.results['ID'], algorithm.results['Name'], algorithm.results['Accuracy'], algorithm.results['Precision'], algorithm.results['Recall'], algorithm.results['F1'], str(algorithm.results['Confusion_Matrix']).replace('\n', ""))
# stmt = "insert into AlgorithmResults(algorithm_id, algorithm_name, accuracy, prec, recall, f1, confusion_matrix) values (%s,%s,%s,%s,%s,%s,%s)"
results = (algorithm.results['ID'], algorithm.results['Name'], algorithm.results['Accuracy'], algorithm.results['Precision'], algorithm.results['Recall'], algorithm.results['F1'])
stmt = "insert into AlgorithmResults(algorithm_id, algorithm_name, accuracy, prec, recall, f1) values (%s,%s,%s,%s,%s,%s)"
# print stmt
# print str(results)
self.executeQuery(stmt, results)
def getData(self):
stmt = "select * from DATA"
return pandas.read_sql_query(stmt, self.db)
#Constructor
#Preconditions:
# * login_file - a text file containing the login and database information
# Assumptions: On separate lines, the file must contain HOST, PORT, MySQL USER NAME, PASSWORD, DATABASE
#Postconditions: Connects to database
def __init__(self, config_file):
with open(config_file) as fd:
json_data = json.load(fd)
info = json_data['DATABASE']
self.HOST = info['HOST']
self.PORT = int(info['PORT'])
self.USER = info['USER']
self.PASSWD = info['PASS']
self.DATABASE = info['DB']
self.db = MySQLdb.connect(host = self.HOST, port = self.PORT, user = self.USER, passwd = self.PASSWD, db = self.DATABASE)
self.cursor = self.db.cursor()
file_info = json_data['DATA']
self.schema = None
self.X = None
self.Y = file_info['CLASS']
print file_info['DATA']
print file_info['SCHEMA']
print self.Y
self.multi = bool(file_info['MULTI-CLASS'])
self.importData(file_info['DATA'], file_info['SCHEMA'])
def executeQuery(self, query, args=None):
complete = False
#print query
#if args is not None: print args
while not complete:
try:
if args is None:
self.cursor.execute(query)
self.db.commit()
else:
self.cursor.execute(query, args)
except (AttributeError, MySQLdb.OperationalError):
self.db = MySQLdb.connect(host = self.HOST, port = self.PORT, user = self.USER, passwd = self.PASSWD, db = self.DATABASE)
self.cursor = self.db.cursor()
complete = True
def fetchOne(self):
complete = False
while not complete:
try:
return self.cursor.fetchone()
except (AttributeError, MySQLdb.OperationalError):
self.db = MySQLdb.connect(host = self.HOST, port = self.PORT, user = self.USER, passwd = self.PASSWD, db = self.DATABASE)
self.cursor = self.db.cursor()
complete=True
def fetchAll(self):
complete = False
while not complete:
try:
return self.cursor.fetchall()
except (AttributeError, MySQLdb.OperationalError):
self.db = MySQLdb.connect(host = self.HOST, port = self.PORT, user = self.USER, passwd = self.PASSWD, db = self.DATABASE)
self.cursor = self.db.cursor()
complete=True
def removeModelFromRepository(self, model):
stmt = "delete from ModelRepository where algorithm_id = " + model.algorithm_id
self.executeQuery(stmt)
def updateDatabaseWithModel(self, model):
#check to see if model is in database
#if so, add modified char to id (mod_id)
# update ModelRepository set algorithm_id = mod_id where algorithm_id = model.algorithm_id
stmt = "select * from ModelRepository where algorithm_id = " + model.algorithm_id
self.executeQuery(stmt)
row = self.fetchOne()
mod_id = None
if row is not None: #it exists in the database
mod_id = model.algorithm_id + "*"
stmt = "update ModelRepository set algorithm_id = \'" + mod_id + "\' where algorithm_id = \'" + model.algorithm_id + "\'"
#print stmt
self.executeQuery(stmt)
arguments = model.get_params()
#print arguments
for key, value in arguments.iteritems():
#print key + ": " + str(value)
stmt = "insert into ModelRepository (algorithm_id, algorithm_name, arg_type, arg_val) values ( %s, %s, %s, %s)"
args = (model.algorithm_id, model.algorithm_name, key, str(value))
self.executeQuery(stmt, args)
#remove backup model...
if mod_id is not None: #we had to use mod_id
stmt = "delete from ModelRepository where algorithm_id = \'" + mod_id + "\'"
#print stmt
self.executeQuery(stmt)
self.db.commit()
def addCurrentModel(self, model):
stmt = "insert into CurrentModel(algorithm_id) values (%s)"
args = (model.algorithm_id,)
self.executeQuery(stmt, args)
def removeCurrentModel(self, model):
stmt = "delete from CurrentModel where algorithm_id = " + model.algorithm_id
self.executeQuery(stmt)
def getData(self):
stmt = "select * from DATA"
return pandas.read_sql_query(stmt, self.db)
#DESTRUCTOR
#commits all changes to database and closes the connection
def __del__(self):
self.db.commit()
self.db.close()
############################################################################################################################################
# END OF KNOWLEDGE BASE CLASS #
############################################################################################################################################
######## Executable For Testing #########
def main():
kb = KnowledgeBase("config/config.json")
if __name__ == "__main__":
main()
| apache-2.0 |
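`KnowledgeBase.__init__` above reads a JSON configuration with a `DATABASE` block (HOST, PORT, USER, PASS, DB) and a `DATA` block (DATA, SCHEMA, CLASS, MULTI-CLASS). The sketch below builds such a configuration and instantiates the class; every value is a placeholder inferred from the keys the constructor reads, the import path merely mirrors the file's location, and the module itself targets Python 2 (note its bare `print` statements).

```python
# Hypothetical configuration for the KnowledgeBase class above; all values are
# placeholders inferred from the keys read in __init__.
import json
from KnowledgeBase.KnowledgeBase import KnowledgeBase

config = {
    "DATABASE": {"HOST": "localhost", "PORT": "3306", "USER": "nemo",
                 "PASS": "secret", "DB": "nemo_db"},
    "DATA": {"DATA": "data/records.csv", "SCHEMA": "data/schema.txt",
             "CLASS": "diagnosis", "MULTI-CLASS": "True"},
}
with open("config/config.json", "w") as fd:
    json.dump(config, fd)

kb = KnowledgeBase("config/config.json")  # imports the CSV into the DATA table
frame = kb.getData()                      # DATA table as a pandas DataFrame
```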
probml/pyprobml | scripts/gp_classify_spaceflu_1d_pymc3.py | 1 | 1870 | # Gaussian process binary classification in 1d
# Code is based on
# https://github.com/aloctavodia/BAP/blob/master/code/Chp7/07_Gaussian%20process.ipynb

import pymc3 as pm
import numpy as np
import pandas as pd
from scipy import stats
from scipy.special import expit as logistic
import matplotlib.pyplot as plt
import arviz as az
from sklearn.datasets import load_iris

url = 'https://github.com/aloctavodia/BAP/blob/master/code/data/space_flu.csv?raw=true'
df_sf = pd.read_csv(url)
age = df_sf.age.values[:, None]
space_flu = df_sf.space_flu

ax = df_sf.plot.scatter('age', 'space_flu', figsize=(8, 5))
ax.set_yticks([0, 1])
ax.set_yticklabels(['healthy', 'sick'])
plt.savefig('../figures/space_flu.pdf', bbox_inches='tight')

with pm.Model() as model_space_flu:
    ℓ = pm.HalfCauchy('ℓ', 1)
    cov = pm.gp.cov.ExpQuad(1, ℓ) + pm.gp.cov.WhiteNoise(1E-5)
    gp = pm.gp.Latent(cov_func=cov)
    f = gp.prior('f', X=age)
    y_ = pm.Bernoulli('y', p=pm.math.sigmoid(f), observed=space_flu)
    trace_space_flu = pm.sample(
        1000, chains=1, compute_convergence_checks=False)

X_new = np.linspace(0, 80, 200)[:, None]

with model_space_flu:
    f_pred = gp.conditional('f_pred', X_new)
    pred_samples = pm.sample_posterior_predictive(trace_space_flu,
                                                  var_names=['f_pred'],
                                                  samples=1000)

_, ax = plt.subplots(figsize=(10, 6))
fp = logistic(pred_samples['f_pred'])
fp_mean = np.nanmean(fp, 0)
ax.scatter(age, np.random.normal(space_flu, 0.02),
           marker='.', color=[f'C{ci}' for ci in space_flu])
ax.plot(X_new[:, 0], fp_mean, 'C2', lw=3)
az.plot_hpd(X_new[:, 0], fp, color='C2')
ax.set_yticks([0, 1])
ax.set_yticklabels(['healthy', 'sick'])
ax.set_xlabel('age')
plt.savefig('../figures/gp_classify_spaceflu.pdf', dpi=300)
| mit |
xuewei4d/scikit-learn | examples/manifold/plot_compare_methods.py | 13 | 2823 | """
=========================================
Comparison of Manifold Learning methods
=========================================
An illustration of dimensionality reduction on the S-curve dataset
with various manifold learning methods.
For a discussion and comparison of these algorithms, see the
:ref:`manifold module page <manifold>`
For a similar example, where the methods are applied to a
sphere dataset, see :ref:`sphx_glr_auto_examples_manifold_plot_manifold_sphere.py`
Note that the purpose of MDS is to find a low-dimensional
representation of the data (here 2D) in which the distances respect well
the distances in the original high-dimensional space. Unlike other
manifold-learning algorithms, it does not seek an isotropic
representation of the data in the low-dimensional space.
"""
# Author: Jake Vanderplas -- <[email protected]>
print(__doc__)
from collections import OrderedDict
from functools import partial
from time import time
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.ticker import NullFormatter
from sklearn import manifold, datasets
# Next line to silence pyflakes. This import is needed.
Axes3D
n_points = 1000
X, color = datasets.make_s_curve(n_points, random_state=0)
n_neighbors = 10
n_components = 2
# Create figure
fig = plt.figure(figsize=(15, 8))
fig.suptitle("Manifold Learning with %i points, %i neighbors"
             % (n_points, n_neighbors), fontsize=14)
# Add 3d scatter plot
ax = fig.add_subplot(251, projection='3d')
ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=color, cmap=plt.cm.Spectral)
ax.view_init(4, -72)
# Set-up manifold methods
LLE = partial(manifold.LocallyLinearEmbedding,
n_neighbors, n_components, eigen_solver='auto')
methods = OrderedDict()
methods['LLE'] = LLE(method='standard')
methods['LTSA'] = LLE(method='ltsa')
methods['Hessian LLE'] = LLE(method='hessian')
methods['Modified LLE'] = LLE(method='modified')
methods['Isomap'] = manifold.Isomap(n_neighbors, n_components)
methods['MDS'] = manifold.MDS(n_components, max_iter=100, n_init=1)
methods['SE'] = manifold.SpectralEmbedding(n_components=n_components,
n_neighbors=n_neighbors)
methods['t-SNE'] = manifold.TSNE(n_components=n_components, init='pca',
random_state=0)
# Plot results
for i, (label, method) in enumerate(methods.items()):
t0 = time()
Y = method.fit_transform(X)
t1 = time()
print("%s: %.2g sec" % (label, t1 - t0))
ax = fig.add_subplot(2, 5, 2 + i + (i > 3))
ax.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
ax.set_title("%s (%.2g sec)" % (label, t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
ax.axis('tight')
plt.show()
| bsd-3-clause |
bsipocz/pyspeckit | pyspeckit/spectrum/models/n2hp.py | 2 | 6184 | """
===========
N2H+ fitter
===========
Reference for line params:
Daniel, F., Dubernet, M.-L., Meuwly, M., Cernicharo, J., Pagani, L. 2005, MNRAS 363, 1083
http://www.strw.leidenuniv.nl/~moldata/N2H+.html
http://adsabs.harvard.edu/abs/2005MNRAS.363.1083D
Does not yet implement: http://adsabs.harvard.edu/abs/2010ApJ...716.1315K
"""
import numpy as np
from pyspeckit.mpfit import mpfit
from .. import units
from . import fitter,model,modelgrid
import matplotlib.cbook as mpcb
import copy
try:
from astropy.io import fits as pyfits
except ImportError:
import pyfits
try:
import scipy.interpolate
import scipy.ndimage
scipyOK = True
except ImportError:
scipyOK=False
import hyperfine
freq_dict={
'110-011':93.171617e9,
'112-011':93.171913e9,
'112-012':93.171913e9,
'111-010':93.172048e9,
'111-011':93.172048e9,
'111-012':93.172048e9,
'122-011':93.173475e9,
'122-012':93.173475e9,
'123-012':93.173772e9,
'121-010':93.173963e9,
'121-011':93.173963e9,
'121-012':93.173963e9,
'101-010':93.176261e9,
'101-011':93.176261e9,
'101-012':93.176261e9,
}
aval_dict = {
'110-011':3.628,
'112-011':0.907,
'112-012':2.721,
'111-010':1.209,
'111-011':0.907,
'111-012':1.512,
'122-011':2.721,
'122-012':0.907,
'123-012':3.628,
'121-010':2.015,
'121-011':1.512,
'121-012':0.101,
'101-010':0.403,
'101-011':1.209,
'101-012':2.016,
}
line_strength_dict = { # effectively the degeneracy per rotation state...
'110-011':0.333,
'112-011':0.417,
'112-012':1.250,
'111-010':0.333,
'111-011':0.250,
'111-012':0.417,
'122-011':1.250,
'122-012':0.417,
'123-012':2.330,
'121-010':0.556,
'121-011':0.417,
'121-012':0.028,
'101-010':0.111,
'101-011':0.333,
'101-012':0.55,
}
relative_strength_total_degeneracy = {
'110-011':9.0,
'112-011':9.0,
'112-012':9.0,
'111-010':9.0,
'111-011':9.0,
'111-012':9.0,
'122-011':9.0,
'122-012':9.0,
'123-012':9.0,
'121-010':9.0,
'121-011':9.0,
'121-012':9.0,
'101-010':9.0,
'101-011':9.0,
'101-012':9.0,
}
"""
Line strengths of the 15 hyperfine components in the J=1-0 transition. The
thickness of the lines indicates their relative weight compared to the others.
Line strengths are normalized in such a way that summing over all initial J = 1
levels gives the degeneracy of the J = 0 levels, i.e., five for JF1F 012,
three for JF1F 011, and one for JF1F 010. Thus, the sum over all 15
transitions gives the total spin degeneracy of nine.
"""
line_names = freq_dict.keys()
ckms = units.speedoflight_ms / 1e3 #2.99792458e5
voff_lines_dict = dict([(k,(v-93.176261e9)/93.176261e9*ckms) for k,v in freq_dict.iteritems()])
n2hp_vtau = hyperfine.hyperfinemodel(line_names, voff_lines_dict, freq_dict,
line_strength_dict, relative_strength_total_degeneracy)
n2hp_vtau_fitter = n2hp_vtau.fitter
n2hp_vtau_vheight_fitter = n2hp_vtau.vheight_fitter
def n2hp_radex(xarr,
density=4,
column=13,
xoff_v=0.0,
width=1.0,
grid_vwidth=1.0,
grid_vwidth_scale=False,
texgrid=None,
taugrid=None,
hdr=None,
path_to_texgrid='',
path_to_taugrid='',
temperature_gridnumber=3,
debug=False,
verbose=False,
**kwargs):
"""
Use a grid of RADEX-computed models to make a model line spectrum
The RADEX models have to be available somewhere.
OR they can be passed as arrays. If as arrays, the form should be:
texgrid = ((minfreq1,maxfreq1,texgrid1),(minfreq2,maxfreq2,texgrid2))
xarr must be a SpectroscopicAxis instance
xoff_v, width are both in km/s
    grid_vwidth is the velocity assumed when computing the grid in km/s;
    this is important because tau = modeltau / width (see, e.g.,
    Draine 2011 textbook pgs 219-230).
grid_vwidth_scale is True or False: False for LVG, True for Sphere
"""
if texgrid is None and taugrid is None:
if path_to_texgrid == '' or path_to_taugrid=='':
raise IOError("Must specify model grids to use.")
else:
taugrid = [pyfits.getdata(path_to_taugrid)]
texgrid = [pyfits.getdata(path_to_texgrid)]
hdr = pyfits.getheader(path_to_taugrid)
yinds,xinds = np.indices(taugrid[0].shape[1:])
densityarr = (xinds+hdr['CRPIX1']-1)*hdr['CD1_1']+hdr['CRVAL1'] # log density
columnarr = (yinds+hdr['CRPIX2']-1)*hdr['CD2_2']+hdr['CRVAL2'] # log column
minfreq = (4.8,)
maxfreq = (5.0,)
elif len(taugrid)==len(texgrid) and hdr is not None:
minfreq,maxfreq,texgrid = zip(*texgrid)
minfreq,maxfreq,taugrid = zip(*taugrid)
yinds,xinds = np.indices(taugrid[0].shape[1:])
densityarr = (xinds+hdr['CRPIX1']-1)*hdr['CD1_1']+hdr['CRVAL1'] # log density
columnarr = (yinds+hdr['CRPIX2']-1)*hdr['CD2_2']+hdr['CRVAL2'] # log column
else:
raise Exception
# Convert X-units to frequency in GHz
xarr = copy.copy(xarr)
xarr.convert_to_unit('Hz', quiet=True)
tau_nu_cumul = np.zeros(len(xarr))
gridval1 = np.interp(density, densityarr[0,:], xinds[0,:])
gridval2 = np.interp(column, columnarr[:,0], yinds[:,0])
if np.isnan(gridval1) or np.isnan(gridval2):
raise ValueError("Invalid column/density")
if scipyOK:
tau = [scipy.ndimage.map_coordinates(tg[temperature_gridnumber,:,:],np.array([[gridval2],[gridval1]]),order=1) for tg in taugrid]
tex = [scipy.ndimage.map_coordinates(tg[temperature_gridnumber,:,:],np.array([[gridval2],[gridval1]]),order=1) for tg in texgrid]
else:
raise ImportError("Couldn't import scipy, therefore cannot interpolate")
#tau = modelgrid.line_params_2D(gridval1,gridval2,densityarr,columnarr,taugrid[temperature_gridnumber,:,:])
#tex = modelgrid.line_params_2D(gridval1,gridval2,densityarr,columnarr,texgrid[temperature_gridnumber,:,:])
if verbose:
print "density %20.12g column %20.12g: tau %20.12g tex %20.12g" % (density, column, tau, tex)
if debug:
import pdb; pdb.set_trace()
return n2hp_vtau(xarr,Tex=tex,tau=tau,xoff_v=xoff_v,width=width,**kwargs)
| mit |
KarlTDebiec/Ramaplot | __init__.py | 1 | 1945 | # -*- coding: utf-8 -*-
# ramaplot.__init__.py
#
# Copyright (C) 2015 Karl T Debiec
# All rights reserved.
#
# This software may be modified and distributed under the terms of the
# BSD license. See the LICENSE file for details.
"""
General functions.
"""
################################### MODULES ###################################
from __future__ import absolute_import,division,print_function,unicode_literals
################################### CLASSES ###################################
def cmap_ff99SB():
"""
Generates purple->yellow->black colormap.
Generates colormap in style of:
Hornak, Viktor, Abel, Robert, Okur, Asim, Stockbine, Bentley,
Roitberg, Adrian, Simmerling, Carlos, Comparison of Multiple Amber
Force Fields and Development of Improved Protein Backbone
Parameters. Proteins: Structure, Function, and Bioinformatics.
2006. 65. 712-725.
Returns:
cmap (LinearSegmentedColormap): ff99SB-style colormap
"""
from matplotlib.colors import LinearSegmentedColormap
cdict = {"red": [(0, 1, 1)],
"green": [(0, 1, 1)],
"blue": [(0, 0, 0)]}
for i in range(1, 193, 1):
red = 1.0
green = 1.0 - ((i - 1) / 193)
blue = 0.0
cdict["red"] += [(i / 384, red, red)]
cdict["green"] += [(i / 384, green, green)]
cdict["blue"] += [(i / 384, blue, blue)]
for i in range(193, 385, 1):
red = (1.0 - ((i - 193) / 192)) * ((384 - i) / 192) ** 0.3
green = 0.0
blue = (0.0 + ((i - 193) / 192)) * ((384 - i) / 192) ** 0.2
cdict["red"] += [(i / 384, red, red)]
cdict["green"] += [(i / 384, green, green)]
cdict["blue"] += [(i / 384, blue, blue)]
cdict["red"] = tuple(cdict["red"])
cdict["green"] = tuple(cdict["green"])
cdict["blue"] = tuple(cdict["blue"])
return LinearSegmentedColormap("test", cdict)
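# Hedged usage sketch (illustrative only, not part of the original module): the
# returned colormap can be passed anywhere matplotlib accepts a ``cmap``.
#
#     import numpy as np
#     import matplotlib.pyplot as plt
#
#     surface = np.random.rand(36, 36)   # stand-in for a phi/psi free-energy map
#     plt.imshow(surface, cmap=cmap_ff99SB(),
#                extent=(-180, 180, -180, 180))
#     plt.colorbar()
#     plt.show()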
| bsd-3-clause |
huzq/scikit-learn | sklearn/neural_network/_rbm.py | 2 | 12917 | """Restricted Boltzmann Machine
"""
# Authors: Yann N. Dauphin <[email protected]>
# Vlad Niculae
# Gabriel Synnaeve
# Lars Buitinck
# License: BSD 3 clause
import time
import numpy as np
import scipy.sparse as sp
from scipy.special import expit # logistic function
from ..base import BaseEstimator
from ..base import TransformerMixin
from ..utils import check_array
from ..utils import check_random_state
from ..utils import gen_even_slices
from ..utils.extmath import safe_sparse_dot
from ..utils.extmath import log_logistic
from ..utils.validation import check_is_fitted, _deprecate_positional_args
class BernoulliRBM(TransformerMixin, BaseEstimator):
"""Bernoulli Restricted Boltzmann Machine (RBM).
A Restricted Boltzmann Machine with binary visible units and
binary hidden units. Parameters are estimated using Stochastic Maximum
Likelihood (SML), also known as Persistent Contrastive Divergence (PCD)
[2].
The time complexity of this implementation is ``O(d ** 2)`` assuming
d ~ n_features ~ n_components.
Read more in the :ref:`User Guide <rbm>`.
Parameters
----------
n_components : int, default=256
Number of binary hidden units.
learning_rate : float, default=0.1
The learning rate for weight updates. It is *highly* recommended
to tune this hyper-parameter. Reasonable values are in the
10**[0., -3.] range.
batch_size : int, default=10
Number of examples per minibatch.
n_iter : int, default=10
Number of iterations/sweeps over the training dataset to perform
during training.
verbose : int, default=0
The verbosity level. The default, zero, means silent mode.
random_state : integer or RandomState, default=None
Determines random number generation for:
- Gibbs sampling from visible and hidden layers.
- Initializing components, sampling from layers during fit.
- Corrupting the data when scoring samples.
Pass an int for reproducible results across multiple function calls.
See :term:`Glossary <random_state>`.
Attributes
----------
intercept_hidden_ : array-like, shape (n_components,)
Biases of the hidden units.
intercept_visible_ : array-like, shape (n_features,)
Biases of the visible units.
components_ : array-like, shape (n_components, n_features)
        Weight matrix, where n_features is the number of
visible units and n_components is the number of hidden units.
h_samples_ : array-like, shape (batch_size, n_components)
Hidden Activation sampled from the model distribution,
        where batch_size is the number of examples per minibatch and
n_components is the number of hidden units.
Examples
--------
>>> import numpy as np
>>> from sklearn.neural_network import BernoulliRBM
>>> X = np.array([[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 1]])
>>> model = BernoulliRBM(n_components=2)
>>> model.fit(X)
BernoulliRBM(n_components=2)
References
----------
[1] Hinton, G. E., Osindero, S. and Teh, Y. A fast learning algorithm for
deep belief nets. Neural Computation 18, pp 1527-1554.
https://www.cs.toronto.edu/~hinton/absps/fastnc.pdf
[2] Tieleman, T. Training Restricted Boltzmann Machines using
Approximations to the Likelihood Gradient. International Conference
on Machine Learning (ICML) 2008
"""
@_deprecate_positional_args
def __init__(self, n_components=256, *, learning_rate=0.1, batch_size=10,
n_iter=10, verbose=0, random_state=None):
self.n_components = n_components
self.learning_rate = learning_rate
self.batch_size = batch_size
self.n_iter = n_iter
self.verbose = verbose
self.random_state = random_state
def transform(self, X):
"""Compute the hidden layer activation probabilities, P(h=1|v=X).
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The data to be transformed.
Returns
-------
h : ndarray of shape (n_samples, n_components)
Latent representations of the data.
"""
check_is_fitted(self)
X = check_array(X, accept_sparse='csr', dtype=(np.float64, np.float32))
return self._mean_hiddens(X)
def _mean_hiddens(self, v):
"""Computes the probabilities P(h=1|v).
Parameters
----------
v : ndarray of shape (n_samples, n_features)
Values of the visible layer.
Returns
-------
h : ndarray of shape (n_samples, n_components)
Corresponding mean field values for the hidden layer.
"""
p = safe_sparse_dot(v, self.components_.T)
p += self.intercept_hidden_
return expit(p, out=p)
def _sample_hiddens(self, v, rng):
"""Sample from the distribution P(h|v).
Parameters
----------
v : ndarray of shape (n_samples, n_features)
Values of the visible layer to sample from.
rng : RandomState
Random number generator to use.
Returns
-------
h : ndarray of shape (n_samples, n_components)
Values of the hidden layer.
"""
p = self._mean_hiddens(v)
return (rng.random_sample(size=p.shape) < p)
def _sample_visibles(self, h, rng):
"""Sample from the distribution P(v|h).
Parameters
----------
h : ndarray of shape (n_samples, n_components)
Values of the hidden layer to sample from.
rng : RandomState
Random number generator to use.
Returns
-------
v : ndarray of shape (n_samples, n_features)
Values of the visible layer.
"""
p = np.dot(h, self.components_)
p += self.intercept_visible_
expit(p, out=p)
return (rng.random_sample(size=p.shape) < p)
def _free_energy(self, v):
"""Computes the free energy F(v) = - log sum_h exp(-E(v,h)).
Parameters
----------
v : ndarray of shape (n_samples, n_features)
Values of the visible layer.
Returns
-------
free_energy : ndarray of shape (n_samples,)
The value of the free energy.
"""
return (- safe_sparse_dot(v, self.intercept_visible_)
- np.logaddexp(0, safe_sparse_dot(v, self.components_.T)
+ self.intercept_hidden_).sum(axis=1))
def gibbs(self, v):
"""Perform one Gibbs sampling step.
Parameters
----------
v : ndarray of shape (n_samples, n_features)
Values of the visible layer to start from.
Returns
-------
v_new : ndarray of shape (n_samples, n_features)
Values of the visible layer after one Gibbs step.
"""
check_is_fitted(self)
if not hasattr(self, "random_state_"):
self.random_state_ = check_random_state(self.random_state)
h_ = self._sample_hiddens(v, self.random_state_)
v_ = self._sample_visibles(h_, self.random_state_)
return v_
def partial_fit(self, X, y=None):
"""Fit the model to the data X which should contain a partial
segment of the data.
Parameters
----------
X : ndarray of shape (n_samples, n_features)
Training data.
Returns
-------
self : BernoulliRBM
The fitted model.
"""
X = check_array(X, accept_sparse='csr', dtype=np.float64)
if not hasattr(self, 'random_state_'):
self.random_state_ = check_random_state(self.random_state)
if not hasattr(self, 'components_'):
self.components_ = np.asarray(
self.random_state_.normal(
0,
0.01,
(self.n_components, X.shape[1])
),
order='F')
if not hasattr(self, 'intercept_hidden_'):
self.intercept_hidden_ = np.zeros(self.n_components, )
if not hasattr(self, 'intercept_visible_'):
self.intercept_visible_ = np.zeros(X.shape[1], )
if not hasattr(self, 'h_samples_'):
self.h_samples_ = np.zeros((self.batch_size, self.n_components))
self._fit(X, self.random_state_)
def _fit(self, v_pos, rng):
"""Inner fit for one mini-batch.
Adjust the parameters to maximize the likelihood of v using
Stochastic Maximum Likelihood (SML).
Parameters
----------
v_pos : ndarray of shape (n_samples, n_features)
The data to use for training.
rng : RandomState
Random number generator to use for sampling.
"""
h_pos = self._mean_hiddens(v_pos)
v_neg = self._sample_visibles(self.h_samples_, rng)
h_neg = self._mean_hiddens(v_neg)
lr = float(self.learning_rate) / v_pos.shape[0]
update = safe_sparse_dot(v_pos.T, h_pos, dense_output=True).T
update -= np.dot(h_neg.T, v_neg)
self.components_ += lr * update
self.intercept_hidden_ += lr * (h_pos.sum(axis=0) - h_neg.sum(axis=0))
self.intercept_visible_ += lr * (np.asarray(
v_pos.sum(axis=0)).squeeze() -
v_neg.sum(axis=0))
h_neg[rng.uniform(size=h_neg.shape) < h_neg] = 1.0 # sample binomial
self.h_samples_ = np.floor(h_neg, h_neg)
def score_samples(self, X):
"""Compute the pseudo-likelihood of X.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Values of the visible layer. Must be all-boolean (not checked).
Returns
-------
pseudo_likelihood : ndarray of shape (n_samples,)
Value of the pseudo-likelihood (proxy for likelihood).
Notes
-----
This method is not deterministic: it computes a quantity called the
free energy on X, then on a randomly corrupted version of X, and
returns the log of the logistic function of the difference.
"""
check_is_fitted(self)
v = check_array(X, accept_sparse='csr')
rng = check_random_state(self.random_state)
# Randomly corrupt one feature in each sample in v.
ind = (np.arange(v.shape[0]),
rng.randint(0, v.shape[1], v.shape[0]))
if sp.issparse(v):
data = -2 * v[ind] + 1
v_ = v + sp.csr_matrix((data.A.ravel(), ind), shape=v.shape)
else:
v_ = v.copy()
v_[ind] = 1 - v_[ind]
fe = self._free_energy(v)
fe_ = self._free_energy(v_)
return v.shape[1] * log_logistic(fe_ - fe)
def fit(self, X, y=None):
"""Fit the model to the data X.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training data.
Returns
-------
self : BernoulliRBM
The fitted model.
"""
X = self._validate_data(
X, accept_sparse='csr', dtype=(np.float64, np.float32)
)
n_samples = X.shape[0]
rng = check_random_state(self.random_state)
self.components_ = np.asarray(
rng.normal(0, 0.01, (self.n_components, X.shape[1])),
order='F',
dtype=X.dtype)
self.intercept_hidden_ = np.zeros(self.n_components, dtype=X.dtype)
self.intercept_visible_ = np.zeros(X.shape[1], dtype=X.dtype)
self.h_samples_ = np.zeros((self.batch_size, self.n_components),
dtype=X.dtype)
n_batches = int(np.ceil(float(n_samples) / self.batch_size))
batch_slices = list(gen_even_slices(n_batches * self.batch_size,
n_batches, n_samples=n_samples))
verbose = self.verbose
begin = time.time()
for iteration in range(1, self.n_iter + 1):
for batch_slice in batch_slices:
self._fit(X[batch_slice], rng)
if verbose:
end = time.time()
print("[%s] Iteration %d, pseudo-likelihood = %.2f,"
" time = %.2fs"
% (type(self).__name__, iteration,
self.score_samples(X).mean(), end - begin))
begin = end
return self
def _more_tags(self):
return {
'_xfail_checks': {
'check_methods_subset_invariance':
'fails for the decision_function method'
}
}
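# Hedged usage sketch (illustrative only, not part of scikit-learn): feature
# extraction with a fitted RBM; any array with values in [0, 1] can stand in
# for ``X``.
#
#     import numpy as np
#     from sklearn.neural_network import BernoulliRBM
#
#     X = (np.random.rand(100, 16) > 0.5).astype(float)
#     rbm = BernoulliRBM(n_components=8, learning_rate=0.05,
#                        n_iter=20, random_state=0)
#     hidden = rbm.fit_transform(X)   # (100, 8) matrix of P(h=1|v)
#     v_next = rbm.gibbs(X[:5])       # one Gibbs step from five samples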
| bsd-3-clause |
brkrishna/freelance | amazon/process/spiders/review_urls_spider.py | 1 | 2549 | # -- coding: utf-8 --
import os.path
import re, json
import scrapy
from scrapy.selector import HtmlXPathSelector
from process.items import RvwUrl
import pandas as pd
#Constants
BASE_URL = 'http://www.amazon.cn'
class RvwSpider(scrapy.Spider):
name = "reviews"
allowed_domains = ["amazon.cn"]
start_urls = ["http://www.amazon.cn"]
def parse(self, response):
review_urls = set(open('review_urls').readlines())
if os.path.isfile('review_urls_done'):
finished_review_urls = set(open('review_urls_done').readlines())
review_urls -= finished_review_urls
for url in review_urls:
yield scrapy.Request(url.strip(), callback=self.get_reviews)
def _create_review(self, helpfulness, stars, author, date, body, url):
info_dict = {}
if helpfulness:
helpful_numbers = re.findall(r'\d+', helpfulness[0])
info_dict['helpful_num'] = int(helpful_numbers[-1])
info_dict['helpful_den'] = int(helpful_numbers[0])
date_numbers = re.findall(r'\d+', date[0])
info_dict['year'] = int(date_numbers[0])
info_dict['month'] = int(date_numbers[1])
info_dict['day'] = int(date_numbers[2])
info_dict['stars'] = int(stars[0])
info_dict['author'] = author[0]
info_dict['body'] = body[0]
info_dict['product_link'] = url
return info_dict
def get_reviews(self, response):
url = response.meta.get('redirect_urls', response.url)
reviews = response.xpath('//div[@class="a-section review"]')
if reviews:
review_list = []
for review in reviews:
helpfulness = review.xpath('.//div[@class="a-row helpful-votes-count"]/text()').extract()
stars = review.xpath('.//i[contains(@class, "review-rating")]/span/text()').extract()
author = review.xpath('.//a[@class="a-size-base a-link-normal author"]/text()').extract()
date = review.xpath('.//span[@class="a-size-base a-color-secondary review-date"]/text()').extract()
body = review.xpath('.//span[@class="a-size-base review-text"]/text()').extract()
info_dict = self._create_review(helpfulness, stars, author, date, body, url)
dumped_data = json.dumps(info_dict, ensure_ascii=False).encode('utf8') + '\n'
review_list.append(dumped_data)
with open('reviews', 'a') as sink:
for review_item in review_list:
sink.write(review_item)
next_page_url = response.xpath('//ul[@class="a-pagination"]/li[@class="a-last"]/a/@href').extract()
if next_page_url:
yield scrapy.Request(BASE_URL + next_page_url[0], self.get_reviews)
open('review_urls_done', 'a').write(url + '\n')
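# Hedged usage note (not part of the original spider): with a newline-separated
# seed file named ``review_urls`` in the working directory, this spider is
# normally launched from the Scrapy project root with ``scrapy crawl reviews``;
# scraped records accumulate in ``reviews`` and finished URLs in
# ``review_urls_done``.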
| gpl-2.0 |
pbrod/scipy | scipy/stats/tests/test_morestats.py | 5 | 55032 | # Author: Travis Oliphant, 2002
#
# Further enhancements and tests added by numerous SciPy developers.
#
from __future__ import division, print_function, absolute_import
import warnings
import numpy as np
from numpy.random import RandomState
from numpy.testing import (TestCase, run_module_suite, assert_array_equal,
assert_almost_equal, assert_array_less, assert_array_almost_equal,
assert_raises, assert_, assert_allclose, assert_equal, dec, assert_warns)
from scipy import stats
from common_tests import check_named_results
# Matplotlib is not a scipy dependency but is optionally used in probplot, so
# check if it's available
try:
import matplotlib.pyplot as plt
have_matplotlib = True
except:
have_matplotlib = False
g1 = [1.006, 0.996, 0.998, 1.000, 0.992, 0.993, 1.002, 0.999, 0.994, 1.000]
g2 = [0.998, 1.006, 1.000, 1.002, 0.997, 0.998, 0.996, 1.000, 1.006, 0.988]
g3 = [0.991, 0.987, 0.997, 0.999, 0.995, 0.994, 1.000, 0.999, 0.996, 0.996]
g4 = [1.005, 1.002, 0.994, 1.000, 0.995, 0.994, 0.998, 0.996, 1.002, 0.996]
g5 = [0.998, 0.998, 0.982, 0.990, 1.002, 0.984, 0.996, 0.993, 0.980, 0.996]
g6 = [1.009, 1.013, 1.009, 0.997, 0.988, 1.002, 0.995, 0.998, 0.981, 0.996]
g7 = [0.990, 1.004, 0.996, 1.001, 0.998, 1.000, 1.018, 1.010, 0.996, 1.002]
g8 = [0.998, 1.000, 1.006, 1.000, 1.002, 0.996, 0.998, 0.996, 1.002, 1.006]
g9 = [1.002, 0.998, 0.996, 0.995, 0.996, 1.004, 1.004, 0.998, 0.999, 0.991]
g10 = [0.991, 0.995, 0.984, 0.994, 0.997, 0.997, 0.991, 0.998, 1.004, 0.997]
class TestBayes_mvs(TestCase):
def test_basic(self):
# Expected values in this test simply taken from the function. For
# some checks regarding correctness of implementation, see review in
# gh-674
data = [6, 9, 12, 7, 8, 8, 13]
mean, var, std = stats.bayes_mvs(data)
assert_almost_equal(mean.statistic, 9.0)
assert_allclose(mean.minmax, (7.1036502226125329, 10.896349777387467),
rtol=1e-14)
assert_almost_equal(var.statistic, 10.0)
assert_allclose(var.minmax, (3.1767242068607087, 24.45910381334018),
rtol=1e-09)
assert_almost_equal(std.statistic, 2.9724954732045084, decimal=14)
assert_allclose(std.minmax, (1.7823367265645145, 4.9456146050146312),
rtol=1e-14)
def test_empty_input(self):
assert_raises(ValueError, stats.bayes_mvs, [])
def test_result_attributes(self):
x = np.arange(15)
attributes = ('statistic', 'minmax')
res = stats.bayes_mvs(x)
for i in res:
check_named_results(i, attributes)
class TestMvsdist(TestCase):
def test_basic(self):
data = [6, 9, 12, 7, 8, 8, 13]
mean, var, std = stats.mvsdist(data)
assert_almost_equal(mean.mean(), 9.0)
assert_allclose(mean.interval(0.9), (7.1036502226125329,
10.896349777387467), rtol=1e-14)
assert_almost_equal(var.mean(), 10.0)
assert_allclose(var.interval(0.9), (3.1767242068607087,
24.45910381334018), rtol=1e-09)
assert_almost_equal(std.mean(), 2.9724954732045084, decimal=14)
assert_allclose(std.interval(0.9), (1.7823367265645145,
4.9456146050146312), rtol=1e-14)
def test_empty_input(self):
assert_raises(ValueError, stats.mvsdist, [])
def test_bad_arg(self):
# Raise ValueError if fewer than two data points are given.
data = [1]
assert_raises(ValueError, stats.mvsdist, data)
def test_warns(self):
# regression test for gh-5270
# make sure there are no spurious divide-by-zero warnings
with warnings.catch_warnings():
warnings.simplefilter('error', RuntimeWarning)
[x.mean() for x in stats.mvsdist([1, 2, 3])]
[x.mean() for x in stats.mvsdist([1, 2, 3, 4, 5])]
class TestShapiro(TestCase):
def test_basic(self):
x1 = [0.11, 7.87, 4.61, 10.14, 7.95, 3.14, 0.46,
4.43, 0.21, 4.75, 0.71, 1.52, 3.24,
0.93, 0.42, 4.97, 9.53, 4.55, 0.47, 6.66]
w, pw = stats.shapiro(x1)
assert_almost_equal(w, 0.90047299861907959, 6)
assert_almost_equal(pw, 0.042089745402336121, 6)
x2 = [1.36, 1.14, 2.92, 2.55, 1.46, 1.06, 5.27, -1.11,
3.48, 1.10, 0.88, -0.51, 1.46, 0.52, 6.20, 1.69,
0.08, 3.67, 2.81, 3.49]
w, pw = stats.shapiro(x2)
assert_almost_equal(w, 0.9590270, 6)
assert_almost_equal(pw, 0.52460, 3)
# Verified against R
np.random.seed(12345678)
x3 = stats.norm.rvs(loc=5, scale=3, size=100)
w, pw = stats.shapiro(x3)
assert_almost_equal(w, 0.9772805571556091, decimal=6)
assert_almost_equal(pw, 0.08144091814756393, decimal=3)
# Extracted from original paper
x4 = [0.139, 0.157, 0.175, 0.256, 0.344, 0.413, 0.503, 0.577, 0.614,
0.655, 0.954, 1.392, 1.557, 1.648, 1.690, 1.994, 2.174, 2.206,
3.245, 3.510, 3.571, 4.354, 4.980, 6.084, 8.351]
W_expected = 0.83467
p_expected = 0.000914
w, pw = stats.shapiro(x4)
assert_almost_equal(w, W_expected, decimal=4)
assert_almost_equal(pw, p_expected, decimal=5)
def test_2d(self):
x1 = [[0.11, 7.87, 4.61, 10.14, 7.95, 3.14, 0.46,
4.43, 0.21, 4.75], [0.71, 1.52, 3.24,
0.93, 0.42, 4.97, 9.53, 4.55, 0.47, 6.66]]
w, pw = stats.shapiro(x1)
assert_almost_equal(w, 0.90047299861907959, 6)
assert_almost_equal(pw, 0.042089745402336121, 6)
x2 = [[1.36, 1.14, 2.92, 2.55, 1.46, 1.06, 5.27, -1.11,
3.48, 1.10], [0.88, -0.51, 1.46, 0.52, 6.20, 1.69,
0.08, 3.67, 2.81, 3.49]]
w, pw = stats.shapiro(x2)
assert_almost_equal(w, 0.9590270, 6)
assert_almost_equal(pw, 0.52460, 3)
def test_empty_input(self):
assert_raises(ValueError, stats.shapiro, [])
assert_raises(ValueError, stats.shapiro, [[], [], []])
def test_not_enough_values(self):
assert_raises(ValueError, stats.shapiro, [1, 2])
assert_raises(ValueError, stats.shapiro, [[], [2]])
def test_bad_arg(self):
# Length of x is less than 3.
x = [1]
assert_raises(ValueError, stats.shapiro, x)
def test_nan_input(self):
x = np.arange(10.)
x[9] = np.nan
w, pw = stats.shapiro(x)
assert_equal(w, np.nan)
assert_almost_equal(pw, 1.0)
class TestAnderson(TestCase):
def test_normal(self):
rs = RandomState(1234567890)
x1 = rs.standard_exponential(size=50)
x2 = rs.standard_normal(size=50)
A, crit, sig = stats.anderson(x1)
assert_array_less(crit[:-1], A)
A, crit, sig = stats.anderson(x2)
assert_array_less(A, crit[-2:])
v = np.ones(10)
v[0] = 0
A, crit, sig = stats.anderson(v)
# The expected statistic 3.208057 was computed independently of scipy.
# For example, in R:
# > library(nortest)
# > v <- rep(1, 10)
# > v[1] <- 0
# > result <- ad.test(v)
# > result$statistic
# A
# 3.208057
assert_allclose(A, 3.208057)
def test_expon(self):
rs = RandomState(1234567890)
x1 = rs.standard_exponential(size=50)
x2 = rs.standard_normal(size=50)
A, crit, sig = stats.anderson(x1, 'expon')
assert_array_less(A, crit[-2:])
olderr = np.seterr(all='ignore')
try:
A, crit, sig = stats.anderson(x2, 'expon')
finally:
np.seterr(**olderr)
assert_(A > crit[-1])
def test_gumbel(self):
# Regression test for gh-6306. Before that issue was fixed,
# this case would return a2=inf.
v = np.ones(100)
v[0] = 0.0
a2, crit, sig = stats.anderson(v, 'gumbel')
# A brief reimplementation of the calculation of the statistic.
n = len(v)
xbar, s = stats.gumbel_l.fit(v)
logcdf = stats.gumbel_l.logcdf(v, xbar, s)
logsf = stats.gumbel_l.logsf(v, xbar, s)
i = np.arange(1, n+1)
expected_a2 = -n - np.mean((2*i - 1) * (logcdf + logsf[::-1]))
assert_allclose(a2, expected_a2)
def test_bad_arg(self):
assert_raises(ValueError, stats.anderson, [1], dist='plate_of_shrimp')
def test_result_attributes(self):
rs = RandomState(1234567890)
x = rs.standard_exponential(size=50)
res = stats.anderson(x)
attributes = ('statistic', 'critical_values', 'significance_level')
check_named_results(res, attributes)
def test_gumbel_l(self):
# gh-2592, gh-6337
# Adds support to 'gumbel_r' and 'gumbel_l' as valid inputs for dist.
rs = RandomState(1234567890)
x = rs.gumbel(size=100)
A1, crit1, sig1 = stats.anderson(x, 'gumbel')
A2, crit2, sig2 = stats.anderson(x, 'gumbel_l')
assert_allclose(A2, A1)
def test_gumbel_r(self):
# gh-2592, gh-6337
# Adds support to 'gumbel_r' and 'gumbel_l' as valid inputs for dist.
rs = RandomState(1234567890)
x1 = rs.gumbel(size=100)
x2 = np.ones(100)
A1, crit1, sig1 = stats.anderson(x1, 'gumbel_r')
A2, crit2, sig2 = stats.anderson(x2, 'gumbel_r')
assert_array_less(A1, crit1[-2:])
assert_(A2 > crit2[-1])
class TestAndersonKSamp(TestCase):
def test_example1a(self):
# Example data from Scholz & Stephens (1987), originally
# published in Lehmann (1995, Nonparametrics, Statistical
# Methods Based on Ranks, p. 309)
# Pass a mixture of lists and arrays
t1 = [38.7, 41.5, 43.8, 44.5, 45.5, 46.0, 47.7, 58.0]
t2 = np.array([39.2, 39.3, 39.7, 41.4, 41.8, 42.9, 43.3, 45.8])
t3 = np.array([34.0, 35.0, 39.0, 40.0, 43.0, 43.0, 44.0, 45.0])
t4 = np.array([34.0, 34.8, 34.8, 35.4, 37.2, 37.8, 41.2, 42.8])
assert_warns(UserWarning, stats.anderson_ksamp, (t1, t2, t3, t4),
midrank=False)
with warnings.catch_warnings():
warnings.filterwarnings('ignore', message='approximate p-value')
Tk, tm, p = stats.anderson_ksamp((t1, t2, t3, t4), midrank=False)
assert_almost_equal(Tk, 4.449, 3)
assert_array_almost_equal([0.4985, 1.3237, 1.9158, 2.4930, 3.2459],
tm, 4)
assert_almost_equal(p, 0.0021, 4)
def test_example1b(self):
# Example data from Scholz & Stephens (1987), originally
# published in Lehmann (1995, Nonparametrics, Statistical
# Methods Based on Ranks, p. 309)
# Pass arrays
t1 = np.array([38.7, 41.5, 43.8, 44.5, 45.5, 46.0, 47.7, 58.0])
t2 = np.array([39.2, 39.3, 39.7, 41.4, 41.8, 42.9, 43.3, 45.8])
t3 = np.array([34.0, 35.0, 39.0, 40.0, 43.0, 43.0, 44.0, 45.0])
t4 = np.array([34.0, 34.8, 34.8, 35.4, 37.2, 37.8, 41.2, 42.8])
with warnings.catch_warnings():
warnings.filterwarnings('ignore', message='approximate p-value')
Tk, tm, p = stats.anderson_ksamp((t1, t2, t3, t4), midrank=True)
assert_almost_equal(Tk, 4.480, 3)
assert_array_almost_equal([0.4985, 1.3237, 1.9158, 2.4930, 3.2459],
tm, 4)
assert_almost_equal(p, 0.0020, 4)
def test_example2a(self):
# Example data taken from an earlier technical report of
# Scholz and Stephens
# Pass lists instead of arrays
t1 = [194, 15, 41, 29, 33, 181]
t2 = [413, 14, 58, 37, 100, 65, 9, 169, 447, 184, 36, 201, 118]
t3 = [34, 31, 18, 18, 67, 57, 62, 7, 22, 34]
t4 = [90, 10, 60, 186, 61, 49, 14, 24, 56, 20, 79, 84, 44, 59, 29,
118, 25, 156, 310, 76, 26, 44, 23, 62]
t5 = [130, 208, 70, 101, 208]
t6 = [74, 57, 48, 29, 502, 12, 70, 21, 29, 386, 59, 27]
t7 = [55, 320, 56, 104, 220, 239, 47, 246, 176, 182, 33]
t8 = [23, 261, 87, 7, 120, 14, 62, 47, 225, 71, 246, 21, 42, 20, 5,
12, 120, 11, 3, 14, 71, 11, 14, 11, 16, 90, 1, 16, 52, 95]
t9 = [97, 51, 11, 4, 141, 18, 142, 68, 77, 80, 1, 16, 106, 206, 82,
54, 31, 216, 46, 111, 39, 63, 18, 191, 18, 163, 24]
t10 = [50, 44, 102, 72, 22, 39, 3, 15, 197, 188, 79, 88, 46, 5, 5, 36,
22, 139, 210, 97, 30, 23, 13, 14]
t11 = [359, 9, 12, 270, 603, 3, 104, 2, 438]
t12 = [50, 254, 5, 283, 35, 12]
t13 = [487, 18, 100, 7, 98, 5, 85, 91, 43, 230, 3, 130]
t14 = [102, 209, 14, 57, 54, 32, 67, 59, 134, 152, 27, 14, 230, 66,
61, 34]
with warnings.catch_warnings():
warnings.filterwarnings('ignore', message='approximate p-value')
Tk, tm, p = stats.anderson_ksamp((t1, t2, t3, t4, t5, t6, t7, t8,
t9, t10, t11, t12, t13, t14),
midrank=False)
assert_almost_equal(Tk, 3.288, 3)
assert_array_almost_equal([0.5990, 1.3269, 1.8052, 2.2486, 2.8009],
tm, 4)
assert_almost_equal(p, 0.0041, 4)
def test_example2b(self):
# Example data taken from an earlier technical report of
# Scholz and Stephens
t1 = [194, 15, 41, 29, 33, 181]
t2 = [413, 14, 58, 37, 100, 65, 9, 169, 447, 184, 36, 201, 118]
t3 = [34, 31, 18, 18, 67, 57, 62, 7, 22, 34]
t4 = [90, 10, 60, 186, 61, 49, 14, 24, 56, 20, 79, 84, 44, 59, 29,
118, 25, 156, 310, 76, 26, 44, 23, 62]
t5 = [130, 208, 70, 101, 208]
t6 = [74, 57, 48, 29, 502, 12, 70, 21, 29, 386, 59, 27]
t7 = [55, 320, 56, 104, 220, 239, 47, 246, 176, 182, 33]
t8 = [23, 261, 87, 7, 120, 14, 62, 47, 225, 71, 246, 21, 42, 20, 5,
12, 120, 11, 3, 14, 71, 11, 14, 11, 16, 90, 1, 16, 52, 95]
t9 = [97, 51, 11, 4, 141, 18, 142, 68, 77, 80, 1, 16, 106, 206, 82,
54, 31, 216, 46, 111, 39, 63, 18, 191, 18, 163, 24]
t10 = [50, 44, 102, 72, 22, 39, 3, 15, 197, 188, 79, 88, 46, 5, 5, 36,
22, 139, 210, 97, 30, 23, 13, 14]
t11 = [359, 9, 12, 270, 603, 3, 104, 2, 438]
t12 = [50, 254, 5, 283, 35, 12]
t13 = [487, 18, 100, 7, 98, 5, 85, 91, 43, 230, 3, 130]
t14 = [102, 209, 14, 57, 54, 32, 67, 59, 134, 152, 27, 14, 230, 66,
61, 34]
with warnings.catch_warnings():
warnings.filterwarnings('ignore', message='approximate p-value')
Tk, tm, p = stats.anderson_ksamp((t1, t2, t3, t4, t5, t6, t7, t8,
t9, t10, t11, t12, t13, t14),
midrank=True)
assert_almost_equal(Tk, 3.294, 3)
assert_array_almost_equal([0.5990, 1.3269, 1.8052, 2.2486, 2.8009],
tm, 4)
assert_almost_equal(p, 0.0041, 4)
def test_not_enough_samples(self):
assert_raises(ValueError, stats.anderson_ksamp, np.ones(5))
def test_no_distinct_observations(self):
assert_raises(ValueError, stats.anderson_ksamp,
(np.ones(5), np.ones(5)))
def test_empty_sample(self):
assert_raises(ValueError, stats.anderson_ksamp, (np.ones(5), []))
def test_result_attributes(self):
# Example data from Scholz & Stephens (1987), originally
# published in Lehmann (1995, Nonparametrics, Statistical
# Methods Based on Ranks, p. 309)
# Pass a mixture of lists and arrays
t1 = [38.7, 41.5, 43.8, 44.5, 45.5, 46.0, 47.7, 58.0]
t2 = np.array([39.2, 39.3, 39.7, 41.4, 41.8, 42.9, 43.3, 45.8])
t3 = np.array([34.0, 35.0, 39.0, 40.0, 43.0, 43.0, 44.0, 45.0])
t4 = np.array([34.0, 34.8, 34.8, 35.4, 37.2, 37.8, 41.2, 42.8])
with warnings.catch_warnings():
warnings.filterwarnings('ignore', message='approximate p-value')
res = stats.anderson_ksamp((t1, t2, t3, t4), midrank=False)
attributes = ('statistic', 'critical_values', 'significance_level')
check_named_results(res, attributes)
class TestAnsari(TestCase):
def test_small(self):
x = [1, 2, 3, 3, 4]
y = [3, 2, 6, 1, 6, 1, 4, 1]
with warnings.catch_warnings(record=True): # Ties preclude use ...
W, pval = stats.ansari(x, y)
assert_almost_equal(W, 23.5, 11)
assert_almost_equal(pval, 0.13499256881897437, 11)
def test_approx(self):
ramsay = np.array((111, 107, 100, 99, 102, 106, 109, 108, 104, 99,
101, 96, 97, 102, 107, 113, 116, 113, 110, 98))
parekh = np.array((107, 108, 106, 98, 105, 103, 110, 105, 104,
100, 96, 108, 103, 104, 114, 114, 113, 108,
106, 99))
with warnings.catch_warnings():
warnings.filterwarnings('ignore',
message="Ties preclude use of exact statistic.")
W, pval = stats.ansari(ramsay, parekh)
assert_almost_equal(W, 185.5, 11)
assert_almost_equal(pval, 0.18145819972867083, 11)
def test_exact(self):
W, pval = stats.ansari([1, 2, 3, 4], [15, 5, 20, 8, 10, 12])
assert_almost_equal(W, 10.0, 11)
assert_almost_equal(pval, 0.533333333333333333, 7)
def test_bad_arg(self):
assert_raises(ValueError, stats.ansari, [], [1])
assert_raises(ValueError, stats.ansari, [1], [])
def test_result_attributes(self):
x = [1, 2, 3, 3, 4]
y = [3, 2, 6, 1, 6, 1, 4, 1]
with warnings.catch_warnings(record=True): # Ties preclude use ...
res = stats.ansari(x, y)
attributes = ('statistic', 'pvalue')
check_named_results(res, attributes)
class TestBartlett(TestCase):
def test_data(self):
args = [g1, g2, g3, g4, g5, g6, g7, g8, g9, g10]
T, pval = stats.bartlett(*args)
assert_almost_equal(T, 20.78587342806484, 7)
assert_almost_equal(pval, 0.0136358632781, 7)
def test_bad_arg(self):
# Too few args raises ValueError.
assert_raises(ValueError, stats.bartlett, [1])
def test_result_attributes(self):
args = [g1, g2, g3, g4, g5, g6, g7, g8, g9, g10]
res = stats.bartlett(*args)
attributes = ('statistic', 'pvalue')
check_named_results(res, attributes)
def test_empty_arg(self):
args = (g1, g2, g3, g4, g5, g6, g7, g8, g9, g10, [])
assert_equal((np.nan, np.nan), stats.bartlett(*args))
class TestLevene(TestCase):
def test_data(self):
args = [g1, g2, g3, g4, g5, g6, g7, g8, g9, g10]
W, pval = stats.levene(*args)
assert_almost_equal(W, 1.7059176930008939, 7)
assert_almost_equal(pval, 0.0990829755522, 7)
def test_trimmed1(self):
# Test that center='trimmed' gives the same result as center='mean'
# when proportiontocut=0.
W1, pval1 = stats.levene(g1, g2, g3, center='mean')
W2, pval2 = stats.levene(g1, g2, g3, center='trimmed',
proportiontocut=0.0)
assert_almost_equal(W1, W2)
assert_almost_equal(pval1, pval2)
def test_trimmed2(self):
x = [1.2, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 100.0]
y = [0.0, 3.0, 3.5, 4.0, 4.5, 5.0, 5.5, 200.0]
np.random.seed(1234)
x2 = np.random.permutation(x)
# Use center='trimmed'
W0, pval0 = stats.levene(x, y, center='trimmed',
proportiontocut=0.125)
W1, pval1 = stats.levene(x2, y, center='trimmed',
proportiontocut=0.125)
# Trim the data here, and use center='mean'
W2, pval2 = stats.levene(x[1:-1], y[1:-1], center='mean')
# Result should be the same.
assert_almost_equal(W0, W2)
assert_almost_equal(W1, W2)
assert_almost_equal(pval1, pval2)
def test_equal_mean_median(self):
x = np.linspace(-1, 1, 21)
np.random.seed(1234)
x2 = np.random.permutation(x)
y = x**3
W1, pval1 = stats.levene(x, y, center='mean')
W2, pval2 = stats.levene(x2, y, center='median')
assert_almost_equal(W1, W2)
assert_almost_equal(pval1, pval2)
def test_bad_keyword(self):
x = np.linspace(-1, 1, 21)
assert_raises(TypeError, stats.levene, x, x, portiontocut=0.1)
def test_bad_center_value(self):
x = np.linspace(-1, 1, 21)
assert_raises(ValueError, stats.levene, x, x, center='trim')
def test_too_few_args(self):
assert_raises(ValueError, stats.levene, [1])
def test_result_attributes(self):
args = [g1, g2, g3, g4, g5, g6, g7, g8, g9, g10]
res = stats.levene(*args)
attributes = ('statistic', 'pvalue')
check_named_results(res, attributes)
class TestBinomP(TestCase):
def test_data(self):
pval = stats.binom_test(100, 250)
assert_almost_equal(pval, 0.0018833009350757682, 11)
pval = stats.binom_test(201, 405)
assert_almost_equal(pval, 0.92085205962670713, 11)
pval = stats.binom_test([682, 243], p=3.0/4)
assert_almost_equal(pval, 0.38249155957481695, 11)
def test_bad_len_x(self):
# Length of x must be 1 or 2.
assert_raises(ValueError, stats.binom_test, [1, 2, 3])
def test_bad_n(self):
# len(x) is 1, but n is invalid.
# Missing n
assert_raises(ValueError, stats.binom_test, [100])
# n less than x[0]
assert_raises(ValueError, stats.binom_test, [100], n=50)
def test_bad_p(self):
assert_raises(ValueError, stats.binom_test, [50, 50], p=2.0)
def test_alternatives(self):
res = stats.binom_test(51, 235, p=1./6, alternative='less')
assert_almost_equal(res, 0.982022657605858)
res = stats.binom_test(51, 235, p=1./6, alternative='greater')
assert_almost_equal(res, 0.02654424571169085)
res = stats.binom_test(51, 235, p=1./6, alternative='two-sided')
assert_almost_equal(res, 0.0437479701823997)
class TestFligner(TestCase):
def test_data(self):
# numbers from R: fligner.test in package stats
x1 = np.arange(5)
assert_array_almost_equal(stats.fligner(x1, x1**2),
(3.2282229927203536, 0.072379187848207877),
11)
def test_trimmed1(self):
# Test that center='trimmed' gives the same result as center='mean'
# when proportiontocut=0.
Xsq1, pval1 = stats.fligner(g1, g2, g3, center='mean')
Xsq2, pval2 = stats.fligner(g1, g2, g3, center='trimmed',
proportiontocut=0.0)
assert_almost_equal(Xsq1, Xsq2)
assert_almost_equal(pval1, pval2)
def test_trimmed2(self):
x = [1.2, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 100.0]
y = [0.0, 3.0, 3.5, 4.0, 4.5, 5.0, 5.5, 200.0]
# Use center='trimmed'
Xsq1, pval1 = stats.fligner(x, y, center='trimmed',
proportiontocut=0.125)
# Trim the data here, and use center='mean'
Xsq2, pval2 = stats.fligner(x[1:-1], y[1:-1], center='mean')
# Result should be the same.
assert_almost_equal(Xsq1, Xsq2)
assert_almost_equal(pval1, pval2)
# The following test looks reasonable at first, but fligner() uses the
# function stats.rankdata(), and in one of the cases in this test,
# there are ties, while in the other (because of normal rounding
# errors) there are not. This difference leads to differences in the
# third significant digit of W.
#
#def test_equal_mean_median(self):
# x = np.linspace(-1,1,21)
# y = x**3
# W1, pval1 = stats.fligner(x, y, center='mean')
# W2, pval2 = stats.fligner(x, y, center='median')
# assert_almost_equal(W1, W2)
# assert_almost_equal(pval1, pval2)
def test_bad_keyword(self):
x = np.linspace(-1, 1, 21)
assert_raises(TypeError, stats.fligner, x, x, portiontocut=0.1)
def test_bad_center_value(self):
x = np.linspace(-1, 1, 21)
assert_raises(ValueError, stats.fligner, x, x, center='trim')
def test_bad_num_args(self):
# Too few args raises ValueError.
assert_raises(ValueError, stats.fligner, [1])
def test_empty_arg(self):
x = np.arange(5)
assert_equal((np.nan, np.nan), stats.fligner(x, x**2, []))
class TestMood(TestCase):
def test_mood(self):
# numbers from R: mood.test in package stats
x1 = np.arange(5)
assert_array_almost_equal(stats.mood(x1, x1**2),
(-1.3830857299399906, 0.16663858066771478),
11)
def test_mood_order_of_args(self):
# z should change sign when the order of arguments changes, pvalue
# should not change
np.random.seed(1234)
x1 = np.random.randn(10, 1)
x2 = np.random.randn(15, 1)
z1, p1 = stats.mood(x1, x2)
z2, p2 = stats.mood(x2, x1)
assert_array_almost_equal([z1, p1], [-z2, p2])
def test_mood_with_axis_none(self):
# Test with axis = None, compare with results from R
x1 = [-0.626453810742332, 0.183643324222082, -0.835628612410047,
1.59528080213779, 0.329507771815361, -0.820468384118015,
0.487429052428485, 0.738324705129217, 0.575781351653492,
-0.305388387156356, 1.51178116845085, 0.389843236411431,
-0.621240580541804, -2.2146998871775, 1.12493091814311,
-0.0449336090152309, -0.0161902630989461, 0.943836210685299,
0.821221195098089, 0.593901321217509]
x2 = [-0.896914546624981, 0.184849184646742, 1.58784533120882,
-1.13037567424629, -0.0802517565509893, 0.132420284381094,
0.707954729271733, -0.23969802417184, 1.98447393665293,
-0.138787012119665, 0.417650750792556, 0.981752777463662,
-0.392695355503813, -1.03966897694891, 1.78222896030858,
-2.31106908460517, 0.878604580921265, 0.035806718015226,
1.01282869212708, 0.432265154539617, 2.09081920524915,
-1.19992581964387, 1.58963820029007, 1.95465164222325,
0.00493777682814261, -2.45170638784613, 0.477237302613617,
-0.596558168631403, 0.792203270299649, 0.289636710177348]
x1 = np.array(x1)
x2 = np.array(x2)
x1.shape = (10, 2)
x2.shape = (15, 2)
assert_array_almost_equal(stats.mood(x1, x2, axis=None),
[-1.31716607555, 0.18778296257])
def test_mood_2d(self):
# Test if the results of mood test in 2-D case are consistent with the
# R result for the same inputs. Numbers from R mood.test().
ny = 5
np.random.seed(1234)
x1 = np.random.randn(10, ny)
x2 = np.random.randn(15, ny)
z_vectest, pval_vectest = stats.mood(x1, x2)
for j in range(ny):
assert_array_almost_equal([z_vectest[j], pval_vectest[j]],
stats.mood(x1[:, j], x2[:, j]))
# inverse order of dimensions
x1 = x1.transpose()
x2 = x2.transpose()
z_vectest, pval_vectest = stats.mood(x1, x2, axis=1)
for i in range(ny):
# check axis handling is self consistent
assert_array_almost_equal([z_vectest[i], pval_vectest[i]],
stats.mood(x1[i, :], x2[i, :]))
def test_mood_3d(self):
shape = (10, 5, 6)
np.random.seed(1234)
x1 = np.random.randn(*shape)
x2 = np.random.randn(*shape)
for axis in range(3):
z_vectest, pval_vectest = stats.mood(x1, x2, axis=axis)
# Tests that result for 3-D arrays is equal to that for the
# same calculation on a set of 1-D arrays taken from the
# 3-D array
axes_idx = ([1, 2], [0, 2], [0, 1]) # the two axes != axis
for i in range(shape[axes_idx[axis][0]]):
for j in range(shape[axes_idx[axis][1]]):
if axis == 0:
slice1 = x1[:, i, j]
slice2 = x2[:, i, j]
elif axis == 1:
slice1 = x1[i, :, j]
slice2 = x2[i, :, j]
else:
slice1 = x1[i, j, :]
slice2 = x2[i, j, :]
assert_array_almost_equal([z_vectest[i, j],
pval_vectest[i, j]],
stats.mood(slice1, slice2))
def test_mood_bad_arg(self):
# Raise ValueError when the sum of the lengths of the args is
# less than 3
assert_raises(ValueError, stats.mood, [1], [])
class TestProbplot(TestCase):
def test_basic(self):
np.random.seed(12345)
x = stats.norm.rvs(size=20)
osm, osr = stats.probplot(x, fit=False)
osm_expected = [-1.8241636, -1.38768012, -1.11829229, -0.91222575,
-0.73908135, -0.5857176, -0.44506467, -0.31273668,
-0.18568928, -0.06158146, 0.06158146, 0.18568928,
0.31273668, 0.44506467, 0.5857176, 0.73908135,
0.91222575, 1.11829229, 1.38768012, 1.8241636]
assert_allclose(osr, np.sort(x))
assert_allclose(osm, osm_expected)
res, res_fit = stats.probplot(x, fit=True)
res_fit_expected = [1.05361841, 0.31297795, 0.98741609]
assert_allclose(res_fit, res_fit_expected)
def test_sparams_keyword(self):
np.random.seed(123456)
x = stats.norm.rvs(size=100)
# Check that None, () and 0 (loc=0, for normal distribution) all work
# and give the same results
osm1, osr1 = stats.probplot(x, sparams=None, fit=False)
osm2, osr2 = stats.probplot(x, sparams=0, fit=False)
osm3, osr3 = stats.probplot(x, sparams=(), fit=False)
assert_allclose(osm1, osm2)
assert_allclose(osm1, osm3)
assert_allclose(osr1, osr2)
assert_allclose(osr1, osr3)
# Check giving (loc, scale) params for normal distribution
osm, osr = stats.probplot(x, sparams=(), fit=False)
def test_dist_keyword(self):
np.random.seed(12345)
x = stats.norm.rvs(size=20)
osm1, osr1 = stats.probplot(x, fit=False, dist='t', sparams=(3,))
osm2, osr2 = stats.probplot(x, fit=False, dist=stats.t, sparams=(3,))
assert_allclose(osm1, osm2)
assert_allclose(osr1, osr2)
assert_raises(ValueError, stats.probplot, x, dist='wrong-dist-name')
assert_raises(AttributeError, stats.probplot, x, dist=[])
class custom_dist(object):
"""Some class that looks just enough like a distribution."""
def ppf(self, q):
return stats.norm.ppf(q, loc=2)
osm1, osr1 = stats.probplot(x, sparams=(2,), fit=False)
osm2, osr2 = stats.probplot(x, dist=custom_dist(), fit=False)
assert_allclose(osm1, osm2)
assert_allclose(osr1, osr2)
@dec.skipif(not have_matplotlib)
def test_plot_kwarg(self):
np.random.seed(7654321)
fig = plt.figure()
fig.add_subplot(111)
x = stats.t.rvs(3, size=100)
res1, fitres1 = stats.probplot(x, plot=plt)
plt.close()
res2, fitres2 = stats.probplot(x, plot=None)
res3 = stats.probplot(x, fit=False, plot=plt)
plt.close()
res4 = stats.probplot(x, fit=False, plot=None)
# Check that results are consistent between combinations of `fit` and
# `plot` keywords.
assert_(len(res1) == len(res2) == len(res3) == len(res4) == 2)
assert_allclose(res1, res2)
assert_allclose(res1, res3)
assert_allclose(res1, res4)
assert_allclose(fitres1, fitres2)
# Check that a Matplotlib Axes object is accepted
fig = plt.figure()
ax = fig.add_subplot(111)
stats.probplot(x, fit=False, plot=ax)
plt.close()
def test_probplot_bad_args(self):
# Raise ValueError when given an invalid distribution.
assert_raises(ValueError, stats.probplot, [1], dist="plate_of_shrimp")
def test_empty(self):
assert_equal(stats.probplot([], fit=False),
(np.array([]), np.array([])))
assert_equal(stats.probplot([], fit=True),
((np.array([]), np.array([])),
(np.nan, np.nan, 0.0)))
def test_array_of_size_one(self):
with np.errstate(invalid='ignore'):
assert_equal(stats.probplot([1], fit=True),
((np.array([0.]), np.array([1])),
(np.nan, np.nan, 0.0)))
def test_wilcoxon_bad_arg():
# Raise ValueError when two args of different lengths are given or
# zero_method is unknown.
assert_raises(ValueError, stats.wilcoxon, [1], [1, 2])
assert_raises(ValueError, stats.wilcoxon, [1, 2], [1, 2], "dummy")
def test_wilcoxon_arg_type():
# Should be able to accept list as arguments.
# Address issue 6070.
arr = [1, 2, 3, 0, -1, 3, 1, 2, 1, 1, 2]
_ = stats.wilcoxon(arr, zero_method="pratt")
_ = stats.wilcoxon(arr, zero_method="zsplit")
_ = stats.wilcoxon(arr, zero_method="wilcox")
class TestKstat(TestCase):
def test_moments_normal_distribution(self):
np.random.seed(32149)
data = np.random.randn(12345)
moments = []
for n in [1, 2, 3, 4]:
moments.append(stats.kstat(data, n))
expected = [0.011315, 1.017931, 0.05811052, 0.0754134]
assert_allclose(moments, expected, rtol=1e-4)
# test equivalence with `stats.moment`
m1 = stats.moment(data, moment=1)
m2 = stats.moment(data, moment=2)
m3 = stats.moment(data, moment=3)
assert_allclose((m1, m2, m3), expected[:-1], atol=0.02, rtol=1e-2)
def test_empty_input(self):
assert_raises(ValueError, stats.kstat, [])
def test_nan_input(self):
data = np.arange(10.)
data[6] = np.nan
assert_equal(stats.kstat(data), np.nan)
def test_kstat_bad_arg(self):
# Raise ValueError if n > 4 or n < 1.
data = np.arange(10)
for n in [0, 4.001]:
assert_raises(ValueError, stats.kstat, data, n=n)
class TestKstatVar(TestCase):
def test_empty_input(self):
assert_raises(ValueError, stats.kstatvar, [])
def test_nan_input(self):
data = np.arange(10.)
data[6] = np.nan
assert_equal(stats.kstat(data), np.nan)
def test_bad_arg(self):
# Raise ValueError is n is not 1 or 2.
data = [1]
n = 10
assert_raises(ValueError, stats.kstatvar, data, n=n)
class TestPpccPlot(TestCase):
def setUp(self):
np.random.seed(7654321)
self.x = stats.loggamma.rvs(5, size=500) + 5
def test_basic(self):
N = 5
svals, ppcc = stats.ppcc_plot(self.x, -10, 10, N=N)
ppcc_expected = [0.21139644, 0.21384059, 0.98766719, 0.97980182,
0.93519298]
assert_allclose(svals, np.linspace(-10, 10, num=N))
assert_allclose(ppcc, ppcc_expected)
def test_dist(self):
# Test that we can specify distributions both by name and as objects.
svals1, ppcc1 = stats.ppcc_plot(self.x, -10, 10, dist='tukeylambda')
svals2, ppcc2 = stats.ppcc_plot(self.x, -10, 10,
dist=stats.tukeylambda)
assert_allclose(svals1, svals2, rtol=1e-20)
assert_allclose(ppcc1, ppcc2, rtol=1e-20)
# Test that 'tukeylambda' is the default dist
svals3, ppcc3 = stats.ppcc_plot(self.x, -10, 10)
assert_allclose(svals1, svals3, rtol=1e-20)
assert_allclose(ppcc1, ppcc3, rtol=1e-20)
@dec.skipif(not have_matplotlib)
def test_plot_kwarg(self):
# Check with the matplotlib.pyplot module
fig = plt.figure()
fig.add_subplot(111)
stats.ppcc_plot(self.x, -20, 20, plot=plt)
plt.close()
# Check that a Matplotlib Axes object is accepted
fig.add_subplot(111)
ax = fig.add_subplot(111)
stats.ppcc_plot(self.x, -20, 20, plot=ax)
plt.close()
def test_invalid_inputs(self):
# `b` has to be larger than `a`
assert_raises(ValueError, stats.ppcc_plot, self.x, 1, 0)
# Raise ValueError when given an invalid distribution.
assert_raises(ValueError, stats.ppcc_plot, [1, 2, 3], 0, 1,
dist="plate_of_shrimp")
def test_empty(self):
# For consistency with probplot return for one empty array,
# ppcc contains all zeros and svals is the same as for normal array
# input.
svals, ppcc = stats.ppcc_plot([], 0, 1)
assert_allclose(svals, np.linspace(0, 1, num=80))
assert_allclose(ppcc, np.zeros(80, dtype=float))
class TestPpccMax(TestCase):
def test_ppcc_max_bad_arg(self):
# Raise ValueError when given an invalid distribution.
data = [1]
assert_raises(ValueError, stats.ppcc_max, data, dist="plate_of_shrimp")
def test_ppcc_max_basic(self):
np.random.seed(1234567)
x = stats.tukeylambda.rvs(-0.7, loc=2, scale=0.5, size=10000) + 1e4
# On Python 2.6 the result is accurate to 5 decimals. On Python >= 2.7
# it is accurate up to 16 decimals
assert_almost_equal(stats.ppcc_max(x), -0.71215366521264145, decimal=5)
def test_dist(self):
np.random.seed(1234567)
x = stats.tukeylambda.rvs(-0.7, loc=2, scale=0.5, size=10000) + 1e4
# Test that we can specify distributions both by name and as objects.
max1 = stats.ppcc_max(x, dist='tukeylambda')
max2 = stats.ppcc_max(x, dist=stats.tukeylambda)
assert_almost_equal(max1, -0.71215366521264145, decimal=5)
assert_almost_equal(max2, -0.71215366521264145, decimal=5)
# Test that 'tukeylambda' is the default dist
max3 = stats.ppcc_max(x)
assert_almost_equal(max3, -0.71215366521264145, decimal=5)
def test_brack(self):
np.random.seed(1234567)
x = stats.tukeylambda.rvs(-0.7, loc=2, scale=0.5, size=10000) + 1e4
assert_raises(ValueError, stats.ppcc_max, x, brack=(0.0, 1.0, 0.5))
# On Python 2.6 the result is accurate to 5 decimals. On Python >= 2.7
# it is accurate up to 16 decimals
assert_almost_equal(stats.ppcc_max(x, brack=(0, 1)),
-0.71215366521264145, decimal=5)
# On Python 2.6 the result is accurate to 5 decimals. On Python >= 2.7
# it is accurate up to 16 decimals
assert_almost_equal(stats.ppcc_max(x, brack=(-2, 2)),
-0.71215366521264145, decimal=5)
class TestBoxcox_llf(TestCase):
def test_basic(self):
np.random.seed(54321)
x = stats.norm.rvs(size=10000, loc=10)
lmbda = 1
llf = stats.boxcox_llf(lmbda, x)
llf_expected = -x.size / 2. * np.log(np.sum(x.std()**2))
assert_allclose(llf, llf_expected)
def test_array_like(self):
np.random.seed(54321)
x = stats.norm.rvs(size=100, loc=10)
lmbda = 1
llf = stats.boxcox_llf(lmbda, x)
llf2 = stats.boxcox_llf(lmbda, list(x))
assert_allclose(llf, llf2, rtol=1e-12)
def test_2d_input(self):
# Note: boxcox_llf() was already working with 2-D input (sort of), so
# keep it like that. boxcox() doesn't work with 2-D input though, due
# to brent() returning a scalar.
np.random.seed(54321)
x = stats.norm.rvs(size=100, loc=10)
lmbda = 1
llf = stats.boxcox_llf(lmbda, x)
llf2 = stats.boxcox_llf(lmbda, np.vstack([x, x]).T)
assert_allclose([llf, llf], llf2, rtol=1e-12)
def test_empty(self):
assert_(np.isnan(stats.boxcox_llf(1, [])))
class TestBoxcox(TestCase):
def test_fixed_lmbda(self):
np.random.seed(12345)
x = stats.loggamma.rvs(5, size=50) + 5
xt = stats.boxcox(x, lmbda=1)
assert_allclose(xt, x - 1)
xt = stats.boxcox(x, lmbda=-1)
assert_allclose(xt, 1 - 1/x)
xt = stats.boxcox(x, lmbda=0)
assert_allclose(xt, np.log(x))
# Also test that array_like input works
xt = stats.boxcox(list(x), lmbda=0)
assert_allclose(xt, np.log(x))
def test_lmbda_None(self):
np.random.seed(1234567)
# Start from normal rv's, do inverse transform to check that
# optimization function gets close to the right answer.
np.random.seed(1245)
lmbda = 2.5
x = stats.norm.rvs(loc=10, size=50000)
x_inv = (x * lmbda + 1)**(-lmbda)
xt, maxlog = stats.boxcox(x_inv)
assert_almost_equal(maxlog, -1 / lmbda, decimal=2)
def test_alpha(self):
np.random.seed(1234)
x = stats.loggamma.rvs(5, size=50) + 5
# Some regular values for alpha, on a small sample size
_, _, interval = stats.boxcox(x, alpha=0.75)
assert_allclose(interval, [4.004485780226041, 5.138756355035744])
_, _, interval = stats.boxcox(x, alpha=0.05)
assert_allclose(interval, [1.2138178554857557, 8.209033272375663])
# Try some extreme values, see we don't hit the N=500 limit
x = stats.loggamma.rvs(7, size=500) + 15
_, _, interval = stats.boxcox(x, alpha=0.001)
assert_allclose(interval, [0.3988867, 11.40553131])
_, _, interval = stats.boxcox(x, alpha=0.999)
assert_allclose(interval, [5.83316246, 5.83735292])
def test_boxcox_bad_arg(self):
# Raise ValueError if any data value is negative.
x = np.array([-1])
assert_raises(ValueError, stats.boxcox, x)
def test_empty(self):
assert_(stats.boxcox([]).shape == (0,))
class TestBoxcoxNormmax(TestCase):
def setUp(self):
np.random.seed(12345)
self.x = stats.loggamma.rvs(5, size=50) + 5
def test_pearsonr(self):
maxlog = stats.boxcox_normmax(self.x)
assert_allclose(maxlog, 1.804465, rtol=1e-6)
def test_mle(self):
maxlog = stats.boxcox_normmax(self.x, method='mle')
assert_allclose(maxlog, 1.758101, rtol=1e-6)
# Check that boxcox() uses 'mle'
_, maxlog_boxcox = stats.boxcox(self.x)
assert_allclose(maxlog_boxcox, maxlog)
def test_all(self):
maxlog_all = stats.boxcox_normmax(self.x, method='all')
assert_allclose(maxlog_all, [1.804465, 1.758101], rtol=1e-6)
class TestBoxcoxNormplot(TestCase):
def setUp(self):
np.random.seed(7654321)
self.x = stats.loggamma.rvs(5, size=500) + 5
def test_basic(self):
N = 5
lmbdas, ppcc = stats.boxcox_normplot(self.x, -10, 10, N=N)
ppcc_expected = [0.57783375, 0.83610988, 0.97524311, 0.99756057,
0.95843297]
assert_allclose(lmbdas, np.linspace(-10, 10, num=N))
assert_allclose(ppcc, ppcc_expected)
@dec.skipif(not have_matplotlib)
def test_plot_kwarg(self):
# Check with the matplotlib.pyplot module
fig = plt.figure()
fig.add_subplot(111)
stats.boxcox_normplot(self.x, -20, 20, plot=plt)
plt.close()
# Check that a Matplotlib Axes object is accepted
fig = plt.figure()
ax = fig.add_subplot(111)
stats.boxcox_normplot(self.x, -20, 20, plot=ax)
plt.close()
def test_invalid_inputs(self):
# `lb` has to be larger than `la`
assert_raises(ValueError, stats.boxcox_normplot, self.x, 1, 0)
# `x` can not contain negative values
assert_raises(ValueError, stats.boxcox_normplot, [-1, 1], 0, 1)
def test_empty(self):
assert_(stats.boxcox_normplot([], 0, 1).size == 0)
class TestCircFuncs(TestCase):
def test_circfuncs(self):
x = np.array([355, 5, 2, 359, 10, 350])
M = stats.circmean(x, high=360)
Mval = 0.167690146
assert_allclose(M, Mval, rtol=1e-7)
V = stats.circvar(x, high=360)
Vval = 42.51955609
assert_allclose(V, Vval, rtol=1e-7)
S = stats.circstd(x, high=360)
Sval = 6.520702116
assert_allclose(S, Sval, rtol=1e-7)
def test_circfuncs_small(self):
x = np.array([20, 21, 22, 18, 19, 20.5, 19.2])
M1 = x.mean()
M2 = stats.circmean(x, high=360)
assert_allclose(M2, M1, rtol=1e-5)
V1 = x.var()
V2 = stats.circvar(x, high=360)
assert_allclose(V2, V1, rtol=1e-4)
S1 = x.std()
S2 = stats.circstd(x, high=360)
assert_allclose(S2, S1, rtol=1e-4)
def test_circmean_axis(self):
x = np.array([[355, 5, 2, 359, 10, 350],
[351, 7, 4, 352, 9, 349],
[357, 9, 8, 358, 4, 356]])
M1 = stats.circmean(x, high=360)
M2 = stats.circmean(x.ravel(), high=360)
assert_allclose(M1, M2, rtol=1e-14)
M1 = stats.circmean(x, high=360, axis=1)
M2 = [stats.circmean(x[i], high=360) for i in range(x.shape[0])]
assert_allclose(M1, M2, rtol=1e-14)
M1 = stats.circmean(x, high=360, axis=0)
M2 = [stats.circmean(x[:, i], high=360) for i in range(x.shape[1])]
assert_allclose(M1, M2, rtol=1e-14)
def test_circvar_axis(self):
x = np.array([[355, 5, 2, 359, 10, 350],
[351, 7, 4, 352, 9, 349],
[357, 9, 8, 358, 4, 356]])
V1 = stats.circvar(x, high=360)
V2 = stats.circvar(x.ravel(), high=360)
assert_allclose(V1, V2, rtol=1e-11)
V1 = stats.circvar(x, high=360, axis=1)
V2 = [stats.circvar(x[i], high=360) for i in range(x.shape[0])]
assert_allclose(V1, V2, rtol=1e-11)
V1 = stats.circvar(x, high=360, axis=0)
V2 = [stats.circvar(x[:, i], high=360) for i in range(x.shape[1])]
assert_allclose(V1, V2, rtol=1e-11)
def test_circstd_axis(self):
x = np.array([[355, 5, 2, 359, 10, 350],
[351, 7, 4, 352, 9, 349],
[357, 9, 8, 358, 4, 356]])
S1 = stats.circstd(x, high=360)
S2 = stats.circstd(x.ravel(), high=360)
assert_allclose(S1, S2, rtol=1e-11)
S1 = stats.circstd(x, high=360, axis=1)
S2 = [stats.circstd(x[i], high=360) for i in range(x.shape[0])]
assert_allclose(S1, S2, rtol=1e-11)
S1 = stats.circstd(x, high=360, axis=0)
S2 = [stats.circstd(x[:, i], high=360) for i in range(x.shape[1])]
assert_allclose(S1, S2, rtol=1e-11)
def test_circfuncs_array_like(self):
x = [355, 5, 2, 359, 10, 350]
assert_allclose(stats.circmean(x, high=360), 0.167690146, rtol=1e-7)
assert_allclose(stats.circvar(x, high=360), 42.51955609, rtol=1e-7)
assert_allclose(stats.circstd(x, high=360), 6.520702116, rtol=1e-7)
def test_empty(self):
assert_(np.isnan(stats.circmean([])))
assert_(np.isnan(stats.circstd([])))
assert_(np.isnan(stats.circvar([])))
def test_circmean_scalar(self):
x = 1.
M1 = x
M2 = stats.circmean(x)
assert_allclose(M2, M1, rtol=1e-5)
def test_circmean_range(self):
# regression test for gh-6420: circmean(..., high, low) must be
# between `high` and `low`
m = stats.circmean(np.arange(0, 2, 0.1), np.pi, -np.pi)
assert_(m < np.pi)
assert_(m > -np.pi)
def test_circfuncs_uint8(self):
# regression test for gh-7255: overflow when working with
# numpy uint8 data type
x = np.array([150, 10], dtype='uint8')
assert_equal(stats.circmean(x, high=180), 170.0)
assert_allclose(stats.circvar(x, high=180), 437.45871686, rtol=1e-7)
assert_allclose(stats.circstd(x, high=180), 20.91551378, rtol=1e-7)
def test_accuracy_wilcoxon():
freq = [1, 4, 16, 15, 8, 4, 5, 1, 2]
nums = range(-4, 5)
x = np.concatenate([[u] * v for u, v in zip(nums, freq)])
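# Expand the frequency table into raw observations: freq[i] copies of
# nums[i], giving 56 paired differences in total.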
y = np.zeros(x.size)
T, p = stats.wilcoxon(x, y, "pratt")
assert_allclose(T, 423)
assert_allclose(p, 0.00197547303533107)
T, p = stats.wilcoxon(x, y, "zsplit")
assert_allclose(T, 441)
assert_allclose(p, 0.0032145343172473055)
T, p = stats.wilcoxon(x, y, "wilcox")
assert_allclose(T, 327)
assert_allclose(p, 0.00641346115861)
# Test the 'correction' option, using values computed in R with:
# > wilcox.test(x, y, paired=TRUE, exact=FALSE, correct={FALSE,TRUE})
x = np.array([120, 114, 181, 188, 180, 146, 121, 191, 132, 113, 127, 112])
y = np.array([133, 143, 119, 189, 112, 199, 198, 113, 115, 121, 142, 187])
T, p = stats.wilcoxon(x, y, correction=False)
assert_equal(T, 34)
assert_allclose(p, 0.6948866, rtol=1e-6)
T, p = stats.wilcoxon(x, y, correction=True)
assert_equal(T, 34)
assert_allclose(p, 0.7240817, rtol=1e-6)
def test_wilcoxon_result_attributes():
x = np.array([120, 114, 181, 188, 180, 146, 121, 191, 132, 113, 127, 112])
y = np.array([133, 143, 119, 189, 112, 199, 198, 113, 115, 121, 142, 187])
res = stats.wilcoxon(x, y, correction=False)
attributes = ('statistic', 'pvalue')
check_named_results(res, attributes)
def test_wilcoxon_tie():
# Regression test for gh-2391.
# Corresponding R code is:
# > result = wilcox.test(rep(0.1, 10), exact=FALSE, correct=FALSE)
# > result$p.value
# [1] 0.001565402
# > result = wilcox.test(rep(0.1, 10), exact=FALSE, correct=TRUE)
# > result$p.value
# [1] 0.001904195
stat, p = stats.wilcoxon([0.1] * 10)
expected_p = 0.001565402
assert_equal(stat, 0)
assert_allclose(p, expected_p, rtol=1e-6)
stat, p = stats.wilcoxon([0.1] * 10, correction=True)
expected_p = 0.001904195
assert_equal(stat, 0)
assert_allclose(p, expected_p, rtol=1e-6)
class TestMedianTest(TestCase):
def test_bad_n_samples(self):
# median_test requires at least two samples.
assert_raises(ValueError, stats.median_test, [1, 2, 3])
def test_empty_sample(self):
# Each sample must contain at least one value.
assert_raises(ValueError, stats.median_test, [], [1, 2, 3])
def test_empty_when_ties_ignored(self):
# The grand median is 1, and all values in the first argument are
# equal to the grand median. With ties="ignore", those values are
# ignored, which results in the first sample being (in effect) empty.
# This should raise a ValueError.
assert_raises(ValueError, stats.median_test,
[1, 1, 1, 1], [2, 0, 1], [2, 0], ties="ignore")
def test_empty_contingency_row(self):
# The grand median is 1, and with the default ties="below", all the
# values in the samples are counted as being below the grand median.
# This would result in a row of zeros in the contingency table, which is
# an error.
assert_raises(ValueError, stats.median_test, [1, 1, 1], [1, 1, 1])
# With ties="above", all the values are counted as above the
# grand median.
assert_raises(ValueError, stats.median_test, [1, 1, 1], [1, 1, 1],
ties="above")
def test_bad_ties(self):
assert_raises(ValueError, stats.median_test, [1, 2, 3], [4, 5],
ties="foo")
def test_bad_nan_policy(self):
assert_raises(ValueError, stats.median_test, [1, 2, 3], [4, 5], nan_policy='foobar')
def test_bad_keyword(self):
assert_raises(TypeError, stats.median_test, [1, 2, 3], [4, 5],
foo="foo")
def test_simple(self):
x = [1, 2, 3]
y = [1, 2, 3]
stat, p, med, tbl = stats.median_test(x, y)
# The median is floating point, but this equality test should be safe.
assert_equal(med, 2.0)
assert_array_equal(tbl, [[1, 1], [2, 2]])
# The expected values of the contingency table equal the contingency
# table, so the statistic should be 0 and the p-value should be 1.
assert_equal(stat, 0)
assert_equal(p, 1)
def test_ties_options(self):
# Test the contingency table calculation.
x = [1, 2, 3, 4]
y = [5, 6]
z = [7, 8, 9]
# grand median is 5.
# Default 'ties' option is "below".
stat, p, m, tbl = stats.median_test(x, y, z)
assert_equal(m, 5)
assert_equal(tbl, [[0, 1, 3], [4, 1, 0]])
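# In the contingency table the first row counts values above the grand median
# and the second row the remainder; the `ties` option decides which row (if
# any) the values equal to the median fall into.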
stat, p, m, tbl = stats.median_test(x, y, z, ties="ignore")
assert_equal(m, 5)
assert_equal(tbl, [[0, 1, 3], [4, 0, 0]])
stat, p, m, tbl = stats.median_test(x, y, z, ties="above")
assert_equal(m, 5)
assert_equal(tbl, [[0, 2, 3], [4, 0, 0]])
def test_nan_policy_options(self):
x = [1, 2, np.nan]
y = [4, 5, 6]
mt1 = stats.median_test(x, y, nan_policy='propagate')
s, p, m, t = stats.median_test(x, y, nan_policy='omit')
assert_equal(mt1, (np.nan, np.nan, np.nan, None))
assert_allclose(s, 0.31250000000000006)
assert_allclose(p, 0.57615012203057869)
assert_equal(m, 4.0)
assert_equal(t, np.array([[0, 2],[2, 1]]))
assert_raises(ValueError, stats.median_test, x, y, nan_policy='raise')
def test_basic(self):
# median_test calls chi2_contingency to compute the test statistic
# and p-value. Make sure it hasn't screwed up the call...
x = [1, 2, 3, 4, 5]
y = [2, 4, 6, 8]
stat, p, m, tbl = stats.median_test(x, y)
assert_equal(m, 4)
assert_equal(tbl, [[1, 2], [4, 2]])
exp_stat, exp_p, dof, e = stats.chi2_contingency(tbl)
assert_allclose(stat, exp_stat)
assert_allclose(p, exp_p)
stat, p, m, tbl = stats.median_test(x, y, lambda_=0)
assert_equal(m, 4)
assert_equal(tbl, [[1, 2], [4, 2]])
exp_stat, exp_p, dof, e = stats.chi2_contingency(tbl, lambda_=0)
assert_allclose(stat, exp_stat)
assert_allclose(p, exp_p)
stat, p, m, tbl = stats.median_test(x, y, correction=False)
assert_equal(m, 4)
assert_equal(tbl, [[1, 2], [4, 2]])
exp_stat, exp_p, dof, e = stats.chi2_contingency(tbl, correction=False)
assert_allclose(stat, exp_stat)
assert_allclose(p, exp_p)
if __name__ == "__main__":
run_module_suite()
| bsd-3-clause |
lbishal/scikit-learn | examples/calibration/plot_calibration_curve.py | 17 | 5902 | """
==============================
Probability Calibration curves
==============================
When performing classification one often wants to predict not only the class
label, but also the associated probability. This probability gives some
kind of confidence on the prediction. This example demonstrates how to display
how well calibrated the predicted probabilities are and how to calibrate an
uncalibrated classifier.
The experiment is performed on an artificial dataset for binary classification
with 100,000 samples (1,000 of them are used for model fitting) with 20
features. Of the 20 features, only 2 are informative and 10 are redundant. The
first figure shows the estimated probabilities obtained with logistic
regression, Gaussian naive Bayes, and Gaussian naive Bayes with both isotonic
calibration and sigmoid calibration. The calibration performance is evaluated
with Brier score, reported in the legend (the smaller the better). One can
observe here that logistic regression is well calibrated while raw Gaussian
naive Bayes performs very badly. This is because of the redundant features
which violate the assumption of feature-independence and result in an overly
confident classifier, which is indicated by the typical transposed-sigmoid
curve.
Calibration of the probabilities of Gaussian naive Bayes with isotonic
regression can fix this issue as can be seen from the nearly diagonal
calibration curve. Sigmoid calibration also improves the Brier score slightly,
albeit not as strongly as the non-parametric isotonic regression. This can be
attributed to the fact that we have plenty of calibration data such that the
greater flexibility of the non-parametric model can be exploited.
The second figure shows the calibration curve of a linear support-vector
classifier (LinearSVC). LinearSVC shows the opposite behavior to Gaussian
naive Bayes: the calibration curve has a sigmoid shape, which is typical of
an under-confident classifier. In the case of LinearSVC, this is caused by the
margin property of the hinge loss, which lets the model focus on hard samples
that are close to the decision boundary (the support vectors).
Both kinds of calibration can fix this issue and yield nearly identical
results. This shows that sigmoid calibration can deal with situations where
the calibration curve of the base classifier is sigmoid (e.g., for LinearSVC)
but not where it is transposed-sigmoid (e.g., Gaussian naive Bayes).
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# Jan Hendrik Metzen <[email protected]>
# License: BSD Style.
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import LinearSVC
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import (brier_score_loss, precision_score, recall_score,
f1_score)
from sklearn.calibration import CalibratedClassifierCV, calibration_curve
from sklearn.model_selection import train_test_split
# Create dataset of classification task with many redundant and few
# informative features
X, y = datasets.make_classification(n_samples=100000, n_features=20,
n_informative=2, n_redundant=10,
random_state=42)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.99,
random_state=42)
def plot_calibration_curve(est, name, fig_index):
"""Plot calibration curve for est w/o and with calibration. """
# Calibrated with isotonic calibration
isotonic = CalibratedClassifierCV(est, cv=2, method='isotonic')
# Calibrated with sigmoid calibration
sigmoid = CalibratedClassifierCV(est, cv=2, method='sigmoid')
# Logistic regression with no calibration as baseline
lr = LogisticRegression(C=1., solver='lbfgs')
fig = plt.figure(fig_index, figsize=(10, 10))
ax1 = plt.subplot2grid((3, 1), (0, 0), rowspan=2)
ax2 = plt.subplot2grid((3, 1), (2, 0))
ax1.plot([0, 1], [0, 1], "k:", label="Perfectly calibrated")
for clf, name in [(lr, 'Logistic'),
(est, name),
(isotonic, name + ' + Isotonic'),
(sigmoid, name + ' + Sigmoid')]:
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
if hasattr(clf, "predict_proba"):
prob_pos = clf.predict_proba(X_test)[:, 1]
else: # use decision function
prob_pos = clf.decision_function(X_test)
prob_pos = \
(prob_pos - prob_pos.min()) / (prob_pos.max() - prob_pos.min())
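# decision_function scores are not probabilities; min-max scale them into
# [0, 1] so they can be passed to brier_score_loss and calibration_curve.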
clf_score = brier_score_loss(y_test, prob_pos, pos_label=y.max())
print("%s:" % name)
print("\tBrier: %1.3f" % (clf_score))
print("\tPrecision: %1.3f" % precision_score(y_test, y_pred))
print("\tRecall: %1.3f" % recall_score(y_test, y_pred))
print("\tF1: %1.3f\n" % f1_score(y_test, y_pred))
fraction_of_positives, mean_predicted_value = \
calibration_curve(y_test, prob_pos, n_bins=10)
ax1.plot(mean_predicted_value, fraction_of_positives, "s-",
label="%s (%1.3f)" % (name, clf_score))
ax2.hist(prob_pos, range=(0, 1), bins=10, label=name,
histtype="step", lw=2)
ax1.set_ylabel("Fraction of positives")
ax1.set_ylim([-0.05, 1.05])
ax1.legend(loc="lower right")
ax1.set_title('Calibration plots (reliability curve)')
ax2.set_xlabel("Mean predicted value")
ax2.set_ylabel("Count")
ax2.legend(loc="upper center", ncol=2)
plt.tight_layout()
# Plot calibration curve for Gaussian Naive Bayes
plot_calibration_curve(GaussianNB(), "Naive Bayes", 1)
# Plot calibration curve for Linear SVC
plot_calibration_curve(LinearSVC(), "SVC", 2)
plt.show()
| bsd-3-clause |
WillisXChen/django-oscar | oscar/lib/python2.7/site-packages/IPython/extensions/sympyprinting.py | 12 | 5609 | """
A print function that pretty prints sympy Basic objects.
:moduleauthor: Brian Granger
Usage
=====
Once the extension is loaded, Sympy Basic objects are automatically
pretty-printed.
As of SymPy 0.7.2, maintenance of this extension has moved to SymPy under
sympy.interactive.ipythonprinting, any modifications to account for changes to
SymPy should be submitted to SymPy rather than changed here. This module is
maintained here for backwards compatibility with old SymPy versions.
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2008 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from IPython.lib.latextools import latex_to_png
from IPython.utils.py3compat import string_types
try:
from sympy import pretty, latex
except ImportError:
pass
import warnings
#-----------------------------------------------------------------------------
# Definitions of special display functions for use with IPython
#-----------------------------------------------------------------------------
def print_basic_unicode(o, p, cycle):
"""A function to pretty print sympy Basic objects."""
if cycle:
return p.text('Basic(...)')
out = pretty(o, use_unicode=True)
if '\n' in out:
p.text(u'\n')
p.text(out)
def print_png(o):
"""
A function to display sympy expression using inline style LaTeX in PNG.
"""
s = latex(o, mode='inline')
# mathtext does not understand certain latex flags, so we try to replace
# them with suitable subs.
s = s.replace('\\operatorname','')
s = s.replace('\\overline', '\\bar')
png = latex_to_png(s)
return png
def print_display_png(o):
"""
A function to display sympy expression using display style LaTeX in PNG.
"""
s = latex(o, mode='plain')
s = s.strip('$')
# As matplotlib does not support display style, dvipng backend is
# used here.
png = latex_to_png(s, backend='dvipng', wrap=True)
return png
def can_print_latex(o):
"""
Return True if type o can be printed with LaTeX.
If o is a container type, this is True if and only if every element of o
can be printed with LaTeX.
"""
import sympy
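# Containers are checked recursively; dict keys may additionally be plain
# strings even when they are not LaTeX-printable themselves.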
if isinstance(o, (list, tuple, set, frozenset)):
return all(can_print_latex(i) for i in o)
elif isinstance(o, dict):
return all((isinstance(i, string_types) or can_print_latex(i)) and can_print_latex(o[i]) for i in o)
elif isinstance(o,(sympy.Basic, sympy.matrices.Matrix, int, long, float)):
return True
return False
def print_latex(o):
"""A function to generate the latex representation of sympy
expressions."""
if can_print_latex(o):
s = latex(o, mode='plain')
s = s.replace('\\dag','\\dagger')
s = s.strip('$')
return '$$%s$$' % s
# Fallback to the string printer
return None
_loaded = False
def load_ipython_extension(ip):
"""Load the extension in IPython."""
import sympy
# sympyprinting extension has been moved to SymPy as of 0.7.2, if it
# exists there, warn the user and import it
try:
import sympy.interactive.ipythonprinting
except ImportError:
pass
else:
warnings.warn("The sympyprinting extension in IPython is deprecated, "
"use 'from sympy import init_printing; init_printing()'")
ip.extension_manager.load_extension('sympy.interactive.ipythonprinting')
return
global _loaded
if not _loaded:
plaintext_formatter = ip.display_formatter.formatters['text/plain']
for cls in (object, str):
plaintext_formatter.for_type(cls, print_basic_unicode)
printable_containers = [list, tuple]
# set and frozen set were broken with SymPy's latex() function, but
# were fixed in the 0.7.1-git development version. See
# http://code.google.com/p/sympy/issues/detail?id=3062.
if sympy.__version__ > '0.7.1':
printable_containers += [set, frozenset]
else:
plaintext_formatter.for_type(cls, print_basic_unicode)
plaintext_formatter.for_type_by_name(
'sympy.core.basic', 'Basic', print_basic_unicode
)
plaintext_formatter.for_type_by_name(
'sympy.matrices.matrices', 'Matrix', print_basic_unicode
)
png_formatter = ip.display_formatter.formatters['image/png']
png_formatter.for_type_by_name(
'sympy.core.basic', 'Basic', print_png
)
png_formatter.for_type_by_name(
'sympy.matrices.matrices', 'Matrix', print_display_png
)
for cls in [dict, int, long, float] + printable_containers:
png_formatter.for_type(cls, print_png)
latex_formatter = ip.display_formatter.formatters['text/latex']
latex_formatter.for_type_by_name(
'sympy.core.basic', 'Basic', print_latex
)
latex_formatter.for_type_by_name(
'sympy.matrices.matrices', 'Matrix', print_latex
)
for cls in printable_containers:
# Use LaTeX only if every element is printable by latex
latex_formatter.for_type(cls, print_latex)
_loaded = True
| bsd-3-clause |
JohnGriffiths/nipype | doc/conf.py | 7 | 8353 | # emacs: -*- coding: utf-8; mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set fileencoding=utf-8 ft=python sts=4 ts=4 sw=4 et:
#
# nipype documentation build configuration file, created by
# sphinx-quickstart on Mon Jul 20 12:30:18 2009.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
nipypepath = os.path.abspath('..')
sys.path.insert(1,nipypepath)
import nipype
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.append(os.path.abspath('sphinxext'))
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.todo',
'sphinx.ext.pngmath',
'sphinx.ext.inheritance_diagram',
'sphinx.ext.graphviz',
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.autosummary',
'numpy_ext.numpydoc',
'matplotlib.sphinxext.plot_directive',
'matplotlib.sphinxext.only_directives',
'IPython.sphinxext.ipython_directive',
'IPython.sphinxext.ipython_console_highlighting'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'nipype'
copyright = u'2009-14, Neuroimaging in Python team'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = nipype.__version__
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%B %d, %Y, %H:%M PDT'
# List of documents that shouldn't be included in the build.
unused_docs = ['api/generated/gen']
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Sphinxext configuration ---------------------------------------------------
# Set attributes for layout of inheritance diagrams
inheritance_graph_attrs = dict(rankdir="LR", size='"6.0, 8.0"', fontsize=14,
ratio='compress')
inheritance_node_attrs = dict(shape='ellipse', fontsize=14, height=0.75,
color='dodgerblue1', style='filled')
# Flag to show todo items in rendered output
todo_include_todos = True
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'sphinxdoc'
# The style sheet to use for HTML and HTML Help pages. A file of that name
# must exist either in Sphinx' static/ path, or in one of the custom paths
# given in html_static_path.
html_style = 'nipype.css'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = 'nipy pipeline and interfaces package'
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# Content template for the index page.
html_index = 'index.html'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
html_sidebars = {'**': ['gse.html','localtoc.html', 'sidebar_versions.html', 'indexsidebar.html'],
'searchresults' : ['sidebar_versions.html', 'indexsidebar.html'],
'version' : []}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {'index': 'index.html'}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = False
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'nipypedoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('documentation', 'nipype.tex', u'nipype Documentation',
u'Neuroimaging in Python team', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
exclude_patterns = ['interfaces/generated/gen.rst', 'api/generated/gen.rst']
| bsd-3-clause |
rahuldhote/scikit-learn | sklearn/linear_model/logistic.py | 105 | 56686 | """
Logistic Regression
"""
# Author: Gael Varoquaux <[email protected]>
# Fabian Pedregosa <[email protected]>
# Alexandre Gramfort <[email protected]>
# Manoj Kumar <[email protected]>
# Lars Buitinck
# Simon Wu <[email protected]>
import numbers
import warnings
import numpy as np
from scipy import optimize, sparse
from .base import LinearClassifierMixin, SparseCoefMixin, BaseEstimator
from ..feature_selection.from_model import _LearntSelectorMixin
from ..preprocessing import LabelEncoder, LabelBinarizer
from ..svm.base import _fit_liblinear
from ..utils import check_array, check_consistent_length, compute_class_weight
from ..utils import check_random_state
from ..utils.extmath import (logsumexp, log_logistic, safe_sparse_dot,
squared_norm)
from ..utils.optimize import newton_cg
from ..utils.validation import (as_float_array, DataConversionWarning,
check_X_y)
from ..utils.fixes import expit
from ..externals.joblib import Parallel, delayed
from ..cross_validation import check_cv
from ..externals import six
from ..metrics import SCORERS
# .. some helper functions for logistic_regression_path ..
def _intercept_dot(w, X, y):
"""Computes y * np.dot(X, w).
It takes into consideration if the intercept should be fit or not.
Parameters
----------
w : ndarray, shape (n_features,) or (n_features + 1,)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : ndarray, shape (n_samples,)
Array of labels.
"""
c = 0.
if w.size == X.shape[1] + 1:
c = w[-1]
w = w[:-1]
z = safe_sparse_dot(X, w) + c
return w, c, y * z
def _logistic_loss_and_grad(w, X, y, alpha, sample_weight=None):
"""Computes the logistic loss and gradient.
Parameters
----------
w : ndarray, shape (n_features,) or (n_features + 1,)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : ndarray, shape (n_samples,)
Array of labels.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : ndarray, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
out : float
Logistic loss.
grad : ndarray, shape (n_features,) or (n_features + 1,)
Logistic gradient.
"""
_, n_features = X.shape
grad = np.empty_like(w)
w, c, yz = _intercept_dot(w, X, y)
if sample_weight is None:
sample_weight = np.ones(y.shape[0])
# Logistic loss is the negative of the log of the logistic function.
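# i.e. out = -sum_i s_i * log(sigmoid(y_i * (x_i . w + c))) + 0.5 * alpha * ||w||^2,
# where s_i are the sample weights.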
out = -np.sum(sample_weight * log_logistic(yz)) + .5 * alpha * np.dot(w, w)
z = expit(yz)
z0 = sample_weight * (z - 1) * y
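# z0 is the derivative of the loss w.r.t. each sample's linear score, so the
# gradient over the weights is X.T @ z0 + alpha * w.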
grad[:n_features] = safe_sparse_dot(X.T, z0) + alpha * w
# Case where we fit the intercept.
if grad.shape[0] > n_features:
grad[-1] = z0.sum()
return out, grad
def _logistic_loss(w, X, y, alpha, sample_weight=None):
"""Computes the logistic loss.
Parameters
----------
w : ndarray, shape (n_features,) or (n_features + 1,)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : ndarray, shape (n_samples,)
Array of labels.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : ndarray, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
out : float
Logistic loss.
"""
w, c, yz = _intercept_dot(w, X, y)
if sample_weight is None:
sample_weight = np.ones(y.shape[0])
# Logistic loss is the negative of the log of the logistic function.
out = -np.sum(sample_weight * log_logistic(yz)) + .5 * alpha * np.dot(w, w)
return out
def _logistic_grad_hess(w, X, y, alpha, sample_weight=None):
"""Computes the gradient and the Hessian, in the case of a logistic loss.
Parameters
----------
w : ndarray, shape (n_features,) or (n_features + 1,)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : ndarray, shape (n_samples,)
Array of labels.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : ndarray, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
grad : ndarray, shape (n_features,) or (n_features + 1,)
Logistic gradient.
Hs : callable
Function that takes the gradient as a parameter and returns the
matrix product of the Hessian and gradient.
"""
n_samples, n_features = X.shape
grad = np.empty_like(w)
fit_intercept = grad.shape[0] > n_features
w, c, yz = _intercept_dot(w, X, y)
if sample_weight is None:
sample_weight = np.ones(y.shape[0])
z = expit(yz)
z0 = sample_weight * (z - 1) * y
grad[:n_features] = safe_sparse_dot(X.T, z0) + alpha * w
# Case where we fit the intercept.
if fit_intercept:
grad[-1] = z0.sum()
# The mat-vec product of the Hessian
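# The feature block of the Hessian is X.T @ D @ X + alpha * I with
# D = diag(s_i * z_i * (1 - z_i)); Hs below applies it to a vector without
# materialising the matrix.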
d = sample_weight * z * (1 - z)
if sparse.issparse(X):
dX = safe_sparse_dot(sparse.dia_matrix((d, 0),
shape=(n_samples, n_samples)), X)
else:
# Precompute as much as possible
dX = d[:, np.newaxis] * X
if fit_intercept:
# Calculate the double derivative with respect to intercept
# In the case of sparse matrices this returns a matrix object.
dd_intercept = np.squeeze(np.array(dX.sum(axis=0)))
def Hs(s):
ret = np.empty_like(s)
ret[:n_features] = X.T.dot(dX.dot(s[:n_features]))
ret[:n_features] += alpha * s[:n_features]
# For the fit intercept case.
if fit_intercept:
ret[:n_features] += s[-1] * dd_intercept
ret[-1] = dd_intercept.dot(s[:n_features])
ret[-1] += d.sum() * s[-1]
return ret
return grad, Hs
def _multinomial_loss(w, X, Y, alpha, sample_weight):
"""Computes multinomial loss and class probabilities.
Parameters
----------
w : ndarray, shape (n_classes * n_features,) or (n_classes * (n_features + 1),)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
Y : ndarray, shape (n_samples, n_classes)
Transformed labels according to the output of LabelBinarizer.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : ndarray, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
loss : float
Multinomial loss.
p : ndarray, shape (n_samples, n_classes)
Estimated class probabilities.
w : ndarray, shape (n_classes, n_features)
Reshaped param vector excluding intercept terms.
"""
n_classes = Y.shape[1]
n_features = X.shape[1]
fit_intercept = w.size == (n_classes * (n_features + 1))
w = w.reshape(n_classes, -1)
sample_weight = sample_weight[:, np.newaxis]
if fit_intercept:
intercept = w[:, -1]
w = w[:, :-1]
else:
intercept = 0
p = safe_sparse_dot(X, w.T)
p += intercept
p -= logsumexp(p, axis=1)[:, np.newaxis]
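# Subtracting logsumexp turns the raw scores into log-softmax probabilities
# in a numerically stable way.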
loss = -(sample_weight * Y * p).sum()
loss += 0.5 * alpha * squared_norm(w)
p = np.exp(p, p)
return loss, p, w
def _multinomial_loss_grad(w, X, Y, alpha, sample_weight):
"""Computes the multinomial loss, gradient and class probabilities.
Parameters
----------
w : ndarray, shape (n_classes * n_features,) or (n_classes * (n_features + 1),)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
Y : ndarray, shape (n_samples, n_classes)
Transformed labels according to the output of LabelBinarizer.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : ndarray, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
Returns
-------
loss : float
Multinomial loss.
grad : ndarray, shape (n_classes * n_features,) or
(n_classes * (n_features + 1),)
Ravelled gradient of the multinomial loss.
p : ndarray, shape (n_samples, n_classes)
Estimated class probabilities
"""
n_classes = Y.shape[1]
n_features = X.shape[1]
fit_intercept = (w.size == n_classes * (n_features + 1))
grad = np.zeros((n_classes, n_features + bool(fit_intercept)))
loss, p, w = _multinomial_loss(w, X, Y, alpha, sample_weight)
sample_weight = sample_weight[:, np.newaxis]
diff = sample_weight * (p - Y)
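# For the softmax/cross-entropy loss the gradient w.r.t. the class scores is
# simply (p - Y), here weighted per sample.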
grad[:, :n_features] = safe_sparse_dot(diff.T, X)
grad[:, :n_features] += alpha * w
if fit_intercept:
grad[:, -1] = diff.sum(axis=0)
return loss, grad.ravel(), p
def _multinomial_grad_hess(w, X, Y, alpha, sample_weight):
"""
Computes the gradient and the Hessian, in the case of a multinomial loss.
Parameters
----------
w : ndarray, shape (n_classes * n_features,) or (n_classes * (n_features + 1),)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
Y : ndarray, shape (n_samples, n_classes)
Transformed labels according to the output of LabelBinarizer.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : ndarray, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
Returns
-------
grad : array, shape (n_classes * n_features,) or
(n_classes * (n_features + 1),)
Ravelled gradient of the multinomial loss.
hessp : callable
Function that takes in a vector input of shape (n_classes * n_features)
or (n_classes * (n_features + 1)) and returns matrix-vector product
with hessian.
References
----------
Barak A. Pearlmutter (1993). Fast Exact Multiplication by the Hessian.
http://www.bcl.hamilton.ie/~barak/papers/nc-hessian.pdf
"""
n_features = X.shape[1]
n_classes = Y.shape[1]
fit_intercept = w.size == (n_classes * (n_features + 1))
# `loss` is unused. Refactoring to avoid computing it does not
# significantly speed up the computation and decreases readability
loss, grad, p = _multinomial_loss_grad(w, X, Y, alpha, sample_weight)
sample_weight = sample_weight[:, np.newaxis]
# Hessian-vector product derived by applying the R-operator on the gradient
# of the multinomial loss function.
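# hessp(v) returns the Hessian-vector product H @ v directly, so the full
# Hessian (which would be quadratic in n_classes * n_features) is never formed.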
def hessp(v):
v = v.reshape(n_classes, -1)
if fit_intercept:
inter_terms = v[:, -1]
v = v[:, :-1]
else:
inter_terms = 0
# r_yhat holds the result of applying the R-operator on the multinomial
# estimator.
r_yhat = safe_sparse_dot(X, v.T)
r_yhat += inter_terms
r_yhat += (-p * r_yhat).sum(axis=1)[:, np.newaxis]
r_yhat *= p
r_yhat *= sample_weight
hessProd = np.zeros((n_classes, n_features + bool(fit_intercept)))
hessProd[:, :n_features] = safe_sparse_dot(r_yhat.T, X)
hessProd[:, :n_features] += v * alpha
if fit_intercept:
hessProd[:, -1] = r_yhat.sum(axis=0)
return hessProd.ravel()
return grad, hessp
def _check_solver_option(solver, multi_class, penalty, dual):
if solver not in ['liblinear', 'newton-cg', 'lbfgs']:
raise ValueError("Logistic Regression supports only liblinear,"
" newton-cg and lbfgs solvers, got %s" % solver)
if multi_class not in ['multinomial', 'ovr']:
raise ValueError("multi_class should be either multinomial or "
"ovr, got %s" % multi_class)
if multi_class == 'multinomial' and solver == 'liblinear':
raise ValueError("Solver %s does not support "
"a multinomial backend." % solver)
if solver != 'liblinear':
if penalty != 'l2':
raise ValueError("Solver %s supports only l2 penalties, "
"got %s penalty." % (solver, penalty))
if dual:
raise ValueError("Solver %s supports only "
"dual=False, got dual=%s" % (solver, dual))
def logistic_regression_path(X, y, pos_class=None, Cs=10, fit_intercept=True,
max_iter=100, tol=1e-4, verbose=0,
solver='lbfgs', coef=None, copy=True,
class_weight=None, dual=False, penalty='l2',
intercept_scaling=1., multi_class='ovr',
random_state=None):
"""Compute a Logistic Regression model for a list of regularization
parameters.
This is an implementation that uses the result of the previous model
to speed up computations along the set of solutions, making it faster
than sequentially calling LogisticRegression for the different parameters.
Read more in the :ref:`User Guide <logistic_regression>`.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
Input data.
y : array-like, shape (n_samples,)
Input data, target values.
Cs : int | array-like, shape (n_cs,)
List of values for the regularization parameter or integer specifying
the number of regularization parameters that should be used. In this
case, the parameters will be chosen in a logarithmic scale between
1e-4 and 1e4.
pos_class : int, None
The class with respect to which we perform a one-vs-all fit.
If None, then it is assumed that the given problem is binary.
fit_intercept : bool
Whether to fit an intercept for the model. In this case the shape of
the returned array is (n_cs, n_features + 1).
max_iter : int
Maximum number of iterations for the solver.
tol : float
Stopping criterion. For the newton-cg and lbfgs solvers, the iteration
will stop when ``max{|g_i | i = 1, ..., n} <= tol``
where ``g_i`` is the i-th component of the gradient.
verbose : int
For the liblinear and lbfgs solvers set verbose to any positive
number for verbosity.
solver : {'lbfgs', 'newton-cg', 'liblinear'}
Numerical solver to use.
coef : array-like, shape (n_features,), default None
Initialization value for coefficients of logistic regression.
copy : bool, default True
Whether or not to produce a copy of the data. Setting this to
True will be useful in cases, when logistic_regression_path
is called repeatedly with the same data, as y is modified
along the path.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
dual : bool
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
penalty : str, 'l1' or 'l2'
Used to specify the norm used in the penalization. The newton-cg and
lbfgs solvers support only l2 penalties.
intercept_scaling : float, default 1.
This parameter is useful only when the solver 'liblinear' is used
and self.fit_intercept is set to True. In this case, x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
multi_class : str, {'ovr', 'multinomial'}
Multiclass option can be either 'ovr' or 'multinomial'. If the option
chosen is 'ovr', then a binary problem is fit for each label. Else
the loss minimised is the multinomial loss fit across
the entire probability distribution. Works only for the 'lbfgs' and
'newton-cg' solvers.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
Returns
-------
coefs : ndarray, shape (n_cs, n_features) or (n_cs, n_features + 1)
List of coefficients for the Logistic Regression model. If
fit_intercept is set to True then the second dimension will be
n_features + 1, where the last item represents the intercept.
Cs : ndarray
Grid of Cs used for cross-validation.
Notes
-----
You might get slightly different results with the solver liblinear than
with the others since this uses LIBLINEAR which penalizes the intercept.
"""
if isinstance(Cs, numbers.Integral):
Cs = np.logspace(-4, 4, Cs)
_check_solver_option(solver, multi_class, penalty, dual)
# Preprocessing.
X = check_array(X, accept_sparse='csr', dtype=np.float64)
y = check_array(y, ensure_2d=False, copy=copy, dtype=None)
_, n_features = X.shape
check_consistent_length(X, y)
classes = np.unique(y)
random_state = check_random_state(random_state)
if pos_class is None and multi_class != 'multinomial':
if (classes.size > 2):
raise ValueError('To fit OvR, use the pos_class argument')
# np.unique(y) gives labels in sorted order.
pos_class = classes[1]
# If class_weight is a dict (provided by the user), the weights
# are assigned to the original labels. If it is "auto", then
# the class weights are assigned after masking the labels with an OvR.
sample_weight = np.ones(X.shape[0])
le = LabelEncoder()
if isinstance(class_weight, dict):
if solver == "liblinear":
if classes.size == 2:
# Reconstruct the weights with keys 1 and -1
temp = {1: class_weight[pos_class],
-1: class_weight[classes[0]]}
class_weight = temp.copy()
else:
raise ValueError("In LogisticRegressionCV the liblinear "
"solver cannot handle multiclass with "
"class_weight of type dict. Use the lbfgs, "
"newton-cg solvers or set "
"class_weight='auto'")
else:
class_weight_ = compute_class_weight(class_weight, classes, y)
sample_weight = class_weight_[le.fit_transform(y)]
# For one-vs-rest (OvR) we need to mask the labels first; for the
# multinomial case this is not necessary.
if multi_class == 'ovr':
w0 = np.zeros(n_features + int(fit_intercept))
mask_classes = [-1, 1]
mask = (y == pos_class)
y[mask] = 1
y[~mask] = -1
# To take care of object dtypes, i.e 1 and -1 are in the form of
# strings.
y = as_float_array(y, copy=False)
else:
lbin = LabelBinarizer()
Y_bin = lbin.fit_transform(y)
if Y_bin.shape[1] == 1:
Y_bin = np.hstack([1 - Y_bin, Y_bin])
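# LabelBinarizer returns a single column for binary problems, so stack its
# complement to always have one column per class.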
w0 = np.zeros((Y_bin.shape[1], n_features + int(fit_intercept)),
order='F')
mask_classes = classes
if class_weight == "auto":
class_weight_ = compute_class_weight(class_weight, mask_classes, y)
sample_weight = class_weight_[le.fit_transform(y)]
if coef is not None:
# it must work both giving the bias term and not
if multi_class == 'ovr':
if coef.size not in (n_features, w0.size):
raise ValueError(
'Initialization coef is of shape %d, expected shape '
'%d or %d' % (coef.size, n_features, w0.size))
w0[:coef.size] = coef
else:
# For binary problems coef.shape[0] should be 1, otherwise it
# should be classes.size.
n_vectors = classes.size
if n_vectors == 2:
n_vectors = 1
if (coef.shape[0] != n_vectors or
coef.shape[1] not in (n_features, n_features + 1)):
raise ValueError(
'Initialization coef is of shape (%d, %d), expected '
'shape (%d, %d) or (%d, %d)' % (
coef.shape[0], coef.shape[1], classes.size,
n_features, classes.size, n_features + 1))
w0[:, :coef.shape[1]] = coef
if multi_class == 'multinomial':
# fmin_l_bfgs_b and newton-cg accept only ravelled parameters.
w0 = w0.ravel()
target = Y_bin
if solver == 'lbfgs':
func = lambda x, *args: _multinomial_loss_grad(x, *args)[0:2]
elif solver == 'newton-cg':
func = lambda x, *args: _multinomial_loss(x, *args)[0]
grad = lambda x, *args: _multinomial_loss_grad(x, *args)[1]
hess = _multinomial_grad_hess
else:
target = y
if solver == 'lbfgs':
func = _logistic_loss_and_grad
elif solver == 'newton-cg':
func = _logistic_loss
grad = lambda x, *args: _logistic_loss_and_grad(x, *args)[1]
hess = _logistic_grad_hess
coefs = list()
for C in Cs:
if solver == 'lbfgs':
try:
w0, loss, info = optimize.fmin_l_bfgs_b(
func, w0, fprime=None,
args=(X, target, 1. / C, sample_weight),
iprint=(verbose > 0) - 1, pgtol=tol, maxiter=max_iter)
except TypeError:
# old scipy doesn't have maxiter
w0, loss, info = optimize.fmin_l_bfgs_b(
func, w0, fprime=None,
args=(X, target, 1. / C, sample_weight),
iprint=(verbose > 0) - 1, pgtol=tol)
if info["warnflag"] == 1 and verbose > 0:
warnings.warn("lbfgs failed to converge. Increase the number "
"of iterations.")
elif solver == 'newton-cg':
args = (X, target, 1. / C, sample_weight)
w0 = newton_cg(hess, func, grad, w0, args=args, maxiter=max_iter,
tol=tol)
elif solver == 'liblinear':
coef_, intercept_, _, = _fit_liblinear(
X, y, C, fit_intercept, intercept_scaling, class_weight,
penalty, dual, verbose, max_iter, tol, random_state)
if fit_intercept:
w0 = np.concatenate([coef_.ravel(), intercept_])
else:
w0 = coef_.ravel()
else:
raise ValueError("solver must be one of {'liblinear', 'lbfgs', "
"'newton-cg'}, got '%s' instead" % solver)
if multi_class == 'multinomial':
multi_w0 = np.reshape(w0, (classes.size, -1))
if classes.size == 2:
multi_w0 = multi_w0[1][np.newaxis, :]
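# In the binary case only the coefficients of the second (positive) class are
# kept, matching the shape of the OvR output.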
coefs.append(multi_w0)
else:
coefs.append(w0)
return coefs, np.array(Cs)
# helper function for LogisticCV
def _log_reg_scoring_path(X, y, train, test, pos_class=None, Cs=10,
scoring=None, fit_intercept=False,
max_iter=100, tol=1e-4, class_weight=None,
verbose=0, solver='lbfgs', penalty='l2',
dual=False, copy=True, intercept_scaling=1.,
multi_class='ovr'):
"""Computes scores across logistic_regression_path
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target labels.
train : list of indices
The indices of the train set.
test : list of indices
The indices of the test set.
pos_class : int, None
The class with respect to which we perform a one-vs-all fit.
If None, then it is assumed that the given problem is binary.
Cs : list of floats | int
Each of the values in Cs describes the inverse of
regularization strength. If Cs is as an int, then a grid of Cs
values are chosen in a logarithmic scale between 1e-4 and 1e4.
If not provided, then a fixed set of values for Cs are used.
scoring : callable
For a list of scoring functions that can be used, look at
:mod:`sklearn.metrics`. The default scoring option used is
accuracy_score.
fit_intercept : bool
If False, then the bias term is set to zero. Else the last
term of each coef_ gives us the intercept.
max_iter : int
Maximum number of iterations for the solver.
tol : float
Tolerance for stopping criteria.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
verbose : int
For the liblinear and lbfgs solvers set verbose to any positive
number for verbosity.
solver : {'lbfgs', 'newton-cg', 'liblinear'}
Decides which solver to use.
penalty : str, 'l1' or 'l2'
Used to specify the norm used in the penalization. The newton-cg and
lbfgs solvers support only l2 penalties.
dual : bool
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
intercept_scaling : float, default 1.
This parameter is useful only when the solver 'liblinear' is used
and self.fit_intercept is set to True. In this case, x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
multi_class : str, {'ovr', 'multinomial'}
Multiclass option can be either 'ovr' or 'multinomial'. If the option
chosen is 'ovr', then a binary problem is fit for each label. Else
the loss minimised is the multinomial loss fit across
the entire probability distribution. Works only for the 'lbfgs'
solver.
copy : bool, default True
Whether or not to produce a copy of the data. Setting this to
True will be useful in cases, when ``_log_reg_scoring_path`` is called
repeatedly with the same data, as y is modified along the path.
Returns
-------
coefs : ndarray, shape (n_cs, n_features) or (n_cs, n_features + 1)
List of coefficients for the Logistic Regression model. If
fit_intercept is set to True then the second dimension will be
n_features + 1, where the last item represents the intercept.
Cs : ndarray
Grid of Cs used for cross-validation.
scores : ndarray, shape (n_cs,)
Scores obtained for each Cs.
"""
_check_solver_option(solver, multi_class, penalty, dual)
log_reg = LogisticRegression(fit_intercept=fit_intercept)
X_train = X[train]
X_test = X[test]
y_train = y[train]
y_test = y[test]
# The score method of Logistic Regression has a classes_ attribute.
if multi_class == 'ovr':
log_reg.classes_ = np.array([-1, 1])
elif multi_class == 'multinomial':
log_reg.classes_ = np.unique(y_train)
else:
raise ValueError("multi_class should be either multinomial or ovr, "
"got %d" % multi_class)
if pos_class is not None:
mask = (y_test == pos_class)
y_test[mask] = 1
y_test[~mask] = -1
# To deal with object dtypes, we need to convert into an array of floats.
y_test = as_float_array(y_test, copy=False)
coefs, Cs = logistic_regression_path(X_train, y_train, Cs=Cs,
fit_intercept=fit_intercept,
solver=solver,
max_iter=max_iter,
class_weight=class_weight,
copy=copy, pos_class=pos_class,
multi_class=multi_class,
tol=tol, verbose=verbose,
dual=dual, penalty=penalty,
intercept_scaling=intercept_scaling)
scores = list()
if isinstance(scoring, six.string_types):
scoring = SCORERS[scoring]
for w in coefs:
if multi_class == 'ovr':
w = w[np.newaxis, :]
if fit_intercept:
log_reg.coef_ = w[:, :-1]
log_reg.intercept_ = w[:, -1]
else:
log_reg.coef_ = w
log_reg.intercept_ = 0.
if scoring is None:
scores.append(log_reg.score(X_test, y_test))
else:
scores.append(scoring(log_reg, X_test, y_test))
return coefs, Cs, np.array(scores)
class LogisticRegression(BaseEstimator, LinearClassifierMixin,
_LearntSelectorMixin, SparseCoefMixin):
"""Logistic Regression (aka logit, MaxEnt) classifier.
In the multiclass case, the training algorithm uses the one-vs-rest (OvR)
scheme if the 'multi_class' option is set to 'ovr' and uses the
cross-entropy loss, if the 'multi_class' option is set to 'multinomial'.
(Currently the 'multinomial' option is supported only by the 'lbfgs' and
'newton-cg' solvers.)
This class implements regularized logistic regression using the
`liblinear` library, newton-cg and lbfgs solvers. It can handle both
dense and sparse input. Use C-ordered arrays or CSR matrices containing
64-bit floats for optimal performance; any other input format will be
converted (and copied).
The newton-cg and lbfgs solvers support only L2 regularization with primal
formulation. The liblinear solver supports both L1 and L2 regularization,
with a dual formulation only for the L2 penalty.
Read more in the :ref:`User Guide <logistic_regression>`.
Parameters
----------
penalty : str, 'l1' or 'l2'
Used to specify the norm used in the penalization. The newton-cg and
lbfgs solvers support only l2 penalties.
dual : bool
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
C : float, optional (default=1.0)
Inverse of regularization strength; must be a positive float.
Like in support vector machines, smaller values specify stronger
regularization.
fit_intercept : bool, default: True
Specifies if a constant (a.k.a. bias or intercept) should be
added to the decision function.
intercept_scaling : float, default: 1
Useful only if solver is liblinear.
when self.fit_intercept is True, instance vector x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
max_iter : int
Useful only for the newton-cg and lbfgs solvers. Maximum number of
iterations taken for the solvers to converge.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
solver : {'newton-cg', 'lbfgs', 'liblinear'}
Algorithm to use in the optimization problem.
tol : float, optional
Tolerance for stopping criteria.
multi_class : str, {'ovr', 'multinomial'}
Multiclass option can be either 'ovr' or 'multinomial'. If the option
chosen is 'ovr', then a binary problem is fit for each label. Else
the loss minimised is the multinomial loss fit across
the entire probability distribution. Works only for the 'lbfgs'
solver.
verbose : int
For the liblinear and lbfgs solvers set verbose to any positive
number for verbosity.
Attributes
----------
coef_ : array, shape (n_classes, n_features)
Coefficient of the features in the decision function.
intercept_ : array, shape (n_classes,)
Intercept (a.k.a. bias) added to the decision function.
If `fit_intercept` is set to False, the intercept is set to zero.
n_iter_ : int
Maximum of the actual number of iterations across all classes.
Valid only for the liblinear solver.
See also
--------
SGDClassifier : incrementally trained logistic regression (when given
the parameter ``loss="log"``).
sklearn.svm.LinearSVC : learns SVM models using the same algorithm.
Notes
-----
The underlying C implementation uses a random number generator to
    select features when fitting the model. It is thus not uncommon
    to have slightly different results for the same input data. If
that happens, try with a smaller tol parameter.
Predict output may not match that of standalone liblinear in certain
cases. See :ref:`differences from liblinear <liblinear_differences>`
in the narrative documentation.
References
----------
LIBLINEAR -- A Library for Large Linear Classification
http://www.csie.ntu.edu.tw/~cjlin/liblinear/
Hsiang-Fu Yu, Fang-Lan Huang, Chih-Jen Lin (2011). Dual coordinate descent
methods for logistic regression and maximum entropy models.
Machine Learning 85(1-2):41-75.
http://www.csie.ntu.edu.tw/~cjlin/papers/maxent_dual.pdf
"""
def __init__(self, penalty='l2', dual=False, tol=1e-4, C=1.0,
fit_intercept=True, intercept_scaling=1, class_weight=None,
random_state=None, solver='liblinear', max_iter=100,
multi_class='ovr', verbose=0):
self.penalty = penalty
self.dual = dual
self.tol = tol
self.C = C
self.fit_intercept = fit_intercept
self.intercept_scaling = intercept_scaling
self.class_weight = class_weight
self.random_state = random_state
self.solver = solver
self.max_iter = max_iter
self.multi_class = multi_class
self.verbose = verbose
def fit(self, X, y):
"""Fit the model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples,)
Target vector relative to X.
Returns
-------
self : object
Returns self.
"""
if not isinstance(self.C, numbers.Number) or self.C < 0:
raise ValueError("Penalty term must be positive; got (C=%r)"
% self.C)
if not isinstance(self.max_iter, numbers.Number) or self.max_iter < 0:
raise ValueError("Maximum number of iteration must be positive;"
" got (max_iter=%r)" % self.max_iter)
if not isinstance(self.tol, numbers.Number) or self.tol < 0:
raise ValueError("Tolerance for stopping criteria must be "
"positive; got (tol=%r)" % self.tol)
X, y = check_X_y(X, y, accept_sparse='csr', dtype=np.float64, order="C")
self.classes_ = np.unique(y)
_check_solver_option(self.solver, self.multi_class, self.penalty,
self.dual)
if self.solver == 'liblinear':
self.coef_, self.intercept_, self.n_iter_ = _fit_liblinear(
X, y, self.C, self.fit_intercept, self.intercept_scaling,
self.class_weight, self.penalty, self.dual, self.verbose,
self.max_iter, self.tol, self.random_state)
return self
n_classes = len(self.classes_)
classes_ = self.classes_
if n_classes < 2:
raise ValueError("This solver needs samples of at least 2 classes"
" in the data, but the data contains only one"
" class: %r" % classes_[0])
if len(self.classes_) == 2:
n_classes = 1
classes_ = classes_[1:]
self.coef_ = list()
self.intercept_ = np.zeros(n_classes)
# Hack so that we iterate only once for the multinomial case.
if self.multi_class == 'multinomial':
classes_ = [None]
for ind, class_ in enumerate(classes_):
coef_, _ = logistic_regression_path(
X, y, pos_class=class_, Cs=[self.C],
fit_intercept=self.fit_intercept, tol=self.tol,
verbose=self.verbose, solver=self.solver,
multi_class=self.multi_class, max_iter=self.max_iter,
class_weight=self.class_weight)
self.coef_.append(coef_[0])
self.coef_ = np.squeeze(self.coef_)
        # For the binary case, this gets squeezed to a 1-D array.
if self.coef_.ndim == 1:
self.coef_ = self.coef_[np.newaxis, :]
self.coef_ = np.asarray(self.coef_)
if self.fit_intercept:
self.intercept_ = self.coef_[:, -1]
self.coef_ = self.coef_[:, :-1]
return self
def predict_proba(self, X):
"""Probability estimates.
The returned estimates for all classes are ordered by the
label of classes.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
T : array-like, shape = [n_samples, n_classes]
Returns the probability of the sample for each class in the model,
where classes are ordered as they are in ``self.classes_``.
"""
return self._predict_proba_lr(X)
def predict_log_proba(self, X):
"""Log of probability estimates.
The returned estimates for all classes are ordered by the
label of classes.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
T : array-like, shape = [n_samples, n_classes]
Returns the log-probability of the sample for each class in the
model, where classes are ordered as they are in ``self.classes_``.
"""
return np.log(self.predict_proba(X))
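# ---------------------------------------------------------------------------
# Illustrative usage sketch (added for exposition, not part of the original
# module): the class docstring above describes the 'ovr' and 'multinomial'
# options and which solvers support them.  The helper below shows how the two
# strategies are typically fitted and compared; the iris dataset and the
# chosen parameter values are assumptions made only for this example.
def _example_logistic_regression_usage():
    """Minimal sketch: fit both multiclass strategies and inspect
    class-membership probabilities (ordered as in ``classes_``)."""
    from sklearn.datasets import load_iris
    iris = load_iris()
    X, y = iris.data, iris.target
    # One-vs-rest with the liblinear solver (supports l1 and l2 penalties).
    ovr = LogisticRegression(multi_class='ovr', solver='liblinear', C=1.0)
    ovr.fit(X, y)
    # The true multinomial loss requires the lbfgs or newton-cg solver.
    mnl = LogisticRegression(multi_class='multinomial', solver='lbfgs', C=1.0)
    mnl.fit(X, y)
    return ovr.predict_proba(X[:5]), mnl.predict_proba(X[:5])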
class LogisticRegressionCV(LogisticRegression, BaseEstimator,
LinearClassifierMixin, _LearntSelectorMixin):
"""Logistic Regression CV (aka logit, MaxEnt) classifier.
This class implements logistic regression using liblinear, newton-cg or
LBFGS optimizer. The newton-cg and lbfgs solvers support only L2
regularization with primal formulation. The liblinear solver supports both
L1 and L2 regularization, with a dual formulation only for the L2 penalty.
    For the grid of Cs values (set by default to ten values spaced
    logarithmically between 1e-4 and 1e4), the best hyperparameter is
    selected by the StratifiedKFold cross-validator, which can be changed
    using the cv parameter. In the case of the newton-cg and lbfgs solvers,
    we warm start along the path, i.e. the initial coefficients of the
    present fit are taken to be the coefficients obtained after convergence
    of the previous fit, which is expected to be faster for high-dimensional
    dense data.
    For a multiclass problem, the hyperparameters for each class are computed
    using the best scores obtained by doing a one-vs-rest fit in parallel
    across all folds and classes. Hence this is not the true multinomial loss.
Read more in the :ref:`User Guide <logistic_regression>`.
Parameters
----------
Cs : list of floats | int
Each of the values in Cs describes the inverse of regularization
        strength. If Cs is an int, then a grid of Cs values is chosen
in a logarithmic scale between 1e-4 and 1e4.
Like in support vector machines, smaller values specify stronger
regularization.
fit_intercept : bool, default: True
Specifies if a constant (a.k.a. bias or intercept) should be
        added to the decision function.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
cv : integer or cross-validation generator
The default cross-validation generator used is Stratified K-Folds.
If an integer is provided, then it is the number of folds used.
See the module :mod:`sklearn.cross_validation` module for the
list of possible cross-validation objects.
penalty : str, 'l1' or 'l2'
Used to specify the norm used in the penalization. The newton-cg and
lbfgs solvers support only l2 penalties.
dual : bool
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
    scoring : callable
Scoring function to use as cross-validation criteria. For a list of
scoring functions that can be used, look at :mod:`sklearn.metrics`.
The default scoring option used is accuracy_score.
solver : {'newton-cg', 'lbfgs', 'liblinear'}
Algorithm to use in the optimization problem.
tol : float, optional
Tolerance for stopping criteria.
max_iter : int, optional
Maximum number of iterations of the optimization algorithm.
n_jobs : int, optional
Number of CPU cores used during the cross-validation loop. If given
a value of -1, all cores are used.
verbose : int
For the liblinear and lbfgs solvers set verbose to any positive
number for verbosity.
refit : bool
If set to True, the scores are averaged across all folds, and the
coefs and the C that corresponds to the best score is taken, and a
final refit is done using these parameters.
Otherwise the coefs, intercepts and C that correspond to the
best scores across folds are averaged.
multi_class : str, {'ovr', 'multinomial'}
Multiclass option can be either 'ovr' or 'multinomial'. If the option
chosen is 'ovr', then a binary problem is fit for each label. Else
the loss minimised is the multinomial loss fit across
the entire probability distribution. Works only for the 'lbfgs'
solver.
intercept_scaling : float, default 1.
        Useful only when the solver 'liblinear' is used
        and self.fit_intercept is set to True. In this case, x becomes
        [x, self.intercept_scaling],
        i.e. a "synthetic" feature with constant value equal to
        intercept_scaling is appended to the instance vector.
        The intercept becomes intercept_scaling * synthetic feature weight.
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
Attributes
----------
coef_ : array, shape (1, n_features) or (n_classes, n_features)
Coefficient of the features in the decision function.
`coef_` is of shape (1, n_features) when the given problem
is binary.
`coef_` is readonly property derived from `raw_coef_` that
follows the internal memory layout of liblinear.
intercept_ : array, shape (1,) or (n_classes,)
Intercept (a.k.a. bias) added to the decision function.
        It is available only when the parameter fit_intercept is set to True
        and is of shape (1,) when the problem is binary.
Cs_ : array
Array of C i.e. inverse of regularization parameter values used
for cross-validation.
coefs_paths_ : array, shape ``(n_folds, len(Cs_), n_features)`` or \
``(n_folds, len(Cs_), n_features + 1)``
dict with classes as the keys, and the path of coefficients obtained
during cross-validating across each fold and then across each Cs
after doing an OvR for the corresponding class as values.
If the 'multi_class' option is set to 'multinomial', then
the coefs_paths are the coefficients corresponding to each class.
Each dict value has shape ``(n_folds, len(Cs_), n_features)`` or
``(n_folds, len(Cs_), n_features + 1)`` depending on whether the
intercept is fit or not.
scores_ : dict
dict with classes as the keys, and the values as the
grid of scores obtained during cross-validating each fold, after doing
an OvR for the corresponding class. If the 'multi_class' option
given is 'multinomial' then the same scores are repeated across
        all classes, since the multinomial loss is shared across all classes.
Each dict value has shape (n_folds, len(Cs))
C_ : array, shape (n_classes,) or (n_classes - 1,)
Array of C that maps to the best scores across every class. If refit is
set to False, then for each class, the best C is the average of the
C's that correspond to the best scores for each fold.
See also
--------
LogisticRegression
"""
def __init__(self, Cs=10, fit_intercept=True, cv=None, dual=False,
penalty='l2', scoring=None, solver='lbfgs', tol=1e-4,
max_iter=100, class_weight=None, n_jobs=1, verbose=0,
refit=True, intercept_scaling=1., multi_class='ovr'):
self.Cs = Cs
self.fit_intercept = fit_intercept
self.cv = cv
self.dual = dual
self.penalty = penalty
self.scoring = scoring
self.tol = tol
self.max_iter = max_iter
self.class_weight = class_weight
self.n_jobs = n_jobs
self.verbose = verbose
self.solver = solver
self.refit = refit
self.intercept_scaling = intercept_scaling
self.multi_class = multi_class
def fit(self, X, y):
"""Fit the model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples,)
Target vector relative to X.
Returns
-------
self : object
Returns self.
"""
_check_solver_option(self.solver, self.multi_class, self.penalty,
self.dual)
if not isinstance(self.max_iter, numbers.Number) or self.max_iter < 0:
raise ValueError("Maximum number of iteration must be positive;"
" got (max_iter=%r)" % self.max_iter)
if not isinstance(self.tol, numbers.Number) or self.tol < 0:
raise ValueError("Tolerance for stopping criteria must be "
"positive; got (tol=%r)" % self.tol)
X = check_array(X, accept_sparse='csr', dtype=np.float64)
y = check_array(y, ensure_2d=False, dtype=None)
if y.ndim == 2 and y.shape[1] == 1:
warnings.warn(
"A column-vector y was passed when a 1d array was"
" expected. Please change the shape of y to "
"(n_samples, ), for example using ravel().",
DataConversionWarning)
y = np.ravel(y)
check_consistent_length(X, y)
# init cross-validation generator
cv = check_cv(self.cv, X, y, classifier=True)
folds = list(cv)
self._enc = LabelEncoder()
self._enc.fit(y)
labels = self.classes_ = np.unique(y)
n_classes = len(labels)
if n_classes < 2:
raise ValueError("This solver needs samples of at least 2 classes"
" in the data, but the data contains only one"
" class: %r" % self.classes_[0])
if n_classes == 2:
# OvR in case of binary problems is as good as fitting
# the higher label
n_classes = 1
labels = labels[1:]
# We need this hack to iterate only once over labels, in the case of
# multi_class = multinomial, without changing the value of the labels.
iter_labels = labels
if self.multi_class == 'multinomial':
iter_labels = [None]
if self.class_weight and not(isinstance(self.class_weight, dict) or
self.class_weight in ['balanced', 'auto']):
raise ValueError("class_weight provided should be a "
"dict or 'balanced'")
path_func = delayed(_log_reg_scoring_path)
fold_coefs_ = Parallel(n_jobs=self.n_jobs, verbose=self.verbose)(
path_func(X, y, train, test, pos_class=label, Cs=self.Cs,
fit_intercept=self.fit_intercept, penalty=self.penalty,
dual=self.dual, solver=self.solver, tol=self.tol,
max_iter=self.max_iter, verbose=self.verbose,
class_weight=self.class_weight, scoring=self.scoring,
multi_class=self.multi_class,
intercept_scaling=self.intercept_scaling
)
for label in iter_labels
for train, test in folds)
if self.multi_class == 'multinomial':
multi_coefs_paths, Cs, multi_scores = zip(*fold_coefs_)
multi_coefs_paths = np.asarray(multi_coefs_paths)
multi_scores = np.asarray(multi_scores)
# This is just to maintain API similarity between the ovr and
# multinomial option.
            # Coefs_paths is now n_folds X len(Cs) X n_classes X n_features
# we need it to be n_classes X len(Cs) X n_folds X n_features
# to be similar to "ovr".
coefs_paths = np.rollaxis(multi_coefs_paths, 2, 0)
# Multinomial has a true score across all labels. Hence the
# shape is n_folds X len(Cs). We need to repeat this score
# across all labels for API similarity.
scores = np.tile(multi_scores, (n_classes, 1, 1))
self.Cs_ = Cs[0]
else:
coefs_paths, Cs, scores = zip(*fold_coefs_)
self.Cs_ = Cs[0]
coefs_paths = np.reshape(coefs_paths, (n_classes, len(folds),
len(self.Cs_), -1))
self.coefs_paths_ = dict(zip(labels, coefs_paths))
scores = np.reshape(scores, (n_classes, len(folds), -1))
self.scores_ = dict(zip(labels, scores))
self.C_ = list()
self.coef_ = np.empty((n_classes, X.shape[1]))
self.intercept_ = np.zeros(n_classes)
# hack to iterate only once for multinomial case.
if self.multi_class == 'multinomial':
scores = multi_scores
coefs_paths = multi_coefs_paths
for index, label in enumerate(iter_labels):
if self.multi_class == 'ovr':
scores = self.scores_[label]
coefs_paths = self.coefs_paths_[label]
if self.refit:
best_index = scores.sum(axis=0).argmax()
C_ = self.Cs_[best_index]
self.C_.append(C_)
if self.multi_class == 'multinomial':
coef_init = np.mean(coefs_paths[:, best_index, :, :],
axis=0)
else:
coef_init = np.mean(coefs_paths[:, best_index, :], axis=0)
w, _ = logistic_regression_path(
X, y, pos_class=label, Cs=[C_], solver=self.solver,
fit_intercept=self.fit_intercept, coef=coef_init,
max_iter=self.max_iter, tol=self.tol,
penalty=self.penalty,
class_weight=self.class_weight,
multi_class=self.multi_class,
verbose=max(0, self.verbose - 1))
w = w[0]
else:
# Take the best scores across every fold and the average of all
# coefficients corresponding to the best scores.
best_indices = np.argmax(scores, axis=1)
w = np.mean([coefs_paths[i][best_indices[i]]
for i in range(len(folds))], axis=0)
self.C_.append(np.mean(self.Cs_[best_indices]))
if self.multi_class == 'multinomial':
self.C_ = np.tile(self.C_, n_classes)
self.coef_ = w[:, :X.shape[1]]
if self.fit_intercept:
self.intercept_ = w[:, -1]
else:
self.coef_[index] = w[: X.shape[1]]
if self.fit_intercept:
self.intercept_[index] = w[-1]
self.C_ = np.asarray(self.C_)
return self
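# ---------------------------------------------------------------------------
# Illustrative usage sketch (added for exposition, not part of the original
# module): as the class docstring above explains, LogisticRegressionCV
# searches a grid of Cs with stratified cross-validation and warm starts
# along the path for the lbfgs/newton-cg solvers.  The dataset and the
# parameter values below are assumptions chosen only for this example.
def _example_logistic_regression_cv_usage():
    """Minimal sketch: select the regularization strength C by
    cross-validation and inspect the fitted attributes."""
    from sklearn.datasets import load_iris
    iris = load_iris()
    X, y = iris.data, iris.target
    clf = LogisticRegressionCV(Cs=10, cv=5, solver='lbfgs',
                               multi_class='multinomial', refit=True)
    clf.fit(X, y)
    # Cs_ is the searched grid, C_ the selected value(s), scores_ the CV grid.
    return clf.Cs_, clf.C_, clf.scores_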
| bsd-3-clause |
matthewfranglen/spark | python/pyspark/sql/tests/test_pandas_udf_window.py | 21 | 12850 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
from pyspark.sql.utils import AnalysisException
from pyspark.sql.functions import array, explode, col, lit, mean, min, max, rank, \
udf, pandas_udf, PandasUDFType
from pyspark.sql.window import Window
from pyspark.testing.sqlutils import ReusedSQLTestCase, have_pandas, have_pyarrow, \
pandas_requirement_message, pyarrow_requirement_message
from pyspark.testing.utils import QuietTest
if have_pandas:
from pandas.util.testing import assert_frame_equal
@unittest.skipIf(
not have_pandas or not have_pyarrow,
pandas_requirement_message or pyarrow_requirement_message)
class WindowPandasUDFTests(ReusedSQLTestCase):
@property
def data(self):
return self.spark.range(10).toDF('id') \
.withColumn("vs", array([lit(i * 1.0) + col('id') for i in range(20, 30)])) \
.withColumn("v", explode(col('vs'))) \
.drop('vs') \
.withColumn('w', lit(1.0))
@property
def python_plus_one(self):
return udf(lambda v: v + 1, 'double')
@property
def pandas_scalar_time_two(self):
return pandas_udf(lambda v: v * 2, 'double')
@property
def pandas_agg_count_udf(self):
@pandas_udf('long', PandasUDFType.GROUPED_AGG)
def count(v):
return len(v)
return count
@property
def pandas_agg_mean_udf(self):
@pandas_udf('double', PandasUDFType.GROUPED_AGG)
def avg(v):
return v.mean()
return avg
@property
def pandas_agg_max_udf(self):
@pandas_udf('double', PandasUDFType.GROUPED_AGG)
def max(v):
return v.max()
return max
@property
def pandas_agg_min_udf(self):
@pandas_udf('double', PandasUDFType.GROUPED_AGG)
def min(v):
return v.min()
return min
@property
def unbounded_window(self):
return Window.partitionBy('id') \
.rowsBetween(Window.unboundedPreceding, Window.unboundedFollowing).orderBy('v')
@property
def ordered_window(self):
return Window.partitionBy('id').orderBy('v')
@property
def unpartitioned_window(self):
return Window.partitionBy()
@property
def sliding_row_window(self):
return Window.partitionBy('id').orderBy('v').rowsBetween(-2, 1)
@property
def sliding_range_window(self):
return Window.partitionBy('id').orderBy('v').rangeBetween(-2, 4)
@property
def growing_row_window(self):
return Window.partitionBy('id').orderBy('v').rowsBetween(Window.unboundedPreceding, 3)
@property
def growing_range_window(self):
return Window.partitionBy('id').orderBy('v') \
.rangeBetween(Window.unboundedPreceding, 4)
@property
def shrinking_row_window(self):
return Window.partitionBy('id').orderBy('v').rowsBetween(-2, Window.unboundedFollowing)
@property
def shrinking_range_window(self):
return Window.partitionBy('id').orderBy('v') \
.rangeBetween(-3, Window.unboundedFollowing)
def test_simple(self):
df = self.data
w = self.unbounded_window
mean_udf = self.pandas_agg_mean_udf
result1 = df.withColumn('mean_v', mean_udf(df['v']).over(w))
expected1 = df.withColumn('mean_v', mean(df['v']).over(w))
result2 = df.select(mean_udf(df['v']).over(w))
expected2 = df.select(mean(df['v']).over(w))
assert_frame_equal(expected1.toPandas(), result1.toPandas())
assert_frame_equal(expected2.toPandas(), result2.toPandas())
def test_multiple_udfs(self):
df = self.data
w = self.unbounded_window
result1 = df.withColumn('mean_v', self.pandas_agg_mean_udf(df['v']).over(w)) \
.withColumn('max_v', self.pandas_agg_max_udf(df['v']).over(w)) \
.withColumn('min_w', self.pandas_agg_min_udf(df['w']).over(w))
expected1 = df.withColumn('mean_v', mean(df['v']).over(w)) \
.withColumn('max_v', max(df['v']).over(w)) \
.withColumn('min_w', min(df['w']).over(w))
assert_frame_equal(expected1.toPandas(), result1.toPandas())
def test_replace_existing(self):
df = self.data
w = self.unbounded_window
result1 = df.withColumn('v', self.pandas_agg_mean_udf(df['v']).over(w))
expected1 = df.withColumn('v', mean(df['v']).over(w))
assert_frame_equal(expected1.toPandas(), result1.toPandas())
def test_mixed_sql(self):
df = self.data
w = self.unbounded_window
mean_udf = self.pandas_agg_mean_udf
result1 = df.withColumn('v', mean_udf(df['v'] * 2).over(w) + 1)
expected1 = df.withColumn('v', mean(df['v'] * 2).over(w) + 1)
assert_frame_equal(expected1.toPandas(), result1.toPandas())
def test_mixed_udf(self):
df = self.data
w = self.unbounded_window
plus_one = self.python_plus_one
time_two = self.pandas_scalar_time_two
mean_udf = self.pandas_agg_mean_udf
result1 = df.withColumn(
'v2',
plus_one(mean_udf(plus_one(df['v'])).over(w)))
expected1 = df.withColumn(
'v2',
plus_one(mean(plus_one(df['v'])).over(w)))
result2 = df.withColumn(
'v2',
time_two(mean_udf(time_two(df['v'])).over(w)))
expected2 = df.withColumn(
'v2',
time_two(mean(time_two(df['v'])).over(w)))
assert_frame_equal(expected1.toPandas(), result1.toPandas())
assert_frame_equal(expected2.toPandas(), result2.toPandas())
def test_without_partitionBy(self):
df = self.data
w = self.unpartitioned_window
mean_udf = self.pandas_agg_mean_udf
result1 = df.withColumn('v2', mean_udf(df['v']).over(w))
expected1 = df.withColumn('v2', mean(df['v']).over(w))
result2 = df.select(mean_udf(df['v']).over(w))
expected2 = df.select(mean(df['v']).over(w))
assert_frame_equal(expected1.toPandas(), result1.toPandas())
assert_frame_equal(expected2.toPandas(), result2.toPandas())
def test_mixed_sql_and_udf(self):
df = self.data
w = self.unbounded_window
ow = self.ordered_window
max_udf = self.pandas_agg_max_udf
min_udf = self.pandas_agg_min_udf
result1 = df.withColumn('v_diff', max_udf(df['v']).over(w) - min_udf(df['v']).over(w))
expected1 = df.withColumn('v_diff', max(df['v']).over(w) - min(df['v']).over(w))
# Test mixing sql window function and window udf in the same expression
result2 = df.withColumn('v_diff', max_udf(df['v']).over(w) - min(df['v']).over(w))
expected2 = expected1
# Test chaining sql aggregate function and udf
result3 = df.withColumn('max_v', max_udf(df['v']).over(w)) \
.withColumn('min_v', min(df['v']).over(w)) \
.withColumn('v_diff', col('max_v') - col('min_v')) \
.drop('max_v', 'min_v')
expected3 = expected1
# Test mixing sql window function and udf
result4 = df.withColumn('max_v', max_udf(df['v']).over(w)) \
.withColumn('rank', rank().over(ow))
expected4 = df.withColumn('max_v', max(df['v']).over(w)) \
.withColumn('rank', rank().over(ow))
assert_frame_equal(expected1.toPandas(), result1.toPandas())
assert_frame_equal(expected2.toPandas(), result2.toPandas())
assert_frame_equal(expected3.toPandas(), result3.toPandas())
assert_frame_equal(expected4.toPandas(), result4.toPandas())
def test_array_type(self):
df = self.data
w = self.unbounded_window
array_udf = pandas_udf(lambda x: [1.0, 2.0], 'array<double>', PandasUDFType.GROUPED_AGG)
result1 = df.withColumn('v2', array_udf(df['v']).over(w))
        self.assertEqual(result1.first()['v2'], [1.0, 2.0])
def test_invalid_args(self):
df = self.data
w = self.unbounded_window
with QuietTest(self.sc):
with self.assertRaisesRegexp(
AnalysisException,
'.*not supported within a window function'):
foo_udf = pandas_udf(lambda x: x, 'v double', PandasUDFType.GROUPED_MAP)
df.withColumn('v2', foo_udf(df['v']).over(w))
def test_bounded_simple(self):
from pyspark.sql.functions import mean, max, min, count
df = self.data
w1 = self.sliding_row_window
w2 = self.shrinking_range_window
plus_one = self.python_plus_one
count_udf = self.pandas_agg_count_udf
mean_udf = self.pandas_agg_mean_udf
max_udf = self.pandas_agg_max_udf
min_udf = self.pandas_agg_min_udf
result1 = df.withColumn('mean_v', mean_udf(plus_one(df['v'])).over(w1)) \
.withColumn('count_v', count_udf(df['v']).over(w2)) \
.withColumn('max_v', max_udf(df['v']).over(w2)) \
.withColumn('min_v', min_udf(df['v']).over(w1))
expected1 = df.withColumn('mean_v', mean(plus_one(df['v'])).over(w1)) \
.withColumn('count_v', count(df['v']).over(w2)) \
.withColumn('max_v', max(df['v']).over(w2)) \
.withColumn('min_v', min(df['v']).over(w1))
assert_frame_equal(expected1.toPandas(), result1.toPandas())
def test_growing_window(self):
from pyspark.sql.functions import mean
df = self.data
w1 = self.growing_row_window
w2 = self.growing_range_window
mean_udf = self.pandas_agg_mean_udf
result1 = df.withColumn('m1', mean_udf(df['v']).over(w1)) \
.withColumn('m2', mean_udf(df['v']).over(w2))
expected1 = df.withColumn('m1', mean(df['v']).over(w1)) \
.withColumn('m2', mean(df['v']).over(w2))
assert_frame_equal(expected1.toPandas(), result1.toPandas())
def test_sliding_window(self):
from pyspark.sql.functions import mean
df = self.data
w1 = self.sliding_row_window
w2 = self.sliding_range_window
mean_udf = self.pandas_agg_mean_udf
result1 = df.withColumn('m1', mean_udf(df['v']).over(w1)) \
.withColumn('m2', mean_udf(df['v']).over(w2))
expected1 = df.withColumn('m1', mean(df['v']).over(w1)) \
.withColumn('m2', mean(df['v']).over(w2))
assert_frame_equal(expected1.toPandas(), result1.toPandas())
def test_shrinking_window(self):
from pyspark.sql.functions import mean
df = self.data
w1 = self.shrinking_row_window
w2 = self.shrinking_range_window
mean_udf = self.pandas_agg_mean_udf
result1 = df.withColumn('m1', mean_udf(df['v']).over(w1)) \
.withColumn('m2', mean_udf(df['v']).over(w2))
expected1 = df.withColumn('m1', mean(df['v']).over(w1)) \
.withColumn('m2', mean(df['v']).over(w2))
assert_frame_equal(expected1.toPandas(), result1.toPandas())
def test_bounded_mixed(self):
from pyspark.sql.functions import mean, max
df = self.data
w1 = self.sliding_row_window
w2 = self.unbounded_window
mean_udf = self.pandas_agg_mean_udf
max_udf = self.pandas_agg_max_udf
result1 = df.withColumn('mean_v', mean_udf(df['v']).over(w1)) \
.withColumn('max_v', max_udf(df['v']).over(w2)) \
.withColumn('mean_unbounded_v', mean_udf(df['v']).over(w1))
expected1 = df.withColumn('mean_v', mean(df['v']).over(w1)) \
.withColumn('max_v', max(df['v']).over(w2)) \
.withColumn('mean_unbounded_v', mean(df['v']).over(w1))
assert_frame_equal(expected1.toPandas(), result1.toPandas())
if __name__ == "__main__":
from pyspark.sql.tests.test_pandas_udf_window import *
try:
import xmlrunner
testRunner = xmlrunner.XMLTestRunner(output='target/test-reports', verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
| mit |
tosolveit/scikit-learn | sklearn/cluster/tests/test_mean_shift.py | 150 | 3651 | """
Testing for mean shift clustering methods
"""
import numpy as np
import warnings
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raise_message
from sklearn.cluster import MeanShift
from sklearn.cluster import mean_shift
from sklearn.cluster import estimate_bandwidth
from sklearn.cluster import get_bin_seeds
from sklearn.datasets.samples_generator import make_blobs
n_clusters = 3
centers = np.array([[1, 1], [-1, -1], [1, -1]]) + 10
X, _ = make_blobs(n_samples=300, n_features=2, centers=centers,
cluster_std=0.4, shuffle=True, random_state=11)
def test_estimate_bandwidth():
# Test estimate_bandwidth
bandwidth = estimate_bandwidth(X, n_samples=200)
assert_true(0.9 <= bandwidth <= 1.5)
def test_mean_shift():
# Test MeanShift algorithm
bandwidth = 1.2
ms = MeanShift(bandwidth=bandwidth)
labels = ms.fit(X).labels_
labels_unique = np.unique(labels)
n_clusters_ = len(labels_unique)
assert_equal(n_clusters_, n_clusters)
cluster_centers, labels = mean_shift(X, bandwidth=bandwidth)
labels_unique = np.unique(labels)
n_clusters_ = len(labels_unique)
assert_equal(n_clusters_, n_clusters)
def test_parallel():
ms1 = MeanShift(n_jobs=2)
ms1.fit(X)
ms2 = MeanShift()
ms2.fit(X)
    assert_array_equal(ms1.cluster_centers_, ms2.cluster_centers_)
    assert_array_equal(ms1.labels_, ms2.labels_)
def test_meanshift_predict():
# Test MeanShift.predict
ms = MeanShift(bandwidth=1.2)
labels = ms.fit_predict(X)
labels2 = ms.predict(X)
assert_array_equal(labels, labels2)
def test_meanshift_all_orphans():
# init away from the data, crash with a sensible warning
ms = MeanShift(bandwidth=0.1, seeds=[[-9, -9], [-10, -10]])
msg = "No point was within bandwidth=0.1"
assert_raise_message(ValueError, msg, ms.fit, X,)
def test_unfitted():
# Non-regression: before fit, there should be not fitted attributes.
ms = MeanShift()
assert_false(hasattr(ms, "cluster_centers_"))
assert_false(hasattr(ms, "labels_"))
def test_bin_seeds():
# Test the bin seeding technique which can be used in the mean shift
# algorithm
# Data is just 6 points in the plane
X = np.array([[1., 1.], [1.4, 1.4], [1.8, 1.2],
[2., 1.], [2.1, 1.1], [0., 0.]])
# With a bin coarseness of 1.0 and min_bin_freq of 1, 3 bins should be
# found
ground_truth = set([(1., 1.), (2., 1.), (0., 0.)])
test_bins = get_bin_seeds(X, 1, 1)
test_result = set([tuple(p) for p in test_bins])
assert_true(len(ground_truth.symmetric_difference(test_result)) == 0)
# With a bin coarseness of 1.0 and min_bin_freq of 2, 2 bins should be
# found
ground_truth = set([(1., 1.), (2., 1.)])
test_bins = get_bin_seeds(X, 1, 2)
test_result = set([tuple(p) for p in test_bins])
assert_true(len(ground_truth.symmetric_difference(test_result)) == 0)
# With a bin size of 0.01 and min_bin_freq of 1, 6 bins should be found
# we bail and use the whole data here.
with warnings.catch_warnings(record=True):
test_bins = get_bin_seeds(X, 0.01, 1)
assert_array_equal(test_bins, X)
# tight clusters around [0, 0] and [1, 1], only get two bins
X, _ = make_blobs(n_samples=100, n_features=2, centers=[[0, 0], [1, 1]],
cluster_std=0.1, random_state=0)
test_bins = get_bin_seeds(X, 1)
assert_array_equal(test_bins, [[0, 0], [1, 1]])
| bsd-3-clause |
ankurankan/scikit-learn | sklearn/utils/tests/test_utils.py | 23 | 6045 | import warnings
import numpy as np
import scipy.sparse as sp
from scipy.linalg import pinv2
from sklearn.utils.testing import (assert_equal, assert_raises, assert_true,
assert_almost_equal, assert_array_equal,
SkipTest)
from sklearn.utils import check_random_state
from sklearn.utils import deprecated
from sklearn.utils import resample
from sklearn.utils import safe_mask
from sklearn.utils import column_or_1d
from sklearn.utils import safe_indexing
from sklearn.utils import shuffle
from sklearn.utils.extmath import pinvh
from sklearn.utils.mocking import MockDataFrame
def test_make_rng():
"""Check the check_random_state utility function behavior"""
assert_true(check_random_state(None) is np.random.mtrand._rand)
assert_true(check_random_state(np.random) is np.random.mtrand._rand)
rng_42 = np.random.RandomState(42)
assert_true(check_random_state(42).randint(100) == rng_42.randint(100))
rng_42 = np.random.RandomState(42)
assert_true(check_random_state(rng_42) is rng_42)
rng_42 = np.random.RandomState(42)
assert_true(check_random_state(43).randint(100) != rng_42.randint(100))
assert_raises(ValueError, check_random_state, "some invalid seed")
def test_resample_noarg():
"""Border case not worth mentioning in doctests"""
assert_true(resample() is None)
def test_deprecated():
"""Test whether the deprecated decorator issues appropriate warnings"""
# Copied almost verbatim from http://docs.python.org/library/warnings.html
# First a function...
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
@deprecated()
def ham():
return "spam"
spam = ham()
assert_equal(spam, "spam") # function must remain usable
assert_equal(len(w), 1)
assert_true(issubclass(w[0].category, DeprecationWarning))
assert_true("deprecated" in str(w[0].message).lower())
# ... then a class.
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
@deprecated("don't use this")
class Ham(object):
SPAM = 1
ham = Ham()
assert_true(hasattr(ham, "SPAM"))
assert_equal(len(w), 1)
assert_true(issubclass(w[0].category, DeprecationWarning))
assert_true("deprecated" in str(w[0].message).lower())
def test_resample_value_errors():
"""Check that invalid arguments yield ValueError"""
assert_raises(ValueError, resample, [0], [0, 1])
assert_raises(ValueError, resample, [0, 1], [0, 1], n_samples=3)
assert_raises(ValueError, resample, [0, 1], [0, 1], meaning_of_life=42)
def test_safe_mask():
random_state = check_random_state(0)
X = random_state.rand(5, 4)
X_csr = sp.csr_matrix(X)
mask = [False, False, True, True, True]
mask = safe_mask(X, mask)
assert_equal(X[mask].shape[0], 3)
mask = safe_mask(X_csr, mask)
assert_equal(X_csr[mask].shape[0], 3)
def test_pinvh_simple_real():
a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 10]], dtype=np.float64)
a = np.dot(a, a.T)
a_pinv = pinvh(a)
assert_almost_equal(np.dot(a, a_pinv), np.eye(3))
def test_pinvh_nonpositive():
a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.float64)
a = np.dot(a, a.T)
u, s, vt = np.linalg.svd(a)
s[0] *= -1
a = np.dot(u * s, vt) # a is now symmetric non-positive and singular
a_pinv = pinv2(a)
a_pinvh = pinvh(a)
assert_almost_equal(a_pinv, a_pinvh)
def test_pinvh_simple_complex():
a = (np.array([[1, 2, 3], [4, 5, 6], [7, 8, 10]])
+ 1j * np.array([[10, 8, 7], [6, 5, 4], [3, 2, 1]]))
a = np.dot(a, a.conj().T)
a_pinv = pinvh(a)
assert_almost_equal(np.dot(a, a_pinv), np.eye(3))
def test_column_or_1d():
EXAMPLES = [
("binary", ["spam", "egg", "spam"]),
("binary", [0, 1, 0, 1]),
("continuous", np.arange(10) / 20.),
("multiclass", [1, 2, 3]),
("multiclass", [0, 1, 2, 2, 0]),
("multiclass", [[1], [2], [3]]),
("multilabel-indicator", [[0, 1, 0], [0, 0, 1]]),
("multiclass-multioutput", [[1, 2, 3]]),
("multiclass-multioutput", [[1, 1], [2, 2], [3, 1]]),
("multiclass-multioutput", [[5, 1], [4, 2], [3, 1]]),
("multiclass-multioutput", [[1, 2, 3]]),
("continuous-multioutput", np.arange(30).reshape((-1, 3))),
]
for y_type, y in EXAMPLES:
if y_type in ["binary", 'multiclass', "continuous"]:
assert_array_equal(column_or_1d(y), np.ravel(y))
else:
assert_raises(ValueError, column_or_1d, y)
def test_safe_indexing():
X = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
inds = np.array([1, 2])
X_inds = safe_indexing(X, inds)
X_arrays = safe_indexing(np.array(X), inds)
assert_array_equal(np.array(X_inds), X_arrays)
assert_array_equal(np.array(X_inds), np.array(X)[inds])
def test_safe_indexing_pandas():
try:
import pandas as pd
except ImportError:
raise SkipTest("Pandas not found")
X = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
X_df = pd.DataFrame(X)
inds = np.array([1, 2])
X_df_indexed = safe_indexing(X_df, inds)
X_indexed = safe_indexing(X_df, inds)
assert_array_equal(np.array(X_df_indexed), X_indexed)
def test_safe_indexing_mock_pandas():
X = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
X_df = MockDataFrame(X)
inds = np.array([1, 2])
X_df_indexed = safe_indexing(X_df, inds)
X_indexed = safe_indexing(X_df, inds)
assert_array_equal(np.array(X_df_indexed), X_indexed)
def test_shuffle_on_ndim_equals_three():
def to_tuple(A): # to make the inner arrays hashable
return tuple(tuple(tuple(C) for C in B) for B in A)
A = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]]) # A.shape = (2,2,2)
S = set(to_tuple(A))
shuffle(A) # shouldn't raise a ValueError for dim = 3
assert_equal(set(to_tuple(A)), S)
| bsd-3-clause |
ch3ll0v3k/scikit-learn | examples/cluster/plot_mean_shift.py | 351 | 1793 | """
=============================================
A demo of the mean-shift clustering algorithm
=============================================
Reference:
Dorin Comaniciu and Peter Meer, "Mean Shift: A robust approach toward
feature space analysis". IEEE Transactions on Pattern Analysis and
Machine Intelligence. 2002. pp. 603-619.
"""
print(__doc__)
import numpy as np
from sklearn.cluster import MeanShift, estimate_bandwidth
from sklearn.datasets.samples_generator import make_blobs
###############################################################################
# Generate sample data
centers = [[1, 1], [-1, -1], [1, -1]]
X, _ = make_blobs(n_samples=10000, centers=centers, cluster_std=0.6)
###############################################################################
# Compute clustering with MeanShift
# The following bandwidth can be automatically detected using
bandwidth = estimate_bandwidth(X, quantile=0.2, n_samples=500)
ms = MeanShift(bandwidth=bandwidth, bin_seeding=True)
ms.fit(X)
labels = ms.labels_
cluster_centers = ms.cluster_centers_
labels_unique = np.unique(labels)
n_clusters_ = len(labels_unique)
print("number of estimated clusters : %d" % n_clusters_)
###############################################################################
# Plot result
import matplotlib.pyplot as plt
from itertools import cycle
plt.figure(1)
plt.clf()
colors = cycle('bgrcmykbgrcmykbgrcmykbgrcmyk')
for k, col in zip(range(n_clusters_), colors):
my_members = labels == k
cluster_center = cluster_centers[k]
plt.plot(X[my_members, 0], X[my_members, 1], col + '.')
plt.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=14)
plt.title('Estimated number of clusters: %d' % n_clusters_)
plt.show()
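###############################################################################
# Illustrative extension (not part of the original example): a fitted
# MeanShift model can also assign new, unseen points to the nearest learned
# cluster center via ``predict``.  The two query points below are arbitrary.
new_points = np.array([[0.9, 0.9], [-1.1, -0.8]])
print("predicted clusters for new points: %s" % ms.predict(new_points))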
| bsd-3-clause |
RoyBoy432/QB2017_Moger-Reischer | Week8-Projects/Scripts/20170220_N.mineralzn_make.ts.py | 1 | 4282 | from __future__ import division
import sys;import math; import re; import subprocess; import os
from random import choice, randrange
import matplotlib.pyplot as plt; import numpy as np; import scipy as sc; from scipy import stats; import scipy.stats.distributions
import csv; import pandas as pd; import itertools; import Bio
'''def chooseseason(month):
row = ''
if 1 <= month <= 3:
row = 'q1'
elif 4 <= month <= 6:
row = 'q2'
elif 7 <= month <= 9:
row = 'q3'
elif 10 <= month <= 12:
row = 'q4'
else:
row = "??"
return row'''
def chooseseason(month):
row = ''
if 4 <= month <= 9:
row = 'q1'
elif 1 <= month <= 3 or 10 <= month <= 12:
row = 'q2'
else:
row = "??"
return row
def makets(thedf, OP):
thedf['season'] = pd.Series([chooseseason(x) for x in thedf['month']], index=thedf.index)#add a column to the df with
#month translated into season
groups = thedf.groupby(['year', 'season'])#group the df by year and season
temp = list(range(0, len(groups), 1))#make a list from 0 to number.of.groups
    qdf = pd.DataFrame(columns=['year', 'season'], index=temp)#initialize an empty df with the right column names
for index,guy in enumerate(groups):#for each group in the grouped data
qdf.loc[index]=[guy[0][0],guy[0][1]]#populate a row of the empty df with the year and season
#for index, row in enumerate(qdf.iterrows()):
# row = [groups[index][0][0], groups[index][0][1]]
qdf['Nminavg']=pd.Series([groups['netnh4'].mean()[x] for x in temp], index=qdf.index)#now add a column with avg
    #net N mineralization (netnh4) for each year&season group
OP.write(pd.DataFrame.to_csv(qdf))
return qdf
#mydf = pd.read_csv("C:\\Users\\rmoge\\GitHub\\QB2017_DivPro\\Data\\HF_nmin.csv", infer_datetime_format=True)
mydf = pd.read_csv("C:\\Users\\rmoge\\GitHub\\QB2017_DivPro\\Data\\HF_nmin_NA.omit_RZM.csv", infer_datetime_format=True)
'''cdfna = mydf.loc[mydf['trt'] == "C"]
cdf=cdfna.loc[cdfna['netnh4']!="nan"]
hdf = mydf.loc[mydf['trt'] == "H" and mydf['netnh4'] != "NA"]
hndf = mydf.loc[mydf['trt'] == "HN" and mydf['netnh4'] != "NA"]
ndf = mydf.loc[mydf['trt'] == "N" and mydf['netnh4'] != "NA"]'''
cdf = mydf.loc[mydf['trt'] == "C"]
hdf = mydf.loc[mydf['trt'] == "H"]
hndf = mydf.loc[mydf['trt'] == "HN"]
ndf = mydf.loc[mydf['trt'] == "N"]
mincdf=cdf.loc[cdf['hor']=='mineral']
minhdf=hdf.loc[hdf['hor']=='mineral']
minhndf=hndf.loc[hndf['hor']=='mineral']
minndf=ndf.loc[ndf['hor']=='mineral']
orgcdf=cdf.loc[cdf['hor']=='organic']
orghdf=hdf.loc[hdf['hor']=='organic']
orghndf=hndf.loc[hndf['hor']=='organic']
orgndf=ndf.loc[ndf['hor']=='organic']
'''for i in (cdfna['netnh4']):
print(i)
print(type(i))
'''
try:
orgNminc=open("C:\\Users\\rmoge\\GitHub\\QB2017_DivPro\\Data\\orgNminc.csv",'w')
orgNminh=open("C:\\Users\\rmoge\\GitHub\\QB2017_DivPro\\Data\\orgNminh.csv",'w')
orgNminhn=open("C:\\Users\\rmoge\\GitHub\\QB2017_DivPro\\Data\\orgNminhn.csv",'w')
orgNminn=open("C:\\Users\\rmoge\\GitHub\\QB2017_DivPro\\Data\\orgNminn.csv",'w')
minNminc=open("C:\\Users\\rmoge\\GitHub\\QB2017_DivPro\\Data\\minNminc.csv",'w')
minNminh=open("C:\\Users\\rmoge\\GitHub\\QB2017_DivPro\\Data\\minNminh.csv",'w')
minNminhn=open("C:\\Users\\rmoge\\GitHub\\QB2017_DivPro\\Data\\minNminhn.csv",'w')
minNminn=open("C:\\Users\\rmoge\\GitHub\\QB2017_DivPro\\Data\\minNminn.csv",'w')
orgc=makets(orgcdf,orgNminc); orgh=makets(orghdf,orgNminh); orghn=makets(orghndf,orgNminhn); orgn=makets(orgndf,orgNminn)
minc=makets(mincdf,minNminc); minh=makets(minhdf,minNminh); minhn=makets(minhndf,minNminhn); minn=makets(minndf,minNminn)
finally:
orgNminc.close(); orgNminh.close(), orgNminhn.close(), orgNminn.close()
minNminc.close(); minNminh.close(), minNminhn.close(), minNminn.close()
'''grouped = ndf.groupby(['year', 'month'])
meanspermonth = grouped['co2flux'].mean()
tester = list(range(0, len(grouped), 1))#make a list from 0 to number.of.groups
ndfg = ndf.groupby(['year', 'month']).groups
type(ndf.groupby(['year', 'month']).groups)
sl = (ndf['month'])
ndff = ndf
ndff['e'] = pd.Series([chooseseason(x) for x in ndff['month']], index=ndff.index)
print(chooseseason(6))'''
| gpl-3.0 |
MartinDelzant/scikit-learn | examples/svm/plot_weighted_samples.py | 188 | 1943 | """
=====================
SVM: Weighted samples
=====================
Plot decision function of a weighted dataset, where the size of points
is proportional to its weight.
The sample weighting rescales the C parameter, which means that the classifier
puts more emphasis on getting these points right. The effect might often be
subtle.
To emphasize the effect here, we particularly weight outliers, making the
deformation of the decision boundary very visible.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
def plot_decision_function(classifier, sample_weight, axis, title):
# plot the decision function
xx, yy = np.meshgrid(np.linspace(-4, 5, 500), np.linspace(-4, 5, 500))
Z = classifier.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
# plot the line, the points, and the nearest vectors to the plane
axis.contourf(xx, yy, Z, alpha=0.75, cmap=plt.cm.bone)
axis.scatter(X[:, 0], X[:, 1], c=Y, s=100 * sample_weight, alpha=0.9,
cmap=plt.cm.bone)
axis.axis('off')
axis.set_title(title)
# we create 20 points
np.random.seed(0)
X = np.r_[np.random.randn(10, 2) + [1, 1], np.random.randn(10, 2)]
Y = [1] * 10 + [-1] * 10
sample_weight_last_ten = abs(np.random.randn(len(X)))
sample_weight_constant = np.ones(len(X))
# and bigger weights to some outliers
sample_weight_last_ten[15:] *= 5
sample_weight_last_ten[9] *= 15
# for reference, first fit without class weights
# fit the model
clf_weights = svm.SVC()
clf_weights.fit(X, Y, sample_weight=sample_weight_last_ten)
clf_no_weights = svm.SVC()
clf_no_weights.fit(X, Y)
fig, axes = plt.subplots(1, 2, figsize=(14, 6))
plot_decision_function(clf_no_weights, sample_weight_constant, axes[0],
"Constant weights")
plot_decision_function(clf_weights, sample_weight_last_ten, axes[1],
"Modified weights")
plt.show()
| bsd-3-clause |
myron0330/caching-research | section_cmab/simulation/display_rewards.py | 1 | 5386 | # -*- coding: UTF-8 -*-
# **********************************************************************************#
# File:
# **********************************************************************************#
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib import rcParams
from matplotlib import font_manager
from matplotlib.ticker import FormatStrFormatter
DEFAULT_COLORS = {
0: '#363636',
1: '#FA8072',
2: '#1E90FF',
3: '#32CD32',
4: '#FFD700',
}
AVAILABLE_MARKERS = ['o', '*', 'p', 's', '^', '<', '>']
font_properties = font_manager.FontProperties()
def display_rewards_(fig_size=(11, 8), line_width=1,
title_size=18, label_size=16, marker_size=10, legend_size=10,
title='', x_label=u'迭代次数', y_label=u'回报收益',
save_path=None, x_axis=None, loc=None,
texts=None, **kwargs):
"""
Display multiple simulation rewards
Args:
fig_size(tuple): figure size
line_width(float): line width
title_size(float): title size
label_size(float): label size
marker_size(float): marker size
legend_size(float): legend_size
title(string): figure title
save_path(string): save path
x_label(string): x label string
y_label(string): y label string
x_axis(list): x_axis
loc(int): legend location
"""
fig = plt.figure(figsize=fig_size)
ax = fig.add_subplot(1, 1, 1)
ax.spines['left'].set_color('black')
ax.spines['right'].set_color('black')
ax.spines['top'].set_color('black')
ax.spines['bottom'].set_color('black')
x_axis = range(0, 10)
y_axis = range(0, 10)[::-1]
current_marker = AVAILABLE_MARKERS[0]
current_color = DEFAULT_COLORS.get(2)
plt.plot(x_axis, y_axis, color=current_color, linewidth=line_width,
marker=current_marker, markersize=marker_size, markerfacecolor='None',
markeredgecolor=current_color, markeredgewidth=line_width)
plt.title(title, fontsize=title_size, verticalalignment='bottom',
horizontalalignment='center', color='k', fontproperties=font_properties)
font_properties.set_size(label_size)
plt.xlabel(x_label, fontsize=label_size, verticalalignment='top',
horizontalalignment='center', fontproperties=font_properties)
plt.ylabel(y_label, fontsize=label_size, verticalalignment='bottom',
horizontalalignment='center', rotation=90, fontproperties=font_properties)
legend = ['reward-latency']
rcParams.update({'font.size': 14})
xmajorFormatter = FormatStrFormatter('')
ymajorFormatter = FormatStrFormatter('')
ax.xaxis.set_major_formatter(xmajorFormatter)
ax.yaxis.set_major_formatter(ymajorFormatter)
plt.xlim(0, 10)
plt.ylim(0, 10)
plt.plot([0, 4.5], [4.5]*2, '--k', linewidth=line_width)
plt.plot([4.5]*2, [0, 4.5], '--k', linewidth=line_width)
if loc is not None:
plt.legend(legend, loc=loc, fontsize=legend_size)
else:
plt.legend(legend, loc='best', fontsize=legend_size)
if texts is not None:
for text in texts:
plt.text(*text['args'], **text['kwargs'])
if save_path is not None:
plt.savefig(save_path)
plt.show()
if __name__ == '__main__':
parameters = {
'display_length': 50,
'line_width': 2.5,
'title_size': 20,
'label_size': 16,
'marker': '',
'marker_size': 8,
'title': '',
'x_label': u'',
'y_label': u'',
'all_curves': True,
'with_standardize': True,
'standardize_init': 25,
'sigma': 0.6,
'loc': '3',
'legend_size': 15,
'fixed_theta': True,
'y_min_lim': 0,
'texts': [
{
'args': (9, -0.3, '$\Delta T$'),
'kwargs': {
'horizontalalignment': 'center',
'verticalalignment': 'center',
'fontsize': 14,
}
},
{
'args': (-0.6, 9, '$d_{b,k,t}\Delta TR_{0}$'),
'kwargs': {
'horizontalalignment': 'center',
'verticalalignment': 'center',
'fontsize': 14,
'rotation': 0,
}
},
{
'args': (4.5, -0.4, '$l_{b,k,t}$'),
'kwargs': {
'horizontalalignment': 'center',
'verticalalignment': 'center',
'fontsize': 20,
}
},
{
'args': (-0.5, 4.5, '$r_{b,k,t}$'),
'kwargs': {
'horizontalalignment': 'center',
'verticalalignment': 'center',
'fontsize': 20,
'rotation': 0,
}
},
{
'args': (-0.3, -0.3, '$0$'),
'kwargs': {
'horizontalalignment': 'center',
'verticalalignment': 'center',
'fontsize': 14,
'rotation': 0,
}
}
],
'save_path': '../plots/latency_vs_rewards.jpg',
}
display_rewards_(**parameters)
| mit |
MatthieuBizien/scikit-learn | sklearn/utils/estimator_checks.py | 17 | 56571 | from __future__ import print_function
import types
import warnings
import sys
import traceback
import pickle
from copy import deepcopy
import numpy as np
from scipy import sparse
import struct
from sklearn.externals.six.moves import zip
from sklearn.externals.joblib import hash, Memory
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regex
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import META_ESTIMATORS
from sklearn.utils.testing import set_random_state
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_warns
from sklearn.base import (clone, ClassifierMixin, RegressorMixin,
TransformerMixin, ClusterMixin, BaseEstimator)
from sklearn.metrics import accuracy_score, adjusted_rand_score, f1_score
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.random_projection import BaseRandomProjection
from sklearn.feature_selection import SelectKBest
from sklearn.svm.base import BaseLibSVM
from sklearn.pipeline import make_pipeline
from sklearn.decomposition import NMF, ProjectedGradientNMF
from sklearn.exceptions import ConvergenceWarning
from sklearn.exceptions import DataConversionWarning
from sklearn.model_selection import train_test_split
from sklearn.utils import shuffle
from sklearn.utils.fixes import signature
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import load_iris, load_boston, make_blobs
BOSTON = None
CROSS_DECOMPOSITION = ['PLSCanonical', 'PLSRegression', 'CCA', 'PLSSVD']
MULTI_OUTPUT = ['CCA', 'DecisionTreeRegressor', 'ElasticNet',
'ExtraTreeRegressor', 'ExtraTreesRegressor', 'GaussianProcess',
'GaussianProcessRegressor',
'KNeighborsRegressor', 'KernelRidge', 'Lars', 'Lasso',
'LassoLars', 'LinearRegression', 'MultiTaskElasticNet',
'MultiTaskElasticNetCV', 'MultiTaskLasso', 'MultiTaskLassoCV',
'OrthogonalMatchingPursuit', 'PLSCanonical', 'PLSRegression',
'RANSACRegressor', 'RadiusNeighborsRegressor',
'RandomForestRegressor', 'Ridge', 'RidgeCV']
# Estimators with deprecated transform methods. Should be removed in 0.19 when
# _LearntSelectorMixin is removed.
DEPRECATED_TRANSFORM = [
"RandomForestClassifier", "RandomForestRegressor", "ExtraTreesClassifier",
"ExtraTreesRegressor", "DecisionTreeClassifier",
"DecisionTreeRegressor", "ExtraTreeClassifier", "ExtraTreeRegressor",
"LinearSVC", "SGDClassifier", "SGDRegressor", "Perceptron",
"LogisticRegression", "LogisticRegressionCV",
"GradientBoostingClassifier", "GradientBoostingRegressor"]
def _yield_non_meta_checks(name, Estimator):
yield check_estimators_dtypes
yield check_fit_score_takes_y
yield check_dtype_object
yield check_estimators_fit_returns_self
# Check that all estimator yield informative messages when
# trained on empty datasets
yield check_estimators_empty_data_messages
if name not in CROSS_DECOMPOSITION + ['SpectralEmbedding']:
# SpectralEmbedding is non-deterministic,
# see issue #4236
# cross-decomposition's "transform" returns X and Y
yield check_pipeline_consistency
if name not in ['Imputer']:
# Test that all estimators check their input for NaN's and infs
yield check_estimators_nan_inf
if name not in ['GaussianProcess']:
# FIXME!
# in particular GaussianProcess!
yield check_estimators_overwrite_params
if hasattr(Estimator, 'sparsify'):
yield check_sparsify_coefficients
yield check_estimator_sparse_data
# Test that estimators can be pickled, and once pickled
# give the same answer as before.
yield check_estimators_pickle
def _yield_classifier_checks(name, Classifier):
# test classifiers can handle non-array data
yield check_classifier_data_not_an_array
# test classifiers trained on a single label always return this label
yield check_classifiers_one_label
yield check_classifiers_classes
yield check_estimators_partial_fit_n_features
# basic consistency testing
yield check_classifiers_train
yield check_classifiers_regression_target
if (name not in ["MultinomialNB", "LabelPropagation", "LabelSpreading"]
# TODO some complication with -1 label
and name not in ["DecisionTreeClassifier",
"ExtraTreeClassifier"]):
# We don't raise a warning in these classifiers, as
# the column y interface is used by the forests.
yield check_supervised_y_2d
# test if NotFittedError is raised
yield check_estimators_unfitted
if 'class_weight' in Classifier().get_params().keys():
yield check_class_weight_classifiers
def check_supervised_y_no_nan(name, Estimator):
# Checks that the Estimator targets are not NaN.
rng = np.random.RandomState(888)
X = rng.randn(10, 5)
y = np.ones(10) * np.inf
y = multioutput_estimator_convert_y_2d(name, y)
errmsg = "Input contains NaN, infinity or a value too large for " \
"dtype('float64')."
try:
Estimator().fit(X, y)
except ValueError as e:
if str(e) != errmsg:
raise ValueError("Estimator {0} raised warning as expected, but "
"does not match expected error message"
.format(name))
else:
raise ValueError("Estimator {0} should have raised error on fitting "
"array y with NaN value.".format(name))
def _yield_regressor_checks(name, Regressor):
# TODO: test with intercept
# TODO: test with multiple responses
# basic testing
yield check_regressors_train
yield check_regressor_data_not_an_array
yield check_estimators_partial_fit_n_features
yield check_regressors_no_decision_function
yield check_supervised_y_2d
yield check_supervised_y_no_nan
if name != 'CCA':
# check that the regressor handles int input
yield check_regressors_int
if name != "GaussianProcessRegressor":
# Test if NotFittedError is raised
yield check_estimators_unfitted
def _yield_transformer_checks(name, Transformer):
# All transformers should either deal with sparse data or raise an
# exception with type TypeError and an intelligible error message
if name not in ['AdditiveChi2Sampler', 'Binarizer', 'Normalizer',
'PLSCanonical', 'PLSRegression', 'CCA', 'PLSSVD']:
yield check_transformer_data_not_an_array
# these don't actually fit the data, so don't raise errors
if name not in ['AdditiveChi2Sampler', 'Binarizer',
'FunctionTransformer', 'Normalizer']:
# basic tests
yield check_transformer_general
yield check_transformers_unfitted
def _yield_clustering_checks(name, Clusterer):
yield check_clusterer_compute_labels_predict
if name not in ('WardAgglomeration', "FeatureAgglomeration"):
# this is clustering on the features
# let's not test that here.
yield check_clustering
yield check_estimators_partial_fit_n_features
def _yield_all_checks(name, Estimator):
for check in _yield_non_meta_checks(name, Estimator):
yield check
if issubclass(Estimator, ClassifierMixin):
for check in _yield_classifier_checks(name, Estimator):
yield check
if issubclass(Estimator, RegressorMixin):
for check in _yield_regressor_checks(name, Estimator):
yield check
if issubclass(Estimator, TransformerMixin):
if name not in DEPRECATED_TRANSFORM:
for check in _yield_transformer_checks(name, Estimator):
yield check
if issubclass(Estimator, ClusterMixin):
for check in _yield_clustering_checks(name, Estimator):
yield check
yield check_fit2d_predict1d
yield check_fit2d_1sample
yield check_fit2d_1feature
yield check_fit1d_1feature
yield check_fit1d_1sample
def check_estimator(Estimator):
"""Check if estimator adheres to sklearn conventions.
This function will run an extensive test-suite for input validation,
shapes, etc.
Additional tests for classifiers, regressors, clustering or transformers
will be run if the Estimator class inherits from the corresponding mixin
from sklearn.base.
Parameters
----------
Estimator : class
Class to check. Estimator is a class object (not an instance).
"""
name = Estimator.__name__
check_parameters_default_constructible(name, Estimator)
for check in _yield_all_checks(name, Estimator):
check(name, Estimator)
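# Illustrative usage sketch (not part of the original module). check_estimator
# is called with an estimator class; LogisticRegression below is just one
# example choice of a class that is expected to pass the suite:
#     from sklearn.linear_model import LogisticRegression
#     check_estimator(LogisticRegression)  # raises on the first failing check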
def _boston_subset(n_samples=200):
global BOSTON
if BOSTON is None:
boston = load_boston()
X, y = boston.data, boston.target
X, y = shuffle(X, y, random_state=0)
X, y = X[:n_samples], y[:n_samples]
X = StandardScaler().fit_transform(X)
BOSTON = X, y
return BOSTON
def set_testing_parameters(estimator):
# set parameters to speed up some estimators and
# avoid deprecated behaviour
params = estimator.get_params()
if ("n_iter" in params
and estimator.__class__.__name__ != "TSNE"):
estimator.set_params(n_iter=5)
if "max_iter" in params:
warnings.simplefilter("ignore", ConvergenceWarning)
if estimator.max_iter is not None:
estimator.set_params(max_iter=min(5, estimator.max_iter))
# LinearSVR
if estimator.__class__.__name__ == 'LinearSVR':
estimator.set_params(max_iter=20)
# NMF
if estimator.__class__.__name__ == 'NMF':
estimator.set_params(max_iter=100)
# MLP
if estimator.__class__.__name__ in ['MLPClassifier', 'MLPRegressor']:
estimator.set_params(max_iter=100)
if "n_resampling" in params:
# randomized lasso
estimator.set_params(n_resampling=5)
if "n_estimators" in params:
# especially gradient boosting with default 100
estimator.set_params(n_estimators=min(5, estimator.n_estimators))
if "max_trials" in params:
# RANSAC
estimator.set_params(max_trials=10)
if "n_init" in params:
# K-Means
estimator.set_params(n_init=2)
if "decision_function_shape" in params:
# SVC
estimator.set_params(decision_function_shape='ovo')
if estimator.__class__.__name__ == "SelectFdr":
# be tolerant of noisy datasets (not actually speed)
estimator.set_params(alpha=.5)
if estimator.__class__.__name__ == "TheilSenRegressor":
estimator.max_subpopulation = 100
if isinstance(estimator, BaseRandomProjection):
# Due to the jl lemma and often very few samples, the number
# of components of the random matrix projection will be probably
# greater than the number of features.
# So we impose a smaller number (avoid "auto" mode)
estimator.set_params(n_components=1)
if isinstance(estimator, SelectKBest):
# SelectKBest has a default of k=10
# which is more features than we have in most cases.
estimator.set_params(k=1)
if isinstance(estimator, NMF):
if not isinstance(estimator, ProjectedGradientNMF):
estimator.set_params(solver='cd')
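# Hedged illustration of the effect of set_testing_parameters (the exact
# parameters touched depend on the estimator that is passed in):
#     from sklearn.ensemble import RandomForestClassifier
#     clf = RandomForestClassifier()
#     set_testing_parameters(clf)
#     clf.n_estimators  # capped at min(5, default) to keep the checks fast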
class NotAnArray(object):
" An object that is convertable to an array"
def __init__(self, data):
self.data = data
def __array__(self, dtype=None):
return self.data
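# Minimal sketch of how NotAnArray is consumed: np.asarray picks up the
# __array__ protocol, so estimators that validate input via check_array still
# end up with a proper ndarray.
#     wrapped = NotAnArray(np.arange(6).reshape(3, 2))
#     np.asarray(wrapped).shape  # (3, 2)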
def _is_32bit():
"""Detect if process is 32bit Python."""
return struct.calcsize('P') * 8 == 32
def check_estimator_sparse_data(name, Estimator):
rng = np.random.RandomState(0)
X = rng.rand(40, 10)
X[X < .8] = 0
X_csr = sparse.csr_matrix(X)
y = (4 * rng.rand(40)).astype(np.int)
for sparse_format in ['csr', 'csc', 'dok', 'lil', 'coo', 'dia', 'bsr']:
X = X_csr.asformat(sparse_format)
# catch deprecation warnings
with warnings.catch_warnings():
if name in ['Scaler', 'StandardScaler']:
estimator = Estimator(with_mean=False)
else:
estimator = Estimator()
set_testing_parameters(estimator)
# fit and predict
try:
estimator.fit(X, y)
if hasattr(estimator, "predict"):
pred = estimator.predict(X)
assert_equal(pred.shape, (X.shape[0],))
if hasattr(estimator, 'predict_proba'):
probs = estimator.predict_proba(X)
assert_equal(probs.shape, (X.shape[0], 4))
except TypeError as e:
if 'sparse' not in repr(e):
print("Estimator %s doesn't seem to fail gracefully on "
"sparse data: error message state explicitly that "
"sparse input is not supported if this is not the case."
% name)
raise
except Exception:
print("Estimator %s doesn't seem to fail gracefully on "
"sparse data: it should raise a TypeError if sparse input "
"is explicitly not supported." % name)
raise
def check_dtype_object(name, Estimator):
# check that estimators treat dtype object as numeric if possible
rng = np.random.RandomState(0)
X = rng.rand(40, 10).astype(object)
y = (X[:, 0] * 4).astype(np.int)
y = multioutput_estimator_convert_y_2d(name, y)
with warnings.catch_warnings():
estimator = Estimator()
set_testing_parameters(estimator)
estimator.fit(X, y)
if hasattr(estimator, "predict"):
estimator.predict(X)
if (hasattr(estimator, "transform") and
name not in DEPRECATED_TRANSFORM):
estimator.transform(X)
try:
estimator.fit(X, y.astype(object))
except Exception as e:
if "Unknown label type" not in str(e):
raise
X[0, 0] = {'foo': 'bar'}
msg = "argument must be a string or a number"
assert_raises_regex(TypeError, msg, estimator.fit, X, y)
@ignore_warnings
def check_fit2d_predict1d(name, Estimator):
# check by fitting a 2d array and predicting with a 1d array
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(20, 3))
y = X[:, 0].astype(np.int)
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_testing_parameters(estimator)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
set_random_state(estimator, 1)
estimator.fit(X, y)
for method in ["predict", "transform", "decision_function",
"predict_proba"]:
if hasattr(estimator, method):
try:
assert_warns(DeprecationWarning,
getattr(estimator, method), X[0])
except ValueError:
pass
@ignore_warnings
def check_fit2d_1sample(name, Estimator):
# check fitting a 2d array with only one sample
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(1, 10))
y = X[:, 0].astype(np.int)
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_testing_parameters(estimator)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
set_random_state(estimator, 1)
try:
estimator.fit(X, y)
except ValueError:
pass
@ignore_warnings
def check_fit2d_1feature(name, Estimator):
# check fitting a 2d array with only one feature
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(10, 1))
y = X[:, 0].astype(np.int)
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_testing_parameters(estimator)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
set_random_state(estimator, 1)
try:
estimator.fit(X, y)
except ValueError:
pass
@ignore_warnings
def check_fit1d_1feature(name, Estimator):
# check fitting 1d array with 1 feature
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(20))
y = X.astype(np.int)
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_testing_parameters(estimator)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
set_random_state(estimator, 1)
try:
estimator.fit(X, y)
except ValueError:
pass
@ignore_warnings
def check_fit1d_1sample(name, Estimator):
# check fitting a 1d array with only one sample in y
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(20))
y = np.array([1])
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_testing_parameters(estimator)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
set_random_state(estimator, 1)
try:
estimator.fit(X, y)
except ValueError:
pass
def check_transformer_general(name, Transformer):
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X = StandardScaler().fit_transform(X)
X -= X.min()
_check_transformer(name, Transformer, X, y)
_check_transformer(name, Transformer, X.tolist(), y.tolist())
def check_transformer_data_not_an_array(name, Transformer):
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X = StandardScaler().fit_transform(X)
# We need to make sure that we have non negative data, for things
# like NMF
X -= X.min() - .1
this_X = NotAnArray(X)
this_y = NotAnArray(np.asarray(y))
_check_transformer(name, Transformer, this_X, this_y)
def check_transformers_unfitted(name, Transformer):
X, y = _boston_subset()
with warnings.catch_warnings(record=True):
transformer = Transformer()
assert_raises((AttributeError, ValueError), transformer.transform, X)
def _check_transformer(name, Transformer, X, y):
if name in ('CCA', 'LocallyLinearEmbedding', 'KernelPCA') and _is_32bit():
# Those transformers yield non-deterministic output when executed on
# a 32bit Python. The same transformers are stable on 64bit Python.
# FIXME: try to isolate a minimalistic reproduction case only depending
# on numpy & scipy and/or maybe generate a test dataset that does not
# cause such unstable behaviors.
msg = name + ' is non deterministic on 32bit Python'
raise SkipTest(msg)
n_samples, n_features = np.asarray(X).shape
# catch deprecation warnings
with warnings.catch_warnings(record=True):
transformer = Transformer()
set_random_state(transformer)
set_testing_parameters(transformer)
# fit
if name in CROSS_DECOMPOSITION:
y_ = np.c_[y, y]
y_[::2, 1] *= 2
else:
y_ = y
transformer.fit(X, y_)
# fit_transform method should work on non fitted estimator
transformer_clone = clone(transformer)
X_pred = transformer_clone.fit_transform(X, y=y_)
if isinstance(X_pred, tuple):
for x_pred in X_pred:
assert_equal(x_pred.shape[0], n_samples)
else:
# check for consistent n_samples
assert_equal(X_pred.shape[0], n_samples)
if hasattr(transformer, 'transform'):
if name in CROSS_DECOMPOSITION:
X_pred2 = transformer.transform(X, y_)
X_pred3 = transformer.fit_transform(X, y=y_)
else:
X_pred2 = transformer.transform(X)
X_pred3 = transformer.fit_transform(X, y=y_)
if isinstance(X_pred, tuple) and isinstance(X_pred2, tuple):
for x_pred, x_pred2, x_pred3 in zip(X_pred, X_pred2, X_pred3):
assert_array_almost_equal(
x_pred, x_pred2, 2,
"fit_transform and transform outcomes not consistent in %s"
% Transformer)
assert_array_almost_equal(
x_pred, x_pred3, 2,
"consecutive fit_transform outcomes not consistent in %s"
% Transformer)
else:
assert_array_almost_equal(
X_pred, X_pred2, 2,
"fit_transform and transform outcomes not consistent in %s"
% Transformer)
assert_array_almost_equal(
X_pred, X_pred3, 2,
"consecutive fit_transform outcomes not consistent in %s"
% Transformer)
assert_equal(len(X_pred2), n_samples)
assert_equal(len(X_pred3), n_samples)
# raises error on malformed input for transform
if hasattr(X, 'T'):
# If it's not an array, it does not have a 'T' property
assert_raises(ValueError, transformer.transform, X.T)
@ignore_warnings
def check_pipeline_consistency(name, Estimator):
if name in ('CCA', 'LocallyLinearEmbedding', 'KernelPCA') and _is_32bit():
# Those transformers yield non-deterministic output when executed on
# a 32bit Python. The same transformers are stable on 64bit Python.
# FIXME: try to isolate a minimalistic reproduction case only depending
# on numpy & scipy and/or maybe generate a test dataset that does not
# cause such unstable behaviors.
msg = name + ' is non deterministic on 32bit Python'
raise SkipTest(msg)
# check that make_pipeline(est) gives same score as est
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X -= X.min()
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_testing_parameters(estimator)
set_random_state(estimator)
pipeline = make_pipeline(estimator)
estimator.fit(X, y)
pipeline.fit(X, y)
if name in DEPRECATED_TRANSFORM:
funcs = ["score"]
else:
funcs = ["score", "fit_transform"]
for func_name in funcs:
func = getattr(estimator, func_name, None)
if func is not None:
func_pipeline = getattr(pipeline, func_name)
result = func(X, y)
result_pipe = func_pipeline(X, y)
assert_array_almost_equal(result, result_pipe)
@ignore_warnings
def check_fit_score_takes_y(name, Estimator):
# check that all estimators accept an optional y
# in fit and score so they can be used in pipelines
rnd = np.random.RandomState(0)
X = rnd.uniform(size=(10, 3))
y = np.arange(10) % 3
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_testing_parameters(estimator)
set_random_state(estimator)
if name in DEPRECATED_TRANSFORM:
funcs = ["fit", "score", "partial_fit", "fit_predict"]
else:
funcs = [
"fit", "score", "partial_fit", "fit_predict", "fit_transform"]
for func_name in funcs:
func = getattr(estimator, func_name, None)
if func is not None:
func(X, y)
args = [p.name for p in signature(func).parameters.values()]
assert_true(args[1] in ["y", "Y"],
"Expected y or Y as second argument for method "
"%s of %s. Got arguments: %r."
% (func_name, Estimator.__name__, args))
@ignore_warnings
def check_estimators_dtypes(name, Estimator):
rnd = np.random.RandomState(0)
X_train_32 = 3 * rnd.uniform(size=(20, 5)).astype(np.float32)
X_train_64 = X_train_32.astype(np.float64)
X_train_int_64 = X_train_32.astype(np.int64)
X_train_int_32 = X_train_32.astype(np.int32)
y = X_train_int_64[:, 0]
y = multioutput_estimator_convert_y_2d(name, y)
if name in DEPRECATED_TRANSFORM:
methods = ["predict", "decision_function", "predict_proba"]
else:
methods = [
"predict", "transform", "decision_function", "predict_proba"]
for X_train in [X_train_32, X_train_64, X_train_int_64, X_train_int_32]:
with warnings.catch_warnings(record=True):
estimator = Estimator()
set_testing_parameters(estimator)
set_random_state(estimator, 1)
estimator.fit(X_train, y)
for method in methods:
if hasattr(estimator, method):
getattr(estimator, method)(X_train)
def check_estimators_empty_data_messages(name, Estimator):
e = Estimator()
set_testing_parameters(e)
set_random_state(e, 1)
X_zero_samples = np.empty(0).reshape(0, 3)
# The precise message can change depending on whether X or y is
# validated first. Let us test the type of exception only:
assert_raises(ValueError, e.fit, X_zero_samples, [])
X_zero_features = np.empty(0).reshape(3, 0)
# the following y should be accepted by both classifiers and regressors
# and ignored by unsupervised models
y = multioutput_estimator_convert_y_2d(name, np.array([1, 0, 1]))
msg = ("0 feature\(s\) \(shape=\(3, 0\)\) while a minimum of \d* "
"is required.")
assert_raises_regex(ValueError, msg, e.fit, X_zero_features, y)
def check_estimators_nan_inf(name, Estimator):
# Checks that the Estimator rejects X containing NaN or inf.
rnd = np.random.RandomState(0)
X_train_finite = rnd.uniform(size=(10, 3))
X_train_nan = rnd.uniform(size=(10, 3))
X_train_nan[0, 0] = np.nan
X_train_inf = rnd.uniform(size=(10, 3))
X_train_inf[0, 0] = np.inf
y = np.ones(10)
y[:5] = 0
y = multioutput_estimator_convert_y_2d(name, y)
error_string_fit = "Estimator doesn't check for NaN and inf in fit."
error_string_predict = ("Estimator doesn't check for NaN and inf in"
" predict.")
error_string_transform = ("Estimator doesn't check for NaN and inf in"
" transform.")
for X_train in [X_train_nan, X_train_inf]:
# catch deprecation warnings
with warnings.catch_warnings(record=True):
estimator = Estimator()
set_testing_parameters(estimator)
set_random_state(estimator, 1)
# try to fit
try:
estimator.fit(X_train, y)
except ValueError as e:
if 'inf' not in repr(e) and 'NaN' not in repr(e):
print(error_string_fit, Estimator, e)
traceback.print_exc(file=sys.stdout)
raise e
except Exception as exc:
print(error_string_fit, Estimator, exc)
traceback.print_exc(file=sys.stdout)
raise exc
else:
raise AssertionError(error_string_fit, Estimator)
# actually fit
estimator.fit(X_train_finite, y)
# predict
if hasattr(estimator, "predict"):
try:
estimator.predict(X_train)
except ValueError as e:
if 'inf' not in repr(e) and 'NaN' not in repr(e):
print(error_string_predict, Estimator, e)
traceback.print_exc(file=sys.stdout)
raise e
except Exception as exc:
print(error_string_predict, Estimator, exc)
traceback.print_exc(file=sys.stdout)
else:
raise AssertionError(error_string_predict, Estimator)
# transform
if (hasattr(estimator, "transform") and
name not in DEPRECATED_TRANSFORM):
try:
estimator.transform(X_train)
except ValueError as e:
if 'inf' not in repr(e) and 'NaN' not in repr(e):
print(error_string_transform, Estimator, e)
traceback.print_exc(file=sys.stdout)
raise e
except Exception as exc:
print(error_string_transform, Estimator, exc)
traceback.print_exc(file=sys.stdout)
else:
raise AssertionError(error_string_transform, Estimator)
@ignore_warnings
def check_estimators_pickle(name, Estimator):
"""Test that we can pickle all estimators"""
if name in DEPRECATED_TRANSFORM:
check_methods = ["predict", "decision_function", "predict_proba"]
else:
check_methods = ["predict", "transform", "decision_function",
"predict_proba"]
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
# some estimators can't handle feature values less than 0
X -= X.min()
# some estimators only take multioutputs
y = multioutput_estimator_convert_y_2d(name, y)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
estimator = Estimator()
set_random_state(estimator)
set_testing_parameters(estimator)
estimator.fit(X, y)
result = dict()
for method in check_methods:
if hasattr(estimator, method):
result[method] = getattr(estimator, method)(X)
# pickle and unpickle!
pickled_estimator = pickle.dumps(estimator)
unpickled_estimator = pickle.loads(pickled_estimator)
for method in result:
unpickled_result = getattr(unpickled_estimator, method)(X)
assert_array_almost_equal(result[method], unpickled_result)
def check_estimators_partial_fit_n_features(name, Alg):
# check that an error is raised if the number of features changes between calls to partial_fit.
if not hasattr(Alg, 'partial_fit'):
return
X, y = make_blobs(n_samples=50, random_state=1)
X -= X.min()
with warnings.catch_warnings(record=True):
alg = Alg()
if not hasattr(alg, 'partial_fit'):
# check again as for mlp this depends on algorithm
return
set_testing_parameters(alg)
try:
if isinstance(alg, ClassifierMixin):
classes = np.unique(y)
alg.partial_fit(X, y, classes=classes)
else:
alg.partial_fit(X, y)
except NotImplementedError:
return
assert_raises(ValueError, alg.partial_fit, X[:, :-1], y)
def check_clustering(name, Alg):
X, y = make_blobs(n_samples=50, random_state=1)
X, y = shuffle(X, y, random_state=7)
X = StandardScaler().fit_transform(X)
n_samples, n_features = X.shape
# catch deprecation and neighbors warnings
with warnings.catch_warnings(record=True):
alg = Alg()
set_testing_parameters(alg)
if hasattr(alg, "n_clusters"):
alg.set_params(n_clusters=3)
set_random_state(alg)
if name == 'AffinityPropagation':
alg.set_params(preference=-100)
alg.set_params(max_iter=100)
# fit
alg.fit(X)
# with lists
alg.fit(X.tolist())
assert_equal(alg.labels_.shape, (n_samples,))
pred = alg.labels_
assert_greater(adjusted_rand_score(pred, y), 0.4)
# fit another time with ``fit_predict`` and compare results
if name == 'SpectralClustering':
# there is no way to make Spectral clustering deterministic :(
return
set_random_state(alg)
with warnings.catch_warnings(record=True):
pred2 = alg.fit_predict(X)
assert_array_equal(pred, pred2)
def check_clusterer_compute_labels_predict(name, Clusterer):
"""Check that predict is invariant of compute_labels"""
X, y = make_blobs(n_samples=20, random_state=0)
clusterer = Clusterer()
if hasattr(clusterer, "compute_labels"):
# MiniBatchKMeans
if hasattr(clusterer, "random_state"):
clusterer.set_params(random_state=0)
X_pred1 = clusterer.fit(X).predict(X)
clusterer.set_params(compute_labels=False)
X_pred2 = clusterer.fit(X).predict(X)
assert_array_equal(X_pred1, X_pred2)
def check_classifiers_one_label(name, Classifier):
error_string_fit = "Classifier can't train when only one class is present."
error_string_predict = ("Classifier can't predict when only one class is "
"present.")
rnd = np.random.RandomState(0)
X_train = rnd.uniform(size=(10, 3))
X_test = rnd.uniform(size=(10, 3))
y = np.ones(10)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
classifier = Classifier()
set_testing_parameters(classifier)
# try to fit
try:
classifier.fit(X_train, y)
except ValueError as e:
if 'class' not in repr(e):
print(error_string_fit, Classifier, e)
traceback.print_exc(file=sys.stdout)
raise e
else:
return
except Exception as exc:
print(error_string_fit, Classifier, exc)
traceback.print_exc(file=sys.stdout)
raise exc
# predict
try:
assert_array_equal(classifier.predict(X_test), y)
except Exception as exc:
print(error_string_predict, Classifier, exc)
raise exc
@ignore_warnings # Warnings are raised by decision function
def check_classifiers_train(name, Classifier):
X_m, y_m = make_blobs(n_samples=300, random_state=0)
X_m, y_m = shuffle(X_m, y_m, random_state=7)
X_m = StandardScaler().fit_transform(X_m)
# generate binary problem from multi-class one
y_b = y_m[y_m != 2]
X_b = X_m[y_m != 2]
for (X, y) in [(X_m, y_m), (X_b, y_b)]:
# catch deprecation warnings
classes = np.unique(y)
n_classes = len(classes)
n_samples, n_features = X.shape
with warnings.catch_warnings(record=True):
classifier = Classifier()
if name in ['BernoulliNB', 'MultinomialNB']:
X -= X.min()
set_testing_parameters(classifier)
set_random_state(classifier)
# raises error on malformed input for fit
assert_raises(ValueError, classifier.fit, X, y[:-1])
# fit
classifier.fit(X, y)
# with lists
classifier.fit(X.tolist(), y.tolist())
assert_true(hasattr(classifier, "classes_"))
y_pred = classifier.predict(X)
assert_equal(y_pred.shape, (n_samples,))
# training set performance
if name not in ['BernoulliNB', 'MultinomialNB']:
assert_greater(accuracy_score(y, y_pred), 0.83)
# raises error on malformed input for predict
assert_raises(ValueError, classifier.predict, X.T)
if hasattr(classifier, "decision_function"):
try:
# decision_function agrees with predict
decision = classifier.decision_function(X)
if n_classes == 2:
assert_equal(decision.shape, (n_samples,))
dec_pred = (decision.ravel() > 0).astype(np.int)
assert_array_equal(dec_pred, y_pred)
if (n_classes == 3
and not isinstance(classifier, BaseLibSVM)):
# 1on1 of LibSVM works differently
assert_equal(decision.shape, (n_samples, n_classes))
assert_array_equal(np.argmax(decision, axis=1), y_pred)
# raises error on malformed input
assert_raises(ValueError,
classifier.decision_function, X.T)
# raises error on malformed input for decision_function
assert_raises(ValueError,
classifier.decision_function, X.T)
except NotImplementedError:
pass
if hasattr(classifier, "predict_proba"):
# predict_proba agrees with predict
y_prob = classifier.predict_proba(X)
assert_equal(y_prob.shape, (n_samples, n_classes))
assert_array_equal(np.argmax(y_prob, axis=1), y_pred)
# check that probas for all classes sum to one
assert_array_almost_equal(np.sum(y_prob, axis=1),
np.ones(n_samples))
# raises error on malformed input
assert_raises(ValueError, classifier.predict_proba, X.T)
# raises error on malformed input for predict_proba
assert_raises(ValueError, classifier.predict_proba, X.T)
def check_estimators_fit_returns_self(name, Estimator):
"""Check if self is returned when calling fit"""
X, y = make_blobs(random_state=0, n_samples=9, n_features=4)
y = multioutput_estimator_convert_y_2d(name, y)
# some want non-negative input
X -= X.min()
estimator = Estimator()
set_testing_parameters(estimator)
set_random_state(estimator)
assert_true(estimator.fit(X, y) is estimator)
@ignore_warnings
def check_estimators_unfitted(name, Estimator):
"""Check that predict raises an exception in an unfitted estimator.
Unfitted estimators should raise either AttributeError or ValueError.
The specific exception type NotFittedError inherits from both and can
therefore be adequately raised for that purpose.
"""
# Common test for Regressors as well as Classifiers
X, y = _boston_subset()
with warnings.catch_warnings(record=True):
est = Estimator()
msg = "fit"
if hasattr(est, 'predict'):
assert_raise_message((AttributeError, ValueError), msg,
est.predict, X)
if hasattr(est, 'decision_function'):
assert_raise_message((AttributeError, ValueError), msg,
est.decision_function, X)
if hasattr(est, 'predict_proba'):
assert_raise_message((AttributeError, ValueError), msg,
est.predict_proba, X)
if hasattr(est, 'predict_log_proba'):
assert_raise_message((AttributeError, ValueError), msg,
est.predict_log_proba, X)
def check_supervised_y_2d(name, Estimator):
if "MultiTask" in name:
# These only work on 2d, so this test makes no sense
return
rnd = np.random.RandomState(0)
X = rnd.uniform(size=(10, 3))
y = np.arange(10) % 3
# catch deprecation warnings
with warnings.catch_warnings(record=True):
estimator = Estimator()
set_testing_parameters(estimator)
set_random_state(estimator)
# fit
estimator.fit(X, y)
y_pred = estimator.predict(X)
set_random_state(estimator)
# Check that when a 2D y is given, a DataConversionWarning is
# raised
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always", DataConversionWarning)
warnings.simplefilter("ignore", RuntimeWarning)
estimator.fit(X, y[:, np.newaxis])
y_pred_2d = estimator.predict(X)
msg = "expected 1 DataConversionWarning, got: %s" % (
", ".join([str(w_x) for w_x in w]))
if name not in MULTI_OUTPUT:
# check that we warned if we don't support multi-output
assert_greater(len(w), 0, msg)
assert_true("DataConversionWarning('A column-vector y"
" was passed when a 1d array was expected" in msg)
assert_array_almost_equal(y_pred.ravel(), y_pred_2d.ravel())
def check_classifiers_classes(name, Classifier):
X, y = make_blobs(n_samples=30, random_state=0, cluster_std=0.1)
X, y = shuffle(X, y, random_state=7)
X = StandardScaler().fit_transform(X)
# We need to make sure that we have non negative data, for things
# like NMF
X -= X.min() - .1
y_names = np.array(["one", "two", "three"])[y]
for y_names in [y_names, y_names.astype('O')]:
if name in ["LabelPropagation", "LabelSpreading"]:
# TODO some complication with -1 label
y_ = y
else:
y_ = y_names
classes = np.unique(y_)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
classifier = Classifier()
if name == 'BernoulliNB':
classifier.set_params(binarize=X.mean())
set_testing_parameters(classifier)
set_random_state(classifier)
# fit
classifier.fit(X, y_)
y_pred = classifier.predict(X)
# training set performance
assert_array_equal(np.unique(y_), np.unique(y_pred))
if np.any(classifier.classes_ != classes):
print("Unexpected classes_ attribute for %r: "
"expected %s, got %s" %
(classifier, classes, classifier.classes_))
def check_regressors_int(name, Regressor):
X, _ = _boston_subset()
X = X[:50]
rnd = np.random.RandomState(0)
y = rnd.randint(3, size=X.shape[0])
y = multioutput_estimator_convert_y_2d(name, y)
rnd = np.random.RandomState(0)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
# separate estimators to control random seeds
regressor_1 = Regressor()
regressor_2 = Regressor()
set_testing_parameters(regressor_1)
set_testing_parameters(regressor_2)
set_random_state(regressor_1)
set_random_state(regressor_2)
if name in CROSS_DECOMPOSITION:
y_ = np.vstack([y, 2 * y + rnd.randint(2, size=len(y))])
y_ = y_.T
else:
y_ = y
# fit
regressor_1.fit(X, y_)
pred1 = regressor_1.predict(X)
regressor_2.fit(X, y_.astype(np.float))
pred2 = regressor_2.predict(X)
assert_array_almost_equal(pred1, pred2, 2, name)
def check_regressors_train(name, Regressor):
X, y = _boston_subset()
y = StandardScaler().fit_transform(y.reshape(-1, 1)) # X is already scaled
y = y.ravel()
y = multioutput_estimator_convert_y_2d(name, y)
rnd = np.random.RandomState(0)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
regressor = Regressor()
set_testing_parameters(regressor)
if not hasattr(regressor, 'alphas') and hasattr(regressor, 'alpha'):
# linear regressors need to set alpha, but not generalized CV ones
regressor.alpha = 0.01
if name == 'PassiveAggressiveRegressor':
regressor.C = 0.01
# raises error on malformed input for fit
assert_raises(ValueError, regressor.fit, X, y[:-1])
# fit
if name in CROSS_DECOMPOSITION:
y_ = np.vstack([y, 2 * y + rnd.randint(2, size=len(y))])
y_ = y_.T
else:
y_ = y
set_random_state(regressor)
regressor.fit(X, y_)
regressor.fit(X.tolist(), y_.tolist())
y_pred = regressor.predict(X)
assert_equal(y_pred.shape, y_.shape)
# TODO: find out why PLS and CCA fail. RANSAC is random
# and furthermore assumes the presence of outliers, hence
# skipped
if name not in ('PLSCanonical', 'CCA', 'RANSACRegressor'):
assert_greater(regressor.score(X, y_), 0.5)
@ignore_warnings
def check_regressors_no_decision_function(name, Regressor):
# checks that regressors don't expose decision_function / predict_proba,
# or only do so via a deprecation warning
rng = np.random.RandomState(0)
X = rng.normal(size=(10, 4))
y = multioutput_estimator_convert_y_2d(name, X[:, 0])
regressor = Regressor()
set_testing_parameters(regressor)
if hasattr(regressor, "n_components"):
# FIXME CCA, PLS is not robust to rank 1 effects
regressor.n_components = 1
regressor.fit(X, y)
funcs = ["decision_function", "predict_proba", "predict_log_proba"]
for func_name in funcs:
func = getattr(regressor, func_name, None)
if func is None:
# doesn't have function
continue
# has function. Should raise deprecation warning
msg = func_name
assert_warns_message(DeprecationWarning, msg, func, X)
def check_class_weight_classifiers(name, Classifier):
if name == "NuSVC":
# the sparse version has a parameter that doesn't do anything
raise SkipTest
if name.endswith("NB"):
# NaiveBayes classifiers have a somewhat different interface.
# FIXME SOON!
raise SkipTest
for n_centers in [2, 3]:
# create a very noisy dataset
X, y = make_blobs(centers=n_centers, random_state=0, cluster_std=20)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5,
random_state=0)
n_centers = len(np.unique(y_train))
if n_centers == 2:
class_weight = {0: 1000, 1: 0.0001}
else:
class_weight = {0: 1000, 1: 0.0001, 2: 0.0001}
with warnings.catch_warnings(record=True):
classifier = Classifier(class_weight=class_weight)
if hasattr(classifier, "n_iter"):
classifier.set_params(n_iter=100)
if hasattr(classifier, "min_weight_fraction_leaf"):
classifier.set_params(min_weight_fraction_leaf=0.01)
set_random_state(classifier)
classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)
assert_greater(np.mean(y_pred == 0), 0.89)
def check_class_weight_balanced_classifiers(name, Classifier, X_train, y_train,
X_test, y_test, weights):
with warnings.catch_warnings(record=True):
classifier = Classifier()
if hasattr(classifier, "n_iter"):
classifier.set_params(n_iter=100)
set_random_state(classifier)
classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)
classifier.set_params(class_weight='balanced')
classifier.fit(X_train, y_train)
y_pred_balanced = classifier.predict(X_test)
assert_greater(f1_score(y_test, y_pred_balanced, average='weighted'),
f1_score(y_test, y_pred, average='weighted'))
def check_class_weight_balanced_linear_classifier(name, Classifier):
"""Test class weights with non-contiguous class labels."""
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = np.array([1, 1, 1, -1, -1])
with warnings.catch_warnings(record=True):
classifier = Classifier()
if hasattr(classifier, "n_iter"):
# This is a very small dataset; the default n_iter is likely too small
# to reach convergence
classifier.set_params(n_iter=1000)
set_random_state(classifier)
# Let the model compute the class frequencies
classifier.set_params(class_weight='balanced')
coef_balanced = classifier.fit(X, y).coef_.copy()
# Count each label occurrence to reweight manually
n_samples = len(y)
n_classes = float(len(np.unique(y)))
class_weight = {1: n_samples / (np.sum(y == 1) * n_classes),
-1: n_samples / (np.sum(y == -1) * n_classes)}
classifier.set_params(class_weight=class_weight)
coef_manual = classifier.fit(X, y).coef_.copy()
assert_array_almost_equal(coef_balanced, coef_manual)
def check_estimators_overwrite_params(name, Estimator):
X, y = make_blobs(random_state=0, n_samples=9)
y = multioutput_estimator_convert_y_2d(name, y)
# some want non-negative input
X -= X.min()
with warnings.catch_warnings(record=True):
# catch deprecation warnings
estimator = Estimator()
set_testing_parameters(estimator)
set_random_state(estimator)
# Make a physical copy of the original estimator parameters before fitting.
params = estimator.get_params()
original_params = deepcopy(params)
# Fit the model
estimator.fit(X, y)
# Compare the state of the model parameters with the original parameters
new_params = estimator.get_params()
for param_name, original_value in original_params.items():
new_value = new_params[param_name]
# We should never change or mutate the internal state of input
# parameters by default. To check this we use the joblib.hash function
# that introspects recursively any subobjects to compute a checksum.
# The only exception to this rule of immutable constructor parameters
# is possible RandomState instance but in this check we explicitly
# fixed the random_state params recursively to be integer seeds.
assert_equal(hash(new_value), hash(original_value),
"Estimator %s should not change or mutate "
" the parameter %s from %s to %s during fit."
% (name, param_name, original_value, new_value))
def check_sparsify_coefficients(name, Estimator):
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1],
[-1, -2], [2, 2], [-2, -2]])
y = [1, 1, 1, 2, 2, 2, 3, 3, 3]
est = Estimator()
est.fit(X, y)
pred_orig = est.predict(X)
# test sparsify with dense inputs
est.sparsify()
assert_true(sparse.issparse(est.coef_))
pred = est.predict(X)
assert_array_equal(pred, pred_orig)
# pickle and unpickle with sparse coef_
est = pickle.loads(pickle.dumps(est))
assert_true(sparse.issparse(est.coef_))
pred = est.predict(X)
assert_array_equal(pred, pred_orig)
def check_classifier_data_not_an_array(name, Estimator):
X = np.array([[3, 0], [0, 1], [0, 2], [1, 1], [1, 2], [2, 1]])
y = [1, 1, 1, 2, 2, 2]
y = multioutput_estimator_convert_y_2d(name, y)
check_estimators_data_not_an_array(name, Estimator, X, y)
def check_regressor_data_not_an_array(name, Estimator):
X, y = _boston_subset(n_samples=50)
y = multioutput_estimator_convert_y_2d(name, y)
check_estimators_data_not_an_array(name, Estimator, X, y)
def check_estimators_data_not_an_array(name, Estimator, X, y):
if name in CROSS_DECOMPOSITION:
raise SkipTest
# catch deprecation warnings
with warnings.catch_warnings(record=True):
# separate estimators to control random seeds
estimator_1 = Estimator()
estimator_2 = Estimator()
set_testing_parameters(estimator_1)
set_testing_parameters(estimator_2)
set_random_state(estimator_1)
set_random_state(estimator_2)
y_ = NotAnArray(np.asarray(y))
X_ = NotAnArray(np.asarray(X))
# fit
estimator_1.fit(X_, y_)
pred1 = estimator_1.predict(X_)
estimator_2.fit(X, y)
pred2 = estimator_2.predict(X)
assert_array_almost_equal(pred1, pred2, 2, name)
def check_parameters_default_constructible(name, Estimator):
classifier = LinearDiscriminantAnalysis()
# test default-constructibility
# get rid of deprecation warnings
with warnings.catch_warnings(record=True):
if name in META_ESTIMATORS:
estimator = Estimator(classifier)
else:
estimator = Estimator()
# test cloning
clone(estimator)
# test __repr__
repr(estimator)
# test that set_params returns self
assert_true(estimator.set_params() is estimator)
# test if init does nothing but set parameters
# this is important for grid_search etc.
# We get the default parameters from init and then
# compare these against the actual values of the attributes.
# this comes from getattr. Gets rid of deprecation decorator.
init = getattr(estimator.__init__, 'deprecated_original',
estimator.__init__)
try:
def param_filter(p):
"""Identify hyper parameters of an estimator"""
return (p.name != 'self'
and p.kind != p.VAR_KEYWORD
and p.kind != p.VAR_POSITIONAL)
init_params = [p for p in signature(init).parameters.values()
if param_filter(p)]
except (TypeError, ValueError):
# init is not a python function.
# true for mixins
return
params = estimator.get_params()
if name in META_ESTIMATORS:
# they can need a non-default argument
init_params = init_params[1:]
for init_param in init_params:
assert_not_equal(init_param.default, init_param.empty,
"parameter %s for %s has no default value"
% (init_param.name, type(estimator).__name__))
assert_in(type(init_param.default),
[str, int, float, bool, tuple, type(None),
np.float64, types.FunctionType, Memory])
if init_param.name not in params.keys():
# deprecated parameter, not in get_params
assert_true(init_param.default is None)
continue
param_value = params[init_param.name]
if isinstance(param_value, np.ndarray):
assert_array_equal(param_value, init_param.default)
else:
assert_equal(param_value, init_param.default)
def multioutput_estimator_convert_y_2d(name, y):
# Estimators with "MultiTask" in their name raise ValueError if y is 1-D.
# Convert y into a 2-D array for those estimators.
if "MultiTask" in name:
return np.reshape(y, (-1, 1))
return y
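# Quick illustration (names chosen only for the example): estimators with
# "MultiTask" in their name get a column vector, everything else keeps y as is.
#     multioutput_estimator_convert_y_2d("MultiTaskLasso", np.arange(3))
#     # -> array([[0], [1], [2]])
#     multioutput_estimator_convert_y_2d("Ridge", np.arange(3))
#     # -> array([0, 1, 2])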
def check_non_transformer_estimators_n_iter(name, estimator,
multi_output=False):
# Check that all iterative solvers run for more than one iteration
iris = load_iris()
X, y_ = iris.data, iris.target
if multi_output:
y_ = np.reshape(y_, (-1, 1))
set_random_state(estimator, 0)
if name == 'AffinityPropagation':
estimator.fit(X)
else:
estimator.fit(X, y_)
# HuberRegressor depends on scipy.optimize.fmin_l_bfgs_b
# which doesn't return a n_iter for old versions of SciPy.
if not (name == 'HuberRegressor' and estimator.n_iter_ is None):
assert_greater_equal(estimator.n_iter_, 1)
def check_transformer_n_iter(name, estimator):
if name in CROSS_DECOMPOSITION:
# Check using default data
X = [[0., 0., 1.], [1., 0., 0.], [2., 2., 2.], [2., 5., 4.]]
y_ = [[0.1, -0.2], [0.9, 1.1], [0.1, -0.5], [0.3, -0.2]]
else:
X, y_ = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X -= X.min() - 0.1
set_random_state(estimator, 0)
estimator.fit(X, y_)
# These return a n_iter per component.
if name in CROSS_DECOMPOSITION:
for iter_ in estimator.n_iter_:
assert_greater_equal(iter_, 1)
else:
assert_greater_equal(estimator.n_iter_, 1)
def check_get_params_invariance(name, estimator):
class T(BaseEstimator):
"""Mock classifier
"""
def __init__(self):
pass
def fit(self, X, y):
return self
if name in ('FeatureUnion', 'Pipeline'):
e = estimator([('clf', T())])
elif name in ('GridSearchCV', 'RandomizedSearchCV', 'SelectFromModel'):
return
else:
e = estimator()
shallow_params = e.get_params(deep=False)
deep_params = e.get_params(deep=True)
assert_true(all(item in deep_params.items() for item in
shallow_params.items()))
def check_classifiers_regression_target(name, Estimator):
# Check if classifier throws an exception when fed regression targets
boston = load_boston()
X, y = boston.data, boston.target
e = Estimator()
msg = 'Unknown label type: '
assert_raises_regex(ValueError, msg, e.fit, X, y)
| bsd-3-clause |
Insight-book/data-science-from-scratch | scratch/working_with_data.py | 3 | 18501 | from typing import List, Dict
from collections import Counter
import math
import matplotlib.pyplot as plt
def bucketize(point: float, bucket_size: float) -> float:
"""Floor the point to the next lower multiple of bucket_size"""
return bucket_size * math.floor(point / bucket_size)
def make_histogram(points: List[float], bucket_size: float) -> Dict[float, int]:
"""Buckets the points and counts how many in each bucket"""
return Counter(bucketize(point, bucket_size) for point in points)
def plot_histogram(points: List[float], bucket_size: float, title: str = ""):
histogram = make_histogram(points, bucket_size)
plt.bar(histogram.keys(), histogram.values(), width=bucket_size)
plt.title(title)
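# A small usage sketch (not in the original listing): bucketize floors each
# point to its bucket, and make_histogram counts the points per bucket.
#     make_histogram([1.2, 3.7, 4.1, 9.9], bucket_size=5)
#     # -> Counter({0: 3, 5: 1})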
import random
from scratch.probability import inverse_normal_cdf
def random_normal() -> float:
"""Returns a random draw from a standard normal distribution"""
return inverse_normal_cdf(random.random())
xs = [random_normal() for _ in range(1000)]
ys1 = [ x + random_normal() / 2 for x in xs]
ys2 = [-x + random_normal() / 2 for x in xs]
plt.scatter(xs, ys1, marker='.', color='black', label='ys1')
plt.scatter(xs, ys2, marker='.', color='gray', label='ys2')
plt.xlabel('xs')
plt.ylabel('ys')
plt.legend(loc=9)
plt.title("Very Different Joint Distributions")
# plt.show()
plt.savefig('im/working_scatter.png')
plt.gca().clear()
from scratch.statistics import correlation
assert 0.89 < correlation(xs, ys1) < 0.91
assert -0.91 < correlation(xs, ys2) < -0.89
from scratch.linear_algebra import Matrix, Vector, make_matrix
def correlation_matrix(data: List[Vector]) -> Matrix:
"""
Returns the len(data) x len(data) matrix whose (i, j)-th entry
is the correlation between data[i] and data[j]
"""
def correlation_ij(i: int, j: int) -> float:
return correlation(data[i], data[j])
return make_matrix(len(data), len(data), correlation_ij)
vectors = [xs, ys1, ys2]
assert correlation_matrix(vectors) == [
[correlation(xs, xs), correlation(xs, ys1), correlation(xs, ys2)],
[correlation(ys1, xs), correlation(ys1, ys1), correlation(ys1, ys2)],
[correlation(ys2, xs), correlation(ys2, ys1), correlation(ys2, ys2)],
]
import datetime
stock_price = {'closing_price': 102.06,
'date': datetime.date(2014, 8, 29),
'symbol': 'AAPL'}
# oops, typo
stock_price['cosing_price'] = 103.06
prices: Dict[datetime.date, float] = {}
from typing import NamedTuple
class StockPrice(NamedTuple):
symbol: str
date: datetime.date
closing_price: float
def is_high_tech(self) -> bool:
"""It's a class, so we can add methods too"""
return self.symbol in ['MSFT', 'GOOG', 'FB', 'AMZN', 'AAPL']
price = StockPrice('MSFT', datetime.date(2018, 12, 14), 106.03)
assert price.symbol == 'MSFT'
assert price.closing_price == 106.03
assert price.is_high_tech()
from dateutil.parser import parse
def parse_row(row: List[str]) -> StockPrice:
symbol, date, closing_price = row
return StockPrice(symbol=symbol,
date=parse(date).date(),
closing_price=float(closing_price))
# Now test our function
stock = parse_row(["MSFT", "2018-12-14", "106.03"])
assert stock.symbol == "MSFT"
assert stock.date == datetime.date(2018, 12, 14)
assert stock.closing_price == 106.03
from typing import Optional
import re
def try_parse_row(row: List[str]) -> Optional[StockPrice]:
symbol, date_, closing_price_ = row
# Stock symbol should be all capital letters
if not re.match(r"^[A-Z]+$", symbol):
return None
try:
date = parse(date_).date()
except ValueError:
return None
try:
closing_price = float(closing_price_)
except ValueError:
return None
return StockPrice(symbol, date, closing_price)
# Should return None for errors
assert try_parse_row(["MSFT0", "2018-12-14", "106.03"]) is None
assert try_parse_row(["MSFT", "2018-12--14", "106.03"]) is None
assert try_parse_row(["MSFT", "2018-12-14", "x"]) is None
# But should return same as before if data is good.
assert try_parse_row(["MSFT", "2018-12-14", "106.03"]) == stock
from dateutil.parser import parse
import csv
with open("stocks.csv", "r") as f:
reader = csv.DictReader(f)
rows = [[row['Symbol'], row['Date'], row['Close']]
for row in reader]
# header row is already consumed by DictReader above
maybe_data = [try_parse_row(row) for row in rows]
# Make sure they all loaded successfully:
assert maybe_data
assert all(sp is not None for sp in maybe_data)
# This is just to make mypy happy
data = [sp for sp in maybe_data if sp is not None]
max_aapl_price = max(stock_price.closing_price
for stock_price in data
if stock_price.symbol == "AAPL")
from collections import defaultdict
max_prices: Dict[str, float] = defaultdict(lambda: float('-inf'))
for sp in data:
symbol, closing_price = sp.symbol, sp.closing_price
if closing_price > max_prices[symbol]:
max_prices[symbol] = closing_price
from typing import List
from collections import defaultdict
# Collect the prices by symbol
prices: Dict[str, List[StockPrice]] = defaultdict(list)
for sp in data:
prices[sp.symbol].append(sp)
# Order the prices by date
prices = {symbol: sorted(symbol_prices)
for symbol, symbol_prices in prices.items()}
def pct_change(yesterday: StockPrice, today: StockPrice) -> float:
return today.closing_price / yesterday.closing_price - 1
class DailyChange(NamedTuple):
symbol: str
date: datetime.date
pct_change: float
def day_over_day_changes(prices: List[StockPrice]) -> List[DailyChange]:
"""
Assumes prices are for one stock and are in order
"""
return [DailyChange(symbol=today.symbol,
date=today.date,
pct_change=pct_change(yesterday, today))
for yesterday, today in zip(prices, prices[1:])]
all_changes = [change
for symbol_prices in prices.values()
for change in day_over_day_changes(symbol_prices)]
max_change = max(all_changes, key=lambda change: change.pct_change)
# see, e.g. http://news.cnet.com/2100-1001-202143.html
assert max_change.symbol == 'AAPL'
assert max_change.date == datetime.date(1997, 8, 6)
assert 0.33 < max_change.pct_change < 0.34
min_change = min(all_changes, key=lambda change: change.pct_change)
# see, e.g. http://money.cnn.com/2000/09/29/markets/techwrap/
assert min_change.symbol == 'AAPL'
assert min_change.date == datetime.date(2000, 9, 29)
assert -0.52 < min_change.pct_change < -0.51
changes_by_month: Dict[int, List[DailyChange]] = {month: [] for month in range(1, 13)}
for change in all_changes:
changes_by_month[change.date.month].append(change)
avg_daily_change = {
month: sum(change.pct_change for change in changes) / len(changes)
for month, changes in changes_by_month.items()
}
# October is the best month
assert avg_daily_change[10] == max(avg_daily_change.values())
from scratch.linear_algebra import distance
a_to_b = distance([63, 150], [67, 160]) # 10.77
a_to_c = distance([63, 150], [70, 171]) # 22.14
b_to_c = distance([67, 160], [70, 171]) # 11.40
a_to_b = distance([160, 150], [170.2, 160]) # 14.28
a_to_c = distance([160, 150], [177.8, 171]) # 27.53
b_to_c = distance([170.2, 160], [177.8, 171]) # 13.37
from typing import Tuple
from scratch.linear_algebra import vector_mean
from scratch.statistics import standard_deviation
def scale(data: List[Vector]) -> Tuple[Vector, Vector]:
"""returns the means and standard deviations for each position"""
dim = len(data[0])
means = vector_mean(data)
stdevs = [standard_deviation([vector[i] for vector in data])
for i in range(dim)]
return means, stdevs
vectors = [[-3, -1, 1], [-1, 0, 1], [1, 1, 1]]
means, stdevs = scale(vectors)
assert means == [-1, 0, 1]
assert stdevs == [2, 1, 0]
def rescale(data: List[Vector]) -> List[Vector]:
"""
Rescales the input data so that each position has
mean 0 and standard deviation 1. (Leaves a position
as is if its standard deviation is 0.)
"""
dim = len(data[0])
means, stdevs = scale(data)
# Make a copy of each vector
rescaled = [v[:] for v in data]
for v in rescaled:
for i in range(dim):
if stdevs[i] > 0:
v[i] = (v[i] - means[i]) / stdevs[i]
return rescaled
means, stdevs = scale(rescale(vectors))
assert means == [0, 0, 1]
assert stdevs == [1, 1, 0]
import tqdm
pca_data = [
[20.9666776351559,-13.1138080189357],
[22.7719907680008,-19.8890894944696],
[25.6687103160153,-11.9956004517219],
[18.0019794950564,-18.1989191165133],
[21.3967402102156,-10.8893126308196],
[0.443696899177716,-19.7221132386308],
[29.9198322142127,-14.0958668502427],
[19.0805843080126,-13.7888747608312],
[16.4685063521314,-11.2612927034291],
[21.4597664701884,-12.4740034586705],
[3.87655283720532,-17.575162461771],
[34.5713920556787,-10.705185165378],
[13.3732115747722,-16.7270274494424],
[20.7281704141919,-8.81165591556553],
[24.839851437942,-12.1240962157419],
[20.3019544741252,-12.8725060780898],
[21.9021426929599,-17.3225432396452],
[23.2285885715486,-12.2676568419045],
[28.5749111681851,-13.2616470619453],
[29.2957424128701,-14.6299928678996],
[15.2495527798625,-18.4649714274207],
[26.5567257400476,-9.19794350561966],
[30.1934232346361,-12.6272709845971],
[36.8267446011057,-7.25409849336718],
[32.157416823084,-10.4729534347553],
[5.85964365291694,-22.6573731626132],
[25.7426190674693,-14.8055803854566],
[16.237602636139,-16.5920595763719],
[14.7408608850568,-20.0537715298403],
[6.85907008242544,-18.3965586884781],
[26.5918329233128,-8.92664811750842],
[-11.2216019958228,-27.0519081982856],
[8.93593745011035,-20.8261235122575],
[24.4481258671796,-18.0324012215159],
[2.82048515404903,-22.4208457598703],
[30.8803004755948,-11.455358009593],
[15.4586738236098,-11.1242825084309],
[28.5332537090494,-14.7898744423126],
[40.4830293441052,-2.41946428697183],
[15.7563759125684,-13.5771266003795],
[19.3635588851727,-20.6224770470434],
[13.4212840786467,-19.0238227375766],
[7.77570680426702,-16.6385739839089],
[21.4865983854408,-15.290799330002],
[12.6392705930724,-23.6433305964301],
[12.4746151388128,-17.9720169566614],
[23.4572410437998,-14.602080545086],
[13.6878189833565,-18.9687408182414],
[15.4077465943441,-14.5352487124086],
[20.3356581548895,-10.0883159703702],
[20.7093833689359,-12.6939091236766],
[11.1032293684441,-14.1383848928755],
[17.5048321498308,-9.2338593361801],
[16.3303688220188,-15.1054735529158],
[26.6929062710726,-13.306030567991],
[34.4985678099711,-9.86199941278607],
[39.1374291499406,-10.5621430853401],
[21.9088956482146,-9.95198845621849],
[22.2367457578087,-17.2200123442707],
[10.0032784145577,-19.3557700653426],
[14.045833906665,-15.871937521131],
[15.5640911917607,-18.3396956121887],
[24.4771926581586,-14.8715313479137],
[26.533415556629,-14.693883922494],
[12.8722580202544,-21.2750596021509],
[24.4768291376862,-15.9592080959207],
[18.2230748567433,-14.6541444069985],
[4.1902148367447,-20.6144032528762],
[12.4332594022086,-16.6079789231489],
[20.5483758651873,-18.8512560786321],
[17.8180560451358,-12.5451990696752],
[11.0071081078049,-20.3938092335862],
[8.30560561422449,-22.9503944138682],
[33.9857852657284,-4.8371294974382],
[17.4376502239652,-14.5095976075022],
[29.0379635148943,-14.8461553663227],
[29.1344666599319,-7.70862921632672],
[32.9730697624544,-15.5839178785654],
[13.4211493998212,-20.150199857584],
[11.380538260355,-12.8619410359766],
[28.672631499186,-8.51866271785711],
[16.4296061111902,-23.3326051279759],
[25.7168371582585,-13.8899296143829],
[13.3185154732595,-17.8959160024249],
[3.60832478605376,-25.4023343597712],
[39.5445949652652,-11.466377647931],
[25.1693484426101,-12.2752652925707],
[25.2884257196471,-7.06710309184533],
[6.77665715793125,-22.3947299635571],
[20.1844223778907,-16.0427471125407],
[25.5506805272535,-9.33856532270204],
[25.1495682602477,-7.17350567090738],
[15.6978431006492,-17.5979197162642],
[37.42780451491,-10.843637288504],
[22.974620174842,-10.6171162611686],
[34.6327117468934,-9.26182440487384],
[34.7042513789061,-6.9630753351114],
[15.6563953929008,-17.2196961218915],
[25.2049825789225,-14.1592086208169]
]
from scratch.linear_algebra import subtract
def de_mean(data: List[Vector]) -> List[Vector]:
"""Recenters the data to have mean 0 in every dimension"""
mean = vector_mean(data)
return [subtract(vector, mean) for vector in data]
from scratch.linear_algebra import magnitude
def direction(w: Vector) -> Vector:
mag = magnitude(w)
return [w_i / mag for w_i in w]
from scratch.linear_algebra import dot
def directional_variance(data: List[Vector], w: Vector) -> float:
"""
Returns the variance of x in the direction of w
"""
w_dir = direction(w)
return sum(dot(v, w_dir) ** 2 for v in data)
def directional_variance_gradient(data: List[Vector], w: Vector) -> Vector:
"""
The gradient of directional variance with respect to w
"""
w_dir = direction(w)
return [sum(2 * dot(v, w_dir) * v[i] for v in data)
for i in range(len(w))]
from scratch.gradient_descent import gradient_step
def first_principal_component(data: List[Vector],
n: int = 100,
step_size: float = 0.1) -> Vector:
# Start with a random guess
guess = [1.0 for _ in data[0]]
with tqdm.trange(n) as t:
for _ in t:
dv = directional_variance(data, guess)
gradient = directional_variance_gradient(data, guess)
guess = gradient_step(guess, gradient, step_size)
t.set_description(f"dv: {dv:.3f}")
return direction(guess)
from scratch.linear_algebra import scalar_multiply
def project(v: Vector, w: Vector) -> Vector:
"""return the projection of v onto the direction w"""
projection_length = dot(v, w)
return scalar_multiply(projection_length, w)
from scratch.linear_algebra import subtract
def remove_projection_from_vector(v: Vector, w: Vector) -> Vector:
"""projects v onto w and subtracts the result from v"""
return subtract(v, project(v, w))
def remove_projection(data: List[Vector], w: Vector) -> List[Vector]:
return [remove_projection_from_vector(v, w) for v in data]
def pca(data: List[Vector], num_components: int) -> List[Vector]:
components: List[Vector] = []
for _ in range(num_components):
component = first_principal_component(data)
components.append(component)
data = remove_projection(data, component)
return components
def transform_vector(v: Vector, components: List[Vector]) -> Vector:
return [dot(v, w) for w in components]
def transform(data: List[Vector], components: List[Vector]) -> List[Vector]:
return [transform_vector(v, components) for v in data]
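# Minimal end-to-end sketch of the PCA helpers above (illustrative only; the
# actual run on pca_data happens further down in this file):
#     de_meaned_sketch = de_mean(pca_data)
#     components = pca(de_meaned_sketch, num_components=1)  # gradient-ascent PCA
#     reduced = transform(de_meaned_sketch, components)     # 1-D projections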
def main():
# I don't know why this is necessary
plt.gca().clear()
plt.close()
import random
from scratch.probability import inverse_normal_cdf
random.seed(0)
# uniform between -100 and 100
uniform = [200 * random.random() - 100 for _ in range(10000)]
# normal distribution with mean 0, standard deviation 57
normal = [57 * inverse_normal_cdf(random.random())
for _ in range(10000)]
plot_histogram(uniform, 10, "Uniform Histogram")
plt.savefig('im/working_histogram_uniform.png')
plt.gca().clear()
plt.close()
plot_histogram(normal, 10, "Normal Histogram")
plt.savefig('im/working_histogram_normal.png')
plt.gca().clear()
from scratch.statistics import correlation
print(correlation(xs, ys1)) # about 0.9
print(correlation(xs, ys2)) # about -0.9
from typing import List
# Just some random data to show off correlation scatterplots
num_points = 100
def random_row() -> List[float]:
row = [0.0, 0, 0, 0]
row[0] = random_normal()
row[1] = -5 * row[0] + random_normal()
row[2] = row[0] + row[1] + 5 * random_normal()
row[3] = 6 if row[2] > -2 else 0
return row
random.seed(0)
# each row has 4 points, but really we want the columns
corr_rows = [random_row() for _ in range(num_points)]
corr_data = [list(col) for col in zip(*corr_rows)]
# corr_data is a list of four 100-d vectors
num_vectors = len(corr_data)
fig, ax = plt.subplots(num_vectors, num_vectors)
for i in range(num_vectors):
for j in range(num_vectors):
# Scatter column_j on the x-axis vs column_i on the y-axis,
if i != j: ax[i][j].scatter(corr_data[j], corr_data[i])
# unless i == j, in which case show the series name.
else: ax[i][j].annotate("series " + str(i), (0.5, 0.5),
xycoords='axes fraction',
ha="center", va="center")
# Then hide axis labels except left and bottom charts
if i < num_vectors - 1: ax[i][j].xaxis.set_visible(False)
if j > 0: ax[i][j].yaxis.set_visible(False)
# Fix the bottom right and top left axis labels, which are wrong because
# their charts only have text in them
ax[-1][-1].set_xlim(ax[0][-1].get_xlim())
ax[0][0].set_ylim(ax[0][1].get_ylim())
# plt.show()
plt.savefig('im/working_scatterplot_matrix.png')
plt.gca().clear()
plt.close()
plt.clf()
import csv
data: List[StockPrice] = []
with open("comma_delimited_stock_prices.csv") as f:
reader = csv.reader(f)
for row in reader:
maybe_stock = try_parse_row(row)
if maybe_stock is None:
print(f"skipping invalid row: {row}")
else:
data.append(maybe_stock)
from typing import List
def primes_up_to(n: int) -> List[int]:
primes = [2]
with tqdm.trange(3, n) as t:
for i in t:
# i is prime if no smaller prime divides it.
i_is_prime = not any(i % p == 0 for p in primes)
if i_is_prime:
primes.append(i)
t.set_description(f"{len(primes)} primes")
return primes
my_primes = primes_up_to(100_000)
de_meaned = de_mean(pca_data)
fpc = first_principal_component(de_meaned)
assert 0.923 < fpc[0] < 0.925
assert 0.382 < fpc[1] < 0.384
if __name__ == "__main__": main()
| unlicense |
geektoni/Influenza-Like-Illness-Predictor | data_analysis/generate_weekly_data.py | 1 | 3881 | #!/usr/bin/env python
# Given a complete year's files with data in the form (page, week, visits),
# this script will generate a convenient csv file which stores, for
# each page and for each week of the year, the total number of visits.
#
# Written by Giovanni De Toni (2017)
# Email: giovanni.det at gmail.com
"""Generate year files with page visits
Usage:
generate_weekly_data.py <input> <output> [-c <columns>]
Options:
-c specify custom columns name.
-h, --help
"""
import fileinput
import requests
import json
import pandas as pd
import numpy as np
import datetime
from tqdm import *
from docopt import docopt
# Parse the command line
arguments = docopt(__doc__)
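# Example invocation (hypothetical file names):
#   python generate_weekly_data.py pagecounts_2017.csv weekly_2017.csv -c pages.txt
# docopt exposes the parsed values as arguments["<input>"], arguments["<output>"]
# and arguments["<columns>"] (which is None when -c is not passed).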
# Set up an empty dictionary
all_data={}
# Set up the wikipedia API link to check when a page was created
wiki_url = "https://en.wikipedia.org/w/api.php?action=query&prop=revisions&rvlimit=1&rvprop=timestamp&rvdir=newer&format=json&titles="
# If a columns file is given, add its page names to the dataframe
columns_df = []
if arguments["<columns>"]:
    with open(arguments["<columns>"], "r") as f:
        for line in f:
            c_name = line.replace("\n", "").replace("\\", "")
            columns_df.append(c_name)
            all_data[c_name] = [0 for x in range(53)]
# Future dataframe index ("<year>-<week number>" labels)
index_year = []
# Read from file
with open(arguments["<input>"], "r") as f:
for line in f:
# Split the line given
# 0: page name
# 1: date-hour
# 2: visits count
# 3: page size
total = line.split(",")
# Create the page only with specified columns
found = False
for p in columns_df:
if (p == total[0].replace("\\", "")):
found = True
break
if (found):
# Get date-hour pair
# 0: date
# 1: hour
date = total[1].split("-")
# Generate year month and day.
year = date[0][0:4]
month = date[0][4:6]
day = date[0][6:8]
# Get week number
week_number = datetime.date(int(year), int(month), int(day)).isocalendar()[1]
# Set up an empty list if the key
# is null
if all_data.get(total[0].replace("\\", ""), []) == []:
all_data[total[0].replace("\\", "")] = [0 for x in range(53)]
# Sum the visits
try:
all_data[total[0].replace("\\", "")][int(week_number)-1] += int(total[2]);
except ValueError:
print("[*] Catch value error here: {}".format(total[2]))
    for i in range(1, 54):
        if i < 10:
            number = "0" + str(i)
        else:
            number = str(i)
        index_year.append(year + "-" + number)
# Generate a pandas dataframe with all the data
print(index_year)
df = pd.DataFrame(all_data)
df = df.set_index([index_year])
# Go through all the data and set a week's value to NaN if the page was
# created after that week (the Wikipedia API gives the first revision date)
for c in tqdm(df.columns):
df[c] = df[c].astype(np.float64)
r = requests.get(wiki_url+str(c))
wiki_data = json.loads(r.text)
for i in range(0, 53):
# Generate week number original
week_number = df.index[i]
year_orig = week_number.split("-")[0]
week_orig = week_number.split("-")[1]
for key, value in wiki_data["query"]["pages"].items():
if key != u'-1':
week_str = value["revisions"][0]["timestamp"]
# Generate year month and day.
date = week_str.split("T")[0].split("-");
year = date[0]
month = date[1]
day = date[2]
week = datetime.date(int(year), int(month), int(day)).isocalendar()[1]
if (int(year) > int(year_orig) or int(week) > int(week_orig)):
df.loc[week_number, c] = np.NaN
# Print the dataframe to show the result
print(df)
# Save it to file
df.to_csv(arguments["<output>"], index_label="Week")
| mit |
amolkahat/pandas | pandas/tests/test_register_accessor.py | 6 | 2263 | import contextlib
import pytest
import pandas as pd
import pandas.util.testing as tm
@contextlib.contextmanager
def ensure_removed(obj, attr):
"""Ensure that an attribute added to 'obj' during the test is
removed when we're done"""
try:
yield
finally:
try:
delattr(obj, attr)
except AttributeError:
pass
obj._accessors.discard(attr)
class MyAccessor(object):
def __init__(self, obj):
self.obj = obj
self.item = 'item'
@property
def prop(self):
return self.item
def method(self):
return self.item
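# A usage sketch tying the helpers above together (the parametrized tests below
# exercise the same pattern across Series, DataFrame and Index):
#   with ensure_removed(pd.Series, 'mine'):
#       pd.api.extensions.register_series_accessor('mine')(MyAccessor)
#       assert pd.Series([1, 2]).mine.prop == 'item'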
@pytest.mark.parametrize('obj, registrar', [
(pd.Series, pd.api.extensions.register_series_accessor),
(pd.DataFrame, pd.api.extensions.register_dataframe_accessor),
(pd.Index, pd.api.extensions.register_index_accessor)
])
def test_register(obj, registrar):
with ensure_removed(obj, 'mine'):
before = set(dir(obj))
registrar('mine')(MyAccessor)
assert obj([]).mine.prop == 'item'
after = set(dir(obj))
assert (before ^ after) == {'mine'}
assert 'mine' in obj._accessors
def test_accessor_works():
with ensure_removed(pd.Series, 'mine'):
pd.api.extensions.register_series_accessor('mine')(MyAccessor)
s = pd.Series([1, 2])
assert s.mine.obj is s
assert s.mine.prop == 'item'
assert s.mine.method() == 'item'
def test_overwrite_warns():
# Need to restore mean
mean = pd.Series.mean
try:
with tm.assert_produces_warning(UserWarning) as w:
pd.api.extensions.register_series_accessor('mean')(MyAccessor)
s = pd.Series([1, 2])
assert s.mean.prop == 'item'
msg = str(w[0].message)
assert 'mean' in msg
assert 'MyAccessor' in msg
assert 'Series' in msg
finally:
pd.Series.mean = mean
def test_raises_attribute_error():
with ensure_removed(pd.Series, 'bad'):
@pd.api.extensions.register_series_accessor("bad")
class Bad(object):
def __init__(self, data):
raise AttributeError("whoops")
with tm.assert_raises_regex(AttributeError, "whoops"):
pd.Series([]).bad
| bsd-3-clause |
matthewalbani/scipy | scipy/special/basic.py | 9 | 71091 | #
# Author: Travis Oliphant, 2002
#
from __future__ import division, print_function, absolute_import
import warnings
import numpy as np
import math
from scipy._lib.six import xrange
from numpy import (pi, asarray, floor, isscalar, iscomplex, real, imag, sqrt,
where, mgrid, sin, place, issubdtype, extract,
less, inexact, nan, zeros, atleast_1d, sinc)
from ._ufuncs import (ellipkm1, mathieu_a, mathieu_b, iv, jv, gamma,
psi, _zeta, hankel1, hankel2, yv, kv, _gammaln,
ndtri, errprint, poch, binom, hyp0f1)
from . import specfun
from . import orthogonal
from ._comb import _comb_int
__all__ = ['agm', 'ai_zeros', 'assoc_laguerre', 'bei_zeros', 'beip_zeros',
'ber_zeros', 'bernoulli', 'berp_zeros', 'bessel_diff_formula',
'bi_zeros', 'clpmn', 'comb', 'digamma', 'diric', 'ellipk',
'erf_zeros', 'erfcinv', 'erfinv', 'errprint', 'euler', 'factorial',
'factorialk', 'factorial2', 'fresnel_zeros',
'fresnelc_zeros', 'fresnels_zeros', 'gamma', 'gammaln', 'h1vp',
'h2vp', 'hankel1', 'hankel2', 'hyp0f1', 'iv', 'ivp', 'jn_zeros',
'jnjnp_zeros', 'jnp_zeros', 'jnyn_zeros', 'jv', 'jvp', 'kei_zeros',
'keip_zeros', 'kelvin_zeros', 'ker_zeros', 'kerp_zeros', 'kv',
'kvp', 'lmbda', 'lpmn', 'lpn', 'lqmn', 'lqn', 'mathieu_a',
'mathieu_b', 'mathieu_even_coef', 'mathieu_odd_coef', 'ndtri',
'obl_cv_seq', 'pbdn_seq', 'pbdv_seq', 'pbvv_seq', 'perm',
'polygamma', 'pro_cv_seq', 'psi', 'riccati_jn', 'riccati_yn',
'sinc', 'sph_in', 'sph_inkn',
'sph_jn', 'sph_jnyn', 'sph_kn', 'sph_yn', 'y0_zeros', 'y1_zeros',
'y1p_zeros', 'yn_zeros', 'ynp_zeros', 'yv', 'yvp', 'zeta',
'SpecialFunctionWarning']
class SpecialFunctionWarning(Warning):
"""Warning that can be issued with ``errprint(True)``"""
pass
warnings.simplefilter("always", category=SpecialFunctionWarning)
def diric(x, n):
"""Periodic sinc function, also called the Dirichlet function.
The Dirichlet function is defined as::
diric(x) = sin(x * n/2) / (n * sin(x / 2)),
where `n` is a positive integer.
Parameters
----------
x : array_like
Input data
n : int
Integer defining the periodicity.
Returns
-------
diric : ndarray
Examples
--------
>>> from scipy import special
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-8*np.pi, 8*np.pi, num=201)
>>> plt.figure(figsize=(8, 8));
>>> for idx, n in enumerate([2, 3, 4, 9]):
... plt.subplot(2, 2, idx+1)
... plt.plot(x, special.diric(x, n))
... plt.title('diric, n={}'.format(n))
>>> plt.show()
The following example demonstrates that `diric` gives the magnitudes
(modulo the sign and scaling) of the Fourier coefficients of a
rectangular pulse.
Suppress output of values that are effectively 0:
>>> np.set_printoptions(suppress=True)
Create a signal `x` of length `m` with `k` ones:
>>> m = 8
>>> k = 3
>>> x = np.zeros(m)
>>> x[:k] = 1
Use the FFT to compute the Fourier transform of `x`, and
inspect the magnitudes of the coefficients:
>>> np.abs(np.fft.fft(x))
array([ 3. , 2.41421356, 1. , 0.41421356, 1. ,
0.41421356, 1. , 2.41421356])
Now find the same values (up to sign) using `diric`. We multiply
by `k` to account for the different scaling conventions of
`numpy.fft.fft` and `diric`:
>>> theta = np.linspace(0, 2*np.pi, m, endpoint=False)
>>> k * special.diric(theta, k)
array([ 3. , 2.41421356, 1. , -0.41421356, -1. ,
-0.41421356, 1. , 2.41421356])
"""
x, n = asarray(x), asarray(n)
n = asarray(n + (x-x))
x = asarray(x + (n-n))
if issubdtype(x.dtype, inexact):
ytype = x.dtype
else:
ytype = float
y = zeros(x.shape, ytype)
# empirical minval for 32, 64 or 128 bit float computations
# where sin(x/2) < minval, result is fixed at +1 or -1
if np.finfo(ytype).eps < 1e-18:
minval = 1e-11
elif np.finfo(ytype).eps < 1e-15:
minval = 1e-7
else:
minval = 1e-3
mask1 = (n <= 0) | (n != floor(n))
place(y, mask1, nan)
x = x / 2
denom = sin(x)
mask2 = (1-mask1) & (abs(denom) < minval)
xsub = extract(mask2, x)
nsub = extract(mask2, n)
zsub = xsub / pi
place(y, mask2, pow(-1, np.round(zsub)*(nsub-1)))
mask = (1-mask1) & (1-mask2)
xsub = extract(mask, x)
nsub = extract(mask, n)
dsub = extract(mask, denom)
place(y, mask, sin(nsub*xsub)/(nsub*dsub))
return y
def gammaln(x):
"""
Logarithm of the absolute value of the Gamma function for real inputs.
Parameters
----------
x : array-like
Values on the real line at which to compute ``gammaln``
Returns
-------
gammaln : ndarray
Values of ``gammaln`` at x.
See Also
--------
gammasgn : sign of the gamma function
loggamma : principal branch of the logarithm of the gamma function
Notes
-----
When used in conjunction with `gammasgn`, this function is useful
for working in logspace on the real axis without having to deal with
complex numbers, via the relation ``exp(gammaln(x)) = gammasgn(x)*gamma(x)``.
Note that `gammaln` currently accepts complex-valued inputs, but it is not
the same function as for real-valued inputs, and the branch is not
well-defined --- using `gammaln` with complex is deprecated and will be
disallowed in future Scipy versions.
For complex-valued log-gamma, use `loggamma` instead of `gammaln`.
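    Examples
    --------
    A quick numerical check, using the fact that gamma(5) = 4! = 24:
    >>> import numpy as np
    >>> from scipy.special import gammaln
    >>> np.allclose(gammaln(5), np.log(24.0))
    True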
"""
if np.iscomplexobj(x):
warnings.warn(("Use of gammaln for complex arguments is "
"deprecated as of scipy 0.18.0. Use "
"scipy.special.loggamma instead."),
DeprecationWarning)
return _gammaln(x)
def jnjnp_zeros(nt):
"""Compute zeros of integer-order Bessel functions Jn and Jn'.
Results are arranged in order of the magnitudes of the zeros.
Parameters
----------
nt : int
Number (<=1200) of zeros to compute
Returns
-------
zo[l-1] : ndarray
Value of the lth zero of Jn(x) and Jn'(x). Of length `nt`.
n[l-1] : ndarray
Order of the Jn(x) or Jn'(x) associated with lth zero. Of length `nt`.
m[l-1] : ndarray
Serial number of the zeros of Jn(x) or Jn'(x) associated
with lth zero. Of length `nt`.
t[l-1] : ndarray
0 if lth zero in zo is zero of Jn(x), 1 if it is a zero of Jn'(x). Of
length `nt`.
See Also
--------
jn_zeros, jnp_zeros : to get separated arrays of zeros.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 5.
http://jin.ece.illinois.edu/specfunc.html
"""
if not isscalar(nt) or (floor(nt) != nt) or (nt > 1200):
raise ValueError("Number must be integer <= 1200.")
nt = int(nt)
n, m, t, zo = specfun.jdzo(nt)
return zo[1:nt+1], n[:nt], m[:nt], t[:nt]
def jnyn_zeros(n, nt):
"""Compute nt zeros of Bessel functions Jn(x), Jn'(x), Yn(x), and Yn'(x).
Returns 4 arrays of length `nt`, corresponding to the first `nt` zeros of
Jn(x), Jn'(x), Yn(x), and Yn'(x), respectively.
Parameters
----------
n : int
Order of the Bessel functions
nt : int
Number (<=1200) of zeros to compute
See jn_zeros, jnp_zeros, yn_zeros, ynp_zeros to get separate arrays.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 5.
http://jin.ece.illinois.edu/specfunc.html
"""
if not (isscalar(nt) and isscalar(n)):
raise ValueError("Arguments must be scalars.")
if (floor(n) != n) or (floor(nt) != nt):
raise ValueError("Arguments must be integers.")
if (nt <= 0):
raise ValueError("nt > 0")
return specfun.jyzo(abs(n), nt)
def jn_zeros(n, nt):
"""Compute zeros of integer-order Bessel function Jn(x).
Parameters
----------
n : int
Order of Bessel function
nt : int
Number of zeros to return
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 5.
http://jin.ece.illinois.edu/specfunc.html
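    Examples
    --------
    The computed zeros can be verified by evaluating Jn there; the values
    should vanish to machine precision:
    >>> import numpy as np
    >>> from scipy.special import jn_zeros, jv
    >>> z = jn_zeros(0, 3)
    >>> np.allclose(jv(0, z), 0.0)
    True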
"""
return jnyn_zeros(n, nt)[0]
def jnp_zeros(n, nt):
"""Compute zeros of integer-order Bessel function derivative Jn'(x).
Parameters
----------
n : int
Order of Bessel function
nt : int
Number of zeros to return
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 5.
http://jin.ece.illinois.edu/specfunc.html
"""
return jnyn_zeros(n, nt)[1]
def yn_zeros(n, nt):
"""Compute zeros of integer-order Bessel function Yn(x).
Parameters
----------
n : int
Order of Bessel function
nt : int
Number of zeros to return
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 5.
http://jin.ece.illinois.edu/specfunc.html
"""
return jnyn_zeros(n, nt)[2]
def ynp_zeros(n, nt):
"""Compute zeros of integer-order Bessel function derivative Yn'(x).
Parameters
----------
n : int
Order of Bessel function
nt : int
Number of zeros to return
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 5.
http://jin.ece.illinois.edu/specfunc.html
"""
return jnyn_zeros(n, nt)[3]
def y0_zeros(nt, complex=False):
"""Compute nt zeros of Bessel function Y0(z), and derivative at each zero.
The derivatives are given by Y0'(z0) = -Y1(z0) at each zero z0.
Parameters
----------
nt : int
Number of zeros to return
complex : bool, default False
Set to False to return only the real zeros; set to True to return only
the complex zeros with negative real part and positive imaginary part.
Note that the complex conjugates of the latter are also zeros of the
function, but are not returned by this routine.
Returns
-------
z0n : ndarray
Location of nth zero of Y0(z)
y0pz0n : ndarray
Value of derivative Y0'(z0) for nth zero
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 5.
http://jin.ece.illinois.edu/specfunc.html
"""
if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
raise ValueError("Arguments must be scalar positive integer.")
kf = 0
kc = not complex
return specfun.cyzo(nt, kf, kc)
def y1_zeros(nt, complex=False):
"""Compute nt zeros of Bessel function Y1(z), and derivative at each zero.
The derivatives are given by Y1'(z1) = Y0(z1) at each zero z1.
Parameters
----------
nt : int
Number of zeros to return
complex : bool, default False
Set to False to return only the real zeros; set to True to return only
the complex zeros with negative real part and positive imaginary part.
Note that the complex conjugates of the latter are also zeros of the
function, but are not returned by this routine.
Returns
-------
z1n : ndarray
Location of nth zero of Y1(z)
y1pz1n : ndarray
Value of derivative Y1'(z1) for nth zero
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 5.
http://jin.ece.illinois.edu/specfunc.html
"""
if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
raise ValueError("Arguments must be scalar positive integer.")
kf = 1
kc = not complex
return specfun.cyzo(nt, kf, kc)
def y1p_zeros(nt, complex=False):
"""Compute nt zeros of Bessel derivative Y1'(z), and value at each zero.
The values are given by Y1(z1) at each z1 where Y1'(z1)=0.
Parameters
----------
nt : int
Number of zeros to return
complex : bool, default False
Set to False to return only the real zeros; set to True to return only
the complex zeros with negative real part and positive imaginary part.
Note that the complex conjugates of the latter are also zeros of the
function, but are not returned by this routine.
Returns
-------
z1pn : ndarray
Location of nth zero of Y1'(z)
y1z1pn : ndarray
Value of derivative Y1(z1) for nth zero
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 5.
http://jin.ece.illinois.edu/specfunc.html
"""
if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
raise ValueError("Arguments must be scalar positive integer.")
kf = 2
kc = not complex
return specfun.cyzo(nt, kf, kc)
def _bessel_diff_formula(v, z, n, L, phase):
# from AMS55.
# L(v, z) = J(v, z), Y(v, z), H1(v, z), H2(v, z), phase = -1
# L(v, z) = I(v, z) or exp(v*pi*i)K(v, z), phase = 1
# For K, you can pull out the exp((v-k)*pi*i) into the caller
v = asarray(v)
p = 1.0
s = L(v-n, z)
for i in xrange(1, n+1):
        p = phase * (p * (n-i+1)) / i   # = phase**i * choose(n, i)
s += p*L(v-n + i*2, z)
return s / (2.**n)
bessel_diff_formula = np.deprecate(_bessel_diff_formula,
message="bessel_diff_formula is a private function, do not use it!")
def jvp(v, z, n=1):
"""Compute nth derivative of Bessel function Jv(z) with respect to `z`.
Parameters
----------
v : float
Order of Bessel function
z : complex
Argument at which to evaluate the derivative
n : int, default 1
Order of derivative
Notes
-----
    The derivative is computed using the relation DLMF 10.6.7 [2]_.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 5.
http://jin.ece.illinois.edu/specfunc.html
.. [2] NIST Digital Library of Mathematical Functions.
http://dlmf.nist.gov/10.6.E7
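    Examples
    --------
    A quick check of the first-derivative identity J0'(x) = -J1(x):
    >>> import numpy as np
    >>> from scipy.special import jvp, jv
    >>> x = 1.5
    >>> np.allclose(jvp(0, x), -jv(1, x))
    True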
"""
if not isinstance(n, int) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if n == 0:
return jv(v, z)
else:
return _bessel_diff_formula(v, z, n, jv, -1)
def yvp(v, z, n=1):
"""Compute nth derivative of Bessel function Yv(z) with respect to `z`.
Parameters
----------
v : float
Order of Bessel function
z : complex
Argument at which to evaluate the derivative
n : int, default 1
Order of derivative
Notes
-----
    The derivative is computed using the relation DLMF 10.6.7 [2]_.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 5.
http://jin.ece.illinois.edu/specfunc.html
.. [2] NIST Digital Library of Mathematical Functions.
http://dlmf.nist.gov/10.6.E7
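    Examples
    --------
    A quick check of the identity Y0'(x) = -Y1(x):
    >>> import numpy as np
    >>> from scipy.special import yvp, yv
    >>> x = 1.5
    >>> np.allclose(yvp(0, x), -yv(1, x))
    True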
"""
if not isinstance(n, int) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if n == 0:
return yv(v, z)
else:
return _bessel_diff_formula(v, z, n, yv, -1)
def kvp(v, z, n=1):
"""Compute nth derivative of real-order modified Bessel function Kv(z)
Kv(z) is the modified Bessel function of the second kind.
Derivative is calculated with respect to `z`.
Parameters
----------
v : array_like of float
Order of Bessel function
z : array_like of complex
Argument at which to evaluate the derivative
n : int
Order of derivative. Default is first derivative.
Returns
-------
out : ndarray
The results
Examples
--------
Calculate multiple values at order 5:
>>> from scipy.special import kvp
>>> kvp(5, (1, 2, 3+5j))
array([-1849.0354+0.j , -25.7735+0.j , -0.0307+0.0875j])
Calculate for a single value at multiple orders:
>>> kvp((4, 4.5, 5), 1)
array([ -184.0309, -568.9585, -1849.0354])
Notes
-----
    The derivative is computed using the relation DLMF 10.29.5 [2]_.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 6.
http://jin.ece.illinois.edu/specfunc.html
.. [2] NIST Digital Library of Mathematical Functions.
http://dlmf.nist.gov/10.29.E5
"""
if not isinstance(n, int) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if n == 0:
return kv(v, z)
else:
return (-1)**n * _bessel_diff_formula(v, z, n, kv, 1)
def ivp(v, z, n=1):
"""Compute nth derivative of modified Bessel function Iv(z) with respect
to `z`.
Parameters
----------
v : array_like of float
Order of Bessel function
z : array_like of complex
Argument at which to evaluate the derivative
n : int, default 1
Order of derivative
Notes
-----
    The derivative is computed using the relation DLMF 10.29.5 [2]_.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 6.
http://jin.ece.illinois.edu/specfunc.html
.. [2] NIST Digital Library of Mathematical Functions.
http://dlmf.nist.gov/10.29.E5
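    Examples
    --------
    A quick check of the identity I0'(x) = I1(x):
    >>> import numpy as np
    >>> from scipy.special import ivp, iv
    >>> x = 1.5
    >>> np.allclose(ivp(0, x), iv(1, x))
    True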
"""
if not isinstance(n, int) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if n == 0:
return iv(v, z)
else:
return _bessel_diff_formula(v, z, n, iv, 1)
def h1vp(v, z, n=1):
"""Compute nth derivative of Hankel function H1v(z) with respect to `z`.
Parameters
----------
v : float
Order of Hankel function
z : complex
Argument at which to evaluate the derivative
n : int, default 1
Order of derivative
Notes
-----
    The derivative is computed using the relation DLMF 10.6.7 [2]_.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 5.
http://jin.ece.illinois.edu/specfunc.html
.. [2] NIST Digital Library of Mathematical Functions.
http://dlmf.nist.gov/10.6.E7
"""
if not isinstance(n, int) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if n == 0:
return hankel1(v, z)
else:
return _bessel_diff_formula(v, z, n, hankel1, -1)
def h2vp(v, z, n=1):
"""Compute nth derivative of Hankel function H2v(z) with respect to `z`.
Parameters
----------
v : float
Order of Hankel function
z : complex
Argument at which to evaluate the derivative
n : int, default 1
Order of derivative
Notes
-----
    The derivative is computed using the relation DLMF 10.6.7 [2]_.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 5.
http://jin.ece.illinois.edu/specfunc.html
.. [2] NIST Digital Library of Mathematical Functions.
http://dlmf.nist.gov/10.6.E7
"""
if not isinstance(n, int) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if n == 0:
return hankel2(v, z)
else:
return _bessel_diff_formula(v, z, n, hankel2, -1)
@np.deprecate(message="scipy.special.sph_jn is deprecated in scipy 0.18.0. "
"Use scipy.special.spherical_jn instead. "
"Note that the new function has a different signature.")
def sph_jn(n, z):
"""Compute spherical Bessel function jn(z) and derivative.
This function computes the value and first derivative of jn(z) for all
orders up to and including n.
Parameters
----------
n : int
Maximum order of jn to compute
z : complex
Argument at which to evaluate
Returns
-------
jn : ndarray
Value of j0(z), ..., jn(z)
jnp : ndarray
First derivative j0'(z), ..., jn'(z)
See also
--------
spherical_jn
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 8.
http://jin.ece.illinois.edu/specfunc.html
"""
if not (isscalar(n) and isscalar(z)):
raise ValueError("arguments must be scalars.")
if (n != floor(n)) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if (n < 1):
n1 = 1
else:
n1 = n
if iscomplex(z):
nm, jn, jnp, yn, ynp = specfun.csphjy(n1, z)
else:
nm, jn, jnp = specfun.sphj(n1, z)
return jn[:(n+1)], jnp[:(n+1)]
@np.deprecate(message="scipy.special.sph_yn is deprecated in scipy 0.18.0. "
"Use scipy.special.spherical_yn instead. "
"Note that the new function has a different signature.")
def sph_yn(n, z):
"""Compute spherical Bessel function yn(z) and derivative.
This function computes the value and first derivative of yn(z) for all
orders up to and including n.
Parameters
----------
n : int
Maximum order of yn to compute
z : complex
Argument at which to evaluate
Returns
-------
yn : ndarray
Value of y0(z), ..., yn(z)
ynp : ndarray
First derivative y0'(z), ..., yn'(z)
See also
--------
spherical_yn
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 8.
http://jin.ece.illinois.edu/specfunc.html
"""
if not (isscalar(n) and isscalar(z)):
raise ValueError("arguments must be scalars.")
if (n != floor(n)) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if (n < 1):
n1 = 1
else:
n1 = n
if iscomplex(z) or less(z, 0):
nm, jn, jnp, yn, ynp = specfun.csphjy(n1, z)
else:
nm, yn, ynp = specfun.sphy(n1, z)
return yn[:(n+1)], ynp[:(n+1)]
@np.deprecate(message="scipy.special.sph_jnyn is deprecated in scipy 0.18.0. "
"Use scipy.special.spherical_jn and "
"scipy.special.spherical_yn instead. "
"Note that the new function has a different signature.")
def sph_jnyn(n, z):
"""Compute spherical Bessel functions jn(z) and yn(z) and derivatives.
This function computes the value and first derivative of jn(z) and yn(z)
for all orders up to and including n.
Parameters
----------
n : int
Maximum order of jn and yn to compute
z : complex
Argument at which to evaluate
Returns
-------
jn : ndarray
Value of j0(z), ..., jn(z)
jnp : ndarray
First derivative j0'(z), ..., jn'(z)
yn : ndarray
Value of y0(z), ..., yn(z)
ynp : ndarray
First derivative y0'(z), ..., yn'(z)
See also
--------
spherical_jn
spherical_yn
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 8.
http://jin.ece.illinois.edu/specfunc.html
"""
if not (isscalar(n) and isscalar(z)):
raise ValueError("arguments must be scalars.")
if (n != floor(n)) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if (n < 1):
n1 = 1
else:
n1 = n
if iscomplex(z) or less(z, 0):
nm, jn, jnp, yn, ynp = specfun.csphjy(n1, z)
else:
nm, yn, ynp = specfun.sphy(n1, z)
nm, jn, jnp = specfun.sphj(n1, z)
return jn[:(n+1)], jnp[:(n+1)], yn[:(n+1)], ynp[:(n+1)]
@np.deprecate(message="scipy.special.sph_in is deprecated in scipy 0.18.0. "
"Use scipy.special.spherical_in instead. "
"Note that the new function has a different signature.")
def sph_in(n, z):
"""Compute spherical Bessel function in(z) and derivative.
This function computes the value and first derivative of in(z) for all
orders up to and including n.
Parameters
----------
n : int
Maximum order of in to compute
z : complex
Argument at which to evaluate
Returns
-------
in : ndarray
Value of i0(z), ..., in(z)
inp : ndarray
First derivative i0'(z), ..., in'(z)
See also
--------
spherical_in
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 8.
http://jin.ece.illinois.edu/specfunc.html
"""
if not (isscalar(n) and isscalar(z)):
raise ValueError("arguments must be scalars.")
if (n != floor(n)) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if (n < 1):
n1 = 1
else:
n1 = n
if iscomplex(z):
nm, In, Inp, kn, knp = specfun.csphik(n1, z)
else:
nm, In, Inp = specfun.sphi(n1, z)
return In[:(n+1)], Inp[:(n+1)]
@np.deprecate(message="scipy.special.sph_kn is deprecated in scipy 0.18.0. "
"Use scipy.special.spherical_kn instead. "
"Note that the new function has a different signature.")
def sph_kn(n, z):
"""Compute spherical Bessel function kn(z) and derivative.
This function computes the value and first derivative of kn(z) for all
orders up to and including n.
Parameters
----------
n : int
Maximum order of kn to compute
z : complex
Argument at which to evaluate
Returns
-------
kn : ndarray
Value of k0(z), ..., kn(z)
knp : ndarray
First derivative k0'(z), ..., kn'(z)
See also
--------
spherical_kn
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 8.
http://jin.ece.illinois.edu/specfunc.html
"""
if not (isscalar(n) and isscalar(z)):
raise ValueError("arguments must be scalars.")
if (n != floor(n)) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if (n < 1):
n1 = 1
else:
n1 = n
if iscomplex(z) or less(z, 0):
nm, In, Inp, kn, knp = specfun.csphik(n1, z)
else:
nm, kn, knp = specfun.sphk(n1, z)
return kn[:(n+1)], knp[:(n+1)]
@np.deprecate(message="scipy.special.sph_inkn is deprecated in scipy 0.18.0. "
"Use scipy.special.spherical_in and "
"scipy.special.spherical_kn instead. "
"Note that the new function has a different signature.")
def sph_inkn(n, z):
"""Compute spherical Bessel functions in(z), kn(z), and derivatives.
This function computes the value and first derivative of in(z) and kn(z)
for all orders up to and including n.
Parameters
----------
n : int
Maximum order of in and kn to compute
z : complex
Argument at which to evaluate
Returns
-------
in : ndarray
Value of i0(z), ..., in(z)
inp : ndarray
First derivative i0'(z), ..., in'(z)
kn : ndarray
Value of k0(z), ..., kn(z)
knp : ndarray
First derivative k0'(z), ..., kn'(z)
See also
--------
spherical_in
spherical_kn
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 8.
http://jin.ece.illinois.edu/specfunc.html
"""
if not (isscalar(n) and isscalar(z)):
raise ValueError("arguments must be scalars.")
if (n != floor(n)) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if (n < 1):
n1 = 1
else:
n1 = n
if iscomplex(z) or less(z, 0):
nm, In, Inp, kn, knp = specfun.csphik(n1, z)
else:
nm, In, Inp = specfun.sphi(n1, z)
nm, kn, knp = specfun.sphk(n1, z)
return In[:(n+1)], Inp[:(n+1)], kn[:(n+1)], knp[:(n+1)]
def riccati_jn(n, x):
r"""Compute Ricatti-Bessel function of the first kind and its derivative.
    The Riccati-Bessel function of the first kind is defined as :math:`x
j_n(x)`, where :math:`j_n` is the spherical Bessel function of the first
kind of order :math:`n`.
This function computes the value and first derivative of the
    Riccati-Bessel function for all orders up to and including `n`.
Parameters
----------
n : int
Maximum order of function to compute
x : float
Argument at which to evaluate
Returns
-------
jn : ndarray
Value of j0(x), ..., jn(x)
jnp : ndarray
First derivative j0'(x), ..., jn'(x)
Notes
-----
The computation is carried out via backward recurrence, using the
relation DLMF 10.51.1 [2]_.
Wrapper for a Fortran routine created by Shanjie Zhang and Jianming
Jin [1]_.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
.. [2] NIST Digital Library of Mathematical Functions.
http://dlmf.nist.gov/10.51.E1
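    Examples
    --------
    The order-0 Riccati-Bessel function is :math:`x j_0(x) = \sin x`, which the
    computed values can be checked against:
    >>> import numpy as np
    >>> from scipy.special import riccati_jn
    >>> x = 1.0
    >>> jn, jnp = riccati_jn(1, x)
    >>> np.allclose(jn[0], np.sin(x))
    True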
"""
if not (isscalar(n) and isscalar(x)):
raise ValueError("arguments must be scalars.")
if (n != floor(n)) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if (n == 0):
n1 = 1
else:
n1 = n
nm, jn, jnp = specfun.rctj(n1, x)
return jn[:(n+1)], jnp[:(n+1)]
def riccati_yn(n, x):
"""Compute Ricatti-Bessel function of the second kind and its derivative.
    The Riccati-Bessel function of the second kind is defined as :math:`x
y_n(x)`, where :math:`y_n` is the spherical Bessel function of the second
kind of order :math:`n`.
This function computes the value and first derivative of the function for
all orders up to and including `n`.
Parameters
----------
n : int
Maximum order of function to compute
x : float
Argument at which to evaluate
Returns
-------
yn : ndarray
Value of y0(x), ..., yn(x)
ynp : ndarray
First derivative y0'(x), ..., yn'(x)
Notes
-----
The computation is carried out via ascending recurrence, using the
relation DLMF 10.51.1 [2]_.
Wrapper for a Fortran routine created by Shanjie Zhang and Jianming
Jin [1]_.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
.. [2] NIST Digital Library of Mathematical Functions.
http://dlmf.nist.gov/10.51.E1
"""
if not (isscalar(n) and isscalar(x)):
raise ValueError("arguments must be scalars.")
if (n != floor(n)) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if (n == 0):
n1 = 1
else:
n1 = n
nm, jn, jnp = specfun.rcty(n1, x)
return jn[:(n+1)], jnp[:(n+1)]
def erfinv(y):
"""Inverse function for erf.
"""
return ndtri((y+1)/2.0)/sqrt(2)
def erfcinv(y):
"""Inverse function for erfc.
"""
return -ndtri(0.5*y)/sqrt(2)
def erf_zeros(nt):
"""Compute nt complex zeros of error function erf(z).
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if (floor(nt) != nt) or (nt <= 0) or not isscalar(nt):
raise ValueError("Argument must be positive scalar integer.")
return specfun.cerzo(nt)
def fresnelc_zeros(nt):
"""Compute nt complex zeros of cosine Fresnel integral C(z).
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if (floor(nt) != nt) or (nt <= 0) or not isscalar(nt):
raise ValueError("Argument must be positive scalar integer.")
return specfun.fcszo(1, nt)
def fresnels_zeros(nt):
"""Compute nt complex zeros of sine Fresnel integral S(z).
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if (floor(nt) != nt) or (nt <= 0) or not isscalar(nt):
raise ValueError("Argument must be positive scalar integer.")
return specfun.fcszo(2, nt)
def fresnel_zeros(nt):
"""Compute nt complex zeros of sine and cosine Fresnel integrals S(z) and C(z).
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if (floor(nt) != nt) or (nt <= 0) or not isscalar(nt):
raise ValueError("Argument must be positive scalar integer.")
return specfun.fcszo(2, nt), specfun.fcszo(1, nt)
def assoc_laguerre(x, n, k=0.0):
"""Compute the generalized (associated) Laguerre polynomial of degree n and order k.
The polynomial :math:`L^{(k)}_n(x)` is orthogonal over ``[0, inf)``,
with weighting function ``exp(-x) * x**k`` with ``k > -1``.
Notes
-----
`assoc_laguerre` is a simple wrapper around `eval_genlaguerre`, with
reversed argument order ``(x, n, k=0.0) --> (n, k, x)``.
"""
return orthogonal.eval_genlaguerre(n, k, x)
digamma = psi
def polygamma(n, x):
"""Polygamma function n.
This is the nth derivative of the digamma (psi) function.
Parameters
----------
n : array_like of int
The order of the derivative of `psi`.
x : array_like
Where to evaluate the polygamma function.
Returns
-------
polygamma : ndarray
The result.
Examples
--------
>>> from scipy import special
>>> x = [2, 3, 25.5]
>>> special.polygamma(1, x)
array([ 0.64493407, 0.39493407, 0.03999467])
>>> special.polygamma(0, x) == special.psi(x)
array([ True, True, True], dtype=bool)
"""
n, x = asarray(n), asarray(x)
fac2 = (-1.0)**(n+1) * gamma(n+1.0) * zeta(n+1, x)
return where(n == 0, psi(x), fac2)
def mathieu_even_coef(m, q):
r"""Fourier coefficients for even Mathieu and modified Mathieu functions.
The Fourier series of the even solutions of the Mathieu differential
equation are of the form
.. math:: \mathrm{ce}_{2n}(z, q) = \sum_{k=0}^{\infty} A_{(2n)}^{(2k)} \cos 2kz
.. math:: \mathrm{ce}_{2n+1}(z, q) = \sum_{k=0}^{\infty} A_{(2n+1)}^{(2k+1)} \cos (2k+1)z
This function returns the coefficients :math:`A_{(2n)}^{(2k)}` for even
input m=2n, and the coefficients :math:`A_{(2n+1)}^{(2k+1)}` for odd input
m=2n+1.
Parameters
----------
m : int
Order of Mathieu functions. Must be non-negative.
q : float (>=0)
Parameter of Mathieu functions. Must be non-negative.
Returns
-------
Ak : ndarray
Even or odd Fourier coefficients, corresponding to even or odd m.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
.. [2] NIST Digital Library of Mathematical Functions
http://dlmf.nist.gov/28.4#i
"""
if not (isscalar(m) and isscalar(q)):
raise ValueError("m and q must be scalars.")
if (q < 0):
raise ValueError("q >=0")
if (m != floor(m)) or (m < 0):
raise ValueError("m must be an integer >=0.")
if (q <= 1):
qm = 7.5 + 56.1*sqrt(q) - 134.7*q + 90.7*sqrt(q)*q
else:
qm = 17.0 + 3.1*sqrt(q) - .126*q + .0037*sqrt(q)*q
km = int(qm + 0.5*m)
if km > 251:
print("Warning, too many predicted coefficients.")
kd = 1
m = int(floor(m))
if m % 2:
kd = 2
a = mathieu_a(m, q)
fc = specfun.fcoef(kd, m, q, a)
return fc[:km]
def mathieu_odd_coef(m, q):
r"""Fourier coefficients for even Mathieu and modified Mathieu functions.
The Fourier series of the odd solutions of the Mathieu differential
equation are of the form
.. math:: \mathrm{se}_{2n+1}(z, q) = \sum_{k=0}^{\infty} B_{(2n+1)}^{(2k+1)} \sin (2k+1)z
.. math:: \mathrm{se}_{2n+2}(z, q) = \sum_{k=0}^{\infty} B_{(2n+2)}^{(2k+2)} \sin (2k+2)z
This function returns the coefficients :math:`B_{(2n+2)}^{(2k+2)}` for even
input m=2n+2, and the coefficients :math:`B_{(2n+1)}^{(2k+1)}` for odd
input m=2n+1.
Parameters
----------
m : int
Order of Mathieu functions. Must be non-negative.
q : float (>=0)
Parameter of Mathieu functions. Must be non-negative.
Returns
-------
Bk : ndarray
Even or odd Fourier coefficients, corresponding to even or odd m.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if not (isscalar(m) and isscalar(q)):
raise ValueError("m and q must be scalars.")
if (q < 0):
raise ValueError("q >=0")
if (m != floor(m)) or (m <= 0):
raise ValueError("m must be an integer > 0")
if (q <= 1):
qm = 7.5 + 56.1*sqrt(q) - 134.7*q + 90.7*sqrt(q)*q
else:
qm = 17.0 + 3.1*sqrt(q) - .126*q + .0037*sqrt(q)*q
km = int(qm + 0.5*m)
if km > 251:
print("Warning, too many predicted coefficients.")
kd = 4
m = int(floor(m))
if m % 2:
kd = 3
b = mathieu_b(m, q)
fc = specfun.fcoef(kd, m, q, b)
return fc[:km]
def lpmn(m, n, z):
"""Associated Legendre function of the first kind, Pmn(z).
Computes the associated Legendre function of the first kind of order m and
degree n, ``Pmn(z)`` = :math:`P_n^m(z)`, and its derivative, ``Pmn'(z)``.
Returns two arrays of size ``(m+1, n+1)`` containing ``Pmn(z)`` and
``Pmn'(z)`` for all orders from ``0..m`` and degrees from ``0..n``.
This function takes a real argument ``z``. For complex arguments ``z``
use clpmn instead.
Parameters
----------
m : int
``|m| <= n``; the order of the Legendre function.
n : int
where ``n >= 0``; the degree of the Legendre function. Often
called ``l`` (lower case L) in descriptions of the associated
Legendre function
z : float
Input value.
Returns
-------
Pmn_z : (m+1, n+1) array
Values for all orders 0..m and degrees 0..n
Pmn_d_z : (m+1, n+1) array
Derivatives for all orders 0..m and degrees 0..n
See Also
--------
clpmn: associated Legendre functions of the first kind for complex z
Notes
-----
In the interval (-1, 1), Ferrer's function of the first kind is
returned. The phase convention used for the intervals (1, inf)
and (-inf, -1) is such that the result is always real.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
.. [2] NIST Digital Library of Mathematical Functions
http://dlmf.nist.gov/14.3
"""
if not isscalar(m) or (abs(m) > n):
raise ValueError("m must be <= n.")
if not isscalar(n) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if not isscalar(z):
raise ValueError("z must be scalar.")
if iscomplex(z):
raise ValueError("Argument must be real. Use clpmn instead.")
if (m < 0):
mp = -m
mf, nf = mgrid[0:mp+1, 0:n+1]
sv = errprint(0)
if abs(z) < 1:
# Ferrer function; DLMF 14.9.3
fixarr = where(mf > nf, 0.0,
(-1)**mf * gamma(nf-mf+1) / gamma(nf+mf+1))
else:
# Match to clpmn; DLMF 14.9.13
fixarr = where(mf > nf, 0.0, gamma(nf-mf+1) / gamma(nf+mf+1))
sv = errprint(sv)
else:
mp = m
p, pd = specfun.lpmn(mp, n, z)
if (m < 0):
p = p * fixarr
pd = pd * fixarr
return p, pd
def clpmn(m, n, z, type=3):
"""Associated Legendre function of the first kind, Pmn(z).
Computes the associated Legendre function of the first kind of order m and
degree n, ``Pmn(z)`` = :math:`P_n^m(z)`, and its derivative, ``Pmn'(z)``.
Returns two arrays of size ``(m+1, n+1)`` containing ``Pmn(z)`` and
``Pmn'(z)`` for all orders from ``0..m`` and degrees from ``0..n``.
Parameters
----------
m : int
``|m| <= n``; the order of the Legendre function.
n : int
where ``n >= 0``; the degree of the Legendre function. Often
called ``l`` (lower case L) in descriptions of the associated
Legendre function
z : float or complex
Input value.
type : int, optional
takes values 2 or 3
2: cut on the real axis ``|x| > 1``
3: cut on the real axis ``-1 < x < 1`` (default)
Returns
-------
Pmn_z : (m+1, n+1) array
Values for all orders ``0..m`` and degrees ``0..n``
Pmn_d_z : (m+1, n+1) array
Derivatives for all orders ``0..m`` and degrees ``0..n``
See Also
--------
lpmn: associated Legendre functions of the first kind for real z
Notes
-----
By default, i.e. for ``type=3``, phase conventions are chosen according
to [1]_ such that the function is analytic. The cut lies on the interval
(-1, 1). Approaching the cut from above or below in general yields a phase
factor with respect to Ferrer's function of the first kind
(cf. `lpmn`).
For ``type=2`` a cut at ``|x| > 1`` is chosen. Approaching the real values
on the interval (-1, 1) in the complex plane yields Ferrer's function
of the first kind.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
.. [2] NIST Digital Library of Mathematical Functions
http://dlmf.nist.gov/14.21
"""
if not isscalar(m) or (abs(m) > n):
raise ValueError("m must be <= n.")
if not isscalar(n) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if not isscalar(z):
raise ValueError("z must be scalar.")
if not(type == 2 or type == 3):
raise ValueError("type must be either 2 or 3.")
if (m < 0):
mp = -m
mf, nf = mgrid[0:mp+1, 0:n+1]
sv = errprint(0)
if type == 2:
fixarr = where(mf > nf, 0.0,
(-1)**mf * gamma(nf-mf+1) / gamma(nf+mf+1))
else:
fixarr = where(mf > nf, 0.0, gamma(nf-mf+1) / gamma(nf+mf+1))
sv = errprint(sv)
else:
mp = m
p, pd = specfun.clpmn(mp, n, real(z), imag(z), type)
if (m < 0):
p = p * fixarr
pd = pd * fixarr
return p, pd
def lqmn(m, n, z):
"""Associated Legendre function of the second kind, Qmn(z).
Computes the associated Legendre function of the second kind of order m and
degree n, ``Qmn(z)`` = :math:`Q_n^m(z)`, and its derivative, ``Qmn'(z)``.
Returns two arrays of size ``(m+1, n+1)`` containing ``Qmn(z)`` and
``Qmn'(z)`` for all orders from ``0..m`` and degrees from ``0..n``.
Parameters
----------
m : int
``|m| <= n``; the order of the Legendre function.
n : int
where ``n >= 0``; the degree of the Legendre function. Often
called ``l`` (lower case L) in descriptions of the associated
Legendre function
z : complex
Input value.
Returns
-------
Qmn_z : (m+1, n+1) array
Values for all orders 0..m and degrees 0..n
Qmn_d_z : (m+1, n+1) array
Derivatives for all orders 0..m and degrees 0..n
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if not isscalar(m) or (m < 0):
raise ValueError("m must be a non-negative integer.")
if not isscalar(n) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if not isscalar(z):
raise ValueError("z must be scalar.")
m = int(m)
n = int(n)
# Ensure neither m nor n == 0
mm = max(1, m)
nn = max(1, n)
if iscomplex(z):
q, qd = specfun.clqmn(mm, nn, z)
else:
q, qd = specfun.lqmn(mm, nn, z)
return q[:(m+1), :(n+1)], qd[:(m+1), :(n+1)]
def bernoulli(n):
"""Bernoulli numbers B0..Bn (inclusive).
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if not isscalar(n) or (n < 0):
raise ValueError("n must be a non-negative integer.")
n = int(n)
if (n < 2):
n1 = 2
else:
n1 = n
return specfun.bernob(int(n1))[:(n+1)]
def euler(n):
"""Euler numbers E0..En (inclusive).
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if not isscalar(n) or (n < 0):
raise ValueError("n must be a non-negative integer.")
n = int(n)
if (n < 2):
n1 = 2
else:
n1 = n
return specfun.eulerb(n1)[:(n+1)]
def lpn(n, z):
"""Legendre functions of the first kind, Pn(z).
Compute sequence of Legendre functions of the first kind (polynomials),
Pn(z) and derivatives for all degrees from 0 to n (inclusive).
See also special.legendre for polynomial class.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
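    Examples
    --------
    The first three Legendre polynomials evaluated at z = 0.5 are P0 = 1,
    P1 = 0.5 and P2 = (3*0.25 - 1)/2 = -0.125:
    >>> import numpy as np
    >>> from scipy.special import lpn
    >>> pn, pd = lpn(2, 0.5)
    >>> np.allclose(pn, [1.0, 0.5, -0.125])
    True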
"""
if not (isscalar(n) and isscalar(z)):
raise ValueError("arguments must be scalars.")
if (n != floor(n)) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if (n < 1):
n1 = 1
else:
n1 = n
if iscomplex(z):
pn, pd = specfun.clpn(n1, z)
else:
pn, pd = specfun.lpn(n1, z)
return pn[:(n+1)], pd[:(n+1)]
def lqn(n, z):
"""Legendre functions of the second kind, Qn(z).
Compute sequence of Legendre functions of the second kind, Qn(z) and
derivatives for all degrees from 0 to n (inclusive).
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if not (isscalar(n) and isscalar(z)):
raise ValueError("arguments must be scalars.")
if (n != floor(n)) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if (n < 1):
n1 = 1
else:
n1 = n
if iscomplex(z):
qn, qd = specfun.clqn(n1, z)
else:
qn, qd = specfun.lqnb(n1, z)
return qn[:(n+1)], qd[:(n+1)]
def ai_zeros(nt):
"""
Compute `nt` zeros and values of the Airy function Ai and its derivative.
Computes the first `nt` zeros, `a`, of the Airy function Ai(x);
first `nt` zeros, `ap`, of the derivative of the Airy function Ai'(x);
the corresponding values Ai(a');
and the corresponding values Ai'(a).
Parameters
----------
nt : int
Number of zeros to compute
Returns
-------
a : ndarray
First `nt` zeros of Ai(x)
ap : ndarray
First `nt` zeros of Ai'(x)
ai : ndarray
Values of Ai(x) evaluated at first `nt` zeros of Ai'(x)
aip : ndarray
Values of Ai'(x) evaluated at first `nt` zeros of Ai(x)
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
kf = 1
if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
raise ValueError("nt must be a positive integer scalar.")
return specfun.airyzo(nt, kf)
def bi_zeros(nt):
"""
Compute `nt` zeros and values of the Airy function Bi and its derivative.
Computes the first `nt` zeros, b, of the Airy function Bi(x);
first `nt` zeros, b', of the derivative of the Airy function Bi'(x);
the corresponding values Bi(b');
and the corresponding values Bi'(b).
Parameters
----------
nt : int
Number of zeros to compute
Returns
-------
b : ndarray
First `nt` zeros of Bi(x)
bp : ndarray
First `nt` zeros of Bi'(x)
bi : ndarray
Values of Bi(x) evaluated at first `nt` zeros of Bi'(x)
bip : ndarray
Values of Bi'(x) evaluated at first `nt` zeros of Bi(x)
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
kf = 2
if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
raise ValueError("nt must be a positive integer scalar.")
return specfun.airyzo(nt, kf)
def lmbda(v, x):
r"""Jahnke-Emden Lambda function, Lambdav(x).
This function is defined as [2]_,
.. math:: \Lambda_v(x) = \Gamma(v+1) \frac{J_v(x)}{(x/2)^v},
where :math:`\Gamma` is the gamma function and :math:`J_v` is the
Bessel function of the first kind.
Parameters
----------
v : float
Order of the Lambda function
x : float
Value at which to evaluate the function and derivatives
Returns
-------
vl : ndarray
Values of Lambda_vi(x), for vi=v-int(v), vi=1+v-int(v), ..., vi=v.
dl : ndarray
Derivatives Lambda_vi'(x), for vi=v-int(v), vi=1+v-int(v), ..., vi=v.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
.. [2] Jahnke, E. and Emde, F. "Tables of Functions with Formulae and
Curves" (4th ed.), Dover, 1945
"""
if not (isscalar(v) and isscalar(x)):
raise ValueError("arguments must be scalars.")
if (v < 0):
raise ValueError("argument must be > 0.")
n = int(v)
v0 = v - n
if (n < 1):
n1 = 1
else:
n1 = n
v1 = n1 + v0
if (v != floor(v)):
vm, vl, dl = specfun.lamv(v1, x)
else:
vm, vl, dl = specfun.lamn(v1, x)
return vl[:(n+1)], dl[:(n+1)]
def pbdv_seq(v, x):
"""Parabolic cylinder functions Dv(x) and derivatives.
Parameters
----------
v : float
Order of the parabolic cylinder function
x : float
Value at which to evaluate the function and derivatives
Returns
-------
dv : ndarray
Values of D_vi(x), for vi=v-int(v), vi=1+v-int(v), ..., vi=v.
dp : ndarray
Derivatives D_vi'(x), for vi=v-int(v), vi=1+v-int(v), ..., vi=v.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 13.
http://jin.ece.illinois.edu/specfunc.html
"""
if not (isscalar(v) and isscalar(x)):
raise ValueError("arguments must be scalars.")
n = int(v)
v0 = v-n
if (n < 1):
n1 = 1
else:
n1 = n
v1 = n1 + v0
dv, dp, pdf, pdd = specfun.pbdv(v1, x)
return dv[:n1+1], dp[:n1+1]
def pbvv_seq(v, x):
"""Parabolic cylinder functions Vv(x) and derivatives.
Parameters
----------
v : float
Order of the parabolic cylinder function
x : float
Value at which to evaluate the function and derivatives
Returns
-------
dv : ndarray
Values of V_vi(x), for vi=v-int(v), vi=1+v-int(v), ..., vi=v.
dp : ndarray
Derivatives V_vi'(x), for vi=v-int(v), vi=1+v-int(v), ..., vi=v.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 13.
http://jin.ece.illinois.edu/specfunc.html
"""
if not (isscalar(v) and isscalar(x)):
raise ValueError("arguments must be scalars.")
n = int(v)
v0 = v-n
if (n <= 1):
n1 = 1
else:
n1 = n
v1 = n1 + v0
dv, dp, pdf, pdd = specfun.pbvv(v1, x)
return dv[:n1+1], dp[:n1+1]
def pbdn_seq(n, z):
"""Parabolic cylinder functions Dn(z) and derivatives.
Parameters
----------
n : int
Order of the parabolic cylinder function
z : complex
Value at which to evaluate the function and derivatives
Returns
-------
dv : ndarray
Values of D_i(z), for i=0, ..., i=n.
dp : ndarray
Derivatives D_i'(z), for i=0, ..., i=n.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 13.
http://jin.ece.illinois.edu/specfunc.html
"""
if not (isscalar(n) and isscalar(z)):
raise ValueError("arguments must be scalars.")
if (floor(n) != n):
raise ValueError("n must be an integer.")
if (abs(n) <= 1):
n1 = 1
else:
n1 = n
cpb, cpd = specfun.cpbdn(n1, z)
return cpb[:n1+1], cpd[:n1+1]
def ber_zeros(nt):
"""Compute nt zeros of the Kelvin function ber(x).
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
raise ValueError("nt must be positive integer scalar.")
return specfun.klvnzo(nt, 1)
def bei_zeros(nt):
"""Compute nt zeros of the Kelvin function bei(x).
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
raise ValueError("nt must be positive integer scalar.")
return specfun.klvnzo(nt, 2)
def ker_zeros(nt):
"""Compute nt zeros of the Kelvin function ker(x).
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
raise ValueError("nt must be positive integer scalar.")
return specfun.klvnzo(nt, 3)
def kei_zeros(nt):
"""Compute nt zeros of the Kelvin function kei(x).
"""
if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
raise ValueError("nt must be positive integer scalar.")
return specfun.klvnzo(nt, 4)
def berp_zeros(nt):
"""Compute nt zeros of the Kelvin function ber'(x).
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
raise ValueError("nt must be positive integer scalar.")
return specfun.klvnzo(nt, 5)
def beip_zeros(nt):
"""Compute nt zeros of the Kelvin function bei'(x).
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
raise ValueError("nt must be positive integer scalar.")
return specfun.klvnzo(nt, 6)
def kerp_zeros(nt):
"""Compute nt zeros of the Kelvin function ker'(x).
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
raise ValueError("nt must be positive integer scalar.")
return specfun.klvnzo(nt, 7)
def keip_zeros(nt):
"""Compute nt zeros of the Kelvin function kei'(x).
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
raise ValueError("nt must be positive integer scalar.")
return specfun.klvnzo(nt, 8)
def kelvin_zeros(nt):
"""Compute nt zeros of all Kelvin functions.
Returned in a length-8 tuple of arrays of length nt. The tuple contains
the arrays of zeros of (ber, bei, ker, kei, ber', bei', ker', kei').
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
raise ValueError("nt must be positive integer scalar.")
return (specfun.klvnzo(nt, 1),
specfun.klvnzo(nt, 2),
specfun.klvnzo(nt, 3),
specfun.klvnzo(nt, 4),
specfun.klvnzo(nt, 5),
specfun.klvnzo(nt, 6),
specfun.klvnzo(nt, 7),
specfun.klvnzo(nt, 8))
def pro_cv_seq(m, n, c):
"""Characteristic values for prolate spheroidal wave functions.
Compute a sequence of characteristic values for the prolate
spheroidal wave functions for mode m and n'=m..n and spheroidal
parameter c.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if not (isscalar(m) and isscalar(n) and isscalar(c)):
raise ValueError("Arguments must be scalars.")
if (n != floor(n)) or (m != floor(m)):
raise ValueError("Modes must be integers.")
if (n-m > 199):
raise ValueError("Difference between n and m is too large.")
maxL = n-m+1
return specfun.segv(m, n, c, 1)[1][:maxL]
def obl_cv_seq(m, n, c):
"""Characteristic values for oblate spheroidal wave functions.
Compute a sequence of characteristic values for the oblate
spheroidal wave functions for mode m and n'=m..n and spheroidal
parameter c.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if not (isscalar(m) and isscalar(n) and isscalar(c)):
raise ValueError("Arguments must be scalars.")
if (n != floor(n)) or (m != floor(m)):
raise ValueError("Modes must be integers.")
if (n-m > 199):
raise ValueError("Difference between n and m is too large.")
maxL = n-m+1
return specfun.segv(m, n, c, -1)[1][:maxL]
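# Illustrative usage sketch (not part of the original module): both sequence
# helpers return n - m + 1 characteristic values, one for each mode n' = m..n.
#     >>> len(pro_cv_seq(0, 3, 1.0))
#     4
#     >>> len(obl_cv_seq(0, 3, 1.0))
#     4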
def ellipk(m):
"""Complete elliptic integral of the first kind.
This function is defined as
.. math:: K(m) = \\int_0^{\\pi/2} [1 - m \\sin(t)^2]^{-1/2} dt
Parameters
----------
m : array_like
The parameter of the elliptic integral.
Returns
-------
K : array_like
Value of the elliptic integral.
Notes
-----
For more precision around point m = 1, use `ellipkm1`, which this
function calls.
See Also
--------
ellipkm1 : Complete elliptic integral of the first kind around m = 1
ellipkinc : Incomplete elliptic integral of the first kind
ellipe : Complete elliptic integral of the second kind
ellipeinc : Incomplete elliptic integral of the second kind
"""
return ellipkm1(1 - asarray(m))
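# Illustrative check (not part of the original module): for m = 0 the
# integrand in the docstring is identically 1, so K(0) = pi/2.
#     >>> ellipk(0)   # approximately pi / 2 ~= 1.5707963267948966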
def agm(a, b):
"""Arithmetic, Geometric Mean.
Start with a_0=a and b_0=b and iteratively compute
a_{n+1} = (a_n+b_n)/2
b_{n+1} = sqrt(a_n*b_n)
until a_n=b_n. The result is agm(a, b)
agm(a, b)=agm(b, a)
agm(a, a) = a
min(a, b) < agm(a, b) < max(a, b)
"""
s = a + b + 0.0
return (pi / 4) * s / ellipkm1(4 * a * b / s ** 2)
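# Illustrative check (not part of the original module), using the identity
# agm(a, a) = a stated in the docstring: with a = b the argument of ellipkm1
# is exactly 1, ellipkm1(1) = K(0) = pi/2, and the expression reduces to a.
#     >>> agm(3.0, 3.0)   # 3.0, up to floating point rounding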
def comb(N, k, exact=False, repetition=False):
"""The number of combinations of N things taken k at a time.
This is often expressed as "N choose k".
Parameters
----------
N : int, ndarray
Number of things.
k : int, ndarray
Number of elements taken.
exact : bool, optional
If `exact` is False, then floating point precision is used, otherwise
exact long integer is computed.
repetition : bool, optional
If `repetition` is True, then the number of combinations with
repetition is computed.
Returns
-------
val : int, ndarray
The total number of combinations.
See Also
--------
binom : Binomial coefficient ufunc
Notes
-----
- Array arguments accepted only for exact=False case.
- If k > N, N < 0, or k < 0, then a 0 is returned.
Examples
--------
>>> from scipy.special import comb
>>> k = np.array([3, 4])
>>> n = np.array([10, 10])
>>> comb(n, k, exact=False)
array([ 120., 210.])
>>> comb(10, 3, exact=True)
120L
>>> comb(10, 3, exact=True, repetition=True)
220L
"""
if repetition:
return comb(N + k - 1, k, exact)
if exact:
return _comb_int(N, k)
else:
k, N = asarray(k), asarray(N)
cond = (k <= N) & (N >= 0) & (k >= 0)
vals = binom(N, k)
if isinstance(vals, np.ndarray):
vals[~cond] = 0
elif not cond:
vals = np.float64(0)
return vals
def perm(N, k, exact=False):
"""Permutations of N things taken k at a time, i.e., k-permutations of N.
It's also known as "partial permutations".
Parameters
----------
N : int, ndarray
Number of things.
k : int, ndarray
Number of elements taken.
exact : bool, optional
If `exact` is False, then floating point precision is used, otherwise
exact long integer is computed.
Returns
-------
val : int, ndarray
The number of k-permutations of N.
Notes
-----
- Array arguments accepted only for exact=False case.
- If k > N, N < 0, or k < 0, then a 0 is returned.
Examples
--------
>>> from scipy.special import perm
>>> k = np.array([3, 4])
>>> n = np.array([10, 10])
>>> perm(n, k)
array([ 720., 5040.])
>>> perm(10, 3, exact=True)
720
"""
if exact:
if (k > N) or (N < 0) or (k < 0):
return 0
val = 1
for i in xrange(N - k + 1, N + 1):
val *= i
return val
else:
k, N = asarray(k), asarray(N)
cond = (k <= N) & (N >= 0) & (k >= 0)
vals = poch(N - k + 1, k)
if isinstance(vals, np.ndarray):
vals[~cond] = 0
elif not cond:
vals = np.float64(0)
return vals
# http://stackoverflow.com/a/16327037/125507
def _range_prod(lo, hi):
"""
Product of a range of numbers.
Returns the product of
lo * (lo+1) * (lo+2) * ... * (hi-2) * (hi-1) * hi
= hi! / (lo-1)!
Breaks into smaller products first for speed:
_range_prod(2, 9) = ((2*3)*(4*5))*((6*7)*(8*9))
"""
if lo + 1 < hi:
mid = (hi + lo) // 2
return _range_prod(lo, mid) * _range_prod(mid + 1, hi)
if lo == hi:
return lo
return lo * hi
def factorial(n, exact=False):
"""
The factorial of a number or array of numbers.
The factorial of non-negative integer `n` is the product of all
positive integers less than or equal to `n`::
n! = n * (n - 1) * (n - 2) * ... * 1
Parameters
----------
n : int or array_like of ints
Input values. If ``n < 0``, the return value is 0.
exact : bool, optional
If True, calculate the answer exactly using long integer arithmetic.
If False, result is approximated in floating point rapidly using the
`gamma` function.
Default is False.
Returns
-------
nf : float or int or ndarray
Factorial of `n`, as integer or float depending on `exact`.
Notes
-----
For arrays with ``exact=True``, the factorial is computed only once, for
the largest input, with each other result computed in the process.
The output dtype is increased to ``int64`` or ``object`` if necessary.
With ``exact=False`` the factorial is approximated using the gamma
function:
    .. math:: n! = \\Gamma(n+1)
Examples
--------
>>> from scipy.special import factorial
>>> arr = np.array([3, 4, 5])
>>> factorial(arr, exact=False)
array([ 6., 24., 120.])
>>> factorial(arr, exact=True)
array([ 6, 24, 120])
>>> factorial(5, exact=True)
120L
"""
if exact:
if np.ndim(n) == 0:
return 0 if n < 0 else math.factorial(n)
else:
n = asarray(n)
un = np.unique(n).astype(object)
# Convert to object array of long ints if np.int can't handle size
if un[-1] > 20:
dt = object
elif un[-1] > 12:
dt = np.int64
else:
dt = np.int
out = np.empty_like(n, dtype=dt)
# Handle invalid/trivial values
un = un[un > 1]
out[n < 2] = 1
out[n < 0] = 0
# Calculate products of each range of numbers
if un.size:
val = math.factorial(un[0])
out[n == un[0]] = val
for i in xrange(len(un) - 1):
prev = un[i] + 1
current = un[i + 1]
val *= _range_prod(prev, current)
out[n == current] = val
return out
else:
n = asarray(n)
vals = gamma(n + 1)
return where(n >= 0, vals, 0)
def factorial2(n, exact=False):
"""Double factorial.
This is the factorial with every second value skipped. E.g., ``7!! = 7 * 5
* 3 * 1``. It can be approximated numerically as::
        n!! = special.gamma(n/2+1)*2**((n+1)/2)/sqrt(pi)  n odd
= 2**(n/2) * (n/2)! n even
Parameters
----------
n : int or array_like
Calculate ``n!!``. Arrays are only supported with `exact` set
to False. If ``n < 0``, the return value is 0.
exact : bool, optional
The result can be approximated rapidly using the gamma-formula
above (default). If `exact` is set to True, calculate the
answer exactly using integer arithmetic.
Returns
-------
nff : float or int
Double factorial of `n`, as an int or a float depending on
`exact`.
Examples
--------
>>> from scipy.special import factorial2
>>> factorial2(7, exact=False)
array(105.00000000000001)
>>> factorial2(7, exact=True)
105L
"""
if exact:
if n < -1:
return 0
if n <= 0:
return 1
val = 1
for k in xrange(n, 0, -2):
val *= k
return val
else:
n = asarray(n)
vals = zeros(n.shape, 'd')
cond1 = (n % 2) & (n >= -1)
cond2 = (1-(n % 2)) & (n >= -1)
oddn = extract(cond1, n)
evenn = extract(cond2, n)
nd2o = oddn / 2.0
nd2e = evenn / 2.0
place(vals, cond1, gamma(nd2o + 1) / sqrt(pi) * pow(2.0, nd2o + 0.5))
place(vals, cond2, gamma(nd2e + 1) * pow(2.0, nd2e))
return vals
def factorialk(n, k, exact=True):
"""Multifactorial of n of order k, n(!!...!).
This is the multifactorial of n skipping k values. For example,
factorialk(17, 4) = 17!!!! = 17 * 13 * 9 * 5 * 1
In particular, for any integer ``n``, we have
factorialk(n, 1) = factorial(n)
factorialk(n, 2) = factorial2(n)
Parameters
----------
n : int
Calculate multifactorial. If `n` < 0, the return value is 0.
k : int
Order of multifactorial.
exact : bool, optional
If exact is set to True, calculate the answer exactly using
integer arithmetic.
Returns
-------
val : int
Multifactorial of `n`.
Raises
------
NotImplementedError
Raises when exact is False
Examples
--------
>>> from scipy.special import factorialk
>>> factorialk(5, 1, exact=True)
120L
>>> factorialk(5, 3, exact=True)
10L
"""
if exact:
if n < 1-k:
return 0
if n <= 0:
return 1
val = 1
for j in xrange(n, 0, -k):
val = val*j
return val
else:
raise NotImplementedError
def zeta(x, q=None, out=None):
r"""
Riemann zeta function.
The two-argument version is the Hurwitz zeta function:
.. math:: \zeta(x, q) = \sum_{k=0}^{\infty} \frac{1}{(k + q)^x},
Riemann zeta function corresponds to ``q = 1``.
See also
--------
zetac
"""
if q is None:
q = 1
return _zeta(x, q, out)
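# Illustrative check (not part of the original module): the Riemann zeta
# function at x = 2 equals pi**2 / 6.
#     >>> zeta(2)   # approximately 1.6449340668...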
| bsd-3-clause |
nrhine1/scikit-learn | sklearn/utils/tests/test_validation.py | 133 | 18339 | """Tests for input validation functions"""
import warnings
from tempfile import NamedTemporaryFile
from itertools import product
import numpy as np
from numpy.testing import assert_array_equal
import scipy.sparse as sp
from nose.tools import assert_raises, assert_true, assert_false, assert_equal
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_warns
from sklearn.utils import as_float_array, check_array, check_symmetric
from sklearn.utils import check_X_y
from sklearn.utils.mocking import MockDataFrame
from sklearn.utils.estimator_checks import NotAnArray
from sklearn.random_projection import sparse_random_matrix
from sklearn.linear_model import ARDRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestRegressor
from sklearn.svm import SVR
from sklearn.datasets import make_blobs
from sklearn.utils.validation import (
NotFittedError,
has_fit_parameter,
check_is_fitted,
check_consistent_length,
DataConversionWarning,
)
from sklearn.utils.testing import assert_raise_message
def test_as_float_array():
# Test function for as_float_array
X = np.ones((3, 10), dtype=np.int32)
X = X + np.arange(10, dtype=np.int32)
# Checks that the return type is ok
X2 = as_float_array(X, copy=False)
np.testing.assert_equal(X2.dtype, np.float32)
# Another test
X = X.astype(np.int64)
X2 = as_float_array(X, copy=True)
# Checking that the array wasn't overwritten
assert_true(as_float_array(X, False) is not X)
# Checking that the new type is ok
np.testing.assert_equal(X2.dtype, np.float64)
# Here, X is of the right type, it shouldn't be modified
X = np.ones((3, 2), dtype=np.float32)
assert_true(as_float_array(X, copy=False) is X)
# Test that if X is fortran ordered it stays
X = np.asfortranarray(X)
assert_true(np.isfortran(as_float_array(X, copy=True)))
# Test the copy parameter with some matrices
matrices = [
np.matrix(np.arange(5)),
sp.csc_matrix(np.arange(5)).toarray(),
sparse_random_matrix(10, 10, density=0.10).toarray()
]
for M in matrices:
N = as_float_array(M, copy=True)
N[0, 0] = np.nan
assert_false(np.isnan(M).any())
def test_np_matrix():
# Confirm that input validation code does not return np.matrix
X = np.arange(12).reshape(3, 4)
assert_false(isinstance(as_float_array(X), np.matrix))
assert_false(isinstance(as_float_array(np.matrix(X)), np.matrix))
assert_false(isinstance(as_float_array(sp.csc_matrix(X)), np.matrix))
def test_memmap():
# Confirm that input validation code doesn't copy memory mapped arrays
asflt = lambda x: as_float_array(x, copy=False)
with NamedTemporaryFile(prefix='sklearn-test') as tmp:
M = np.memmap(tmp, shape=100, dtype=np.float32)
M[:] = 0
for f in (check_array, np.asarray, asflt):
X = f(M)
X[:] = 1
assert_array_equal(X.ravel(), M)
X[:] = 0
def test_ordering():
# Check that ordering is enforced correctly by validation utilities.
# We need to check each validation utility, because a 'copy' without
# 'order=K' will kill the ordering.
X = np.ones((10, 5))
for A in X, X.T:
for copy in (True, False):
B = check_array(A, order='C', copy=copy)
assert_true(B.flags['C_CONTIGUOUS'])
B = check_array(A, order='F', copy=copy)
assert_true(B.flags['F_CONTIGUOUS'])
if copy:
assert_false(A is B)
X = sp.csr_matrix(X)
X.data = X.data[::-1]
assert_false(X.data.flags['C_CONTIGUOUS'])
def test_check_array():
# accept_sparse == None
# raise error on sparse inputs
X = [[1, 2], [3, 4]]
X_csr = sp.csr_matrix(X)
assert_raises(TypeError, check_array, X_csr)
# ensure_2d
X_array = check_array([0, 1, 2])
assert_equal(X_array.ndim, 2)
X_array = check_array([0, 1, 2], ensure_2d=False)
assert_equal(X_array.ndim, 1)
# don't allow ndim > 3
X_ndim = np.arange(8).reshape(2, 2, 2)
assert_raises(ValueError, check_array, X_ndim)
check_array(X_ndim, allow_nd=True) # doesn't raise
# force_all_finite
X_inf = np.arange(4).reshape(2, 2).astype(np.float)
X_inf[0, 0] = np.inf
assert_raises(ValueError, check_array, X_inf)
check_array(X_inf, force_all_finite=False) # no raise
# nan check
X_nan = np.arange(4).reshape(2, 2).astype(np.float)
X_nan[0, 0] = np.nan
assert_raises(ValueError, check_array, X_nan)
check_array(X_inf, force_all_finite=False) # no raise
# dtype and order enforcement.
X_C = np.arange(4).reshape(2, 2).copy("C")
X_F = X_C.copy("F")
X_int = X_C.astype(np.int)
X_float = X_C.astype(np.float)
Xs = [X_C, X_F, X_int, X_float]
dtypes = [np.int32, np.int, np.float, np.float32, None, np.bool, object]
orders = ['C', 'F', None]
copys = [True, False]
for X, dtype, order, copy in product(Xs, dtypes, orders, copys):
X_checked = check_array(X, dtype=dtype, order=order, copy=copy)
if dtype is not None:
assert_equal(X_checked.dtype, dtype)
else:
assert_equal(X_checked.dtype, X.dtype)
if order == 'C':
assert_true(X_checked.flags['C_CONTIGUOUS'])
assert_false(X_checked.flags['F_CONTIGUOUS'])
elif order == 'F':
assert_true(X_checked.flags['F_CONTIGUOUS'])
assert_false(X_checked.flags['C_CONTIGUOUS'])
if copy:
assert_false(X is X_checked)
else:
# doesn't copy if it was already good
if (X.dtype == X_checked.dtype and
X_checked.flags['C_CONTIGUOUS'] == X.flags['C_CONTIGUOUS']
and X_checked.flags['F_CONTIGUOUS'] == X.flags['F_CONTIGUOUS']):
assert_true(X is X_checked)
# allowed sparse != None
X_csc = sp.csc_matrix(X_C)
X_coo = X_csc.tocoo()
X_dok = X_csc.todok()
X_int = X_csc.astype(np.int)
X_float = X_csc.astype(np.float)
Xs = [X_csc, X_coo, X_dok, X_int, X_float]
accept_sparses = [['csr', 'coo'], ['coo', 'dok']]
for X, dtype, accept_sparse, copy in product(Xs, dtypes, accept_sparses,
copys):
with warnings.catch_warnings(record=True) as w:
X_checked = check_array(X, dtype=dtype,
accept_sparse=accept_sparse, copy=copy)
if (dtype is object or sp.isspmatrix_dok(X)) and len(w):
message = str(w[0].message)
messages = ["object dtype is not supported by sparse matrices",
"Can't check dok sparse matrix for nan or inf."]
assert_true(message in messages)
else:
assert_equal(len(w), 0)
if dtype is not None:
assert_equal(X_checked.dtype, dtype)
else:
assert_equal(X_checked.dtype, X.dtype)
if X.format in accept_sparse:
# no change if allowed
assert_equal(X.format, X_checked.format)
else:
# got converted
assert_equal(X_checked.format, accept_sparse[0])
if copy:
assert_false(X is X_checked)
else:
# doesn't copy if it was already good
if (X.dtype == X_checked.dtype and X.format == X_checked.format):
assert_true(X is X_checked)
# other input formats
# convert lists to arrays
X_dense = check_array([[1, 2], [3, 4]])
assert_true(isinstance(X_dense, np.ndarray))
# raise on too deep lists
assert_raises(ValueError, check_array, X_ndim.tolist())
check_array(X_ndim.tolist(), allow_nd=True) # doesn't raise
# convert weird stuff to arrays
X_no_array = NotAnArray(X_dense)
result = check_array(X_no_array)
assert_true(isinstance(result, np.ndarray))
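# Quick reference, mirroring the cases exercised above (illustrative only,
# not an exhaustive list of check_array keyword arguments):
#     X_checked = check_array(X, accept_sparse=['csr', 'coo'], dtype=np.float,
#                             order='C', copy=False, ensure_2d=True,
#                             allow_nd=False, force_all_finite=True)
# check_array returns a validated array (or sparse matrix) and raises
# TypeError/ValueError when the input violates the requested constraints.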
def test_check_array_pandas_dtype_object_conversion():
# test that data-frame like objects with dtype object
# get converted
X = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.object)
X_df = MockDataFrame(X)
assert_equal(check_array(X_df).dtype.kind, "f")
assert_equal(check_array(X_df, ensure_2d=False).dtype.kind, "f")
# smoke-test against dataframes with column named "dtype"
X_df.dtype = "Hans"
assert_equal(check_array(X_df, ensure_2d=False).dtype.kind, "f")
def test_check_array_dtype_stability():
# test that lists with ints don't get converted to floats
X = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
assert_equal(check_array(X).dtype.kind, "i")
assert_equal(check_array(X, ensure_2d=False).dtype.kind, "i")
def test_check_array_dtype_warning():
X_int_list = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
X_float64 = np.asarray(X_int_list, dtype=np.float64)
X_float32 = np.asarray(X_int_list, dtype=np.float32)
X_int64 = np.asarray(X_int_list, dtype=np.int64)
X_csr_float64 = sp.csr_matrix(X_float64)
X_csr_float32 = sp.csr_matrix(X_float32)
X_csc_float32 = sp.csc_matrix(X_float32)
X_csc_int32 = sp.csc_matrix(X_int64, dtype=np.int32)
y = [0, 0, 1]
integer_data = [X_int64, X_csc_int32]
float64_data = [X_float64, X_csr_float64]
float32_data = [X_float32, X_csr_float32, X_csc_float32]
for X in integer_data:
X_checked = assert_no_warnings(check_array, X, dtype=np.float64,
accept_sparse=True)
assert_equal(X_checked.dtype, np.float64)
X_checked = assert_warns(DataConversionWarning, check_array, X,
dtype=np.float64,
accept_sparse=True, warn_on_dtype=True)
assert_equal(X_checked.dtype, np.float64)
# Check that the warning message includes the name of the Estimator
X_checked = assert_warns_message(DataConversionWarning,
'SomeEstimator',
check_array, X,
dtype=[np.float64, np.float32],
accept_sparse=True,
warn_on_dtype=True,
estimator='SomeEstimator')
assert_equal(X_checked.dtype, np.float64)
X_checked, y_checked = assert_warns_message(
DataConversionWarning, 'KNeighborsClassifier',
check_X_y, X, y, dtype=np.float64, accept_sparse=True,
warn_on_dtype=True, estimator=KNeighborsClassifier())
assert_equal(X_checked.dtype, np.float64)
for X in float64_data:
X_checked = assert_no_warnings(check_array, X, dtype=np.float64,
accept_sparse=True, warn_on_dtype=True)
assert_equal(X_checked.dtype, np.float64)
X_checked = assert_no_warnings(check_array, X, dtype=np.float64,
accept_sparse=True, warn_on_dtype=False)
assert_equal(X_checked.dtype, np.float64)
for X in float32_data:
X_checked = assert_no_warnings(check_array, X,
dtype=[np.float64, np.float32],
accept_sparse=True)
assert_equal(X_checked.dtype, np.float32)
assert_true(X_checked is X)
X_checked = assert_no_warnings(check_array, X,
dtype=[np.float64, np.float32],
accept_sparse=['csr', 'dok'],
copy=True)
assert_equal(X_checked.dtype, np.float32)
assert_false(X_checked is X)
X_checked = assert_no_warnings(check_array, X_csc_float32,
dtype=[np.float64, np.float32],
accept_sparse=['csr', 'dok'],
copy=False)
assert_equal(X_checked.dtype, np.float32)
assert_false(X_checked is X_csc_float32)
assert_equal(X_checked.format, 'csr')
def test_check_array_min_samples_and_features_messages():
# empty list is considered 2D by default:
msg = "0 feature(s) (shape=(1, 0)) while a minimum of 1 is required."
assert_raise_message(ValueError, msg, check_array, [])
# If considered a 1D collection when ensure_2d=False, then the minimum
# number of samples will break:
msg = "0 sample(s) (shape=(0,)) while a minimum of 1 is required."
assert_raise_message(ValueError, msg, check_array, [], ensure_2d=False)
# Invalid edge case when checking the default minimum sample of a scalar
msg = "Singleton array array(42) cannot be considered a valid collection."
assert_raise_message(TypeError, msg, check_array, 42, ensure_2d=False)
# But this works if the input data is forced to look like a 2 array with
# one sample and one feature:
X_checked = check_array(42, ensure_2d=True)
assert_array_equal(np.array([[42]]), X_checked)
# Simulate a model that would need at least 2 samples to be well defined
X = np.ones((1, 10))
y = np.ones(1)
msg = "1 sample(s) (shape=(1, 10)) while a minimum of 2 is required."
assert_raise_message(ValueError, msg, check_X_y, X, y,
ensure_min_samples=2)
# The same message is raised if the data has 2 dimensions even if this is
# not mandatory
assert_raise_message(ValueError, msg, check_X_y, X, y,
ensure_min_samples=2, ensure_2d=False)
# Simulate a model that would require at least 3 features (e.g. SelectKBest
# with k=3)
X = np.ones((10, 2))
y = np.ones(2)
msg = "2 feature(s) (shape=(10, 2)) while a minimum of 3 is required."
assert_raise_message(ValueError, msg, check_X_y, X, y,
ensure_min_features=3)
# Only the feature check is enabled whenever the number of dimensions is 2
# even if allow_nd is enabled:
assert_raise_message(ValueError, msg, check_X_y, X, y,
ensure_min_features=3, allow_nd=True)
# Simulate a case where a pipeline stage as trimmed all the features of a
# 2D dataset.
X = np.empty(0).reshape(10, 0)
y = np.ones(10)
msg = "0 feature(s) (shape=(10, 0)) while a minimum of 1 is required."
assert_raise_message(ValueError, msg, check_X_y, X, y)
# nd-data is not checked for any minimum number of features by default:
X = np.ones((10, 0, 28, 28))
y = np.ones(10)
X_checked, y_checked = check_X_y(X, y, allow_nd=True)
assert_array_equal(X, X_checked)
assert_array_equal(y, y_checked)
def test_has_fit_parameter():
assert_false(has_fit_parameter(KNeighborsClassifier, "sample_weight"))
assert_true(has_fit_parameter(RandomForestRegressor, "sample_weight"))
assert_true(has_fit_parameter(SVR, "sample_weight"))
assert_true(has_fit_parameter(SVR(), "sample_weight"))
def test_check_symmetric():
arr_sym = np.array([[0, 1], [1, 2]])
arr_bad = np.ones(2)
arr_asym = np.array([[0, 2], [0, 2]])
test_arrays = {'dense': arr_asym,
'dok': sp.dok_matrix(arr_asym),
'csr': sp.csr_matrix(arr_asym),
'csc': sp.csc_matrix(arr_asym),
'coo': sp.coo_matrix(arr_asym),
'lil': sp.lil_matrix(arr_asym),
'bsr': sp.bsr_matrix(arr_asym)}
# check error for bad inputs
assert_raises(ValueError, check_symmetric, arr_bad)
# check that asymmetric arrays are properly symmetrized
for arr_format, arr in test_arrays.items():
# Check for warnings and errors
assert_warns(UserWarning, check_symmetric, arr)
assert_raises(ValueError, check_symmetric, arr, raise_exception=True)
output = check_symmetric(arr, raise_warning=False)
if sp.issparse(output):
assert_equal(output.format, arr_format)
assert_array_equal(output.toarray(), arr_sym)
else:
assert_array_equal(output, arr_sym)
def test_check_is_fitted():
# Check is ValueError raised when non estimator instance passed
assert_raises(ValueError, check_is_fitted, ARDRegression, "coef_")
assert_raises(TypeError, check_is_fitted, "SVR", "support_")
ard = ARDRegression()
svr = SVR()
try:
assert_raises(NotFittedError, check_is_fitted, ard, "coef_")
assert_raises(NotFittedError, check_is_fitted, svr, "support_")
except ValueError:
assert False, "check_is_fitted failed with ValueError"
# NotFittedError is a subclass of both ValueError and AttributeError
try:
check_is_fitted(ard, "coef_", "Random message %(name)s, %(name)s")
except ValueError as e:
assert_equal(str(e), "Random message ARDRegression, ARDRegression")
try:
check_is_fitted(svr, "support_", "Another message %(name)s, %(name)s")
except AttributeError as e:
assert_equal(str(e), "Another message SVR, SVR")
ard.fit(*make_blobs())
svr.fit(*make_blobs())
assert_equal(None, check_is_fitted(ard, "coef_"))
assert_equal(None, check_is_fitted(svr, "support_"))
def test_check_consistent_length():
check_consistent_length([1], [2], [3], [4], [5])
check_consistent_length([[1, 2], [[1, 2]]], [1, 2], ['a', 'b'])
check_consistent_length([1], (2,), np.array([3]), sp.csr_matrix((1, 2)))
assert_raises_regexp(ValueError, 'inconsistent numbers of samples',
check_consistent_length, [1, 2], [1])
assert_raises_regexp(TypeError, 'got <\w+ \'int\'>',
check_consistent_length, [1, 2], 1)
assert_raises_regexp(TypeError, 'got <\w+ \'object\'>',
check_consistent_length, [1, 2], object())
assert_raises(TypeError, check_consistent_length, [1, 2], np.array(1))
# Despite ensembles having __len__ they must raise TypeError
assert_raises_regexp(TypeError, 'estimator', check_consistent_length,
[1, 2], RandomForestRegressor())
# XXX: We should have a test with a string, but what is correct behaviour?
| bsd-3-clause |
balister/GNU-Radio | gr-utils/python/utils/plot_fft_base.py | 53 | 10449 | #!/usr/bin/env python
#
# Copyright 2007,2008,2011 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
try:
import scipy
from scipy import fftpack
except ImportError:
print "Please install SciPy to run this script (http://www.scipy.org/)"
raise SystemExit, 1
try:
from pylab import *
except ImportError:
print "Please install Matplotlib to run this script (http://matplotlib.sourceforge.net/)"
raise SystemExit, 1
from optparse import OptionParser
class plot_fft_base:
def __init__(self, datatype, filename, options):
self.hfile = open(filename, "r")
self.block_length = options.block
self.start = options.start
self.sample_rate = options.sample_rate
self.datatype = getattr(scipy, datatype)
self.sizeof_data = self.datatype().nbytes # number of bytes per sample in file
self.axis_font_size = 16
self.label_font_size = 18
self.title_font_size = 20
self.text_size = 22
# Setup PLOT
self.fig = figure(1, figsize=(16, 12), facecolor='w')
rcParams['xtick.labelsize'] = self.axis_font_size
rcParams['ytick.labelsize'] = self.axis_font_size
self.text_file = figtext(0.10, 0.94, ("File: %s" % filename), weight="heavy", size=self.text_size)
self.text_file_pos = figtext(0.10, 0.88, "File Position: ", weight="heavy", size=self.text_size)
self.text_block = figtext(0.35, 0.88, ("Block Size: %d" % self.block_length),
weight="heavy", size=self.text_size)
self.text_sr = figtext(0.60, 0.88, ("Sample Rate: %.2f" % self.sample_rate),
weight="heavy", size=self.text_size)
self.make_plots()
self.button_left_axes = self.fig.add_axes([0.45, 0.01, 0.05, 0.05], frameon=True)
self.button_left = Button(self.button_left_axes, "<")
self.button_left_callback = self.button_left.on_clicked(self.button_left_click)
self.button_right_axes = self.fig.add_axes([0.50, 0.01, 0.05, 0.05], frameon=True)
self.button_right = Button(self.button_right_axes, ">")
self.button_right_callback = self.button_right.on_clicked(self.button_right_click)
self.xlim = self.sp_iq.get_xlim()
self.manager = get_current_fig_manager()
connect('draw_event', self.zoom)
connect('key_press_event', self.click)
show()
def get_data(self):
self.position = self.hfile.tell()/self.sizeof_data
self.text_file_pos.set_text("File Position: %d" % (self.position))
try:
self.iq = scipy.fromfile(self.hfile, dtype=self.datatype, count=self.block_length)
except MemoryError:
print "End of File"
else:
self.iq_fft = self.dofft(self.iq)
tstep = 1.0 / self.sample_rate
#self.time = scipy.array([tstep*(self.position + i) for i in xrange(len(self.iq))])
self.time = scipy.array([tstep*(i) for i in xrange(len(self.iq))])
self.freq = self.calc_freq(self.time, self.sample_rate)
def dofft(self, iq):
N = len(iq)
iq_fft = scipy.fftpack.fftshift(scipy.fft(iq)) # fft and shift axis
iq_fft = 20*scipy.log10(abs((iq_fft+1e-15)/N)) # convert to decibels, adjust power
# adding 1e-15 (-300 dB) to protect against value errors if an item in iq_fft is 0
return iq_fft
def calc_freq(self, time, sample_rate):
N = len(time)
Fs = 1.0 / (time.max() - time.min())
Fn = 0.5 * sample_rate
freq = scipy.array([-Fn + i*Fs for i in xrange(N)])
return freq
def make_plots(self):
# if specified on the command-line, set file pointer
self.hfile.seek(self.sizeof_data*self.start, 1)
# Subplot for real and imaginary parts of signal
self.sp_iq = self.fig.add_subplot(2,2,1, position=[0.075, 0.2, 0.4, 0.6])
self.sp_iq.set_title(("I&Q"), fontsize=self.title_font_size, fontweight="bold")
self.sp_iq.set_xlabel("Time (s)", fontsize=self.label_font_size, fontweight="bold")
self.sp_iq.set_ylabel("Amplitude (V)", fontsize=self.label_font_size, fontweight="bold")
# Subplot for FFT plot
self.sp_fft = self.fig.add_subplot(2,2,2, position=[0.575, 0.2, 0.4, 0.6])
self.sp_fft.set_title(("FFT"), fontsize=self.title_font_size, fontweight="bold")
self.sp_fft.set_xlabel("Frequency (Hz)", fontsize=self.label_font_size, fontweight="bold")
self.sp_fft.set_ylabel("Power Spectrum (dBm)", fontsize=self.label_font_size, fontweight="bold")
self.get_data()
self.plot_iq = self.sp_iq.plot([], 'bo-') # make plot for reals
self.plot_iq += self.sp_iq.plot([], 'ro-') # make plot for imags
self.draw_time() # draw the plot
self.plot_fft = self.sp_fft.plot([], 'bo-') # make plot for FFT
self.draw_fft() # draw the plot
draw()
def draw_time(self):
reals = self.iq.real
imags = self.iq.imag
self.plot_iq[0].set_data([self.time, reals])
self.plot_iq[1].set_data([self.time, imags])
self.sp_iq.set_xlim(self.time.min(), self.time.max())
self.sp_iq.set_ylim([1.5*min([reals.min(), imags.min()]),
1.5*max([reals.max(), imags.max()])])
def draw_fft(self):
self.plot_fft[0].set_data([self.freq, self.iq_fft])
self.sp_fft.set_xlim(self.freq.min(), self.freq.max())
self.sp_fft.set_ylim([self.iq_fft.min()-10, self.iq_fft.max()+10])
def update_plots(self):
self.draw_time()
self.draw_fft()
self.xlim = self.sp_iq.get_xlim()
draw()
def zoom(self, event):
newxlim = scipy.array(self.sp_iq.get_xlim())
curxlim = scipy.array(self.xlim)
if(newxlim[0] != curxlim[0] or newxlim[1] != curxlim[1]):
self.xlim = newxlim
#xmin = max(0, int(ceil(self.sample_rate*(self.xlim[0] - self.position))))
#xmax = min(int(ceil(self.sample_rate*(self.xlim[1] - self.position))), len(self.iq))
xmin = max(0, int(ceil(self.sample_rate*(self.xlim[0]))))
xmax = min(int(ceil(self.sample_rate*(self.xlim[1]))), len(self.iq))
iq = self.iq[xmin : xmax]
time = self.time[xmin : xmax]
iq_fft = self.dofft(iq)
freq = self.calc_freq(time, self.sample_rate)
self.plot_fft[0].set_data(freq, iq_fft)
self.sp_fft.axis([freq.min(), freq.max(),
iq_fft.min()-10, iq_fft.max()+10])
draw()
def click(self, event):
forward_valid_keys = [" ", "down", "right"]
backward_valid_keys = ["up", "left"]
if(find(event.key, forward_valid_keys)):
self.step_forward()
elif(find(event.key, backward_valid_keys)):
self.step_backward()
def button_left_click(self, event):
self.step_backward()
def button_right_click(self, event):
self.step_forward()
def step_forward(self):
self.get_data()
self.update_plots()
def step_backward(self):
# Step back in file position
if(self.hfile.tell() >= 2*self.sizeof_data*self.block_length ):
self.hfile.seek(-2*self.sizeof_data*self.block_length, 1)
else:
self.hfile.seek(-self.hfile.tell(),1)
self.get_data()
self.update_plots()
@staticmethod
def setup_options():
usage="%prog: [options] input_filename"
description = "Takes a GNU Radio complex binary file and displays the I&Q data versus time as well as the frequency domain (FFT) plot. The y-axis values are plotted assuming volts as the amplitude of the I&Q streams and converted into dBm in the frequency domain (the 1/N power adjustment out of the FFT is performed internally). The script plots a certain block of data at a time, specified on the command line as -B or --block. This value defaults to 1000. The start position in the file can be set by specifying -s or --start and defaults to 0 (the start of the file). By default, the system assumes a sample rate of 1, so in time, each sample is plotted versus the sample number. To set a true time and frequency axis, set the sample rate (-R or --sample-rate) to the sample rate used when capturing the samples."
parser = OptionParser(conflict_handler="resolve", usage=usage, description=description)
parser.add_option("-d", "--data-type", type="string", default="complex64",
help="Specify the data type (complex64, float32, (u)int32, (u)int16, (u)int8) [default=%default]")
parser.add_option("-B", "--block", type="int", default=1000,
help="Specify the block size [default=%default]")
parser.add_option("-s", "--start", type="int", default=0,
help="Specify where to start in the file [default=%default]")
parser.add_option("-R", "--sample-rate", type="float", default=1.0,
help="Set the sampler rate of the data [default=%default]")
return parser
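# Illustrative invocation from the shell (the capture file name is
# hypothetical; the options map to setup_options above):
#     ./plot_fft_base.py -d complex64 -B 4096 -s 0 -R 250e3 capture.dat
# Once the window is open, the "<" / ">" buttons, the arrow keys, or the
# space bar step backward/forward through blocks of the file.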
def find(item_in, list_search):
try:
return list_search.index(item_in) != None
except ValueError:
return False
def main():
parser = plot_fft_base.setup_options()
(options, args) = parser.parse_args ()
if len(args) != 1:
parser.print_help()
raise SystemExit, 1
filename = args[0]
dc = plot_fft_base(options.data_type, filename, options)
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
pass
| gpl-3.0 |
MohamedAbdultawab/FOC_RiceUniv | algorithmic-thinking-2/module-3-project-and-application/02_application-3-comparison-of-clustering-algorithms/alg_clusters_matplotlib.py | 1 | 3984 | """
Some provided code for plotting the clusters using matplotlib
"""
import math
# import urllib.request as urllib2
import matplotlib.pyplot as plt
# URLS for various important datasets
DIRECTORY = "/media/toba/FOCUS/2_Fundamentals of Computing Rice University/algorithmic-thinking-2/02_module-3-project-and-application/02_application-3-comparison-of-clustering-algorithms/"
MAP_URL = DIRECTORY + "USA_Counties.png"
# Define colors for clusters. Display a max of 16 clusters.
COLORS = ['Aqua', 'Yellow', 'Blue', 'Fuchsia', 'Black', 'Green', 'Lime', 'Maroon', 'Navy', 'Olive', 'Orange', 'Purple', 'Red', 'Brown', 'Teal']
# Helper functions
def circle_area(pop):
"""
Compute area of circle proportional to population
"""
return math.pi * pop / (200.0 ** 2)
def plot_clusters(data_table, cluster_list, draw_centers=False):
"""
Create a plot of clusters of counties
"""
fips_to_line = {}
for line_idx in range(len(data_table)):
fips_to_line[data_table[line_idx][0]] = line_idx
# Load map image
# map_file = open(MAP_URL)
map_img = plt.imread(MAP_URL)
# Scale plot to get size similar to CodeSkulptor version
ypixels, xpixels, bands = map_img.shape
DPI = 60.0 # adjust this constant to resize your plot
xinch = xpixels / DPI
yinch = ypixels / DPI
plt.figure(figsize=(xinch, yinch))
implot = plt.imshow(map_img)
# draw the counties colored by cluster on the map
if not draw_centers:
for cluster_idx in range(len(cluster_list)):
cluster = cluster_list[cluster_idx]
cluster_color = COLORS[cluster_idx % len(COLORS)]
for fips_code in cluster.fips_codes():
line = data_table[fips_to_line[fips_code]]
plt.scatter(x=[line[1]],
y=[line[2]],
s=circle_area(line[3]),
lw=1,
facecolors=cluster_color,
edgecolors=cluster_color)
# add cluster centers and lines from center to counties
else:
for cluster_idx in range(len(cluster_list)):
cluster = cluster_list[cluster_idx]
cluster_color = COLORS[cluster_idx % len(COLORS)]
for fips_code in cluster.fips_codes():
line = data_table[fips_to_line[fips_code]]
plt.scatter(x=[line[1]],
y=[line[2]],
s=circle_area(line[3]),
lw=1,
facecolors=cluster_color,
edgecolors=cluster_color,
zorder=1)
for cluster_idx in range(len(cluster_list)):
cluster = cluster_list[cluster_idx]
cluster_color = COLORS[cluster_idx % len(COLORS)]
cluster_center = (cluster.horiz_center(), cluster.vert_center())
for fips_code in cluster.fips_codes():
line = data_table[fips_to_line[fips_code]]
plt.plot([cluster_center[0], line[1]],
[cluster_center[1], line[2]],
cluster_color, lw=1,
zorder=2)
for cluster_idx in range(len(cluster_list)):
cluster = cluster_list[cluster_idx]
cluster_color = COLORS[cluster_idx % len(COLORS)]
cluster_center = (cluster.horiz_center(), cluster.vert_center())
cluster_pop = cluster.total_population()
plt.scatter(x=[cluster_center[0]],
y=[cluster_center[1]],
s=circle_area(cluster_pop),
lw=2,
facecolors="none",
edgecolors="black",
zorder=3)
# plt.show()
plt.savefig('Q6', dpi=300, format='png', transparent=False, orientation='landscape', bbox_inches='tight', pad_inches=0.3)
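# Illustrative call (assumes a data_table whose rows look like
# [fips_code, horiz_center, vert_center, population, ...] as indexed above,
# and a cluster_list produced by the project's clustering code):
#     plot_clusters(data_table, cluster_list, draw_centers=True)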
| gpl-3.0 |
vortex-ape/scikit-learn | examples/decomposition/plot_pca_3d.py | 49 | 2356 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Principal components analysis (PCA)
=========================================================
These figures aid in illustrating how a point cloud
can be very flat in one direction--which is where PCA
comes in to choose a direction that is not flat.
"""
print(__doc__)
# Authors: Gael Varoquaux
# Jaques Grobler
# Kevin Hughes
# License: BSD 3 clause
from sklearn.decomposition import PCA
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
# #############################################################################
# Create the data
e = np.exp(1)
np.random.seed(4)
def pdf(x):
return 0.5 * (stats.norm(scale=0.25 / e).pdf(x)
+ stats.norm(scale=4 / e).pdf(x))
y = np.random.normal(scale=0.5, size=(30000))
x = np.random.normal(scale=0.5, size=(30000))
z = np.random.normal(scale=0.1, size=len(x))
density = pdf(x) * pdf(y)
pdf_z = pdf(5 * z)
density *= pdf_z
a = x + y
b = 2 * y
c = a - b + z
norm = np.sqrt(a.var() + b.var())
a /= norm
b /= norm
# #############################################################################
# Plot the figures
def plot_figs(fig_num, elev, azim):
fig = plt.figure(fig_num, figsize=(4, 3))
plt.clf()
ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=elev, azim=azim)
ax.scatter(a[::10], b[::10], c[::10], c=density[::10], marker='+', alpha=.4)
Y = np.c_[a, b, c]
# Using SciPy's SVD, this would be:
# _, pca_score, V = scipy.linalg.svd(Y, full_matrices=False)
pca = PCA(n_components=3)
pca.fit(Y)
pca_score = pca.explained_variance_ratio_
V = pca.components_
x_pca_axis, y_pca_axis, z_pca_axis = 3 * V.T
x_pca_plane = np.r_[x_pca_axis[:2], - x_pca_axis[1::-1]]
y_pca_plane = np.r_[y_pca_axis[:2], - y_pca_axis[1::-1]]
z_pca_plane = np.r_[z_pca_axis[:2], - z_pca_axis[1::-1]]
x_pca_plane.shape = (2, 2)
y_pca_plane.shape = (2, 2)
z_pca_plane.shape = (2, 2)
ax.plot_surface(x_pca_plane, y_pca_plane, z_pca_plane)
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
elev = -40
azim = -80
plot_figs(1, elev, azim)
elev = 30
azim = 20
plot_figs(2, elev, azim)
plt.show()
| bsd-3-clause |
fivejjs/pyhsmm | examples/concentration-resampling.py | 5 | 1680 | from __future__ import division
import numpy as np
np.seterr(divide='ignore') # these warnings are usually harmless for this code
from matplotlib import pyplot as plt
import os
import scipy.stats as stats
import pyhsmm
from pyhsmm.util.text import progprint_xrange
###############
# load data #
###############
T = 1000
data = np.loadtxt(os.path.join(os.path.dirname(__file__),'example-data.txt'))[:T]
#########################
# posterior inference #
#########################
Nmax = 20
obs_dim = data.shape[1]
obs_hypparams = {'mu_0':np.zeros(obs_dim),
'sigma_0':np.eye(obs_dim),
'kappa_0':0.25,
'nu_0':obs_dim+2}
dur_hypparams = {'alpha_0':2*30,
'beta_0':2}
obs_distns = [pyhsmm.distributions.Gaussian(**obs_hypparams) for state in range(Nmax)]
dur_distns = [pyhsmm.distributions.PoissonDuration(**dur_hypparams) for state in range(Nmax)]
posteriormodel = pyhsmm.models.WeakLimitHDPHSMM(
# NOTE: instead of passing in alpha_0 and gamma_0, we pass in parameters
# for priors over those concentration parameters
alpha_a_0=1.,alpha_b_0=1./4,
gamma_a_0=1.,gamma_b_0=1./4,
init_state_concentration=6.,
obs_distns=obs_distns,
dur_distns=dur_distns)
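# Note: with shape a_0 = 1 and rate b_0 = 1/4, the Gamma priors placed on the
# concentration parameters have mean a_0 / b_0 = 4; the prior curve plotted at
# the end of this script uses scipy's scale parameterization (scale = 1/rate = 4).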
posteriormodel.add_data(data,trunc=70)
for idx in progprint_xrange(100):
posteriormodel.resample_model()
plt.figure()
posteriormodel.plot()
plt.gcf().suptitle('Sampled after 100 iterations')
plt.figure()
t = np.linspace(0.01,30,1000)
plt.plot(t,stats.gamma.pdf(t,1.,scale=4.)) # NOTE: numpy/scipy scale is inverted compared to my scale
plt.title('Prior on concentration parameters')
plt.show()
| mit |
jniediek/mne-python | mne/inverse_sparse/mxne_optim.py | 8 | 36918 | from __future__ import print_function
# Author: Alexandre Gramfort <[email protected]>
# Daniel Strohmeier <[email protected]>
#
# License: Simplified BSD
from copy import deepcopy
from math import sqrt, ceil
import numpy as np
from scipy import linalg
from .mxne_debiasing import compute_bias
from ..utils import logger, verbose, sum_squared, warn
from ..time_frequency.stft import stft_norm2, stft, istft
from ..externals.six.moves import xrange as range
def groups_norm2(A, n_orient):
"""compute squared L2 norms of groups inplace"""
n_positions = A.shape[0] // n_orient
return np.sum(np.power(A, 2, A).reshape(n_positions, -1), axis=1)
def norm_l2inf(A, n_orient, copy=True):
"""L2-inf norm"""
if A.size == 0:
return 0.0
if copy:
A = A.copy()
return sqrt(np.max(groups_norm2(A, n_orient)))
def norm_l21(A, n_orient, copy=True):
"""L21 norm"""
if A.size == 0:
return 0.0
if copy:
A = A.copy()
return np.sum(np.sqrt(groups_norm2(A, n_orient)))
def prox_l21(Y, alpha, n_orient, shape=None, is_stft=False):
"""proximity operator for l21 norm
L2 over columns and L1 over rows => groups contain n_orient rows.
It can eventually take into account the negative frequencies
when a complex value is passed and is_stft=True.
    Examples
    --------
>>> Y = np.tile(np.array([0, 4, 3, 0, 0], dtype=np.float), (2, 1))
>>> Y = np.r_[Y, np.zeros_like(Y)]
>>> print(Y)
[[ 0. 4. 3. 0. 0.]
[ 0. 4. 3. 0. 0.]
[ 0. 0. 0. 0. 0.]
[ 0. 0. 0. 0. 0.]]
>>> Yp, active_set = prox_l21(Y, 2, 2)
>>> print(Yp)
[[ 0. 2.86862915 2.15147186 0. 0. ]
[ 0. 2.86862915 2.15147186 0. 0. ]]
>>> print(active_set)
[ True True False False]
"""
if len(Y) == 0:
return np.zeros_like(Y), np.zeros((0,), dtype=np.bool)
if shape is not None:
shape_init = Y.shape
Y = Y.reshape(*shape)
n_positions = Y.shape[0] // n_orient
if is_stft:
rows_norm = np.sqrt(stft_norm2(Y).reshape(n_positions, -1).sum(axis=1))
else:
rows_norm = np.sqrt((Y * Y.conj()).real.reshape(n_positions,
-1).sum(axis=1))
# Ensure shrink is >= 0 while avoiding any division by zero
shrink = np.maximum(1.0 - alpha / np.maximum(rows_norm, alpha), 0.0)
active_set = shrink > 0.0
if n_orient > 1:
active_set = np.tile(active_set[:, None], [1, n_orient]).ravel()
shrink = np.tile(shrink[:, None], [1, n_orient]).ravel()
Y = Y[active_set]
if shape is None:
Y *= shrink[active_set][:, np.newaxis]
else:
Y *= shrink[active_set][:, np.newaxis, np.newaxis]
Y = Y.reshape(-1, *shape_init[1:])
return Y, active_set
def prox_l1(Y, alpha, n_orient):
"""proximity operator for l1 norm with multiple orientation support
L2 over orientation and L1 over position (space + time)
    Examples
    --------
>>> Y = np.tile(np.array([1, 2, 3, 2, 0], dtype=np.float), (2, 1))
>>> Y = np.r_[Y, np.zeros_like(Y)]
>>> print(Y)
[[ 1. 2. 3. 2. 0.]
[ 1. 2. 3. 2. 0.]
[ 0. 0. 0. 0. 0.]
[ 0. 0. 0. 0. 0.]]
>>> Yp, active_set = prox_l1(Y, 2, 2)
>>> print(Yp)
[[ 0. 0.58578644 1.58578644 0.58578644 0. ]
[ 0. 0.58578644 1.58578644 0.58578644 0. ]]
>>> print(active_set)
[ True True False False]
"""
n_positions = Y.shape[0] // n_orient
norms = np.sqrt((Y * Y.conj()).real.T.reshape(-1, n_orient).sum(axis=1))
# Ensure shrink is >= 0 while avoiding any division by zero
shrink = np.maximum(1.0 - alpha / np.maximum(norms, alpha), 0.0)
shrink = shrink.reshape(-1, n_positions).T
active_set = np.any(shrink > 0.0, axis=1)
shrink = shrink[active_set]
if n_orient > 1:
active_set = np.tile(active_set[:, None], [1, n_orient]).ravel()
Y = Y[active_set]
if len(Y) > 0:
for o in range(n_orient):
Y[o::n_orient] *= shrink
return Y, active_set
def dgap_l21(M, G, X, active_set, alpha, n_orient):
"""Duality gaps for the mixed norm inverse problem
For details see:
Gramfort A., Kowalski M. and Hamalainen, M,
Mixed-norm estimates for the M/EEG inverse problem using accelerated
gradient methods, Physics in Medicine and Biology, 2012
http://dx.doi.org/10.1088/0031-9155/57/7/1937
Parameters
----------
M : array, shape (n_sensors, n_times)
The data.
G : array, shape (n_sensors, n_active)
The gain matrix a.k.a. lead field.
X : array, shape (n_active, n_times)
Sources
active_set : array of bool
Mask of active sources
alpha : float
Regularization parameter
n_orient : int
Number of dipoles per locations (typically 1 or 3)
Returns
-------
gap : float
Dual gap
pobj : float
Primal cost
dobj : float
Dual cost. gap = pobj - dobj
R : array, shape (n_sensors, n_times)
Current residual of M - G * X
"""
GX = np.dot(G[:, active_set], X)
R = M - GX
penalty = norm_l21(X, n_orient, copy=True)
nR2 = sum_squared(R)
pobj = 0.5 * nR2 + alpha * penalty
dual_norm = norm_l2inf(np.dot(G.T, R), n_orient, copy=False)
scaling = alpha / dual_norm
scaling = min(scaling, 1.0)
dobj = 0.5 * (scaling ** 2) * nR2 + scaling * np.sum(R * GX)
gap = pobj - dobj
return gap, pobj, dobj, R
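# Illustrative convergence check (this mirrors how the solvers below use the
# duality gap; tol is the user-supplied tolerance):
#     gap, pobj, dobj, R = dgap_l21(M, G, X, active_set, alpha, n_orient)
#     if gap < tol:
#         pass  # the primal-dual gap is small enough: stop iterating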
@verbose
def _mixed_norm_solver_prox(M, G, alpha, lipschitz_constant, maxit=200,
tol=1e-8, verbose=None, init=None, n_orient=1):
"""Solves L21 inverse problem with proximal iterations and FISTA"""
n_sensors, n_times = M.shape
n_sensors, n_sources = G.shape
if n_sources < n_sensors:
gram = np.dot(G.T, G)
GTM = np.dot(G.T, M)
else:
gram = None
if init is None:
X = 0.0
R = M.copy()
if gram is not None:
R = np.dot(G.T, R)
else:
X = init
if gram is None:
R = M - np.dot(G, X)
else:
R = GTM - np.dot(gram, X)
t = 1.0
Y = np.zeros((n_sources, n_times)) # FISTA aux variable
E = [] # track cost function
active_set = np.ones(n_sources, dtype=np.bool) # start with full AS
for i in range(maxit):
X0, active_set_0 = X, active_set # store previous values
if gram is None:
Y += np.dot(G.T, R) / lipschitz_constant # ISTA step
else:
Y += R / lipschitz_constant # ISTA step
X, active_set = prox_l21(Y, alpha / lipschitz_constant, n_orient)
t0 = t
t = 0.5 * (1.0 + sqrt(1.0 + 4.0 * t ** 2))
Y.fill(0.0)
dt = ((t0 - 1.0) / t)
Y[active_set] = (1.0 + dt) * X
Y[active_set_0] -= dt * X0
Y_as = active_set_0 | active_set
if gram is None:
R = M - np.dot(G[:, Y_as], Y[Y_as])
else:
R = GTM - np.dot(gram[:, Y_as], Y[Y_as])
gap, pobj, dobj, _ = dgap_l21(M, G, X, active_set, alpha, n_orient)
E.append(pobj)
logger.debug("pobj : %s -- gap : %s" % (pobj, gap))
if gap < tol:
logger.debug('Convergence reached ! (gap: %s < %s)' % (gap, tol))
break
return X, active_set, E
@verbose
def _mixed_norm_solver_cd(M, G, alpha, lipschitz_constant, maxit=10000,
tol=1e-8, verbose=None, init=None, n_orient=1):
"""Solves L21 inverse problem with coordinate descent"""
from sklearn.linear_model.coordinate_descent import MultiTaskLasso
n_sensors, n_times = M.shape
n_sensors, n_sources = G.shape
if init is not None:
init = init.T
clf = MultiTaskLasso(alpha=alpha / len(M), tol=tol, normalize=False,
fit_intercept=False, max_iter=maxit,
warm_start=True)
clf.coef_ = init
clf.fit(G, M)
X = clf.coef_.T
active_set = np.any(X, axis=1)
X = X[active_set]
gap, pobj, dobj, _ = dgap_l21(M, G, X, active_set, alpha, n_orient)
return X, active_set, pobj
@verbose
def _mixed_norm_solver_bcd(M, G, alpha, lipschitz_constant, maxit=200,
tol=1e-8, verbose=None, init=None, n_orient=1):
"""Solves L21 inverse problem with block coordinate descent"""
# First make G fortran for faster access to blocks of columns
G = np.asfortranarray(G)
n_sensors, n_times = M.shape
n_sensors, n_sources = G.shape
n_positions = n_sources // n_orient
if init is None:
X = np.zeros((n_sources, n_times))
R = M.copy()
else:
X = init
R = M - np.dot(G, X)
E = [] # track cost function
active_set = np.zeros(n_sources, dtype=np.bool) # start with full AS
alpha_lc = alpha / lipschitz_constant
for i in range(maxit):
for j in range(n_positions):
idx = slice(j * n_orient, (j + 1) * n_orient)
G_j = G[:, idx]
X_j = X[idx]
X_j_new = np.dot(G_j.T, R) / lipschitz_constant[j]
was_non_zero = np.any(X_j)
if was_non_zero:
R += np.dot(G_j, X_j)
X_j_new += X_j
block_norm = linalg.norm(X_j_new, 'fro')
if block_norm <= alpha_lc[j]:
X_j.fill(0.)
active_set[idx] = False
else:
shrink = np.maximum(1.0 - alpha_lc[j] / block_norm, 0.0)
X_j_new *= shrink
R -= np.dot(G_j, X_j_new)
X_j[:] = X_j_new
active_set[idx] = True
gap, pobj, dobj, _ = dgap_l21(M, G, X[active_set], active_set, alpha,
n_orient)
E.append(pobj)
logger.debug("Iteration %d :: pobj %f :: dgap %f :: n_active %d" % (
i + 1, pobj, gap, np.sum(active_set) / n_orient))
if gap < tol:
logger.debug('Convergence reached ! (gap: %s < %s)' % (gap, tol))
break
X = X[active_set]
return X, active_set, E
@verbose
def mixed_norm_solver(M, G, alpha, maxit=3000, tol=1e-8, verbose=None,
active_set_size=50, debias=True, n_orient=1,
solver='auto'):
"""Solves L1/L2 mixed-norm inverse problem with active set strategy
Algorithm is detailed in:
Gramfort A., Kowalski M. and Hamalainen, M,
Mixed-norm estimates for the M/EEG inverse problem using accelerated
gradient methods, Physics in Medicine and Biology, 2012
http://dx.doi.org/10.1088/0031-9155/57/7/1937
Parameters
----------
M : array, shape (n_sensors, n_times)
The data.
G : array, shape (n_sensors, n_dipoles)
The gain matrix a.k.a. lead field.
alpha : float
The regularization parameter. It should be between 0 and 100.
A value of 100 will lead to an empty active set (no active source).
maxit : int
The number of iterations.
tol : float
Tolerance on dual gap for convergence checking.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
active_set_size : int
Size of active set increase at each iteration.
debias : bool
Debias source estimates.
n_orient : int
The number of orientation (1 : fixed or 3 : free or loose).
solver : 'prox' | 'cd' | 'bcd' | 'auto'
The algorithm to use for the optimization.
Returns
-------
X : array, shape (n_active, n_times)
The source estimates.
active_set : array
The mask of active sources.
E : list
The value of the objective function over the iterations.
"""
n_dipoles = G.shape[1]
n_positions = n_dipoles // n_orient
n_sensors, n_times = M.shape
alpha_max = norm_l2inf(np.dot(G.T, M), n_orient, copy=False)
logger.info("-- ALPHA MAX : %s" % alpha_max)
alpha = float(alpha)
has_sklearn = True
try:
from sklearn.linear_model.coordinate_descent import MultiTaskLasso # noqa
except ImportError:
has_sklearn = False
if solver == 'auto':
if has_sklearn and (n_orient == 1):
solver = 'cd'
else:
solver = 'bcd'
if solver == 'cd':
if n_orient == 1 and not has_sklearn:
warn('Scikit-learn >= 0.12 cannot be found. Using block coordinate'
' descent instead of coordinate descent.')
solver = 'bcd'
if n_orient > 1:
warn('Coordinate descent is only available for fixed orientation. '
'Using block coordinate descent instead of coordinate '
'descent')
solver = 'bcd'
if solver == 'cd':
logger.info("Using coordinate descent")
l21_solver = _mixed_norm_solver_cd
lc = None
elif solver == 'bcd':
logger.info("Using block coordinate descent")
l21_solver = _mixed_norm_solver_bcd
G = np.asfortranarray(G)
if n_orient == 1:
lc = np.sum(G * G, axis=0)
else:
lc = np.empty(n_positions)
for j in range(n_positions):
G_tmp = G[:, (j * n_orient):((j + 1) * n_orient)]
lc[j] = linalg.norm(np.dot(G_tmp.T, G_tmp), ord=2)
else:
logger.info("Using proximal iterations")
l21_solver = _mixed_norm_solver_prox
lc = 1.01 * linalg.norm(G, ord=2) ** 2
if active_set_size is not None:
E = list()
X_init = None
active_set = np.zeros(n_dipoles, dtype=np.bool)
idx_large_corr = np.argsort(groups_norm2(np.dot(G.T, M), n_orient))
new_active_idx = idx_large_corr[-active_set_size:]
if n_orient > 1:
new_active_idx = (n_orient * new_active_idx[:, None] +
np.arange(n_orient)[None, :]).ravel()
active_set[new_active_idx] = True
as_size = np.sum(active_set)
for k in range(maxit):
if solver == 'bcd':
lc_tmp = lc[active_set[::n_orient]]
elif solver == 'cd':
lc_tmp = None
else:
lc_tmp = 1.01 * linalg.norm(G[:, active_set], ord=2) ** 2
X, as_, _ = l21_solver(M, G[:, active_set], alpha, lc_tmp,
maxit=maxit, tol=tol, init=X_init,
n_orient=n_orient)
active_set[active_set] = as_.copy()
idx_old_active_set = np.where(active_set)[0]
gap, pobj, dobj, R = dgap_l21(M, G, X, active_set, alpha,
n_orient)
E.append(pobj)
logger.info("Iteration %d :: pobj %f :: dgap %f ::"
"n_active_start %d :: n_active_end %d" % (
k + 1, pobj, gap, as_size // n_orient,
np.sum(active_set) // n_orient))
if gap < tol:
logger.info('Convergence reached ! (gap: %s < %s)'
% (gap, tol))
break
# add sources if not last iteration
if k < (maxit - 1):
idx_large_corr = np.argsort(groups_norm2(np.dot(G.T, R),
n_orient))
new_active_idx = idx_large_corr[-active_set_size:]
if n_orient > 1:
new_active_idx = (n_orient * new_active_idx[:, None] +
np.arange(n_orient)[None, :])
new_active_idx = new_active_idx.ravel()
active_set[new_active_idx] = True
idx_active_set = np.where(active_set)[0]
as_size = np.sum(active_set)
X_init = np.zeros((as_size, n_times), dtype=X.dtype)
idx = np.searchsorted(idx_active_set, idx_old_active_set)
X_init[idx] = X
else:
warn('Did NOT converge ! (gap: %s > %s)' % (gap, tol))
else:
X, active_set, E = l21_solver(M, G, alpha, lc, maxit=maxit,
tol=tol, n_orient=n_orient, init=None)
if np.any(active_set) and debias:
bias = compute_bias(M, G[:, active_set], X, n_orient=n_orient)
X *= bias[:, np.newaxis]
logger.info('Final active set size: %s' % (np.sum(active_set) // n_orient))
return X, active_set, E
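# Illustrative call on synthetic data (a sketch only -- shapes follow the
# docstring, and the alpha value is arbitrary; in practice alpha is chosen
# relative to the alpha_max logged by the solver):
#     M = np.random.randn(20, 30)      # n_sensors x n_times
#     G = np.random.randn(20, 60)      # n_sensors x n_dipoles
#     X, active_set, E = mixed_norm_solver(M, G, alpha=40., n_orient=1)
#     # X has shape (active_set.sum(), n_times)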
@verbose
def iterative_mixed_norm_solver(M, G, alpha, n_mxne_iter, maxit=3000,
tol=1e-8, verbose=None, active_set_size=50,
debias=True, n_orient=1, solver='auto'):
"""Solves L0.5/L2 mixed-norm inverse problem with active set strategy
Algorithm is detailed in:
Strohmeier D., Haueisen J., and Gramfort A.:
Improved MEG/EEG source localization with reweighted mixed-norms,
4th International Workshop on Pattern Recognition in Neuroimaging,
Tuebingen, 2014
Parameters
----------
M : array, shape (n_sensors, n_times)
The data.
G : array, shape (n_sensors, n_dipoles)
The gain matrix a.k.a. lead field.
alpha : float
The regularization parameter. It should be between 0 and 100.
A value of 100 will lead to an empty active set (no active source).
n_mxne_iter : int
The number of MxNE iterations. If > 1, iterative reweighting
is applied.
maxit : int
The number of iterations.
tol : float
Tolerance on dual gap for convergence checking.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
active_set_size : int
Size of active set increase at each iteration.
debias : bool
Debias source estimates.
n_orient : int
The number of orientation (1 : fixed or 3 : free or loose).
solver : 'prox' | 'cd' | 'bcd' | 'auto'
The algorithm to use for the optimization.
Returns
-------
X : array, shape (n_active, n_times)
The source estimates.
active_set : array
The mask of active sources.
E : list
The value of the objective function over the iterations.
"""
def g(w):
return np.sqrt(np.sqrt(groups_norm2(w.copy(), n_orient)))
def gprime(w):
return 2. * np.repeat(g(w), n_orient).ravel()
E = list()
active_set = np.ones(G.shape[1], dtype=np.bool)
weights = np.ones(G.shape[1])
X = np.zeros((G.shape[1], M.shape[1]))
for k in range(n_mxne_iter):
X0 = X.copy()
active_set_0 = active_set.copy()
G_tmp = G[:, active_set] * weights[np.newaxis, :]
if active_set_size is not None:
if np.sum(active_set) > (active_set_size * n_orient):
X, _active_set, _ = mixed_norm_solver(
M, G_tmp, alpha, debias=False, n_orient=n_orient,
maxit=maxit, tol=tol, active_set_size=active_set_size,
solver=solver, verbose=verbose)
else:
X, _active_set, _ = mixed_norm_solver(
M, G_tmp, alpha, debias=False, n_orient=n_orient,
maxit=maxit, tol=tol, active_set_size=None, solver=solver,
verbose=verbose)
else:
X, _active_set, _ = mixed_norm_solver(
M, G_tmp, alpha, debias=False, n_orient=n_orient,
maxit=maxit, tol=tol, active_set_size=None, solver=solver,
verbose=verbose)
logger.info('active set size %d' % (_active_set.sum() / n_orient))
if _active_set.sum() > 0:
active_set[active_set] = _active_set
# Reapply weights to have correct unit
X *= weights[_active_set][:, np.newaxis]
weights = gprime(X)
p_obj = 0.5 * linalg.norm(M - np.dot(G[:, active_set], X),
'fro') ** 2. + alpha * np.sum(g(X))
E.append(p_obj)
# Check convergence
if ((k >= 1) and np.all(active_set == active_set_0) and
np.all(np.abs(X - X0) < tol)):
print('Convergence reached after %d reweightings!' % k)
break
else:
active_set = np.zeros_like(active_set)
p_obj = 0.5 * linalg.norm(M) ** 2.
E.append(p_obj)
break
if np.any(active_set) and debias:
bias = compute_bias(M, G[:, active_set], X, n_orient=n_orient)
X *= bias[:, np.newaxis]
return X, active_set, E
###############################################################################
# TF-MxNE
@verbose
def tf_lipschitz_constant(M, G, phi, phiT, tol=1e-3, verbose=None):
"""Compute lipschitz constant for FISTA
It uses a power iteration method.
"""
n_times = M.shape[1]
n_points = G.shape[1]
iv = np.ones((n_points, n_times), dtype=np.float)
v = phi(iv)
L = 1e100
for it in range(100):
L_old = L
logger.info('Lipschitz estimation: iteration = %d' % it)
iv = np.real(phiT(v))
Gv = np.dot(G, iv)
GtGv = np.dot(G.T, Gv)
w = phi(GtGv)
L = np.max(np.abs(w)) # l_inf norm
v = w / L
if abs((L - L_old) / L_old) < tol:
break
return L
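# Illustrative usage sketch (relies on the _Phi / _PhiT helpers defined
# below; the wsize/tstep values are arbitrary):
#     wsize, tstep = 64, 4
#     n_step = int(ceil(n_times / float(tstep)))
#     n_freq = wsize // 2 + 1
#     phi = _Phi(wsize, tstep, n_freq * n_step)
#     phiT = _PhiT(tstep, n_freq, n_step, n_times)
#     L = tf_lipschitz_constant(M, G, phi, phiT)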
def safe_max_abs(A, ia):
"""Compute np.max(np.abs(A[ia])) possible with empty A"""
if np.sum(ia): # ia is not empty
return np.max(np.abs(A[ia]))
else:
return 0.
def safe_max_abs_diff(A, ia, B, ib):
"""Compute np.max(np.abs(A)) possible with empty A"""
A = A[ia] if np.sum(ia) else 0.0
B = B[ib] if np.sum(ia) else 0.0
return np.max(np.abs(A - B))
class _Phi(object):
"""Util class to have phi stft as callable without using
a lambda that does not pickle"""
def __init__(self, wsize, tstep, n_coefs):
self.wsize = wsize
self.tstep = tstep
self.n_coefs = n_coefs
def __call__(self, x):
return stft(x, self.wsize, self.tstep,
verbose=False).reshape(-1, self.n_coefs)
class _PhiT(object):
"""Util class to have phi.T istft as callable without using
a lambda that does not pickle"""
def __init__(self, tstep, n_freq, n_step, n_times):
self.tstep = tstep
self.n_freq = n_freq
self.n_step = n_step
self.n_times = n_times
def __call__(self, z):
return istft(z.reshape(-1, self.n_freq, self.n_step), self.tstep,
self.n_times)
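# Editor's example (not part of the original module): a hedged sketch showing
# how the _Phi / _PhiT pair above is built, mirroring the setup used later in
# tf_mixed_norm_solver.  The STFT parameters are illustrative assumptions.
def _example_phi_phiT():
    n_times, wsize, tstep = 128, 16, 4
    n_step = int(np.ceil(n_times / float(tstep)))
    n_freq = wsize // 2 + 1
    phi = _Phi(wsize, tstep, n_freq * n_step)        # forward TF transform
    phiT = _PhiT(tstep, n_freq, n_step, n_times)     # inverse TF transform
    x = np.random.RandomState(0).randn(3, n_times)   # 3 source time courses
    z = phi(x)        # complex TF coefficients, shape (3, n_freq * n_step)
    x_rec = phiT(z)   # back to the time domain, should closely match x
    return z, x_rec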
def norm_l21_tf(Z, shape, n_orient):
if Z.shape[0]:
Z2 = Z.reshape(*shape)
l21_norm = np.sqrt(stft_norm2(Z2).reshape(-1, n_orient).sum(axis=1))
l21_norm = l21_norm.sum()
else:
l21_norm = 0.
return l21_norm
def norm_l1_tf(Z, shape, n_orient):
if Z.shape[0]:
n_positions = Z.shape[0] // n_orient
Z_ = np.sqrt(np.sum((np.abs(Z) ** 2.).reshape((n_orient, -1),
order='F'), axis=0))
Z_ = Z_.reshape((n_positions, -1), order='F').reshape(*shape)
l1_norm = (2. * Z_.sum(axis=2).sum(axis=1) - np.sum(Z_[:, 0, :],
axis=1) - np.sum(Z_[:, -1, :], axis=1))
l1_norm = l1_norm.sum()
else:
l1_norm = 0.
return l1_norm
@verbose
def _tf_mixed_norm_solver_bcd_(M, G, Z, active_set, alpha_space, alpha_time,
lipschitz_constant, phi, phiT,
wsize=64, tstep=4, n_orient=1,
maxit=200, tol=1e-8, log_objective=True,
perc=None, verbose=None):
# First make G fortran for faster access to blocks of columns
G = np.asfortranarray(G)
n_sensors, n_times = M.shape
n_sources = G.shape[1]
n_positions = n_sources // n_orient
n_step = int(ceil(n_times / float(tstep)))
n_freq = wsize // 2 + 1
shape = (-1, n_freq, n_step)
G = dict(zip(np.arange(n_positions), np.hsplit(G, n_positions)))
R = M.copy() # residual
active = np.where(active_set)[0][::n_orient] // n_orient
for idx in active:
R -= np.dot(G[idx], phiT(Z[idx]))
E = [] # track cost function
alpha_time_lc = alpha_time / lipschitz_constant
alpha_space_lc = alpha_space / lipschitz_constant
converged = False
for i in range(maxit):
val_norm_l21_tf = 0.0
val_norm_l1_tf = 0.0
max_diff = 0.0
active_set_0 = active_set.copy()
for j in range(n_positions):
ids = j * n_orient
ide = ids + n_orient
G_j = G[j]
Z_j = Z[j]
active_set_j = active_set[ids:ide]
Z0 = deepcopy(Z_j)
was_active = np.any(active_set_j)
# gradient step
GTR = np.dot(G_j.T, R) / lipschitz_constant[j]
X_j_new = GTR.copy()
if was_active:
X_j = phiT(Z_j)
R += np.dot(G_j, X_j)
X_j_new += X_j
rows_norm = linalg.norm(X_j_new, 'fro')
if rows_norm <= alpha_space_lc[j]:
if was_active:
Z[j] = 0.0
active_set_j[:] = False
else:
if was_active:
Z_j_new = Z_j + phi(GTR)
else:
Z_j_new = phi(GTR)
col_norm = np.sqrt(np.sum(np.abs(Z_j_new) ** 2, axis=0))
if np.all(col_norm <= alpha_time_lc[j]):
Z[j] = 0.0
active_set_j[:] = False
else:
# l1
shrink = np.maximum(1.0 - alpha_time_lc[j] / np.maximum(
col_norm, alpha_time_lc[j]), 0.0)
Z_j_new *= shrink[np.newaxis, :]
# l21
shape_init = Z_j_new.shape
Z_j_new = Z_j_new.reshape(*shape)
row_norm = np.sqrt(stft_norm2(Z_j_new).sum())
if row_norm <= alpha_space_lc[j]:
Z[j] = 0.0
active_set_j[:] = False
else:
shrink = np.maximum(1.0 - alpha_space_lc[j] /
np.maximum(row_norm,
alpha_space_lc[j]), 0.0)
Z_j_new *= shrink
Z[j] = Z_j_new.reshape(-1, *shape_init[1:]).copy()
active_set_j[:] = True
R -= np.dot(G_j, phiT(Z[j]))
if log_objective:
val_norm_l21_tf += norm_l21_tf(
Z[j], shape, n_orient)
val_norm_l1_tf += norm_l1_tf(
Z[j], shape, n_orient)
max_diff = np.maximum(max_diff, np.max(np.abs(Z[j] - Z0)))
if log_objective: # log cost function value
pobj = (0.5 * (R ** 2.).sum() + alpha_space * val_norm_l21_tf +
alpha_time * val_norm_l1_tf)
E.append(pobj)
logger.info("Iteration %d :: pobj %f :: n_active %d" % (i + 1,
pobj, np.sum(active_set) / n_orient))
else:
logger.info("Iteration %d" % (i + 1))
if perc is not None:
if np.sum(active_set) / float(n_orient) <= perc * n_positions:
break
if np.array_equal(active_set, active_set_0):
if max_diff < tol:
logger.info("Convergence reached !")
converged = True
break
return Z, active_set, E, converged
@verbose
def _tf_mixed_norm_solver_bcd_active_set(
M, G, alpha_space, alpha_time, lipschitz_constant, phi, phiT,
Z_init=None, wsize=64, tstep=4, n_orient=1, maxit=200, tol=1e-8,
log_objective=True, perc=None, verbose=None):
"""Solves TF L21+L1 inverse solver with BCD and active set approach
Algorithm is detailed in:
Strohmeier D., Gramfort A., and Haueisen J.:
MEG/EEG source imaging with a non-convex penalty in the time-
frequency domain,
5th International Workshop on Pattern Recognition in Neuroimaging,
Stanford University, 2015
Parameters
----------
M : array
The data.
G : array
The forward operator.
alpha_space : float in [0, 100]
Regularization parameter for spatial sparsity. If larger than 100,
then no source will be active.
    alpha_time : float in [0, 100]
        Regularization parameter for temporal sparsity. If set to 0,
        no temporal regularization is applied. In this case, TF-MxNE is
        equivalent to MxNE with the L21 norm.
lipschitz_constant : float
        The Lipschitz constant of the spatio-temporal linear operator.
phi : instance of _Phi
The TF operator.
phiT : instance of _PhiT
The transpose of the TF operator.
Z_init : None | array
The initialization of the TF coefficient matrix. If None, zeros
will be used for all coefficients.
    wsize : int
        Length of the STFT window in samples (must be a multiple of 4).
    tstep : int
        Step between successive windows in samples (must be a multiple of 2,
        a divisor of wsize, and smaller than wsize/2) (default: wsize/2).
n_orient : int
        The number of orientations (1 for fixed, or 3 for free or loose orientation).
maxit : int
The number of iterations.
tol : float
If absolute difference between estimates at 2 successive iterations
is lower than tol, the convergence is reached.
log_objective : bool
If True, the value of the minimized objective function is computed
and stored at every iteration.
perc : None | float in [0, 1]
The early stopping parameter used for BCD with active set approach.
If the active set size is smaller than perc * n_sources, the
subproblem limited to the active set is stopped. If None, full
convergence will be achieved.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
X : array
The source estimates.
active_set : array
The mask of active sources.
E : list
The value of the objective function at each iteration. If log_objective
is False, it will be empty.
"""
n_sources = G.shape[1]
n_positions = n_sources // n_orient
if Z_init is None:
Z = dict.fromkeys(range(n_positions), 0.0)
active_set = np.zeros(n_sources, dtype=np.bool)
else:
active_set = np.zeros(n_sources, dtype=np.bool)
active = list()
for i in range(n_positions):
if np.any(Z_init[i * n_orient:(i + 1) * n_orient]):
active_set[i * n_orient:(i + 1) * n_orient] = True
active.append(i)
Z = dict.fromkeys(range(n_positions), 0.0)
if len(active):
Z.update(dict(zip(active, np.vsplit(Z_init[active_set],
len(active)))))
Z, active_set, E, _ = _tf_mixed_norm_solver_bcd_(
M, G, Z, active_set, alpha_space, alpha_time, lipschitz_constant,
phi, phiT, wsize=wsize, tstep=tstep, n_orient=n_orient, maxit=1,
tol=tol, log_objective=log_objective, perc=None, verbose=verbose)
while active_set.sum():
active = np.where(active_set)[0][::n_orient] // n_orient
Z_init = dict(zip(range(len(active)), [Z[idx] for idx in active]))
Z, as_, E_tmp, converged = _tf_mixed_norm_solver_bcd_(
M, G[:, active_set], Z_init,
np.ones(len(active) * n_orient, dtype=np.bool),
alpha_space, alpha_time,
lipschitz_constant[active_set[::n_orient]],
phi, phiT, wsize=wsize, tstep=tstep, n_orient=n_orient,
maxit=maxit, tol=tol, log_objective=log_objective,
perc=0.5, verbose=verbose)
E += E_tmp
active = np.where(active_set)[0][::n_orient] // n_orient
Z_init = dict.fromkeys(range(n_positions), 0.0)
Z_init.update(dict(zip(active, Z.values())))
active_set[active_set] = as_
active_set_0 = active_set.copy()
Z, active_set, E_tmp, _ = _tf_mixed_norm_solver_bcd_(
M, G, Z_init, active_set, alpha_space, alpha_time,
lipschitz_constant, phi, phiT, wsize=wsize, tstep=tstep,
n_orient=n_orient, maxit=1, tol=tol, log_objective=log_objective,
perc=None, verbose=verbose)
E += E_tmp
if converged:
if np.array_equal(active_set_0, active_set):
break
if active_set.sum():
Z = np.vstack([Z_ for Z_ in list(Z.values()) if np.any(Z_)])
X = phiT(Z)
else:
n_sensors, n_times = M.shape
n_step = int(ceil(n_times / float(tstep)))
n_freq = wsize // 2 + 1
Z = np.zeros((0, n_step * n_freq), dtype=np.complex)
X = np.zeros((0, n_times))
return X, Z, active_set, E
@verbose
def tf_mixed_norm_solver(M, G, alpha_space, alpha_time, wsize=64, tstep=4,
n_orient=1, maxit=200, tol=1e-8, log_objective=True,
debias=True, verbose=None):
"""Solves TF L21+L1 inverse solver with BCD and active set approach
Algorithm is detailed in:
A. Gramfort, D. Strohmeier, J. Haueisen, M. Hamalainen, M. Kowalski
Time-Frequency Mixed-Norm Estimates: Sparse M/EEG imaging with
non-stationary source activations
Neuroimage, Volume 70, 15 April 2013, Pages 410-422, ISSN 1053-8119,
DOI: 10.1016/j.neuroimage.2012.12.051.
Functional Brain Imaging with M/EEG Using Structured Sparsity in
Time-Frequency Dictionaries
Gramfort A., Strohmeier D., Haueisen J., Hamalainen M. and Kowalski M.
INFORMATION PROCESSING IN MEDICAL IMAGING
Lecture Notes in Computer Science, 2011, Volume 6801/2011,
600-611, DOI: 10.1007/978-3-642-22092-0_49
http://dx.doi.org/10.1007/978-3-642-22092-0_49
Parameters
----------
M : array, shape (n_sensors, n_times)
The data.
G : array, shape (n_sensors, n_dipoles)
The gain matrix a.k.a. lead field.
alpha_space : float
The spatial regularization parameter. It should be between 0 and 100.
alpha_time : float
The temporal regularization parameter. The higher it is the smoother
will be the estimated time series.
    wsize : int
        Length of the STFT window in samples (must be a multiple of 4).
    tstep : int
        Step between successive windows in samples (must be a multiple of 2,
        a divisor of wsize, and smaller than wsize/2) (default: wsize/2).
n_orient : int
        The number of orientations (1 for fixed, or 3 for free or loose orientation).
maxit : int
The number of iterations.
tol : float
If absolute difference between estimates at 2 successive iterations
is lower than tol, the convergence is reached.
log_objective : bool
If True, the value of the minimized objective function is computed
and stored at every iteration.
debias : bool
Debias source estimates.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
X : array, shape (n_active, n_times)
The source estimates.
active_set : array
The mask of active sources.
E : list
The value of the objective function at each iteration. If log_objective
is False, it will be empty.
"""
n_sensors, n_times = M.shape
n_sensors, n_sources = G.shape
n_positions = n_sources // n_orient
n_step = int(ceil(n_times / float(tstep)))
n_freq = wsize // 2 + 1
n_coefs = n_step * n_freq
phi = _Phi(wsize, tstep, n_coefs)
phiT = _PhiT(tstep, n_freq, n_step, n_times)
if n_orient == 1:
lc = np.sum(G * G, axis=0)
else:
lc = np.empty(n_positions)
for j in range(n_positions):
G_tmp = G[:, (j * n_orient):((j + 1) * n_orient)]
lc[j] = linalg.norm(np.dot(G_tmp.T, G_tmp), ord=2)
logger.info("Using block coordinate descent and active set approach")
X, Z, active_set, E = _tf_mixed_norm_solver_bcd_active_set(
M, G, alpha_space, alpha_time, lc, phi, phiT, Z_init=None,
wsize=wsize, tstep=tstep, n_orient=n_orient, maxit=maxit, tol=tol,
log_objective=log_objective, verbose=None)
if np.any(active_set) and debias:
bias = compute_bias(M, G[:, active_set], X, n_orient=n_orient)
X *= bias[:, np.newaxis]
return X, active_set, E
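# Editor's example (not part of the original module): a hedged sketch of
# calling tf_mixed_norm_solver on synthetic data.  The problem sizes and the
# two regularization values are illustrative assumptions only.
def _example_tf_mixed_norm_solver():
    rng = np.random.RandomState(42)
    G = rng.randn(20, 30)                        # gain matrix
    M = np.dot(G[:, :2], rng.randn(2, 64))       # data driven by two sources
    X, active_set, E = tf_mixed_norm_solver(
        M, G, alpha_space=10., alpha_time=1., wsize=16, tstep=4,
        n_orient=1, maxit=50, tol=1e-4)
    return X, active_set, E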
| bsd-3-clause |
dimroc/tensorflow-mnist-tutorial | lib/python3.6/site-packages/matplotlib/colors.py | 6 | 66672 | """
A module for converting numbers or color arguments to *RGB* or *RGBA*
*RGB* and *RGBA* are sequences of, respectively, 3 or 4 floats in the
range 0-1.
This module includes functions and classes for color specification
conversions, and for mapping numbers to colors in a 1-D array of colors called
a colormap. Colormapping typically involves two steps: a data array is first
mapped onto the range 0-1 using an instance of :class:`Normalize` or of a
subclass; then this number in the 0-1 range is mapped to a color using an
instance of a subclass of :class:`Colormap`. Two are provided here:
:class:`LinearSegmentedColormap`, which is used to generate all the built-in
colormap instances, but is also useful for making custom colormaps, and
:class:`ListedColormap`, which is used for generating a custom colormap from a
list of color specifications.
The module also provides functions for checking whether an object can be
interpreted as a color (:func:`is_color_like`), for converting such an object
to an RGBA tuple (:func:`to_rgba`) or to an HTML-like hex string in the
`#rrggbb` format (:func:`to_hex`), and a sequence of colors to an `(n, 4)`
RGBA array (:func:`to_rgba_array`). Caching is used for efficiency.
Commands which take color arguments can use several formats to specify
the colors. For the basic built-in colors, you can use a single letter
- `b`: blue
- `g`: green
- `r`: red
- `c`: cyan
- `m`: magenta
- `y`: yellow
- `k`: black
- `w`: white
To use the colors that are part of the active color cycle in the current style,
use `C` followed by a digit. For example:
- `C0`: The first color in the cycle
- `C1`: The second color in the cycle
Gray shades can be given as a string encoding a float in the 0-1 range, e.g.::
color = '0.75'
For a greater range of colors, you have two options. You can specify the
color using an html hex string, as in::
color = '#eeefff'
(possibly specifying an alpha value as well), or you can pass an `(r, g, b)`
or `(r, g, b, a)` tuple, where each of `r`, `g`, `b` and `a` are in the range
[0,1].
Finally, legal html names for colors, like 'red', 'burlywood' and 'chartreuse'
are supported.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import re
import six
from six.moves import zip
import warnings
import numpy as np
from numpy import ma
import matplotlib.cbook as cbook
from ._color_data import BASE_COLORS, TABLEAU_COLORS, CSS4_COLORS, XKCD_COLORS
class _ColorMapping(dict):
def __init__(self, mapping):
super(_ColorMapping, self).__init__(mapping)
self.cache = {}
def __setitem__(self, key, value):
super(_ColorMapping, self).__setitem__(key, value)
self.cache.clear()
    def __delitem__(self, key):
        super(_ColorMapping, self).__delitem__(key)
self.cache.clear()
_colors_full_map = {}
# Set by reverse priority order.
_colors_full_map.update(XKCD_COLORS)
_colors_full_map.update({k.replace('grey', 'gray'): v
for k, v in XKCD_COLORS.items()
if 'grey' in k})
_colors_full_map.update(CSS4_COLORS)
_colors_full_map.update(TABLEAU_COLORS)
_colors_full_map.update({k.replace('gray', 'grey'): v
for k, v in TABLEAU_COLORS.items()
if 'gray' in k})
_colors_full_map.update(BASE_COLORS)
_colors_full_map = _ColorMapping(_colors_full_map)
def get_named_colors_mapping():
"""Return the global mapping of names to named colors.
"""
return _colors_full_map
def _is_nth_color(c):
"""Return whether `c` can be interpreted as an item in the color cycle.
"""
return isinstance(c, six.string_types) and re.match(r"\AC[0-9]\Z", c)
def is_color_like(c):
"""Return whether `c` can be interpreted as an RGB(A) color.
"""
# Special-case nth color syntax because it cannot be parsed during
# setup.
if _is_nth_color(c):
return True
try:
to_rgba(c)
except ValueError:
return False
else:
return True
def to_rgba(c, alpha=None):
"""Convert `c` to an RGBA color.
If `alpha` is not `None`, it forces the alpha value, except if `c` is
"none" (case-insensitive), which always maps to `(0, 0, 0, 0)`.
"""
# Special-case nth color syntax because it should not be cached.
if _is_nth_color(c):
from matplotlib import rcParams
prop_cycler = rcParams['axes.prop_cycle']
colors = prop_cycler.by_key().get('color', ['k'])
c = colors[int(c[1]) % len(colors)]
try:
rgba = _colors_full_map.cache[c, alpha]
except (KeyError, TypeError): # Not in cache, or unhashable.
rgba = _to_rgba_no_colorcycle(c, alpha)
try:
_colors_full_map.cache[c, alpha] = rgba
except TypeError:
pass
return rgba
def _to_rgba_no_colorcycle(c, alpha=None):
"""Convert `c` to an RGBA color, with no support for color-cycle syntax.
If `alpha` is not `None`, it forces the alpha value, except if `c` is
"none" (case-insensitive), which always maps to `(0, 0, 0, 0)`.
"""
orig_c = c
if isinstance(c, six.string_types):
if c.lower() == "none":
return (0., 0., 0., 0.)
# Named color.
try:
# This may turn c into a non-string, so we check again below.
c = _colors_full_map[c.lower()]
except KeyError:
pass
if isinstance(c, six.string_types):
# hex color with no alpha.
match = re.match(r"\A#[a-fA-F0-9]{6}\Z", c)
if match:
return (tuple(int(n, 16) / 255
for n in [c[1:3], c[3:5], c[5:7]])
+ (alpha if alpha is not None else 1.,))
# hex color with alpha.
match = re.match(r"\A#[a-fA-F0-9]{8}\Z", c)
if match:
color = [int(n, 16) / 255
for n in [c[1:3], c[3:5], c[5:7], c[7:9]]]
if alpha is not None:
color[-1] = alpha
return tuple(color)
# string gray.
try:
return (float(c),) * 3 + (alpha if alpha is not None else 1.,)
except ValueError:
pass
raise ValueError("Invalid RGBA argument: {!r}".format(orig_c))
# tuple color.
# Python 2.7 / numpy 1.6 apparently require this to return builtin floats,
# not numpy floats.
try:
c = tuple(map(float, c))
except TypeError:
raise ValueError("Invalid RGBA argument: {!r}".format(orig_c))
if len(c) not in [3, 4]:
raise ValueError("RGBA sequence should have length 3 or 4")
if len(c) == 3 and alpha is None:
alpha = 1
if alpha is not None:
c = c[:3] + (alpha,)
if any(elem < 0 or elem > 1 for elem in c):
raise ValueError("RGBA values should be within 0-1 range")
return c
def to_rgba_array(c, alpha=None):
"""Convert `c` to a (n, 4) array of RGBA colors.
If `alpha` is not `None`, it forces the alpha value. If `c` is "none"
(case-insensitive) or an empty list, an empty array is returned.
"""
# Single value?
if isinstance(c, six.string_types) and c.lower() == "none":
return np.zeros((0, 4), float)
try:
return np.array([to_rgba(c, alpha)], float)
except (ValueError, TypeError):
pass
# Special-case inputs that are already arrays, for performance. (If the
# array has the wrong kind or shape, raise the error during one-at-a-time
# conversion.)
if (isinstance(c, np.ndarray) and c.dtype.kind in "if"
and c.ndim == 2 and c.shape[1] in [3, 4]):
if c.shape[1] == 3:
result = np.column_stack([c, np.zeros(len(c))])
result[:, -1] = alpha if alpha is not None else 1.
elif c.shape[1] == 4:
result = c.copy()
if alpha is not None:
result[:, -1] = alpha
if np.any((result < 0) | (result > 1)):
raise ValueError("RGBA values should be within 0-1 range")
return result
# Convert one at a time.
result = np.empty((len(c), 4), float)
for i, cc in enumerate(c):
result[i] = to_rgba(cc, alpha)
return result
def to_rgb(c):
"""Convert `c` to an RGB color, silently dropping the alpha channel.
"""
return to_rgba(c)[:3]
def to_hex(c, keep_alpha=False):
"""Convert `c` to a hex color.
Uses the #rrggbb format if `keep_alpha` is False (the default), `#rrggbbaa`
otherwise.
"""
c = to_rgba(c)
if not keep_alpha:
c = c[:3]
return "#" + "".join(format(int(np.round(val * 255)), "02x")
for val in c)
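# Editor's example (not part of the original module): a few illustrative
# conversions using the helpers defined above.
def _example_color_conversions():
    rgba = to_rgba('red', alpha=0.5)          # -> (1.0, 0.0, 0.0, 0.5)
    rgb = to_rgb('#eeefff')                   # hex string -> 3-tuple of floats
    hex_str = to_hex((0.0, 1.0, 0.0))         # -> '#00ff00'
    arr = to_rgba_array(['r', 'g', 'b'])      # -> float array of shape (3, 4)
    return rgba, rgb, hex_str, arr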
### Backwards-compatible color-conversion API
cnames = CSS4_COLORS
hexColorPattern = re.compile("\A#[a-fA-F0-9]{6}\Z")
def rgb2hex(c):
'Given an rgb or rgba sequence of 0-1 floats, return the hex string'
return to_hex(c)
def hex2color(c):
"""
    Take a hex string *c* and return the corresponding rgb 3-tuple
    Example: #efefef -> (0.93725, 0.93725, 0.93725)
"""
return ColorConverter.to_rgb(c)
class ColorConverter(object):
"""
Provides methods for converting color specifications to *RGB* or *RGBA*
Caching is used for more efficient conversion upon repeated calls
with the same argument.
Ordinarily only the single instance instantiated in this module,
*colorConverter*, is needed.
"""
colors = _colors_full_map
cache = _colors_full_map.cache
@staticmethod
def to_rgb(arg):
"""
Returns an *RGB* tuple of three floats from 0-1.
*arg* can be an *RGB* or *RGBA* sequence or a string in any of
several forms:
1) a letter from the set 'rgbcmykw'
2) a hex color string, like '#00FFFF'
3) a standard name, like 'aqua'
4) a string representation of a float, like '0.4',
indicating gray on a 0-1 scale
if *arg* is *RGBA*, the *A* will simply be discarded.
"""
return to_rgb(arg)
@staticmethod
def to_rgba(arg, alpha=None):
"""
Returns an *RGBA* tuple of four floats from 0-1.
For acceptable values of *arg*, see :meth:`to_rgb`.
In addition, if *arg* is "none" (case-insensitive),
then (0,0,0,0) will be returned.
If *arg* is an *RGBA* sequence and *alpha* is not *None*,
*alpha* will replace the original *A*.
"""
return to_rgba(arg, alpha)
@staticmethod
def to_rgba_array(arg, alpha=None):
"""
Returns a numpy array of *RGBA* tuples.
Accepts a single mpl color spec or a sequence of specs.
Special case to handle "no color": if *c* is "none" (case-insensitive),
then an empty array will be returned. Same for an empty list.
"""
return to_rgba_array(arg, alpha)
colorConverter = ColorConverter()
### End of backwards-compatible color-conversion API
def makeMappingArray(N, data, gamma=1.0):
"""Create an *N* -element 1-d lookup table
*data* represented by a list of x,y0,y1 mapping correspondences.
Each element in this list represents how a value between 0 and 1
(inclusive) represented by x is mapped to a corresponding value
between 0 and 1 (inclusive). The two values of y are to allow
for discontinuous mapping functions (say as might be found in a
sawtooth) where y0 represents the value of y for values of x
<= to that given, and y1 is the value to be used for x > than
that given). The list must start with x=0, end with x=1, and
all values of x must be in increasing order. Values between
the given mapping points are determined by simple linear interpolation.
Alternatively, data can be a function mapping values between 0 - 1
to 0 - 1.
The function returns an array "result" where ``result[x*(N-1)]``
gives the closest value for values of x between 0 and 1.
"""
if six.callable(data):
xind = np.linspace(0, 1, N) ** gamma
lut = np.clip(np.array(data(xind), dtype=np.float), 0, 1)
return lut
try:
adata = np.array(data)
except:
raise TypeError("data must be convertable to an array")
shape = adata.shape
if len(shape) != 2 or shape[1] != 3:
raise ValueError("data must be nx3 format")
x = adata[:, 0]
y0 = adata[:, 1]
y1 = adata[:, 2]
if x[0] != 0. or x[-1] != 1.0:
raise ValueError(
"data mapping points must start with x=0. and end with x=1")
if np.sometrue(np.sort(x) - x):
raise ValueError(
"data mapping points must have x in increasing order")
# begin generation of lookup table
x = x * (N - 1)
lut = np.zeros((N,), np.float)
xind = (N - 1) * np.linspace(0, 1, N) ** gamma
ind = np.searchsorted(x, xind)[1:-1]
distance = (xind[1:-1] - x[ind - 1]) / (x[ind] - x[ind - 1])
lut[1:-1] = distance * (y0[ind] - y1[ind - 1]) + y1[ind - 1]
lut[0] = y1[0]
lut[-1] = y0[-1]
# ensure that the lut is confined to values between 0 and 1 by clipping it
return np.clip(lut, 0.0, 1.0)
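# Editor's example (not part of the original module): building a small lookup
# table from (x, y0, y1) mapping points with makeMappingArray.  The mapping
# below ramps a channel from 0 to 1 over the lower half of the domain.
def _example_makeMappingArray():
    data = [(0.0, 0.0, 0.0),
            (0.5, 1.0, 1.0),
            (1.0, 1.0, 1.0)]
    lut = makeMappingArray(8, data)   # 8-entry table with values in [0, 1]
    return lut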
class Colormap(object):
"""
Baseclass for all scalar to RGBA mappings.
Typically Colormap instances are used to convert data values (floats) from
the interval ``[0, 1]`` to the RGBA color that the respective Colormap
represents. For scaling of data into the ``[0, 1]`` interval see
:class:`matplotlib.colors.Normalize`. It is worth noting that
:class:`matplotlib.cm.ScalarMappable` subclasses make heavy use of this
``data->normalize->map-to-color`` processing chain.
"""
def __init__(self, name, N=256):
"""
Parameters
----------
name : str
The name of the colormap.
N : int
The number of rgb quantization levels.
"""
self.name = name
self.N = int(N) # ensure that N is always int
self._rgba_bad = (0.0, 0.0, 0.0, 0.0) # If bad, don't paint anything.
self._rgba_under = None
self._rgba_over = None
self._i_under = self.N
self._i_over = self.N + 1
self._i_bad = self.N + 2
self._isinit = False
#: When this colormap exists on a scalar mappable and colorbar_extend
#: is not False, colorbar creation will pick up ``colorbar_extend`` as
#: the default value for the ``extend`` keyword in the
#: :class:`matplotlib.colorbar.Colorbar` constructor.
self.colorbar_extend = False
def __call__(self, X, alpha=None, bytes=False):
"""
Parameters
----------
X : scalar, ndarray
The data value(s) to convert to RGBA.
For floats, X should be in the interval ``[0.0, 1.0]`` to
return the RGBA values ``X*100`` percent along the Colormap line.
For integers, X should be in the interval ``[0, Colormap.N)`` to
return RGBA values *indexed* from the Colormap with index ``X``.
alpha : float, None
Alpha must be a scalar between 0 and 1, or None.
bytes : bool
If False (default), the returned RGBA values will be floats in the
interval ``[0, 1]`` otherwise they will be uint8s in the interval
``[0, 255]``.
Returns
-------
        Tuple of RGBA values if X is scalar, otherwise an array of
RGBA values with a shape of ``X.shape + (4, )``.
"""
# See class docstring for arg/kwarg documentation.
if not self._isinit:
self._init()
mask_bad = None
if not cbook.iterable(X):
vtype = 'scalar'
xa = np.array([X])
else:
vtype = 'array'
xma = ma.array(X, copy=True) # Copy here to avoid side effects.
mask_bad = xma.mask # Mask will be used below.
xa = xma.filled() # Fill to avoid infs, etc.
del xma
# Calculations with native byteorder are faster, and avoid a
# bug that otherwise can occur with putmask when the last
# argument is a numpy scalar.
if not xa.dtype.isnative:
xa = xa.byteswap().newbyteorder()
if xa.dtype.kind == "f":
# Treat 1.0 as slightly less than 1.
vals = np.array([1, 0], dtype=xa.dtype)
almost_one = np.nextafter(*vals)
cbook._putmask(xa, xa == 1.0, almost_one)
# The following clip is fast, and prevents possible
# conversion of large positive values to negative integers.
xa *= self.N
np.clip(xa, -1, self.N, out=xa)
# ensure that all 'under' values will still have negative
# value after casting to int
cbook._putmask(xa, xa < 0.0, -1)
xa = xa.astype(int)
# Set the over-range indices before the under-range;
# otherwise the under-range values get converted to over-range.
cbook._putmask(xa, xa > self.N - 1, self._i_over)
cbook._putmask(xa, xa < 0, self._i_under)
if mask_bad is not None:
if mask_bad.shape == xa.shape:
cbook._putmask(xa, mask_bad, self._i_bad)
elif mask_bad:
xa.fill(self._i_bad)
if bytes:
lut = (self._lut * 255).astype(np.uint8)
else:
lut = self._lut.copy() # Don't let alpha modify original _lut.
if alpha is not None:
alpha = min(alpha, 1.0) # alpha must be between 0 and 1
alpha = max(alpha, 0.0)
if bytes:
alpha = int(alpha * 255)
if (lut[-1] == 0).all():
lut[:-1, -1] = alpha
# All zeros is taken as a flag for the default bad
# color, which is no color--fully transparent. We
# don't want to override this.
else:
lut[:, -1] = alpha
# If the bad value is set to have a color, then we
# override its alpha just as for any other value.
rgba = np.empty(shape=xa.shape + (4,), dtype=lut.dtype)
lut.take(xa, axis=0, mode='clip', out=rgba)
if vtype == 'scalar':
rgba = tuple(rgba[0, :])
return rgba
def set_bad(self, color='k', alpha=None):
"""Set color to be used for masked values.
"""
self._rgba_bad = colorConverter.to_rgba(color, alpha)
if self._isinit:
self._set_extremes()
def set_under(self, color='k', alpha=None):
"""Set color to be used for low out-of-range values.
Requires norm.clip = False
"""
self._rgba_under = colorConverter.to_rgba(color, alpha)
if self._isinit:
self._set_extremes()
def set_over(self, color='k', alpha=None):
"""Set color to be used for high out-of-range values.
Requires norm.clip = False
"""
self._rgba_over = colorConverter.to_rgba(color, alpha)
if self._isinit:
self._set_extremes()
def _set_extremes(self):
if self._rgba_under:
self._lut[self._i_under] = self._rgba_under
else:
self._lut[self._i_under] = self._lut[0]
if self._rgba_over:
self._lut[self._i_over] = self._rgba_over
else:
self._lut[self._i_over] = self._lut[self.N - 1]
self._lut[self._i_bad] = self._rgba_bad
def _init(self):
"""Generate the lookup table, self._lut"""
raise NotImplementedError("Abstract class only")
def is_gray(self):
if not self._isinit:
self._init()
return (np.alltrue(self._lut[:, 0] == self._lut[:, 1]) and
np.alltrue(self._lut[:, 0] == self._lut[:, 2]))
def _resample(self, lutsize):
"""
Return a new color map with *lutsize* entries.
"""
raise NotImplementedError()
class LinearSegmentedColormap(Colormap):
"""Colormap objects based on lookup tables using linear segments.
The lookup table is generated using linear interpolation for each
primary color, with the 0-1 domain divided into any number of
segments.
"""
def __init__(self, name, segmentdata, N=256, gamma=1.0):
"""Create color map from linear mapping segments
segmentdata argument is a dictionary with a red, green and blue
entries. Each entry should be a list of *x*, *y0*, *y1* tuples,
forming rows in a table. Entries for alpha are optional.
Example: suppose you want red to increase from 0 to 1 over
the bottom half, green to do the same over the middle half,
and blue over the top half. Then you would use::
cdict = {'red': [(0.0, 0.0, 0.0),
(0.5, 1.0, 1.0),
(1.0, 1.0, 1.0)],
'green': [(0.0, 0.0, 0.0),
(0.25, 0.0, 0.0),
(0.75, 1.0, 1.0),
(1.0, 1.0, 1.0)],
'blue': [(0.0, 0.0, 0.0),
(0.5, 0.0, 0.0),
(1.0, 1.0, 1.0)]}
Each row in the table for a given color is a sequence of
*x*, *y0*, *y1* tuples. In each sequence, *x* must increase
monotonically from 0 to 1. For any input value *z* falling
between *x[i]* and *x[i+1]*, the output value of a given color
will be linearly interpolated between *y1[i]* and *y0[i+1]*::
row i: x y0 y1
/
/
row i+1: x y0 y1
Hence y0 in the first row and y1 in the last row are never used.
.. seealso::
:meth:`LinearSegmentedColormap.from_list`
Static method; factory function for generating a
smoothly-varying LinearSegmentedColormap.
:func:`makeMappingArray`
For information about making a mapping array.
"""
# True only if all colors in map are identical; needed for contouring.
self.monochrome = False
Colormap.__init__(self, name, N)
self._segmentdata = segmentdata
self._gamma = gamma
def _init(self):
self._lut = np.ones((self.N + 3, 4), np.float)
self._lut[:-3, 0] = makeMappingArray(
self.N, self._segmentdata['red'], self._gamma)
self._lut[:-3, 1] = makeMappingArray(
self.N, self._segmentdata['green'], self._gamma)
self._lut[:-3, 2] = makeMappingArray(
self.N, self._segmentdata['blue'], self._gamma)
if 'alpha' in self._segmentdata:
self._lut[:-3, 3] = makeMappingArray(
self.N, self._segmentdata['alpha'], 1)
self._isinit = True
self._set_extremes()
def set_gamma(self, gamma):
"""
Set a new gamma value and regenerate color map.
"""
self._gamma = gamma
self._init()
@staticmethod
def from_list(name, colors, N=256, gamma=1.0):
"""
Make a linear segmented colormap with *name* from a sequence
of *colors* which evenly transitions from colors[0] at val=0
to colors[-1] at val=1. *N* is the number of rgb quantization
levels.
Alternatively, a list of (value, color) tuples can be given
to divide the range unevenly.
"""
if not cbook.iterable(colors):
raise ValueError('colors must be iterable')
if cbook.iterable(colors[0]) and len(colors[0]) == 2 and \
not cbook.is_string_like(colors[0]):
# List of value, color pairs
vals, colors = list(zip(*colors))
else:
vals = np.linspace(0., 1., len(colors))
cdict = dict(red=[], green=[], blue=[], alpha=[])
for val, color in zip(vals, colors):
r, g, b, a = colorConverter.to_rgba(color)
cdict['red'].append((val, r, r))
cdict['green'].append((val, g, g))
cdict['blue'].append((val, b, b))
cdict['alpha'].append((val, a, a))
return LinearSegmentedColormap(name, cdict, N, gamma)
def _resample(self, lutsize):
"""
Return a new color map with *lutsize* entries.
"""
return LinearSegmentedColormap(self.name, self._segmentdata, lutsize)
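# Editor's example (not part of the original module): two illustrative ways of
# constructing a LinearSegmentedColormap with from_list.
def _example_linear_segmented_colormap():
    # Evenly spaced transition from blue through white to red.
    cmap1 = LinearSegmentedColormap.from_list('bwr_like', ['b', 'w', 'r'])
    # Uneven spacing via (value, color) pairs: white is placed at 0.25.
    cmap2 = LinearSegmentedColormap.from_list(
        'uneven', [(0.0, 'b'), (0.25, 'w'), (1.0, 'r')])
    return cmap1(0.5), cmap2(0.25)    # both approximately white RGBA tuples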
class ListedColormap(Colormap):
"""Colormap object generated from a list of colors.
This may be most useful when indexing directly into a colormap,
but it can also be used to generate special colormaps for ordinary
mapping.
"""
def __init__(self, colors, name='from_list', N=None):
"""
Make a colormap from a list of colors.
*colors*
a list of matplotlib color specifications,
or an equivalent Nx3 or Nx4 floating point array
(*N* rgb or rgba values)
*name*
a string to identify the colormap
*N*
the number of entries in the map. The default is *None*,
in which case there is one colormap entry for each
element in the list of colors. If::
N < len(colors)
the list will be truncated at *N*. If::
N > len(colors)
the list will be extended by repetition.
"""
self.colors = colors
self.monochrome = False # True only if all colors in map are
# identical; needed for contouring.
if N is None:
N = len(self.colors)
else:
if (cbook.is_string_like(self.colors) and
cbook.is_hashable(self.colors)):
self.colors = [self.colors] * N
self.monochrome = True
elif cbook.iterable(self.colors):
self.colors = list(self.colors) # in case it was a tuple
if len(self.colors) == 1:
self.monochrome = True
if len(self.colors) < N:
self.colors = list(self.colors) * N
del(self.colors[N:])
else:
try:
gray = float(self.colors)
except TypeError:
pass
else:
self.colors = [gray] * N
self.monochrome = True
Colormap.__init__(self, name, N)
def _init(self):
rgba = colorConverter.to_rgba_array(self.colors)
self._lut = np.zeros((self.N + 3, 4), np.float)
self._lut[:-3] = rgba
self._isinit = True
self._set_extremes()
def _resample(self, lutsize):
"""
Return a new color map with *lutsize* entries.
"""
colors = self(np.linspace(0, 1, lutsize))
return ListedColormap(colors, name=self.name)
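# Editor's example (not part of the original module): a discrete colormap
# built from an explicit list of colors; integer indices pick list entries.
def _example_listed_colormap():
    cmap = ListedColormap(['red', 'green', 'blue'], name='rgb3')
    return cmap(0), cmap(1), cmap(2)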
class Normalize(object):
"""
A class which, when called, can normalize data into
the ``[0.0, 1.0]`` interval.
"""
def __init__(self, vmin=None, vmax=None, clip=False):
"""
If *vmin* or *vmax* is not given, they are initialized from the
minimum and maximum value respectively of the first input
processed. That is, *__call__(A)* calls *autoscale_None(A)*.
If *clip* is *True* and the given value falls outside the range,
the returned value will be 0 or 1, whichever is closer.
Returns 0 if::
vmin==vmax
Works with scalars or arrays, including masked arrays. If
*clip* is *True*, masked values are set to 1; otherwise they
remain masked. Clipping silently defeats the purpose of setting
the over, under, and masked colors in the colormap, so it is
likely to lead to surprises; therefore the default is
*clip* = *False*.
"""
self.vmin = vmin
self.vmax = vmax
self.clip = clip
@staticmethod
def process_value(value):
"""
Homogenize the input *value* for easy and efficient normalization.
*value* can be a scalar or sequence.
Returns *result*, *is_scalar*, where *result* is a
masked array matching *value*. Float dtypes are preserved;
integer types with two bytes or smaller are converted to
np.float32, and larger types are converted to np.float.
Preserving float32 when possible, and using in-place operations,
can greatly improve speed for large arrays.
Experimental; we may want to add an option to force the
use of float32.
"""
is_scalar = not cbook.iterable(value)
if is_scalar:
value = [value]
dtype = np.min_scalar_type(value)
if np.issubdtype(dtype, np.integer) or dtype.type is np.bool_:
# bool_/int8/int16 -> float32; int32/int64 -> float64
dtype = np.promote_types(dtype, np.float32)
result = np.ma.array(value, dtype=dtype, copy=True)
return result, is_scalar
def __call__(self, value, clip=None):
"""
Normalize *value* data in the ``[vmin, vmax]`` interval into
the ``[0.0, 1.0]`` interval and return it. *clip* defaults
to *self.clip* (which defaults to *False*). If not already
initialized, *vmin* and *vmax* are initialized using
*autoscale_None(value)*.
"""
if clip is None:
clip = self.clip
result, is_scalar = self.process_value(value)
self.autoscale_None(result)
# Convert at least to float, without losing precision.
(vmin,), _ = self.process_value(self.vmin)
(vmax,), _ = self.process_value(self.vmax)
if vmin == vmax:
result.fill(0) # Or should it be all masked? Or 0.5?
elif vmin > vmax:
raise ValueError("minvalue must be less than or equal to maxvalue")
else:
if clip:
mask = ma.getmask(result)
result = ma.array(np.clip(result.filled(vmax), vmin, vmax),
mask=mask)
# ma division is very slow; we can take a shortcut
# use np.asarray so data passed in as an ndarray subclass are
# interpreted as an ndarray. See issue #6622.
resdat = np.asarray(result.data)
resdat -= vmin
resdat /= (vmax - vmin)
result = np.ma.array(resdat, mask=result.mask, copy=False)
if is_scalar:
result = result[0]
return result
def inverse(self, value):
if not self.scaled():
raise ValueError("Not invertible until scaled")
(vmin,), _ = self.process_value(self.vmin)
(vmax,), _ = self.process_value(self.vmax)
if cbook.iterable(value):
val = ma.asarray(value)
return vmin + val * (vmax - vmin)
else:
return vmin + value * (vmax - vmin)
def autoscale(self, A):
"""
Set *vmin*, *vmax* to min, max of *A*.
"""
self.vmin = ma.min(A)
self.vmax = ma.max(A)
def autoscale_None(self, A):
' autoscale only None-valued vmin or vmax'
if self.vmin is None and np.size(A) > 0:
self.vmin = ma.min(A)
if self.vmax is None and np.size(A) > 0:
self.vmax = ma.max(A)
def scaled(self):
'return true if vmin and vmax set'
return (self.vmin is not None and self.vmax is not None)
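# Editor's example (not part of the original module): Normalize maps data
# linearly into [0, 1]; inverse maps back, and clip=True saturates
# out-of-range values.
def _example_normalize():
    norm = Normalize(vmin=0.0, vmax=10.0)
    scaled = norm([0.0, 5.0, 10.0])                  # -> [0.0, 0.5, 1.0]
    restored = norm.inverse(0.5)                     # -> 5.0
    clipped = Normalize(0.0, 10.0, clip=True)(15.0)  # -> 1.0
    return scaled, restored, clipped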
class LogNorm(Normalize):
"""
Normalize a given value to the 0-1 range on a log scale
"""
def __call__(self, value, clip=None):
if clip is None:
clip = self.clip
result, is_scalar = self.process_value(value)
result = ma.masked_less_equal(result, 0, copy=False)
self.autoscale_None(result)
vmin, vmax = self.vmin, self.vmax
if vmin > vmax:
raise ValueError("minvalue must be less than or equal to maxvalue")
elif vmin <= 0:
raise ValueError("values must all be positive")
elif vmin == vmax:
result.fill(0)
else:
if clip:
mask = ma.getmask(result)
result = ma.array(np.clip(result.filled(vmax), vmin, vmax),
mask=mask)
# in-place equivalent of above can be much faster
resdat = result.data
mask = result.mask
if mask is np.ma.nomask:
mask = (resdat <= 0)
else:
mask |= resdat <= 0
cbook._putmask(resdat, mask, 1)
np.log(resdat, resdat)
resdat -= np.log(vmin)
resdat /= (np.log(vmax) - np.log(vmin))
result = np.ma.array(resdat, mask=mask, copy=False)
if is_scalar:
result = result[0]
return result
def inverse(self, value):
if not self.scaled():
raise ValueError("Not invertible until scaled")
vmin, vmax = self.vmin, self.vmax
if cbook.iterable(value):
val = ma.asarray(value)
return vmin * ma.power((vmax / vmin), val)
else:
return vmin * pow((vmax / vmin), value)
def autoscale(self, A):
"""
Set *vmin*, *vmax* to min, max of *A*.
"""
A = ma.masked_less_equal(A, 0, copy=False)
self.vmin = ma.min(A)
self.vmax = ma.max(A)
def autoscale_None(self, A):
' autoscale only None-valued vmin or vmax'
if self.vmin is not None and self.vmax is not None:
return
A = ma.masked_less_equal(A, 0, copy=False)
if self.vmin is None:
self.vmin = ma.min(A)
if self.vmax is None:
self.vmax = ma.max(A)
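# Editor's example (not part of the original module): LogNorm scales on a log
# axis, so 100 lies two thirds of the way between vmin=1 and vmax=1000.
def _example_lognorm():
    norm = LogNorm(vmin=1.0, vmax=1000.0)
    val = norm(100.0)           # ~0.667 = log(100 / 1) / log(1000 / 1)
    back = norm.inverse(val)    # ~100.0
    return val, back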
class SymLogNorm(Normalize):
"""
The symmetrical logarithmic scale is logarithmic in both the
positive and negative directions from the origin.
Since the values close to zero tend toward infinity, there is a
need to have a range around zero that is linear. The parameter
*linthresh* allows the user to specify the size of this range
(-*linthresh*, *linthresh*).
"""
def __init__(self, linthresh, linscale=1.0,
vmin=None, vmax=None, clip=False):
"""
*linthresh*:
The range within which the plot is linear (to
avoid having the plot go to infinity around zero).
*linscale*:
This allows the linear range (-*linthresh* to *linthresh*)
to be stretched relative to the logarithmic range. Its
value is the number of decades to use for each half of the
linear range. For example, when *linscale* == 1.0 (the
default), the space used for the positive and negative
halves of the linear range will be equal to one decade in
the logarithmic range. Defaults to 1.
"""
Normalize.__init__(self, vmin, vmax, clip)
self.linthresh = float(linthresh)
self._linscale_adj = (linscale / (1.0 - np.e ** -1))
if vmin is not None and vmax is not None:
self._transform_vmin_vmax()
def __call__(self, value, clip=None):
if clip is None:
clip = self.clip
result, is_scalar = self.process_value(value)
self.autoscale_None(result)
vmin, vmax = self.vmin, self.vmax
if vmin > vmax:
raise ValueError("minvalue must be less than or equal to maxvalue")
elif vmin == vmax:
result.fill(0)
else:
if clip:
mask = ma.getmask(result)
result = ma.array(np.clip(result.filled(vmax), vmin, vmax),
mask=mask)
# in-place equivalent of above can be much faster
resdat = self._transform(result.data)
resdat -= self._lower
resdat /= (self._upper - self._lower)
if is_scalar:
result = result[0]
return result
def _transform(self, a):
"""
Inplace transformation.
"""
masked = np.abs(a) > self.linthresh
sign = np.sign(a[masked])
log = (self._linscale_adj + np.log(np.abs(a[masked]) / self.linthresh))
log *= sign * self.linthresh
a[masked] = log
a[~masked] *= self._linscale_adj
return a
def _inv_transform(self, a):
"""
Inverse inplace Transformation.
"""
masked = np.abs(a) > (self.linthresh * self._linscale_adj)
sign = np.sign(a[masked])
exp = np.exp(sign * a[masked] / self.linthresh - self._linscale_adj)
exp *= sign * self.linthresh
a[masked] = exp
a[~masked] /= self._linscale_adj
return a
def _transform_vmin_vmax(self):
"""
Calculates vmin and vmax in the transformed system.
"""
vmin, vmax = self.vmin, self.vmax
arr = np.array([vmax, vmin]).astype(np.float)
self._upper, self._lower = self._transform(arr)
def inverse(self, value):
if not self.scaled():
raise ValueError("Not invertible until scaled")
val = ma.asarray(value)
val = val * (self._upper - self._lower) + self._lower
return self._inv_transform(val)
def autoscale(self, A):
"""
Set *vmin*, *vmax* to min, max of *A*.
"""
self.vmin = ma.min(A)
self.vmax = ma.max(A)
self._transform_vmin_vmax()
def autoscale_None(self, A):
""" autoscale only None-valued vmin or vmax """
if self.vmin is not None and self.vmax is not None:
pass
if self.vmin is None:
self.vmin = ma.min(A)
if self.vmax is None:
self.vmax = ma.max(A)
self._transform_vmin_vmax()
class PowerNorm(Normalize):
"""
Normalize a given value to the ``[0, 1]`` interval with a power-law
scaling. This will clip any negative data points to 0.
"""
def __init__(self, gamma, vmin=None, vmax=None, clip=False):
Normalize.__init__(self, vmin, vmax, clip)
self.gamma = gamma
def __call__(self, value, clip=None):
if clip is None:
clip = self.clip
result, is_scalar = self.process_value(value)
self.autoscale_None(result)
gamma = self.gamma
vmin, vmax = self.vmin, self.vmax
if vmin > vmax:
raise ValueError("minvalue must be less than or equal to maxvalue")
elif vmin == vmax:
result.fill(0)
else:
res_mask = result.data < 0
if clip:
mask = ma.getmask(result)
result = ma.array(np.clip(result.filled(vmax), vmin, vmax),
mask=mask)
resdat = result.data
resdat -= vmin
np.power(resdat, gamma, resdat)
resdat /= (vmax - vmin) ** gamma
result = np.ma.array(resdat, mask=result.mask, copy=False)
result[res_mask] = 0
if is_scalar:
result = result[0]
return result
def inverse(self, value):
if not self.scaled():
raise ValueError("Not invertible until scaled")
gamma = self.gamma
vmin, vmax = self.vmin, self.vmax
if cbook.iterable(value):
val = ma.asarray(value)
return ma.power(val, 1. / gamma) * (vmax - vmin) + vmin
else:
return pow(value, 1. / gamma) * (vmax - vmin) + vmin
def autoscale(self, A):
"""
Set *vmin*, *vmax* to min, max of *A*.
"""
self.vmin = ma.min(A)
if self.vmin < 0:
self.vmin = 0
warnings.warn("Power-law scaling on negative values is "
"ill-defined, clamping to 0.")
self.vmax = ma.max(A)
def autoscale_None(self, A):
' autoscale only None-valued vmin or vmax'
if self.vmin is None and np.size(A) > 0:
self.vmin = ma.min(A)
if self.vmin < 0:
self.vmin = 0
warnings.warn("Power-law scaling on negative values is "
"ill-defined, clamping to 0.")
if self.vmax is None and np.size(A) > 0:
self.vmax = ma.max(A)
class BoundaryNorm(Normalize):
"""
Generate a colormap index based on discrete intervals.
Unlike :class:`Normalize` or :class:`LogNorm`,
:class:`BoundaryNorm` maps values to integers instead of to the
interval 0-1.
Mapping to the 0-1 interval could have been done via
piece-wise linear interpolation, but using integers seems
simpler, and reduces the number of conversions back and forth
between integer and floating point.
"""
def __init__(self, boundaries, ncolors, clip=False):
"""
*boundaries*
a monotonically increasing sequence
*ncolors*
number of colors in the colormap to be used
If::
b[i] <= v < b[i+1]
then v is mapped to color j;
as i varies from 0 to len(boundaries)-2,
j goes from 0 to ncolors-1.
Out-of-range values are mapped
to -1 if low and ncolors if high; these are converted
to valid indices by
:meth:`Colormap.__call__` .
If clip == True, out-of-range values
are mapped to 0 if low and ncolors-1 if high.
"""
self.clip = clip
self.vmin = boundaries[0]
self.vmax = boundaries[-1]
self.boundaries = np.asarray(boundaries)
self.N = len(self.boundaries)
self.Ncmap = ncolors
if self.N - 1 == self.Ncmap:
self._interp = False
else:
self._interp = True
def __call__(self, value, clip=None):
if clip is None:
clip = self.clip
xx, is_scalar = self.process_value(value)
mask = ma.getmaskarray(xx)
xx = np.atleast_1d(xx.filled(self.vmax + 1))
if clip:
np.clip(xx, self.vmin, self.vmax, out=xx)
max_col = self.Ncmap - 1
else:
max_col = self.Ncmap
iret = np.zeros(xx.shape, dtype=np.int16)
for i, b in enumerate(self.boundaries):
iret[xx >= b] = i
if self._interp:
scalefac = float(self.Ncmap - 1) / (self.N - 2)
iret = (iret * scalefac).astype(np.int16)
iret[xx < self.vmin] = -1
iret[xx >= self.vmax] = max_col
ret = ma.array(iret, mask=mask)
if is_scalar:
ret = int(ret[0]) # assume python scalar
return ret
def inverse(self, value):
return ValueError("BoundaryNorm is not invertible")
class NoNorm(Normalize):
"""
Dummy replacement for Normalize, for the case where we
want to use indices directly in a
:class:`~matplotlib.cm.ScalarMappable` .
"""
def __call__(self, value, clip=None):
return value
def inverse(self, value):
return value
def rgb_to_hsv(arr):
"""
convert float rgb values (in the range [0, 1]), in a numpy array to hsv
values.
Parameters
----------
arr : (..., 3) array-like
All values must be in the range [0, 1]
Returns
-------
hsv : (..., 3) ndarray
Colors converted to hsv values in range [0, 1]
"""
# make sure it is an ndarray
arr = np.asarray(arr)
# check length of the last dimension, should be _some_ sort of rgb
if arr.shape[-1] != 3:
raise ValueError("Last dimension of input array must be 3; "
"shape {shp} was found.".format(shp=arr.shape))
in_ndim = arr.ndim
if arr.ndim == 1:
arr = np.array(arr, ndmin=2)
# make sure we don't have an int image
if arr.dtype.kind in ('iu'):
arr = arr.astype(np.float32)
out = np.zeros_like(arr)
arr_max = arr.max(-1)
ipos = arr_max > 0
delta = arr.ptp(-1)
s = np.zeros_like(delta)
s[ipos] = delta[ipos] / arr_max[ipos]
ipos = delta > 0
# red is max
idx = (arr[..., 0] == arr_max) & ipos
out[idx, 0] = (arr[idx, 1] - arr[idx, 2]) / delta[idx]
# green is max
idx = (arr[..., 1] == arr_max) & ipos
out[idx, 0] = 2. + (arr[idx, 2] - arr[idx, 0]) / delta[idx]
# blue is max
idx = (arr[..., 2] == arr_max) & ipos
out[idx, 0] = 4. + (arr[idx, 0] - arr[idx, 1]) / delta[idx]
out[..., 0] = (out[..., 0] / 6.0) % 1.0
out[..., 1] = s
out[..., 2] = arr_max
if in_ndim == 1:
out.shape = (3,)
return out
def hsv_to_rgb(hsv):
"""
convert hsv values in a numpy array to rgb values
all values assumed to be in range [0, 1]
Parameters
----------
hsv : (..., 3) array-like
All values assumed to be in range [0, 1]
Returns
-------
rgb : (..., 3) ndarray
Colors converted to RGB values in range [0, 1]
"""
hsv = np.asarray(hsv)
# check length of the last dimension, should be _some_ sort of rgb
if hsv.shape[-1] != 3:
raise ValueError("Last dimension of input array must be 3; "
"shape {shp} was found.".format(shp=hsv.shape))
    # if we got passed a 1D array, try to treat it as
# a single color and reshape as needed
in_ndim = hsv.ndim
if in_ndim == 1:
hsv = np.array(hsv, ndmin=2)
# make sure we don't have an int image
if hsv.dtype.kind in ('iu'):
hsv = hsv.astype(np.float32)
h = hsv[..., 0]
s = hsv[..., 1]
v = hsv[..., 2]
r = np.empty_like(h)
g = np.empty_like(h)
b = np.empty_like(h)
i = (h * 6.0).astype(np.int)
f = (h * 6.0) - i
p = v * (1.0 - s)
q = v * (1.0 - s * f)
t = v * (1.0 - s * (1.0 - f))
idx = i % 6 == 0
r[idx] = v[idx]
g[idx] = t[idx]
b[idx] = p[idx]
idx = i == 1
r[idx] = q[idx]
g[idx] = v[idx]
b[idx] = p[idx]
idx = i == 2
r[idx] = p[idx]
g[idx] = v[idx]
b[idx] = t[idx]
idx = i == 3
r[idx] = p[idx]
g[idx] = q[idx]
b[idx] = v[idx]
idx = i == 4
r[idx] = t[idx]
g[idx] = p[idx]
b[idx] = v[idx]
idx = i == 5
r[idx] = v[idx]
g[idx] = p[idx]
b[idx] = q[idx]
idx = s == 0
r[idx] = v[idx]
g[idx] = v[idx]
b[idx] = v[idx]
rgb = np.empty_like(hsv)
rgb[..., 0] = r
rgb[..., 1] = g
rgb[..., 2] = b
if in_ndim == 1:
rgb.shape = (3, )
return rgb
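# Editor's example (not part of the original module): rgb <-> hsv round trip
# for a single color; pure red has hue 0 with full saturation and value.
def _example_rgb_hsv_roundtrip():
    hsv = rgb_to_hsv([1.0, 0.0, 0.0])    # -> array([0., 1., 1.])
    rgb = hsv_to_rgb(hsv)                # -> array([1., 0., 0.])
    return hsv, rgb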
class LightSource(object):
"""
Create a light source coming from the specified azimuth and elevation.
Angles are in degrees, with the azimuth measured
clockwise from north and elevation up from the zero plane of the surface.
    The :meth:`shade` method is used to produce "shaded" rgb values for a data
    array. :meth:`shade_rgb` can be used to combine an rgb image with an
    illumination intensity map. The :meth:`hillshade` method produces an
    illumination map of a surface.
"""
def __init__(self, azdeg=315, altdeg=45, hsv_min_val=0, hsv_max_val=1,
hsv_min_sat=1, hsv_max_sat=0):
"""
        Specify the azimuth (measured clockwise from north) and altitude
(measured up from the plane of the surface) of the light source
in degrees.
Parameters
----------
azdeg : number, optional
The azimuth (0-360, degrees clockwise from North) of the light
source. Defaults to 315 degrees (from the northwest).
altdeg : number, optional
The altitude (0-90, degrees up from horizontal) of the light
source. Defaults to 45 degrees from horizontal.
Notes
-----
For backwards compatibility, the parameters *hsv_min_val*,
*hsv_max_val*, *hsv_min_sat*, and *hsv_max_sat* may be supplied at
initialization as well. However, these parameters will only be used if
"blend_mode='hsv'" is passed into :meth:`shade` or :meth:`shade_rgb`.
See the documentation for :meth:`blend_hsv` for more details.
"""
self.azdeg = azdeg
self.altdeg = altdeg
self.hsv_min_val = hsv_min_val
self.hsv_max_val = hsv_max_val
self.hsv_min_sat = hsv_min_sat
self.hsv_max_sat = hsv_max_sat
def hillshade(self, elevation, vert_exag=1, dx=1, dy=1, fraction=1.):
"""
Calculates the illumination intensity for a surface using the defined
azimuth and elevation for the light source.
Imagine an artificial sun placed at infinity in some azimuth and
elevation position illuminating our surface. The parts of the surface
that slope toward the sun should brighten while those sides facing away
should become darker.
Parameters
----------
elevation : array-like
A 2d array (or equivalent) of the height values used to generate an
illumination map
vert_exag : number, optional
The amount to exaggerate the elevation values by when calculating
illumination. This can be used either to correct for differences in
units between the x-y coordinate system and the elevation
coordinate system (e.g. decimal degrees vs meters) or to exaggerate
or de-emphasize topographic effects.
dx : number, optional
The x-spacing (columns) of the input *elevation* grid.
dy : number, optional
The y-spacing (rows) of the input *elevation* grid.
fraction : number, optional
Increases or decreases the contrast of the hillshade. Values
greater than one will cause intermediate values to move closer to
full illumination or shadow (and clipping any values that move
beyond 0 or 1). Note that this is not visually or mathematically
the same as vertical exaggeration.
Returns
-------
intensity : ndarray
A 2d array of illumination values between 0-1, where 0 is
completely in shadow and 1 is completely illuminated.
"""
# Azimuth is in degrees clockwise from North. Convert to radians
# counterclockwise from East (mathematical notation).
az = np.radians(90 - self.azdeg)
alt = np.radians(self.altdeg)
# Because most image and raster GIS data has the first row in the array
# as the "top" of the image, dy is implicitly negative. This is
        # consistent with what `imshow` assumes, as well.
dy = -dy
# Calculate the intensity from the illumination angle
dy, dx = np.gradient(vert_exag * elevation, dy, dx)
# The aspect is defined by the _downhill_ direction, thus the negative
aspect = np.arctan2(-dy, -dx)
slope = 0.5 * np.pi - np.arctan(np.hypot(dx, dy))
intensity = (np.sin(alt) * np.sin(slope) +
np.cos(alt) * np.cos(slope) * np.cos(az - aspect))
# Apply contrast stretch
imin, imax = intensity.min(), intensity.max()
intensity *= fraction
# Rescale to 0-1, keeping range before contrast stretch
# If constant slope, keep relative scaling (i.e. flat should be 0.5,
# fully occluded 0, etc.)
if (imax - imin) > 1e-6:
# Strictly speaking, this is incorrect. Negative values should be
# clipped to 0 because they're fully occluded. However, rescaling
# in this manner is consistent with the previous implementation and
# visually appears better than a "hard" clip.
intensity -= imin
intensity /= (imax - imin)
intensity = np.clip(intensity, 0, 1, intensity)
return intensity
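    # Editor's note (not part of the original source): a hedged usage sketch
    # of hillshade with a synthetic elevation grid (grid values are
    # illustrative assumptions):
    #
    #     ls = LightSource(azdeg=315, altdeg=45)
    #     y, x = np.mgrid[0:64, 0:64]
    #     elev = np.sin(x / 10.0) * np.cos(y / 10.0)
    #     intensity = ls.hillshade(elev, vert_exag=10, dx=1, dy=1)
    #     # 'intensity' is a 2d array with values in the 0-1 range.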
def shade(self, data, cmap, norm=None, blend_mode='overlay', vmin=None,
vmax=None, vert_exag=1, dx=1, dy=1, fraction=1, **kwargs):
"""
Combine colormapped data values with an illumination intensity map
(a.k.a. "hillshade") of the values.
Parameters
----------
data : array-like
A 2d array (or equivalent) of the height values used to generate a
shaded map.
cmap : `~matplotlib.colors.Colormap` instance
The colormap used to color the *data* array. Note that this must be
a `~matplotlib.colors.Colormap` instance. For example, rather than
passing in `cmap='gist_earth'`, use
`cmap=plt.get_cmap('gist_earth')` instead.
norm : `~matplotlib.colors.Normalize` instance, optional
The normalization used to scale values before colormapping. If
None, the input will be linearly scaled between its min and max.
blend_mode : {'hsv', 'overlay', 'soft'} or callable, optional
The type of blending used to combine the colormapped data
values with the illumination intensity. Default is
"overlay". Note that for most topographic surfaces,
"overlay" or "soft" appear more visually realistic. If a
user-defined function is supplied, it is expected to
combine an MxNx3 RGB array of floats (ranging 0 to 1) with
an MxNx1 hillshade array (also 0 to 1). (Call signature
`func(rgb, illum, **kwargs)`) Additional kwargs supplied
to this function will be passed on to the *blend_mode*
function.
vmin : scalar or None, optional
The minimum value used in colormapping *data*. If *None* the
minimum value in *data* is used. If *norm* is specified, then this
argument will be ignored.
vmax : scalar or None, optional
The maximum value used in colormapping *data*. If *None* the
maximum value in *data* is used. If *norm* is specified, then this
argument will be ignored.
vert_exag : number, optional
The amount to exaggerate the elevation values by when calculating
illumination. This can be used either to correct for differences in
units between the x-y coordinate system and the elevation
coordinate system (e.g. decimal degrees vs meters) or to exaggerate
or de-emphasize topography.
dx : number, optional
The x-spacing (columns) of the input *elevation* grid.
dy : number, optional
The y-spacing (rows) of the input *elevation* grid.
fraction : number, optional
Increases or decreases the contrast of the hillshade. Values
greater than one will cause intermediate values to move closer to
full illumination or shadow (and clipping any values that move
beyond 0 or 1). Note that this is not visually or mathematically
the same as vertical exaggeration.
Additional kwargs are passed on to the *blend_mode* function.
Returns
-------
rgba : ndarray
An MxNx4 array of floats ranging between 0-1.
"""
if vmin is None:
vmin = data.min()
if vmax is None:
vmax = data.max()
if norm is None:
norm = Normalize(vmin=vmin, vmax=vmax)
rgb0 = cmap(norm(data))
rgb1 = self.shade_rgb(rgb0, elevation=data, blend_mode=blend_mode,
vert_exag=vert_exag, dx=dx, dy=dy,
fraction=fraction, **kwargs)
# Don't overwrite the alpha channel, if present.
rgb0[..., :3] = rgb1[..., :3]
return rgb0
def shade_rgb(self, rgb, elevation, fraction=1., blend_mode='hsv',
vert_exag=1, dx=1, dy=1, **kwargs):
"""
        Take the input RGB array (ny*nx*3) and adjust its color values
        to give the impression of a shaded relief map with a
        specified light source using the elevation (ny*nx).
        A new RGB array (ny*nx*3) is returned.
Parameters
----------
rgb : array-like
An MxNx3 RGB array, assumed to be in the range of 0 to 1.
elevation : array-like
A 2d array (or equivalent) of the height values used to generate a
shaded map.
fraction : number
Increases or decreases the contrast of the hillshade. Values
greater than one will cause intermediate values to move closer to
full illumination or shadow (and clipping any values that move
beyond 0 or 1). Note that this is not visually or mathematically
the same as vertical exaggeration.
blend_mode : {'hsv', 'overlay', 'soft'} or callable, optional
The type of blending used to combine the colormapped data values
with the illumination intensity. For backwards compatibility, this
defaults to "hsv". Note that for most topographic surfaces,
"overlay" or "soft" appear more visually realistic. If a
user-defined function is supplied, it is expected to combine an
MxNx3 RGB array of floats (ranging 0 to 1) with an MxNx1 hillshade
array (also 0 to 1). (Call signature `func(rgb, illum, **kwargs)`)
Additional kwargs supplied to this function will be passed on to
the *blend_mode* function.
vert_exag : number, optional
The amount to exaggerate the elevation values by when calculating
illumination. This can be used either to correct for differences in
units between the x-y coordinate system and the elevation
coordinate system (e.g. decimal degrees vs meters) or to exaggerate
or de-emphasize topography.
dx : number, optional
The x-spacing (columns) of the input *elevation* grid.
dy : number, optional
The y-spacing (rows) of the input *elevation* grid.
Additional kwargs are passed on to the *blend_mode* function.
Returns
-------
shaded_rgb : ndarray
An MxNx3 array of floats ranging between 0-1.
"""
# Calculate the "hillshade" intensity.
intensity = self.hillshade(elevation, vert_exag, dx, dy, fraction)
intensity = intensity[..., np.newaxis]
# Blend the hillshade and rgb data using the specified mode
lookup = {
'hsv': self.blend_hsv,
'soft': self.blend_soft_light,
'overlay': self.blend_overlay,
}
if blend_mode in lookup:
blend = lookup[blend_mode](rgb, intensity, **kwargs)
else:
try:
blend = blend_mode(rgb, intensity, **kwargs)
except TypeError:
msg = '"blend_mode" must be callable or one of {0}'
                raise ValueError(msg.format(lookup.keys()))
# Only apply result where hillshade intensity isn't masked
if hasattr(intensity, 'mask'):
mask = intensity.mask[..., 0]
for i in range(3):
blend[..., i][mask] = rgb[..., i][mask]
return blend
def blend_hsv(self, rgb, intensity, hsv_max_sat=None, hsv_max_val=None,
hsv_min_val=None, hsv_min_sat=None):
"""
Take the input data array, convert to HSV values in the given colormap,
then adjust those color values to give the impression of a shaded
relief map with a specified light source. RGBA values are returned,
which can then be used to plot the shaded image with imshow.
The color of the resulting image will be darkened by moving the (s,v)
values (in hsv colorspace) toward (hsv_min_sat, hsv_min_val) in the
        shaded regions, or lightened by sliding (s,v) toward (hsv_max_sat,
        hsv_max_val) in regions that are illuminated. The default extremes are
        chosen so that completely shaded points are nearly black (s = 1, v = 0)
and completely illuminated points are nearly white (s = 0, v = 1).
Parameters
----------
rgb : ndarray
An MxNx3 RGB array of floats ranging from 0 to 1 (color image).
intensity : ndarray
An MxNx1 array of floats ranging from 0 to 1 (grayscale image).
hsv_max_sat : number, optional
The maximum saturation value that the *intensity* map can shift the
output image to. Defaults to 1.
hsv_min_sat : number, optional
The minimum saturation value that the *intensity* map can shift the
output image to. Defaults to 0.
hsv_max_val : number, optional
The maximum value ("v" in "hsv") that the *intensity* map can shift
the output image to. Defaults to 1.
        hsv_min_val : number, optional
The minimum value ("v" in "hsv") that the *intensity* map can shift
the output image to. Defaults to 0.
Returns
-------
rgb : ndarray
An MxNx3 RGB array representing the combined images.
"""
# Backward compatibility...
if hsv_max_sat is None:
hsv_max_sat = self.hsv_max_sat
if hsv_max_val is None:
hsv_max_val = self.hsv_max_val
if hsv_min_sat is None:
hsv_min_sat = self.hsv_min_sat
if hsv_min_val is None:
hsv_min_val = self.hsv_min_val
        # Expects a 2D intensity array scaled between -1 and 1...
intensity = intensity[..., 0]
intensity = 2 * intensity - 1
# convert to rgb, then rgb to hsv
hsv = rgb_to_hsv(rgb[:, :, 0:3])
# modify hsv values to simulate illumination.
hsv[:, :, 1] = np.where(np.logical_and(np.abs(hsv[:, :, 1]) > 1.e-10,
intensity > 0),
((1. - intensity) * hsv[:, :, 1] +
intensity * hsv_max_sat),
hsv[:, :, 1])
hsv[:, :, 2] = np.where(intensity > 0,
((1. - intensity) * hsv[:, :, 2] +
intensity * hsv_max_val),
hsv[:, :, 2])
hsv[:, :, 1] = np.where(np.logical_and(np.abs(hsv[:, :, 1]) > 1.e-10,
intensity < 0),
((1. + intensity) * hsv[:, :, 1] -
intensity * hsv_min_sat),
hsv[:, :, 1])
hsv[:, :, 2] = np.where(intensity < 0,
((1. + intensity) * hsv[:, :, 2] -
intensity * hsv_min_val),
hsv[:, :, 2])
hsv[:, :, 1:] = np.where(hsv[:, :, 1:] < 0., 0, hsv[:, :, 1:])
hsv[:, :, 1:] = np.where(hsv[:, :, 1:] > 1., 1, hsv[:, :, 1:])
# convert modified hsv back to rgb.
return hsv_to_rgb(hsv)
def blend_soft_light(self, rgb, intensity):
"""
Combines an rgb image with an intensity map using "soft light"
blending. Uses the "pegtop" formula.
Parameters
----------
rgb : ndarray
An MxNx3 RGB array of floats ranging from 0 to 1 (color image).
intensity : ndarray
An MxNx1 array of floats ranging from 0 to 1 (grayscale image).
Returns
-------
rgb : ndarray
An MxNx3 RGB array representing the combined images.
"""
return 2 * intensity * rgb + (1 - 2 * intensity) * rgb**2
def blend_overlay(self, rgb, intensity):
"""
Combines an rgb image with an intensity map using "overlay" blending.
Parameters
----------
rgb : ndarray
An MxNx3 RGB array of floats ranging from 0 to 1 (color image).
intensity : ndarray
An MxNx1 array of floats ranging from 0 to 1 (grayscale image).
Returns
-------
rgb : ndarray
An MxNx3 RGB array representing the combined images.
"""
low = 2 * intensity * rgb
high = 1 - 2 * (1 - intensity) * (1 - rgb)
return np.where(rgb <= 0.5, low, high)
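# --- Editor's sketch (not part of the original module) ----------------------
# Minimal usage example for the LightSource class defined above. It assumes
# the constructor accepts the documented azdeg/altdeg illumination angles;
# everything else relies only on the shade() method shown above. The library
# itself never calls this function.
def _demo_lightsource_shade():
    import matplotlib.pyplot as plt
    # Synthetic elevation surface: a smooth bump on a 100x100 grid.
    y, x = np.mgrid[-2:2:100j, -2:2:100j]
    elevation = np.exp(-(x ** 2 + y ** 2))
    ls = LightSource(azdeg=315, altdeg=45)  # assumed constructor signature
    # shade() colormaps the data and blends it with the hillshade intensity.
    rgba = ls.shade(elevation, cmap=plt.get_cmap('gist_earth'),
                    blend_mode='overlay', vert_exag=10)
    plt.imshow(rgba)
    plt.title('LightSource.shade() demo')
    plt.show()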
def from_levels_and_colors(levels, colors, extend='neither'):
"""
A helper routine to generate a cmap and a norm instance which
behave similar to contourf's levels and colors arguments.
Parameters
----------
levels : sequence of numbers
The quantization levels used to construct the :class:`BoundaryNorm`.
        Values ``v`` are quantized to level ``i`` if
``lev[i] <= v < lev[i+1]``.
colors : sequence of colors
The fill color to use for each level. If `extend` is "neither" there
must be ``n_level - 1`` colors. For an `extend` of "min" or "max" add
one extra color, and for an `extend` of "both" add two colors.
extend : {'neither', 'min', 'max', 'both'}, optional
The behaviour when a value falls out of range of the given levels.
See :func:`~matplotlib.pyplot.contourf` for details.
Returns
-------
(cmap, norm) : tuple containing a :class:`Colormap` and a \
:class:`Normalize` instance
"""
colors_i0 = 0
colors_i1 = None
if extend == 'both':
colors_i0 = 1
colors_i1 = -1
extra_colors = 2
elif extend == 'min':
colors_i0 = 1
extra_colors = 1
elif extend == 'max':
colors_i1 = -1
extra_colors = 1
elif extend == 'neither':
extra_colors = 0
else:
raise ValueError('Unexpected value for extend: {0!r}'.format(extend))
n_data_colors = len(levels) - 1
n_expected_colors = n_data_colors + extra_colors
if len(colors) != n_expected_colors:
raise ValueError('With extend == {0!r} and n_levels == {1!r} expected'
' n_colors == {2!r}. Got {3!r}.'
''.format(extend, len(levels), n_expected_colors,
len(colors)))
cmap = ListedColormap(colors[colors_i0:colors_i1], N=n_data_colors)
if extend in ['min', 'both']:
cmap.set_under(colors[0])
else:
cmap.set_under('none')
if extend in ['max', 'both']:
cmap.set_over(colors[-1])
else:
cmap.set_over('none')
cmap.colorbar_extend = extend
norm = BoundaryNorm(levels, ncolors=n_data_colors)
return cmap, norm
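# --- Editor's sketch (not part of the original module) ----------------------
# Example use of from_levels_and_colors() above. With extend='both' the
# colors list needs len(levels) - 1 + 2 entries: one per interior band plus
# one under-flow and one over-flow color.
def _demo_from_levels_and_colors():
    import matplotlib.pyplot as plt
    levels = [0, 1, 2, 4]
    colors = ['black', 'red', 'green', 'blue', 'white']  # 3 bands + under + over
    cmap, norm = from_levels_and_colors(levels, colors, extend='both')
    data = np.random.uniform(-1, 5, size=(10, 10))
    plt.pcolormesh(data, cmap=cmap, norm=norm)
    plt.colorbar(extend=cmap.colorbar_extend)
    plt.show()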
| apache-2.0 |
kvasukib/groupflow_simulator | groupflow_scripts/tree_aggregation_simulations/tree_aggregation.py | 2 | 37710 | from scipy.stats import truncnorm, tstd, poisson, expon
from numpy.random import randint, uniform
from datetime import datetime
from collections import defaultdict
from sets import Set
from heapq import heappop, heappush
from time import time
from scipy.cluster.hierarchy import *
from scipy.spatial.distance import pdist
import scipy.spatial.distance as ssd
import os
import sys
import signal
import numpy as np
import matplotlib.pyplot as plt
MIN_SUM_TREE_LEN = False
def get_cluster_group_aggregation(group_indexes, linkage_array, difference_threshold):
group_map = {}
for group_index in group_indexes:
group_map[group_index] = [group_index]
next_cluster_index = len(group_indexes)
for cluster in linkage_array:
if cluster[2] > difference_threshold:
break
new_cluster_list = []
for index in group_map[cluster[0]]:
new_cluster_list.append(index)
for index in group_map[cluster[1]]:
new_cluster_list.append(index)
del group_map[cluster[0]]
del group_map[cluster[1]]
group_map[next_cluster_index] = new_cluster_list
next_cluster_index += 1
#print 'Group Aggregations for Difference Threshold: ' + str(difference_threshold)
#for cluster_index in group_map:
# print 'Cluster Index: ' + str(cluster_index)
# for group_index in group_map[cluster_index]:
# print str(group_index) + ' ',
# print ' '
return group_map
def calc_best_rendevouz_point(topology, mcast_groups):
aggregated_group_receivers = []
for group in mcast_groups:
for receiver_id in group.receiver_ids:
aggregated_group_receivers.append(receiver_id)
aggregated_group_receivers = list(Set(aggregated_group_receivers))
min_sum_path_length = sys.maxint
rv_node_id = None
for forwarding_element in topology.forwarding_elements:
no_rendevouz_path = False
potential_rv_node_id = forwarding_element.node_id
sum_path_length = 0
if MIN_SUM_TREE_LEN:
sum_path_length = len(topology.get_shortest_path_tree(potential_rv_node_id, aggregated_group_receivers))
for mcast_group in mcast_groups:
src_node_id = mcast_group.src_node_id
shortest_path = topology.get_shortest_path_tree(src_node_id, [potential_rv_node_id])
if shortest_path is None:
no_rendevouz_path = True
break
sum_path_length = sum_path_length + len(shortest_path)
if no_rendevouz_path:
continue
if sum_path_length <= min_sum_path_length:
# Optimization - If multiple forwarding elements have the same sum rendevouz path length, choose
# the rendevouz point that results in the shortest distribution tree (in number of hops)
if sum_path_length == min_sum_path_length:
aggregated_mcast_tree_new = topology.get_shortest_path_tree(potential_rv_node_id, aggregated_group_receivers)
aggregated_mcast_tree_old = topology.get_shortest_path_tree(rv_node_id, aggregated_group_receivers)
if len(aggregated_mcast_tree_new) > len(aggregated_mcast_tree_old):
# print 'Chose old rendevouz point'
continue
# print 'Chose new rendevouz point'
min_sum_path_length = sum_path_length
rv_node_id = potential_rv_node_id
return rv_node_id, aggregated_group_receivers
def aggregate_groups_via_tree_sim(topology, mcast_groups, bandwidth_overhead_threshold):
group_map = defaultdict(lambda : None)
next_agg_tree_index = 0
for group in mcast_groups:
if len(group_map) == 0:
# This is the first group to initialize, always uses a native multicast tree
group.rendevouz_point_node_id = group.src_node_id
group.rendevouz_point_shortest_path = []
group.aggregated_mcast_tree = topology.get_shortest_path_tree(group.src_node_id, list(group.receiver_ids))
group.aggregated_mcast_tree_index = next_agg_tree_index
group.aggregated_bandwidth_Mbps = len(group.aggregated_mcast_tree) * group.bandwidth_Mbps
group_map[next_agg_tree_index] = [group.group_index]
next_agg_tree_index += 1
continue
# If this is not the first group, iterate through all existing aggregated trees, and check if any can be extended
# to cover the group without exceeding the bandwidth overhead threshold
final_aggregated_groups = None
final_aggregated_tree_index = None
final_aggregated_mcast_tree = None
final_rv_node_id = None
final_aggregated_bandwidth_overhead = None
for agg_tree_index in group_map:
test_aggregated_groups = [group]
for group_index in group_map[agg_tree_index]:
test_aggregated_groups.append(mcast_groups[group_index])
rv_node_id, aggregated_group_receivers = calc_best_rendevouz_point(topology, test_aggregated_groups)
aggregated_mcast_tree = topology.get_shortest_path_tree(rv_node_id, aggregated_group_receivers)
# Got a rendevouz node for this potential aggregation, now calculate the bandwidth overhead of this potential aggregation
native_bandwidth_Mbps = 0
aggregated_bandwidth_Mbps = 0
for test_group in test_aggregated_groups:
rv_path = topology.get_shortest_path_tree(test_group.src_node_id, [rv_node_id])
native_bandwidth_Mbps += test_group.native_bandwidth_Mbps
aggregated_bandwidth_Mbps += ((len(aggregated_mcast_tree) + len(rv_path)) * test_group.bandwidth_Mbps)
bandwidth_overhead_ratio = float(aggregated_bandwidth_Mbps) / native_bandwidth_Mbps;
            # Note: When using shortest path trees, this is not necessarily an error condition. If Steiner trees were used, this would not be possible
#if bandwidth_overhead_ratio < 1:
# print '\n====== ERROR: bandwidth overhead ratio less than 1! (Native: ' + str(native_bandwidth_Mbps) + ' Mbps Aggregated: ' + str(aggregated_bandwidth_Mbps) + ' Mbps)'
# for test_group in test_aggregated_groups:
# print '\nGroup Src: ' + str(test_group.src_node_id) + ' Receivers: ' + str(test_group.receiver_ids)
# print 'Native:\n' + str(test_group.native_bandwidth_Mbps) + ' - ' + str(test_group.native_mcast_tree)
# print 'Terminal vertices: ' + str(get_terminal_vertices(test_group.native_mcast_tree))
# rv_path = topology.get_shortest_path_tree(test_group.src_node_id, [rv_node_id])
# print '\nAggregated:\n' + str(((len(aggregated_mcast_tree)) * test_group.bandwidth_Mbps)) + ' - ' + str(aggregated_mcast_tree) + '\n' + str((len(rv_path)) * test_group.bandwidth_Mbps) + ' - ' + str(rv_path)
# print 'Terminal vertices: ' + str(get_terminal_vertices(aggregated_mcast_tree))
# print '=====\n'
# sys.exit(1)
if bandwidth_overhead_ratio > bandwidth_overhead_threshold:
continue # This aggregation causes the bandwidth overhead ratio to exceed the threshold
if final_aggregated_bandwidth_overhead is None or bandwidth_overhead_ratio < final_aggregated_bandwidth_overhead:
final_aggregated_bandwidth_overhead = bandwidth_overhead_ratio
final_aggregated_tree_index = agg_tree_index
final_aggregated_groups = test_aggregated_groups
final_aggregated_mcast_tree = aggregated_mcast_tree
final_rv_node_id = rv_node_id
# At this point, either a valid aggregation has been found (and stored in the "final" variables), or the group will
# be assigned to a new, native tree
if final_aggregated_tree_index is not None:
# A valid aggregation has been found
# print 'Assigning group #' + str(group.group_index) + ' to aggregated tree #' + str(final_aggregated_tree_index) + ' (BW Overhead: ' + str(final_aggregated_bandwidth_overhead) + ')'
group_map[final_aggregated_tree_index].append(group.group_index)
for agg_group in final_aggregated_groups:
src_node_id = agg_group.src_node_id
rv_path = topology.get_shortest_path_tree(src_node_id, [final_rv_node_id])
agg_group.rendevouz_point_node_id = final_rv_node_id
agg_group.rendevouz_point_shortest_path = rv_path
agg_group.aggregated_mcast_tree = final_aggregated_mcast_tree
agg_group.aggregated_mcast_tree_index = final_aggregated_tree_index
agg_group.aggregated_bandwidth_Mbps = ((len(agg_group.aggregated_mcast_tree)
+ len(agg_group.rendevouz_point_shortest_path)) * agg_group.bandwidth_Mbps)
else:
# Create a new aggregated tree index for the group
group.rendevouz_point_node_id = group.src_node_id
group.rendevouz_point_shortest_path = []
group.aggregated_mcast_tree = topology.get_shortest_path_tree(group.src_node_id, list(group.receiver_ids))
group.aggregated_mcast_tree_index = next_agg_tree_index
group.aggregated_bandwidth_Mbps = len(group.aggregated_mcast_tree) * group.bandwidth_Mbps
group_map[next_agg_tree_index] = [group.group_index]
next_agg_tree_index += 1
# print 'Tree similarity aggregation results:\n' + str(group_map)
return mcast_groups, group_map
def generate_cluster_aggregated_mcast_trees(topology, mcast_groups, group_map):
for group_aggregation in group_map:
# print 'Cluster #' + str(group_aggregation) + ' - Groups: ' + (str(group_map[group_aggregation]))
cluster_groups = []
for mcast_group_id in group_map[group_aggregation]:
mcast_groups[mcast_group_id].aggregated_mcast_tree_index = group_aggregation
cluster_groups.append(mcast_groups[mcast_group_id])
rv_node_id, aggregated_group_receivers = calc_best_rendevouz_point(topology, cluster_groups)
for mcast_group_id in group_map[group_aggregation]:
src_node_id = mcast_groups[mcast_group_id].src_node_id
shortest_path = topology.get_shortest_path_tree(src_node_id, [rv_node_id])
mcast_groups[mcast_group_id].rendevouz_point_node_id = rv_node_id
mcast_groups[mcast_group_id].rendevouz_point_shortest_path = shortest_path
mcast_groups[mcast_group_id].aggregated_mcast_tree = topology.get_shortest_path_tree(rv_node_id, aggregated_group_receivers)
mcast_groups[mcast_group_id].aggregated_bandwidth_Mbps = ((len(mcast_groups[mcast_group_id].aggregated_mcast_tree)
+ len(mcast_groups[mcast_group_id].rendevouz_point_shortest_path)) * mcast_groups[mcast_group_id].bandwidth_Mbps)
return mcast_groups, group_map
def get_outgoing_edges(edge_list, node_id):
outgoing_edges = []
for edge in edge_list:
if edge[0] == node_id:
outgoing_edges.append(edge)
return outgoing_edges
def get_terminal_vertices(edge_list):
tail_set = Set()
head_set = Set()
for edge in edge_list:
tail_set.add(edge[0])
head_set.add(edge[1])
return head_set - tail_set
def get_origin_vertices(edge_list, origin_candidates):
node_edge_count = defaultdict(lambda : None)
for edge in edge_list:
if node_edge_count[edge[0]] is None:
node_edge_count[edge[0]] = 1
else:
node_edge_count[edge[0]] = node_edge_count[edge[0]] + 1
if node_edge_count[edge[1]] is None:
node_edge_count[edge[1]] = -1
else:
node_edge_count[edge[1]] = node_edge_count[edge[1]] - 1
origin_set = Set()
for node_id in origin_candidates:
if node_edge_count[node_id] is not None and node_edge_count[node_id] > 0:
origin_set.add(node_id)
return origin_set
def get_intermediate_vertices(edge_list):
tail_set = Set()
head_set = Set()
for edge in edge_list:
tail_set.add(edge[0])
head_set.add(edge[1])
return tail_set.intersection(head_set)
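# --- Editor's sketch (not part of the original script) ----------------------
# Small worked example for the edge-set helpers above, using a tree rooted at
# node 0 with edges 0->1, 1->2, 1->3, 0->4. Terminal vertices are the leaves,
# intermediate vertices appear as both head and tail, and origin candidates
# are kept only if they have more outgoing than incoming edges.
def _demo_vertex_helpers():
    demo_edges = [(0, 1), (1, 2), (1, 3), (0, 4)]
    print('Terminal vertices: ' + str(get_terminal_vertices(demo_edges)))          # leaves 2, 3, 4
    print('Intermediate vertices: ' + str(get_intermediate_vertices(demo_edges)))  # node 1
    print('Origin vertices: ' + str(get_origin_vertices(demo_edges, [0])))         # node 0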
def aggregate_groups_via_clustering(groups, linkage_method, similarity_threshold, plot_dendrogram = False):
src_dist_clustering = False
if '_srcdist' in linkage_method:
src_dist_clustering = True
linkage_method = linkage_method[:-len('_srcdist')]
# Generate the distance matrix used for clustering
receivers_list = []
for group in groups:
receivers_list.append(list(group.receiver_ids))
receivers_array = np.array(receivers_list)
distance_matrix = []
group_index = 0
group_indexes = []
for group1 in groups:
distance_matrix.append([])
for group2 in groups:
jaccard_distance = group1.jaccard_distance(group2)
if src_dist_clustering:
src_distance = len(topo.get_shortest_path_tree(group1.src_node_id, [group2.src_node_id]))
src_distance_ratio = (float(src_distance)/topo.network_diameter) # Distance between source nodes as a percentage of the network diameter
distance_matrix[group_index].append(1 - ((1 - jaccard_distance) * (1 - src_distance_ratio)))
else:
distance_matrix[group_index].append(jaccard_distance)
group_indexes.append(group_index)
group_index += 1
comp_dist_array = ssd.squareform(distance_matrix)
# Perform clustering, and plot a dendrogram of the results if requested
z = linkage(comp_dist_array, method=linkage_method)
group_map = get_cluster_group_aggregation(group_indexes, z, similarity_threshold)
if plot_dendrogram:
plt.figure(1, figsize=(6, 5))
print 'Linkage Array:\n' + str(z)
print ' '
d = dendrogram(z, show_leaf_counts=True)
plt.title('Multicast Group Clustering')
plt.xlabel('Multicast Group Index')
plt.ylabel('Cluster Similarity')
plt.show()
# Generate aggregated multicast trees based on the generated clusters
generate_cluster_aggregated_mcast_trees(topo, groups, group_map)
return groups, group_map
def calc_bloom_filter_flow_table_reduction(topo, groups, debug_print = False):
native_network_flow_table_size = 0
bloom_filter_network_flow_table_size = 0
non_reducible_flow_table_entries = 0
# First - Calculate the number of flow entries required for bloom filter matching
# (i.e. one flow table entry for each outgoing interface on each node in the network)
topo_edge_list = []
for link in topo.links:
topo_edge_list.append([link.tail_node_id, link.head_node_id])
for node in topo.forwarding_elements:
bloom_filter_network_flow_table_size += len(get_outgoing_edges(topo_edge_list, node.node_id))
# Second - Consider flow table entries required at egress and ingress nodes, as well as all flow
# entries for the native routing case
for group in groups:
native_network_flow_table_size += len(group.native_mcast_tree) + 1
bloom_filter_network_flow_table_size += len(group.receiver_ids) + 1
non_reducible_flow_table_entries += len(group.receiver_ids) + 1
reducible_native_network_flow_table_size = native_network_flow_table_size - non_reducible_flow_table_entries
reducible_bloom_filter_network_flow_table_size = bloom_filter_network_flow_table_size - non_reducible_flow_table_entries
flow_table_reduction_ratio = 1 - float(bloom_filter_network_flow_table_size) / float(native_network_flow_table_size)
reducible_flow_table_reduction_ratio = 1 - float(reducible_bloom_filter_network_flow_table_size) / float(reducible_native_network_flow_table_size)
if debug_print:
print ' '
print 'Native Network Flow Table Size: ' + str(native_network_flow_table_size)
print 'Bloom Filter Network Flow Table Size: ' + str(bloom_filter_network_flow_table_size)
print 'Flow Table Reduction Ratio: ' + str(flow_table_reduction_ratio)
print ' '
print 'Reducible Native Network Flow Table Size: ' + str(reducible_native_network_flow_table_size)
print 'Reducible Bloom Filter Network Flow Table Size: ' + str(reducible_bloom_filter_network_flow_table_size)
print 'Reducible Flow Table Reduction Ratio: ' + str(reducible_flow_table_reduction_ratio)
return flow_table_reduction_ratio, reducible_flow_table_reduction_ratio
def calc_network_performance_metrics(groups, group_map, debug_print = False):
native_network_flow_table_size = 0
aggregated_network_flow_table_size = 0
native_bandwidth_Mbps = 0
aggregated_bandwidth_Mbps = 0
seen_aggregated_tree_indexes = []
non_reducible_flow_table_entries = 0
for group in groups:
non_reducible_flow_table_entries = non_reducible_flow_table_entries + len(group.receiver_ids) + 1
native_bandwidth_Mbps = native_bandwidth_Mbps + group.native_bandwidth_Mbps
aggregated_bandwidth_Mbps = aggregated_bandwidth_Mbps + group.aggregated_bandwidth_Mbps
native_network_flow_table_size = native_network_flow_table_size + len(group.native_mcast_tree) + 1
label_switched_tree = False
if len(group_map[group.aggregated_mcast_tree_index]) > 1:
label_switched_tree = True
# print 'Tree #' + str(group.aggregated_mcast_tree_index) + ' Label Switched: ' + str(label_switched_tree)
if label_switched_tree:
aggregated_network_flow_table_size = aggregated_network_flow_table_size + len(group.receiver_ids) + 1
if group.aggregated_mcast_tree_index not in seen_aggregated_tree_indexes:
seen_aggregated_tree_indexes.append(group.aggregated_mcast_tree_index)
if not label_switched_tree:
aggregated_network_flow_table_size = aggregated_network_flow_table_size + len(group.aggregated_mcast_tree) + 1
else:
# Calculate the set of all edges participating in this aggregate distribution tree
agg_tree_edges = Set(group.aggregated_mcast_tree)
origin_candidates = []
for group_index in group_map[group.aggregated_mcast_tree_index]:
origin_candidates.append(groups[group_index].src_node_id)
for edge in groups[group_index].rendevouz_point_shortest_path:
agg_tree_edges.add(edge)
# print '|DE| = ' + str(len(agg_tree_edges)) + ' - ' + str(agg_tree_edges)
origin_nodes = get_origin_vertices(agg_tree_edges, origin_candidates)
# print '|DEo| = ' + str(len(origin_nodes)) + ' - ' + str(origin_nodes)
terminal_nodes = get_terminal_vertices(agg_tree_edges)
# print '|DEt| = ' + str(len(terminal_nodes)) + ' - ' + str(terminal_nodes)
aggregated_network_flow_table_size = aggregated_network_flow_table_size + len(agg_tree_edges) + 1 - len(origin_nodes) - len(terminal_nodes)
#if debug_print:
# print ' '
# group.debug_print()
reducible_native_network_flow_table_size = native_network_flow_table_size - non_reducible_flow_table_entries
reducible_aggregated_network_flow_table_size = aggregated_network_flow_table_size - non_reducible_flow_table_entries
bandwidth_overhead_ratio = float(aggregated_bandwidth_Mbps) / float(native_bandwidth_Mbps)
flow_table_reduction_ratio = 1 - float(aggregated_network_flow_table_size) / float(native_network_flow_table_size)
reducible_flow_table_reduction_ratio = 1 - float(reducible_aggregated_network_flow_table_size) / float(reducible_native_network_flow_table_size)
if debug_print:
print ' '
print 'Aggregated Network Bandwidth Utilization: ' + str(aggregated_bandwidth_Mbps) + ' Mbps'
print 'Native Network Bandwidth Utilization: ' + str(native_bandwidth_Mbps) + ' Mbps'
print 'Bandwidth Overhead Ratio: ' + str(bandwidth_overhead_ratio)
print ' '
print 'Native Network Flow Table Size: ' + str(native_network_flow_table_size)
print 'Aggregated Network Flow Table Size: ' + str(aggregated_network_flow_table_size)
print 'Flow Table Reduction Ratio: ' + str(flow_table_reduction_ratio)
print ' '
print 'Reducible Native Network Flow Table Size: ' + str(reducible_native_network_flow_table_size)
print 'Reducible Aggregated Network Flow Table Size: ' + str(reducible_aggregated_network_flow_table_size)
print 'Reducible Flow Table Reduction Ratio: ' + str(reducible_flow_table_reduction_ratio)
return bandwidth_overhead_ratio, flow_table_reduction_ratio, reducible_flow_table_reduction_ratio, len(group_map)
class ForwardingElement(object):
def __init__(self, node_id):
self.node_id = node_id
def __str__(self):
return 'Forwarding Element #' + str(self.node_id)
class Link(object):
def __init__(self, tail_node_id, head_node_id, cost):
self.tail_node_id = tail_node_id # Node ID from which the link originates
self.head_node_id = head_node_id # Node ID to which the link delivers traffic
self.cost = cost
def __str__(self):
return 'Link: ' + str(self.tail_node_id) + ' --> ' + str(self.head_node_id) + ' C:' + str(self.cost)
class SimTopo(object):
def __init__(self):
self.forwarding_elements = []
self.links = []
self.shortest_path_map = defaultdict(lambda : None)
self.network_diameter = 0
self.recalc_path_tree_map = True
def calc_shortest_path_tree(self):
self.shortest_path_map = defaultdict(lambda : None)
for source_forwarding_element in self.forwarding_elements:
src_node_id = source_forwarding_element.node_id
nodes = set(self.forwarding_elements)
edges = self.links
graph = defaultdict(list)
for link in edges:
graph[link.tail_node_id].append((link.cost, link.head_node_id))
src_path_tree_map = defaultdict(lambda : None)
queue, seen = [(0,src_node_id,())], set()
while queue:
(cost,node1,path) = heappop(queue)
if node1 not in seen:
seen.add(node1)
path = (node1, path)
src_path_tree_map[node1] = path
for next_cost, node2 in graph.get(node1, ()):
if node2 not in seen:
new_path_cost = cost + next_cost
heappush(queue, (new_path_cost, node2, path))
for dst_forwarding_element in self.forwarding_elements:
if self.shortest_path_map[src_node_id] is None:
self.shortest_path_map[src_node_id] = defaultdict(lambda : None)
dst_node_id = dst_forwarding_element.node_id
shortest_path_edges = []
if dst_node_id == src_node_id:
self.shortest_path_map[src_node_id][dst_node_id] = []
continue
receiver_path = src_path_tree_map[dst_node_id]
if receiver_path is None:
continue
while receiver_path[1]:
shortest_path_edges.append((receiver_path[1][0], receiver_path[0]))
receiver_path = receiver_path[1]
self.shortest_path_map[src_node_id][dst_node_id] = shortest_path_edges
self.recalc_path_tree_map = False
# Recalculate the network diameter
self.network_diameter = 0
for source_forwarding_element in self.forwarding_elements:
for dest_forwarding_element in self.forwarding_elements:
path = self.get_shortest_path_tree(source_forwarding_element.node_id, [dest_forwarding_element.node_id])
if path is not None and len(path) > self.network_diameter:
self.network_diameter = len(path)
# print 'Got network diameter: ' + str(self.network_diameter)
def get_shortest_path_tree(self, source_node_id, receiver_node_id_list):
if self.recalc_path_tree_map:
self.calc_shortest_path_tree()
if len(receiver_node_id_list) == 1:
return self.shortest_path_map[source_node_id][receiver_node_id_list[0]]
shortest_path_tree_edges = Set()
for receiver_node_id in receiver_node_id_list:
shortest_path = self.shortest_path_map[source_node_id][receiver_node_id]
if shortest_path is None:
print 'ERROR: No shortest path from node ' + str(source_node_id) + ' to ' + str(receiver_node_id)
return None
for edge in shortest_path:
shortest_path_tree_edges.add(edge)
# Return the set as a list of edges
shortest_path_tree_edges = list(shortest_path_tree_edges)
return shortest_path_tree_edges
def load_from_edge_list(self, edge_list):
self.forwarding_elements = []
self.links = []
seen_node_ids = []
for edge in edge_list:
self.links.append(Link(edge[0], edge[1], 1))
if edge[0] not in seen_node_ids:
self.forwarding_elements.append(ForwardingElement(edge[0]))
seen_node_ids.append(edge[0])
if edge[1] not in seen_node_ids:
self.forwarding_elements.append(ForwardingElement(edge[1]))
seen_node_ids.append(edge[1])
self.recalc_path_tree_map = True
def load_from_brite_topo(self, brite_filepath, debug_print = False):
self.forwarding_elements = []
self.links = []
print 'Parsing BRITE topology at filepath: ' + str(brite_filepath)
file = open(brite_filepath, 'r')
line = file.readline()
print 'BRITE ' + line
# Skip ahead until the nodes section is reached
in_node_section = False
while not in_node_section:
line = file.readline()
if 'Nodes:' in line:
in_node_section = True
break
# In the nodes section now, generate a forwarding element for each node
while in_node_section:
line = file.readline().strip()
if not line:
in_node_section = False
if debug_print:
print 'Finished parsing nodes'
break
line_split = line.split('\t')
node_id = int(line_split[0])
if debug_print:
print 'Generating forwarding element for ID: ' + str(node_id)
self.forwarding_elements.append(ForwardingElement(node_id))
# Skip ahead to the edges section
in_edge_section = False
while not in_edge_section:
line = file.readline()
if 'Edges:' in line:
in_edge_section = True
break
# In the edges section now, add all required links
# Note: This code assumes that all links are bidirectional with cost 1
while in_edge_section:
line = file.readline().strip()
if not line: # Empty string
in_edge_section = False
if debug_print:
print 'Finished parsing edges'
break
line_split = line.split('\t')
node_id_1 = int(line_split[1])
node_id_2 = int(line_split[2])
if debug_print:
print 'Adding bi-directional link between forwarding elements ' + str(node_id_1) + ' and ' + str(node_id_2)
self.links.append(Link(node_id_1, node_id_2, 1))
self.links.append(Link(node_id_2, node_id_1, 1))
file.close()
self.recalc_path_tree_map = True
def __str__(self):
return_str = '====================\nForwarding Elements:\n====================\n'
for forwarding_element in self.forwarding_elements:
return_str = return_str + str(forwarding_element) + '\n'
return_str = return_str + '======\nLinks:\n======\n'
for link in self.links:
return_str = return_str + str(link) + '\n'
return return_str
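# --- Editor's sketch (not part of the original script) ----------------------
# Builds a tiny bidirectional topology with SimTopo above and requests a
# shortest-path tree from node 0 to receivers 2 and 3. The return value is a
# list of (tail, head) edges forming the union of the shortest paths.
def _demo_simtopo():
    demo_topo = SimTopo()
    demo_topo.load_from_edge_list([[0, 1], [1, 0], [1, 2], [2, 1], [1, 3], [3, 1]])
    tree_edges = demo_topo.get_shortest_path_tree(0, [2, 3])
    print('Shortest path tree edges: ' + str(tree_edges))   # (0,1), (1,2), (1,3) in some order
    print('Network diameter (hops): ' + str(demo_topo.network_diameter))  # 2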
class McastGroup(object):
def __init__(self, topology, src_node_id, bandwidth_Mbps, mcast_group_index):
self.group_index = mcast_group_index
self.src_node_id = src_node_id
self.receiver_ids = Set()
self.topology = topology
self.bandwidth_Mbps = bandwidth_Mbps
self.native_mcast_tree = None
self.native_bandwidth_Mbps = None
self.aggregated_mcast_tree_index = None
self.aggregated_mcast_tree = None
self.rendevouz_point_node_id = None
self.rendevouz_point_shortest_path = None
self.aggregated_bandwidth_Mbps = None
def set_receiver_ids(self, receiver_ids):
self.receiver_ids = Set(receiver_ids)
self.native_mcast_tree = self.topology.get_shortest_path_tree(self.src_node_id, list(self.receiver_ids))
self.native_bandwidth_Mbps = len(self.native_mcast_tree) * self.bandwidth_Mbps
def generate_random_receiver_ids(self, num_receivers):
# KLUDGE: Receiver IDs will always be generated until there is at least one receiver which is not colocated with the source
# This prevents divide by 0 errors when calculating performance metrics
# TODO - AC: Find a better way to handle this situation
while len(self.receiver_ids) < num_receivers:
            new_node_id = randint(0, len(self.topology.forwarding_elements))
if new_node_id != self.src_node_id and new_node_id not in self.receiver_ids:
self.receiver_ids.add(new_node_id)
self.native_mcast_tree = self.topology.get_shortest_path_tree(self.src_node_id, list(self.receiver_ids))
self.native_bandwidth_Mbps = len(self.native_mcast_tree) * self.bandwidth_Mbps
def jaccard_distance(self, mcast_group):
return 1.0 - (float(len(self.receiver_ids.intersection(mcast_group.receiver_ids))) / float(len(self.receiver_ids.union(mcast_group.receiver_ids))))
def debug_print(self):
print 'Multicast Group #' + str(self.group_index) + '\nSrc Node ID: ' + str(self.src_node_id) + '\nReceivers: ',
for receiver_id in self.receiver_ids:
print str(receiver_id) + ', ',
print ' '
print 'Native Mcast Tree:\n' + str(self.native_mcast_tree)
print 'Aggregated Mcast Tree Index: ' + str(self.aggregated_mcast_tree_index)
print 'Aggregated Mcast Tree:\n' + str(self.aggregated_mcast_tree)
print 'Rendevouz Point: Node #' + str(self.rendevouz_point_node_id) + '\nRendevouz Path: ' + str(self.rendevouz_point_shortest_path)
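# --- Editor's sketch (not part of the original script) ----------------------
# Two McastGroup instances with receiver sets {2, 3} and {3, 4} share one of
# three distinct receivers, so their Jaccard distance is 1 - 1/3 ~= 0.667.
# Uses only the classes defined above; the topology is a small star.
def _demo_jaccard_distance():
    demo_topo = SimTopo()
    demo_topo.load_from_edge_list([[0, 1], [1, 0], [1, 2], [2, 1],
                                   [1, 3], [3, 1], [1, 4], [4, 1]])
    group_a = McastGroup(demo_topo, 0, 10, 0)
    group_a.set_receiver_ids([2, 3])
    group_b = McastGroup(demo_topo, 0, 10, 1)
    group_b.set_receiver_ids([3, 4])
    print('Jaccard distance: ' + str(group_a.jaccard_distance(group_b)))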
def run_multicast_aggregation_test(topo, num_groups, min_group_size, max_group_size, similarity_type, similarity_parameter, debug_print = False, plot_dendrogram = False):
# Generate random multicast groups
groups = []
for i in range(0, num_groups):
groups.append(McastGroup(topo, randint(0, len(topo.forwarding_elements)), 10, i))
groups[i].generate_random_receiver_ids(randint(min_group_size, max_group_size + 1))
#groups.append(McastGroup(topo, 0, 10, 0))
#groups[0].set_receiver_ids([6,7])
#groups.append(McastGroup(topo, 1, 10, 1))
#groups[1].set_receiver_ids([6,7])
#groups.append(McastGroup(topo, 8, 10, 2))
#groups[2].set_receiver_ids([6,7])
if 'bloom_filter' in similarity_type:
flow_table_reduction_ratio, reducible_flow_table_reduction_ratio = calc_bloom_filter_flow_table_reduction(topo, groups);
return 0, flow_table_reduction_ratio, reducible_flow_table_reduction_ratio, 0, 0
run_time_start = time()
if 'single' in similarity_type or 'complete' in similarity_type or 'average' in similarity_type:
groups, group_map = aggregate_groups_via_clustering(groups, similarity_type, similarity_parameter)
elif 'tree_sim' in similarity_type:
groups, group_map = aggregate_groups_via_tree_sim(topo, groups, similarity_parameter)
else:
print 'ERROR: Invalid similarity type - Supported options are "single", "average", "complete", or "tree_sim"'
sys.exit(1)
run_time = time() - run_time_start
# Calculate network performance metrics
bandwidth_overhead_ratio, flow_table_reduction_ratio, reducible_flow_table_reduction_ratio, num_trees = calc_network_performance_metrics(groups, group_map)
return bandwidth_overhead_ratio, flow_table_reduction_ratio, reducible_flow_table_reduction_ratio, num_trees, run_time
if __name__ == '__main__':
    if len(sys.argv) < 7:
        print 'Tree aggregation script requires the following 6 command line arguments:'
print '[1] Topology filepath (string)'
print '[2] Number of trials to run (integer)'
print '[3] Number of multicast groups (integer)'
print '[4] Group size range (string, in format "1-10"). If only a single number is specified, the minimum group size is set to 1'
print '[5] Similarity type (string): one of "single", "complete", "average", or "tree_sim"'
print '[6] Similarity parameter (float):'
print '\tFor the "single", "complete", and "average" similarity types this sets the similarity threshold to use for clustering'
print '\tFor the "tree_sim" similarity type this sets the bandwidth overhead threshold'
print
sys.exit(0)
# Import the topology from BRITE format
topo = SimTopo()
if 'abilene' in sys.argv[1]:
print 'Using hardcoded Abilene topology'
topo.load_from_edge_list([[0,1], [0,2], [1,0], [1,2], [1,3], [2,0], [2,1], [2,5], [3,1], [3,4], [4,3], [4,5], [4,10],
[5,2], [5,4], [5,6], [6,5], [6,10], [6,7], [7,6], [7,8], [8,7], [8,9], [9,8], [9,10], [10,6], [10,9], [10,4]])
else:
topo.load_from_brite_topo(sys.argv[1])
#topo.load_from_edge_list([[0,2],[1,2],[2,0],[2,1],[2,3],[3,2],[3,4],[4,3],[4,5],[5,6],[5,7], [8,0]])
#similarity_threshold = 0.5
#bandwidth_overhead_ratio, flow_table_reduction_ratio, num_clusters = run_multicast_aggregation_test(topo, similarity_threshold, 'single', True)
#sys.exit(0)
bandwidth_overhead_list = []
flow_table_reduction_list = []
reducible_flow_table_reduction_list = []
num_trees_list = []
run_time_list = []
min_group_size = 1
max_group_size = 10
group_range_split = sys.argv[4].split('-')
if len(group_range_split) == 1:
max_group_size = int(group_range_split[0])
else:
min_group_size = int(group_range_split[0])
max_group_size = int(group_range_split[1])
num_trials = int(sys.argv[2])
start_time = time()
print 'Simulations started at: ' + str(datetime.now())
for i in range(0, num_trials):
#if i % 20 == 0:
# print 'Running trial #' + str(i)
bandwidth_overhead_ratio, flow_table_reduction_ratio, reducible_flow_table_reduction_ratio, num_trees, run_time = \
run_multicast_aggregation_test(topo, int(sys.argv[3]), min_group_size, max_group_size, sys.argv[5], float(sys.argv[6]), False, False)
bandwidth_overhead_list.append(bandwidth_overhead_ratio)
flow_table_reduction_list.append(flow_table_reduction_ratio)
reducible_flow_table_reduction_list.append(reducible_flow_table_reduction_ratio)
num_trees_list.append(num_trees)
run_time_list.append(run_time)
end_time = time()
print ' '
print 'Similarity Type: ' + sys.argv[5]
print 'Similarity Threshold: ' + sys.argv[6]
print 'Average Bandwidth Overhead: ' + str(sum(bandwidth_overhead_list) / len(bandwidth_overhead_list))
print 'Average Flow Table Reduction: ' + str(sum(flow_table_reduction_list) / len(flow_table_reduction_list))
print 'Average Reducible Flow Table Reduction: ' + str(sum(reducible_flow_table_reduction_list) / len(reducible_flow_table_reduction_list))
print 'Average # Aggregated Trees: ' + str(float(sum(num_trees_list)) / len(num_trees_list))
print 'Average Tree Agg. Run-Time: ' + str(float(sum(run_time_list)) / len(run_time_list))
print 'Expected Sim Run-Time: ' + str((float(sum(run_time_list)) / len(run_time_list)) * num_trials)
print ' '
print 'Completed ' + str(num_trials) + ' trials in ' + str(end_time - start_time) + ' seconds (' + str(datetime.now()) + ')'
sys.exit() | apache-2.0 |
akhilaananthram/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/backends/backend_ps.py | 69 | 50262 | """
A PostScript backend, which can produce both PostScript .ps and .eps
"""
from __future__ import division
import glob, math, os, shutil, sys, time
def _fn_name(): return sys._getframe(1).f_code.co_name
try:
from hashlib import md5
except ImportError:
from md5 import md5 #Deprecated in 2.5
from tempfile import gettempdir
from cStringIO import StringIO
from matplotlib import verbose, __version__, rcParams
from matplotlib._pylab_helpers import Gcf
from matplotlib.afm import AFM
from matplotlib.backend_bases import RendererBase, GraphicsContextBase,\
FigureManagerBase, FigureCanvasBase
from matplotlib.cbook import is_string_like, get_realpath_and_stat, \
is_writable_file_like, maxdict
from matplotlib.mlab import quad2cubic
from matplotlib.figure import Figure
from matplotlib.font_manager import findfont, is_opentype_cff_font
from matplotlib.ft2font import FT2Font, KERNING_DEFAULT, LOAD_NO_HINTING
from matplotlib.ttconv import convert_ttf_to_ps
from matplotlib.mathtext import MathTextParser
from matplotlib._mathtext_data import uni2type1
from matplotlib.text import Text
from matplotlib.path import Path
from matplotlib.transforms import IdentityTransform
import numpy as npy
import binascii
import re
try:
set
except NameError:
from sets import Set as set
if sys.platform.startswith('win'): cmd_split = '&'
else: cmd_split = ';'
backend_version = 'Level II'
debugPS = 0
papersize = {'letter': (8.5,11),
'legal': (8.5,14),
'ledger': (11,17),
'a0': (33.11,46.81),
'a1': (23.39,33.11),
'a2': (16.54,23.39),
'a3': (11.69,16.54),
'a4': (8.27,11.69),
'a5': (5.83,8.27),
'a6': (4.13,5.83),
'a7': (2.91,4.13),
'a8': (2.07,2.91),
'a9': (1.457,2.05),
'a10': (1.02,1.457),
'b0': (40.55,57.32),
'b1': (28.66,40.55),
'b2': (20.27,28.66),
'b3': (14.33,20.27),
'b4': (10.11,14.33),
'b5': (7.16,10.11),
'b6': (5.04,7.16),
'b7': (3.58,5.04),
'b8': (2.51,3.58),
'b9': (1.76,2.51),
'b10': (1.26,1.76)}
def _get_papertype(w, h):
keys = papersize.keys()
keys.sort()
keys.reverse()
for key in keys:
if key.startswith('l'): continue
pw, ph = papersize[key]
if (w < pw) and (h < ph): return key
else:
return 'a0'
def _num_to_str(val):
if is_string_like(val): return val
ival = int(val)
if val==ival: return str(ival)
s = "%1.3f"%val
s = s.rstrip("0")
s = s.rstrip(".")
return s
def _nums_to_str(*args):
return ' '.join(map(_num_to_str,args))
def quote_ps_string(s):
"Quote dangerous characters of S for use in a PostScript string constant."
s=s.replace("\\", "\\\\")
s=s.replace("(", "\\(")
s=s.replace(")", "\\)")
s=s.replace("'", "\\251")
s=s.replace("`", "\\301")
s=re.sub(r"[^ -~\n]", lambda x: r"\%03o"%ord(x.group()), s)
return s
def seq_allequal(seq1, seq2):
"""
seq1 and seq2 are either None or sequences or numerix arrays
Return True if both are None or both are seqs with identical
elements
"""
if seq1 is None:
return seq2 is None
if seq2 is None:
return False
#ok, neither are None:, assuming iterable
if len(seq1) != len(seq2): return False
return npy.alltrue(npy.equal(seq1, seq2))
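# --- Editor's sketch (not part of the original backend) ---------------------
# Exercises the small module-level helpers above. _get_papertype() returns the
# first key (in reverse-sorted order) whose paper size strictly exceeds the
# requested width/height in inches; quote_ps_string() escapes characters that
# are unsafe inside a PostScript string literal.
def _demo_ps_helpers():
    print(_get_papertype(8.0, 10.0))            # a B-series size large enough for 8x10 in
    print(quote_ps_string('50% (approx.)'))     # parentheses become \( and \)
    print(_nums_to_str(1.0, 2.5, 3))            # '1 2.5 3'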
class RendererPS(RendererBase):
"""
The renderer handles all the drawing primitives using a graphics
context instance that controls the colors/styles.
"""
fontd = maxdict(50)
afmfontd = maxdict(50)
def __init__(self, width, height, pswriter, imagedpi=72):
"""
        Although PostScript itself is dpi independent, we need to
        inform the image code about a requested dpi to generate high
        res images and then scale them before embedding them
"""
RendererBase.__init__(self)
self.width = width
self.height = height
self._pswriter = pswriter
if rcParams['text.usetex']:
self.textcnt = 0
self.psfrag = []
self.imagedpi = imagedpi
if rcParams['path.simplify']:
self.simplify = (width * imagedpi, height * imagedpi)
else:
self.simplify = None
# current renderer state (None=uninitialised)
self.color = None
self.linewidth = None
self.linejoin = None
self.linecap = None
self.linedash = None
self.fontname = None
self.fontsize = None
self.hatch = None
self.image_magnification = imagedpi/72.0
self._clip_paths = {}
self._path_collection_id = 0
self.used_characters = {}
self.mathtext_parser = MathTextParser("PS")
def track_characters(self, font, s):
"""Keeps track of which characters are required from
each font."""
realpath, stat_key = get_realpath_and_stat(font.fname)
used_characters = self.used_characters.setdefault(
stat_key, (realpath, set()))
used_characters[1].update([ord(x) for x in s])
def merge_used_characters(self, other):
for stat_key, (realpath, charset) in other.items():
used_characters = self.used_characters.setdefault(
stat_key, (realpath, set()))
used_characters[1].update(charset)
def set_color(self, r, g, b, store=1):
if (r,g,b) != self.color:
if r==g and r==b:
self._pswriter.write("%1.3f setgray\n"%r)
else:
self._pswriter.write("%1.3f %1.3f %1.3f setrgbcolor\n"%(r,g,b))
if store: self.color = (r,g,b)
def set_linewidth(self, linewidth, store=1):
if linewidth != self.linewidth:
self._pswriter.write("%1.3f setlinewidth\n"%linewidth)
if store: self.linewidth = linewidth
def set_linejoin(self, linejoin, store=1):
if linejoin != self.linejoin:
self._pswriter.write("%d setlinejoin\n"%linejoin)
if store: self.linejoin = linejoin
def set_linecap(self, linecap, store=1):
if linecap != self.linecap:
self._pswriter.write("%d setlinecap\n"%linecap)
if store: self.linecap = linecap
def set_linedash(self, offset, seq, store=1):
if self.linedash is not None:
oldo, oldseq = self.linedash
if seq_allequal(seq, oldseq): return
if seq is not None and len(seq):
s="[%s] %d setdash\n"%(_nums_to_str(*seq), offset)
self._pswriter.write(s)
else:
self._pswriter.write("[] 0 setdash\n")
if store: self.linedash = (offset,seq)
def set_font(self, fontname, fontsize, store=1):
if rcParams['ps.useafm']: return
if (fontname,fontsize) != (self.fontname,self.fontsize):
out = ("/%s findfont\n"
"%1.3f scalefont\n"
"setfont\n" % (fontname,fontsize))
self._pswriter.write(out)
if store: self.fontname = fontname
if store: self.fontsize = fontsize
def set_hatch(self, hatch):
"""
hatch can be one of:
/ - diagonal hatching
\ - back diagonal
| - vertical
- - horizontal
+ - crossed
X - crossed diagonal
letters can be combined, in which case all the specified
hatchings are done
if same letter repeats, it increases the density of hatching
in that direction
"""
hatches = {'horiz':0, 'vert':0, 'diag1':0, 'diag2':0}
for letter in hatch:
if (letter == '/'): hatches['diag2'] += 1
elif (letter == '\\'): hatches['diag1'] += 1
elif (letter == '|'): hatches['vert'] += 1
elif (letter == '-'): hatches['horiz'] += 1
elif (letter == '+'):
hatches['horiz'] += 1
hatches['vert'] += 1
elif (letter.lower() == 'x'):
hatches['diag1'] += 1
hatches['diag2'] += 1
def do_hatch(angle, density):
if (density == 0): return ""
return """\
gsave
eoclip %s rotate 0.0 0.0 0.0 0.0 setrgbcolor 0 setlinewidth
/hatchgap %d def
pathbbox /hatchb exch def /hatchr exch def /hatcht exch def /hatchl exch def
hatchl cvi hatchgap idiv hatchgap mul
hatchgap
hatchr cvi hatchgap idiv hatchgap mul
{hatcht m 0 hatchb hatcht sub r }
for
stroke
grestore
""" % (angle, 12/density)
self._pswriter.write("gsave\n")
self._pswriter.write(do_hatch(90, hatches['horiz']))
self._pswriter.write(do_hatch(0, hatches['vert']))
self._pswriter.write(do_hatch(45, hatches['diag1']))
self._pswriter.write(do_hatch(-45, hatches['diag2']))
self._pswriter.write("grestore\n")
def get_canvas_width_height(self):
'return the canvas width and height in display coords'
return self.width, self.height
def get_text_width_height_descent(self, s, prop, ismath):
"""
get the width and height in display coords of the string s
with FontPropertry prop
"""
if rcParams['text.usetex']:
texmanager = self.get_texmanager()
fontsize = prop.get_size_in_points()
l,b,r,t = texmanager.get_ps_bbox(s, fontsize)
w = (r-l)
h = (t-b)
# TODO: We need a way to get a good baseline from
# text.usetex
return w, h, 0
if ismath:
width, height, descent, pswriter, used_characters = \
self.mathtext_parser.parse(s, 72, prop)
return width, height, descent
if rcParams['ps.useafm']:
if ismath: s = s[1:-1]
font = self._get_font_afm(prop)
l,b,w,h,d = font.get_str_bbox_and_descent(s)
fontsize = prop.get_size_in_points()
scale = 0.001*fontsize
w *= scale
h *= scale
d *= scale
return w, h, d
font = self._get_font_ttf(prop)
font.set_text(s, 0.0, flags=LOAD_NO_HINTING)
w, h = font.get_width_height()
w /= 64.0 # convert from subpixels
h /= 64.0
d = font.get_descent()
d /= 64.0
#print s, w, h
return w, h, d
def flipy(self):
'return true if small y numbers are top for renderer'
return False
def _get_font_afm(self, prop):
key = hash(prop)
font = self.afmfontd.get(key)
if font is None:
fname = findfont(prop, fontext='afm')
font = self.afmfontd.get(fname)
if font is None:
font = AFM(file(findfont(prop, fontext='afm')))
self.afmfontd[fname] = font
self.afmfontd[key] = font
return font
def _get_font_ttf(self, prop):
key = hash(prop)
font = self.fontd.get(key)
if font is None:
fname = findfont(prop)
font = self.fontd.get(fname)
if font is None:
font = FT2Font(str(fname))
self.fontd[fname] = font
self.fontd[key] = font
font.clear()
size = prop.get_size_in_points()
font.set_size(size, 72.0)
return font
def _rgba(self, im):
return im.as_rgba_str()
def _rgb(self, im):
h,w,s = im.as_rgba_str()
rgba = npy.fromstring(s, npy.uint8)
rgba.shape = (h, w, 4)
rgb = rgba[:,:,:3]
return h, w, rgb.tostring()
def _gray(self, im, rc=0.3, gc=0.59, bc=0.11):
rgbat = im.as_rgba_str()
rgba = npy.fromstring(rgbat[2], npy.uint8)
rgba.shape = (rgbat[0], rgbat[1], 4)
rgba_f = rgba.astype(npy.float32)
r = rgba_f[:,:,0]
g = rgba_f[:,:,1]
b = rgba_f[:,:,2]
gray = (r*rc + g*gc + b*bc).astype(npy.uint8)
return rgbat[0], rgbat[1], gray.tostring()
def _hex_lines(self, s, chars_per_line=128):
s = binascii.b2a_hex(s)
nhex = len(s)
lines = []
for i in range(0,nhex,chars_per_line):
limit = min(i+chars_per_line, nhex)
lines.append(s[i:limit])
return lines
def get_image_magnification(self):
"""
Get the factor by which to magnify images passed to draw_image.
Allows a backend to have images at a different resolution to other
artists.
"""
return self.image_magnification
def draw_image(self, x, y, im, bbox, clippath=None, clippath_trans=None):
"""
Draw the Image instance into the current axes; x is the
distance in pixels from the left hand side of the canvas and y
is the distance from bottom
bbox is a matplotlib.transforms.BBox instance for clipping, or
None
"""
im.flipud_out()
if im.is_grayscale:
h, w, bits = self._gray(im)
imagecmd = "image"
else:
h, w, bits = self._rgb(im)
imagecmd = "false 3 colorimage"
hexlines = '\n'.join(self._hex_lines(bits))
xscale, yscale = (
w/self.image_magnification, h/self.image_magnification)
figh = self.height*72
#print 'values', origin, flipud, figh, h, y
clip = []
if bbox is not None:
clipx,clipy,clipw,cliph = bbox.bounds
clip.append('%s clipbox' % _nums_to_str(clipw, cliph, clipx, clipy))
if clippath is not None:
id = self._get_clip_path(clippath, clippath_trans)
clip.append('%s' % id)
clip = '\n'.join(clip)
#y = figh-(y+h)
ps = """gsave
%(clip)s
%(x)s %(y)s translate
%(xscale)s %(yscale)s scale
/DataString %(w)s string def
%(w)s %(h)s 8 [ %(w)s 0 0 -%(h)s 0 %(h)s ]
{
currentfile DataString readhexstring pop
} bind %(imagecmd)s
%(hexlines)s
grestore
""" % locals()
self._pswriter.write(ps)
# unflip
im.flipud_out()
def _convert_path(self, path, transform, simplify=None):
path = transform.transform_path(path)
ps = []
last_points = None
for points, code in path.iter_segments(simplify):
if code == Path.MOVETO:
ps.append("%g %g m" % tuple(points))
elif code == Path.LINETO:
ps.append("%g %g l" % tuple(points))
elif code == Path.CURVE3:
points = quad2cubic(*(list(last_points[-2:]) + list(points)))
ps.append("%g %g %g %g %g %g c" %
tuple(points[2:]))
elif code == Path.CURVE4:
ps.append("%g %g %g %g %g %g c" % tuple(points))
elif code == Path.CLOSEPOLY:
ps.append("cl")
last_points = points
ps = "\n".join(ps)
return ps
def _get_clip_path(self, clippath, clippath_transform):
id = self._clip_paths.get((clippath, clippath_transform))
if id is None:
id = 'c%x' % len(self._clip_paths)
ps_cmd = ['/%s {' % id]
ps_cmd.append(self._convert_path(clippath, clippath_transform))
ps_cmd.extend(['clip', 'newpath', '} bind def\n'])
self._pswriter.write('\n'.join(ps_cmd))
self._clip_paths[(clippath, clippath_transform)] = id
return id
def draw_path(self, gc, path, transform, rgbFace=None):
"""
Draws a Path instance using the given affine transform.
"""
ps = self._convert_path(path, transform, self.simplify)
self._draw_ps(ps, gc, rgbFace)
def draw_markers(self, gc, marker_path, marker_trans, path, trans, rgbFace=None):
"""
Draw the markers defined by path at each of the positions in x
and y. path coordinates are points, x and y coords will be
transformed by the transform
"""
if debugPS: self._pswriter.write('% draw_markers \n')
write = self._pswriter.write
if rgbFace:
if rgbFace[0]==rgbFace[1] and rgbFace[0]==rgbFace[2]:
ps_color = '%1.3f setgray' % rgbFace[0]
else:
ps_color = '%1.3f %1.3f %1.3f setrgbcolor' % rgbFace
# construct the generic marker command:
ps_cmd = ['/o {', 'gsave', 'newpath', 'translate'] # dont want the translate to be global
ps_cmd.append(self._convert_path(marker_path, marker_trans))
if rgbFace:
ps_cmd.extend(['gsave', ps_color, 'fill', 'grestore'])
ps_cmd.extend(['stroke', 'grestore', '} bind def'])
tpath = trans.transform_path(path)
for vertices, code in tpath.iter_segments():
if len(vertices):
x, y = vertices[-2:]
ps_cmd.append("%g %g o" % (x, y))
ps = '\n'.join(ps_cmd)
self._draw_ps(ps, gc, rgbFace, fill=False, stroke=False)
def draw_path_collection(self, master_transform, cliprect, clippath,
clippath_trans, paths, all_transforms, offsets,
offsetTrans, facecolors, edgecolors, linewidths,
linestyles, antialiaseds, urls):
write = self._pswriter.write
path_codes = []
for i, (path, transform) in enumerate(self._iter_collection_raw_paths(
master_transform, paths, all_transforms)):
name = 'p%x_%x' % (self._path_collection_id, i)
ps_cmd = ['/%s {' % name,
'newpath', 'translate']
ps_cmd.append(self._convert_path(path, transform))
ps_cmd.extend(['} bind def\n'])
write('\n'.join(ps_cmd))
path_codes.append(name)
for xo, yo, path_id, gc, rgbFace in self._iter_collection(
path_codes, cliprect, clippath, clippath_trans,
offsets, offsetTrans, facecolors, edgecolors,
linewidths, linestyles, antialiaseds, urls):
ps = "%g %g %s" % (xo, yo, path_id)
self._draw_ps(ps, gc, rgbFace)
self._path_collection_id += 1
def draw_tex(self, gc, x, y, s, prop, angle, ismath='TeX!'):
"""
draw a Text instance
"""
w, h, bl = self.get_text_width_height_descent(s, prop, ismath)
fontsize = prop.get_size_in_points()
corr = 0#w/2*(fontsize-10)/10
pos = _nums_to_str(x-corr, y)
thetext = 'psmarker%d' % self.textcnt
color = '%1.3f,%1.3f,%1.3f'% gc.get_rgb()[:3]
fontcmd = {'sans-serif' : r'{\sffamily %s}',
'monospace' : r'{\ttfamily %s}'}.get(
rcParams['font.family'], r'{\rmfamily %s}')
s = fontcmd % s
tex = r'\color[rgb]{%s} %s' % (color, s)
self.psfrag.append(r'\psfrag{%s}[bl][bl][1][%f]{\fontsize{%f}{%f}%s}'%(thetext, angle, fontsize, fontsize*1.25, tex))
ps = """\
gsave
%(pos)s moveto
(%(thetext)s)
show
grestore
""" % locals()
self._pswriter.write(ps)
self.textcnt += 1
def draw_text(self, gc, x, y, s, prop, angle, ismath):
"""
draw a Text instance
"""
# local to avoid repeated attribute lookups
write = self._pswriter.write
if debugPS:
write("% text\n")
if ismath=='TeX':
            return self.draw_tex(gc, x, y, s, prop, angle)
elif ismath:
return self.draw_mathtext(gc, x, y, s, prop, angle)
elif isinstance(s, unicode):
return self.draw_unicode(gc, x, y, s, prop, angle)
elif rcParams['ps.useafm']:
font = self._get_font_afm(prop)
l,b,w,h = font.get_str_bbox(s)
fontsize = prop.get_size_in_points()
l *= 0.001*fontsize
b *= 0.001*fontsize
w *= 0.001*fontsize
h *= 0.001*fontsize
if angle==90: l,b = -b, l # todo generalize for arb rotations
pos = _nums_to_str(x-l, y-b)
thetext = '(%s)' % s
fontname = font.get_fontname()
fontsize = prop.get_size_in_points()
rotate = '%1.1f rotate' % angle
setcolor = '%1.3f %1.3f %1.3f setrgbcolor' % gc.get_rgb()[:3]
#h = 0
ps = """\
gsave
/%(fontname)s findfont
%(fontsize)s scalefont
setfont
%(pos)s moveto
%(rotate)s
%(thetext)s
%(setcolor)s
show
grestore
""" % locals()
self._draw_ps(ps, gc, None)
else:
font = self._get_font_ttf(prop)
font.set_text(s, 0, flags=LOAD_NO_HINTING)
self.track_characters(font, s)
self.set_color(*gc.get_rgb())
self.set_font(font.get_sfnt()[(1,0,0,6)], prop.get_size_in_points())
write("%s m\n"%_nums_to_str(x,y))
if angle:
write("gsave\n")
write("%s rotate\n"%_num_to_str(angle))
descent = font.get_descent() / 64.0
if descent:
write("0 %s rmoveto\n"%_num_to_str(descent))
write("(%s) show\n"%quote_ps_string(s))
if angle:
write("grestore\n")
def new_gc(self):
return GraphicsContextPS()
def draw_unicode(self, gc, x, y, s, prop, angle):
"""draw a unicode string. ps doesn't have unicode support, so
we have to do this the hard way
"""
if rcParams['ps.useafm']:
self.set_color(*gc.get_rgb())
font = self._get_font_afm(prop)
fontname = font.get_fontname()
fontsize = prop.get_size_in_points()
scale = 0.001*fontsize
thisx = 0
thisy = font.get_str_bbox_and_descent(s)[4] * scale
last_name = None
lines = []
for c in s:
name = uni2type1.get(ord(c), 'question')
try:
width = font.get_width_from_char_name(name)
except KeyError:
name = 'question'
width = font.get_width_char('?')
if last_name is not None:
kern = font.get_kern_dist_from_name(last_name, name)
else:
kern = 0
last_name = name
thisx += kern * scale
lines.append('%f %f m /%s glyphshow'%(thisx, thisy, name))
thisx += width * scale
thetext = "\n".join(lines)
ps = """\
gsave
/%(fontname)s findfont
%(fontsize)s scalefont
setfont
%(x)f %(y)f translate
%(angle)f rotate
%(thetext)s
grestore
""" % locals()
self._pswriter.write(ps)
else:
font = self._get_font_ttf(prop)
font.set_text(s, 0, flags=LOAD_NO_HINTING)
self.track_characters(font, s)
self.set_color(*gc.get_rgb())
self.set_font(font.get_sfnt()[(1,0,0,6)], prop.get_size_in_points())
cmap = font.get_charmap()
lastgind = None
#print 'text', s
lines = []
thisx = 0
thisy = font.get_descent() / 64.0
for c in s:
ccode = ord(c)
gind = cmap.get(ccode)
if gind is None:
ccode = ord('?')
name = '.notdef'
gind = 0
else:
name = font.get_glyph_name(gind)
glyph = font.load_char(ccode, flags=LOAD_NO_HINTING)
if lastgind is not None:
kern = font.get_kerning(lastgind, gind, KERNING_DEFAULT)
else:
kern = 0
lastgind = gind
thisx += kern/64.0
lines.append('%f %f m /%s glyphshow'%(thisx, thisy, name))
thisx += glyph.linearHoriAdvance/65536.0
thetext = '\n'.join(lines)
ps = """gsave
%(x)f %(y)f translate
%(angle)f rotate
%(thetext)s
grestore
""" % locals()
self._pswriter.write(ps)
def draw_mathtext(self, gc,
x, y, s, prop, angle):
"""
Draw the math text using matplotlib.mathtext
"""
if debugPS:
self._pswriter.write("% mathtext\n")
width, height, descent, pswriter, used_characters = \
self.mathtext_parser.parse(s, 72, prop)
self.merge_used_characters(used_characters)
self.set_color(*gc.get_rgb())
thetext = pswriter.getvalue()
ps = """gsave
%(x)f %(y)f translate
%(angle)f rotate
%(thetext)s
grestore
""" % locals()
self._pswriter.write(ps)
def _draw_ps(self, ps, gc, rgbFace, fill=True, stroke=True, command=None):
"""
Emit the PostScript sniplet 'ps' with all the attributes from 'gc'
applied. 'ps' must consist of PostScript commands to construct a path.
The fill and/or stroke kwargs can be set to False if the
'ps' string already includes filling and/or stroking, in
which case _draw_ps is just supplying properties and
clipping.
"""
# local variable eliminates all repeated attribute lookups
write = self._pswriter.write
if debugPS and command:
write("% "+command+"\n")
mightstroke = (gc.get_linewidth() > 0.0 and
(len(gc.get_rgb()) <= 3 or gc.get_rgb()[3] != 0.0))
stroke = stroke and mightstroke
fill = (fill and rgbFace is not None and
(len(rgbFace) <= 3 or rgbFace[3] != 0.0))
if mightstroke:
self.set_linewidth(gc.get_linewidth())
jint = gc.get_joinstyle()
self.set_linejoin(jint)
cint = gc.get_capstyle()
self.set_linecap(cint)
self.set_linedash(*gc.get_dashes())
self.set_color(*gc.get_rgb()[:3])
write('gsave\n')
cliprect = gc.get_clip_rectangle()
if cliprect:
x,y,w,h=cliprect.bounds
write('%1.4g %1.4g %1.4g %1.4g clipbox\n' % (w,h,x,y))
clippath, clippath_trans = gc.get_clip_path()
if clippath:
id = self._get_clip_path(clippath, clippath_trans)
write('%s\n' % id)
# Jochen, is the strip necessary? - this could be a honking big string
write(ps.strip())
write("\n")
if fill:
if stroke:
write("gsave\n")
self.set_color(store=0, *rgbFace[:3])
write("fill\ngrestore\n")
else:
self.set_color(store=0, *rgbFace[:3])
write("fill\n")
hatch = gc.get_hatch()
if hatch:
self.set_hatch(hatch)
if stroke:
write("stroke\n")
write("grestore\n")
class GraphicsContextPS(GraphicsContextBase):
def get_capstyle(self):
return {'butt':0,
'round':1,
'projecting':2}[GraphicsContextBase.get_capstyle(self)]
def get_joinstyle(self):
return {'miter':0,
'round':1,
'bevel':2}[GraphicsContextBase.get_joinstyle(self)]
def new_figure_manager(num, *args, **kwargs):
FigureClass = kwargs.pop('FigureClass', Figure)
thisFig = FigureClass(*args, **kwargs)
canvas = FigureCanvasPS(thisFig)
manager = FigureManagerPS(canvas, num)
return manager
class FigureCanvasPS(FigureCanvasBase):
def draw(self):
pass
filetypes = {'ps' : 'Postscript',
'eps' : 'Encapsulated Postscript'}
def get_default_filetype(self):
return 'ps'
def print_ps(self, outfile, *args, **kwargs):
return self._print_ps(outfile, 'ps', *args, **kwargs)
def print_eps(self, outfile, *args, **kwargs):
return self._print_ps(outfile, 'eps', *args, **kwargs)
def _print_ps(self, outfile, format, *args, **kwargs):
papertype = kwargs.get("papertype", rcParams['ps.papersize'])
papertype = papertype.lower()
if papertype == 'auto':
pass
elif papertype not in papersize:
raise RuntimeError( '%s is not a valid papertype. Use one \
of %s'% (papertype, ', '.join( papersize.keys() )) )
orientation = kwargs.get("orientation", "portrait").lower()
if orientation == 'landscape': isLandscape = True
elif orientation == 'portrait': isLandscape = False
else: raise RuntimeError('Orientation must be "portrait" or "landscape"')
self.figure.set_dpi(72) # Override the dpi kwarg
imagedpi = kwargs.get("dpi", 72)
facecolor = kwargs.get("facecolor", "w")
edgecolor = kwargs.get("edgecolor", "w")
if rcParams['text.usetex']:
self._print_figure_tex(outfile, format, imagedpi, facecolor, edgecolor,
orientation, isLandscape, papertype)
else:
self._print_figure(outfile, format, imagedpi, facecolor, edgecolor,
orientation, isLandscape, papertype)
def _print_figure(self, outfile, format, dpi=72, facecolor='w', edgecolor='w',
orientation='portrait', isLandscape=False, papertype=None):
"""
Render the figure to hardcopy. Set the figure patch face and
edge colors. This is useful because some of the GUIs have a
gray figure face color background and you'll probably want to
override this on hardcopy
If outfile is a string, it is interpreted as a file name.
If the extension matches .ep* write encapsulated postscript,
otherwise write a stand-alone PostScript file.
If outfile is a file object, a stand-alone PostScript file is
written into this file object.
"""
isEPSF = format == 'eps'
passed_in_file_object = False
if is_string_like(outfile):
title = outfile
tmpfile = os.path.join(gettempdir(), md5(outfile).hexdigest())
elif is_writable_file_like(outfile):
title = None
tmpfile = os.path.join(gettempdir(), md5(str(hash(outfile))).hexdigest())
passed_in_file_object = True
else:
raise ValueError("outfile must be a path or a file-like object")
fh = file(tmpfile, 'w')
# find the appropriate papertype
width, height = self.figure.get_size_inches()
if papertype == 'auto':
if isLandscape: papertype = _get_papertype(height, width)
else: papertype = _get_papertype(width, height)
if isLandscape: paperHeight, paperWidth = papersize[papertype]
else: paperWidth, paperHeight = papersize[papertype]
if rcParams['ps.usedistiller'] and not papertype == 'auto':
# distillers will improperly clip eps files if the pagesize is
# too small
if width>paperWidth or height>paperHeight:
if isLandscape:
papertype = _get_papertype(height, width)
paperHeight, paperWidth = papersize[papertype]
else:
papertype = _get_papertype(width, height)
paperWidth, paperHeight = papersize[papertype]
# center the figure on the paper
xo = 72*0.5*(paperWidth - width)
yo = 72*0.5*(paperHeight - height)
l, b, w, h = self.figure.bbox.bounds
llx = xo
lly = yo
urx = llx + w
ury = lly + h
rotation = 0
if isLandscape:
llx, lly, urx, ury = lly, llx, ury, urx
xo, yo = 72*paperHeight - yo, xo
rotation = 90
bbox = (llx, lly, urx, ury)
# generate PostScript code for the figure and store it in a string
origfacecolor = self.figure.get_facecolor()
origedgecolor = self.figure.get_edgecolor()
self.figure.set_facecolor(facecolor)
self.figure.set_edgecolor(edgecolor)
self._pswriter = StringIO()
renderer = RendererPS(width, height, self._pswriter, imagedpi=dpi)
self.figure.draw(renderer)
self.figure.set_facecolor(origfacecolor)
self.figure.set_edgecolor(origedgecolor)
# write the PostScript headers
if isEPSF: print >>fh, "%!PS-Adobe-3.0 EPSF-3.0"
else: print >>fh, "%!PS-Adobe-3.0"
if title: print >>fh, "%%Title: "+title
print >>fh, ("%%Creator: matplotlib version "
+__version__+", http://matplotlib.sourceforge.net/")
print >>fh, "%%CreationDate: "+time.ctime(time.time())
print >>fh, "%%Orientation: " + orientation
if not isEPSF: print >>fh, "%%DocumentPaperSizes: "+papertype
print >>fh, "%%%%BoundingBox: %d %d %d %d" % bbox
if not isEPSF: print >>fh, "%%Pages: 1"
print >>fh, "%%EndComments"
Ndict = len(psDefs)
print >>fh, "%%BeginProlog"
if not rcParams['ps.useafm']:
Ndict += len(renderer.used_characters)
print >>fh, "/mpldict %d dict def"%Ndict
print >>fh, "mpldict begin"
for d in psDefs:
d=d.strip()
for l in d.split('\n'):
print >>fh, l.strip()
if not rcParams['ps.useafm']:
for font_filename, chars in renderer.used_characters.values():
if len(chars):
font = FT2Font(font_filename)
cmap = font.get_charmap()
glyph_ids = []
for c in chars:
gind = cmap.get(c) or 0
glyph_ids.append(gind)
# The ttf to ps (subsetting) support doesn't work for
# OpenType fonts that are Postscript inside (like the
                    # STIX fonts).  An informative error is raised in that
                    # case instead.
if is_opentype_cff_font(font_filename):
raise RuntimeError("OpenType CFF fonts can not be saved using the internal Postscript backend at this time.\nConsider using the Cairo backend.")
else:
fonttype = rcParams['ps.fonttype']
convert_ttf_to_ps(font_filename, fh, rcParams['ps.fonttype'], glyph_ids)
print >>fh, "end"
print >>fh, "%%EndProlog"
if not isEPSF: print >>fh, "%%Page: 1 1"
print >>fh, "mpldict begin"
#print >>fh, "gsave"
print >>fh, "%s translate"%_nums_to_str(xo, yo)
if rotation: print >>fh, "%d rotate"%rotation
print >>fh, "%s clipbox"%_nums_to_str(width*72, height*72, 0, 0)
# write the figure
print >>fh, self._pswriter.getvalue()
# write the trailer
#print >>fh, "grestore"
print >>fh, "end"
print >>fh, "showpage"
if not isEPSF: print >>fh, "%%EOF"
fh.close()
if rcParams['ps.usedistiller'] == 'ghostscript':
gs_distill(tmpfile, isEPSF, ptype=papertype, bbox=bbox)
elif rcParams['ps.usedistiller'] == 'xpdf':
xpdf_distill(tmpfile, isEPSF, ptype=papertype, bbox=bbox)
if passed_in_file_object:
fh = file(tmpfile)
print >>outfile, fh.read()
else:
shutil.move(tmpfile, outfile)
def _print_figure_tex(self, outfile, format, dpi, facecolor, edgecolor,
orientation, isLandscape, papertype):
"""
If text.usetex is True in rc, a temporary pair of tex/eps files
are created to allow tex to manage the text layout via the PSFrags
package. These files are processed to yield the final ps or eps file.
"""
isEPSF = format == 'eps'
title = outfile
# write to a temp file, we'll move it to outfile when done
tmpfile = os.path.join(gettempdir(), md5(outfile).hexdigest())
fh = file(tmpfile, 'w')
self.figure.dpi = 72 # ignore the dpi kwarg
width, height = self.figure.get_size_inches()
xo = 0
yo = 0
l, b, w, h = self.figure.bbox.bounds
llx = xo
lly = yo
urx = llx + w
ury = lly + h
bbox = (llx, lly, urx, ury)
# generate PostScript code for the figure and store it in a string
origfacecolor = self.figure.get_facecolor()
origedgecolor = self.figure.get_edgecolor()
self.figure.set_facecolor(facecolor)
self.figure.set_edgecolor(edgecolor)
self._pswriter = StringIO()
renderer = RendererPS(width, height, self._pswriter, imagedpi=dpi)
self.figure.draw(renderer)
self.figure.set_facecolor(origfacecolor)
self.figure.set_edgecolor(origedgecolor)
# write the Encapsulated PostScript headers
print >>fh, "%!PS-Adobe-3.0 EPSF-3.0"
if title: print >>fh, "%%Title: "+title
print >>fh, ("%%Creator: matplotlib version "
+__version__+", http://matplotlib.sourceforge.net/")
print >>fh, "%%CreationDate: "+time.ctime(time.time())
print >>fh, "%%%%BoundingBox: %d %d %d %d" % bbox
print >>fh, "%%EndComments"
Ndict = len(psDefs)
print >>fh, "%%BeginProlog"
print >>fh, "/mpldict %d dict def"%Ndict
print >>fh, "mpldict begin"
for d in psDefs:
d=d.strip()
for l in d.split('\n'):
print >>fh, l.strip()
print >>fh, "end"
print >>fh, "%%EndProlog"
print >>fh, "mpldict begin"
#print >>fh, "gsave"
print >>fh, "%s translate"%_nums_to_str(xo, yo)
print >>fh, "%s clipbox"%_nums_to_str(width*72, height*72, 0, 0)
# write the figure
print >>fh, self._pswriter.getvalue()
# write the trailer
#print >>fh, "grestore"
print >>fh, "end"
print >>fh, "showpage"
fh.close()
if isLandscape: # now we are ready to rotate
isLandscape = True
width, height = height, width
bbox = (lly, llx, ury, urx)
temp_papertype = _get_papertype(width, height)
if papertype=='auto':
papertype = temp_papertype
paperWidth, paperHeight = papersize[temp_papertype]
else:
paperWidth, paperHeight = papersize[papertype]
if (width>paperWidth or height>paperHeight) and isEPSF:
paperWidth, paperHeight = papersize[temp_papertype]
verbose.report('Your figure is too big to fit on %s paper. %s \
paper will be used to prevent clipping.'%(papertype, temp_papertype), 'helpful')
texmanager = renderer.get_texmanager()
font_preamble = texmanager.get_font_preamble()
custom_preamble = texmanager.get_custom_preamble()
convert_psfrags(tmpfile, renderer.psfrag, font_preamble,
custom_preamble, paperWidth, paperHeight,
orientation)
if rcParams['ps.usedistiller'] == 'ghostscript':
gs_distill(tmpfile, isEPSF, ptype=papertype, bbox=bbox)
elif rcParams['ps.usedistiller'] == 'xpdf':
xpdf_distill(tmpfile, isEPSF, ptype=papertype, bbox=bbox)
elif rcParams['text.usetex']:
if False: pass # for debugging
else: gs_distill(tmpfile, isEPSF, ptype=papertype, bbox=bbox)
if isinstance(outfile, file):
fh = file(tmpfile)
print >>outfile, fh.read()
else: shutil.move(tmpfile, outfile)
def convert_psfrags(tmpfile, psfrags, font_preamble, custom_preamble,
paperWidth, paperHeight, orientation):
"""
When we want to use the LaTeX backend with postscript, we write PSFrag tags
to a temporary postscript file, each one marking a position for LaTeX to
render some text. convert_psfrags generates a LaTeX document containing the
commands to convert those tags to text. LaTeX/dvips produces the postscript
file that includes the actual text.
"""
tmpdir = os.path.split(tmpfile)[0]
epsfile = tmpfile+'.eps'
shutil.move(tmpfile, epsfile)
latexfile = tmpfile+'.tex'
outfile = tmpfile+'.output'
latexh = file(latexfile, 'w')
dvifile = tmpfile+'.dvi'
psfile = tmpfile+'.ps'
if orientation=='landscape': angle = 90
else: angle = 0
if rcParams['text.latex.unicode']:
unicode_preamble = """\usepackage{ucs}
\usepackage[utf8x]{inputenc}"""
else:
unicode_preamble = ''
s = r"""\documentclass{article}
%s
%s
%s
\usepackage[dvips, papersize={%sin,%sin}, body={%sin,%sin}, margin={0in,0in}]{geometry}
\usepackage{psfrag}
\usepackage[dvips]{graphicx}
\usepackage{color}
\pagestyle{empty}
\begin{document}
\begin{figure}
\centering
\leavevmode
%s
\includegraphics*[angle=%s]{%s}
\end{figure}
\end{document}
"""% (font_preamble, unicode_preamble, custom_preamble, paperWidth, paperHeight,
paperWidth, paperHeight,
'\n'.join(psfrags), angle, os.path.split(epsfile)[-1])
if rcParams['text.latex.unicode']:
latexh.write(s.encode('utf8'))
else:
try:
latexh.write(s)
except UnicodeEncodeError, err:
verbose.report("You are using unicode and latex, but have "
"not enabled the matplotlib 'text.latex.unicode' "
"rcParam.", 'helpful')
raise
latexh.close()
# the split drive part of the command is necessary for windows users with
    # multiple drives
if sys.platform == 'win32': precmd = '%s &&'% os.path.splitdrive(tmpdir)[0]
else: precmd = ''
command = '%s cd "%s" && latex -interaction=nonstopmode "%s" > "%s"'\
%(precmd, tmpdir, latexfile, outfile)
verbose.report(command, 'debug')
exit_status = os.system(command)
fh = file(outfile)
if exit_status:
raise RuntimeError('LaTeX was not able to process your file:\
\nHere is the full report generated by LaTeX: \n\n%s'% fh.read())
else: verbose.report(fh.read(), 'debug')
fh.close()
os.remove(outfile)
command = '%s cd "%s" && dvips -q -R0 -o "%s" "%s" > "%s"'%(precmd, tmpdir,
os.path.split(psfile)[-1], os.path.split(dvifile)[-1], outfile)
verbose.report(command, 'debug')
exit_status = os.system(command)
fh = file(outfile)
if exit_status: raise RuntimeError('dvips was not able to \
process the following file:\n%s\nHere is the full report generated by dvips: \
\n\n'% dvifile + fh.read())
else: verbose.report(fh.read(), 'debug')
fh.close()
os.remove(outfile)
os.remove(epsfile)
shutil.move(psfile, tmpfile)
if not debugPS:
for fname in glob.glob(tmpfile+'.*'):
os.remove(fname)
def gs_distill(tmpfile, eps=False, ptype='letter', bbox=None):
"""
Use ghostscript's pswrite or epswrite device to distill a file.
This yields smaller files without illegal encapsulated postscript
operators. The output is low-level, converting text to outlines.
"""
paper = '-sPAPERSIZE=%s'% ptype
psfile = tmpfile + '.ps'
outfile = tmpfile + '.output'
dpi = rcParams['ps.distiller.res']
if sys.platform == 'win32': gs_exe = 'gswin32c'
else: gs_exe = 'gs'
command = '%s -dBATCH -dNOPAUSE -r%d -sDEVICE=pswrite %s -sOutputFile="%s" \
"%s" > "%s"'% (gs_exe, dpi, paper, psfile, tmpfile, outfile)
verbose.report(command, 'debug')
exit_status = os.system(command)
fh = file(outfile)
if exit_status: raise RuntimeError('ghostscript was not able to process \
your image.\nHere is the full report generated by ghostscript:\n\n' + fh.read())
else: verbose.report(fh.read(), 'debug')
fh.close()
os.remove(outfile)
os.remove(tmpfile)
shutil.move(psfile, tmpfile)
if eps:
pstoeps(tmpfile, bbox)
def xpdf_distill(tmpfile, eps=False, ptype='letter', bbox=None):
"""
Use ghostscript's ps2pdf and xpdf's/poppler's pdftops to distill a file.
This yields smaller files without illegal encapsulated postscript
operators. This distiller is preferred, generating high-level postscript
output that treats text as text.
"""
pdffile = tmpfile + '.pdf'
psfile = tmpfile + '.ps'
outfile = tmpfile + '.output'
command = 'ps2pdf -dAutoFilterColorImages=false \
-sColorImageFilter=FlateEncode -sPAPERSIZE=%s "%s" "%s" > "%s"'% \
(ptype, tmpfile, pdffile, outfile)
if sys.platform == 'win32': command = command.replace('=', '#')
verbose.report(command, 'debug')
exit_status = os.system(command)
fh = file(outfile)
if exit_status: raise RuntimeError('ps2pdf was not able to process your \
image.\nHere is the report generated by ghostscript:\n\n' + fh.read())
else: verbose.report(fh.read(), 'debug')
fh.close()
os.remove(outfile)
command = 'pdftops -paper match -level2 "%s" "%s" > "%s"'% \
(pdffile, psfile, outfile)
verbose.report(command, 'debug')
exit_status = os.system(command)
fh = file(outfile)
if exit_status: raise RuntimeError('pdftops was not able to process your \
image.\nHere is the full report generated by pdftops: \n\n' + fh.read())
else: verbose.report(fh.read(), 'debug')
fh.close()
os.remove(outfile)
os.remove(tmpfile)
shutil.move(psfile, tmpfile)
if eps:
pstoeps(tmpfile, bbox)
for fname in glob.glob(tmpfile+'.*'):
os.remove(fname)
def get_bbox(tmpfile, bbox):
"""
Use ghostscript's bbox device to find the center of the bounding box. Return
an appropriately sized bbox centered around that point. A bit of a hack.
"""
outfile = tmpfile + '.output'
if sys.platform == 'win32': gs_exe = 'gswin32c'
else: gs_exe = 'gs'
command = '%s -dBATCH -dNOPAUSE -sDEVICE=bbox "%s"' %\
(gs_exe, tmpfile)
verbose.report(command, 'debug')
stdin, stdout, stderr = os.popen3(command)
verbose.report(stdout.read(), 'debug-annoying')
bbox_info = stderr.read()
verbose.report(bbox_info, 'helpful')
bbox_found = re.search('%%HiResBoundingBox: .*', bbox_info)
if bbox_found:
bbox_info = bbox_found.group()
else:
raise RuntimeError('Ghostscript was not able to extract a bounding box.\
Here is the Ghostscript output:\n\n%s'% bbox_info)
l, b, r, t = [float(i) for i in bbox_info.split()[-4:]]
# this is a hack to deal with the fact that ghostscript does not return the
# intended bbox, but a tight bbox. For now, we just center the ink in the
# intended bbox. This is not ideal, users may intend the ink to not be
# centered.
if bbox is None:
l, b, r, t = (l-1, b-1, r+1, t+1)
else:
x = (l+r)/2
y = (b+t)/2
dx = (bbox[2]-bbox[0])/2
dy = (bbox[3]-bbox[1])/2
l,b,r,t = (x-dx, y-dy, x+dx, y+dy)
bbox_info = '%%%%BoundingBox: %d %d %d %d' % (l, b, npy.ceil(r), npy.ceil(t))
hires_bbox_info = '%%%%HiResBoundingBox: %.6f %.6f %.6f %.6f' % (l, b, r, t)
return '\n'.join([bbox_info, hires_bbox_info])
def pstoeps(tmpfile, bbox):
"""
Convert the postscript to encapsulated postscript.
"""
bbox_info = get_bbox(tmpfile, bbox)
epsfile = tmpfile + '.eps'
epsh = file(epsfile, 'w')
tmph = file(tmpfile)
line = tmph.readline()
# Modify the header:
while line:
if line.startswith('%!PS'):
print >>epsh, "%!PS-Adobe-3.0 EPSF-3.0"
print >>epsh, bbox_info
elif line.startswith('%%EndComments'):
epsh.write(line)
print >>epsh, '%%BeginProlog'
print >>epsh, 'save'
print >>epsh, 'countdictstack'
print >>epsh, 'mark'
print >>epsh, 'newpath'
print >>epsh, '/showpage {} def'
print >>epsh, '/setpagedevice {pop} def'
print >>epsh, '%%EndProlog'
print >>epsh, '%%Page 1 1'
break
elif line.startswith('%%Bound') \
or line.startswith('%%HiResBound') \
or line.startswith('%%Pages'):
pass
else:
epsh.write(line)
line = tmph.readline()
# Now rewrite the rest of the file, and modify the trailer.
# This is done in a second loop such that the header of the embedded
# eps file is not modified.
line = tmph.readline()
while line:
if line.startswith('%%Trailer'):
print >>epsh, '%%Trailer'
print >>epsh, 'cleartomark'
print >>epsh, 'countdictstack'
print >>epsh, 'exch sub { end } repeat'
print >>epsh, 'restore'
if rcParams['ps.usedistiller'] == 'xpdf':
# remove extraneous "end" operator:
line = tmph.readline()
else:
epsh.write(line)
line = tmph.readline()
tmph.close()
epsh.close()
os.remove(tmpfile)
shutil.move(epsfile, tmpfile)
class FigureManagerPS(FigureManagerBase):
pass
FigureManager = FigureManagerPS
# The following Python dictionary psDefs contains the entries for the
# PostScript dictionary mpldict. This dictionary implements most of
# the matplotlib primitives and some abbreviations.
#
# References:
# http://www.adobe.com/products/postscript/pdfs/PLRM.pdf
# http://www.mactech.com/articles/mactech/Vol.09/09.04/PostscriptTutorial/
# http://www.math.ubc.ca/people/faculty/cass/graphics/text/www/
#
# The usage comments use the notation of the operator summary
# in the PostScript Language reference manual.
psDefs = [
# x y *m* -
"/m { moveto } bind def",
# x y *l* -
"/l { lineto } bind def",
# x y *r* -
"/r { rlineto } bind def",
# x1 y1 x2 y2 x y *c* -
"/c { curveto } bind def",
# *closepath* -
"/cl { closepath } bind def",
# w h x y *box* -
"""/box {
m
1 index 0 r
0 exch r
neg 0 r
cl
} bind def""",
# w h x y *clipbox* -
"""/clipbox {
box
clip
newpath
} bind def""",
]
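# For orientation, a fragment of the emitted PostScript using the
# abbreviations above might look like this (the values are illustrative only;
# 576 x 432 pt corresponds to an 8 x 6 inch figure):
#
#   mpldict begin
#   576 432 0 0 clipbox      % clip all drawing to the figure box
#   72 72 m 144 72 l 144 144 l cl
#   stroke
#   end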
| agpl-3.0 |
kakaba2009/MachineLearning | python/src/mylib/mpeg.py | 1 | 1877 | import numpy as np
import matplotlib
#matplotlib.pyplot.switch_backend('Agg')
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from pandas.tools.plotting import radviz
from pandas.tools.plotting import lag_plot
from pandas.tools.plotting import scatter_matrix
from pandas.tools.plotting import andrews_curves
from pandas.tools.plotting import bootstrap_plot
from pandas.tools.plotting import parallel_coordinates
from pandas.tools.plotting import autocorrelation_plot
def m_scatter_mpeg(X, Y, filename):
#FFMpegWriter = manimation.writers['ffmpeg']
#metadata = dict(title=filename, artist='Matplotlib',comment=filename)
#writer = FFMpegWriter(fps=15, metadata=metadata)
fig = plt.figure()
func = plt.scatter(X, Y, s=np.abs(Y)*300.0, c=Y, cmap=plt.get_cmap('prism'))
#with writer.saving(fig, filename, 100):
for i in range(100):
        func.set_offsets(np.column_stack((X.shift(1), Y.shift(1))))  # PathCollection has set_offsets, not set_data
#writer.grab_frame()
def m_scatter_live(X, Y, filename):
fig, ax = plt.subplots()
ax.scatter(X, Y)
A, B = X, Y
def run(i):
print(i)
A = X.shift(i)
B = Y.shift(i)
ax.clear()
ax.scatter(A, B, s=np.abs(B)*100.0, c=B, cmap=plt.get_cmap('prism'))
im_ani = animation.FuncAnimation(fig, run, interval=500, repeat_delay=100)
#im_ani.save(filename, metadata={'artist':'Matplotlib'})
plt.show()
def pandas_lag_live(df):
fig, ax = plt.subplots()
def run(i):
print(i)
ax.clear()
lag_plot(df, lag=i+1) #lag must be >= 1
im_ani = animation.FuncAnimation(fig, run, interval=1000, repeat_delay=1000)
mng = plt.get_current_fig_manager()
mng.window.showMaximized()
plt.show()
| apache-2.0 |
astropy/astropy | examples/coordinates/plot_sgr-coordinate-frame.py | 2 | 10574 | # -*- coding: utf-8 -*-
r"""
==========================================================
Create a new coordinate class (for the Sagittarius stream)
==========================================================
This document describes in detail how to subclass and define a custom spherical
coordinate frame, as discussed in :ref:`astropy-coordinates-design` and the
docstring for `~astropy.coordinates.BaseCoordinateFrame`. In this example, we
will define a coordinate system defined by the plane of orbit of the Sagittarius
Dwarf Galaxy (hereafter Sgr; as defined in Majewski et al. 2003). The Sgr
coordinate system is often referred to in terms of two angular coordinates,
:math:`\Lambda,B`.
To do this, we need to define a subclass of
`~astropy.coordinates.BaseCoordinateFrame` that knows the names and units of the
coordinate system angles in each of the supported representations. In this case
we support `~astropy.coordinates.SphericalRepresentation` with "Lambda" and
"Beta". Then we have to define the transformation from this coordinate system to
some other built-in system. Here we will use Galactic coordinates, represented
by the `~astropy.coordinates.Galactic` class.
See Also
--------
* The `gala package <http://gala.adrian.pw/>`_, which defines a number of
Astropy coordinate frames for stellar stream coordinate systems.
* Majewski et al. 2003, "A Two Micron All Sky Survey View of the Sagittarius
Dwarf Galaxy. I. Morphology of the Sagittarius Core and Tidal Arms",
https://arxiv.org/abs/astro-ph/0304198
* Law & Majewski 2010, "The Sagittarius Dwarf Galaxy: A Model for Evolution in a
Triaxial Milky Way Halo", https://arxiv.org/abs/1003.1132
* David Law's Sgr info page https://www.stsci.edu/~dlaw/Sgr/
*By: Adrian Price-Whelan, Erik Tollerud*
*License: BSD*
"""
##############################################################################
# Set up numpy and matplotlib, and use a nicer set of plot parameters:
import numpy as np
import matplotlib.pyplot as plt
from astropy.visualization import astropy_mpl_style
plt.style.use(astropy_mpl_style)
##############################################################################
# Import the packages necessary for coordinates
from astropy.coordinates import frame_transform_graph
from astropy.coordinates.matrix_utilities import rotation_matrix, matrix_product, matrix_transpose
import astropy.coordinates as coord
import astropy.units as u
##############################################################################
# The first step is to create a new class, which we'll call
# ``Sagittarius`` and make it a subclass of
# `~astropy.coordinates.BaseCoordinateFrame`:
class Sagittarius(coord.BaseCoordinateFrame):
"""
A Heliocentric spherical coordinate system defined by the orbit
of the Sagittarius dwarf galaxy, as described in
https://ui.adsabs.harvard.edu/abs/2003ApJ...599.1082M
and further explained in
https://www.stsci.edu/~dlaw/Sgr/.
Parameters
----------
representation : `~astropy.coordinates.BaseRepresentation` or None
A representation object or None to have no data (or use the other keywords)
Lambda : `~astropy.coordinates.Angle`, optional, must be keyword
The longitude-like angle corresponding to Sagittarius' orbit.
Beta : `~astropy.coordinates.Angle`, optional, must be keyword
The latitude-like angle corresponding to Sagittarius' orbit.
distance : `~astropy.units.Quantity`, optional, must be keyword
The Distance for this object along the line-of-sight.
pm_Lambda_cosBeta : `~astropy.units.Quantity`, optional, must be keyword
The proper motion along the stream in ``Lambda`` (including the
``cos(Beta)`` factor) for this object (``pm_Beta`` must also be given).
pm_Beta : `~astropy.units.Quantity`, optional, must be keyword
        The proper motion in ``Beta`` for this object (``pm_Lambda_cosBeta`` must
        also be given).
radial_velocity : `~astropy.units.Quantity`, optional, keyword-only
The radial velocity of this object.
"""
default_representation = coord.SphericalRepresentation
default_differential = coord.SphericalCosLatDifferential
frame_specific_representation_info = {
coord.SphericalRepresentation: [
coord.RepresentationMapping('lon', 'Lambda'),
coord.RepresentationMapping('lat', 'Beta'),
coord.RepresentationMapping('distance', 'distance')]
}
##############################################################################
# Breaking this down line-by-line, we define the class as a subclass of
# `~astropy.coordinates.BaseCoordinateFrame`. Then we include a descriptive
# docstring. The final lines are class-level attributes that specify the
# default representation for the data, default differential for the velocity
# information, and mappings from the attribute names used by representation
# objects to the names that are to be used by the ``Sagittarius`` frame. In this
# case we override the names in the spherical representations but don't do
# anything with other representations like cartesian or cylindrical.
#
# Next we have to define the transformation from this coordinate system to some
# other built-in coordinate system; we will use Galactic coordinates. We can do
# this by defining functions that return transformation matrices, or by simply
# defining a function that accepts a coordinate and returns a new coordinate in
# the new system. Because the transformation to the Sagittarius coordinate
# system is just a spherical rotation from Galactic coordinates, we'll just
# define a function that returns this matrix. We'll start by constructing the
# transformation matrix using pre-determined Euler angles and the
# ``rotation_matrix`` helper function:
SGR_PHI = (180 + 3.75) * u.degree # Euler angles (from Law & Majewski 2010)
SGR_THETA = (90 - 13.46) * u.degree
SGR_PSI = (180 + 14.111534) * u.degree
# Generate the rotation matrix using the x-convention (see Goldstein)
D = rotation_matrix(SGR_PHI, "z")
C = rotation_matrix(SGR_THETA, "x")
B = rotation_matrix(SGR_PSI, "z")
A = np.diag([1.,1.,-1.])
SGR_MATRIX = matrix_product(A, B, C, D)
##############################################################################
# Since we already constructed the transformation (rotation) matrix above, and
# the inverse of a rotation matrix is just its transpose, the required
# transformation functions are very simple:
@frame_transform_graph.transform(coord.StaticMatrixTransform, coord.Galactic, Sagittarius)
def galactic_to_sgr():
""" Compute the transformation matrix from Galactic spherical to
heliocentric Sgr coordinates.
"""
return SGR_MATRIX
##############################################################################
# The decorator ``@frame_transform_graph.transform(coord.StaticMatrixTransform,
# coord.Galactic, Sagittarius)`` registers this function on the
# ``frame_transform_graph`` as a coordinate transformation. Inside the function,
# we simply return the previously defined rotation matrix.
#
# We then register the inverse transformation by using the transpose of the
# rotation matrix (which is faster to compute than the inverse):
@frame_transform_graph.transform(coord.StaticMatrixTransform, Sagittarius, coord.Galactic)
def sgr_to_galactic():
""" Compute the transformation matrix from heliocentric Sgr coordinates to
spherical Galactic.
"""
return matrix_transpose(SGR_MATRIX)
##############################################################################
# Now that we've registered these transformations between ``Sagittarius`` and
# `~astropy.coordinates.Galactic`, we can transform between *any* coordinate
# system and ``Sagittarius`` (as long as the other system has a path to
# transform to `~astropy.coordinates.Galactic`). For example, to transform from
# ICRS coordinates to ``Sagittarius``, we would do:
icrs = coord.SkyCoord(280.161732*u.degree, 11.91934*u.degree, frame='icrs')
sgr = icrs.transform_to(Sagittarius)
print(sgr)
##############################################################################
# Or, to transform from the ``Sagittarius`` frame to ICRS coordinates (in this
# case, a line along the ``Sagittarius`` x-y plane):
sgr = coord.SkyCoord(Lambda=np.linspace(0, 2*np.pi, 128)*u.radian,
Beta=np.zeros(128)*u.radian, frame='sagittarius')
icrs = sgr.transform_to(coord.ICRS)
print(icrs)
##############################################################################
# As an example, we'll now plot the points in both coordinate systems:
fig, axes = plt.subplots(2, 1, figsize=(8, 10),
subplot_kw={'projection': 'aitoff'})
axes[0].set_title("Sagittarius")
axes[0].plot(sgr.Lambda.wrap_at(180*u.deg).radian, sgr.Beta.radian,
linestyle='none', marker='.')
axes[1].set_title("ICRS")
axes[1].plot(icrs.ra.wrap_at(180*u.deg).radian, icrs.dec.radian,
linestyle='none', marker='.')
plt.show()
##############################################################################
# This particular transformation is just a spherical rotation, which is a
# special case of an Affine transformation with no vector offset. The
# transformation of velocity components is therefore natively supported as
# well:
sgr = coord.SkyCoord(Lambda=np.linspace(0, 2*np.pi, 128)*u.radian,
Beta=np.zeros(128)*u.radian,
pm_Lambda_cosBeta=np.random.uniform(-5, 5, 128)*u.mas/u.yr,
pm_Beta=np.zeros(128)*u.mas/u.yr,
frame='sagittarius')
icrs = sgr.transform_to(coord.ICRS)
print(icrs)
fig, axes = plt.subplots(3, 1, figsize=(8, 10), sharex=True)
axes[0].set_title("Sagittarius")
axes[0].plot(sgr.Lambda.degree,
sgr.pm_Lambda_cosBeta.value,
linestyle='none', marker='.')
axes[0].set_xlabel(r"$\Lambda$ [deg]")
axes[0].set_ylabel(
fr"$\mu_\Lambda \, \cos B$ [{sgr.pm_Lambda_cosBeta.unit.to_string('latex_inline')}]")
axes[1].set_title("ICRS")
axes[1].plot(icrs.ra.degree, icrs.pm_ra_cosdec.value,
linestyle='none', marker='.')
axes[1].set_ylabel(
fr"$\mu_\alpha \, \cos\delta$ [{icrs.pm_ra_cosdec.unit.to_string('latex_inline')}]")
axes[2].set_title("ICRS")
axes[2].plot(icrs.ra.degree, icrs.pm_dec.value,
linestyle='none', marker='.')
axes[2].set_xlabel("RA [deg]")
axes[2].set_ylabel(
fr"$\mu_\delta$ [{icrs.pm_dec.unit.to_string('latex_inline')}]")
plt.show()
| bsd-3-clause |
nvoron23/statsmodels | statsmodels/sandbox/regression/example_kernridge.py | 39 | 1232 |
import numpy as np
import matplotlib.pyplot as plt
from .kernridgeregress_class import GaussProcess, kernel_euclid
m,k = 50,4
upper = 6
scale = 10
xs = np.linspace(1,upper,m)[:,np.newaxis]
#xs1 = xs1a*np.ones((1,4)) + 1/(1.0+np.exp(np.random.randn(m,k)))
#xs1 /= np.std(xs1[::k,:],0) # normalize scale, could use cov to normalize
##y1true = np.sum(np.sin(xs1)+np.sqrt(xs1),1)[:,np.newaxis]
xs1 = np.sin(xs)#[:,np.newaxis]
y1true = np.sum(xs1 + 0.01*np.sqrt(np.abs(xs1)),1)[:,np.newaxis]
y1 = y1true + 0.10 * np.random.randn(m,1)
stride = 3  # use only some points as training points, e.g. 2 means every 2nd
xstrain = xs1[::stride,:]
ystrain = y1[::stride,:]
xstrain = np.r_[xs1[:m/2,:], xs1[m/2+10:,:]]
ystrain = np.r_[y1[:m/2,:], y1[m/2+10:,:]]
index = np.hstack((np.arange(m/2), np.arange(m/2+10,m)))
gp1 = GaussProcess(xstrain, ystrain, kernel=kernel_euclid,
ridgecoeff=5*1e-4)
yhatr1 = gp1.predict(xs1)
plt.figure()
plt.plot(y1true, y1,'bo',y1true, yhatr1,'r.')
plt.title('euclid kernel: true y versus noisy y and estimated y')
plt.figure()
plt.plot(index,ystrain.ravel(),'bo-',y1true,'go-',yhatr1,'r.-')
plt.title('euclid kernel: true (green), noisy (blue) and estimated (red) '+
'observations')
| bsd-3-clause |
imatge-upc/detection-2016-nipsws | scripts/image_zooms_training.py | 1 | 15586 | import cv2, numpy as np
import time
import math as mth
from PIL import Image, ImageDraw, ImageFont
import scipy.io
from keras.models import Sequential
from keras import initializations
from keras.initializations import normal, identity
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.optimizers import RMSprop, SGD, Adam
import random
import argparse
from scipy import ndimage
from keras.preprocessing import image
from sklearn.preprocessing import OneHotEncoder
from features import get_image_descriptor_for_image, obtain_compiled_vgg_16, vgg_16, \
get_conv_image_descriptor_for_image, calculate_all_initial_feature_maps
from parse_xml_annotations import *
from image_helper import *
from metrics import *
from visualization import *
from reinforcement import *
# Read the epoch number to resume training from, to allow checkpointing
parser = argparse.ArgumentParser(description='Epoch:')
parser.add_argument("-n", metavar='N', type=int, default=0)
args = parser.parse_args()
epochs_id = int(args.n)
if __name__ == "__main__":
######## PATHS definition ########
# path of PASCAL VOC 2012 or other database to use for training
path_voc = "./VOC2012/"
# path of other PASCAL VOC dataset, if you want to train with 2007 and 2012 train datasets
path_voc2 = "./VOC2007/"
# path of where to store the models
path_model = "../models_image_zooms"
# path of where to store visualizations of search sequences
path_testing_folder = '../testing_visualizations'
# path of VGG16 weights
path_vgg = "../vgg16_weights.h5"
######## PARAMETERS ########
# Class category of PASCAL that the RL agent will be searching
class_object = 1
    # Scale of each subregion relative to its parent in the region hierarchy (e.g. 2/4 or 3/4)
scale_subregion = float(3)/4
scale_mask = float(1)/(scale_subregion*4)
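    # Worked example (illustrative): with scale_subregion = 3/4 a 224x224 region
    # is cropped into 168x168 subregions, and scale_mask = 1/3 makes the offset
    # between neighbouring crops 168 * 1/3 = 56 px, i.e. one quarter of the
    # parent region, so the five candidate crops overlap each other.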
# 1 if you want to obtain visualizations of the search for objects
bool_draw = 0
    # Maximum number of steps the agent may take while searching for one object
number_of_steps = 10
# Boolean to indicate if you want to use the two databases, or just one
two_databases = 0
epochs = 50
gamma = 0.90
epsilon = 1
batch_size = 100
    # Pointer to the position where the next experience will be stored in the
    # replay buffer; there is one pointer per PASCAL category, in case all
    # categories are trained at the same time
h = np.zeros([20])
    # Each replay memory (one for each possible category) has a capacity of 1000 experiences
buffer_experience_replay = 1000
# Init replay memories
replay = [[] for i in range(20)]
reward = 0
######## MODELS ########
model_vgg = obtain_compiled_vgg_16(path_vgg)
    # If training starts at the first epoch, fresh Q-networks are created. Otherwise,
    # for checkpointing, the last stored weights are loaded for the particular class object
if epochs_id == 0:
models = get_array_of_q_networks_for_pascal("0", class_object)
else:
models = get_array_of_q_networks_for_pascal(path_model, class_object)
######## LOAD IMAGE NAMES ########
if two_databases == 1:
image_names1 = np.array([load_images_names_in_data_set('trainval', path_voc)])
image_names2 = np.array([load_images_names_in_data_set('trainval', path_voc2)])
image_names = np.concatenate([image_names1, image_names2])
else:
image_names = np.array([load_images_names_in_data_set('trainval', path_voc)])
######## LOAD IMAGES ########
if two_databases == 1:
images1 = get_all_images(image_names1, path_voc)
images2 = get_all_images(image_names2, path_voc2)
images = np.concatenate([images1, images2])
else:
images = get_all_images(image_names, path_voc)
for i in range(epochs_id, epochs_id + epochs):
for j in range(np.size(image_names)):
masked = 0
not_finished = 1
image = np.array(images[j])
image_name = image_names[0][j]
annotation = get_bb_of_gt_from_pascal_xml_annotation(image_name, path_voc)
if two_databases == 1:
if j < np.size(image_names1):
annotation = get_bb_of_gt_from_pascal_xml_annotation(image_name, path_voc)
else:
annotation = get_bb_of_gt_from_pascal_xml_annotation(image_name, path_voc2)
gt_masks = generate_bounding_box_from_annotation(annotation, image.shape)
array_classes_gt_objects = get_ids_objects_from_annotation(annotation)
region_mask = np.ones([image.shape[0], image.shape[1]])
shape_gt_masks = np.shape(gt_masks)
available_objects = np.ones(np.size(array_classes_gt_objects))
# Iterate through all the objects in the ground truth of an image
for k in range(np.size(array_classes_gt_objects)):
# Init visualization
background = Image.new('RGBA', (10000, 2500), (255, 255, 255, 255))
draw = ImageDraw.Draw(background)
# We check whether the ground truth object is of the target class category
if array_classes_gt_objects[k] == class_object:
gt_mask = gt_masks[:, :, k]
step = 0
new_iou = 0
                    # this matrix stores the IoU of each ground-truth object, just in case
                    # the agent changes the observed object
last_matrix = np.zeros([np.size(array_classes_gt_objects)])
region_image = image
offset = (0, 0)
size_mask = (image.shape[0], image.shape[1])
original_shape = size_mask
old_region_mask = region_mask
region_mask = np.ones([image.shape[0], image.shape[1]])
# If the ground truth object is already masked by other already found masks, do not
# use it for training
if masked == 1:
for p in range(gt_masks.shape[2]):
overlap = calculate_overlapping(old_region_mask, gt_masks[:, :, p])
if overlap > 0.60:
available_objects[p] = 0
                    # We check if there are still objects to be found
if np.count_nonzero(available_objects) == 0:
not_finished = 0
                    # follow_iou calculates at each time step which ground-truth object
                    # overlaps most with the visual region, so that the rewards can be calculated appropriately
iou, new_iou, last_matrix, index = follow_iou(gt_masks, region_mask, array_classes_gt_objects,
class_object, last_matrix, available_objects)
new_iou = iou
gt_mask = gt_masks[:, :, index]
# init of the history vector that indicates past actions (6 actions * 4 steps in the memory)
history_vector = np.zeros([24])
# computation of the initial state
state = get_state(region_image, history_vector, model_vgg)
# status indicates whether the agent is still alive and has not triggered the terminal action
status = 1
action = 0
reward = 0
if step > number_of_steps:
background = draw_sequences(i, k, step, action, draw, region_image, background,
path_testing_folder, iou, reward, gt_mask, region_mask, image_name,
bool_draw)
step += 1
while (status == 1) & (step < number_of_steps) & not_finished:
category = int(array_classes_gt_objects[k]-1)
model = models[0][category]
qval = model.predict(state.T, batch_size=1)
background = draw_sequences(i, k, step, action, draw, region_image, background,
path_testing_folder, iou, reward, gt_mask, region_mask, image_name,
bool_draw)
step += 1
                        # we force the terminal action when the current IoU is higher than 0.5, to make the agent train faster
if (i < 100) & (new_iou > 0.5):
action = 6
# epsilon-greedy policy
elif random.random() < epsilon:
action = np.random.randint(1, 7)
else:
action = (np.argmax(qval))+1
# terminal action
if action == 6:
iou, new_iou, last_matrix, index = follow_iou(gt_masks, region_mask,
array_classes_gt_objects, class_object,
last_matrix, available_objects)
gt_mask = gt_masks[:, :, index]
reward = get_reward_trigger(new_iou)
background = draw_sequences(i, k, step, action, draw, region_image, background,
path_testing_folder, iou, reward, gt_mask, region_mask,
image_name, bool_draw)
step += 1
# movement action, we perform the crop of the corresponding subregion
else:
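                            # Action -> subregion mapping of the five crops handled
                            # below (axis 0 = rows, axis 1 = columns):
                            #   1: top-left, 2: top-right, 3: bottom-left,
                            #   4: bottom-right, 5: central crop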
region_mask = np.zeros(original_shape)
size_mask = (size_mask[0] * scale_subregion, size_mask[1] * scale_subregion)
if action == 1:
offset_aux = (0, 0)
elif action == 2:
offset_aux = (0, size_mask[1] * scale_mask)
offset = (offset[0], offset[1] + size_mask[1] * scale_mask)
elif action == 3:
offset_aux = (size_mask[0] * scale_mask, 0)
offset = (offset[0] + size_mask[0] * scale_mask, offset[1])
elif action == 4:
offset_aux = (size_mask[0] * scale_mask,
size_mask[1] * scale_mask)
offset = (offset[0] + size_mask[0] * scale_mask,
offset[1] + size_mask[1] * scale_mask)
elif action == 5:
offset_aux = (size_mask[0] * scale_mask / 2,
size_mask[0] * scale_mask / 2)
offset = (offset[0] + size_mask[0] * scale_mask / 2,
offset[1] + size_mask[0] * scale_mask / 2)
region_image = region_image[offset_aux[0]:offset_aux[0] + size_mask[0],
offset_aux[1]:offset_aux[1] + size_mask[1]]
region_mask[offset[0]:offset[0] + size_mask[0], offset[1]:offset[1] + size_mask[1]] = 1
iou, new_iou, last_matrix, index = follow_iou(gt_masks, region_mask,
array_classes_gt_objects, class_object,
last_matrix, available_objects)
gt_mask = gt_masks[:, :, index]
reward = get_reward_movement(iou, new_iou)
iou = new_iou
history_vector = update_history_vector(history_vector, action)
new_state = get_state(region_image, history_vector, model_vgg)
# Experience replay storage
if len(replay[category]) < buffer_experience_replay:
replay[category].append((state, action, reward, new_state))
else:
if h[category] < (buffer_experience_replay-1):
h[category] += 1
else:
h[category] = 0
h_aux = h[category]
h_aux = int(h_aux)
replay[category][h_aux] = (state, action, reward, new_state)
minibatch = random.sample(replay[category], batch_size)
X_train = []
y_train = []
                            # we sample a minibatch from the replay memory and generate the training samples
for memory in minibatch:
old_state, action, reward, new_state = memory
old_qval = model.predict(old_state.T, batch_size=1)
newQ = model.predict(new_state.T, batch_size=1)
maxQ = np.max(newQ)
y = np.zeros([1, 6])
y = old_qval
y = y.T
if action != 6: #non-terminal state
update = (reward + (gamma * maxQ))
else: #terminal state
update = reward
y[action-1] = update #target output
X_train.append(old_state)
y_train.append(y)
X_train = np.array(X_train)
y_train = np.array(y_train)
X_train = X_train.astype("float32")
y_train = y_train.astype("float32")
X_train = X_train[:, :, 0]
y_train = y_train[:, :, 0]
hist = model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=1, verbose=0)
models[0][category] = model
state = new_state
if action == 6:
status = 0
masked = 1
                                # we mask the found object with its ground truth so that the agent learns faster
image = mask_image_with_mean_background(gt_mask, image)
else:
masked = 0
available_objects[index] = 0
if epsilon > 0.1:
epsilon -= 0.1
for t in range (np.size(models)):
if t == (class_object-1):
string = path_model + '/model' + str(t) + '_epoch_' + str(i) + 'h5'
string2 = path_model + '/model' + str(t) + 'h5'
model = models[0][t]
model.save_weights(string, overwrite=True)
model.save_weights(string2, overwrite=True)
| mit |
pravsripad/mne-python | examples/forward/plot_forward_sensitivity_maps.py | 14 | 4139 | """
.. _ex-sensitivity-maps:
================================================
Display sensitivity maps for EEG and MEG sensors
================================================
Sensitivity maps can be produced from forward operators that
indicate how well different sensor types will be able to detect
neural currents from different regions of the brain.
To get started with forward modeling see :ref:`tut-forward`.
"""
# Author: Eric Larson <[email protected]>
#
# License: BSD (3-clause)
import numpy as np
import mne
from mne.datasets import sample
from mne.source_space import compute_distance_to_sensors
from mne.source_estimate import SourceEstimate
import matplotlib.pyplot as plt
print(__doc__)
data_path = sample.data_path()
fwd_fname = data_path + '/MEG/sample/sample_audvis-meg-eeg-oct-6-fwd.fif'
subjects_dir = data_path + '/subjects'
# Read the forward solutions with surface orientation
fwd = mne.read_forward_solution(fwd_fname)
mne.convert_forward_solution(fwd, surf_ori=True, copy=False)
leadfield = fwd['sol']['data']
print("Leadfield size : %d x %d" % leadfield.shape)
###############################################################################
# Compute sensitivity maps
grad_map = mne.sensitivity_map(fwd, ch_type='grad', mode='fixed')
mag_map = mne.sensitivity_map(fwd, ch_type='mag', mode='fixed')
eeg_map = mne.sensitivity_map(fwd, ch_type='eeg', mode='fixed')
###############################################################################
# Show gain matrix a.k.a. leadfield matrix with sensitivity map
picks_meg = mne.pick_types(fwd['info'], meg=True, eeg=False)
picks_eeg = mne.pick_types(fwd['info'], meg=False, eeg=True)
fig, axes = plt.subplots(2, 1, figsize=(10, 8), sharex=True)
fig.suptitle('Lead field matrix (500 dipoles only)', fontsize=14)
for ax, picks, ch_type in zip(axes, [picks_meg, picks_eeg], ['meg', 'eeg']):
im = ax.imshow(leadfield[picks, :500], origin='lower', aspect='auto',
cmap='RdBu_r')
ax.set_title(ch_type.upper())
ax.set_xlabel('sources')
ax.set_ylabel('sensors')
fig.colorbar(im, ax=ax)
fig_2, ax = plt.subplots()
ax.hist([grad_map.data.ravel(), mag_map.data.ravel(), eeg_map.data.ravel()],
bins=20, label=['Gradiometers', 'Magnetometers', 'EEG'],
color=['c', 'b', 'k'])
fig_2.legend()
ax.set(title='Normal orientation sensitivity',
xlabel='sensitivity', ylabel='count')
# sphinx_gallery_thumbnail_number = 3
brain_sens = grad_map.plot(
subjects_dir=subjects_dir, clim=dict(lims=[0, 50, 100]), figure=1)
brain_sens.add_text(0.1, 0.9, 'Gradiometer sensitivity', 'title', font_size=16)
###############################################################################
# Compare sensitivity map with distribution of source depths
# source space with vertices
src = fwd['src']
# Compute minimum Euclidean distances between vertices and MEG sensors
depths = compute_distance_to_sensors(src=src, info=fwd['info'],
picks=picks_meg).min(axis=1)
maxdep = depths.max() # for scaling
vertices = [src[0]['vertno'], src[1]['vertno']]
depths_map = SourceEstimate(data=depths, vertices=vertices, tmin=0.,
tstep=1.)
brain_dep = depths_map.plot(
subject='sample', subjects_dir=subjects_dir,
clim=dict(kind='value', lims=[0, maxdep / 2., maxdep]), figure=2)
brain_dep.add_text(0.1, 0.9, 'Source depth (m)', 'title', font_size=16)
###############################################################################
# Sensitivity is likely to co-vary with the distance from the sources to the
# sensors. To determine the strength of this relationship, we can compute the
# correlation between source depth and sensitivity values.
corr = np.corrcoef(depths, grad_map.data[:, 0])[0, 1]
print('Correlation between source depth and gradiometer sensitivity values: %f.'
% corr)
###############################################################################
# Gradiometer sensitivity is highest close to the sensors, and decreases rapidly
# with increasing source depth. This is confirmed by the high negative
# correlation between the two.
| bsd-3-clause |
zorroblue/scikit-learn | sklearn/cross_decomposition/tests/test_pls.py | 24 | 14995 | import numpy as np
from numpy.testing import assert_approx_equal
from sklearn.utils.testing import (assert_equal, assert_array_almost_equal,
assert_array_equal, assert_true,
assert_raise_message)
from sklearn.datasets import load_linnerud
from sklearn.cross_decomposition import pls_, CCA
from sklearn.preprocessing import StandardScaler
from sklearn.utils import check_random_state
def test_pls():
d = load_linnerud()
X = d.data
Y = d.target
# 1) Canonical (symmetric) PLS (PLS 2 blocks canonical mode A)
# ===========================================================
# Compare 2 algo.: nipals vs. svd
# ------------------------------
pls_bynipals = pls_.PLSCanonical(n_components=X.shape[1])
pls_bynipals.fit(X, Y)
pls_bysvd = pls_.PLSCanonical(algorithm="svd", n_components=X.shape[1])
pls_bysvd.fit(X, Y)
# check equalities of loading (up to the sign of the second column)
assert_array_almost_equal(
pls_bynipals.x_loadings_,
pls_bysvd.x_loadings_, decimal=5,
err_msg="nipals and svd implementations lead to different x loadings")
assert_array_almost_equal(
pls_bynipals.y_loadings_,
pls_bysvd.y_loadings_, decimal=5,
err_msg="nipals and svd implementations lead to different y loadings")
# Check PLS properties (with n_components=X.shape[1])
# ---------------------------------------------------
plsca = pls_.PLSCanonical(n_components=X.shape[1])
plsca.fit(X, Y)
T = plsca.x_scores_
P = plsca.x_loadings_
Wx = plsca.x_weights_
U = plsca.y_scores_
Q = plsca.y_loadings_
Wy = plsca.y_weights_
def check_ortho(M, err_msg):
K = np.dot(M.T, M)
assert_array_almost_equal(K, np.diag(np.diag(K)), err_msg=err_msg)
# Orthogonality of weights
# ~~~~~~~~~~~~~~~~~~~~~~~~
check_ortho(Wx, "x weights are not orthogonal")
check_ortho(Wy, "y weights are not orthogonal")
# Orthogonality of latent scores
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
check_ortho(T, "x scores are not orthogonal")
check_ortho(U, "y scores are not orthogonal")
# Check X = TP' and Y = UQ' (with (p == q) components)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# center scale X, Y
Xc, Yc, x_mean, y_mean, x_std, y_std =\
pls_._center_scale_xy(X.copy(), Y.copy(), scale=True)
assert_array_almost_equal(Xc, np.dot(T, P.T), err_msg="X != TP'")
assert_array_almost_equal(Yc, np.dot(U, Q.T), err_msg="Y != UQ'")
# Check that rotations on training data lead to scores
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Xr = plsca.transform(X)
assert_array_almost_equal(Xr, plsca.x_scores_,
err_msg="rotation on X failed")
Xr, Yr = plsca.transform(X, Y)
assert_array_almost_equal(Xr, plsca.x_scores_,
err_msg="rotation on X failed")
assert_array_almost_equal(Yr, plsca.y_scores_,
err_msg="rotation on Y failed")
# "Non regression test" on canonical PLS
# --------------------------------------
# The results were checked against the R-package plspm
pls_ca = pls_.PLSCanonical(n_components=X.shape[1])
pls_ca.fit(X, Y)
x_weights = np.array(
[[-0.61330704, 0.25616119, -0.74715187],
[-0.74697144, 0.11930791, 0.65406368],
[-0.25668686, -0.95924297, -0.11817271]])
# x_weights_sign_flip holds columns of 1 or -1, depending on sign flip
# between R and python
x_weights_sign_flip = pls_ca.x_weights_ / x_weights
x_rotations = np.array(
[[-0.61330704, 0.41591889, -0.62297525],
[-0.74697144, 0.31388326, 0.77368233],
[-0.25668686, -0.89237972, -0.24121788]])
x_rotations_sign_flip = pls_ca.x_rotations_ / x_rotations
y_weights = np.array(
[[+0.58989127, 0.7890047, 0.1717553],
[+0.77134053, -0.61351791, 0.16920272],
[-0.23887670, -0.03267062, 0.97050016]])
y_weights_sign_flip = pls_ca.y_weights_ / y_weights
y_rotations = np.array(
[[+0.58989127, 0.7168115, 0.30665872],
[+0.77134053, -0.70791757, 0.19786539],
[-0.23887670, -0.00343595, 0.94162826]])
y_rotations_sign_flip = pls_ca.y_rotations_ / y_rotations
# x_weights = X.dot(x_rotation)
# Hence R/python sign flip should be the same in x_weight and x_rotation
assert_array_almost_equal(x_rotations_sign_flip, x_weights_sign_flip)
    # This tests that R / python give the same result up to column
# sign indeterminacy
assert_array_almost_equal(np.abs(x_rotations_sign_flip), 1, 4)
assert_array_almost_equal(np.abs(x_weights_sign_flip), 1, 4)
assert_array_almost_equal(y_rotations_sign_flip, y_weights_sign_flip)
assert_array_almost_equal(np.abs(y_rotations_sign_flip), 1, 4)
assert_array_almost_equal(np.abs(y_weights_sign_flip), 1, 4)
# 2) Regression PLS (PLS2): "Non regression test"
# ===============================================
# The results were checked against the R-packages plspm, misOmics and pls
pls_2 = pls_.PLSRegression(n_components=X.shape[1])
pls_2.fit(X, Y)
x_weights = np.array(
[[-0.61330704, -0.00443647, 0.78983213],
[-0.74697144, -0.32172099, -0.58183269],
[-0.25668686, 0.94682413, -0.19399983]])
x_weights_sign_flip = pls_2.x_weights_ / x_weights
x_loadings = np.array(
[[-0.61470416, -0.24574278, 0.78983213],
[-0.65625755, -0.14396183, -0.58183269],
[-0.51733059, 1.00609417, -0.19399983]])
x_loadings_sign_flip = pls_2.x_loadings_ / x_loadings
y_weights = np.array(
[[+0.32456184, 0.29892183, 0.20316322],
[+0.42439636, 0.61970543, 0.19320542],
[-0.13143144, -0.26348971, -0.17092916]])
y_weights_sign_flip = pls_2.y_weights_ / y_weights
y_loadings = np.array(
[[+0.32456184, 0.29892183, 0.20316322],
[+0.42439636, 0.61970543, 0.19320542],
[-0.13143144, -0.26348971, -0.17092916]])
y_loadings_sign_flip = pls_2.y_loadings_ / y_loadings
# x_loadings[:, i] = Xi.dot(x_weights[:, i]) \forall i
assert_array_almost_equal(x_loadings_sign_flip, x_weights_sign_flip, 4)
assert_array_almost_equal(np.abs(x_loadings_sign_flip), 1, 4)
assert_array_almost_equal(np.abs(x_weights_sign_flip), 1, 4)
assert_array_almost_equal(y_loadings_sign_flip, y_weights_sign_flip, 4)
assert_array_almost_equal(np.abs(y_loadings_sign_flip), 1, 4)
assert_array_almost_equal(np.abs(y_weights_sign_flip), 1, 4)
# 3) Another non-regression test of Canonical PLS on random dataset
# =================================================================
# The results were checked against the R-package plspm
n = 500
p_noise = 10
q_noise = 5
# 2 latents vars:
rng = check_random_state(11)
l1 = rng.normal(size=n)
l2 = rng.normal(size=n)
latents = np.array([l1, l1, l2, l2]).T
X = latents + rng.normal(size=4 * n).reshape((n, 4))
Y = latents + rng.normal(size=4 * n).reshape((n, 4))
X = np.concatenate(
(X, rng.normal(size=p_noise * n).reshape(n, p_noise)), axis=1)
Y = np.concatenate(
(Y, rng.normal(size=q_noise * n).reshape(n, q_noise)), axis=1)
pls_ca = pls_.PLSCanonical(n_components=3)
pls_ca.fit(X, Y)
x_weights = np.array(
[[0.65803719, 0.19197924, 0.21769083],
[0.7009113, 0.13303969, -0.15376699],
[0.13528197, -0.68636408, 0.13856546],
[0.16854574, -0.66788088, -0.12485304],
[-0.03232333, -0.04189855, 0.40690153],
[0.1148816, -0.09643158, 0.1613305],
[0.04792138, -0.02384992, 0.17175319],
[-0.06781, -0.01666137, -0.18556747],
[-0.00266945, -0.00160224, 0.11893098],
[-0.00849528, -0.07706095, 0.1570547],
[-0.00949471, -0.02964127, 0.34657036],
[-0.03572177, 0.0945091, 0.3414855],
[0.05584937, -0.02028961, -0.57682568],
[0.05744254, -0.01482333, -0.17431274]])
x_weights_sign_flip = pls_ca.x_weights_ / x_weights
x_loadings = np.array(
[[0.65649254, 0.1847647, 0.15270699],
[0.67554234, 0.15237508, -0.09182247],
[0.19219925, -0.67750975, 0.08673128],
[0.2133631, -0.67034809, -0.08835483],
[-0.03178912, -0.06668336, 0.43395268],
[0.15684588, -0.13350241, 0.20578984],
[0.03337736, -0.03807306, 0.09871553],
[-0.06199844, 0.01559854, -0.1881785],
[0.00406146, -0.00587025, 0.16413253],
[-0.00374239, -0.05848466, 0.19140336],
[0.00139214, -0.01033161, 0.32239136],
[-0.05292828, 0.0953533, 0.31916881],
[0.04031924, -0.01961045, -0.65174036],
[0.06172484, -0.06597366, -0.1244497]])
x_loadings_sign_flip = pls_ca.x_loadings_ / x_loadings
y_weights = np.array(
[[0.66101097, 0.18672553, 0.22826092],
[0.69347861, 0.18463471, -0.23995597],
[0.14462724, -0.66504085, 0.17082434],
[0.22247955, -0.6932605, -0.09832993],
[0.07035859, 0.00714283, 0.67810124],
[0.07765351, -0.0105204, -0.44108074],
[-0.00917056, 0.04322147, 0.10062478],
[-0.01909512, 0.06182718, 0.28830475],
[0.01756709, 0.04797666, 0.32225745]])
y_weights_sign_flip = pls_ca.y_weights_ / y_weights
y_loadings = np.array(
[[0.68568625, 0.1674376, 0.0969508],
[0.68782064, 0.20375837, -0.1164448],
[0.11712173, -0.68046903, 0.12001505],
[0.17860457, -0.6798319, -0.05089681],
[0.06265739, -0.0277703, 0.74729584],
[0.0914178, 0.00403751, -0.5135078],
[-0.02196918, -0.01377169, 0.09564505],
[-0.03288952, 0.09039729, 0.31858973],
[0.04287624, 0.05254676, 0.27836841]])
y_loadings_sign_flip = pls_ca.y_loadings_ / y_loadings
assert_array_almost_equal(x_loadings_sign_flip, x_weights_sign_flip, 4)
assert_array_almost_equal(np.abs(x_weights_sign_flip), 1, 4)
assert_array_almost_equal(np.abs(x_loadings_sign_flip), 1, 4)
assert_array_almost_equal(y_loadings_sign_flip, y_weights_sign_flip, 4)
assert_array_almost_equal(np.abs(y_weights_sign_flip), 1, 4)
assert_array_almost_equal(np.abs(y_loadings_sign_flip), 1, 4)
# Orthogonality of weights
# ~~~~~~~~~~~~~~~~~~~~~~~~
check_ortho(pls_ca.x_weights_, "x weights are not orthogonal")
check_ortho(pls_ca.y_weights_, "y weights are not orthogonal")
# Orthogonality of latent scores
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
check_ortho(pls_ca.x_scores_, "x scores are not orthogonal")
check_ortho(pls_ca.y_scores_, "y scores are not orthogonal")
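# Hedged sketch (not from the original file): ``check_ortho`` is defined earlier
# in this test (not shown here); an orthogonality check of this kind typically
# verifies that M.T.dot(M) is numerically diagonal. A minimal, hypothetical
# version for illustration only:
def _example_ortho_check(M, err_msg):
    K = np.dot(M.T, M)
    assert_array_almost_equal(K, np.diag(np.diag(K)), err_msg=err_msg)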
def test_PLSSVD():
    # Let's check the PLSSVD doesn't return all possible components but just
# the specified number
d = load_linnerud()
X = d.data
Y = d.target
n_components = 2
for clf in [pls_.PLSSVD, pls_.PLSRegression, pls_.PLSCanonical]:
pls = clf(n_components=n_components)
pls.fit(X, Y)
assert_equal(n_components, pls.y_scores_.shape[1])
def test_univariate_pls_regression():
# Ensure 1d Y is correctly interpreted
d = load_linnerud()
X = d.data
Y = d.target
clf = pls_.PLSRegression()
# Compare 1d to column vector
model1 = clf.fit(X, Y[:, 0]).coef_
model2 = clf.fit(X, Y[:, :1]).coef_
assert_array_almost_equal(model1, model2)
def test_predict_transform_copy():
# check that the "copy" keyword works
d = load_linnerud()
X = d.data
Y = d.target
clf = pls_.PLSCanonical()
X_copy = X.copy()
Y_copy = Y.copy()
clf.fit(X, Y)
# check that results are identical with copy
assert_array_almost_equal(clf.predict(X), clf.predict(X.copy(), copy=False))
assert_array_almost_equal(clf.transform(X), clf.transform(X.copy(), copy=False))
# check also if passing Y
assert_array_almost_equal(clf.transform(X, Y),
clf.transform(X.copy(), Y.copy(), copy=False))
# check that copy doesn't destroy
# we do want to check exact equality here
assert_array_equal(X_copy, X)
assert_array_equal(Y_copy, Y)
# also check that mean wasn't zero before (to make sure we didn't touch it)
assert_true(np.all(X.mean(axis=0) != 0))
def test_scale_and_stability():
    # We test the scale=True parameter
    # This also allows checking numerical stability across platforms
d = load_linnerud()
X1 = d.data
Y1 = d.target
# causes X[:, -1].std() to be zero
X1[:, -1] = 1.0
# From bug #2821
    # Test with X2, Y2 s.t. clf.x_score[:, 1] == 0, clf.y_score[:, 1] == 0
    # This tests the robustness of the algorithm when dealing with values close to 0
X2 = np.array([[0., 0., 1.],
[1., 0., 0.],
[2., 2., 2.],
[3., 5., 4.]])
Y2 = np.array([[0.1, -0.2],
[0.9, 1.1],
[6.2, 5.9],
[11.9, 12.3]])
for (X, Y) in [(X1, Y1), (X2, Y2)]:
X_std = X.std(axis=0, ddof=1)
X_std[X_std == 0] = 1
Y_std = Y.std(axis=0, ddof=1)
Y_std[Y_std == 0] = 1
X_s = (X - X.mean(axis=0)) / X_std
Y_s = (Y - Y.mean(axis=0)) / Y_std
for clf in [CCA(), pls_.PLSCanonical(), pls_.PLSRegression(),
pls_.PLSSVD()]:
clf.set_params(scale=True)
X_score, Y_score = clf.fit_transform(X, Y)
clf.set_params(scale=False)
X_s_score, Y_s_score = clf.fit_transform(X_s, Y_s)
assert_array_almost_equal(X_s_score, X_score)
assert_array_almost_equal(Y_s_score, Y_score)
# Scaling should be idempotent
clf.set_params(scale=True)
X_score, Y_score = clf.fit_transform(X_s, Y_s)
assert_array_almost_equal(X_s_score, X_score)
assert_array_almost_equal(Y_s_score, Y_score)
def test_pls_errors():
d = load_linnerud()
X = d.data
Y = d.target
for clf in [pls_.PLSCanonical(), pls_.PLSRegression(),
pls_.PLSSVD()]:
clf.n_components = 4
assert_raise_message(ValueError, "Invalid number of components",
clf.fit, X, Y)
def test_pls_scaling():
# sanity check for scale=True
n_samples = 1000
n_targets = 5
n_features = 10
rng = check_random_state(0)
Q = rng.randn(n_targets, n_features)
Y = rng.randn(n_samples, n_targets)
X = np.dot(Y, Q) + 2 * rng.randn(n_samples, n_features) + 1
X *= 1000
X_scaled = StandardScaler().fit_transform(X)
pls = pls_.PLSRegression(n_components=5, scale=True)
pls.fit(X, Y)
score = pls.score(X, Y)
pls.fit(X_scaled, Y)
score_scaled = pls.score(X_scaled, Y)
assert_approx_equal(score, score_scaled)
| bsd-3-clause |
sanjanalab/GUIDES | static/data/pre_processed/generate_GRCm38_genes_2.py | 2 | 1344 | # produce list of genes in GRCm38
import pandas as pd
import json
# open refgene
refGeneFilename = '../gtex/gtex_mouse/refGene_mouse.txt'
refGene = pd.read_csv(refGeneFilename, sep="\t")
refGene.columns=['','name','chrom','strand','txStart','txEnd','cdsStart','cdsEnd','exonCount','exonStarts','exonEnds','id','name2','cdsStartStat','cdsEndStat','exonFrames']
# open biomart
biomartFilename = 'mart_export_mus_2.txt'
biomart = pd.read_csv(biomartFilename, sep="\t")
seen = {}
results = []
total_len = len(refGene)
for index, row in refGene.iterrows():
ensembl_id = row['name']
if ensembl_id not in seen:
the_loc = biomart.loc[biomart['Gene ID'] == ensembl_id]
gene_name = list(the_loc['Associated Gene Name'])[0]
entrez = list(the_loc['EntrezGene ID'])[0]
if pd.isnull(entrez):
entrez = ''
print ensembl_id, gene_name, 'has no entrez'
else:
entrez = str(int(entrez))
if pd.isnull(gene_name):
gene_name = ''
print ensembl_id, 'has no gene_name'
results.append({
'name': gene_name,
'ensembl_id': ensembl_id,
'entrez_id': entrez,
'description': ""
})
seen[ensembl_id] = True
with open('genes_list_GRCm38_processed.txt', 'w') as output:
json.dump(results, output)
with open('genes_list_GRCm38.txt', 'w') as output:
json.dump(results, output)
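# Hedged sketch (not part of the original script): the per-row biomart lookup in
# the loop above can also be expressed as a single pandas merge. Column names
# are taken from the code above; the helper itself is hypothetical and unused.
def merge_refgene_with_biomart(refGene, biomart):
  # one row per Ensembl ID, annotated with gene name and Entrez ID
  unique_ids = refGene[['name']].drop_duplicates()
  return unique_ids.merge(biomart, how='left',
                          left_on='name', right_on='Gene ID')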
| bsd-3-clause |
jm-begon/scikit-learn | sklearn/manifold/isomap.py | 229 | 7169 | """Isomap for manifold learning"""
# Author: Jake Vanderplas -- <[email protected]>
# License: BSD 3 clause (C) 2011
import numpy as np
from ..base import BaseEstimator, TransformerMixin
from ..neighbors import NearestNeighbors, kneighbors_graph
from ..utils import check_array
from ..utils.graph import graph_shortest_path
from ..decomposition import KernelPCA
from ..preprocessing import KernelCenterer
class Isomap(BaseEstimator, TransformerMixin):
"""Isomap Embedding
Non-linear dimensionality reduction through Isometric Mapping
Read more in the :ref:`User Guide <isomap>`.
Parameters
----------
n_neighbors : integer
number of neighbors to consider for each point.
n_components : integer
number of coordinates for the manifold
eigen_solver : ['auto'|'arpack'|'dense']
'auto' : Attempt to choose the most efficient solver
for the given problem.
'arpack' : Use Arnoldi decomposition to find the eigenvalues
and eigenvectors.
'dense' : Use a direct solver (i.e. LAPACK)
for the eigenvalue decomposition.
tol : float
Convergence tolerance passed to arpack or lobpcg.
not used if eigen_solver == 'dense'.
max_iter : integer
Maximum number of iterations for the arpack solver.
not used if eigen_solver == 'dense'.
path_method : string ['auto'|'FW'|'D']
Method to use in finding shortest path.
'auto' : attempt to choose the best algorithm automatically.
'FW' : Floyd-Warshall algorithm.
'D' : Dijkstra's algorithm.
neighbors_algorithm : string ['auto'|'brute'|'kd_tree'|'ball_tree']
Algorithm to use for nearest neighbors search,
passed to neighbors.NearestNeighbors instance.
Attributes
----------
embedding_ : array-like, shape (n_samples, n_components)
Stores the embedding vectors.
kernel_pca_ : object
`KernelPCA` object used to implement the embedding.
training_data_ : array-like, shape (n_samples, n_features)
Stores the training data.
nbrs_ : sklearn.neighbors.NearestNeighbors instance
Stores nearest neighbors instance, including BallTree or KDtree
if applicable.
dist_matrix_ : array-like, shape (n_samples, n_samples)
Stores the geodesic distance matrix of training data.
References
----------
.. [1] Tenenbaum, J.B.; De Silva, V.; & Langford, J.C. A global geometric
framework for nonlinear dimensionality reduction. Science 290 (5500)
"""
def __init__(self, n_neighbors=5, n_components=2, eigen_solver='auto',
tol=0, max_iter=None, path_method='auto',
neighbors_algorithm='auto'):
self.n_neighbors = n_neighbors
self.n_components = n_components
self.eigen_solver = eigen_solver
self.tol = tol
self.max_iter = max_iter
self.path_method = path_method
self.neighbors_algorithm = neighbors_algorithm
self.nbrs_ = NearestNeighbors(n_neighbors=n_neighbors,
algorithm=neighbors_algorithm)
def _fit_transform(self, X):
X = check_array(X)
self.nbrs_.fit(X)
self.training_data_ = self.nbrs_._fit_X
self.kernel_pca_ = KernelPCA(n_components=self.n_components,
kernel="precomputed",
eigen_solver=self.eigen_solver,
tol=self.tol, max_iter=self.max_iter)
kng = kneighbors_graph(self.nbrs_, self.n_neighbors,
mode='distance')
self.dist_matrix_ = graph_shortest_path(kng,
method=self.path_method,
directed=False)
G = self.dist_matrix_ ** 2
G *= -0.5
self.embedding_ = self.kernel_pca_.fit_transform(G)
def reconstruction_error(self):
"""Compute the reconstruction error for the embedding.
Returns
-------
reconstruction_error : float
Notes
-------
The cost function of an isomap embedding is
``E = frobenius_norm[K(D) - K(D_fit)] / n_samples``
Where D is the matrix of distances for the input data X,
D_fit is the matrix of distances for the output embedding X_fit,
and K is the isomap kernel:
``K(D) = -0.5 * (I - 1/n_samples) * D^2 * (I - 1/n_samples)``
"""
G = -0.5 * self.dist_matrix_ ** 2
G_center = KernelCenterer().fit_transform(G)
evals = self.kernel_pca_.lambdas_
return np.sqrt(np.sum(G_center ** 2) - np.sum(evals ** 2)) / G.shape[0]
def fit(self, X, y=None):
"""Compute the embedding vectors for data X
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree, NearestNeighbors}
Sample data, shape = (n_samples, n_features), in the form of a
numpy array, precomputed tree, or NearestNeighbors
object.
Returns
-------
self : returns an instance of self.
"""
self._fit_transform(X)
return self
def fit_transform(self, X, y=None):
"""Fit the model from data in X and transform X.
Parameters
----------
X: {array-like, sparse matrix, BallTree, KDTree}
Training vector, where n_samples in the number of samples
and n_features is the number of features.
Returns
-------
X_new: array-like, shape (n_samples, n_components)
"""
self._fit_transform(X)
return self.embedding_
def transform(self, X):
"""Transform X.
This is implemented by linking the points X into the graph of geodesic
distances of the training data. First the `n_neighbors` nearest
neighbors of X are found in the training data, and from these the
shortest geodesic distances from each point in X to each point in
the training data are computed in order to construct the kernel.
The embedding of X is the projection of this kernel onto the
embedding vectors of the training set.
Parameters
----------
X: array-like, shape (n_samples, n_features)
Returns
-------
X_new: array-like, shape (n_samples, n_components)
"""
X = check_array(X)
distances, indices = self.nbrs_.kneighbors(X, return_distance=True)
        # Create the graph of shortest distances from X to self.training_data_
        # via the nearest neighbors of X.
        # This can be done as a single array operation, but it potentially
# takes a lot of memory. To avoid that, use a loop:
G_X = np.zeros((X.shape[0], self.training_data_.shape[0]))
for i in range(X.shape[0]):
G_X[i] = np.min((self.dist_matrix_[indices[i]]
+ distances[i][:, None]), 0)
G_X **= 2
G_X *= -0.5
return self.kernel_pca_.transform(G_X)
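# Hedged usage sketch (not part of scikit-learn): a hypothetical helper showing
# how the embedding and the reconstruction error documented above are obtained
# together. Data and parameter values are arbitrary.
def _example_isomap_usage(X, n_neighbors=10, n_components=2):
    model = Isomap(n_neighbors=n_neighbors, n_components=n_components)
    embedding = model.fit_transform(X)
    return embedding, model.reconstruction_error()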
| bsd-3-clause |
xyguo/scikit-learn | examples/linear_model/plot_sgd_comparison.py | 112 | 1819 | """
==================================
Comparing various online solvers
==================================
An example showing how different online solvers perform
on the hand-written digits dataset.
"""
# Author: Rob Zinkov <rob at zinkov dot com>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.linear_model import SGDClassifier, Perceptron
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.linear_model import LogisticRegression
heldout = [0.95, 0.90, 0.75, 0.50, 0.01]
rounds = 20
digits = datasets.load_digits()
X, y = digits.data, digits.target
classifiers = [
("SGD", SGDClassifier()),
("ASGD", SGDClassifier(average=True)),
("Perceptron", Perceptron()),
("Passive-Aggressive I", PassiveAggressiveClassifier(loss='hinge',
C=1.0)),
("Passive-Aggressive II", PassiveAggressiveClassifier(loss='squared_hinge',
C=1.0)),
("SAG", LogisticRegression(solver='sag', tol=1e-1, C=1.e4 / X.shape[0]))
]
xx = 1. - np.array(heldout)
for name, clf in classifiers:
print("training %s" % name)
rng = np.random.RandomState(42)
yy = []
for i in heldout:
yy_ = []
for r in range(rounds):
X_train, X_test, y_train, y_test = \
train_test_split(X, y, test_size=i, random_state=rng)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
yy_.append(1 - np.mean(y_pred == y_test))
yy.append(np.mean(yy_))
plt.plot(xx, yy, label=name)
plt.legend(loc="upper right")
plt.xlabel("Proportion train")
plt.ylabel("Test Error Rate")
plt.show()
| bsd-3-clause |
ddboline/kaggle_imdb_sentiment_model | train_word2vec_model.py | 1 | 2397 | #!/usr/bin/python
import os
import csv
import gzip
import multiprocessing
from collections import defaultdict
import pandas as pd
import numpy as np
import nltk
from gensim.models import Word2Vec
from sklearn.feature_extraction.text import CountVectorizer
from KaggleWord2VecUtility import review_to_wordlist, review_to_sentences
tokenizer = nltk.data.load('tokenizers/punkt/english.pickle')
def clean_review(review):
list_of_sentences = review_to_sentences(review, tokenizer, remove_stopwords=False)
return list_of_sentences
def train_word2vec_model(do_plots=False):
traindf = pd.read_csv('labeledTrainData.tsv.gz', compression='gzip', delimiter='\t', header=0, quoting=3)
unlabeled_traindf = pd.read_csv('unlabeledTrainData.tsv.gz', compression='gzip', delimiter='\t', header=0, quoting=3)
review_list = traindf['review'].tolist() + unlabeled_traindf['review'].tolist()
sentences = []
pool = multiprocessing.Pool(4)
for rsent in pool.imap_unordered(clean_review, review_list):
sentences += rsent
#traincleanreview = traindf['review'].apply(clean_review).tolist()
#unlabeledcleanreview = unlabeled_traindf['review'].apply(clean_review).tolist()
#sentences = traincleanreview + unlabeledcleanreview
#print type(sentences[0])
# Set values for various parameters
num_features = 600 # Word vector dimensionality
min_word_count = 20 # Minimum word count
num_workers = 4 # Number of threads to run in parallel
context = 10 # Context window size
downsampling = 1e-3 # Downsample setting for frequent words
# Initialize and train the model (this will take some time)
print "Training Word2Vec model..."
model = Word2Vec(sentences, workers=num_workers, \
size=num_features, min_count = min_word_count, \
window = context, sample = downsampling, seed=1)
# If you don't plan to train the model any further, calling
# init_sims will make the model much more memory-efficient.
model.init_sims(replace=True)
# It can be helpful to create a meaningful model name and
# save the model for later use. You can load it later using Word2Vec.load()
    model_name = "%dfeatures_%dminwords_%dcontext" % (num_features, min_word_count, context)
model.save(model_name)
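# Hedged sketch (not part of the original script): as the comments above note,
# the saved model can be reloaded later with Word2Vec.load(). The default model
# name assumes the parameter values used in train_word2vec_model(); the query
# word is arbitrary and purely illustrative.
def load_and_inspect_model(model_name="600features_20minwords_10context"):
    model = Word2Vec.load(model_name)
    # e.g. look up the words the model considers most similar to "movie"
    return model.most_similar("movie", topn=5)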
if __name__ == '__main__':
train_word2vec_model(do_plots=True)
| mit |
zfrenchee/pandas | pandas/core/config_init.py | 1 | 17042 | """
This module is imported from the pandas package __init__.py file
in order to ensure that the core.config options registered here will
be available as soon as the user loads the package. if register_option
is invoked inside specific modules, they will not be registered until that
module is imported, which may or may not be a problem.
If you need to make sure options are available even before a certain
module is imported, register them here rather then in the module.
"""
import pandas.core.config as cf
from pandas.core.config import (is_int, is_bool, is_text, is_instance_factory,
is_one_of_factory, get_default_val,
is_callable)
from pandas.io.formats.console import detect_console_encoding
# compute
use_bottleneck_doc = """
: bool
Use the bottleneck library to accelerate if it is installed,
the default is True
Valid values: False,True
"""
def use_bottleneck_cb(key):
from pandas.core import nanops
nanops.set_use_bottleneck(cf.get_option(key))
use_numexpr_doc = """
: bool
Use the numexpr library to accelerate computation if it is installed,
the default is True
Valid values: False,True
"""
def use_numexpr_cb(key):
from pandas.core.computation import expressions
expressions.set_use_numexpr(cf.get_option(key))
with cf.config_prefix('compute'):
cf.register_option('use_bottleneck', True, use_bottleneck_doc,
validator=is_bool, cb=use_bottleneck_cb)
cf.register_option('use_numexpr', True, use_numexpr_doc,
validator=is_bool, cb=use_numexpr_cb)
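# Hedged sketch (not part of pandas): once registered above, these options are
# read back through the same config machinery. The helper below is purely
# illustrative and is not used anywhere in pandas.
def _example_read_compute_options():
    # current values of the two "compute" options registered above
    return (cf.get_option('compute.use_bottleneck'),
            cf.get_option('compute.use_numexpr'))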
#
# options from the "display" namespace
pc_precision_doc = """
: int
Floating point output precision (number of significant digits). This is
only a suggestion
"""
pc_colspace_doc = """
: int
Default space for DataFrame columns.
"""
pc_max_rows_doc = """
: int
If max_rows is exceeded, switch to truncate view. Depending on
`large_repr`, objects are either centrally truncated or printed as
a summary view. 'None' value means unlimited.
In case python/IPython is running in a terminal and `large_repr`
equals 'truncate' this can be set to 0 and pandas will auto-detect
the height of the terminal and print a truncated object which fits
the screen height. The IPython notebook, IPython qtconsole, or
IDLE do not run in a terminal and hence it is not possible to do
correct auto-detection.
"""
pc_max_cols_doc = """
: int
If max_cols is exceeded, switch to truncate view. Depending on
`large_repr`, objects are either centrally truncated or printed as
a summary view. 'None' value means unlimited.
In case python/IPython is running in a terminal and `large_repr`
equals 'truncate' this can be set to 0 and pandas will auto-detect
the width of the terminal and print a truncated object which fits
the screen width. The IPython notebook, IPython qtconsole, or IDLE
do not run in a terminal and hence it is not possible to do
correct auto-detection.
"""
pc_max_categories_doc = """
: int
This sets the maximum number of categories pandas should output when
printing out a `Categorical` or a Series of dtype "category".
"""
pc_max_info_cols_doc = """
: int
max_info_columns is used in DataFrame.info method to decide if
per column information will be printed.
"""
pc_nb_repr_h_doc = """
: boolean
When True, IPython notebook will use html representation for
pandas objects (if it is available).
"""
pc_date_dayfirst_doc = """
: boolean
When True, prints and parses dates with the day first, eg 20/01/2005
"""
pc_date_yearfirst_doc = """
: boolean
When True, prints and parses dates with the year first, eg 2005/01/20
"""
pc_pprint_nest_depth = """
: int
Controls the number of nested levels to process when pretty-printing
"""
pc_multi_sparse_doc = """
: boolean
"sparsify" MultiIndex display (don't display repeated
elements in outer levels within groups)
"""
pc_encoding_doc = """
: str/unicode
Defaults to the detected encoding of the console.
Specifies the encoding to be used for strings returned by to_string,
these are generally strings meant to be displayed on the console.
"""
float_format_doc = """
: callable
The callable should accept a floating point number and return
a string with the desired format of the number. This is used
in some places like SeriesFormatter.
See formats.format.EngFormatter for an example.
"""
max_colwidth_doc = """
: int
The maximum width in characters of a column in the repr of
a pandas data structure. When the column overflows, a "..."
placeholder is embedded in the output.
"""
colheader_justify_doc = """
: 'left'/'right'
Controls the justification of column headers. used by DataFrameFormatter.
"""
pc_expand_repr_doc = """
: boolean
Whether to print out the full DataFrame repr for wide DataFrames across
multiple lines, `max_columns` is still respected, but the output will
wrap-around across multiple "pages" if its width exceeds `display.width`.
"""
pc_show_dimensions_doc = """
: boolean or 'truncate'
Whether to print out dimensions at the end of DataFrame repr.
If 'truncate' is specified, only print out the dimensions if the
frame is truncated (e.g. not display all rows and/or columns)
"""
pc_line_width_doc = """
: int
Deprecated.
"""
pc_east_asian_width_doc = """
: boolean
Whether to use the Unicode East Asian Width to calculate the display text
width.
    Enabling this may affect performance (default: False)
"""
pc_ambiguous_as_wide_doc = """
: boolean
    Whether to handle Unicode characters belonging to Ambiguous as Wide (width=2)
(default: False)
"""
pc_latex_repr_doc = """
: boolean
Whether to produce a latex DataFrame representation for jupyter
environments that support it.
(default: False)
"""
pc_table_schema_doc = """
: boolean
Whether to publish a Table Schema representation for frontends
that support it.
(default: False)
"""
pc_html_border_doc = """
: int
A ``border=value`` attribute is inserted in the ``<table>`` tag
for the DataFrame HTML repr.
"""
pc_html_border_deprecation_warning = """\
html.border has been deprecated, use display.html.border instead
(currently both are identical)
"""
pc_width_doc = """
: int
Width of the display in characters. In case python/IPython is running in
a terminal this can be set to None and pandas will correctly auto-detect
the width.
Note that the IPython notebook, IPython qtconsole, or IDLE do not run in a
terminal and hence it is not possible to correctly detect the width.
"""
pc_height_doc = """
: int
Deprecated.
"""
pc_chop_threshold_doc = """
: float or None
    if set to a float value, all float values smaller than the given threshold
will be displayed as exactly 0 by repr and friends.
"""
pc_max_seq_items = """
: int or None
    when pretty-printing a long sequence, no more than `max_seq_items`
will be printed. If items are omitted, they will be denoted by the
addition of "..." to the resulting string.
If set to None, the number of items to be printed is unlimited.
"""
pc_max_info_rows_doc = """
: int or None
df.info() will usually show null-counts for each column.
For large frames this can be quite slow. max_info_rows and max_info_cols
limit this null check only to frames with smaller dimensions than
specified.
"""
pc_large_repr_doc = """
: 'truncate'/'info'
For DataFrames exceeding max_rows/max_cols, the repr (and HTML repr) can
show a truncated table (the default from 0.13), or switch to the view from
df.info() (the behaviour in earlier versions of pandas).
"""
pc_memory_usage_doc = """
: bool, string or None
This specifies if the memory usage of a DataFrame should be displayed when
df.info() is called. Valid values True,False,'deep'
"""
pc_latex_escape = """
: bool
    This specifies if the to_latex method of a Dataframe escapes special
characters.
Valid values: False,True
"""
pc_latex_longtable = """
:bool
This specifies if the to_latex method of a Dataframe uses the longtable
format.
Valid values: False,True
"""
pc_latex_multicolumn = """
: bool
This specifies if the to_latex method of a Dataframe uses multicolumns
to pretty-print MultiIndex columns.
Valid values: False,True
"""
pc_latex_multicolumn_format = """
: string
This specifies the format for multicolumn headers.
Can be surrounded with '|'.
Valid values: 'l', 'c', 'r', 'p{<width>}'
"""
pc_latex_multirow = """
: bool
This specifies if the to_latex method of a Dataframe uses multirows
to pretty-print MultiIndex rows.
Valid values: False,True
"""
style_backup = dict()
def table_schema_cb(key):
from pandas.io.formats.printing import _enable_data_resource_formatter
_enable_data_resource_formatter(cf.get_option(key))
with cf.config_prefix('display'):
cf.register_option('precision', 6, pc_precision_doc, validator=is_int)
cf.register_option('float_format', None, float_format_doc,
validator=is_one_of_factory([None, is_callable]))
cf.register_option('column_space', 12, validator=is_int)
cf.register_option('max_info_rows', 1690785, pc_max_info_rows_doc,
validator=is_instance_factory((int, type(None))))
cf.register_option('max_rows', 60, pc_max_rows_doc,
validator=is_instance_factory([type(None), int]))
cf.register_option('max_categories', 8, pc_max_categories_doc,
validator=is_int)
cf.register_option('max_colwidth', 50, max_colwidth_doc, validator=is_int)
cf.register_option('max_columns', 20, pc_max_cols_doc,
validator=is_instance_factory([type(None), int]))
cf.register_option('large_repr', 'truncate', pc_large_repr_doc,
validator=is_one_of_factory(['truncate', 'info']))
cf.register_option('max_info_columns', 100, pc_max_info_cols_doc,
validator=is_int)
cf.register_option('colheader_justify', 'right', colheader_justify_doc,
validator=is_text)
cf.register_option('notebook_repr_html', True, pc_nb_repr_h_doc,
validator=is_bool)
cf.register_option('date_dayfirst', False, pc_date_dayfirst_doc,
validator=is_bool)
cf.register_option('date_yearfirst', False, pc_date_yearfirst_doc,
validator=is_bool)
cf.register_option('pprint_nest_depth', 3, pc_pprint_nest_depth,
validator=is_int)
cf.register_option('multi_sparse', True, pc_multi_sparse_doc,
validator=is_bool)
cf.register_option('encoding', detect_console_encoding(), pc_encoding_doc,
validator=is_text)
cf.register_option('expand_frame_repr', True, pc_expand_repr_doc)
cf.register_option('show_dimensions', 'truncate', pc_show_dimensions_doc,
validator=is_one_of_factory([True, False, 'truncate']))
cf.register_option('chop_threshold', None, pc_chop_threshold_doc)
cf.register_option('max_seq_items', 100, pc_max_seq_items)
cf.register_option('height', 60, pc_height_doc,
validator=is_instance_factory([type(None), int]))
cf.register_option('width', 80, pc_width_doc,
validator=is_instance_factory([type(None), int]))
# redirected to width, make defval identical
cf.register_option('line_width', get_default_val('display.width'),
pc_line_width_doc)
cf.register_option('memory_usage', True, pc_memory_usage_doc,
validator=is_one_of_factory([None, True,
False, 'deep']))
cf.register_option('unicode.east_asian_width', False,
pc_east_asian_width_doc, validator=is_bool)
cf.register_option('unicode.ambiguous_as_wide', False,
pc_east_asian_width_doc, validator=is_bool)
cf.register_option('latex.repr', False,
pc_latex_repr_doc, validator=is_bool)
cf.register_option('latex.escape', True, pc_latex_escape,
validator=is_bool)
cf.register_option('latex.longtable', False, pc_latex_longtable,
validator=is_bool)
cf.register_option('latex.multicolumn', True, pc_latex_multicolumn,
validator=is_bool)
    cf.register_option('latex.multicolumn_format', 'l', pc_latex_multicolumn_format,
validator=is_text)
cf.register_option('latex.multirow', False, pc_latex_multirow,
validator=is_bool)
cf.register_option('html.table_schema', False, pc_table_schema_doc,
validator=is_bool, cb=table_schema_cb)
cf.register_option('html.border', 1, pc_html_border_doc,
validator=is_int)
with cf.config_prefix('html'):
cf.register_option('border', 1, pc_html_border_doc,
validator=is_int)
cf.deprecate_option('html.border', msg=pc_html_border_deprecation_warning,
rkey='display.html.border')
tc_sim_interactive_doc = """
: boolean
Whether to simulate interactive mode for purposes of testing
"""
with cf.config_prefix('mode'):
cf.register_option('sim_interactive', False, tc_sim_interactive_doc)
use_inf_as_null_doc = """
: boolean
use_inf_as_null had been deprecated and will be removed in a future
version. Use `use_inf_as_na` instead.
"""
use_inf_as_na_doc = """
: boolean
True means treat None, NaN, INF, -INF as NA (old way),
False means None and NaN are null, but INF, -INF are not NA
(new way).
"""
# We don't want to start importing everything at the global context level
# or we'll hit circular deps.
def use_inf_as_na_cb(key):
from pandas.core.dtypes.missing import _use_inf_as_na
_use_inf_as_na(key)
with cf.config_prefix('mode'):
cf.register_option('use_inf_as_na', False, use_inf_as_na_doc,
cb=use_inf_as_na_cb)
cf.register_option('use_inf_as_null', False, use_inf_as_null_doc,
cb=use_inf_as_na_cb)
cf.deprecate_option('mode.use_inf_as_null', msg=use_inf_as_null_doc,
rkey='mode.use_inf_as_na')
# user warnings
chained_assignment = """
: string
Raise an exception, warn, or no action if trying to use chained assignment,
The default is warn
"""
with cf.config_prefix('mode'):
cf.register_option('chained_assignment', 'warn', chained_assignment,
validator=is_one_of_factory([None, 'warn', 'raise']))
# Set up the io.excel specific configuration.
writer_engine_doc = """
: string
The default Excel writer engine for '{ext}' files. Available options:
auto, {others}.
"""
_xls_options = ['xlwt']
_xlsm_options = ['openpyxl']
_xlsx_options = ['openpyxl', 'xlsxwriter']
with cf.config_prefix("io.excel.xls"):
cf.register_option("writer", "auto",
writer_engine_doc.format(
ext='xls',
others=', '.join(_xls_options)),
validator=str)
with cf.config_prefix("io.excel.xlsm"):
cf.register_option("writer", "auto",
writer_engine_doc.format(
ext='xlsm',
others=', '.join(_xlsm_options)),
validator=str)
with cf.config_prefix("io.excel.xlsx"):
cf.register_option("writer", "auto",
writer_engine_doc.format(
ext='xlsx',
others=', '.join(_xlsx_options)),
validator=str)
# Set up the io.parquet specific configuration.
parquet_engine_doc = """
: string
The default parquet reader/writer engine. Available options:
'auto', 'pyarrow', 'fastparquet', the default is 'auto'
"""
with cf.config_prefix('io.parquet'):
cf.register_option(
'engine', 'auto', parquet_engine_doc,
validator=is_one_of_factory(['auto', 'pyarrow', 'fastparquet']))
# --------
# Plotting
# ---------
register_converter_doc = """
: bool
Whether to register converters with matplotlib's units registry for
dates, times, datetimes, and Periods. Toggling to False will remove
the converters, restoring any converters that pandas overwrote.
"""
def register_converter_cb(key):
from pandas.plotting import register_matplotlib_converters
from pandas.plotting import deregister_matplotlib_converters
if cf.get_option(key):
register_matplotlib_converters()
else:
deregister_matplotlib_converters()
with cf.config_prefix("plotting.matplotlib"):
cf.register_option("register_converters", True, register_converter_doc,
validator=bool, cb=register_converter_cb)
| bsd-3-clause |
aminert/scikit-learn | sklearn/neighbors/regression.py | 106 | 10572 | """Nearest Neighbor Regression"""
# Authors: Jake Vanderplas <[email protected]>
# Fabian Pedregosa <[email protected]>
# Alexandre Gramfort <[email protected]>
# Sparseness support by Lars Buitinck <[email protected]>
# Multi-output support by Arnaud Joly <[email protected]>
#
# License: BSD 3 clause (C) INRIA, University of Amsterdam
import numpy as np
from .base import _get_weights, _check_weights, NeighborsBase, KNeighborsMixin
from .base import RadiusNeighborsMixin, SupervisedFloatMixin
from ..base import RegressorMixin
from ..utils import check_array
class KNeighborsRegressor(NeighborsBase, KNeighborsMixin,
SupervisedFloatMixin,
RegressorMixin):
"""Regression based on k-nearest neighbors.
The target is predicted by local interpolation of the targets
associated of the nearest neighbors in the training set.
Read more in the :ref:`User Guide <regression>`.
Parameters
----------
n_neighbors : int, optional (default = 5)
Number of neighbors to use by default for :meth:`k_neighbors` queries.
weights : str or callable
weight function used in prediction. Possible values:
- 'uniform' : uniform weights. All points in each neighborhood
are weighted equally.
- 'distance' : weight points by the inverse of their distance.
in this case, closer neighbors of a query point will have a
greater influence than neighbors which are further away.
- [callable] : a user-defined function which accepts an
array of distances, and returns an array of the same shape
containing the weights.
Uniform weights are used by default.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
- 'kd_tree' will use :class:`KDtree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
metric : string or DistanceMetric object (default='minkowski')
the distance metric to use for the tree. The default metric is
minkowski, and with p=2 is equivalent to the standard Euclidean
metric. See the documentation of the DistanceMetric class for a
list of available metrics.
p : integer, optional (default = 2)
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params: dict, optional (default = None)
additional keyword arguments for the metric function.
Examples
--------
>>> X = [[0], [1], [2], [3]]
>>> y = [0, 0, 1, 1]
>>> from sklearn.neighbors import KNeighborsRegressor
>>> neigh = KNeighborsRegressor(n_neighbors=2)
>>> neigh.fit(X, y) # doctest: +ELLIPSIS
KNeighborsRegressor(...)
>>> print(neigh.predict([[1.5]]))
[ 0.5]
See also
--------
NearestNeighbors
RadiusNeighborsRegressor
KNeighborsClassifier
RadiusNeighborsClassifier
Notes
-----
See :ref:`Nearest Neighbors <neighbors>` in the online documentation
for a discussion of the choice of ``algorithm`` and ``leaf_size``.
.. warning::
Regarding the Nearest Neighbors algorithms, if it is found that two
        neighbors, neighbor `k+1` and `k`, have identical distances but
        different labels, the results will depend on the ordering of the
training data.
http://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
"""
def __init__(self, n_neighbors=5, weights='uniform',
algorithm='auto', leaf_size=30,
p=2, metric='minkowski', metric_params=None, **kwargs):
self._init_params(n_neighbors=n_neighbors,
algorithm=algorithm,
leaf_size=leaf_size, metric=metric, p=p,
metric_params=metric_params, **kwargs)
self.weights = _check_weights(weights)
def predict(self, X):
"""Predict the target for the provided data
Parameters
----------
X : array or matrix, shape = [n_samples, n_features]
Returns
-------
        y : array of float, shape = [n_samples] or [n_samples, n_outputs]
Target values
"""
X = check_array(X, accept_sparse='csr')
neigh_dist, neigh_ind = self.kneighbors(X)
weights = _get_weights(neigh_dist, self.weights)
_y = self._y
if _y.ndim == 1:
_y = _y.reshape((-1, 1))
if weights is None:
y_pred = np.mean(_y[neigh_ind], axis=1)
else:
y_pred = np.empty((X.shape[0], _y.shape[1]), dtype=np.float)
denom = np.sum(weights, axis=1)
for j in range(_y.shape[1]):
num = np.sum(_y[neigh_ind, j] * weights, axis=1)
y_pred[:, j] = num / denom
if self._y.ndim == 1:
y_pred = y_pred.ravel()
return y_pred
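# Hedged sketch (not part of scikit-learn): with weights='distance', the
# prediction above reduces to an inverse-distance weighted average of the
# neighbours' targets. A minimal illustration for a single query point
# (assumes no zero distances):
def _example_inverse_distance_average(neigh_dist, neigh_targets):
    w = 1.0 / neigh_dist
    return np.sum(w * neigh_targets) / np.sum(w)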
class RadiusNeighborsRegressor(NeighborsBase, RadiusNeighborsMixin,
SupervisedFloatMixin,
RegressorMixin):
"""Regression based on neighbors within a fixed radius.
The target is predicted by local interpolation of the targets
associated of the nearest neighbors in the training set.
Read more in the :ref:`User Guide <regression>`.
Parameters
----------
radius : float, optional (default = 1.0)
        Range of parameter space to use by default for :meth:`radius_neighbors`
queries.
weights : str or callable
weight function used in prediction. Possible values:
- 'uniform' : uniform weights. All points in each neighborhood
are weighted equally.
- 'distance' : weight points by the inverse of their distance.
in this case, closer neighbors of a query point will have a
greater influence than neighbors which are further away.
- [callable] : a user-defined function which accepts an
array of distances, and returns an array of the same shape
containing the weights.
Uniform weights are used by default.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
- 'kd_tree' will use :class:`KDtree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
metric : string or DistanceMetric object (default='minkowski')
the distance metric to use for the tree. The default metric is
minkowski, and with p=2 is equivalent to the standard Euclidean
metric. See the documentation of the DistanceMetric class for a
list of available metrics.
p : integer, optional (default = 2)
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params: dict, optional (default = None)
additional keyword arguments for the metric function.
Examples
--------
>>> X = [[0], [1], [2], [3]]
>>> y = [0, 0, 1, 1]
>>> from sklearn.neighbors import RadiusNeighborsRegressor
>>> neigh = RadiusNeighborsRegressor(radius=1.0)
>>> neigh.fit(X, y) # doctest: +ELLIPSIS
RadiusNeighborsRegressor(...)
>>> print(neigh.predict([[1.5]]))
[ 0.5]
See also
--------
NearestNeighbors
KNeighborsRegressor
KNeighborsClassifier
RadiusNeighborsClassifier
Notes
-----
See :ref:`Nearest Neighbors <neighbors>` in the online documentation
for a discussion of the choice of ``algorithm`` and ``leaf_size``.
http://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
"""
def __init__(self, radius=1.0, weights='uniform',
algorithm='auto', leaf_size=30,
p=2, metric='minkowski', metric_params=None, **kwargs):
self._init_params(radius=radius,
algorithm=algorithm,
leaf_size=leaf_size,
p=p, metric=metric, metric_params=metric_params,
**kwargs)
self.weights = _check_weights(weights)
def predict(self, X):
"""Predict the target for the provided data
Parameters
----------
X : array or matrix, shape = [n_samples, n_features]
Returns
-------
        y : array of float, shape = [n_samples] or [n_samples, n_outputs]
Target values
"""
X = check_array(X, accept_sparse='csr')
neigh_dist, neigh_ind = self.radius_neighbors(X)
weights = _get_weights(neigh_dist, self.weights)
_y = self._y
if _y.ndim == 1:
_y = _y.reshape((-1, 1))
if weights is None:
y_pred = np.array([np.mean(_y[ind, :], axis=0)
for ind in neigh_ind])
else:
y_pred = np.array([(np.average(_y[ind, :], axis=0,
weights=weights[i]))
for (i, ind) in enumerate(neigh_ind)])
if self._y.ndim == 1:
y_pred = y_pred.ravel()
return y_pred
| bsd-3-clause |
sazlin/data-structures | insertion_sort.py | 1 | 1832 | def sort(list):
"""
performs insertion sort on list in place
source: http://en.wikipedia.org/wiki/Insertion_sort
"""
for i in xrange(1, len(list)):
x = list[i]
j = i
while j > 0 and list[j-1] > x:
list[j] = list[j-1]
j -= 1
list[j] = x
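# Hedged usage sketch (not part of the original module): sort() works in place
# and returns None, so the list itself is inspected afterwards.
def _example_usage():
    data = [5, 2, 4, 6, 1, 3]
    sort(data)
    return data # -> [1, 2, 3, 4, 5, 6]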
if __name__ == '__main__':
import timeit
import matplotlib.pyplot as plt
repetitions = 10
long_random_list_test = """
l = [random.randint(-99999,99999) for i in xrange(0, 1000)]
insertion_sort.sort(l)
"""
long_random_time = timeit.timeit(
long_random_list_test,
setup="import insertion_sort, random",
number=repetitions
)
print "1000 item random int list with {} repetitions takes time {}".format(
repetitions, long_random_time)
descend_case = """
l = [i for i in xrange({}, 0, -1)]
insertion_sort.sort(l)
"""
ascend_case = """
l = [i for i in xrange(0, {})]
insertion_sort.sort(l)
"""
descend_results = []
ascend_results = []
for size in xrange(50, 2501, 50):
descend_time = timeit.timeit(
descend_case.format(size),
setup="import insertion_sort",
number=repetitions
)
descend_results.append((size, descend_time))
ascend_time = timeit.timeit(
ascend_case.format(size),
setup="import insertion_sort",
number=repetitions
)
ascend_results.append((size, ascend_time))
plt.hold(True)
for i in range(len(descend_results)):
n_descend, time_descend = descend_results[i]
n_ascend, time_ascend = ascend_results[i]
plt.plot(n_descend, time_descend, 'bo')
plt.plot(n_ascend, time_ascend, 'ro')
plt.show()
| mit |
gfyoung/pandas | pandas/tests/series/methods/test_asof.py | 2 | 5386 | import numpy as np
import pytest
from pandas._libs.tslibs import IncompatibleFrequency
from pandas import Series, Timestamp, date_range, isna, notna, offsets
import pandas._testing as tm
class TestSeriesAsof:
def test_basic(self):
# array or list or dates
N = 50
rng = date_range("1/1/1990", periods=N, freq="53s")
ts = Series(np.random.randn(N), index=rng)
ts.iloc[15:30] = np.nan
dates = date_range("1/1/1990", periods=N * 3, freq="25s")
result = ts.asof(dates)
assert notna(result).all()
lb = ts.index[14]
ub = ts.index[30]
result = ts.asof(list(dates))
assert notna(result).all()
lb = ts.index[14]
ub = ts.index[30]
mask = (result.index >= lb) & (result.index < ub)
rs = result[mask]
assert (rs == ts[lb]).all()
val = result[result.index[result.index >= ub][0]]
assert ts[ub] == val
def test_scalar(self):
N = 30
rng = date_range("1/1/1990", periods=N, freq="53s")
ts = Series(np.arange(N), index=rng)
ts.iloc[5:10] = np.NaN
ts.iloc[15:20] = np.NaN
val1 = ts.asof(ts.index[7])
val2 = ts.asof(ts.index[19])
assert val1 == ts[4]
assert val2 == ts[14]
# accepts strings
val1 = ts.asof(str(ts.index[7]))
assert val1 == ts[4]
# in there
result = ts.asof(ts.index[3])
assert result == ts[3]
# no as of value
d = ts.index[0] - offsets.BDay()
assert np.isnan(ts.asof(d))
def test_with_nan(self):
# basic asof test
rng = date_range("1/1/2000", "1/2/2000", freq="4h")
s = Series(np.arange(len(rng)), index=rng)
r = s.resample("2h").mean()
result = r.asof(r.index)
expected = Series(
[0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6.0],
index=date_range("1/1/2000", "1/2/2000", freq="2h"),
)
tm.assert_series_equal(result, expected)
r.iloc[3:5] = np.nan
result = r.asof(r.index)
expected = Series(
[0, 0, 1, 1, 1, 1, 3, 3, 4, 4, 5, 5, 6.0],
index=date_range("1/1/2000", "1/2/2000", freq="2h"),
)
tm.assert_series_equal(result, expected)
r.iloc[-3:] = np.nan
result = r.asof(r.index)
expected = Series(
[0, 0, 1, 1, 1, 1, 3, 3, 4, 4, 4, 4, 4.0],
index=date_range("1/1/2000", "1/2/2000", freq="2h"),
)
tm.assert_series_equal(result, expected)
def test_periodindex(self):
from pandas import PeriodIndex, period_range
# array or list or dates
N = 50
rng = period_range("1/1/1990", periods=N, freq="H")
ts = Series(np.random.randn(N), index=rng)
ts.iloc[15:30] = np.nan
dates = date_range("1/1/1990", periods=N * 3, freq="37min")
result = ts.asof(dates)
assert notna(result).all()
lb = ts.index[14]
ub = ts.index[30]
result = ts.asof(list(dates))
assert notna(result).all()
lb = ts.index[14]
ub = ts.index[30]
pix = PeriodIndex(result.index.values, freq="H")
mask = (pix >= lb) & (pix < ub)
rs = result[mask]
assert (rs == ts[lb]).all()
ts.iloc[5:10] = np.nan
ts.iloc[15:20] = np.nan
val1 = ts.asof(ts.index[7])
val2 = ts.asof(ts.index[19])
assert val1 == ts[4]
assert val2 == ts[14]
# accepts strings
val1 = ts.asof(str(ts.index[7]))
assert val1 == ts[4]
# in there
assert ts.asof(ts.index[3]) == ts[3]
# no as of value
d = ts.index[0].to_timestamp() - offsets.BDay()
assert isna(ts.asof(d))
# Mismatched freq
msg = "Input has different freq"
with pytest.raises(IncompatibleFrequency, match=msg):
ts.asof(rng.asfreq("D"))
def test_errors(self):
s = Series(
[1, 2, 3],
index=[Timestamp("20130101"), Timestamp("20130103"), Timestamp("20130102")],
)
# non-monotonic
assert not s.index.is_monotonic
with pytest.raises(ValueError, match="requires a sorted index"):
s.asof(s.index[0])
# subset with Series
N = 10
rng = date_range("1/1/1990", periods=N, freq="53s")
s = Series(np.random.randn(N), index=rng)
with pytest.raises(ValueError, match="not valid for Series"):
s.asof(s.index[0], subset="foo")
def test_all_nans(self):
# GH 15713
# series is all nans
# testing non-default indexes
N = 50
rng = date_range("1/1/1990", periods=N, freq="53s")
dates = date_range("1/1/1990", periods=N * 3, freq="25s")
result = Series(np.nan, index=rng).asof(dates)
expected = Series(np.nan, index=dates)
tm.assert_series_equal(result, expected)
# testing scalar input
date = date_range("1/1/1990", periods=N * 3, freq="25s")[0]
result = Series(np.nan, index=rng).asof(date)
assert isna(result)
# test name is propagated
result = Series(np.nan, index=[1, 2, 3, 4], name="test").asof([4, 5])
expected = Series(np.nan, index=[4, 5], name="test")
tm.assert_series_equal(result, expected)
| bsd-3-clause |
raultron/ardrone_velocity | scripts/filter_design.py | 1 | 1704 | #!/usr/bin/env python
# license removed for brevity
import rospy
from std_msgs.msg import Header
from nav_msgs.msg import Odometry
from ardrone_autonomy.msg import Navdata
import rosbag
from scipy import signal
import scipy
import matplotlib.pyplot as plt
from numpy import cos, sin, pi, absolute, arange
bag = rosbag.Bag('2015-08-14-12-29-31.bag')
vel = list()
time = list()
for topic, msg, t in bag.read_messages('/ardrone/navdata'):
vel.append(msg.vx)
time.append(msg.header.stamp)
bag.close()
sample_rate = 25 #1/0.004
vel_out_median = signal.medfilt(vel,21)
#
# plt.plot(vel)
# plt.plot(vel_out)
# plt.show()
# The Nyquist rate of the signal.
nyq_rate = sample_rate / 2.0
# The desired width of the transition from pass to stop,
# relative to the Nyquist rate. We'll design the filter
# with a 5 Hz transition width.
width = 20.0/nyq_rate
# The desired attenuation in the stop band, in dB.
ripple_db = 20.0
# Compute the order and Kaiser parameter for the FIR filter.
N, beta = signal.kaiserord(ripple_db, width)
# The cutoff frequency of the filter.
cutoff_hz = 1.0
# Use firwin with a Kaiser window to create a lowpass FIR filter.
#taps = signal.firwin(N, cutoff_hz/nyq_rate, window=('kaiser', beta))
# Use a predefined filter order
taps = signal.firwin(30, cutoff_hz/nyq_rate)
print taps
# Use lfilter to filter x with the FIR filter.
vel_out = signal.lfilter(taps, 1.0, vel)
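# Hedged sketch (not part of the original script): a linear-phase FIR filter
# delays its output by (ntaps - 1) / 2 samples; for offline analysis a
# zero-phase alternative is signal.filtfilt. Computed here only for comparison
# and not used further below.
vel_out_zero_phase = signal.filtfilt(taps, 1.0, vel)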
plt.plot(vel_out)
plt.plot(vel)
plt.plot(vel_out_median)
plt.figure(2)
plt.clf()
w, h = signal.freqz(taps, worN=8000)
plt.plot((w/pi)*nyq_rate, absolute(h), linewidth=2)
plt.xlabel('Frequency (Hz)')
plt.ylabel('Gain')
plt.title('Frequency Response')
plt.ylim(-0.05, 1.05)
plt.grid(True)
plt.show()
| gpl-2.0 |
iwegner/MITK | Modules/Biophotonics/python/iMC/regression/linear.py | 6 | 3053 | '''
Created on Oct 19, 2015
@author: wirkert
'''
import numpy as np
from scipy.interpolate import interp1d
from mc.usuag import get_haemoglobin_extinction_coefficients
class LinearSaO2Unmixing(object):
'''
classdocs
'''
def __init__(self):
        # placeholder initialisations (both arrays are assigned below)
        eHbO2 = 0
        eHb = 0
# oxygenated haemoglobin extinction coefficients
eHbO2 = np.array([34772.8,
27840.93333,
23748.8 ,
21550.8 ,
21723.46667,
28064.8 ,
39131.73333,
45402.93333,
42955.06667,
40041.73333,
42404.4 ,
36333.6 ,
22568.26667,
6368.933333,
1882.666667,
1019.333333,
664.6666667,
473.3333333,
376.5333333,
327.2 ,
297.0666667],)
# deoxygenated haemoglobin extinction coefficients
eHb = [18031.73333 ,
15796.8 ,
17365.33333 ,
21106.53333 ,
26075.06667 ,
32133.2 ,
39072.66667 ,
46346.8 ,
51264 ,
50757.33333 ,
45293.33333 ,
36805.46667 ,
26673.86667 ,
17481.73333 ,
10210.13333 ,
7034 ,
5334.533333 ,
4414.706667 ,
3773.96 ,
3257.266667 ,
2809.866667]
nr_total_wavelengths = len(eHb)
# to account for scattering losses we allow a constant offset
scattering = np.ones(nr_total_wavelengths)
# put eHbO2, eHb and scattering term in one measurement matrix
self.H = np.vstack((eHbO2, eHb, scattering)).T
self.lsq_solution_matrix = np.dot(np.linalg.inv(np.dot(self.H.T,
self.H)),
self.H.T)
def fit(self, X, y, weights=None):
"""only implemented to fit to the standard sklearn framework."""
pass
def predict(self, X):
"""predict like in sklearn:
Parameters:
X: nrsamples x nr_features matrix of samples to predict for
regression
Returns:
y: array of shape [nr_samples] with values for predicted
oxygenation """
# do least squares estimation
oxy_test, deoxy, s = np.dot(self.lsq_solution_matrix, X.T)
# calculate oxygenation = oxygenated blood / total blood
saO2 = oxy_test / (oxy_test + deoxy)
return np.clip(saO2, 0., 1.)
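# Hedged sketch (not part of the original module): the precomputed matrix above
# is the normal-equation form of ordinary least squares; the same concentrations
# can be obtained with np.linalg.lstsq, which is often more numerically robust.
# Shown only to illustrate the algebra; this helper is not used by the class.
def _lstsq_sao2(H, x):
    # solve H * c = x for c = (oxygenated Hb, deoxygenated Hb, scattering offset)
    c = np.linalg.lstsq(H, x)[0]
    oxy, deoxy = c[0], c[1]
    return oxy / (oxy + deoxy)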
| bsd-3-clause |
afgaron/rgz-analysis | python/consensus.py | 2 | 55278 | from __future__ import division
'''
consensus.py
This is the collation and aggregation code for the Radio Galaxy Zoo project. The main
intent of the code is to take in the raw classifications generated by Ouroboros
(as a MongoDB file), combine classifications by independent users, and generate
a single consensus answer. The results are stored in both MongoDB and as a
set of static CSV output files.
Originally developed by Kyle Willett (University of Minnesota), 2014-2016.
'''
# Local RGZ modules
import collinearity
from load_contours import get_contours,make_pathdict
# Packges (installed by default with Python)
import datetime
import operator
from collections import Counter
import cStringIO
import urllib
import json
import os.path
import time
import shutil
import logging
# Other packages (may need to install separately)
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.pyplot import cm
from matplotlib.path import Path
import matplotlib.patches as patches
from scipy.ndimage.filters import maximum_filter
from scipy import stats
from scipy.ndimage.morphology import generate_binary_structure, binary_erosion
from scipy.linalg.basic import LinAlgError
from astropy.io import fits
from pymongo import ASCENDING
from pymongo import MongoClient
from PIL import Image
# Load data from MongoDB; collections are used in almost every module, so make them global variables.
client = MongoClient('localhost', 27017)
db = client['radio']
# Select which version of the catalog to use
version = '_2019-05-06'
subjects = db['radio_subjects'] # subjects = images
classifications = db['radio_classifications'] # classifications = classifications of each subject per user
consensus = db['consensus{}'.format(version)] # consensus = output of this program
user_weights = db['user_weights{}'.format(version)]
logfile = 'consensus{}.log'.format(version)
# Parameters for the RGZ project
main_release_date = datetime.datetime(2013, 12, 17, 0, 0, 0, 0)
# Different image scales and sizes, depending on the survey being analyzed
img_params = {
'first':{
'IMG_HEIGHT_OLD':424.0 , # number of pixels in the IR coordinate system (as recorded in Mongo) along the y axis
'IMG_WIDTH_OLD':424.0 , # number of pixels in the IR coordinate system (as recorded in Mongo) along the x axis
'IMG_HEIGHT_NEW':500.0 , # number of pixels in the downloaded JPG image along the y axis
'IMG_WIDTH_NEW':500.0 , # number of pixels in the downloaded JPG image along the x axis
'FITS_HEIGHT':132.0 , # number of pixels in the FITS image along the y axis (radio only)
'FITS_WIDTH':132.0 , # number of pixels in the FITS image along the y axis (radio only)
'PIXEL_SIZE':1.3748 # the number of arcseconds per pixel in the radio FITS image (from CDELT1)
},
'atlas':{
'IMG_HEIGHT_OLD':424.0 , # number of pixels in the IR coordinate system (as recorded in Mongo) along the y axis
'IMG_WIDTH_OLD':424.0 , # number of pixels in the IR coordinate system (as recorded in Mongo) along the x axis
'IMG_HEIGHT_NEW':500.0 , # number of pixels in the downloaded PNG image along the y axis
'IMG_WIDTH_NEW':500.0 , # number of pixels in the downloaded PNG image along the x axis
'FITS_HEIGHT':201.0 , # number of pixels in the FITS image along the y axis (both IR and radio)
'FITS_WIDTH':201.0 , # number of pixels in the FITS image along the x axis (both IR and radio)
'PIXEL_SIZE':0.6000 # the number of arcseconds per pixel in the radio FITS image (from CDELT1)
}
}
# These fields are annoyingly stored in the same raw data structure as the actual
# annotations of the image. They're removed from all analysis.
bad_keys = ('finished_at','started_at','user_agent','lang','pending')
# Local directory paths. Add paths below depending on your local source for:
# - raw FITS images of radio data (data_path)
# - PNG images of radio and IR subjects (data_path)
# - contour data for radio flux (data_path)
# - your current working directory (rgz_path)
def determine_paths(paths):
found_path = False
for path in paths:
if os.path.exists(path):
found_path = True
return path
if found_path == False:
print "Unable to find the hardcoded local path:"
print paths
return None
rgz_path = determine_paths(('/data/tabernacle/larry/RGZdata/rgz-analysis','/Users/willettk/Astronomy/Research/GalaxyZoo/rgz-analysis'))
#rgz_path = '/home/garon/Documents/RGZdata/rgz-analysis'
data_path = determine_paths(('/data/tabernacle/larry/RGZdata/rawdata','/data/extragal/willett','/Volumes/REISEPASS','/Volumes/3TB'))
plot_path = "{0}/rgz/plots".format(data_path)
pathdict = make_pathdict()
########################################
# Begin the actual code
########################################
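# Hedged sketch (not part of the original pipeline): checksum() below builds a
# per-galaxy identifier by multiplying and rounding xmax coordinates, which its
# own comments note is prone to rounding error. One hypothetical alternative is
# an order-independent signature over the rounded coordinates; shown for
# illustration only and not used anywhere in this module.
def components_signature(xmaxlist):
    # hash the sorted, rounded coordinates so component order does not matter
    return hash(tuple(sorted(round(x, 3) for x in xmaxlist)))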
def checksum(zid,experts_only=False,excluded=[],no_anonymous=False,include_peak_data=True,weights=0,scheme='scaling'):
# Find the consensus for all users who have classified a subject
sub = subjects.find_one({'zooniverse_id':zid})
imgid = sub['_id']
survey = sub['metadata']['survey']
# Classifications for this subject after launch date
class_params = {"subject_ids": imgid, "updated_at": {"$gt": main_release_date}}
# Only get the consensus classification for the science team members
if experts_only:
class_params['expert'] = True
# If comparing a particular volunteer (such as an expert), don't include self-comparison
if len(excluded) > 0:
class_params['user_name'] = {"$nin":excluded}
# To exclude anonymous classifications (registered users only):
if no_anonymous:
if class_params.has_key('user_name'):
class_params['user_name']["$exists"] = True
else:
class_params['user_name'] = {"$exists":True}
_c = classifications.find(class_params)
# Empty dicts and lists
cdict = {}
unique_users = set()
clen_start = 0
clist_all = []
listcount = []
# Compute the most popular combination for each NUMBER of galaxies identified in image
for c in _c:
clist_all.append(c)
clen_start += 1
# Skip classification if they already did one. This assumes the latest classification
# is always the best (or at least the one that will be recorded here).
try:
user_name = c['user_name']
except KeyError:
user_name = 'Anonymous'
# Check the answer, as long as they haven't already done one.
        if user_name not in unique_users or user_name == 'Anonymous':
unique_users.add(user_name)
listcount.append(True)
sumlist = [] # List of the checksums over all possible combinations
# Only find data that was an actual marking, not metadata
goodann = [x for x in c['annotations'] if (x.keys()[0] not in bad_keys)]
n_galaxies = len(goodann)
# There must be at least one galaxy!
if n_galaxies > 0:
for idx,ann in enumerate(goodann):
xmaxlist = []
try:
radio_comps = ann['radio']
# loop over all the radio components within an galaxy
if radio_comps != 'No Contours':
for rc in radio_comps:
xmaxlist.append(float(radio_comps[rc]['xmax']))
# or make the value -99 if there are no contours
else:
xmaxlist.append(-99)
except KeyError:
# No radio data for this classification
xmaxlist.append(-99)
# To create a unique ID for the combination of radio components,
# take the product of all the xmax coordinates and sum them together
# as a crude hash. This is not an ideal method and is potentially
# subject to rounding errors - could be significantly improved.
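                    # Hypothetical example of the hash: a galaxy whose two radio
                    # components have xmax = 101.5 and 55.25 contributes
                    # round(101.5 * 55.25, 3) = 5607.875 to sumlist.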
product = reduce(operator.mul, xmaxlist, 1)
sumlist.append(round(product,3))
checksum = sum(sumlist)
else:
# No galaxies in this classification
checksum = -99
c['checksum'] = checksum
c['n_galaxies'] = n_galaxies
# Insert checksum into dictionary with number of galaxies as the index
if cdict.has_key(n_galaxies):
cdict[n_galaxies].append(checksum)
else:
cdict[n_galaxies] = [checksum]
else:
listcount.append(False)
# Remove duplicates and classifications for "No Object"
clist = [c for lc,c in zip(listcount,clist_all) if lc and c['checksum'] != -99]
clist_debugged = []
for ix, c in enumerate(clist):
if ix and 'user_name' not in c and 'user_name' not in clist[ix-1]:
c0 = clist[ix-1]
if ([anno for anno in c['annotations'] if 'ir' in anno] != [anno for anno in c0['annotations'] if 'ir' in anno]) or \
(abs(c['created_at']-c0['created_at']).seconds > 30):
clist_debugged.append(c)
else:
cdict[c['n_galaxies']].remove(c['checksum'])
else:
clist_debugged.append(c)
clist = clist_debugged
# Implement the weighting scheme, if desired. Simply add duplicate classifications
# for users who have been upweighted based on their agreement with the science team
# on gold-standard subjects.
if weights > 0:
weighted_c = []
for c in clist:
if c.has_key('user_name'):
try:
weight = user_weights.find_one({'user_name':c['user_name']})['weight']
except TypeError:
weight = 0
if scheme == 'threshold' and weight == 1:
for i in range(weights):
weighted_c.append(c)
cdict[c['n_galaxies']].append(c['checksum'])
elif scheme == 'scaling' and weight > 0:
for i in range(weight):
weighted_c.append(c)
cdict[c['n_galaxies']].append(c['checksum'])
if len(weighted_c) > 0:
clist.extend(weighted_c)
maxval=0
mc_checksum = 0.
# Find the number of sources in the image that has the highest number of consensus classifications
for k,v in cdict.iteritems():
mc = Counter(v).most_common()
# Check if the most common selection coordinate was for no radio contours
if mc[0][0] == -99.0:
if len(mc) > 1:
# If so, take the selection with the next-highest number of counts
mc_best = mc[1]
else:
continue
# Selection with the highest number of counts
else:
mc_best = mc[0]
# If the new selection has more counts than the previous one, choose it as the best match;
# if tied or less than this, remain with the current consensus number of galaxies
if mc_best[1] > maxval:
maxval = mc_best[1]
mc_checksum = mc_best[0]
# Get a galaxy that matches the checksum so we can record the annotation data
try:
cmatch = next(i for i in clist if i['checksum'] == mc_checksum)
except StopIteration:
# Necessary for objects like ARG0003par;
# one classifier recorded 22 "No IR","No Contours" in a short space.
# Shouldn't happen (some sort of system glitch), but catch it if it does.
print 'No non-zero classifications recorded for {0}'.format(zid)
logging.info('No non-zero classifications recorded for {0}'.format(zid))
return None
# Get the annotation data for galaxies that match the consensus
goodann = [x for x in cmatch['annotations'] if x.keys()[0] not in bad_keys]
# Find the sum of the xmax coordinates for each galaxy. This gives the index to search on.
cons = {}
cons['zid'] = zid
cons['source'] = sub['metadata']['source']
cons['survey'] = survey
ir_x,ir_y = {},{}
cons['answer'] = {}
cons['n_votes'] = maxval
cons['n_total'] = len(clist)
# This will be where we store the consensus parameters
answer = cons['answer']
# Loop over the annotations and record the parameters of the bounding boxes
for k,gal in enumerate(goodann):
xmax_temp = []
bbox_temp = []
try:
for v in gal['radio'].itervalues():
xmax_temp.append(float(v['xmax']))
bbox_temp.append((v['xmax'],v['ymax'],v['xmin'],v['ymin']))
checksum2 = round(sum(xmax_temp),3)
answer[checksum2] = {}
answer[checksum2]['ind'] = k
answer[checksum2]['xmax'] = xmax_temp
answer[checksum2]['bbox'] = bbox_temp
except KeyError:
print gal, zid
logging.warning((gal, zid))
except AttributeError:
print 'No Sources, No IR recorded for {0}'.format(zid)
logging.warning('No Sources, No IR recorded for {0}'.format(zid))
# Make empty copy of next dict in same loop
ir_x[k] = []
ir_y[k] = []
# Now loop over all sets of classifications to get their IR counterparts
for c in clist:
if c['checksum'] == mc_checksum:
annlist = [ann for ann in c['annotations'] if ann.keys()[0] not in bad_keys]
for ann in annlist:
if 'ir' in ann.keys():
# Find the index k that this corresponds to
try:
xmax_checksum = round(sum([float(ann['radio'][a]['xmax']) for a in ann['radio']]),3)
except TypeError:
xmax_checksum = -99
try:
k = answer[xmax_checksum]['ind']
if ann['ir'] == 'No Sources':
ir_x[k].append(-99)
ir_y[k].append(-99)
else:
# Only takes the first IR source if there is more than one.
ir_x[k].append(float(ann['ir']['0']['x']))
ir_y[k].append(float(ann['ir']['0']['y']))
except KeyError:
print '"No radio" still appearing as valid consensus option.'
logging.warning('"No radio" still appearing as valid consensus option.')
# Perform a kernel density estimate on the data for each galaxy to find the IR peak (in pixel coordinates)
scale_ir = img_params[survey]['IMG_HEIGHT_NEW'] * 1./img_params[survey]['IMG_HEIGHT_OLD']
peak_data = []
# Remove any empty IR peaks recorded
for (xk,xv),(yk,yv) in zip(ir_x.iteritems(),ir_y.iteritems()):
if len(xv) == 0:
ir_x.pop(xk)
if len(yv) == 0:
ir_y.pop(yk)
# Make sure that we have the same number of points for the x- and y-coordinates of the IR peaks
assert len(ir_x) == len(ir_y),'Lengths of ir_x ({0:d}) and ir_y ({1:d}) are not the same'.format(len(ir_x),len(ir_y))
for (xk,xv),(yk,yv) in zip(ir_x.iteritems(),ir_y.iteritems()):
        if len(xv) == 0:
            # Empty entries were already removed above; skip defensively if any remain
            continue
pd = {}
# Convert into the same scale as the radio coordinates
x_exists = [xt * scale_ir for xt in xv if xt != -99.0]
y_exists = [yt * scale_ir for yt in yv if yt != -99.0]
x_all = [xt * scale_ir for xt in xv]
y_all = [yt * scale_ir for yt in yv]
# Find the most common IR coordinate. We want to skip the next steps
# if they said there was no IR counterpart (-99,-99)
ir_Counter = Counter([(xx,yy) for xx,yy in zip(xv,yv)])
most_common_ir = ir_Counter.most_common(1)[0][0]
xmin = 1.
xmax = img_params[survey]['IMG_HEIGHT_NEW']
ymin = 1.
ymax = img_params[survey]['IMG_WIDTH_NEW']
# Check if there are enough IR points to attempt a kernel density estimate
if len(Counter(x_exists)) > 2 and len(Counter(y_exists)) > 2 and most_common_ir != (-99,-99):
# X,Y = grid of uniform coordinates over the IR pixel plane
X, Y = np.mgrid[xmin:xmax, ymin:ymax]
positions = np.vstack([X.ravel(), Y.ravel()])
try:
values = np.vstack([x_exists, y_exists])
except ValueError:
# Breaks on the tutorial subject. Find out why len(x) != len(y)
print zid
print 'Length of IR x array: {0:d}; Length of IR y array: {1:d}'.format(len(x_exists),len(y_exists))
logging.warning((zid, 'Length of IR x array: {0:d}; Length of IR y array: {1:d}'.format(len(x_exists),len(y_exists))))
try:
# Compute the kernel density estimate
kernel = stats.gaussian_kde(values)
except LinAlgError:
print 'LinAlgError in KD estimation for {0}'.format(zid,x_exists,y_exists)
logging.warning('LinAlgError in KD estimation for {0}'.format(zid,x_exists,y_exists))
for k,v in answer.iteritems():
if v['ind'] == xk:
xpeak, ypeak = np.mean(x_exists), np.mean(y_exists)
answer[k]['ir'] = (xpeak, ypeak)
# Count the number of clicks within 3"
agreed = 0
for x0, y0 in zip(x_exists, y_exists):
if np.sqrt(np.square(xpeak-x0)+np.square(ypeak-y0)) <= (xmax-xmin)/60.:
agreed += 1
answer[k]['n_ir'] = agreed
answer[k]['ir_level'] = 1.0*agreed/len(xv)
answer[k]['ir_flag'] = 0
continue
# Even if there are more than 2 sets of points, if they are mutually co-linear,
# matrix can't invert and kernel returns NaNs.
kp = kernel(positions)
# Check to see if there are NaNs in the kernel (usually a sign of co-linear points).
if np.isnan(kp).sum() > 0:
acp = collinearity.collinear(x_exists,y_exists)
if len(acp) > 0:
output = 'There are {0:d} unique points for {1} (source no. {2:d} in the field), but all are co-linear; KDE estimate does not work.'.format( \
len(Counter(x_exists)),zid,xk)
else:
output = 'There are NaNs in the KDE for {0} (source no. {1:d} in the field), but points are not co-linear.'.format(zid,xk)
logging.info(output)
for k,v in answer.iteritems():
if v['ind'] == xk:
xpeak, ypeak = np.mean(x_exists), np.mean(y_exists)
answer[k]['ir'] = (xpeak, ypeak)
# Count the number of clicks within 3"
agreed = 0
for x0, y0 in zip(x_exists, y_exists):
if np.sqrt(np.square(xpeak-x0)+np.square(ypeak-y0)) <= (xmax-xmin)/60.:
agreed += 1
answer[k]['n_ir'] = agreed
answer[k]['ir_level'] = 1.0*agreed/len(xv)
answer[k]['ir_flag'] = 0
# Kernel is finite; should be able to get a position
else:
Z = np.reshape(kp.T, X.shape)
# Find the number of peaks in the kernel
# http://stackoverflow.com/questions/3684484/peak-detection-in-a-2d-array
neighborhood = np.ones((10,10))
local_max = maximum_filter(Z, footprint=neighborhood)==Z
background = (Z==0)
eroded_background = binary_erosion(background, structure=neighborhood, border_value=1)
detected_peaks = local_max ^ eroded_background
npeaks = detected_peaks.sum()
pd['X'] = X
pd['Y'] = Y
pd['Z'] = Z
pd['npeaks'] = npeaks
try:
# Peak values in the kernel are what we take as the final IR location
xpeak = float(pd['X'][pd['Z']==pd['Z'].max()][0])
ypeak = float(pd['Y'][pd['Z']==pd['Z'].max()][0])
peak_height = Z[int(xpeak)-1, int(ypeak)-1]
except IndexError:
# Print results to screen if it doesn't match
print pd
print zid, clist
logging.warning((pd, zid, clist))
# For each answer in this image, record the final IR peak
for k,v in answer.iteritems():
if v['ind'] == xk:
answer[k]['ir_peak'] = (xpeak,ypeak)
# Count the number of clicks within 3"
agreed = 0
for x0, y0 in zip(x_exists, y_exists):
if np.sqrt(np.square(xpeak-x0)+np.square(ypeak-y0)) <= (xmax-xmin)/60.:
agreed += 1
answer[k]['n_ir'] = agreed
answer[k]['ir_level'] = 1.0*agreed/len(xv)
answer[k]['ir_flag'] = 1
# Don't write to consensus for serializable JSON object
if include_peak_data:
answer[k]['peak_data'] = pd
answer[k]['ir_x'] = x_exists
answer[k]['ir_y'] = y_exists
# Couldn't attempt a KDE; too few IR points in consensus
else:
# Note: need to actually put a limit in if less than half of users selected IR counterpart.
# Right now it still IDs a source even if only, say, 1/10 users said it was there.
for k,v in answer.iteritems():
if v['ind'] == xk:
# Case 1: multiple users selected IR source, but not enough unique points to pinpoint peak
if most_common_ir != (-99,-99) and len(x_exists) > 0 and len(y_exists) > 0:
xpeak, ypeak = np.mean(x_exists), np.mean(y_exists)
answer[k]['ir'] = (xpeak, ypeak)
# Count the number of clicks within 3"
agreed = 0
for x0, y0 in zip(x_exists, y_exists):
if np.sqrt(np.square(xpeak-x0)+np.square(ypeak-y0)) <= (xmax-xmin)/60.:
agreed += 1
answer[k]['n_ir'] = agreed
answer[k]['ir_level'] = 1.0*agreed/len(xv)
answer[k]['ir_flag'] = 0
# Case 2: most users have selected No Sources
else:
answer[k]['ir'] = (-99,-99)
answer[k]['n_ir'] = xv.count(-99)
answer[k]['ir_level'] = 1.0*xv.count(-99)/len(xv)
answer[k]['ir_flag'] = 0
# Final answer
return cons
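# Hedged example (not part of the original pipeline): a minimal helper showing how
# the consensus dict returned by checksum() might be inspected. It relies only on
# the keys built above ('zid', 'answer', 'n_votes', 'n_total').
def summarize_consensus(cons):
    # Print a one-line summary plus one line per consensus radio source
    if cons is None:
        print 'No consensus available'
        return None
    level = 1.0 * cons['n_votes'] / cons['n_total']
    print '{0}: {1:d} source(s), {2:d}/{3:d} users agree ({4:.2f})'.format( \
        cons['zid'], len(cons['answer']), cons['n_votes'], cons['n_total'], level)
    for ans in cons['answer'].itervalues():
        print '    source {0}: {1:d} radio component(s), ir_flag = {2}'.format( \
            alphabet(ans['ind']), len(ans['xmax']), ans.get('ir_flag', 'n/a'))
    return None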
def one_answer(zid,user_name):
# Find the result for just one user and one image (a single classification)
sub = subjects.find_one({'zooniverse_id':zid})
imgid = sub['_id']
# Classifications for this subject after launch date
class_params = {"subject_ids": imgid, "updated_at": {"$gt": main_release_date},'user_name':user_name}
clist = list(classifications.find(class_params))
# Empty dicts and lists
cdict = {}
for c in clist:
# Want most popular combination for each NUMBER of galaxies identified in image
sumlist = [] # List of the checksums over all possible combinations
# Only find data that was an actual marking, not metadata
goodann = [x for x in c['annotations'] if x.keys()[0] not in bad_keys]
n_galaxies = len(goodann)
for idx,ann in enumerate(goodann):
xmaxlist = []
radio_comps = ann['radio']
            # loop over all the radio components within a galaxy
if radio_comps != 'No Contours':
for rc in radio_comps:
xmaxlist.append(float(radio_comps[rc]['xmax']))
# or make the value -99 if there are no contours
else:
xmaxlist.append(-99)
# To create a unique ID for the combination of radio components,
# take the product of all the xmax coordinates and sum them together.
product = reduce(operator.mul, xmaxlist, 1)
sumlist.append(round(product,3))
checksum = round(sum(sumlist),3)
c['checksum'] = checksum
# Insert checksum into dictionary with number of galaxies as the index
if cdict.has_key(n_galaxies):
cdict[n_galaxies].append(checksum)
else:
cdict[n_galaxies] = [checksum]
maxval=0
mc_checksum = 0.
# Find the number of galaxies that has the highest number of consensus classifications
for k,v in cdict.iteritems():
mc = Counter(v).most_common()
# Check if the most common selection coordinate was for no radio contours
if mc[0][0] == -99.0:
if len(mc) > 1:
# If so, take the selection with the next-highest number of counts
mc_best = mc[1]
else:
continue
# Selection with the highest number of counts
else:
mc_best = mc[0]
# If the new selection has more counts than the previous one, choose it as the best match;
# if tied or less than this, remain with the current consensus number of galaxies
if mc_best[1] > maxval:
maxval = mc_best[1]
mc_checksum = mc_best[0]
# Find a galaxy that matches the checksum (easier to keep track as a list)
try:
cmatch = next(i for i in clist if i['checksum'] == mc_checksum)
except StopIteration:
# Crude way to check for No Sources and No Contours (mc_checksum = 0.)
cons = {'zid':zid,'answer':{}}
return cons
# Find IR peak for the checksummed galaxies
goodann = [x for x in cmatch['annotations'] if x.keys()[0] not in bad_keys]
# Find the sum of the xmax coordinates for each galaxy. This gives the index to search on.
cons = {}
cons['zid'] = zid
cons['answer'] = {}
cons['n_votes'] = 1
cons['n_total'] = 1
answer = cons['answer']
ir_x,ir_y = {},{}
for k,gal in enumerate(goodann):
xmax_temp = []
try:
for v in gal['radio'].itervalues():
xmax_temp.append(float(v['xmax']))
except AttributeError:
xmax_temp.append(-99)
checksum2 = round(sum(xmax_temp),3)
answer[checksum2] = {}
answer[checksum2]['ind'] = k
answer[checksum2]['xmax'] = xmax_temp
# Make empty copy of next dict in same loop
ir_x[k] = []
ir_y[k] = []
# Now loop over the galaxies themselves
for c in clist:
if c['checksum'] == mc_checksum:
annlist = [ann for ann in c['annotations'] if ann.keys()[0] not in bad_keys]
for ann in annlist:
if 'ir' in ann.keys():
# Find the index k that this corresponds to
try:
xmax_checksum = round(sum([float(ann['radio'][a]['xmax']) for a in ann['radio']]),3)
except TypeError:
xmax_checksum = -99
k = answer[xmax_checksum]['ind']
if ann['ir'] == 'No Sources':
ir_x[k].append(-99)
ir_y[k].append(-99)
else:
# Only takes the first IR source right now; NEEDS TO BE MODIFIED.
ir_x[k].append(float(ann['ir']['0']['x']))
ir_y[k].append(float(ann['ir']['0']['y']))
    # Single classification: take the user's recorded IR click (or -99 for 'No Sources') as the peak
    for ck,v in answer.iteritems():
        if len(ir_x[v['ind']]) > 0:
            answer[ck]['ir_peak'] = (ir_x[v['ind']][0],ir_y[v['ind']][0])
        else:
            answer[ck]['ir_peak'] = (-99,-99)
return cons
def check_indices(index_names):
# See if additional indices have been created (improves run time drastically)
indices = classifications.index_information()
for index_name in index_names:
if not indices.has_key("{0}_idx".format(index_name)):
subindex = classifications.create_index([(index_name,ASCENDING)],name='{0}_idx'.format(index_name))
return None
def grab_image(subject,imgtype='standard'):
# Import a JPG from the RGZ subjects. Tries to find a local version before downloading over the web
url = subject['location'][imgtype]
filename = "{0}/rgz/{1}/{2}".format(data_path,imgtype,url.split('/')[-1])
if os.path.exists(filename):
with open(filename) as f:
im = Image.open(f)
im.load()
else:
im = Image.open(cStringIO.StringIO(urllib.urlopen(url).read()))
return im
def plot_consensus(consensus,figno=1,savefig=False):
# Plot a 4-panel image of IR, radio, KDE estimate, and consensus
zid = consensus['zid']
answer = consensus['answer']
sub = subjects.find_one({'zooniverse_id':zid})
survey = sub['metadata']['survey']
# Get contour data
contours = get_contours(sub,pathdict)
# Key bit that sets the difference between surveys.
# contours['width'] = img_params[survey]['FITS_WIDTH']
# contours['height'] = img_params[survey]['FITS_HEIGHT']
sf_x = img_params[survey]['IMG_WIDTH_NEW'] * 1./contours['width']
sf_y = img_params[survey]['IMG_HEIGHT_NEW'] * 1./contours['height']
verts_all = []
codes_all = []
components = contours['contours']
for comp in components:
# Order of bounding box components is (xmax,ymax,xmin,ymin)
comp_xmax,comp_ymax,comp_xmin,comp_ymin = comp[0]['bbox']
# Only plot radio components identified by the users as the consensus;
# check on the xmax value to make sure
for v in answer.itervalues():
if comp_xmax in v['xmax']:
for idx,level in enumerate(comp):
verts = [((p['x'])*sf_x,(p['y']-1)*sf_y) for p in level['arr']]
codes = np.ones(len(verts),int) * Path.LINETO
codes[0] = Path.MOVETO
verts_all.extend(verts)
codes_all.extend(codes)
try:
path = Path(verts_all, codes_all)
patch_black = patches.PathPatch(path, facecolor = 'none', edgecolor='black', lw=1)
except AssertionError:
print 'Users found no components for consensus match of {0}'.format(zid)
# Plot the infrared results
fig = plt.figure(figno,(15,4))
fig.clf()
ax3 = fig.add_subplot(143)
ax4 = fig.add_subplot(144)
colormaparr = [cm.hot_r,cm.Blues,cm.RdPu,cm.Greens,cm.PuBu,cm.YlGn,cm.Greys][::-1]
colorarr = ['r','b','m','g','c','y','k'][::-1]
# If, in the rare case, that the consensus has more unique sources than the number of colors:
if len(answer) > len(colorarr):
        # Compute the repeat factor once; otherwise the second line would divide by
        # the already-lengthened colorarr and leave colormaparr too short.
        n_repeat = int(len(answer)/len(colorarr))+1
        colorarr *= n_repeat
        colormaparr *= n_repeat
if len(answer) > 0: # At least one galaxy was identified
for idx,ans in enumerate(answer.itervalues()):
if ans.has_key('peak_data'):
xmin = 1.
xmax = img_params[survey]['IMG_HEIGHT_NEW']
ymin = 1.
ymax = img_params[survey]['IMG_WIDTH_NEW']
# Plot the KDE map
colormap = colormaparr.pop()
ax3.imshow(np.rot90(ans['peak_data']['Z']), cmap=colormap,extent=[xmin, xmax, ymin, ymax])
# Plot individual sources
color = colorarr.pop()
x_plot,y_plot = ans['ir_x'],ans['ir_y']
ax3.scatter(x_plot, y_plot, c=color, marker='o', s=10, alpha=1./len(x_plot))
ax4.plot([ans['ir_peak'][0]],[ans['ir_peak'][1]],color=color,marker='*',markersize=12)
elif ans.has_key('ir'):
color = colorarr.pop()
x_plot,y_plot = ans['ir']
ax3.plot([x_plot],[y_plot],color=color,marker='o',markersize=2)
ax4.plot([x_plot],[y_plot],color=color,marker='*',markersize=12)
else:
ax4.text(img_params[survey]['IMG_WIDTH_NEW']+50,idx*25,'#{0:d} - no IR host'.format(idx),fontsize=11)
ax3.set_xlim([0, img_params[survey]['IMG_WIDTH_NEW']])
ax3.set_ylim([img_params[survey]['IMG_HEIGHT_NEW'], 0])
ax3.set_title(zid)
ax3.set_aspect('equal')
ax4.set_xlim([0, img_params[survey]['IMG_WIDTH_NEW']])
ax4.set_ylim([img_params[survey]['IMG_HEIGHT_NEW'], 0])
ax4.set_title('Consensus ({0:d}/{1:d} users)'.format(consensus['n_votes'],consensus['n_total']))
ax4.set_aspect('equal')
# Display IR and radio images
im_standard = grab_image(sub,imgtype='standard')
ax1 = fig.add_subplot(141)
ax1.imshow(im_standard,origin='upper')
ax1.set_title('WISE')
im_radio = grab_image(sub,imgtype='radio')
ax2 = fig.add_subplot(142)
ax2.imshow(im_radio,origin='upper')
ax2.set_title(sub['metadata']['source'])
ax2.get_yaxis().set_ticklabels([])
ax3.get_yaxis().set_ticklabels([])
# Plot contours identified as the consensus
if len(answer) > 0:
ax4.add_patch(patch_black)
ax4.yaxis.tick_right()
nticks = 5
ax1.get_xaxis().set_ticks(np.arange(nticks)*img_params[survey]['IMG_WIDTH_NEW'] * 1./nticks)
ax2.get_xaxis().set_ticks(np.arange(nticks)*img_params[survey]['IMG_WIDTH_NEW'] * 1./nticks)
ax3.get_xaxis().set_ticks(np.arange(nticks)*img_params[survey]['IMG_WIDTH_NEW'] * 1./nticks)
ax4.get_xaxis().set_ticks(np.arange(nticks+1)*img_params[survey]['IMG_WIDTH_NEW'] * 1./nticks)
plt.subplots_adjust(wspace=0.02)
# Save hard copy of the figure
if savefig == True:
fig.savefig('{0}/{1}/{2}.pdf'.format(plot_path,consensus['survey'],zid))
else:
plt.show()
# Close figure after it's done; otherwise mpl complains about having thousands of stuff open
plt.close()
return None
def classifiers_per_image(zid):
# Print list of the users who classified a particular subject
sid = subjects.find_one({'zooniverse_id':zid})['_id']
c_all = classifications.find({'subject_ids':sid,'user_name':{'$exists':True,'$nin':expert_names()}}).sort([("updated_at", -1)])
clist = list(c_all)
for c in clist:
try:
name = c['user_name']
except KeyError:
name = '<<< Anonymous >>>'
print '{0:25} {1}'.format(name,c['updated_at'])
return None
def rc(zid):
# Visually compare the expert and volunteer consensus for a subject
plt.ion()
classifiers_per_image(zid)
cons = checksum(zid,excluded=expert_names(),no_anonymous=True)
plot_consensus(cons,figno=1,savefig=False)
print '\nVolunteers: {0:d} sources'.format(len(cons['answer']))
cons_ex = checksum(zid,experts_only=True)
plot_consensus(cons_ex,figno=2,savefig=False)
print ' Experts: {0:d} sources'.format(len(cons_ex['answer']))
return None
def run_sample(survey,update=True,subset=None,do_plot=False,weights=0,scheme='scaling'):
# Run the consensus algorithm on the RGZ classifications
check_indices(('subject_ids','updated_at','zooniverse_id'))
filestem = "consensus_rgz_{0}".format(survey)
if subset is not None:
'''
Only run consensus for classifications of
expert100: the sample of 100 galaxies classified by science team
goldstandard: the gold standard sample of 20 galaxies classified by all users
This only applies to FIRST subjects; no (explicit) gold standard yet for ATLAS,
although there are the manual classifications in Norris et al. (2006).
'''
assert survey == 'first', \
"Subsets only exist for the FIRST data set, not {0}.".format(survey)
assert subset in ('expert100','goldstandard'), \
"Subset is {0}; must be either 'expert100' or 'goldstandard'".format(subset)
pathd = {'expert100':'expert/expert_all_zooniverse_ids.txt',
'goldstandard':'goldstandard/gs_zids.txt'}
with open('{0}/{1}'.format(rgz_path,pathd[subset]),'rb') as f:
zooniverse_ids = [line.rstrip() for line in f]
suffix = '_{0}'.format(subset)
else:
all_completed_zids = [cz['zooniverse_id'] for cz in subjects.find({'state':'complete','metadata.survey':survey})]
if update:
'''
Check to see which subjects have already been completed --
only run on subjects without an existing consensus.
'''
master_json = '{0}/json/{1}.json'.format(rgz_path,filestem)
with open(master_json,'r') as fm:
jmaster = json.load(fm)
already_finished_zids = []
for gal in jmaster:
already_finished_zids.append(gal['zid'])
zooniverse_ids = list(set(all_completed_zids) - set(already_finished_zids))
print "\n{0:d} RGZ subjects already in master catalog".format(len(already_finished_zids))
logging.info("\n{0:d} RGZ subjects already in master catalog".format(len(already_finished_zids)))
print "{0:d} RGZ subjects completed since last consensus catalog generation on {1}".format(len(zooniverse_ids),time.ctime(os.path.getmtime(master_json)))
logging.info("{0:d} RGZ subjects completed since last consensus catalog generation on {1}".format(len(zooniverse_ids), \
time.ctime(os.path.getmtime(master_json))))
else:
# Rerun consensus for every completed subject in RGZ.
zooniverse_ids = all_completed_zids
suffix = ''
# Remove the tutorial subject
tutorial_zid = "ARG0003r15"
try:
zooniverse_ids.remove(tutorial_zid)
except ValueError:
print '\nTutorial subject {0} not in list.'.format(tutorial_zid)
logging.info('\nTutorial subject {0} not in list.'.format(tutorial_zid))
print '\nLoaded data; running consensus algorithm on {0:d} completed RGZ subjects'.format(len(zooniverse_ids))
logging.info('\nLoaded data; running consensus algorithm on {0:d} completed RGZ subjects'.format(len(zooniverse_ids)))
# Empty files and objects for CSV, JSON output
json_output = []
    # CSV header: write it only when starting a new file; in update mode rows are
    # appended to the existing catalog, which already has a header.
    if update:
        fc = open('{0}/csv/{1}{2}.csv'.format(rgz_path,filestem,suffix),'a')
    else:
        fc = open('{0}/csv/{1}{2}.csv'.format(rgz_path,filestem,suffix),'w')
        fc.write('zooniverse_id,{0}_id,n_votes,n_total,consensus_level,n_radio,label,bbox,ir_peak,ir_level,ir_flag,n_ir\n'.format(survey))
for idx,zid in enumerate(zooniverse_ids):
# Check progress to screen
if not idx % 100:
print idx, datetime.datetime.now().strftime('%H:%M:%S.%f')
cons = checksum(zid,include_peak_data=do_plot,weights=weights,scheme=scheme)
if do_plot:
plot_consensus(cons,savefig=True)
# Save results to files
if cons is not None:
            # Explicit float division (Python 2) so the level isn't truncated to 0 or 1
            cons['consensus_level'] = 1.0*cons['n_votes']/cons['n_total']
# JSON
# Remove peak data from saved catalog; numpy arrays are not JSON serializable (may want to adjust later).
# http://stackoverflow.com/questions/3488934/simplejson-and-numpy-array/24375113#24375113
for ans in cons['answer']:
if cons['answer'][ans].has_key('peak_data'):
popvar = cons['answer'][ans].pop('peak_data',None)
json_output.append(cons)
# CSV
for ans in cons['answer'].itervalues():
try:
ir_peak = ans['ir_peak']
except KeyError:
ir_peak = ans['ir'] if ans.has_key('ir') else (-99,-99)
try:
                    fc.write('{0},{1},{2:4d},{3:4d},{4:.3f},{5:2d},{6},"{7}","{8}",{9:.3f},{10:d},{11:d}\n'.format( \
                        cons['zid'],cons['source'],cons['n_votes'],cons['n_total'],cons['consensus_level'], \
                        len(ans['xmax']),alphabet(ans['ind']),bbox_unravel(ans['bbox']),ir_peak,ans['ir_level'],ans['ir_flag'],ans['n_ir']))
except KeyError:
print zid
print cons
logging.warning((zid, cons))
# Mongo collection
for ans in cons['answer'].itervalues():
try:
ir_peak = ans['ir_peak']
except KeyError:
ir_peak = ans['ir'] if ans.has_key('ir') else (-99,-99)
try:
new_con = {'zooniverse_id':cons['zid'], '{0}_id'.format(survey):cons['source'], 'n_votes':cons['n_votes'], \
'n_total':cons['n_total'], 'consensus_level':cons['consensus_level'], 'n_radio':len(ans['xmax']), \
'label':alphabet(ans['ind']), 'bbox':bbox_unravel(ans['bbox']), 'ir_peak':ir_peak, 'ir_level':ans['ir_level'], \
'ir_flag':ans['ir_flag'], 'n_ir':ans['n_ir']}
consensus.insert(new_con)
except KeyError:
print zid
print cons
logging.warning((zid, cons))
# Close the new CSV file
fc.close()
# Write and close the new JSON file
if update:
jmaster.extend(json_output)
jfinal = jmaster
else:
jfinal = json_output
with open('{0}/json/{1}{2}.json'.format(rgz_path,filestem,suffix),'w') as fj:
json.dump(jfinal,fj)
# Make 75% version for full catalog
if subset is None:
# JSON
        json75 = filter(lambda a: (1.0*a['n_votes']/a['n_total']) >= 0.75, jfinal)
with open('{0}/json/{1}_75.json'.format(rgz_path,filestem),'w') as fj:
json.dump(json75,fj)
# CSV
import pandas as pd
cmaster = pd.read_csv('{0}/csv/{1}.csv'.format(rgz_path,filestem))
cmaster75 = cmaster[cmaster['consensus_level'] >= 0.75]
cmaster75.to_csv('{0}/csv/{1}_75.csv'.format(rgz_path,filestem),index=False)
print '\nCompleted consensus for {0}.'.format(survey)
logging.info('\nCompleted consensus for {0}.'.format(survey))
return None
def force_csv_update(survey='first',suffix=''):
# Force an update of the CSV file from the JSON, in case of errors.
filestem = 'consensus_rgz_{0}'.format(survey)
master_json = '{0}/json/{1}.json'.format(rgz_path,filestem)
with open(master_json,'r') as fm:
jmaster = json.load(fm)
fc = open('{0}/csv/{1}{2}.csv'.format(rgz_path,filestem,suffix),'w')
fc.write('zooniverse_id,{0}_id,n_votes,n_total,consensus_level,n_radio,label,bbox,ir_peak,ir_level,ir_flag,n_ir\n'.format(survey))
for gal in jmaster:
for ans in gal['answer'].itervalues():
try:
ir_peak = ans['ir_peak']
except KeyError:
ir_peak = ans['ir'] if ans.has_key('ir') else (-99,-99)
            fc.write('{0},{1},{2:4d},{3:4d},{4:.3f},{5:2d},{6},"{7}","{8}",{9:.3f},{10:d},{11:d}\n'.format(
gal['zid'],
gal['source'],
gal['n_votes'],
gal['n_total'],
gal['n_votes'] * 1./gal['n_total'],
len(ans['xmax']),
alphabet(ans['ind']),
bbox_unravel(ans['bbox']),
ir_peak,
ans['ir_level'],
ans['ir_flag'],
ans['n_ir']
)
)
fc.close()
return None
def bbox_unravel(bbox):
# Turn an array of tuple strings into floats
bboxes = []
for lobe in bbox:
t = [float(x) for x in lobe]
t = tuple(t)
bboxes.append(t)
return bboxes
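# Hypothetical example of bbox_unravel on Mongo-style string tuples:
#   bbox_unravel([(u'101.5', u'88.2', u'95.0', u'80.1')]) -> [(101.5, 88.2, 95.0, 80.1)]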
def alphabet(i):
# Return a letter (or set of duplicated letters) of the alphabet for a given integer
from string import letters
# For i > 25, letters begin multiplying: alphabet(0) = 'a'
# alphabet(1) = 'b'
# ...
# alphabet(25) = 'z'
# alphabet(26) = 'aa'
# alphabet(27) = 'bb'
# ...
#
lowercase = letters[26:]
try:
letter = lowercase[i % 26]*int(i/26 + 1)
return letter
except TypeError:
raise AssertionError("Index must be an integer")
def update_experts():
# Add field to classifications made by members of the expert science team. Takes ~1 minute to run.
import dateutil.parser
# Load saved data from the test runs
json_data = open('{0}/expert/expert_params.json'.format(rgz_path)).read()
experts = json.loads(json_data)
for ex in experts:
expert_dates = (dateutil.parser.parse(ex['started_at']),dateutil.parser.parse(ex['ended_at']))
classifications.update({"updated_at": {"$gt": expert_dates[0],"$lt":expert_dates[1]},"user_name":ex['expert_user']},{'$set':{'expert':True}},multi=True)
return None
def expert_names():
# Return list of Zooniverse user names for the science team
ex = [u'42jkb', u'ivywong', u'stasmanian', u'klmasters', u'Kevin', u'akapinska', u'enno.middelberg', u'xDocR', u'vrooje', u'KWillett', u'DocR']
return ex
def update_gs_subjects():
# Add field to the Mongo database designating the gold standard subjects.
with open('{0}/goldstandard/gs_zids.txt'.format(rgz_path),'r') as f:
for gal in f:
subjects.update({'zooniverse_id':gal.strip()},{'$set':{'goldstandard':True}})
return None
def get_unique_users():
# Find the usernames for all logged-in classifiers with at least one classification
check_indices(('user_name',))
print "Finding non-anonymous classifications"
logging.info("Finding non-anonymous classifications")
non_anonymous = classifications.find({"user_name":{"$exists":True}})
print "Finding user list"
logging.info("Finding user list")
users = [n['user_name'] for n in non_anonymous]
unique_users = set(users)
return unique_users
def weight_users(unique_users, scheme, min_gs=5, min_agree=0.5, scaling=5):
# min_gs is the minimum number of gold standard subjects user must have seen to determine agreement.
# Set to prevent upweighting on low information (eg, agreeing with the science team if the user has
    # only seen 1 gold standard object doesn't tell us as much as if they agreed 19/20 times).
# min_agree is the minimum level of agreement with the science team (N_agree / N_seen).
# scaling is the multiplicative factor for a sliding scale weighting scheme.
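    # Worked example (hypothetical numbers): a user who agreed with the science
    # team on 12 of 20 gold standard subjects gets
    #   weight = int(round(1.*scaling*12/20)) = 3 for scheme='scaling', scaling=5
    #   weight = 1 for scheme='threshold' (since 20 > min_gs and 12/20. > min_agree)
    # and checksum() then duplicates that user's classifications accordingly.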
print 'Calculating weights for {} users using {} method, using parameters:'.format(len(unique_users), scheme)
logging.info('Calculating weights for {} users using {} method, using parameters:'.format(len(unique_users), scheme))
if scheme == 'threshold':
output = '\tminimum gold standard classified = {}\n\tminimum agreement level = {}'.format(min_gs, min_agree)
else:
output = '\tminimum gold standard classified = {}\n\tscaling factor = {}'.format(min_gs, scaling)
print output
logging.info(output)
# Assigns a weight to users based on their agreement with the gold standard sample as classified by RGZ science team
gs_count = subjects.find({'goldstandard':True}).count()
if gs_count < 1:
update_gs_subjects()
ex_count = classifications.find({'expert':True}).count()
if ex_count < 1:
update_experts()
# Find the science team answers:
gs_zids = [s['zooniverse_id'] for s in subjects.find({"goldstandard":True})]
science_answers = {}
for zid in gs_zids:
s = checksum(zid,experts_only=True)
science_answers[zid] = s['answer'].keys()
gs_ids = [s['_id'] for s in subjects.find({"goldstandard":True})]
count = 0
# For each user, find the gold standard subjects they saw and whether it agreed with the experts
for u in list(unique_users):
count += 1
print count, u
agreed = 0
u_str = u.encode('utf8')
zid_seen = set()
# For each match, see if they agreed with the science team. If this happened more than once, only keep first classification.
for g in classifications.find({'user_name':u, 'subject_ids':{'$in':gs_ids}}):
zid = g['subjects'][0]['zooniverse_id']
if zid not in zid_seen:
zid_seen = zid_seen.union([zid])
their_answer = one_answer(zid,u)
their_checksums = their_answer['answer'].keys()
science_checksums = science_answers[zid]
if set(their_checksums) == set(science_checksums):
agreed += 1
gs_count = len(zid_seen)
# Save output Mongo
if scheme == 'threshold' and gs_count > min_gs and (1.*agreed/gs_count) > min_agree:
weight = 1
elif scheme == 'scaling' and gs_count > min_gs:
weight = int(round(1.*scaling*agreed/gs_count))
else:
weight = 0
user_weights.update({'user_name':u_str}, {'$set':{'agreed':agreed, 'gs_seen':gs_count, 'weight':weight}}, upsert=True)
return None
def print_user_weights():
# Prints the user weights to a CSV
# Note that user names can include commas
with open('{0}/csv/user_weights{1}.csv'.format(rgz_path, version), 'w') as f:
print >> f, 'user_name,gs_seen,agreed,weight'
for user in user_weights.find():
print >> f, '"{0}",{1},{2},{3}'.format(user['user_name'].encode('utf8'), user['gs_seen'], user['agreed'], user['weight'])
if __name__ == "__main__":
# Run the consensus pipeline from the command line
logging.basicConfig(filename='{}/{}'.format(rgz_path,logfile), level=logging.DEBUG, format='%(asctime)s: %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
logging.captureWarnings(True)
logging.info('Consensus run from command line')
try:
if pathdict != None:
            output = 'Starting at {0}'.format(datetime.datetime.now().strftime('%H:%M:%S.%f'))
logging.info(output)
print output
# update: default = True
#
# Set as True if you want to run the consensus only on the subjects completed
# since the last time the pipeline was run. If False, it will run it on the
# entire set of completed subjects (which takes about 6 hours for 10k images).
update = False
# subset: default = None
#
# Run the sample only on some specific subjects. Pre-defined subsets include:
# 'expert100': a sample of 100 galaxies classified by science team
# 'goldstandard': the gold standard sample of 20 galaxies classified by all users
# and the science team. All galaxies in 'goldstandard' are also in
# 'expert100'.
subset = None
# do_plot: default = False
#
# Set as True if you want to make the four-panel plots of the consensus for each subject.
# Useful, but adds to the total runtime.
do_plot = False
# weights: default = 0
#
# Execute weighting of the users based on their agreement with the science team
# on the gold standard subjects. If weights = 0 or weights = 1, each user's vote
# is counted equally in the consensus. If weights > 1, then their impact is
# increased by replicating the classifications. Must be a nonnegative integer.
weights = 5
assert (type(weights) == int) and weights >= 0, 'Weight must be a nonnegative integer'
scheme = 'scaling'
            assert scheme in ['threshold', 'scaling'], 'Weighting scheme must be threshold or scaling, not {}'.format(scheme)
# If you're using weights, make sure they're up to date
if weights > 1:
unique_users = get_unique_users()
weight_users(unique_users, scheme, min_gs=5, min_agree=0.5, scaling=weights)
# Run the consensus separately for different surveys, since the image parameters are different
for survey in ('atlas','first'):
run_sample(survey,update,subset,do_plot,weights,scheme)
            output = 'Finished at {0}'.format(datetime.datetime.now().strftime('%H:%M:%S.%f'))
logging.info(output)
print output
else:
# Needs to be able to find the raw image data to run the pipeline
print "\nAborting consensus.py - could not locate raw RGZ image data.\n"
logging.info("\nAborting consensus.py - could not locate raw RGZ image data.\n")
except BaseException as e:
logging.exception(e)
raise
| mit |
amolkahat/pandas | pandas/tests/io/generate_legacy_storage_files.py | 4 | 13646 | #!/usr/bin/env python
"""
self-contained to write legacy storage (pickle/msgpack) files
To use this script. Create an environment where you want
generate pickles, say its for 0.18.1, with your pandas clone
in ~/pandas
. activate pandas_0.18.1
cd ~/
$ python pandas/pandas/tests/io/generate_legacy_storage_files.py \
pandas/pandas/tests/io/data/legacy_pickle/0.18.1/ pickle
This script generates a storage file for the current arch, system,
and python version
pandas version: 0.18.1
output dir : pandas/pandas/tests/io/data/legacy_pickle/0.18.1/
storage format: pickle
created pickle file: 0.18.1_x86_64_darwin_3.5.2.pickle
The idea here is you are using the *current* version of the
generate_legacy_storage_files with an *older* version of pandas to
generate a pickle file. We will then check this file into a current
branch, and test using test_pickle.py. This will load the *older*
pickles and test versus the current data that is generated
(with master). These are then compared.
If we have cases where we changed the signature (e.g. we renamed
offset -> freq in Timestamp). Then we have to conditionally execute
in the generate_legacy_storage_files.py to make it
run under the older AND the newer version.
"""
from __future__ import print_function
from warnings import catch_warnings, filterwarnings
from distutils.version import LooseVersion
from pandas import (Series, DataFrame, Panel,
SparseSeries, SparseDataFrame,
Index, MultiIndex, bdate_range, to_msgpack,
date_range, period_range, timedelta_range,
Timestamp, NaT, Categorical, Period)
from pandas.tseries.offsets import (
DateOffset, Hour, Minute, Day,
MonthBegin, MonthEnd, YearBegin,
YearEnd, Week, WeekOfMonth, LastWeekOfMonth,
BusinessDay, BusinessHour, CustomBusinessDay, FY5253,
Easter,
SemiMonthEnd, SemiMonthBegin,
QuarterBegin, QuarterEnd)
from pandas.compat import u
import os
import sys
import numpy as np
import pandas
import platform as pl
from datetime import timedelta
_loose_version = LooseVersion(pandas.__version__)
def _create_sp_series():
nan = np.nan
# nan-based
arr = np.arange(15, dtype=np.float64)
arr[7:12] = nan
arr[-1:] = nan
bseries = SparseSeries(arr, kind='block')
bseries.name = u'bseries'
return bseries
def _create_sp_tsseries():
nan = np.nan
# nan-based
arr = np.arange(15, dtype=np.float64)
arr[7:12] = nan
arr[-1:] = nan
date_index = bdate_range('1/1/2011', periods=len(arr))
bseries = SparseSeries(arr, index=date_index, kind='block')
bseries.name = u'btsseries'
return bseries
def _create_sp_frame():
nan = np.nan
data = {u'A': [nan, nan, nan, 0, 1, 2, 3, 4, 5, 6],
u'B': [0, 1, 2, nan, nan, nan, 3, 4, 5, 6],
u'C': np.arange(10).astype(np.int64),
u'D': [0, 1, 2, 3, 4, 5, nan, nan, nan, nan]}
dates = bdate_range('1/1/2011', periods=10)
return SparseDataFrame(data, index=dates)
def create_data():
""" create the pickle/msgpack data """
data = {
u'A': [0., 1., 2., 3., np.nan],
u'B': [0, 1, 0, 1, 0],
u'C': [u'foo1', u'foo2', u'foo3', u'foo4', u'foo5'],
u'D': date_range('1/1/2009', periods=5),
u'E': [0., 1, Timestamp('20100101'), u'foo', 2.]
}
scalars = dict(timestamp=Timestamp('20130101'),
period=Period('2012', 'M'))
index = dict(int=Index(np.arange(10)),
date=date_range('20130101', periods=10),
period=period_range('2013-01-01', freq='M', periods=10),
float=Index(np.arange(10, dtype=np.float64)),
uint=Index(np.arange(10, dtype=np.uint64)),
timedelta=timedelta_range('00:00:00', freq='30T', periods=10))
if _loose_version >= LooseVersion('0.18'):
from pandas import RangeIndex
index['range'] = RangeIndex(10)
if _loose_version >= LooseVersion('0.21'):
from pandas import interval_range
index['interval'] = interval_range(0, periods=10)
mi = dict(reg2=MultiIndex.from_tuples(
tuple(zip(*[[u'bar', u'bar', u'baz', u'baz', u'foo',
u'foo', u'qux', u'qux'],
[u'one', u'two', u'one', u'two', u'one',
u'two', u'one', u'two']])),
names=[u'first', u'second']))
series = dict(float=Series(data[u'A']),
int=Series(data[u'B']),
mixed=Series(data[u'E']),
ts=Series(np.arange(10).astype(np.int64),
index=date_range('20130101', periods=10)),
mi=Series(np.arange(5).astype(np.float64),
index=MultiIndex.from_tuples(
tuple(zip(*[[1, 1, 2, 2, 2],
[3, 4, 3, 4, 5]])),
names=[u'one', u'two'])),
dup=Series(np.arange(5).astype(np.float64),
index=[u'A', u'B', u'C', u'D', u'A']),
cat=Series(Categorical([u'foo', u'bar', u'baz'])),
dt=Series(date_range('20130101', periods=5)),
dt_tz=Series(date_range('20130101', periods=5,
tz='US/Eastern')),
period=Series([Period('2000Q1')] * 5))
mixed_dup_df = DataFrame(data)
mixed_dup_df.columns = list(u"ABCDA")
frame = dict(float=DataFrame({u'A': series[u'float'],
u'B': series[u'float'] + 1}),
int=DataFrame({u'A': series[u'int'],
u'B': series[u'int'] + 1}),
mixed=DataFrame({k: data[k]
for k in [u'A', u'B', u'C', u'D']}),
mi=DataFrame({u'A': np.arange(5).astype(np.float64),
u'B': np.arange(5).astype(np.int64)},
index=MultiIndex.from_tuples(
tuple(zip(*[[u'bar', u'bar', u'baz',
u'baz', u'baz'],
[u'one', u'two', u'one',
u'two', u'three']])),
names=[u'first', u'second'])),
dup=DataFrame(np.arange(15).reshape(5, 3).astype(np.float64),
columns=[u'A', u'B', u'A']),
cat_onecol=DataFrame({u'A': Categorical([u'foo', u'bar'])}),
cat_and_float=DataFrame({
u'A': Categorical([u'foo', u'bar', u'baz']),
u'B': np.arange(3).astype(np.int64)}),
mixed_dup=mixed_dup_df,
dt_mixed_tzs=DataFrame({
u'A': Timestamp('20130102', tz='US/Eastern'),
u'B': Timestamp('20130603', tz='CET')}, index=range(5)),
dt_mixed2_tzs=DataFrame({
u'A': Timestamp('20130102', tz='US/Eastern'),
u'B': Timestamp('20130603', tz='CET'),
u'C': Timestamp('20130603', tz='UTC')}, index=range(5))
)
with catch_warnings(record=True):
filterwarnings("ignore", "\\nPanel", FutureWarning)
mixed_dup_panel = Panel({u'ItemA': frame[u'float'],
u'ItemB': frame[u'int']})
mixed_dup_panel.items = [u'ItemA', u'ItemA']
panel = dict(float=Panel({u'ItemA': frame[u'float'],
u'ItemB': frame[u'float'] + 1}),
dup=Panel(
np.arange(30).reshape(3, 5, 2).astype(np.float64),
items=[u'A', u'B', u'A']),
mixed_dup=mixed_dup_panel)
cat = dict(int8=Categorical(list('abcdefg')),
int16=Categorical(np.arange(1000)),
int32=Categorical(np.arange(10000)))
timestamp = dict(normal=Timestamp('2011-01-01'),
nat=NaT,
tz=Timestamp('2011-01-01', tz='US/Eastern'))
if _loose_version < LooseVersion('0.19.2'):
timestamp['freq'] = Timestamp('2011-01-01', offset='D')
timestamp['both'] = Timestamp('2011-01-01', tz='Asia/Tokyo',
offset='M')
else:
timestamp['freq'] = Timestamp('2011-01-01', freq='D')
timestamp['both'] = Timestamp('2011-01-01', tz='Asia/Tokyo',
freq='M')
off = {'DateOffset': DateOffset(years=1),
'DateOffset_h_ns': DateOffset(hour=6, nanoseconds=5824),
'BusinessDay': BusinessDay(offset=timedelta(seconds=9)),
'BusinessHour': BusinessHour(normalize=True, n=6, end='15:14'),
'CustomBusinessDay': CustomBusinessDay(weekmask='Mon Fri'),
'SemiMonthBegin': SemiMonthBegin(day_of_month=9),
'SemiMonthEnd': SemiMonthEnd(day_of_month=24),
'MonthBegin': MonthBegin(1),
'MonthEnd': MonthEnd(1),
'QuarterBegin': QuarterBegin(1),
'QuarterEnd': QuarterEnd(1),
'Day': Day(1),
'YearBegin': YearBegin(1),
'YearEnd': YearEnd(1),
'Week': Week(1),
'Week_Tues': Week(2, normalize=False, weekday=1),
'WeekOfMonth': WeekOfMonth(week=3, weekday=4),
'LastWeekOfMonth': LastWeekOfMonth(n=1, weekday=3),
'FY5253': FY5253(n=2, weekday=6, startingMonth=7, variation="last"),
'Easter': Easter(),
'Hour': Hour(1),
'Minute': Minute(1)}
return dict(series=series,
frame=frame,
panel=panel,
index=index,
scalars=scalars,
mi=mi,
sp_series=dict(float=_create_sp_series(),
ts=_create_sp_tsseries()),
sp_frame=dict(float=_create_sp_frame()),
cat=cat,
timestamp=timestamp,
offsets=off)
def create_pickle_data():
data = create_data()
# Pre-0.14.1 versions generated non-unpicklable mixed-type frames and
# panels if their columns/items were non-unique.
if _loose_version < LooseVersion('0.14.1'):
del data['frame']['mixed_dup']
del data['panel']['mixed_dup']
if _loose_version < LooseVersion('0.17.0'):
del data['series']['period']
del data['scalars']['period']
return data
def _u(x):
return {u(k): _u(x[k]) for k in x} if isinstance(x, dict) else x
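# For example, _u({'a': {'b': 1}}) converts the keys recursively and returns
# {u'a': {u'b': 1}}; non-dict values are passed through unchanged.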
def create_msgpack_data():
data = create_data()
if _loose_version < LooseVersion('0.17.0'):
del data['frame']['mixed_dup']
del data['panel']['mixed_dup']
del data['frame']['dup']
del data['panel']['dup']
if _loose_version < LooseVersion('0.18.0'):
del data['series']['dt_tz']
del data['frame']['dt_mixed_tzs']
# Not supported
del data['sp_series']
del data['sp_frame']
del data['series']['cat']
del data['series']['period']
del data['frame']['cat_onecol']
del data['frame']['cat_and_float']
del data['scalars']['period']
if _loose_version < LooseVersion('0.23.0'):
del data['index']['interval']
del data['offsets']
return _u(data)
def platform_name():
return '_'.join([str(pandas.__version__), str(pl.machine()),
str(pl.system().lower()), str(pl.python_version())])
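# As in the example from the module docstring above, this yields a string like
# '0.18.1_x86_64_darwin_3.5.2', which becomes the pickle/msgpack filename below.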
def write_legacy_pickles(output_dir):
# make sure we are < 0.13 compat (in py3)
try:
from pandas.compat import zip, cPickle as pickle # noqa
except ImportError:
import pickle
version = pandas.__version__
print("This script generates a storage file for the current arch, system, "
"and python version")
print(" pandas version: {0}".format(version))
print(" output dir : {0}".format(output_dir))
print(" storage format: pickle")
pth = '{0}.pickle'.format(platform_name())
fh = open(os.path.join(output_dir, pth), 'wb')
pickle.dump(create_pickle_data(), fh, pickle.HIGHEST_PROTOCOL)
fh.close()
print("created pickle file: %s" % pth)
def write_legacy_msgpack(output_dir, compress):
version = pandas.__version__
print("This script generates a storage file for the current arch, "
"system, and python version")
print(" pandas version: {0}".format(version))
print(" output dir : {0}".format(output_dir))
print(" storage format: msgpack")
pth = '{0}.msgpack'.format(platform_name())
to_msgpack(os.path.join(output_dir, pth), create_msgpack_data(),
compress=compress)
print("created msgpack file: %s" % pth)
def write_legacy_file():
# force our cwd to be the first searched
sys.path.insert(0, '.')
if not (3 <= len(sys.argv) <= 4):
exit("Specify output directory and storage type: generate_legacy_"
"storage_files.py <output_dir> <storage_type> "
"<msgpack_compress_type>")
output_dir = str(sys.argv[1])
storage_type = str(sys.argv[2])
try:
compress_type = str(sys.argv[3])
except IndexError:
compress_type = None
if storage_type == 'pickle':
write_legacy_pickles(output_dir=output_dir)
elif storage_type == 'msgpack':
write_legacy_msgpack(output_dir=output_dir, compress=compress_type)
else:
exit("storage_type must be one of {'pickle', 'msgpack'}")
if __name__ == '__main__':
write_legacy_file()
| bsd-3-clause |
glennq/scikit-learn | sklearn/ensemble/gradient_boosting.py | 6 | 74941 | """Gradient Boosted Regression Trees
This module contains methods for fitting gradient boosted regression trees for
both classification and regression.
The module structure is the following:
- The ``BaseGradientBoosting`` base class implements a common ``fit`` method
for all the estimators in the module. Regression and classification
only differ in the concrete ``LossFunction`` used.
- ``GradientBoostingClassifier`` implements gradient boosting for
classification problems.
- ``GradientBoostingRegressor`` implements gradient boosting for
regression problems.
"""
# Authors: Peter Prettenhofer, Scott White, Gilles Louppe, Emanuele Olivetti,
# Arnaud Joly, Jacob Schreiber
# License: BSD 3 clause
from __future__ import print_function
from __future__ import division
from abc import ABCMeta
from abc import abstractmethod
from .base import BaseEnsemble
from ..base import BaseEstimator
from ..base import ClassifierMixin
from ..base import RegressorMixin
from ..externals import six
from ..feature_selection.from_model import _LearntSelectorMixin
from ._gradient_boosting import predict_stages
from ._gradient_boosting import predict_stage
from ._gradient_boosting import _random_sample_mask
import numbers
import numpy as np
from scipy import stats
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import issparse
from time import time
from ..tree.tree import DecisionTreeRegressor
from ..tree._tree import DTYPE
from ..tree._tree import TREE_LEAF
from ..utils import check_random_state
from ..utils import check_array
from ..utils import check_X_y
from ..utils import column_or_1d
from ..utils import check_consistent_length
from ..utils import deprecated
from ..utils.extmath import logsumexp
from ..utils.fixes import expit
from ..utils.fixes import bincount
from ..utils.stats import _weighted_percentile
from ..utils.validation import check_is_fitted
from ..utils.multiclass import check_classification_targets
from ..exceptions import NotFittedError
class QuantileEstimator(BaseEstimator):
"""An estimator predicting the alpha-quantile of the training targets."""
def __init__(self, alpha=0.9):
if not 0 < alpha < 1.0:
raise ValueError("`alpha` must be in (0, 1.0) but was %r" % alpha)
self.alpha = alpha
def fit(self, X, y, sample_weight=None):
if sample_weight is None:
self.quantile = stats.scoreatpercentile(y, self.alpha * 100.0)
else:
self.quantile = _weighted_percentile(y, sample_weight,
self.alpha * 100.0)
def predict(self, X):
check_is_fitted(self, 'quantile')
y = np.empty((X.shape[0], 1), dtype=np.float64)
y.fill(self.quantile)
return y
class MeanEstimator(BaseEstimator):
"""An estimator predicting the mean of the training targets."""
def fit(self, X, y, sample_weight=None):
if sample_weight is None:
self.mean = np.mean(y)
else:
self.mean = np.average(y, weights=sample_weight)
def predict(self, X):
check_is_fitted(self, 'mean')
y = np.empty((X.shape[0], 1), dtype=np.float64)
y.fill(self.mean)
return y
class LogOddsEstimator(BaseEstimator):
"""An estimator predicting the log odds ratio."""
scale = 1.0
def fit(self, X, y, sample_weight=None):
# pre-cond: pos, neg are encoded as 1, 0
if sample_weight is None:
pos = np.sum(y)
neg = y.shape[0] - pos
else:
pos = np.sum(sample_weight * y)
neg = np.sum(sample_weight * (1 - y))
if neg == 0 or pos == 0:
raise ValueError('y contains non binary labels.')
self.prior = self.scale * np.log(pos / neg)
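        # Illustrative numbers: with 75 positive and 25 negative samples,
        # prior = scale * log(75 / 25) = scale * log(3) ~= 1.0986 for scale = 1.0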
def predict(self, X):
check_is_fitted(self, 'prior')
y = np.empty((X.shape[0], 1), dtype=np.float64)
y.fill(self.prior)
return y
class ScaledLogOddsEstimator(LogOddsEstimator):
"""Log odds ratio scaled by 0.5 -- for exponential loss. """
scale = 0.5
class PriorProbabilityEstimator(BaseEstimator):
"""An estimator predicting the probability of each
class in the training data.
"""
def fit(self, X, y, sample_weight=None):
if sample_weight is None:
sample_weight = np.ones_like(y, dtype=np.float64)
class_counts = bincount(y, weights=sample_weight)
self.priors = class_counts / class_counts.sum()
def predict(self, X):
check_is_fitted(self, 'priors')
y = np.empty((X.shape[0], self.priors.shape[0]), dtype=np.float64)
y[:] = self.priors
return y
class ZeroEstimator(BaseEstimator):
"""An estimator that simply predicts zero. """
def fit(self, X, y, sample_weight=None):
if np.issubdtype(y.dtype, int):
# classification
self.n_classes = np.unique(y).shape[0]
if self.n_classes == 2:
self.n_classes = 1
else:
# regression
self.n_classes = 1
def predict(self, X):
check_is_fitted(self, 'n_classes')
y = np.empty((X.shape[0], self.n_classes), dtype=np.float64)
y.fill(0.0)
return y
class LossFunction(six.with_metaclass(ABCMeta, object)):
"""Abstract base class for various loss functions.
Attributes
----------
K : int
The number of regression trees to be induced;
1 for regression and binary classification;
``n_classes`` for multi-class classification.
"""
is_multi_class = False
def __init__(self, n_classes):
self.K = n_classes
def init_estimator(self):
"""Default ``init`` estimator for loss function. """
raise NotImplementedError()
@abstractmethod
def __call__(self, y, pred, sample_weight=None):
"""Compute the loss of prediction ``pred`` and ``y``. """
@abstractmethod
def negative_gradient(self, y, y_pred, **kargs):
"""Compute the negative gradient.
Parameters
---------
y : np.ndarray, shape=(n,)
The target labels.
y_pred : np.ndarray, shape=(n,):
The predictions.
"""
def update_terminal_regions(self, tree, X, y, residual, y_pred,
sample_weight, sample_mask,
learning_rate=1.0, k=0):
"""Update the terminal regions (=leaves) of the given tree and
updates the current predictions of the model. Traverses tree
and invokes template method `_update_terminal_region`.
Parameters
----------
tree : tree.Tree
The tree object.
X : ndarray, shape=(n, m)
The data array.
y : ndarray, shape=(n,)
The target labels.
residual : ndarray, shape=(n,)
The residuals (usually the negative gradient).
y_pred : ndarray, shape=(n,)
The predictions.
sample_weight : ndarray, shape=(n,)
The weight of each sample.
sample_mask : ndarray, shape=(n,)
The sample mask to be used.
learning_rate : float, default=0.1
learning rate shrinks the contribution of each tree by
``learning_rate``.
k : int, default 0
The index of the estimator being updated.
"""
# compute leaf for each sample in ``X``.
terminal_regions = tree.apply(X)
# mask all which are not in sample mask.
masked_terminal_regions = terminal_regions.copy()
masked_terminal_regions[~sample_mask] = -1
# update each leaf (= perform line search)
for leaf in np.where(tree.children_left == TREE_LEAF)[0]:
self._update_terminal_region(tree, masked_terminal_regions,
leaf, X, y, residual,
y_pred[:, k], sample_weight)
# update predictions (both in-bag and out-of-bag)
y_pred[:, k] += (learning_rate
* tree.value[:, 0, 0].take(terminal_regions, axis=0))
@abstractmethod
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
"""Template method for updating terminal regions (=leaves). """
class RegressionLossFunction(six.with_metaclass(ABCMeta, LossFunction)):
"""Base class for regression loss functions. """
def __init__(self, n_classes):
if n_classes != 1:
raise ValueError("``n_classes`` must be 1 for regression but "
"was %r" % n_classes)
super(RegressionLossFunction, self).__init__(n_classes)
class LeastSquaresError(RegressionLossFunction):
"""Loss function for least squares (LS) estimation.
Terminal regions need not to be updated for least squares. """
def init_estimator(self):
return MeanEstimator()
def __call__(self, y, pred, sample_weight=None):
if sample_weight is None:
return np.mean((y - pred.ravel()) ** 2.0)
else:
return (1.0 / sample_weight.sum() *
np.sum(sample_weight * ((y - pred.ravel()) ** 2.0)))
def negative_gradient(self, y, pred, **kargs):
return y - pred.ravel()
def update_terminal_regions(self, tree, X, y, residual, y_pred,
sample_weight, sample_mask,
learning_rate=1.0, k=0):
"""Least squares does not need to update terminal regions.
But it has to update the predictions.
"""
# update predictions
y_pred[:, k] += learning_rate * tree.predict(X).ravel()
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
pass
class LeastAbsoluteError(RegressionLossFunction):
"""Loss function for least absolute deviation (LAD) regression. """
def init_estimator(self):
return QuantileEstimator(alpha=0.5)
def __call__(self, y, pred, sample_weight=None):
if sample_weight is None:
return np.abs(y - pred.ravel()).mean()
else:
return (1.0 / sample_weight.sum() *
np.sum(sample_weight * np.abs(y - pred.ravel())))
def negative_gradient(self, y, pred, **kargs):
"""1.0 if y - pred > 0.0 else -1.0"""
pred = pred.ravel()
return 2.0 * (y - pred > 0.0) - 1.0
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
"""LAD updates terminal regions to median estimates. """
terminal_region = np.where(terminal_regions == leaf)[0]
sample_weight = sample_weight.take(terminal_region, axis=0)
diff = y.take(terminal_region, axis=0) - pred.take(terminal_region, axis=0)
tree.value[leaf, 0, 0] = _weighted_percentile(diff, sample_weight, percentile=50)
class HuberLossFunction(RegressionLossFunction):
"""Huber loss function for robust regression.
M-Regression proposed in Friedman 2001.
References
----------
J. Friedman, Greedy Function Approximation: A Gradient Boosting
Machine, The Annals of Statistics, Vol. 29, No. 5, 2001.
"""
def __init__(self, n_classes, alpha=0.9):
super(HuberLossFunction, self).__init__(n_classes)
self.alpha = alpha
self.gamma = None
def init_estimator(self):
return QuantileEstimator(alpha=0.5)
def __call__(self, y, pred, sample_weight=None):
pred = pred.ravel()
diff = y - pred
gamma = self.gamma
if gamma is None:
if sample_weight is None:
gamma = stats.scoreatpercentile(np.abs(diff), self.alpha * 100)
else:
gamma = _weighted_percentile(np.abs(diff), sample_weight, self.alpha * 100)
gamma_mask = np.abs(diff) <= gamma
if sample_weight is None:
sq_loss = np.sum(0.5 * diff[gamma_mask] ** 2.0)
lin_loss = np.sum(gamma * (np.abs(diff[~gamma_mask]) - gamma / 2.0))
loss = (sq_loss + lin_loss) / y.shape[0]
else:
sq_loss = np.sum(0.5 * sample_weight[gamma_mask] * diff[gamma_mask] ** 2.0)
lin_loss = np.sum(gamma * sample_weight[~gamma_mask] *
(np.abs(diff[~gamma_mask]) - gamma / 2.0))
loss = (sq_loss + lin_loss) / sample_weight.sum()
return loss
def negative_gradient(self, y, pred, sample_weight=None, **kargs):
pred = pred.ravel()
diff = y - pred
if sample_weight is None:
gamma = stats.scoreatpercentile(np.abs(diff), self.alpha * 100)
else:
gamma = _weighted_percentile(np.abs(diff), sample_weight, self.alpha * 100)
gamma_mask = np.abs(diff) <= gamma
residual = np.zeros((y.shape[0],), dtype=np.float64)
residual[gamma_mask] = diff[gamma_mask]
residual[~gamma_mask] = gamma * np.sign(diff[~gamma_mask])
self.gamma = gamma
return residual
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
terminal_region = np.where(terminal_regions == leaf)[0]
sample_weight = sample_weight.take(terminal_region, axis=0)
gamma = self.gamma
diff = (y.take(terminal_region, axis=0)
- pred.take(terminal_region, axis=0))
median = _weighted_percentile(diff, sample_weight, percentile=50)
diff_minus_median = diff - median
tree.value[leaf, 0] = median + np.mean(
np.sign(diff_minus_median) *
np.minimum(np.abs(diff_minus_median), gamma))
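# Illustrative sketch, not part of the original scikit-learn module: the Huber
# residual keeps the raw difference inside the band |y - pred| <= gamma and
# clips it to gamma * sign(y - pred) outside it, where gamma is the ``alpha``
# percentile of |y - pred|, exactly as in ``negative_gradient`` above. The
# helper name is invented here.
def _sketch_huber_residual(alpha=0.9):
    import numpy as np
    from scipy import stats
    y = np.array([0.0, 1.0, 2.0, 10.0])
    pred = np.array([0.1, 1.2, 1.5, 2.0])
    diff = y - pred
    gamma = stats.scoreatpercentile(np.abs(diff), alpha * 100)
    residual = np.where(np.abs(diff) <= gamma, diff, gamma * np.sign(diff))
    return gamma, residual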
class QuantileLossFunction(RegressionLossFunction):
"""Loss function for quantile regression.
    Quantile regression makes it possible to estimate the percentiles
of the conditional distribution of the target.
"""
def __init__(self, n_classes, alpha=0.9):
super(QuantileLossFunction, self).__init__(n_classes)
assert 0 < alpha < 1.0
self.alpha = alpha
self.percentile = alpha * 100.0
def init_estimator(self):
return QuantileEstimator(self.alpha)
def __call__(self, y, pred, sample_weight=None):
pred = pred.ravel()
diff = y - pred
alpha = self.alpha
mask = y > pred
if sample_weight is None:
loss = (alpha * diff[mask].sum() +
(1.0 - alpha) * diff[~mask].sum()) / y.shape[0]
else:
loss = ((alpha * np.sum(sample_weight[mask] * diff[mask]) +
(1.0 - alpha) * np.sum(sample_weight[~mask] * diff[~mask])) /
sample_weight.sum())
return loss
def negative_gradient(self, y, pred, **kargs):
alpha = self.alpha
pred = pred.ravel()
mask = y > pred
return (alpha * mask) - ((1.0 - alpha) * ~mask)
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
terminal_region = np.where(terminal_regions == leaf)[0]
diff = (y.take(terminal_region, axis=0)
- pred.take(terminal_region, axis=0))
sample_weight = sample_weight.take(terminal_region, axis=0)
val = _weighted_percentile(diff, sample_weight, self.percentile)
tree.value[leaf, 0] = val
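# Illustrative sketch, not part of the original scikit-learn module: the
# pinball (quantile) loss has negative gradient ``alpha`` wherever the model
# under-predicts (y > pred) and ``-(1 - alpha)`` wherever it over-predicts,
# mirroring ``(alpha * mask) - ((1.0 - alpha) * ~mask)`` above. The helper
# name is invented here.
def _sketch_quantile_negative_gradient(alpha=0.9):
    import numpy as np
    y = np.array([1.0, 2.0, 3.0])
    pred = np.array([0.0, 2.5, 2.5])
    mask = y > pred
    grad = alpha * mask - (1.0 - alpha) * ~mask
    return grad                            # [0.9, -0.1, 0.9] for alpha=0.9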
class ClassificationLossFunction(six.with_metaclass(ABCMeta, LossFunction)):
"""Base class for classification loss functions. """
def _score_to_proba(self, score):
"""Template method to convert scores to probabilities.
        A loss that does not support probabilities raises an error here.
"""
raise TypeError('%s does not support predict_proba' % type(self).__name__)
@abstractmethod
def _score_to_decision(self, score):
"""Template method to convert scores to decisions.
Returns int arrays.
"""
class BinomialDeviance(ClassificationLossFunction):
"""Binomial deviance loss function for binary classification.
Binary classification is a special case; here, we only need to
fit one tree instead of ``n_classes`` trees.
"""
def __init__(self, n_classes):
if n_classes != 2:
raise ValueError("{0:s} requires 2 classes.".format(
self.__class__.__name__))
# we only need to fit one tree for binary clf.
super(BinomialDeviance, self).__init__(1)
def init_estimator(self):
return LogOddsEstimator()
def __call__(self, y, pred, sample_weight=None):
"""Compute the deviance (= 2 * negative log-likelihood). """
# logaddexp(0, v) == log(1.0 + exp(v))
pred = pred.ravel()
if sample_weight is None:
return -2.0 * np.mean((y * pred) - np.logaddexp(0.0, pred))
else:
return (-2.0 / sample_weight.sum() *
np.sum(sample_weight * ((y * pred) - np.logaddexp(0.0, pred))))
def negative_gradient(self, y, pred, **kargs):
"""Compute the residual (= negative gradient). """
return y - expit(pred.ravel())
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
"""Make a single Newton-Raphson step.
        The node estimate is given by:

            sum(w * (y - prob)) / sum(w * prob * (1 - prob))

        We take advantage of the fact that y - prob = residual.
"""
terminal_region = np.where(terminal_regions == leaf)[0]
residual = residual.take(terminal_region, axis=0)
y = y.take(terminal_region, axis=0)
sample_weight = sample_weight.take(terminal_region, axis=0)
numerator = np.sum(sample_weight * residual)
denominator = np.sum(sample_weight * (y - residual) * (1 - y + residual))
if denominator == 0.0:
tree.value[leaf, 0, 0] = 0.0
else:
tree.value[leaf, 0, 0] = numerator / denominator
def _score_to_proba(self, score):
proba = np.ones((score.shape[0], 2), dtype=np.float64)
proba[:, 1] = expit(score.ravel())
proba[:, 0] -= proba[:, 1]
return proba
def _score_to_decision(self, score):
proba = self._score_to_proba(score)
return np.argmax(proba, axis=1)
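# Illustrative sketch, not part of the original scikit-learn module: for
# binomial deviance the raw ensemble scores are log-odds; ``expit`` (the
# logistic sigmoid) maps them to probabilities and the residual fitted by the
# next tree is ``y - expit(score)``, matching ``negative_gradient`` above.
# The helper name is invented here.
def _sketch_binomial_residual():
    import numpy as np
    from scipy.special import expit
    y = np.array([0.0, 1.0, 1.0])
    score = np.array([-2.0, 0.0, 3.0])     # log-odds produced by the ensemble
    prob = expit(score)                    # probabilities in (0, 1)
    residual = y - prob
    deviance = -2.0 * np.mean(y * score - np.logaddexp(0.0, score))
    return residual, deviance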
class MultinomialDeviance(ClassificationLossFunction):
"""Multinomial deviance loss function for multi-class classification.
For multi-class classification we need to fit ``n_classes`` trees at
each stage.
"""
is_multi_class = True
def __init__(self, n_classes):
if n_classes < 3:
raise ValueError("{0:s} requires more than 2 classes.".format(
self.__class__.__name__))
super(MultinomialDeviance, self).__init__(n_classes)
def init_estimator(self):
return PriorProbabilityEstimator()
def __call__(self, y, pred, sample_weight=None):
# create one-hot label encoding
Y = np.zeros((y.shape[0], self.K), dtype=np.float64)
for k in range(self.K):
Y[:, k] = y == k
if sample_weight is None:
return np.sum(-1 * (Y * pred).sum(axis=1) +
logsumexp(pred, axis=1))
        else:
            return np.sum(sample_weight * (-1 * (Y * pred).sum(axis=1) +
                                           logsumexp(pred, axis=1)))
def negative_gradient(self, y, pred, k=0, **kwargs):
"""Compute negative gradient for the ``k``-th class. """
return y - np.nan_to_num(np.exp(pred[:, k] -
logsumexp(pred, axis=1)))
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
"""Make a single Newton-Raphson step. """
terminal_region = np.where(terminal_regions == leaf)[0]
residual = residual.take(terminal_region, axis=0)
y = y.take(terminal_region, axis=0)
sample_weight = sample_weight.take(terminal_region, axis=0)
numerator = np.sum(sample_weight * residual)
numerator *= (self.K - 1) / self.K
denominator = np.sum(sample_weight * (y - residual) *
(1.0 - y + residual))
if denominator == 0.0:
tree.value[leaf, 0, 0] = 0.0
else:
tree.value[leaf, 0, 0] = numerator / denominator
def _score_to_proba(self, score):
return np.nan_to_num(
np.exp(score - (logsumexp(score, axis=1)[:, np.newaxis])))
def _score_to_decision(self, score):
proba = self._score_to_proba(score)
return np.argmax(proba, axis=1)
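# Illustrative sketch, not part of the original scikit-learn module: with K
# classes the class probabilities are a numerically stable softmax of the K
# raw scores, which is what ``_score_to_proba`` above computes via
# ``logsumexp``; subtracting the row maximum below is an equivalent way to
# stabilise the exponentials. The helper name is invented here.
def _sketch_softmax_from_scores():
    import numpy as np
    score = np.array([[1.0, 0.0, -1.0],
                      [0.5, 0.5, 0.5]])
    shifted = score - score.max(axis=1, keepdims=True)
    proba = np.exp(shifted)
    proba /= proba.sum(axis=1, keepdims=True)
    assert np.allclose(proba.sum(axis=1), 1.0)
    return proba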
class ExponentialLoss(ClassificationLossFunction):
"""Exponential loss function for binary classification.
Same loss as AdaBoost.
References
----------
Greg Ridgeway, Generalized Boosted Models: A guide to the gbm package, 2007
"""
def __init__(self, n_classes):
if n_classes != 2:
raise ValueError("{0:s} requires 2 classes.".format(
self.__class__.__name__))
# we only need to fit one tree for binary clf.
super(ExponentialLoss, self).__init__(1)
def init_estimator(self):
return ScaledLogOddsEstimator()
def __call__(self, y, pred, sample_weight=None):
pred = pred.ravel()
if sample_weight is None:
return np.mean(np.exp(-(2. * y - 1.) * pred))
else:
return (1.0 / sample_weight.sum() *
np.sum(sample_weight * np.exp(-(2 * y - 1) * pred)))
def negative_gradient(self, y, pred, **kargs):
y_ = -(2. * y - 1.)
return y_ * np.exp(y_ * pred.ravel())
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
terminal_region = np.where(terminal_regions == leaf)[0]
pred = pred.take(terminal_region, axis=0)
y = y.take(terminal_region, axis=0)
sample_weight = sample_weight.take(terminal_region, axis=0)
y_ = 2. * y - 1.
numerator = np.sum(y_ * sample_weight * np.exp(-y_ * pred))
denominator = np.sum(sample_weight * np.exp(-y_ * pred))
if denominator == 0.0:
tree.value[leaf, 0, 0] = 0.0
else:
tree.value[leaf, 0, 0] = numerator / denominator
def _score_to_proba(self, score):
proba = np.ones((score.shape[0], 2), dtype=np.float64)
proba[:, 1] = expit(2.0 * score.ravel())
proba[:, 0] -= proba[:, 1]
return proba
def _score_to_decision(self, score):
return (score.ravel() >= 0.0).astype(np.int)
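# Illustrative sketch, not part of the original scikit-learn module: with the
# exponential (AdaBoost) loss the labels are internally mapped to {-1, +1};
# the predicted class is the sign of the score and probabilities are recovered
# with ``expit(2 * score)``, as in ``_score_to_proba`` above. The helper name
# is invented here.
def _sketch_exponential_decision():
    import numpy as np
    from scipy.special import expit
    score = np.array([-1.5, 0.2, 3.0])
    decision = (score >= 0.0).astype(int)  # 0/1 class labels
    proba_pos = expit(2.0 * score)         # P(y == 1)
    return decision, proba_pos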
LOSS_FUNCTIONS = {'ls': LeastSquaresError,
'lad': LeastAbsoluteError,
'huber': HuberLossFunction,
'quantile': QuantileLossFunction,
'deviance': None, # for both, multinomial and binomial
'exponential': ExponentialLoss,
}
INIT_ESTIMATORS = {'zero': ZeroEstimator}
class VerboseReporter(object):
"""Reports verbose output to stdout.
    If ``verbose == 1`` output is printed once in a while (whenever the
    iteration is a multiple of ``verbose_mod``); if ``verbose > 1`` output is
    printed for every update.
"""
def __init__(self, verbose):
self.verbose = verbose
def init(self, est, begin_at_stage=0):
# header fields and line format str
header_fields = ['Iter', 'Train Loss']
verbose_fmt = ['{iter:>10d}', '{train_score:>16.4f}']
# do oob?
if est.subsample < 1:
header_fields.append('OOB Improve')
verbose_fmt.append('{oob_impr:>16.4f}')
header_fields.append('Remaining Time')
verbose_fmt.append('{remaining_time:>16s}')
# print the header line
print(('%10s ' + '%16s ' *
(len(header_fields) - 1)) % tuple(header_fields))
self.verbose_fmt = ' '.join(verbose_fmt)
# plot verbose info each time i % verbose_mod == 0
self.verbose_mod = 1
self.start_time = time()
self.begin_at_stage = begin_at_stage
def update(self, j, est):
"""Update reporter with new iteration. """
do_oob = est.subsample < 1
# we need to take into account if we fit additional estimators.
i = j - self.begin_at_stage # iteration relative to the start iter
if (i + 1) % self.verbose_mod == 0:
oob_impr = est.oob_improvement_[j] if do_oob else 0
remaining_time = ((est.n_estimators - (j + 1)) *
(time() - self.start_time) / float(i + 1))
if remaining_time > 60:
remaining_time = '{0:.2f}m'.format(remaining_time / 60.0)
else:
remaining_time = '{0:.2f}s'.format(remaining_time)
print(self.verbose_fmt.format(iter=j + 1,
train_score=est.train_score_[j],
oob_impr=oob_impr,
remaining_time=remaining_time))
if self.verbose == 1 and ((i + 1) // (self.verbose_mod * 10) > 0):
# adjust verbose frequency (powers of 10)
self.verbose_mod *= 10
class BaseGradientBoosting(six.with_metaclass(ABCMeta, BaseEnsemble,
_LearntSelectorMixin)):
"""Abstract base class for Gradient Boosting. """
@abstractmethod
def __init__(self, loss, learning_rate, n_estimators, criterion,
min_samples_split, min_samples_leaf, min_weight_fraction_leaf,
max_depth, min_impurity_split, init, subsample, max_features,
random_state, alpha=0.9, verbose=0, max_leaf_nodes=None,
warm_start=False, presort='auto'):
self.n_estimators = n_estimators
self.learning_rate = learning_rate
self.loss = loss
self.criterion = criterion
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.subsample = subsample
self.max_features = max_features
self.max_depth = max_depth
self.min_impurity_split = min_impurity_split
self.init = init
self.random_state = random_state
self.alpha = alpha
self.verbose = verbose
self.max_leaf_nodes = max_leaf_nodes
self.warm_start = warm_start
self.presort = presort
self.estimators_ = np.empty((0, 0), dtype=np.object)
def _fit_stage(self, i, X, y, y_pred, sample_weight, sample_mask,
random_state, X_idx_sorted, X_csc=None, X_csr=None):
"""Fit another stage of ``n_classes_`` trees to the boosting model. """
assert sample_mask.dtype == np.bool
loss = self.loss_
original_y = y
for k in range(loss.K):
if loss.is_multi_class:
y = np.array(original_y == k, dtype=np.float64)
residual = loss.negative_gradient(y, y_pred, k=k,
sample_weight=sample_weight)
# induce regression tree on residuals
tree = DecisionTreeRegressor(
criterion=self.criterion,
splitter='best',
max_depth=self.max_depth,
min_samples_split=self.min_samples_split,
min_samples_leaf=self.min_samples_leaf,
min_weight_fraction_leaf=self.min_weight_fraction_leaf,
max_features=self.max_features,
max_leaf_nodes=self.max_leaf_nodes,
random_state=random_state,
presort=self.presort)
if self.subsample < 1.0:
# no inplace multiplication!
sample_weight = sample_weight * sample_mask.astype(np.float64)
if X_csc is not None:
tree.fit(X_csc, residual, sample_weight=sample_weight,
check_input=False, X_idx_sorted=X_idx_sorted)
else:
tree.fit(X, residual, sample_weight=sample_weight,
check_input=False, X_idx_sorted=X_idx_sorted)
# update tree leaves
if X_csr is not None:
loss.update_terminal_regions(tree.tree_, X_csr, y, residual, y_pred,
sample_weight, sample_mask,
self.learning_rate, k=k)
else:
loss.update_terminal_regions(tree.tree_, X, y, residual, y_pred,
sample_weight, sample_mask,
self.learning_rate, k=k)
# add tree to ensemble
self.estimators_[i, k] = tree
return y_pred
def _check_params(self):
"""Check validity of parameters and raise ValueError if not valid. """
if self.n_estimators <= 0:
raise ValueError("n_estimators must be greater than 0 but "
"was %r" % self.n_estimators)
if self.learning_rate <= 0.0:
raise ValueError("learning_rate must be greater than 0 but "
"was %r" % self.learning_rate)
if (self.loss not in self._SUPPORTED_LOSS
or self.loss not in LOSS_FUNCTIONS):
raise ValueError("Loss '{0:s}' not supported. ".format(self.loss))
if self.loss == 'deviance':
loss_class = (MultinomialDeviance
if len(self.classes_) > 2
else BinomialDeviance)
else:
loss_class = LOSS_FUNCTIONS[self.loss]
if self.loss in ('huber', 'quantile'):
self.loss_ = loss_class(self.n_classes_, self.alpha)
else:
self.loss_ = loss_class(self.n_classes_)
if not (0.0 < self.subsample <= 1.0):
raise ValueError("subsample must be in (0,1] but "
"was %r" % self.subsample)
if self.init is not None:
if isinstance(self.init, six.string_types):
if self.init not in INIT_ESTIMATORS:
raise ValueError('init="%s" is not supported' % self.init)
else:
if (not hasattr(self.init, 'fit')
or not hasattr(self.init, 'predict')):
raise ValueError("init=%r must be valid BaseEstimator "
"and support both fit and "
"predict" % self.init)
if not (0.0 < self.alpha < 1.0):
raise ValueError("alpha must be in (0.0, 1.0) but "
"was %r" % self.alpha)
if isinstance(self.max_features, six.string_types):
if self.max_features == "auto":
# if is_classification
if self.n_classes_ > 1:
max_features = max(1, int(np.sqrt(self.n_features)))
else:
# is regression
max_features = self.n_features
elif self.max_features == "sqrt":
max_features = max(1, int(np.sqrt(self.n_features)))
elif self.max_features == "log2":
max_features = max(1, int(np.log2(self.n_features)))
else:
raise ValueError("Invalid value for max_features: %r. "
"Allowed string values are 'auto', 'sqrt' "
"or 'log2'." % self.max_features)
elif self.max_features is None:
max_features = self.n_features
elif isinstance(self.max_features, (numbers.Integral, np.integer)):
max_features = self.max_features
else: # float
if 0. < self.max_features <= 1.:
max_features = max(int(self.max_features * self.n_features), 1)
else:
raise ValueError("max_features must be in (0, n_features]")
self.max_features_ = max_features
def _init_state(self):
"""Initialize model state and allocate model state data structures. """
if self.init is None:
self.init_ = self.loss_.init_estimator()
elif isinstance(self.init, six.string_types):
self.init_ = INIT_ESTIMATORS[self.init]()
else:
self.init_ = self.init
self.estimators_ = np.empty((self.n_estimators, self.loss_.K),
dtype=np.object)
self.train_score_ = np.zeros((self.n_estimators,), dtype=np.float64)
# do oob?
if self.subsample < 1.0:
self.oob_improvement_ = np.zeros((self.n_estimators),
dtype=np.float64)
def _clear_state(self):
"""Clear the state of the gradient boosting model. """
if hasattr(self, 'estimators_'):
self.estimators_ = np.empty((0, 0), dtype=np.object)
if hasattr(self, 'train_score_'):
del self.train_score_
if hasattr(self, 'oob_improvement_'):
del self.oob_improvement_
if hasattr(self, 'init_'):
del self.init_
def _resize_state(self):
"""Add additional ``n_estimators`` entries to all attributes. """
        # self.n_estimators is the new total number of estimators,
        # including the ones that have already been fitted
total_n_estimators = self.n_estimators
if total_n_estimators < self.estimators_.shape[0]:
raise ValueError('resize with smaller n_estimators %d < %d' %
                             (total_n_estimators, self.estimators_.shape[0]))
self.estimators_.resize((total_n_estimators, self.loss_.K))
self.train_score_.resize(total_n_estimators)
if (self.subsample < 1 or hasattr(self, 'oob_improvement_')):
# if do oob resize arrays or create new if not available
if hasattr(self, 'oob_improvement_'):
self.oob_improvement_.resize(total_n_estimators)
else:
self.oob_improvement_ = np.zeros((total_n_estimators,),
dtype=np.float64)
def _is_initialized(self):
return len(getattr(self, 'estimators_', [])) > 0
def _check_initialized(self):
"""Check that the estimator is initialized, raising an error if not."""
if self.estimators_ is None or len(self.estimators_) == 0:
raise NotFittedError("Estimator not fitted, call `fit`"
" before making predictions`.")
def fit(self, X, y, sample_weight=None, monitor=None):
"""Fit the gradient boosting model.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape = [n_samples]
Target values (integers in classification, real numbers in
regression)
For classification, labels must correspond to classes.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. In the case of
classification, splits are also ignored if they would result in any
single class carrying a negative weight in either child node.
monitor : callable, optional
The monitor is called after each iteration with the current
iteration, a reference to the estimator and the local variables of
``_fit_stages`` as keyword arguments ``callable(i, self,
locals())``. If the callable returns ``True`` the fitting procedure
is stopped. The monitor can be used for various things such as
computing held-out estimates, early stopping, model introspect, and
snapshoting.
Returns
-------
self : object
Returns self.
"""
# if not warmstart - clear the estimator state
if not self.warm_start:
self._clear_state()
# Check input
X, y = check_X_y(X, y, accept_sparse=['csr', 'csc', 'coo'], dtype=DTYPE)
n_samples, self.n_features = X.shape
if sample_weight is None:
sample_weight = np.ones(n_samples, dtype=np.float32)
else:
sample_weight = column_or_1d(sample_weight, warn=True)
check_consistent_length(X, y, sample_weight)
y = self._validate_y(y)
random_state = check_random_state(self.random_state)
self._check_params()
if not self._is_initialized():
# init state
self._init_state()
# fit initial model - FIXME make sample_weight optional
self.init_.fit(X, y, sample_weight)
# init predictions
y_pred = self.init_.predict(X)
begin_at_stage = 0
else:
# add more estimators to fitted model
# invariant: warm_start = True
if self.n_estimators < self.estimators_.shape[0]:
raise ValueError('n_estimators=%d must be larger or equal to '
'estimators_.shape[0]=%d when '
'warm_start==True'
% (self.n_estimators,
self.estimators_.shape[0]))
begin_at_stage = self.estimators_.shape[0]
y_pred = self._decision_function(X)
self._resize_state()
X_idx_sorted = None
presort = self.presort
# Allow presort to be 'auto', which means True if the dataset is dense,
# otherwise it will be False.
if presort == 'auto' and issparse(X):
presort = False
elif presort == 'auto':
presort = True
if presort == True:
if issparse(X):
raise ValueError("Presorting is not supported for sparse matrices.")
else:
X_idx_sorted = np.asfortranarray(np.argsort(X, axis=0),
dtype=np.int32)
# fit the boosting stages
n_stages = self._fit_stages(X, y, y_pred, sample_weight, random_state,
begin_at_stage, monitor, X_idx_sorted)
# change shape of arrays after fit (early-stopping or additional ests)
if n_stages != self.estimators_.shape[0]:
self.estimators_ = self.estimators_[:n_stages]
self.train_score_ = self.train_score_[:n_stages]
if hasattr(self, 'oob_improvement_'):
self.oob_improvement_ = self.oob_improvement_[:n_stages]
return self
def _fit_stages(self, X, y, y_pred, sample_weight, random_state,
begin_at_stage=0, monitor=None, X_idx_sorted=None):
"""Iteratively fits the stages.
For each stage it computes the progress (OOB, train score)
and delegates to ``_fit_stage``.
Returns the number of stages fit; might differ from ``n_estimators``
due to early stopping.
"""
n_samples = X.shape[0]
do_oob = self.subsample < 1.0
sample_mask = np.ones((n_samples, ), dtype=np.bool)
n_inbag = max(1, int(self.subsample * n_samples))
loss_ = self.loss_
# Set min_weight_leaf from min_weight_fraction_leaf
if self.min_weight_fraction_leaf != 0. and sample_weight is not None:
min_weight_leaf = (self.min_weight_fraction_leaf *
np.sum(sample_weight))
else:
min_weight_leaf = 0.
if self.verbose:
verbose_reporter = VerboseReporter(self.verbose)
verbose_reporter.init(self, begin_at_stage)
X_csc = csc_matrix(X) if issparse(X) else None
X_csr = csr_matrix(X) if issparse(X) else None
# perform boosting iterations
i = begin_at_stage
for i in range(begin_at_stage, self.n_estimators):
# subsampling
if do_oob:
sample_mask = _random_sample_mask(n_samples, n_inbag,
random_state)
# OOB score before adding this stage
old_oob_score = loss_(y[~sample_mask],
y_pred[~sample_mask],
sample_weight[~sample_mask])
# fit next stage of trees
y_pred = self._fit_stage(i, X, y, y_pred, sample_weight,
sample_mask, random_state, X_idx_sorted,
X_csc, X_csr)
# track deviance (= loss)
if do_oob:
self.train_score_[i] = loss_(y[sample_mask],
y_pred[sample_mask],
sample_weight[sample_mask])
self.oob_improvement_[i] = (
old_oob_score - loss_(y[~sample_mask],
y_pred[~sample_mask],
sample_weight[~sample_mask]))
else:
# no need to fancy index w/ no subsampling
self.train_score_[i] = loss_(y, y_pred, sample_weight)
if self.verbose > 0:
verbose_reporter.update(i, self)
if monitor is not None:
early_stopping = monitor(i, self, locals())
if early_stopping:
break
return i + 1
def _make_estimator(self, append=True):
# we don't need _make_estimator
raise NotImplementedError()
def _init_decision_function(self, X):
"""Check input and compute prediction of ``init``. """
self._check_initialized()
X = self.estimators_[0, 0]._validate_X_predict(X, check_input=True)
if X.shape[1] != self.n_features:
raise ValueError("X.shape[1] should be {0:d}, not {1:d}.".format(
self.n_features, X.shape[1]))
score = self.init_.predict(X).astype(np.float64)
return score
def _decision_function(self, X):
# for use in inner loop, not raveling the output in single-class case,
# not doing input validation.
score = self._init_decision_function(X)
predict_stages(self.estimators_, X, self.learning_rate, score)
return score
@deprecated(" and will be removed in 0.19")
def decision_function(self, X):
"""Compute the decision function of ``X``.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
score : array, shape = [n_samples, n_classes] or [n_samples]
The decision function of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
Regression and binary classification produce an array of shape
[n_samples].
"""
self._check_initialized()
X = self.estimators_[0, 0]._validate_X_predict(X, check_input=True)
score = self._decision_function(X)
if score.shape[1] == 1:
return score.ravel()
return score
def _staged_decision_function(self, X):
"""Compute decision function of ``X`` for each iteration.
        This method allows monitoring (i.e. determining the error on a test set)
after each stage.
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
score : generator of array, shape = [n_samples, k]
The decision function of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
Regression and binary classification are special cases with
``k == 1``, otherwise ``k==n_classes``.
"""
X = check_array(X, dtype=DTYPE, order="C", accept_sparse='csr')
score = self._init_decision_function(X)
for i in range(self.estimators_.shape[0]):
predict_stage(self.estimators_, i, X, self.learning_rate, score)
yield score.copy()
@deprecated(" and will be removed in 0.19")
def staged_decision_function(self, X):
"""Compute decision function of ``X`` for each iteration.
        This method allows monitoring (i.e. determining the error on a test set)
after each stage.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
score : generator of array, shape = [n_samples, k]
The decision function of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
Regression and binary classification are special cases with
``k == 1``, otherwise ``k==n_classes``.
"""
for dec in self._staged_decision_function(X):
# no yield from in Python2.X
yield dec
@property
def feature_importances_(self):
"""Return the feature importances (the higher, the more important the
feature).
Returns
-------
feature_importances_ : array, shape = [n_features]
"""
self._check_initialized()
total_sum = np.zeros((self.n_features, ), dtype=np.float64)
for stage in self.estimators_:
stage_sum = sum(tree.feature_importances_
for tree in stage) / len(stage)
total_sum += stage_sum
importances = total_sum / len(self.estimators_)
return importances
def _validate_y(self, y):
self.n_classes_ = 1
if y.dtype.kind == 'O':
y = y.astype(np.float64)
# Default implementation
return y
def apply(self, X):
"""Apply trees in the ensemble to X, return leaf indices.
.. versionadded:: 0.17
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, its dtype will be converted to
``dtype=np.float32``. If a sparse matrix is provided, it will
be converted to a sparse ``csr_matrix``.
Returns
-------
X_leaves : array_like, shape = [n_samples, n_estimators, n_classes]
For each datapoint x in X and for each tree in the ensemble,
            return the index of the leaf x ends up in.
In the case of binary classification n_classes is 1.
"""
self._check_initialized()
X = self.estimators_[0, 0]._validate_X_predict(X, check_input=True)
# n_classes will be equal to 1 in the binary classification or the
# regression case.
n_estimators, n_classes = self.estimators_.shape
leaves = np.zeros((X.shape[0], n_estimators, n_classes))
for i in range(n_estimators):
for j in range(n_classes):
estimator = self.estimators_[i, j]
leaves[:, i, j] = estimator.apply(X, check_input=False)
return leaves
class GradientBoostingClassifier(BaseGradientBoosting, ClassifierMixin):
"""Gradient Boosting for classification.
GB builds an additive model in a
forward stage-wise fashion; it allows for the optimization of
arbitrary differentiable loss functions. In each stage ``n_classes_``
regression trees are fit on the negative gradient of the
binomial or multinomial deviance loss function. Binary classification
is a special case where only a single regression tree is induced.
Read more in the :ref:`User Guide <gradient_boosting>`.
Parameters
----------
loss : {'deviance', 'exponential'}, optional (default='deviance')
loss function to be optimized. 'deviance' refers to
deviance (= logistic regression) for classification
with probabilistic outputs. For loss 'exponential' gradient
boosting recovers the AdaBoost algorithm.
learning_rate : float, optional (default=0.1)
learning rate shrinks the contribution of each tree by `learning_rate`.
There is a trade-off between learning_rate and n_estimators.
n_estimators : int (default=100)
The number of boosting stages to perform. Gradient boosting
is fairly robust to over-fitting so a large number usually
results in better performance.
max_depth : integer, optional (default=3)
maximum depth of the individual regression estimators. The maximum
depth limits the number of nodes in the tree. Tune this parameter
for best performance; the best value depends on the interaction
of the input variables.
criterion : string, optional (default="friedman_mse")
The function to measure the quality of a split. Supported criteria
are "friedman_mse" for the mean squared error with improvement
score by Friedman, "mse" for mean squared error, and "mae" for
the mean absolute error. The default value of "friedman_mse" is
generally the best as it can provide a better approximation in
some cases.
.. versionadded:: 0.18
min_samples_split : int, float, optional (default=2)
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a percentage and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
.. versionchanged:: 0.18
Added float values for percentages.
min_samples_leaf : int, float, optional (default=1)
The minimum number of samples required to be at a leaf node:
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a percentage and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
.. versionchanged:: 0.18
Added float values for percentages.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the sum total of weights (of all
the input samples) required to be at a leaf node. Samples have
equal weight when sample_weight is not provided.
subsample : float, optional (default=1.0)
The fraction of samples to be used for fitting the individual base
learners. If smaller than 1.0 this results in Stochastic Gradient
Boosting. `subsample` interacts with the parameter `n_estimators`.
Choosing `subsample < 1.0` leads to a reduction of variance
and an increase in bias.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Choosing `max_features < n_features` leads to a reduction of variance
and an increase in bias.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
min_impurity_split : float, optional (default=1e-7)
Threshold for early stopping in tree growth. A node will split
if its impurity is above the threshold, otherwise it is a leaf.
.. versionadded:: 0.18
init : BaseEstimator, None, optional (default=None)
An estimator object that is used to compute the initial
predictions. ``init`` has to provide ``fit`` and ``predict``.
If None it uses ``loss.init_estimator``.
verbose : int, default: 0
Enable verbose output. If 1 then it prints progress and performance
once in a while (the more trees the lower the frequency). If greater
than 1 then it prints progress and performance for every tree.
warm_start : bool, default: False
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just erase the
previous solution.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
presort : bool or 'auto', optional (default='auto')
Whether to presort the data to speed up the finding of best splits in
fitting. Auto mode by default will use presorting on dense data and
default to normal sorting on sparse data. Setting presort to true on
sparse data will raise an error.
.. versionadded:: 0.17
*presort* parameter.
Attributes
----------
feature_importances_ : array, shape = [n_features]
The feature importances (the higher, the more important the feature).
oob_improvement_ : array, shape = [n_estimators]
The improvement in loss (= deviance) on the out-of-bag samples
relative to the previous iteration.
``oob_improvement_[0]`` is the improvement in
loss of the first stage over the ``init`` estimator.
train_score_ : array, shape = [n_estimators]
The i-th score ``train_score_[i]`` is the deviance (= loss) of the
model at iteration ``i`` on the in-bag sample.
If ``subsample == 1`` this is the deviance on the training data.
loss_ : LossFunction
The concrete ``LossFunction`` object.
init : BaseEstimator
The estimator that provides the initial predictions.
Set via the ``init`` argument or ``loss.init_estimator``.
estimators_ : ndarray of DecisionTreeRegressor, shape = [n_estimators, ``loss_.K``]
The collection of fitted sub-estimators. ``loss_.K`` is 1 for binary
classification, otherwise n_classes.
See also
--------
sklearn.tree.DecisionTreeClassifier, RandomForestClassifier
AdaBoostClassifier
References
----------
J. Friedman, Greedy Function Approximation: A Gradient Boosting
Machine, The Annals of Statistics, Vol. 29, No. 5, 2001.
J. Friedman, Stochastic Gradient Boosting, 1999
T. Hastie, R. Tibshirani and J. Friedman.
Elements of Statistical Learning Ed. 2, Springer, 2009.
"""
_SUPPORTED_LOSS = ('deviance', 'exponential')
def __init__(self, loss='deviance', learning_rate=0.1, n_estimators=100,
subsample=1.0, criterion='friedman_mse', min_samples_split=2,
min_samples_leaf=1, min_weight_fraction_leaf=0.,
max_depth=3, min_impurity_split=1e-7, init=None,
random_state=None, max_features=None, verbose=0,
max_leaf_nodes=None, warm_start=False,
presort='auto'):
super(GradientBoostingClassifier, self).__init__(
loss=loss, learning_rate=learning_rate, n_estimators=n_estimators,
criterion=criterion, min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_depth=max_depth, init=init, subsample=subsample,
max_features=max_features,
random_state=random_state, verbose=verbose,
max_leaf_nodes=max_leaf_nodes,
min_impurity_split=min_impurity_split,
warm_start=warm_start,
presort=presort)
def _validate_y(self, y):
check_classification_targets(y)
self.classes_, y = np.unique(y, return_inverse=True)
self.n_classes_ = len(self.classes_)
return y
def decision_function(self, X):
"""Compute the decision function of ``X``.
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
score : array, shape = [n_samples, n_classes] or [n_samples]
The decision function of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
Regression and binary classification produce an array of shape
[n_samples].
"""
X = check_array(X, dtype=DTYPE, order="C", accept_sparse='csr')
score = self._decision_function(X)
if score.shape[1] == 1:
return score.ravel()
return score
def staged_decision_function(self, X):
"""Compute decision function of ``X`` for each iteration.
        This method allows monitoring (i.e. determining the error on a test set)
after each stage.
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
score : generator of array, shape = [n_samples, k]
The decision function of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
Regression and binary classification are special cases with
``k == 1``, otherwise ``k==n_classes``.
"""
for dec in self._staged_decision_function(X):
# no yield from in Python2.X
yield dec
def predict(self, X):
"""Predict class for X.
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
        y : array of shape = [n_samples]
The predicted values.
"""
score = self.decision_function(X)
decisions = self.loss_._score_to_decision(score)
return self.classes_.take(decisions, axis=0)
def staged_predict(self, X):
"""Predict class at each stage for X.
        This method allows monitoring (i.e. determining the error on a test set)
after each stage.
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
y : generator of array of shape = [n_samples]
The predicted value of the input samples.
"""
for score in self._staged_decision_function(X):
decisions = self.loss_._score_to_decision(score)
yield self.classes_.take(decisions, axis=0)
def predict_proba(self, X):
"""Predict class probabilities for X.
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Raises
------
AttributeError
If the ``loss`` does not support probabilities.
Returns
-------
        p : array of shape = [n_samples, n_classes]
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
score = self.decision_function(X)
try:
return self.loss_._score_to_proba(score)
except NotFittedError:
raise
except AttributeError:
raise AttributeError('loss=%r does not support predict_proba' %
self.loss)
def predict_log_proba(self, X):
"""Predict class log-probabilities for X.
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Raises
------
AttributeError
If the ``loss`` does not support probabilities.
Returns
-------
        p : array of shape = [n_samples, n_classes]
The class log-probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
proba = self.predict_proba(X)
return np.log(proba)
def staged_predict_proba(self, X):
"""Predict class probabilities at each stage for X.
        This method allows monitoring (i.e. determining the error on a test set)
after each stage.
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
        p : generator of array of shape = [n_samples, n_classes]
            The predicted class probabilities of the input samples.
"""
try:
for score in self._staged_decision_function(X):
yield self.loss_._score_to_proba(score)
except NotFittedError:
raise
except AttributeError:
raise AttributeError('loss=%r does not support predict_proba' %
self.loss)
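# Illustrative usage sketch, not part of the original scikit-learn module: a
# minimal end-to-end run of ``GradientBoostingClassifier`` on a synthetic
# binary problem, showing ``predict_proba`` and the per-stage predictions from
# ``staged_predict``. The helper name and the chosen hyper-parameters are
# arbitrary and for illustration only.
def _sketch_classifier_usage():
    import numpy as np
    from sklearn.datasets import make_classification
    from sklearn.model_selection import train_test_split
    X, y = make_classification(n_samples=300, n_features=10, random_state=0)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
    clf = GradientBoostingClassifier(n_estimators=100, learning_rate=0.1,
                                     max_depth=3, random_state=0)
    clf.fit(X_train, y_train)
    proba = clf.predict_proba(X_test)      # shape (n_test, 2) for binary y
    # accuracy after each boosting stage, useful for choosing n_estimators
    staged_acc = [np.mean(pred == y_test)
                  for pred in clf.staged_predict(X_test)]
    return clf.score(X_test, y_test), proba, staged_acc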
class GradientBoostingRegressor(BaseGradientBoosting, RegressorMixin):
"""Gradient Boosting for regression.
GB builds an additive model in a forward stage-wise fashion;
it allows for the optimization of arbitrary differentiable loss functions.
In each stage a regression tree is fit on the negative gradient of the
given loss function.
Read more in the :ref:`User Guide <gradient_boosting>`.
Parameters
----------
loss : {'ls', 'lad', 'huber', 'quantile'}, optional (default='ls')
loss function to be optimized. 'ls' refers to least squares
regression. 'lad' (least absolute deviation) is a highly robust
loss function solely based on order information of the input
variables. 'huber' is a combination of the two. 'quantile'
allows quantile regression (use `alpha` to specify the quantile).
learning_rate : float, optional (default=0.1)
learning rate shrinks the contribution of each tree by `learning_rate`.
There is a trade-off between learning_rate and n_estimators.
n_estimators : int (default=100)
The number of boosting stages to perform. Gradient boosting
is fairly robust to over-fitting so a large number usually
results in better performance.
max_depth : integer, optional (default=3)
maximum depth of the individual regression estimators. The maximum
depth limits the number of nodes in the tree. Tune this parameter
for best performance; the best value depends on the interaction
of the input variables.
criterion : string, optional (default="friedman_mse")
The function to measure the quality of a split. Supported criteria
are "friedman_mse" for the mean squared error with improvement
score by Friedman, "mse" for mean squared error, and "mae" for
the mean absolute error. The default value of "friedman_mse" is
generally the best as it can provide a better approximation in
some cases.
.. versionadded:: 0.18
min_samples_split : int, float, optional (default=2)
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a percentage and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
.. versionchanged:: 0.18
Added float values for percentages.
min_samples_leaf : int, float, optional (default=1)
The minimum number of samples required to be at a leaf node:
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a percentage and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
.. versionchanged:: 0.18
Added float values for percentages.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the sum total of weights (of all
the input samples) required to be at a leaf node. Samples have
equal weight when sample_weight is not provided.
subsample : float, optional (default=1.0)
The fraction of samples to be used for fitting the individual base
learners. If smaller than 1.0 this results in Stochastic Gradient
Boosting. `subsample` interacts with the parameter `n_estimators`.
Choosing `subsample < 1.0` leads to a reduction of variance
and an increase in bias.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Choosing `max_features < n_features` leads to a reduction of variance
and an increase in bias.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
min_impurity_split : float, optional (default=1e-7)
Threshold for early stopping in tree growth. A node will split
if its impurity is above the threshold, otherwise it is a leaf.
.. versionadded:: 0.18
alpha : float (default=0.9)
The alpha-quantile of the huber loss function and the quantile
loss function. Only if ``loss='huber'`` or ``loss='quantile'``.
init : BaseEstimator, None, optional (default=None)
An estimator object that is used to compute the initial
predictions. ``init`` has to provide ``fit`` and ``predict``.
If None it uses ``loss.init_estimator``.
verbose : int, default: 0
Enable verbose output. If 1 then it prints progress and performance
once in a while (the more trees the lower the frequency). If greater
than 1 then it prints progress and performance for every tree.
warm_start : bool, default: False
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just erase the
previous solution.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
presort : bool or 'auto', optional (default='auto')
Whether to presort the data to speed up the finding of best splits in
fitting. Auto mode by default will use presorting on dense data and
default to normal sorting on sparse data. Setting presort to true on
sparse data will raise an error.
.. versionadded:: 0.17
optional parameter *presort*.
Attributes
----------
feature_importances_ : array, shape = [n_features]
The feature importances (the higher, the more important the feature).
oob_improvement_ : array, shape = [n_estimators]
The improvement in loss (= deviance) on the out-of-bag samples
relative to the previous iteration.
``oob_improvement_[0]`` is the improvement in
loss of the first stage over the ``init`` estimator.
train_score_ : array, shape = [n_estimators]
The i-th score ``train_score_[i]`` is the deviance (= loss) of the
model at iteration ``i`` on the in-bag sample.
If ``subsample == 1`` this is the deviance on the training data.
loss_ : LossFunction
The concrete ``LossFunction`` object.
    init : BaseEstimator
The estimator that provides the initial predictions.
Set via the ``init`` argument or ``loss.init_estimator``.
estimators_ : ndarray of DecisionTreeRegressor, shape = [n_estimators, 1]
The collection of fitted sub-estimators.
See also
--------
DecisionTreeRegressor, RandomForestRegressor
References
----------
J. Friedman, Greedy Function Approximation: A Gradient Boosting
Machine, The Annals of Statistics, Vol. 29, No. 5, 2001.
J. Friedman, Stochastic Gradient Boosting, 1999
T. Hastie, R. Tibshirani and J. Friedman.
Elements of Statistical Learning Ed. 2, Springer, 2009.
"""
_SUPPORTED_LOSS = ('ls', 'lad', 'huber', 'quantile')
def __init__(self, loss='ls', learning_rate=0.1, n_estimators=100,
subsample=1.0, criterion='friedman_mse', min_samples_split=2,
min_samples_leaf=1, min_weight_fraction_leaf=0.,
max_depth=3, min_impurity_split=1e-7, init=None, random_state=None,
max_features=None, alpha=0.9, verbose=0, max_leaf_nodes=None,
warm_start=False, presort='auto'):
super(GradientBoostingRegressor, self).__init__(
loss=loss, learning_rate=learning_rate, n_estimators=n_estimators,
criterion=criterion, min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_depth=max_depth, init=init, subsample=subsample,
max_features=max_features, min_impurity_split=min_impurity_split,
random_state=random_state, alpha=alpha, verbose=verbose,
max_leaf_nodes=max_leaf_nodes, warm_start=warm_start,
presort=presort)
def predict(self, X):
"""Predict regression target for X.
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
y : array of shape = [n_samples]
The predicted values.
"""
X = check_array(X, dtype=DTYPE, order="C", accept_sparse='csr')
return self._decision_function(X).ravel()
def staged_predict(self, X):
"""Predict regression target at each stage for X.
        This method allows monitoring (i.e. determining the error on a test set)
after each stage.
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
y : generator of array of shape = [n_samples]
The predicted value of the input samples.
"""
for y in self._staged_decision_function(X):
yield y.ravel()
def apply(self, X):
"""Apply trees in the ensemble to X, return leaf indices.
.. versionadded:: 0.17
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, its dtype will be converted to
``dtype=np.float32``. If a sparse matrix is provided, it will
be converted to a sparse ``csr_matrix``.
Returns
-------
X_leaves : array_like, shape = [n_samples, n_estimators]
For each datapoint x in X and for each tree in the ensemble,
            return the index of the leaf x ends up in.
"""
leaves = super(GradientBoostingRegressor, self).apply(X)
leaves = leaves.reshape(X.shape[0], self.estimators_.shape[0])
return leaves
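# Illustrative usage sketch, not part of the original scikit-learn module: a
# minimal ``GradientBoostingRegressor`` run that uses the ``monitor`` callback
# documented in ``fit`` to stop boosting early; the callback receives the
# iteration index, the estimator and the local variables of ``_fit_stages``,
# and fitting stops when it returns True. The helper name and the cut-off of
# 50 stages are arbitrary and for illustration only.
def _sketch_regressor_with_monitor():
    from sklearn.datasets import make_friedman1
    X, y = make_friedman1(n_samples=200, random_state=0)

    def stop_after_50_stages(i, est, locals_):
        return i >= 49                     # returning True stops the loop

    est = GradientBoostingRegressor(n_estimators=500, random_state=0)
    est.fit(X, y, monitor=stop_after_50_stages)
    return est.estimators_.shape[0]        # 50, thanks to early stopping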
| bsd-3-clause |
nelson-liu/scikit-learn | examples/bicluster/plot_spectral_biclustering.py | 403 | 2011 | """
=============================================
A demo of the Spectral Biclustering algorithm
=============================================
This example demonstrates how to generate a checkerboard dataset and
bicluster it using the Spectral Biclustering algorithm.
The data is generated with the ``make_checkerboard`` function, then
shuffled and passed to the Spectral Biclustering algorithm. The rows
and columns of the shuffled matrix are rearranged to show the
biclusters found by the algorithm.
The outer product of the row and column label vectors shows a
representation of the checkerboard structure.
"""
print(__doc__)
# Author: Kemal Eren <[email protected]>
# License: BSD 3 clause
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import make_checkerboard
from sklearn.datasets import samples_generator as sg
from sklearn.cluster.bicluster import SpectralBiclustering
from sklearn.metrics import consensus_score
n_clusters = (4, 3)
data, rows, columns = make_checkerboard(
shape=(300, 300), n_clusters=n_clusters, noise=10,
shuffle=False, random_state=0)
plt.matshow(data, cmap=plt.cm.Blues)
plt.title("Original dataset")
data, row_idx, col_idx = sg._shuffle(data, random_state=0)
plt.matshow(data, cmap=plt.cm.Blues)
plt.title("Shuffled dataset")
model = SpectralBiclustering(n_clusters=n_clusters, method='log',
random_state=0)
model.fit(data)
score = consensus_score(model.biclusters_,
(rows[:, row_idx], columns[:, col_idx]))
print("consensus score: {:.1f}".format(score))
fit_data = data[np.argsort(model.row_labels_)]
fit_data = fit_data[:, np.argsort(model.column_labels_)]
plt.matshow(fit_data, cmap=plt.cm.Blues)
plt.title("After biclustering; rearranged to show biclusters")
plt.matshow(np.outer(np.sort(model.row_labels_) + 1,
np.sort(model.column_labels_) + 1),
cmap=plt.cm.Blues)
plt.title("Checkerboard structure of rearranged data")
plt.show()
| bsd-3-clause |
wazeerzulfikar/scikit-learn | sklearn/linear_model/tests/test_least_angle.py | 20 | 26139 | import warnings
import numpy as np
from scipy import linalg
from sklearn.model_selection import train_test_split
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import TempMemmap
from sklearn.exceptions import ConvergenceWarning
from sklearn import linear_model, datasets
from sklearn.linear_model.least_angle import _lars_path_residues
diabetes = datasets.load_diabetes()
X, y = diabetes.data, diabetes.target
# TODO: use another dataset that has multiple drops
def test_simple():
# Principle of Lars is to keep covariances tied and decreasing
# also test verbose output
from sklearn.externals.six.moves import cStringIO as StringIO
import sys
old_stdout = sys.stdout
try:
sys.stdout = StringIO()
alphas_, active, coef_path_ = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar", verbose=10)
sys.stdout = old_stdout
for (i, coef_) in enumerate(coef_path_.T):
res = y - np.dot(X, coef_)
cov = np.dot(X.T, res)
C = np.max(abs(cov))
eps = 1e-3
ocur = len(cov[C - eps < abs(cov)])
if i < X.shape[1]:
assert_true(ocur == i + 1)
else:
# no more than max_pred variables can go into the active set
assert_true(ocur == X.shape[1])
finally:
sys.stdout = old_stdout
def test_simple_precomputed():
# The same, with precomputed Gram matrix
G = np.dot(diabetes.data.T, diabetes.data)
alphas_, active, coef_path_ = linear_model.lars_path(
diabetes.data, diabetes.target, Gram=G, method="lar")
for i, coef_ in enumerate(coef_path_.T):
res = y - np.dot(X, coef_)
cov = np.dot(X.T, res)
C = np.max(abs(cov))
eps = 1e-3
ocur = len(cov[C - eps < abs(cov)])
if i < X.shape[1]:
assert_true(ocur == i + 1)
else:
# no more than max_pred variables can go into the active set
assert_true(ocur == X.shape[1])
def test_all_precomputed():
# Test that lars_path with precomputed Gram and Xy gives the right answer
X, y = diabetes.data, diabetes.target
G = np.dot(X.T, X)
Xy = np.dot(X.T, y)
for method in 'lar', 'lasso':
output = linear_model.lars_path(X, y, method=method)
output_pre = linear_model.lars_path(X, y, Gram=G, Xy=Xy, method=method)
for expected, got in zip(output, output_pre):
assert_array_almost_equal(expected, got)
def test_lars_lstsq():
# Test that Lars gives least square solution at the end
# of the path
X1 = 3 * diabetes.data # use un-normalized dataset
clf = linear_model.LassoLars(alpha=0.)
clf.fit(X1, y)
coef_lstsq = np.linalg.lstsq(X1, y)[0]
assert_array_almost_equal(clf.coef_, coef_lstsq)
def test_lasso_gives_lstsq_solution():
# Test that Lars Lasso gives least square solution at the end
# of the path
alphas_, active, coef_path_ = linear_model.lars_path(X, y, method="lasso")
coef_lstsq = np.linalg.lstsq(X, y)[0]
assert_array_almost_equal(coef_lstsq, coef_path_[:, -1])
def test_collinearity():
# Check that lars_path is robust to collinearity in input
X = np.array([[3., 3., 1.],
[2., 2., 0.],
[1., 1., 0]])
y = np.array([1., 0., 0])
rng = np.random.RandomState(0)
f = ignore_warnings
_, _, coef_path_ = f(linear_model.lars_path)(X, y, alpha_min=0.01)
assert_true(not np.isnan(coef_path_).any())
residual = np.dot(X, coef_path_[:, -1]) - y
assert_less((residual ** 2).sum(), 1.) # just make sure it's bounded
n_samples = 10
X = rng.rand(n_samples, 5)
y = np.zeros(n_samples)
_, _, coef_path_ = linear_model.lars_path(X, y, Gram='auto', copy_X=False,
copy_Gram=False, alpha_min=0.,
method='lasso', verbose=0,
max_iter=500)
assert_array_almost_equal(coef_path_, np.zeros_like(coef_path_))
def test_no_path():
# Test that the ``return_path=False`` option returns the correct output
alphas_, active_, coef_path_ = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar")
alpha_, active, coef = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar", return_path=False)
assert_array_almost_equal(coef, coef_path_[:, -1])
assert_true(alpha_ == alphas_[-1])
def test_no_path_precomputed():
# Test that the ``return_path=False`` option with Gram remains correct
G = np.dot(diabetes.data.T, diabetes.data)
alphas_, active_, coef_path_ = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar", Gram=G)
alpha_, active, coef = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar", Gram=G,
return_path=False)
assert_array_almost_equal(coef, coef_path_[:, -1])
assert_true(alpha_ == alphas_[-1])
def test_no_path_all_precomputed():
# Test that the ``return_path=False`` option with Gram and Xy remains
# correct
X, y = 3 * diabetes.data, diabetes.target
G = np.dot(X.T, X)
Xy = np.dot(X.T, y)
alphas_, active_, coef_path_ = linear_model.lars_path(
X, y, method="lasso", Gram=G, Xy=Xy, alpha_min=0.9)
print("---")
alpha_, active, coef = linear_model.lars_path(
X, y, method="lasso", Gram=G, Xy=Xy, alpha_min=0.9, return_path=False)
assert_array_almost_equal(coef, coef_path_[:, -1])
assert_true(alpha_ == alphas_[-1])
def test_lars_precompute():
# Check for different values of precompute
X, y = diabetes.data, diabetes.target
G = np.dot(X.T, X)
for classifier in [linear_model.Lars, linear_model.LarsCV,
linear_model.LassoLarsIC]:
clf = classifier(precompute=G)
output_1 = ignore_warnings(clf.fit)(X, y).coef_
for precompute in [True, False, 'auto', None]:
clf = classifier(precompute=precompute)
output_2 = clf.fit(X, y).coef_
assert_array_almost_equal(output_1, output_2, decimal=8)
def test_singular_matrix():
# Test when input is a singular matrix
X1 = np.array([[1, 1.], [1., 1.]])
y1 = np.array([1, 1])
alphas, active, coef_path = linear_model.lars_path(X1, y1)
assert_array_almost_equal(coef_path.T, [[0, 0], [1, 0]])
def test_rank_deficient_design():
# consistency test that checks that LARS Lasso handles rank-deficient
# input data (rank < n_features) in the same way
# as coordinate descent Lasso
y = [5, 0, 5]
for X in ([[5, 0],
[0, 5],
[10, 10]],
[[10, 10, 0],
[1e-32, 0, 0],
[0, 0, 1]],
):
# To be able to use the coefs to compute the objective function,
# we need to turn off normalization
lars = linear_model.LassoLars(.1, normalize=False)
coef_lars_ = lars.fit(X, y).coef_
obj_lars = (1. / (2. * 3.)
* linalg.norm(y - np.dot(X, coef_lars_)) ** 2
+ .1 * linalg.norm(coef_lars_, 1))
coord_descent = linear_model.Lasso(.1, tol=1e-6, normalize=False)
coef_cd_ = coord_descent.fit(X, y).coef_
obj_cd = ((1. / (2. * 3.)) * linalg.norm(y - np.dot(X, coef_cd_)) ** 2
+ .1 * linalg.norm(coef_cd_, 1))
assert_less(obj_lars, obj_cd * (1. + 1e-8))
def test_lasso_lars_vs_lasso_cd(verbose=False):
# Test that LassoLars and Lasso using coordinate descent give the
# same results.
X = 3 * diabetes.data
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso')
lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8)
for c, a in zip(lasso_path.T, alphas):
if a == 0:
continue
lasso_cd.alpha = a
lasso_cd.fit(X, y)
error = linalg.norm(c - lasso_cd.coef_)
assert_less(error, 0.01)
# similar test, with the classifiers
for alpha in np.linspace(1e-2, 1 - 1e-2, 20):
clf1 = linear_model.LassoLars(alpha=alpha, normalize=False).fit(X, y)
clf2 = linear_model.Lasso(alpha=alpha, tol=1e-8,
normalize=False).fit(X, y)
err = linalg.norm(clf1.coef_ - clf2.coef_)
assert_less(err, 1e-3)
# same test, with normalized data
X = diabetes.data
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso')
lasso_cd = linear_model.Lasso(fit_intercept=False, normalize=True,
tol=1e-8)
for c, a in zip(lasso_path.T, alphas):
if a == 0:
continue
lasso_cd.alpha = a
lasso_cd.fit(X, y)
error = linalg.norm(c - lasso_cd.coef_)
assert_less(error, 0.01)
def test_lasso_lars_vs_lasso_cd_early_stopping(verbose=False):
# Test that LassoLars and Lasso using coordinate descent give the
# same results when early stopping is used.
# (test : before, in the middle, and in the last part of the path)
alphas_min = [10, 0.9, 1e-4]
for alpha_min in alphas_min:
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',
alpha_min=alpha_min)
lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8)
lasso_cd.alpha = alphas[-1]
lasso_cd.fit(X, y)
error = linalg.norm(lasso_path[:, -1] - lasso_cd.coef_)
assert_less(error, 0.01)
# same test, with normalization
for alpha_min in alphas_min:
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',
alpha_min=alpha_min)
lasso_cd = linear_model.Lasso(fit_intercept=True, normalize=True,
tol=1e-8)
lasso_cd.alpha = alphas[-1]
lasso_cd.fit(X, y)
error = linalg.norm(lasso_path[:, -1] - lasso_cd.coef_)
assert_less(error, 0.01)
def test_lasso_lars_path_length():
# Test that the path length of the LassoLars is right
lasso = linear_model.LassoLars()
lasso.fit(X, y)
lasso2 = linear_model.LassoLars(alpha=lasso.alphas_[2])
lasso2.fit(X, y)
assert_array_almost_equal(lasso.alphas_[:3], lasso2.alphas_)
# Also check that the sequence of alphas is always decreasing
assert_true(np.all(np.diff(lasso.alphas_) < 0))
def test_lasso_lars_vs_lasso_cd_ill_conditioned():
# Test lasso lars on a very ill-conditioned design, and check that
# it does not blow up, and stays somewhat close to a solution given
# by the coordinate descent solver
# Also test that lasso_path (using lars_path output style) gives
# the same result as lars_path and previous lasso output style
# under these conditions.
rng = np.random.RandomState(42)
# Generate data
n, m = 70, 100
k = 5
X = rng.randn(n, m)
w = np.zeros((m, 1))
i = np.arange(0, m)
rng.shuffle(i)
supp = i[:k]
w[supp] = np.sign(rng.randn(k, 1)) * (rng.rand(k, 1) + 1)
y = np.dot(X, w)
sigma = 0.2
y += sigma * rng.rand(*y.shape)
y = y.squeeze()
lars_alphas, _, lars_coef = linear_model.lars_path(X, y, method='lasso')
_, lasso_coef2, _ = linear_model.lasso_path(X, y,
alphas=lars_alphas,
tol=1e-6,
fit_intercept=False)
assert_array_almost_equal(lars_coef, lasso_coef2, decimal=1)
def test_lasso_lars_vs_lasso_cd_ill_conditioned2():
# Create an ill-conditioned situation in which the LARS has to go
# far in the path to converge, and check that LARS and coordinate
# descent give the same answers
# Note: it used to be the case that Lars had to use the "drop for good"
# strategy for this, but this is no longer the case with the
# equality_tolerance checks
X = [[1e20, 1e20, 0],
[-1e-32, 0, 0],
[1, 1, 1]]
y = [10, 10, 1]
alpha = .0001
def objective_function(coef):
return (1. / (2. * len(X)) * linalg.norm(y - np.dot(X, coef)) ** 2
+ alpha * linalg.norm(coef, 1))
lars = linear_model.LassoLars(alpha=alpha, normalize=False)
assert_warns(ConvergenceWarning, lars.fit, X, y)
lars_coef_ = lars.coef_
lars_obj = objective_function(lars_coef_)
coord_descent = linear_model.Lasso(alpha=alpha, tol=1e-4, normalize=False)
cd_coef_ = coord_descent.fit(X, y).coef_
cd_obj = objective_function(cd_coef_)
assert_less(lars_obj, cd_obj * (1. + 1e-8))
def test_lars_add_features():
# assure that at least some features get added if necessary
# test for 6d2b4c
# Hilbert matrix
n = 5
H = 1. / (np.arange(1, n + 1) + np.arange(n)[:, np.newaxis])
clf = linear_model.Lars(fit_intercept=False).fit(
H, np.arange(n))
assert_true(np.all(np.isfinite(clf.coef_)))
def test_lars_n_nonzero_coefs(verbose=False):
lars = linear_model.Lars(n_nonzero_coefs=6, verbose=verbose)
lars.fit(X, y)
assert_equal(len(lars.coef_.nonzero()[0]), 6)
# The path should be of length 6 + 1 in a Lars going down to 6
# non-zero coefs
assert_equal(len(lars.alphas_), 7)
@ignore_warnings
def test_multitarget():
# Assure that estimators receiving multidimensional y do the right thing
X = diabetes.data
Y = np.vstack([diabetes.target, diabetes.target ** 2]).T
n_targets = Y.shape[1]
estimators = [
linear_model.LassoLars(),
linear_model.Lars(),
# regression test for gh-1615
linear_model.LassoLars(fit_intercept=False),
linear_model.Lars(fit_intercept=False),
]
for estimator in estimators:
estimator.fit(X, Y)
Y_pred = estimator.predict(X)
alphas, active, coef, path = (estimator.alphas_, estimator.active_,
estimator.coef_, estimator.coef_path_)
for k in range(n_targets):
estimator.fit(X, Y[:, k])
y_pred = estimator.predict(X)
assert_array_almost_equal(alphas[k], estimator.alphas_)
assert_array_almost_equal(active[k], estimator.active_)
assert_array_almost_equal(coef[k], estimator.coef_)
assert_array_almost_equal(path[k], estimator.coef_path_)
assert_array_almost_equal(Y_pred[:, k], y_pred)
def test_lars_cv():
# Test the LassoLarsCV object by checking that the optimal alpha
# increases as the number of samples increases.
# This property is not actually guaranteed in general and is just a
# property of the given dataset, with the given steps chosen.
old_alpha = 0
lars_cv = linear_model.LassoLarsCV()
for length in (400, 200, 100):
X = diabetes.data[:length]
y = diabetes.target[:length]
lars_cv.fit(X, y)
np.testing.assert_array_less(old_alpha, lars_cv.alpha_)
old_alpha = lars_cv.alpha_
assert_false(hasattr(lars_cv, 'n_nonzero_coefs'))
def test_lars_cv_max_iter():
with warnings.catch_warnings(record=True) as w:
X = diabetes.data
y = diabetes.target
rng = np.random.RandomState(42)
x = rng.randn(len(y))
X = np.c_[X, x, x] # add correlated features
lars_cv = linear_model.LassoLarsCV(max_iter=5)
lars_cv.fit(X, y)
assert_true(len(w) == 0)
def test_lasso_lars_ic():
# Test the LassoLarsIC object by checking that
# - some good features are selected.
# - alpha_bic > alpha_aic
# - n_nonzero_bic < n_nonzero_aic
lars_bic = linear_model.LassoLarsIC('bic')
lars_aic = linear_model.LassoLarsIC('aic')
rng = np.random.RandomState(42)
X = diabetes.data
y = diabetes.target
X = np.c_[X, rng.randn(X.shape[0], 5)] # add 5 bad features
lars_bic.fit(X, y)
lars_aic.fit(X, y)
nonzero_bic = np.where(lars_bic.coef_)[0]
nonzero_aic = np.where(lars_aic.coef_)[0]
assert_greater(lars_bic.alpha_, lars_aic.alpha_)
assert_less(len(nonzero_bic), len(nonzero_aic))
assert_less(np.max(nonzero_bic), diabetes.data.shape[1])
# test error on unknown IC
lars_broken = linear_model.LassoLarsIC('<unknown>')
assert_raises(ValueError, lars_broken.fit, X, y)
def test_lars_path_readonly_data():
# When using automated memory mapping on large input, the
# fold data is in read-only mode
# This is a non-regression test for:
# https://github.com/scikit-learn/scikit-learn/issues/4597
splitted_data = train_test_split(X, y, random_state=42)
with TempMemmap(splitted_data) as (X_train, X_test, y_train, y_test):
# The following should not fail despite copy=False
_lars_path_residues(X_train, y_train, X_test, y_test, copy=False)
def test_lars_path_positive_constraint():
# this is the main test for the positive parameter on the lars_path method
# the estimator classes just make use of this function
# we do the test on the diabetes dataset
# ensure that we get negative coefficients when positive=False
# and all positive when positive=True
# for method 'lar' (default) and lasso
for method in ['lar', 'lasso']:
alpha, active, coefs = \
linear_model.lars_path(diabetes['data'], diabetes['target'],
return_path=True, method=method,
positive=False)
assert_true(coefs.min() < 0)
alpha, active, coefs = \
linear_model.lars_path(diabetes['data'], diabetes['target'],
return_path=True, method=method,
positive=True)
assert_true(coefs.min() >= 0)
# now we are going to test the positive option for all estimator classes
default_parameter = {'fit_intercept': False}
estimator_parameter_map = {'Lars': {'n_nonzero_coefs': 5},
'LassoLars': {'alpha': 0.1},
'LarsCV': {},
'LassoLarsCV': {},
'LassoLarsIC': {}}
def test_estimatorclasses_positive_constraint():
# testing the transmissibility for the positive option of all estimator
# classes in this same function here
for estname in estimator_parameter_map:
params = default_parameter.copy()
params.update(estimator_parameter_map[estname])
estimator = getattr(linear_model, estname)(positive=False, **params)
estimator.fit(diabetes['data'], diabetes['target'])
assert_true(estimator.coef_.min() < 0)
estimator = getattr(linear_model, estname)(positive=True, **params)
estimator.fit(diabetes['data'], diabetes['target'])
assert_true(min(estimator.coef_) >= 0)
def test_lasso_lars_vs_lasso_cd_positive(verbose=False):
# Test that LassoLars and Lasso using coordinate descent give the
# same results when using the positive option
# This test is basically a copy of the above with the additional positive
# option. However, for the middle part (the comparison of coefficient
# values for a range of alphas) we had to make some adaptations. See below.
# not normalized data
X = 3 * diabetes.data
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',
positive=True)
lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8, positive=True)
for c, a in zip(lasso_path.T, alphas):
if a == 0:
continue
lasso_cd.alpha = a
lasso_cd.fit(X, y)
error = linalg.norm(c - lasso_cd.coef_)
assert_less(error, 0.01)
# The range of alphas chosen for coefficient comparison here is restricted
# as compared with the above test without the positive option. This is due
# to the circumstance that the Lars-Lasso algorithm does not converge to
# the least-squares-solution for small alphas, see 'Least Angle Regression'
# by Efron et al 2004. The coefficients are typically in congruence up to
# the smallest alpha reached by the Lars-Lasso algorithm and start to
# diverge thereafter. See
# https://gist.github.com/michigraber/7e7d7c75eca694c7a6ff
for alpha in np.linspace(6e-1, 1 - 1e-2, 20):
clf1 = linear_model.LassoLars(fit_intercept=False, alpha=alpha,
normalize=False, positive=True).fit(X, y)
clf2 = linear_model.Lasso(fit_intercept=False, alpha=alpha, tol=1e-8,
normalize=False, positive=True).fit(X, y)
err = linalg.norm(clf1.coef_ - clf2.coef_)
assert_less(err, 1e-3)
# normalized data
X = diabetes.data
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',
positive=True)
lasso_cd = linear_model.Lasso(fit_intercept=False, normalize=True,
tol=1e-8, positive=True)
for c, a in zip(lasso_path.T[:-1], alphas[:-1]): # don't include alpha=0
lasso_cd.alpha = a
lasso_cd.fit(X, y)
error = linalg.norm(c - lasso_cd.coef_)
assert_less(error, 0.01)
def test_lasso_lars_vs_R_implementation():
# Test that sklearn LassoLars implementation agrees with the LassoLars
# implementation available in R (lars library) under the following
# scenarios:
# 1) fit_intercept=False and normalize=False
# 2) fit_intercept=True and normalize=True
# Let's generate the data used in the bug report 7778
y = np.array([-6.45006793, -3.51251449, -8.52445396, 6.12277822,
-19.42109366])
x = np.array([[0.47299829, 0, 0, 0, 0],
[0.08239882, 0.85784863, 0, 0, 0],
[0.30114139, -0.07501577, 0.80895216, 0, 0],
[-0.01460346, -0.1015233, 0.0407278, 0.80338378, 0],
[-0.69363927, 0.06754067, 0.18064514, -0.0803561,
0.40427291]])
X = x.T
###########################################################################
# Scenario 1: Let's compare R vs sklearn when fit_intercept=False and
# normalize=False
###########################################################################
#
# The R result was obtained using the following code:
#
# library(lars)
# model_lasso_lars = lars(X, t(y), type="lasso", intercept=FALSE,
# trace=TRUE, normalize=FALSE)
# r = t(model_lasso_lars$beta)
#
r = np.array([[0, 0, 0, 0, 0, -79.810362809499026, -83.528788732782829,
-83.777653739190711, -83.784156932888934,
-84.033390591756657],
[0, 0, 0, 0, -0.476624256777266, 0, 0, 0, 0,
0.025219751009936],
[0, -3.577397088285891, -4.702795355871871,
-7.016748621359461, -7.614898471899412, -0.336938391359179,
0, 0, 0.001213370600853, 0.048162321585148],
[0, 0, 0, 2.231558436628169, 2.723267514525966,
2.811549786389614, 2.813766976061531, 2.817462468949557,
2.817368178703816, 2.816221090636795],
[0, 0, -1.218422599914637, -3.457726183014808,
-4.021304522060710, -45.827461592423745,
-47.776608869312305,
-47.911561610746404, -47.914845922736234,
-48.039562334265717]])
model_lasso_lars = linear_model.LassoLars(alpha=0, fit_intercept=False,
normalize=False)
model_lasso_lars.fit(X, y)
skl_betas = model_lasso_lars.coef_path_
assert_array_almost_equal(r, skl_betas, decimal=12)
###########################################################################
###########################################################################
# Scenario 2: Let's compare R vs sklearn when fit_intercept=True and
# normalize=True
#
# Note: When normalize is equal to True, R returns the coefficients in
# their original units, that is, they are rescaled back, whereas sklearn
# does not do that, therefore, we need to do this step before comparing
# their results.
###########################################################################
#
# The R result was obtained using the following code:
#
# library(lars)
# model_lasso_lars2 = lars(X, t(y), type="lasso", intercept=TRUE,
# trace=TRUE, normalize=TRUE)
# r2 = t(model_lasso_lars2$beta)
r2 = np.array([[0, 0, 0, 0, 0],
[0, 0, 0, 8.371887668009453, 19.463768371044026],
[0, 0, 0, 0, 9.901611055290553],
[0, 7.495923132833733, 9.245133544334507,
17.389369207545062, 26.971656815643499],
[0, 0, -1.569380717440311, -5.924804108067312,
-7.996385265061972]])
model_lasso_lars2 = linear_model.LassoLars(alpha=0, fit_intercept=True,
normalize=True)
model_lasso_lars2.fit(X, y)
skl_betas2 = model_lasso_lars2.coef_path_
# Let's rescale back the coefficients returned by sklearn before comparing
# against the R result (read the note above)
temp = X - np.mean(X, axis=0)
normx = np.sqrt(np.sum(temp ** 2, axis=0))
skl_betas2 /= normx[:, np.newaxis]
assert_array_almost_equal(r2, skl_betas2, decimal=12)
###########################################################################
| bsd-3-clause |
nkhuyu/blaze | blaze/expr/arithmetic.py | 5 | 8762 | from __future__ import absolute_import, division, print_function
import operator
from toolz import first
import numpy as np
import pandas as pd
from datashape import dshape, var, DataShape
from dateutil.parser import parse as dt_parse
from datashape.predicates import isscalar, isboolean, isnumeric, isdatelike
from datashape import coretypes as ct, discover, unsigned, promote, optionify
from .core import parenthesize, eval_str
from .expressions import Expr, shape, ElemWise
from ..dispatch import dispatch
from ..compatibility import _strtypes
__all__ = '''
BinOp
UnaryOp
Arithmetic
Add
Mult
Repeat
Sub
Div
FloorDiv
Pow
Mod
Interp
USub
Relational
Eq
Ne
Ge
Lt
Le
Gt
And
Or
Not
'''.split()
def name(o):
if hasattr(o, '_name'):
return o._name
else:
return None
class BinOp(ElemWise):
__slots__ = '_hash', 'lhs', 'rhs'
__inputs__ = 'lhs', 'rhs'
def __init__(self, lhs, rhs):
self.lhs = lhs
self.rhs = rhs
def __str__(self):
lhs = parenthesize(eval_str(self.lhs))
rhs = parenthesize(eval_str(self.rhs))
return '%s %s %s' % (lhs, self.symbol, rhs)
@property
def _name(self):
if not isscalar(self.dshape.measure):
return None
l, r = name(self.lhs), name(self.rhs)
if l and not r:
return l
if r and not l:
return r
if l == r:
return l
@property
def _inputs(self):
result = []
if isinstance(self.lhs, Expr):
result.append(self.lhs)
if isinstance(self.rhs, Expr):
result.append(self.rhs)
return tuple(result)
def maxvar(L):
"""
>>> maxvar([1, 2, var])
Var()
>>> maxvar([1, 2, 3])
3
"""
if var in L:
return var
else:
return max(L)
def maxshape(shapes):
"""
>>> maxshape([(10, 1), (1, 10), ()])
(10, 10)
>>> maxshape([(4, 5), (5,)])
(4, 5)
"""
shapes = [shape for shape in shapes if shape]
if not shapes:
return ()
ndim = max(map(len, shapes))
shapes = [(1,) * (ndim - len(shape)) + shape for shape in shapes]
for dims in zip(*shapes):
if len(set(dims) - set([1])) >= 2:
raise ValueError("Shapes don't align, %s" % str(dims))
return tuple(map(maxvar, zip(*shapes)))
class UnaryOp(ElemWise):
__slots__ = '_hash', '_child',
def __init__(self, child):
self._child = child
def __str__(self):
return '%s(%s)' % (self.symbol, eval_str(self._child))
@property
def symbol(self):
return type(self).__name__
@property
def dshape(self):
return DataShape(*(shape(self._child) + (self._dtype,)))
@property
def _name(self):
return self._child._name
class Arithmetic(BinOp):
""" Super class for arithmetic operators like add or mul """
@property
def _dtype(self):
# we can't simply use .schema or .datashape because we may have a bare
# integer, for example
lhs, rhs = discover(self.lhs).measure, discover(self.rhs).measure
return promote(lhs, rhs)
@property
def dshape(self):
# TODO: better inference. e.g. int + int -> int
return DataShape(*(maxshape([shape(self.lhs), shape(self.rhs)]) +
(self._dtype,)))
class Add(Arithmetic):
symbol = '+'
op = operator.add
class Mult(Arithmetic):
symbol = '*'
op = operator.mul
class Repeat(Arithmetic):
# Sequence repeat
symbol = '*'
op = operator.mul
class Sub(Arithmetic):
symbol = '-'
op = operator.sub
class Div(Arithmetic):
symbol = '/'
op = operator.truediv
@property
def _dtype(self):
lhs, rhs = discover(self.lhs).measure, discover(self.rhs).measure
return optionify(lhs, rhs, ct.float64)
class FloorDiv(Arithmetic):
symbol = '//'
op = operator.floordiv
@property
def _dtype(self):
lhs, rhs = discover(self.lhs).measure, discover(self.rhs).measure
is_unsigned = lhs in unsigned and rhs in unsigned
max_width = max(lhs.itemsize, rhs.itemsize)
prefix = 'u' if is_unsigned else ''
measure = getattr(ct, '%sint%d' % (prefix, max_width * 8))
return optionify(lhs, rhs, measure)
class Pow(Arithmetic):
symbol = '**'
op = operator.pow
class Mod(Arithmetic):
symbol = '%'
op = operator.mod
class Interp(Arithmetic):
# String interpolation
symbol = '%'
op = operator.mod
class USub(UnaryOp):
op = operator.neg
symbol = '-'
def __str__(self):
return '-%s' % parenthesize(eval_str(self._child))
@property
def _dtype(self):
# TODO: better inference. -uint -> int
return self._child.schema
@dispatch(ct.Option, object)
def scalar_coerce(ds, val):
if val or val == 0:
return scalar_coerce(ds.ty, val)
else:
return None
@dispatch((ct.Record, ct.Mono, ct.Option, DataShape), Expr)
def scalar_coerce(ds, val):
return val
@dispatch(ct.Date, _strtypes)
def scalar_coerce(_, val):
dt = dt_parse(val)
if dt.time():
raise ValueError("Can not coerce %s to type Date, "
"contains time information")
return dt.date()
@dispatch(ct.DateTime, _strtypes)
def scalar_coerce(_, val):
return pd.Timestamp(val)
@dispatch(ct.CType, _strtypes)
def scalar_coerce(dt, val):
return np.asscalar(np.asarray(val, dtype=dt.to_numpy_dtype()))
@dispatch(ct.Record, object)
def scalar_coerce(rec, val):
if len(rec.fields) == 1:
return scalar_coerce(first(rec.types), val)
else:
raise TypeError("Trying to coerce complex datashape\n"
"got dshape: %s\n"
"scalar_coerce only intended for scalar values" % rec)
@dispatch(ct.DataShape, object)
def scalar_coerce(ds, val):
return scalar_coerce(ds.measure, val)
@dispatch(object, object)
def scalar_coerce(dtype, val):
return val
@dispatch(_strtypes, object)
def scalar_coerce(ds, val):
return scalar_coerce(dshape(ds), val)
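# Editorial sketch (not part of the original module): how scalar_coerce is expected
# to behave on raw Python values; the literal inputs below are assumed examples.
# >>> scalar_coerce(dshape('date'), '2015-06-25') # parsed with dateutil; time part must be empty
# datetime.date(2015, 6, 25)
# >>> scalar_coerce(dshape('int32'), '1') # routed through numpy via CType.to_numpy_dtype
# 1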
def _neg(self):
return USub(self)
def _mkbin(name, cons, private=True, reflected=True):
prefix = '_' if private else ''
def _bin(self, other):
result = cons(self, scalar_coerce(self.dshape, other))
result.dshape # Check that shapes and dtypes match up
return result
_bin.__name__ = prefix + name
if reflected:
def _rbin(self, other):
result = cons(scalar_coerce(self.dshape, other), self)
result.dshape # Check that shapes and dtypes match up
return result
_rbin.__name__ = prefix + 'r' + name
return _bin, _rbin
return _bin
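# Editorial sketch of how the generated methods surface to users once they are
# attached via schema_method_list below; the symbol name and datashape are assumed
# examples, not defined in this module.
# >>> from blaze import symbol
# >>> t = symbol('t', 'var * {amount: int64}')
# >>> expr = t.amount + 1 # dispatches to the _add built by _mkbin('add', Add)
# >>> isinstance(expr, Add)
# True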
_add, _radd = _mkbin('add', Add)
_div, _rdiv = _mkbin('div', Div)
_floordiv, _rfloordiv = _mkbin('floordiv', FloorDiv)
_mod, _rmod = _mkbin('mod', Mod)
_mul, _rmul = _mkbin('mul', Mult)
_pow, _rpow = _mkbin('pow', Pow)
repeat = _mkbin('repeat', Repeat, reflected=False, private=False)
_sub, _rsub = _mkbin('sub', Sub)
interp = _mkbin('interp', Interp, reflected=False, private=False)
class Relational(Arithmetic):
_dtype = ct.bool_
class Eq(Relational):
symbol = '=='
op = operator.eq
class Ne(Relational):
symbol = '!='
op = operator.ne
class Ge(Relational):
symbol = '>='
op = operator.ge
class Le(Relational):
symbol = '<='
op = operator.le
class Gt(Relational):
symbol = '>'
op = operator.gt
class Lt(Relational):
symbol = '<'
op = operator.lt
class And(Arithmetic):
symbol = '&'
op = operator.and_
_dtype = ct.bool_
class Or(Arithmetic):
symbol = '|'
op = operator.or_
_dtype = ct.bool_
class Not(UnaryOp):
symbol = '~'
op = operator.invert
_dtype = ct.bool_
def __str__(self):
return '~%s' % parenthesize(eval_str(self._child))
_and, _rand = _mkbin('and', And)
_eq = _mkbin('eq', Eq, reflected=False)
_ge = _mkbin('ge', Ge, reflected=False)
_gt = _mkbin('gt', Gt, reflected=False)
_le = _mkbin('le', Le, reflected=False)
_lt = _mkbin('lt', Lt, reflected=False)
_ne = _mkbin('ne', Ne, reflected=False)
_or, _ror = _mkbin('or', Or)
def _invert(self):
result = Invert(self)
result.dshape # Check that shapes and dtypes match up
return result
Invert = Not
BitAnd = And
BitOr = Or
from .expressions import schema_method_list
schema_method_list.extend([
(isnumeric,
set([_add, _radd, _mul, _rmul, _div, _rdiv, _floordiv, _rfloordiv, _sub,
_rsub, _pow, _rpow, _mod, _rmod, _neg])),
(isscalar, set([_eq, _ne, _lt, _le, _gt, _ge])),
(isboolean, set([_or, _ror, _and, _rand, _invert])),
(isdatelike, set([_add, _radd, _sub, _rsub])),
])
| bsd-3-clause |
binghongcha08/pyQMD | GWP/QTGB/wft.py | 1 | 1271 | ##!/usr/bin/python
import numpy as np
import pylab as plt
import seaborn as sns
sns.set_context('poster')
#plt.subplot(1,1,1)
dat = np.genfromtxt(fname='wf0.dat')
data = np.genfromtxt(fname='wft.dat')
pot = np.genfromtxt(fname='pes.dat')
#data1 = np.genfromtxt(fname='../spo/1.0.3/wft3.dat')
#dat = np.genfromtxt(fname='../spo/1.0.3/wft.dat')
#data0 = np.genfromtxt('../spo_1d/t100')
dat1 = np.loadtxt('/home/bing/spo/spo_1d/wft')
pot[:,1] = pot[:,1]/16.0
plt.plot(data[:,0],data[:,1],'--',lw=1, label='$|\psi(x,t)|^2$')
plt.plot(pot[:,0],pot[:,1],'k',linewidth=2, label='V')
plt.plot(dat[:,0],dat[:,1],'k--',linewidth=2, label='$|\psi(x,0)|^2$')
plt.plot(dat1[:,0],dat1[:,1],'g--',linewidth=2,label='$|\psi(x,0)|^2$')
#plt.plot(data1[:,0],data1[:,1],'--',linewidth=2,label='QM')
#plt.scatter(data0[:,0],data0[:,1],label='QM')
#plt.plot(data1[:,0],data1[:,1],'k-.',linewidth=2, label='t0')
#plt.figure(1)
#plt.plot(x,y1,'-')
#plt.plot(x,y2,'g-')
plt.xlim(-10,6)
plt.xlabel('$x~ [bohr]$')
#plt.ylabel('$|\psi(x,t)|^2$')
#plt.title('traj')
#plt.subplot(2,1,2)
#data = np.genfromtxt(fname='c.dat')
#data = np.loadtxt('traj.dat')
#for x in range(1,10):
# plt.plot(data[:,0],data[:,x])
#plt.xlabel('time')
plt.legend()
plt.savefig('wft.pdf')
plt.show()
| gpl-3.0 |
zfrenchee/pandas | pandas/tests/indexing/test_indexing_slow.py | 5 | 3698 | # -*- coding: utf-8 -*-
import warnings
import numpy as np
import pandas as pd
from pandas.core.api import Series, DataFrame, MultiIndex
import pandas.util.testing as tm
import pytest
class TestIndexingSlow(object):
@pytest.mark.slow
def test_multiindex_get_loc(self): # GH7724, GH2646
with warnings.catch_warnings(record=True):
# test indexing into a multi-index before & past the lexsort depth
from numpy.random import randint, choice, randn
cols = ['jim', 'joe', 'jolie', 'joline', 'jolia']
def validate(mi, df, key):
mask = np.ones(len(df)).astype('bool')
# test for all partials of this key
for i, k in enumerate(key):
mask &= df.iloc[:, i] == k
if not mask.any():
assert key[:i + 1] not in mi.index
continue
assert key[:i + 1] in mi.index
right = df[mask].copy()
if i + 1 != len(key): # partial key
right.drop(cols[:i + 1], axis=1, inplace=True)
right.set_index(cols[i + 1:-1], inplace=True)
tm.assert_frame_equal(mi.loc[key[:i + 1]], right)
else: # full key
right.set_index(cols[:-1], inplace=True)
if len(right) == 1: # single hit
right = Series(right['jolia'].values,
name=right.index[0],
index=['jolia'])
tm.assert_series_equal(mi.loc[key[:i + 1]], right)
else: # multi hit
tm.assert_frame_equal(mi.loc[key[:i + 1]], right)
def loop(mi, df, keys):
for key in keys:
validate(mi, df, key)
n, m = 1000, 50
vals = [randint(0, 10, n), choice(
list('abcdefghij'), n), choice(
pd.date_range('20141009', periods=10).tolist(), n), choice(
list('ZYXWVUTSRQ'), n), randn(n)]
vals = list(map(tuple, zip(*vals)))
# bunch of keys for testing
keys = [randint(0, 11, m), choice(
list('abcdefghijk'), m), choice(
pd.date_range('20141009', periods=11).tolist(), m), choice(
list('ZYXWVUTSRQP'), m)]
keys = list(map(tuple, zip(*keys)))
keys += list(map(lambda t: t[:-1], vals[::n // m]))
# covers both unique index and non-unique index
df = DataFrame(vals, columns=cols)
a, b = pd.concat([df, df]), df.drop_duplicates(subset=cols[:-1])
for frame in a, b:
for i in range(5): # lexsort depth
df = frame.copy() if i == 0 else frame.sort_values(
by=cols[:i])
mi = df.set_index(cols[:-1])
assert not mi.index.lexsort_depth < i
loop(mi, df, keys)
@pytest.mark.slow
def test_large_dataframe_indexing(self):
# GH10692
result = DataFrame({'x': range(10 ** 6)}, dtype='int64')
result.loc[len(result)] = len(result) + 1
expected = DataFrame({'x': range(10 ** 6 + 1)}, dtype='int64')
tm.assert_frame_equal(result, expected)
@pytest.mark.slow
def test_large_mi_dataframe_indexing(self):
# GH10645
result = MultiIndex.from_arrays([range(10 ** 6), range(10 ** 6)])
assert (not (10 ** 6, 0) in result)
| bsd-3-clause |
tomolaf/trading-with-python | lib/backtest.py | 74 | 7381 | #-------------------------------------------------------------------------------
# Name: backtest
# Purpose: perform routine backtesting tasks.
# This module should be usable as a stand-alone library outside of the TWP package.
#
# Author: Jev Kuznetsov
#
# Created: 03/07/2014
# Copyright: (c) Jev Kuznetsov 2013
# Licence: BSD
#-------------------------------------------------------------------------------
import pandas as pd
import matplotlib.pyplot as plt
import sys
import numpy as np
def tradeBracket(price,entryBar,upper=None, lower=None, timeout=None):
'''
trade a bracket on a price series; return the exit price and the exit bar number
Input
------
price : numpy array of price values
entryBar: entry bar number, *determines entry price*
upper : high stop
lower : low stop
timeout : max number of periods to hold
Returns exit price and number of bars held
'''
assert isinstance(price, np.ndarray) , 'price must be a numpy array'
# create list of exit indices and add max trade duration. Exits are relative to entry bar
if timeout: # set trade length to timeout or series length
exits = [min(timeout,len(price)-entryBar-1)]
else:
exits = [len(price)-entryBar-1]
p = price[entryBar:entryBar+exits[0]+1] # subseries of price
# extend exits list with conditional exits
# check upper bracket
if upper:
assert upper>p[0] , 'Upper bracket must be higher than entry price '
idx = np.where(p>upper)[0] # find where price is higher than the upper bracket
if idx.any():
exits.append(idx[0]) # append first occurence
# same for lower bracket
if lower:
assert lower<p[0] , 'Lower bracket must be lower than entry price '
idx = np.where(p<lower)[0]
if idx.any():
exits.append(idx[0])
exitBar = min(exits) # choose first exit
return p[exitBar], exitBar
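# Illustrative usage sketch (editorial addition, values are made up): enter at bar 1
# with an upper bracket at 12.0, a lower bracket at 9.0 and a 10-bar timeout; the
# upper bracket is hit two bars after entry.
# >>> prices = np.array([10.0, 10.5, 10.8, 12.4, 9.5])
# >>> tradeBracket(prices, entryBar=1, upper=12.0, lower=9.0, timeout=10)
# (12.4, 2)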
class Backtest(object):
"""
Backtest class, simple vectorized one. Works with pandas objects.
"""
def __init__(self,price, signal, signalType='capital',initialCash = 0, roundShares=True):
"""
Arguments:
*price* Series with instrument price.
*signal* Series with capital to invest (long+,short-) or number of shares.
*signalType* 'capital' (amount to bet) or 'shares' (number of shares); 'capital' is the default.
*initialCash* starting cash.
*roundShares* round off number of shares to integers
"""
#TODO: add auto rebalancing
# check for correct input
assert signalType in ['capital','shares'], "Wrong signal type provided, must be 'capital' or 'shares'"
#save internal settings to a dict
self.settings = {'signalType':signalType}
# first thing to do is to clean up the signal, removing nans and duplicate entries or exits
self.signal = signal.ffill().fillna(0)
# now find dates with a trade
tradeIdx = self.signal.diff().fillna(0) !=0 # days with trades are set to True
if signalType == 'shares':
self.trades = self.signal[tradeIdx] # selected rows where tradeDir changes value. trades are in Shares
elif signalType =='capital':
self.trades = (self.signal[tradeIdx]/price[tradeIdx])
if roundShares:
self.trades = self.trades.round()
# now create internal data structure
self.data = pd.DataFrame(index=price.index , columns = ['price','shares','value','cash','pnl'])
self.data['price'] = price
self.data['shares'] = self.trades.reindex(self.data.index).ffill().fillna(0)
self.data['value'] = self.data['shares'] * self.data['price']
delta = self.data['shares'].diff() # shares bought sold
self.data['cash'] = (-delta*self.data['price']).fillna(0).cumsum()+initialCash
self.data['pnl'] = self.data['cash']+self.data['value']-initialCash
@property
def sharpe(self):
''' return annualized sharpe ratio of the pnl '''
pnl = (self.data['pnl'].diff()).shift(-1)[self.data['shares']!=0] # use only days with position.
return sharpe(pnl) # need the diff here as sharpe works on daily returns.
@property
def pnl(self):
'''easy access to pnl data column '''
return self.data['pnl']
def plotTrades(self):
"""
visualise trades on the price chart
long entry : green triangle up
short entry : red triangle down
exit : black circle
"""
l = ['price']
p = self.data['price']
p.plot(style='x-')
# ---plot markers
# this works, but I rather prefer colored markers for each day of position rather than entry-exit signals
# indices = {'g^': self.trades[self.trades > 0].index ,
# 'ko':self.trades[self.trades == 0].index,
# 'rv':self.trades[self.trades < 0].index}
#
#
# for style, idx in indices.iteritems():
# if len(idx) > 0:
# p[idx].plot(style=style)
# --- plot trades
#colored line for long positions
idx = (self.data['shares'] > 0) | (self.data['shares'] > 0).shift(1)
if idx.any():
p[idx].plot(style='go')
l.append('long')
#colored line for short positions
idx = (self.data['shares'] < 0) | (self.data['shares'] < 0).shift(1)
if idx.any():
p[idx].plot(style='ro')
l.append('short')
plt.xlim([p.index[0],p.index[-1]]) # show full axis
plt.legend(l,loc='best')
plt.title('trades')
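# Illustrative usage sketch (editorial addition, prices and signal are invented):
# in 'shares' mode the signal is the desired position size on each bar.
# >>> price = pd.Series([10.0, 11.0, 12.0, 11.5], index=pd.date_range('2014-01-01', periods=4))
# >>> signal = pd.Series([0, 100, 100, 0], index=price.index) # buy 100 shares on bar 2, flat on bar 4
# >>> bt = Backtest(price, signal, signalType='shares', initialCash=0)
# >>> bt.pnl.iloc[-1] # bought at 11, sold at 11.5
# 50.0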
class ProgressBar:
def __init__(self, iterations):
self.iterations = iterations
self.prog_bar = '[]'
self.fill_char = '*'
self.width = 50
self.__update_amount(0)
def animate(self, iteration):
print '\r',self,
sys.stdout.flush()
self.update_iteration(iteration + 1)
def update_iteration(self, elapsed_iter):
self.__update_amount((elapsed_iter / float(self.iterations)) * 100.0)
self.prog_bar += ' %d of %s complete' % (elapsed_iter, self.iterations)
def __update_amount(self, new_amount):
percent_done = int(round((new_amount / 100.0) * 100.0))
all_full = self.width - 2
num_hashes = int(round((percent_done / 100.0) * all_full))
self.prog_bar = '[' + self.fill_char * num_hashes + ' ' * (all_full - num_hashes) + ']'
pct_place = (len(self.prog_bar) // 2) - len(str(percent_done))
pct_string = '%d%%' % percent_done
self.prog_bar = self.prog_bar[0:pct_place] + \
(pct_string + self.prog_bar[pct_place + len(pct_string):])
def __str__(self):
return str(self.prog_bar)
def sharpe(pnl):
return np.sqrt(250)*pnl.mean()/pnl.std()
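# Editorial note (assumed figures): with ~250 trading days per year, a daily pnl mean
# of 0.1 and standard deviation of 1.0 gives sqrt(250) * 0.1 / 1.0, i.e. roughly 1.58.
# >>> round(sharpe(pd.Series([0.1, 1.1, -0.9, 0.1])), 2) # hypothetical pnl series
# 1.94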
| bsd-3-clause |
evanbiederstedt/RRBSfun | epiphen/cll_tests/total_CLL_sorted.py | 1 | 10446 | import glob
import pandas as pd
import numpy as np
pd.set_option('display.max_columns', 50) # show up to 50 columns when printing
import os
os.chdir("/gpfs/commons/home/biederstedte-934/evan_projects/correct_phylo_files")
cw154 = glob.glob("binary_position_RRBS_cw154*")
trito = glob.glob("binary_position_RRBS_trito_pool*")
print(len(cw154))
print(len(trito))
totalfiles = cw154 + trito
print(len(totalfiles))
df_list = []
for file in totalfiles:
df = pd.read_csv(file)
df = df.drop("Unnamed: 0", axis=1)
df_list.append(df)
print(len(df_list))
total_matrix = pd.concat([df.set_index("position") for df in df_list], axis=1).reset_index().astype(object)
indexed_matrix = total_matrix ## keep a copy for index of genomic coordinates
total_matrix = total_matrix.drop("index", axis=1)
drop_columns = total_matrix ## keep copy in order to create 0/1/? matrix such that each character is a column
len(drop_columns.columns)
len(total_matrix.columns)
cell_samples = ['RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.ACAACC',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.ACCGCG',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.ACGTGG',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.ACTCAC',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.AGGATG',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.ATAGCG',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.ATCGAC',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.CAAGAG',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.CATGAC',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.CCTTCG',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.CGGTAG',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.CTCAGC',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.GACACG',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.GCATTC',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.GCTGCC',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.GGCATC',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.GTGAGG',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.TAGCGG',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.TATCTC',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.TCTCTG',
'RRBS_cw154_Tris_protease_CTCTCTAC.ACAACC',
'RRBS_cw154_Tris_protease_CTCTCTAC.ACCGCG',
'RRBS_cw154_Tris_protease_CTCTCTAC.ACGTGG',
'RRBS_cw154_Tris_protease_CTCTCTAC.ACTCAC',
'RRBS_cw154_Tris_protease_CTCTCTAC.AGGATG',
'RRBS_cw154_Tris_protease_CTCTCTAC.ATAGCG',
'RRBS_cw154_Tris_protease_CTCTCTAC.ATCGAC',
'RRBS_cw154_Tris_protease_CTCTCTAC.CATGAC',
'RRBS_cw154_Tris_protease_CTCTCTAC.CCTTCG',
'RRBS_cw154_Tris_protease_CTCTCTAC.CGGTAG',
'RRBS_cw154_Tris_protease_CTCTCTAC.CTATTG',
'RRBS_cw154_Tris_protease_CTCTCTAC.CTCAGC',
'RRBS_cw154_Tris_protease_CTCTCTAC.GACACG',
'RRBS_cw154_Tris_protease_CTCTCTAC.GCATTC',
'RRBS_cw154_Tris_protease_CTCTCTAC.GCTGCC',
'RRBS_cw154_Tris_protease_CTCTCTAC.GGCATC',
'RRBS_cw154_Tris_protease_CTCTCTAC.GTGAGG',
'RRBS_cw154_Tris_protease_CTCTCTAC.GTTGAG',
'RRBS_cw154_Tris_protease_CTCTCTAC.TAGCGG',
'RRBS_cw154_Tris_protease_CTCTCTAC.TATCTC',
'RRBS_cw154_Tris_protease_CTCTCTAC.TCTCTG',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.ACAACC',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.ACCGCG',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.ACGTGG',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.ACTCAC',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.AGGATG',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.ATAGCG',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.ATCGAC',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.CATGAC',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.CCTTCG',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.CGGTAG',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.CTATTG',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.CTCAGC',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.GACACG',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.GCATTC',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.GCTGCC',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.GGCATC',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.GTGAGG',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.GTTGAG',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.TAGCGG',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.TATCTC',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.TCTCTG',
'RRBS_trito_pool_1_TAAGGCGA.ACAACC',
'RRBS_trito_pool_1_TAAGGCGA.ACGTGG',
'RRBS_trito_pool_1_TAAGGCGA.ACTCAC',
'RRBS_trito_pool_1_TAAGGCGA.ATAGCG',
'RRBS_trito_pool_1_TAAGGCGA.ATCGAC',
'RRBS_trito_pool_1_TAAGGCGA.CAAGAG',
'RRBS_trito_pool_1_TAAGGCGA.CATGAC',
'RRBS_trito_pool_1_TAAGGCGA.CCTTCG',
'RRBS_trito_pool_1_TAAGGCGA.CGGTAG',
'RRBS_trito_pool_1_TAAGGCGA.CTATTG',
'RRBS_trito_pool_1_TAAGGCGA.GACACG',
'RRBS_trito_pool_1_TAAGGCGA.GCATTC',
'RRBS_trito_pool_1_TAAGGCGA.GCTGCC',
'RRBS_trito_pool_1_TAAGGCGA.GGCATC',
'RRBS_trito_pool_1_TAAGGCGA.GTGAGG',
'RRBS_trito_pool_1_TAAGGCGA.GTTGAG',
'RRBS_trito_pool_1_TAAGGCGA.TAGCGG',
'RRBS_trito_pool_1_TAAGGCGA.TATCTC',
'RRBS_trito_pool_1_TAAGGCGA.TCTCTG',
'RRBS_trito_pool_1_TAAGGCGA.TGACAG',
'RRBS_trito_pool_1_TAAGGCGA.TGCTGC',
'RRBS_trito_pool_2_CGTACTAG.ACAACC',
'RRBS_trito_pool_2_CGTACTAG.ACGTGG',
'RRBS_trito_pool_2_CGTACTAG.ACTCAC',
'RRBS_trito_pool_2_CGTACTAG.AGGATG',
'RRBS_trito_pool_2_CGTACTAG.ATAGCG',
'RRBS_trito_pool_2_CGTACTAG.ATCGAC',
'RRBS_trito_pool_2_CGTACTAG.CAAGAG',
'RRBS_trito_pool_2_CGTACTAG.CATGAC',
'RRBS_trito_pool_2_CGTACTAG.CCTTCG',
'RRBS_trito_pool_2_CGTACTAG.CGGTAG',
'RRBS_trito_pool_2_CGTACTAG.CTATTG',
'RRBS_trito_pool_2_CGTACTAG.GACACG',
'RRBS_trito_pool_2_CGTACTAG.GCATTC',
'RRBS_trito_pool_2_CGTACTAG.GCTGCC',
'RRBS_trito_pool_2_CGTACTAG.GGCATC',
'RRBS_trito_pool_2_CGTACTAG.GTGAGG',
'RRBS_trito_pool_2_CGTACTAG.GTTGAG',
'RRBS_trito_pool_2_CGTACTAG.TAGCGG',
'RRBS_trito_pool_2_CGTACTAG.TATCTC',
'RRBS_trito_pool_2_CGTACTAG.TCTCTG',
'RRBS_trito_pool_2_CGTACTAG.TGACAG']
total_matrix.columns = cell_samples
print(total_matrix.shape)
## >>> print(total_matrix.shape)
## (6336559, 104)
drop_columns = drop_columns.applymap(lambda x: int(x) if pd.notnull(x) else str("?"))
drop_columns = drop_columns.astype(str).apply(''.join)
drop_columns = drop_columns.reset_index()
total_matrix = total_matrix.applymap(lambda x: int(x) if pd.notnull(x) else str("?"))
total_matrix = total_matrix.astype(str).apply(''.join)
os.chdir("/gpfs/commons/home/biederstedte-934/evan_projects/CLL_tests")
tott = pd.Series(total_matrix.index.astype(str).str.cat(total_matrix.astype(str),' '))
tott_drop_columns = pd.Series(drop_columns.index.astype(str).str.cat(total_matrix.astype(str),' ')) ## [104 rows x 6336566 columns]
print(tott.shape)
print(tott_drop_columns.shape)
df_tott_column_position = tott_drop_columns.apply(lambda x: pd.Series(list(x))) ## [104 rows x 6336566 columns]
## extra NaN's here
df_tott_column_position_T = df_tott_column_position.T ## create transpose, and shift on columns [I don't think there is a pandas-efficient way to shift row elements left/right systematically]
for i in range(10): ## 0 to 9
df_tott_column_position_T[i]= df_tott_column_position_T[i].shift(2)
for i in range(90):
j = i + 10 ## 10 to 99
df_tott_column_position_T[j]= df_tott_column_position_T[j].shift(1)
df_tott_column_position = df_tott_column_position_T.T
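## Editorial sketch (not in the original script): Series.shift(n) pushes values down
## by n rows and fills the vacated top rows with NaN, e.g.
## >>> pd.Series(list("abc")).shift(1).tolist()
## [nan, 'a', 'b']
## The shifts of 2 (rows 0-9) and 1 (rows 10-99) above appear to pad the per-sample
## character rows so that the variable-width numeric row-index prefix lines up before
## the prefix columns are dropped below.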
df_tott_column_position.drop( df_tott_column_position.columns[[i for i in range(7)]], axis=1, inplace=True) ## drop the first 7 leading (prefix) columns
### rename columns
indexed_matrixT = indexed_matrix.T
df_tott_column_position.columns = indexed_matrixT.ix[0]
integers_to_sort = df_tott_column_position.columns.to_series().str.extract("([a-z-A-Z]+)(\d*)_(\d+)", expand=True) # use str.extract to get integers to sort
integers_to_sort[1] = integers_to_sort[1].str.zfill(2)
integers_to_sort[2] = integers_to_sort[2].str.zfill(10)
integers_to_sort["new_coordinates"] = integers_to_sort.apply(lambda x: "{}{}_{}".format(x[0],x[1],x[2]), axis=1)
df_tott_column_position.columns = integers_to_sort["new_coordinates"]
df_tott_column_position.columns.name = None
df_tott_column_position = df_tott_column_position.sort_index(axis=1)
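## Editorial sketch (assumes positions look like "chr2_123", which may differ from the
## real data): zero-padding makes lexicographic sorting match chromosome/position order.
## >>> "chr" + "2".zfill(2) + "_" + "123".zfill(10)
## 'chr02_0000000123'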
df_tott_column_position = df_tott_column_position.T
df_tott_column_position = df_tott_column_position.astype(str).apply(''.join)
new = pd.Series(cell_samples) + " " + df_tott_column_position
new.to_csv("total_CLL_all_sorted.phy", header=None, index=None)
| mit |
gfyoung/pandas | pandas/io/clipboards.py | 5 | 4337 | """ io on the clipboard """
from io import StringIO
import warnings
from pandas.core.dtypes.generic import ABCDataFrame
from pandas import get_option, option_context
def read_clipboard(sep=r"\s+", **kwargs): # pragma: no cover
r"""
Read text from clipboard and pass to read_csv.
Parameters
----------
sep : str, default '\s+'
A string or regex delimiter. The default of '\s+' denotes
one or more whitespace characters.
**kwargs
See read_csv for the full argument list.
Returns
-------
DataFrame
A parsed DataFrame object.
"""
encoding = kwargs.pop("encoding", "utf-8")
# only utf-8 is valid for passed value because that's what clipboard
# supports
if encoding is not None and encoding.lower().replace("-", "") != "utf8":
raise NotImplementedError("reading from clipboard only supports utf-8 encoding")
from pandas.io.clipboard import clipboard_get
from pandas.io.parsers import read_csv
text = clipboard_get()
# Try to decode (if needed, as "text" might already be a string here).
try:
text = text.decode(kwargs.get("encoding") or get_option("display.encoding"))
except AttributeError:
pass
# Excel copies into clipboard with \t separation
# inspect no more then the 10 first lines, if they
# all contain an equal number (>0) of tabs, infer
# that this came from excel and set 'sep' accordingly
lines = text[:10000].split("\n")[:-1][:10]
# Need to remove leading white space, since read_csv
# accepts:
# a b
# 0 1 2
# 1 3 4
counts = {x.lstrip().count("\t") for x in lines}
if len(lines) > 1 and len(counts) == 1 and counts.pop() != 0:
sep = "\t"
# Edge case where sep is specified to be None, return to default
if sep is None and kwargs.get("delim_whitespace") is None:
sep = r"\s+"
# Regex separator currently only works with python engine.
# Default to python if separator is multi-character (regex)
if len(sep) > 1 and kwargs.get("engine") is None:
kwargs["engine"] = "python"
elif len(sep) > 1 and kwargs.get("engine") == "c":
warnings.warn(
"read_clipboard with regex separator does not work properly with c engine"
)
return read_csv(StringIO(text), sep=sep, **kwargs)
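# Illustrative usage (editorial addition; assumes a table was copied from a spreadsheet,
# so the clipboard holds tab-separated text). pd.read_clipboard is the public wrapper
# that ends up here.
# >>> import pandas as pd
# >>> df = pd.read_clipboard() # tab separator inferred from the copied cells
# >>> df = pd.read_clipboard(sep=',') # or force an explicit separator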
def to_clipboard(obj, excel=True, sep=None, **kwargs): # pragma: no cover
"""
Attempt to write text representation of object to the system clipboard
The clipboard can be then pasted into Excel for example.
Parameters
----------
obj : the object to write to the clipboard
excel : boolean, defaults to True
if True, use the provided separator, writing in a csv
format for allowing easy pasting into excel.
if False, write a string representation of the object
to the clipboard
sep : optional, defaults to tab
other keywords are passed to to_csv
Notes
-----
Requirements for your platform
- Linux: xclip, or xsel (with PyQt4 modules)
- Windows:
- OS X:
"""
encoding = kwargs.pop("encoding", "utf-8")
# testing if an invalid encoding is passed to clipboard
if encoding is not None and encoding.lower().replace("-", "") != "utf8":
raise ValueError("clipboard only supports utf-8 encoding")
from pandas.io.clipboard import clipboard_set
if excel is None:
excel = True
if excel:
try:
if sep is None:
sep = "\t"
buf = StringIO()
# clipboard_set (pyperclip) expects unicode
obj.to_csv(buf, sep=sep, encoding="utf-8", **kwargs)
text = buf.getvalue()
clipboard_set(text)
return
except TypeError:
warnings.warn(
"to_clipboard in excel mode requires a single character separator."
)
elif sep is not None:
warnings.warn("to_clipboard with excel=False ignores the sep argument")
if isinstance(obj, ABCDataFrame):
# str(df) has various unhelpful defaults, like truncation
with option_context("display.max_colwidth", None):
objstr = obj.to_string(**kwargs)
else:
objstr = str(obj)
clipboard_set(objstr)
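# Illustrative usage (editorial addition): DataFrame.to_clipboard is the public entry
# point that ends up here.
# >>> import pandas as pd
# >>> df = pd.DataFrame({'a': [1, 2], 'b': [3, 4]})
# >>> df.to_clipboard(excel=True) # paste into a spreadsheet as separate cells
# >>> df.to_clipboard(excel=False) # paste the string representation instead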
| bsd-3-clause |
demisto/content | Packs/Base/Scripts/DBotFindSimilarIncidents/dbot_find_similar_incidents_test.py | 1 | 16188 | # from CommonServerPython import *
import pytest
from DBotFindSimilarIncidents import normalize_command_line, main, demisto, keep_high_level_field, \
preprocess_incidents_field, PREFIXES_TO_REMOVE, check_list_of_dict, REGEX_IP, match_one_regex, \
SIMILARITY_COLUNM_NAME_INDICATOR, SIMILARITY_COLUNM_NAME, euclidian_similarity_capped, find_incorrect_fields, \
MESSAGE_NO_INCIDENT_FETCHED, MESSAGE_INCORRECT_FIELD, MESSAGE_WARNING_TRUNCATED, COLUMN_ID, COLUMN_TIME, \
TAG_SCRIPT_INDICATORS
import json
import numpy as np
import pandas as pd
CURRENT_INCIDENT_NOT_EMPTY = [
{'id': '123', 'commandline': 'powershell IP=1.1.1.1', 'CustomFields': {"nested_field": 'value_nested_field'},
'empty_current_incident_field': None, 'empty_fetched_incident_field': 'empty_fetched_incident_field_1'}]
FETCHED_INCIDENT_NOT_EMPTY = [
{'id': '1', 'created': "2021-01-30", 'commandline': 'powershell IP=1.1.1.1',
'CustomFields': {"nested_field": 'value_nested_field_1'},
'empty_current_incident_field': 'empty_current_incident_field_1', 'empty_fetched_incident_field': None,
"name": "incident_name_1"},
{'id': '2', 'created': "2021-01-30", 'commandline': 'powershell IP=2.2.2.2',
'CustomFields': {"nested_field": 'value_nested_field_2'},
'empty_current_incident_field': 'empty_current_incident_field2', 'empty_fetched_incident_field': "",
"name": "incident_name_2"},
{'id': '3', 'created': "2021-01-30", 'commandline': 'powershell IP=1.1.1.1',
'CustomFields': {"nested_field": 'value_nested_field_3'},
'empty_current_incident_field': 'empty_current_incident_field_3', 'empty_fetched_incident_field': None,
"name": "incident_name_3"}
]
FETCHED_INCIDENT_EMPTY = []
SIMILAR_INDICATORS_NOT_EMPTY = [
{"ID": "inc_1", "Identical indicators": "ind_1, ind_2", "created": "2021-01-30", "id": "1",
"name": "incident_name_1", "similarity indicators": 0.2},
{"ID": "inc_3", "Identical indicators": "ind_2", "created": "2021-01-30", "id": "3", "name": "incident_name_3",
"similarity indicators": 0.4},
]
SIMILAR_INDICATORS_EMPTY = []
def executeCommand(command, args):
global SIMILAR_INDICATORS, FETCHED_INCIDENT, CURRENT_INCIDENT
if command == 'DBotFindSimilarIncidentsByIndicators':
return [[], {'Contents': SIMILAR_INDICATORS, 'Type': 'note', 'Tags': [TAG_SCRIPT_INDICATORS]}]
if command == 'GetIncidentsByQuery':
if 'limit' in args:
return [{'Contents': json.dumps(FETCHED_INCIDENT), 'Type': 'note'}]
else:
return [{'Contents': json.dumps(CURRENT_INCIDENT), 'Type': 'note'}]
def check_exist_dataframe_columns(*fields, df):
for field in fields:
if field not in df.columns.tolist():
return False
return True
def test_keep_high_level_field():
incidents_field = ['xdralerts.comandline', 'commandline', 'CustomsFields.commandline']
res = ['xdralerts', 'commandline', 'CustomsFields']
assert keep_high_level_field(incidents_field) == res
def test_preprocess_incidents_field():
assert preprocess_incidents_field('incident.commandline', PREFIXES_TO_REMOVE) == 'commandline'
assert preprocess_incidents_field('commandline', PREFIXES_TO_REMOVE) == 'commandline'
def test_check_list_of_dict():
assert check_list_of_dict([{'test': 'value_test'}, {'test1': 'value_test1'}]) is True
assert check_list_of_dict({'test': 'value_test'}) is False
def test_match_one_regex():
assert match_one_regex('123.123.123.123', [REGEX_IP]) is True
assert match_one_regex('123.123.123', [REGEX_IP]) is False
assert match_one_regex('abc', [REGEX_IP]) is False
assert match_one_regex(1, [REGEX_IP]) is False
def test_normalize_command_line():
assert normalize_command_line('cmd -k IP=1.1.1.1 [1.1.1.1]') == 'cmd -k ip = IP IP'
assert normalize_command_line('powershell "remove_quotes"') == 'powershell remove_quotes'
def test_euclidian_similarity_capped():
x = np.array([[1, 1, 1], [2, 2, 2]])
y = np.array([[2.1, 2.1, 2.1]])
distance = euclidian_similarity_capped(x, y)
assert distance[0] == 0
assert distance[1] > 0
@pytest.mark.filterwarnings("ignore::pandas.core.common.SettingWithCopyWarning", "ignore::UserWarning")
def test_main_regular(mocker):
global SIMILAR_INDICATORS, FETCHED_INCIDENT, CURRENT_INCIDENT
FETCHED_INCIDENT = FETCHED_INCIDENT_NOT_EMPTY
CURRENT_INCIDENT = CURRENT_INCIDENT_NOT_EMPTY
SIMILAR_INDICATORS = SIMILAR_INDICATORS_NOT_EMPTY
mocker.patch.object(demisto, 'args',
return_value={
'incidentId': 12345,
'similarTextField': 'incident.commandline, commandline, command, '
'empty_current_incident_field, empty_fetched_incident_field',
'similarCategoricalField': 'signature, filehash, incident.commandline',
'similarJsonField': 'CustomFields',
'limit': 10000,
'fieldExactMatch': '',
'fieldsToDisplay': 'filehash, destinationip, closeNotes, sourceip, alertdescription',
'showIncidentSimilarityForAllFields': True,
'minimunIncidentSimilarity': 0.2,
'maxIncidentsToDisplay': 100,
'query': '',
'aggreagateIncidentsDifferentDate': 'False',
'includeIndicatorsSimilarity': 'True'
})
mocker.patch.object(demisto, 'dt', return_value=None)
mocker.patch.object(demisto, 'executeCommand', side_effect=executeCommand)
res, msg = main()
assert ('empty_current_incident_field' not in res.columns)
assert (res.loc['3', 'Identical indicators'] == 'ind_2')
assert (res.loc['2', 'Identical indicators'] == "")
assert check_exist_dataframe_columns(SIMILARITY_COLUNM_NAME_INDICATOR, SIMILARITY_COLUNM_NAME,
COLUMN_ID, COLUMN_TIME, 'name', df=res)
assert res.loc['3', 'similarity indicators'] == 0.4
assert res.loc['2', 'similarity indicators'] == 0.0
@pytest.mark.filterwarnings("ignore::pandas.core.common.SettingWithCopyWarning")
def test_main_no_indicators_found(mocker):
"""
Test if no indicators found
:param mocker:
:return:
"""
global SIMILAR_INDICATORS, FETCHED_INCIDENT, CURRENT_INCIDENT
FETCHED_INCIDENT = FETCHED_INCIDENT_NOT_EMPTY
CURRENT_INCIDENT = CURRENT_INCIDENT_NOT_EMPTY
SIMILAR_INDICATORS = SIMILAR_INDICATORS_EMPTY
mocker.patch.object(demisto, 'args',
return_value={
'incidentId': 12345,
'similarTextField': 'incident.commandline, commandline, command,'
' empty_current_incident_field, empty_fetched_incident_field',
'similarCategoricalField': 'signature, filehash',
'similarJsonField': 'CustomFields',
'limit': 10000,
'fieldExactMatch': '',
'fieldsToDisplay': 'filehash, destinationip, closeNotes, sourceip, alertdescription',
'showIncidentSimilarityForAllFields': True,
'minimunIncidentSimilarity': 0.2,
'maxIncidentsToDisplay': 100,
'query': '',
'aggreagateIncidentsDifferentDate': 'False',
'includeIndicatorsSimilarity': 'True'
})
mocker.patch.object(demisto, 'dt', return_value=None)
mocker.patch.object(demisto, 'executeCommand', side_effect=executeCommand)
res, msg = main()
assert ('empty_current_incident_field' not in res.columns)
assert (res['Identical indicators'] == ["", "", ""]).all()
assert check_exist_dataframe_columns(SIMILARITY_COLUNM_NAME_INDICATOR, SIMILARITY_COLUNM_NAME, COLUMN_ID,
COLUMN_TIME, 'name', df=res)
assert (res['similarity indicators'] == [0.0, 0.0, 0.0]).all()
@pytest.mark.filterwarnings("ignore::pandas.core.common.SettingWithCopyWarning")
def test_main_no_fetched_incidents_found(mocker):
"""
Test output if no related incidents found - Should return None and MESSAGE_NO_INCIDENT_FETCHED
:param mocker:
:return:
"""
global SIMILAR_INDICATORS, FETCHED_INCIDENT, CURRENT_INCIDENT
FETCHED_INCIDENT = FETCHED_INCIDENT_EMPTY
CURRENT_INCIDENT = CURRENT_INCIDENT_NOT_EMPTY
SIMILAR_INDICATORS = SIMILAR_INDICATORS_NOT_EMPTY
mocker.patch.object(demisto, 'args',
return_value={
'incidentId': 12345,
'similarTextField': 'incident.commandline, commandline, command, '
'empty_current_incident_field, empty_fetched_incident_field',
'similarCategoricalField': 'signature, filehash',
'similarJsonField': 'CustomFields',
'limit': 10000,
'fieldExactMatch': '',
'fieldsToDisplay': 'filehash, destinationip, closeNotes, sourceip, alertdescription',
'showIncidentSimilarityForAllFields': True,
'minimunIncidentSimilarity': 0.2,
'maxIncidentsToDisplay': 100,
'query': '',
'aggreagateIncidentsDifferentDate': 'False',
'includeIndicatorsSimilarity': 'True'
})
mocker.patch.object(demisto, 'dt', return_value=None)
mocker.patch.object(demisto, 'executeCommand', side_effect=executeCommand)
res = main()
assert (not res[0])
assert MESSAGE_NO_INCIDENT_FETCHED in res[1]
def test_main_some_incorrect_fields():
wrong_field_1 = 'wrong_field_1'
wrong_field_2 = 'wrong_field_2'
correct_field_1 = 'empty_fetched_incident_field'
current_incident_df = pd.DataFrame(CURRENT_INCIDENT)
global_msg, incorrect_fields = find_incorrect_fields([correct_field_1, wrong_field_1, wrong_field_2],
current_incident_df, '')
assert incorrect_fields == ['wrong_field_1', 'wrong_field_2']
assert wrong_field_1 in global_msg
assert wrong_field_2 in global_msg
assert correct_field_1 not in global_msg
@pytest.mark.filterwarnings("ignore::pandas.core.common.SettingWithCopyWarning")
def test_main_all_incorrect_field(mocker):
"""
Test if only incorrect fields - Should return None and MESSAGE_INCORRECT_FIELD message for wrong fields
:param mocker:
:return:
"""
global SIMILAR_INDICATORS, FETCHED_INCIDENT, CURRENT_INCIDENT
FETCHED_INCIDENT = FETCHED_INCIDENT_NOT_EMPTY
CURRENT_INCIDENT = CURRENT_INCIDENT_NOT_EMPTY
SIMILAR_INDICATORS = SIMILAR_INDICATORS_NOT_EMPTY
wrong_field_1 = 'wrong_field_1'
wrong_field_2 = 'wrong_field_2'
wrong_field_3 = 'wrong_field_3'
wrong_field_4 = 'wrong_field_4'
mocker.patch.object(demisto, 'args',
return_value={
'incidentId': 12345,
'similarTextField': wrong_field_1,
'similarCategoricalField': wrong_field_2,
'similarJsonField': wrong_field_3,
'limit': 10000,
'fieldExactMatch': '',
'fieldsToDisplay': wrong_field_4,
'showIncidentSimilarityForAllFields': True,
'minimunIncidentSimilarity': 0.2,
'maxIncidentsToDisplay': 100,
'query': '',
'aggreagateIncidentsDifferentDate': 'False',
'includeIndicatorsSimilarity': 'True'
})
mocker.patch.object(demisto, 'dt', return_value=None)
mocker.patch.object(demisto, 'executeCommand', side_effect=executeCommand)
df, msg = main()
assert (not df)
assert MESSAGE_INCORRECT_FIELD % ' , '.join([wrong_field_1, wrong_field_3, wrong_field_2, wrong_field_4]) in msg
assert all(field in msg for field in [wrong_field_1, wrong_field_2, wrong_field_3, wrong_field_4])
@pytest.mark.filterwarnings("ignore::pandas.core.common.SettingWithCopyWarning")
def test_main_incident_truncated(mocker):
"""
Test if fetched incident truncated - Should return MESSAGE_WARNING_TRUNCATED in the message
:param mocker:
:return:
"""
global SIMILAR_INDICATORS, FETCHED_INCIDENT, CURRENT_INCIDENT
FETCHED_INCIDENT = FETCHED_INCIDENT_NOT_EMPTY
CURRENT_INCIDENT = CURRENT_INCIDENT_NOT_EMPTY
SIMILAR_INDICATORS = SIMILAR_INDICATORS_NOT_EMPTY
correct_field_1 = 'commandline'
wrong_field_2 = 'wrong_field_2'
wrong_field_3 = 'wrong_field_3'
wrong_field_4 = 'wrong_field_4'
mocker.patch.object(demisto, 'args',
return_value={
'incidentId': 12345,
'similarTextField': correct_field_1,
'similarCategoricalField': wrong_field_2,
'similarJsonField': wrong_field_3,
'limit': 3,
'fieldExactMatch': '',
'fieldsToDisplay': wrong_field_4,
'showIncidentSimilarityForAllFields': True,
'minimunIncidentSimilarity': 0.2,
'maxIncidentsToDisplay': 100,
'query': '',
'aggreagateIncidentsDifferentDate': 'False',
'includeIndicatorsSimilarity': 'True'
})
mocker.patch.object(demisto, 'dt', return_value=None)
mocker.patch.object(demisto, 'executeCommand', side_effect=executeCommand)
df, msg = main()
limit = demisto.args()['limit']
assert not df.empty
assert MESSAGE_WARNING_TRUNCATED % (limit, limit) in msg
@pytest.mark.filterwarnings("ignore::pandas.core.common.SettingWithCopyWarning")
def test_main_incident_nested(mocker):
"""
Test with a nested field resolved through demisto.dt - Should return a non-empty DataFrame with a similarity of 1.0 for the nested field
:param mocker:
:return:
"""
global SIMILAR_INDICATORS, FETCHED_INCIDENT, CURRENT_INCIDENT
FETCHED_INCIDENT = FETCHED_INCIDENT_NOT_EMPTY
CURRENT_INCIDENT = CURRENT_INCIDENT_NOT_EMPTY
SIMILAR_INDICATORS = SIMILAR_INDICATORS_NOT_EMPTY
wrong_field_2 = 'wrong_field_2'
wrong_field_3 = 'wrong_field_3'
wrong_field_4 = 'wrong_field_4'
nested_field = 'xdralerts.cmd'
mocker.patch.object(demisto, 'args',
return_value={
'incidentId': 12345,
'similarTextField': nested_field,
'similarCategoricalField': wrong_field_2,
'similarJsonField': wrong_field_3,
'limit': 3,
'fieldExactMatch': '',
'fieldsToDisplay': wrong_field_4,
'showIncidentSimilarityForAllFields': True,
'minimunIncidentSimilarity': 0.2,
'maxIncidentsToDisplay': 100,
'query': '',
'aggreagateIncidentsDifferentDate': 'False',
'includeIndicatorsSimilarity': 'True'
})
mocker.patch.object(demisto, 'dt', return_value=['nested_val_1', 'nested_val_2'])
mocker.patch.object(demisto, 'executeCommand', side_effect=executeCommand)
df, msg = main()
assert not df.empty
assert (df['similarity %s' % nested_field] == [1.0, 1.0, 1.0]).all()
| mit |
nguyentu1602/statsmodels | statsmodels/examples/l1_demo/short_demo.py | 33 | 3737 | """
You can fit your LikelihoodModel using l1 regularization by changing
the method argument and adding an argument alpha. See code for
details.
The Story
---------
The maximum likelihood (ML) solution works well when the number of data
points is large and the noise is small. When the ML solution starts
"breaking", the regularized solution should do better.
The l1 Solvers
--------------
The standard l1 solver is fmin_slsqp and is included with scipy. It
sometimes has trouble verifying convergence when the data size is
large.
The l1_cvxopt_cp solver is part of CVXOPT and this package needs to be
installed separately. It works well even for larger data sizes.
"""
from __future__ import print_function
from statsmodels.compat.python import range
import statsmodels.api as sm
import matplotlib.pyplot as plt
import numpy as np
import pdb # pdb.set_trace()
## Load the data from Spector and Mazzeo (1980)
spector_data = sm.datasets.spector.load()
spector_data.exog = sm.add_constant(spector_data.exog)
N = len(spector_data.endog)
K = spector_data.exog.shape[1]
### Logit Model
logit_mod = sm.Logit(spector_data.endog, spector_data.exog)
## Standard logistic regression
logit_res = logit_mod.fit()
## Regularized regression
# Set the regularization parameter to something reasonable
alpha = 0.05 * N * np.ones(K)
# Use l1, which solves via a built-in (scipy.optimize) solver
logit_l1_res = logit_mod.fit_regularized(method='l1', alpha=alpha, acc=1e-6)
# Use l1_cvxopt_cp, which solves with a CVXOPT solver
logit_l1_cvxopt_res = logit_mod.fit_regularized(
method='l1_cvxopt_cp', alpha=alpha)
## Print results
print("============ Results for Logit =================")
print("ML results")
print(logit_res.summary())
print("l1 results")
print(logit_l1_res.summary())
print(logit_l1_cvxopt_res.summary())
### Multinomial Logit Example using American National Election Studies Data
anes_data = sm.datasets.anes96.load()
anes_exog = anes_data.exog
anes_exog = sm.add_constant(anes_exog, prepend=False)
mlogit_mod = sm.MNLogit(anes_data.endog, anes_exog)
mlogit_res = mlogit_mod.fit()
## Set the regularization parameter.
alpha = 10 * np.ones((mlogit_mod.J - 1, mlogit_mod.K))
# Don't regularize the constant
alpha[-1,:] = 0
mlogit_l1_res = mlogit_mod.fit_regularized(method='l1', alpha=alpha)
print(mlogit_l1_res.params)
#mlogit_l1_res = mlogit_mod.fit_regularized(
# method='l1_cvxopt_cp', alpha=alpha, abstol=1e-10, trim_tol=1e-6)
#print mlogit_l1_res.params
## Print results
print("============ Results for MNLogit =================")
print("ML results")
print(mlogit_res.summary())
print("l1 results")
print(mlogit_l1_res.summary())
#
#
#### Logit example with many params, sweeping alpha
spector_data = sm.datasets.spector.load()
X = spector_data.exog
Y = spector_data.endog
## Fit
N = 50 # number of points to solve at
K = X.shape[1]
logit_mod = sm.Logit(Y, X)
coeff = np.zeros((N, K)) # Holds the coefficients
alphas = 1 / np.logspace(-0.5, 2, N)
## Sweep alpha and store the coefficients
# QC check doesn't always pass with the default options.
# Use the options QC_verbose=True and disp=True
# to see what is happening. It just barely doesn't pass, so I decreased
# acc and increased QC_tol to make it pass
for n, alpha in enumerate(alphas):
logit_res = logit_mod.fit_regularized(
method='l1', alpha=alpha, trim_mode='off', QC_tol=0.1, disp=False,
QC_verbose=True, acc=1e-15)
coeff[n,:] = logit_res.params
## Plot
plt.figure(1);plt.clf();plt.grid()
plt.title('Regularization Path');
plt.xlabel('alpha');
plt.ylabel('Parameter value');
for i in range(K):
plt.plot(alphas, coeff[:,i], label='X'+str(i), lw=3)
plt.legend(loc='best')
plt.show()
| bsd-3-clause |
ThomasChauve/aita | AITAToolbox/.ipynb_checkpoints/aita-checkpoint.py | 2 | 77555 | # -*- coding: utf-8 -*-
'''
Created on 3 July 2015
Toolbox for data obtained using the G50 Automatic Ice Texture Analyser (AITA) provided by:
Russell-Head, D.S., Wilson, C., 2001. Automated fabric analyser system for quartz and ice. J. Glaciol. 24, 117–130
@author: Thomas Chauve
@contact: [email protected]
@license: CC-BY-CC
'''
import shapely.geometry
import pygmsh
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import matplotlib
import math
import pylab
from skimage import io
import skimage.morphology
import skimage.measure
import skimage.feature
import mahotas as mh
import datetime
import random
import scipy
import colorsys
import ipywidgets as widgets
import time
import vtk
from vtk.util.numpy_support import numpy_to_vtk, vtk_to_numpy
from IPython import get_ipython
if get_ipython().__class__.__name__=='ZMQInteractiveShell':
from tqdm.notebook import tqdm
else:
from tqdm import tqdm
import AITAToolbox.image2d as im2d
import AITAToolbox.setvector3d as vec3d
from AITAToolbox.function import normxcorr2
class aita(object):
'''
.. py:class:: aita
"aita" is a python class to analyse output from G50-AITA analyser.
It provide an environnement to plot data, to create inpur for CraFT code,...
'''
pass
def __init__(self,phi1_field,phi_field,qua_field,micro_field,resolution=1):
'''
:param phi1_field: Euler angle phi1 map
:param phi_field: Euler angle phi map
:param qua_field: quality factor map
:param resolution: spatial step size (mm); default = 1 mm
:param micro_field: microstructure (0 background, 1 grain boundary)
:type phi1_field: np.array
:type phi_field: np.array
:type qua_field: np.array
:type resolution: float
:type micro_field: np.array
:return: aita object output
:rtype: aita
.. note:: Bunge Euler angle convention is used (phi1, phi, phi2); phi2 is not computed because it cannot be determined from the optical measurement.
'''
# create image object from data
self.phi1=im2d.image2d(phi1_field,resolution)
self.phi=im2d.image2d(phi_field,resolution)
self.qua=im2d.image2d(qua_field,resolution)
# create microstructure
self.micro=im2d.micro2d(micro_field,resolution)
self.grains=self.micro.grain_label()
# replace grains boundary with NaN number
self.grains.field=np.array(self.grains.field,float)
idx=np.where(self.micro.field==1)
self.grains.field[idx]=np.nan
print("Sucessfull aita build !")
#####################################################################
######################Geometry conversion############################
#####################################################################
def fliplr(self):
'''
Apply a horizontal mirror to the data
:return: aita object with a horizontal mirror applied
:rtype: aita
:Exemple: >>> data.fliplr()
'''
# horizontal mirror (fliplr) on all the data in self
self.phi.field=np.fliplr(self.phi.field)
self.phi1.field=np.mod(math.pi-np.fliplr(self.phi1.field),2*math.pi) # change phi1 angle by pi-phi1 modulo 2*pi
self.qua.field=np.fliplr(self.qua.field)
self.micro.field=np.fliplr(self.micro.field)
self.grains.field=np.fliplr(self.grains.field)
#---------------------------------------------------------------------
def rot180(self):
'''
Rotate the data by 180 degrees
:return: rotated aita object
:rtype: aita
:Exemple: >>> data.rot180()
'''
# rotate the position of the data if 180 degre
self.phi.field=np.flipud(np.fliplr(self.phi.field))
self.phi1.field=np.mod(math.pi+np.flipud(np.fliplr(self.phi1.field)),2*math.pi) # rotate the c-axis : phi1 = pi + phi1 mod(2*pi)
self.qua.field=np.flipud(np.fliplr(self.qua.field))
self.micro.field=np.flipud(np.fliplr(self.micro.field))
self.grains.field=np.flipud(np.fliplr(self.grains.field))
#----------------------------------------------------------------------
def imresize(self,res):
'''
Resize the data
:param res: the new resolution wanted in millimeter (mm)
:type res: float
:return: data with the resolution wanted
:rtype: aita
:Exemple: >>> data.imresize(0.25)
'''
self.phi.imresize(res)
self.phi1.imresize(res)
self.qua.imresize(res)
self.grains.imresize(res)
# make larger the boundaries to keep them
self.micro.field=scipy.ndimage.binary_dilation(self.micro.field, iterations=np.int32(res/(2*self.micro.res)))
# resize
self.micro.imresize(res)
#----------------------------------------------------------------------
def mask(self,mask):
'''
Apply a mask on the aita data
:param mask:
:type mask: im2d.mask2d
:return: aita object with the mask applied
:rtype: aita
'''
if (type(mask) is im2d.mask2d):
phi1=self.phi1*mask
phi=self.phi*mask
qua=self.qua*mask
micro=self.micro
res=self.micro.res
# reduce the size of the aita data : remove bands of NaN
x,y=np.where(mask.field==1)
minx=np.min(x)
maxx=np.max(x)
miny=np.min(y)
maxy=np.max(y)
ma=aita(phi1.field[minx:maxx,miny:maxy],phi.field[minx:maxx,miny:maxy],qua.field[minx:maxx,miny:maxy],micro.field[minx:maxx,miny:maxy],res)
else:
print('mask is not a mask2d object')
ma=False
return ma
#####################################################################
#########################Data traitement#############################
#####################################################################
def filter(self,value):
'''
Remove data of bad quality
:param value: limit quality value between 0 to 100
:type value: int
:return: data object where orientations with a quality value below the threshold are removed
:rtype: aita
:Exemple: >>> data.filter(75)
'''
# find where quality<value
x=np.where(self.qua.field < value)
self.phi.field[x]=np.NAN
self.phi1.field[x]=np.NAN
#--------------------------------------------------------------------
def mean_grain(self):
'''
Compute the mean orientation inside the grain
:return: data with only one orientation per grain, the mean orientation
:rtype: aita
:Exemple: >>> data.mean_grain()
'''
allv=[]
# number of grain
nb_grain=int(np.nanmax(self.grains.field))
# loop on all the grain
for i in tqdm(range(nb_grain+1)):
idx=np.where(self.grains.field==i)
col=self.phi.field[idx[0],idx[1]]
azi=np.mod(self.phi1.field[idx[0],idx[1]]-math.pi/2,2*math.pi)
# remove nan value
azi=azi[~np.isnan(azi)]
col=col[~np.isnan(col)]
# compute [xc,yc,zc] the coordinate of the c-axis
xc = np.multiply(np.cos(azi),np.sin(col))
yc = np.multiply(np.sin(azi),np.sin(col))
zc = np.cos(col)
v=vec3d.setvector3d(np.transpose(np.array([xc,yc,zc])))
if len(v.vector)==0:
self.phi.field[idx]=np.nan
self.phi1.field[idx]=np.nan
else:
SOT_val,SOT_vec=v.OrientationTensor2nd()
vc=SOT_vec[:,0]
if vc[2]<0:
vc=-vc
col=np.arccos(vc[2])
azi=np.arctan2(vc[1],vc[0])
phi1=np.mod(azi+math.pi/2,2*math.pi)
self.phi.field[idx]=col
self.phi1.field[idx]=phi1
#--------------------------------------------------------------------
def misorientation_extractor(self,pos):
'''
Compute the misorientation profile along a line
:param pos: first point and last point of the line
:type pos: array
:return: x - coordinate along the line
:rtype: array, float
:return: mis2o,mis2p - misorientation angle to the origin, and misorientation angle to the previous pixel
:rtype: array, float
'''
# size of the map
ss=np.shape(self.phi1.field)
yy=np.float32([pos[0][0],pos[1][0]])/self.phi.res
xx=np.float32([pos[0][1],pos[1][1]])/self.phi.res
# numbers of pixel along the line
nb_pixel=np.int32(np.sqrt((xx[1]-xx[0])**2+(yy[1]-yy[0])**2))
# calcul for each pixel
phi=[]
phi1=[]
x=[]
xi=[]
yi=[]
mis2p=[]
mis2o=[]
ori=[]
for i in list(range(nb_pixel)):
# find the coordinate x an y along the line
xi.append(ss[0]-np.int32(np.round(i*(xx[1]-xx[0])/nb_pixel+xx[0])))
yi.append(np.int32(np.round(i*(yy[1]-yy[0])/nb_pixel+yy[0])))
# extract phi and phi1
phi.append(self.phi.field[xi[i],yi[i]])
phi1.append(self.phi1.field[xi[i],yi[i]])
# ori0 and orii are the c axis vector
ori.append(np.mat([np.cos(np.mod(phi1[i]-math.pi/2,2*math.pi))*np.sin(phi[i]) , np.sin(np.mod(phi1[i]-math.pi/2,2*math.pi))*np.sin(phi[i]) ,np.cos(phi[i])]))
# mis2o is the misorientation between pixel i and the origin
mis2o.append(np.float(np.arccos(np.abs(ori[0]*np.transpose(ori[i])))*180/math.pi))
if i>0:
# mis2p is the misorientation to the previous pixel
mis2p.append(np.float(np.arccos(np.abs(ori[i]*np.transpose(ori[i-1])))*180/math.pi))
# x is the position along the line
x.append(np.sqrt((xi[i]-xi[0])**2+(yi[i]-yi[0])**2))
else:
mis2p.append(0.0)
x.append(0.0)
return np.array(x)*self.phi.res, np.array(mis2o), np.array(mis2p)
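# Illustrative usage sketch (added for clarity, not in the original file): pos holds the
# two end points of the profile line in millimetres, as selected for instance with
# pylab.ginput(2) on a plotted map. The coordinate values below are arbitrary examples.
#
# pos = np.array([[1.0, 2.0], [4.0, 5.5]])
# x, mis2o, mis2p = data.misorientation_extractor(pos)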
#--------------------------------------------------------------------
def misorientation(self,random=False,filter_angle=math.pi/180):
'''
Compute the misorientation angles with the neighbouring pixels
:param random: shuffle the image and compute the angles (random baseline)
:type random: bool
:param filter_angle: threshold angle for removing small value in radians (default : pi/180)
:type filter_angle: float
'''
phi1=self.phi1.field
phi=self.phi.field
if random:
np.random.shuffle(phi1)
np.random.shuffle(phi)
phi1=phi1.flatten()
phi=phi.flatten()
phi1 = phi1[~np.isnan(phi1)]
phi = phi[~np.isnan(phi)]
dd=np.int(np.sqrt(len(phi1)))
phi1=phi1[0:dd**2].reshape([dd,dd])
phi=phi[0:dd**2].reshape([dd,dd])
mat=np.zeros([3,3])
mat[0,1]=1
phi_a=scipy.signal.convolve2d(phi,mat,mode='same',boundary='symm')
phi1_a=scipy.signal.convolve2d(phi1,mat,mode='same',boundary='symm')
mat=np.zeros([3,3])
mat[1,0]=1
phi_b=scipy.signal.convolve2d(phi,mat,mode='same',boundary='symm')
phi1_b=scipy.signal.convolve2d(phi1,mat,mode='same',boundary='symm')
mat=np.zeros([3,3])
mat[1,2]=1
phi_c=scipy.signal.convolve2d(phi,mat,mode='same',boundary='symm')
phi1_c=scipy.signal.convolve2d(phi1,mat,mode='same',boundary='symm')
mat=np.zeros([3,3])
mat[2,1]=1
phi_d=scipy.signal.convolve2d(phi,mat,mode='same',boundary='symm')
phi1_d=scipy.signal.convolve2d(phi1,mat,mode='same',boundary='symm')
phi1_s=[phi1_a,phi1_b,phi1_c,phi1_d]
phi_s=[phi_a,phi_b,phi_c,phi_d]
for i in range(4):
nphi1=phi1_s[i]
nphi=phi_s[i]
res=np.arccos(np.round(np.sin(phi1)*np.sin(nphi1)*np.sin(phi)*np.sin(nphi)+np.cos(phi1)*np.cos(nphi1)*np.sin(phi)*np.sin(nphi)+np.cos(phi)*np.cos(nphi),5))
res = np.delete(res, 0, 0) # delete first row
res = np.delete(res, -1, 0) # delete last row
res = np.delete(res, 0, 1) # delete first column
res = np.delete(res, -1, 1) # delete last column
#put everything between 0 and pi/2 because c=-c
id=np.where(res>math.pi/2)
res[id]=math.pi-res[id]
id=np.where(res>filter_angle)
xres=res[id]
if i==0:
angle=xres
else:
angle=np.concatenate((angle,xres),axis=0)
return angle
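# Illustrative usage sketch (added for clarity): compare the neighbour-pixel misorientation
# distribution with a shuffled (random) baseline. The bin width is an arbitrary example value.
#
# ang = data.misorientation(filter_angle=math.pi/180)
# ang_rand = data.misorientation(random=True, filter_angle=math.pi/180)
# plt.hist(ang*180/math.pi, bins=np.arange(0, 91, 2), density=True, alpha=0.5, label='data')
# plt.hist(ang_rand*180/math.pi, bins=np.arange(0, 91, 2), density=True, alpha=0.5, label='random')
# plt.legend()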
#--------------------------------------------------------------------
def project_end2init(self,self_e,image,mask=0,min_win=20,apply_morph=True):
'''
Project the final microstructure onto the initial microstructure
:param self_e: final AITA
:type self_e: aita
:param image: Binary image to project instead of the final microstructure.
:type image: im2d.image2d
:return: image projected onto the initial aita
:rtype: im2d.image2d
.. note:: It works better if mean_grain has been applied to the aita objects beforehand.
'''
# compute img semi
f,img_i=self.plot(semi=True)
f,img_e=self_e.plot(semi=True)
# gray value for img
img_i=np.mean(img_i,axis=2)
img_e=np.mean(img_e,axis=2)
#size of the final image
ss=img_i.shape
# create projected image
RX_i=np.zeros(ss)
# positions of the pixels to project (where image == 1)
idx,idy=np.where(image.field==1)
idxm=np.array([0])
idym=np.array([0])
if mask!=0:
idxm2,idym2=np.where(mask.field==1)
idxm=np.concatenate([idxm,idxm2])
idym=np.concatenate([idym,idym2])
# The trick used here is to perform cross-correlation between the initial images and subsets of the final images to find the best location for the center pixel of each subset image.
for i in tqdm(range(len(idx))):
x=idx[i]
y=idy[i]
win=np.min(np.array([np.min(np.abs(x-idxm)),np.min(np.abs(y-idym)),ss[0]-x,ss[1]-y]))
if win>min_win:
look_for1=img_e[np.int32(x-win):np.int32(x+win),np.int32(y-win):np.int32(y+win)]
look_for2=self_e.phi1.field[np.int32(x-win):np.int32(x+win),np.int32(y-win):np.int32(y+win)]
look_for3=self_e.phi.field[np.int32(x-win):np.int32(x+win),np.int32(y-win):np.int32(y+win)]
look_for4=self_e.micro.field[np.int32(x-win):np.int32(x+win),np.int32(y-win):np.int32(y+win)]
#RX_look_for=RX.field[np.int32(x-win):np.int32(x+win),np.int32(y-win):np.int32(y+win)]
res1=normxcorr2(look_for1,img_i,mode='same')
res2=normxcorr2(look_for2,self.phi1.field,mode='same')
res3=normxcorr2(look_for3,self.phi.field,mode='same')
res4=normxcorr2(look_for4,self.micro.field,mode='same')
res=res1+res2+res3+res4
xn,yn=np.where(res==np.max(res))
#print(win)
RX_i[xn[0],yn[0]]=1
if apply_morph:
for i in range(2):
RX_i=scipy.ndimage.morphology.binary_closing(RX_i,iterations=2)
RX_i=scipy.ndimage.morphology.binary_erosion(RX_i,iterations=1)
RX_i=scipy.ndimage.morphology.binary_dilation(RX_i,iterations=2)
return im2d.image2d(RX_i,self.phi1.res)
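# Illustrative usage sketch (added for clarity): data_init and data_end are assumed to be
# two aita objects of the same sample before and after deformation, and rx_map an
# im2d.image2d binary map defined on data_end. mean_grain() is applied first, as
# recommended in the note above.
#
# data_init.mean_grain(); data_end.mean_grain()
# rx_on_init = data_init.project_end2init(data_end, rx_map, min_win=20)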
#####################################################################
##########################Plot function##############################
#####################################################################
def plot(self,nlut=512,semi=False):
'''
Plot the data using a 2d lut
:param nlut: number of pixels you want for the 2d LUT (default 512)
:type nlut: int
:return: figure of orientation mapping
:rtype: matplotlib figure
:Exemple:
>>> data.plot()
>>> plt.show()
>>> # print the associated color wheel
>>> lut=lut()
>>> plt.show()
.. note:: It takes time to build the colormap
'''
# size of the map
nx=np.shape(self.phi.field)
# create image for color map
img=np.ones([nx[0],nx[1],3])
# load the colorwheel
rlut=lut(nx=nlut,semi=semi,circle=False)
nnlut=np.shape(rlut)
nnnlut=nnlut[0]
# fill the color map
XX=np.int32((nnnlut-1)/2*np.multiply(np.sin(self.phi.field),np.cos(self.phi1.field))+(nnnlut-1)/2)
YY=np.int32((nnnlut-1)/2*np.multiply(np.sin(self.phi.field),np.sin(self.phi1.field))+(nnnlut-1)/2)
id=XX<0
XX[id]=0
YY[id]=0
idx,idy=np.where(id==True)
img=rlut[XX,YY]
img[idx,idy,:]=np.array([255,255,255])
h=plt.imshow(img,extent=(0,nx[1]*self.phi.res,0,nx[0]*self.phi.res))
return h,img
#---------------------------------------------------------------------
def plotpdf(self,peigen=True,grainlist=[],nbp=10000,contourf=True,cm2=cm.viridis,bw=0.1,projz=1,angle=np.array([30.,60.]),cline=15,n_jobs=-1):
'''
Plot pole figure for c-axis (0001)
:param peigen: Plot the eigenvalues and eigenvectors on the pole figure (default = True)
:type peigen: bool
:param grainlist: give the list of the grainId you want to plot
:type grainlist: list
:param nbp: number of pixel plotted
:type nbp: int
:param contourf: Do you want to add contouring to your pole figure ? (default : True)
:type contourf: bool
:param cm2: colorbar (default : cm.viridis)
:type cm2: cm
:param bw: bandwidth to compute the kernel density (default : 0.1); bw=0 means find the best fit between 0.01 and 1
:type bw: float
:param projz: 0 or 1. It chooses the type of projection. 0 (1) means projection in the plane z=0 (1).
:type projz: int
:param angle: plot circles for these angle values (default : np.array([30.,60.])); 0 if you don't want inner circles.
:type angle: np.array
:param cline: number of lines in contourf (default 15). Used only when contourf=True.
:type cline: int
:param n_jobs: number of parallel jobs (CPU). Only used when bw=0 (best fit) (default : -1 means all processors)
:type n_jobs: int
:return: pole figure image
:rtype: matplotlib figure
:return: eigenvalue
:rtype: float
:Exemple:
>>> eigenvalue = data.plotpdf(peigen=True)
'''
if grainlist!=[]:
azi=np.array([np.nan])
col=np.array([np.nan])
for ig in grainlist:
idx=np.where(self.grains.field==ig)
col=np.concatenate([col,self.phi.field[idx[0],idx[1]]])
azi=np.concatenate([azi,np.mod(self.phi1.field[idx[0],idx[1]]-math.pi/2,2*math.pi)])
else:
# compute azimuth and colatitude
azi=np.mod(self.phi1.field.flatten()-math.pi/2,2*math.pi)
col=self.phi.field.flatten()
# remove nan value
azi=azi[~np.isnan(azi)]
col=col[~np.isnan(col)]
# compute [xc,yc,zc] the coordinate of the c-axis
xc = np.multiply(np.cos(azi),np.sin(col))
yc = np.multiply(np.sin(azi),np.sin(col))
zc = np.cos(col)
v=vec3d.setvector3d(np.transpose(np.array([xc,yc,zc])))
v.stereoplot(nbpoints=nbp,contourf=contourf,bw=bw,cm=cm2,angle=angle,plotOT=peigen,projz=projz,cline=cline,n_jobs=n_jobs)
plt.text(-1.4, 1.4, r'[0001]')
eigvalue,eigvector=v.OrientationTensor2nd()
return eigvalue
#####################################################################
##########################Exportation################################
#####################################################################
def craft(self,nameId):
'''
Create the inputs for craft
:param nameId: name of the prefix used for craft files
:type nameId: str
:return: create file : nameId_micro.vtk, nameId.phase, nameId.in, nameId.load, nameId.output
:Exemple: >>> data.craft('manip01')
.. note:: nameId.load and nameId.output need to be rewritten to get the correct loading and the wanted outputs
.. note:: nameId.in needs to be adapted depending on the folder structure used for craft
.. note:: NaN orientation values are replaced by the closest orientation
'''
##############################################
# remove the grain boundary (remove NaN value)
##############################################
# find where are the NaN Value corresponding to the grain boundary
idx=np.where(np.isnan(self.grains.field))
# while NaN values remain in the microstructure, replace them with a neighbouring value ...
while np.size(idx)>0:
# for all the pixel NaN
for i in list(range(np.shape(idx)[1])):
# if the pixel is at the bottom line of the sample, we choose the pixel one line upper ...
if idx[0][i]==0:
k=idx[0][i]+1
#... else we choose the pixel one line higher.
else:
k=idx[0][i]-1
# if the pixel is at the left side of the sample, we choose the pixel at its right ...
if idx[1][i]==0:
kk=idx[1][i]+1
# else we choose the pixel at its left.
else:
kk=idx[1][i]-1
# Replace the value by the value of the neighbor select just before
self.phi.field[idx[0][i], idx[1][i]]= self.phi.field[k, kk]
self.phi1.field[idx[0][i], idx[1][i]]= self.phi1.field[k, kk]
self.grains.field[idx[0][i], idx[1][i]]= self.grains.field[k, kk]
# re-evaluate if there are still NaN values inside the microstructure
idx=np.where(np.isnan(self.grains.field))# re-evaluate the NaN
# find the value of the orientation for each phase
phi1=[]
phi=[]
phi2=[]
for i in list(range(np.max(np.int32(self.grains.field)+1))):
idx=np.where(np.int32(self.grains.field)==i)
if np.size(idx)!=0:
phi1.append(self.phi1.field[idx[0][0]][idx[1][0]])
phi.append(self.phi.field[idx[0][0]][idx[1][0]])
phi2.append(random.random()*2*math.pi)
else:
phi1.append(0)
phi.append(0)
phi2.append(0)
if np.isnan(phi1[-1]):
phi1[-1]=0
phi[-1]=0
phi2[-1]=0
################################
# Write the microstructure input
################################
# size of the map
ss=np.shape(self.grains.field)
# open micro.vtk file
micro_out=open(nameId+'_micro.vtk','w')
# write the header of the file
micro_out.write('# vtk DataFile Version 3.0 ' + str(datetime.date.today()) + '\n')
micro_out.write('craft output \n')
micro_out.write('ASCII \n')
micro_out.write('DATASET STRUCTURED_POINTS \n')
micro_out.write('DIMENSIONS ' + str(ss[1]) + ' ' + str(ss[0]) + ' 1\n')
micro_out.write('ORIGIN 0.000000 0.000000 0.000000 \n')
micro_out.write('SPACING ' + str(self.grains.res) + ' ' + str(self.grains.res) + ' 1.000000 \n')
micro_out.write('POINT_DATA ' + str(ss[0]*ss[1]) + '\n')
micro_out.write('SCALARS scalars float \n')
micro_out.write('LOOKUP_TABLE default \n')
for i in list(range(ss[0]))[::-1]:
for j in list(range(ss[1])):
micro_out.write(str(int(self.grains.field[i][j]))+' ')
micro_out.write('\n')
micro_out.close()
################################
##### Write the phase input ####
################################
phase_out=open(nameId+'.phase','w')
phase_out.write('#------------------------------------------------------------\n')
phase_out.write('# Date ' + str(datetime.date.today()) + ' Manip: ' + nameId + '\n')
phase_out.write('#------------------------------------------------------------\n')
phase_out.write('# This file gives for each phase \n# *the material \n# *its orientation (3 euler angles)\n')
phase_out.write('#\n#------------------------------------------------------------\n')
phase_out.write('# phase material phi1 Phi phi2\n')
phase_out.write('#------------------------------------------------------------\n')
for i in list(range(np.size(phi))):
#if 1-np.isnan(phi[i]):
phase_out.write(str(i) + ' 0 ' + str(phi1[i]) + ' ' + str(phi[i]) + ' ' + str(phi2[i]) + '\n');
phase_out.close()
################################
# Write an example of load file ##
################################
out_load=open(nameId + '.load','w');
out_load.write('#------------------------------------------------------------\n')
out_load.write('# Date ' + str(datetime.date.today()) + ' Manip: ' + nameId + '\n')
out_load.write('#------------------------------------------------------------\n')
out_load.write('# choice of the loading type \n')
out_load.write('# imposed stress direction: S \n')
out_load.write('# imposed stress: C \n')
out_load.write('# imposed strain: D \n')
out_load.write('C\n')
out_load.write('#------------------------------------------------------------\n')
out_load.write('# nb of steps time direction factor\n')
out_load.write('# 11 22 33 12 13 23\n')
out_load.write(' 5. 0 1 0 0 0 0 -0.5\n')
out_load.write('5. 100. 0 1 0 0 0 0 -0.5\n')
out_load.write('#\n')
out_load.write('#------------------------------------------------------------\n')
out_load.close()
###################################
# Write an example of output file #
###################################
out_output=open(nameId + '.output','w')
out_output.write('#------------------------------------------------------------\n')
out_output.write('# Date ' + str(datetime.date.today()) + ' Manip: ' + nameId + '\n')
out_output.write('#------------------------------------------------------------\n')
out_output.write('equivalent stress image = yes 10,60,100\n')
out_output.write('equivalent strain image = yes 10,60,100\n')
out_output.write('#\n')
out_output.write('stress image = yes 10,60,100\n')
out_output.write('strain image = yes 10,60,100\n')
out_output.write('#\n')
out_output.write('backstress image = yes 10,60,100\n')
out_output.write('#\n')
out_output.write('strain moment = yes 5:100\n')
out_output.write('stress moment = yes 5:100\n')
out_output.write('im_format=vtk\n')
out_output.close()
#####################################
## Write the input file for craft####
#####################################
out_in=open(nameId + '.in','w');
out_in.write('#------------------------------------------------------------\n')
out_in.write('# Date ' + str(datetime.date.today()) + ' Manip: ' + nameId + '\n')
out_in.write('#------------------------------------------------------------\n')
out_in.write('#\n')
out_in.write('#\n')
out_in.write('#------------------------------------------------------------\n')
out_in.write('# name of the file of the image of the microstructure\n')
out_in.write('microstructure=../'+ nameId+'_micro.vtk\n')
out_in.write('#\n')
out_in.write('#------------------------------------------------------------\n')
out_in.write('# name of the file of the description of phases\n')
out_in.write('phases=../'+nameId+'.phase\n')
out_in.write('#\n')
out_in.write('#------------------------------------------------------------\n')
out_in.write('# name of the file describing the materials the phases are made of:\n')
out_in.write('materials=../../../../Ice_Constitutive_Law/glace3_oc2_5mai2011.mat\n')
out_in.write('#\n')
out_in.write('#------------------------------------------------------------\n')
out_in.write('# file of the loading conditions:\n')
out_in.write('loading=../'+nameId + '.load\n')
out_in.write('#\n')
out_in.write('#------------------------------------------------------------\n')
out_in.write('# file telling the outputs one wants to obtain:\n')
out_in.write('output=../' +nameId + '.output\n')
out_in.write('#\n')
out_in.write('#------------------------------------------------------------\n')
out_in.write('# The parameter C0 has to be set by craft:\n')
out_in.write('C0=auto\n')
out_in.write('#\n')
out_in.write('#------------------------------------------------------------\n')
out_in.write('# # required precision for equilibrium and for loading conditions:\n')
out_in.write('precision=1.e-4, 1.e-4\n')
out_in.write('#------------------------------------------------------------\n')
out_in.close()
#-----------------------------------------------------------------------------------------------
def mesh(self,name,resGB=1,resInG=5,DistMin=5):
'''
Create mesh in vtk format
resInG _______
/
/
/ |
/
resGB ________/ |
DistMin
:param name: output file name without extension
:type name: str
:param resGB: resolution on the Grains Boundaries (in pixel)
:type resGB: float
:param resInG: resolution within the Grains (in pixel)
:type resInG: float
:param DistMin: starting distance for the transition between resGB and resInG
:type DistMin: float
'''
self.mean_grain()
nbG=np.int(np.nanmax(self.grains.field))
ori_vector=np.zeros([nbG+1,3])
for i in list(range(nbG+1)):
id=np.where(self.grains.field==i)
if len(id[0])>0:
phi1=self.phi1.field[id[0][0],id[1][0]]
phi=self.phi.field[id[0][0],id[1][0]]
ori_vector[i,0]=math.sin(phi)*math.cos(phi1-math.pi/2)
ori_vector[i,1]=math.sin(phi)*math.sin(phi1-math.pi/2)
ori_vector[i,2]=math.cos(phi)
ss=np.shape(self.grains.field)
res=self.grains.res
#Extract grainId map
grainId=self.grains.field
# remove the 0 values in the grainId array. To do so, each grain is dilated once.
#print('Building grainId map')
for i in list(range(np.int(np.nanmax(grainId)))):
mask=grainId==i+1
mask=skimage.morphology.dilation(mask)
grainId[mask]=i+1
grainId=np.pad(grainId,[(1, 1), (1, 1)],mode='constant')
# Extract contours of each grains
contour=[]
gId_list=[]
erode_list=[]
erode_Id=[]
for i in list(range(np.int(np.nanmax(grainId)))):
gi=grainId==i+1
if np.sum(gi)!=0:
pp=skimage.measure.find_contours(gi,level=0.5,fully_connected='high')
for j in list(range(len(pp))):
pp2=np.zeros(pp[j].shape)
pp2[:,0]=pp[j][:,1]
pp2[:,1]=ss[0]-pp[j][:,0]
contour.append(pp2)
gId_list.append(i+1)
## detect contour for inner mesh
for j in list(range(DistMin)):
gi=skimage.morphology.erosion(gi)
if np.sum(gi)!=0:
pp=skimage.measure.find_contours(gi,level=0.5,fully_connected='high')
for j in list(range(len(pp))):
pp2=np.zeros(pp[j].shape)
pp2[:,0]=pp[j][:,1]
pp2[:,1]=ss[0]-pp[j][:,0]
erode_list.append(pp2*res)
erode_Id.append(i+1)
# Find xmin, ymin, xmax, ymax, because of the padding xmin and ymin are not equal to 0
ss=grainId.shape
xmin=np.min(contour[0][:,0])
ymin=np.min(contour[0][:,1])
xmax=np.max(contour[0][:,0])
ymax=np.max(contour[0][:,1])
for i in list(range(len(contour))):
if xmin>np.min(contour[i][:,0]):
xmin=np.min(contour[i][:,0])
if xmax<np.max(contour[i][:,0]):
xmax=np.max(contour[i][:,0])
if ymin>np.min(contour[i][:,1]):
ymin=np.min(contour[i][:,1])
if ymax<np.max(contour[i][:,1]):
ymax=np.max(contour[i][:,1])
# move the microstructure to have a starting point at (0,0). This is needed for easier assignment of the grainId.
xmax=-xmin+xmax
ymax=-ymin+ymax
xminI=xmin
yminI=ymin
xmin=0
ymin=0
polyG=[]
Gcentroid=[]
for i in list(range(len(contour))):
contour[i][:,0]=(contour[i][:,0]-xminI)*res
contour[i][:,1]=(contour[i][:,1]-yminI)*res
polyG.append(shapely.geometry.Polygon(contour[i]))#.simplify(0))
Gcentroid.append(polyG[i].centroid)
# to remove hole in polygon
multi_polygon = shapely.geometry.MultiPolygon(polyG)
square=multi_polygon.convex_hull
x,y=square.exterior.xy
Cxmin=np.min(x)
Cxmax=np.max(x)
Cymin=np.min(y)
Cymax=np.max(y)
square=shapely.geometry.Polygon([[Cxmin,Cymin],[Cxmin,Cymax],[Cxmax,Cymax],[Cxmax,Cymin]])
missing=square-multi_polygon
allpolyG=[]
for ipoly in polyG:
allpolyG.append(ipoly)
for ipoly in missing:
allpolyG.append(ipoly)
allPoints=[]
GB=[]
for i in tqdm(range(len(allpolyG))):
gi=[]
xG,yG=allpolyG[i].exterior.xy
for j in list(range(len(xG))):
x=xG[j]
y=yG[j]
pos=np.array([x,y]) # Position of the point in pixel
gi.append(pos)
GB.append(gi)
Pin=0
Pout=0
with pygmsh.geo.Geometry() as geom:
geofile = []
allSurface=[]
# add all line to geom
for i in list(range(len(GB))):
# add polygon
evaltxt='geofile.append(geom.add_polygon(['
for j in list(range(len(GB[i])-2)):
evaltxt=evaltxt+'['+str(GB[i][j][0])+','+str(GB[i][j][1])+'],'
evaltxt=evaltxt+'['+str(GB[i][j+1][0])+','+str(GB[i][j+1][1])+']],mesh_size=resGB*res))'
eval(evaltxt)
allSurface.append(len(geofile)-1)
if (i+1) in erode_Id:
id=np.where(np.array(erode_Id)==i+1)[0]
for k in id:
evaltxt='geofile.append(geom.add_polygon(['
for j in list(range(len(erode_list[k])-1)):
if j%resInG==0:
geofile.append(geom.add_point([erode_list[k][j][0],erode_list[k][j][1]],resInG*res))
p1=shapely.geometry.Point(np.array([erode_list[k][j][0],erode_list[k][j][1]]))
for ik in allSurface:
liscor=[]
for cor in geofile[ik].points:
liscor.append(cor.x)
ggg=shapely.geometry.Polygon(liscor)
if ggg.contains(p1):
geom.in_surface(geofile[-1], geofile[ik].surface)
break
#add physical line
p0 = geom.add_point([Cxmin, Cymin, 0], mesh_size=resGB)
p1 = geom.add_point([Cxmin, Cymax, 0], mesh_size=resGB)
p2 = geom.add_point([Cxmax, Cymax, 0], mesh_size=resGB)
p3 = geom.add_point([Cxmax, Cymin, 0], mesh_size=resGB)
l0 = geom.add_line(p0, p1)
l1 = geom.add_line(p1, p2)
l2 = geom.add_line(p2, p3)
l3 = geom.add_line(p3, p0)
geofile.append(geom.add_physical(l1,label='Top'))
geofile.append(geom.add_physical(l3,label='Bottom'))
geofile.append(geom.add_physical(l0,label='Left'))
geofile.append(geom.add_physical(l2,label='Right'))
print('geo done')
mesh = geom.generate_mesh()
print('mesh done')
#################################
mesh.write(name+'.vtk')
# Use vtk to add grainId value
reader = vtk.vtkUnstructuredGridReader()
reader.SetFileName(name+'.vtk')
reader.Update()
polydata = reader.GetOutput()
# compute grainId
mesh_grains=vtk.vtkIntArray()
mesh_grains.SetNumberOfComponents(0)
mesh_grains.SetName("GrainsId")
# compute orientation
ori=vtk.vtkDoubleArray()
ori.SetNumberOfComponents(3)
ori.SetName("Orientation")
kkk=0
while np.sum(grainId==0)!=0:
for i in list(range(np.int(np.nanmax(grainId)))):
mask=grainId==i+1
mask=skimage.morphology.dilation(mask)
grainId[mask]=i+1
kkk+=1
for i in tqdm(range(polydata.GetNumberOfCells())):
if polydata.GetCellType(i)==5:
tri=polydata.GetCell(i)
center=np.zeros(3)
tri.TriangleCenter(tri.GetPoints().GetPoint(0),tri.GetPoints().GetPoint(1),tri.GetPoints().GetPoint(2),center)
p1=shapely.geometry.Point(center)
id_g=-1
for j in list(range(len(polyG))):
if polyG[j].contains(p1):
id_g=gId_list[j]
if id_g==-1:
id_g=np.int(grainId[np.int(ss[0]-center[1]/res),np.int(center[0]/res)])
if id_g==0:
print('find 0')
mesh_grains.InsertNextValue(id_g)
ori.InsertNextValue(ori_vector[id_g,0])
ori.InsertNextValue(ori_vector[id_g,1])
ori.InsertNextValue(ori_vector[id_g,2])
if np.isnan(ori_vector[id_g,0]):
print('Warning nan value', id_g)
else:
mesh_grains.InsertNextValue(0)
ori.InsertNextValue(0)
ori.InsertNextValue(0)
ori.InsertNextValue(0)
polydata.GetCellData().SetScalars(mesh_grains)
polydata.GetCellData().AddArray(ori)
writer = vtk.vtkXMLUnstructuredGridWriter()
writer.SetFileName(name+'.vtu')
writer.SetInputData(polydata)
writer.Write()
return
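# Illustrative usage sketch (added for clarity): the file name and resolutions are example
# values only; the call writes sample01.vtk and sample01.vtu in the current working directory.
#
# data.mesh('sample01', resGB=1, resInG=5, DistMin=5)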
################################
def new_ori_TJ(self,mask,mean=True):
'''
Extract orientation to compare with CraFT simulation
'''
ng=(self.grains*mask).field
res=[]
con=True
while con:
gID=self.grains.mask_build()
print('triple junction label')
x=input()
ng=(self.grains*gID).field
ngmax=np.nanmax(ng)
for i in list(range(np.int32(ngmax))):
id=np.where(self.grains.field==i)
if len(id[0])>0:
if mean:
pp=np.array([[id[0][0],id[1][0]]])
phi1,pos=self.phi1.extract_data(pos=pp)
phi,pos=self.phi.extract_data(pos=pp)
if ~np.isnan(phi1):
res.append([i,phi1,phi,float(x)])
else:
for j in list(range(len(id[0]))):
pp=np.array([[id[0][j],id[1][j]]])
phi1,pos=self.phi1.extract_data(pos=pp)
phi,pos=self.phi.extract_data(pos=pp)
if ~np.isnan(phi1):
res.append([i,phi1,phi,float(x)])
print('continue ? 0 no, 1 yes')
con=int(input()) # 0 stops the loop, 1 continues
return res
##########################################################################
###########################Interactive function###########################
##########################################################################
def misorientation_profile(self, ploton=True, plot='all',orientation=False):
'''
Compute the misorientation profile along a line
:param plot: option for the misorientation profile plot, 'all' (default), 'mis2o', 'mis2p'
:type plot: str
:param orientation: option for the color code used for the map, False (default) use phi1 and True use colorwheel (take time)
:type orientation: bool
.. note:: the 2 points defining the profile line are selected by clicking on the map
:return: x - coordinate along the line
:rtype: array, float
:return: mis2o,mis2p - misorientation angle to the origin, and misorientation angle to the previous pixel
:rtype: array, float
:return: pos - coordinate of the profile line
.. note:: if ploton=True, a figure is drawn with the line on the orientation map and the misorientation profile
:Exemple:
>>> x, mis2o, mis2p, pos = data.misorientation_profile()
>>> plt.show()
'''
# plot the data with phi1 value
h=plt.figure()
self.phi1.plot()
# select initial and final points for the line
print('Select initial and final points for the line :')
pos=np.array(pylab.ginput(2))
plt.close(h)
x, mis2o, mis2p=self.misorientation_extractor(pos)
if ploton:
plt.subplot(1,2,1)
if orientation:
self.plot()
else:
self.phi1.plot()
plt.plot(pos[:,0],pos[:,1],'-k')
plt.subplot(122)
if plot in ['all','mis2o']:
plt.plot(x,mis2o,'-b',label='mis2o')
if plot in ['all','mis2p']:
plt.plot(x,mis2p,'-k',label='mis2p')
plt.legend()
plt.grid()
plt.xlabel('Distance')
plt.ylabel('Angle')
return x, mis2o, mis2p, pos
def grain_ori(self):
'''
Give the grain orientation output
'''
plt.imshow(self.grains.field,aspect='equal')
plt.waitforbuttonpress()
print('middle mouse click when you are finished')
#grain wanted for the plot
id=np.int32(np.array(pylab.ginput(0)))
plt.close('all')
phi=self.phi.field[id[:,1],id[:,0]]
phi1=self.phi1.field[id[:,1],id[:,0]]
return [phi1,phi]
def crop(self,xmin=0,xmax=0,ymin=0,ymax=0,new=False):
'''
Crop function to select the area of interest
:return: cropped aita object
:rtype: aita
:Exemple: >>> data.crop()
.. note:: click on the top left corner and bottom right corner to select the area
'''
if (xmin+xmax+ymin+ymax)==0:
print('Warning : if you are using jupyter notebook with %matplotlib inline option, you should add %matplotlib qt to have a pop up figure before this function. You can add %matplotlib inline after if you want to come back to the initial configuration')
# plot the data
h=self.phi.plot()
# select top left and bottom right corner for crop
print('Select top left and bottom right corner for crop :')
x=np.array(pylab.ginput(2))/self.phi.res
plt.close("all")
# create x and Y coordinate
xx=[x[0][0],x[1][0]]
yy=[x[0][1],x[1][1]]
# size of the initial map
ss=np.shape(self.phi.field)
# find xmin xmax ymin and ymax
xmin=int(np.ceil(np.min(xx)))
xmax=int(np.floor(np.max(xx)))
ymin=int(ss[0]-np.ceil(np.max(yy)))
ymax=int(ss[0]-np.floor(np.min(yy)))
if new:
res=self.phi1.res
# crop the map
phi=self.phi.field[ymin:ymax, xmin:xmax]
phi1=self.phi1.field[ymin:ymax, xmin:xmax]
qua=self.qua.field[ymin:ymax, xmin:xmax]
micro=self.micro.field[ymin:ymax, xmin:xmax]
new_data=aita(phi1,phi,qua,micro,resolution=res)
pos=np.array([xmin,xmax,ymin,ymax])
print('Cropped')
return pos,new_data
else:
# crop the map
self.phi.field=self.phi.field[ymin:ymax, xmin:xmax]
self.phi1.field=self.phi1.field[ymin:ymax, xmin:xmax]
self.qua.field=self.qua.field[ymin:ymax, xmin:xmax]
self.micro.field=self.micro.field[ymin:ymax, xmin:xmax]
self.grains=self.micro.grain_label()
# replace grains boundary with NaN number
self.grains.field=np.array(self.grains.field,float)
idx=np.where(self.micro.field==1)
self.grains.field[idx]=np.nan
print('Cropped')
return np.array([xmin,xmax,ymin,ymax])
#-------------------------------------------------------------------------
def grelon(self,posc=np.array([0,0])):
'''
Compute the angle between the direction defined by the "center" and each pixel, and the c-axis direction
:return: angle (degree)
:rtype: im2d.image2d
'''
if (posc==0).all():
# Find the center
self.phi1.plot()
print('Click on the center of the hailstone')
posc=np.array(plt.ginput(1)[0])
plt.close('all')
ss=np.shape(self.phi1.field)
# c-axis vector
xc=np.cos(self.phi1.field-math.pi/2)*np.sin(self.phi.field)
yc=np.sin(self.phi1.field-math.pi/2)*np.sin(self.phi.field)
nn=(xc**2+yc**2.)**.5
xc=xc/nn
yc=yc/nn
# build x y
xi=np.zeros(ss)
yi=np.transpose(np.zeros(ss))
xl=np.arange(ss[0])
yl=np.arange(ss[1])
xi[:,:]=yl
yi[:,:]=xl
yi=np.transpose(yi)
# center and norm
xcen=np.int32(posc[0]/self.phi1.res)
ycen=(ss[0]-np.int32(posc[1]/self.phi1.res))
xv=xi-xcen
yv=yi-ycen
nn=(xv**2.+yv**2.)**0.5
xv=xv/nn
yv=yv/nn
#
#plt.figure()
#plt.imshow(nn)
#plt.figure()
#plt.quiver(xi[xcen-50:xcen-50],yi[ycen-50:ycen-50],xv[xcen-50:xcen-50],yv[ycen-50:ycen-50],scale=1000)
#
acos=xv*xc+yv*yc
angle=np.arccos(acos)*180./math.pi
id=np.where(angle>90)
angle[id]=180-angle[id]
return im2d.image2d(angle,self.phi1.res)
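# Illustrative usage sketch (added for clarity): the centre position is given in millimetres;
# the values below are arbitrary examples. Without posc the centre is picked by clicking on the map.
#
# angle_map = data.grelon(posc=np.array([10.0, 12.5]))
# angle_map.plot()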
#-------------------------------------------------------------------------------------
def addgrain(self,ori=0):
'''
add a grain inside the microstructure
:param ori: orientation of the new grain [phi1 phi] (default random value)
:type ori: array, float
:return: new_micro, object with the new grain include
:rtype: aita
:Exemple:
>>> data.addgrain()
'''
# select the contour of the grains
h=self.grains.plot()
# click on the 3 summits (vertices) of the new grain
plt.waitforbuttonpress()
print('click on the 3 summits (vertices) of the new grain :')
x=np.array(pylab.ginput(3))/self.grains.res
plt.close('all')
# select a subarea containing the triangle
minx=np.int(np.fix(np.min(x[:,0])))
maxx=np.int(np.ceil(np.max(x[:,0])))
miny=np.int(np.fix(np.min(x[:,1])))
maxy=np.int(np.ceil(np.max(x[:,1])))
# write all point inside this area
gpoint=[]
for i in list(range(minx,maxx)):
for j in list(range(miny,maxy)):
gpoint.append([i,j])
# test if the point is inside the triangle
gIn=[]
for i in list(range(len(gpoint))):
gIn.append(isInsideTriangle(gpoint[i],x[0,:],x[1,:],x[2,:]))
gpointIn=np.array(gpoint)[np.array(gIn)]
#transform in xIn and yIn, the coordinate of the map
xIn=np.shape(self.grains.field)[0]-gpointIn[:,1]
yIn=gpointIn[:,0]
# add one grains
self.grains.field[xIn,yIn]=np.nanmax(self.grains.field)+1
# add the orientation of the grains
if ori==0:
self.phi1.field[xIn,yIn]=random.random()*2*math.pi
self.phi.field[xIn,yIn]=random.random()*math.pi/2
else:
self.phi1.field[xIn,yIn]=ori[0]
self.phi.field[xIn,yIn]=ori[1]
return
##########################################################################
####################Interactive function for notebook#####################
##########################################################################
def interactive_grelon(self):
'''
Interactive grelon function for jupyter notebook
'''
f,a = plt.subplots()
self.phi1.plot()
pos = []
def onclick(event):
pos.append([event.xdata,event.ydata])
plt.plot(pos[-1][0],pos[-1][1],'+k')
f.canvas.mpl_connect('button_press_event', onclick)
buttonExport = widgets.Button(description='Export')
def export(_):
res=self.grelon(posc=np.array(pos[-1]))
export.map=res
export.center=np.array(pos[-1])
return export
buttonExport.on_click(export)
# displaying button and its output together
display(buttonExport)
return export
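# Illustrative notebook usage sketch (added for clarity): click the hailstone centre on the
# figure, then press 'Export'; the result is retrieved from the returned handle.
#
# out = data.interactive_grelon()
# ... click on the figure, then press Export ...
# angle_map = out.map
# centre = out.center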
#--------------------------------------------------------------------------
def interactive_misorientation_profile(self):
'''
Interactive misorientation profile for jupyter notebook
'''
f,a = plt.subplots()
self.phi1.plot()
pos = []
def onclick(event):
pos.append([event.xdata,event.ydata])
f.canvas.mpl_connect('button_press_event', onclick)
buttonShow = widgets.Button(description='Show line')
buttonExtract = widgets.Button(description='Extract profile')
def draw_line(_):
pos_mis=np.array(pos[-2::])
plt.plot(pos_mis[:,0],pos_mis[:,1],'-k')
def extract_data(_):
pos_mis=np.array(pos[-2::])
x,mis2o,mis2p=self.misorientation_extractor(pos_mis)
extract_data.x=x
extract_data.mis2o=mis2o
extract_data.mis2p=mis2p
extract_data.pos=pos_mis
return extract_data
# linking button and function together using a button's method
buttonShow.on_click(draw_line)
buttonExtract.on_click(extract_data)
# displaying button and its output together
display(buttonShow,buttonExtract)
return extract_data
#--------------------------------------------------------------------------
def interactive_crop(self,new=False):
'''
out=data_aita.interactive_crop()
This function can be used to crop within a jupyter notebook
It will crop the data and export the value of the crop in out.pos
:param new: create a new data variable (default:False; erase input data)
:type new: bool
.. note:: If you use new=True (out=interactive_crop(new=true)) you can find the cropped data in out.crop_data
.. note:: The position of the rectangle used for the cropping is in out.pos
'''
def onselect(eclick, erelease):
"eclick and erelease are matplotlib events at press and release."
print('startposition: (%f, %f)' % (eclick.xdata, eclick.ydata))
print('endposition : (%f, %f)' % (erelease.xdata, erelease.ydata))
print('used button : ', eclick.button)
def toggle_selector(event):
print('Key pressed.')
if event.key in ['Q', 'q'] and toggle_selector.RS.active:
print('RectangleSelector deactivated.')
toggle_selector.RS.set_active(False)
if event.key in ['A', 'a'] and not toggle_selector.RS.active:
print('RectangleSelector activated.')
toggle_selector.RS.set_active(True)
print('1. click and drag the mouse on the figure to select the area')
print('2. you can draw the rectangle using the button "Draw area"')
print('3. if you are unhappy with the selection restart to 1.')
print('4. if you are happy with the selection click on "Export crop" (only the last rectangle is taken into account)')
fig,ax=plt.subplots()
self.phi1.plot()
toggle_selector.RS = matplotlib.widgets.RectangleSelector(ax, onselect, drawtype='box')
fig.canvas.mpl_connect('key_press_event', toggle_selector)
buttonCrop = widgets.Button(description='Export crop')
buttonDraw = widgets.Button(description='Draw area')
ss=np.shape(self.phi1.field)
def draw_area(_):
x=list(toggle_selector.RS.corners[0])
x.append(x[0])
y=list(toggle_selector.RS.corners[1])
y.append(y[0])
xmin=int(np.ceil(np.min(x)))
xmax=int(np.floor(np.max(x)))
ymin=int(ss[0]-np.ceil(np.max(y)))
ymax=int(ss[0]-np.floor(np.min(y)))
plt.plot(x,y,'-k')
def get_data(_):
# what happens when we press the button
x=list(toggle_selector.RS.corners[0])
x.append(x[0])
x=np.array(x)/self.phi1.res
y=list(toggle_selector.RS.corners[1])
y.append(y[0])
y=np.array(y)/self.phi1.res
xmin=int(np.ceil(np.min(x)))
xmax=int(np.floor(np.max(x)))
ymin=int(ss[0]-np.ceil(np.max(y)))
ymax=int(ss[0]-np.floor(np.min(y)))
plt.plot(x*self.phi1.res,y*self.phi1.res,'-b')
out=self.crop(xmin=xmin,xmax=xmax,ymin=ymin,ymax=ymax,new=new)
if new:
get_data.pos=out[0]
get_data.crop_data=out[1]
else:
get_data.pos=out
return get_data
# linking button and function together using a button's method
buttonDraw.on_click(draw_area)
buttonCrop.on_click(get_data)
# displaying button and its output together
display(buttonDraw,buttonCrop)
return get_data
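# Illustrative notebook usage sketch (added for clarity): draw the rectangle, press
# 'Export crop', then read the results from the returned handle.
#
# out = data.interactive_crop(new=True)
# ... select the area, then press Export crop ...
# cropped = out.crop_data # new aita object (only when new=True)
# box = out.pos # [xmin, xmax, ymin, ymax] in pixels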
#--------------------------------------------------------------------------
def interactive_segmentation(self,val_scharr_init=1.5,use_scharr_init=True,val_canny_init=1.5,use_canny_init=True,val_qua_init=60,use_qua_init=False,inc_border_init=False,mask=False):
'''
This function allows you to perform grain segmentation on aita data.
The initial values of the segmentation filters can be set up through the parameters.
:param val_scharr_init: scharr filter usually between 0 and 10 (default : 1.5)
:type val_scharr_init: float
:param use_scharr_init: use scharr filter
:type use_scharr_init: bool
:param val_canny_init: canny filter usually between 0 and 10 (default : 1.5)
:type val_canny_init: float
:param use_canny_init: use canny filter
:type use_canny_init: bool
:param val_qua_init: quality filter usually between 0 and 100 (default : 60)
:type val_qua_init: int
:param use_qua_init: use quality filter
:type use_qua_init: bool
:param inc_border_init: add image border to grain boundaries
:type inc_border_init: bool
.. note:: on data with holes such as snow, using quality filter is not recommended
'''
#~~~~~~~~~~~~~~~~~~ segmentation function~~~~~~~~~~~~~~~~
def seg_scharr(field):
## Commented bit are previous settings which just use raw Phi1
## define Scharr filter
scharr = np.array([[-3-3j,0-10j,3-3j],[-10+0j,0+0j,10+0j],[-3+3j,0+10j,3+3j]])
## run edge detection.
edge_sin = np.abs(np.real(scipy.signal.convolve2d(np.sin(field*2)+1,scharr,boundary='symm',mode='same')))
return edge_sin
#~~~~~~~~~~~~~~~~~~pruning function~~~~~~~~~~~~~~~~~~~~~~
def endPoints(skel):
endpoint1=np.array([[0, 0, 0],
[0, 1, 0],
[2, 1, 2]])
endpoint2=np.array([[0, 0, 0],
[0, 1, 2],
[0, 2, 1]])
endpoint3=np.array([[0, 0, 2],
[0, 1, 1],
[0, 0, 2]])
endpoint4=np.array([[0, 2, 1],
[0, 1, 2],
[0, 0, 0]])
endpoint5=np.array([[2, 1, 2],
[0, 1, 0],
[0, 0, 0]])
endpoint6=np.array([[1, 2, 0],
[2, 1, 0],
[0, 0, 0]])
endpoint7=np.array([[2, 0, 0],
[1, 1, 0],
[2, 0, 0]])
endpoint8=np.array([[0, 0, 0],
[2, 1, 0],
[1, 2, 0]])
ep1=mh.morph.hitmiss(skel,endpoint1)
ep2=mh.morph.hitmiss(skel,endpoint2)
ep3=mh.morph.hitmiss(skel,endpoint3)
ep4=mh.morph.hitmiss(skel,endpoint4)
ep5=mh.morph.hitmiss(skel,endpoint5)
ep6=mh.morph.hitmiss(skel,endpoint6)
ep7=mh.morph.hitmiss(skel,endpoint7)
ep8=mh.morph.hitmiss(skel,endpoint8)
ep = ep1+ep2+ep3+ep4+ep5+ep6+ep7+ep8
return ep
def pruning(skeleton, size):
'''remove iteratively end points "size"
times from the skeletonget_ipython().__class__.__name__
'''
for i in range(0, size):
endpoints = endPoints(skeleton)
endpoints = np.logical_not(endpoints)
skeleton = np.logical_and(skeleton,endpoints)
return skeleton
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#plot image
pltimg,data_img=self.plot()
pltimg,data_img_semi=self.plot(semi=True)
phi1=self.phi1.field
phi=self.phi.field
qua=self.qua.field
if mask!=False:
id=np.isnan(mask.field)
data_img[id,0]=np.nan
data_img_semi[id,0]=np.nan
data_img[id,1]=np.nan
data_img_semi[id,1]=np.nan
data_img[id,2]=np.nan
data_img_semi[id,2]=np.nan
phi1[id]=np.nan
phi[id]=np.nan
qua[id]=np.nan
def calcGB(val_scharr,use_scharr,val_canny,use_canny,val_qua,use_qua,dilate,CM,CW,inc_border):
micro=[]
IMdata=[]
if CW=='semi color wheel' or CW=='both color wheel':
IMdata.append(data_img_semi)
if CW=='full color wheel' or CW=='both color wheel':
IMdata.append(data_img)
if use_canny:
for im in IMdata:
edges1 = skimage.feature.canny(im[:,:,0],sigma=val_canny)
edges2 = skimage.feature.canny(im[:,:,1],sigma=val_canny)
edges3 = skimage.feature.canny(im[:,:,2],sigma=val_canny)
micro.append((edges1+edges2+edges3)>0.5)
if use_scharr:
seg1=seg_scharr(phi1)
seg2=seg_scharr(phi)
micro.append((seg1+seg2)>val_scharr)
if use_qua:
micro.append(qua<val_qua)
Edge_detect=np.zeros(micro[0].shape)
for m in micro:
Edge_detect+=m/len(micro)
if inc_border:
if mask==False:
Edge_detect[0,:]=1
Edge_detect[-1,:]=1
Edge_detect[:,0]=1
Edge_detect[:,-1]=1
else:
id=np.isnan(mask.field)
idx,idy=np.where(mask.field==1)
xmin=np.min(idx)
xmax=np.max(idx)
ymin=np.min(idy)
ymax=np.max(idy)
Edge_detect[xmin,:]=1
Edge_detect[xmax,:]=1
Edge_detect[:,ymin]=1
Edge_detect[:,ymax]=1
Edge_detect[id]=0
microCL=skimage.morphology.area_closing(Edge_detect)
# skeleton
skeleton = skimage.morphology.skeletonize(microCL,method='lee')
# pruning
skeleton=pruning(skeleton,100)
# remove dot
mat1=np.array([[-1,-1,-1],[-1,1,-1],[-1,-1,-1]])
skeleton[scipy.signal.convolve2d(skeleton,mat1,mode='same',boundary='fill')==1]=0
#remove small grain
#skeleton2=skeleton
#for i in range(small_grain):
# skeleton2=skimage.morphology.dilation(skeleton2)
# skeleton2=pruning(skeleton2,100)
#TrueMicro=skimage.morphology.skeletonize(skeleton2)
TrueMicro=skeleton
if inc_border:
if mask==False:
TrueMicro[0,:]=1
TrueMicro[-1,:]=1
TrueMicro[:,0]=1
TrueMicro[:,-1]=1
else:
id=np.isnan(mask.field)
idx,idy=np.where(mask.field==1)
xmin=np.min(idx)
xmax=np.max(idx)
ymin=np.min(idy)
ymax=np.max(idy)
TrueMicro[xmin,:]=1
TrueMicro[xmax,:]=1
TrueMicro[:,ymin]=1
TrueMicro[:,ymax]=1
TrueMicro[id]=0
dTrueMicro=TrueMicro
for i in range(dilate):
dTrueMicro=skimage.morphology.dilation(dTrueMicro)
#fig,ax=plt.subplots()
if CM=='semi color wheel':
plt.imshow(data_img_semi)
plt.imshow(dTrueMicro,alpha=dTrueMicro.astype(float),cmap=cm.gray)
elif CM=='full color wheel':
plt.imshow(data_img)
plt.imshow(dTrueMicro,alpha=dTrueMicro.astype(float),cmap=cm.gray)
elif CM=='none':
plt.imshow(dTrueMicro,cmap=cm.gray)
#toggle_selector.RS = matplotlib.widgets.RectangleSelector(ax, onselect, drawtype='box')
#fig.canvas.mpl_connect('key_press_event', toggle_selector)
return TrueMicro
def export_micro(_):
TrueMicro=calcGB(val_scharr.get_interact_value(),use_scharr.get_interact_value(),val_canny.get_interact_value(),use_canny.get_interact_value(),val_qua.get_interact_value(),use_qua.get_interact_value(),dilate.get_interact_value(),CM.get_interact_value(),CW.get_interact_value(),inc_border.get_interact_value())
# create microstructure
self.micro=im2d.micro2d(TrueMicro,self.micro.res)
self.grains=self.micro.grain_label()
# replace grains boundary with NaN number
self.grains.field=np.array(self.grains.field,float)
idx=np.where(self.micro.field==1)
self.grains.field[idx]=np.nan
export_micro.val_scharr=val_scharr.get_interact_value()
export_micro.use_scharr=use_scharr.get_interact_value()
export_micro.val_canny=val_canny.get_interact_value()
export_micro.use_canny=use_canny.get_interact_value()
export_micro.img_canny=CW.get_interact_value()
export_micro.val_quality=val_qua.get_interact_value()
export_micro.use_quality=use_qua.get_interact_value()
export_micro.include_border=inc_border.get_interact_value()
return export_micro
#~~~~~~~~~~~~~~~~~~~~~~~~~ interactive plot~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
val_scharr=widgets.FloatSlider(value=val_scharr_init,min=0,max=10.0,step=0.1,description='Scharr filter:',disabled=False,continuous_update=False,orientation='horizontal',readout=True,readout_format='.1f')
use_scharr=widgets.Checkbox(value=use_scharr_init,description='Use scharr filter',disabled=False)
val_canny=widgets.FloatSlider(value=val_canny_init,min=0,max=10.0,step=0.1,description='Canny filter:',disabled=False,continuous_update=False,orientation='horizontal',readout=True,readout_format='.1f')
use_canny=widgets.Checkbox(value=use_canny_init,description='Use canny filter',disabled=False)
val_qua=widgets.FloatSlider(value=val_qua_init,min=0,max=100,step=1,description='Quality filter:',disabled=False,continuous_update=False,orientation='horizontal',readout=True,readout_format='.1f')
use_qua=widgets.Checkbox(value=use_qua_init,description='Use Quality filter',disabled=False)
inc_border=widgets.Checkbox(value=inc_border_init,description='Include border as grain boundaries',disabled=False)
#small_grain=widgets.IntSlider(value=0,min=0,max=5,step=1,description='Remove small grain:',disabled=False,continuous_update=False,orientation='horizontal',readout=True,readout_format='d')
dilate=widgets.IntSlider(value=0,min=0,max=10,step=1,description='Dilate GB:',disabled=False,continuous_update=False,orientation='horizontal',readout=True,readout_format='d')
CM=widgets.Dropdown(value='semi color wheel', options=['semi color wheel', 'full color wheel', 'none'], description='Plot colormap')
CW=widgets.Dropdown(value='semi color wheel', options=['semi color wheel', 'full color wheel', 'both color wheel'], description='Segmentation colormap')
buttonExport = widgets.Button(description='Export AITA')
ui_scharr=widgets.HBox([val_scharr,use_scharr])
ui_canny=widgets.HBox([val_canny,use_canny,CW])
ui_quality=widgets.HBox([val_qua,use_qua])
ui=widgets.VBox([ui_scharr,ui_canny,ui_quality,inc_border,dilate,CM,buttonExport])
out = widgets.interactive_output(calcGB,{'val_scharr': val_scharr,'use_scharr':use_scharr,'val_canny':val_canny,'use_canny':use_canny,'val_qua':val_qua,'use_qua':use_qua,'dilate': dilate,'CM': CM,'CW': CW,'inc_border': inc_border})
display(ui,out)
buttonExport.on_click(export_micro)
return export_micro
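    # Added usage note (not part of the original file): after clicking the
    # 'Export AITA' button above, the segmentation is written back onto the
    # aita object itself -- self.micro holds the micro2d grain-boundary map and
    # self.grains the labelled grains (boundary pixels set to NaN). The filter
    # settings used are attached as attributes of the returned export_micro
    # function (e.g. export_micro.val_scharr, export_micro.use_canny).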
#--------------------------------------------------------------------------
def rectangular_mask(self):
        '''
        out=data_aita.rectangular_mask()
        Interactively select a rectangular area within a jupyter notebook and export it as a mask.
        .. note:: After clicking on "Export mask", the resulting im2d.mask2d object is available in out.mask
        .. note:: Only the last rectangle drawn is taken into account
        '''
def onselect(eclick, erelease):
"eclick and erelease are matplotlib events at press and release."
print('startposition: (%f, %f)' % (eclick.xdata, eclick.ydata))
print('endposition : (%f, %f)' % (erelease.xdata, erelease.ydata))
print('used button : ', eclick.button)
def toggle_selector(event):
print('Key pressed.')
if event.key in ['Q', 'q'] and toggle_selector.RS.active:
print('RectangleSelector deactivated.')
toggle_selector.RS.set_active(False)
if event.key in ['A', 'a'] and not toggle_selector.RS.active:
print('RectangleSelector activated.')
toggle_selector.RS.set_active(True)
        print('1. click and drag the mouse on the figure to select the area')
        print('2. you can draw the rectangle using the button "Draw area"')
        print('3. if you are unhappy with the selection, restart from 1.')
        print('4. if you are happy with the selection, click on "Export mask" (only the last rectangle is taken into account)')
fig,ax=plt.subplots()
self.phi1.plot()
toggle_selector.RS = matplotlib.widgets.RectangleSelector(ax, onselect, drawtype='box')
fig.canvas.mpl_connect('key_press_event', toggle_selector)
buttonCrop = widgets.Button(description='Export mask')
buttonDraw = widgets.Button(description='Draw area')
ss=np.shape(self.phi1.field)
def draw_area(_):
x=list(toggle_selector.RS.corners[0])
x.append(x[0])
y=list(toggle_selector.RS.corners[1])
y.append(y[0])
xmin=int(np.ceil(np.min(x)))
xmax=int(np.floor(np.max(x)))
ymin=int(ss[0]-np.ceil(np.max(y)))
ymax=int(ss[0]-np.floor(np.min(y)))
plt.plot(x,y,'-k')
def get_data(_):
# what happens when we press the button
x=list(toggle_selector.RS.corners[0])
x.append(x[0])
x=np.array(x)/self.phi1.res
y=list(toggle_selector.RS.corners[1])
y.append(y[0])
y=np.array(y)/self.phi1.res
xmin=int(np.ceil(np.min(x)))
xmax=int(np.floor(np.max(x)))
ymin=int(ss[0]-np.ceil(np.max(y)))
ymax=int(ss[0]-np.floor(np.min(y)))
# create the mask
out=np.ones(ss)
for i in list(range(ymin)):
out[i,:]=np.nan
for i in list(range(xmin)):
out[:,i]=np.nan
listx=np.linspace(xmax,ss[0]-1,ss[0]-xmax)
listy=np.linspace(ymax,ss[1]-1,ss[1]-ymax)
plt.plot(x*self.phi1.res,y*self.phi1.res,'-b')
for i in listy:
out[np.int32(i),:]=np.nan
for i in listx:
out[:,np.int32(i)]=np.nan
out=im2d.mask2d(out,self.phi1.res)
get_data.mask=out
return get_data
# linking button and function together using a button's method
buttonDraw.on_click(draw_area)
buttonCrop.on_click(get_data)
# displaying button and its output together
display(buttonDraw,buttonCrop)
return get_data
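    # Added usage sketch (not part of the original file); `data` is a
    # hypothetical aita instance used only for illustration:
    #   out = data.rectangular_mask()   # draw a rectangle, press 'Export mask'
    #   mask = out.mask                 # im2d.mask2d object built by get_data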
##########################################################################
###################### Function need for aita class #####################
##########################################################################
def cart2pol(x, y):
    '''
    Convert Cartesian coordinates x,y into polar coordinates rho,theta
    :param x: x Cartesian coordinate
    :param y: y Cartesian coordinate
    :type x: float
    :type y: float
    :return: rho (radius), theta (angle)
    :rtype: float
    :Example: >>> rho,theta=cart2pol(x,y)
    '''
    # transform Cartesian to polar coordinates
rho = np.sqrt(x**2 + y**2)
phi = np.arctan2(y, x)
return(rho, phi)
def lut(nx=512,semi=False,circle=True):
    '''
    Create a 2D colorwheel
    :param nx: number of pixels along each side of the colorwheel
    :param circle: whether to black out the area outside the circular colorwheel
    :param semi: whether to build a semi (180 degree) colorwheel
    :type nx: int
    :type circle: bool
    :type semi: bool
    :return: lut
    :rtype: array of size [nx,nx,3]
    :Example:
        >>> lut2d=lut()
        >>> plt.imshow(lut2d)
        >>> plt.show()
    '''
x=np.linspace(-math.pi/2, math.pi/2, nx)
y=np.linspace(-math.pi/2, math.pi/2, nx)
xv, yv = np.meshgrid(x, y)
rho,phi=cart2pol(xv, yv)
if semi:
phi=np.mod(phi,np.pi)
h = (phi-np.min(phi))/(np.max(phi)-np.min(phi))
v = rho/np.max(rho)
luthsv = np.ones((nx, nx,3))
luthsv[:,:,0]=h
luthsv[:,:,2]=v
# colorwheel rgb
lutrgb = np.ones((nx, nx,3))
for i in list(range(nx)):
for j in list(range(nx)):
lutrgb[i,j,0],lutrgb[i,j,1],lutrgb[i,j,2]=colorsys.hsv_to_rgb(luthsv[i,j,0],luthsv[i,j,1],luthsv[i,j,2])
    # mask out pixels outside the circle to obtain a circular colorwheel
if circle:
for i in list(range(nx)):
for j in list(range(nx)):
if ((i-nx/2)**2+(j-nx/2)**2)**0.5>(nx/2):
lutrgb[i,j,0]=0
lutrgb[i,j,1]=0
lutrgb[i,j,2]=0
return lutrgb
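# Minimal added sketch (not part of the original file): build a semi and a full
# colorwheel; it relies only on the module-level imports already used above.
def _demo_lut():
    semi_wheel = lut(nx=128, semi=True)
    full_wheel = lut(nx=128, semi=False)
    return semi_wheel.shape, full_wheel.shape   # both (128, 128, 3)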
def isInsideTriangle(P,p1,p2,p3): #is P inside triangle made by p1,p2,p3?
    '''
    test if P is inside the triangle defined by p1 p2 p3
    :param P: point you want to test
    :param p1: one vertex of the triangle
    :param p2: one vertex of the triangle
    :param p3: one vertex of the triangle
    :type P: array
    :type p1: array
    :type p2: array
    :type p3: array
    :return: isIn
    :rtype: bool
    :Example:
        >>> isInsideTriangle([0,0],[-1,0],[0,1],[1,0])
        >>> isInsideTriangle([0,-0.1],[-1,0],[0,1],[1,0])
    '''
    x,x1,x2,x3 = P[0],p1[0],p2[0],p3[0]
    y,y1,y2,y3 = P[1],p1[1],p2[1],p3[1]
    # twice the areas of the full triangle and of the three sub-triangles built
    # with P; the factor 1/2 cancels out in the comparison below
    full = abs(x1 * (y2 - y3) + x2 * (y3 - y1) + x3 * (y1 - y2))
    first = abs(x1 * (y2 - y) + x2 * (y - y1) + x * (y1 - y2))
    second = abs(x1 * (y - y3) + x * (y3 - y1) + x3 * (y1 - y))
    third = abs(x * (y2 - y3) + x2 * (y3 - y) + x3 * (y - y2))
    # P is inside (or on an edge of) the triangle iff the sub-areas sum to the full area
    return abs(first + second + third - full) < .0000001
def euler2azi(phi1,phi):
    '''
    Convert Euler angles to azimuth and colatitude
    :param phi1: first Euler angle
    :type phi1: array
    :param phi: second Euler angle
    :type phi: array
    :return: azi (azimuth)
    :rtype: array
    :return: col (colatitude)
    :rtype: array
    '''
col=phi
azi=np.mod((phi1-math.pi/2.),2.*math.pi)
return azi,col
| gpl-3.0 |
ThomasMiconi/nupic.research | htmresearch/frameworks/sp_paper/sp_metrics.py | 6 | 32444 | #!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2016, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import copy
import random
import matplotlib.pyplot as plt
import numpy as np
from nupic.bindings.math import GetNTAReal
import pandas as pd
uintType = "uint32"
def getConnectedSyns(sp):
numInputs = sp.getNumInputs()
numColumns = np.prod(sp.getColumnDimensions())
connectedSyns = np.zeros((numColumns, numInputs), dtype=uintType)
for columnIndex in range(numColumns):
sp.getConnectedSynapses(columnIndex, connectedSyns[columnIndex, :])
connectedSyns = connectedSyns.astype('float32')
return connectedSyns
def getMovingBar(startLocation,
direction,
imageSize=(20, 20),
steps=5,
barHalfLength=3,
orientation='horizontal'):
"""
Generate a list of bars
:param startLocation:
(list) start location of the bar center, e.g. (10, 10)
:param direction:
direction of movement, e.g., (1, 0)
:param imageSize:
(list) number of pixels on horizontal and vertical dimension
:param steps:
(int) number of steps
:param barHalfLength:
    (int) half length of the bar (full length is 2*barHalfLength + 1)
:param orientation:
(string) "horizontal" or "vertical"
  :return: a list of 2D arrays, one frame of the moving bar per step
"""
startLocation = np.array(startLocation)
direction = np.array(direction)
barMovie = []
for step in range(steps):
barCenter = startLocation + step * direction
barMovie.append(getBar(imageSize,
barCenter,
barHalfLength,
orientation))
return barMovie
def getBar(imageSize, barCenter, barHalfLength, orientation='horizontal'):
"""
Generate a single horizontal or vertical bar
:param imageSize
a list of (numPixelX. numPixelY). The number of pixels on horizontal
and vertical dimension, e.g., (20, 20)
:param barCenter:
(list) center of the bar, e.g. (10, 10)
:param barHalfLength
(int) half length of the bar. Full length is 2*barHalfLength +1
:param orientation:
(string) "horizontal" or "vertical"
:return:
"""
(nX, nY) = imageSize
(xLoc, yLoc) = barCenter
bar = np.zeros((nX, nY), dtype=uintType)
if orientation == 'horizontal':
xmin = max(0, (xLoc - barHalfLength))
xmax = min(nX - 1, (xLoc + barHalfLength + 1))
bar[xmin:xmax, yLoc] = 1
elif orientation == 'vertical':
ymin = max(0, (yLoc - barHalfLength))
ymax = min(nY - 1, (yLoc + barHalfLength + 1))
bar[xLoc, ymin:ymax] = 1
else:
raise RuntimeError("orientation has to be horizontal or vertical")
return bar
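# Minimal added sketch (not part of the original module): a 20x20 image with a
# single horizontal bar of half-length 1 centered at (10, 10).
def _demoGetBar():
  bar = getBar((20, 20), (10, 10), barHalfLength=1, orientation='horizontal')
  return bar.sum()  # 3 active pixels: bar[9:12, 10]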
def getCross(nX, nY, barHalfLength):
cross = np.zeros((nX, nY), dtype=uintType)
xLoc = np.random.randint(barHalfLength, nX - barHalfLength)
yLoc = np.random.randint(barHalfLength, nY - barHalfLength)
cross[(xLoc - barHalfLength):(xLoc + barHalfLength + 1), yLoc] = 1
cross[xLoc, (yLoc - barHalfLength):(yLoc + barHalfLength + 1)] = 1
return cross
def generateRandomSDR(numSDR, numDims, numActiveInputBits, seed=42):
"""
Generate a set of random SDR's
@param numSDR:
@param nDim:
@param numActiveInputBits:
"""
randomSDRs = np.zeros((numSDR, numDims), dtype=uintType)
indices = np.array(range(numDims))
np.random.seed(seed)
for i in range(numSDR):
randomIndices = np.random.permutation(indices)
activeBits = randomIndices[:numActiveInputBits]
randomSDRs[i, activeBits] = 1
return randomSDRs
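# Minimal added sketch (not part of the original module): generate 10 random
# SDRs of 1024 bits with exactly 20 active bits each.
def _demoGenerateRandomSDR():
  sdrs = generateRandomSDR(numSDR=10, numDims=1024, numActiveInputBits=20)
  return sdrs.shape, sdrs.sum(axis=1)  # (10, 1024) and a vector of 20s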
def getRandomBar(imageSize, barHalfLength, orientation='horizontal'):
(nX, nY) = imageSize
if orientation == 'horizontal':
xLoc = np.random.randint(barHalfLength, nX - barHalfLength)
yLoc = np.random.randint(0, nY)
bar = getBar(imageSize, (xLoc, yLoc), barHalfLength, orientation)
elif orientation == 'vertical':
xLoc = np.random.randint(0, nX)
yLoc = np.random.randint(barHalfLength, nY - barHalfLength)
bar = getBar(imageSize, (xLoc, yLoc), barHalfLength, orientation)
else:
raise RuntimeError("orientation has to be horizontal or vertical")
# shift bar with random phases
bar = np.roll(bar, np.random.randint(10 * nX), 0)
bar = np.roll(bar, np.random.randint(10 * nY), 1)
return bar
def generateCorrelatedSDRPairs(numInputVectors,
inputSize,
numInputVectorPerSensor,
numActiveInputBits,
corrStrength=0.1,
seed=42):
inputVectors1 = generateRandomSDR(
numInputVectorPerSensor, int(inputSize / 2), numActiveInputBits, seed)
inputVectors2 = generateRandomSDR(
numInputVectorPerSensor, int(inputSize / 2), numActiveInputBits, seed + 1)
# for each input on sensor 1, how many inputs on the 2nd sensor are
# strongly correlated with it?
numCorrPairs = 2
numInputVector1 = numInputVectorPerSensor
numInputVector2 = numInputVectorPerSensor
corrPairs = np.zeros((numInputVector1, numInputVector2))
for i in range(numInputVector1):
idx = np.random.choice(np.arange(numInputVector2),
size=(numCorrPairs,), replace=False)
corrPairs[i, idx] = 1.0 / numCorrPairs
uniformDist = np.ones((numInputVector1, numInputVector2)) / numInputVector2
sampleProb = corrPairs * corrStrength + uniformDist * (1 - corrStrength)
inputVectors = np.zeros((numInputVectors, inputSize))
for i in range(numInputVectors):
vec1 = np.random.randint(numInputVector1)
vec2 = np.random.choice(np.arange(numInputVector2), p=sampleProb[vec1, :])
inputVectors[i][:] = np.concatenate((inputVectors1[vec1],
inputVectors2[vec2]))
return inputVectors, inputVectors1, inputVectors2, corrPairs
def generateDenseVectors(numVectors, inputSize, seed):
np.random.seed(seed)
inputVectors = np.zeros((numVectors, inputSize), dtype=uintType)
for i in range(numVectors):
for j in range(inputSize):
inputVectors[i][j] = random.randrange(2)
return inputVectors
def convertToBinaryImage(image, thresh=75):
binaryImage = np.zeros(image.shape)
binaryImage[image > np.percentile(image, thresh)] = 1
return binaryImage
def getImageData(numInputVectors):
from htmresearch.algorithms.image_sparse_net import ImageSparseNet
DATA_PATH = "../sparse_net/data/IMAGES.mat"
DATA_NAME = "IMAGES"
DEFAULT_SPARSENET_PARAMS = {
"filterDim": 64,
"outputDim": 64,
"batchSize": numInputVectors,
"numLcaIterations": 75,
"learningRate": 2.0,
"decayCycle": 100,
"learningRateDecay": 1.0,
"lcaLearningRate": 0.1,
"thresholdDecay": 0.95,
"minThreshold": 1.0,
"thresholdType": 'soft',
"verbosity": 0, # can be changed to print training loss
"showEvery": 500,
"seed": 42,
}
network = ImageSparseNet(**DEFAULT_SPARSENET_PARAMS)
print "Loading training data..."
images = network.loadMatlabImages(DATA_PATH, DATA_NAME)
nDim1, nDim2, numImages = images.shape
binaryImages = np.zeros(images.shape)
for i in range(numImages):
binaryImages[:, :, i] = convertToBinaryImage(images[:, :, i])
inputVectors = network._getDataBatch(binaryImages)
inputVectors = inputVectors.T
return inputVectors
class SDRDataSet(object):
"""
Generate, store, and manipulate SDR dataset
"""
def __init__(self,
params):
self._params = params
self._inputVectors = []
self._dataType = params['dataType']
self._additionalInfo = {}
self.generateInputVectors(params)
def generateInputVectors(self, params):
if params['dataType'] == 'randomSDR':
self._inputVectors = generateRandomSDR(
params['numInputVectors'],
params['inputSize'],
params['numActiveInputBits'],
params['seed'])
elif params['dataType'] == 'denseVectors':
self._inputVectors = generateDenseVectors(
params['numInputVectors'],
params['inputSize'],
params['seed'])
elif params['dataType'] == 'randomBarPairs':
inputSize = params['nX'] * params['nY']
numInputVectors = params['numInputVectors']
self._inputVectors = np.zeros((numInputVectors, inputSize),
dtype=uintType)
for i in range(numInputVectors):
bar1 = getRandomBar((params['nX'], params['nY']),
params['barHalfLength'], 'horizontal')
bar2 = getRandomBar((params['nX'], params['nY']),
params['barHalfLength'], 'vertical')
data = bar1 + bar2
data[data > 0] = 1
self._inputVectors[i, :] = np.reshape(data, newshape=(1, inputSize))
elif params['dataType'] == 'randomBarSets':
inputSize = params['nX'] * params['nY']
numInputVectors = params['numInputVectors']
self._inputVectors = np.zeros((numInputVectors, inputSize),
dtype=uintType)
for i in range(numInputVectors):
data = 0
for barI in range(params['numBarsPerInput']):
orientation = np.random.choice(['horizontal', 'vertical'])
bar = getRandomBar((params['nX'], params['nY']),
params['barHalfLength'], orientation)
data += bar
data[data > 0] = 1
self._inputVectors[i, :] = np.reshape(data, newshape=(1, inputSize))
elif params['dataType'] == 'randomCross':
inputSize = params['nX'] * params['nY']
numInputVectors = params['numInputVectors']
self._inputVectors = np.zeros((numInputVectors, inputSize),
dtype=uintType)
for i in range(numInputVectors):
data = getCross(params['nX'], params['nY'], params['barHalfLength'])
self._inputVectors[i, :] = np.reshape(data, newshape=(1, inputSize))
elif params['dataType'] == 'correlatedSDRPairs':
(inputVectors, inputVectors1, inputVectors2, corrPairs) = \
generateCorrelatedSDRPairs(
params['numInputVectors'],
params['inputSize'],
params['numInputVectorPerSensor'],
params['numActiveInputBits'],
params['corrStrength'],
params['seed'])
self._inputVectors = inputVectors
self._additionalInfo = {"inputVectors1": inputVectors1,
"inputVectors2": inputVectors2,
"corrPairs": corrPairs}
elif params['dataType'] == 'nyc_taxi':
from nupic.encoders.scalar import ScalarEncoder
df = pd.read_csv('./data/nyc_taxi.csv', header=0, skiprows=[1, 2])
inputVectors = np.zeros((5000, params['n']))
for i in range(5000):
inputRecord = {
"passenger_count": float(df["passenger_count"][i]),
"timeofday": float(df["timeofday"][i]),
"dayofweek": float(df["dayofweek"][i]),
}
enc = ScalarEncoder(w=params['w'],
minval=params['minval'],
maxval=params['maxval'],
n=params['n'])
inputSDR = enc.encode(inputRecord["passenger_count"])
inputVectors[i, :] = inputSDR
self._inputVectors = inputVectors
def getInputVectors(self):
return self._inputVectors
def getAdditionalInfo(self):
return self._additionalInfo
from nupic.math.topology import coordinatesFromIndex
realDType = GetNTAReal()
uintType = "uint32"
def percentOverlap(x1, x2):
"""
Computes the percentage of overlap between vectors x1 and x2.
@param x1 (array) binary vector
@param x2 (array) binary vector
@return percentOverlap (float) percentage overlap between x1 and x2
"""
nonZeroX1 = np.count_nonzero(x1)
nonZeroX2 = np.count_nonzero(x2)
percentOverlap = 0
minX1X2 = min(nonZeroX1, nonZeroX2)
if minX1X2 > 0:
overlap = float(np.dot(x1.T, x2))
percentOverlap = overlap / minX1X2
return percentOverlap
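# Minimal added sketch (not part of the original module): two 5-bit vectors
# sharing 2 of their 3 active bits give an overlap of 2/3.
def _demoPercentOverlap():
  x1 = np.array([1, 1, 0, 0, 1])
  x2 = np.array([1, 0, 0, 1, 1])
  return percentOverlap(x1, x2)  # 2 shared bits / min(3, 3) = 0.666...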
def addNoiseToVector(inputVector, noiseLevel, vectorType):
"""
Add noise to SDRs
@param inputVector (array) binary vector to be corrupted
@param noiseLevel (float) amount of noise to be applied on the vector.
@param vectorType (string) "sparse" or "dense"
"""
if vectorType == 'sparse':
corruptSparseVector(inputVector, noiseLevel)
elif vectorType == 'dense':
corruptDenseVector(inputVector, noiseLevel)
else:
raise ValueError("vectorType must be 'sparse' or 'dense' ")
def corruptDenseVector(vector, noiseLevel):
"""
Corrupts a binary vector by inverting noiseLevel percent of its bits.
@param vector (array) binary vector to be corrupted
@param noiseLevel (float) amount of noise to be applied on the vector.
"""
size = len(vector)
for i in range(size):
rnd = random.random()
if rnd < noiseLevel:
if vector[i] == 1:
vector[i] = 0
else:
vector[i] = 1
def corruptSparseVector(sdr, noiseLevel):
"""
Add noise to sdr by turning off numNoiseBits active bits and turning on
numNoiseBits in active bits
@param sdr (array) Numpy array of the SDR
@param noiseLevel (float) amount of noise to be applied on the vector.
"""
numNoiseBits = int(noiseLevel * np.sum(sdr))
if numNoiseBits <= 0:
return sdr
activeBits = np.where(sdr > 0)[0]
inActiveBits = np.where(sdr == 0)[0]
turnOffBits = np.random.permutation(activeBits)
turnOnBits = np.random.permutation(inActiveBits)
turnOffBits = turnOffBits[:numNoiseBits]
turnOnBits = turnOnBits[:numNoiseBits]
sdr[turnOffBits] = 0
sdr[turnOnBits] = 1
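# Minimal added sketch (not part of the original module): corrupting a 20-bit
# SDR with noiseLevel=0.5 swaps 10 active bits for 10 previously inactive ones,
# so the total number of active bits stays at 20.
def _demoCorruptSparseVector():
  sdr = np.zeros(100, dtype=uintType)
  sdr[:20] = 1
  corruptSparseVector(sdr, noiseLevel=0.5)
  return sdr.sum()  # still 20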
def calculateOverlapCurve(sp, inputVectors):
"""
  Evaluate noise robustness of SP for a given set of SDRs
@param sp a spatial pooler instance
@param inputVectors list of arrays.
:return:
"""
columnNumber = np.prod(sp.getColumnDimensions())
numInputVector, inputSize = inputVectors.shape
outputColumns = np.zeros((numInputVector, columnNumber), dtype=uintType)
outputColumnsCorrupted = np.zeros((numInputVector, columnNumber),
dtype=uintType)
noiseLevelList = np.linspace(0, 1.0, 21)
inputOverlapScore = np.zeros((numInputVector, len(noiseLevelList)))
outputOverlapScore = np.zeros((numInputVector, len(noiseLevelList)))
for i in range(numInputVector):
for j in range(len(noiseLevelList)):
inputVectorCorrupted = copy.deepcopy(inputVectors[i][:])
corruptSparseVector(inputVectorCorrupted, noiseLevelList[j])
sp.compute(inputVectors[i][:], False, outputColumns[i][:])
sp.compute(inputVectorCorrupted, False,
outputColumnsCorrupted[i][:])
inputOverlapScore[i][j] = percentOverlap(inputVectors[i][:],
inputVectorCorrupted)
outputOverlapScore[i][j] = percentOverlap(outputColumns[i][:],
outputColumnsCorrupted[i][:])
return noiseLevelList, inputOverlapScore, outputOverlapScore
def classifySPoutput(targetOutputColumns, outputColumns):
"""
Classify the SP output
@param targetOutputColumns (list) The target outputs, corresponding to
different classes
@param outputColumns (array) The current output
@return classLabel (int) classification outcome
"""
numTargets, numDims = targetOutputColumns.shape
overlap = np.zeros((numTargets,))
for i in range(numTargets):
overlap[i] = percentOverlap(outputColumns, targetOutputColumns[i, :])
classLabel = np.argmax(overlap)
return classLabel
def classificationAccuracyVsNoise(sp, inputVectors, noiseLevelList):
"""
Evaluate whether the SP output is classifiable, with varying amount of noise
@param sp a spatial pooler instance
@param inputVectors (list) list of input SDRs
@param noiseLevelList (list) list of noise levels
:return:
"""
numInputVector, inputSize = inputVectors.shape
if sp is None:
targetOutputColumns = copy.deepcopy(inputVectors)
else:
columnNumber = np.prod(sp.getColumnDimensions())
# calculate target output given the uncorrupted input vectors
targetOutputColumns = np.zeros((numInputVector, columnNumber),
dtype=uintType)
for i in range(numInputVector):
sp.compute(inputVectors[i][:], False, targetOutputColumns[i][:])
outcomes = np.zeros((len(noiseLevelList), numInputVector))
for i in range(len(noiseLevelList)):
for j in range(numInputVector):
corruptedInputVector = copy.deepcopy(inputVectors[j][:])
corruptSparseVector(corruptedInputVector, noiseLevelList[i])
if sp is None:
outputColumns = copy.deepcopy(corruptedInputVector)
else:
outputColumns = np.zeros((columnNumber, ), dtype=uintType)
sp.compute(corruptedInputVector, False, outputColumns)
predictedClassLabel = classifySPoutput(targetOutputColumns, outputColumns)
outcomes[i][j] = predictedClassLabel == j
predictionAccuracy = np.mean(outcomes, 1)
return predictionAccuracy
def plotExampleInputOutput(sp, inputVectors, saveFigPrefix=None):
"""
Plot example input & output
  @param sp: a spatial pooler instance
@param inputVectors: a set of input vectors
"""
numInputVector, inputSize = inputVectors.shape
numColumns = np.prod(sp.getColumnDimensions())
outputColumns = np.zeros((numInputVector, numColumns), dtype=uintType)
inputOverlap = np.zeros((numInputVector, numColumns), dtype=uintType)
connectedCounts = np.zeros((numColumns,), dtype=uintType)
sp.getConnectedCounts(connectedCounts)
winnerInputOverlap = np.zeros(numInputVector)
for i in range(numInputVector):
sp.compute(inputVectors[i][:], False, outputColumns[i][:])
inputOverlap[i][:] = sp.getOverlaps()
activeColumns = np.where(outputColumns[i][:] > 0)[0]
if len(activeColumns) > 0:
winnerInputOverlap[i] = np.mean(
inputOverlap[i][np.where(outputColumns[i][:] > 0)[0]])
fig, axs = plt.subplots(2, 1)
axs[0].imshow(inputVectors[:, :200], cmap='gray', interpolation="nearest")
axs[0].set_ylabel('input #')
axs[0].set_title('input vectors')
axs[1].imshow(outputColumns[:, :200], cmap='gray', interpolation="nearest")
axs[1].set_ylabel('input #')
axs[1].set_title('output vectors')
if saveFigPrefix is not None:
plt.savefig('figures/{}_example_input_output.pdf'.format(saveFigPrefix))
inputDensity = np.sum(inputVectors, 1) / float(inputSize)
outputDensity = np.sum(outputColumns, 1) / float(numColumns)
fig, axs = plt.subplots(2, 1)
axs[0].plot(inputDensity)
axs[0].set_xlabel('input #')
axs[0].set_ylim([0, 0.2])
axs[1].plot(outputDensity)
axs[1].set_xlabel('input #')
axs[1].set_ylim([0, 0.05])
if saveFigPrefix is not None:
plt.savefig('figures/{}_example_input_output_density.pdf'.format(saveFigPrefix))
def inspectSpatialPoolerStats(sp, inputVectors, saveFigPrefix=None):
"""
Inspect the statistics of a spatial pooler given a set of input vectors
  @param sp: a spatial pooler instance
@param inputVectors: a set of input vectors
"""
numInputVector, inputSize = inputVectors.shape
numColumns = np.prod(sp.getColumnDimensions())
outputColumns = np.zeros((numInputVector, numColumns), dtype=uintType)
inputOverlap = np.zeros((numInputVector, numColumns), dtype=uintType)
connectedCounts = np.zeros((numColumns, ), dtype=uintType)
sp.getConnectedCounts(connectedCounts)
winnerInputOverlap = np.zeros(numInputVector)
for i in range(numInputVector):
sp.compute(inputVectors[i][:], False, outputColumns[i][:])
inputOverlap[i][:] = sp.getOverlaps()
activeColumns = np.where(outputColumns[i][:] > 0)[0]
if len(activeColumns) > 0:
winnerInputOverlap[i] = np.mean(
inputOverlap[i][np.where(outputColumns[i][:] > 0)[0]])
avgInputOverlap = np.mean(inputOverlap, 0)
entropy = calculateEntropy(outputColumns)
activationProb = np.mean(outputColumns.astype(realDType), 0)
dutyCycleDist, binEdge = np.histogram(activationProb,
bins=10, range=[-0.005, 0.095])
dutyCycleDist = dutyCycleDist.astype('float32') / np.sum(dutyCycleDist)
binCenter = (binEdge[1:] + binEdge[:-1])/2
fig, axs = plt.subplots(2, 2)
axs[0, 0].hist(connectedCounts)
axs[0, 0].set_xlabel('# Connected Synapse')
axs[0, 1].hist(winnerInputOverlap)
axs[0, 1].set_xlabel('# winner input overlap')
axs[1, 0].bar(binEdge[:-1]+0.001, dutyCycleDist, width=.008)
axs[1, 0].set_xlim([-0.005, .1])
axs[1, 0].set_xlabel('Activation Frequency')
axs[1, 0].set_title('Entropy: {}'.format(entropy))
axs[1, 1].plot(connectedCounts, activationProb, '.')
axs[1, 1].set_xlabel('connection #')
axs[1, 1].set_ylabel('activation freq')
plt.tight_layout()
if saveFigPrefix is not None:
plt.savefig('figures/{}_network_stats.pdf'.format(saveFigPrefix))
return fig
def getRFCenters(sp, params, type='connected'):
numColumns = np.product(sp.getColumnDimensions())
dimensions = (params['nX'], params['nY'])
meanCoordinates = np.zeros((numColumns, 2))
avgDistToCenter = np.zeros((numColumns, 2))
for columnIndex in range(numColumns):
receptiveField = np.zeros((sp.getNumInputs(), ))
if type == 'connected':
sp.getConnectedSynapses(columnIndex, receptiveField)
elif type == 'potential':
sp.getPotential(columnIndex, receptiveField)
else:
raise RuntimeError('unknown RF type')
connectedSynapseIndex = np.where(receptiveField)[0]
if len(connectedSynapseIndex) == 0:
continue
coordinates = []
for synapseIndex in connectedSynapseIndex:
coordinate = coordinatesFromIndex(synapseIndex, dimensions)
coordinates.append(coordinate)
coordinates = np.array(coordinates)
coordinates = coordinates.astype('float32')
angularCoordinates = np.array(coordinates)
angularCoordinates[:, 0] = coordinates[:, 0] / params['nX'] * 2 * np.pi
angularCoordinates[:, 1] = coordinates[:, 1] / params['nY'] * 2 * np.pi
for i in range(2):
meanCoordinate = np.arctan2(
np.sum(np.sin(angularCoordinates[:, i])),
np.sum(np.cos(angularCoordinates[:, i])))
if meanCoordinate < 0:
meanCoordinate += 2 * np.pi
dist2Mean = angularCoordinates[:, i] - meanCoordinate
dist2Mean = np.arctan2(np.sin(dist2Mean), np.cos(dist2Mean))
dist2Mean = np.max(np.abs(dist2Mean))
meanCoordinate *= dimensions[i] / (2 * np.pi)
dist2Mean *= dimensions[i] / (2 * np.pi)
avgDistToCenter[columnIndex, i] = dist2Mean
meanCoordinates[columnIndex, i] = meanCoordinate
return meanCoordinates, avgDistToCenter
def binaryEntropyVectorized(x):
"""
Calculate entropy for a list of binary random variables
:param x: (numpy array) the probability of the variable to be 1.
:return: entropy: (numpy array) entropy
"""
entropy = - x*np.log2(x) - (1-x)*np.log2(1-x)
entropy[x*(1 - x) == 0] = 0
return entropy
def renyiEntropyVectorized(x):
entropy = -np.log2(np.square(x) + np.square(1-x))
return entropy
def calculateEntropy(activeColumns, type='binary'):
"""
calculate the mean entropy given activation history
@param activeColumns (array) 2D numpy array of activation history
@return entropy (float) mean entropy
"""
activationProb = np.mean(activeColumns, 0)
if type == 'binary':
totalEntropy = np.sum(binaryEntropyVectorized(activationProb))
elif type == 'renyi':
totalEntropy = np.sum(renyiEntropyVectorized(activationProb))
else:
raise ValueError('unknown entropy type')
numberOfColumns = activeColumns.shape[1]
# return mean entropy
return totalEntropy/numberOfColumns
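# Minimal added sketch (not part of the original module): two columns that are
# each active on half of the inputs carry 1 bit of entropy each.
def _demoCalculateEntropy():
  activeColumns = np.array([[1, 1], [0, 1], [1, 0], [0, 0]])
  return calculateEntropy(activeColumns)  # (1.0 + 1.0) / 2 = 1.0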
def calculateInputOverlapMat(inputVectors, sp):
numColumns = np.product(sp.getColumnDimensions())
numInputVector, inputSize = inputVectors.shape
overlapMat = np.zeros((numColumns, numInputVector))
for c in range(numColumns):
connectedSynapses = np.zeros((inputSize, ), dtype=uintType)
sp.getConnectedSynapses(c, connectedSynapses)
for i in range(numInputVector):
overlapMat[c, i] = percentOverlap(connectedSynapses, inputVectors[i, :])
return overlapMat
def calculateStability(activeColumnsCurrentEpoch, activeColumnsPreviousEpoch):
activeColumnsStable = np.logical_and(activeColumnsCurrentEpoch,
activeColumnsPreviousEpoch)
stability = np.mean(np.sum(activeColumnsStable, 1))/\
np.mean(np.sum(activeColumnsCurrentEpoch, 1))
return stability
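# Minimal added sketch (not part of the original module): one of the two active
# columns survives from the previous epoch, giving a stability of 0.5.
def _demoCalculateStability():
  previous = np.array([[1, 1, 0, 0]])
  current = np.array([[1, 0, 1, 0]])
  return calculateStability(current, previous)  # 1 stable / 2 active = 0.5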
def calculateInputSpaceCoverage(sp):
numInputs = np.prod(sp.getInputDimensions())
numColumns = np.prod(sp.getColumnDimensions())
inputSpaceCoverage = np.zeros(numInputs)
connectedSynapses = np.zeros((numInputs), dtype=uintType)
for columnIndex in range(numColumns):
sp.getConnectedSynapses(columnIndex, connectedSynapses)
inputSpaceCoverage += connectedSynapses
inputSpaceCoverage = np.reshape(inputSpaceCoverage, sp.getInputDimensions())
return inputSpaceCoverage
def reconstructionError(sp, inputVectors, activeColumnVectors, threshold=0.):
"""
  Computes a reconstruction error. The reconstruction $r(x)$ of an input vector $x$
  is given by the sum of the connected synapse vectors of the active columns of
  the SDR representation $sdr(x)$ of $x$, normalized by $1/numActiveColumns$.
  The error is the normalized sum of the "Hamming distance" (i.e. the distance
  induced by the L1 norm) between $x$ and its reconstruction $r(x)$, i.e. (in LaTeX)
\[
Reconstruction Error = (1/batchSize) * \sum_{x \in InputBatch} \| x - r(x) \|_1 .
\]
Note that $r(x)$ can be expressed as
\[
r(x) = (1/numActiveColumns) * C * sdr(x) ,
\]
where we view $sdr(x)$ as a binary column vector and $C$ is the
binary matrix whose jth column encodes the synaptic connectivity of
the pooler's columns and the input bits, i.e.
\[
c_{i,j} = 1 :<=> column j has a stable synaptic
connection to input bit i.
\]
Note: Turns out that in our setting (x and syn(i) binary vectors) we have
\[
Reconstruction Error = Witness Error.
\]
It can be shown that the error is optimized by the Hebbian-like update rule
of the spatial pooler.
@param sp (SpatialPooler) the spatial pooler instance
@param inputVectors (array) 2D numpy array of input vectors
@param activeColumnVectors (array) 2D numpy array of activation history
@param threshold (float) if set > 0 it serves as threshold for a step function
applied to the reconstruction vectors (values smaller than
threshold are set to zero, and values bigger to one)
@return error (float) the reconstruction error
"""
batchSize = inputVectors.shape[0]
connectionMatrix = getConnectedSyns(sp)
reconstructionVectors = np.dot(activeColumnVectors, connectionMatrix)
numActiveColumns = np.sum(activeColumnVectors, 1)[0]
reconstructionVectors = reconstructionVectors/numActiveColumns
if threshold > 0.:
    reconstructionVectors = np.where(
      reconstructionVectors > threshold,
      np.ones(reconstructionVectors.shape),
      np.zeros(reconstructionVectors.shape))
Err = np.sum(np.absolute(reconstructionVectors - inputVectors))
return Err/batchSize
def witnessError(sp, inputVectors, activeColumnsCurrentEpoch):
"""
  Computes a variation of a reconstruction error. It measures the average
  Hamming distance between an active column's connected synapse vector and its witnesses.
  An input vector is called a witness for a column iff the column is among
  the active columns of the SDR computed for that input by the spatial pooler.
The error is given by
\[
Witness Error = (1/batchSize) * \sum_{x \in InputBatch}
(1/numActiveColumns) * \sum_{i active column of sdr(x)} \| x - syn(i) \|_1.
\]
Note: Turns out that in our setting (x and syn(i) binary vectors) we have
\[
Witness Error = Reconstruction Error.
\]
It can be shown that the error is optimized by the Hebbian-like update rule
of the spatial pooler.
"""
connectionMatrix = getConnectedSyns(sp)
batchSize = inputVectors.shape[0]
# 1st sum... over each input in batch
Err = 0.
for i in range(batchSize):
activeColumns = np.where(activeColumnsCurrentEpoch[i] > 0.)[0]
numActiveColumns = activeColumns.shape[0]
    # 2nd sum... over each active column
err = 0.
for j in activeColumns:
# Compute hamming distance and accumulate
err += np.sum(np.absolute(connectionMatrix[j] - inputVectors[i]))
Err += err/numActiveColumns
return Err/batchSize
def mutualInformation(sp, activeColumnsCurrentEpoch, column_1, column_2):
"""
Computes the mutual information of the binary variables that represent
the activation probabilities of two columns. The mutual information I(X,Y)
of two random variables is given by
\[
I (X,Y) = \sum_{x,y} p(x,y) log( p(x,y) / ( p(x) p(y) ) ).
\]
(https://en.wikipedia.org/wiki/Mutual_information)
"""
i, j = column_1, column_2
batchSize = activeColumnsCurrentEpoch.shape[0]
# Activity Counts
ci, cj, cij = 0., 0., dict([((0,0),0.), ((1,0),0.), ((0,1),0.), ((1,1),0.)])
for t in range(batchSize):
ai = activeColumnsCurrentEpoch[t, i]
aj = activeColumnsCurrentEpoch[t, j]
cij[(ai, aj)] += 1.
ci += ai
cj += aj
# Mutual information calculation
Iij = 0
for a,b in [(0,0), (1,0), (0,1), (1,1)]:
# Compute probabilities
pij = cij[(a,b)]/batchSize
pi = ci/batchSize if a == 1 else 1. - ci/batchSize
pj = cj/batchSize if b == 1 else 1. - cj/batchSize
# Add current term of mutual information
Iij += pij * np.log2(pij/(pi*pj)) if pij > 0 else 0
return Iij
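# Minimal added sketch (not part of the original module): two perfectly
# correlated columns share 1 bit of mutual information. Note that the `sp`
# argument is not used inside mutualInformation, so None can be passed here.
def _demoMutualInformation():
  acts = np.array([[1, 1], [0, 0], [1, 1], [0, 0]])
  return mutualInformation(None, acts, 0, 1)  # 1.0 bit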
def meanMutualInformation(sp, activeColumnsCurrentEpoch, columnsUnderInvestigation = []):
"""
Computes the mean of the mutual information
of pairs taken from a list of columns.
"""
if len(columnsUnderInvestigation) == 0:
columns = range(np.prod(sp.getColumnDimensions()))
else:
columns = columnsUnderInvestigation
numCols = len(columns)
sumMutualInfo = 0
normalizingConst = numCols*(numCols - 1)/2
for i in range(numCols):
for j in range(i+1, numCols):
sumMutualInfo += mutualInformation(sp, activeColumnsCurrentEpoch, columns[i], columns[j])
return sumMutualInfo/normalizingConst
| agpl-3.0 |
agarciamontoro/TFG | Software/Samples/nbody/plot.py | 1 | 3624 | # coding: utf-8
import numpy as np
import h5py
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.animation as animation
from progress_lib import progress_bar_init
import os
import time
import re
import argparse
# ============================= PARSE ARGUMENTS ============================= #
parser = argparse.ArgumentParser(description='Creates a gif movie from the nbody simulation')
parser.add_argument('-i', '--input', dest='input', type=str,
default="Output/nbody.hdf5",
help='Path to the file where the data is stored.')
parser.add_argument('-o', '--output', dest='output', type=str,
default="out_default.gif",
help='Path to the file where the result will be stored.')
parser.add_argument('-f', '--frames', dest='numFrames', type=int,
default=200,
help='Number of steps simulated.')
parser.add_argument('-b', '--bodies', dest='numBodies', type=int,
default=8192,
help='Number of bodies used in the simulation.')
args = parser.parse_args()
num_frames = args.numFrames
out_path = args.output
num_bodies = args.numBodies
# Get data
hdf5_root = h5py.File(args.input, "r")
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
# ax.patch.set_visible(False)
axes = fig.gca()
axes.set_xlim([-15, 15])
axes.set_ylim([-15, 15])
axes.set_zlim([-15, 15])
axes.set_axis_off()
ax.set_axis_bgcolor((0.15, 0.15, 0.15))
axes.set_position([0, 0, 1, 1])
azimuth = 0
ax.view_init(elev=10., azim=azimuth)
scat1 = ax.scatter([], [], [], c="darkcyan", s=2, lw=0)
scat2 = ax.scatter([], [], [], c="darkolivegreen", s=2, lw=0)
scat3 = ax.scatter([], [], [], c="paleturquoise", s=2, lw=0)
scat4 = ax.scatter([], [], [], c="olive", s=2, lw=0)
scat5 = ax.scatter([], [], [], c="darkcyan", s=2, lw=0)
scat6 = ax.scatter([], [], [], c="darkolivegreen", s=2, lw=0)
progress_bar = progress_bar_init(num_frames-1)
old_end = time.time()
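# Added note (not part of the original script): the slicing inside animate()
# below assumes each HDF5 frame stores the bodies of the two simulated galaxies
# contiguously -- disks first (16384 bodies each), then bulges (8192 each),
# then halos. The `and_*` / `mil_*` prefixes presumably refer to the two
# galaxies (e.g. Andromeda and the Milky Way); this reading is inferred from
# the variable names and offsets, not documented in the file itself.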
def animate(frame):
global old_end
start = old_end
set_name = "out_%03d.csv" % frame
data = hdf5_root[set_name]
and_disk = data[:16384, :]
mil_disk = data[16384:32768, :]
and_bulg = data[32768:40960, :]
mil_bulg = data[40960:49152, :]
and_halo = data[49152:65536, :]
mil_halo = data[65536:, :]
scat1._offsets3d = (np.ma.ravel(and_disk[:, 0]),
np.ma.ravel(and_disk[:, 1]),
np.ma.ravel(and_disk[:, 2]))
scat2._offsets3d = (np.ma.ravel(mil_disk[:, 0]),
np.ma.ravel(mil_disk[:, 1]),
np.ma.ravel(mil_disk[:, 2]))
scat3._offsets3d = (np.ma.ravel(and_bulg[:, 0]),
np.ma.ravel(and_bulg[:, 1]),
np.ma.ravel(and_bulg[:, 2]))
scat4._offsets3d = (np.ma.ravel(mil_bulg[:, 0]),
np.ma.ravel(mil_bulg[:, 1]),
np.ma.ravel(mil_bulg[:, 2]))
scat5._offsets3d = (np.ma.ravel(and_halo[:, 0]),
np.ma.ravel(and_halo[:, 1]),
np.ma.ravel(and_halo[:, 2]))
scat6._offsets3d = (np.ma.ravel(mil_halo[:, 0]),
np.ma.ravel(mil_halo[:, 1]),
np.ma.ravel(mil_halo[:, 2]))
end = time.time()
old_end = end
progress_bar(end - start)
return scat1, scat2,
anim = animation.FuncAnimation(fig, animate,
frames=num_frames,
interval=20)
anim.save(out_path, writer='imagemagick', fps=30)
| gpl-2.0 |
korbonits/data-science-from-scratch | code/introduction.py | 48 | 8085 | from __future__ import division
##########################
# #
# FINDING KEY CONNECTORS #
# #
##########################
users = [
{ "id": 0, "name": "Hero" },
{ "id": 1, "name": "Dunn" },
{ "id": 2, "name": "Sue" },
{ "id": 3, "name": "Chi" },
{ "id": 4, "name": "Thor" },
{ "id": 5, "name": "Clive" },
{ "id": 6, "name": "Hicks" },
{ "id": 7, "name": "Devin" },
{ "id": 8, "name": "Kate" },
{ "id": 9, "name": "Klein" },
{ "id": 10, "name": "Jen" }
]
friendships = [(0, 1), (0, 2), (1, 2), (1, 3), (2, 3), (3, 4),
(4, 5), (5, 6), (5, 7), (6, 8), (7, 8), (8, 9)]
# first give each user an empty list
for user in users:
user["friends"] = []
# and then populate the lists with friendships
for i, j in friendships:
# this works because users[i] is the user whose id is i
users[i]["friends"].append(users[j]) # add i as a friend of j
users[j]["friends"].append(users[i]) # add j as a friend of i
def number_of_friends(user):
"""how many friends does _user_ have?"""
return len(user["friends"]) # length of friend_ids list
total_connections = sum(number_of_friends(user)
for user in users) # 24
num_users = len(users)
avg_connections = total_connections / num_users # 2.4
################################
# #
# DATA SCIENTISTS YOU MAY KNOW #
# #
################################
def friends_of_friend_ids_bad(user):
# "foaf" is short for "friend of a friend"
return [foaf["id"]
for friend in user["friends"] # for each of user's friends
for foaf in friend["friends"]] # get each of _their_ friends
from collections import Counter # not loaded by default
def not_the_same(user, other_user):
"""two users are not the same if they have different ids"""
return user["id"] != other_user["id"]
def not_friends(user, other_user):
"""other_user is not a friend if he's not in user["friends"];
that is, if he's not_the_same as all the people in user["friends"]"""
return all(not_the_same(friend, other_user)
for friend in user["friends"])
def friends_of_friend_ids(user):
return Counter(foaf["id"]
for friend in user["friends"] # for each of my friends
for foaf in friend["friends"] # count *their* friends
if not_the_same(user, foaf) # who aren't me
and not_friends(user, foaf)) # and aren't my friends
print friends_of_friend_ids(users[3]) # Counter({0: 2, 5: 1})
interests = [
(0, "Hadoop"), (0, "Big Data"), (0, "HBase"), (0, "Java"),
(0, "Spark"), (0, "Storm"), (0, "Cassandra"),
(1, "NoSQL"), (1, "MongoDB"), (1, "Cassandra"), (1, "HBase"),
(1, "Postgres"), (2, "Python"), (2, "scikit-learn"), (2, "scipy"),
(2, "numpy"), (2, "statsmodels"), (2, "pandas"), (3, "R"), (3, "Python"),
(3, "statistics"), (3, "regression"), (3, "probability"),
(4, "machine learning"), (4, "regression"), (4, "decision trees"),
(4, "libsvm"), (5, "Python"), (5, "R"), (5, "Java"), (5, "C++"),
(5, "Haskell"), (5, "programming languages"), (6, "statistics"),
(6, "probability"), (6, "mathematics"), (6, "theory"),
(7, "machine learning"), (7, "scikit-learn"), (7, "Mahout"),
(7, "neural networks"), (8, "neural networks"), (8, "deep learning"),
(8, "Big Data"), (8, "artificial intelligence"), (9, "Hadoop"),
(9, "Java"), (9, "MapReduce"), (9, "Big Data")
]
def data_scientists_who_like(target_interest):
return [user_id
for user_id, user_interest in interests
if user_interest == target_interest]
from collections import defaultdict
# keys are interests, values are lists of user_ids with that interest
user_ids_by_interest = defaultdict(list)
for user_id, interest in interests:
user_ids_by_interest[interest].append(user_id)
# keys are user_ids, values are lists of interests for that user_id
interests_by_user_id = defaultdict(list)
for user_id, interest in interests:
interests_by_user_id[user_id].append(interest)
def most_common_interests_with(user_id):
    return Counter(interested_user_id
                   for interest in interests_by_user_id[user_id]
                   for interested_user_id in user_ids_by_interest[interest]
                   if interested_user_id != user_id)
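# Quick illustrative check (added; not part of the book's original listing):
# with the interests data above, user 0 shares three topics with user 9, two
# with user 1, and one each with users 8 and 5.
def _example_common_interests():
    return most_common_interests_with(0)  # counts: {9: 3, 1: 2, 8: 1, 5: 1}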
###########################
# #
# SALARIES AND EXPERIENCE #
# #
###########################
salaries_and_tenures = [(83000, 8.7), (88000, 8.1),
(48000, 0.7), (76000, 6),
(69000, 6.5), (76000, 7.5),
(60000, 2.5), (83000, 10),
(48000, 1.9), (63000, 4.2)]
def make_chart_salaries_by_tenure(plt):
tenures = [tenure for salary, tenure in salaries_and_tenures]
salaries = [salary for salary, tenure in salaries_and_tenures]
plt.scatter(tenures, salaries)
plt.xlabel("Years Experience")
plt.ylabel("Salary")
plt.show()
# keys are years
# values are the salaries for each tenure
salary_by_tenure = defaultdict(list)
for salary, tenure in salaries_and_tenures:
salary_by_tenure[tenure].append(salary)
average_salary_by_tenure = {
tenure : sum(salaries) / len(salaries)
for tenure, salaries in salary_by_tenure.items()
}
def tenure_bucket(tenure):
if tenure < 2: return "less than two"
elif tenure < 5: return "between two and five"
else: return "more than five"
salary_by_tenure_bucket = defaultdict(list)
for salary, tenure in salaries_and_tenures:
bucket = tenure_bucket(tenure)
salary_by_tenure_bucket[bucket].append(salary)
average_salary_by_bucket = {
tenure_bucket : sum(salaries) / len(salaries)
for tenure_bucket, salaries in salary_by_tenure_bucket.iteritems()
}
#################
# #
# PAID_ACCOUNTS #
# #
#################
def predict_paid_or_unpaid(years_experience):
if years_experience < 3.0: return "paid"
elif years_experience < 8.5: return "unpaid"
else: return "paid"
######################
# #
# TOPICS OF INTEREST #
# #
######################
words_and_counts = Counter(word
for user, interest in interests
for word in interest.lower().split())
if __name__ == "__main__":
print
print "######################"
print "#"
print "# FINDING KEY CONNECTORS"
print "#"
print "######################"
print
print "total connections", total_connections
print "number of users", num_users
print "average connections", total_connections / num_users
print
# create a list (user_id, number_of_friends)
num_friends_by_id = [(user["id"], number_of_friends(user))
for user in users]
print "users sorted by number of friends:"
print sorted(num_friends_by_id,
key=lambda (user_id, num_friends): num_friends, # by number of friends
reverse=True) # largest to smallest
print
print "######################"
print "#"
print "# DATA SCIENTISTS YOU MAY KNOW"
print "#"
print "######################"
print
print "friends of friends bad for user 0:", friends_of_friend_ids_bad(users[0])
print "friends of friends for user 3:", friends_of_friend_ids(users[3])
print
print "######################"
print "#"
print "# SALARIES AND TENURES"
print "#"
print "######################"
print
print "average salary by tenure", average_salary_by_tenure
print "average salary by tenure bucket", average_salary_by_bucket
print
print "######################"
print "#"
print "# MOST COMMON WORDS"
print "#"
print "######################"
print
for word, count in words_and_counts.most_common():
if count > 1:
print word, count | unlicense |
phobson/statsmodels | statsmodels/formula/tests/test_formula.py | 29 | 4647 | from statsmodels.compat.python import iteritems, StringIO
import warnings
from statsmodels.formula.api import ols
from statsmodels.formula.formulatools import make_hypotheses_matrices
from statsmodels.tools import add_constant
from statsmodels.datasets.longley import load, load_pandas
import numpy.testing as npt
from statsmodels.tools.testing import assert_equal
from numpy.testing.utils import WarningManager
longley_formula = 'TOTEMP ~ GNPDEFL + GNP + UNEMP + ARMED + POP + YEAR'
class CheckFormulaOLS(object):
@classmethod
def setupClass(cls):
cls.data = load()
def test_endog_names(self):
assert self.model.endog_names == 'TOTEMP'
def test_exog_names(self):
assert self.model.exog_names == ['Intercept', 'GNPDEFL', 'GNP',
'UNEMP', 'ARMED', 'POP', 'YEAR']
def test_design(self):
npt.assert_equal(self.model.exog,
add_constant(self.data.exog, prepend=True))
def test_endog(self):
npt.assert_equal(self.model.endog, self.data.endog)
def test_summary(self):
# smoke test
warn_ctx = WarningManager()
warn_ctx.__enter__()
try:
warnings.filterwarnings("ignore",
"kurtosistest only valid for n>=20")
self.model.fit().summary()
finally:
warn_ctx.__exit__()
class TestFormulaPandas(CheckFormulaOLS):
@classmethod
def setupClass(cls):
data = load_pandas().data
cls.model = ols(longley_formula, data)
super(TestFormulaPandas, cls).setupClass()
class TestFormulaDict(CheckFormulaOLS):
@classmethod
def setupClass(cls):
data = dict((k, v.tolist()) for k, v in iteritems(load_pandas().data))
cls.model = ols(longley_formula, data)
super(TestFormulaDict, cls).setupClass()
class TestFormulaRecArray(CheckFormulaOLS):
@classmethod
def setupClass(cls):
data = load().data
cls.model = ols(longley_formula, data)
super(TestFormulaRecArray, cls).setupClass()
def test_tests():
formula = 'TOTEMP ~ GNPDEFL + GNP + UNEMP + ARMED + POP + YEAR'
dta = load_pandas().data
results = ols(formula, dta).fit()
test_formula = '(GNPDEFL = GNP), (UNEMP = 2), (YEAR/1829 = 1)'
LC = make_hypotheses_matrices(results, test_formula)
R = LC.coefs
Q = LC.constants
npt.assert_almost_equal(R, [[0, 1, -1, 0, 0, 0, 0],
[0, 0 , 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1./1829]], 8)
npt.assert_array_equal(Q, [[0],[2],[1]])
def test_formula_labels():
# make sure labels pass through patsy as expected
# data(Duncan) from car in R
dta = StringIO(""""type" "income" "education" "prestige"\n"accountant" "prof" 62 86 82\n"pilot" "prof" 72 76 83\n"architect" "prof" 75 92 90\n"author" "prof" 55 90 76\n"chemist" "prof" 64 86 90\n"minister" "prof" 21 84 87\n"professor" "prof" 64 93 93\n"dentist" "prof" 80 100 90\n"reporter" "wc" 67 87 52\n"engineer" "prof" 72 86 88\n"undertaker" "prof" 42 74 57\n"lawyer" "prof" 76 98 89\n"physician" "prof" 76 97 97\n"welfare.worker" "prof" 41 84 59\n"teacher" "prof" 48 91 73\n"conductor" "wc" 76 34 38\n"contractor" "prof" 53 45 76\n"factory.owner" "prof" 60 56 81\n"store.manager" "prof" 42 44 45\n"banker" "prof" 78 82 92\n"bookkeeper" "wc" 29 72 39\n"mail.carrier" "wc" 48 55 34\n"insurance.agent" "wc" 55 71 41\n"store.clerk" "wc" 29 50 16\n"carpenter" "bc" 21 23 33\n"electrician" "bc" 47 39 53\n"RR.engineer" "bc" 81 28 67\n"machinist" "bc" 36 32 57\n"auto.repairman" "bc" 22 22 26\n"plumber" "bc" 44 25 29\n"gas.stn.attendant" "bc" 15 29 10\n"coal.miner" "bc" 7 7 15\n"streetcar.motorman" "bc" 42 26 19\n"taxi.driver" "bc" 9 19 10\n"truck.driver" "bc" 21 15 13\n"machine.operator" "bc" 21 20 24\n"barber" "bc" 16 26 20\n"bartender" "bc" 16 28 7\n"shoe.shiner" "bc" 9 17 3\n"cook" "bc" 14 22 16\n"soda.clerk" "bc" 12 30 6\n"watchman" "bc" 17 25 11\n"janitor" "bc" 7 20 8\n"policeman" "bc" 34 47 41\n"waiter" "bc" 8 32 10""")
from pandas import read_table
dta = read_table(dta, sep=" ")
model = ols("prestige ~ income + education", dta).fit()
assert_equal(model.fittedvalues.index, dta.index)
def test_formula_predict():
from numpy import log
formula = """TOTEMP ~ log(GNPDEFL) + log(GNP) + UNEMP + ARMED +
POP + YEAR"""
data = load_pandas()
dta = load_pandas().data
results = ols(formula, dta).fit()
npt.assert_almost_equal(results.fittedvalues.values,
results.predict(data.exog), 8)
| bsd-3-clause |
DGrady/pandas | pandas/tests/indexes/test_base.py | 2 | 79780 | # -*- coding: utf-8 -*-
import pytest
from datetime import datetime, timedelta
import pandas.util.testing as tm
from pandas.core.indexes.api import Index, MultiIndex
from pandas.tests.indexes.common import Base
from pandas.compat import (range, lrange, lzip, u,
text_type, zip, PY3, PY36)
import operator
import numpy as np
from pandas import (period_range, date_range, Series,
DataFrame, Float64Index, Int64Index,
CategoricalIndex, DatetimeIndex, TimedeltaIndex,
PeriodIndex, isna)
from pandas.core.index import _get_combined_index
from pandas.util.testing import assert_almost_equal
from pandas.compat.numpy import np_datetime64_compat
import pandas.core.config as cf
from pandas.core.indexes.datetimes import _to_m8
import pandas as pd
from pandas._libs.lib import Timestamp
class TestIndex(Base):
_holder = Index
def setup_method(self, method):
self.indices = dict(unicodeIndex=tm.makeUnicodeIndex(100),
strIndex=tm.makeStringIndex(100),
dateIndex=tm.makeDateIndex(100),
periodIndex=tm.makePeriodIndex(100),
tdIndex=tm.makeTimedeltaIndex(100),
intIndex=tm.makeIntIndex(100),
uintIndex=tm.makeUIntIndex(100),
rangeIndex=tm.makeIntIndex(100),
floatIndex=tm.makeFloatIndex(100),
boolIndex=Index([True, False]),
catIndex=tm.makeCategoricalIndex(100),
empty=Index([]),
tuples=MultiIndex.from_tuples(lzip(
['foo', 'bar', 'baz'], [1, 2, 3])))
self.setup_indices()
def create_index(self):
return Index(list('abcde'))
def test_new_axis(self):
new_index = self.dateIndex[None, :]
assert new_index.ndim == 2
assert isinstance(new_index, np.ndarray)
def test_copy_and_deepcopy(self):
super(TestIndex, self).test_copy_and_deepcopy()
new_copy2 = self.intIndex.copy(dtype=int)
assert new_copy2.dtype.kind == 'i'
def test_constructor(self):
# regular instance creation
tm.assert_contains_all(self.strIndex, self.strIndex)
tm.assert_contains_all(self.dateIndex, self.dateIndex)
# casting
arr = np.array(self.strIndex)
index = Index(arr)
tm.assert_contains_all(arr, index)
tm.assert_index_equal(self.strIndex, index)
# copy
arr = np.array(self.strIndex)
index = Index(arr, copy=True, name='name')
assert isinstance(index, Index)
assert index.name == 'name'
tm.assert_numpy_array_equal(arr, index.values)
arr[0] = "SOMEBIGLONGSTRING"
assert index[0] != "SOMEBIGLONGSTRING"
# what to do here?
# arr = np.array(5.)
# pytest.raises(Exception, arr.view, Index)
def test_constructor_corner(self):
# corner case
pytest.raises(TypeError, Index, 0)
def test_construction_list_mixed_tuples(self):
# see gh-10697: if we are constructing from a mixed list of tuples,
# make sure that we are independent of the sorting order.
idx1 = Index([('A', 1), 'B'])
assert isinstance(idx1, Index)
assert not isinstance(idx1, MultiIndex)
idx2 = Index(['B', ('A', 1)])
assert isinstance(idx2, Index)
assert not isinstance(idx2, MultiIndex)
def test_constructor_from_index_datetimetz(self):
idx = pd.date_range('2015-01-01 10:00', freq='D', periods=3,
tz='US/Eastern')
result = pd.Index(idx)
tm.assert_index_equal(result, idx)
assert result.tz == idx.tz
result = pd.Index(idx.asobject)
tm.assert_index_equal(result, idx)
assert result.tz == idx.tz
def test_constructor_from_index_timedelta(self):
idx = pd.timedelta_range('1 days', freq='D', periods=3)
result = pd.Index(idx)
tm.assert_index_equal(result, idx)
result = pd.Index(idx.asobject)
tm.assert_index_equal(result, idx)
def test_constructor_from_index_period(self):
idx = pd.period_range('2015-01-01', freq='D', periods=3)
result = pd.Index(idx)
tm.assert_index_equal(result, idx)
result = pd.Index(idx.asobject)
tm.assert_index_equal(result, idx)
def test_constructor_from_series_datetimetz(self):
idx = pd.date_range('2015-01-01 10:00', freq='D', periods=3,
tz='US/Eastern')
result = pd.Index(pd.Series(idx))
tm.assert_index_equal(result, idx)
assert result.tz == idx.tz
def test_constructor_from_series_timedelta(self):
idx = pd.timedelta_range('1 days', freq='D', periods=3)
result = pd.Index(pd.Series(idx))
tm.assert_index_equal(result, idx)
def test_constructor_from_series_period(self):
idx = pd.period_range('2015-01-01', freq='D', periods=3)
result = pd.Index(pd.Series(idx))
tm.assert_index_equal(result, idx)
def test_constructor_from_series(self):
expected = DatetimeIndex([Timestamp('20110101'), Timestamp('20120101'),
Timestamp('20130101')])
s = Series([Timestamp('20110101'), Timestamp('20120101'),
Timestamp('20130101')])
result = Index(s)
tm.assert_index_equal(result, expected)
result = DatetimeIndex(s)
tm.assert_index_equal(result, expected)
# GH 6273
# create from a series, passing a freq
s = Series(pd.to_datetime(['1-1-1990', '2-1-1990', '3-1-1990',
'4-1-1990', '5-1-1990']))
result = DatetimeIndex(s, freq='MS')
expected = DatetimeIndex(['1-1-1990', '2-1-1990', '3-1-1990',
'4-1-1990', '5-1-1990'], freq='MS')
tm.assert_index_equal(result, expected)
df = pd.DataFrame(np.random.rand(5, 3))
df['date'] = ['1-1-1990', '2-1-1990', '3-1-1990', '4-1-1990',
'5-1-1990']
result = DatetimeIndex(df['date'], freq='MS')
expected.name = 'date'
tm.assert_index_equal(result, expected)
assert df['date'].dtype == object
exp = pd.Series(['1-1-1990', '2-1-1990', '3-1-1990', '4-1-1990',
'5-1-1990'], name='date')
tm.assert_series_equal(df['date'], exp)
# GH 6274
# infer freq of same
result = pd.infer_freq(df['date'])
assert result == 'MS'
def test_constructor_ndarray_like(self):
# GH 5460#issuecomment-44474502
# it should be possible to convert any object that satisfies the numpy
# ndarray interface directly into an Index
class ArrayLike(object):
def __init__(self, array):
self.array = array
def __array__(self, dtype=None):
return self.array
for array in [np.arange(5), np.array(['a', 'b', 'c']),
date_range('2000-01-01', periods=3).values]:
expected = pd.Index(array)
result = pd.Index(ArrayLike(array))
tm.assert_index_equal(result, expected)
def test_constructor_int_dtype_nan(self):
# see gh-15187
data = [np.nan]
msg = "cannot convert"
with tm.assert_raises_regex(ValueError, msg):
Index(data, dtype='int64')
with tm.assert_raises_regex(ValueError, msg):
Index(data, dtype='uint64')
# This, however, should not break
# because NaN is float.
expected = Float64Index(data)
result = Index(data, dtype='float')
tm.assert_index_equal(result, expected)
def test_index_ctor_infer_nan_nat(self):
# GH 13467
exp = pd.Float64Index([np.nan, np.nan])
assert exp.dtype == np.float64
tm.assert_index_equal(Index([np.nan, np.nan]), exp)
tm.assert_index_equal(Index(np.array([np.nan, np.nan])), exp)
exp = pd.DatetimeIndex([pd.NaT, pd.NaT])
assert exp.dtype == 'datetime64[ns]'
tm.assert_index_equal(Index([pd.NaT, pd.NaT]), exp)
tm.assert_index_equal(Index(np.array([pd.NaT, pd.NaT])), exp)
exp = pd.DatetimeIndex([pd.NaT, pd.NaT])
assert exp.dtype == 'datetime64[ns]'
for data in [[pd.NaT, np.nan], [np.nan, pd.NaT],
[np.nan, np.datetime64('nat')],
[np.datetime64('nat'), np.nan]]:
tm.assert_index_equal(Index(data), exp)
tm.assert_index_equal(Index(np.array(data, dtype=object)), exp)
exp = pd.TimedeltaIndex([pd.NaT, pd.NaT])
assert exp.dtype == 'timedelta64[ns]'
for data in [[np.nan, np.timedelta64('nat')],
[np.timedelta64('nat'), np.nan],
[pd.NaT, np.timedelta64('nat')],
[np.timedelta64('nat'), pd.NaT]]:
tm.assert_index_equal(Index(data), exp)
tm.assert_index_equal(Index(np.array(data, dtype=object)), exp)
# mixed np.datetime64/timedelta64 nat results in object
data = [np.datetime64('nat'), np.timedelta64('nat')]
exp = pd.Index(data, dtype=object)
tm.assert_index_equal(Index(data), exp)
tm.assert_index_equal(Index(np.array(data, dtype=object)), exp)
data = [np.timedelta64('nat'), np.datetime64('nat')]
exp = pd.Index(data, dtype=object)
tm.assert_index_equal(Index(data), exp)
tm.assert_index_equal(Index(np.array(data, dtype=object)), exp)
def test_index_ctor_infer_periodindex(self):
xp = period_range('2012-1-1', freq='M', periods=3)
rs = Index(xp)
tm.assert_index_equal(rs, xp)
assert isinstance(rs, PeriodIndex)
def test_constructor_simple_new(self):
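        # _simple_new is the internal fast-path constructor: it skips
        # validation and dtype inference, so round-tripping an existing
        # Index through it should leave the data unchanged.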
idx = Index([1, 2, 3, 4, 5], name='int')
result = idx._simple_new(idx, 'int')
tm.assert_index_equal(result, idx)
idx = Index([1.1, np.nan, 2.2, 3.0], name='float')
result = idx._simple_new(idx, 'float')
tm.assert_index_equal(result, idx)
idx = Index(['A', 'B', 'C', np.nan], name='obj')
result = idx._simple_new(idx, 'obj')
tm.assert_index_equal(result, idx)
def test_constructor_dtypes(self):
for idx in [Index(np.array([1, 2, 3], dtype=int)),
Index(np.array([1, 2, 3], dtype=int), dtype=int),
Index([1, 2, 3], dtype=int)]:
assert isinstance(idx, Int64Index)
# These should coerce
for idx in [Index(np.array([1., 2., 3.], dtype=float), dtype=int),
Index([1., 2., 3.], dtype=int)]:
assert isinstance(idx, Int64Index)
for idx in [Index(np.array([1., 2., 3.], dtype=float)),
Index(np.array([1, 2, 3], dtype=int), dtype=float),
Index(np.array([1., 2., 3.], dtype=float), dtype=float),
Index([1, 2, 3], dtype=float),
Index([1., 2., 3.], dtype=float)]:
assert isinstance(idx, Float64Index)
for idx in [Index(np.array([True, False, True], dtype=bool)),
Index([True, False, True]),
Index(np.array([True, False, True], dtype=bool),
dtype=bool),
Index([True, False, True], dtype=bool)]:
assert isinstance(idx, Index)
assert idx.dtype == object
for idx in [Index(np.array([1, 2, 3], dtype=int), dtype='category'),
Index([1, 2, 3], dtype='category'),
Index(np.array([np_datetime64_compat('2011-01-01'),
np_datetime64_compat('2011-01-02')]),
dtype='category'),
Index([datetime(2011, 1, 1), datetime(2011, 1, 2)],
dtype='category')]:
assert isinstance(idx, CategoricalIndex)
for idx in [Index(np.array([np_datetime64_compat('2011-01-01'),
np_datetime64_compat('2011-01-02')])),
Index([datetime(2011, 1, 1), datetime(2011, 1, 2)])]:
assert isinstance(idx, DatetimeIndex)
for idx in [Index(np.array([np_datetime64_compat('2011-01-01'),
np_datetime64_compat('2011-01-02')]),
dtype=object),
Index([datetime(2011, 1, 1),
datetime(2011, 1, 2)], dtype=object)]:
assert not isinstance(idx, DatetimeIndex)
assert isinstance(idx, Index)
assert idx.dtype == object
for idx in [Index(np.array([np.timedelta64(1, 'D'), np.timedelta64(
1, 'D')])), Index([timedelta(1), timedelta(1)])]:
assert isinstance(idx, TimedeltaIndex)
for idx in [Index(np.array([np.timedelta64(1, 'D'),
np.timedelta64(1, 'D')]), dtype=object),
Index([timedelta(1), timedelta(1)], dtype=object)]:
assert not isinstance(idx, TimedeltaIndex)
assert isinstance(idx, Index)
assert idx.dtype == object
def test_constructor_dtypes_datetime(self):
for tz in [None, 'UTC', 'US/Eastern', 'Asia/Tokyo']:
idx = pd.date_range('2011-01-01', periods=5, tz=tz)
dtype = idx.dtype
# pass values without timezone, as DatetimeIndex localizes it
for values in [pd.date_range('2011-01-01', periods=5).values,
pd.date_range('2011-01-01', periods=5).asi8]:
for res in [pd.Index(values, tz=tz),
pd.Index(values, dtype=dtype),
pd.Index(list(values), tz=tz),
pd.Index(list(values), dtype=dtype)]:
tm.assert_index_equal(res, idx)
# check compat with DatetimeIndex
for res in [pd.DatetimeIndex(values, tz=tz),
pd.DatetimeIndex(values, dtype=dtype),
pd.DatetimeIndex(list(values), tz=tz),
pd.DatetimeIndex(list(values), dtype=dtype)]:
tm.assert_index_equal(res, idx)
def test_constructor_dtypes_timedelta(self):
idx = pd.timedelta_range('1 days', periods=5)
dtype = idx.dtype
for values in [idx.values, idx.asi8]:
for res in [pd.Index(values, dtype=dtype),
pd.Index(list(values), dtype=dtype)]:
tm.assert_index_equal(res, idx)
# check compat with TimedeltaIndex
for res in [pd.TimedeltaIndex(values, dtype=dtype),
pd.TimedeltaIndex(list(values), dtype=dtype)]:
tm.assert_index_equal(res, idx)
def test_view_with_args(self):
restricted = ['unicodeIndex', 'strIndex', 'catIndex', 'boolIndex',
'empty']
for i in restricted:
ind = self.indices[i]
# with arguments
pytest.raises(TypeError, lambda: ind.view('i8'))
# these are ok
for i in list(set(self.indices.keys()) - set(restricted)):
ind = self.indices[i]
# with arguments
ind.view('i8')
def test_astype(self):
casted = self.intIndex.astype('i8')
# it works!
casted.get_loc(5)
# pass on name
self.intIndex.name = 'foobar'
casted = self.intIndex.astype('i8')
assert casted.name == 'foobar'
def test_equals_object(self):
# same
assert Index(['a', 'b', 'c']).equals(Index(['a', 'b', 'c']))
# different length
assert not Index(['a', 'b', 'c']).equals(Index(['a', 'b']))
# same length, different values
assert not Index(['a', 'b', 'c']).equals(Index(['a', 'b', 'd']))
# Must also be an Index
assert not Index(['a', 'b', 'c']).equals(['a', 'b', 'c'])
def test_insert(self):
# GH 7256
# validate neg/pos inserts
result = Index(['b', 'c', 'd'])
# test 0th element
tm.assert_index_equal(Index(['a', 'b', 'c', 'd']),
result.insert(0, 'a'))
# test Nth element that follows Python list behavior
tm.assert_index_equal(Index(['b', 'c', 'e', 'd']),
result.insert(-1, 'e'))
# test loc +/- neq (0, -1)
tm.assert_index_equal(result.insert(1, 'z'), result.insert(-2, 'z'))
# test empty
null_index = Index([])
tm.assert_index_equal(Index(['a']), null_index.insert(0, 'a'))
def test_delete(self):
idx = Index(['a', 'b', 'c', 'd'], name='idx')
expected = Index(['b', 'c', 'd'], name='idx')
result = idx.delete(0)
tm.assert_index_equal(result, expected)
assert result.name == expected.name
expected = Index(['a', 'b', 'c'], name='idx')
result = idx.delete(-1)
tm.assert_index_equal(result, expected)
assert result.name == expected.name
with pytest.raises((IndexError, ValueError)):
# either depending on numpy version
result = idx.delete(5)
def test_identical(self):
# index
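        # equals() compares values only; identical() additionally requires
        # metadata such as the name to match.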
i1 = Index(['a', 'b', 'c'])
i2 = Index(['a', 'b', 'c'])
assert i1.identical(i2)
i1 = i1.rename('foo')
assert i1.equals(i2)
assert not i1.identical(i2)
i2 = i2.rename('foo')
assert i1.identical(i2)
i3 = Index([('a', 'a'), ('a', 'b'), ('b', 'a')])
i4 = Index([('a', 'a'), ('a', 'b'), ('b', 'a')], tupleize_cols=False)
assert not i3.identical(i4)
def test_is_(self):
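        # is_() is an identity-style check propagated through .view(): views
        # compare True, while copies, slices and independently constructed
        # wrappers of the same data do not.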
ind = Index(range(10))
assert ind.is_(ind)
assert ind.is_(ind.view().view().view().view())
assert not ind.is_(Index(range(10)))
assert not ind.is_(ind.copy())
assert not ind.is_(ind.copy(deep=False))
assert not ind.is_(ind[:])
assert not ind.is_(ind.view(np.ndarray).view(Index))
assert not ind.is_(np.array(range(10)))
# quasi-implementation dependent
assert ind.is_(ind.view())
ind2 = ind.view()
ind2.name = 'bob'
assert ind.is_(ind2)
assert ind2.is_(ind)
# doesn't matter if Indices are *actually* views of underlying data,
assert not ind.is_(Index(ind.values))
arr = np.array(range(1, 11))
ind1 = Index(arr, copy=False)
ind2 = Index(arr, copy=False)
assert not ind1.is_(ind2)
def test_asof(self):
d = self.dateIndex[0]
assert self.dateIndex.asof(d) == d
assert isna(self.dateIndex.asof(d - timedelta(1)))
d = self.dateIndex[-1]
assert self.dateIndex.asof(d + timedelta(1)) == d
d = self.dateIndex[0].to_pydatetime()
assert isinstance(self.dateIndex.asof(d), Timestamp)
def test_asof_datetime_partial(self):
idx = pd.date_range('2010-01-01', periods=2, freq='m')
expected = Timestamp('2010-02-28')
result = idx.asof('2010-02')
assert result == expected
assert not isinstance(result, Index)
def test_nanosecond_index_access(self):
s = Series([Timestamp('20130101')]).values.view('i8')[0]
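        # s is 2013-01-01 expressed in nanoseconds since the epoch, so r
        # spans 100 timestamps 50..149 ns after midnight.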
r = DatetimeIndex([s + 50 + i for i in range(100)])
x = Series(np.random.randn(100), index=r)
first_value = x.asof(x.index[0])
# this does not yet work, as parsing strings is done via dateutil
# assert first_value == x['2013-01-01 00:00:00.000000050+0000']
exp_ts = np_datetime64_compat('2013-01-01 00:00:00.000000050+0000',
'ns')
assert first_value == x[Timestamp(exp_ts)]
def test_comparators(self):
index = self.dateIndex
element = index[len(index) // 2]
element = _to_m8(element)
arr = np.array(index)
def _check(op):
arr_result = op(arr, element)
index_result = op(index, element)
assert isinstance(index_result, np.ndarray)
tm.assert_numpy_array_equal(arr_result, index_result)
_check(operator.eq)
_check(operator.ne)
_check(operator.gt)
_check(operator.lt)
_check(operator.ge)
_check(operator.le)
def test_booleanindex(self):
boolIdx = np.repeat(True, len(self.strIndex)).astype(bool)
boolIdx[5:30:2] = False
subIndex = self.strIndex[boolIdx]
for i, val in enumerate(subIndex):
assert subIndex.get_loc(val) == i
subIndex = self.strIndex[list(boolIdx)]
for i, val in enumerate(subIndex):
assert subIndex.get_loc(val) == i
def test_fancy(self):
sl = self.strIndex[[1, 2, 3]]
for i in sl:
assert i == sl[sl.get_loc(i)]
def test_empty_fancy(self):
empty_farr = np.array([], dtype=np.float_)
empty_iarr = np.array([], dtype=np.int_)
empty_barr = np.array([], dtype=np.bool_)
# pd.DatetimeIndex is excluded, because it overrides getitem and should
# be tested separately.
for idx in [self.strIndex, self.intIndex, self.floatIndex]:
empty_idx = idx.__class__([])
assert idx[[]].identical(empty_idx)
assert idx[empty_iarr].identical(empty_idx)
assert idx[empty_barr].identical(empty_idx)
# np.ndarray only accepts ndarray of int & bool dtypes, so should
# Index.
pytest.raises(IndexError, idx.__getitem__, empty_farr)
def test_getitem(self):
arr = np.array(self.dateIndex)
exp = self.dateIndex[5]
exp = _to_m8(exp)
assert exp == arr[5]
def test_intersection(self):
first = self.strIndex[:20]
second = self.strIndex[:10]
intersect = first.intersection(second)
assert tm.equalContents(intersect, second)
# Corner cases
inter = first.intersection(first)
assert inter is first
idx1 = Index([1, 2, 3, 4, 5], name='idx')
# if target has the same name, it is preserved
idx2 = Index([3, 4, 5, 6, 7], name='idx')
expected2 = Index([3, 4, 5], name='idx')
result2 = idx1.intersection(idx2)
tm.assert_index_equal(result2, expected2)
assert result2.name == expected2.name
# if target name is different, it will be reset
idx3 = Index([3, 4, 5, 6, 7], name='other')
expected3 = Index([3, 4, 5], name=None)
result3 = idx1.intersection(idx3)
tm.assert_index_equal(result3, expected3)
assert result3.name == expected3.name
# non monotonic
idx1 = Index([5, 3, 2, 4, 1], name='idx')
idx2 = Index([4, 7, 6, 5, 3], name='idx')
expected = Index([5, 3, 4], name='idx')
result = idx1.intersection(idx2)
tm.assert_index_equal(result, expected)
idx2 = Index([4, 7, 6, 5, 3], name='other')
expected = Index([5, 3, 4], name=None)
result = idx1.intersection(idx2)
tm.assert_index_equal(result, expected)
# non-monotonic non-unique
idx1 = Index(['A', 'B', 'A', 'C'])
idx2 = Index(['B', 'D'])
expected = Index(['B'], dtype='object')
result = idx1.intersection(idx2)
tm.assert_index_equal(result, expected)
idx2 = Index(['B', 'D', 'A'])
expected = Index(['A', 'B', 'A'], dtype='object')
result = idx1.intersection(idx2)
tm.assert_index_equal(result, expected)
# preserve names
first = self.strIndex[5:20]
second = self.strIndex[:10]
first.name = 'A'
second.name = 'A'
intersect = first.intersection(second)
assert intersect.name == 'A'
second.name = 'B'
intersect = first.intersection(second)
assert intersect.name is None
first.name = None
second.name = 'B'
intersect = first.intersection(second)
assert intersect.name is None
def test_union(self):
first = self.strIndex[5:20]
second = self.strIndex[:10]
everything = self.strIndex[:20]
union = first.union(second)
assert tm.equalContents(union, everything)
# GH 10149
cases = [klass(second.values) for klass in [np.array, Series, list]]
for case in cases:
result = first.union(case)
assert tm.equalContents(result, everything)
# Corner cases
union = first.union(first)
assert union is first
union = first.union([])
assert union is first
union = Index([]).union(first)
assert union is first
# preserve names
first = Index(list('ab'), name='A')
second = Index(list('ab'), name='B')
union = first.union(second)
expected = Index(list('ab'), name=None)
tm.assert_index_equal(union, expected)
first = Index(list('ab'), name='A')
second = Index([], name='B')
union = first.union(second)
expected = Index(list('ab'), name=None)
tm.assert_index_equal(union, expected)
first = Index([], name='A')
second = Index(list('ab'), name='B')
union = first.union(second)
expected = Index(list('ab'), name=None)
tm.assert_index_equal(union, expected)
first = Index(list('ab'))
second = Index(list('ab'), name='B')
union = first.union(second)
expected = Index(list('ab'), name='B')
tm.assert_index_equal(union, expected)
first = Index([])
second = Index(list('ab'), name='B')
union = first.union(second)
expected = Index(list('ab'), name='B')
tm.assert_index_equal(union, expected)
first = Index(list('ab'))
second = Index([], name='B')
union = first.union(second)
expected = Index(list('ab'), name='B')
tm.assert_index_equal(union, expected)
first = Index(list('ab'), name='A')
second = Index(list('ab'))
union = first.union(second)
expected = Index(list('ab'), name='A')
tm.assert_index_equal(union, expected)
first = Index(list('ab'), name='A')
second = Index([])
union = first.union(second)
expected = Index(list('ab'), name='A')
tm.assert_index_equal(union, expected)
first = Index([], name='A')
second = Index(list('ab'))
union = first.union(second)
expected = Index(list('ab'), name='A')
tm.assert_index_equal(union, expected)
with tm.assert_produces_warning(RuntimeWarning):
firstCat = self.strIndex.union(self.dateIndex)
secondCat = self.strIndex.union(self.strIndex)
if self.dateIndex.dtype == np.object_:
appended = np.append(self.strIndex, self.dateIndex)
else:
appended = np.append(self.strIndex, self.dateIndex.astype('O'))
assert tm.equalContents(firstCat, appended)
assert tm.equalContents(secondCat, self.strIndex)
tm.assert_contains_all(self.strIndex, firstCat)
tm.assert_contains_all(self.strIndex, secondCat)
tm.assert_contains_all(self.dateIndex, firstCat)
def test_add(self):
idx = self.strIndex
expected = Index(self.strIndex.values * 2)
tm.assert_index_equal(idx + idx, expected)
tm.assert_index_equal(idx + idx.tolist(), expected)
tm.assert_index_equal(idx.tolist() + idx, expected)
# test add and radd
idx = Index(list('abc'))
expected = Index(['a1', 'b1', 'c1'])
tm.assert_index_equal(idx + '1', expected)
expected = Index(['1a', '1b', '1c'])
tm.assert_index_equal('1' + idx, expected)
def test_sub(self):
idx = self.strIndex
pytest.raises(TypeError, lambda: idx - 'a')
pytest.raises(TypeError, lambda: idx - idx)
pytest.raises(TypeError, lambda: idx - idx.tolist())
pytest.raises(TypeError, lambda: idx.tolist() - idx)
def test_map_identity_mapping(self):
# GH 12766
for name, cur_index in self.indices.items():
tm.assert_index_equal(cur_index, cur_index.map(lambda x: x))
def test_map_with_tuples(self):
# GH 12766
# Test that returning a single tuple from an Index
# returns an Index.
boolean_index = tm.makeIntIndex(3).map(lambda x: (x,))
expected = Index([(0,), (1,), (2,)])
tm.assert_index_equal(boolean_index, expected)
# Test that returning a tuple from a map of a single index
# returns a MultiIndex object.
boolean_index = tm.makeIntIndex(3).map(lambda x: (x, x == 1))
expected = MultiIndex.from_tuples([(0, False), (1, True), (2, False)])
tm.assert_index_equal(boolean_index, expected)
# Test that returning a single object from a MultiIndex
# returns an Index.
first_level = ['foo', 'bar', 'baz']
multi_index = MultiIndex.from_tuples(lzip(first_level, [1, 2, 3]))
reduced_index = multi_index.map(lambda x: x[0])
tm.assert_index_equal(reduced_index, Index(first_level))
def test_map_tseries_indices_return_index(self):
date_index = tm.makeDateIndex(10)
exp = Index([1] * 10)
tm.assert_index_equal(exp, date_index.map(lambda x: 1))
period_index = tm.makePeriodIndex(10)
tm.assert_index_equal(exp, period_index.map(lambda x: 1))
tdelta_index = tm.makeTimedeltaIndex(10)
tm.assert_index_equal(exp, tdelta_index.map(lambda x: 1))
date_index = tm.makeDateIndex(24, freq='h', name='hourly')
exp = Index(range(24), name='hourly')
tm.assert_index_equal(exp, date_index.map(lambda x: x.hour))
def test_append_multiple(self):
index = Index(['a', 'b', 'c', 'd', 'e', 'f'])
foos = [index[:2], index[2:4], index[4:]]
result = foos[0].append(foos[1:])
tm.assert_index_equal(result, index)
# empty
result = index.append([])
tm.assert_index_equal(result, index)
def test_append_empty_preserve_name(self):
left = Index([], name='foo')
right = Index([1, 2, 3], name='foo')
result = left.append(right)
assert result.name == 'foo'
left = Index([], name='foo')
right = Index([1, 2, 3], name='bar')
result = left.append(right)
assert result.name is None
def test_add_string(self):
# from bug report
index = Index(['a', 'b', 'c'])
index2 = index + 'foo'
assert 'a' not in index2
assert 'afoo' in index2
def test_iadd_string(self):
index = pd.Index(['a', 'b', 'c'])
# doesn't fail test unless there is a check before `+=`
assert 'a' in index
index += '_x'
assert 'a_x' in index
def test_difference(self):
first = self.strIndex[5:20]
second = self.strIndex[:10]
answer = self.strIndex[10:20]
first.name = 'name'
# different names
result = first.difference(second)
assert tm.equalContents(result, answer)
assert result.name is None
# same names
second.name = 'name'
result = first.difference(second)
assert result.name == 'name'
# with empty
result = first.difference([])
assert tm.equalContents(result, first)
assert result.name == first.name
# with everything
result = first.difference(first)
assert len(result) == 0
assert result.name == first.name
def test_symmetric_difference(self):
# smoke
idx1 = Index([1, 2, 3, 4], name='idx1')
idx2 = Index([2, 3, 4, 5])
result = idx1.symmetric_difference(idx2)
expected = Index([1, 5])
assert tm.equalContents(result, expected)
assert result.name is None
# __xor__ syntax
expected = idx1 ^ idx2
assert tm.equalContents(result, expected)
assert result.name is None
# multiIndex
idx1 = MultiIndex.from_tuples(self.tuples)
idx2 = MultiIndex.from_tuples([('foo', 1), ('bar', 3)])
result = idx1.symmetric_difference(idx2)
expected = MultiIndex.from_tuples([('bar', 2), ('baz', 3), ('bar', 3)])
assert tm.equalContents(result, expected)
# nans:
# GH 13514 change: {nan} - {nan} == {}
# (GH 6444, sorting of nans, is no longer an issue)
idx1 = Index([1, np.nan, 2, 3])
idx2 = Index([0, 1, np.nan])
idx3 = Index([0, 1])
result = idx1.symmetric_difference(idx2)
expected = Index([0.0, 2.0, 3.0])
tm.assert_index_equal(result, expected)
result = idx1.symmetric_difference(idx3)
expected = Index([0.0, 2.0, 3.0, np.nan])
tm.assert_index_equal(result, expected)
# other not an Index:
idx1 = Index([1, 2, 3, 4], name='idx1')
idx2 = np.array([2, 3, 4, 5])
expected = Index([1, 5])
result = idx1.symmetric_difference(idx2)
assert tm.equalContents(result, expected)
assert result.name == 'idx1'
result = idx1.symmetric_difference(idx2, result_name='new_name')
assert tm.equalContents(result, expected)
assert result.name == 'new_name'
def test_is_numeric(self):
assert not self.dateIndex.is_numeric()
assert not self.strIndex.is_numeric()
assert self.intIndex.is_numeric()
assert self.floatIndex.is_numeric()
assert not self.catIndex.is_numeric()
def test_is_object(self):
assert self.strIndex.is_object()
assert self.boolIndex.is_object()
assert not self.catIndex.is_object()
assert not self.intIndex.is_object()
assert not self.dateIndex.is_object()
assert not self.floatIndex.is_object()
def test_is_all_dates(self):
assert self.dateIndex.is_all_dates
assert not self.strIndex.is_all_dates
assert not self.intIndex.is_all_dates
def test_summary(self):
self._check_method_works(Index.summary)
# GH3869
ind = Index(['{other}%s', "~:{range}:0"], name='A')
result = ind.summary()
# shouldn't be formatted accidentally.
assert '~:{range}:0' in result
assert '{other}%s' in result
def test_format(self):
self._check_method_works(Index.format)
        # GH 14626
        # Windows has different precision on datetime.datetime.now (it may not
        # include microseconds); the default Timestamp repr shows them, but
        # Index formatting does not, so we skip the check in that case.
now = datetime.now()
if not str(now).endswith("000"):
index = Index([now])
formatted = index.format()
expected = [str(index[0])]
assert formatted == expected
# 2845
index = Index([1, 2.0 + 3.0j, np.nan])
formatted = index.format()
expected = [str(index[0]), str(index[1]), u('NaN')]
assert formatted == expected
# is this really allowed?
index = Index([1, 2.0 + 3.0j, None])
formatted = index.format()
expected = [str(index[0]), str(index[1]), u('NaN')]
assert formatted == expected
self.strIndex[:0].format()
def test_format_with_name_time_info(self):
# bug I fixed 12/20/2011
inc = timedelta(hours=4)
dates = Index([dt + inc for dt in self.dateIndex], name='something')
formatted = dates.format(name=True)
assert formatted[0] == 'something'
def test_format_datetime_with_time(self):
t = Index([datetime(2012, 2, 7), datetime(2012, 2, 7, 23)])
result = t.format()
expected = ['2012-02-07 00:00:00', '2012-02-07 23:00:00']
assert len(result) == 2
assert result == expected
def test_format_none(self):
values = ['a', 'b', 'c', None]
idx = Index(values)
idx.format()
assert idx[3] is None
def test_logical_compat(self):
idx = self.create_index()
assert idx.all() == idx.values.all()
assert idx.any() == idx.values.any()
def _check_method_works(self, method):
method(self.empty)
method(self.dateIndex)
method(self.unicodeIndex)
method(self.strIndex)
method(self.intIndex)
method(self.tuples)
method(self.catIndex)
def test_get_indexer(self):
idx1 = Index([1, 2, 3, 4, 5])
idx2 = Index([2, 4, 6])
r1 = idx1.get_indexer(idx2)
assert_almost_equal(r1, np.array([1, 3, -1], dtype=np.intp))
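        # 'pad'/'ffill' map each label to the position of the last index
        # value <= the label (or -1 if none exists); 'backfill'/'bfill' use
        # the first value >= the label. The alias pairs are checked to agree.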
r1 = idx2.get_indexer(idx1, method='pad')
e1 = np.array([-1, 0, 0, 1, 1], dtype=np.intp)
assert_almost_equal(r1, e1)
r2 = idx2.get_indexer(idx1[::-1], method='pad')
assert_almost_equal(r2, e1[::-1])
rffill1 = idx2.get_indexer(idx1, method='ffill')
assert_almost_equal(r1, rffill1)
r1 = idx2.get_indexer(idx1, method='backfill')
e1 = np.array([0, 0, 1, 1, 2], dtype=np.intp)
assert_almost_equal(r1, e1)
rbfill1 = idx2.get_indexer(idx1, method='bfill')
assert_almost_equal(r1, rbfill1)
r2 = idx2.get_indexer(idx1[::-1], method='backfill')
assert_almost_equal(r2, e1[::-1])
def test_get_indexer_invalid(self):
# GH10411
idx = Index(np.arange(10))
with tm.assert_raises_regex(ValueError, 'tolerance argument'):
idx.get_indexer([1, 0], tolerance=1)
with tm.assert_raises_regex(ValueError, 'limit argument'):
idx.get_indexer([1, 0], limit=1)
def test_get_indexer_nearest(self):
idx = Index(np.arange(10))
all_methods = ['pad', 'backfill', 'nearest']
for method in all_methods:
actual = idx.get_indexer([0, 5, 9], method=method)
tm.assert_numpy_array_equal(actual, np.array([0, 5, 9],
dtype=np.intp))
actual = idx.get_indexer([0, 5, 9], method=method, tolerance=0)
tm.assert_numpy_array_equal(actual, np.array([0, 5, 9],
dtype=np.intp))
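        # For targets that fall between index values (0.2, 1.8, 8.5), 'pad'
        # picks the position just below, 'backfill' the one just above, and
        # 'nearest' whichever is closer.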
for method, expected in zip(all_methods, [[0, 1, 8], [1, 2, 9],
[0, 2, 9]]):
actual = idx.get_indexer([0.2, 1.8, 8.5], method=method)
tm.assert_numpy_array_equal(actual, np.array(expected,
dtype=np.intp))
actual = idx.get_indexer([0.2, 1.8, 8.5], method=method,
tolerance=1)
tm.assert_numpy_array_equal(actual, np.array(expected,
dtype=np.intp))
for method, expected in zip(all_methods, [[0, -1, -1], [-1, 2, -1],
[0, 2, -1]]):
actual = idx.get_indexer([0.2, 1.8, 8.5], method=method,
tolerance=0.2)
tm.assert_numpy_array_equal(actual, np.array(expected,
dtype=np.intp))
with tm.assert_raises_regex(ValueError, 'limit argument'):
idx.get_indexer([1, 0], method='nearest', limit=1)
def test_get_indexer_nearest_decreasing(self):
idx = Index(np.arange(10))[::-1]
all_methods = ['pad', 'backfill', 'nearest']
for method in all_methods:
actual = idx.get_indexer([0, 5, 9], method=method)
tm.assert_numpy_array_equal(actual, np.array([9, 4, 0],
dtype=np.intp))
for method, expected in zip(all_methods, [[8, 7, 0], [9, 8, 1],
[9, 7, 0]]):
actual = idx.get_indexer([0.2, 1.8, 8.5], method=method)
tm.assert_numpy_array_equal(actual, np.array(expected,
dtype=np.intp))
def test_get_indexer_strings(self):
idx = pd.Index(['b', 'c'])
actual = idx.get_indexer(['a', 'b', 'c', 'd'], method='pad')
expected = np.array([-1, 0, 1, 1], dtype=np.intp)
tm.assert_numpy_array_equal(actual, expected)
actual = idx.get_indexer(['a', 'b', 'c', 'd'], method='backfill')
expected = np.array([0, 0, 1, -1], dtype=np.intp)
tm.assert_numpy_array_equal(actual, expected)
with pytest.raises(TypeError):
idx.get_indexer(['a', 'b', 'c', 'd'], method='nearest')
with pytest.raises(TypeError):
idx.get_indexer(['a', 'b', 'c', 'd'], method='pad', tolerance=2)
def test_get_loc(self):
idx = pd.Index([0, 1, 2])
all_methods = [None, 'pad', 'backfill', 'nearest']
for method in all_methods:
assert idx.get_loc(1, method=method) == 1
if method is not None:
assert idx.get_loc(1, method=method, tolerance=0) == 1
with pytest.raises(TypeError):
idx.get_loc([1, 2], method=method)
for method, loc in [('pad', 1), ('backfill', 2), ('nearest', 1)]:
assert idx.get_loc(1.1, method) == loc
for method, loc in [('pad', 1), ('backfill', 2), ('nearest', 1)]:
assert idx.get_loc(1.1, method, tolerance=1) == loc
for method in ['pad', 'backfill', 'nearest']:
with pytest.raises(KeyError):
idx.get_loc(1.1, method, tolerance=0.05)
with tm.assert_raises_regex(ValueError, 'must be numeric'):
idx.get_loc(1.1, 'nearest', tolerance='invalid')
with tm.assert_raises_regex(ValueError, 'tolerance .* valid if'):
idx.get_loc(1.1, tolerance=1)
idx = pd.Index(['a', 'c'])
with pytest.raises(TypeError):
idx.get_loc('a', method='nearest')
with pytest.raises(TypeError):
idx.get_loc('a', method='pad', tolerance='invalid')
def test_slice_locs(self):
for dtype in [int, float]:
idx = Index(np.array([0, 1, 2, 5, 6, 7, 9, 10], dtype=dtype))
n = len(idx)
assert idx.slice_locs(start=2) == (2, n)
assert idx.slice_locs(start=3) == (3, n)
assert idx.slice_locs(3, 8) == (3, 6)
assert idx.slice_locs(5, 10) == (3, n)
assert idx.slice_locs(end=8) == (0, 6)
assert idx.slice_locs(end=9) == (0, 7)
# reversed
idx2 = idx[::-1]
assert idx2.slice_locs(8, 2) == (2, 6)
assert idx2.slice_locs(7, 3) == (2, 5)
# float slicing
idx = Index(np.array([0, 1, 2, 5, 6, 7, 9, 10], dtype=float))
n = len(idx)
assert idx.slice_locs(5.0, 10.0) == (3, n)
assert idx.slice_locs(4.5, 10.5) == (3, 8)
idx2 = idx[::-1]
assert idx2.slice_locs(8.5, 1.5) == (2, 6)
assert idx2.slice_locs(10.5, -1) == (0, n)
# int slicing with floats
# GH 4892, these are all TypeErrors
idx = Index(np.array([0, 1, 2, 5, 6, 7, 9, 10], dtype=int))
pytest.raises(TypeError,
lambda: idx.slice_locs(5.0, 10.0), (3, n))
pytest.raises(TypeError,
lambda: idx.slice_locs(4.5, 10.5), (3, 8))
idx2 = idx[::-1]
pytest.raises(TypeError,
lambda: idx2.slice_locs(8.5, 1.5), (2, 6))
pytest.raises(TypeError,
lambda: idx2.slice_locs(10.5, -1), (0, n))
def test_slice_locs_dup(self):
idx = Index(['a', 'a', 'b', 'c', 'd', 'd'])
assert idx.slice_locs('a', 'd') == (0, 6)
assert idx.slice_locs(end='d') == (0, 6)
assert idx.slice_locs('a', 'c') == (0, 4)
assert idx.slice_locs('b', 'd') == (2, 6)
idx2 = idx[::-1]
assert idx2.slice_locs('d', 'a') == (0, 6)
assert idx2.slice_locs(end='a') == (0, 6)
assert idx2.slice_locs('d', 'b') == (0, 4)
assert idx2.slice_locs('c', 'a') == (2, 6)
for dtype in [int, float]:
idx = Index(np.array([10, 12, 12, 14], dtype=dtype))
assert idx.slice_locs(12, 12) == (1, 3)
assert idx.slice_locs(11, 13) == (1, 3)
idx2 = idx[::-1]
assert idx2.slice_locs(12, 12) == (1, 3)
assert idx2.slice_locs(13, 11) == (1, 3)
def test_slice_locs_na(self):
idx = Index([np.nan, 1, 2])
pytest.raises(KeyError, idx.slice_locs, start=1.5)
pytest.raises(KeyError, idx.slice_locs, end=1.5)
assert idx.slice_locs(1) == (1, 3)
assert idx.slice_locs(np.nan) == (0, 3)
idx = Index([0, np.nan, np.nan, 1, 2])
assert idx.slice_locs(np.nan) == (1, 5)
def test_slice_locs_negative_step(self):
idx = Index(list('bcdxy'))
SLC = pd.IndexSlice
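        # pd.IndexSlice builds slice objects from label endpoints, keeping
        # the negative-step cases below readable.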
def check_slice(in_slice, expected):
s_start, s_stop = idx.slice_locs(in_slice.start, in_slice.stop,
in_slice.step)
result = idx[s_start:s_stop:in_slice.step]
expected = pd.Index(list(expected))
tm.assert_index_equal(result, expected)
for in_slice, expected in [
(SLC[::-1], 'yxdcb'), (SLC['b':'y':-1], ''),
(SLC['b'::-1], 'b'), (SLC[:'b':-1], 'yxdcb'),
(SLC[:'y':-1], 'y'), (SLC['y'::-1], 'yxdcb'),
(SLC['y'::-4], 'yb'),
# absent labels
(SLC[:'a':-1], 'yxdcb'), (SLC[:'a':-2], 'ydb'),
(SLC['z'::-1], 'yxdcb'), (SLC['z'::-3], 'yc'),
(SLC['m'::-1], 'dcb'), (SLC[:'m':-1], 'yx'),
(SLC['a':'a':-1], ''), (SLC['z':'z':-1], ''),
(SLC['m':'m':-1], '')
]:
check_slice(in_slice, expected)
def test_drop(self):
n = len(self.strIndex)
drop = self.strIndex[lrange(5, 10)]
dropped = self.strIndex.drop(drop)
expected = self.strIndex[lrange(5) + lrange(10, n)]
tm.assert_index_equal(dropped, expected)
pytest.raises(ValueError, self.strIndex.drop, ['foo', 'bar'])
pytest.raises(ValueError, self.strIndex.drop, ['1', 'bar'])
# errors='ignore'
mixed = drop.tolist() + ['foo']
dropped = self.strIndex.drop(mixed, errors='ignore')
expected = self.strIndex[lrange(5) + lrange(10, n)]
tm.assert_index_equal(dropped, expected)
dropped = self.strIndex.drop(['foo', 'bar'], errors='ignore')
expected = self.strIndex[lrange(n)]
tm.assert_index_equal(dropped, expected)
dropped = self.strIndex.drop(self.strIndex[0])
expected = self.strIndex[1:]
tm.assert_index_equal(dropped, expected)
ser = Index([1, 2, 3])
dropped = ser.drop(1)
expected = Index([2, 3])
tm.assert_index_equal(dropped, expected)
# errors='ignore'
pytest.raises(ValueError, ser.drop, [3, 4])
dropped = ser.drop(4, errors='ignore')
expected = Index([1, 2, 3])
tm.assert_index_equal(dropped, expected)
dropped = ser.drop([3, 4, 5], errors='ignore')
expected = Index([1, 2])
tm.assert_index_equal(dropped, expected)
def test_tuple_union_bug(self):
import pandas
import numpy as np
aidx1 = np.array([(1, 'A'), (2, 'A'), (1, 'B'), (2, 'B')],
dtype=[('num', int), ('let', 'a1')])
aidx2 = np.array([(1, 'A'), (2, 'A'), (1, 'B'),
(2, 'B'), (1, 'C'), (2, 'C')],
dtype=[('num', int), ('let', 'a1')])
idx1 = pandas.Index(aidx1)
idx2 = pandas.Index(aidx2)
# intersection broken?
int_idx = idx1.intersection(idx2)
# needs to be 1d like idx1 and idx2
expected = idx1[:4] # pandas.Index(sorted(set(idx1) & set(idx2)))
assert int_idx.ndim == 1
tm.assert_index_equal(int_idx, expected)
# union broken
union_idx = idx1.union(idx2)
expected = idx2
assert union_idx.ndim == 1
tm.assert_index_equal(union_idx, expected)
def test_is_monotonic_incomparable(self):
index = Index([5, datetime.now(), 7])
assert not index.is_monotonic_increasing
assert not index.is_monotonic_decreasing
assert not index._is_strictly_monotonic_increasing
assert not index._is_strictly_monotonic_decreasing
def test_get_set_value(self):
values = np.random.randn(100)
date = self.dateIndex[67]
assert_almost_equal(self.dateIndex.get_value(values, date), values[67])
self.dateIndex.set_value(values, date, 10)
assert values[67] == 10
def test_isin(self):
values = ['foo', 'bar', 'quux']
idx = Index(['qux', 'baz', 'foo', 'bar'])
result = idx.isin(values)
expected = np.array([False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
# set
result = idx.isin(set(values))
tm.assert_numpy_array_equal(result, expected)
# empty, return dtype bool
idx = Index([])
result = idx.isin(values)
assert len(result) == 0
assert result.dtype == np.bool_
def test_isin_nan(self):
tm.assert_numpy_array_equal(Index(['a', np.nan]).isin([np.nan]),
np.array([False, True]))
tm.assert_numpy_array_equal(Index(['a', pd.NaT]).isin([pd.NaT]),
np.array([False, True]))
tm.assert_numpy_array_equal(Index(['a', np.nan]).isin([float('nan')]),
np.array([False, False]))
tm.assert_numpy_array_equal(Index(['a', np.nan]).isin([pd.NaT]),
np.array([False, False]))
# Float64Index overrides isin, so must be checked separately
tm.assert_numpy_array_equal(Float64Index([1.0, np.nan]).isin([np.nan]),
np.array([False, True]))
tm.assert_numpy_array_equal(
Float64Index([1.0, np.nan]).isin([float('nan')]),
np.array([False, True]))
# we cannot compare NaT with NaN
tm.assert_numpy_array_equal(Float64Index([1.0, np.nan]).isin([pd.NaT]),
np.array([False, False]))
def test_isin_level_kwarg(self):
def check_idx(idx):
values = idx.tolist()[-2:] + ['nonexisting']
expected = np.array([False, False, True, True])
tm.assert_numpy_array_equal(expected, idx.isin(values, level=0))
tm.assert_numpy_array_equal(expected, idx.isin(values, level=-1))
pytest.raises(IndexError, idx.isin, values, level=1)
pytest.raises(IndexError, idx.isin, values, level=10)
pytest.raises(IndexError, idx.isin, values, level=-2)
pytest.raises(KeyError, idx.isin, values, level=1.0)
pytest.raises(KeyError, idx.isin, values, level='foobar')
idx.name = 'foobar'
tm.assert_numpy_array_equal(expected,
idx.isin(values, level='foobar'))
pytest.raises(KeyError, idx.isin, values, level='xyzzy')
pytest.raises(KeyError, idx.isin, values, level=np.nan)
check_idx(Index(['qux', 'baz', 'foo', 'bar']))
# Float64Index overrides isin, so must be checked separately
check_idx(Float64Index([1.0, 2.0, 3.0, 4.0]))
@pytest.mark.parametrize("empty", [[], Series(), np.array([])])
def test_isin_empty(self, empty):
# see gh-16991
idx = Index(["a", "b"])
expected = np.array([False, False])
result = idx.isin(empty)
tm.assert_numpy_array_equal(expected, result)
def test_boolean_cmp(self):
values = [1, 2, 3, 4]
idx = Index(values)
res = (idx == values)
tm.assert_numpy_array_equal(res, np.array(
[True, True, True, True], dtype=bool))
def test_get_level_values(self):
result = self.strIndex.get_level_values(0)
tm.assert_index_equal(result, self.strIndex)
def test_slice_keep_name(self):
idx = Index(['a', 'b'], name='asdf')
assert idx.name == idx[1:].name
def test_join_self(self):
# instance attributes of the form self.<name>Index
indices = 'unicode', 'str', 'date', 'int', 'float'
kinds = 'outer', 'inner', 'left', 'right'
for index_kind in indices:
res = getattr(self, '{0}Index'.format(index_kind))
for kind in kinds:
joined = res.join(res, how=kind)
assert res is joined
def test_str_attribute(self):
# GH9068
methods = ['strip', 'rstrip', 'lstrip']
idx = Index([' jack', 'jill ', ' jesse ', 'frank'])
for method in methods:
expected = Index([getattr(str, method)(x) for x in idx.values])
tm.assert_index_equal(
getattr(Index.str, method)(idx.str), expected)
# create a few instances that are not able to use .str accessor
indices = [Index(range(5)), tm.makeDateIndex(10),
MultiIndex.from_tuples([('foo', '1'), ('bar', '3')]),
PeriodIndex(start='2000', end='2010', freq='A')]
for idx in indices:
with tm.assert_raises_regex(AttributeError,
'only use .str accessor'):
idx.str.repeat(2)
idx = Index(['a b c', 'd e', 'f'])
expected = Index([['a', 'b', 'c'], ['d', 'e'], ['f']])
tm.assert_index_equal(idx.str.split(), expected)
tm.assert_index_equal(idx.str.split(expand=False), expected)
expected = MultiIndex.from_tuples([('a', 'b', 'c'), ('d', 'e', np.nan),
('f', np.nan, np.nan)])
tm.assert_index_equal(idx.str.split(expand=True), expected)
# test boolean case, should return np.array instead of boolean Index
idx = Index(['a1', 'a2', 'b1', 'b2'])
expected = np.array([True, True, False, False])
tm.assert_numpy_array_equal(idx.str.startswith('a'), expected)
assert isinstance(idx.str.startswith('a'), np.ndarray)
s = Series(range(4), index=idx)
expected = Series(range(2), index=['a1', 'a2'])
tm.assert_series_equal(s[s.index.str.startswith('a')], expected)
def test_tab_completion(self):
# GH 9910
idx = Index(list('abcd'))
assert 'str' in dir(idx)
idx = Index(range(4))
assert 'str' not in dir(idx)
def test_indexing_doesnt_change_class(self):
idx = Index([1, 2, 3, 'a', 'b', 'c'])
assert idx[1:3].identical(pd.Index([2, 3], dtype=np.object_))
assert idx[[0, 1]].identical(pd.Index([1, 2], dtype=np.object_))
def test_outer_join_sort(self):
left_idx = Index(np.random.permutation(15))
right_idx = tm.makeDateIndex(10)
with tm.assert_produces_warning(RuntimeWarning):
joined = left_idx.join(right_idx, how='outer')
        # the expected union starts from right_idx here because DatetimeIndex
        # has join precedence over Int64Index
with tm.assert_produces_warning(RuntimeWarning):
expected = right_idx.astype(object).union(left_idx.astype(object))
tm.assert_index_equal(joined, expected)
def test_nan_first_take_datetime(self):
idx = Index([pd.NaT, Timestamp('20130101'), Timestamp('20130102')])
res = idx.take([-1, 0, 1])
exp = Index([idx[-1], idx[0], idx[1]])
tm.assert_index_equal(res, exp)
def test_take_fill_value(self):
# GH 12631
idx = pd.Index(list('ABC'), name='xxx')
result = idx.take(np.array([1, 0, -1]))
expected = pd.Index(list('BAC'), name='xxx')
tm.assert_index_equal(result, expected)
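        # With a plain take, -1 is positional (the last element); only with
        # allow_fill=True and a fill_value does -1 mark a missing slot.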
# fill_value
result = idx.take(np.array([1, 0, -1]), fill_value=True)
expected = pd.Index(['B', 'A', np.nan], name='xxx')
tm.assert_index_equal(result, expected)
# allow_fill=False
result = idx.take(np.array([1, 0, -1]), allow_fill=False,
fill_value=True)
expected = pd.Index(['B', 'A', 'C'], name='xxx')
tm.assert_index_equal(result, expected)
msg = ('When allow_fill=True and fill_value is not None, '
'all indices must be >= -1')
with tm.assert_raises_regex(ValueError, msg):
idx.take(np.array([1, 0, -2]), fill_value=True)
with tm.assert_raises_regex(ValueError, msg):
idx.take(np.array([1, 0, -5]), fill_value=True)
with pytest.raises(IndexError):
idx.take(np.array([1, -5]))
def test_reshape_raise(self):
msg = "reshaping is not supported"
idx = pd.Index([0, 1, 2])
tm.assert_raises_regex(NotImplementedError, msg,
idx.reshape, idx.shape)
def test_reindex_preserves_name_if_target_is_list_or_ndarray(self):
# GH6552
idx = pd.Index([0, 1, 2])
dt_idx = pd.date_range('20130101', periods=3)
idx.name = None
assert idx.reindex([])[0].name is None
assert idx.reindex(np.array([]))[0].name is None
assert idx.reindex(idx.tolist())[0].name is None
assert idx.reindex(idx.tolist()[:-1])[0].name is None
assert idx.reindex(idx.values)[0].name is None
assert idx.reindex(idx.values[:-1])[0].name is None
# Must preserve name even if dtype changes.
assert idx.reindex(dt_idx.values)[0].name is None
assert idx.reindex(dt_idx.tolist())[0].name is None
idx.name = 'foobar'
assert idx.reindex([])[0].name == 'foobar'
assert idx.reindex(np.array([]))[0].name == 'foobar'
assert idx.reindex(idx.tolist())[0].name == 'foobar'
assert idx.reindex(idx.tolist()[:-1])[0].name == 'foobar'
assert idx.reindex(idx.values)[0].name == 'foobar'
assert idx.reindex(idx.values[:-1])[0].name == 'foobar'
# Must preserve name even if dtype changes.
assert idx.reindex(dt_idx.values)[0].name == 'foobar'
assert idx.reindex(dt_idx.tolist())[0].name == 'foobar'
def test_reindex_preserves_type_if_target_is_empty_list_or_array(self):
# GH7774
idx = pd.Index(list('abc'))
def get_reindex_type(target):
return idx.reindex(target)[0].dtype.type
assert get_reindex_type([]) == np.object_
assert get_reindex_type(np.array([])) == np.object_
assert get_reindex_type(np.array([], dtype=np.int64)) == np.object_
def test_reindex_doesnt_preserve_type_if_target_is_empty_index(self):
# GH7774
idx = pd.Index(list('abc'))
def get_reindex_type(target):
return idx.reindex(target)[0].dtype.type
assert get_reindex_type(pd.Int64Index([])) == np.int64
assert get_reindex_type(pd.Float64Index([])) == np.float64
assert get_reindex_type(pd.DatetimeIndex([])) == np.datetime64
reindexed = idx.reindex(pd.MultiIndex(
[pd.Int64Index([]), pd.Float64Index([])], [[], []]))[0]
assert reindexed.levels[0].dtype.type == np.int64
assert reindexed.levels[1].dtype.type == np.float64
def test_groupby(self):
idx = Index(range(5))
groups = idx.groupby(np.array([1, 1, 2, 2, 2]))
exp = {1: pd.Index([0, 1]), 2: pd.Index([2, 3, 4])}
tm.assert_dict_equal(groups, exp)
def test_equals_op_multiindex(self):
# GH9785
# test comparisons of multiindex
from pandas.compat import StringIO
df = pd.read_csv(StringIO('a,b,c\n1,2,3\n4,5,6'), index_col=[0, 1])
tm.assert_numpy_array_equal(df.index == df.index,
np.array([True, True]))
mi1 = MultiIndex.from_tuples([(1, 2), (4, 5)])
tm.assert_numpy_array_equal(df.index == mi1, np.array([True, True]))
mi2 = MultiIndex.from_tuples([(1, 2), (4, 6)])
tm.assert_numpy_array_equal(df.index == mi2, np.array([True, False]))
mi3 = MultiIndex.from_tuples([(1, 2), (4, 5), (8, 9)])
with tm.assert_raises_regex(ValueError, "Lengths must match"):
df.index == mi3
index_a = Index(['foo', 'bar', 'baz'])
with tm.assert_raises_regex(ValueError, "Lengths must match"):
df.index == index_a
tm.assert_numpy_array_equal(index_a == mi3,
np.array([False, False, False]))
def test_conversion_preserves_name(self):
# GH 10875
i = pd.Index(['01:02:03', '01:02:04'], name='label')
assert i.name == pd.to_datetime(i).name
assert i.name == pd.to_timedelta(i).name
def test_string_index_repr(self):
        # py3/py2 repr can differ because of the "u" prefix,
        # which also affects the displayed element size
if PY3:
coerce = lambda x: x
else:
coerce = unicode # noqa
# short
idx = pd.Index(['a', 'bb', 'ccc'])
if PY3:
expected = u"""Index(['a', 'bb', 'ccc'], dtype='object')"""
assert repr(idx) == expected
else:
expected = u"""Index([u'a', u'bb', u'ccc'], dtype='object')"""
assert coerce(idx) == expected
# multiple lines
idx = pd.Index(['a', 'bb', 'ccc'] * 10)
if PY3:
expected = u"""\
Index(['a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc',
'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc',
'a', 'bb', 'ccc', 'a', 'bb', 'ccc'],
dtype='object')"""
assert repr(idx) == expected
else:
expected = u"""\
Index([u'a', u'bb', u'ccc', u'a', u'bb', u'ccc', u'a', u'bb', u'ccc', u'a',
u'bb', u'ccc', u'a', u'bb', u'ccc', u'a', u'bb', u'ccc', u'a', u'bb',
u'ccc', u'a', u'bb', u'ccc', u'a', u'bb', u'ccc', u'a', u'bb', u'ccc'],
dtype='object')"""
assert coerce(idx) == expected
# truncated
idx = pd.Index(['a', 'bb', 'ccc'] * 100)
if PY3:
expected = u"""\
Index(['a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a',
...
'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc'],
dtype='object', length=300)"""
assert repr(idx) == expected
else:
expected = u"""\
Index([u'a', u'bb', u'ccc', u'a', u'bb', u'ccc', u'a', u'bb', u'ccc', u'a',
...
u'ccc', u'a', u'bb', u'ccc', u'a', u'bb', u'ccc', u'a', u'bb', u'ccc'],
dtype='object', length=300)"""
assert coerce(idx) == expected
# short
idx = pd.Index([u'あ', u'いい', u'ううう'])
if PY3:
expected = u"""Index(['あ', 'いい', 'ううう'], dtype='object')"""
assert repr(idx) == expected
else:
expected = u"""Index([u'あ', u'いい', u'ううう'], dtype='object')"""
assert coerce(idx) == expected
# multiple lines
idx = pd.Index([u'あ', u'いい', u'ううう'] * 10)
if PY3:
expected = (u"Index(['あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', "
u"'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう',\n"
u" 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', "
u"'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう',\n"
u" 'あ', 'いい', 'ううう', 'あ', 'いい', "
u"'ううう'],\n"
u" dtype='object')")
assert repr(idx) == expected
else:
expected = (u"Index([u'あ', u'いい', u'ううう', u'あ', u'いい', "
u"u'ううう', u'あ', u'いい', u'ううう', u'あ',\n"
u" u'いい', u'ううう', u'あ', u'いい', u'ううう', "
u"u'あ', u'いい', u'ううう', u'あ', u'いい',\n"
u" u'ううう', u'あ', u'いい', u'ううう', u'あ', "
u"u'いい', u'ううう', u'あ', u'いい', u'ううう'],\n"
u" dtype='object')")
assert coerce(idx) == expected
# truncated
idx = pd.Index([u'あ', u'いい', u'ううう'] * 100)
if PY3:
expected = (u"Index(['あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', "
u"'あ', 'いい', 'ううう', 'あ',\n"
u" ...\n"
u" 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', "
u"'ううう', 'あ', 'いい', 'ううう'],\n"
u" dtype='object', length=300)")
assert repr(idx) == expected
else:
expected = (u"Index([u'あ', u'いい', u'ううう', u'あ', u'いい', "
u"u'ううう', u'あ', u'いい', u'ううう', u'あ',\n"
u" ...\n"
u" u'ううう', u'あ', u'いい', u'ううう', u'あ', "
u"u'いい', u'ううう', u'あ', u'いい', u'ううう'],\n"
u" dtype='object', length=300)")
assert coerce(idx) == expected
        # Enable Unicode option -----------------------------------------
with cf.option_context('display.unicode.east_asian_width', True):
# short
idx = pd.Index([u'あ', u'いい', u'ううう'])
if PY3:
expected = (u"Index(['あ', 'いい', 'ううう'], "
u"dtype='object')")
assert repr(idx) == expected
else:
expected = (u"Index([u'あ', u'いい', u'ううう'], "
u"dtype='object')")
assert coerce(idx) == expected
# multiple lines
idx = pd.Index([u'あ', u'いい', u'ううう'] * 10)
if PY3:
expected = (u"Index(['あ', 'いい', 'ううう', 'あ', 'いい', "
u"'ううう', 'あ', 'いい', 'ううう',\n"
u" 'あ', 'いい', 'ううう', 'あ', 'いい', "
u"'ううう', 'あ', 'いい', 'ううう',\n"
u" 'あ', 'いい', 'ううう', 'あ', 'いい', "
u"'ううう', 'あ', 'いい', 'ううう',\n"
u" 'あ', 'いい', 'ううう'],\n"
u" dtype='object')""")
assert repr(idx) == expected
else:
expected = (u"Index([u'あ', u'いい', u'ううう', u'あ', u'いい', "
u"u'ううう', u'あ', u'いい',\n"
u" u'ううう', u'あ', u'いい', u'ううう', "
u"u'あ', u'いい', u'ううう', u'あ',\n"
u" u'いい', u'ううう', u'あ', u'いい', "
u"u'ううう', u'あ', u'いい',\n"
u" u'ううう', u'あ', u'いい', u'ううう', "
u"u'あ', u'いい', u'ううう'],\n"
u" dtype='object')")
assert coerce(idx) == expected
# truncated
idx = pd.Index([u'あ', u'いい', u'ううう'] * 100)
if PY3:
expected = (u"Index(['あ', 'いい', 'ううう', 'あ', 'いい', "
u"'ううう', 'あ', 'いい', 'ううう',\n"
u" 'あ',\n"
u" ...\n"
u" 'ううう', 'あ', 'いい', 'ううう', 'あ', "
u"'いい', 'ううう', 'あ', 'いい',\n"
u" 'ううう'],\n"
u" dtype='object', length=300)")
assert repr(idx) == expected
else:
expected = (u"Index([u'あ', u'いい', u'ううう', u'あ', u'いい', "
u"u'ううう', u'あ', u'いい',\n"
u" u'ううう', u'あ',\n"
u" ...\n"
u" u'ううう', u'あ', u'いい', u'ううう', "
u"u'あ', u'いい', u'ううう', u'あ',\n"
u" u'いい', u'ううう'],\n"
u" dtype='object', length=300)")
assert coerce(idx) == expected
@pytest.mark.parametrize('dtype', [np.int64, np.float64])
@pytest.mark.parametrize('delta', [1, 0, -1])
def test_addsub_arithmetic(self, dtype, delta):
# GH 8142
delta = dtype(delta)
idx = pd.Index([10, 11, 12], dtype=dtype)
result = idx + delta
expected = pd.Index(idx.values + delta, dtype=dtype)
tm.assert_index_equal(result, expected)
# this subtraction used to fail
result = idx - delta
expected = pd.Index(idx.values - delta, dtype=dtype)
tm.assert_index_equal(result, expected)
tm.assert_index_equal(idx + idx, 2 * idx)
tm.assert_index_equal(idx - idx, 0 * idx)
assert not (idx - idx).empty
class TestMixedIntIndex(Base):
# Mostly the tests from common.py for which the results differ
# in py2 and py3 because ints and strings are uncomparable in py3
# (GH 13514)
_holder = Index
def setup_method(self, method):
self.indices = dict(mixedIndex=Index([0, 'a', 1, 'b', 2, 'c']))
self.setup_indices()
def create_index(self):
return self.mixedIndex
def test_argsort(self):
idx = self.create_index()
if PY36:
with tm.assert_raises_regex(TypeError, "'>|<' not supported"):
result = idx.argsort()
elif PY3:
with tm.assert_raises_regex(TypeError, "unorderable types"):
result = idx.argsort()
else:
result = idx.argsort()
expected = np.array(idx).argsort()
tm.assert_numpy_array_equal(result, expected, check_dtype=False)
def test_numpy_argsort(self):
idx = self.create_index()
if PY36:
with tm.assert_raises_regex(TypeError, "'>|<' not supported"):
result = np.argsort(idx)
elif PY3:
with tm.assert_raises_regex(TypeError, "unorderable types"):
result = np.argsort(idx)
else:
result = np.argsort(idx)
expected = idx.argsort()
tm.assert_numpy_array_equal(result, expected)
def test_copy_name(self):
# Check that "name" argument passed at initialization is honoured
# GH12309
idx = self.create_index()
first = idx.__class__(idx, copy=True, name='mario')
second = first.__class__(first, copy=False)
# Even though "copy=False", we want a new object.
assert first is not second
# Not using tm.assert_index_equal() since names differ:
assert idx.equals(first)
assert first.name == 'mario'
assert second.name == 'mario'
s1 = Series(2, index=first)
s2 = Series(3, index=second[:-1])
warning_type = RuntimeWarning if PY3 else None
with tm.assert_produces_warning(warning_type):
# Python 3: Unorderable types
s3 = s1 * s2
assert s3.index.name == 'mario'
def test_copy_name2(self):
# Check that adding a "name" parameter to the copy is honored
# GH14302
idx = pd.Index([1, 2], name='MyName')
idx1 = idx.copy()
assert idx.equals(idx1)
assert idx.name == 'MyName'
assert idx1.name == 'MyName'
idx2 = idx.copy(name='NewName')
assert idx.equals(idx2)
assert idx.name == 'MyName'
assert idx2.name == 'NewName'
idx3 = idx.copy(names=['NewName'])
assert idx.equals(idx3)
assert idx.name == 'MyName'
assert idx.names == ['MyName']
assert idx3.name == 'NewName'
assert idx3.names == ['NewName']
def test_union_base(self):
idx = self.create_index()
first = idx[3:]
second = idx[:5]
if PY3:
with tm.assert_produces_warning(RuntimeWarning):
# unorderable types
result = first.union(second)
expected = Index(['b', 2, 'c', 0, 'a', 1])
tm.assert_index_equal(result, expected)
else:
result = first.union(second)
expected = Index(['b', 2, 'c', 0, 'a', 1])
tm.assert_index_equal(result, expected)
# GH 10149
cases = [klass(second.values)
for klass in [np.array, Series, list]]
for case in cases:
if PY3:
with tm.assert_produces_warning(RuntimeWarning):
# unorderable types
result = first.union(case)
assert tm.equalContents(result, idx)
else:
result = first.union(case)
assert tm.equalContents(result, idx)
def test_intersection_base(self):
# (same results for py2 and py3 but sortedness not tested elsewhere)
idx = self.create_index()
first = idx[:5]
second = idx[:3]
result = first.intersection(second)
expected = Index([0, 'a', 1])
tm.assert_index_equal(result, expected)
# GH 10149
cases = [klass(second.values)
for klass in [np.array, Series, list]]
for case in cases:
result = first.intersection(case)
assert tm.equalContents(result, second)
def test_difference_base(self):
# (same results for py2 and py3 but sortedness not tested elsewhere)
idx = self.create_index()
first = idx[:4]
second = idx[3:]
result = first.difference(second)
expected = Index([0, 1, 'a'])
tm.assert_index_equal(result, expected)
def test_symmetric_difference(self):
# (same results for py2 and py3 but sortedness not tested elsewhere)
idx = self.create_index()
first = idx[:4]
second = idx[3:]
result = first.symmetric_difference(second)
expected = Index([0, 1, 2, 'a', 'c'])
tm.assert_index_equal(result, expected)
def test_logical_compat(self):
idx = self.create_index()
assert idx.all() == idx.values.all()
assert idx.any() == idx.values.any()
def test_dropna(self):
# GH 6194
for dtype in [None, object, 'category']:
idx = pd.Index([1, 2, 3], dtype=dtype)
tm.assert_index_equal(idx.dropna(), idx)
idx = pd.Index([1., 2., 3.], dtype=dtype)
tm.assert_index_equal(idx.dropna(), idx)
nanidx = pd.Index([1., 2., np.nan, 3.], dtype=dtype)
tm.assert_index_equal(nanidx.dropna(), idx)
idx = pd.Index(['A', 'B', 'C'], dtype=dtype)
tm.assert_index_equal(idx.dropna(), idx)
nanidx = pd.Index(['A', np.nan, 'B', 'C'], dtype=dtype)
tm.assert_index_equal(nanidx.dropna(), idx)
tm.assert_index_equal(nanidx.dropna(how='any'), idx)
tm.assert_index_equal(nanidx.dropna(how='all'), idx)
idx = pd.DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03'])
tm.assert_index_equal(idx.dropna(), idx)
nanidx = pd.DatetimeIndex(['2011-01-01', '2011-01-02',
'2011-01-03', pd.NaT])
tm.assert_index_equal(nanidx.dropna(), idx)
idx = pd.TimedeltaIndex(['1 days', '2 days', '3 days'])
tm.assert_index_equal(idx.dropna(), idx)
nanidx = pd.TimedeltaIndex([pd.NaT, '1 days', '2 days',
'3 days', pd.NaT])
tm.assert_index_equal(nanidx.dropna(), idx)
idx = pd.PeriodIndex(['2012-02', '2012-04', '2012-05'], freq='M')
tm.assert_index_equal(idx.dropna(), idx)
nanidx = pd.PeriodIndex(['2012-02', '2012-04', 'NaT', '2012-05'],
freq='M')
tm.assert_index_equal(nanidx.dropna(), idx)
msg = "invalid how option: xxx"
with tm.assert_raises_regex(ValueError, msg):
pd.Index([1, 2, 3]).dropna(how='xxx')
def test_get_combined_index(self):
result = _get_combined_index([])
tm.assert_index_equal(result, Index([]))
def test_repeat(self):
repeats = 2
idx = pd.Index([1, 2, 3])
expected = pd.Index([1, 1, 2, 2, 3, 3])
result = idx.repeat(repeats)
tm.assert_index_equal(result, expected)
with tm.assert_produces_warning(FutureWarning):
result = idx.repeat(n=repeats)
tm.assert_index_equal(result, expected)
def test_is_monotonic_na(self):
examples = [pd.Index([np.nan]),
pd.Index([np.nan, 1]),
pd.Index([1, 2, np.nan]),
pd.Index(['a', 'b', np.nan]),
pd.to_datetime(['NaT']),
pd.to_datetime(['NaT', '2000-01-01']),
pd.to_datetime(['2000-01-01', 'NaT', '2000-01-02']),
pd.to_timedelta(['1 day', 'NaT']), ]
for index in examples:
assert not index.is_monotonic_increasing
assert not index.is_monotonic_decreasing
assert not index._is_strictly_monotonic_increasing
assert not index._is_strictly_monotonic_decreasing
def test_repr_summary(self):
with cf.option_context('display.max_seq_items', 10):
r = repr(pd.Index(np.arange(1000)))
assert len(r) < 200
assert "..." in r
def test_int_name_format(self):
index = Index(['a', 'b', 'c'], name=0)
s = Series(lrange(3), index)
df = DataFrame(lrange(3), index=index)
repr(s)
repr(df)
def test_print_unicode_columns(self):
df = pd.DataFrame({u("\u05d0"): [1, 2, 3],
"\u05d1": [4, 5, 6],
"c": [7, 8, 9]})
repr(df.columns) # should not raise UnicodeDecodeError
def test_unicode_string_with_unicode(self):
idx = Index(lrange(1000))
if PY3:
str(idx)
else:
text_type(idx)
def test_bytestring_with_unicode(self):
idx = Index(lrange(1000))
if PY3:
bytes(idx)
else:
str(idx)
def test_intersect_str_dates(self):
dt_dates = [datetime(2012, 2, 9), datetime(2012, 2, 22)]
i1 = Index(dt_dates, dtype=object)
i2 = Index(['aa'], dtype=object)
res = i2.intersection(i1)
assert len(res) == 0
| bsd-3-clause |
rahuldhote/scikit-learn | sklearn/decomposition/tests/test_incremental_pca.py | 297 | 8265 | """Tests for Incremental PCA."""
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn import datasets
from sklearn.decomposition import PCA, IncrementalPCA
iris = datasets.load_iris()
def test_incremental_pca():
# Incremental PCA on dense arrays.
X = iris.data
batch_size = X.shape[0] // 3
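    # Fitting incrementally in three batches should explain roughly the same
    # total variance as a single full PCA fit.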
ipca = IncrementalPCA(n_components=2, batch_size=batch_size)
pca = PCA(n_components=2)
pca.fit_transform(X)
X_transformed = ipca.fit_transform(X)
np.testing.assert_equal(X_transformed.shape, (X.shape[0], 2))
assert_almost_equal(ipca.explained_variance_ratio_.sum(),
pca.explained_variance_ratio_.sum(), 1)
for n_components in [1, 2, X.shape[1]]:
ipca = IncrementalPCA(n_components, batch_size=batch_size)
ipca.fit(X)
cov = ipca.get_covariance()
precision = ipca.get_precision()
assert_array_almost_equal(np.dot(cov, precision),
np.eye(X.shape[1]))
def test_incremental_pca_check_projection():
# Test that the projection of data is correct.
rng = np.random.RandomState(1999)
n, p = 100, 3
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5])
Xt = 0.1 * rng.randn(1, p) + np.array([3, 4, 5])
# Get the reconstruction of the generated data X
# Note that Xt has the same "components" as X, just separated
# This is what we want to ensure is recreated correctly
Yt = IncrementalPCA(n_components=2).fit(X).transform(Xt)
# Normalize
Yt /= np.sqrt((Yt ** 2).sum())
# Make sure that the first element of Yt is ~1, this means
# the reconstruction worked as expected
assert_almost_equal(np.abs(Yt[0][0]), 1., 1)
def test_incremental_pca_inverse():
# Test that the projection of data can be inverted.
rng = np.random.RandomState(1999)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
    # sanity check that we can recover the original data from the transformed
    # signal (since the data is almost of rank n_components)
ipca = IncrementalPCA(n_components=2, batch_size=10).fit(X)
Y = ipca.transform(X)
Y_inverse = ipca.inverse_transform(Y)
assert_almost_equal(X, Y_inverse, decimal=3)
def test_incremental_pca_validation():
# Test that n_components is >=1 and <= n_features.
X = [[0, 1], [1, 0]]
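    # Negative, zero and fractional component counts are rejected, and 3
    # exceeds the 2 available features.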
for n_components in [-1, 0, .99, 3]:
assert_raises(ValueError, IncrementalPCA(n_components,
batch_size=10).fit, X)
def test_incremental_pca_set_params():
    # Test that partial_fit raises after n_components is changed via
    # set_params, and works again once the original value is restored.
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 20
X = rng.randn(n_samples, n_features)
X2 = rng.randn(n_samples, n_features)
X3 = rng.randn(n_samples, n_features)
ipca = IncrementalPCA(n_components=20)
ipca.fit(X)
# Decreasing number of components
ipca.set_params(n_components=10)
assert_raises(ValueError, ipca.partial_fit, X2)
# Increasing number of components
ipca.set_params(n_components=15)
assert_raises(ValueError, ipca.partial_fit, X3)
# Returning to original setting
ipca.set_params(n_components=20)
ipca.partial_fit(X)
def test_incremental_pca_num_features_change():
    # Test that changing the number of features between fits raises an error.
rng = np.random.RandomState(1999)
n_samples = 100
X = rng.randn(n_samples, 20)
X2 = rng.randn(n_samples, 50)
ipca = IncrementalPCA(n_components=None)
ipca.fit(X)
assert_raises(ValueError, ipca.partial_fit, X2)
def test_incremental_pca_batch_signs():
# Test that components_ sign is stable over batch sizes.
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 3
X = rng.randn(n_samples, n_features)
all_components = []
batch_sizes = np.arange(10, 20)
for batch_size in batch_sizes:
ipca = IncrementalPCA(n_components=None, batch_size=batch_size).fit(X)
all_components.append(ipca.components_)
for i, j in zip(all_components[:-1], all_components[1:]):
assert_almost_equal(np.sign(i), np.sign(j), decimal=6)
def test_incremental_pca_batch_values():
# Test that components_ values are stable over batch sizes.
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 3
X = rng.randn(n_samples, n_features)
all_components = []
batch_sizes = np.arange(20, 40, 3)
for batch_size in batch_sizes:
ipca = IncrementalPCA(n_components=None, batch_size=batch_size).fit(X)
all_components.append(ipca.components_)
for i, j in zip(all_components[:-1], all_components[1:]):
assert_almost_equal(i, j, decimal=1)
def test_incremental_pca_partial_fit():
# Test that fit and partial_fit get equivalent results.
rng = np.random.RandomState(1999)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
# same check that we can find the original data from the transformed
# signal (since the data is almost of rank n_components)
batch_size = 10
ipca = IncrementalPCA(n_components=2, batch_size=batch_size).fit(X)
pipca = IncrementalPCA(n_components=2, batch_size=batch_size)
# Add one to make sure endpoint is included
batch_itr = np.arange(0, n + 1, batch_size)
for i, j in zip(batch_itr[:-1], batch_itr[1:]):
pipca.partial_fit(X[i:j, :])
assert_almost_equal(ipca.components_, pipca.components_, decimal=3)
def test_incremental_pca_against_pca_iris():
# Test that IncrementalPCA and PCA are approximate (to a sign flip).
X = iris.data
Y_pca = PCA(n_components=2).fit_transform(X)
Y_ipca = IncrementalPCA(n_components=2, batch_size=25).fit_transform(X)
assert_almost_equal(np.abs(Y_pca), np.abs(Y_ipca), 1)
def test_incremental_pca_against_pca_random_data():
# Test that IncrementalPCA and PCA are approximate (to a sign flip).
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 3
X = rng.randn(n_samples, n_features) + 5 * rng.rand(1, n_features)
Y_pca = PCA(n_components=3).fit_transform(X)
Y_ipca = IncrementalPCA(n_components=3, batch_size=25).fit_transform(X)
assert_almost_equal(np.abs(Y_pca), np.abs(Y_ipca), 1)
def test_explained_variances():
# Test that PCA and IncrementalPCA calculations match
X = datasets.make_low_rank_matrix(1000, 100, tail_strength=0.,
effective_rank=10, random_state=1999)
prec = 3
n_samples, n_features = X.shape
for nc in [None, 99]:
pca = PCA(n_components=nc).fit(X)
ipca = IncrementalPCA(n_components=nc, batch_size=100).fit(X)
assert_almost_equal(pca.explained_variance_, ipca.explained_variance_,
decimal=prec)
assert_almost_equal(pca.explained_variance_ratio_,
ipca.explained_variance_ratio_, decimal=prec)
assert_almost_equal(pca.noise_variance_, ipca.noise_variance_,
decimal=prec)
def test_whitening():
# Test that PCA and IncrementalPCA transforms match to sign flip.
X = datasets.make_low_rank_matrix(1000, 10, tail_strength=0.,
effective_rank=2, random_state=1999)
prec = 3
n_samples, n_features = X.shape
for nc in [None, 9]:
pca = PCA(whiten=True, n_components=nc).fit(X)
ipca = IncrementalPCA(whiten=True, n_components=nc,
batch_size=250).fit(X)
Xt_pca = pca.transform(X)
Xt_ipca = ipca.transform(X)
assert_almost_equal(np.abs(Xt_pca), np.abs(Xt_ipca), decimal=prec)
Xinv_ipca = ipca.inverse_transform(Xt_ipca)
Xinv_pca = pca.inverse_transform(Xt_pca)
assert_almost_equal(X, Xinv_ipca, decimal=prec)
assert_almost_equal(X, Xinv_pca, decimal=prec)
assert_almost_equal(Xinv_pca, Xinv_ipca, decimal=prec)
| bsd-3-clause |
blackwhitehere/data-science-from-scratch | code/clustering.py | 60 | 6438 | from __future__ import division
from linear_algebra import squared_distance, vector_mean, distance
import math, random
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
class KMeans:
"""performs k-means clustering"""
def __init__(self, k):
self.k = k # number of clusters
self.means = None # means of clusters
def classify(self, input):
"""return the index of the cluster closest to the input"""
return min(range(self.k),
key=lambda i: squared_distance(input, self.means[i]))
def train(self, inputs):
self.means = random.sample(inputs, self.k)
assignments = None
while True:
# Find new assignments
new_assignments = map(self.classify, inputs)
# If no assignments have changed, we're done.
if assignments == new_assignments:
return
# Otherwise keep the new assignments,
assignments = new_assignments
for i in range(self.k):
i_points = [p for p, a in zip(inputs, assignments) if a == i]
# avoid divide-by-zero if i_points is empty
if i_points:
self.means[i] = vector_mean(i_points)
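# A minimal usage sketch of KMeans (the 2-D points below are invented purely
# for illustration):
#
#     clusterer = KMeans(3)
#     clusterer.train([[1, 2], [2, 1], [10, 12], [11, 10], [-9, -8], [-10, -10]])
#     clusterer.classify([0, 0])   # -> index (0, 1 or 2) of the nearest mean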
def squared_clustering_errors(inputs, k):
"""finds the total squared error from k-means clustering the inputs"""
clusterer = KMeans(k)
clusterer.train(inputs)
means = clusterer.means
assignments = map(clusterer.classify, inputs)
return sum(squared_distance(input,means[cluster])
for input, cluster in zip(inputs, assignments))
def plot_squared_clustering_errors(plt):
ks = range(1, len(inputs) + 1)
errors = [squared_clustering_errors(inputs, k) for k in ks]
plt.plot(ks, errors)
plt.xticks(ks)
plt.xlabel("k")
plt.ylabel("total squared error")
plt.show()
#
# using clustering to recolor an image
#
def recolor_image(input_file, k=5):
    img = mpimg.imread(input_file)
pixels = [pixel for row in img for pixel in row]
clusterer = KMeans(k)
clusterer.train(pixels) # this might take a while
def recolor(pixel):
cluster = clusterer.classify(pixel) # index of the closest cluster
return clusterer.means[cluster] # mean of the closest cluster
new_img = [[recolor(pixel) for pixel in row]
for row in img]
plt.imshow(new_img)
plt.axis('off')
plt.show()
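# Example call (the filename is only a placeholder; any readable PNG would do):
#
#     recolor_image("my_photo.png", k=5)   # repaint the image using 5 cluster colors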
#
# hierarchical clustering
#
def is_leaf(cluster):
"""a cluster is a leaf if it has length 1"""
return len(cluster) == 1
def get_children(cluster):
"""returns the two children of this cluster if it's a merged cluster;
raises an exception if this is a leaf cluster"""
if is_leaf(cluster):
raise TypeError("a leaf cluster has no children")
else:
return cluster[1]
def get_values(cluster):
"""returns the value in this cluster (if it's a leaf cluster)
or all the values in the leaf clusters below it (if it's not)"""
if is_leaf(cluster):
return cluster # is already a 1-tuple containing value
else:
return [value
for child in get_children(cluster)
for value in get_values(child)]
def cluster_distance(cluster1, cluster2, distance_agg=min):
"""finds the aggregate distance between elements of cluster1
and elements of cluster2"""
return distance_agg([distance(input1, input2)
for input1 in get_values(cluster1)
for input2 in get_values(cluster2)])
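# Design note: with distance_agg=min this is single-linkage clustering (clusters
# merge on their closest pair of points); passing max instead gives complete
# linkage (merge on the farthest pair), as demonstrated in __main__ below.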
def get_merge_order(cluster):
if is_leaf(cluster):
return float('inf')
else:
return cluster[0] # merge_order is first element of 2-tuple
def bottom_up_cluster(inputs, distance_agg=min):
# start with every input a leaf cluster / 1-tuple
clusters = [(input,) for input in inputs]
# as long as we have more than one cluster left...
while len(clusters) > 1:
# find the two closest clusters
c1, c2 = min([(cluster1, cluster2)
for i, cluster1 in enumerate(clusters)
for cluster2 in clusters[:i]],
key=lambda (x, y): cluster_distance(x, y, distance_agg))
# remove them from the list of clusters
clusters = [c for c in clusters if c != c1 and c != c2]
# merge them, using merge_order = # of clusters left
merged_cluster = (len(clusters), [c1, c2])
# and add their merge
clusters.append(merged_cluster)
# when there's only one cluster left, return it
return clusters[0]
def generate_clusters(base_cluster, num_clusters):
# start with a list with just the base cluster
clusters = [base_cluster]
# as long as we don't have enough clusters yet...
while len(clusters) < num_clusters:
# choose the last-merged of our clusters
next_cluster = min(clusters, key=get_merge_order)
# remove it from the list
clusters = [c for c in clusters if c != next_cluster]
# and add its children to the list (i.e., unmerge it)
clusters.extend(get_children(next_cluster))
# once we have enough clusters...
return clusters
if __name__ == "__main__":
inputs = [[-14,-5],[13,13],[20,23],[-19,-11],[-9,-16],[21,27],[-49,15],[26,13],[-46,5],[-34,-1],[11,15],[-49,0],[-22,-16],[19,28],[-12,-8],[-13,-19],[-41,8],[-11,-6],[-25,-9],[-18,-3]]
random.seed(0) # so you get the same results as me
clusterer = KMeans(3)
clusterer.train(inputs)
print "3-means:"
print clusterer.means
print
random.seed(0)
clusterer = KMeans(2)
clusterer.train(inputs)
print "2-means:"
print clusterer.means
print
print "errors as a function of k"
for k in range(1, len(inputs) + 1):
print k, squared_clustering_errors(inputs, k)
print
print "bottom up hierarchical clustering"
base_cluster = bottom_up_cluster(inputs)
print base_cluster
print
print "three clusters, min:"
for cluster in generate_clusters(base_cluster, 3):
print get_values(cluster)
print
print "three clusters, max:"
base_cluster = bottom_up_cluster(inputs, max)
for cluster in generate_clusters(base_cluster, 3):
print get_values(cluster)
| unlicense |
iulian787/spack | var/spack/repos/builtin/packages/py-elephant/package.py | 5 | 1416 | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyElephant(PythonPackage):
"""Elephant is a package for analysis of electrophysiology data in Python
"""
homepage = "http://neuralensemble.org/elephant"
url = "https://pypi.io/packages/source/e/elephant/elephant-0.3.0.tar.gz"
version('0.4.1', sha256='86b21a44cbacdc09a6ba6f51738dcd5b42ecd553d73acb29f71a0be7c82eac81')
version('0.3.0', sha256='747251ccfb5820bdead6391411b5faf205b4ddf3ababaefe865f50b16540cfef')
variant('doc', default=False, description='Build the documentation')
variant('pandas', default=True, description='Build with pandas')
depends_on('py-setuptools', type='build')
depends_on('[email protected]:', type=('build', 'run')) # > 0.3.3 ?
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'), when='+pandas')
depends_on('[email protected]:', type=('build', 'run'), when='+docs')
depends_on('[email protected]:', type=('build', 'run'), when='+docs')
depends_on('[email protected]:', type='test')
| lgpl-2.1 |
janpascal/denyhosts_sync | setup.py | 1 | 1855 | #!/usr/bin/env python
from setuptools import setup
from glob import glob
from denyhosts_server import version
etcpath = "/etc"
setup(name='denyhosts-server',
version=version,
description='DenyHosts Synchronisation Server',
author='Jan-Pascal van Best',
author_email='[email protected]',
url='https://github.com/janpascal/denyhosts_sync',
packages=['denyhosts_server'],
install_requires=["Twisted", "twistar", "ipaddr", "jinja2", "numpy", "matplotlib", "GeoIP", "minify", "libnacl"],
scripts=['scripts/denyhosts-server'],
data_files=[
('static/js', glob('static/js/*.min.js')),
('static/css', glob('static/css/*.min.css')),
('static/graph', glob('static/graph/README')),
('template', glob('template/*')),
('docs', [
'README.md',
'LICENSE.md',
'changelog.txt',
'denyhosts-server.conf.example',
'denyhosts-server.service.example',
'denyhosts-server.init.example'
]
),
],
license="""
Copyright (C) 2015-2017 Jan-Pascal van Best <[email protected]>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published
by the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
)
| agpl-3.0 |
elkingtonmcb/scikit-learn | examples/svm/plot_custom_kernel.py | 171 | 1546 | """
======================
SVM with custom kernel
======================
Simple usage of Support Vector Machines to classify a sample. It will
plot the decision surface and the support vectors.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
Y = iris.target
def my_kernel(X, Y):
"""
    We create a custom kernel:

                 (2  0)
    k(X, Y) = X  (    ) Y.T
                 (0  1)
    """
M = np.array([[2, 0], [0, 1.0]])
return np.dot(np.dot(X, M), Y.T)
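# Because M is diagonal, this is simply a linear kernel on rescaled inputs:
# k(x, y) = x.T M y = (sqrt(M) x) . (sqrt(M) y), i.e. the first feature is
# weighted twice as heavily as the second when measuring similarity.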
h = .02 # step size in the mesh
# we create an instance of SVM and fit out data.
clf = svm.SVC(kernel=my_kernel)
clf.fit(X, Y)
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, m_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.pcolormesh(xx, yy, Z, cmap=plt.cm.Paired)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)
plt.title('3-Class classification using Support Vector Machine with custom'
' kernel')
plt.axis('tight')
plt.show()
| bsd-3-clause |
houghb/lignet | lignet/learning_curve_full_net.py | 1 | 2200 | """
Make a learning curve for the full neural net trained on all 30 output
measures. The point of this graph is to investigate how much training data
is needed to achieve various MSE values.
"""
import matplotlib.pyplot as plt
import numpy as np
import cPickle as pickle
import lasagne
from lasagne import layers
from lasagne import nonlinearities
from lasagne.nonlinearities import ScaledTanH
from nolearn.lasagne import NeuralNet, TrainSplit
from sklearn.learning_curve import learning_curve
from lignet_utils import gen_train_test
x_train, x_test, y_train, y_test, x_scaler, y_scaler = gen_train_test()
# set up the Scaled tanh parameters. See nonlinearities.py for usage notes.
# I am following the guidance of LeCun et al. for these values
scaled_tanh = ScaledTanH(scale_in=2./3, scale_out=1.7159)
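# With these parameters the activation is f(x) = 1.7159 * tanh(2x/3), the form
# recommended by LeCun et al. in "Efficient BackProp" so that roughly
# unit-variance inputs produce roughly unit-variance outputs.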
# Make a learning curve to find out how much training data to use
train_size = int(1 * x_train.shape[0])
xt = x_train[:train_size, :]
yt = y_train[:train_size, :]
train_sizes, train_scores, valid_scores = learning_curve(
NeuralNet(
layers=[
('input', layers.InputLayer),
('hidden0', layers.DenseLayer),
('hidden1', layers.DenseLayer),
('output', layers.DenseLayer)
],
input_shape=(None, x_train.shape[1]),
hidden0_num_units=18,
hidden0_nonlinearity=scaled_tanh,
hidden1_num_units=20,
hidden1_nonlinearity=scaled_tanh,
output_num_units=y_train.shape[1],
output_nonlinearity=nonlinearities.linear,
regression=True,
verbose=1,
max_epochs=4000,
update=lasagne.updates.adagrad,
train_split=TrainSplit(eval_size=0.3),
),
xt, yt,
train_sizes=[500, 1500, 5000, 15000, 35000, 75000, 133333],
scoring='mean_squared_error')
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
valid_scores_mean = np.mean(valid_scores, axis=1)
valid_scores_std = np.std(valid_scores, axis=1)
with open('learning_curve.pkl', 'wb') as pkl:
pickle.dump([train_scores_mean, train_scores_std,
valid_scores_mean, valid_scores_std,
train_sizes], pkl)
| bsd-2-clause |
fabioticconi/scikit-learn | sklearn/covariance/tests/test_graph_lasso.py | 272 | 5245 | """ Test the graph_lasso module.
"""
import sys
import numpy as np
from scipy import linalg
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_less
from sklearn.covariance import (graph_lasso, GraphLasso, GraphLassoCV,
empirical_covariance)
from sklearn.datasets.samples_generator import make_sparse_spd_matrix
from sklearn.externals.six.moves import StringIO
from sklearn.utils import check_random_state
from sklearn import datasets
def test_graph_lasso(random_state=0):
# Sample data from a sparse multivariate normal
dim = 20
n_samples = 100
random_state = check_random_state(random_state)
prec = make_sparse_spd_matrix(dim, alpha=.95,
random_state=random_state)
cov = linalg.inv(prec)
X = random_state.multivariate_normal(np.zeros(dim), cov, size=n_samples)
emp_cov = empirical_covariance(X)
for alpha in (0., .1, .25):
covs = dict()
icovs = dict()
for method in ('cd', 'lars'):
cov_, icov_, costs = graph_lasso(emp_cov, alpha=alpha, mode=method,
return_costs=True)
covs[method] = cov_
icovs[method] = icov_
costs, dual_gap = np.array(costs).T
# Check that the costs always decrease (doesn't hold if alpha == 0)
if not alpha == 0:
assert_array_less(np.diff(costs), 0)
# Check that the 2 approaches give similar results
assert_array_almost_equal(covs['cd'], covs['lars'], decimal=4)
assert_array_almost_equal(icovs['cd'], icovs['lars'], decimal=4)
# Smoke test the estimator
model = GraphLasso(alpha=.25).fit(X)
model.score(X)
assert_array_almost_equal(model.covariance_, covs['cd'], decimal=4)
assert_array_almost_equal(model.covariance_, covs['lars'], decimal=4)
# For a centered matrix, assume_centered could be chosen True or False
# Check that this returns indeed the same result for centered data
Z = X - X.mean(0)
precs = list()
for assume_centered in (False, True):
prec_ = GraphLasso(assume_centered=assume_centered).fit(Z).precision_
precs.append(prec_)
assert_array_almost_equal(precs[0], precs[1])
def test_graph_lasso_iris():
# Hard-coded solution from R glasso package for alpha=1.0
# The iris datasets in R and sklearn do not match in a few places, these
# values are for the sklearn version
cov_R = np.array([
[0.68112222, 0.0, 0.2651911, 0.02467558],
[0.00, 0.1867507, 0.0, 0.00],
[0.26519111, 0.0, 3.0924249, 0.28774489],
[0.02467558, 0.0, 0.2877449, 0.57853156]
])
icov_R = np.array([
[1.5188780, 0.0, -0.1302515, 0.0],
[0.0, 5.354733, 0.0, 0.0],
[-0.1302515, 0.0, 0.3502322, -0.1686399],
[0.0, 0.0, -0.1686399, 1.8123908]
])
X = datasets.load_iris().data
emp_cov = empirical_covariance(X)
for method in ('cd', 'lars'):
cov, icov = graph_lasso(emp_cov, alpha=1.0, return_costs=False,
mode=method)
assert_array_almost_equal(cov, cov_R)
assert_array_almost_equal(icov, icov_R)
def test_graph_lasso_iris_singular():
# Small subset of rows to test the rank-deficient case
# Need to choose samples such that none of the variances are zero
indices = np.arange(10, 13)
# Hard-coded solution from R glasso package for alpha=0.01
cov_R = np.array([
[0.08, 0.056666662595, 0.00229729713223, 0.00153153142149],
[0.056666662595, 0.082222222222, 0.00333333333333, 0.00222222222222],
[0.002297297132, 0.003333333333, 0.00666666666667, 0.00009009009009],
[0.001531531421, 0.002222222222, 0.00009009009009, 0.00222222222222]
])
icov_R = np.array([
[24.42244057, -16.831679593, 0.0, 0.0],
[-16.83168201, 24.351841681, -6.206896552, -12.5],
[0.0, -6.206896171, 153.103448276, 0.0],
[0.0, -12.499999143, 0.0, 462.5]
])
X = datasets.load_iris().data[indices, :]
emp_cov = empirical_covariance(X)
for method in ('cd', 'lars'):
cov, icov = graph_lasso(emp_cov, alpha=0.01, return_costs=False,
mode=method)
assert_array_almost_equal(cov, cov_R, decimal=5)
assert_array_almost_equal(icov, icov_R, decimal=5)
def test_graph_lasso_cv(random_state=1):
# Sample data from a sparse multivariate normal
dim = 5
n_samples = 6
random_state = check_random_state(random_state)
prec = make_sparse_spd_matrix(dim, alpha=.96,
random_state=random_state)
cov = linalg.inv(prec)
X = random_state.multivariate_normal(np.zeros(dim), cov, size=n_samples)
# Capture stdout, to smoke test the verbose mode
orig_stdout = sys.stdout
try:
sys.stdout = StringIO()
# We need verbose very high so that Parallel prints on stdout
GraphLassoCV(verbose=100, alphas=5, tol=1e-1).fit(X)
finally:
sys.stdout = orig_stdout
# Smoke test with specified alphas
GraphLassoCV(alphas=[0.8, 0.5], tol=1e-1, n_jobs=1).fit(X)
| bsd-3-clause |
BorisJeremic/Real-ESSI-Examples | analytic_solution/test_cases/Contact/Stress_Based_Contact_Verification/HardContact_ElPPlShear/Normal_Load/Sigma_n_1e3/Normal_Stress_Plot.py | 72 | 2800 | #!/usr/bin/python
import h5py
import matplotlib.pylab as plt
import matplotlib as mpl
import sys
import numpy as np;
import matplotlib;
import math;
from matplotlib.ticker import MaxNLocator
plt.rcParams.update({'font.size': 28})
# set tick width
mpl.rcParams['xtick.major.size'] = 10
mpl.rcParams['xtick.major.width'] = 5
mpl.rcParams['xtick.minor.size'] = 10
mpl.rcParams['xtick.minor.width'] = 5
plt.rcParams['xtick.labelsize']=24
mpl.rcParams['ytick.major.size'] = 10
mpl.rcParams['ytick.major.width'] = 5
mpl.rcParams['ytick.minor.size'] = 10
mpl.rcParams['ytick.minor.width'] = 5
plt.rcParams['ytick.labelsize']=24
###############################################################
## Analytical Solution
###############################################################
# Go over each feioutput and plot each one.
thefile = "Analytical_Solution_Normal_Stress.feioutput";
finput = h5py.File(thefile)
# Read the time and displacement
times = finput["time"][:]
normal_stress = -finput["/Model/Elements/Element_Outputs"][9,:];
normal_strain = -finput["/Model/Elements/Element_Outputs"][6,:];
# Configure the figure filename, according to the input filename.
outfig=thefile.replace("_","-")
outfigname=outfig.replace("h5.feioutput","pdf")
# Plot the figure. Add labels and titles.
plt.figure(figsize=(12,10))
plt.plot(normal_strain*100,normal_stress/1000,'-r',label='Analytical Solution', Linewidth=4, markersize=20)
plt.xlabel(r"Interface Type #")
plt.ylabel(r"Normal Stress $\sigma_n [kPa]$")
plt.hold(True)
###############################################################
## Numerical Solution
###############################################################
# Go over each feioutput and plot each one.
thefile = "Monotonic_Contact_Behaviour_Adding_Normal_Load.h5.feioutput";
finput = h5py.File(thefile)
# Read the time and displacement
times = finput["time"][:]
normal_stress = -finput["/Model/Elements/Element_Outputs"][9,:];
normal_strain = -finput["/Model/Elements/Element_Outputs"][6,:];
# Configure the figure filename, according to the input filename.
outfig=thefile.replace("_","-")
outfigname=outfig.replace("h5.feioutput","pdf")
# Plot the figure. Add labels and titles.
plt.plot(normal_strain*100,normal_stress/1000,'-k',label='Numerical Solution', Linewidth=4, markersize=20)
plt.xlabel(r"Normal Strain [%]")
plt.ylabel(r"Normal Stress $\sigma_n [kPa]$")
#############################################################
# # # axes = plt.gca()
# # # axes.set_xlim([-7,7])
# # # axes.set_ylim([-1,1])
# outfigname = "Interface_Test_Normal_Stress.pdf";
# plt.axis([0, 5.5, 90, 101])
# legend = plt.legend()
# legend.get_frame().set_linewidth(0.0)
# legend.get_frame().set_facecolor('none')
plt.legend()
plt.savefig('Normal_Stress.pdf', bbox_inches='tight')
# plt.show()
| cc0-1.0 |
cbuntain/UMD_HCIL_TREC2015 | src/main/python/topicFilter/scenario_b_type_b.py | 1 | 3325 | #!/usr/bin/python
import codecs
import json
import re
import sys
import time
from nltk.stem import WordNetLemmatizer
import pandas as pd
minKeywordCount = 1
maxPerDay = 100
if ( len(sys.argv) < 5 ):
print "Usage: %s <trec_topics.json> <sparkTrecOutput.csv> <output_file.csv> <runtag>" % (sys.argv[0])
exit(1)
topicsFilePath = sys.argv[1]
sparkCsvFilePath = sys.argv[2]
outputPath = sys.argv[3]
runtag = sys.argv[4]
topicsJsonObj = None
with codecs.open(topicsFilePath, "r", "utf-8") as f:
topicsJsonObj = json.load(f)
wordToTopicMap = {}
topicTimeMap = {}
for topic in topicsJsonObj:
topicTitle = topic["title"]
topicNum = topic["num"]
tokens = topic["tokens"]
for token in tokens:
if ( token not in wordToTopicMap ):
wordToTopicMap[token] = [(topicNum,topicTitle)]
else:
wordToTopicMap[token].append((topicNum,topicTitle))
topicTimeMap[topicNum] = {}
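# wordToTopicMap is an inverted index mapping each topic token to a list of
# (topic number, topic title) pairs; topicTimeMap is later used to cap the
# number of tweets kept per topic per day (maxPerDay).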
wnl = WordNetLemmatizer()
specCharRegex = re.compile(r"[^a-zA-Z0-9\\s]")
outputRows = []
with codecs.open(sparkCsvFilePath, "r", "utf-8") as f:
df = pd.read_csv(sparkCsvFilePath, header=None)
for (id, row) in df.iterrows():
topicNums = row[0]
captureTime = row[1]
tweetId = row[2]
tweetText = row[3]
gmTime = time.gmtime(captureTime)
timeTuple = (gmTime.tm_year, gmTime.tm_mon, gmTime.tm_mday)
timeStr = "%04d%02d%02d" % (gmTime.tm_year, gmTime.tm_mon, gmTime.tm_mday)
cleanTokens = specCharRegex.sub(" ", tweetText.lower(), count=0)
tokens = set([wnl.lemmatize(x) for x in cleanTokens.split(" ")])
localTopicCountMap = {}
localTopics = []
for token in tokens:
if ( token in wordToTopicMap ):
for x in wordToTopicMap[token]:
thisTopicNum = x[0]
if ( thisTopicNum not in localTopicCountMap ):
localTopics.append(x)
localTopicCountMap[thisTopicNum] = 1
else:
localTopicCountMap[thisTopicNum] += 1
for localTopic in localTopics:
if ( localTopicCountMap[localTopic[0]] < minKeywordCount ):
continue
if ( timeTuple in topicTimeMap[localTopic[0]] and len(topicTimeMap[localTopic[0]][timeTuple]) >= maxPerDay ):
continue
if ( timeTuple not in topicTimeMap[localTopic[0]] ):
topicTimeMap[localTopic[0]][timeTuple] = [tweetId]
else:
topicTimeMap[localTopic[0]][timeTuple].append(tweetId)
item = {
"topic":localTopic[0],
"title": localTopic[1],
"time":captureTime,
"date":timeStr,
"id":tweetId,
"text":tweetText,
"runtag":runtag,
"q0":"Q0",
"rank": 1,
"score": 1.0,
}
outputRows.append(item)
outputDf = pd.DataFrame(outputRows)
# YYYYMMDD topic_id Q0 tweet_id rank score runtag
# outputDf.to_csv(outputPath, columns=["topic", "title", "time", "date", "id", "text"], index=False)
outputDf.to_csv(outputPath, columns=["date", "topic", "q0", "id", "rank", "score", "runtag"], index=False, sep="\t")
| apache-2.0 |
bundgus/python-playground | matplotlib-playground/examples/api/demo_affine_image.py | 1 | 1698 | #!/usr/bin/env python
"""
For the backends that support draw_image with an optional affine
transform (e.g., the agg and ps backends), the boundary of the output image
should match the red rectangles.
"""
import numpy as np
import matplotlib.cm as cm
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
import matplotlib.transforms as mtransforms
def get_image():
delta = 0.25
x = y = np.arange(-3.0, 3.0, delta)
X, Y = np.meshgrid(x, y)
Z1 = mlab.bivariate_normal(X, Y, 1.0, 1.0, 0.0, 0.0)
Z2 = mlab.bivariate_normal(X, Y, 1.5, 0.5, 1, 1)
Z = Z2 - Z1 # difference of Gaussians
return Z
def imshow_affine(ax, z, *kl, **kwargs):
im = ax.imshow(z, *kl, **kwargs)
x1, x2, y1, y2 = im.get_extent()
im._image_skew_coordinate = (x2, y1)
return im
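# Setting the private _image_skew_coordinate attribute is the hook this demo
# relies on: for backends that support it, the image is then drawn through the
# full (possibly skewed) affine transform instead of the axis-aligned fast path.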
if 1:
# image rotation
fig, (ax1, ax2) = plt.subplots(1, 2)
Z = get_image()
im1 = imshow_affine(ax1, Z, interpolation='none', cmap=cm.jet,
origin='lower',
extent=[-2, 4, -3, 2], clip_on=True)
trans_data2 = mtransforms.Affine2D().rotate_deg(30) + ax1.transData
im1.set_transform(trans_data2)
# display intended extent of the image
x1, x2, y1, y2 = im1.get_extent()
x3, y3 = x2, y1
ax1.plot([x1, x2, x2, x1, x1], [y1, y1, y2, y2, y1], "r--", lw=3,
transform=trans_data2)
ax1.set_xlim(-3, 5)
ax1.set_ylim(-4, 4)
# image skew
im2 = ax2.imshow(Z, interpolation='none', cmap=cm.jet,
origin='lower',
extent=[-2, 4, -3, 2], clip_on=True)
im2._image_skew_coordinate = (3, -2)
plt.show()
#plt.savefig("demo_affine_image")
| mit |
yask123/scikit-learn | doc/datasets/mldata_fixture.py | 367 | 1183 | """Fixture module to skip the datasets loading when offline
Mock urllib2 access to mldata.org and create a temporary data folder.
"""
from os import makedirs
from os.path import join
import numpy as np
import tempfile
import shutil
from sklearn import datasets
from sklearn.utils.testing import install_mldata_mock
from sklearn.utils.testing import uninstall_mldata_mock
def globs(globs):
# Create a temporary folder for the data fetcher
global custom_data_home
custom_data_home = tempfile.mkdtemp()
makedirs(join(custom_data_home, 'mldata'))
globs['custom_data_home'] = custom_data_home
return globs
def setup_module():
# setup mock urllib2 module to avoid downloading from mldata.org
install_mldata_mock({
'mnist-original': {
'data': np.empty((70000, 784)),
'label': np.repeat(np.arange(10, dtype='d'), 7000),
},
'iris': {
'data': np.empty((150, 4)),
},
'datasets-uci-iris': {
'double0': np.empty((150, 4)),
'class': np.empty((150,)),
},
})
def teardown_module():
uninstall_mldata_mock()
shutil.rmtree(custom_data_home)
| bsd-3-clause |
restudToolbox/package | respy/simulate.py | 1 | 1980 | import pandas as pd
import os
from respy.python.shared.shared_auxiliary import replace_missing_values
from respy.python.shared.shared_auxiliary import dist_class_attributes
from respy.python.shared.shared_auxiliary import add_solution
from respy.python.simulate.simulate_auxiliary import write_info
from respy.python.simulate.simulate_auxiliary import write_out
from respy.python.shared.shared_auxiliary import check_dataset
from respy.fortran.interface import resfort_interface
from respy.python.interface import respy_interface
def simulate(respy_obj):
""" Simulate dataset of synthetic agent following the model specified in
the initialization file.
"""
# Cleanup
for fname in ['sim.respy.log', 'sol.respy.log']:
if os.path.exists(fname):
os.unlink(fname)
# Distribute class attributes
is_debug, version, num_agents_sim, is_store = \
dist_class_attributes(respy_obj, 'is_debug', 'version',
'num_agents_sim', 'is_store')
# Select appropriate interface
if version in ['PYTHON']:
solution, data_array = respy_interface(respy_obj, 'simulate')
elif version in ['FORTRAN']:
solution, data_array = resfort_interface(respy_obj, 'simulate')
else:
raise NotImplementedError
# Attach solution to class instance
respy_obj = add_solution(respy_obj, *solution)
respy_obj.unlock()
respy_obj.set_attr('is_solved', True)
respy_obj.lock()
# Store object to file
if is_store:
respy_obj.store('solution.respy.pkl')
# Create pandas data frame with missing values.
data_frame = pd.DataFrame(replace_missing_values(data_array))
# Wrapping up by running some checks on the dataset and then writing out
# the file and some basic information.
if is_debug:
check_dataset(data_frame, respy_obj, 'sim')
write_out(respy_obj, data_frame)
write_info(respy_obj, data_frame)
# Finishing
return respy_obj
| mit |
eramirem/astroML | doc/logos/plot_logo.py | 5 | 1286 | """
NASA Sloan Atlas
----------------
This shows some visualizations of the data from the NASA SDSS Atlas
"""
# Author: Jake VanderPlas <[email protected]>
# License: BSD
# The figure is an example from astroML: see http://astroML.github.com
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.font_manager import FontProperties
from astroML.datasets import fetch_nasa_atlas
data = fetch_nasa_atlas()
#------------------------------------------------------------
# plot the RA/DEC in an area-preserving projection
RA = data['RA']
DEC = data['DEC']
# convert coordinates to degrees
RA -= 180
RA *= np.pi / 180
DEC *= np.pi / 180
fig = plt.figure(figsize=(8, 2), facecolor='w')
ax = fig.add_axes([0.56, 0.1, 0.4, 0.8], projection='mollweide')
plt.scatter(RA, DEC, s=1, lw=0, c=data['Z'], cmap=plt.cm.copper)
plt.grid(True)
ax.xaxis.set_major_formatter(plt.NullFormatter())
ax.yaxis.set_major_formatter(plt.NullFormatter())
font = {'family' : 'neuropol X',
'color' : '#222222',
'weight' : 'normal',
'size' : 135,
}
fig.text(0.5, 0.5, 'astroML', ha='center', va='center',
fontdict=font)
#size=135,
#fontproperties=FontProperties(['neuropol X bold', 'neuropol X']))
plt.savefig('logo.png')
plt.show()
| bsd-2-clause |
laserson/ibis | scripts/test_data_admin.py | 8 | 17250 | #! /usr/bin/env python
# Copyright 2015 Cloudera Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
import tempfile
import os.path as osp
from os.path import join as pjoin
from subprocess import check_call
from click import group, option
import ibis
from ibis.compat import BytesIO
from ibis.common import IbisError
from ibis.impala.tests.common import IbisTestEnv
from ibis.util import guid
import numpy as np
import pandas as pd
import pandas.util.testing as tm
ENV = IbisTestEnv()
IBIS_TEST_DATA_S3_BUCKET = 'ibis-resources'
IBIS_TEST_DATA_LOCAL_DIR = 'ibis-testing-data'
TARBALL_NAME = 'ibis-testing-data.tar.gz'
IBIS_TEST_DATA_TARBALL = 'testing/{0}'.format(TARBALL_NAME)
IBIS_TEST_AWS_KEY_ID = os.environ.get('IBIS_TEST_AWS_KEY_ID')
IBIS_TEST_AWS_SECRET = os.environ.get('IBIS_TEST_AWS_SECRET')
def make_ibis_client():
hc = ibis.hdfs_connect(host=ENV.nn_host, port=ENV.webhdfs_port,
auth_mechanism=ENV.auth_mechanism,
verify=(ENV.auth_mechanism
not in ['GSSAPI', 'LDAP']))
if ENV.auth_mechanism in ['GSSAPI', 'LDAP']:
print("Warning: ignoring invalid Certificate Authority errors")
return ibis.impala.connect(host=ENV.impala_host, port=ENV.impala_port,
auth_mechanism=ENV.auth_mechanism,
hdfs_client=hc)
def can_write_to_hdfs(con):
test_path = pjoin(ENV.test_data_dir, ibis.util.guid())
test_file = BytesIO(ibis.util.guid().encode('utf-8'))
try:
con.hdfs.put(test_path, test_file)
con.hdfs.rm(test_path)
return True
except:
return False
def can_build_udfs():
try:
check_call('which cmake', shell=True)
except:
print('Could not find cmake on PATH')
return False
try:
check_call('which make', shell=True)
except:
print('Could not find make on PATH')
return False
try:
check_call('which clang++', shell=True)
except:
print('Could not find LLVM on PATH; if IBIS_TEST_LLVM_CONFIG is set, '
'try setting PATH="$($IBIS_TEST_LLVM_CONFIG --bindir):$PATH"')
return False
return True
def is_data_loaded(con):
if not con.hdfs.exists(ENV.test_data_dir):
return False
if not con.exists_database(ENV.test_data_db):
return False
return True
def is_udf_loaded(con):
bitcode_dir = pjoin(ENV.test_data_dir, 'udf')
if con.hdfs.exists(bitcode_dir):
return True
return False
def dnload_ibis_test_data_from_s3(local_path):
url = 'https://{0}.s3.amazonaws.com/{1}'.format(
IBIS_TEST_DATA_S3_BUCKET, IBIS_TEST_DATA_TARBALL)
cmd = 'cd {0} && wget -q {1} && tar -xzf {2}'.format(
local_path, url, TARBALL_NAME)
check_call(cmd, shell=True)
data_dir = pjoin(local_path, IBIS_TEST_DATA_LOCAL_DIR)
print('Downloaded {0} and unpacked it to {1}'.format(url, data_dir))
return data_dir
def upload_ibis_test_data_to_hdfs(con, data_path):
hdfs = con.hdfs
if hdfs.exists(ENV.test_data_dir):
hdfs.rmdir(ENV.test_data_dir)
hdfs.put(ENV.test_data_dir, data_path, verbose=True)
def create_test_database(con):
if con.exists_database(ENV.test_data_db):
con.drop_database(ENV.test_data_db, force=True)
con.create_database(ENV.test_data_db)
print('Created database {0}'.format(ENV.test_data_db))
def create_parquet_tables(con):
parquet_files = con.hdfs.ls(pjoin(ENV.test_data_dir, 'parquet'))
schemas = {
'functional_alltypes': ibis.schema(
[('id', 'int32'),
('bool_col', 'boolean'),
('tinyint_col', 'int8'),
('smallint_col', 'int16'),
('int_col', 'int32'),
('bigint_col', 'int64'),
('float_col', 'float'),
('double_col', 'double'),
('date_string_col', 'string'),
('string_col', 'string'),
('timestamp_col', 'timestamp'),
('year', 'int32'),
('month', 'int32')]),
'tpch_region': ibis.schema(
[('r_regionkey', 'int16'),
('r_name', 'string'),
('r_comment', 'string')])}
tables = []
for table_name in parquet_files:
print('Creating {0}'.format(table_name))
        # if no schema is given, it will be inferred from the Parquet file
schema = schemas.get(table_name)
path = pjoin(ENV.test_data_dir, 'parquet', table_name)
table = con.parquet_file(path, schema=schema, name=table_name,
database=ENV.test_data_db, persist=True)
tables.append(table)
return tables
def create_avro_tables(con):
avro_files = con.hdfs.ls(pjoin(ENV.test_data_dir, 'avro'))
schemas = {
'tpch_region_avro': {
'type': 'record',
'name': 'a',
'fields': [
{'name': 'R_REGIONKEY', 'type': ['null', 'int']},
{'name': 'R_NAME', 'type': ['null', 'string']},
{'name': 'R_COMMENT', 'type': ['null', 'string']}]}}
tables = []
for table_name in avro_files:
print('Creating {0}'.format(table_name))
schema = schemas[table_name]
path = pjoin(ENV.test_data_dir, 'avro', table_name)
table = con.avro_file(path, schema, name=table_name,
database=ENV.test_data_db, persist=True)
tables.append(table)
return tables
def build_udfs():
print('Building UDFs')
ibis_home_dir = osp.dirname(osp.dirname(osp.abspath(__file__)))
udf_dir = pjoin(ibis_home_dir, 'testing', 'udf')
check_call('cmake . && make', shell=True, cwd=udf_dir)
def upload_udfs(con):
ibis_home_dir = osp.dirname(osp.dirname(osp.abspath(__file__)))
build_dir = pjoin(ibis_home_dir, 'testing', 'udf', 'build')
bitcode_dir = pjoin(ENV.test_data_dir, 'udf')
print('Uploading UDFs to {0}'.format(bitcode_dir))
if con.hdfs.exists(bitcode_dir):
con.hdfs.rmdir(bitcode_dir)
con.hdfs.put(bitcode_dir, build_dir, verbose=True)
def scrape_parquet_files(tmp_db, con):
to_scrape = [('tpch', x) for x in con.list_tables(database='tpch')]
to_scrape.append(('functional', 'alltypes'))
for db, tname in to_scrape:
table = con.table(tname, database=db)
new_name = '{0}_{1}'.format(db, tname)
print('Creating {0}'.format(new_name))
con.create_table(new_name, table, database=tmp_db)
def download_parquet_files(con, tmp_db_hdfs_path):
parquet_path = pjoin(IBIS_TEST_DATA_LOCAL_DIR, 'parquet')
print("Downloading {0}".format(parquet_path))
con.hdfs.get(tmp_db_hdfs_path, parquet_path)
def generate_sqlite_db(con):
from sqlalchemy import create_engine
path = pjoin(IBIS_TEST_DATA_LOCAL_DIR, 'ibis_testing.db')
csv_path = guid()
engine = create_engine('sqlite:///{0}'.format(path))
generate_sql_csv_sources(csv_path, con.database('ibis_testing'))
make_sqlite_testing_db(csv_path, engine)
shutil.rmtree(csv_path)
def download_avro_files(con):
avro_hdfs_path = '/test-warehouse/tpch.region_avro'
avro_local_path = pjoin(IBIS_TEST_DATA_LOCAL_DIR, 'avro')
os.mkdir(avro_local_path)
print("Downloading {0}".format(avro_hdfs_path))
con.hdfs.get(avro_hdfs_path, pjoin(avro_local_path, 'tpch_region_avro'))
def generate_csv_files():
N = 10
nfiles = 10
df = pd.DataFrame({'foo': [tm.rands(10) for _ in xrange(N)],
'bar': np.random.randn(N),
'baz': np.random.randint(0, 100, size=N)},
columns=['foo', 'bar', 'baz'])
csv_base = pjoin(IBIS_TEST_DATA_LOCAL_DIR, 'csv')
os.mkdir(csv_base)
for i in xrange(nfiles):
csv_path = pjoin(csv_base, '{0}.csv'.format(i))
print('Writing {0}'.format(csv_path))
df.to_csv(csv_path, index=False, header=False)
def copy_tarball_to_versioned_backup(bucket):
key = bucket.get_key(IBIS_TEST_DATA_TARBALL)
if key:
names = [k.name for k in bucket.list(prefix=IBIS_TEST_DATA_TARBALL)]
names.remove(IBIS_TEST_DATA_TARBALL)
# get the highest number for this key name
        last = sorted([int(name.split('.')[-1]) for name in names])[-1]
next_key = '{0}.{1}'.format(IBIS_TEST_DATA_TARBALL, last + 1)
key.copy(IBIS_TEST_DATA_S3_BUCKET, next_key)
key.delete()
assert bucket.get_key(IBIS_TEST_DATA_TARBALL) is None
_sql_tpch_tables = ['tpch_lineitem', 'tpch_customer',
'tpch_region', 'tpch_nation', 'tpch_orders']
_sql_tables = ['functional_alltypes']
def _project_tpch_lineitem(t):
return t['l_orderkey',
'l_partkey',
'l_suppkey',
'l_linenumber',
t.l_quantity.cast('double'),
t.l_extendedprice.cast('double'),
t.l_discount.cast('double'),
t.l_tax.cast('double'),
'l_returnflag',
'l_linestatus',
'l_shipdate',
'l_commitdate',
'l_receiptdate',
'l_shipinstruct',
'l_shipmode']
def _project_tpch_orders(t):
return t['o_orderkey',
'o_custkey',
'o_orderstatus',
t.o_totalprice.cast('double'),
'o_orderdate',
'o_orderpriority',
'o_clerk',
'o_shippriority']
def _project_tpch_customer(t):
return t['c_custkey',
'c_name',
'c_nationkey',
'c_phone',
'c_acctbal',
'c_mktsegment']
_projectors = {
'tpch_customer': _project_tpch_customer,
'tpch_lineitem': _project_tpch_lineitem,
'tpch_orders': _project_tpch_orders,
}
def generate_sql_csv_sources(output_path, db):
ibis.options.sql.default_limit = None
if not osp.exists(output_path):
os.mkdir(output_path)
for name in _sql_tables:
print(name)
table = db[name]
if name in _projectors:
table = _projectors[name](table)
df = table.execute()
path = osp.join(output_path, name)
df.to_csv('{0}.csv'.format(path), na_rep='\\N')
def make_sqlite_testing_db(csv_dir, con):
for name in _sql_tables:
print(name)
path = osp.join(csv_dir, '{0}.csv'.format(name))
df = pd.read_csv(path, na_values=['\\N'])
pd.io.sql.to_sql(df, name, con, chunksize=10000)
# ==========================================
@group(context_settings={'help_option_names': ['-h', '--help']})
def main():
"""Manage test data for Ibis"""
pass
@main.command()
def printenv():
"""Print current IbisTestEnv"""
print(str(ENV))
@main.command()
@option('--create-tarball', is_flag=True,
help="Create a gzipped tarball")
@option('--push-to-s3', is_flag=True,
help="Also push the tarball to s3://ibis-test-resources")
def create(create_tarball, push_to_s3):
"""Create Ibis test data"""
print(str(ENV))
con = make_ibis_client()
# verify some assumptions before proceeding
if push_to_s3 and not create_tarball:
raise IbisError(
"Must specify --create-tarball if specifying --push-to-s3")
if osp.exists(IBIS_TEST_DATA_LOCAL_DIR):
raise IbisError(
'Local dir {0} already exists; please remove it first'.format(
IBIS_TEST_DATA_LOCAL_DIR))
if not con.exists_database('tpch'):
raise IbisError('`tpch` database does not exist')
if not con.hdfs.exists('/test-warehouse/tpch.region_avro'):
raise IbisError(
'HDFS dir /test-warehouse/tpch.region_avro does not exist')
# generate tmp identifiers
tmp_db_hdfs_path = pjoin(ENV.tmp_dir, guid())
tmp_db = guid()
os.mkdir(IBIS_TEST_DATA_LOCAL_DIR)
try:
# create the tmp data locally
con.create_database(tmp_db, path=tmp_db_hdfs_path)
print('Created database {0} at {1}'.format(tmp_db, tmp_db_hdfs_path))
# create the local data set
scrape_parquet_files(tmp_db, con)
download_parquet_files(con, tmp_db_hdfs_path)
download_avro_files(con)
generate_csv_files()
generate_sqlite_db(con)
finally:
con.drop_database(tmp_db, force=True)
assert not con.hdfs.exists(tmp_db_hdfs_path)
if create_tarball:
check_call('tar -zc {0} > {1}'
.format(IBIS_TEST_DATA_LOCAL_DIR, TARBALL_NAME),
shell=True)
if push_to_s3:
import boto
s3_conn = boto.connect_s3(IBIS_TEST_AWS_KEY_ID,
IBIS_TEST_AWS_SECRET)
bucket = s3_conn.get_bucket(IBIS_TEST_DATA_S3_BUCKET)
# copy_tarball_to_versioned_backup(bucket)
key = bucket.new_key(IBIS_TEST_DATA_TARBALL)
print('Upload tarball to S3')
key.set_contents_from_filename(TARBALL_NAME, replace=True)
@main.command()
@option('--data/--no-data', default=True, help='Load (skip) ibis testing data')
@option('--udf/--no-udf', default=True, help='Build/upload (skip) test UDFs')
@option('--data-dir',
help='Path to testing data; dnloads data from S3 if unset')
@option('--overwrite', is_flag=True, help='Forces overwriting of data/UDFs')
def load(data, udf, data_dir, overwrite):
"""Load Ibis test data and build/upload UDFs"""
print(str(ENV))
con = make_ibis_client()
# validate our environment before performing possibly expensive operations
if not can_write_to_hdfs(con):
raise IbisError('Failed to write to HDFS; check your settings')
if udf and not can_build_udfs():
raise IbisError('Build environment does not support building UDFs')
# load the data files
if data:
already_loaded = is_data_loaded(con)
print('Attempting to load Ibis test data (--data)')
if already_loaded and not overwrite:
print('Data is already loaded and not overwriting; moving on')
else:
if already_loaded:
print('Data is already loaded; attempting to overwrite')
tmp_dir = tempfile.mkdtemp(prefix='__ibis_tmp_')
try:
if not data_dir:
print('Did not specify a local dir with the test data, so '
'downloading it from S3')
data_dir = dnload_ibis_test_data_from_s3(tmp_dir)
print('Uploading to HDFS')
upload_ibis_test_data_to_hdfs(con, data_dir)
print('Creating Ibis test data database')
create_test_database(con)
parquet_tables = create_parquet_tables(con)
avro_tables = create_avro_tables(con)
for table in parquet_tables + avro_tables:
print('Computing stats for {0}'.format(table.op().name))
table.compute_stats()
# sqlite database
sqlite_src = osp.join(data_dir, 'ibis_testing.db')
shutil.copy(sqlite_src, '.')
finally:
shutil.rmtree(tmp_dir)
else:
print('Skipping Ibis test data load (--no-data)')
# build and upload the UDFs
if udf:
already_loaded = is_udf_loaded(con)
print('Attempting to build and load test UDFs')
if already_loaded and not overwrite:
print('UDFs already loaded and not overwriting; moving on')
else:
if already_loaded:
print('UDFs already loaded; attempting to overwrite')
print('Building UDFs')
build_udfs()
print('Uploading UDFs')
upload_udfs(con)
else:
print('Skipping UDF build/load (--no-udf)')
@main.command()
@option('--test-data', is_flag=True,
help='Cleanup Ibis test data, test database, and also the test UDFs '
'if they are stored in the test data directory/database')
@option('--udfs', is_flag=True, help='Cleanup Ibis test UDFs only')
@option('--tmp-data', is_flag=True,
help='Cleanup Ibis temporary HDFS directory')
@option('--tmp-db', is_flag=True, help='Cleanup Ibis temporary database')
def cleanup(test_data, udfs, tmp_data, tmp_db):
"""Cleanup Ibis test data and UDFs"""
print(str(ENV))
con = make_ibis_client()
if udfs:
# this comes before test_data bc the latter clobbers this too
con.hdfs.rmdir(pjoin(ENV.test_data_dir, 'udf'))
if test_data:
con.drop_database(ENV.test_data_db, force=True)
con.hdfs.rmdir(ENV.test_data_dir)
if tmp_data:
con.hdfs.rmdir(ENV.tmp_dir)
if tmp_db:
con.drop_database(ENV.tmp_db, force=True)
if __name__ == '__main__':
main()
| apache-2.0 |
bliebeskind/Gene-Ages | CannedScripts/calc_LDOs.py | 1 | 1609 | from LECA.node_stats import run_LDOcomp, percLDOs
from LECA import csv_parser
import cPickle as pickle
import pandas as pd
### This program runs the oversplitting analysis, used for false negative analysis
### Outputs three files:
### _results.p # pickle file used by consensus scripts
### _results.csv # the results of the analysis in csv format
### _summary.csv # summary statistics (not that useful)
############# User input #######################
ORTHOFILE = "<SPECIES>_coOrthologs.txt"
AGEFILE = "../../NodeAges/nodeAges_<SPECIES>.csv"
YOUNGGROUP = ["InParanoid","InParanoidCore","PANTHER8_LDO","OMA_Groups",
"OMA_Pairs","RSD","EggNOG","Orthoinspector","Hieranoid_2",
"EnsemblCompara_v2","Metaphors","PANTHER8_all"]
OLDGROUP = ["PhylomeDB"]
FALSEPOS = "../Losses/lossStats_<SPECIES>.csv"
BINNED=False # Can also calculate LDOs on binned ages, but I wouldn't recommend it. Not conservative enough.
############ Don't change #######################
print "Running LDO analysis"
LDO_results = run_LDOcomp(ORTHOFILE,AGEFILE,OLDGROUP,YOUNGGROUP,FALSEPOS,BINNED)
print "Pickling results to '<SPECIES>_LDO_results.p'"
with open("<SPECIES>_LDO_results.p",'w') as out:
pickle.dump(LDO_results,out)
print "Writing results to '<SPECIES>_LDO_results.csv'"
LDO_df = pd.DataFrame.from_dict(LDO_results,orient='index')
LDO_df.to_csv("<SPECIES>_LDO_results.csv")
print "Writing summary stats to '<SPECIES>_LDO_summary.csv'"
LDOstats = percLDOs(LDO_results)
LDOstatsDF = pd.DataFrame.from_dict(LDOstats,orient='index')
LDOstatsDF.columns = ["FractionLDOs"]
LDOstatsDF.to_csv("<SPECIES>_LDO_summary.csv")
| mit |
jakobzhao/wbcrawler3 | backup/analysisGraph2.py | 1 | 2375 | # !/usr/bin/python
# -*- coding: utf-8 -*-
'''
Created on Nov 27, 2012
@author: Bo Zhao
@email: [email protected]
@website: http://yenching.org
@organization: The Ohio State University
'''
# retweet type
# 1: reply
# 2: comment
# 3: reply to a comment
# 4: a reply and a comment
# status type
# 0: original
# 1: reply
# 2: comments
import sqlite3
import math
import networkx as nx
import matplotlib.pylab as plt
import numpy as np
from scipy import linalg
plt.rcParams['font.sans-serif'] = ['Microsoft YaHei']
#--------------------------------build network-----------------------------------
database = '../data/xilaibo.db'
conn = sqlite3.connect(database)  # open the SQLite database file named above
cursor = conn.cursor()
cursor.execute('select id, source_name, target_name from user_edges')
edges = cursor.fetchall()
cursor.execute('select id, node from user_nodes')
nodes = cursor.fetchall()
conn.commit()
conn.close()
G = nx.DiGraph()
for node in nodes:
G.add_node(node[1])
for edge in edges:
G.add_edge(edge[1],edge[2])
degree_sequence=sorted(nx.degree(G).values(),reverse=True) # degree sequence
##print "Degree sequence", degree_sequence
dmax=max(degree_sequence)
x, y , t = [], [] ,-1
for item in degree_sequence:
if item != t:
x.append(item)
y.append(degree_sequence.count(item))
t = item
print x
print y
print len(x)
print len(degree_sequence)
plt.loglog(degree_sequence,'b+',marker='o')
plt.loglog(x, y,'g-',marker='3')
plt.title("Degree Distribution")
plt.ylabel("degree")
plt.xlabel("count")
##===============================================
#fig = plt.figure()
#ax = fig.add_subplot(111)
#ax.loglog(x,y,"o") # plot the actual data points; x and y are the raw data's x- and y-coordinates (the code that generates them is not pasted here)
#plt.loglog(x,y,"g-")
lnx = [math.log(f+.01,math.e) for f in x] # log-transform x
lny = [math.log(f+.01,math.e) for f in y] # log-transform y
a = np.mat([lnx,[1]*len(x)]).T # independent-variable (design) matrix a
b = np.mat(lny).T # dependent-variable matrix b
(t,res,rank,s) = linalg.lstsq(a,b) # least-squares estimate of the coefficients
print t
r = t[0][0]
c = t[1][0]
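# In log-log space the assumed power law y = C * x**r becomes ln(y) = r*ln(x) + ln(C),
# so the least-squares slope r is the power-law exponent and c estimates ln(C).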
x_ = x
y_ = [math.e**(r*a+c) for a in lnx] # compute the fitted y values from the estimated coefficients
plt.loglog(x_,y_,"r-") # plot the fitted curve
plt.savefig("../data/img/power_law.png")
plt.show() | mit |
YudinYury/Python_Netology_homework | less_4_1_classwork_for_dig_data.py | 1 | 1630 | """lesson_4_1_Classwork "Data processing tools"
"""
import os
import pandas as pd
def count_of_len(row):
return len(row.Name)
def main():
source_path = 'D:\Python_my\Python_Netology_homework\data_names'
source_dir_path = os.path.normpath(os.path.abspath(source_path))
source_file = os.path.normpath(os.path.join(source_dir_path, 'yob1900.txt'))
names = pd.read_csv(source_file, names=['Name', 'Gender', 'Count'])
# print(names.head(10))
# print(names.query('Count > 5000 & Gender == "M"'))
# print(names[(names.Count > 5000) | (names.Gender == 'M')].head(10))
# print(names[(names.Count > 5000) | (names.Name.str.startswith('M'))].head(10))
print(names[(names.Count > 3000) & (names.Name.str.startswith('M'))].head(10))
# dest_file = os.path.normpath(os.path.join(source_dir_path, 'yob1900.csv'))
    # names[(names.Count > 3000) & (names.Name.str.startswith('M'))].to_csv(dest_file, index=False)  # add header=False to avoid writing the table header row to the file
# print(names[(names.Count > 3000) & (names.Name.str.startswith('M'))].sort_values(by=Name, ascending=False))
# print(names.sort_values(by=Name, ascending=False))
print()
# print(names.query('Gender == "M"').Count.sum())
    # names['Len'] = names.apply(count_of_len, axis=1).head(10)  # axis=1 applies the function row by row
    names['Len'] = names.apply(lambda row: len(row.Name), axis=1).head(10)  # axis=1 applies the function row by row
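    # Note: the .head(10) above keeps only the first ten computed lengths, so the
    # remaining rows of the Len column become NaN on assignment. A vectorized
    # alternative that covers every row would be:
    #     names['Len'] = names.Name.str.len()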
print(names.head(10))
exit(0)
if __name__ == '__main__':
main()
| gpl-3.0 |
scienceopen/hist-feasibility | Plots/stormer_height_aurora.py | 2 | 2019 | #!/usr/bin/env python
"""
Stormer's trigonometric auroral height determination
Egeland 2013 p. 98
Explores effects of features at different heights vs. observer standoff distance.
Shows difficulty of fine-scale auroral observation:
1. need closely spaced cameras to unambiguously estimate fine scale (Semeter 2012)
2. close cameras present difficult but solvable inversion problem (Hirsch 2015, 2017)
"""
import numpy as np
from matplotlib.pyplot import figure,show
import seaborn as sns
sns.set_context('talk',font_scale=1)
sns.set_style('whitegrid')
sns.set_palette('cubehelix')
b = 88 # angle to feature from observer #2
a = np.arange(45,b-.5,0.01) #degrees # angle to feature from observer #1
d = np.array([3.4,4.3,20,50]) # observer standoff distance [km]
br = np.radians(b); ar= np.radians(a)
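# Geometry: two stations a baseline distance d apart sight the same feature at
# elevation angles a and b; triangulation then gives the feature altitude
#     h = d * sin(a) * sin(b) / sin(b - a)
# which is evaluated below over the range of a for several baselines d.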
h = np.sin(ar) * np.sin(br) / np.sin(br-ar) * d[:,None]
h[(75>h) | (h>300)] = np.nan
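# Mask altitudes outside the plausible auroral emission range (~75-300 km) so
# they drop out of the plots.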
#%%
fg=figure(); fg.clf()
axs = fg.subplots(2,1,sharex=True)
ax=axs[0]
ax.plot(a,h.T)
ax.set_ylabel('altitude [km]')
ax.set_title('Størmer trigonometric altitude vs. observer standoff distance [km]')
ax.legend(d.astype(str),loc='best')
dh = np.diff(h)
ax=axs[1]
ax.plot(a[:-1], dh.T)
#ax.set_yscale('log')
#ax.set_ylim((0,100))
ax.set_ylabel(r'd$h$/d$\alpha$ [km/degree]')
ax.set_title('Angular sensitivity vs. observer standoff distance [km]')
ax.legend(d.astype(str),loc='best')
ax.set_xlabel(r'observer angle $\alpha$ [deg]')
show()
#%% symbolic
# Broken? Doesn't solve.
if False:
from sympy import sin,solve,Symbol
a = Symbol("a",real=True,positive=True)
b = Symbol("b",real=True,positive=True)
h=100
S=solve(sin(a) * sin(b) / sin(b-a) * d - h,(a,b) )
print(S)
#%% Human auditory reaction time
# I neglected to write down which article these came from
# was thinking about error due to human shutter reaction time to aurora/telephone stimulus
rt_ms = np.array([75,85,85,85,85,85,95,95])
print('Human auditory reaction time std() {:.1f} [ms] mean: {:.1f}'.format(rt_ms.std(),rt_ms.mean()))
| gpl-3.0 |
kushalbhola/MyStuff | Practice/PythonApplication/env/Lib/site-packages/pandas/tests/tslibs/test_liboffsets.py | 3 | 5046 | """
Tests for helper functions in the cython tslibs.offsets
"""
from datetime import datetime
import pytest
import pandas._libs.tslibs.offsets as liboffsets
from pandas._libs.tslibs.offsets import roll_qtrday
from pandas import Timestamp
@pytest.fixture(params=["start", "end", "business_start", "business_end"])
def day_opt(request):
return request.param
@pytest.mark.parametrize(
"dt,exp_week_day,exp_last_day",
[
(datetime(2017, 11, 30), 3, 30), # Business day.
(datetime(1993, 10, 31), 6, 29), # Non-business day.
],
)
def test_get_last_bday(dt, exp_week_day, exp_last_day):
assert dt.weekday() == exp_week_day
assert liboffsets.get_lastbday(dt.year, dt.month) == exp_last_day
@pytest.mark.parametrize(
"dt,exp_week_day,exp_first_day",
[
(datetime(2017, 4, 1), 5, 3), # Non-weekday.
(datetime(1993, 10, 1), 4, 1), # Business day.
],
)
def test_get_first_bday(dt, exp_week_day, exp_first_day):
assert dt.weekday() == exp_week_day
assert liboffsets.get_firstbday(dt.year, dt.month) == exp_first_day
@pytest.mark.parametrize(
"months,day_opt,expected",
[
(0, 15, datetime(2017, 11, 15)),
(0, None, datetime(2017, 11, 30)),
(1, "start", datetime(2017, 12, 1)),
(-145, "end", datetime(2005, 10, 31)),
(0, "business_end", datetime(2017, 11, 30)),
(0, "business_start", datetime(2017, 11, 1)),
],
)
def test_shift_month_dt(months, day_opt, expected):
dt = datetime(2017, 11, 30)
assert liboffsets.shift_month(dt, months, day_opt=day_opt) == expected
@pytest.mark.parametrize(
"months,day_opt,expected",
[
(1, "start", Timestamp("1929-06-01")),
(-3, "end", Timestamp("1929-02-28")),
(25, None, Timestamp("1931-06-5")),
(-1, 31, Timestamp("1929-04-30")),
],
)
def test_shift_month_ts(months, day_opt, expected):
ts = Timestamp("1929-05-05")
assert liboffsets.shift_month(ts, months, day_opt=day_opt) == expected
def test_shift_month_error():
dt = datetime(2017, 11, 15)
day_opt = "this should raise"
with pytest.raises(ValueError, match=day_opt):
liboffsets.shift_month(dt, 3, day_opt=day_opt)
@pytest.mark.parametrize(
"other,expected",
[
# Before March 1.
(datetime(2017, 2, 10), {2: 1, -7: -7, 0: 0}),
# After March 1.
(Timestamp("2014-03-15", tz="US/Eastern"), {2: 2, -7: -6, 0: 1}),
],
)
@pytest.mark.parametrize("n", [2, -7, 0])
def test_roll_yearday(other, expected, n):
month = 3
day_opt = "start" # `other` will be compared to March 1.
assert liboffsets.roll_yearday(other, n, month, day_opt) == expected[n]
@pytest.mark.parametrize(
"other,expected",
[
# Before June 30.
(datetime(1999, 6, 29), {5: 4, -7: -7, 0: 0}),
# After June 30.
(Timestamp(2072, 8, 24, 6, 17, 18), {5: 5, -7: -6, 0: 1}),
],
)
@pytest.mark.parametrize("n", [5, -7, 0])
def test_roll_yearday2(other, expected, n):
month = 6
day_opt = "end" # `other` will be compared to June 30.
assert liboffsets.roll_yearday(other, n, month, day_opt) == expected[n]
def test_get_day_of_month_error():
# get_day_of_month is not directly exposed.
# We test it via roll_yearday.
dt = datetime(2017, 11, 15)
day_opt = "foo"
with pytest.raises(ValueError, match=day_opt):
# To hit the raising case we need month == dt.month and n > 0.
liboffsets.roll_yearday(dt, n=3, month=11, day_opt=day_opt)
@pytest.mark.parametrize(
"month",
[3, 5], # (other.month % 3) < (month % 3) # (other.month % 3) > (month % 3)
)
@pytest.mark.parametrize("n", [4, -3])
def test_roll_qtr_day_not_mod_unequal(day_opt, month, n):
expected = {3: {-3: -2, 4: 4}, 5: {-3: -3, 4: 3}}
other = Timestamp(2072, 10, 1, 6, 17, 18) # Saturday.
assert roll_qtrday(other, n, month, day_opt, modby=3) == expected[month][n]
@pytest.mark.parametrize(
"other,month,exp_dict",
[
# Monday.
(datetime(1999, 5, 31), 2, {-1: {"start": 0, "business_start": 0}}),
# Saturday.
(
Timestamp(2072, 10, 1, 6, 17, 18),
4,
{2: {"end": 1, "business_end": 1, "business_start": 1}},
),
# First business day.
(
Timestamp(2072, 10, 3, 6, 17, 18),
4,
{2: {"end": 1, "business_end": 1}, -1: {"start": 0}},
),
],
)
@pytest.mark.parametrize("n", [2, -1])
def test_roll_qtr_day_mod_equal(other, month, exp_dict, n, day_opt):
# All cases have (other.month % 3) == (month % 3).
expected = exp_dict.get(n, {}).get(day_opt, n)
assert roll_qtrday(other, n, month, day_opt, modby=3) == expected
@pytest.mark.parametrize(
"n,expected", [(42, {29: 42, 1: 42, 31: 41}), (-4, {29: -4, 1: -3, 31: -4})]
)
@pytest.mark.parametrize("compare", [29, 1, 31])
def test_roll_convention(n, expected, compare):
assert liboffsets.roll_convention(29, n, compare) == expected[compare]
| apache-2.0 |
Seynen/egfrd | test/p.py | 6 | 5025 | #!/usr/bin/env python
import sys
import numpy
from matplotlib.pylab import *
import _gfrd
N_A = 6.0221367e23
sigma = 1e-8
#r0 = sigma
D = 1e-12
#kf = 1000 * sigma * D
#kf=1e-8
#kf=1e-10
kf=1e-10
#a = 1e-7
a = sigma*5
#r0 = a * (1.0-1e-7)
r0 = sigma * 3
#r0 = a * 0.999
#r0 = (a-sigma) * 0.5 + sigma
tau = sigma*sigma / D
#T = tau * .1
#T = 1e-300
T = 1e-2
rmin = sigma
def plot_p_survival(gf, T):
N = 1000
x = numpy.mgrid[0:T:T/N]
parray1 = numpy.array([gf.p_survival(t) for t in x])
plot(x, parray1, '-', label='psurvival')
def plot_p_survival_i(gf):
N = 1000
x = range(N)
parray1 = numpy.array([gf.p_survival_i_exp(i, T, r0) for i in x])
print len(parray1[:-1]), len(parray1[1:])
parray2 = parray1[:-1:2] + parray1[1::2]
parray2 = parray2[:-1:2] + parray2[1::2]
plot(range(len(parray2)), parray2, '-', label='psurvival_i')
plot(range(len(parray2)), parray2.cumsum(), '-', label='psurvival_i_sum')
#plot(range(N), parray1, '.', label='psurvival_i')
def plot_p_survival_alpha(gf):
N = 1000
alpha = numpy.array(range(N)) * 1e-3
parray1 = numpy.array([gf.p_survival_i_alpha(al, T, r0) for al in alpha])
print len(parray1[:-1]), len(parray1[1:])
parray2 = parray1[:-1:2] + parray1[1::2]
parray2 = parray2[:-1:2] + parray2[1::2]
#plot(range(len(parray2)), parray2, '-', label='psurvival_i')
#plot(range(len(parray2)), parray2.cumsum(), '-', label='psurvival_i_sum')
plot(alpha, parray1, '.', label='psurvival_i')
def plot_p_leaveas(gf, t):
N = 100000
tmax = 1e-3
tmin = 1e-10
ttick = (tmax - tmin) / N
tarray = numpy.mgrid[tmin:tmax:ttick]
parray1 = array([1 - gf.p_survival(t, r0) for t in tarray])
semilogx(tarray , parray1, '-', label='psurvival')
gf2 = _gfrd.GreensFunction3DRadInf(D, kf, sigma)
parray2 = array([1 - gf2.p_survival(t, r0) for t in tarray])
semilogx(tarray , parray2, '-', label='psurvival basic')
# parray2 = array([gf.p_leavea(t, r0) for t in tarray])
# parray2 = 1 - parray2# / gf.p_leavea(0, r0)
# semilogx(tarray , parray2, '-', label='pleavea')
# parray3 = array([gf.p_leaves(t, r0) for t in tarray])
# parray3 = 1 - parray3# / gf.p_leaves(0, r0)
# semilogx(tarray , parray3, '-', label='pleaves')
# semilogx(tarray , parray2 + parray3 - 1, '-', label='s+a')
#semilogx(tarray , parray2 + parray3, '-', label='a+s')
def plot_leaveas(gf, t):
N = 3000
#tmax = 2.4e-5
#tmin = 1.1e-5
tmax = 2.5e-2
tmin = 2.2e-8
ttick = (tmax - tmin) / N
tarray = numpy.mgrid[tmin:tmax:ttick]
#parray1 = array([1 - gf.p_survival(t, r0) for t in tarray])
#semilogx(tarray , parray1, '-', label='psurvival')
parray2 = array([gf.leavea(t, r0) * 4 * numpy.pi * a * a
for t in tarray])
parray3 = array([gf.leaves(t, r0) * 4 * numpy.pi * sigma * sigma
for t in tarray])
parray4 = array([gf.dp_survival(t, r0) for t in tarray])
#semilogx(tarray, parray2 / (parray2+parray3), '-', label='leavea')
semilogx(tarray, parray2, '-', label='leavea')
semilogx(tarray, parray3, '-', label='leaves')
#semilogx(tarray, parray3 / (parray2+parray3), '-', label='leaves')
#semilogx(tarray, parray4 / gf.dp_survival(0,r0) , '-', label='dp_survival')
#semilogx(tarray, (parray2 + parray3)/(parray2[0]+parray3[0]) , '-', label='a+s')
#semilogx(tarray , parray2, '-', label='a')
#semilogx(tarray , parray3, '-', label='s')
def plot_p_int_r(gf, t):
N = 10000
rmax = min(a, r0 + 4 * math.sqrt(6 * D * t))
#rmax = a
#rmin = max(sigma, r0 - 4 * math.sqrt(6 * D * t))
#rmin = max(0, r0 - 4 * math.sqrt(6 * D * t))
rmin = 0
rtick = (rmax - rmin) / N
rarray = numpy.mgrid[rmin:rmax:rtick]
#surv = gf.p_survival(t, r0)
surv = gf.p_survival(t)
print surv
#parray = array([gf.p_int_r(r, t, r0) for r in rarray]) / surv
parray = array([gf.p_int_r(r, t) for r in rarray]) / surv
plot(rarray / sigma , parray, '-', label='f')
def plot_ip_theta(gf, r, t):
N = 300
thetamax = 0
thetamin = numpy.pi
thetatick = (thetamax - thetamin) / N
thetaarray = numpy.mgrid[thetamin:thetamax:thetatick]
p0 = gf.p_0(t, r, r0) * 2
parray = array([gf.ip_theta(theta, r, r0, t)
for theta in thetaarray])
parray /= p0
plot(thetaarray, parray, '-', label='f')
def p(r, t):
surv = gf.p_survival(t, r0)
return gf.p_int_r(r, t, r0) / surv
if __name__ == '__main__':
#gf = _gfrd.GreensFunction3DRadAbs(D, kf, sigma)
#gf = _gfrd.GreensFunction3DAbs(D)
gf = _gfrd.GreensFunction3DAbsSym(D)
gf.seta(a)
#plot_p_int_r(gf, T)
plot_p_int_r(gf, 1e-6)
#plot_p_survival(gf, T)
#plot_ip_theta(gf, r0, T)
#plot_p_leaveas(gf, r0)
#plot_leaveas(gf, r0)
#plot_p_survival_i(gf)
#plot_p_survival_alpha(gf)
#xlabel('r / sigma')
#ylabel('p_irr')
legend()
show()
| gpl-2.0 |
MST-MRR/DroneKit | Flight/Receiver.py | 1 | 1826 | import socket
from threading import Thread
import pickle
from PIL import Image
from matplotlib import pyplot as plt
# needs to be the same on the client
PORT_NUMBER = 8089
UP_BOARD_IP = 'localhost' #'192.168.12.1' actually I don't know what address to put here
global color_image
global depth_image
import socket
import numpy as np
from cStringIO import StringIO
class numpysocket():
def __init__(self):
pass
@staticmethod
def startServer():
port=PORT_NUMBER
server_socket=socket.socket()
server_socket.bind(('',port))
server_socket.listen(1)
print 'waiting for a connection...'
client_connection,client_address=server_socket.accept()
print 'connected to ',client_address[0]
ultimate_buffer=''
while True:
receiving_buffer = client_connection.recv(1024)
if not receiving_buffer: break
ultimate_buffer+= receiving_buffer
print '-',
final_image=np.load(StringIO(ultimate_buffer))['frame']
client_connection.close()
server_socket.close()
print '\nframe received'
return final_image
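    # --- Illustrative sketch (not part of the original file) ----------------
    # A minimal client-side counterpart to startServer(), assuming the sender
    # holds the frame as a numpy array. It serializes the array under the key
    # 'frame' (the key startServer() looks up) and streams the bytes to the
    # server; the default ip/port simply reuse the constants defined above.
    @staticmethod
    def startClient(frame, server_ip=UP_BOARD_IP, port=PORT_NUMBER):
        buf = StringIO()
        np.savez(buf, frame=frame)   # startServer() does np.load(...)['frame']
        buf.seek(0)
        client_socket = socket.socket()
        client_socket.connect((server_ip, port))
        client_socket.sendall(buf.read())
        client_socket.close()        # closing the socket tells the server the frame is complete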
def Receiver():
    global color_image
    buf = ""
    while True:
        color = numpysocket.startServer()
        #size = int( connection.recv(6) )
        #print size
        #buf = connection.recv(size)
        #print buf
        #color = pickle.loads(buf)
        color_image = color  # keep the most recent frame so get_last_color_frame() works
        plt.imshow(color, interpolation='nearest')
plt.show()
print color
def get_last_color_frame():
return color_image
def get_last_depth_frame():
return depth_image
def Main():
print( "beginning of main" )
t1 = Thread(target=Receiver, args=())
print( "middle of main" )
t1.start()
print( "end of main" )
if __name__ == '__main__':
Main()
| mit |
jdavidrcamacho/Tests_GP | 02 - Programs being tested/02 - optimization test files/optimization tests 2/testsscipy.py | 1 | 2521 | # -*- coding: utf-8 -*-
"""
Created on Wed Dec 7 10:39:14 2016
@author: camacho
"""
import Kernel;reload(Kernel);kl = Kernel
import Kernel_likelihood;reload(Kernel_likelihood); lk= Kernel_likelihood
import Kernel_optimization;reload(Kernel_optimization); opt=Kernel_optimization
#import kernel_optimizationTEST as opt
import numpy as np
import matplotlib.pyplot as pl
import george
import george.kernels as ge
#INITIAL DATA
np.random.seed(1001)
x1 = 10 * np.sort(np.random.rand(101))
yerr1 = 0.2 * np.ones_like(x1)
y1 = np.sin(x1) + yerr1 * np.random.randn(len(x1))
###############################################################################
print '########## Calculations from george ##########'
kernel = ge.ExpSine2Kernel(2.0/1.1**2, 7.1)
gp = george.GP(kernel)
gp.compute(x1,yerr1)
print 'Initial kernel ->', kernel
print 'likelihood_george ->', gp.lnlikelihood(y1)
print 'gradient_george ->', gp.grad_lnlikelihood(y1); print ''
### OPTIMIZE HYPERPARAMETERS
import scipy.optimize as op
# Define the objective function (negative log-likelihood in this case).
def nll(p):
# Update the kernel parameters and compute the likelihood.
gp.kernel[:] = p
#print 'Ps:',p
ll = gp.lnlikelihood(y1, quiet=True)
#print 'type ll',type(ll)
# The scipy optimizer doesn't play well with infinities.
return -ll if np.isfinite(ll) else 1e25
# And the gradient of the objective function.
def grad_nll(p):
# Update the kernel parameters and compute the likelihood.
gp.kernel[:] = p
return -gp.grad_lnlikelihood(y1, quiet=True)
# You need to compute the GP once before starting the optimization.
#gp.compute(x1,yerr1)
# Run the optimization routine.
p0 = gp.kernel.vector  # george exposes the hyperparameters as their natural logs,
                       # so the optimizer works in an unconstrained (log) space
print 'p0=', type(p0); print ''
#### my own notes
#p0=np.array([1.65289256198, 7.1])
###
#nll = value of the negative log-likelihood
#p0 = log(hyperparameters) -- see the note above
#grad_nll = gradients of the negative log-likelihood
results = op.minimize(nll, p0, jac=grad_nll, \
options={'maxiter':20,'disp': True})
print 'resultados:', results
# Update the kernel and print the final log-likelihood.
gp.kernel[:] = results.x
print results.x
print 'likelihood_george =', gp.lnlikelihood(y1)
print 'kernel_george =', kernel #kernel final
###############################################################################
print '########## Calculations with our stuff ##########'
kernel1=kl.ExpSineGeorge(2.0/1.1**2, 7.1)
gp = george.GP(kernel)
gp.compute(x1,yerr1)
lk.likelihood(kernel1,x1,x1,y1,yerr1)
opt.optimize(kernel1,x1,x1,y1,yerr1)
| mit |
pypot/scikit-learn | sklearn/feature_extraction/tests/test_feature_hasher.py | 258 | 2861 | from __future__ import unicode_literals
import numpy as np
from sklearn.feature_extraction import FeatureHasher
from nose.tools import assert_raises, assert_true
from numpy.testing import assert_array_equal, assert_equal
def test_feature_hasher_dicts():
h = FeatureHasher(n_features=16)
assert_equal("dict", h.input_type)
raw_X = [{"dada": 42, "tzara": 37}, {"gaga": 17}]
X1 = FeatureHasher(n_features=16).transform(raw_X)
gen = (iter(d.items()) for d in raw_X)
X2 = FeatureHasher(n_features=16, input_type="pair").transform(gen)
assert_array_equal(X1.toarray(), X2.toarray())
def test_feature_hasher_strings():
# mix byte and Unicode strings; note that "foo" is a duplicate in row 0
raw_X = [["foo", "bar", "baz", "foo".encode("ascii")],
["bar".encode("ascii"), "baz", "quux"]]
for lg_n_features in (7, 9, 11, 16, 22):
n_features = 2 ** lg_n_features
it = (x for x in raw_X) # iterable
h = FeatureHasher(n_features, non_negative=True, input_type="string")
X = h.transform(it)
assert_equal(X.shape[0], len(raw_X))
assert_equal(X.shape[1], n_features)
assert_true(np.all(X.data > 0))
assert_equal(X[0].sum(), 4)
assert_equal(X[1].sum(), 3)
assert_equal(X.nnz, 6)
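# Illustrative sketch (not part of the original test module): with
# input_type="string" each token is hashed into one of n_features columns, so
# transforming a list of token lists yields a sparse matrix of shape
# (n_samples, n_features).
def test_feature_hasher_strings_shape_example():
    h = FeatureHasher(n_features=8, input_type="string")
    X = h.transform([["foo", "bar"], ["baz"]])
    assert_equal(X.shape, (2, 8))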
def test_feature_hasher_pairs():
raw_X = (iter(d.items()) for d in [{"foo": 1, "bar": 2},
{"baz": 3, "quux": 4, "foo": -1}])
h = FeatureHasher(n_features=16, input_type="pair")
x1, x2 = h.transform(raw_X).toarray()
x1_nz = sorted(np.abs(x1[x1 != 0]))
x2_nz = sorted(np.abs(x2[x2 != 0]))
assert_equal([1, 2], x1_nz)
assert_equal([1, 3, 4], x2_nz)
def test_hash_empty_input():
n_features = 16
raw_X = [[], (), iter(range(0))]
h = FeatureHasher(n_features=n_features, input_type="string")
X = h.transform(raw_X)
assert_array_equal(X.A, np.zeros((len(raw_X), n_features)))
def test_hasher_invalid_input():
assert_raises(ValueError, FeatureHasher, input_type="gobbledygook")
assert_raises(ValueError, FeatureHasher, n_features=-1)
assert_raises(ValueError, FeatureHasher, n_features=0)
assert_raises(TypeError, FeatureHasher, n_features='ham')
h = FeatureHasher(n_features=np.uint16(2 ** 6))
assert_raises(ValueError, h.transform, [])
assert_raises(Exception, h.transform, [[5.5]])
assert_raises(Exception, h.transform, [[None]])
def test_hasher_set_params():
# Test delayed input validation in fit (useful for grid search).
hasher = FeatureHasher()
hasher.set_params(n_features=np.inf)
assert_raises(TypeError, hasher.fit)
def test_hasher_zeros():
# Assert that no zeros are materialized in the output.
X = FeatureHasher().transform([{'foo': 0}])
assert_equal(X.data.shape, (0,))
| bsd-3-clause |
mehdidc/scikit-learn | examples/linear_model/plot_ransac.py | 250 | 1673 | """
===========================================
Robust linear model estimation using RANSAC
===========================================
In this example we see how to robustly fit a linear model to faulty data using
the RANSAC algorithm.
"""
import numpy as np
from matplotlib import pyplot as plt
from sklearn import linear_model, datasets
n_samples = 1000
n_outliers = 50
X, y, coef = datasets.make_regression(n_samples=n_samples, n_features=1,
n_informative=1, noise=10,
coef=True, random_state=0)
# Add outlier data
np.random.seed(0)
X[:n_outliers] = 3 + 0.5 * np.random.normal(size=(n_outliers, 1))
y[:n_outliers] = -3 + 10 * np.random.normal(size=n_outliers)
# Fit line using all data
model = linear_model.LinearRegression()
model.fit(X, y)
# Robustly fit linear model with RANSAC algorithm
model_ransac = linear_model.RANSACRegressor(linear_model.LinearRegression())
model_ransac.fit(X, y)
inlier_mask = model_ransac.inlier_mask_
outlier_mask = np.logical_not(inlier_mask)
# Predict data of estimated models
line_X = np.arange(-5, 5)
line_y = model.predict(line_X[:, np.newaxis])
line_y_ransac = model_ransac.predict(line_X[:, np.newaxis])
# Compare estimated coefficients
print("Estimated coefficients (true, normal, RANSAC):")
print(coef, model.coef_, model_ransac.estimator_.coef_)
plt.plot(X[inlier_mask], y[inlier_mask], '.g', label='Inliers')
plt.plot(X[outlier_mask], y[outlier_mask], '.r', label='Outliers')
plt.plot(line_X, line_y, '-k', label='Linear regressor')
plt.plot(line_X, line_y_ransac, '-b', label='RANSAC regressor')
plt.legend(loc='lower right')
plt.show()
| bsd-3-clause |
CalebBell/fluids | fluids/optional/spa.py | 1 | 49589 | # -*- coding: utf-8 -*-
"""
irradiance.py from pvlib
========================
Stripped down, vendorized version from:
https://github.com/pvlib/pvlib-python/
Calculate the solar position using the NREL SPA algorithm either using
numpy arrays or compiling the code to machine language with numba.
The rational for not including this library as a strict dependency is to avoid
including a dependency on pandas, keeping load time low, and PyPy compatibility
Created by Tony Lorenzo (@alorenzo175), Univ. of Arizona, 2015
For a full list of contributors to this file, see the `pvlib` repository.
The copyright notice (BSD-3 clause) is as follows:
BSD 3-Clause License
Copyright (c) 2013-2018, Sandia National Laboratories and pvlib python
Development Team
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
Neither the name of the {organization} nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
from __future__ import division
import os
import time
import warnings
from datetime import datetime
import math
from math import degrees, sin, cos, tan, radians, atan, asin, atan2, sqrt, acos
from fluids.constants import deg2rad, rad2deg
from fluids.numerics import sincos
__all__ = ['julian_day_dt', 'julian_day', 'julian_ephemeris_day', 'julian_century',
'julian_ephemeris_century', 'julian_ephemeris_millennium', 'heliocentric_longitude',
'heliocentric_latitude', 'heliocentric_radius_vector', 'geocentric_longitude',
'geocentric_latitude', 'mean_elongation', 'mean_anomaly_sun', 'mean_anomaly_moon',
'moon_argument_latitude', 'moon_ascending_longitude', 'longitude_nutation',
'obliquity_nutation', 'mean_ecliptic_obliquity', 'true_ecliptic_obliquity',
'aberration_correction', 'apparent_sun_longitude', 'mean_sidereal_time',
'apparent_sidereal_time', 'geocentric_sun_right_ascension', 'geocentric_sun_declination',
'local_hour_angle', 'equatorial_horizontal_parallax', 'uterm', 'xterm', 'yterm',
'parallax_sun_right_ascension', 'topocentric_sun_right_ascension', 'topocentric_sun_declination',
'topocentric_local_hour_angle', 'topocentric_elevation_angle_without_atmosphere',
'atmospheric_refraction_correction', 'topocentric_elevation_angle', 'topocentric_zenith_angle',
'topocentric_astronomers_azimuth', 'topocentric_azimuth_angle', 'sun_mean_longitude',
'equation_of_time', 'calculate_deltat', 'longitude_obliquity_nutation',
'transit_sunrise_sunset',
]
nan = float("nan")
HELIO_RADIUS_TABLE_LIST_0 = [[100013989.0, 0.0, 0.0],
[1670700.0, 3.0984635, 6283.07585],
[13956.0, 3.05525, 12566.1517],
[3084.0, 5.1985, 77713.7715],
[1628.0, 1.1739, 5753.3849],
[1576.0, 2.8469, 7860.4194],
[925.0, 5.453, 11506.77],
[542.0, 4.564, 3930.21],
[472.0, 3.661, 5884.927],
[346.0, 0.964, 5507.553],
[329.0, 5.9, 5223.694],
[307.0, 0.299, 5573.143],
[243.0, 4.273, 11790.629],
[212.0, 5.847, 1577.344],
[186.0, 5.022, 10977.079],
[175.0, 3.012, 18849.228],
[110.0, 5.055, 5486.778],
[98.0, 0.89, 6069.78],
[86.0, 5.69, 15720.84],
[86.0, 1.27, 161000.69],
[65.0, 0.27, 17260.15],
[63.0, 0.92, 529.69],
[57.0, 2.01, 83996.85],
[56.0, 5.24, 71430.7],
[49.0, 3.25, 2544.31],
[47.0, 2.58, 775.52],
[45.0, 5.54, 9437.76],
[43.0, 6.01, 6275.96],
[39.0, 5.36, 4694.0],
[38.0, 2.39, 8827.39],
[37.0, 0.83, 19651.05],
[37.0, 4.9, 12139.55],
[36.0, 1.67, 12036.46],
[35.0, 1.84, 2942.46],
[33.0, 0.24, 7084.9],
[32.0, 0.18, 5088.63],
[32.0, 1.78, 398.15],
[28.0, 1.21, 6286.6],
[28.0, 1.9, 6279.55],
[26.0, 4.59, 10447.39]]
HELIO_RADIUS_TABLE_LIST_1 = [[103019.0, 1.10749, 6283.07585],
[1721.0, 1.0644, 12566.1517],
[702.0, 3.142, 0.0],
[32.0, 1.02, 18849.23],
[31.0, 2.84, 5507.55],
[25.0, 1.32, 5223.69],
[18.0, 1.42, 1577.34],
[10.0, 5.91, 10977.08],
[9.0, 1.42, 6275.96],
[9.0, 0.27, 5486.78],
]
HELIO_RADIUS_TABLE_LIST_2 = [[4359.0, 5.7846, 6283.0758],
[124.0, 5.579, 12566.152],
[12.0, 3.14, 0.0],
[9.0, 3.63, 77713.77],
[6.0, 1.87, 5573.14],
[3.0, 5.47, 18849.23]]
HELIO_RADIUS_TABLE_LIST_3 = [[145.0, 4.273, 6283.076],
[7.0, 3.92, 12566.15]]
HELIO_RADIUS_TABLE_LIST_4 = [[4.0, 2.56, 6283.08]]
NUTATION_YTERM_LIST_0 = [0.0, -2.0, 0.0, 0.0, 0.0, 0.0, -2.0, 0.0, 0.0, -2.0, -2.0, -2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 0.0, -2.0, 0.0, 2.0, 0.0, 0.0, -2.0, 0.0, -2.0, 0.0, 0.0, 2.0, -2.0, 0.0, -2.0, 0.0, 0.0, 2.0, 2.0, 0.0, -2.0, 0.0, 2.0, 2.0, -2.0, -2.0, 2.0, 2.0, 0.0, -2.0, -2.0, 0.0, -2.0, -2.0, 0.0, -1.0, -2.0, 1.0, 0.0, 0.0, -1.0, 0.0, 0.0, 2.0, 0.0, 2.0]
NUTATION_YTERM_LIST_1 = [0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 2.0, 0.0, 2.0, 1.0, 0.0, -1.0, 0.0, 0.0, 0.0, 1.0, 1.0, -1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -1.0, -1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, -1.0, 1.0, -1.0, -1.0, 0.0, -1.0]
NUTATION_YTERM_LIST_2 = [0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, -1.0, 0.0, 1.0, -1.0, -1.0, 1.0, 2.0, -2.0, 0.0, 2.0, 2.0, 1.0, 0.0, 0.0, -1.0, 0.0, -1.0, 0.0, 0.0, 1.0, 0.0, 2.0, -1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 2.0, 1.0, -2.0, 0.0, 1.0, 0.0, 0.0, 2.0, 2.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, -2.0, 1.0, 1.0, 1.0, -1.0, 3.0, 0.0]
NUTATION_YTERM_LIST_3 = [0.0, 2.0, 2.0, 0.0, 0.0, 0.0, 2.0, 2.0, 2.0, 2.0, 0.0, 2.0, 2.0, 0.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 2.0, 2.0, 0.0, 2.0, 2.0, 2.0, 2.0, 0.0, 0.0, 2.0, 0.0, 0.0, 0.0, -2.0, 2.0, 2.0, 2.0, 0.0, 2.0, 2.0, 0.0, 2.0, 2.0, 0.0, 0.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, -2.0, 0.0, 0.0, 0.0, 2.0, 2.0, 0.0, 0.0, 2.0, 2.0, 2.0, 2.0]
NUTATION_YTERM_LIST_4 = [1.0, 2.0, 2.0, 2.0, 0.0, 0.0, 2.0, 1.0, 2.0, 2.0, 0.0, 1.0, 2.0, 0.0, 1.0, 2.0, 1.0, 1.0, 0.0, 1.0, 2.0, 2.0, 0.0, 2.0, 0.0, 0.0, 1.0, 0.0, 1.0, 2.0, 1.0, 1.0, 1.0, 0.0, 1.0, 2.0, 2.0, 0.0, 2.0, 1.0, 0.0, 2.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 2.0, 0.0, 0.0, 2.0, 2.0, 2.0, 2.0]
NUTATION_ABCD_LIST = [[-171996.0, -174.2, 92025.0, 8.9],
[-13187.0, -1.6, 5736.0, -3.1],
[-2274.0, -0.2, 977.0, -0.5],
[2062.0, 0.2, -895.0, 0.5],
[1426.0, -3.4, 54.0, -0.1],
[712.0, 0.1, -7.0, 0.0],
[-517.0, 1.2, 224.0, -0.6],
[-386.0, -0.4, 200.0, 0.0],
[-301.0, 0.0, 129.0, -0.1],
[217.0, -0.5, -95.0, 0.3],
[-158.0, 0.0, 0.0, 0.0],
[129.0, 0.1, -70.0, 0.0],
[123.0, 0.0, -53.0, 0.0],
[63.0, 0.0, 0.0, 0.0],
[63.0, 0.1, -33.0, 0.0],
[-59.0, 0.0, 26.0, 0.0],
[-58.0, -0.1, 32.0, 0.0],
[-51.0, 0.0, 27.0, 0.0],
[48.0, 0.0, 0.0, 0.0],
[46.0, 0.0, -24.0, 0.0],
[-38.0, 0.0, 16.0, 0.0],
[-31.0, 0.0, 13.0, 0.0],
[29.0, 0.0, 0.0, 0.0],
[29.0, 0.0, -12.0, 0.0],
[26.0, 0.0, 0.0, 0.0],
[-22.0, 0.0, 0.0, 0.0],
[21.0, 0.0, -10.0, 0.0],
[17.0, -0.1, 0.0, 0.0],
[16.0, 0.0, -8.0, 0.0],
[-16.0, 0.1, 7.0, 0.0],
[-15.0, 0.0, 9.0, 0.0],
[-13.0, 0.0, 7.0, 0.0],
[-12.0, 0.0, 6.0, 0.0],
[11.0, 0.0, 0.0, 0.0],
[-10.0, 0.0, 5.0, 0.0],
[-8.0, 0.0, 3.0, 0.0],
[7.0, 0.0, -3.0, 0.0],
[-7.0, 0.0, 0.0, 0.0],
[-7.0, 0.0, 3.0, 0.0],
[-7.0, 0.0, 3.0, 0.0],
[6.0, 0.0, 0.0, 0.0],
[6.0, 0.0, -3.0, 0.0],
[6.0, 0.0, -3.0, 0.0],
[-6.0, 0.0, 3.0, 0.0],
[-6.0, 0.0, 3.0, 0.0],
[5.0, 0.0, 0.0, 0.0],
[-5.0, 0.0, 3.0, 0.0],
[-5.0, 0.0, 3.0, 0.0],
[-5.0, 0.0, 3.0, 0.0],
[4.0, 0.0, 0.0, 0.0],
[4.0, 0.0, 0.0, 0.0],
[4.0, 0.0, 0.0, 0.0],
[-4.0, 0.0, 0.0, 0.0],
[-4.0, 0.0, 0.0, 0.0],
[-4.0, 0.0, 0.0, 0.0],
[3.0, 0.0, 0.0, 0.0],
[-3.0, 0.0, 0.0, 0.0],
[-3.0, 0.0, 0.0, 0.0],
[-3.0, 0.0, 0.0, 0.0],
[-3.0, 0.0, 0.0, 0.0],
[-3.0, 0.0, 0.0, 0.0],
[-3.0, 0.0, 0.0, 0.0],
[-3.0, 0.0, 0.0, 0.0]]
HELIO_LAT_TABLE_LIST_0 = [[280.0, 3.199, 84334.662],
[102.0, 5.422, 5507.553],
[80.0, 3.88, 5223.69],
[44.0, 3.7, 2352.87],
[32.0, 4.0, 1577.34]]
HELIO_LAT_TABLE_LIST_1 = [[9.0, 3.9, 5507.55],
[6.0, 1.73, 5223.69]]
#HELIO_LONG_TABLE_LIST = HELIO_LONG_TABLE.tolist()
HELIO_LONG_TABLE_LIST_0 = [[175347046.0, 0.0, 0.0],
[3341656.0, 4.6692568, 6283.07585],
[34894.0, 4.6261, 12566.1517],
[3497.0, 2.7441, 5753.3849],
[3418.0, 2.8289, 3.5231],
[3136.0, 3.6277, 77713.7715],
[2676.0, 4.4181, 7860.4194],
[2343.0, 6.1352, 3930.2097],
[1324.0, 0.7425, 11506.7698],
[1273.0, 2.0371, 529.691],
[1199.0, 1.1096, 1577.3435],
[990.0, 5.233, 5884.927],
[902.0, 2.045, 26.298],
[857.0, 3.508, 398.149],
[780.0, 1.179, 5223.694],
[753.0, 2.533, 5507.553],
[505.0, 4.583, 18849.228],
[492.0, 4.205, 775.523],
[357.0, 2.92, 0.067],
[317.0, 5.849, 11790.629],
[284.0, 1.899, 796.298],
[271.0, 0.315, 10977.079],
[243.0, 0.345, 5486.778],
[206.0, 4.806, 2544.314],
[205.0, 1.869, 5573.143],
[202.0, 2.458, 6069.777],
[156.0, 0.833, 213.299],
[132.0, 3.411, 2942.463],
[126.0, 1.083, 20.775],
[115.0, 0.645, 0.98],
[103.0, 0.636, 4694.003],
[102.0, 0.976, 15720.839],
[102.0, 4.267, 7.114],
[99.0, 6.21, 2146.17],
[98.0, 0.68, 155.42],
[86.0, 5.98, 161000.69],
[85.0, 1.3, 6275.96],
[85.0, 3.67, 71430.7],
[80.0, 1.81, 17260.15],
[79.0, 3.04, 12036.46],
[75.0, 1.76, 5088.63],
[74.0, 3.5, 3154.69],
[74.0, 4.68, 801.82],
[70.0, 0.83, 9437.76],
[62.0, 3.98, 8827.39],
[61.0, 1.82, 7084.9],
[57.0, 2.78, 6286.6],
[56.0, 4.39, 14143.5],
[56.0, 3.47, 6279.55],
[52.0, 0.19, 12139.55],
[52.0, 1.33, 1748.02],
[51.0, 0.28, 5856.48],
[49.0, 0.49, 1194.45],
[41.0, 5.37, 8429.24],
[41.0, 2.4, 19651.05],
[39.0, 6.17, 10447.39],
[37.0, 6.04, 10213.29],
[37.0, 2.57, 1059.38],
[36.0, 1.71, 2352.87],
[36.0, 1.78, 6812.77],
[33.0, 0.59, 17789.85],
[30.0, 0.44, 83996.85],
[30.0, 2.74, 1349.87],
[25.0, 3.16, 4690.48]]
HELIO_LONG_TABLE_LIST_1 = [[628331966747.0, 0.0, 0.0],
[206059.0, 2.678235, 6283.07585],
[4303.0, 2.6351, 12566.1517],
[425.0, 1.59, 3.523],
[119.0, 5.796, 26.298],
[109.0, 2.966, 1577.344],
[93.0, 2.59, 18849.23],
[72.0, 1.14, 529.69],
[68.0, 1.87, 398.15],
[67.0, 4.41, 5507.55],
[59.0, 2.89, 5223.69],
[56.0, 2.17, 155.42],
[45.0, 0.4, 796.3],
[36.0, 0.47, 775.52],
[29.0, 2.65, 7.11],
[21.0, 5.34, 0.98],
[19.0, 1.85, 5486.78],
[19.0, 4.97, 213.3],
[17.0, 2.99, 6275.96],
[16.0, 0.03, 2544.31],
[16.0, 1.43, 2146.17],
[15.0, 1.21, 10977.08],
[12.0, 2.83, 1748.02],
[12.0, 3.26, 5088.63],
[12.0, 5.27, 1194.45],
[12.0, 2.08, 4694.0],
[11.0, 0.77, 553.57],
[10.0, 1.3, 6286.6],
[10.0, 4.24, 1349.87],
[9.0, 2.7, 242.73],
[9.0, 5.64, 951.72],
[8.0, 5.3, 2352.87],
[6.0, 2.65, 9437.76],
[6.0, 4.67, 4690.48],
]
HELIO_LONG_TABLE_LIST_2 = [[52919.0, 0.0, 0.0],
[8720.0, 1.0721, 6283.0758],
[309.0, 0.867, 12566.152],
[27.0, 0.05, 3.52],
[16.0, 5.19, 26.3],
[16.0, 3.68, 155.42],
[10.0, 0.76, 18849.23],
[9.0, 2.06, 77713.77],
[7.0, 0.83, 775.52],
[5.0, 4.66, 1577.34],
[4.0, 1.03, 7.11],
[4.0, 3.44, 5573.14],
[3.0, 5.14, 796.3],
[3.0, 6.05, 5507.55],
[3.0, 1.19, 242.73],
[3.0, 6.12, 529.69],
[3.0, 0.31, 398.15],
[3.0, 2.28, 553.57],
[2.0, 4.38, 5223.69],
[2.0, 3.75, 0.98]]
HELIO_LONG_TABLE_LIST_3 = [[289.0, 5.844, 6283.076],
[35.0, 0.0, 0.0],
[17.0, 5.49, 12566.15],
[3.0, 5.2, 155.42],
[1.0, 4.72, 3.52],
[1.0, 5.3, 18849.23],
[1.0, 5.97, 242.73]
]
HELIO_LONG_TABLE_LIST_4 = [[114.0, 3.142, 0.0],
[8.0, 4.13, 6283.08],
[1.0, 3.84, 12566.15]]
def julian_day_dt(year, month, day, hour, minute, second, microsecond):
"""This is the original way to calculate the julian day from the NREL paper.
However, it is much faster to convert to unix/epoch time and then convert to
julian day. Note that the date must be UTC.
"""
# Not used anywhere!
if month <= 2:
year = year-1
month = month+12
a = int(year/100)
b = 2 - a + int(a * 0.25)
    frac_of_day = (microsecond*1E-6 + (second + minute * 60 + hour * 3600)
                   ) * 1.0 / (3600*24)
d = day + frac_of_day
jd = (int(365.25 * (year + 4716)) + int(30.6001 * (month + 1)) + d +
b - 1524.5)
return jd
def julian_day(unixtime):
jd = unixtime*1.1574074074074073e-05 + 2440587.5
# jd = unixtime/86400.0 + 2440587.5
return jd
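# --- Illustrative sketch (not part of the original module) -------------------
# julian_day() works directly from unix/epoch seconds: the Unix epoch
# (1970-01-01 00:00:00 UTC) corresponds to Julian Day 2440587.5 by definition.
def _example_julian_day():
    assert julian_day(0.0) == 2440587.5
    # 2018-04-16 00:00:00 UTC -> roughly JD 2458224.5
    return julian_day(1523836800.0)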
def julian_ephemeris_day(julian_day, delta_t):
jde = julian_day + delta_t*1.1574074074074073e-05
# jde = julian_day + delta_t * 1.0 / 86400.0
return jde
def julian_century(julian_day):
jc = (julian_day - 2451545.0)*2.7378507871321012e-05# * 1.0 / 36525
return jc
def julian_ephemeris_century(julian_ephemeris_day):
# 1/36525.0 = 2.7378507871321012e-05
jce = (julian_ephemeris_day - 2451545.0)*2.7378507871321012e-05
return jce
def julian_ephemeris_millennium(julian_ephemeris_century):
jme = julian_ephemeris_century*0.1
return jme
def heliocentric_longitude(jme):
# Might be able to replace this with a pade approximation?
# Looping over rows is probably still faster than (a, b, c)
# Maximum optimization
l0 = 0.0
l1 = 0.0
l2 = 0.0
l3 = 0.0
l4 = 0.0
l5 = 0.0
for row in range(64):
HELIO_LONG_TABLE_LIST_0_ROW = HELIO_LONG_TABLE_LIST_0[row]
l0 += (HELIO_LONG_TABLE_LIST_0_ROW[0]
* cos(HELIO_LONG_TABLE_LIST_0_ROW[1]
+ HELIO_LONG_TABLE_LIST_0_ROW[2] * jme)
)
for row in range(34):
HELIO_LONG_TABLE_LIST_1_ROW = HELIO_LONG_TABLE_LIST_1[row]
l1 += (HELIO_LONG_TABLE_LIST_1_ROW[0]
* cos(HELIO_LONG_TABLE_LIST_1_ROW[1]
+ HELIO_LONG_TABLE_LIST_1_ROW[2] * jme)
)
for row in range(20):
HELIO_LONG_TABLE_LIST_2_ROW = HELIO_LONG_TABLE_LIST_2[row]
l2 += (HELIO_LONG_TABLE_LIST_2_ROW[0]
* cos(HELIO_LONG_TABLE_LIST_2_ROW[1]
+ HELIO_LONG_TABLE_LIST_2_ROW[2] * jme)
)
for row in range(7):
HELIO_LONG_TABLE_LIST_3_ROW = HELIO_LONG_TABLE_LIST_3[row]
l3 += (HELIO_LONG_TABLE_LIST_3_ROW[0]
* cos(HELIO_LONG_TABLE_LIST_3_ROW[1]
+ HELIO_LONG_TABLE_LIST_3_ROW[2] * jme)
)
for row in range(3):
HELIO_LONG_TABLE_LIST_4_ROW = HELIO_LONG_TABLE_LIST_4[row]
l4 += (HELIO_LONG_TABLE_LIST_4_ROW[0]
* cos(HELIO_LONG_TABLE_LIST_4_ROW[1]
+ HELIO_LONG_TABLE_LIST_4_ROW[2] * jme)
)
# l5 = (HELIO_LONG_TABLE_LIST_5[0][0]*cos(HELIO_LONG_TABLE_LIST_5[0][1]))
l5 = -0.9999987317275395
l_rad = (jme*(jme*(jme*(jme*(jme*l5 + l4) + l3) + l2) + l1) + l0)*1E-8
l = rad2deg*l_rad
return l % 360
def heliocentric_latitude(jme):
b0 = 0.0
b1 = 0.0
for row in range(5):
HELIO_LAT_TABLE_LIST_0_ROW = HELIO_LAT_TABLE_LIST_0[row]
b0 += (HELIO_LAT_TABLE_LIST_0_ROW[0]
* cos(HELIO_LAT_TABLE_LIST_0_ROW[1]
+ HELIO_LAT_TABLE_LIST_0_ROW[2] * jme)
)
HELIO_LAT_TABLE_LIST_1_ROW = HELIO_LAT_TABLE_LIST_1[0]
b1 += (HELIO_LAT_TABLE_LIST_1_ROW[0]
* cos(HELIO_LAT_TABLE_LIST_1_ROW[1]
+ HELIO_LAT_TABLE_LIST_1_ROW[2] * jme))
HELIO_LAT_TABLE_LIST_1_ROW = HELIO_LAT_TABLE_LIST_1[1]
b1 += (HELIO_LAT_TABLE_LIST_1_ROW[0]
* cos(HELIO_LAT_TABLE_LIST_1_ROW[1]
+ HELIO_LAT_TABLE_LIST_1_ROW[2] * jme))
b_rad = (b0 + b1 * jme)*1E-8
b = rad2deg*b_rad
return b
def heliocentric_radius_vector(jme):
# no optimizations can be thought of
r0 = 0.0
r1 = 0.0
r2 = 0.0
r3 = 0.0
r4 = 0.0
# Would be possible to save a few multiplies of table1row[2]*jme, table1row[1]*jme as they are dups
for row in range(40):
table0row = HELIO_RADIUS_TABLE_LIST_0[row]
r0 += (table0row[0]*cos(table0row[1] + table0row[2]*jme))
for row in range(10):
table1row = HELIO_RADIUS_TABLE_LIST_1[row]
r1 += (table1row[0]*cos(table1row[1] + table1row[2]*jme))
for row in range(6):
table2row = HELIO_RADIUS_TABLE_LIST_2[row]
r2 += (table2row[0]*cos(table2row[1] + table2row[2]*jme))
table3row = HELIO_RADIUS_TABLE_LIST_3[0]
r3 += (table3row[0]*cos(table3row[1] + table3row[2]*jme))
table3row = HELIO_RADIUS_TABLE_LIST_3[1]
r3 += (table3row[0]*cos(table3row[1] + table3row[2]*jme))
# table4row = HELIO_RADIUS_TABLE_LIST_4[0]
# r4 = (table4row[0]*cos(table4row[1] + table4row[2]*jme))
r4 = (4.0*cos(2.56 + 6283.08*jme))
return (jme*(jme*(jme*(jme*r4 + r3) + r2) + r1) + r0)*1E-8
def geocentric_longitude(heliocentric_longitude):
theta = heliocentric_longitude + 180.0
return theta % 360
def geocentric_latitude(heliocentric_latitude):
beta = -heliocentric_latitude
return beta
def mean_elongation(julian_ephemeris_century):
return (julian_ephemeris_century*(julian_ephemeris_century
*(5.27776898149614e-6*julian_ephemeris_century - 0.0019142)
+ 445267.11148) + 297.85036)
# x0 = (297.85036
# + 445267.111480 * julian_ephemeris_century
# - 0.0019142 * julian_ephemeris_century**2
# + julian_ephemeris_century**3 / 189474.0)
# return x0
def mean_anomaly_sun(julian_ephemeris_century):
return (julian_ephemeris_century*(julian_ephemeris_century*(
-3.33333333333333e-6*julian_ephemeris_century - 0.0001603)
+ 35999.05034) + 357.52772)
# x1 = (357.52772
# + 35999.050340 * julian_ephemeris_century
# - 0.0001603 * julian_ephemeris_century**2
# - julian_ephemeris_century**3 / 300000.0)
# return x1
def mean_anomaly_moon(julian_ephemeris_century):
return (julian_ephemeris_century*(julian_ephemeris_century*(
1.77777777777778e-5*julian_ephemeris_century + 0.0086972)
+ 477198.867398) + 134.96298)
# x2 = (134.96298
# + 477198.867398 * julian_ephemeris_century
# + 0.0086972 * julian_ephemeris_century**2
# + julian_ephemeris_century**3 / 56250)
# return x2
def moon_argument_latitude(julian_ephemeris_century):
return julian_ephemeris_century*(julian_ephemeris_century*(
3.05558101873071e-6*julian_ephemeris_century - 0.0036825)
+ 483202.017538) + 93.27191
# x3 = (93.27191
# + 483202.017538 * julian_ephemeris_century
# - 0.0036825 * julian_ephemeris_century**2
# + julian_ephemeris_century**3 / 327270)
# return x3
def moon_ascending_longitude(julian_ephemeris_century):
return (julian_ephemeris_century*(julian_ephemeris_century*(
2.22222222222222e-6*julian_ephemeris_century + 0.0020708)
- 1934.136261) + 125.04452)
# x4 = (125.04452
# - 1934.136261 * julian_ephemeris_century
# + 0.0020708 * julian_ephemeris_century**2
# + julian_ephemeris_century**3 / 450000)
# return x4
def longitude_obliquity_nutation(julian_ephemeris_century, x0, x1, x2, x3, x4):
x0, x1, x2, x3, x4 = deg2rad*x0, deg2rad*x1, deg2rad*x2, deg2rad*x3, deg2rad*x4
delta_psi_sum = 0.0
delta_eps_sum = 0.0
# If the sincos formulation is used, the speed up is ~8% with numba.
for row in range(63):
arg = (NUTATION_YTERM_LIST_0[row]*x0 +
NUTATION_YTERM_LIST_1[row]*x1 +
NUTATION_YTERM_LIST_2[row]*x2 +
NUTATION_YTERM_LIST_3[row]*x3 +
NUTATION_YTERM_LIST_4[row]*x4)
arr = NUTATION_ABCD_LIST[row]
sinarg, cosarg = sincos(arg)
# sinarg = sin(arg)
# cosarg = sqrt(1.0 - sinarg*sinarg)
t0 = (arr[0] + julian_ephemeris_century*arr[1])
delta_psi_sum += t0*sinarg
# delta_psi_sum += t0*sin(arg)
t0 = (arr[2] + julian_ephemeris_century*arr[3])
delta_eps_sum += t0*cosarg
# delta_eps_sum += t0*cos(arg)
delta_psi = delta_psi_sum/36000000.0
delta_eps = delta_eps_sum/36000000.0
res = [0.0]*2
res[0] = delta_psi
res[1] = delta_eps
return res
def longitude_nutation(julian_ephemeris_century, x0, x1, x2, x3, x4):
x0, x1, x2, x3, x4 = deg2rad*x0, deg2rad*x1, deg2rad*x2, deg2rad*x3, deg2rad*x4
delta_psi_sum = 0.0
for row in range(63):
# # None can be skipped but the multiplies can be with effort -2 to 2 with dict - just might be slower
argsin = (NUTATION_YTERM_LIST_0[row]*x0 +
NUTATION_YTERM_LIST_1[row]*x1 +
NUTATION_YTERM_LIST_2[row]*x2 +
NUTATION_YTERM_LIST_3[row]*x3 +
NUTATION_YTERM_LIST_4[row]*x4)
term = (NUTATION_ABCD_LIST[row][0] + NUTATION_ABCD_LIST[row][1]
* julian_ephemeris_century)*sin(argsin)
delta_psi_sum += term
delta_psi = delta_psi_sum/36000000.0
return delta_psi
def obliquity_nutation(julian_ephemeris_century, x0, x1, x2, x3, x4):
delta_eps_sum = 0.0
x0, x1, x2, x3, x4 = deg2rad*x0, deg2rad*x1, deg2rad*x2, deg2rad*x3, deg2rad*x4
for row in range(63):
argcos = (NUTATION_YTERM_LIST_0[row]*x0 +
NUTATION_YTERM_LIST_1[row]*x1 +
NUTATION_YTERM_LIST_2[row]*x2 +
NUTATION_YTERM_LIST_3[row]*x3 +
NUTATION_YTERM_LIST_4[row]*x4)
term = (NUTATION_ABCD_LIST[row][2]
+ NUTATION_ABCD_LIST[row][3]*julian_ephemeris_century)*cos(argcos)
delta_eps_sum += term
delta_eps = delta_eps_sum/36000000.0
return delta_eps
def mean_ecliptic_obliquity(julian_ephemeris_millennium):
U = 0.1*julian_ephemeris_millennium
e0 = (U*(U*(U*(U*(U*(U*(U*(U*(U*(2.45*U + 5.79) + 27.87) + 7.12) - 39.05)
- 249.67) - 51.38) + 1999.25) - 1.55) - 4680.93) + 84381.448)
return e0
def true_ecliptic_obliquity(mean_ecliptic_obliquity, obliquity_nutation):
# e0 = mean_ecliptic_obliquity
# deleps = obliquity_nutation
return mean_ecliptic_obliquity*0.0002777777777777778 + obliquity_nutation
# e = e0/3600.0 + deleps
# return e
def aberration_correction(earth_radius_vector):
# -20.4898 / (3600)
deltau = -0.005691611111111111/earth_radius_vector
return deltau
def apparent_sun_longitude(geocentric_longitude, longitude_nutation,
aberration_correction):
lamd = geocentric_longitude + longitude_nutation + aberration_correction
return lamd
def mean_sidereal_time(julian_day, julian_century):
julian_century2 = julian_century*julian_century
v0 = (280.46061837 + 360.98564736629*(julian_day - 2451545.0)
+ 0.000387933*julian_century2
- julian_century2*julian_century/38710000.0)
return v0 % 360.0
def apparent_sidereal_time(mean_sidereal_time, longitude_nutation,
true_ecliptic_obliquity):
v = mean_sidereal_time + longitude_nutation*cos(deg2rad*true_ecliptic_obliquity)
return v
def geocentric_sun_right_ascension(apparent_sun_longitude,
true_ecliptic_obliquity,
geocentric_latitude):
num = (sin(deg2rad*apparent_sun_longitude)
* cos(deg2rad*true_ecliptic_obliquity)
- tan(deg2rad*geocentric_latitude)
* sin(deg2rad*true_ecliptic_obliquity))
alpha = degrees(atan2(num, cos(
deg2rad*apparent_sun_longitude)))
return alpha % 360
def geocentric_sun_declination(apparent_sun_longitude, true_ecliptic_obliquity,
geocentric_latitude):
delta = degrees(asin(sin(deg2rad*geocentric_latitude) *
cos(deg2rad*true_ecliptic_obliquity) +
cos(deg2rad*geocentric_latitude) *
sin(deg2rad*true_ecliptic_obliquity) *
sin(deg2rad*apparent_sun_longitude)))
return delta
def local_hour_angle(apparent_sidereal_time, observer_longitude,
sun_right_ascension):
"""Measured westward from south."""
H = apparent_sidereal_time + observer_longitude - sun_right_ascension
return H % 360
def equatorial_horizontal_parallax(earth_radius_vector):
return 8.794 / (3600.0 * earth_radius_vector)
def uterm(observer_latitude):
u = atan(0.99664719*tan(deg2rad*observer_latitude))
return u
def xterm(u, observer_latitude, observer_elevation):
# 1/6378140.0 = const
x = (cos(u) + observer_elevation*1.5678552054360676e-07*cos(deg2rad*observer_latitude))
return x
def yterm(u, observer_latitude, observer_elevation):
# 1/6378140.0 = const
y = (0.99664719 * sin(u) + observer_elevation*1.5678552054360676e-07
* sin(deg2rad*observer_latitude))
return y
def parallax_sun_right_ascension(xterm, equatorial_horizontal_parallax,
local_hour_angle, geocentric_sun_declination):
x0 = sin(deg2rad*equatorial_horizontal_parallax)
x1 = deg2rad*local_hour_angle
num = -xterm*x0*sin(x1)
denom = (cos(deg2rad*geocentric_sun_declination) - xterm*x0 * cos(x1))
delta_alpha = degrees(atan2(num, denom))
return delta_alpha
def topocentric_sun_right_ascension(geocentric_sun_right_ascension,
parallax_sun_right_ascension):
alpha_prime = geocentric_sun_right_ascension + parallax_sun_right_ascension
return alpha_prime
def topocentric_sun_declination(geocentric_sun_declination, xterm, yterm,
equatorial_horizontal_parallax,
parallax_sun_right_ascension,
local_hour_angle):
x0 = sin(deg2rad*equatorial_horizontal_parallax)
num = ((sin(deg2rad*geocentric_sun_declination) - yterm
* x0)
* cos(deg2rad*parallax_sun_right_ascension))
denom = (cos(deg2rad*geocentric_sun_declination) - xterm
* x0
* cos(deg2rad*local_hour_angle))
delta = degrees(atan2(num, denom))
return delta
def topocentric_local_hour_angle(local_hour_angle,
parallax_sun_right_ascension):
H_prime = local_hour_angle - parallax_sun_right_ascension
return H_prime
def topocentric_elevation_angle_without_atmosphere(observer_latitude,
topocentric_sun_declination,
topocentric_local_hour_angle
):
observer_latitude = observer_latitude
topocentric_sun_declination = topocentric_sun_declination
topocentric_local_hour_angle = topocentric_local_hour_angle
r_observer_latitude = deg2rad*observer_latitude
r_topocentric_sun_declination = deg2rad*topocentric_sun_declination
e0 = degrees(asin(
sin(r_observer_latitude)
* sin(r_topocentric_sun_declination)
+ cos(r_observer_latitude)
* cos(r_topocentric_sun_declination)
* cos(deg2rad*topocentric_local_hour_angle)))
return e0
def atmospheric_refraction_correction(local_pressure, local_temp,
topocentric_elevation_angle_wo_atmosphere,
atmos_refract):
# switch sets delta_e when the sun is below the horizon
switch = topocentric_elevation_angle_wo_atmosphere >= -1.0 * (
0.26667 + atmos_refract)
delta_e = ((local_pressure / 1010.0) * (283.0 / (273.0 + local_temp))
* 1.02 / (60.0 * tan(deg2rad*(
topocentric_elevation_angle_wo_atmosphere
+ 10.3 / (topocentric_elevation_angle_wo_atmosphere
+ 5.11))))) * switch
return delta_e
def topocentric_elevation_angle(topocentric_elevation_angle_without_atmosphere,
atmospheric_refraction_correction):
e = (topocentric_elevation_angle_without_atmosphere
+ atmospheric_refraction_correction)
return e
def topocentric_zenith_angle(topocentric_elevation_angle):
theta = 90.0 - topocentric_elevation_angle
return theta
def topocentric_astronomers_azimuth(topocentric_local_hour_angle,
topocentric_sun_declination,
observer_latitude):
num = sin(deg2rad*topocentric_local_hour_angle)
denom = (cos(deg2rad*topocentric_local_hour_angle)
* sin(deg2rad*observer_latitude)
- tan(deg2rad*topocentric_sun_declination)
* cos(deg2rad*observer_latitude))
gamma = degrees(atan2(num, denom))
return gamma % 360.0
def topocentric_azimuth_angle(topocentric_astronomers_azimuth):
phi = topocentric_astronomers_azimuth + 180.0
return phi % 360.0
def sun_mean_longitude(julian_ephemeris_millennium):
M = julian_ephemeris_millennium*(julian_ephemeris_millennium*(
julian_ephemeris_millennium*(julian_ephemeris_millennium*(
-5.0e-7*julian_ephemeris_millennium - 6.5359477124183e-5)
+ 2.00276381406341e-5) + 0.03032028) + 360007.6982779) + 280.4664567
return M
#@jcompile('float64(float64, float64, float64, float64)', nopython=True)
def equation_of_time(sun_mean_longitude, geocentric_sun_right_ascension,
longitude_nutation, true_ecliptic_obliquity):
term = cos(deg2rad*true_ecliptic_obliquity)
E = (sun_mean_longitude - 0.0057183 - geocentric_sun_right_ascension +
longitude_nutation * term)
# limit between 0 and 360
E = E % 360
# convert to minutes
E *= 4.0
greater = E > 20.0
less = E < -20.0
other = (E <= 20.0) & (E >= -20.0)
E = greater * (E - 1440.0) + less * (E + 1440.0) + other * E
return E
def earthsun_distance(unixtime, delta_t):
"""Calculates the distance from the earth to the sun using the NREL SPA
algorithm described in [1].
Parameters
----------
unixtime : numpy array
Array of unix/epoch timestamps to calculate solar position for.
Unixtime is the number of seconds since Jan. 1, 1970 00:00:00 UTC.
A pandas.DatetimeIndex is easily converted using .astype(np.int64)/10**9
delta_t : float
Difference between terrestrial time and UT. USNO has tables.
Returns
-------
R : array
Earth-Sun distance in AU.
References
----------
[1] Reda, I., Andreas, A., 2003. Solar position algorithm for solar
radiation applications. Technical report: NREL/TP-560- 34302. Golden,
USA, http://www.nrel.gov.
"""
jd = julian_day(unixtime)
jde = julian_ephemeris_day(jd, delta_t)
jce = julian_ephemeris_century(jde)
jme = julian_ephemeris_millennium(jce)
R = heliocentric_radius_vector(jme)
return R
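# --- Illustrative sketch (not part of the original module) -------------------
# Minimal usage example. delta_t below is an assumed, typical value for 2018
# (it matches the transit_sunrise_sunset docstring example); the exact number
# barely changes the result. Over a year the Earth-Sun distance stays between
# roughly 0.983 and 1.017 AU.
def _example_earthsun_distance():
    R = earthsun_distance(1523836800.0, 70.7)   # 2018-04-16 00:00:00 UTC
    assert 0.98 < R < 1.02
    return R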
def solar_position(unixtime, lat, lon, elev, pressure, temp, delta_t,
atmos_refract, sst=False):
"""Calculate the solar position using the NREL SPA algorithm described in
[1].
If numba is installed, the functions can be compiled
and the code runs quickly. If not, the functions
still evaluate but use numpy instead.
Parameters
----------
unixtime : numpy array
Array of unix/epoch timestamps to calculate solar position for.
Unixtime is the number of seconds since Jan. 1, 1970 00:00:00 UTC.
A pandas.DatetimeIndex is easily converted using .astype(np.int64)/10**9
lat : float
Latitude to calculate solar position for
lon : float
Longitude to calculate solar position for
elev : float
Elevation of location in meters
pressure : int or float
avg. yearly pressure at location in millibars;
used for atmospheric correction
temp : int or float
avg. yearly temperature at location in
degrees C; used for atmospheric correction
delta_t : float, optional
If delta_t is None, uses spa.calculate_deltat
using time.year and time.month from pandas.DatetimeIndex.
For most simulations specifying delta_t is sufficient.
Difference between terrestrial time and UT1.
*Note: delta_t = None will break code using nrel_numba,
this will be fixed in a future version.
By default, use USNO historical data and predictions
atmos_refrac : float, optional
The approximate atmospheric refraction (in degrees)
at sunrise and sunset.
numthreads: int, optional, default None
Number of threads to use for computation if numba>=0.17
is installed.
sst : bool, default False
If True, return only data needed for sunrise, sunset, and transit
calculations.
Returns
-------
list with elements:
apparent zenith,
zenith,
elevation,
apparent_elevation,
azimuth,
equation_of_time
References
----------
.. [1] I. Reda and A. Andreas, Solar position algorithm for solar radiation
applications. Solar Energy, vol. 76, no. 5, pp. 577-589, 2004.
.. [2] I. Reda and A. Andreas, Corrigendum to Solar position algorithm for
solar radiation applications. Solar Energy, vol. 81, no. 6, p. 838, 2007.
"""
jd = julian_day(unixtime)
jde = julian_ephemeris_day(jd, delta_t)
jc = julian_century(jd)
jce = julian_ephemeris_century(jde)
jme = julian_ephemeris_millennium(jce)
R = heliocentric_radius_vector(jme)
L = heliocentric_longitude(jme)
B = heliocentric_latitude(jme)
Theta = geocentric_longitude(L)
beta = geocentric_latitude(B)
x0 = mean_elongation(jce)
x1 = mean_anomaly_sun(jce)
x2 = mean_anomaly_moon(jce)
x3 = moon_argument_latitude(jce)
x4 = moon_ascending_longitude(jce)
delta_psi, delta_epsilon = longitude_obliquity_nutation(jce, x0, x1, x2, x3, x4)
epsilon0 = mean_ecliptic_obliquity(jme)
epsilon = true_ecliptic_obliquity(epsilon0, delta_epsilon)
delta_tau = aberration_correction(R)
lamd = apparent_sun_longitude(Theta, delta_psi, delta_tau)
v0 = mean_sidereal_time(jd, jc)
v = apparent_sidereal_time(v0, delta_psi, epsilon)
alpha = geocentric_sun_right_ascension(lamd, epsilon, beta)
delta = geocentric_sun_declination(lamd, epsilon, beta)
if sst: # numba: delete
return v, alpha, delta # numba: delete
m = sun_mean_longitude(jme)
eot = equation_of_time(m, alpha, delta_psi, epsilon)
H = local_hour_angle(v, lon, alpha)
xi = equatorial_horizontal_parallax(R)
u = uterm(lat)
x = xterm(u, lat, elev)
y = yterm(u, lat, elev)
delta_alpha = parallax_sun_right_ascension(x, xi, H, delta)
delta_prime = topocentric_sun_declination(delta, x, y, xi, delta_alpha, H)
H_prime = topocentric_local_hour_angle(H, delta_alpha)
e0 = topocentric_elevation_angle_without_atmosphere(lat, delta_prime,
H_prime)
delta_e = atmospheric_refraction_correction(pressure, temp, e0,
atmos_refract)
e = topocentric_elevation_angle(e0, delta_e)
theta = topocentric_zenith_angle(e)
theta0 = topocentric_zenith_angle(e0)
gamma = topocentric_astronomers_azimuth(H_prime, delta_prime, lat)
phi = topocentric_azimuth_angle(gamma)
return [theta, theta0, e, e0, phi, eot]
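# --- Illustrative sketch (not part of the original module) -------------------
# Minimal usage example of solar_position(). The location, pressure,
# temperature, delta_t and refraction values below are arbitrary/assumed and
# only serve to show the call signature; the returned list holds the six
# quantities documented in the docstring above.
def _example_solar_position():
    unixtime = 1523836800.0            # 2018-04-16 00:00:00 UTC
    lat, lon, elev = 51.0486, -114.07, 1100.0
    pressure, temp = 890.0, 10.0       # millibar, deg C (assumed)
    delta_t = 70.7                     # terrestrial time - UT1, seconds (assumed)
    atmos_refract = 0.5667
    theta, theta0, e, e0, phi, eot = solar_position(
        unixtime, lat, lon, elev, pressure, temp, delta_t, atmos_refract)
    return theta, phi, eot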
try:
if IS_NUMBA: # type: ignore
try:
import numpy as np
except:
pass
import numba
import numpy as np
import threading
# This is 3x slower without nogil
@numba.njit(nogil=True)
def solar_position_loop(unixtime, loc_args, out):
"""Loop through the time array and calculate the solar position."""
lat = loc_args[0]
lon = loc_args[1]
elev = loc_args[2]
pressure = loc_args[3]
temp = loc_args[4]
delta_t = loc_args[5]
atmos_refract = loc_args[6]
sst = loc_args[7]
esd = loc_args[8]
for i in range(len(unixtime)):
utime = unixtime[i]
jd = julian_day(utime)
jde = julian_ephemeris_day(jd, delta_t)
jc = julian_century(jd)
jce = julian_ephemeris_century(jde)
jme = julian_ephemeris_millennium(jce)
R = heliocentric_radius_vector(jme)
L = heliocentric_longitude(jme)
B = heliocentric_latitude(jme)
Theta = geocentric_longitude(L)
beta = geocentric_latitude(B)
x0 = mean_elongation(jce)
x1 = mean_anomaly_sun(jce)
x2 = mean_anomaly_moon(jce)
x3 = moon_argument_latitude(jce)
x4 = moon_ascending_longitude(jce)
# delta_psi = longitude_nutation(jce, x0, x1, x2, x3, x4)
# delta_epsilon = obliquity_nutation(jce, x0, x1, x2, x3, x4)
delta_psi, delta_epsilon = longitude_obliquity_nutation(jce, x0, x1, x2, x3, x4)
epsilon0 = mean_ecliptic_obliquity(jme)
epsilon = true_ecliptic_obliquity(epsilon0, delta_epsilon)
delta_tau = aberration_correction(R)
lamd = apparent_sun_longitude(Theta, delta_psi, delta_tau)
v0 = mean_sidereal_time(jd, jc)
v = apparent_sidereal_time(v0, delta_psi, epsilon)
alpha = geocentric_sun_right_ascension(lamd, epsilon, beta)
delta = geocentric_sun_declination(lamd, epsilon, beta)
# if sst:
# out[0, i] = v
# out[1, i] = alpha
# out[2, i] = delta
# continue
m = sun_mean_longitude(jme)
eot = equation_of_time(m, alpha, delta_psi, epsilon)
H = local_hour_angle(v, lon, alpha)
xi = equatorial_horizontal_parallax(R)
u = uterm(lat)
x = xterm(u, lat, elev)
y = yterm(u, lat, elev)
delta_alpha = parallax_sun_right_ascension(x, xi, H, delta)
delta_prime = topocentric_sun_declination(delta, x, y, xi, delta_alpha,
H)
H_prime = topocentric_local_hour_angle(H, delta_alpha)
e0 = topocentric_elevation_angle_without_atmosphere(lat, delta_prime,
H_prime)
delta_e = atmospheric_refraction_correction(pressure, temp, e0,
atmos_refract)
e = topocentric_elevation_angle(e0, delta_e)
theta = topocentric_zenith_angle(e)
theta0 = topocentric_zenith_angle(e0)
gamma = topocentric_astronomers_azimuth(H_prime, delta_prime, lat)
phi = topocentric_azimuth_angle(gamma)
out[0, i] = theta
out[1, i] = theta0
out[2, i] = e
out[3, i] = e0
out[4, i] = phi
out[5, i] = eot
def solar_position_numba(unixtime, lat, lon, elev, pressure, temp, delta_t,
atmos_refract, numthreads, sst=False, esd=False):
"""Calculate the solar position using the numba compiled functions
and multiple threads.
Very slow if functions are not numba compiled.
"""
# these args are the same for each thread
loc_args = np.array([lat, lon, elev, pressure, temp, delta_t,
atmos_refract, sst, esd])
# construct dims x ulength array to put the results in
ulength = unixtime.shape[0]
if sst:
dims = 3
elif esd:
dims = 1
else:
dims = 6
result = np.empty((dims, ulength), dtype=np.float64)
if unixtime.dtype != np.float64:
unixtime = unixtime.astype(np.float64)
if ulength < numthreads:
warnings.warn('The number of threads is more than the length of '
                              'the time array. Only using %s threads.' % (ulength))
numthreads = ulength
if numthreads <= 1:
solar_position_loop(unixtime, loc_args, result)
return result
# split the input and output arrays into numthreads chunks
split0 = np.array_split(unixtime, numthreads)
split2 = np.array_split(result, numthreads, axis=1)
chunks = [[a0, loc_args, split2[i]] for i, a0 in enumerate(split0)]
# Spawn one thread per chunk
threads = [threading.Thread(target=solar_position_loop, args=chunk)
for chunk in chunks]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
return result
except:
pass
def transit_sunrise_sunset(dates, lat, lon, delta_t):
"""Calculate the sun transit, sunrise, and sunset for a set of dates at a
given location.
Parameters
----------
dates : array
Numpy array of ints/floats corresponding to the Unix time
for the dates of interest, must be midnight UTC (00:00+00:00)
on the day of interest.
lat : float
Latitude of location to perform calculation for
lon : float
Longitude of location
delta_t : float
Difference between terrestrial time and UT. USNO has tables.
Returns
-------
tuple : (transit, sunrise, sunset) localized to UTC
>>> transit_sunrise_sunset(1523836800, 51.0486, -114.07, 70.68302220312503)
(1523907360.3863413, 1523882341.570479, 1523932345.7781625)
"""
condition = (dates % 86400) != 0.0
if condition:
raise ValueError('Input dates must be at 00:00 UTC')
utday = (dates // 86400) * 86400
ttday0 = utday - delta_t
ttdayn1 = ttday0 - 86400.0
ttdayp1 = ttday0 + 86400.0
# index 0 is v, 1 is alpha, 2 is delta
utday_res = solar_position(utday, 0, 0, 0, 0, 0, delta_t,
0, sst=True)
v = utday_res[0]
ttday0_res = solar_position(ttday0, 0, 0, 0, 0, 0, delta_t,
0, sst=True)
ttdayn1_res = solar_position(ttdayn1, 0, 0, 0, 0, 0, delta_t,
0, sst=True)
ttdayp1_res = solar_position(ttdayp1, 0, 0, 0, 0, 0, delta_t,
0, sst=True)
m0 = (ttday0_res[1] - lon - v) / 360
cos_arg = ((-0.014543315936696236 - sin(radians(lat)) # sin(radians(-0.8333)) = -0.0145...
* sin(radians(ttday0_res[2]))) /
(cos(radians(lat)) * cos(radians(ttday0_res[2]))))
if abs(cos_arg) > 1:
cos_arg = nan
H0 = degrees(acos(cos_arg)) % 180
m = [0.0]*3
m[0] = m0 % 1
m[1] = (m[0] - H0 / 360.0)
m[2] = (m[0] + H0 / 360.0)
# need to account for fractions of day that may be the next or previous
# day in UTC
add_a_day = m[2] >= 1
sub_a_day = m[1] < 0
m[1] = m[1] % 1
m[2] = m[2] % 1
vs = [0.0]*3
for i in range(3):
vs[i] = v + 360.985647*m[i]
n = [0.0]*3
for i in range(3):
n[i] = m[i] + delta_t / 86400.0
a = ttday0_res[1] - ttdayn1_res[1]
if abs(a) > 2:
        a = a % 1
ap = ttday0_res[2] - ttdayn1_res[2]
if (abs(ap) > 2):
ap = ap % 1
b = ttdayp1_res[1] - ttday0_res[1]
if (abs(b) > 2):
b = b % 1
bp = ttdayp1_res[2] - ttday0_res[2]
if abs(bp) > 2:
bp = bp % 1
c = b - a
cp = bp - ap
alpha_prime = [0.0]*3
delta_prime = [0.0]*3
Hp = [0.0]*3
for i in range(3):
alpha_prime[i] = ttday0_res[1] + (n[i] * (a + b + c * n[i]))*0.5
delta_prime[i] = ttday0_res[2] + (n[i] * (ap + bp + cp * n[i]))*0.5
Hp[i] = (vs[i] + lon - alpha_prime[i]) % 360
if Hp[i] >= 180.0:
Hp[i] = Hp[i] - 360.0
#alpha_prime = ttday0_res[1] + (n * (a + b + c * n)) / 2 # this is vect
#delta_prime = ttday0_res[2] + (n * (ap + bp + cp * n)) / 2 # this is vect
#Hp = (vs + lon - alpha_prime) % 360
#Hp[Hp >= 180] = Hp[Hp >= 180] - 360
x1 = sin(radians(lat))
x2 = cos(radians(lat))
h = [0.0]*3
for i in range(3):
h[i] = degrees(asin(x1*sin(radians(delta_prime[i])) + x2 * cos(radians(delta_prime[i])) * cos(radians(Hp[i]))))
T = float((m[0] - Hp[0] / 360.0) * 86400.0)
R = float((m[1] + (h[1] + 0.8333) / (360.0 * cos(radians(delta_prime[1])) *
cos(radians(lat)) *
sin(radians(Hp[1])))) * 86400.0)
S = float((m[2] + (h[2] + 0.8333) / (360.0 * cos(radians(delta_prime[2])) *
cos(radians(lat)) *
sin(radians(Hp[2])))) * 86400.0)
if add_a_day:
S += 86400.0
if sub_a_day:
R -= 86400.0
transit = T + utday
sunrise = R + utday
sunset = S + utday
return transit, sunrise, sunset
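# --- Illustrative sketch (not part of the original module) -------------------
# The three returned values are unix/epoch seconds and can be converted to UTC
# datetimes directly. delta_t is taken from calculate_deltat() (defined just
# below) for the same month, and the location matches the docstring example.
def _example_transit_sunrise_sunset():
    delta_t = calculate_deltat(2018, 4)
    transit, sunrise, sunset = transit_sunrise_sunset(
        1523836800.0, 51.0486, -114.07, delta_t)
    return [datetime.utcfromtimestamp(t) for t in (transit, sunrise, sunset)]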
def calculate_deltat(year, month):
y = year + (month - 0.5)/12
if (2005 <= year) & (year < 2050):
t1 = (y-2000.0)
deltat = (62.92+0.32217*t1 + 0.005589*t1*t1)
elif (1986 <= year) & (year < 2005):
t1 = y - 2000.0
deltat = (63.86+0.3345*t1
- 0.060374*t1**2
+ 0.0017275*t1**3
+ 0.000651814*t1**4
+ 0.00002373599*t1**5)
elif (2050 <= year) & (year < 2150):
deltat = (-20+32*((y-1820)/100)**2
- 0.5628*(2150-y))
elif year < -500.0:
deltat = -20.0 + 32*(0.01*(y-1820.0))**2
elif (-500 <= year) & (year < 500):
t1 = y/100
deltat = (10583.6-1014.41*(y/100)
+ 33.78311*(y/100)**2
- 5.952053*(y/100)**3
- 0.1798452*(y/100)**4
+ 0.022174192*(y/100)**5
+ 0.0090316521*(y/100)**6)
elif (500 <= year) & (year < 1600):
t1 = (y-1000)/100
deltat = (1574.2-556.01*((y-1000)/100)
+ 71.23472*((y-1000)/100)**2
+ 0.319781*((y-1000)/100)**3
- 0.8503463*((y-1000)/100)**4
- 0.005050998*((y-1000)/100)**5
+ 0.0083572073*((y-1000)/100)**6)
elif (1600 <= year) & (year < 1700):
t1 = (y-1600.0)
deltat = (120-0.9808*(y-1600)
- 0.01532*(y-1600)**2
+ (y-1600)**3/7129)
elif (1700 <= year) & (year < 1800):
t1 = (y - 1700.0)
deltat = (8.83+0.1603*(y-1700)
- 0.0059285*(y-1700)**2
+ 0.00013336*(y-1700)**3
- (y-1700)**4/1174000)
elif (1800 <= year) & (year < 1860):
t1 = y - 1800.0
deltat = (13.72-0.332447*(y-1800)
+ 0.0068612*(y-1800)**2
+ 0.0041116*(y-1800)**3
- 0.00037436*(y-1800)**4
+ 0.0000121272*(y-1800)**5
- 0.0000001699*(y-1800)**6
+ 0.000000000875*(y-1800)**7)
elif (1860 <= year) & (year < 1900):
t1 = y-1860.0
deltat = (7.62+0.5737*(y-1860)
- 0.251754*(y-1860)**2
+ 0.01680668*(y-1860)**3
- 0.0004473624*(y-1860)**4
+ (y-1860)**5/233174)
elif (1900 <= year) & (year < 1920):
t1 = y - 1900.0
deltat = (-2.79+1.494119*(y-1900)
- 0.0598939*(y-1900)**2
+ 0.0061966*(y-1900)**3
- 0.000197*(y-1900)**4)
elif (1920 <= year) & (year < 1941):
t1 = y - 1920.0
deltat = (21.20+0.84493*(y-1920)
- 0.076100*(y-1920)**2
+ 0.0020936*(y-1920)**3)
elif (1941 <= year) & (year < 1961):
t1 = y - 1950.0
deltat = (29.07+0.407*(y-1950)
- (y-1950)**2/233
+ (y-1950)**3/2547)
elif (1961 <= year) & (year < 1986):
t1 = y-1975
deltat = (45.45+1.067*(y-1975)
- (y-1975)**2/260
- (y-1975)**3/718)
elif year >= 2150:
deltat = -20+32*((y-1820)/100)**2
    return deltat
| mit |
ilo10/scikit-learn | examples/datasets/plot_random_dataset.py | 348 | 2254 | """
==============================================
Plot randomly generated classification dataset
==============================================
Plot several randomly generated 2D classification datasets.
This example illustrates the :func:`datasets.make_classification`
:func:`datasets.make_blobs` and :func:`datasets.make_gaussian_quantiles`
functions.
For ``make_classification``, three binary and two multi-class classification
datasets are generated, with different numbers of informative features and
clusters per class. """
print(__doc__)
import matplotlib.pyplot as plt
from sklearn.datasets import make_classification
from sklearn.datasets import make_blobs
from sklearn.datasets import make_gaussian_quantiles
plt.figure(figsize=(8, 8))
plt.subplots_adjust(bottom=.05, top=.9, left=.05, right=.95)
plt.subplot(321)
plt.title("One informative feature, one cluster per class", fontsize='small')
X1, Y1 = make_classification(n_features=2, n_redundant=0, n_informative=1,
n_clusters_per_class=1)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.subplot(322)
plt.title("Two informative features, one cluster per class", fontsize='small')
X1, Y1 = make_classification(n_features=2, n_redundant=0, n_informative=2,
n_clusters_per_class=1)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.subplot(323)
plt.title("Two informative features, two clusters per class", fontsize='small')
X2, Y2 = make_classification(n_features=2, n_redundant=0, n_informative=2)
plt.scatter(X2[:, 0], X2[:, 1], marker='o', c=Y2)
plt.subplot(324)
plt.title("Multi-class, two informative features, one cluster",
fontsize='small')
X1, Y1 = make_classification(n_features=2, n_redundant=0, n_informative=2,
n_clusters_per_class=1, n_classes=3)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.subplot(325)
plt.title("Three blobs", fontsize='small')
X1, Y1 = make_blobs(n_features=2, centers=3)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.subplot(326)
plt.title("Gaussian divided into three quantiles", fontsize='small')
X1, Y1 = make_gaussian_quantiles(n_features=2, n_classes=3)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.show()
| bsd-3-clause |
Palmerina/CMS_OpenData_DidacticAnalyzer | CmsOpenData_Analyzer/TwoMuonAnalyzerTTree.py | 1 | 12564 | # Name: TwoMuonAnalyzer.py
#
# CMS Open Data
#
# Description: reads muons from a ROOT TTree, applies quality cuts,
#              pairs up opposite-charge muons and plots their invariant
#              mass spectrum.
#
__author__ = "Palmerina Gonzalez Izquierdo"
__copyright__ = "Copyright (C) 2015 Palmerina G. I."
__license__ = "Public Domain"
__version__ = "1.0"
__maintainer__ = "Palmerina Gonzalez"
__email__ = "[email protected]"
import ROOT
from CutsConfig import CutsConfig
import numpy as n
from scipy.stats import norm
#from scipy import optimize
#import matplotlib.mlab as mlab
import matplotlib
matplotlib.use('QT4agg')
import matplotlib.pylab as P
import array
class TwoMuonAnalyzer(object):
"""
Analyzes the properties of the muons in every event
and selects those coming from the Z boson decay
"""
def __init__(self, cutsConfig):
"""
TwoMuonAnalyzer initializer
"""
self.f = ROOT.TFile("mytree.root", "read")
self.tree = self.f.Get("muons")
self.cutsConfig = cutsConfig
# Initialization of variables
self.Muon_pt = array.array("d", [0.]*50)
self.Muon_eta = array.array("d", [0.]*50)
self.Muon_px = array.array("d", [0.]*50)
self.Muon_py = array.array("d", [0.]*50)
self.Muon_pz = array.array("d", [0.]*50)
self.Muon_energy = array.array("d", [0.]*50)
self.Muon_vertex_z = array.array("d", [0.]*50)
self.Muon_isGlobalMuon = array.array("i", [0]*50)
self.Muon_isTrackerMuon = array.array("i", [0]*50)
self.Muon_dB = array.array("d", [0.]*50)
self.Muon_edB = array.array("d", [0.]*50)
self.Muon_isolation_sumPt = array.array("d", [0.]*50)
self.Muon_isolation_emEt = array.array("d", [0.]*50)
self.Muon_isolation_hadEt = array.array("d", [0.]*50)
self.Muon_numberOfValidHits = array.array("i", [0]*50)
self.Muon_normChi2 = array.array("d", [0.]*50)
self.Muon_charge = array.array("i", [0]*50)
self.Vertex_z = array.array("d", [0.])
self.npart = array.array("i", [0])
# Arrays where the variables are going to be stored
self.allMuons_pt = array.array("d")
self.allMuons_eta = array.array("d")
self.allMuons_px = array.array("d")
self.allMuons_py = array.array("d")
self.allMuons_pz = array.array("d")
self.allMuons_energy = array.array("d")
self.allMuons_isGlobalMuon = array.array("i")
self.allMuons_isTrackerMuon = array.array("i")
self.allMuons_vertex_z = array.array("d")
self.allMuons_dB = array.array("d")
self.allMuons_edB = array.array("d")
self.allMuons_isolation_sumPt = array.array("d")
self.allMuons_isolation_emEt = array.array("d")
self.allMuons_isolation_hadEt = array.array("d")
self.allMuons_numberOfValidHits = array.array("i")
self.allMuons_normChi2 = array.array("d")
self.allMuons_distance = array.array("d")
self.allMuons_charge = array.array("i")
self.mass = array.array("d")
self.event_vertex_z = array.array("d")
self.goodMuons_pt = array.array("d")
self.goodMuons_eta = array.array("d")
self.goodMuons_px = array.array("d")
self.goodMuons_py = array.array("d")
self.goodMuons_pz = array.array("d")
self.goodMuons_energy = array.array("d")
self.goodMuons_isGlobalMuon = array.array("i")
self.goodMuons_isTrackerMuon = array.array("i")
self.goodMuons_vertex_z = array.array("d")
self.goodMuons_dB = array.array("d")
self.goodMuons_edB = array.array("d")
self.goodMuons_isolation_sumPt = array.array("d")
self.goodMuons_isolation_emEt = array.array("d")
self.goodMuons_isolation_hadEt = array.array("d")
self.goodMuons_numberOfValidHits = array.array("i")
self.goodMuons_normChi2 = array.array("d")
self.goodMuons_distance = array.array("d")
self.goodMuons_charge = array.array("i")
self.goodMuons_pt1 = array.array("d")
self.goodMuons_pt2 = array.array("d")
self.z_mass = array.array("d")
def selectMuons(self, iMuon):
"""
        iMuon: index of the muon inside the per-event arrays
        returns: True if the muon passes every quality cut, False otherwise
"""
#muon=getMuons(), vertex=getVertex()
#The muon must be detected by both the tracker and the muon chambers
if not (self.Muon_isGlobalMuon[iMuon] and self.Muon_isTrackerMuon[iMuon]):
return False
# Minimum transverse momentum (pt) and maximum eta angle
if self.Muon_pt[iMuon] < self.cutsConfig.pt_min or abs(self.Muon_eta[iMuon]) > self.cutsConfig.eta_max:
return False
# Maximum distance of the muon respect to the vertex
if abs(self.Muon_vertex_z[iMuon] - self.Vertex_z[0]) > self.cutsConfig.distance:
return False
# Maximum impact parameter
if self.Muon_dB[iMuon] > self.cutsConfig.dB_max:
return False
# I_trk + I_ECAL + I_HCAL
        # sumPt = sum of the transverse momenta of the tracks in the isolation cone
# emEt = electromagnetic energy
# hadEt = hadronic energy
        # Maximum energy allowed in the isolation cone before the "muon" is considered part of a jet of particles
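        # Illustrative numbers (assumed, not taken from the data): a muon with
        # sumPt = 1.2 GeV, emEt = 0.8 GeV, hadEt = 0.5 GeV and pt = 25 GeV has
        # a relative isolation of (1.2 + 0.8 + 0.5) / 25 = 0.1, so it survives
        # this cut only if cutsConfig.isolation is at least 0.1.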
if (self.Muon_isolation_sumPt[iMuon] + self.Muon_isolation_emEt[iMuon] + self.Muon_isolation_hadEt[iMuon]) / self.Muon_pt[iMuon] > self.cutsConfig.isolation:
return False
        # muon SIP variable: significance of the impact parameter (dB / edB)
#if (self.Muon_dB[0] / self.Muon_edB[0]) > 4:
# return False
if self.Muon_normChi2[iMuon] > -900.0:
# Maximum chi2
if self.Muon_normChi2[iMuon] > self.cutsConfig.chi2:
return False
if self.Muon_numberOfValidHits[iMuon] > -900:
# Minimum number of valid hits on the global track.
if self.Muon_numberOfValidHits[iMuon] < self.cutsConfig.numValidHits:
return False
return True
def process(self):
"""
        Loops over all the stored events, selects the good muons by applying
        the cut configuration and pairs up opposite-charge muons.
        The invariant mass of every opposite-charge pair inside the Z mass
        window is appended to self.mass (all muons) and, when both muons
        pass the quality cuts, to self.z_mass (good muons).
"""
# Address arrays to the TTree's branches
self.tree.SetBranchAddress("Muon_pt", self.Muon_pt)
self.tree.SetBranchAddress("Muon_eta", self.Muon_eta)
self.tree.SetBranchAddress("Muon_px", self.Muon_px)
self.tree.SetBranchAddress("Muon_py", self.Muon_py)
self.tree.SetBranchAddress("Muon_pz", self.Muon_pz)
self.tree.SetBranchAddress("Muon_energy", self.Muon_energy)
self.tree.SetBranchAddress("Muon_vertex_z", self.Muon_vertex_z)
self.tree.SetBranchAddress("Muon_isGlobalMuon", self.Muon_isGlobalMuon)
self.tree.SetBranchAddress("Muon_isTrackerMuon", self.Muon_isTrackerMuon)
self.tree.SetBranchAddress("Muon_dB", self.Muon_dB)
self.tree.SetBranchAddress("Muon_edB", self.Muon_edB)
self.tree.SetBranchAddress("Muon_isolation_sumPt", self.Muon_isolation_sumPt)
self.tree.SetBranchAddress("Muon_isolation_emEt", self.Muon_isolation_emEt)
self.tree.SetBranchAddress("Muon_isolation_hadEt", self.Muon_isolation_hadEt)
self.tree.SetBranchAddress("Muon_numberOfValidHits", self.Muon_numberOfValidHits)
self.tree.SetBranchAddress("Muon_normChi2", self.Muon_normChi2)
self.tree.SetBranchAddress("Muon_charge", self.Muon_charge)
self.tree.SetBranchAddress("Vertex_z", self.Vertex_z)
self.tree.SetBranchAddress("npart", self.npart)
numEntries = self.tree.GetEntries()
# Loop over the events
for i in range(0, numEntries):
self.tree.GetEntry(i) # Muon_* arrays are filled for each event
if (i+1) % 10 == 0:
print i+1, "processed events"
# Select events with at least two muons
if self.npart[0]<2:
continue
# ALL MUONS
# Loop over the muons
            for iMuon in range(0, self.npart[0]):
# Histograms to plot
self.allMuons_pt.append(self.Muon_pt[iMuon])
self.allMuons_eta.append(self.Muon_eta[iMuon])
self.allMuons_energy.append(self.Muon_energy[iMuon])
self.allMuons_vertex_z.append(self.Muon_vertex_z[iMuon])
if abs(self.Muon_dB[iMuon])<20:
self.allMuons_dB.append(self.Muon_dB[iMuon])
self.allMuons_edB.append(self.Muon_edB[iMuon])
self.allMuons_distance.append(self.Muon_vertex_z[iMuon] - self.Vertex_z[0])
if self.Muon_numberOfValidHits[iMuon] > -900:
self.allMuons_numberOfValidHits.append(self.Muon_numberOfValidHits[iMuon])
# print "hits", self.Muon_numberOfValidHits[iMuon]
if (abs(self.Muon_normChi2[iMuon]) < 900.0 and self.Muon_normChi2[iMuon] != float("inf")):
self.allMuons_normChi2.append(self.Muon_normChi2[iMuon])
# Muon's four-momentum
outerMuon = ROOT.TLorentzVector(self.Muon_px[iMuon], self.Muon_py[iMuon], self.Muon_pz[iMuon], self.Muon_energy[iMuon])
outerMuon_charge = self.Muon_charge[iMuon]
                # Select the good muons
if self.selectMuons(iMuon):
# Histograms to plot
self.goodMuons_pt.append(self.Muon_pt[iMuon])
self.goodMuons_eta.append(self.Muon_eta[iMuon])
self.goodMuons_energy.append(self.Muon_energy[iMuon])
self.goodMuons_vertex_z.append(self.Muon_vertex_z[iMuon])
if abs(self.Muon_dB[iMuon])<4:
self.goodMuons_dB.append(self.Muon_dB[iMuon])
self.goodMuons_edB.append(self.Muon_edB[iMuon])
self.goodMuons_distance.append(self.Muon_vertex_z[iMuon] - self.Vertex_z[0])
# GOOD MUONS
# Loop over the muons
                    for jMuon in range(0, self.npart[0]):
innerGoodMuon_charge = self.Muon_charge[jMuon]
#They must have opposite charges
if innerGoodMuon_charge * outerMuon_charge >=0:
continue
                        # Select the good muons
if self.selectMuons(jMuon):
innerGoodMuon = ROOT.TLorentzVector(self.Muon_px[jMuon], self.Muon_py[jMuon], self.Muon_pz[jMuon], self.Muon_energy[jMuon])
goodMass = (outerMuon + innerGoodMuon).M()
if not (goodMass > self.cutsConfig.mass_min and goodMass < 120):
continue
self.goodMuons_pt1.append(self.Muon_pt[iMuon])
self.goodMuons_pt2.append(self.Muon_pt[jMuon])
self.z_mass.append(goodMass)
#ALL MUONS
# Loop over all muons
                for kMuon in range(0, self.npart[0]):
innerMuon_charge = self.Muon_charge[kMuon]
if innerMuon_charge * outerMuon_charge >=0:
continue
innerMuon = ROOT.TLorentzVector(self.Muon_px[kMuon], self.Muon_py[kMuon], self.Muon_pz[kMuon], self.Muon_energy[kMuon])
mass = (outerMuon + innerMuon).M()
if not (mass > 60 and mass < 120):
continue
self.mass.append(mass)
def plotter1(self):
"""
        Plots, for all muons: eta, transverse momentum, normalized chi2,
        number of valid hits, impact parameter and distance to the primary
        vertex.
"""
# P.figure()
# P.hist(self.badZPt, bins = 100, normed=1, alpha=0.5)
# P.xlim(0, 500)
# P.xlabel("Total pt (GeV/c)")
# P.ylabel("frequency")
fig1 = P.figure()
ax_1=fig1.add_subplot(211)
ax_1.hist(self.allMuons_eta, bins = 60, alpha=0.5)
ax_1.set_xlabel("Eta")
ax_1.set_ylabel("frequency")
ax_2=fig1.add_subplot(212)
ax_2.hist(self.allMuons_pt, bins = 15000, alpha=0.5, log = True)
ax_2.set_xlim(0, 1000)
ax_2.set_xlabel("Transverse momentum (GeV/c)")
ax_2.set_ylabel("frequency")
fig2 = P.figure()
ax_3 = fig2.add_subplot(211)
ax_3.hist(self.allMuons_normChi2, bins = 50, alpha=0.5, log = True)
ax_3.set_xlabel("Chi**2")
ax_3.set_ylabel("frequency")
ax_4 = fig2.add_subplot(212)
ax_4.hist(self.allMuons_numberOfValidHits, bins = 30, alpha=0.5)
ax_4.set_xlabel("Number of valid hits")
ax_4.set_ylabel("frequency")
fig3 = P.figure()
ax_5 = fig3.add_subplot(211)
ax_5.hist(self.allMuons_dB, bins = 50, log=True)
ax_5.set_xlabel("Impact parameter")
ax_5.set_ylabel("frequency")
ax_5.set_title("Distance to the primary vertex")
ax_6 = fig3.add_subplot(212)
ax_6.hist(self.allMuons_distance, bins = 50, alpha=0.5, log=True)
ax_6.set_xlabel("Dz to PV")
ax_6.set_ylabel("frequency")
P.show()
def plotter2(self):
"""
        Plots the dimuon invariant mass and the muon eta for all and for good
        muons, plus the transverse momenta of the two muons in each selected
        pair.
"""
fig1 = P.figure()
ax_1 = fig1.add_subplot(211)
ax_1.hist(self.z_mass, bins = 50, alpha=0.5, label="Good Muons")
ax_1.hist(self.mass, bins = 60, alpha=0.5, label="All Muons")
ax_1.set_xlabel("Invariant mass (GeV/c2)")
ax_1.set_ylabel("frequency")
ax_1.legend(loc='upper right')
ax_2 = fig1.add_subplot(212)
ax_2.hist(self.goodMuons_eta, bins = 10, alpha=0.5, label="Good Muons", log =True)
ax_2.hist(self.allMuons_eta, bins = 40, alpha=0.5, label="All Muons", log = True)
ax_2.set_xlim(-10, 10)
ax_2.set_xlabel("Eta")
ax_2.set_ylabel("frequency")
ax_2.legend(loc='upper right')
fig2 = P.figure()
ax_1 = fig2.add_subplot(211)
ax_1.hist(self.goodMuons_pt1, bins = 50, alpha=0.5)
ax_1.set_xlabel("pt_1 (GeV/c)")
ax_1.set_ylabel("frequency")
ax_2 = fig2.add_subplot(212)
ax_2.hist(self.goodMuons_pt2, bins = 50, alpha=0.5)
ax_2.set_xlabel("pt_2 (GeV/c)")
ax_2.set_ylabel("frequency")
P.show()
| gpl-3.0 |
jlazear/cmb | util/plot_funcs.py | 1 | 3499 | #!/bin/python
"""
plot_funcs
jlazear
2/26/15
Collection of useful plotting functions.
"""
__version__ = 20150226
__releasestatus__ = 'beta'
import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
import numpy as np
def resid_plot(xs1, ys1, xs2, ys2, label1=None, label2=None,
xlabel=None, ylabel=None, ratio=4, legend=True,
fig=None, **kwargs):
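    # Draws ys1 and ys2 on a main axis and the point-by-point difference
    # ys2 - ys1 on a smaller residual axis sharing the same x axis; `ratio`
    # makes the main panel that many times taller than the residual panel.
    # Hypothetical call (array contents assumed):
    #   fig, (ax1, ax2) = resid_plot(x, model, x, data,
    #                                label1='model', label2='data',
    #                                xlabel='x', ylabel='y')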
# Only include shared points for residual plot
first = (len(xs1) <= len(xs2))
xmax = len(xs1) if first else len(xs2)
xssub = xs1 if first else xs2
dys = ys2[:xmax] - ys1[:xmax]
# Make fig and axes
if fig is None:
fig = plt.figure()
gs = gridspec.GridSpec(2, 1, height_ratios=[ratio, 1])
ax1 = fig.add_subplot(gs[0])
ax2 = fig.add_subplot(gs[1], sharex=ax1)
plt.setp(ax1.get_xticklabels(), visible=False) # Remove ax1 x-tick labels
# Plot data sets
ax1.plot(xs1, ys1, label=label1, **kwargs)
ax1.plot(xs2, ys2, label=label2, **kwargs)
# Plot residuals
ax2.plot(xssub, dys)
# Set labels/legend
ax2.set_xlabel(xlabel, fontsize=22)
ax1.set_ylabel(ylabel, fontsize=22)
ax2.set_ylabel('residuals', fontsize=16)
if legend:
ax1.legend(fontsize=16)
# Adjust spacing
fig.tight_layout(h_pad=0.0)
return fig, [ax1, ax2]
def Dl_resid_plot(ells1, Cls1, ells2, Cls2, label1=None, label2=None,
xlabel=None, ylabel=None, ratio=4, legend=True, fig=None,
CltoDl=True, units=r'$\mathrm{\mu K^2}$', **kwargs):
if xlabel is None:
xlabel = r'$\ell \sim \frac{180^\circ}{\theta}$'
if ylabel is None:
if CltoDl:
ylabel = r'$D_\ell^{XX}\,$' + r'({0})'.format(units)
else:
ylabel = r'$C_\ell^{XX}\,$' + r'({0})'.format(units)
if CltoDl:
Cls1 = (ells1*(ells1 + 1)/(2*np.pi))*Cls1
Cls2 = (ells2*(ells2 + 1)/(2*np.pi))*Cls2
return resid_plot(ells1, Cls1, ells2, Cls2,
label1=label1, label2=label2,
xlabel=xlabel, ylabel=ylabel,
ratio=ratio, legend=legend, fig=fig, **kwargs)
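# Hypothetical usage of Dl_resid_plot (the ell and C_l arrays are assumed):
#   ells = np.arange(2, 2001)
#   fig, (ax1, ax2) = Dl_resid_plot(ells, cls_theory, ells, cls_recovered,
#                                   label1='theory', label2='recovered')
# With CltoDl=True (the default) each spectrum is converted to
# D_l = l*(l+1)*C_l / (2*pi) before plotting.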
def print_summary(retdict):
w_Q = retdict['weights_Q']
w_U = retdict['weights_U']
ilc_Q = retdict['ilcQmap']
ilc_U = retdict['ilcUmap']
try:
debugdict = retdict['debug']
except KeyError:
print "No debug information available! Run ILC with _debug=True flag next time."
print "frequencies = {0} GHz".format(retdict['frequencies'])
print "weights_Q =", w_Q.flatten()
print "weights_U =", w_U.flatten()
print "var(ILC Qmap) =", ilc_Q.var()
print "var(ILC Umap) =", ilc_U.var()
return
w_Qexp = debugdict['weights_Q']
w_Uexp = debugdict['weights_U']
Qcond = debugdict['G_Qcond']
Ucond = debugdict['G_Ucond']
ilc_Qexp = debugdict['ilcQmap']
ilc_Uexp = debugdict['ilcUmap']
print "frequencies = {0} GHz".format(retdict['frequencies']/1.e9)
print "Q weights =", w_Q.flatten()
print "Q weights expected =", w_Qexp.flatten()
print "log10(Q condition number) =", np.log10(Qcond)
print "var(ILC Q) =", ilc_Q.var()
print "var(ILC Q) expected =", ilc_Qexp.var()
print "U weights =", w_U.flatten()
print "U weights expected =", w_Uexp.flatten()
print "log10(U condition number) =", np.log10(Ucond)
print "var(ILC U) =", ilc_U.var()
print "var(ILC U) expected =", ilc_Uexp.var() | apache-2.0 |
jrpretz/scratch | tensorflow-gymnastics/iris-nn.py | 1 | 2155 | import numpy as np
import tensorflow as tf
import sys
from sklearn import datasets
from sklearn import metrics
# for reproducibility
tf.set_random_seed(42)
# init random weights
w_1 = tf.Variable(tf.random_normal((4,16), stddev=0.1))
b_1 = tf.Variable(tf.random_normal((1,16), stddev=0.1))
w_2 = tf.Variable(tf.random_normal((16,3), stddev=0.1))
b_2 = tf.Variable(tf.random_normal((1,3), stddev=0.1))
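# Network shape implied by the weight matrices above: 4 input features
# -> 16 sigmoid hidden units -> 3 output logits, matching the 4
# measurements and 3 species of the Iris dataset.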
# the neural network.
# note that the 'softmax_cross_entropy_with_logits' cost function
# includes the 'softmax' part
def forward(X,w_1,b_1,w_2,b_2):
h = tf.nn.sigmoid(tf.matmul(X,w_1)+b_1)
yhat = tf.matmul(h,w_2)+b_2
return yhat
# Not splitting into train/test sets because I'm only interested in the
# mechanics of creating and training a NN with tensorflow
iris = datasets.load_iris()
# arranging the data for multi-class classification
X_train = iris.data
nFeatures = X_train.shape[1]
y_train_labels = iris.target
labels = np.unique(y_train_labels)
nLabels = len(labels)
y_train = np.zeros(shape=(y_train_labels.shape[0],nLabels))
for i in range(len(labels)):
y_train[:,i][np.where(y_train_labels == labels[i])] = 1.0
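# The loop above one-hot encodes the labels, e.g. class 2 becomes [0., 0., 1.]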
# the tensorflow tensors for the NN output, the predictions,
# the cost function and a minimization algorithm
sess = tf.Session()
X = tf.placeholder(tf.float32 , shape=(None,nFeatures ),name="X")
y = tf.placeholder(tf.float32 , shape=(None,nLabels),name="y")
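# shape=(None, ...) leaves the batch dimension unspecified, so any number of
# samples can be fed through the same placeholders.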
h = forward(X,w_1,b_1,w_2,b_2)
y_pred = tf.nn.softmax(h)
cost = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=h))
updates = tf.train.AdamOptimizer(0.1).minimize(cost)
# needed to run with variables
sess.run(tf.global_variables_initializer())
# 1000 epochs of training
for epoch in range(1000):
sess.run(updates,feed_dict={X: X_train, y: y_train})
print("%d %f"%(epoch,sess.run(cost,feed_dict={X: X_train, y: y_train})))
# classification report
# N.B. it's not best practice to evaluate on the training data, but it is enough for this exercise
prediction = np.argmax(sess.run(y_pred,feed_dict={X: X_train, y: y_train}),
axis=1)
print(metrics.classification_report(y_train_labels,prediction))
| gpl-3.0 |
linebp/pandas | pandas/tests/plotting/test_deprecated.py | 1 | 1471 | # coding: utf-8
import string
import pandas as pd
import pandas.util.testing as tm
from pandas.util.testing import slow
from numpy.random import randn
import pandas.tools.plotting as plotting
from pandas.tests.plotting.common import TestPlotBase
"""
Test cases for plot functions imported from deprecated
pandas.tools.plotting
"""
tm._skip_if_no_mpl()
class TestDeprecatedNameSpace(TestPlotBase):
@slow
def test_scatter_plot_legacy(self):
tm._skip_if_no_scipy()
df = pd.DataFrame(randn(100, 2))
with tm.assert_produces_warning(FutureWarning):
plotting.scatter_matrix(df)
with tm.assert_produces_warning(FutureWarning):
pd.scatter_matrix(df)
@slow
def test_boxplot_deprecated(self):
df = pd.DataFrame(randn(6, 4),
index=list(string.ascii_letters[:6]),
columns=['one', 'two', 'three', 'four'])
df['indic'] = ['foo', 'bar'] * 3
with tm.assert_produces_warning(FutureWarning):
plotting.boxplot(df, column=['one', 'two'],
by='indic')
@slow
def test_radviz_deprecated(self):
df = self.iris
with tm.assert_produces_warning(FutureWarning):
plotting.radviz(frame=df, class_column='Name')
@slow
def test_plot_params(self):
with tm.assert_produces_warning(FutureWarning):
pd.plot_params['xaxis.compat'] = True
| bsd-3-clause |