repo_name (stringlengths 6-112) | path (stringlengths 4-204) | copies (stringlengths 1-3) | size (stringlengths 4-6) | content (stringlengths 714-810k) | license (stringclasses 15 values)
---|---|---|---|---|---|
BenSchannes/Epidemium | Regressor_NN_2.py | 1 | 1591 | from sklearn.base import BaseEstimator
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import Imputer
from sklearn.ensemble import *
from sklearn.svm import *
from sklearn.neighbors import *
import numpy as np
from sklearn.preprocessing import *
from sklearn.decomposition import PCA
from keras.models import Sequential
from keras.optimizers import SGD, RMSprop
from keras.layers.core import *
class Regressor(BaseEstimator):
def __init__(self):
self.model = Sequential([
Dense(100, input_dim=68, activation="sigmoid", init="lecun_uniform"),
Dense(100, activation="sigmoid"),
Dense(100, activation="sigmoid"),
Dense(5),
])
self.model.compile( optimizer='rmsprop',
loss='mse')
def fit(self, X, y):
print("Training...")
imputer = Imputer()
X = imputer.fit_transform(X)
normalizer = StandardScaler()
X = normalizer.fit_transform(X)
print "shape : ", X.shape
self.model.fit(X, y, nb_epoch=200, batch_size=100)
for layer in self.model.layers:
weights = layer.get_weights()
print "weights ", weights
#return self.reg.fit(X, y)
def predict(self, X):
imputer = Imputer()
X = imputer.fit_transform(X)
normalizer = StandardScaler()
X = normalizer.fit_transform(X)
return self.model.predict(X)
#return self.reg.predict(X)
| mit |
srwareham/CampaignAdvisor | campaignadvisor/data_cleanup/normalize.py | 1 | 4104 | import pandas as pd
import numpy as np
import os
import cPickle as pickle
from context import campaignadvisor
from sklearn import preprocessing
class Normalizer():
MIN_VAL = 0
MAX_VAL = 1
"""
Per-column normalization helpers: each normalize_* method takes a DataFrame
column name, reads that column (a Series) and writes the normalized Series
back onto self.df.
"""
def __init__(self, df, alg="minmax"):
self.df = df
self.alg = alg
# algorithm types =
# 'minmax' is rescaling,
# 'std' is standardization,
# 'unit' is scaling to unit length
def __repr__(self):
return self.df
def normalize_minmax(self, feature):
self.MAX_VAL = self.df[feature].max()
self.MIN_VAL = self.df[feature].min()
feature_minmax = feature + '_' + str(self.alg)
self.df[feature_minmax] = (self.df[feature] - self.MIN_VAL) / (self.MAX_VAL - self.MIN_VAL)
# return self.df[feature_minmax]
def normalize_std(self, feature):
feature_zscore = feature + '_' + str(self.alg)
self.df[feature_zscore] = (self.df[feature] - self.df[feature].mean()) / self.df[feature].std(ddof=0)
# return self.df[feature_zscore]
def normalize(self, feature):
# skip if the column min and max already equal the stored global min and max
if self.MAX_VAL == self.df[feature].max() and self.MIN_VAL == self.df[feature].min():
return True
if self.alg == 'minmax':
self.normalize_minmax(feature)
elif self.alg == 'std':
self.normalize_std(feature)
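# Illustrative sketch: how the Normalizer above can be used for min-max
# rescaling of a single column. The DataFrame and the 'votes' column are
# hypothetical values made up for this example.
def _normalizer_minmax_sketch():
    demo_df = pd.DataFrame({'votes': [10.0, 20.0, 40.0]})
    scaler = Normalizer(demo_df, alg='minmax')
    scaler.normalize('votes')
    # (x - min) / (max - min) maps 10, 20, 40 onto 0.0, ~0.33, 1.0
    return scaler.df['votes_minmax']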
def _save_pickle(data, path):
if not os.path.exists(os.path.dirname(path)):
os.makedirs(os.path.dirname(path))
with open(path, 'wb') as file_out:
pickle.dump(data, file_out)
def _load_pickle(path):
with open(path, 'rb') as file_in:
return pickle.load(file_in)
def test_normalize(df, features, alg):
if alg == 'minmax':
scale = preprocessing.MinMaxScaler().fit(df[features])
elif alg == 'std':
scale = preprocessing.StandardScaler().fit(df[features])
df_scaled = scale.transform(df[features])
return df_scaled
def main():
#jobs_name = campaignadvisor.dataframe_holder.JOBS
#votes_name = campaignadvisor.dataframe_holder.VOTES
#jobs = campaignadvisor.dataframe_holder.get_dataframe(jobs_name)
#votes = campaignadvisor.dataframe_holder.get_dataframe(votes_name)
#votes['clean_fips'] = votes['fips_code']
# county statistics dataframes
# df = pd.merge(votes, jobs, on='clean_fips', sort=False, how="inner")
df_name = campaignadvisor.dataframe_holder.COUNTY_STATISTICS
df = campaignadvisor.dataframe_holder.get_dataframe(df_name)
# get feature list from county statistics
features_to_drop = ['clean_fips', 'fips_code', 'FIPS', 'State', 'County', 'winner_name', 'winner_party']
# keywords_to_drop = ['Pct', 'Rate']
features_to_scale = list()
for feature in df.columns:
feature_element = df[feature][0]
if type(feature_element) != type(str()) and feature not in features_to_drop:
if 'Pct' not in feature and 'Rate' not in feature:
features_to_scale.append(feature)
# set algorithm
n_alg = 'minmax'
# instantiate Normalizer object with given algorithm
df_scaled = Normalizer(df, alg=n_alg)
# normalize only features to be scaled
for feature in features_to_scale:
df_scaled.normalize(feature)
# print df_scaled.df[feature]
df_cs = df_scaled.df
# testing
print "-------ALG CHECK TEST-------"
#print test_normalize(df, features_to_scale, alg=n_alg)
print "--------MIN MAX TEST--------"
features_to_test = list()
for feature in df_cs.columns:
if 'minmax' in feature:
#print df[feature]
features_to_test.append(feature)
local_max = max(df_cs[feature])
#print local_max
#print features_to_test
#df_cs['Test_Max'] = df_cs[features_to_test].max(axis=1)
# pickling
path = 'county_statistics.pik'
df['clean_fips'] = df_cs.index
with open(path, 'wb') as file_out:
pickle.dump(df_cs, file_out)
if __name__ == "__main__":
main() | mit |
rjw57/renormimg | renormimg/tool.py | 1 | 1863 | """
Renormalise a set of images from the command line to the full [0, 255] range
and save as a set of PNG files.
Usage:
renormimg [options] [<IMAGE>...]
Options:
-h, --help Show brief usage summary.
-v, --verbose Be more verbose in logging.
-s SUFFIX, --suffix=SUFFIX Suffix to append to converted files.
[default: .renorm.png]
"""
import logging
import sys
from docopt import docopt
import numpy as np
from matplotlib.pyplot import imread, imsave, cm
def main():
opts = docopt(__doc__)
if opts['--verbose']:
logging.basicConfig(level=logging.INFO)
else:
logging.basicConfig(level=logging.WARN)
logging.info('Using filename suffix: {0}'.format(opts['--suffix']))
if len(opts['<IMAGE>']) == 0:
logging.info('No images specified. Exiting successfully.')
sys.exit(0)
logging.info('Pre-scanning images...')
min_val, max_val = np.inf, -np.inf
for filename in opts['<IMAGE>']:
logging.info('Scanning {0}'.format(filename))
A = imread(filename)
max_val = max(np.max(A), max_val)
min_val = min(np.min(A), min_val)
logging.info('Input images have values on interval [{0}, {1}].'.format(min_val, max_val))
if max_val == min_val:
logging.error('Images all have the same value pixels. Rescaling is meaningless.')
sys.exit(1)
for filename in opts['<IMAGE>']:
out_filename = filename + opts['--suffix']
logging.info('Re-scaling {0} to {1}'.format(filename, out_filename))
A = np.array(imread(filename), dtype=np.float32)
A -= min_val
A /= max_val - min_val
if len(A.shape) == 2:
A = cm.gray(A)
imsave(out_filename, A, format='png')
logging.info('Finished')
# vim:sw=4:sts=4:et
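# Illustrative sketch of the rescaling arithmetic used in main() above, on a
# synthetic array (values made up for the example); imsave() then maps the
# resulting [0, 1] floats onto the full 0-255 range of the output PNG.
def _rescale_sketch():
    A = np.array([10.0, 15.0, 20.0], dtype=np.float32)
    min_val, max_val = A.min(), A.max()
    A -= min_val
    A /= max_val - min_val  # 10, 15, 20 -> 0.0, 0.5, 1.0
    return A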
| mit |
ssh0/growing-string | triangular_lattice/fractal_dim_from_mass2.py | 1 | 1712 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
#
# written by Shotaro Fujimoto
# 2017-01-22
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import fractal_dim_from_mass as fd
def read_from_csv(fn):
data = np.loadtxt(fn, delimiter=',', skiprows=1)
return data
def manual_data():
Ds = []
for beta_i in range(6):
result_data_paths = fd.get_paths(fix='beta', beta_num=beta_i, ver=1)
_Ds = []
for path in result_data_paths:
_Ds.append(fd.get_fractal_dim(path))
Ds.append(_Ds)
#Ds = [
# [200, 400, ..., 2000], # 0.
# [200, 400, ..., 2000], # 2.
# ...
# [200, 400, ..., 2000], # 10.
# ]
Ds = np.array(Ds)
return Ds
if __name__ == '__main__':
frames_list = [200, 400, 600, 800, 1000, 1200, 1400, 1600, 1800, 2000]
## 0 1 2 3 4 5 6 7 8 9
beta_list = [0, 2, 4, 6, 8, 10]
## 0 1 2 3 4 5
Ds = read_from_csv('./results/img/mass_in_r/data_170122.csv').T
# Ds = manual_data()
markers = ['o', 'v', '^', 's', 'D', 'h']
fig, ax = plt.subplots()
for i, beta in enumerate(beta_list):
color = cm.viridis(float(i) / (len(beta_list) - 1))
ax.plot(frames_list, Ds[i], marker=markers[i % len(markers)],
ls='', color=color, label=r'$\beta = %2.2f$' % beta)
# ax.legend(loc='best')
ax.legend(bbox_to_anchor=(1.02, 1), loc='upper left', borderaxespad=0, numpoints=1)
fig.subplots_adjust(right=0.8)
ax.set_title(r'Fractal dimension $D$')
ax.set_xlabel(r'$T$')
ax.set_ylabel(r'$D$')
ax.set_xlim(0, 2200)
ax.set_ylim(1., 2.)
plt.show()
| mit |
willgrass/pandas | pandas/core/tests/test_groupby.py | 1 | 1916 | import unittest
from pandas.core.daterange import DateRange
from pandas.core.index import Index
from pandas.core.groupby import GroupBy
from pandas.core.pytools import rands, groupby
from pandas.core.frame import DataFrame
from pandas.core.matrix import DataMatrix
from pandas.core.series import Series
import pandas.core.datetools as dt
import pandas.lib.tseries as tseries
import numpy as np
# unittest.TestCase
def commonSetUp(self):
self.dateRange = DateRange('1/1/2005', periods=250, offset=dt.bday)
self.stringIndex = Index([rands(8).upper() for x in xrange(250)])
self.groupId = Series([x[0] for x in self.stringIndex],
index=self.stringIndex)
self.groupDict = dict((k, v) for k, v in self.groupId.iteritems())
self.columnIndex = Index(['A', 'B', 'C', 'D', 'E'])
randMat = np.random.randn(250, 5)
self.stringMatrix = DataMatrix(randMat, columns=self.columnIndex,
index=self.stringIndex)
self.timeMatrix = DataMatrix(randMat, columns=self.columnIndex,
index=self.dateRange)
class GroupByTestCase(unittest.TestCase):
setUp = commonSetUp
def testPythonGrouper(self):
groupFunc = self.groupDict.get
groups = groupby(self.stringIndex, groupFunc)
setDict = dict((k, set(v)) for k, v in groups.iteritems())
for idx in self.stringIndex:
key = groupFunc(idx)
groupSet = setDict[key]
self.assert_(idx in groupSet)
def testCythonGrouper(self):
pass
def testNaNGrouping(self):
pass
def testMembership(self):
pass
def testByColumnName(self):
pass
class TestAggregate(unittest.TestCase):
setUp = commonSetUp
class TestTransform(unittest.TestCase):
setUp = commonSetUp
| bsd-3-clause |
xubenben/scikit-learn | examples/neighbors/plot_nearest_centroid.py | 264 | 1804 | """
===============================
Nearest Centroid Classification
===============================
Sample usage of Nearest Centroid classification.
It will plot the decision boundaries for each class.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn import datasets
from sklearn.neighbors import NearestCentroid
n_neighbors = 15
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
y = iris.target
h = .02 # step size in the mesh
# Create color maps
cmap_light = ListedColormap(['#FFAAAA', '#AAFFAA', '#AAAAFF'])
cmap_bold = ListedColormap(['#FF0000', '#00FF00', '#0000FF'])
for shrinkage in [None, 0.1]:
# we create an instance of Neighbours Classifier and fit the data.
clf = NearestCentroid(shrink_threshold=shrinkage)
clf.fit(X, y)
y_pred = clf.predict(X)
print(shrinkage, np.mean(y == y_pred))
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, m_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure()
plt.pcolormesh(xx, yy, Z, cmap=cmap_light)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=cmap_bold)
plt.title("3-Class classification (shrink_threshold=%r)"
% shrinkage)
plt.axis('tight')
plt.show()
| bsd-3-clause |
EricSB/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/texmanager.py | 69 | 16818 | """
This module supports embedded TeX expressions in matplotlib via dvipng
and dvips for the raster and postscript backends. The tex and
dvipng/dvips information is cached in ~/.matplotlib/tex.cache for reuse between
sessions
Requirements:
* latex
* \*Agg backends: dvipng
* PS backend: latex w/ psfrag, dvips, and Ghostscript 8.51
(older versions do not work properly)
Backends:
* \*Agg
* PS
* PDF
For raster output, you can get RGBA numpy arrays from TeX expressions
as follows::
texmanager = TexManager()
s = '\\TeX\\ is Number $\\displaystyle\\sum_{n=1}^\\infty\\frac{-e^{i\pi}}{2^n}$!'
Z = self.texmanager.get_rgba(s, size=12, dpi=80, rgb=(1,0,0))
To enable tex rendering of all text in your matplotlib figure, set
text.usetex in your matplotlibrc file (http://matplotlib.sf.net/matplotlibrc)
or include these two lines in your script::
from matplotlib import rc
rc('text', usetex=True)
"""
import copy, glob, os, shutil, sys, warnings
try:
from hashlib import md5
except ImportError:
from md5 import md5 #Deprecated in 2.5
import distutils.version
import numpy as np
import matplotlib as mpl
from matplotlib import rcParams
from matplotlib._png import read_png
DEBUG = False
if sys.platform.startswith('win'): cmd_split = '&'
else: cmd_split = ';'
def dvipng_hack_alpha():
stdin, stdout = os.popen4('dvipng -version')
for line in stdout:
if line.startswith('dvipng '):
version = line.split()[-1]
mpl.verbose.report('Found dvipng version %s'% version,
'helpful')
version = distutils.version.LooseVersion(version)
return version < distutils.version.LooseVersion('1.6')
raise RuntimeError('Could not obtain dvipng version')
class TexManager:
"""
Convert strings to dvi files using TeX, caching the results to a
working dir
"""
oldpath = mpl.get_home()
if oldpath is None: oldpath = mpl.get_data_path()
oldcache = os.path.join(oldpath, '.tex.cache')
configdir = mpl.get_configdir()
texcache = os.path.join(configdir, 'tex.cache')
if os.path.exists(oldcache):
print >> sys.stderr, """\
WARNING: found a TeX cache dir in the deprecated location "%s".
Moving it to the new default location "%s"."""%(oldcache, texcache)
shutil.move(oldcache, texcache)
if not os.path.exists(texcache):
os.mkdir(texcache)
_dvipng_hack_alpha = dvipng_hack_alpha()
# mappable cache of
rgba_arrayd = {}
grey_arrayd = {}
postscriptd = {}
pscnt = 0
serif = ('cmr', '')
sans_serif = ('cmss', '')
monospace = ('cmtt', '')
cursive = ('pzc', r'\usepackage{chancery}')
font_family = 'serif'
font_families = ('serif', 'sans-serif', 'cursive', 'monospace')
font_info = {'new century schoolbook': ('pnc',
r'\renewcommand{\rmdefault}{pnc}'),
'bookman': ('pbk', r'\renewcommand{\rmdefault}{pbk}'),
'times': ('ptm', r'\usepackage{mathptmx}'),
'palatino': ('ppl', r'\usepackage{mathpazo}'),
'zapf chancery': ('pzc', r'\usepackage{chancery}'),
'cursive': ('pzc', r'\usepackage{chancery}'),
'charter': ('pch', r'\usepackage{charter}'),
'serif': ('cmr', ''),
'sans-serif': ('cmss', ''),
'helvetica': ('phv', r'\usepackage{helvet}'),
'avant garde': ('pag', r'\usepackage{avant}'),
'courier': ('pcr', r'\usepackage{courier}'),
'monospace': ('cmtt', ''),
'computer modern roman': ('cmr', ''),
'computer modern sans serif': ('cmss', ''),
'computer modern typewriter': ('cmtt', '')}
_rc_cache = None
_rc_cache_keys = ('text.latex.preamble', )\
+ tuple(['font.'+n for n in ('family', ) + font_families])
def __init__(self):
if not os.path.isdir(self.texcache):
os.mkdir(self.texcache)
ff = rcParams['font.family'].lower()
if ff in self.font_families:
self.font_family = ff
else:
mpl.verbose.report('The %s font family is not compatible with LaTeX. serif will be used by default.' % ff, 'helpful')
self.font_family = 'serif'
fontconfig = [self.font_family]
for font_family, font_family_attr in \
[(ff, ff.replace('-', '_')) for ff in self.font_families]:
for font in rcParams['font.'+font_family]:
if font.lower() in self.font_info:
found_font = self.font_info[font.lower()]
setattr(self, font_family_attr,
self.font_info[font.lower()])
if DEBUG:
print 'family: %s, font: %s, info: %s'%(font_family,
font, self.font_info[font.lower()])
break
else:
if DEBUG: print '%s font is not compatible with usetex' % font
else:
mpl.verbose.report('No LaTeX-compatible font found for the %s font family in rcParams. Using default.' % ff, 'helpful')
setattr(self, font_family_attr, self.font_info[font_family])
fontconfig.append(getattr(self, font_family_attr)[0])
self._fontconfig = ''.join(fontconfig)
# The following packages and commands need to be included in the latex
# file's preamble:
cmd = [self.serif[1], self.sans_serif[1], self.monospace[1]]
if self.font_family == 'cursive': cmd.append(self.cursive[1])
while r'\usepackage{type1cm}' in cmd:
cmd.remove(r'\usepackage{type1cm}')
cmd = '\n'.join(cmd)
self._font_preamble = '\n'.join([r'\usepackage{type1cm}', cmd,
r'\usepackage{textcomp}'])
def get_basefile(self, tex, fontsize, dpi=None):
"""
returns a filename based on a hash of the string, fontsize, and dpi
"""
s = ''.join([tex, self.get_font_config(), '%f'%fontsize,
self.get_custom_preamble(), str(dpi or '')])
# make sure hash is consistent for all strings, regardless of encoding:
bytes = unicode(s).encode('utf-8')
return os.path.join(self.texcache, md5(bytes).hexdigest())
def get_font_config(self):
"""Reinitializes self if relevant rcParams on have changed."""
if self._rc_cache is None:
self._rc_cache = dict([(k,None) for k in self._rc_cache_keys])
changed = [par for par in self._rc_cache_keys if rcParams[par] != \
self._rc_cache[par]]
if changed:
if DEBUG: print 'DEBUG following keys changed:', changed
for k in changed:
if DEBUG:
print 'DEBUG %-20s: %-10s -> %-10s' % \
(k, self._rc_cache[k], rcParams[k])
# deepcopy may not be necessary, but feels more future-proof
self._rc_cache[k] = copy.deepcopy(rcParams[k])
if DEBUG: print 'DEBUG RE-INIT\nold fontconfig:', self._fontconfig
self.__init__()
if DEBUG: print 'DEBUG fontconfig:', self._fontconfig
return self._fontconfig
def get_font_preamble(self):
"""
returns a string containing font configuration for the tex preamble
"""
return self._font_preamble
def get_custom_preamble(self):
"""returns a string containing user additions to the tex preamble"""
return '\n'.join(rcParams['text.latex.preamble'])
def _get_shell_cmd(self, *args):
"""
On windows, changing directories can be complicated by the presence of
multiple drives. get_shell_cmd deals with this issue.
"""
if sys.platform == 'win32':
command = ['%s'% os.path.splitdrive(self.texcache)[0]]
else:
command = []
command.extend(args)
return ' && '.join(command)
def make_tex(self, tex, fontsize):
"""
Generate a tex file to render the tex string at a specific font size
returns the file name
"""
basefile = self.get_basefile(tex, fontsize)
texfile = '%s.tex'%basefile
fh = file(texfile, 'w')
custom_preamble = self.get_custom_preamble()
fontcmd = {'sans-serif' : r'{\sffamily %s}',
'monospace' : r'{\ttfamily %s}'}.get(self.font_family,
r'{\rmfamily %s}')
tex = fontcmd % tex
if rcParams['text.latex.unicode']:
unicode_preamble = """\usepackage{ucs}
\usepackage[utf8x]{inputenc}"""
else:
unicode_preamble = ''
s = r"""\documentclass{article}
%s
%s
%s
\usepackage[papersize={72in,72in}, body={70in,70in}, margin={1in,1in}]{geometry}
\pagestyle{empty}
\begin{document}
\fontsize{%f}{%f}%s
\end{document}
""" % (self._font_preamble, unicode_preamble, custom_preamble,
fontsize, fontsize*1.25, tex)
if rcParams['text.latex.unicode']:
fh.write(s.encode('utf8'))
else:
try:
fh.write(s)
except UnicodeEncodeError, err:
mpl.verbose.report("You are using unicode and latex, but have "
"not enabled the matplotlib 'text.latex.unicode' "
"rcParam.", 'helpful')
raise
fh.close()
return texfile
def make_dvi(self, tex, fontsize):
"""
generates a dvi file containing latex's layout of tex string
returns the file name
"""
basefile = self.get_basefile(tex, fontsize)
dvifile = '%s.dvi'% basefile
if DEBUG or not os.path.exists(dvifile):
texfile = self.make_tex(tex, fontsize)
outfile = basefile+'.output'
command = self._get_shell_cmd('cd "%s"'% self.texcache,
'latex -interaction=nonstopmode %s > "%s"'\
%(os.path.split(texfile)[-1], outfile))
mpl.verbose.report(command, 'debug')
exit_status = os.system(command)
try:
fh = file(outfile)
report = fh.read()
fh.close()
except IOError:
report = 'No latex error report available.'
if exit_status:
raise RuntimeError(('LaTeX was not able to process the following \
string:\n%s\nHere is the full report generated by LaTeX: \n\n'% repr(tex)) + report)
else: mpl.verbose.report(report, 'debug')
for fname in glob.glob(basefile+'*'):
if fname.endswith('dvi'): pass
elif fname.endswith('tex'): pass
else:
try: os.remove(fname)
except OSError: pass
return dvifile
def make_png(self, tex, fontsize, dpi):
"""
generates a png file containing latex's rendering of tex string
returns the filename
"""
basefile = self.get_basefile(tex, fontsize, dpi)
pngfile = '%s.png'% basefile
# see get_rgba for a discussion of the background
if DEBUG or not os.path.exists(pngfile):
dvifile = self.make_dvi(tex, fontsize)
outfile = basefile+'.output'
command = self._get_shell_cmd('cd "%s"' % self.texcache,
'dvipng -bg Transparent -D %s -T tight -o \
"%s" "%s" > "%s"'%(dpi, os.path.split(pngfile)[-1],
os.path.split(dvifile)[-1], outfile))
mpl.verbose.report(command, 'debug')
exit_status = os.system(command)
try:
fh = file(outfile)
report = fh.read()
fh.close()
except IOError:
report = 'No dvipng error report available.'
if exit_status:
raise RuntimeError('dvipng was not able to \
process the following file:\n%s\nHere is the full report generated by dvipng: \
\n\n'% dvifile + report)
else: mpl.verbose.report(report, 'debug')
try: os.remove(outfile)
except OSError: pass
return pngfile
def make_ps(self, tex, fontsize):
"""
generates a postscript file containing latex's rendering of tex string
returns the file name
"""
basefile = self.get_basefile(tex, fontsize)
psfile = '%s.epsf'% basefile
if DEBUG or not os.path.exists(psfile):
dvifile = self.make_dvi(tex, fontsize)
outfile = basefile+'.output'
command = self._get_shell_cmd('cd "%s"'% self.texcache,
'dvips -q -E -o "%s" "%s" > "%s"'\
%(os.path.split(psfile)[-1],
os.path.split(dvifile)[-1], outfile))
mpl.verbose.report(command, 'debug')
exit_status = os.system(command)
fh = file(outfile)
if exit_status:
raise RuntimeError('dvips was not able to \
process the following file:\n%s\nHere is the full report generated by dvips: \
\n\n'% dvifile + fh.read())
else: mpl.verbose.report(fh.read(), 'debug')
fh.close()
os.remove(outfile)
return psfile
def get_ps_bbox(self, tex, fontsize):
"""
returns a list containing the postscript bounding box for latex's
rendering of the tex string
"""
psfile = self.make_ps(tex, fontsize)
ps = file(psfile)
for line in ps:
if line.startswith('%%BoundingBox:'):
return [int(val) for val in line.split()[1:]]
raise RuntimeError('Could not parse %s'%psfile)
def get_grey(self, tex, fontsize=None, dpi=None):
"""returns the alpha channel"""
key = tex, self.get_font_config(), fontsize, dpi
alpha = self.grey_arrayd.get(key)
if alpha is None:
pngfile = self.make_png(tex, fontsize, dpi)
X = read_png(os.path.join(self.texcache, pngfile))
if rcParams['text.dvipnghack'] is not None:
hack = rcParams['text.dvipnghack']
else:
hack = self._dvipng_hack_alpha
if hack:
# hack the alpha channel
# dvipng assumed a constant background, whereas we want to
# overlay these rasters with antialiasing over arbitrary
# backgrounds that may have other figure elements under them.
# When you set dvipng -bg Transparent, it actually makes the
# alpha channel 1 and does the background compositing and
# antialiasing itself and puts the blended data in the rgb
# channels. So what we do is extract the alpha information
# from the red channel, which is a blend of the default dvipng
# background (white) and foreground (black). So the amount of
# red (or green or blue for that matter since white and black
# blend to a grayscale) is the alpha intensity. Once we
# extract the correct alpha information, we assign it to the
# alpha channel properly and let the users pick their rgb. In
# this way, we can overlay tex strings on arbitrary
# backgrounds with antialiasing
#
# red = alpha*red_foreground + (1-alpha)*red_background
#
# Since the foreground is black (0) and the background is
# white (1) this reduces to red = 1-alpha or alpha = 1-red
#alpha = npy.sqrt(1-X[:,:,0]) # should this be sqrt here?
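# Worked example with illustrative numbers: a pixel meant to be 60% opaque
# black composited over white comes back with red = 0.6*0 + 0.4*1 = 0.4,
# so the recovered alpha is 1 - 0.4 = 0.6.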
alpha = 1-X[:,:,0]
else:
alpha = X[:,:,-1]
self.grey_arrayd[key] = alpha
return alpha
def get_rgba(self, tex, fontsize=None, dpi=None, rgb=(0,0,0)):
"""
Returns latex's rendering of the tex string as an rgba array
"""
if not fontsize: fontsize = rcParams['font.size']
if not dpi: dpi = rcParams['savefig.dpi']
r,g,b = rgb
key = tex, self.get_font_config(), fontsize, dpi, tuple(rgb)
Z = self.rgba_arrayd.get(key)
if Z is None:
alpha = self.get_grey(tex, fontsize, dpi)
Z = np.zeros((alpha.shape[0], alpha.shape[1], 4), np.float)
Z[:,:,0] = r
Z[:,:,1] = g
Z[:,:,2] = b
Z[:,:,3] = alpha
self.rgba_arrayd[key] = Z
return Z
| agpl-3.0 |
gdooper/scipy | scipy/stats/stats.py | 2 | 179056 | # Copyright (c) Gary Strangman. All rights reserved
#
# Disclaimer
#
# This software is provided "as-is". There are no expressed or implied
# warranties of any kind, including, but not limited to, the warranties
# of merchantability and fitness for a given application. In no event
# shall Gary Strangman be liable for any direct, indirect, incidental,
# special, exemplary or consequential damages (including, but not limited
# to, loss of use, data or profits, or business interruption) however
# caused and on any theory of liability, whether in contract, strict
# liability or tort (including negligence or otherwise) arising in any way
# out of the use of this software, even if advised of the possibility of
# such damage.
#
#
# Heavily adapted for use by SciPy 2002 by Travis Oliphant
"""
A collection of basic statistical functions for python. The function
names appear below.
Some scalar functions defined here are also available in the scipy.special
package where they work on arbitrary sized arrays.
Disclaimers: The function list is obviously incomplete and, worse, the
functions are not optimized. All functions have been tested (some more
so than others), but they are far from bulletproof. Thus, as with any
free software, no warranty or guarantee is expressed or implied. :-) A
few extra functions that don't appear in the list below can be found by
interested treasure-hunters. These functions don't necessarily have
both list and array versions but were deemed useful.
Central Tendency
----------------
.. autosummary::
:toctree: generated/
gmean
hmean
mode
Moments
-------
.. autosummary::
:toctree: generated/
moment
variation
skew
kurtosis
normaltest
Altered Versions
----------------
.. autosummary::
:toctree: generated/
tmean
tvar
tstd
tsem
describe
Frequency Stats
---------------
.. autosummary::
:toctree: generated/
itemfreq
scoreatpercentile
percentileofscore
histogram
cumfreq
relfreq
Variability
-----------
.. autosummary::
:toctree: generated/
obrientransform
signaltonoise
sem
zmap
zscore
iqr
Trimming Functions
------------------
.. autosummary::
:toctree: generated/
threshold
trimboth
trim1
Correlation Functions
---------------------
.. autosummary::
:toctree: generated/
pearsonr
fisher_exact
spearmanr
pointbiserialr
kendalltau
linregress
theilslopes
Inferential Stats
-----------------
.. autosummary::
:toctree: generated/
ttest_1samp
ttest_ind
ttest_ind_from_stats
ttest_rel
chisquare
power_divergence
ks_2samp
mannwhitneyu
ranksums
wilcoxon
kruskal
friedmanchisquare
combine_pvalues
Probability Calculations
------------------------
.. autosummary::
:toctree: generated/
chisqprob
betai
ANOVA Functions
---------------
.. autosummary::
:toctree: generated/
f_oneway
f_value
Support Functions
-----------------
.. autosummary::
:toctree: generated/
ss
square_of_sums
rankdata
References
----------
.. [CRCProbStat2000] Zwillinger, D. and Kokoska, S. (2000). CRC Standard
Probability and Statistics Tables and Formulae. Chapman & Hall: New
York. 2000.
"""
from __future__ import division, print_function, absolute_import
import warnings
import math
from collections import namedtuple
# Scipy imports.
from scipy._lib.six import callable, string_types, xrange
from scipy._lib._version import NumpyVersion
from numpy import array, asarray, ma, zeros
import scipy.special as special
import scipy.linalg as linalg
import numpy as np
from . import distributions
from . import mstats_basic
from ._distn_infrastructure import _lazywhere
from ._stats_mstats_common import _find_repeats, linregress, theilslopes
from ._stats import _kendall_condis
__all__ = ['find_repeats', 'gmean', 'hmean', 'mode', 'tmean', 'tvar',
'tmin', 'tmax', 'tstd', 'tsem', 'moment', 'variation',
'skew', 'kurtosis', 'describe', 'skewtest', 'kurtosistest',
'normaltest', 'jarque_bera', 'itemfreq',
'scoreatpercentile', 'percentileofscore', 'histogram',
'histogram2', 'cumfreq', 'relfreq', 'obrientransform',
'signaltonoise', 'sem', 'zmap', 'zscore', 'iqr', 'threshold',
'sigmaclip', 'trimboth', 'trim1', 'trim_mean', 'f_oneway',
'pearsonr', 'fisher_exact', 'spearmanr', 'pointbiserialr',
'kendalltau', 'linregress', 'theilslopes', 'ttest_1samp',
'ttest_ind', 'ttest_ind_from_stats', 'ttest_rel', 'kstest',
'chisquare', 'power_divergence', 'ks_2samp', 'mannwhitneyu',
'tiecorrect', 'ranksums', 'kruskal', 'friedmanchisquare',
'chisqprob', 'betai',
'f_value_wilks_lambda', 'f_value', 'f_value_multivariate',
'ss', 'square_of_sums', 'fastsort', 'rankdata',
'combine_pvalues', ]
def _chk_asarray(a, axis):
if axis is None:
a = np.ravel(a)
outaxis = 0
else:
a = np.asarray(a)
outaxis = axis
if a.ndim == 0:
a = np.atleast_1d(a)
return a, outaxis
def _chk2_asarray(a, b, axis):
if axis is None:
a = np.ravel(a)
b = np.ravel(b)
outaxis = 0
else:
a = np.asarray(a)
b = np.asarray(b)
outaxis = axis
if a.ndim == 0:
a = np.atleast_1d(a)
if b.ndim == 0:
b = np.atleast_1d(b)
return a, b, outaxis
def _contains_nan(a, nan_policy='propagate'):
policies = ['propagate', 'raise', 'omit']
if nan_policy not in policies:
raise ValueError("nan_policy must be one of {%s}" %
', '.join("'%s'" % s for s in policies))
try:
# Calling np.sum to avoid creating a huge array into memory
# e.g. np.isnan(a).any()
with np.errstate(invalid='ignore'):
contains_nan = np.isnan(np.sum(a))
except TypeError:
# If the check cannot be properly performed we fall back to omitting
# nan values and raising a warning. This can happen when attempting to
# sum things that are not numbers (e.g. as in the function `mode`).
contains_nan = False
nan_policy = 'omit'
warnings.warn("The input array could not be properly checked for nan "
"values. nan values will be ignored.", RuntimeWarning)
if contains_nan and nan_policy == 'raise':
raise ValueError("The input contains nan values")
return (contains_nan, nan_policy)
#####################################
# CENTRAL TENDENCY #
#####################################
def gmean(a, axis=0, dtype=None):
"""
Compute the geometric mean along the specified axis.
Returns the geometric average of the array elements.
That is: n-th root of (x1 * x2 * ... * xn)
Parameters
----------
a : array_like
Input array or object that can be converted to an array.
axis : int or None, optional
Axis along which the geometric mean is computed. Default is 0.
If None, compute over the whole array `a`.
dtype : dtype, optional
Type of the returned array and of the accumulator in which the
elements are summed. If dtype is not specified, it defaults to the
dtype of a, unless a has an integer dtype with a precision less than
that of the default platform integer. In that case, the default
platform integer is used.
Returns
-------
gmean : ndarray
see dtype parameter above
See Also
--------
numpy.mean : Arithmetic average
numpy.average : Weighted average
hmean : Harmonic mean
Notes
-----
The geometric average is computed over a single dimension of the input
array, axis=0 by default, or all values in the array if axis=None.
float64 intermediate and return values are used for integer inputs.
Use masked arrays to ignore any non-finite values in the input or that
arise in the calculations such as Not a Number and infinity because masked
arrays automatically mask any non-finite values.
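Examples
--------
A minimal illustration, compared with ``np.allclose`` rather than an exact
floating-point repr:

>>> import numpy as np
>>> from scipy import stats
>>> np.allclose(stats.gmean([1, 4, 16]), 4.0)  # cube root of 1*4*16 = 64
True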
"""
if not isinstance(a, np.ndarray): # if not an ndarray object attempt to convert it
log_a = np.log(np.array(a, dtype=dtype))
elif dtype: # Must change the default dtype allowing array type
if isinstance(a, np.ma.MaskedArray):
log_a = np.log(np.ma.asarray(a, dtype=dtype))
else:
log_a = np.log(np.asarray(a, dtype=dtype))
else:
log_a = np.log(a)
return np.exp(log_a.mean(axis=axis))
def hmean(a, axis=0, dtype=None):
"""
Calculates the harmonic mean along the specified axis.
That is: n / (1/x1 + 1/x2 + ... + 1/xn)
Parameters
----------
a : array_like
Input array, masked array or object that can be converted to an array.
axis : int or None, optional
Axis along which the harmonic mean is computed. Default is 0.
If None, compute over the whole array `a`.
dtype : dtype, optional
Type of the returned array and of the accumulator in which the
elements are summed. If `dtype` is not specified, it defaults to the
dtype of `a`, unless `a` has an integer `dtype` with a precision less
than that of the default platform integer. In that case, the default
platform integer is used.
Returns
-------
hmean : ndarray
see `dtype` parameter above
See Also
--------
numpy.mean : Arithmetic average
numpy.average : Weighted average
gmean : Geometric mean
Notes
-----
The harmonic mean is computed over a single dimension of the input
array, axis=0 by default, or all values in the array if axis=None.
float64 intermediate and return values are used for integer inputs.
Use masked arrays to ignore any non-finite values in the input or that
arise in the calculations such as Not a Number and infinity.
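Examples
--------
A small illustration with values chosen so the result is exact:

>>> from scipy import stats
>>> stats.hmean([1, 4, 4])  # 3 / (1/1 + 1/4 + 1/4) = 3 / 1.5
2.0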
"""
if not isinstance(a, np.ndarray):
a = np.array(a, dtype=dtype)
if np.all(a > 0): # Harmonic mean only defined if greater than zero
if isinstance(a, np.ma.MaskedArray):
size = a.count(axis)
else:
if axis is None:
a = a.ravel()
size = a.shape[0]
else:
size = a.shape[axis]
return size / np.sum(1.0/a, axis=axis, dtype=dtype)
else:
raise ValueError("Harmonic mean only defined if all elements greater than zero")
ModeResult = namedtuple('ModeResult', ('mode', 'count'))
def mode(a, axis=0, nan_policy='propagate'):
"""
Returns an array of the modal (most common) value in the passed array.
If there is more than one such value, only the smallest is returned.
The bin-count for the modal bins is also returned.
Parameters
----------
a : array_like
n-dimensional array of which to find mode(s).
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over
the whole array `a`.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
mode : ndarray
Array of modal values.
count : ndarray
Array of counts for each mode.
Examples
--------
>>> a = np.array([[6, 8, 3, 0],
... [3, 2, 1, 7],
... [8, 1, 8, 4],
... [5, 3, 0, 5],
... [4, 7, 5, 9]])
>>> from scipy import stats
>>> stats.mode(a)
(array([[3, 1, 0, 0]]), array([[1, 1, 1, 1]]))
To get mode of whole array, specify ``axis=None``:
>>> stats.mode(a, axis=None)
(array([3]), array([3]))
"""
a, axis = _chk_asarray(a, axis)
if a.size == 0:
return np.array([]), np.array([])
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.mode(a, axis)
scores = np.unique(np.ravel(a)) # get ALL unique values
testshape = list(a.shape)
testshape[axis] = 1
oldmostfreq = np.zeros(testshape, dtype=a.dtype)
oldcounts = np.zeros(testshape, dtype=int)
for score in scores:
template = (a == score)
counts = np.expand_dims(np.sum(template, axis), axis)
mostfrequent = np.where(counts > oldcounts, score, oldmostfreq)
oldcounts = np.maximum(counts, oldcounts)
oldmostfreq = mostfrequent
return ModeResult(mostfrequent, oldcounts)
def _mask_to_limits(a, limits, inclusive):
"""Mask an array for values outside of given limits.
This is primarily a utility function.
Parameters
----------
a : array
limits : (float or None, float or None)
A tuple consisting of the (lower limit, upper limit). Values in the
input array less than the lower limit or greater than the upper limit
will be masked out. None implies no limit.
inclusive : (bool, bool)
A tuple consisting of the (lower flag, upper flag). These flags
determine whether values exactly equal to lower or upper are allowed.
Returns
-------
A MaskedArray.
Raises
------
A ValueError if there are no values within the given limits.
"""
lower_limit, upper_limit = limits
lower_include, upper_include = inclusive
am = ma.MaskedArray(a)
if lower_limit is not None:
if lower_include:
am = ma.masked_less(am, lower_limit)
else:
am = ma.masked_less_equal(am, lower_limit)
if upper_limit is not None:
if upper_include:
am = ma.masked_greater(am, upper_limit)
else:
am = ma.masked_greater_equal(am, upper_limit)
if am.count() == 0:
raise ValueError("No array values within given limits")
return am
def tmean(a, limits=None, inclusive=(True, True), axis=None):
"""
Compute the trimmed mean.
This function finds the arithmetic mean of given values, ignoring values
outside the given `limits`.
Parameters
----------
a : array_like
Array of values.
limits : None or (lower limit, upper limit), optional
Values in the input array less than the lower limit or greater than the
upper limit will be ignored. When limits is None (default), then all
values are used. Either of the limit values in the tuple can also be
None representing a half-open interval.
inclusive : (bool, bool), optional
A tuple consisting of the (lower flag, upper flag). These flags
determine whether values exactly equal to the lower or upper limits
are included. The default value is (True, True).
axis : int or None, optional
Axis along which to compute test. Default is None.
Returns
-------
tmean : float
See also
--------
trim_mean : returns mean after trimming a proportion from both tails.
Examples
--------
>>> from scipy import stats
>>> x = np.arange(20)
>>> stats.tmean(x)
9.5
>>> stats.tmean(x, (3,17))
10.0
"""
a = asarray(a)
if limits is None:
return np.mean(a, None)
am = _mask_to_limits(a.ravel(), limits, inclusive)
return am.mean(axis=axis)
def tvar(a, limits=None, inclusive=(True, True), axis=0, ddof=1):
"""
Compute the trimmed variance
This function computes the sample variance of an array of values,
while ignoring values which are outside of given `limits`.
Parameters
----------
a : array_like
Array of values.
limits : None or (lower limit, upper limit), optional
Values in the input array less than the lower limit or greater than the
upper limit will be ignored. When limits is None, then all values are
used. Either of the limit values in the tuple can also be None
representing a half-open interval. The default value is None.
inclusive : (bool, bool), optional
A tuple consisting of the (lower flag, upper flag). These flags
determine whether values exactly equal to the lower or upper limits
are included. The default value is (True, True).
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over the
whole array `a`.
ddof : int, optional
Delta degrees of freedom. Default is 1.
Returns
-------
tvar : float
Trimmed variance.
Notes
-----
`tvar` computes the unbiased sample variance, i.e. it uses a correction
factor ``n / (n - 1)``.
Examples
--------
>>> from scipy import stats
>>> x = np.arange(20)
>>> stats.tvar(x)
35.0
>>> stats.tvar(x, (3,17))
20.0
"""
a = asarray(a)
a = a.astype(float).ravel()
if limits is None:
n = len(a)
return a.var() * n/(n-1.)
am = _mask_to_limits(a, limits, inclusive)
return np.ma.var(am, ddof=ddof, axis=axis)
def tmin(a, lowerlimit=None, axis=0, inclusive=True, nan_policy='propagate'):
"""
Compute the trimmed minimum
This function finds the minimum value of an array `a` along the
specified axis, but only considering values greater than a specified
lower limit.
Parameters
----------
a : array_like
array of values
lowerlimit : None or float, optional
Values in the input array less than the given limit will be ignored.
When lowerlimit is None, then all values are used. The default value
is None.
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over the
whole array `a`.
inclusive : {True, False}, optional
This flag determines whether values exactly equal to the lower limit
are included. The default value is True.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
tmin : float, int or ndarray
Examples
--------
>>> from scipy import stats
>>> x = np.arange(20)
>>> stats.tmin(x)
0
>>> stats.tmin(x, 13)
13
>>> stats.tmin(x, 13, inclusive=False)
14
"""
a, axis = _chk_asarray(a, axis)
am = _mask_to_limits(a, (lowerlimit, None), (inclusive, False))
contains_nan, nan_policy = _contains_nan(am, nan_policy)
if contains_nan and nan_policy == 'omit':
am = ma.masked_invalid(am)
res = ma.minimum.reduce(am, axis).data
if res.ndim == 0:
return res[()]
return res
def tmax(a, upperlimit=None, axis=0, inclusive=True, nan_policy='propagate'):
"""
Compute the trimmed maximum
This function computes the maximum value of an array along a given axis,
while ignoring values larger than a specified upper limit.
Parameters
----------
a : array_like
array of values
upperlimit : None or float, optional
Values in the input array greater than the given limit will be ignored.
When upperlimit is None, then all values are used. The default value
is None.
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over the
whole array `a`.
inclusive : {True, False}, optional
This flag determines whether values exactly equal to the upper limit
are included. The default value is True.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
tmax : float, int or ndarray
Examples
--------
>>> from scipy import stats
>>> x = np.arange(20)
>>> stats.tmax(x)
19
>>> stats.tmax(x, 13)
13
>>> stats.tmax(x, 13, inclusive=False)
12
"""
a, axis = _chk_asarray(a, axis)
am = _mask_to_limits(a, (None, upperlimit), (False, inclusive))
contains_nan, nan_policy = _contains_nan(am, nan_policy)
if contains_nan and nan_policy == 'omit':
am = ma.masked_invalid(am)
res = ma.maximum.reduce(am, axis).data
if res.ndim == 0:
return res[()]
return res
def tstd(a, limits=None, inclusive=(True, True), axis=0, ddof=1):
"""
Compute the trimmed sample standard deviation
This function finds the sample standard deviation of given values,
ignoring values outside the given `limits`.
Parameters
----------
a : array_like
array of values
limits : None or (lower limit, upper limit), optional
Values in the input array less than the lower limit or greater than the
upper limit will be ignored. When limits is None, then all values are
used. Either of the limit values in the tuple can also be None
representing a half-open interval. The default value is None.
inclusive : (bool, bool), optional
A tuple consisting of the (lower flag, upper flag). These flags
determine whether values exactly equal to the lower or upper limits
are included. The default value is (True, True).
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over the
whole array `a`.
ddof : int, optional
Delta degrees of freedom. Default is 1.
Returns
-------
tstd : float
Notes
-----
`tstd` computes the unbiased sample standard deviation, i.e. it uses a
correction factor ``n / (n - 1)``.
Examples
--------
>>> from scipy import stats
>>> x = np.arange(20)
>>> stats.tstd(x)
5.9160797830996161
>>> stats.tstd(x, (3,17))
4.4721359549995796
"""
return np.sqrt(tvar(a, limits, inclusive, axis, ddof))
def tsem(a, limits=None, inclusive=(True, True), axis=0, ddof=1):
"""
Compute the trimmed standard error of the mean.
This function finds the standard error of the mean for given
values, ignoring values outside the given `limits`.
Parameters
----------
a : array_like
array of values
limits : None or (lower limit, upper limit), optional
Values in the input array less than the lower limit or greater than the
upper limit will be ignored. When limits is None, then all values are
used. Either of the limit values in the tuple can also be None
representing a half-open interval. The default value is None.
inclusive : (bool, bool), optional
A tuple consisting of the (lower flag, upper flag). These flags
determine whether values exactly equal to the lower or upper limits
are included. The default value is (True, True).
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over the
whole array `a`.
ddof : int, optional
Delta degrees of freedom. Default is 1.
Returns
-------
tsem : float
Notes
-----
`tsem` uses unbiased sample standard deviation, i.e. it uses a
correction factor ``n / (n - 1)``.
Examples
--------
>>> from scipy import stats
>>> x = np.arange(20)
>>> stats.tsem(x)
1.3228756555322954
>>> stats.tsem(x, (3,17))
1.1547005383792515
"""
a = np.asarray(a).ravel()
if limits is None:
return a.std(ddof=ddof) / np.sqrt(a.size)
am = _mask_to_limits(a, limits, inclusive)
sd = np.sqrt(np.ma.var(am, ddof=ddof, axis=axis))
return sd / np.sqrt(am.count())
#####################################
# MOMENTS #
#####################################
def moment(a, moment=1, axis=0, nan_policy='propagate'):
r"""
Calculates the nth moment about the mean for a sample.
A moment is a specific quantitative measure of the shape of a set of points.
It is often used to calculate coefficients of skewness and kurtosis due
to its close relationship with them.
Parameters
----------
a : array_like
data
moment : int or array_like of ints, optional
order of central moment that is returned. Default is 1.
axis : int or None, optional
Axis along which the central moment is computed. Default is 0.
If None, compute over the whole array `a`.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
n-th central moment : ndarray or float
The appropriate moment along the given axis or over all values if axis
is None. The denominator for the moment calculation is the number of
observations, no degrees of freedom correction is done.
See also
--------
kurtosis, skew, describe
Notes
-----
The k-th central moment of a data sample is:
.. math::
m_k = \frac{1}{n} \sum_{i = 1}^n (x_i - \bar{x})^k
Where n is the number of samples and x-bar is the mean. This function uses
exponentiation by squares [1]_ for efficiency.
References
----------
.. [1] http://eli.thegreenplace.net/2009/03/21/efficient-integer-exponentiation-algorithms
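Examples
--------
For instance, the second central moment of a small sample (its biased
variance):

>>> from scipy import stats
>>> stats.moment([1, 2, 3, 4, 5], moment=2)  # mean of (x - 3)**2
2.0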
"""
a, axis = _chk_asarray(a, axis)
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.moment(a, moment, axis)
if a.size == 0:
# empty array, return nan(s) with shape matching `moment`
if np.isscalar(moment):
return np.nan
else:
return np.ones(np.asarray(moment).shape, dtype=np.float64) * np.nan
# for array_like moment input, return a value for each.
if not np.isscalar(moment):
mmnt = [_moment(a, i, axis) for i in moment]
return np.array(mmnt)
else:
return _moment(a, moment, axis)
def _moment(a, moment, axis):
if np.abs(moment - np.round(moment)) > 0:
raise ValueError("All moment parameters must be integers")
if moment == 0:
# When moment equals 0, the result is 1, by definition.
shape = list(a.shape)
del shape[axis]
if shape:
# return an actual array of the appropriate shape
return np.ones(shape, dtype=float)
else:
# the input was 1D, so return a scalar instead of a rank-0 array
return 1.0
elif moment == 1:
# By definition the first moment about the mean is 0.
shape = list(a.shape)
del shape[axis]
if shape:
# return an actual array of the appropriate shape
return np.zeros(shape, dtype=float)
else:
# the input was 1D, so return a scalar instead of a rank-0 array
return np.float64(0.0)
else:
# Exponentiation by squares: form exponent sequence
n_list = [moment]
current_n = moment
while current_n > 2:
if current_n % 2:
current_n = (current_n-1)/2
else:
current_n /= 2
n_list.append(current_n)
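# Worked example: moment=5 yields n_list=[5, 2]; starting from s = x**2
# below, squaring gives x**4, and one extra multiply by x (5 is odd)
# reaches x**5.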
# Starting point for exponentiation by squares
a_zero_mean = a - np.expand_dims(np.mean(a, axis), axis)
if n_list[-1] == 1:
s = a_zero_mean.copy()
else:
s = a_zero_mean**2
# Perform multiplications
for n in n_list[-2::-1]:
s = s**2
if n % 2:
s *= a_zero_mean
return np.mean(s, axis)
def variation(a, axis=0, nan_policy='propagate'):
"""
Computes the coefficient of variation, the ratio of the biased standard
deviation to the mean.
Parameters
----------
a : array_like
Input array.
axis : int or None, optional
Axis along which to calculate the coefficient of variation. Default
is 0. If None, compute over the whole array `a`.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
variation : ndarray
The calculated variation along the requested axis.
References
----------
.. [1] Zwillinger, D. and Kokoska, S. (2000). CRC Standard
Probability and Statistics Tables and Formulae. Chapman & Hall: New
York. 2000.
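Examples
--------
An exact small-sample illustration (biased standard deviation 2.0, mean 5.0):

>>> from scipy import stats
>>> stats.variation([2, 4, 4, 4, 5, 5, 7, 9])
0.4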
"""
a, axis = _chk_asarray(a, axis)
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.variation(a, axis)
return a.std(axis) / a.mean(axis)
def skew(a, axis=0, bias=True, nan_policy='propagate'):
"""
Computes the skewness of a data set.
For normally distributed data, the skewness should be about 0. A skewness
value > 0 means that there is more weight in the right tail of the
distribution. The function `skewtest` can be used to determine if the
skewness value is close enough to 0, statistically speaking.
Parameters
----------
a : ndarray
data
axis : int or None, optional
Axis along which skewness is calculated. Default is 0.
If None, compute over the whole array `a`.
bias : bool, optional
If False, then the calculations are corrected for statistical bias.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
skewness : ndarray
The skewness of values along an axis, returning 0 where all values are
equal.
References
----------
.. [1] Zwillinger, D. and Kokoska, S. (2000). CRC Standard
Probability and Statistics Tables and Formulae. Chapman & Hall: New
York. 2000.
Section 2.2.24.1
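Examples
--------
A symmetric sample has zero third central moment and hence zero skewness:

>>> from scipy import stats
>>> stats.skew([1, 2, 3, 4, 5])
0.0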
"""
a, axis = _chk_asarray(a, axis)
n = a.shape[axis]
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.skew(a, axis, bias)
m2 = moment(a, 2, axis)
m3 = moment(a, 3, axis)
zero = (m2 == 0)
vals = _lazywhere(~zero, (m2, m3),
lambda m2, m3: m3 / m2**1.5,
0.)
if not bias:
can_correct = (n > 2) & (m2 > 0)
if can_correct.any():
m2 = np.extract(can_correct, m2)
m3 = np.extract(can_correct, m3)
nval = np.sqrt((n-1.0)*n) / (n-2.0) * m3/m2**1.5
np.place(vals, can_correct, nval)
if vals.ndim == 0:
return vals.item()
return vals
def kurtosis(a, axis=0, fisher=True, bias=True, nan_policy='propagate'):
"""
Computes the kurtosis (Fisher or Pearson) of a dataset.
Kurtosis is the fourth central moment divided by the square of the
variance. If Fisher's definition is used, then 3.0 is subtracted from
the result to give 0.0 for a normal distribution.
If bias is False then the kurtosis is calculated using k statistics to
eliminate bias coming from biased moment estimators.
Use `kurtosistest` to see if result is close enough to normal.
Parameters
----------
a : array
data for which the kurtosis is calculated
axis : int or None, optional
Axis along which the kurtosis is calculated. Default is 0.
If None, compute over the whole array `a`.
fisher : bool, optional
If True, Fisher's definition is used (normal ==> 0.0). If False,
Pearson's definition is used (normal ==> 3.0).
bias : bool, optional
If False, then the calculations are corrected for statistical bias.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
kurtosis : array
The kurtosis of values along an axis. If all values are equal,
return -3 for Fisher's definition and 0 for Pearson's definition.
References
----------
.. [1] Zwillinger, D. and Kokoska, S. (2000). CRC Standard
Probability and Statistics Tables and Formulae. Chapman & Hall: New
York. 2000.
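Examples
--------
A small worked example: for [1, 2, 3, 4, 5] the central moments are m2 = 2
and m4 = 6.8, so Pearson's kurtosis is 6.8 / 2**2 = 1.7 (Fisher's definition
subtracts 3, giving -1.3):

>>> from scipy import stats
>>> stats.kurtosis([1, 2, 3, 4, 5], fisher=False)
1.7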
"""
a, axis = _chk_asarray(a, axis)
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.kurtosis(a, axis, fisher, bias)
n = a.shape[axis]
m2 = moment(a, 2, axis)
m4 = moment(a, 4, axis)
zero = (m2 == 0)
olderr = np.seterr(all='ignore')
try:
vals = np.where(zero, 0, m4 / m2**2.0)
finally:
np.seterr(**olderr)
if not bias:
can_correct = (n > 3) & (m2 > 0)
if can_correct.any():
m2 = np.extract(can_correct, m2)
m4 = np.extract(can_correct, m4)
nval = 1.0/(n-2)/(n-3) * ((n**2-1.0)*m4/m2**2.0 - 3*(n-1)**2.0)
np.place(vals, can_correct, nval + 3.0)
if vals.ndim == 0:
vals = vals.item() # array scalar
if fisher:
return vals - 3
else:
return vals
DescribeResult = namedtuple('DescribeResult',
('nobs', 'minmax', 'mean', 'variance', 'skewness',
'kurtosis'))
def describe(a, axis=0, ddof=1, bias=True, nan_policy='propagate'):
"""
Computes several descriptive statistics of the passed array.
Parameters
----------
a : array_like
Input data.
axis : int or None, optional
Axis along which statistics are calculated. Default is 0.
If None, compute over the whole array `a`.
ddof : int, optional
Delta degrees of freedom (only for variance). Default is 1.
bias : bool, optional
If False, then the skewness and kurtosis calculations are corrected for
statistical bias.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
nobs : int
Number of observations (length of data along `axis`).
minmax: tuple of ndarrays or floats
Minimum and maximum value of data array.
mean : ndarray or float
Arithmetic mean of data along axis.
variance : ndarray or float
Unbiased variance of the data along axis, denominator is number of
observations minus one.
skewness : ndarray or float
Skewness, based on moment calculations with denominator equal to
the number of observations, i.e. no degrees of freedom correction.
kurtosis : ndarray or float
Kurtosis (Fisher). The kurtosis is normalized so that it is
zero for the normal distribution. No degrees of freedom are used.
See Also
--------
skew, kurtosis
Examples
--------
>>> from scipy import stats
>>> a = np.arange(10)
>>> stats.describe(a)
DescribeResult(nobs=10, minmax=(0, 9), mean=4.5, variance=9.1666666666666661,
skewness=0.0, kurtosis=-1.2242424242424244)
>>> b = [[1, 2], [3, 4]]
>>> stats.describe(b)
DescribeResult(nobs=2, minmax=(array([1, 2]), array([3, 4])),
mean=array([ 2., 3.]), variance=array([ 2., 2.]),
skewness=array([ 0., 0.]), kurtosis=array([-2., -2.]))
"""
a, axis = _chk_asarray(a, axis)
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.describe(a, axis, ddof, bias)
if a.size == 0:
raise ValueError("The input must not be empty.")
n = a.shape[axis]
mm = (np.min(a, axis=axis), np.max(a, axis=axis))
m = np.mean(a, axis=axis)
v = np.var(a, axis=axis, ddof=ddof)
sk = skew(a, axis, bias=bias)
kurt = kurtosis(a, axis, bias=bias)
return DescribeResult(n, mm, m, v, sk, kurt)
#####################################
# NORMALITY TESTS #
#####################################
SkewtestResult = namedtuple('SkewtestResult', ('statistic', 'pvalue'))
def skewtest(a, axis=0, nan_policy='propagate'):
"""
Tests whether the skew is different from the normal distribution.
This function tests the null hypothesis that the skewness of
the population that the sample was drawn from is the same
as that of a corresponding normal distribution.
Parameters
----------
a : array
The data to be tested
axis : int or None, optional
Axis along which statistics are calculated. Default is 0.
If None, compute over the whole array `a`.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
statistic : float
The computed z-score for this test.
pvalue : float
a 2-sided p-value for the hypothesis test
Notes
-----
The sample size must be at least 8.
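Examples
--------
A minimal illustration of the call pattern; the exact numbers depend on
the sample, so no output is shown:
>>> from scipy import stats
>>> rng = np.random.RandomState(12345678)
>>> x = rng.normal(size=100)
>>> statistic, pvalue = stats.skewtest(x)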
"""
a, axis = _chk_asarray(a, axis)
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.skewtest(a, axis)
if axis is None:
a = np.ravel(a)
axis = 0
b2 = skew(a, axis)
n = float(a.shape[axis])
if n < 8:
raise ValueError(
"skewtest is not valid with less than 8 samples; %i samples"
" were given." % int(n))
y = b2 * math.sqrt(((n + 1) * (n + 3)) / (6.0 * (n - 2)))
beta2 = (3.0 * (n**2 + 27*n - 70) * (n+1) * (n+3) /
((n-2.0) * (n+5) * (n+7) * (n+9)))
W2 = -1 + math.sqrt(2 * (beta2 - 1))
delta = 1 / math.sqrt(0.5 * math.log(W2))
alpha = math.sqrt(2.0 / (W2 - 1))
y = np.where(y == 0, 1, y)
Z = delta * np.log(y / alpha + np.sqrt((y / alpha)**2 + 1))
return SkewtestResult(Z, 2 * distributions.norm.sf(np.abs(Z)))
KurtosistestResult = namedtuple('KurtosistestResult', ('statistic', 'pvalue'))
def kurtosistest(a, axis=0, nan_policy='propagate'):
"""
Tests whether a dataset has normal kurtosis
This function tests the null hypothesis that the kurtosis
of the population from which the sample was drawn is that
of the normal distribution: ``kurtosis = 3(n-1)/(n+1)``.
Parameters
----------
a : array
array of the sample data
axis : int or None, optional
Axis along which to compute test. Default is 0. If None,
compute over the whole array `a`.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
statistic : float
The computed z-score for this test.
pvalue : float
The 2-sided p-value for the hypothesis test
Notes
-----
Valid only for n>20. The Z-score is set to 0 for bad entries.
This function uses the method described in [1]_.
References
----------
.. [1] see e.g. F. J. Anscombe, W. J. Glynn, "Distribution of the kurtosis
statistic b2 for normal samples", Biometrika, vol. 70, pp. 227-234, 1983.
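Examples
--------
A minimal illustration of the call pattern; a sample of at least 20 values
avoids the small-sample warning, and no output is shown here:
>>> from scipy import stats
>>> rng = np.random.RandomState(12345678)
>>> x = rng.normal(size=100)
>>> statistic, pvalue = stats.kurtosistest(x)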
"""
a, axis = _chk_asarray(a, axis)
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.kurtosistest(a, axis)
n = float(a.shape[axis])
if n < 5:
raise ValueError(
"kurtosistest requires at least 5 observations; %i observations"
" were given." % int(n))
if n < 20:
warnings.warn("kurtosistest only valid for n>=20 ... continuing "
"anyway, n=%i" % int(n))
b2 = kurtosis(a, axis, fisher=False)
E = 3.0*(n-1) / (n+1)
varb2 = 24.0*n*(n-2)*(n-3) / ((n+1)*(n+1.)*(n+3)*(n+5)) # [1]_ Eq. 1
x = (b2-E) / np.sqrt(varb2) # [1]_ Eq. 4
# [1]_ Eq. 2:
sqrtbeta1 = 6.0*(n*n-5*n+2)/((n+7)*(n+9)) * np.sqrt((6.0*(n+3)*(n+5)) /
(n*(n-2)*(n-3)))
# [1]_ Eq. 3:
A = 6.0 + 8.0/sqrtbeta1 * (2.0/sqrtbeta1 + np.sqrt(1+4.0/(sqrtbeta1**2)))
term1 = 1 - 2/(9.0*A)
denom = 1 + x*np.sqrt(2/(A-4.0))
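# A negative denominator would make the cube root below undefined; mark
# those entries with the sentinel value 99 and zero out their Z-score
# further down.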
denom = np.where(denom < 0, 99, denom)
term2 = np.where(denom < 0, term1, np.power((1-2.0/A)/denom, 1/3.0))
Z = (term1 - term2) / np.sqrt(2/(9.0*A)) # [1]_ Eq. 5
Z = np.where(denom == 99, 0, Z)
if Z.ndim == 0:
Z = Z[()]
# zprob uses upper tail, so Z needs to be positive
return KurtosistestResult(Z, 2 * distributions.norm.sf(np.abs(Z)))
NormaltestResult = namedtuple('NormaltestResult', ('statistic', 'pvalue'))
def normaltest(a, axis=0, nan_policy='propagate'):
"""
Tests whether a sample differs from a normal distribution.
This function tests the null hypothesis that a sample comes
from a normal distribution. It is based on D'Agostino and
Pearson's [1]_, [2]_ test that combines skew and kurtosis to
produce an omnibus test of normality.
Parameters
----------
a : array_like
The array containing the data to be tested.
axis : int or None, optional
Axis along which to compute test. Default is 0. If None,
compute over the whole array `a`.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
statistic : float or array
``s^2 + k^2``, where ``s`` is the z-score returned by `skewtest` and
``k`` is the z-score returned by `kurtosistest`.
pvalue : float or array
A 2-sided chi squared probability for the hypothesis test.
References
----------
.. [1] D'Agostino, R. B. (1971), "An omnibus test of normality for
moderate and large sample size", Biometrika, 58, 341-348
.. [2] D'Agostino, R. and Pearson, E. S. (1973), "Tests for departure from
normality", Biometrika, 60, 613-622
"""
a, axis = _chk_asarray(a, axis)
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.normaltest(a, axis)
s, _ = skewtest(a, axis)
k, _ = kurtosistest(a, axis)
k2 = s*s + k*k
return NormaltestResult(k2, distributions.chi2.sf(k2, 2))
def jarque_bera(x):
"""
Perform the Jarque-Bera goodness of fit test on sample data.
The Jarque-Bera test tests whether the sample data has the skewness and
kurtosis matching a normal distribution.
Note that this test only works for a large enough number of data samples
(>2000) as the test statistic asymptotically has a Chi-squared distribution
with 2 degrees of freedom.
Parameters
----------
x : array_like
Observations of a random variable.
Returns
-------
jb_value : float
The test statistic.
p : float
The p-value for the hypothesis test.
References
----------
.. [1] Jarque, C. and Bera, A. (1980) "Efficient tests for normality,
homoscedasticity and serial independence of regression residuals",
6 Econometric Letters 255-259.
Examples
--------
>>> from scipy import stats
>>> np.random.seed(987654321)
>>> x = np.random.normal(0, 1, 100000)
>>> y = np.random.rayleigh(1, 100000)
>>> stats.jarque_bera(x)
(4.7165707989581342, 0.09458225503041906)
>>> stats.jarque_bera(y)
(6713.7098548143422, 0.0)
"""
x = np.asarray(x)
n = float(x.size)
if n == 0:
raise ValueError('At least one observation is required.')
mu = x.mean()
diffx = x - mu
skewness = (1 / n * np.sum(diffx**3)) / (1 / n * np.sum(diffx**2))**(3 / 2.)
kurtosis = (1 / n * np.sum(diffx**4)) / (1 / n * np.sum(diffx**2))**2
jb_value = n / 6 * (skewness**2 + (kurtosis - 3)**2 / 4)
p = 1 - distributions.chi2.cdf(jb_value, 2)
return jb_value, p
#####################################
# FREQUENCY FUNCTIONS #
#####################################
def itemfreq(a):
"""
Returns a 2-D array of item frequencies.
Parameters
----------
a : (N,) array_like
Input array.
Returns
-------
itemfreq : (K, 2) ndarray
A 2-D frequency table. Column 1 contains sorted, unique values from
`a`, column 2 contains their respective counts.
Examples
--------
>>> from scipy import stats
>>> a = np.array([1, 1, 5, 0, 1, 2, 2, 0, 1, 4])
>>> stats.itemfreq(a)
array([[ 0., 2.],
[ 1., 4.],
[ 2., 2.],
[ 4., 1.],
[ 5., 1.]])
>>> np.bincount(a)
array([2, 4, 2, 0, 1, 1])
>>> stats.itemfreq(a/10.)
array([[ 0. , 2. ],
[ 0.1, 4. ],
[ 0.2, 2. ],
[ 0.4, 1. ],
[ 0.5, 1. ]])
"""
items, inv = np.unique(a, return_inverse=True)
freq = np.bincount(inv)
return np.array([items, freq]).T
def scoreatpercentile(a, per, limit=(), interpolation_method='fraction',
axis=None):
"""
Calculate the score at a given percentile of the input sequence.
For example, the score at `per=50` is the median. If the desired quantile
lies between two data points, we interpolate between them, according to
the value of `interpolation`. If the parameter `limit` is provided, it
should be a tuple (lower, upper) of two values.
Parameters
----------
a : array_like
A 1-D array of values from which to extract score.
per : array_like
Percentile(s) at which to extract score. Values should be in range
[0,100].
limit : tuple, optional
Tuple of two scalars, the lower and upper limits within which to
compute the percentile. Values of `a` outside
this (closed) interval will be ignored.
interpolation_method : {'fraction', 'lower', 'higher'}, optional
This optional parameter specifies the interpolation method to use,
when the desired quantile lies between two data points `i` and `j`
- fraction: ``i + (j - i) * fraction`` where ``fraction`` is the
fractional part of the index surrounded by ``i`` and ``j``.
- lower: ``i``.
- higher: ``j``.
axis : int, optional
Axis along which the percentiles are computed. Default is None. If
None, compute over the whole array `a`.
Returns
-------
score : float or ndarray
Score at percentile(s).
See Also
--------
percentileofscore, numpy.percentile
Notes
-----
This function will become obsolete in the future.
For Numpy 1.9 and higher, `numpy.percentile` provides all the functionality
that `scoreatpercentile` provides. And it's significantly faster.
Therefore it's recommended to use `numpy.percentile` for users that have
numpy >= 1.9.
Examples
--------
>>> from scipy import stats
>>> a = np.arange(100)
>>> stats.scoreatpercentile(a, 50)
49.5
"""
# adapted from NumPy's percentile function. When we require numpy >= 1.8,
# the implementation of this function can be replaced by np.percentile.
a = np.asarray(a)
if a.size == 0:
# empty array, return nan(s) with shape matching `per`
if np.isscalar(per):
return np.nan
else:
return np.ones(np.asarray(per).shape, dtype=np.float64) * np.nan
if limit:
a = a[(limit[0] <= a) & (a <= limit[1])]
sorted = np.sort(a, axis=axis)
if axis is None:
axis = 0
return _compute_qth_percentile(sorted, per, interpolation_method, axis)
# handle sequence of per's without calling sort multiple times
def _compute_qth_percentile(sorted, per, interpolation_method, axis):
if not np.isscalar(per):
score = [_compute_qth_percentile(sorted, i, interpolation_method, axis)
for i in per]
return np.array(score)
if (per < 0) or (per > 100):
raise ValueError("percentile must be in the range [0, 100]")
indexer = [slice(None)] * sorted.ndim
idx = per / 100. * (sorted.shape[axis] - 1)
if int(idx) != idx:
# round fractional indices according to interpolation method
if interpolation_method == 'lower':
idx = int(np.floor(idx))
elif interpolation_method == 'higher':
idx = int(np.ceil(idx))
elif interpolation_method == 'fraction':
pass # keep idx as fraction and interpolate
else:
raise ValueError("interpolation_method can only be 'fraction', "
"'lower' or 'higher'")
i = int(idx)
if i == idx:
indexer[axis] = slice(i, i + 1)
weights = array(1)
sumval = 1.0
else:
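# Fractional index: interpolate linearly between the two neighbouring
# order statistics, weighting them by (j - idx) and (idx - i).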
indexer[axis] = slice(i, i + 2)
j = i + 1
weights = array([(j - idx), (idx - i)], float)
wshape = [1] * sorted.ndim
wshape[axis] = 2
weights.shape = wshape
sumval = weights.sum()
# Use np.add.reduce (== np.sum but a little faster) to coerce data type
return np.add.reduce(sorted[indexer] * weights, axis=axis) / sumval
def percentileofscore(a, score, kind='rank'):
"""
The percentile rank of a score relative to a list of scores.
A `percentileofscore` of, for example, 80% means that 80% of the
scores in `a` are below the given score. In the case of gaps or
ties, the exact definition depends on the optional keyword, `kind`.
Parameters
----------
a : array_like
Array of scores to which `score` is compared.
score : int or float
Score that is compared to the elements in `a`.
kind : {'rank', 'weak', 'strict', 'mean'}, optional
This optional parameter specifies the interpretation of the
resulting score:
- "rank": Average percentage ranking of score. In case of
multiple matches, average the percentage rankings of
all matching scores.
- "weak": This kind corresponds to the definition of a cumulative
distribution function. A percentileofscore of 80%
means that 80% of values are less than or equal
to the provided score.
- "strict": Similar to "weak", except that only values that are
strictly less than the given score are counted.
- "mean": The average of the "weak" and "strict" scores, often used in
testing. See
http://en.wikipedia.org/wiki/Percentile_rank
Returns
-------
pcos : float
Percentile-position of score (0-100) relative to `a`.
See Also
--------
numpy.percentile
Examples
--------
Three-quarters of the given values lie below a given score:
>>> from scipy import stats
>>> stats.percentileofscore([1, 2, 3, 4], 3)
75.0
With multiple matches, note how the scores of the two matches, 0.6
and 0.8 respectively, are averaged:
>>> stats.percentileofscore([1, 2, 3, 3, 4], 3)
70.0
Only 2/5 values are strictly less than 3:
>>> stats.percentileofscore([1, 2, 3, 3, 4], 3, kind='strict')
40.0
But 4/5 values are less than or equal to 3:
>>> stats.percentileofscore([1, 2, 3, 3, 4], 3, kind='weak')
80.0
The average between the weak and the strict scores is
>>> stats.percentileofscore([1, 2, 3, 3, 4], 3, kind='mean')
60.0
"""
a = np.array(a)
n = len(a)
if kind == 'rank':
if not np.any(a == score):
a = np.append(a, score)
a_len = np.array(list(range(len(a))))
else:
a_len = np.array(list(range(len(a)))) + 1.0
a = np.sort(a)
idx = [a == score]
pct = (np.mean(a_len[idx]) / n) * 100.0
return pct
elif kind == 'strict':
return np.sum(a < score) / float(n) * 100
elif kind == 'weak':
return np.sum(a <= score) / float(n) * 100
elif kind == 'mean':
return (np.sum(a < score) + np.sum(a <= score)) * 50 / float(n)
else:
raise ValueError("kind can only be 'rank', 'strict', 'weak' or 'mean'")
@np.deprecate(message=("scipy.stats.histogram2 is deprecated in scipy 0.16.0; "
"use np.histogram2d instead"))
def histogram2(a, bins):
"""
Compute histogram using divisions in bins.
Count the number of times values from array `a` fall into
numerical ranges defined by `bins`. Range x is given by
bins[x] <= range_x < bins[x+1] where x = 0,N and N is the
length of the `bins` array. The last range is given by
bins[N] <= range_N < infinity. Values less than bins[0] are
not included in the histogram.
Parameters
----------
a : array_like of rank 1
The array of values to be assigned into bins
bins : array_like of rank 1
Defines the ranges of values to use during histogramming.
Returns
-------
histogram2 : ndarray of rank 1
Each value represents the occurrences for a given bin (range) of
values.
"""
# comment: probably obsoleted by numpy.histogram()
n = np.searchsorted(np.sort(a), bins)
n = np.concatenate([n, [len(a)]])
return n[1:] - n[:-1]
HistogramResult = namedtuple('HistogramResult',
('count', 'lowerlimit', 'binsize', 'extrapoints'))
@np.deprecate(message=("scipy.stats.histogram is deprecated in scipy 0.17.0; "
"use np.histogram instead"))
def histogram(a, numbins=10, defaultlimits=None, weights=None, printextras=False):
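"""
Deprecated thin wrapper around `_histogram`; see `_histogram` below for
the parameters and return value, and prefer `np.histogram` in new code.
"""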
# _histogram is used in relfreq/cumfreq, so need to keep it
res = _histogram(a, numbins=numbins, defaultlimits=defaultlimits,
weights=weights, printextras=printextras)
return res
def _histogram(a, numbins=10, defaultlimits=None, weights=None, printextras=False):
"""
Separates the range into several bins and returns the number of instances
in each bin.
Parameters
----------
a : array_like
Array of scores which will be put into bins.
numbins : int, optional
The number of bins to use for the histogram. Default is 10.
defaultlimits : tuple (lower, upper), optional
The lower and upper values for the range of the histogram.
If no value is given, a range slightly larger than the range of the
values in a is used. Specifically ``(a.min() - s, a.max() + s)``,
where ``s = (1/2)(a.max() - a.min()) / (numbins - 1)``.
weights : array_like, optional
The weights for each value in `a`. Default is None, which gives each
value a weight of 1.0
printextras : bool, optional
If True, if there are extra points (i.e. the points that fall outside
the bin limits) a warning is raised saying how many of those points
there are. Default is False.
Returns
-------
count : ndarray
Number of points (or sum of weights) in each bin.
lowerlimit : float
Lowest value of histogram, the lower limit of the first bin.
binsize : float
The size of the bins (all bins have the same size).
extrapoints : int
The number of points outside the range of the histogram.
See Also
--------
numpy.histogram
Notes
-----
This histogram is based on numpy's histogram but has a larger range by
default if `defaultlimits` is not set.
"""
a = np.ravel(a)
if defaultlimits is None:
if a.size == 0:
# handle empty arrays. Undetermined range, so use 0-1.
defaultlimits = (0, 1)
else:
# no range given, so use values in `a`
data_min = a.min()
data_max = a.max()
# Have bins extend past min and max values slightly
s = (data_max - data_min) / (2. * (numbins - 1.))
defaultlimits = (data_min - s, data_max + s)
# use numpy's histogram method to compute bins
hist, bin_edges = np.histogram(a, bins=numbins, range=defaultlimits,
weights=weights)
# hist are not always floats, convert to keep with old output
hist = np.array(hist, dtype=float)
# fixed width for bins is assumed, as numpy's histogram gives
# fixed width bins for int values for 'bins'
binsize = bin_edges[1] - bin_edges[0]
# calculate number of extra points
extrapoints = len([v for v in a
if defaultlimits[0] > v or v > defaultlimits[1]])
if extrapoints > 0 and printextras:
warnings.warn("Points outside given histogram range = %s"
% extrapoints)
return HistogramResult(hist, defaultlimits[0], binsize, extrapoints)
CumfreqResult = namedtuple('CumfreqResult',
('cumcount', 'lowerlimit', 'binsize',
'extrapoints'))
def cumfreq(a, numbins=10, defaultreallimits=None, weights=None):
"""
Returns a cumulative frequency histogram, using the histogram function.
A cumulative histogram is a mapping that counts the cumulative number of
observations in all of the bins up to the specified bin.
Parameters
----------
a : array_like
Input array.
numbins : int, optional
The number of bins to use for the histogram. Default is 10.
defaultreallimits : tuple (lower, upper), optional
The lower and upper values for the range of the histogram.
If no value is given, a range slightly larger than the range of the
values in `a` is used. Specifically ``(a.min() - s, a.max() + s)``,
where ``s = (1/2)(a.max() - a.min()) / (numbins - 1)``.
weights : array_like, optional
The weights for each value in `a`. Default is None, which gives each
value a weight of 1.0
Returns
-------
cumcount : ndarray
Binned values of cumulative frequency.
lowerlimit : float
Lower real limit
binsize : float
Width of each bin.
extrapoints : int
Extra points.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from scipy import stats
>>> x = [1, 4, 2, 1, 3, 1]
>>> res = stats.cumfreq(x, numbins=4, defaultreallimits=(1.5, 5))
>>> res.cumcount
array([ 1., 2., 3., 3.])
>>> res.extrapoints
3
Create a normal distribution with 1000 random values
>>> rng = np.random.RandomState(seed=12345)
>>> samples = stats.norm.rvs(size=1000, random_state=rng)
Calculate cumulative frequencies
>>> res = stats.cumfreq(samples, numbins=25)
Calculate space of values for x
>>> x = res.lowerlimit + np.linspace(0, res.binsize*res.cumcount.size,
... res.cumcount.size)
Plot histogram and cumulative histogram
>>> fig = plt.figure(figsize=(10, 4))
>>> ax1 = fig.add_subplot(1, 2, 1)
>>> ax2 = fig.add_subplot(1, 2, 2)
>>> ax1.hist(samples, bins=25)
>>> ax1.set_title('Histogram')
>>> ax2.bar(x, res.cumcount, width=res.binsize)
>>> ax2.set_title('Cumulative histogram')
>>> ax2.set_xlim([x.min(), x.max()])
>>> plt.show()
"""
h, l, b, e = _histogram(a, numbins, defaultreallimits, weights=weights)
cumhist = np.cumsum(h * 1, axis=0)
return CumfreqResult(cumhist, l, b, e)
RelfreqResult = namedtuple('RelfreqResult',
('frequency', 'lowerlimit', 'binsize',
'extrapoints'))
def relfreq(a, numbins=10, defaultreallimits=None, weights=None):
"""
Returns a relative frequency histogram, using the histogram function.
A relative frequency histogram is a mapping of the number of
observations in each of the bins relative to the total of observations.
Parameters
----------
a : array_like
Input array.
numbins : int, optional
The number of bins to use for the histogram. Default is 10.
defaultreallimits : tuple (lower, upper), optional
The lower and upper values for the range of the histogram.
If no value is given, a range slightly larger than the range of the
values in a is used. Specifically ``(a.min() - s, a.max() + s)``,
where ``s = (1/2)(a.max() - a.min()) / (numbins - 1)``.
weights : array_like, optional
The weights for each value in `a`. Default is None, which gives each
value a weight of 1.0
Returns
-------
frequency : ndarray
Binned values of relative frequency.
lowerlimit : float
Lower real limit
binsize : float
Width of each bin.
extrapoints : int
Extra points.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from scipy import stats
>>> a = np.array([2, 4, 1, 2, 3, 2])
>>> res = stats.relfreq(a, numbins=4)
>>> res.frequency
array([ 0.16666667, 0.5 , 0.16666667, 0.16666667])
>>> np.sum(res.frequency) # relative frequencies should add up to 1
1.0
Create a normal distribution with 1000 random values
>>> rng = np.random.RandomState(seed=12345)
>>> samples = stats.norm.rvs(size=1000, random_state=rng)
Calculate relative frequencies
>>> res = stats.relfreq(samples, numbins=25)
Calculate space of values for x
>>> x = res.lowerlimit + np.linspace(0, res.binsize*res.frequency.size,
... res.frequency.size)
Plot relative frequency histogram
>>> fig = plt.figure(figsize=(5, 4))
>>> ax = fig.add_subplot(1, 1, 1)
>>> ax.bar(x, res.frequency, width=res.binsize)
>>> ax.set_title('Relative frequency histogram')
>>> ax.set_xlim([x.min(), x.max()])
>>> plt.show()
"""
a = np.asanyarray(a)
h, l, b, e = _histogram(a, numbins, defaultreallimits, weights=weights)
h = h / float(a.shape[0])
return RelfreqResult(h, l, b, e)
#####################################
# VARIABILITY FUNCTIONS #
#####################################
def obrientransform(*args):
"""
Computes the O'Brien transform on input data (any number of arrays).
Used to test for homogeneity of variance prior to running one-way stats.
Each array in ``*args`` is one level of a factor.
If `f_oneway` is run on the transformed data and found significant,
the variances are unequal. From Maxwell and Delaney [1]_, p.112.
Parameters
----------
args : tuple of array_like
Any number of arrays.
Returns
-------
obrientransform : ndarray
Transformed data for use in an ANOVA. The first dimension
of the result corresponds to the sequence of transformed
arrays. If the arrays given are all 1-D of the same length,
the return value is a 2-D array; otherwise it is a 1-D array
of type object, with each element being an ndarray.
References
----------
.. [1] S. E. Maxwell and H. D. Delaney, "Designing Experiments and
Analyzing Data: A Model Comparison Perspective", Wadsworth, 1990.
Examples
--------
We'll test the following data sets for differences in their variance.
>>> x = [10, 11, 13, 9, 7, 12, 12, 9, 10]
>>> y = [13, 21, 5, 10, 8, 14, 10, 12, 7, 15]
Apply the O'Brien transform to the data.
>>> from scipy.stats import obrientransform
>>> tx, ty = obrientransform(x, y)
Use `scipy.stats.f_oneway` to apply a one-way ANOVA test to the
transformed data.
>>> from scipy.stats import f_oneway
>>> F, p = f_oneway(tx, ty)
>>> p
0.1314139477040335
If we require that ``p < 0.05`` for significance, we cannot conclude
that the variances are different.
"""
TINY = np.sqrt(np.finfo(float).eps)
# `arrays` will hold the transformed arguments.
arrays = []
for arg in args:
a = np.asarray(arg)
n = len(a)
mu = np.mean(a)
sq = (a - mu)**2
sumsq = sq.sum()
# The O'Brien transform.
t = ((n - 1.5) * n * sq - 0.5 * sumsq) / ((n - 1) * (n - 2))
# Check that the mean of the transformed data is equal to the
# original variance.
var = sumsq / (n - 1)
if abs(var - np.mean(t)) > TINY:
raise ValueError('Lack of convergence in obrientransform.')
arrays.append(t)
return np.array(arrays)
@np.deprecate(message="scipy.stats.signaltonoise is deprecated in scipy 0.16.0")
def signaltonoise(a, axis=0, ddof=0):
"""
The signal-to-noise ratio of the input data.
Returns the signal-to-noise ratio of `a`, here defined as the mean
divided by the standard deviation.
Parameters
----------
a : array_like
An array_like object containing the sample data.
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over
the whole array `a`.
ddof : int, optional
Degrees of freedom correction for standard deviation. Default is 0.
Returns
-------
s2n : ndarray
The mean to standard deviation ratio(s) along `axis`, or 0 where the
standard deviation is 0.
"""
a = np.asanyarray(a)
m = a.mean(axis)
sd = a.std(axis=axis, ddof=ddof)
return np.where(sd == 0, 0, m/sd)
def sem(a, axis=0, ddof=1, nan_policy='propagate'):
"""
Calculates the standard error of the mean (or standard error of
measurement) of the values in the input array.
Parameters
----------
a : array_like
An array containing the values for which the standard error is
returned.
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over
the whole array `a`.
ddof : int, optional
Delta degrees-of-freedom. How many degrees of freedom to adjust
for bias in limited samples relative to the population estimate
of variance. Defaults to 1.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
s : ndarray or float
The standard error of the mean in the sample(s), along the input axis.
Notes
-----
The default value for `ddof` is different from the default (0) used by other
ddof-containing routines, such as np.std and np.nanstd.
Examples
--------
Find standard error along the first axis:
>>> from scipy import stats
>>> a = np.arange(20).reshape(5,4)
>>> stats.sem(a)
array([ 2.8284, 2.8284, 2.8284, 2.8284])
Find standard error across the whole array, using n degrees of freedom:
>>> stats.sem(a, axis=None, ddof=0)
1.2893796958227628
"""
a, axis = _chk_asarray(a, axis)
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.sem(a, axis, ddof)
n = a.shape[axis]
s = np.std(a, axis=axis, ddof=ddof) / np.sqrt(n)
return s
def zscore(a, axis=0, ddof=0):
"""
Calculates the z score of each value in the sample, relative to the
sample mean and standard deviation.
Parameters
----------
a : array_like
An array like object containing the sample data.
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over
the whole array `a`.
ddof : int, optional
Degrees of freedom correction in the calculation of the
standard deviation. Default is 0.
Returns
-------
zscore : array_like
The z-scores, standardized by mean and standard deviation of
input array `a`.
Notes
-----
This function preserves ndarray subclasses, and works also with
matrices and masked arrays (it uses `asanyarray` instead of
`asarray` for parameters).
Examples
--------
>>> a = np.array([ 0.7972, 0.0767, 0.4383, 0.7866, 0.8091,
... 0.1954, 0.6307, 0.6599, 0.1065, 0.0508])
>>> from scipy import stats
>>> stats.zscore(a)
array([ 1.1273, -1.247 , -0.0552, 1.0923, 1.1664, -0.8559, 0.5786,
0.6748, -1.1488, -1.3324])
Computing along a specified axis, using n-1 degrees of freedom
(``ddof=1``) to calculate the standard deviation:
>>> b = np.array([[ 0.3148, 0.0478, 0.6243, 0.4608],
... [ 0.7149, 0.0775, 0.6072, 0.9656],
... [ 0.6341, 0.1403, 0.9759, 0.4064],
... [ 0.5918, 0.6948, 0.904 , 0.3721],
... [ 0.0921, 0.2481, 0.1188, 0.1366]])
>>> stats.zscore(b, axis=1, ddof=1)
array([[-0.19264823, -1.28415119, 1.07259584, 0.40420358],
[ 0.33048416, -1.37380874, 0.04251374, 1.00081084],
[ 0.26796377, -1.12598418, 1.23283094, -0.37481053],
[-0.22095197, 0.24468594, 1.19042819, -1.21416216],
[-0.82780366, 1.4457416 , -0.43867764, -0.1792603 ]])
"""
a = np.asanyarray(a)
mns = a.mean(axis=axis)
sstd = a.std(axis=axis, ddof=ddof)
if axis and mns.ndim < a.ndim:
return ((a - np.expand_dims(mns, axis=axis)) /
np.expand_dims(sstd, axis=axis))
else:
return (a - mns) / sstd
def zmap(scores, compare, axis=0, ddof=0):
"""
Calculates the relative z-scores.
Returns an array of z-scores, i.e., scores that are standardized to
zero mean and unit variance, where mean and variance are calculated
from the comparison array.
Parameters
----------
scores : array_like
The input for which z-scores are calculated.
compare : array_like
The input from which the mean and standard deviation of the
normalization are taken; assumed to have the same dimension as
`scores`.
axis : int or None, optional
Axis over which mean and variance of `compare` are calculated.
Default is 0. If None, compute over the whole array `scores`.
ddof : int, optional
Degrees of freedom correction in the calculation of the
standard deviation. Default is 0.
Returns
-------
zscore : array_like
Z-scores, in the same shape as `scores`.
Notes
-----
This function preserves ndarray subclasses, and works also with
matrices and masked arrays (it uses `asanyarray` instead of
`asarray` for parameters).
Examples
--------
>>> from scipy.stats import zmap
>>> a = [0.5, 2.0, 2.5, 3]
>>> b = [0, 1, 2, 3, 4]
>>> zmap(a, b)
array([-1.06066017, 0. , 0.35355339, 0.70710678])
"""
scores, compare = map(np.asanyarray, [scores, compare])
mns = compare.mean(axis=axis)
sstd = compare.std(axis=axis, ddof=ddof)
if axis and mns.ndim < compare.ndim:
return ((scores - np.expand_dims(mns, axis=axis)) /
np.expand_dims(sstd, axis=axis))
else:
return (scores - mns) / sstd
# Private dictionary initialized only once at module level
# See https://en.wikipedia.org/wiki/Robust_measures_of_scale
_scale_conversions = {'raw': 1.0,
'normal': special.erfinv(0.5) * 2.0 * math.sqrt(2.0)}
def iqr(x, axis=None, rng=(25, 75), scale='raw', nan_policy='propagate',
interpolation='linear', keepdims=False):
"""
Compute the interquartile range of the data along the specified
axis.
The interquartile range (IQR) is the difference between the 75th and
25th percentile of the data. It is a measure of the dispersion
similar to standard deviation or variance, but is much more robust
against outliers [2]_.
The ``rng`` parameter allows this function to compute other
percentile ranges than the actual IQR. For example, setting
``rng=(0, 100)`` is equivalent to `numpy.ptp`.
The IQR of an empty array is `np.nan`.
.. versionadded:: 0.18.0
Parameters
----------
x : array_like
Input array or object that can be converted to an array.
axis : int or sequence of int, optional
Axis along which the range is computed. The default is to
compute the IQR for the entire array.
rng : Two-element sequence containing floats in range of [0,100], optional
Percentiles over which to compute the range. Each must be
between 0 and 100, inclusive. The default is the true IQR:
`(25, 75)`. The order of the elements is not important.
scale : scalar or str, optional
The numerical value of scale will be divided out of the final
result. The following string values are recognized:
'raw' : No scaling, just return the raw IQR.
'normal' : Scale by :math:`2 \\sqrt{2} erf^{-1}(\\frac{1}{2}) \\approx 1.349`.
The default is 'raw'. Array-like scale is also allowed, as long
as it broadcasts correctly to the output such that
``out / scale`` is a valid operation. The output dimensions
depend on the input array, `x`, the `axis` argument, and the
`keepdims` flag.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate'
returns nan, 'raise' throws an error, 'omit' performs the
calculations ignoring nan values. Default is 'propagate'.
interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}, optional
Specifies the interpolation method to use when the percentile
boundaries lie between two data points `i` and `j`:
* 'linear' : `i + (j - i) * fraction`, where `fraction` is the
fractional part of the index surrounded by `i` and `j`.
* 'lower' : `i`.
* 'higher' : `j`.
* 'nearest' : `i` or `j` whichever is nearest.
* 'midpoint' : `(i + j) / 2`.
Default is 'linear'.
keepdims : bool, optional
If this is set to `True`, the reduced axes are left in the
result as dimensions with size one. With this option, the result
will broadcast correctly against the original array `x`.
Returns
-------
iqr : scalar or ndarray
If ``axis=None``, a scalar is returned. If the input contains
integers or floats of smaller precision than ``np.float64``, then the
output data-type is ``np.float64``. Otherwise, the output data-type is
the same as that of the input.
See Also
--------
numpy.std, numpy.var
Examples
--------
>>> from scipy.stats import iqr
>>> x = np.array([[10, 7, 4], [3, 2, 1]])
>>> x
array([[10, 7, 4],
[ 3, 2, 1]])
>>> iqr(x)
4.0
>>> iqr(x, axis=0)
array([ 3.5, 2.5, 1.5])
>>> iqr(x, axis=1)
array([ 3., 1.])
>>> iqr(x, axis=1, keepdims=True)
array([[ 3.],
[ 1.]])
Notes
-----
This function is heavily dependent on the version of `numpy` that is
installed. Versions greater than 1.11.0b3 are highly recommended, as they
include a number of enhancements and fixes to `numpy.percentile` and
`numpy.nanpercentile` that affect the operation of this function. The
following modifications apply:
Below 1.10.0 : `nan_policy` is poorly defined.
The default behavior of `numpy.percentile` is used for 'propagate'. This
is a hybrid of 'omit' and 'propagate' that mostly yields a skewed
version of 'omit' since NaNs are sorted to the end of the data. A
warning is raised if there are NaNs in the data.
Below 1.9.0: `numpy.nanpercentile` does not exist.
This means that `numpy.percentile` is used regardless of `nan_policy`
and a warning is issued. See previous item for a description of the
behavior.
Below 1.9.0: `keepdims` and `interpolation` are not supported.
The keywords get ignored with a warning if supplied with non-default
values. However, multiple axes are still supported.
References
----------
.. [1] "Interquartile range" https://en.wikipedia.org/wiki/Interquartile_range
.. [2] "Robust measures of scale" https://en.wikipedia.org/wiki/Robust_measures_of_scale
.. [3] "Quantile" https://en.wikipedia.org/wiki/Quantile
"""
x = asarray(x)
# This check prevents percentile from raising an error later. Also, it is
# consistent with `np.var` and `np.std`.
if not x.size:
return np.nan
# An error may be raised here, so fail-fast, before doing lengthy
# computations, even though `scale` is not used until later
if isinstance(scale, string_types):
scale_key = scale.lower()
if scale_key not in _scale_conversions:
raise ValueError("{0} not a valid scale for `iqr`".format(scale))
scale = _scale_conversions[scale_key]
# Select the percentile function to use based on nans and policy
contains_nan, nan_policy = _contains_nan(x, nan_policy)
if contains_nan and nan_policy == 'omit':
percentile_func = _iqr_nanpercentile
else:
percentile_func = _iqr_percentile
if len(rng) != 2:
raise TypeError("quantile range must be two element sequence")
rng = sorted(rng)
pct = percentile_func(x, rng, axis=axis, interpolation=interpolation,
keepdims=keepdims, contains_nan=contains_nan)
out = np.subtract(pct[1], pct[0])
if scale != 1.0:
out /= scale
return out
def _iqr_percentile(x, q, axis=None, interpolation='linear', keepdims=False, contains_nan=False):
"""
Private wrapper that works around older versions of `numpy`.
While this function is pretty much necessary for the moment, it
should be removed as soon as the minimum supported numpy version
allows.
"""
if contains_nan and NumpyVersion(np.__version__) < '1.10.0a':
# I see no way to avoid the version check to ensure that the corrected
# NaN behavior has been implemented except to call `percentile` on a
# small array.
msg = "Keyword nan_policy='propagate' not correctly supported for " \
"numpy versions < 1.10.x. The default behavior of " \
"`numpy.percentile` will be used."
warnings.warn(msg, RuntimeWarning)
try:
# For older versions of numpy, there are two things that can cause a
# problem here: missing keywords and non-scalar axis. The former can be
# partially handled with a warning, the latter can be handled fully by
# hacking in an implementation similar to numpy's function for
# providing multi-axis functionality
# (`numpy.lib.function_base._ureduce` for the curious).
result = np.percentile(x, q, axis=axis, keepdims=keepdims,
interpolation=interpolation)
except TypeError:
if interpolation != 'linear' or keepdims:
# At time of writing, this means np.__version__ < 1.9.0
warnings.warn("Keywords interpolation and keepdims not supported "
"for your version of numpy", RuntimeWarning)
try:
# Special processing if axis is an iterable
original_size = len(axis)
except TypeError:
# Axis is a scalar at this point
pass
else:
axis = np.unique(np.asarray(axis) % x.ndim)
if original_size > axis.size:
# mimic numpy if axes are duplicated
raise ValueError("duplicate value in axis")
if axis.size == x.ndim:
# axis includes all axes: revert to None
axis = None
elif axis.size == 1:
# no rolling necessary
axis = axis[0]
else:
# roll multiple axes to the end and flatten that part out
for ax in axis[::-1]:
x = np.rollaxis(x, ax, x.ndim)
x = x.reshape(x.shape[:-axis.size] +
(np.prod(x.shape[-axis.size:]),))
axis = -1
result = np.percentile(x, q, axis=axis)
return result
def _iqr_nanpercentile(x, q, axis=None, interpolation='linear', keepdims=False, contains_nan=False):
"""
Private wrapper that works around the following:
1. A bug in `np.nanpercentile` that was around until numpy version
1.11.0.
2. A bug in `np.percentile` NaN handling that was fixed in numpy
version 1.10.0.
3. The non-existence of `np.nanpercentile` before numpy version
1.9.0.
While this function is pretty much necessary for the moment, it
should be removed as soon as the minimum supported numpy version
allows.
"""
if hasattr(np, 'nanpercentile'):
# np.nanpercentile exists, i.e. np.__version__ >= 1.9.0 at time of writing
result = np.nanpercentile(x, q, axis=axis,
interpolation=interpolation, keepdims=keepdims)
# If the result is non-scalar and this version of nanpercentile does not
# roll the reduced axis properly, fix the axis placement by hand.
# I see no way of avoiding the version test since dimensions may just
# happen to match in the data.
if result.ndim > 1 and NumpyVersion(np.__version__) < '1.11.0a':
axis = np.asarray(axis)
if axis.size == 1:
# If only one axis specified, reduction happens along that dimension
if axis.ndim == 0:
axis = axis[None]
result = np.rollaxis(result, axis[0])
else:
# If multiple axes, the reduced dimension is last
result = np.rollaxis(result, -1)
else:
msg = "Keyword nan_policy='omit' not correctly supported for numpy " \
"versions < 1.9.x. The default behavior of numpy.percentile " \
"will be used."
warnings.warn(msg, RuntimeWarning)
result = _iqr_percentile(x, q, axis=axis)
return result
#####################################
# TRIMMING FUNCTIONS #
#####################################
@np.deprecate(message="stats.threshold is deprecated in scipy 0.17.0")
def threshold(a, threshmin=None, threshmax=None, newval=0):
"""
Clip array to a given value.
Similar to numpy.clip(), except that values less than `threshmin` or
greater than `threshmax` are replaced by `newval`, instead of by
`threshmin` and `threshmax` respectively.
Parameters
----------
a : array_like
Data to threshold.
threshmin : float, int or None, optional
Minimum threshold, defaults to None.
threshmax : float, int or None, optional
Maximum threshold, defaults to None.
newval : float or int, optional
Value to put in place of values in `a` outside of bounds.
Defaults to 0.
Returns
-------
out : ndarray
The clipped input array, with values less than `threshmin` or
greater than `threshmax` replaced with `newval`.
Examples
--------
>>> a = np.array([9, 9, 6, 3, 1, 6, 1, 0, 0, 8])
>>> from scipy import stats
>>> stats.threshold(a, threshmin=2, threshmax=8, newval=-1)
array([-1, -1, 6, 3, -1, 6, -1, -1, -1, 8])
"""
a = asarray(a).copy()
mask = zeros(a.shape, dtype=bool)
if threshmin is not None:
mask |= (a < threshmin)
if threshmax is not None:
mask |= (a > threshmax)
a[mask] = newval
return a
SigmaclipResult = namedtuple('SigmaclipResult', ('clipped', 'lower', 'upper'))
def sigmaclip(a, low=4., high=4.):
"""
Iterative sigma-clipping of array elements.
The output array contains only those elements of the input array `a`
that satisfy the conditions ::
mean(c) - std(c)*low < c < mean(c) + std(c)*high
Starting from the full sample, all elements outside the critical range are
removed. The iteration continues with a new critical range until no
elements are outside the range.
Parameters
----------
a : array_like
Data array, will be raveled if not 1-D.
low : float, optional
Lower bound factor of sigma clipping. Default is 4.
high : float, optional
Upper bound factor of sigma clipping. Default is 4.
Returns
-------
clipped : ndarray
Input array with clipped elements removed.
lower : float
Lower threshold value used for clipping.
upper : float
Upper threshold value used for clipping.
Examples
--------
>>> from scipy.stats import sigmaclip
>>> a = np.concatenate((np.linspace(9.5, 10.5, 31),
... np.linspace(0, 20, 5)))
>>> fact = 1.5
>>> c, low, upp = sigmaclip(a, fact, fact)
>>> c
array([ 9.96666667, 10. , 10.03333333, 10. ])
>>> c.var(), c.std()
(0.00055555555555555165, 0.023570226039551501)
>>> low, c.mean() - fact*c.std(), c.min()
(9.9646446609406727, 9.9646446609406727, 9.9666666666666668)
>>> upp, c.mean() + fact*c.std(), c.max()
(10.035355339059327, 10.035355339059327, 10.033333333333333)
>>> a = np.concatenate((np.linspace(9.5, 10.5, 11),
... np.linspace(-100, -50, 3)))
>>> c, low, upp = sigmaclip(a, 1.8, 1.8)
>>> (c == np.linspace(9.5, 10.5, 11)).all()
True
"""
c = np.asarray(a).ravel()
delta = 1
while delta:
c_std = c.std()
c_mean = c.mean()
size = c.size
critlower = c_mean - c_std*low
critupper = c_mean + c_std*high
c = c[(c > critlower) & (c < critupper)]
delta = size - c.size
return SigmaclipResult(c, critlower, critupper)
def trimboth(a, proportiontocut, axis=0):
"""
Slices off a proportion of items from both ends of an array.
Slices off the passed proportion of items from both ends of the passed
array (i.e., with `proportiontocut` = 0.1, slices leftmost 10% **and**
rightmost 10% of scores). The trimmed values are the lowest and
highest ones.
Slices off less if proportion results in a non-integer slice index (i.e.,
conservatively slices off `proportiontocut`).
Parameters
----------
a : array_like
Data to trim.
proportiontocut : float
Proportion (in range 0-1) of total data set to trim of each end.
axis : int or None, optional
Axis along which to trim data. Default is 0. If None, compute over
the whole array `a`.
Returns
-------
out : ndarray
Trimmed version of array `a`. The order of the trimmed content
is undefined.
See Also
--------
trim_mean
Examples
--------
>>> from scipy import stats
>>> a = np.arange(20)
>>> b = stats.trimboth(a, 0.1)
>>> b.shape
(16,)
"""
a = np.asarray(a)
if a.size == 0:
return a
if axis is None:
a = a.ravel()
axis = 0
nobs = a.shape[axis]
lowercut = int(proportiontocut * nobs)
uppercut = nobs - lowercut
if (lowercut >= uppercut):
raise ValueError("Proportion too big.")
# np.partition is preferred but it only exist in numpy 1.8.0 and higher,
# in those cases we use np.sort
try:
atmp = np.partition(a, (lowercut, uppercut - 1), axis)
except AttributeError:
atmp = np.sort(a, axis)
sl = [slice(None)] * atmp.ndim
sl[axis] = slice(lowercut, uppercut)
return atmp[sl]
def trim1(a, proportiontocut, tail='right', axis=0):
"""
Slices off a proportion from ONE end of the passed array distribution.
If `proportiontocut` = 0.1, slices off 'leftmost' or 'rightmost'
10% of scores. The lowest or highest values are trimmed (depending on
the tail).
Slices off less if proportion results in a non-integer slice index
(i.e., conservatively slices off `proportiontocut`).
Parameters
----------
a : array_like
Input array
proportiontocut : float
Fraction to cut off of 'left' or 'right' of distribution
tail : {'left', 'right'}, optional
Defaults to 'right'.
axis : int or None, optional
Axis along which to trim data. Default is 0. If None, compute over
the whole array `a`.
Returns
-------
trim1 : ndarray
Trimmed version of array `a`. The order of the trimmed content is
undefined.
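Examples
--------
A minimal illustration; trimming 10% from the right of 20 values leaves 18
(the order of the surviving values is undefined):
>>> from scipy import stats
>>> a = np.arange(20)
>>> b = stats.trim1(a, 0.1, tail='right')
>>> b.shape
(18,)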
"""
a = np.asarray(a)
if axis is None:
a = a.ravel()
axis = 0
nobs = a.shape[axis]
# avoid possible corner case
if proportiontocut >= 1:
return []
if tail.lower() == 'right':
lowercut = 0
uppercut = nobs - int(proportiontocut * nobs)
elif tail.lower() == 'left':
lowercut = int(proportiontocut * nobs)
uppercut = nobs
# np.partition is preferred but it only exist in numpy 1.8.0 and higher,
# in those cases we use np.sort
try:
atmp = np.partition(a, (lowercut, uppercut - 1), axis)
except AttributeError:
atmp = np.sort(a, axis)
return atmp[lowercut:uppercut]
def trim_mean(a, proportiontocut, axis=0):
"""
Return mean of array after trimming distribution from both tails.
If `proportiontocut` = 0.1, slices off 'leftmost' and 'rightmost' 10% of
scores. The input is sorted before slicing. Slices off less if proportion
results in a non-integer slice index (i.e., conservatively slices off
`proportiontocut`).
Parameters
----------
a : array_like
Input array
proportiontocut : float
Fraction to cut off of both tails of the distribution
axis : int or None, optional
Axis along which the trimmed means are computed. Default is 0.
If None, compute over the whole array `a`.
Returns
-------
trim_mean : ndarray
Mean of trimmed array.
See Also
--------
trimboth
tmean : compute the trimmed mean ignoring values outside given `limits`.
Examples
--------
>>> from scipy import stats
>>> x = np.arange(20)
>>> stats.trim_mean(x, 0.1)
9.5
>>> x2 = x.reshape(5, 4)
>>> x2
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15],
[16, 17, 18, 19]])
>>> stats.trim_mean(x2, 0.25)
array([ 8., 9., 10., 11.])
>>> stats.trim_mean(x2, 0.25, axis=1)
array([ 1.5, 5.5, 9.5, 13.5, 17.5])
"""
a = np.asarray(a)
if a.size == 0:
return np.nan
if axis is None:
a = a.ravel()
axis = 0
nobs = a.shape[axis]
lowercut = int(proportiontocut * nobs)
uppercut = nobs - lowercut
if (lowercut > uppercut):
raise ValueError("Proportion too big.")
# np.partition is preferred but it only exist in numpy 1.8.0 and higher,
# in those cases we use np.sort
try:
atmp = np.partition(a, (lowercut, uppercut - 1), axis)
except AttributeError:
atmp = np.sort(a, axis)
sl = [slice(None)] * atmp.ndim
sl[axis] = slice(lowercut, uppercut)
return np.mean(atmp[sl], axis=axis)
F_onewayResult = namedtuple('F_onewayResult', ('statistic', 'pvalue'))
def f_oneway(*args):
"""
Performs a 1-way ANOVA.
The one-way ANOVA tests the null hypothesis that two or more groups have
the same population mean. The test is applied to samples from two or
more groups, possibly with differing sizes.
Parameters
----------
sample1, sample2, ... : array_like
The sample measurements for each group.
Returns
-------
statistic : float
The computed F-value of the test.
pvalue : float
The associated p-value from the F-distribution.
Notes
-----
The ANOVA test has important assumptions that must be satisfied in order
for the associated p-value to be valid.
1. The samples are independent.
2. Each sample is from a normally distributed population.
3. The population standard deviations of the groups are all equal. This
property is known as homoscedasticity.
If these assumptions are not true for a given set of data, it may still be
possible to use the Kruskal-Wallis H-test (`scipy.stats.kruskal`) although
with some loss of power.
The algorithm is from Heiman[2], pp.394-7.
References
----------
.. [1] Lowry, Richard. "Concepts and Applications of Inferential
Statistics". Chapter 14.
http://faculty.vassar.edu/lowry/ch14pt1.html
.. [2] Heiman, G.W. Research Methods in Statistics. 2002.
.. [3] McDonald, G. H. "Handbook of Biological Statistics", One-way ANOVA.
http://www.biostathandbook.com/onewayanova.html
Examples
--------
>>> import scipy.stats as stats
[3]_ Here are some data on a shell measurement (the length of the anterior
adductor muscle scar, standardized by dividing by length) in the mussel
Mytilus trossulus from five locations: Tillamook, Oregon; Newport, Oregon;
Petersburg, Alaska; Magadan, Russia; and Tvarminne, Finland, taken from a
much larger data set used in McDonald et al. (1991).
>>> tillamook = [0.0571, 0.0813, 0.0831, 0.0976, 0.0817, 0.0859, 0.0735,
... 0.0659, 0.0923, 0.0836]
>>> newport = [0.0873, 0.0662, 0.0672, 0.0819, 0.0749, 0.0649, 0.0835,
... 0.0725]
>>> petersburg = [0.0974, 0.1352, 0.0817, 0.1016, 0.0968, 0.1064, 0.105]
>>> magadan = [0.1033, 0.0915, 0.0781, 0.0685, 0.0677, 0.0697, 0.0764,
... 0.0689]
>>> tvarminne = [0.0703, 0.1026, 0.0956, 0.0973, 0.1039, 0.1045]
>>> stats.f_oneway(tillamook, newport, petersburg, magadan, tvarminne)
(7.1210194716424473, 0.00028122423145345439)
"""
args = [np.asarray(arg, dtype=float) for arg in args]
# ANOVA on N groups, each in its own array
num_groups = len(args)
alldata = np.concatenate(args)
bign = len(alldata)
# Determine the mean of the data, and subtract that from all inputs to a
# variance (via sum_of_sq / sq_of_sum) calculation. Variance is invariant
# to a shift in location, and centering all data around zero vastly
# improves numerical stability.
offset = alldata.mean()
alldata -= offset
sstot = _sum_of_squares(alldata) - (_square_of_sums(alldata) / float(bign))
ssbn = 0
for a in args:
ssbn += _square_of_sums(a - offset) / float(len(a))
# Naming: variables ending in bn/b are for "between treatments", wn/w are
# for "within treatments"
ssbn -= (_square_of_sums(alldata) / float(bign))
sswn = sstot - ssbn
dfbn = num_groups - 1
dfwn = bign - num_groups
msb = ssbn / float(dfbn)
msw = sswn / float(dfwn)
f = msb / msw
prob = special.fdtrc(dfbn, dfwn, f) # equivalent to stats.f.sf
return F_onewayResult(f, prob)
def pearsonr(x, y):
"""
Calculates a Pearson correlation coefficient and the p-value for testing
non-correlation.
The Pearson correlation coefficient measures the linear relationship
between two datasets. Strictly speaking, Pearson's correlation requires
that each dataset be normally distributed, and not necessarily zero-mean.
Like other correlation coefficients, this one varies between -1 and +1
with 0 implying no correlation. Correlations of -1 or +1 imply an exact
linear relationship. Positive correlations imply that as x increases, so
does y. Negative correlations imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Pearson correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
Parameters
----------
x : (N,) array_like
Input
y : (N,) array_like
Input
Returns
-------
r : float
Pearson's correlation coefficient
p-value : float
2-tailed p-value
References
----------
http://www.statsoft.com/textbook/glosp.html#Pearson%20Correlation
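Examples
--------
A minimal illustration; a perfectly linear relationship gives ``r = 1.0``
and, by the implementation below, a p-value of exactly 0.0:
>>> from scipy import stats
>>> r, p = stats.pearsonr([1, 2, 3, 4, 5], [2, 4, 6, 8, 10])
>>> r, p
(1.0, 0.0)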
"""
# x and y should have same length.
x = np.asarray(x)
y = np.asarray(y)
n = len(x)
mx = x.mean()
my = y.mean()
xm, ym = x - mx, y - my
r_num = np.add.reduce(xm * ym)
r_den = np.sqrt(_sum_of_squares(xm) * _sum_of_squares(ym))
r = r_num / r_den
# Presumably, if abs(r) > 1, then it is only some small artifact of floating
# point arithmetic.
r = max(min(r, 1.0), -1.0)
df = n - 2
if abs(r) == 1.0:
prob = 0.0
else:
t_squared = r**2 * (df / ((1.0 - r) * (1.0 + r)))
prob = _betai(0.5*df, 0.5, df/(df+t_squared))
return r, prob
def fisher_exact(table, alternative='two-sided'):
"""Performs a Fisher exact test on a 2x2 contingency table.
Parameters
----------
table : array_like of ints
A 2x2 contingency table. Elements should be non-negative integers.
alternative : {'two-sided', 'less', 'greater'}, optional
Which alternative hypothesis to the null hypothesis the test uses.
Default is 'two-sided'.
Returns
-------
oddsratio : float
This is the prior odds ratio and not a posterior estimate.
p_value : float
P-value, the probability of obtaining a distribution at least as
extreme as the one that was actually observed, assuming that the
null hypothesis is true.
See Also
--------
chi2_contingency : Chi-square test of independence of variables in a
contingency table.
Notes
-----
The calculated odds ratio is different from the one R uses. This scipy
implementation returns the (more common) "unconditional Maximum
Likelihood Estimate", while R uses the "conditional Maximum Likelihood
Estimate".
For tables with large numbers, the (inexact) chi-square test implemented
in the function `chi2_contingency` can also be used.
Examples
--------
Say we spend a few days counting whales and sharks in the Atlantic and
Indian oceans. In the Atlantic ocean we find 8 whales and 1 shark, in the
Indian ocean 2 whales and 5 sharks. Then our contingency table is::
Atlantic Indian
whales 8 2
sharks 1 5
We use this table to find the p-value:
>>> import scipy.stats as stats
>>> oddsratio, pvalue = stats.fisher_exact([[8, 2], [1, 5]])
>>> pvalue
0.0349...
The probability that we would observe this or an even more imbalanced ratio
by chance is about 3.5%. A commonly used significance level is 5%--if we
adopt that, we can therefore conclude that our observed imbalance is
statistically significant; whales prefer the Atlantic while sharks prefer
the Indian ocean.
"""
hypergeom = distributions.hypergeom
c = np.asarray(table, dtype=np.int64) # int32 is not enough for the algorithm
if not c.shape == (2, 2):
raise ValueError("The input `table` must be of shape (2, 2).")
if np.any(c < 0):
raise ValueError("All values in `table` must be nonnegative.")
if 0 in c.sum(axis=0) or 0 in c.sum(axis=1):
# If both values in a row or column are zero, the p-value is 1 and
# the odds ratio is NaN.
return np.nan, 1.0
if c[1,0] > 0 and c[0,1] > 0:
oddsratio = c[0,0] * c[1,1] / float(c[1,0] * c[0,1])
else:
oddsratio = np.inf
n1 = c[0,0] + c[0,1]
n2 = c[1,0] + c[1,1]
n = c[0,0] + c[1,0]
def binary_search(n, n1, n2, side):
"""Binary search for where to begin lower/upper halves in two-sided
test.
"""
if side == "upper":
minval = mode
maxval = n
else:
minval = 0
maxval = mode
guess = -1
while maxval - minval > 1:
if maxval == minval + 1 and guess == minval:
guess = maxval
else:
guess = (maxval + minval) // 2
pguess = hypergeom.pmf(guess, n1 + n2, n1, n)
if side == "upper":
ng = guess - 1
else:
ng = guess + 1
if pguess <= pexact < hypergeom.pmf(ng, n1 + n2, n1, n):
break
elif pguess < pexact:
maxval = guess
else:
minval = guess
if guess == -1:
guess = minval
if side == "upper":
while guess > 0 and hypergeom.pmf(guess, n1 + n2, n1, n) < pexact * epsilon:
guess -= 1
while hypergeom.pmf(guess, n1 + n2, n1, n) > pexact / epsilon:
guess += 1
else:
while hypergeom.pmf(guess, n1 + n2, n1, n) < pexact * epsilon:
guess += 1
while guess > 0 and hypergeom.pmf(guess, n1 + n2, n1, n) > pexact / epsilon:
guess -= 1
return guess
if alternative == 'less':
pvalue = hypergeom.cdf(c[0,0], n1 + n2, n1, n)
elif alternative == 'greater':
# Same formula as the 'less' case, but with the second column.
pvalue = hypergeom.cdf(c[0,1], n1 + n2, n1, c[0,1] + c[1,1])
elif alternative == 'two-sided':
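# Two-sided p-value: accumulate the probabilities of all tables that are
# at most as likely as the observed one (up to the relative tolerance
# `epsilon`); `binary_search` locates the cutoff in the opposite tail.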
mode = int(float((n + 1) * (n1 + 1)) / (n1 + n2 + 2))
pexact = hypergeom.pmf(c[0,0], n1 + n2, n1, n)
pmode = hypergeom.pmf(mode, n1 + n2, n1, n)
epsilon = 1 - 1e-4
if np.abs(pexact - pmode) / np.maximum(pexact, pmode) <= 1 - epsilon:
return oddsratio, 1.
elif c[0,0] < mode:
plower = hypergeom.cdf(c[0,0], n1 + n2, n1, n)
if hypergeom.pmf(n, n1 + n2, n1, n) > pexact / epsilon:
return oddsratio, plower
guess = binary_search(n, n1, n2, "upper")
pvalue = plower + hypergeom.sf(guess - 1, n1 + n2, n1, n)
else:
pupper = hypergeom.sf(c[0,0] - 1, n1 + n2, n1, n)
if hypergeom.pmf(0, n1 + n2, n1, n) > pexact / epsilon:
return oddsratio, pupper
guess = binary_search(n, n1, n2, "lower")
pvalue = pupper + hypergeom.cdf(guess, n1 + n2, n1, n)
else:
msg = "`alternative` should be one of {'two-sided', 'less', 'greater'}"
raise ValueError(msg)
if pvalue > 1.0:
pvalue = 1.0
return oddsratio, pvalue
SpearmanrResult = namedtuple('SpearmanrResult', ('correlation', 'pvalue'))
def spearmanr(a, b=None, axis=0, nan_policy='propagate'):
"""
Calculates a Spearman rank-order correlation coefficient and the p-value
to test for non-correlation.
The Spearman correlation is a nonparametric measure of the monotonicity
of the relationship between two datasets. Unlike the Pearson correlation,
the Spearman correlation does not assume that both datasets are normally
distributed. Like other correlation coefficients, this one varies
between -1 and +1 with 0 implying no correlation. Correlations of -1 or
+1 imply an exact monotonic relationship. Positive correlations imply that
as x increases, so does y. Negative correlations imply that as x
increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
Parameters
----------
a, b : 1D or 2D array_like, b is optional
One or two 1-D or 2-D arrays containing multiple variables and
observations. When these are 1-D, each represents a vector of
observations of a single variable. For the behavior in the 2-D case,
see under ``axis``, below.
Both arrays need to have the same length in the ``axis`` dimension.
axis : int or None, optional
If axis=0 (default), then each column represents a variable, with
observations in the rows. If axis=1, the relationship is transposed:
each row represents a variable, while the columns contain observations.
If axis=None, then both arrays will be raveled.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
correlation : float or ndarray (2-D square)
Spearman correlation matrix or correlation coefficient (if only 2
variables are given as parameters). Correlation matrix is square with
length equal to total number of variables (columns or rows) in a and b
combined.
pvalue : float
The two-sided p-value for a hypothesis test whose null hypothesis is
that two sets of data are uncorrelated, has same dimension as rho.
Notes
-----
Changes in scipy 0.8.0: rewrite to add tie-handling, and axis.
References
----------
.. [1] Zwillinger, D. and Kokoska, S. (2000). CRC Standard
Probability and Statistics Tables and Formulae. Chapman & Hall: New
York. 2000.
Section 14.7
Examples
--------
>>> from scipy import stats
>>> stats.spearmanr([1,2,3,4,5], [5,6,7,8,7])
(0.82078268166812329, 0.088587005313543798)
>>> np.random.seed(1234321)
>>> x2n = np.random.randn(100, 2)
>>> y2n = np.random.randn(100, 2)
>>> stats.spearmanr(x2n)
(0.059969996999699973, 0.55338590803773591)
>>> stats.spearmanr(x2n[:,0], x2n[:,1])
(0.059969996999699973, 0.55338590803773591)
>>> rho, pval = stats.spearmanr(x2n, y2n)
>>> rho
array([[ 1. , 0.05997 , 0.18569457, 0.06258626],
[ 0.05997 , 1. , 0.110003 , 0.02534653],
[ 0.18569457, 0.110003 , 1. , 0.03488749],
[ 0.06258626, 0.02534653, 0.03488749, 1. ]])
>>> pval
array([[ 0. , 0.55338591, 0.06435364, 0.53617935],
[ 0.55338591, 0. , 0.27592895, 0.80234077],
[ 0.06435364, 0.27592895, 0. , 0.73039992],
[ 0.53617935, 0.80234077, 0.73039992, 0. ]])
>>> rho, pval = stats.spearmanr(x2n.T, y2n.T, axis=1)
>>> rho
array([[ 1. , 0.05997 , 0.18569457, 0.06258626],
[ 0.05997 , 1. , 0.110003 , 0.02534653],
[ 0.18569457, 0.110003 , 1. , 0.03488749],
[ 0.06258626, 0.02534653, 0.03488749, 1. ]])
>>> stats.spearmanr(x2n, y2n, axis=None)
(0.10816770419260482, 0.1273562188027364)
>>> stats.spearmanr(x2n.ravel(), y2n.ravel())
(0.10816770419260482, 0.1273562188027364)
>>> xint = np.random.randint(10, size=(100, 2))
>>> stats.spearmanr(xint)
(0.052760927029710199, 0.60213045837062351)
"""
a, axisout = _chk_asarray(a, axis)
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
b = ma.masked_invalid(b)
return mstats_basic.spearmanr(a, b, axis)
if a.size <= 1:
return SpearmanrResult(np.nan, np.nan)
ar = np.apply_along_axis(rankdata, axisout, a)
br = None
if b is not None:
b, axisout = _chk_asarray(b, axis)
contains_nan, nan_policy = _contains_nan(b, nan_policy)
if contains_nan and nan_policy == 'omit':
b = ma.masked_invalid(b)
return mstats_basic.spearmanr(a, b, axis)
br = np.apply_along_axis(rankdata, axisout, b)
n = a.shape[axisout]
rs = np.corrcoef(ar, br, rowvar=axisout)
olderr = np.seterr(divide='ignore') # rs can have elements equal to 1
try:
# clip the small negative values possibly caused by rounding
# errors before taking the square root
t = rs * np.sqrt(((n-2)/((rs+1.0)*(1.0-rs))).clip(0))
finally:
np.seterr(**olderr)
prob = 2 * distributions.t.sf(np.abs(t), n-2)
if rs.shape == (2, 2):
return SpearmanrResult(rs[1, 0], prob[1, 0])
else:
return SpearmanrResult(rs, prob)
PointbiserialrResult = namedtuple('PointbiserialrResult',
('correlation', 'pvalue'))
def pointbiserialr(x, y):
r"""
Calculates a point biserial correlation coefficient and its p-value.
The point biserial correlation is used to measure the relationship
between a binary variable, x, and a continuous variable, y. Like other
correlation coefficients, this one varies between -1 and +1 with 0
implying no correlation. Correlations of -1 or +1 imply a determinative
relationship.
This function uses a shortcut formula but produces the same result as
`pearsonr`.
Parameters
----------
x : array_like of bools
Input array.
y : array_like
Input array.
Returns
-------
correlation : float
R value
pvalue : float
2-tailed p-value
Notes
-----
`pointbiserialr` uses a t-test with ``n-2`` degrees of freedom.
It is equivalent to `pearsonr`.
The value of the point-biserial correlation can be calculated from:
.. math::
r_{pb} = \frac{\overline{Y_{1}} -
\overline{Y_{0}}}{s_{y}}\sqrt{\frac{N_{0} N_{1}}{N (N - 1)}}
Where :math:`Y_{0}` and :math:`Y_{1}` are means of the metric
observations coded 0 and 1 respectively; :math:`N_{0}` and :math:`N_{1}`
are number of observations coded 0 and 1 respectively; :math:`N` is the
total number of observations and :math:`s_{y}` is the standard
deviation of all the metric observations.
A value of :math:`r_{pb}` that is significantly different from zero is
completely equivalent to a significant difference in means between the two
groups. Thus, an independent groups t Test with :math:`N-2` degrees of
freedom may be used to test whether :math:`r_{pb}` is nonzero. The
relation between the t-statistic for comparing two independent groups and
:math:`r_{pb}` is given by:
.. math::
t = \sqrt{N - 2}\frac{r_{pb}}{\sqrt{1 - r^{2}_{pb}}}
References
----------
.. [1] J. Lev, "The Point Biserial Coefficient of Correlation", Ann. Math.
Statist., Vol. 20, no.1, pp. 125-126, 1949.
.. [2] R.F. Tate, "Correlation Between a Discrete and a Continuous
Variable. Point-Biserial Correlation.", Ann. Math. Statist., Vol. 25,
np. 3, pp. 603-607, 1954.
.. [3] http://onlinelibrary.wiley.com/doi/10.1002/9781118445112.stat06227/full
Examples
--------
>>> from scipy import stats
>>> a = np.array([0, 0, 0, 1, 1, 1, 1])
>>> b = np.arange(7)
>>> stats.pointbiserialr(a, b)
(0.8660254037844386, 0.011724811003954652)
>>> stats.pearsonr(a, b)
(0.86602540378443871, 0.011724811003954626)
>>> np.corrcoef(a, b)
array([[ 1. , 0.8660254],
[ 0.8660254, 1. ]])
"""
rpb, prob = pearsonr(x, y)
return PointbiserialrResult(rpb, prob)
KendalltauResult = namedtuple('KendalltauResult', ('correlation', 'pvalue'))
def kendalltau(x, y, initial_lexsort=None, nan_policy='propagate'):
"""
Calculates Kendall's tau, a correlation measure for ordinal data.
Kendall's tau is a measure of the correspondence between two rankings.
Values close to 1 indicate strong agreement, values close to -1 indicate
strong disagreement. This is the tau-b version of Kendall's tau which
accounts for ties.
Parameters
----------
x, y : array_like
Arrays of rankings, of the same shape. If arrays are not 1-D, they will
be flattened to 1-D.
initial_lexsort : bool, optional
Whether to use lexsort or quicksort as the sorting method for the
initial sort of the inputs. Default is lexsort (True), for which
`kendalltau` is of complexity O(n log(n)). If False, the complexity is
O(n^2), but with a smaller pre-factor (so quicksort may be faster for
small arrays).
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
correlation : float
The tau statistic.
pvalue : float
The two-sided p-value for a hypothesis test whose null hypothesis is
an absence of association, tau = 0.
See also
--------
spearmanr : Calculates a Spearman rank-order correlation coefficient.
theilslopes : Computes the Theil-Sen estimator for a set of points (x, y).
Notes
-----
The definition of Kendall's tau that is used is::
tau = (P - Q) / sqrt((P + Q + T) * (P + Q + U))
where P is the number of concordant pairs, Q the number of discordant
pairs, T the number of ties only in `x`, and U the number of ties only in
`y`. If a tie occurs for the same pair in both `x` and `y`, it is not
added to either T or U.
References
----------
W.R. Knight, "A Computer Method for Calculating Kendall's Tau with
Ungrouped Data", Journal of the American Statistical Association, Vol. 61,
No. 314, Part 1, pp. 436-439, 1966.
Examples
--------
>>> from scipy import stats
>>> x1 = [12, 2, 1, 12, 2]
>>> x2 = [1, 4, 7, 1, 0]
>>> tau, p_value = stats.kendalltau(x1, x2)
>>> tau
-0.47140452079103173
>>> p_value
0.24821309157521476
"""
x = np.asarray(x).ravel()
y = np.asarray(y).ravel()
if x.size != y.size:
raise ValueError("All inputs to `kendalltau` must be of the same size, "
"found x-size %s and y-size %s" % (x.size, y.size))
elif not x.size or not y.size:
return KendalltauResult(np.nan, np.nan) # Return NaN if arrays are empty
# check both x and y
cnx, npx = _contains_nan(x, nan_policy)
cny, npy = _contains_nan(y, nan_policy)
contains_nan = cnx or cny
if npx == 'omit' or npy == 'omit':
nan_policy = 'omit'
if contains_nan and nan_policy == 'propagate':
return KendalltauResult(np.nan, np.nan)
elif contains_nan and nan_policy == 'omit':
x = ma.masked_invalid(x)
y = ma.masked_invalid(y)
return mstats_basic.kendalltau(x, y)
if initial_lexsort is not None:  # deprecated keyword, no longer used
    warnings.warn('"initial_lexsort" is deprecated and no longer used.')
def count_rank_tie(ranks):
cnt = np.bincount(ranks).astype('int64', copy=False)
return (cnt * (cnt - 1) // 2).sum()
size = x.size
perm = np.argsort(y) # sort on y and convert y to dense ranks
x, y = x[perm], y[perm]
y = np.r_[True, y[1:] != y[:-1]].cumsum(dtype=np.intp)
# stable sort on x and convert x to dense ranks
perm = np.argsort(x, kind='mergesort')
x, y = x[perm], y[perm]
x = np.r_[True, x[1:] != x[:-1]].cumsum(dtype=np.intp)
con, dis = _kendall_condis(x, y) # concordant & discordant pairs
obs = np.r_[True, (x[1:] != x[:-1]) | (y[1:] != y[:-1]), True]
cnt = np.diff(np.where(obs)[0]).astype('int64', copy=False)
ntie = (cnt * (cnt - 1) // 2).sum() # joint ties
xtie = count_rank_tie(x) - ntie # ties only in x
ytie = count_rank_tie(y) - ntie # ties only in y
if con + dis + xtie == 0 or con + dis + ytie == 0:
return KendalltauResult(np.nan, np.nan)
tau = (con - dis) / np.sqrt(con + dis + xtie) / np.sqrt(con + dis + ytie)
# what follows reproduces the ending of Gary Strangman's original
# stats.kendalltau() in SciPy
svar = (4.0 * size + 10.0) / (9.0 * size * (size - 1))
z = tau / np.sqrt(svar)
prob = special.erfc(np.abs(z) / 1.4142136)
return KendalltauResult(tau, prob)
#####################################
# INFERENTIAL STATISTICS #
#####################################
Ttest_1sampResult = namedtuple('Ttest_1sampResult', ('statistic', 'pvalue'))
def ttest_1samp(a, popmean, axis=0, nan_policy='propagate'):
"""
Calculates the T-test for the mean of ONE group of scores.
This is a two-sided test for the null hypothesis that the expected value
(mean) of a sample of independent observations `a` is equal to the given
population mean, `popmean`.
Parameters
----------
a : array_like
sample observation
popmean : float or array_like
expected value in null hypothesis, if array_like than it must have the
same shape as `a` excluding the axis dimension
axis : int or None, optional
Axis along which to compute test. If None, compute over the whole
array `a`.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
statistic : float or array
t-statistic
pvalue : float or array
two-tailed p-value
Examples
--------
>>> from scipy import stats
>>> np.random.seed(7654567) # fix seed to get the same result
>>> rvs = stats.norm.rvs(loc=5, scale=10, size=(50,2))
Test if mean of random sample is equal to true mean, and different mean.
We reject the null hypothesis in the second case and don't reject it in
the first case.
>>> stats.ttest_1samp(rvs,5.0)
(array([-0.68014479, -0.04323899]), array([ 0.49961383, 0.96568674]))
>>> stats.ttest_1samp(rvs,0.0)
(array([ 2.77025808, 4.11038784]), array([ 0.00789095, 0.00014999]))
Examples using axis and non-scalar dimension for population mean.
>>> stats.ttest_1samp(rvs,[5.0,0.0])
(array([-0.68014479, 4.11038784]), array([ 4.99613833e-01, 1.49986458e-04]))
>>> stats.ttest_1samp(rvs.T,[5.0,0.0],axis=1)
(array([-0.68014479, 4.11038784]), array([ 4.99613833e-01, 1.49986458e-04]))
>>> stats.ttest_1samp(rvs,[[5.0],[0.0]])
(array([[-0.68014479, -0.04323899],
[ 2.77025808, 4.11038784]]), array([[ 4.99613833e-01, 9.65686743e-01],
[ 7.89094663e-03, 1.49986458e-04]]))
"""
a, axis = _chk_asarray(a, axis)
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.ttest_1samp(a, popmean, axis)
n = a.shape[axis]
df = n - 1
d = np.mean(a, axis) - popmean
v = np.var(a, axis, ddof=1)
denom = np.sqrt(v / float(n))
with np.errstate(divide='ignore', invalid='ignore'):
t = np.divide(d, denom)
t, prob = _ttest_finish(df, t)
return Ttest_1sampResult(t, prob)
def _ttest_finish(df, t):
"""Common code between all 3 t-test functions."""
prob = distributions.t.sf(np.abs(t), df) * 2 # use np.abs to get upper tail
if t.ndim == 0:
t = t[()]
return t, prob
def _ttest_ind_from_stats(mean1, mean2, denom, df):
d = mean1 - mean2
with np.errstate(divide='ignore', invalid='ignore'):
t = np.divide(d, denom)
t, prob = _ttest_finish(df, t)
return (t, prob)
def _unequal_var_ttest_denom(v1, n1, v2, n2):
vn1 = v1 / n1
vn2 = v2 / n2
with np.errstate(divide='ignore', invalid='ignore'):
df = (vn1 + vn2)**2 / (vn1**2 / (n1 - 1) + vn2**2 / (n2 - 1))
# If df is undefined, variances are zero (assumes n1 > 0 & n2 > 0).
# Hence it doesn't matter what df is as long as it's not NaN.
df = np.where(np.isnan(df), 1, df)
denom = np.sqrt(vn1 + vn2)
return df, denom
def _equal_var_ttest_denom(v1, n1, v2, n2):
df = n1 + n2 - 2.0
svar = ((n1 - 1) * v1 + (n2 - 1) * v2) / df
denom = np.sqrt(svar * (1.0 / n1 + 1.0 / n2))
return df, denom
Ttest_indResult = namedtuple('Ttest_indResult', ('statistic', 'pvalue'))
def ttest_ind_from_stats(mean1, std1, nobs1, mean2, std2, nobs2,
equal_var=True):
"""
T-test for means of two independent samples from descriptive statistics.
This is a two-sided test for the null hypothesis that 2 independent samples
have identical average (expected) values.
Parameters
----------
mean1 : array_like
The mean(s) of sample 1.
std1 : array_like
The standard deviation(s) of sample 1.
nobs1 : array_like
The number(s) of observations of sample 1.
mean2 : array_like
The mean(s) of sample 2
std2 : array_like
The standard deviations(s) of sample 2.
nobs2 : array_like
The number(s) of observations of sample 2.
equal_var : bool, optional
If True (default), perform a standard independent 2 sample test
that assumes equal population variances [1]_.
If False, perform Welch's t-test, which does not assume equal
population variance [2]_.
Returns
-------
statistic : float or array
The calculated t-statistics
pvalue : float or array
The two-tailed p-value.
See also
--------
scipy.stats.ttest_ind
Notes
-----
.. versionadded:: 0.16.0
References
----------
.. [1] http://en.wikipedia.org/wiki/T-test#Independent_two-sample_t-test
.. [2] http://en.wikipedia.org/wiki/Welch%27s_t_test
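Examples
--------
A minimal usage sketch with made-up summary statistics (the numbers are
illustrative only); the result unpacks into the t-statistic and the
two-tailed p-value:
>>> from scipy import stats
>>> t, p = stats.ttest_ind_from_stats(mean1=15.0, std1=9.4, nobs1=13,
...                                   mean2=12.0, std2=6.2, nobs2=11)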
"""
if equal_var:
df, denom = _equal_var_ttest_denom(std1**2, nobs1, std2**2, nobs2)
else:
df, denom = _unequal_var_ttest_denom(std1**2, nobs1,
std2**2, nobs2)
res = _ttest_ind_from_stats(mean1, mean2, denom, df)
return Ttest_indResult(*res)
def ttest_ind(a, b, axis=0, equal_var=True, nan_policy='propagate'):
"""
Calculates the T-test for the means of *two independent* samples of scores.
This is a two-sided test for the null hypothesis that 2 independent samples
have identical average (expected) values. This test assumes that the
populations have identical variances by default.
Parameters
----------
a, b : array_like
The arrays must have the same shape, except in the dimension
corresponding to `axis` (the first, by default).
axis : int or None, optional
Axis along which to compute test. If None, compute over the whole
arrays, `a`, and `b`.
equal_var : bool, optional
If True (default), perform a standard independent 2 sample test
that assumes equal population variances [1]_.
If False, perform Welch's t-test, which does not assume equal
population variance [2]_.
.. versionadded:: 0.11.0
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
statistic : float or array
The calculated t-statistic.
pvalue : float or array
The two-tailed p-value.
Notes
-----
We can use this test, if we observe two independent samples from
the same or different population, e.g. exam scores of boys and
girls or of two ethnic groups. The test measures whether the
average (expected) value differs significantly across samples. If
we observe a large p-value, for example larger than 0.05 or 0.1,
then we cannot reject the null hypothesis of identical average scores.
If the p-value is smaller than the threshold, e.g. 1%, 5% or 10%,
then we reject the null hypothesis of equal averages.
References
----------
.. [1] http://en.wikipedia.org/wiki/T-test#Independent_two-sample_t-test
.. [2] http://en.wikipedia.org/wiki/Welch%27s_t_test
Examples
--------
>>> from scipy import stats
>>> np.random.seed(12345678)
Test with sample with identical means:
>>> rvs1 = stats.norm.rvs(loc=5,scale=10,size=500)
>>> rvs2 = stats.norm.rvs(loc=5,scale=10,size=500)
>>> stats.ttest_ind(rvs1,rvs2)
(0.26833823296239279, 0.78849443369564776)
>>> stats.ttest_ind(rvs1,rvs2, equal_var = False)
(0.26833823296239279, 0.78849452749500748)
`ttest_ind` underestimates p for unequal variances:
>>> rvs3 = stats.norm.rvs(loc=5, scale=20, size=500)
>>> stats.ttest_ind(rvs1, rvs3)
(-0.46580283298287162, 0.64145827413436174)
>>> stats.ttest_ind(rvs1, rvs3, equal_var = False)
(-0.46580283298287162, 0.64149646246569292)
When n1 != n2, the equal variance t-statistic is no longer equal to the
unequal variance t-statistic:
>>> rvs4 = stats.norm.rvs(loc=5, scale=20, size=100)
>>> stats.ttest_ind(rvs1, rvs4)
(-0.99882539442782481, 0.3182832709103896)
>>> stats.ttest_ind(rvs1, rvs4, equal_var = False)
(-0.69712570584654099, 0.48716927725402048)
T-test with different means, variance, and n:
>>> rvs5 = stats.norm.rvs(loc=8, scale=20, size=100)
>>> stats.ttest_ind(rvs1, rvs5)
(-1.4679669854490653, 0.14263895620529152)
>>> stats.ttest_ind(rvs1, rvs5, equal_var = False)
(-0.94365973617132992, 0.34744170334794122)
"""
a, b, axis = _chk2_asarray(a, b, axis)
# check both a and b
cna, npa = _contains_nan(a, nan_policy)
cnb, npb = _contains_nan(b, nan_policy)
contains_nan = cna or cnb
if npa == 'omit' or npb == 'omit':
nan_policy = 'omit'
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
b = ma.masked_invalid(b)
return mstats_basic.ttest_ind(a, b, axis, equal_var)
if a.size == 0 or b.size == 0:
return Ttest_indResult(np.nan, np.nan)
v1 = np.var(a, axis, ddof=1)
v2 = np.var(b, axis, ddof=1)
n1 = a.shape[axis]
n2 = b.shape[axis]
if equal_var:
df, denom = _equal_var_ttest_denom(v1, n1, v2, n2)
else:
df, denom = _unequal_var_ttest_denom(v1, n1, v2, n2)
res = _ttest_ind_from_stats(np.mean(a, axis), np.mean(b, axis), denom, df)
return Ttest_indResult(*res)
Ttest_relResult = namedtuple('Ttest_relResult', ('statistic', 'pvalue'))
def ttest_rel(a, b, axis=0, nan_policy='propagate'):
"""
Calculates the T-test on TWO RELATED samples of scores, a and b.
This is a two-sided test for the null hypothesis that 2 related or
repeated samples have identical average (expected) values.
Parameters
----------
a, b : array_like
The arrays must have the same shape.
axis : int or None, optional
Axis along which to compute test. If None, compute over the whole
arrays, `a`, and `b`.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
statistic : float or array
t-statistic
pvalue : float or array
two-tailed p-value
Notes
-----
Examples for the use are scores of the same set of students in
different exams, or repeated sampling from the same units. The
test measures whether the average score differs significantly
across samples (e.g. exams). If we observe a large p-value, for
example greater than 0.05 or 0.1, then we cannot reject the null
hypothesis of identical average scores. If the p-value is smaller
than the threshold, e.g. 1%, 5% or 10%, then we reject the null
hypothesis of equal averages. Small p-values are associated with
large t-statistics.
References
----------
http://en.wikipedia.org/wiki/T-test#Dependent_t-test
Examples
--------
>>> from scipy import stats
>>> np.random.seed(12345678) # fix random seed to get same numbers
>>> rvs1 = stats.norm.rvs(loc=5,scale=10,size=500)
>>> rvs2 = (stats.norm.rvs(loc=5,scale=10,size=500) +
... stats.norm.rvs(scale=0.2,size=500))
>>> stats.ttest_rel(rvs1,rvs2)
(0.24101764965300962, 0.80964043445811562)
>>> rvs3 = (stats.norm.rvs(loc=8,scale=10,size=500) +
... stats.norm.rvs(scale=0.2,size=500))
>>> stats.ttest_rel(rvs1,rvs3)
(-3.9995108708727933, 7.3082402191726459e-005)
"""
a, b, axis = _chk2_asarray(a, b, axis)
cna, npa = _contains_nan(a, nan_policy)
cnb, npb = _contains_nan(b, nan_policy)
contains_nan = cna or cnb
if npa == 'omit' or npb == 'omit':
nan_policy = 'omit'
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
b = ma.masked_invalid(b)
m = ma.mask_or(ma.getmask(a), ma.getmask(b))
aa = ma.array(a, mask=m, copy=True)
bb = ma.array(b, mask=m, copy=True)
return mstats_basic.ttest_rel(aa, bb, axis)
if a.shape[axis] != b.shape[axis]:
raise ValueError('unequal length arrays')
if a.size == 0 or b.size == 0:
return Ttest_relResult(np.nan, np.nan)
n = a.shape[axis]
df = float(n - 1)
d = (a - b).astype(np.float64)
v = np.var(d, axis, ddof=1)
dm = np.mean(d, axis)
denom = np.sqrt(v / float(n))
with np.errstate(divide='ignore', invalid='ignore'):
t = np.divide(dm, denom)
t, prob = _ttest_finish(df, t)
return Ttest_relResult(t, prob)
KstestResult = namedtuple('KstestResult', ('statistic', 'pvalue'))
def kstest(rvs, cdf, args=(), N=20, alternative='two-sided', mode='approx'):
"""
Perform the Kolmogorov-Smirnov test for goodness of fit.
This performs a test of the distribution G(x) of an observed
random variable against a given distribution F(x). Under the null
hypothesis the two distributions are identical, G(x)=F(x). The
alternative hypothesis can be either 'two-sided' (default), 'less'
or 'greater'. The KS test is only valid for continuous distributions.
Parameters
----------
rvs : str, array or callable
If a string, it should be the name of a distribution in `scipy.stats`.
If an array, it should be a 1-D array of observations of random
variables.
If a callable, it should be a function to generate random variables;
it is required to have a keyword argument `size`.
cdf : str or callable
If a string, it should be the name of a distribution in `scipy.stats`.
If `rvs` is a string then `cdf` can be False or the same as `rvs`.
If a callable, that callable is used to calculate the cdf.
args : tuple, sequence, optional
Distribution parameters, used if `rvs` or `cdf` are strings.
N : int, optional
Sample size if `rvs` is string or callable. Default is 20.
alternative : {'two-sided', 'less','greater'}, optional
Defines the alternative hypothesis (see explanation above).
Default is 'two-sided'.
mode : 'approx' (default) or 'asymp', optional
Defines the distribution used for calculating the p-value.
- 'approx' : use approximation to exact distribution of test statistic
- 'asymp' : use asymptotic distribution of test statistic
Returns
-------
statistic : float
KS test statistic, either D, D+ or D-.
pvalue : float
One-tailed or two-tailed p-value.
Notes
-----
In the one-sided test, the alternative is that the empirical
cumulative distribution function of the random variable is "less"
or "greater" than the cumulative distribution function F(x) of the
hypothesis, ``G(x)<=F(x)``, resp. ``G(x)>=F(x)``.
Examples
--------
>>> from scipy import stats
>>> x = np.linspace(-15, 15, 9)
>>> stats.kstest(x, 'norm')
(0.44435602715924361, 0.038850142705171065)
>>> np.random.seed(987654321) # set random seed to get the same result
>>> stats.kstest('norm', False, N=100)
(0.058352892479417884, 0.88531190944151261)
The above lines are equivalent to:
>>> np.random.seed(987654321)
>>> stats.kstest(stats.norm.rvs(size=100), 'norm')
(0.058352892479417884, 0.88531190944151261)
*Test against one-sided alternative hypothesis*
Shift distribution to larger values, so that ``cdf_dgp(x) < norm.cdf(x)``:
>>> np.random.seed(987654321)
>>> x = stats.norm.rvs(loc=0.2, size=100)
>>> stats.kstest(x,'norm', alternative = 'less')
(0.12464329735846891, 0.040989164077641749)
Reject equal distribution against alternative hypothesis: less
>>> stats.kstest(x,'norm', alternative = 'greater')
(0.0072115233216311081, 0.98531158590396395)
Don't reject equal distribution against alternative hypothesis: greater
>>> stats.kstest(x,'norm', mode='asymp')
(0.12464329735846891, 0.08944488871182088)
*Testing t distributed random variables against normal distribution*
With 100 degrees of freedom the t distribution looks close to the normal
distribution, and the K-S test does not reject the hypothesis that the
sample came from the normal distribution:
>>> np.random.seed(987654321)
>>> stats.kstest(stats.t.rvs(100,size=100),'norm')
(0.072018929165471257, 0.67630062862479168)
With 3 degrees of freedom the t distribution looks sufficiently different
from the normal distribution, that we can reject the hypothesis that the
sample came from the normal distribution at the 10% level:
>>> np.random.seed(987654321)
>>> stats.kstest(stats.t.rvs(3,size=100),'norm')
(0.131016895759829, 0.058826222555312224)
"""
if isinstance(rvs, string_types):
if (not cdf) or (cdf == rvs):
cdf = getattr(distributions, rvs).cdf
rvs = getattr(distributions, rvs).rvs
else:
raise AttributeError("if rvs is string, cdf has to be the "
"same distribution")
if isinstance(cdf, string_types):
cdf = getattr(distributions, cdf).cdf
if callable(rvs):
kwds = {'size': N}
vals = np.sort(rvs(*args, **kwds))
else:
vals = np.sort(rvs)
N = len(vals)
cdfvals = cdf(vals, *args)
# to not break compatibility with existing code
if alternative == 'two_sided':
alternative = 'two-sided'
if alternative in ['two-sided', 'greater']:
Dplus = (np.arange(1.0, N + 1)/N - cdfvals).max()
if alternative == 'greater':
return KstestResult(Dplus, distributions.ksone.sf(Dplus, N))
if alternative in ['two-sided', 'less']:
Dmin = (cdfvals - np.arange(0.0, N)/N).max()
if alternative == 'less':
return KstestResult(Dmin, distributions.ksone.sf(Dmin, N))
if alternative == 'two-sided':
D = np.max([Dplus, Dmin])
if mode == 'asymp':
return KstestResult(D, distributions.kstwobign.sf(D * np.sqrt(N)))
if mode == 'approx':
pval_two = distributions.kstwobign.sf(D * np.sqrt(N))
if N > 2666 or pval_two > 0.80 - N*0.3/1000:
return KstestResult(D, pval_two)
else:
return KstestResult(D, 2 * distributions.ksone.sf(D, N))
# Map from names to lambda_ values used in power_divergence().
_power_div_lambda_names = {
"pearson": 1,
"log-likelihood": 0,
"freeman-tukey": -0.5,
"mod-log-likelihood": -1,
"neyman": -2,
"cressie-read": 2/3,
}
def _count(a, axis=None):
"""
Count the number of non-masked elements of an array.
This function behaves like np.ma.count(), but is much faster
for ndarrays.
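Examples
--------
A quick illustration; the values follow directly from the definitions
above (masked arrays use their own ``count`` method, plain ndarrays fall
back to size/shape):
>>> import numpy as np
>>> _count(np.arange(6))
6
>>> _count(np.ma.array([1, 2, 3], mask=[0, 1, 0]))
2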
"""
if hasattr(a, 'count'):
num = a.count(axis=axis)
if isinstance(num, np.ndarray) and num.ndim == 0:
# In some cases, the `count` method returns a scalar array (e.g.
# np.array(3)), but we want a plain integer.
num = int(num)
else:
if axis is None:
num = a.size
else:
num = a.shape[axis]
return num
Power_divergenceResult = namedtuple('Power_divergenceResult',
('statistic', 'pvalue'))
def power_divergence(f_obs, f_exp=None, ddof=0, axis=0, lambda_=None):
"""
Cressie-Read power divergence statistic and goodness of fit test.
This function tests the null hypothesis that the categorical data
has the given frequencies, using the Cressie-Read power divergence
statistic.
Parameters
----------
f_obs : array_like
Observed frequencies in each category.
f_exp : array_like, optional
Expected frequencies in each category. By default the categories are
assumed to be equally likely.
ddof : int, optional
"Delta degrees of freedom": adjustment to the degrees of freedom
for the p-value. The p-value is computed using a chi-squared
distribution with ``k - 1 - ddof`` degrees of freedom, where `k`
is the number of observed frequencies. The default value of `ddof`
is 0.
axis : int or None, optional
The axis of the broadcast result of `f_obs` and `f_exp` along which to
apply the test. If axis is None, all values in `f_obs` are treated
as a single data set. Default is 0.
lambda_ : float or str, optional
`lambda_` gives the power in the Cressie-Read power divergence
statistic. The default is 1. For convenience, `lambda_` may be
assigned one of the following strings, in which case the
corresponding numerical value is used::
String Value Description
"pearson" 1 Pearson's chi-squared statistic.
In this case, the function is
equivalent to `stats.chisquare`.
"log-likelihood" 0 Log-likelihood ratio. Also known as
the G-test [3]_.
"freeman-tukey" -1/2 Freeman-Tukey statistic.
"mod-log-likelihood" -1 Modified log-likelihood ratio.
"neyman" -2 Neyman's statistic.
"cressie-read" 2/3 The power recommended in [5]_.
Returns
-------
statistic : float or ndarray
The Cressie-Read power divergence test statistic. The value is
a float if `axis` is None or if `f_obs` and `f_exp` are 1-D.
pvalue : float or ndarray
The p-value of the test. The value is a float if `ddof` and the
return value `stat` are scalars.
See Also
--------
chisquare
Notes
-----
This test is invalid when the observed or expected frequencies in each
category are too small. A typical rule is that all of the observed
and expected frequencies should be at least 5.
When `lambda_` is less than zero, the formula for the statistic involves
dividing by `f_obs`, so a warning or error may be generated if any value
in `f_obs` is 0.
Similarly, a warning or error may be generated if any value in `f_exp` is
zero when `lambda_` >= 0.
The default degrees of freedom, k-1, are for the case when no parameters
of the distribution are estimated. If p parameters are estimated by
efficient maximum likelihood then the correct degrees of freedom are
k-1-p. If the parameters are estimated in a different way, then the
dof can be between k-1-p and k-1. However, it is also possible that
the asymptotic distribution is not a chisquare, in which case this
test is not appropriate.
This function handles masked arrays. If an element of `f_obs` or `f_exp`
is masked, then data at that position is ignored, and does not count
towards the size of the data set.
.. versionadded:: 0.13.0
References
----------
.. [1] Lowry, Richard. "Concepts and Applications of Inferential
Statistics". Chapter 8. http://faculty.vassar.edu/lowry/ch8pt1.html
.. [2] "Chi-squared test", http://en.wikipedia.org/wiki/Chi-squared_test
.. [3] "G-test", http://en.wikipedia.org/wiki/G-test
.. [4] Sokal, R. R. and Rohlf, F. J. "Biometry: the principles and
practice of statistics in biological research", New York: Freeman
(1981)
.. [5] Cressie, N. and Read, T. R. C., "Multinomial Goodness-of-Fit
Tests", J. Royal Stat. Soc. Series B, Vol. 46, No. 3 (1984),
pp. 440-464.
Examples
--------
(See `chisquare` for more examples.)
When just `f_obs` is given, it is assumed that the expected frequencies
are uniform and given by the mean of the observed frequencies. Here we
perform a G-test (i.e. use the log-likelihood ratio statistic):
>>> from scipy.stats import power_divergence
>>> power_divergence([16, 18, 16, 14, 12, 12], lambda_='log-likelihood')
(2.006573162632538, 0.84823476779463769)
The expected frequencies can be given with the `f_exp` argument:
>>> power_divergence([16, 18, 16, 14, 12, 12],
... f_exp=[16, 16, 16, 16, 16, 8],
... lambda_='log-likelihood')
(3.3281031458963746, 0.6495419288047497)
When `f_obs` is 2-D, by default the test is applied to each column.
>>> obs = np.array([[16, 18, 16, 14, 12, 12], [32, 24, 16, 28, 20, 24]]).T
>>> obs.shape
(6, 2)
>>> power_divergence(obs, lambda_="log-likelihood")
(array([ 2.00657316, 6.77634498]), array([ 0.84823477, 0.23781225]))
By setting ``axis=None``, the test is applied to all data in the array,
which is equivalent to applying the test to the flattened array.
>>> power_divergence(obs, axis=None)
(23.31034482758621, 0.015975692534127565)
>>> power_divergence(obs.ravel())
(23.31034482758621, 0.015975692534127565)
`ddof` is the change to make to the default degrees of freedom.
>>> power_divergence([16, 18, 16, 14, 12, 12], ddof=1)
(2.0, 0.73575888234288467)
The calculation of the p-values is done by broadcasting the
test statistic with `ddof`.
>>> power_divergence([16, 18, 16, 14, 12, 12], ddof=[0,1,2])
(2.0, array([ 0.84914504, 0.73575888, 0.5724067 ]))
`f_obs` and `f_exp` are also broadcast. In the following, `f_obs` has
shape (6,) and `f_exp` has shape (2, 6), so the result of broadcasting
`f_obs` and `f_exp` has shape (2, 6). To compute the desired chi-squared
statistics, we must use ``axis=1``:
>>> power_divergence([16, 18, 16, 14, 12, 12],
... f_exp=[[16, 16, 16, 16, 16, 8],
... [8, 20, 20, 16, 12, 12]],
... axis=1)
(array([ 3.5 , 9.25]), array([ 0.62338763, 0.09949846]))
"""
# Convert the input argument `lambda_` to a numerical value.
if isinstance(lambda_, string_types):
if lambda_ not in _power_div_lambda_names:
names = repr(list(_power_div_lambda_names.keys()))[1:-1]
raise ValueError("invalid string for lambda_: {0!r}. Valid strings "
"are {1}".format(lambda_, names))
lambda_ = _power_div_lambda_names[lambda_]
elif lambda_ is None:
lambda_ = 1
f_obs = np.asanyarray(f_obs)
if f_exp is not None:
f_exp = np.atleast_1d(np.asanyarray(f_exp))
else:
# Compute the equivalent of
# f_exp = f_obs.mean(axis=axis, keepdims=True)
# Older versions of numpy do not have the 'keepdims' argument, so
# we have to do a little work to achieve the same result.
# Ignore 'invalid' errors so the edge case of a data set with length 0
# is handled without spurious warnings.
with np.errstate(invalid='ignore'):
f_exp = np.atleast_1d(f_obs.mean(axis=axis))
if axis is not None:
reduced_shape = list(f_obs.shape)
reduced_shape[axis] = 1
f_exp.shape = reduced_shape
# `terms` is the array of terms that are summed along `axis` to create
# the test statistic. We use some specialized code for a few special
# cases of lambda_.
if lambda_ == 1:
# Pearson's chi-squared statistic
terms = (f_obs - f_exp)**2 / f_exp
elif lambda_ == 0:
# Log-likelihood ratio (i.e. G-test)
terms = 2.0 * special.xlogy(f_obs, f_obs / f_exp)
elif lambda_ == -1:
# Modified log-likelihood ratio
terms = 2.0 * special.xlogy(f_exp, f_exp / f_obs)
else:
# General Cressie-Read power divergence.
terms = f_obs * ((f_obs / f_exp)**lambda_ - 1)
terms /= 0.5 * lambda_ * (lambda_ + 1)
stat = terms.sum(axis=axis)
num_obs = _count(terms, axis=axis)
ddof = asarray(ddof)
p = distributions.chi2.sf(stat, num_obs - 1 - ddof)
return Power_divergenceResult(stat, p)
def chisquare(f_obs, f_exp=None, ddof=0, axis=0):
"""
Calculates a one-way chi square test.
The chi square test tests the null hypothesis that the categorical data
has the given frequencies.
Parameters
----------
f_obs : array_like
Observed frequencies in each category.
f_exp : array_like, optional
Expected frequencies in each category. By default the categories are
assumed to be equally likely.
ddof : int, optional
"Delta degrees of freedom": adjustment to the degrees of freedom
for the p-value. The p-value is computed using a chi-squared
distribution with ``k - 1 - ddof`` degrees of freedom, where `k`
is the number of observed frequencies. The default value of `ddof`
is 0.
axis : int or None, optional
The axis of the broadcast result of `f_obs` and `f_exp` along which to
apply the test. If axis is None, all values in `f_obs` are treated
as a single data set. Default is 0.
Returns
-------
chisq : float or ndarray
The chi-squared test statistic. The value is a float if `axis` is
None or `f_obs` and `f_exp` are 1-D.
p : float or ndarray
The p-value of the test. The value is a float if `ddof` and the
return value `chisq` are scalars.
See Also
--------
power_divergence
mstats.chisquare
Notes
-----
This test is invalid when the observed or expected frequencies in each
category are too small. A typical rule is that all of the observed
and expected frequencies should be at least 5.
The default degrees of freedom, k-1, are for the case when no parameters
of the distribution are estimated. If p parameters are estimated by
efficient maximum likelihood then the correct degrees of freedom are
k-1-p. If the parameters are estimated in a different way, then the
dof can be between k-1-p and k-1. However, it is also possible that
the asymptotic distribution is not a chisquare, in which case this
test is not appropriate.
References
----------
.. [1] Lowry, Richard. "Concepts and Applications of Inferential
Statistics". Chapter 8. http://faculty.vassar.edu/lowry/ch8pt1.html
.. [2] "Chi-squared test", http://en.wikipedia.org/wiki/Chi-squared_test
Examples
--------
When just `f_obs` is given, it is assumed that the expected frequencies
are uniform and given by the mean of the observed frequencies.
>>> from scipy.stats import chisquare
>>> chisquare([16, 18, 16, 14, 12, 12])
(2.0, 0.84914503608460956)
With `f_exp` the expected frequencies can be given.
>>> chisquare([16, 18, 16, 14, 12, 12], f_exp=[16, 16, 16, 16, 16, 8])
(3.5, 0.62338762774958223)
When `f_obs` is 2-D, by default the test is applied to each column.
>>> obs = np.array([[16, 18, 16, 14, 12, 12], [32, 24, 16, 28, 20, 24]]).T
>>> obs.shape
(6, 2)
>>> chisquare(obs)
(array([ 2. , 6.66666667]), array([ 0.84914504, 0.24663415]))
By setting ``axis=None``, the test is applied to all data in the array,
which is equivalent to applying the test to the flattened array.
>>> chisquare(obs, axis=None)
(23.31034482758621, 0.015975692534127565)
>>> chisquare(obs.ravel())
(23.31034482758621, 0.015975692534127565)
`ddof` is the change to make to the default degrees of freedom.
>>> chisquare([16, 18, 16, 14, 12, 12], ddof=1)
(2.0, 0.73575888234288467)
The calculation of the p-values is done by broadcasting the
chi-squared statistic with `ddof`.
>>> chisquare([16, 18, 16, 14, 12, 12], ddof=[0,1,2])
(2.0, array([ 0.84914504, 0.73575888, 0.5724067 ]))
`f_obs` and `f_exp` are also broadcast. In the following, `f_obs` has
shape (6,) and `f_exp` has shape (2, 6), so the result of broadcasting
`f_obs` and `f_exp` has shape (2, 6). To compute the desired chi-squared
statistics, we use ``axis=1``:
>>> chisquare([16, 18, 16, 14, 12, 12],
... f_exp=[[16, 16, 16, 16, 16, 8], [8, 20, 20, 16, 12, 12]],
... axis=1)
(array([ 3.5 , 9.25]), array([ 0.62338763, 0.09949846]))
"""
return power_divergence(f_obs, f_exp=f_exp, ddof=ddof, axis=axis,
lambda_="pearson")
Ks_2sampResult = namedtuple('Ks_2sampResult', ('statistic', 'pvalue'))
def ks_2samp(data1, data2):
"""
Computes the Kolmogorov-Smirnov statistic on 2 samples.
This is a two-sided test for the null hypothesis that 2 independent samples
are drawn from the same continuous distribution.
Parameters
----------
data1, data2 : sequence of 1-D ndarrays
two arrays of sample observations assumed to be drawn from a continuous
distribution, sample sizes can be different
Returns
-------
statistic : float
KS statistic
pvalue : float
two-tailed p-value
Notes
-----
This tests whether 2 samples are drawn from the same distribution. Note
that, like in the case of the one-sample K-S test, the distribution is
assumed to be continuous.
This is the two-sided test; one-sided tests are not implemented.
The test uses the two-sided asymptotic Kolmogorov-Smirnov distribution.
If the K-S statistic is small or the p-value is high, then we cannot
reject the hypothesis that the distributions of the two samples
are the same.
Examples
--------
>>> from scipy import stats
>>> np.random.seed(12345678) #fix random seed to get the same result
>>> n1 = 200 # size of first sample
>>> n2 = 300 # size of second sample
For a different distribution, we can reject the null hypothesis since the
pvalue is below 1%:
>>> rvs1 = stats.norm.rvs(size=n1, loc=0., scale=1)
>>> rvs2 = stats.norm.rvs(size=n2, loc=0.5, scale=1.5)
>>> stats.ks_2samp(rvs1, rvs2)
(0.20833333333333337, 4.6674975515806989e-005)
For a slightly different distribution, we cannot reject the null hypothesis
at a 10% or lower alpha since the p-value at 0.144 is higher than 10%
>>> rvs3 = stats.norm.rvs(size=n2, loc=0.01, scale=1.0)
>>> stats.ks_2samp(rvs1, rvs3)
(0.10333333333333333, 0.14498781825751686)
For an identical distribution, we cannot reject the null hypothesis since
the p-value is high, 41%:
>>> rvs4 = stats.norm.rvs(size=n2, loc=0.0, scale=1.0)
>>> stats.ks_2samp(rvs1, rvs4)
(0.07999999999999996, 0.41126949729859719)
"""
data1 = np.sort(data1)
data2 = np.sort(data2)
n1 = data1.shape[0]
n2 = data2.shape[0]
data_all = np.concatenate([data1, data2])
cdf1 = np.searchsorted(data1, data_all, side='right') / (1.0*n1)
cdf2 = np.searchsorted(data2, data_all, side='right') / (1.0*n2)
d = np.max(np.absolute(cdf1 - cdf2))
# Note: d is the maximum absolute difference, not a signed distance
en = np.sqrt(n1 * n2 / float(n1 + n2))
try:
    prob = distributions.kstwobign.sf((en + 0.12 + 0.11 / en) * d)
except Exception:
    prob = 1.0
return Ks_2sampResult(d, prob)
def tiecorrect(rankvals):
"""
Tie correction factor for ties in the Mann-Whitney U and
Kruskal-Wallis H tests.
Parameters
----------
rankvals : array_like
A 1-D sequence of ranks. Typically this will be the array
returned by `stats.rankdata`.
Returns
-------
factor : float
Correction factor for U or H.
See Also
--------
rankdata : Assign ranks to the data
mannwhitneyu : Mann-Whitney rank test
kruskal : Kruskal-Wallis H test
References
----------
.. [1] Siegel, S. (1956) Nonparametric Statistics for the Behavioral
Sciences. New York: McGraw-Hill.
Examples
--------
>>> from scipy.stats import tiecorrect, rankdata
>>> tiecorrect([1, 2.5, 2.5, 4])
0.9
>>> ranks = rankdata([1, 3, 2, 4, 5, 7, 2, 8, 4])
>>> ranks
array([ 1. , 4. , 2.5, 5.5, 7. , 8. , 2.5, 9. , 5.5])
>>> tiecorrect(ranks)
0.9833333333333333
"""
arr = np.sort(rankvals)
idx = np.nonzero(np.r_[True, arr[1:] != arr[:-1], True])[0]
cnt = np.diff(idx).astype(np.float64)
size = np.float64(arr.size)
return 1.0 if size < 2 else 1.0 - (cnt**3 - cnt).sum() / (size**3 - size)
MannwhitneyuResult = namedtuple('MannwhitneyuResult', ('statistic', 'pvalue'))
def mannwhitneyu(x, y, use_continuity=True, alternative=None):
"""
Computes the Mann-Whitney rank test on samples x and y.
Parameters
----------
x, y : array_like
Array of samples, should be one-dimensional.
use_continuity : bool, optional
Whether a continuity correction (1/2.) should be taken into
account. Default is True.
alternative : None (deprecated), 'less', 'two-sided', or 'greater'
Whether to get the p-value for the one-sided hypothesis ('less'
or 'greater') or for the two-sided hypothesis ('two-sided').
Defaults to None, which results in a p-value half the size of
the 'two-sided' p-value and a different U statistic. The
default behavior is not the same as using 'less' or 'greater':
it only exists for backward compatibility and is deprecated.
Returns
-------
statistic : float
The Mann-Whitney U statistic, equal to min(U for x, U for y) if
`alternative` is equal to None (deprecated; exists for backward
compatibility), and U for y otherwise.
pvalue : float
p-value assuming an asymptotic normal distribution. One-sided or
two-sided, depending on the choice of `alternative`.
Notes
-----
Use only when the number of observations in each sample is > 20 and
you have 2 independent samples of ranks. Mann-Whitney U is
significant if the computed U is less than or equal to the critical
value of U.
This test corrects for ties and by default uses a continuity correction.
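Examples
--------
A minimal usage sketch on two illustrative samples; passing `alternative`
explicitly avoids the deprecated default behaviour:
>>> from scipy import stats
>>> x = [1, 4, 2, 8, 5, 7, 9, 3]
>>> y = [10, 12, 14, 11, 13, 15, 16, 18]
>>> u, p = stats.mannwhitneyu(x, y, alternative='two-sided')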
"""
if alternative is None:
warnings.warn("Calling `mannwhitneyu` without specifying "
"`alternative` is deprecated.", DeprecationWarning)
x = np.asarray(x)
y = np.asarray(y)
n1 = len(x)
n2 = len(y)
ranked = rankdata(np.concatenate((x, y)))
rankx = ranked[0:n1] # get the x-ranks
u1 = n1*n2 + (n1*(n1+1))/2.0 - np.sum(rankx, axis=0) # calc U for x
u2 = n1*n2 - u1 # remainder is U for y
T = tiecorrect(ranked)
if T == 0:
raise ValueError('All numbers are identical in mannwhitneyu')
sd = np.sqrt(T * n1 * n2 * (n1+n2+1) / 12.0)
meanrank = n1*n2/2.0 + 0.5 * use_continuity
if alternative is None or alternative == 'two-sided':
bigu = max(u1, u2)
elif alternative == 'less':
bigu = u1
elif alternative == 'greater':
bigu = u2
else:
raise ValueError("alternative should be None, 'less', 'greater' "
"or 'two-sided'")
z = (bigu - meanrank) / sd
if alternative is None:
# This behavior, equal to half the size of the two-sided
# p-value, is deprecated.
p = distributions.norm.sf(abs(z))
elif alternative == 'two-sided':
p = 2 * distributions.norm.sf(abs(z))
else:
p = distributions.norm.sf(z)
u = u2
# This behavior is deprecated.
if alternative is None:
u = min(u1, u2)
return MannwhitneyuResult(u, p)
RanksumsResult = namedtuple('RanksumsResult', ('statistic', 'pvalue'))
def ranksums(x, y):
"""
Compute the Wilcoxon rank-sum statistic for two samples.
The Wilcoxon rank-sum test tests the null hypothesis that two sets
of measurements are drawn from the same distribution. The alternative
hypothesis is that values in one sample are more likely to be
larger than the values in the other sample.
This test should be used to compare two samples from continuous
distributions. It does not handle ties between measurements
in x and y. For tie-handling and an optional continuity correction
see `scipy.stats.mannwhitneyu`.
Parameters
----------
x,y : array_like
The data from the two samples
Returns
-------
statistic : float
The test statistic under the large-sample approximation that the
rank sum statistic is normally distributed
pvalue : float
The two-sided p-value of the test
References
----------
.. [1] http://en.wikipedia.org/wiki/Wilcoxon_rank-sum_test
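Examples
--------
A minimal usage sketch with two illustrative samples; the result unpacks
into the z-statistic and the two-sided p-value:
>>> from scipy import stats
>>> x = [1.83, 0.50, 1.62, 2.48, 1.68, 1.88, 1.55, 3.06, 1.30]
>>> y = [0.88, 0.65, 0.60, 2.05, 1.06, 1.29, 1.06, 3.14, 1.29]
>>> z, p = stats.ranksums(x, y)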
"""
x, y = map(np.asarray, (x, y))
n1 = len(x)
n2 = len(y)
alldata = np.concatenate((x, y))
ranked = rankdata(alldata)
x = ranked[:n1]
s = np.sum(x, axis=0)
expected = n1 * (n1+n2+1) / 2.0
z = (s - expected) / np.sqrt(n1*n2*(n1+n2+1)/12.0)
prob = 2 * distributions.norm.sf(abs(z))
return RanksumsResult(z, prob)
KruskalResult = namedtuple('KruskalResult', ('statistic', 'pvalue'))
def kruskal(*args, **kwargs):
"""
Compute the Kruskal-Wallis H-test for independent samples
The Kruskal-Wallis H-test tests the null hypothesis that the population
medians of all of the groups are equal. It is a non-parametric version of
ANOVA. The test works on 2 or more independent samples, which may have
different sizes. Note that rejecting the null hypothesis does not
indicate which of the groups differs. Post-hoc comparisons between
groups are required to determine which groups are different.
Parameters
----------
sample1, sample2, ... : array_like
Two or more arrays with the sample measurements can be given as
arguments.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
statistic : float
The Kruskal-Wallis H statistic, corrected for ties
pvalue : float
The p-value for the test using the assumption that H has a chi
square distribution
See Also
--------
f_oneway : 1-way ANOVA
mannwhitneyu : Mann-Whitney rank test on two samples.
friedmanchisquare : Friedman test for repeated measurements
Notes
-----
Due to the assumption that H has a chi square distribution, the number
of samples in each group must not be too small. A typical rule is
that each sample must have at least 5 measurements.
References
----------
.. [1] W. H. Kruskal & W. W. Wallis, "Use of Ranks in
One-Criterion Variance Analysis", Journal of the American Statistical
Association, Vol. 47, Issue 260, pp. 583-621, 1952.
.. [2] http://en.wikipedia.org/wiki/Kruskal-Wallis_one-way_analysis_of_variance
Examples
--------
>>> from scipy import stats
>>> x = [1, 3, 5, 7, 9]
>>> y = [2, 4, 6, 8, 10]
>>> stats.kruskal(x, y)
KruskalResult(statistic=0.27272727272727337, pvalue=0.60150813444058948)
>>> x = [1, 1, 1]
>>> y = [2, 2, 2]
>>> z = [2, 2]
>>> stats.kruskal(x, y, z)
KruskalResult(statistic=7.0, pvalue=0.030197383422318501)
"""
args = list(map(np.asarray, args))
num_groups = len(args)
if num_groups < 2:
raise ValueError("Need at least two groups in stats.kruskal()")
for arg in args:
if arg.size == 0:
return KruskalResult(np.nan, np.nan)
n = np.asarray(list(map(len, args)))
if 'nan_policy' in kwargs:
if kwargs['nan_policy'] not in ('propagate', 'raise', 'omit'):
raise ValueError("nan_policy must be 'propagate', "
"'raise' or'omit'")
else:
nan_policy = kwargs['nan_policy']
else:
nan_policy = 'propagate'
contains_nan = False
for arg in args:
cn = _contains_nan(arg, nan_policy)
if cn[0]:
contains_nan = True
break
if contains_nan and nan_policy == 'omit':
    args = [ma.masked_invalid(a) for a in args]
    return mstats_basic.kruskal(*args)
if contains_nan and nan_policy == 'propagate':
return KruskalResult(np.nan, np.nan)
alldata = np.concatenate(args)
ranked = rankdata(alldata)
ties = tiecorrect(ranked)
if ties == 0:
raise ValueError('All numbers are identical in kruskal')
# Compute sum^2/n for each group and sum
j = np.insert(np.cumsum(n), 0, 0)
ssbn = 0
for i in range(num_groups):
ssbn += _square_of_sums(ranked[j[i]:j[i+1]]) / float(n[i])
totaln = np.sum(n)
h = 12.0 / (totaln * (totaln + 1)) * ssbn - 3 * (totaln + 1)
df = num_groups - 1
h /= ties
return KruskalResult(h, distributions.chi2.sf(h, df))
FriedmanchisquareResult = namedtuple('FriedmanchisquareResult',
('statistic', 'pvalue'))
def friedmanchisquare(*args):
"""
Computes the Friedman test for repeated measurements
The Friedman test tests the null hypothesis that repeated measurements of
the same individuals have the same distribution. It is often used
to test for consistency among measurements obtained in different ways.
For example, if two measurement techniques are used on the same set of
individuals, the Friedman test can be used to determine if the two
measurement techniques are consistent.
Parameters
----------
measurements1, measurements2, measurements3... : array_like
Arrays of measurements. All of the arrays must have the same number
of elements. At least 3 sets of measurements must be given.
Returns
-------
statistic : float
the test statistic, correcting for ties
pvalue : float
the associated p-value assuming that the test statistic has a chi
squared distribution
Notes
-----
Due to the assumption that the test statistic has a chi squared
distribution, the p-value is only reliable for n > 10 and more than
6 repeated measurements.
References
----------
.. [1] http://en.wikipedia.org/wiki/Friedman_test
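Examples
--------
A minimal usage sketch with three illustrative repeated measurements of
the same seven subjects (made-up numbers):
>>> from scipy import stats
>>> m1 = [72, 96, 88, 92, 74, 76, 82]
>>> m2 = [120, 120, 132, 120, 101, 96, 112]
>>> m3 = [76, 95, 104, 96, 84, 72, 76]
>>> stat, p = stats.friedmanchisquare(m1, m2, m3)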
"""
k = len(args)
if k < 3:
raise ValueError('Less than 3 levels. Friedman test not appropriate.')
n = len(args[0])
for i in range(1, k):
if len(args[i]) != n:
raise ValueError('Unequal N in friedmanchisquare. Aborting.')
# Rank data
data = np.vstack(args).T
data = data.astype(float)
for i in range(len(data)):
data[i] = rankdata(data[i])
# Handle ties
ties = 0
for i in range(len(data)):
replist, repnum = find_repeats(array(data[i]))
for t in repnum:
ties += t * (t*t - 1)
c = 1 - ties / float(k*(k*k - 1)*n)
ssbn = np.sum(data.sum(axis=0)**2)
chisq = (12.0 / (k*n*(k+1)) * ssbn - 3*n*(k+1)) / c
return FriedmanchisquareResult(chisq, distributions.chi2.sf(chisq, k - 1))
def combine_pvalues(pvalues, method='fisher', weights=None):
"""
Methods for combining the p-values of independent tests bearing upon the
same hypothesis.
Parameters
----------
pvalues : array_like, 1-D
Array of p-values assumed to come from independent tests.
method : {'fisher', 'stouffer'}, optional
Name of method to use to combine p-values. The following methods are
available:
- "fisher": Fisher's method (Fisher's combined probability test),
the default.
- "stouffer": Stouffer's Z-score method.
weights : array_like, 1-D, optional
Optional array of weights used only for Stouffer's Z-score method.
Returns
-------
statistic: float
The statistic calculated by the specified method:
- "fisher": The chi-squared statistic
- "stouffer": The Z-score
pval: float
The combined p-value.
Notes
-----
Fisher's method (also known as Fisher's combined probability test) [1]_ uses
a chi-squared statistic to compute a combined p-value. The closely related
Stouffer's Z-score method [2]_ uses Z-scores rather than p-values. The
advantage of Stouffer's method is that it is straightforward to introduce
weights, which can make Stouffer's method more powerful than Fisher's
method when the p-values are from studies of different size [3]_ [4]_.
Fisher's method may be extended to combine p-values from dependent tests
[5]_. Extensions such as Brown's method and Kost's method are not currently
implemented.
.. versionadded:: 0.15.0
References
----------
.. [1] https://en.wikipedia.org/wiki/Fisher%27s_method
.. [2] http://en.wikipedia.org/wiki/Fisher's_method#Relation_to_Stouffer.27s_Z-score_method
.. [3] Whitlock, M. C. "Combining probability from independent tests: the
weighted Z-method is superior to Fisher's approach." Journal of
Evolutionary Biology 18, no. 5 (2005): 1368-1373.
.. [4] Zaykin, Dmitri V. "Optimally weighted Z-test is a powerful method
for combining probabilities in meta-analysis." Journal of
Evolutionary Biology 24, no. 8 (2011): 1836-1841.
.. [5] https://en.wikipedia.org/wiki/Extensions_of_Fisher%27s_method
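Examples
--------
A minimal sketch combining three illustrative p-values, first with
Fisher's method and then with Stouffer's weighted Z-score method (the
p-values and weights carry no particular meaning):
>>> from scipy import stats
>>> stat, p = stats.combine_pvalues([0.01, 0.2, 0.3], method='fisher')
>>> z, p = stats.combine_pvalues([0.01, 0.2, 0.3], method='stouffer',
...                              weights=[1, 2, 3])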
"""
pvalues = np.asarray(pvalues)
if pvalues.ndim != 1:
raise ValueError("pvalues is not 1-D")
if method == 'fisher':
Xsq = -2 * np.sum(np.log(pvalues))
pval = distributions.chi2.sf(Xsq, 2 * len(pvalues))
return (Xsq, pval)
elif method == 'stouffer':
if weights is None:
weights = np.ones_like(pvalues)
elif len(weights) != len(pvalues):
raise ValueError("pvalues and weights must be of the same size.")
weights = np.asarray(weights)
if weights.ndim != 1:
raise ValueError("weights is not 1-D")
Zi = distributions.norm.isf(pvalues)
Z = np.dot(weights, Zi) / np.linalg.norm(weights)
pval = distributions.norm.sf(Z)
return (Z, pval)
else:
raise ValueError(
    "Invalid method '%s'. Options are 'fisher' or 'stouffer'." % method)
#####################################
# PROBABILITY CALCULATIONS #
#####################################
@np.deprecate(message="stats.chisqprob is deprecated in scipy 0.17.0; "
"use stats.distributions.chi2.sf instead.")
def chisqprob(chisq, df):
"""
Probability value (1-tail) for the Chi^2 probability distribution.
Broadcasting rules apply.
Parameters
----------
chisq : array_like or float > 0
df : array_like or float, probably int >= 1
Returns
-------
chisqprob : ndarray
The area from `chisq` to infinity under the Chi^2 probability
distribution with degrees of freedom `df`.
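Examples
--------
A minimal sketch with illustrative arguments; new code should call the
suggested replacement directly:
>>> from scipy.stats import distributions
>>> p = distributions.chi2.sf(3.84, 1)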
"""
return distributions.chi2.sf(chisq, df)
@np.deprecate(message="stats.betai is deprecated in scipy 0.17.0; "
"use special.betainc instead")
def betai(a, b, x):
"""
Returns the incomplete beta function.
I_x(a,b) = 1/B(a,b)*(Integral(0,x) of t^(a-1)(1-t)^(b-1) dt)
where a,b>0 and B(a,b) = G(a)*G(b)/(G(a+b)) where G(a) is the gamma
function of a.
The standard broadcasting rules apply to a, b, and x.
Parameters
----------
a : array_like or float > 0
b : array_like or float > 0
x : array_like or float
x will be clipped to be no greater than 1.0.
Returns
-------
betai : ndarray
Incomplete beta function.
"""
return _betai(a, b, x)
def _betai(a, b, x):
x = np.asarray(x)
x = np.where(x < 1.0, x, 1.0) # if x > 1 then return 1.0
return special.betainc(a, b, x)
#####################################
# ANOVA CALCULATIONS #
#####################################
@np.deprecate(message="stats.f_value_wilks_lambda deprecated in scipy 0.17.0")
def f_value_wilks_lambda(ER, EF, dfnum, dfden, a, b):
"""Calculation of Wilks lambda F-statistic for multivarite data, per
Maxwell & Delaney p.657.
"""
if isinstance(ER, (int, float)):
ER = array([[ER]])
if isinstance(EF, (int, float)):
EF = array([[EF]])
lmbda = linalg.det(EF) / linalg.det(ER)
if (a-1)**2 + (b-1)**2 == 5:
q = 1
else:
q = np.sqrt(((a-1)**2*(b-1)**2 - 2) / ((a-1)**2 + (b-1)**2 - 5))
n_um = (1 - lmbda**(1.0/q))*(a-1)*(b-1)
d_en = lmbda**(1.0/q) / (n_um*q - 0.5*(a-1)*(b-1) + 1)
return n_um / d_en
@np.deprecate(message="stats.f_value deprecated in scipy 0.17.0")
def f_value(ER, EF, dfR, dfF):
"""
Returns an F-statistic for a restricted vs. unrestricted model.
Parameters
----------
ER : float
`ER` is the sum of squared residuals for the restricted model
or null hypothesis
EF : float
`EF` is the sum of squared residuals for the unrestricted model
or alternate hypothesis
dfR : int
`dfR` is the degrees of freedom in the restricted model
dfF : int
`dfF` is the degrees of freedom in the unrestricted model
Returns
-------
F-statistic : float
"""
return (ER - EF) / float(dfR - dfF) / (EF / float(dfF))
@np.deprecate(message="stats.f_value_multivariate deprecated in scipy 0.17.0")
def f_value_multivariate(ER, EF, dfnum, dfden):
"""
Returns a multivariate F-statistic.
Parameters
----------
ER : ndarray
Error associated with the null hypothesis (the Restricted model).
From a multivariate F calculation.
EF : ndarray
Error associated with the alternate hypothesis (the Full model)
From a multivariate F calculation.
dfnum : int
Degrees of freedom the Restricted model.
dfden : int
Degrees of freedom associated with the Restricted model.
Returns
-------
fstat : float
The computed F-statistic.
"""
if isinstance(ER, (int, float)):
ER = array([[ER]])
if isinstance(EF, (int, float)):
EF = array([[EF]])
n_um = (linalg.det(ER) - linalg.det(EF)) / float(dfnum)
d_en = linalg.det(EF) / float(dfden)
return n_um / d_en
#####################################
# SUPPORT FUNCTIONS #
#####################################
RepeatedResults = namedtuple('RepeatedResults', ('values', 'counts'))
def find_repeats(arr):
"""
Find repeats and repeat counts.
Parameters
----------
arr : array_like
Input array. This is cast to float64.
Returns
-------
values : ndarray
The unique values from the (flattened) input that are repeated.
counts : ndarray
Number of times the corresponding 'value' is repeated.
Notes
-----
In numpy >= 1.9 `numpy.unique` provides similar functionality. The main
difference is that `find_repeats` only returns repeated values.
Examples
--------
>>> from scipy import stats
>>> stats.find_repeats([2, 1, 2, 3, 2, 2, 5])
RepeatedResults(values=array([ 2.]), counts=array([4]))
>>> stats.find_repeats([[10, 20, 1, 2], [5, 5, 4, 4]])
RepeatedResults(values=array([ 4., 5.]), counts=array([2, 2]))
"""
# Note: always copies.
return RepeatedResults(*_find_repeats(np.array(arr, dtype=np.float64)))
@np.deprecate(message="scipy.stats.ss is deprecated in scipy 0.17.0")
def ss(a, axis=0):
return _sum_of_squares(a, axis)
def _sum_of_squares(a, axis=0):
"""
Squares each element of the input array, and returns the sum(s) of that.
Parameters
----------
a : array_like
Input array.
axis : int or None, optional
Axis along which to calculate. Default is 0. If None, compute over
the whole array `a`.
Returns
-------
sum_of_squares : ndarray
The sum along the given axis for (a**2).
See also
--------
_square_of_sums : The square(s) of the sum(s) (the opposite of
`_sum_of_squares`).
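    Examples
    --------
    A tiny illustrative check:
    >>> _sum_of_squares([1., 2., 2.])
    9.0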
"""
a, axis = _chk_asarray(a, axis)
return np.sum(a*a, axis)
@np.deprecate(message="scipy.stats.square_of_sums is deprecated "
"in scipy 0.17.0")
def square_of_sums(a, axis=0):
return _square_of_sums(a, axis)
def _square_of_sums(a, axis=0):
"""
Sums elements of the input array, and returns the square(s) of that sum.
Parameters
----------
a : array_like
Input array.
axis : int or None, optional
Axis along which to calculate. Default is 0. If None, compute over
the whole array `a`.
Returns
-------
square_of_sums : float or ndarray
The square of the sum over `axis`.
See also
--------
_sum_of_squares : The sum of squares (the opposite of `square_of_sums`).
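    Examples
    --------
    A tiny illustrative check:
    >>> _square_of_sums([1., 2., 2.])
    25.0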
"""
a, axis = _chk_asarray(a, axis)
s = np.sum(a, axis)
if not np.isscalar(s):
return s.astype(float) * s
else:
return float(s) * s
@np.deprecate(message="scipy.stats.fastsort is deprecated in scipy 0.16.0")
def fastsort(a):
"""
Sort an array and provide the argsort.
Parameters
----------
a : array_like
Input array.
Returns
-------
fastsort : ndarray of type int
sorted indices into the original array
"""
# TODO: the wording in the docstring is nonsense.
it = np.argsort(a)
as_ = a[it]
return as_, it
def rankdata(a, method='average'):
"""
rankdata(a, method='average')
Assign ranks to data, dealing with ties appropriately.
Ranks begin at 1. The `method` argument controls how ranks are assigned
to equal values. See [1]_ for further discussion of ranking methods.
Parameters
----------
a : array_like
The array of values to be ranked. The array is first flattened.
method : str, optional
The method used to assign ranks to tied elements.
The options are 'average', 'min', 'max', 'dense' and 'ordinal'.
'average':
The average of the ranks that would have been assigned to
all the tied values is assigned to each value.
'min':
The minimum of the ranks that would have been assigned to all
the tied values is assigned to each value. (This is also
referred to as "competition" ranking.)
'max':
The maximum of the ranks that would have been assigned to all
the tied values is assigned to each value.
'dense':
Like 'min', but the rank of the next highest element is assigned
the rank immediately after those assigned to the tied elements.
'ordinal':
All values are given a distinct rank, corresponding to the order
that the values occur in `a`.
The default is 'average'.
Returns
-------
ranks : ndarray
An array of length equal to the size of `a`, containing rank
scores.
References
----------
.. [1] "Ranking", http://en.wikipedia.org/wiki/Ranking
Examples
--------
>>> from scipy.stats import rankdata
>>> rankdata([0, 2, 3, 2])
array([ 1. , 2.5, 4. , 2.5])
>>> rankdata([0, 2, 3, 2], method='min')
array([ 1, 2, 4, 2])
>>> rankdata([0, 2, 3, 2], method='max')
array([ 1, 3, 4, 3])
>>> rankdata([0, 2, 3, 2], method='dense')
array([ 1, 2, 3, 2])
>>> rankdata([0, 2, 3, 2], method='ordinal')
array([ 1, 2, 4, 3])
"""
if method not in ('average', 'min', 'max', 'dense', 'ordinal'):
raise ValueError('unknown method "{0}"'.format(method))
arr = np.ravel(np.asarray(a))
algo = 'mergesort' if method == 'ordinal' else 'quicksort'
sorter = np.argsort(arr, kind=algo)
inv = np.empty(sorter.size, dtype=np.intp)
inv[sorter] = np.arange(sorter.size, dtype=np.intp)
if method == 'ordinal':
return inv + 1
arr = arr[sorter]
obs = np.r_[True, arr[1:] != arr[:-1]]
dense = obs.cumsum()[inv]
if method == 'dense':
return dense
# cumulative counts of each unique value
count = np.r_[np.nonzero(obs)[0], len(obs)]
if method == 'max':
return count[dense]
if method == 'min':
return count[dense - 1] + 1
# average method
return .5 * (count[dense] + count[dense - 1] + 1)
| bsd-3-clause |
biothings/biothings_explorer | biothings_explorer/query/utils.py | 1 | 7952 | from collections import defaultdict
from copy import deepcopy
import pandas as pd
from ..config_new import ALWAYS_PREFIXED
from ..smartapi_kg import MetaKG
def id2curie(prefix, val):
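    """
    Build a CURIE-style identifier from a prefix and a local id.
    :param prefix: identifier namespace prefix
    :param val: local identifier value
    Prefixes listed in ALWAYS_PREFIXED are returned unchanged; otherwise the
    result is "<prefix>:<val>", e.g. a hypothetical id2curie("NCBIGene", "1017")
    returns "NCBIGene:1017".
    """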
if prefix in ALWAYS_PREFIXED:
return val
return prefix + ":" + val
def annotateEdgesWithInput(edges, inputs):
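    """
    Attach resolved input ids and their original input records to each edge.
    :param edges: edge dicts filtered from the meta knowledge graph
    :param inputs: dict (or list of dicts) carrying "db_ids" keyed by id prefix
    Edges whose query operation supports batch queries receive their ids in
    chunks of 500; other edges are copied once per individual id.
    """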
if isinstance(inputs, dict):
inputs = [inputs]
annotatedEdges = []
for edge in edges:
if edge["query_operation"].get("supportBatch"):
copy_edge = deepcopy(edge)
input_ids = set()
original_input = {}
for _input in inputs:
prefix = copy_edge["association"]["input_id"]
if prefix in _input["db_ids"]:
for val in _input["db_ids"][prefix]:
input_ids.add(val)
original_input[id2curie(prefix, val)] = _input
input_ids = list(input_ids)
step = 500
for i in range(0, len(input_ids), step):
copy_edge["input"] = input_ids[i : i + step]
copy_edge["original_input"] = original_input
annotatedEdges.append(deepcopy(copy_edge))
else:
for _input in inputs:
prefix = edge["association"]["input_id"]
if prefix in _input["db_ids"]:
if not isinstance(_input["db_ids"][prefix], list):
_input["db_ids"][prefix] = [_input["db_ids"][prefix]]
for _id in _input["db_ids"][prefix]:
copy_edge = deepcopy(edge)
copy_edge["input"] = _id
copy_edge["original_input"] = {id2curie(prefix, _id): _input}
annotatedEdges.append(deepcopy(copy_edge))
return annotatedEdges
def getEdges(inputs, outputs, predicates, knowledgegraph=None):
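    """
    Collect meta-KG edges linking each input semantic type to the requested
    output types and predicates, paired with the corresponding input ids.
    :param inputs: dict mapping semantic types to resolved id records
    :param outputs: output semantic type(s)
    :param predicates: predicate(s) used to filter edges
    :param knowledgegraph: optional pre-built MetaKG; a local one is built
        when omitted
    """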
result = []
if not knowledgegraph:
kg = MetaKG()
kg.constructMetaKG(source="local")
else:
kg = knowledgegraph
for semantic_type, ids in inputs.items():
if (
(isinstance(outputs, list) and len(outputs) == 1)
or not isinstance(outputs, list)
or not isinstance(predicates, list)
):
edges = kg.filter(
{
"input_type": semantic_type,
"output_type": outputs,
"predicate": predicates,
}
)
else:
edges = []
if not isinstance(predicates, list):
predicates = [predicates]
for i, node in enumerate(outputs):
if i >= len(predicates):
tmp_predicate = None
else:
tmp_predicate = predicates[i]
edges += kg.filter(
{
"input_type": semantic_type,
"output_type": node,
"predicate": tmp_predicate,
}
)
if not edges or not ids:
continue
result.append({"edges": edges, "inputs": ids})
return result
def extractAllResolvedOutputIDs(res):
output_ids = {}
if res and len(res) > 0:
for item in res:
if "resolved_ids" in item["$output_id_mapping"]:
output_ids[
item["$output_id_mapping"]["resolved_ids"]["id"]["identifier"]
] = item["$output_id_mapping"]["resolved_ids"]
return output_ids
def groupsIDsbySemanticType(output_ids):
result = defaultdict(list)
for resolved_ids in output_ids.values():
result[resolved_ids.get("type")].append(resolved_ids)
return result
def restructureHintOutput(outputs):
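    """
    Reshape hint records into a dict keyed by CURIE, where each value holds
    the record's semantic type, its database ids and a display id/label.
    :param outputs: hint records containing "primary", "type" and id fields
    """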
result = {}
for output in outputs:
copy_output = deepcopy(output)
output_id = {}
for k, v in copy_output.items():
if k not in ["primary", "type"] and not isinstance(v, list):
copy_output[k] = [v]
if copy_output["primary"]["identifier"] in ALWAYS_PREFIXED:
curie = copy_output["primary"]["value"]
else:
curie = (
copy_output["primary"]["identifier"]
+ ":"
+ copy_output["primary"]["value"]
)
if "name" in copy_output:
output_id["label"] = copy_output["name"][0]
else:
output_id["label"] = curie
output_id["identifier"] = curie
copy_output.pop("display")
copy_output.pop("primary")
result.update(
{
curie: {
"type": copy_output.pop("type"),
"db_ids": copy_output,
"id": output_id,
}
}
)
return result
def stepResult2PandasTable(result, step, total_steps, extra_fields=[]):
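    """
    Flatten the records returned by one query step into a pandas DataFrame,
    one row per record, with node ids/labels/types, predicate metadata and
    any requested extra fields as columns.
    :param result: record dicts produced by the step
    :param step: zero-based index of the current step
    :param total_steps: total number of steps in the query
    :param extra_fields: extra record fields to include as columns
    """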
if step == 0:
node1 = "input"
else:
node1 = "node" + str(step)
if step == total_steps - 1:
node2 = "output"
else:
node2 = "node" + str(step + 1)
if isinstance(result, list) and len(result) > 1:
table_dict = []
for rec in result:
d = {
node1
+ "_id": rec["$original_input"][rec["$input"]]["id"]["identifier"],
node1 + "_label": rec["$original_input"][rec["$input"]]["id"]["label"],
node1 + "_type": rec["$original_input"][rec["$input"]]["type"],
"pred" + str(step + 1): rec["$association"]["predicate"],
"pred" + str(step + 1) + "_source": ",".join(rec.get("provided_by"))
if rec.get("provided_by") != [None]
else None,
"pred" + str(step + 1) + "_api": rec.get("api"),
"pred"
+ str(step + 1)
+ "_publications": ",".join(rec.get("publications"))
if rec.get("publications")
else None,
node2
+ "_id": rec["$output_id_mapping"]["resolved_ids"]["id"]["identifier"],
node2
+ "_label": rec["$output_id_mapping"]["resolved_ids"]["id"]["label"],
node2 + "_type": rec["$output_id_mapping"]["resolved_ids"]["type"],
node2 + "_degree": rec.get("$nodeDegree"),
}
if isinstance(extra_fields, list) and len(extra_fields) > 0:
for field in extra_fields:
if field in [
"drug_phase",
"ngd",
"survival_prob_change",
"edgesOut",
]:
field = "$" + field
if field in rec:
if field in ["pvalue"]:
rec[field] = float(rec[field])
elif isinstance(rec[field], list) and len(rec[field]) == 1:
rec[field] = rec[field][0]
d.update(
{
"pred"
+ str(step + 1)
+ "_"
+ str(field).strip("$"): rec[field]
}
)
table_dict.append(d)
return pd.DataFrame(table_dict)
def validate_max_intermediate_nodes(intermediate_nodes):
"""
Validate if user inputs more than 2 intermediate nodes.
:param intermediate_nodes: a list of intermediate nodes
"""
if isinstance(intermediate_nodes, list) and len(intermediate_nodes) > 3:
print(
"Max number of intermediate nodes is 2. You specify {}. We can not proceed your query. Please refine your query".format(
len(intermediate_nodes) - 1
)
)
return False
return True
| apache-2.0 |
pypot/scikit-learn | examples/linear_model/plot_sgd_weighted_samples.py | 344 | 1458 | """
=====================
SGD: Weighted samples
=====================
Plot decision function of a weighted dataset, where the size of points
is proportional to its weight.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
# we create 20 points
np.random.seed(0)
X = np.r_[np.random.randn(10, 2) + [1, 1], np.random.randn(10, 2)]
y = [1] * 10 + [-1] * 10
sample_weight = 100 * np.abs(np.random.randn(20))
# and assign a bigger weight to the first 10 samples
sample_weight[:10] *= 10
# plot the weighted data points
xx, yy = np.meshgrid(np.linspace(-4, 5, 500), np.linspace(-4, 5, 500))
plt.figure()
plt.scatter(X[:, 0], X[:, 1], c=y, s=sample_weight, alpha=0.9,
cmap=plt.cm.bone)
## fit the unweighted model
clf = linear_model.SGDClassifier(alpha=0.01, n_iter=100)
clf.fit(X, y)
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
no_weights = plt.contour(xx, yy, Z, levels=[0], linestyles=['solid'])
## fit the weighted model
clf = linear_model.SGDClassifier(alpha=0.01, n_iter=100)
clf.fit(X, y, sample_weight=sample_weight)
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
samples_weights = plt.contour(xx, yy, Z, levels=[0], linestyles=['dashed'])
plt.legend([no_weights.collections[0], samples_weights.collections[0]],
["no weights", "with weights"], loc="lower left")
plt.xticks(())
plt.yticks(())
plt.show()
| bsd-3-clause |
lenovor/scikit-learn | sklearn/manifold/tests/test_spectral_embedding.py | 216 | 8091 | from nose.tools import assert_true
from nose.tools import assert_equal
from scipy.sparse import csr_matrix
from scipy.sparse import csc_matrix
import numpy as np
from numpy.testing import assert_array_almost_equal, assert_array_equal
from nose.tools import assert_raises
from nose.plugins.skip import SkipTest
from sklearn.manifold.spectral_embedding_ import SpectralEmbedding
from sklearn.manifold.spectral_embedding_ import _graph_is_connected
from sklearn.manifold import spectral_embedding
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.metrics import normalized_mutual_info_score
from sklearn.cluster import KMeans
from sklearn.datasets.samples_generator import make_blobs
# non centered, sparse centers to check the
centers = np.array([
[0.0, 5.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 4.0, 0.0, 0.0],
[1.0, 0.0, 0.0, 5.0, 1.0],
])
n_samples = 1000
n_clusters, n_features = centers.shape
S, true_labels = make_blobs(n_samples=n_samples, centers=centers,
cluster_std=1., random_state=42)
def _check_with_col_sign_flipping(A, B, tol=0.0):
""" Check array A and B are equal with possible sign flipping on
each columns"""
sign = True
for column_idx in range(A.shape[1]):
sign = sign and ((((A[:, column_idx] -
B[:, column_idx]) ** 2).mean() <= tol ** 2) or
(((A[:, column_idx] +
B[:, column_idx]) ** 2).mean() <= tol ** 2))
if not sign:
return False
return True
def test_spectral_embedding_two_components(seed=36):
# Test spectral embedding with two components
random_state = np.random.RandomState(seed)
n_sample = 100
affinity = np.zeros(shape=[n_sample * 2,
n_sample * 2])
# first component
affinity[0:n_sample,
0:n_sample] = np.abs(random_state.randn(n_sample, n_sample)) + 2
# second component
affinity[n_sample::,
n_sample::] = np.abs(random_state.randn(n_sample, n_sample)) + 2
# connection
affinity[0, n_sample + 1] = 1
affinity[n_sample + 1, 0] = 1
affinity.flat[::2 * n_sample + 1] = 0
affinity = 0.5 * (affinity + affinity.T)
true_label = np.zeros(shape=2 * n_sample)
true_label[0:n_sample] = 1
se_precomp = SpectralEmbedding(n_components=1, affinity="precomputed",
random_state=np.random.RandomState(seed))
embedded_coordinate = se_precomp.fit_transform(affinity)
# Some numpy versions are touchy with types
embedded_coordinate = \
se_precomp.fit_transform(affinity.astype(np.float32))
# thresholding on the first components using 0.
label_ = np.array(embedded_coordinate.ravel() < 0, dtype="float")
assert_equal(normalized_mutual_info_score(true_label, label_), 1.0)
def test_spectral_embedding_precomputed_affinity(seed=36):
# Test spectral embedding with precomputed kernel
gamma = 1.0
se_precomp = SpectralEmbedding(n_components=2, affinity="precomputed",
random_state=np.random.RandomState(seed))
se_rbf = SpectralEmbedding(n_components=2, affinity="rbf",
gamma=gamma,
random_state=np.random.RandomState(seed))
embed_precomp = se_precomp.fit_transform(rbf_kernel(S, gamma=gamma))
embed_rbf = se_rbf.fit_transform(S)
assert_array_almost_equal(
se_precomp.affinity_matrix_, se_rbf.affinity_matrix_)
assert_true(_check_with_col_sign_flipping(embed_precomp, embed_rbf, 0.05))
def test_spectral_embedding_callable_affinity(seed=36):
# Test spectral embedding with callable affinity
gamma = 0.9
kern = rbf_kernel(S, gamma=gamma)
se_callable = SpectralEmbedding(n_components=2,
affinity=(
lambda x: rbf_kernel(x, gamma=gamma)),
gamma=gamma,
random_state=np.random.RandomState(seed))
se_rbf = SpectralEmbedding(n_components=2, affinity="rbf",
gamma=gamma,
random_state=np.random.RandomState(seed))
embed_rbf = se_rbf.fit_transform(S)
embed_callable = se_callable.fit_transform(S)
assert_array_almost_equal(
se_callable.affinity_matrix_, se_rbf.affinity_matrix_)
assert_array_almost_equal(kern, se_rbf.affinity_matrix_)
assert_true(
_check_with_col_sign_flipping(embed_rbf, embed_callable, 0.05))
def test_spectral_embedding_amg_solver(seed=36):
# Test spectral embedding with amg solver
try:
from pyamg import smoothed_aggregation_solver
except ImportError:
raise SkipTest("pyamg not available.")
se_amg = SpectralEmbedding(n_components=2, affinity="nearest_neighbors",
eigen_solver="amg", n_neighbors=5,
random_state=np.random.RandomState(seed))
se_arpack = SpectralEmbedding(n_components=2, affinity="nearest_neighbors",
eigen_solver="arpack", n_neighbors=5,
random_state=np.random.RandomState(seed))
embed_amg = se_amg.fit_transform(S)
embed_arpack = se_arpack.fit_transform(S)
assert_true(_check_with_col_sign_flipping(embed_amg, embed_arpack, 0.05))
def test_pipeline_spectral_clustering(seed=36):
# Test using pipeline to do spectral clustering
random_state = np.random.RandomState(seed)
se_rbf = SpectralEmbedding(n_components=n_clusters,
affinity="rbf",
random_state=random_state)
se_knn = SpectralEmbedding(n_components=n_clusters,
affinity="nearest_neighbors",
n_neighbors=5,
random_state=random_state)
for se in [se_rbf, se_knn]:
km = KMeans(n_clusters=n_clusters, random_state=random_state)
km.fit(se.fit_transform(S))
assert_array_almost_equal(
normalized_mutual_info_score(
km.labels_,
true_labels), 1.0, 2)
def test_spectral_embedding_unknown_eigensolver(seed=36):
# Test that SpectralClustering fails with an unknown eigensolver
se = SpectralEmbedding(n_components=1, affinity="precomputed",
random_state=np.random.RandomState(seed),
eigen_solver="<unknown>")
assert_raises(ValueError, se.fit, S)
def test_spectral_embedding_unknown_affinity(seed=36):
# Test that SpectralClustering fails with an unknown affinity type
se = SpectralEmbedding(n_components=1, affinity="<unknown>",
random_state=np.random.RandomState(seed))
assert_raises(ValueError, se.fit, S)
def test_connectivity(seed=36):
# Test that graph connectivity test works as expected
graph = np.array([[1, 0, 0, 0, 0],
[0, 1, 1, 0, 0],
[0, 1, 1, 1, 0],
[0, 0, 1, 1, 1],
[0, 0, 0, 1, 1]])
assert_equal(_graph_is_connected(graph), False)
assert_equal(_graph_is_connected(csr_matrix(graph)), False)
assert_equal(_graph_is_connected(csc_matrix(graph)), False)
graph = np.array([[1, 1, 0, 0, 0],
[1, 1, 1, 0, 0],
[0, 1, 1, 1, 0],
[0, 0, 1, 1, 1],
[0, 0, 0, 1, 1]])
assert_equal(_graph_is_connected(graph), True)
assert_equal(_graph_is_connected(csr_matrix(graph)), True)
assert_equal(_graph_is_connected(csc_matrix(graph)), True)
def test_spectral_embedding_deterministic():
# Test that Spectral Embedding is deterministic
random_state = np.random.RandomState(36)
data = random_state.randn(10, 30)
sims = rbf_kernel(data)
embedding_1 = spectral_embedding(sims)
embedding_2 = spectral_embedding(sims)
assert_array_almost_equal(embedding_1, embedding_2)
| bsd-3-clause |
justincassidy/scikit-learn | sklearn/tests/test_multiclass.py | 136 | 23649 | import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_greater
from sklearn.multiclass import OneVsRestClassifier
from sklearn.multiclass import OneVsOneClassifier
from sklearn.multiclass import OutputCodeClassifier
from sklearn.multiclass import fit_ovr
from sklearn.multiclass import fit_ovo
from sklearn.multiclass import fit_ecoc
from sklearn.multiclass import predict_ovr
from sklearn.multiclass import predict_ovo
from sklearn.multiclass import predict_ecoc
from sklearn.multiclass import predict_proba_ovr
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.preprocessing import LabelBinarizer
from sklearn.svm import LinearSVC, SVC
from sklearn.naive_bayes import MultinomialNB
from sklearn.linear_model import (LinearRegression, Lasso, ElasticNet, Ridge,
Perceptron, LogisticRegression)
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.grid_search import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn import svm
from sklearn import datasets
from sklearn.externals.six.moves import zip
iris = datasets.load_iris()
rng = np.random.RandomState(0)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
n_classes = 3
def test_ovr_exceptions():
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
assert_raises(ValueError, ovr.predict, [])
with ignore_warnings():
assert_raises(ValueError, predict_ovr, [LinearSVC(), MultinomialNB()],
LabelBinarizer(), [])
# Fail on multioutput data
assert_raises(ValueError, OneVsRestClassifier(MultinomialNB()).fit,
np.array([[1, 0], [0, 1]]),
np.array([[1, 2], [3, 1]]))
assert_raises(ValueError, OneVsRestClassifier(MultinomialNB()).fit,
np.array([[1, 0], [0, 1]]),
np.array([[1.5, 2.4], [3.1, 0.8]]))
def test_ovr_fit_predict():
# A classifier which implements decision_function.
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
pred = ovr.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovr.estimators_), n_classes)
clf = LinearSVC(random_state=0)
pred2 = clf.fit(iris.data, iris.target).predict(iris.data)
assert_equal(np.mean(iris.target == pred), np.mean(iris.target == pred2))
# A classifier which implements predict_proba.
ovr = OneVsRestClassifier(MultinomialNB())
pred = ovr.fit(iris.data, iris.target).predict(iris.data)
assert_greater(np.mean(iris.target == pred), 0.65)
def test_ovr_ovo_regressor():
# test that ovr and ovo work on regressors which don't have a decision_function
ovr = OneVsRestClassifier(DecisionTreeRegressor())
pred = ovr.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovr.estimators_), n_classes)
assert_array_equal(np.unique(pred), [0, 1, 2])
# we are doing something sensible
assert_greater(np.mean(pred == iris.target), .9)
ovr = OneVsOneClassifier(DecisionTreeRegressor())
pred = ovr.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovr.estimators_), n_classes * (n_classes - 1) / 2)
assert_array_equal(np.unique(pred), [0, 1, 2])
# we are doing something sensible
assert_greater(np.mean(pred == iris.target), .9)
def test_ovr_fit_predict_sparse():
for sparse in [sp.csr_matrix, sp.csc_matrix, sp.coo_matrix, sp.dok_matrix,
sp.lil_matrix]:
base_clf = MultinomialNB(alpha=1)
X, Y = datasets.make_multilabel_classification(n_samples=100,
n_features=20,
n_classes=5,
n_labels=3,
length=50,
allow_unlabeled=True,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
Y_pred = clf.predict(X_test)
clf_sprs = OneVsRestClassifier(base_clf).fit(X_train, sparse(Y_train))
Y_pred_sprs = clf_sprs.predict(X_test)
assert_true(clf.multilabel_)
assert_true(sp.issparse(Y_pred_sprs))
assert_array_equal(Y_pred_sprs.toarray(), Y_pred)
# Test predict_proba
Y_proba = clf_sprs.predict_proba(X_test)
# predict assigns a label if the probability that the
# sample has the label is greater than 0.5.
pred = Y_proba > .5
assert_array_equal(pred, Y_pred_sprs.toarray())
# Test decision_function
clf_sprs = OneVsRestClassifier(svm.SVC()).fit(X_train, sparse(Y_train))
dec_pred = (clf_sprs.decision_function(X_test) > 0).astype(int)
assert_array_equal(dec_pred, clf_sprs.predict(X_test).toarray())
def test_ovr_always_present():
# Test that ovr works with classes that are always present or absent.
# Note: tests is the case where _ConstantPredictor is utilised
X = np.ones((10, 2))
X[:5, :] = 0
# Build an indicator matrix where two features are always on.
# As list of lists, it would be: [[int(i >= 5), 2, 3] for i in range(10)]
y = np.zeros((10, 3))
y[5:, 0] = 1
y[:, 1] = 1
y[:, 2] = 1
ovr = OneVsRestClassifier(LogisticRegression())
assert_warns(UserWarning, ovr.fit, X, y)
y_pred = ovr.predict(X)
assert_array_equal(np.array(y_pred), np.array(y))
y_pred = ovr.decision_function(X)
assert_equal(np.unique(y_pred[:, -2:]), 1)
y_pred = ovr.predict_proba(X)
assert_array_equal(y_pred[:, -1], np.ones(X.shape[0]))
# y has a constantly absent label
y = np.zeros((10, 2))
y[5:, 0] = 1 # variable label
ovr = OneVsRestClassifier(LogisticRegression())
assert_warns(UserWarning, ovr.fit, X, y)
y_pred = ovr.predict_proba(X)
assert_array_equal(y_pred[:, -1], np.zeros(X.shape[0]))
def test_ovr_multiclass():
# Toy dataset where features correspond directly to labels.
X = np.array([[0, 0, 5], [0, 5, 0], [3, 0, 0], [0, 0, 6], [6, 0, 0]])
y = ["eggs", "spam", "ham", "eggs", "ham"]
Y = np.array([[0, 0, 1],
[0, 1, 0],
[1, 0, 0],
[0, 0, 1],
[1, 0, 0]])
classes = set("ham eggs spam".split())
for base_clf in (MultinomialNB(), LinearSVC(random_state=0),
LinearRegression(), Ridge(),
ElasticNet()):
clf = OneVsRestClassifier(base_clf).fit(X, y)
assert_equal(set(clf.classes_), classes)
y_pred = clf.predict(np.array([[0, 0, 4]]))[0]
assert_equal(set(y_pred), set("eggs"))
# test input as label indicator matrix
clf = OneVsRestClassifier(base_clf).fit(X, Y)
y_pred = clf.predict([[0, 0, 4]])[0]
assert_array_equal(y_pred, [0, 0, 1])
def test_ovr_binary():
# Toy dataset where features correspond directly to labels.
X = np.array([[0, 0, 5], [0, 5, 0], [3, 0, 0], [0, 0, 6], [6, 0, 0]])
y = ["eggs", "spam", "spam", "eggs", "spam"]
Y = np.array([[0, 1, 1, 0, 1]]).T
classes = set("eggs spam".split())
def conduct_test(base_clf, test_predict_proba=False):
clf = OneVsRestClassifier(base_clf).fit(X, y)
assert_equal(set(clf.classes_), classes)
y_pred = clf.predict(np.array([[0, 0, 4]]))[0]
assert_equal(set(y_pred), set("eggs"))
if test_predict_proba:
X_test = np.array([[0, 0, 4]])
probabilities = clf.predict_proba(X_test)
assert_equal(2, len(probabilities[0]))
assert_equal(clf.classes_[np.argmax(probabilities, axis=1)],
clf.predict(X_test))
# test input as label indicator matrix
clf = OneVsRestClassifier(base_clf).fit(X, Y)
y_pred = clf.predict([[3, 0, 0]])[0]
assert_equal(y_pred, 1)
for base_clf in (LinearSVC(random_state=0), LinearRegression(),
Ridge(), ElasticNet()):
conduct_test(base_clf)
for base_clf in (MultinomialNB(), SVC(probability=True),
LogisticRegression()):
conduct_test(base_clf, test_predict_proba=True)
def test_ovr_multilabel():
# Toy dataset where features correspond directly to labels.
X = np.array([[0, 4, 5], [0, 5, 0], [3, 3, 3], [4, 0, 6], [6, 0, 0]])
y = np.array([[0, 1, 1],
[0, 1, 0],
[1, 1, 1],
[1, 0, 1],
[1, 0, 0]])
for base_clf in (MultinomialNB(), LinearSVC(random_state=0),
LinearRegression(), Ridge(),
ElasticNet(), Lasso(alpha=0.5)):
clf = OneVsRestClassifier(base_clf).fit(X, y)
y_pred = clf.predict([[0, 4, 4]])[0]
assert_array_equal(y_pred, [0, 1, 1])
assert_true(clf.multilabel_)
def test_ovr_fit_predict_svc():
ovr = OneVsRestClassifier(svm.SVC())
ovr.fit(iris.data, iris.target)
assert_equal(len(ovr.estimators_), 3)
assert_greater(ovr.score(iris.data, iris.target), .9)
def test_ovr_multilabel_dataset():
base_clf = MultinomialNB(alpha=1)
for au, prec, recall in zip((True, False), (0.51, 0.66), (0.51, 0.80)):
X, Y = datasets.make_multilabel_classification(n_samples=100,
n_features=20,
n_classes=5,
n_labels=2,
length=50,
allow_unlabeled=au,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test, Y_test = X[80:], Y[80:]
clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
Y_pred = clf.predict(X_test)
assert_true(clf.multilabel_)
assert_almost_equal(precision_score(Y_test, Y_pred, average="micro"),
prec,
decimal=2)
assert_almost_equal(recall_score(Y_test, Y_pred, average="micro"),
recall,
decimal=2)
def test_ovr_multilabel_predict_proba():
base_clf = MultinomialNB(alpha=1)
for au in (False, True):
X, Y = datasets.make_multilabel_classification(n_samples=100,
n_features=20,
n_classes=5,
n_labels=3,
length=50,
allow_unlabeled=au,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
# decision function only estimator. Fails in current implementation.
decision_only = OneVsRestClassifier(svm.SVR()).fit(X_train, Y_train)
assert_raises(AttributeError, decision_only.predict_proba, X_test)
# Estimator with predict_proba disabled, depending on parameters.
decision_only = OneVsRestClassifier(svm.SVC(probability=False))
decision_only.fit(X_train, Y_train)
assert_raises(AttributeError, decision_only.predict_proba, X_test)
Y_pred = clf.predict(X_test)
Y_proba = clf.predict_proba(X_test)
# predict assigns a label if the probability that the
# sample has the label is greater than 0.5.
pred = Y_proba > .5
assert_array_equal(pred, Y_pred)
def test_ovr_single_label_predict_proba():
base_clf = MultinomialNB(alpha=1)
X, Y = iris.data, iris.target
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
# decision function only estimator. Fails in current implementation.
decision_only = OneVsRestClassifier(svm.SVR()).fit(X_train, Y_train)
assert_raises(AttributeError, decision_only.predict_proba, X_test)
Y_pred = clf.predict(X_test)
Y_proba = clf.predict_proba(X_test)
assert_almost_equal(Y_proba.sum(axis=1), 1.0)
# predict assigns a label if the probability that the
# sample has the label is greater than 0.5.
pred = np.array([l.argmax() for l in Y_proba])
assert_false((pred - Y_pred).any())
def test_ovr_multilabel_decision_function():
X, Y = datasets.make_multilabel_classification(n_samples=100,
n_features=20,
n_classes=5,
n_labels=3,
length=50,
allow_unlabeled=True,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(svm.SVC()).fit(X_train, Y_train)
assert_array_equal((clf.decision_function(X_test) > 0).astype(int),
clf.predict(X_test))
def test_ovr_single_label_decision_function():
X, Y = datasets.make_classification(n_samples=100,
n_features=20,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(svm.SVC()).fit(X_train, Y_train)
assert_array_equal(clf.decision_function(X_test).ravel() > 0,
clf.predict(X_test))
def test_ovr_gridsearch():
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
Cs = [0.1, 0.5, 0.8]
cv = GridSearchCV(ovr, {'estimator__C': Cs})
cv.fit(iris.data, iris.target)
best_C = cv.best_estimator_.estimators_[0].C
assert_true(best_C in Cs)
def test_ovr_pipeline():
# Test with pipeline of length one
# This test is needed because the multiclass estimators may fail to detect
# the presence of predict_proba or decision_function.
clf = Pipeline([("tree", DecisionTreeClassifier())])
ovr_pipe = OneVsRestClassifier(clf)
ovr_pipe.fit(iris.data, iris.target)
ovr = OneVsRestClassifier(DecisionTreeClassifier())
ovr.fit(iris.data, iris.target)
assert_array_equal(ovr.predict(iris.data), ovr_pipe.predict(iris.data))
def test_ovr_coef_():
for base_classifier in [SVC(kernel='linear', random_state=0), LinearSVC(random_state=0)]:
# SVC has sparse coef with sparse input data
ovr = OneVsRestClassifier(base_classifier)
for X in [iris.data, sp.csr_matrix(iris.data)]:
# test with dense and sparse coef
ovr.fit(X, iris.target)
shape = ovr.coef_.shape
assert_equal(shape[0], n_classes)
assert_equal(shape[1], iris.data.shape[1])
# don't densify sparse coefficients
assert_equal(sp.issparse(ovr.estimators_[0].coef_), sp.issparse(ovr.coef_))
def test_ovr_coef_exceptions():
# Not fitted exception!
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
# lambda is needed because we don't want coef_ to be evaluated right away
assert_raises(ValueError, lambda x: ovr.coef_, None)
# Doesn't have coef_ exception!
ovr = OneVsRestClassifier(DecisionTreeClassifier())
ovr.fit(iris.data, iris.target)
assert_raises(AttributeError, lambda x: ovr.coef_, None)
def test_ovo_exceptions():
ovo = OneVsOneClassifier(LinearSVC(random_state=0))
assert_raises(ValueError, ovo.predict, [])
def test_ovo_fit_on_list():
# Test that OneVsOne fitting works with a list of targets and yields the
# same output as predict from an array
ovo = OneVsOneClassifier(LinearSVC(random_state=0))
prediction_from_array = ovo.fit(iris.data, iris.target).predict(iris.data)
prediction_from_list = ovo.fit(iris.data,
list(iris.target)).predict(iris.data)
assert_array_equal(prediction_from_array, prediction_from_list)
def test_ovo_fit_predict():
# A classifier which implements decision_function.
ovo = OneVsOneClassifier(LinearSVC(random_state=0))
ovo.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovo.estimators_), n_classes * (n_classes - 1) / 2)
# A classifier which implements predict_proba.
ovo = OneVsOneClassifier(MultinomialNB())
ovo.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovo.estimators_), n_classes * (n_classes - 1) / 2)
def test_ovo_decision_function():
n_samples = iris.data.shape[0]
ovo_clf = OneVsOneClassifier(LinearSVC(random_state=0))
ovo_clf.fit(iris.data, iris.target)
decisions = ovo_clf.decision_function(iris.data)
assert_equal(decisions.shape, (n_samples, n_classes))
assert_array_equal(decisions.argmax(axis=1), ovo_clf.predict(iris.data))
# Compute the votes
votes = np.zeros((n_samples, n_classes))
k = 0
for i in range(n_classes):
for j in range(i + 1, n_classes):
pred = ovo_clf.estimators_[k].predict(iris.data)
votes[pred == 0, i] += 1
votes[pred == 1, j] += 1
k += 1
# Extract votes and verify
assert_array_equal(votes, np.round(decisions))
for class_idx in range(n_classes):
# For each sample and each class, there only 3 possible vote levels
# because they are only 3 distinct class pairs thus 3 distinct
# binary classifiers.
# Therefore, sorting predictions based on votes would yield
# mostly tied predictions:
assert_true(set(votes[:, class_idx]).issubset(set([0., 1., 2.])))
# The OVO decision function on the other hand is able to resolve
# most of the ties on this data as it combines both the vote counts
# and the aggregated confidence levels of the binary classifiers
# to compute the aggregate decision function. The iris dataset
# has 150 samples with a couple of duplicates. The OvO decisions
# can resolve most of the ties:
assert_greater(len(np.unique(decisions[:, class_idx])), 146)
def test_ovo_gridsearch():
ovo = OneVsOneClassifier(LinearSVC(random_state=0))
Cs = [0.1, 0.5, 0.8]
cv = GridSearchCV(ovo, {'estimator__C': Cs})
cv.fit(iris.data, iris.target)
best_C = cv.best_estimator_.estimators_[0].C
assert_true(best_C in Cs)
def test_ovo_ties():
# Test that ties are broken using the decision function,
# not defaulting to the smallest label
X = np.array([[1, 2], [2, 1], [-2, 1], [-2, -1]])
y = np.array([2, 0, 1, 2])
multi_clf = OneVsOneClassifier(Perceptron(shuffle=False))
ovo_prediction = multi_clf.fit(X, y).predict(X)
ovo_decision = multi_clf.decision_function(X)
# Classifiers are in order 0-1, 0-2, 1-2
# Use decision_function to compute the votes and the normalized
# sum_of_confidences, which is used to disambiguate when there is a tie in
# votes.
votes = np.round(ovo_decision)
normalized_confidences = ovo_decision - votes
# For the first point, there is one vote per class
assert_array_equal(votes[0, :], 1)
# For the rest, there is no tie and the prediction is the argmax
assert_array_equal(np.argmax(votes[1:], axis=1), ovo_prediction[1:])
# For the tie, the prediction is the class with the highest score
assert_equal(ovo_prediction[0], normalized_confidences[0].argmax())
def test_ovo_ties2():
# test that ties can not only be won by the first two labels
X = np.array([[1, 2], [2, 1], [-2, 1], [-2, -1]])
y_ref = np.array([2, 0, 1, 2])
# cycle through labels so that each label wins once
for i in range(3):
y = (y_ref + i) % 3
multi_clf = OneVsOneClassifier(Perceptron(shuffle=False))
ovo_prediction = multi_clf.fit(X, y).predict(X)
assert_equal(ovo_prediction[0], i % 3)
def test_ovo_string_y():
# Test that the OvO doesn't mess up the encoding of string labels
X = np.eye(4)
y = np.array(['a', 'b', 'c', 'd'])
ovo = OneVsOneClassifier(LinearSVC())
ovo.fit(X, y)
assert_array_equal(y, ovo.predict(X))
def test_ecoc_exceptions():
ecoc = OutputCodeClassifier(LinearSVC(random_state=0))
assert_raises(ValueError, ecoc.predict, [])
def test_ecoc_fit_predict():
# A classifier which implements decision_function.
ecoc = OutputCodeClassifier(LinearSVC(random_state=0),
code_size=2, random_state=0)
ecoc.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ecoc.estimators_), n_classes * 2)
# A classifier which implements predict_proba.
ecoc = OutputCodeClassifier(MultinomialNB(), code_size=2, random_state=0)
ecoc.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ecoc.estimators_), n_classes * 2)
def test_ecoc_gridsearch():
ecoc = OutputCodeClassifier(LinearSVC(random_state=0),
random_state=0)
Cs = [0.1, 0.5, 0.8]
cv = GridSearchCV(ecoc, {'estimator__C': Cs})
cv.fit(iris.data, iris.target)
best_C = cv.best_estimator_.estimators_[0].C
assert_true(best_C in Cs)
@ignore_warnings
def test_deprecated():
base_estimator = DecisionTreeClassifier(random_state=0)
X, Y = iris.data, iris.target
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
all_metas = [
(OneVsRestClassifier, fit_ovr, predict_ovr, predict_proba_ovr),
(OneVsOneClassifier, fit_ovo, predict_ovo, None),
(OutputCodeClassifier, fit_ecoc, predict_ecoc, None),
]
for MetaEst, fit_func, predict_func, proba_func in all_metas:
try:
meta_est = MetaEst(base_estimator,
random_state=0).fit(X_train, Y_train)
fitted_return = fit_func(base_estimator, X_train, Y_train,
random_state=0)
except TypeError:
meta_est = MetaEst(base_estimator).fit(X_train, Y_train)
fitted_return = fit_func(base_estimator, X_train, Y_train)
if len(fitted_return) == 2:
estimators_, classes_or_lb = fitted_return
assert_almost_equal(predict_func(estimators_, classes_or_lb,
X_test),
meta_est.predict(X_test))
if proba_func is not None:
assert_almost_equal(proba_func(estimators_, X_test,
is_multilabel=False),
meta_est.predict_proba(X_test))
else:
estimators_, classes_or_lb, codebook = fitted_return
assert_almost_equal(predict_func(estimators_, classes_or_lb,
codebook, X_test),
meta_est.predict(X_test))
| bsd-3-clause |
lbishal/scikit-learn | sklearn/metrics/classification.py | 8 | 68395 | """Metrics to assess performance on classification tasks given class predictions
Functions named as ``*_score`` return a scalar value to maximize: the higher
the better
Functions named as ``*_error`` or ``*_loss`` return a scalar value to minimize:
the lower the better
"""
# Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Olivier Grisel <[email protected]>
# Arnaud Joly <[email protected]>
# Jochen Wersdorfer <[email protected]>
# Lars Buitinck <[email protected]>
# Joel Nothman <[email protected]>
# Noel Dawe <[email protected]>
# Jatin Shah <[email protected]>
# Saurabh Jha <[email protected]>
# License: BSD 3 clause
from __future__ import division
import warnings
import numpy as np
from scipy.sparse import coo_matrix
from scipy.sparse import csr_matrix
from scipy.spatial.distance import hamming as sp_hamming
from ..preprocessing import LabelBinarizer, label_binarize
from ..preprocessing import LabelEncoder
from ..utils import check_array
from ..utils import check_consistent_length
from ..utils import column_or_1d
from ..utils.multiclass import unique_labels
from ..utils.multiclass import type_of_target
from ..utils.validation import _num_samples
from ..utils.sparsefuncs import count_nonzero
from ..utils.fixes import bincount
from ..exceptions import UndefinedMetricWarning
def _check_targets(y_true, y_pred):
"""Check that y_true and y_pred belong to the same classification task
This converts multiclass or binary types to a common shape, and raises a
ValueError for a mix of multilabel and multiclass targets, a mix of
multilabel formats, for the presence of continuous-valued or multioutput
targets, or for targets of different lengths.
Column vectors are squeezed to 1d, while multilabel formats are returned
as CSR sparse label indicators.
Parameters
----------
y_true : array-like
y_pred : array-like
Returns
-------
type_true : one of {'multilabel-indicator', 'multiclass', 'binary'}
The type of the true target data, as output by
``utils.multiclass.type_of_target``
y_true : array or indicator matrix
y_pred : array or indicator matrix
"""
check_consistent_length(y_true, y_pred)
type_true = type_of_target(y_true)
type_pred = type_of_target(y_pred)
y_type = set([type_true, type_pred])
if y_type == set(["binary", "multiclass"]):
y_type = set(["multiclass"])
if len(y_type) > 1:
raise ValueError("Can't handle mix of {0} and {1}"
"".format(type_true, type_pred))
# We can't have more than one value on y_type => The set is no more needed
y_type = y_type.pop()
# No metrics support "multiclass-multioutput" format
if (y_type not in ["binary", "multiclass", "multilabel-indicator"]):
raise ValueError("{0} is not supported".format(y_type))
if y_type in ["binary", "multiclass"]:
y_true = column_or_1d(y_true)
y_pred = column_or_1d(y_pred)
if y_type.startswith('multilabel'):
y_true = csr_matrix(y_true)
y_pred = csr_matrix(y_pred)
y_type = 'multilabel-indicator'
return y_type, y_true, y_pred
def _weighted_sum(sample_score, sample_weight, normalize=False):
if normalize:
return np.average(sample_score, weights=sample_weight)
elif sample_weight is not None:
return np.dot(sample_score, sample_weight)
else:
return sample_score.sum()
def accuracy_score(y_true, y_pred, normalize=True, sample_weight=None):
"""Accuracy classification score.
In multilabel classification, this function computes subset accuracy:
the set of labels predicted for a sample must *exactly* match the
corresponding set of labels in y_true.
Read more in the :ref:`User Guide <accuracy_score>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) labels.
y_pred : 1d array-like, or label indicator array / sparse matrix
Predicted labels, as returned by a classifier.
normalize : bool, optional (default=True)
If ``False``, return the number of correctly classified samples.
Otherwise, return the fraction of correctly classified samples.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
score : float
If ``normalize == True``, return the correctly classified samples
(float), else it returns the number of correctly classified samples
(int).
The best performance is 1 with ``normalize == True`` and the number
of samples with ``normalize == False``.
See also
--------
jaccard_similarity_score, hamming_loss, zero_one_loss
Notes
-----
In binary and multiclass classification, this function is equal
to the ``jaccard_similarity_score`` function.
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import accuracy_score
>>> y_pred = [0, 2, 1, 3]
>>> y_true = [0, 1, 2, 3]
>>> accuracy_score(y_true, y_pred)
0.5
>>> accuracy_score(y_true, y_pred, normalize=False)
2
In the multilabel case with binary label indicators:
>>> accuracy_score(np.array([[0, 1], [1, 1]]), np.ones((2, 2)))
0.5
"""
# Compute accuracy for each possible representation
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
if y_type.startswith('multilabel'):
differing_labels = count_nonzero(y_true - y_pred, axis=1)
score = differing_labels == 0
else:
score = y_true == y_pred
return _weighted_sum(score, sample_weight, normalize)
def confusion_matrix(y_true, y_pred, labels=None):
"""Compute confusion matrix to evaluate the accuracy of a classification
By definition a confusion matrix :math:`C` is such that :math:`C_{i, j}`
is equal to the number of observations known to be in group :math:`i` but
predicted to be in group :math:`j`.
Read more in the :ref:`User Guide <confusion_matrix>`.
Parameters
----------
y_true : array, shape = [n_samples]
Ground truth (correct) target values.
y_pred : array, shape = [n_samples]
Estimated targets as returned by a classifier.
labels : array, shape = [n_classes], optional
List of labels to index the matrix. This may be used to reorder
or select a subset of labels.
If none is given, those that appear at least once
in ``y_true`` or ``y_pred`` are used in sorted order.
Returns
-------
C : array, shape = [n_classes, n_classes]
Confusion matrix
References
----------
.. [1] `Wikipedia entry for the Confusion matrix
<http://en.wikipedia.org/wiki/Confusion_matrix>`_
Examples
--------
>>> from sklearn.metrics import confusion_matrix
>>> y_true = [2, 0, 2, 2, 0, 1]
>>> y_pred = [0, 0, 2, 2, 0, 2]
>>> confusion_matrix(y_true, y_pred)
array([[2, 0, 0],
[0, 0, 1],
[1, 0, 2]])
>>> y_true = ["cat", "ant", "cat", "cat", "ant", "bird"]
>>> y_pred = ["ant", "ant", "cat", "cat", "ant", "cat"]
>>> confusion_matrix(y_true, y_pred, labels=["ant", "bird", "cat"])
array([[2, 0, 0],
[0, 0, 1],
[1, 0, 2]])
"""
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
if y_type not in ("binary", "multiclass"):
raise ValueError("%s is not supported" % y_type)
if labels is None:
labels = unique_labels(y_true, y_pred)
else:
labels = np.asarray(labels)
n_labels = labels.size
label_to_ind = dict((y, x) for x, y in enumerate(labels))
# convert yt, yp into index
y_pred = np.array([label_to_ind.get(x, n_labels + 1) for x in y_pred])
y_true = np.array([label_to_ind.get(x, n_labels + 1) for x in y_true])
# intersect y_pred, y_true with labels, eliminate items not in labels
ind = np.logical_and(y_pred < n_labels, y_true < n_labels)
y_pred = y_pred[ind]
y_true = y_true[ind]
CM = coo_matrix((np.ones(y_true.shape[0], dtype=np.int), (y_true, y_pred)),
shape=(n_labels, n_labels)
).toarray()
return CM
def cohen_kappa_score(y1, y2, labels=None):
"""Cohen's kappa: a statistic that measures inter-annotator agreement.
This function computes Cohen's kappa [1], a score that expresses the level
of agreement between two annotators on a classification problem. It is
defined as
.. math::
\kappa = (p_o - p_e) / (1 - p_e)
where :math:`p_o` is the empirical probability of agreement on the label
assigned to any sample (the observed agreement ratio), and :math:`p_e` is
the expected agreement when both annotators assign labels randomly.
:math:`p_e` is estimated using a per-annotator empirical prior over the
class labels [2].
Parameters
----------
y1 : array, shape = [n_samples]
Labels assigned by the first annotator.
y2 : array, shape = [n_samples]
Labels assigned by the second annotator. The kappa statistic is
symmetric, so swapping ``y1`` and ``y2`` doesn't change the value.
labels : array, shape = [n_classes], optional
List of labels to index the matrix. This may be used to select a
subset of labels. If None, all labels that appear at least once in
``y1`` or ``y2`` are used.
Returns
-------
kappa : float
The kappa statistic, which is a number between -1 and 1. The maximum
value means complete agreement; zero or lower means chance agreement.
References
----------
.. [1] J. Cohen (1960). "A coefficient of agreement for nominal scales".
Educational and Psychological Measurement 20(1):37-46.
doi:10.1177/001316446002000104.
.. [2] R. Artstein and M. Poesio (2008). "Inter-coder agreement for
computational linguistics". Computational Linguistic 34(4):555-596.
"""
confusion = confusion_matrix(y1, y2, labels=labels)
P = confusion / float(confusion.sum())
p_observed = np.trace(P)
p_expected = np.dot(P.sum(axis=0), P.sum(axis=1))
return (p_observed - p_expected) / (1 - p_expected)
def jaccard_similarity_score(y_true, y_pred, normalize=True,
sample_weight=None):
"""Jaccard similarity coefficient score
The Jaccard index [1], or Jaccard similarity coefficient, defined as
the size of the intersection divided by the size of the union of two label
sets, is used to compare set of predicted labels for a sample to the
corresponding set of labels in ``y_true``.
Read more in the :ref:`User Guide <jaccard_similarity_score>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) labels.
y_pred : 1d array-like, or label indicator array / sparse matrix
Predicted labels, as returned by a classifier.
normalize : bool, optional (default=True)
If ``False``, return the sum of the Jaccard similarity coefficient
over the sample set. Otherwise, return the average of Jaccard
similarity coefficient.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
score : float
If ``normalize == True``, return the average Jaccard similarity
coefficient, else it returns the sum of the Jaccard similarity
coefficient over the sample set.
The best performance is 1 with ``normalize == True`` and the number
of samples with ``normalize == False``.
See also
--------
accuracy_score, hamming_loss, zero_one_loss
Notes
-----
In binary and multiclass classification, this function is equivalent
to the ``accuracy_score``. It differs in the multilabel classification
problem.
References
----------
.. [1] `Wikipedia entry for the Jaccard index
<http://en.wikipedia.org/wiki/Jaccard_index>`_
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import jaccard_similarity_score
>>> y_pred = [0, 2, 1, 3]
>>> y_true = [0, 1, 2, 3]
>>> jaccard_similarity_score(y_true, y_pred)
0.5
>>> jaccard_similarity_score(y_true, y_pred, normalize=False)
2
In the multilabel case with binary label indicators:
>>> jaccard_similarity_score(np.array([[0, 1], [1, 1]]),\
np.ones((2, 2)))
0.75
"""
# Compute accuracy for each possible representation
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
if y_type.startswith('multilabel'):
with np.errstate(divide='ignore', invalid='ignore'):
# oddly, we may get an "invalid" rather than a "divide" error here
pred_or_true = count_nonzero(y_true + y_pred, axis=1)
pred_and_true = count_nonzero(y_true.multiply(y_pred), axis=1)
score = pred_and_true / pred_or_true
# If there is no label, it results in a Nan instead, we set
# the jaccard to 1: lim_{x->0} x/x = 1
# Note with py2.6 and np 1.3: we can't check safely for nan.
score[pred_or_true == 0.0] = 1.0
else:
score = y_true == y_pred
return _weighted_sum(score, sample_weight, normalize)
def matthews_corrcoef(y_true, y_pred):
"""Compute the Matthews correlation coefficient (MCC) for binary classes
The Matthews correlation coefficient is used in machine learning as a
measure of the quality of binary (two-class) classifications. It takes into
account true and false positives and negatives and is generally regarded as
a balanced measure which can be used even if the classes are of very
different sizes. The MCC is in essence a correlation coefficient value
between -1 and +1. A coefficient of +1 represents a perfect prediction, 0
an average random prediction and -1 an inverse prediction. The statistic
is also known as the phi coefficient. [source: Wikipedia]
Only in the binary case does this relate to information about true and
false positives and negatives. See references below.
Read more in the :ref:`User Guide <matthews_corrcoef>`.
Parameters
----------
y_true : array, shape = [n_samples]
Ground truth (correct) target values.
y_pred : array, shape = [n_samples]
Estimated targets as returned by a classifier.
Returns
-------
mcc : float
The Matthews correlation coefficient (+1 represents a perfect
prediction, 0 an average random prediction and -1 and inverse
prediction).
References
----------
.. [1] `Baldi, Brunak, Chauvin, Andersen and Nielsen, (2000). Assessing the
accuracy of prediction algorithms for classification: an overview
<http://dx.doi.org/10.1093/bioinformatics/16.5.412>`_
.. [2] `Wikipedia entry for the Matthews Correlation Coefficient
<http://en.wikipedia.org/wiki/Matthews_correlation_coefficient>`_
Examples
--------
>>> from sklearn.metrics import matthews_corrcoef
>>> y_true = [+1, +1, +1, -1]
>>> y_pred = [+1, -1, +1, +1]
>>> matthews_corrcoef(y_true, y_pred) # doctest: +ELLIPSIS
-0.33...
"""
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
if y_type != "binary":
raise ValueError("%s is not supported" % y_type)
lb = LabelEncoder()
lb.fit(np.hstack([y_true, y_pred]))
y_true = lb.transform(y_true)
y_pred = lb.transform(y_pred)
with np.errstate(invalid='ignore'):
mcc = np.corrcoef(y_true, y_pred)[0, 1]
if np.isnan(mcc):
return 0.
else:
return mcc
def zero_one_loss(y_true, y_pred, normalize=True, sample_weight=None):
"""Zero-one classification loss.
If normalize is ``True``, return the fraction of misclassifications
(float), else it returns the number of misclassifications (int). The best
performance is 0.
Read more in the :ref:`User Guide <zero_one_loss>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) labels.
y_pred : 1d array-like, or label indicator array / sparse matrix
Predicted labels, as returned by a classifier.
normalize : bool, optional (default=True)
If ``False``, return the number of misclassifications.
Otherwise, return the fraction of misclassifications.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
loss : float or int,
If ``normalize == True``, return the fraction of misclassifications
(float), else it returns the number of misclassifications (int).
Notes
-----
In multilabel classification, the zero_one_loss function corresponds to
the subset zero-one loss: for each sample, the entire set of labels must be
correctly predicted, otherwise the loss for that sample is equal to one.
See also
--------
accuracy_score, hamming_loss, jaccard_similarity_score
Examples
--------
>>> from sklearn.metrics import zero_one_loss
>>> y_pred = [1, 2, 3, 4]
>>> y_true = [2, 2, 3, 4]
>>> zero_one_loss(y_true, y_pred)
0.25
>>> zero_one_loss(y_true, y_pred, normalize=False)
1
In the multilabel case with binary label indicators:
>>> zero_one_loss(np.array([[0, 1], [1, 1]]), np.ones((2, 2)))
0.5
"""
score = accuracy_score(y_true, y_pred,
normalize=normalize,
sample_weight=sample_weight)
if normalize:
return 1 - score
else:
if sample_weight is not None:
n_samples = np.sum(sample_weight)
else:
n_samples = _num_samples(y_true)
return n_samples - score
def f1_score(y_true, y_pred, labels=None, pos_label=1, average='binary',
sample_weight=None):
"""Compute the F1 score, also known as balanced F-score or F-measure
The F1 score can be interpreted as a weighted average of the precision and
recall, where an F1 score reaches its best value at 1 and worst score at 0.
The relative contribution of precision and recall to the F1 score are
equal. The formula for the F1 score is::
F1 = 2 * (precision * recall) / (precision + recall)
In the multi-class and multi-label case, this is the weighted average of
the F1 score of each class.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
labels : list, optional
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
.. versionchanged:: 0.17
parameter *labels* improved for multiclass problem.
pos_label : str or int, 1 by default
The class to report if ``average='binary'``. Until version 0.18 it is
necessary to set ``pos_label=None`` if seeking to use another averaging
method over binary targets.
average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \
'weighted']
This parameter is required for multiclass/multilabel targets.
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
Note that if ``pos_label`` is given in binary classification with
`average != 'binary'`, only that positive class is reported. This
behavior is deprecated and will change in version 0.18.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
f1_score : float or array of float, shape = [n_unique_labels]
F1 score of the positive class in binary classification or weighted
average of the F1 scores of each class for the multiclass task.
References
----------
.. [1] `Wikipedia entry for the F1-score <http://en.wikipedia.org/wiki/F1_score>`_
Examples
--------
>>> from sklearn.metrics import f1_score
>>> y_true = [0, 1, 2, 0, 1, 2]
>>> y_pred = [0, 2, 1, 0, 0, 1]
>>> f1_score(y_true, y_pred, average='macro') # doctest: +ELLIPSIS
0.26...
>>> f1_score(y_true, y_pred, average='micro') # doctest: +ELLIPSIS
0.33...
>>> f1_score(y_true, y_pred, average='weighted') # doctest: +ELLIPSIS
0.26...
>>> f1_score(y_true, y_pred, average=None)
array([ 0.8, 0. , 0. ])
"""
return fbeta_score(y_true, y_pred, 1, labels=labels,
pos_label=pos_label, average=average,
sample_weight=sample_weight)
def fbeta_score(y_true, y_pred, beta, labels=None, pos_label=1,
average='binary', sample_weight=None):
"""Compute the F-beta score
The F-beta score is the weighted harmonic mean of precision and recall,
reaching its optimal value at 1 and its worst value at 0.
The `beta` parameter determines the weight of precision in the combined
score. ``beta < 1`` lends more weight to precision, while ``beta > 1``
favors recall (``beta -> 0`` considers only precision, ``beta -> inf``
only recall).
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
    beta : float
Weight of precision in harmonic mean.
labels : list, optional
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
.. versionchanged:: 0.17
parameter *labels* improved for multiclass problem.
pos_label : str or int, 1 by default
The class to report if ``average='binary'``. Until version 0.18 it is
necessary to set ``pos_label=None`` if seeking to use another averaging
method over binary targets.
average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \
'weighted']
This parameter is required for multiclass/multilabel targets.
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
Note that if ``pos_label`` is given in binary classification with
`average != 'binary'`, only that positive class is reported. This
behavior is deprecated and will change in version 0.18.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
fbeta_score : float (if average is not None) or array of float, shape =\
[n_unique_labels]
F-beta score of the positive class in binary classification or weighted
average of the F-beta score of each class for the multiclass task.
References
----------
.. [1] R. Baeza-Yates and B. Ribeiro-Neto (2011).
Modern Information Retrieval. Addison Wesley, pp. 327-328.
.. [2] `Wikipedia entry for the F1-score
<http://en.wikipedia.org/wiki/F1_score>`_
Examples
--------
>>> from sklearn.metrics import fbeta_score
>>> y_true = [0, 1, 2, 0, 1, 2]
>>> y_pred = [0, 2, 1, 0, 0, 1]
>>> fbeta_score(y_true, y_pred, average='macro', beta=0.5)
... # doctest: +ELLIPSIS
0.23...
>>> fbeta_score(y_true, y_pred, average='micro', beta=0.5)
... # doctest: +ELLIPSIS
0.33...
>>> fbeta_score(y_true, y_pred, average='weighted', beta=0.5)
... # doctest: +ELLIPSIS
0.23...
>>> fbeta_score(y_true, y_pred, average=None, beta=0.5)
... # doctest: +ELLIPSIS
array([ 0.71..., 0. , 0. ])
"""
_, _, f, _ = precision_recall_fscore_support(y_true, y_pred,
beta=beta,
labels=labels,
pos_label=pos_label,
average=average,
warn_for=('f-score',),
sample_weight=sample_weight)
return f
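# A worked check (numbers taken from the docstring example above) of the
# F-beta combination applied downstream: F_beta = (1 + beta**2) * P * R /
# (beta**2 * P + R). The helper name is hypothetical and assumes the
# denominator beta**2 * P + R is nonzero.
def _fbeta_by_hand_sketch(precision, recall, beta):
    beta2 = beta ** 2
    return (1 + beta2) * precision * recall / (beta2 * precision + recall)
# Class 0 of the docstring example has precision 2/3 and recall 1, so
# _fbeta_by_hand_sketch(2. / 3., 1., 0.5) == 5. / 7. ~ 0.71..., matching the
# first entry of the ``average=None`` doctest above.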
def _prf_divide(numerator, denominator, metric, modifier, average, warn_for):
"""Performs division and handles divide-by-zero.
On zero-division, sets the corresponding result elements to zero
and raises a warning.
The metric, modifier and average arguments are used only for determining
an appropriate warning.
"""
result = numerator / denominator
mask = denominator == 0.0
if not np.any(mask):
return result
# remove infs
result[mask] = 0.0
# build appropriate warning
# E.g. "Precision and F-score are ill-defined and being set to 0.0 in
# labels with no predicted samples"
axis0 = 'sample'
axis1 = 'label'
if average == 'samples':
axis0, axis1 = axis1, axis0
if metric in warn_for and 'f-score' in warn_for:
msg_start = '{0} and F-score are'.format(metric.title())
elif metric in warn_for:
msg_start = '{0} is'.format(metric.title())
elif 'f-score' in warn_for:
msg_start = 'F-score is'
else:
return result
msg = ('{0} ill-defined and being set to 0.0 {{0}} '
'no {1} {2}s.'.format(msg_start, modifier, axis0))
if len(mask) == 1:
msg = msg.format('due to')
else:
msg = msg.format('in {0}s with'.format(axis1))
warnings.warn(msg, UndefinedMetricWarning, stacklevel=2)
return result
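# A standalone illustration (plain NumPy, without the warning machinery) of
# the zero-division convention implemented above: entries whose denominator
# is zero come out as 0.0 rather than nan or inf.
def _prf_divide_convention_sketch():
    import numpy as np
    tp_sum = np.array([2.0, 0.0, 1.0])
    pred_sum = np.array([4.0, 0.0, 2.0])  # the second label is never predicted
    with np.errstate(divide='ignore', invalid='ignore'):
        precision = tp_sum / pred_sum
    precision[pred_sum == 0.0] = 0.0
    return precision  # array([0.5, 0., 0.5]); _prf_divide would also warn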
def precision_recall_fscore_support(y_true, y_pred, beta=1.0, labels=None,
pos_label=1, average=None,
warn_for=('precision', 'recall',
'f-score'),
sample_weight=None):
"""Compute precision, recall, F-measure and support for each class
The precision is the ratio ``tp / (tp + fp)`` where ``tp`` is the number of
true positives and ``fp`` the number of false positives. The precision is
intuitively the ability of the classifier not to label as positive a sample
that is negative.
The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number of
true positives and ``fn`` the number of false negatives. The recall is
intuitively the ability of the classifier to find all the positive samples.
The F-beta score can be interpreted as a weighted harmonic mean of
the precision and recall, where an F-beta score reaches its best
value at 1 and worst score at 0.
The F-beta score weights recall more than precision by a factor of
``beta``. ``beta == 1.0`` means recall and precision are equally important.
The support is the number of occurrences of each class in ``y_true``.
If ``pos_label is None`` and in binary classification, this function
returns the average precision, recall and F-measure if ``average``
is one of ``'micro'``, ``'macro'``, ``'weighted'`` or ``'samples'``.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
beta : float, 1.0 by default
The strength of recall versus precision in the F-score.
labels : list, optional
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
pos_label : str or int, 1 by default
The class to report if ``average='binary'``. Until version 0.18 it is
necessary to set ``pos_label=None`` if seeking to use another averaging
method over binary targets.
average : string, [None (default), 'binary', 'micro', 'macro', 'samples', \
'weighted']
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
Note that if ``pos_label`` is given in binary classification with
`average != 'binary'`, only that positive class is reported. This
behavior is deprecated and will change in version 0.18.
warn_for : tuple or set, for internal use
This determines which warnings will be made in the case that this
function is being used to return only one of its metrics.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
precision: float (if average is not None) or array of float, shape =\
[n_unique_labels]
    recall: float (if average is not None) or array of float, shape =\
[n_unique_labels]
fbeta_score: float (if average is not None) or array of float, shape =\
[n_unique_labels]
support: int (if average is not None) or array of int, shape =\
[n_unique_labels]
The number of occurrences of each label in ``y_true``.
References
----------
.. [1] `Wikipedia entry for the Precision and recall
<http://en.wikipedia.org/wiki/Precision_and_recall>`_
.. [2] `Wikipedia entry for the F1-score
<http://en.wikipedia.org/wiki/F1_score>`_
.. [3] `Discriminative Methods for Multi-labeled Classification Advances
in Knowledge Discovery and Data Mining (2004), pp. 22-30 by Shantanu
Godbole, Sunita Sarawagi
<http://www.godbole.net/shantanu/pubs/multilabelsvm-pakdd04.pdf>`
Examples
--------
>>> from sklearn.metrics import precision_recall_fscore_support
>>> y_true = np.array(['cat', 'dog', 'pig', 'cat', 'dog', 'pig'])
>>> y_pred = np.array(['cat', 'pig', 'dog', 'cat', 'cat', 'dog'])
>>> precision_recall_fscore_support(y_true, y_pred, average='macro')
... # doctest: +ELLIPSIS
(0.22..., 0.33..., 0.26..., None)
>>> precision_recall_fscore_support(y_true, y_pred, average='micro')
... # doctest: +ELLIPSIS
(0.33..., 0.33..., 0.33..., None)
>>> precision_recall_fscore_support(y_true, y_pred, average='weighted')
... # doctest: +ELLIPSIS
(0.22..., 0.33..., 0.26..., None)
It is possible to compute per-label precisions, recalls, F1-scores and
supports instead of averaging:
>>> precision_recall_fscore_support(y_true, y_pred, average=None,
... labels=['pig', 'dog', 'cat'])
... # doctest: +ELLIPSIS,+NORMALIZE_WHITESPACE
(array([ 0. , 0. , 0.66...]),
array([ 0., 0., 1.]),
array([ 0. , 0. , 0.8]),
array([2, 2, 2]))
"""
average_options = (None, 'micro', 'macro', 'weighted', 'samples')
if average not in average_options and average != 'binary':
raise ValueError('average has to be one of ' +
str(average_options))
if beta <= 0:
raise ValueError("beta should be >0 in the F-beta score")
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
present_labels = unique_labels(y_true, y_pred)
if average == 'binary' and (y_type != 'binary' or pos_label is None):
warnings.warn('The default `weighted` averaging is deprecated, '
'and from version 0.18, use of precision, recall or '
'F-score with multiclass or multilabel data or '
'pos_label=None will result in an exception. '
'Please set an explicit value for `average`, one of '
'%s. In cross validation use, for instance, '
'scoring="f1_weighted" instead of scoring="f1".'
% str(average_options), DeprecationWarning, stacklevel=2)
average = 'weighted'
if y_type == 'binary' and pos_label is not None and average is not None:
if average != 'binary':
warnings.warn('From version 0.18, binary input will not be '
'handled specially when using averaged '
'precision/recall/F-score. '
'Please use average=\'binary\' to report only the '
'positive class performance.', DeprecationWarning)
if labels is None or len(labels) <= 2:
if pos_label not in present_labels:
if len(present_labels) < 2:
# Only negative labels
return (0., 0., 0., 0)
else:
raise ValueError("pos_label=%r is not a valid label: %r" %
(pos_label, present_labels))
labels = [pos_label]
if labels is None:
labels = present_labels
n_labels = None
else:
n_labels = len(labels)
labels = np.hstack([labels, np.setdiff1d(present_labels, labels,
assume_unique=True)])
# Calculate tp_sum, pred_sum, true_sum ###
if y_type.startswith('multilabel'):
sum_axis = 1 if average == 'samples' else 0
# All labels are index integers for multilabel.
# Select labels:
if not np.all(labels == present_labels):
if np.max(labels) > np.max(present_labels):
raise ValueError('All labels must be in [0, n labels). '
'Got %d > %d' %
(np.max(labels), np.max(present_labels)))
if np.min(labels) < 0:
raise ValueError('All labels must be in [0, n labels). '
'Got %d < 0' % np.min(labels))
y_true = y_true[:, labels[:n_labels]]
y_pred = y_pred[:, labels[:n_labels]]
# calculate weighted counts
true_and_pred = y_true.multiply(y_pred)
tp_sum = count_nonzero(true_and_pred, axis=sum_axis,
sample_weight=sample_weight)
pred_sum = count_nonzero(y_pred, axis=sum_axis,
sample_weight=sample_weight)
true_sum = count_nonzero(y_true, axis=sum_axis,
sample_weight=sample_weight)
elif average == 'samples':
raise ValueError("Sample-based precision, recall, fscore is "
"not meaningful outside multilabel "
"classification. See the accuracy_score instead.")
else:
le = LabelEncoder()
le.fit(labels)
y_true = le.transform(y_true)
y_pred = le.transform(y_pred)
sorted_labels = le.classes_
# labels are now from 0 to len(labels) - 1 -> use bincount
tp = y_true == y_pred
tp_bins = y_true[tp]
if sample_weight is not None:
tp_bins_weights = np.asarray(sample_weight)[tp]
else:
tp_bins_weights = None
if len(tp_bins):
tp_sum = bincount(tp_bins, weights=tp_bins_weights,
minlength=len(labels))
else:
# Pathological case
true_sum = pred_sum = tp_sum = np.zeros(len(labels))
if len(y_pred):
pred_sum = bincount(y_pred, weights=sample_weight,
minlength=len(labels))
if len(y_true):
true_sum = bincount(y_true, weights=sample_weight,
minlength=len(labels))
# Retain only selected labels
indices = np.searchsorted(sorted_labels, labels[:n_labels])
tp_sum = tp_sum[indices]
true_sum = true_sum[indices]
pred_sum = pred_sum[indices]
if average == 'micro':
tp_sum = np.array([tp_sum.sum()])
pred_sum = np.array([pred_sum.sum()])
true_sum = np.array([true_sum.sum()])
# Finally, we have all our sufficient statistics. Divide! #
beta2 = beta ** 2
with np.errstate(divide='ignore', invalid='ignore'):
# Divide, and on zero-division, set scores to 0 and warn:
# Oddly, we may get an "invalid" rather than a "divide" error
# here.
precision = _prf_divide(tp_sum, pred_sum,
'precision', 'predicted', average, warn_for)
recall = _prf_divide(tp_sum, true_sum,
'recall', 'true', average, warn_for)
# Don't need to warn for F: either P or R warned, or tp == 0 where pos
# and true are nonzero, in which case, F is well-defined and zero
f_score = ((1 + beta2) * precision * recall /
(beta2 * precision + recall))
f_score[tp_sum == 0] = 0.0
# Average the results
if average == 'weighted':
weights = true_sum
if weights.sum() == 0:
return 0, 0, 0, None
elif average == 'samples':
weights = sample_weight
else:
weights = None
if average is not None:
assert average != 'binary' or len(precision) == 1
precision = np.average(precision, weights=weights)
recall = np.average(recall, weights=weights)
f_score = np.average(f_score, weights=weights)
true_sum = None # return no support
return precision, recall, f_score, true_sum
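# A hedged standalone walk-through (NumPy plus LabelEncoder only; the helper
# name is hypothetical) of how the tp/pred/true sums computed above turn into
# per-class precision and recall for the docstring example.
def _prf_sufficient_stats_sketch():
    import numpy as np
    from sklearn.preprocessing import LabelEncoder
    y_true = np.array(['cat', 'dog', 'pig', 'cat', 'dog', 'pig'])
    y_pred = np.array(['cat', 'pig', 'dog', 'cat', 'cat', 'dog'])
    le = LabelEncoder().fit(np.hstack([y_true, y_pred]))
    t, p = le.transform(y_true), le.transform(y_pred)
    n_labels = len(le.classes_)
    tp_sum = np.bincount(t[t == p], minlength=n_labels)  # [2, 0, 0]
    pred_sum = np.bincount(p, minlength=n_labels)        # [3, 2, 1]
    true_sum = np.bincount(t, minlength=n_labels)        # [2, 2, 2]
    precision = tp_sum / pred_sum.astype(float)          # [0.67, 0., 0.]
    recall = tp_sum / true_sum.astype(float)             # [1., 0., 0.]
    # Their unweighted means, ~0.22 and ~0.33, match the 'macro' doctest.
    return precision, recall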
def precision_score(y_true, y_pred, labels=None, pos_label=1,
average='binary', sample_weight=None):
"""Compute the precision
The precision is the ratio ``tp / (tp + fp)`` where ``tp`` is the number of
true positives and ``fp`` the number of false positives. The precision is
intuitively the ability of the classifier not to label as positive a sample
that is negative.
The best value is 1 and the worst value is 0.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
labels : list, optional
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
.. versionchanged:: 0.17
parameter *labels* improved for multiclass problem.
pos_label : str or int, 1 by default
The class to report if ``average='binary'``. Until version 0.18 it is
necessary to set ``pos_label=None`` if seeking to use another averaging
method over binary targets.
average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \
'weighted']
This parameter is required for multiclass/multilabel targets.
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
Note that if ``pos_label`` is given in binary classification with
`average != 'binary'`, only that positive class is reported. This
behavior is deprecated and will change in version 0.18.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
precision : float (if average is not None) or array of float, shape =\
[n_unique_labels]
Precision of the positive class in binary classification or weighted
average of the precision of each class for the multiclass task.
Examples
--------
>>> from sklearn.metrics import precision_score
>>> y_true = [0, 1, 2, 0, 1, 2]
>>> y_pred = [0, 2, 1, 0, 0, 1]
>>> precision_score(y_true, y_pred, average='macro') # doctest: +ELLIPSIS
0.22...
>>> precision_score(y_true, y_pred, average='micro') # doctest: +ELLIPSIS
0.33...
>>> precision_score(y_true, y_pred, average='weighted')
... # doctest: +ELLIPSIS
0.22...
>>> precision_score(y_true, y_pred, average=None) # doctest: +ELLIPSIS
array([ 0.66..., 0. , 0. ])
"""
p, _, _, _ = precision_recall_fscore_support(y_true, y_pred,
labels=labels,
pos_label=pos_label,
average=average,
warn_for=('precision',),
sample_weight=sample_weight)
return p
def recall_score(y_true, y_pred, labels=None, pos_label=1, average='binary',
sample_weight=None):
"""Compute the recall
The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number of
true positives and ``fn`` the number of false negatives. The recall is
intuitively the ability of the classifier to find all the positive samples.
The best value is 1 and the worst value is 0.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
labels : list, optional
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
.. versionchanged:: 0.17
parameter *labels* improved for multiclass problem.
pos_label : str or int, 1 by default
The class to report if ``average='binary'``. Until version 0.18 it is
necessary to set ``pos_label=None`` if seeking to use another averaging
method over binary targets.
average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \
'weighted']
This parameter is required for multiclass/multilabel targets.
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
Note that if ``pos_label`` is given in binary classification with
`average != 'binary'`, only that positive class is reported. This
behavior is deprecated and will change in version 0.18.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
recall : float (if average is not None) or array of float, shape =\
[n_unique_labels]
Recall of the positive class in binary classification or weighted
average of the recall of each class for the multiclass task.
Examples
--------
>>> from sklearn.metrics import recall_score
>>> y_true = [0, 1, 2, 0, 1, 2]
>>> y_pred = [0, 2, 1, 0, 0, 1]
>>> recall_score(y_true, y_pred, average='macro') # doctest: +ELLIPSIS
0.33...
>>> recall_score(y_true, y_pred, average='micro') # doctest: +ELLIPSIS
0.33...
>>> recall_score(y_true, y_pred, average='weighted') # doctest: +ELLIPSIS
0.33...
>>> recall_score(y_true, y_pred, average=None)
array([ 1., 0., 0.])
"""
_, r, _, _ = precision_recall_fscore_support(y_true, y_pred,
labels=labels,
pos_label=pos_label,
average=average,
warn_for=('recall',),
sample_weight=sample_weight)
return r
def classification_report(y_true, y_pred, labels=None, target_names=None,
sample_weight=None, digits=2):
"""Build a text report showing the main classification metrics
Read more in the :ref:`User Guide <classification_report>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
labels : array, shape = [n_labels]
Optional list of label indices to include in the report.
target_names : list of strings
Optional display names matching the labels (same order).
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
digits : int
Number of digits for formatting output floating point values
Returns
-------
report : string
Text summary of the precision, recall, F1 score for each class.
Examples
--------
>>> from sklearn.metrics import classification_report
>>> y_true = [0, 1, 2, 2, 2]
>>> y_pred = [0, 0, 2, 2, 1]
>>> target_names = ['class 0', 'class 1', 'class 2']
>>> print(classification_report(y_true, y_pred, target_names=target_names))
precision recall f1-score support
<BLANKLINE>
class 0 0.50 1.00 0.67 1
class 1 0.00 0.00 0.00 1
class 2 1.00 0.67 0.80 3
<BLANKLINE>
avg / total 0.70 0.60 0.61 5
<BLANKLINE>
"""
if labels is None:
labels = unique_labels(y_true, y_pred)
else:
labels = np.asarray(labels)
last_line_heading = 'avg / total'
if target_names is None:
width = len(last_line_heading)
target_names = ['%s' % l for l in labels]
else:
width = max(len(cn) for cn in target_names)
width = max(width, len(last_line_heading), digits)
headers = ["precision", "recall", "f1-score", "support"]
fmt = '%% %ds' % width # first column: class name
fmt += ' '
fmt += ' '.join(['% 9s' for _ in headers])
fmt += '\n'
headers = [""] + headers
report = fmt % tuple(headers)
report += '\n'
p, r, f1, s = precision_recall_fscore_support(y_true, y_pred,
labels=labels,
average=None,
sample_weight=sample_weight)
for i, label in enumerate(labels):
values = [target_names[i]]
for v in (p[i], r[i], f1[i]):
values += ["{0:0.{1}f}".format(v, digits)]
values += ["{0}".format(s[i])]
report += fmt % tuple(values)
report += '\n'
# compute averages
values = [last_line_heading]
for v in (np.average(p, weights=s),
np.average(r, weights=s),
np.average(f1, weights=s)):
values += ["{0:0.{1}f}".format(v, digits)]
values += ['{0}'.format(np.sum(s))]
report += fmt % tuple(values)
return report
def hamming_loss(y_true, y_pred, classes=None, sample_weight=None):
"""Compute the average Hamming loss.
The Hamming loss is the fraction of labels that are incorrectly predicted.
Read more in the :ref:`User Guide <hamming_loss>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) labels.
y_pred : 1d array-like, or label indicator array / sparse matrix
Predicted labels, as returned by a classifier.
classes : array, shape = [n_labels], optional
Integer array of labels.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
loss : float or int,
Return the average Hamming loss between element of ``y_true`` and
``y_pred``.
See Also
--------
accuracy_score, jaccard_similarity_score, zero_one_loss
Notes
-----
    In multiclass classification, the Hamming loss corresponds to the Hamming
    distance between ``y_true`` and ``y_pred`` which is equivalent to the
    subset ``zero_one_loss`` function.
    In multilabel classification, the Hamming loss is different from the
    subset zero-one loss. The zero-one loss considers the entire set of labels
    for a given sample incorrect if it does not entirely match the true set of
    labels. Hamming loss is more forgiving in that it penalizes the individual
    labels.
    The Hamming loss is upper bounded by the subset zero-one loss. When
    normalized over samples, the Hamming loss is always between 0 and 1.
References
----------
.. [1] Grigorios Tsoumakas, Ioannis Katakis. Multi-Label Classification:
An Overview. International Journal of Data Warehousing & Mining,
3(3), 1-13, July-September 2007.
.. [2] `Wikipedia entry on the Hamming distance
<http://en.wikipedia.org/wiki/Hamming_distance>`_
Examples
--------
>>> from sklearn.metrics import hamming_loss
>>> y_pred = [1, 2, 3, 4]
>>> y_true = [2, 2, 3, 4]
>>> hamming_loss(y_true, y_pred)
0.25
In the multilabel case with binary label indicators:
>>> hamming_loss(np.array([[0, 1], [1, 1]]), np.zeros((2, 2)))
0.75
"""
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
if classes is None:
classes = unique_labels(y_true, y_pred)
else:
classes = np.asarray(classes)
if sample_weight is None:
weight_average = 1.
else:
weight_average = np.mean(sample_weight)
if y_type.startswith('multilabel'):
n_differences = count_nonzero(y_true - y_pred, sample_weight=sample_weight)
return (n_differences / (y_true.shape[0] * len(classes) * weight_average))
elif y_type in ["binary", "multiclass"]:
return _weighted_sum(y_true != y_pred, sample_weight, normalize=True)
else:
raise ValueError("{0} is not supported".format(y_type))
def log_loss(y_true, y_pred, eps=1e-15, normalize=True, sample_weight=None):
"""Log loss, aka logistic loss or cross-entropy loss.
This is the loss function used in (multinomial) logistic regression
and extensions of it such as neural networks, defined as the negative
log-likelihood of the true labels given a probabilistic classifier's
predictions. For a single sample with true label yt in {0,1} and
estimated probability yp that yt = 1, the log loss is
-log P(yt|yp) = -(yt log(yp) + (1 - yt) log(1 - yp))
Read more in the :ref:`User Guide <log_loss>`.
Parameters
----------
y_true : array-like or label indicator matrix
Ground truth (correct) labels for n_samples samples.
y_pred : array-like of float, shape = (n_samples, n_classes)
Predicted probabilities, as returned by a classifier's
predict_proba method.
eps : float
Log loss is undefined for p=0 or p=1, so probabilities are
clipped to max(eps, min(1 - eps, p)).
normalize : bool, optional (default=True)
If true, return the mean loss per sample.
Otherwise, return the sum of the per-sample losses.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
loss : float
Examples
--------
>>> log_loss(["spam", "ham", "ham", "spam"], # doctest: +ELLIPSIS
... [[.1, .9], [.9, .1], [.8, .2], [.35, .65]])
0.21616...
References
----------
C.M. Bishop (2006). Pattern Recognition and Machine Learning. Springer,
p. 209.
Notes
-----
The logarithm used is the natural logarithm (base-e).
"""
lb = LabelBinarizer()
T = lb.fit_transform(y_true)
if T.shape[1] == 1:
T = np.append(1 - T, T, axis=1)
# Clipping
Y = np.clip(y_pred, eps, 1 - eps)
# This happens in cases when elements in y_pred have type "str".
if not isinstance(Y, np.ndarray):
raise ValueError("y_pred should be an array of floats.")
# If y_pred is of single dimension, assume y_true to be binary
# and then check.
if Y.ndim == 1:
Y = Y[:, np.newaxis]
if Y.shape[1] == 1:
Y = np.append(1 - Y, Y, axis=1)
# Check if dimensions are consistent.
check_consistent_length(T, Y)
T = check_array(T)
Y = check_array(Y)
if T.shape[1] != Y.shape[1]:
raise ValueError("y_true and y_pred have different number of classes "
"%d, %d" % (T.shape[1], Y.shape[1]))
# Renormalize
Y /= Y.sum(axis=1)[:, np.newaxis]
loss = -(T * np.log(Y)).sum(axis=1)
return _weighted_sum(loss, sample_weight, normalize)
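# A by-hand check (plain NumPy) of the docstring example above: every sample
# contributes -log of the probability assigned to its true class, and the
# reported loss is the (optionally weighted) mean of those contributions.
def _log_loss_by_hand_sketch():
    import numpy as np
    # P(true label) for ["spam", "ham", "ham", "spam"] under the docstring's
    # predicted probabilities, with columns ordered as [ham, spam].
    probs_true_class = np.array([0.9, 0.9, 0.8, 0.65])
    return np.mean(-np.log(probs_true_class))  # ~0.21616, as in the doctest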
def hinge_loss(y_true, pred_decision, labels=None, sample_weight=None):
"""Average hinge loss (non-regularized)
In binary class case, assuming labels in y_true are encoded with +1 and -1,
when a prediction mistake is made, ``margin = y_true * pred_decision`` is
always negative (since the signs disagree), implying ``1 - margin`` is
always greater than 1. The cumulated hinge loss is therefore an upper
bound of the number of mistakes made by the classifier.
In multiclass case, the function expects that either all the labels are
included in y_true or an optional labels argument is provided which
contains all the labels. The multilabel margin is calculated according
to Crammer-Singer's method. As in the binary case, the cumulated hinge loss
is an upper bound of the number of mistakes made by the classifier.
Read more in the :ref:`User Guide <hinge_loss>`.
Parameters
----------
y_true : array, shape = [n_samples]
True target, consisting of integers of two values. The positive label
must be greater than the negative label.
pred_decision : array, shape = [n_samples] or [n_samples, n_classes]
Predicted decisions, as output by decision_function (floats).
labels : array, optional, default None
Contains all the labels for the problem. Used in multiclass hinge loss.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
loss : float
References
----------
.. [1] `Wikipedia entry on the Hinge loss
<http://en.wikipedia.org/wiki/Hinge_loss>`_
.. [2] Koby Crammer, Yoram Singer. On the Algorithmic
Implementation of Multiclass Kernel-based Vector
Machines. Journal of Machine Learning Research 2,
(2001), 265-292
.. [3] `L1 AND L2 Regularization for Multiclass Hinge Loss Models
by Robert C. Moore, John DeNero.
<http://www.ttic.edu/sigml/symposium2011/papers/
Moore+DeNero_Regularization.pdf>`_
Examples
--------
>>> from sklearn import svm
>>> from sklearn.metrics import hinge_loss
>>> X = [[0], [1]]
>>> y = [-1, 1]
>>> est = svm.LinearSVC(random_state=0)
>>> est.fit(X, y)
LinearSVC(C=1.0, class_weight=None, dual=True, fit_intercept=True,
intercept_scaling=1, loss='squared_hinge', max_iter=1000,
multi_class='ovr', penalty='l2', random_state=0, tol=0.0001,
verbose=0)
>>> pred_decision = est.decision_function([[-2], [3], [0.5]])
>>> pred_decision # doctest: +ELLIPSIS
array([-2.18..., 2.36..., 0.09...])
>>> hinge_loss([-1, 1, 1], pred_decision) # doctest: +ELLIPSIS
0.30...
In the multiclass case:
>>> X = np.array([[0], [1], [2], [3]])
>>> Y = np.array([0, 1, 2, 3])
>>> labels = np.array([0, 1, 2, 3])
>>> est = svm.LinearSVC()
>>> est.fit(X, Y)
LinearSVC(C=1.0, class_weight=None, dual=True, fit_intercept=True,
intercept_scaling=1, loss='squared_hinge', max_iter=1000,
multi_class='ovr', penalty='l2', random_state=None, tol=0.0001,
verbose=0)
>>> pred_decision = est.decision_function([[-1], [2], [3]])
>>> y_true = [0, 2, 3]
>>> hinge_loss(y_true, pred_decision, labels) #doctest: +ELLIPSIS
0.56...
"""
check_consistent_length(y_true, pred_decision, sample_weight)
pred_decision = check_array(pred_decision, ensure_2d=False)
y_true = column_or_1d(y_true)
y_true_unique = np.unique(y_true)
if y_true_unique.size > 2:
if (labels is None and pred_decision.ndim > 1 and
(np.size(y_true_unique) != pred_decision.shape[1])):
raise ValueError("Please include all labels in y_true "
"or pass labels as third argument")
if labels is None:
labels = y_true_unique
le = LabelEncoder()
le.fit(labels)
y_true = le.transform(y_true)
mask = np.ones_like(pred_decision, dtype=bool)
mask[np.arange(y_true.shape[0]), y_true] = False
margin = pred_decision[~mask]
margin -= np.max(pred_decision[mask].reshape(y_true.shape[0], -1),
axis=1)
else:
# Handles binary class case
# this code assumes that positive and negative labels
# are encoded as +1 and -1 respectively
pred_decision = column_or_1d(pred_decision)
pred_decision = np.ravel(pred_decision)
lbin = LabelBinarizer(neg_label=-1)
y_true = lbin.fit_transform(y_true)[:, 0]
try:
margin = y_true * pred_decision
except TypeError:
raise TypeError("pred_decision should be an array of floats.")
losses = 1 - margin
# The hinge_loss doesn't penalize good enough predictions.
losses[losses <= 0] = 0
return np.average(losses, weights=sample_weight)
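# A sketch of the binary branch above, reusing the rounded decision values
# shown in the docstring: loss_i = max(0, 1 - y_i * f(x_i)), averaged over
# samples.
def _binary_hinge_sketch():
    import numpy as np
    y_true = np.array([-1, 1, 1])
    pred_decision = np.array([-2.18, 2.36, 0.09])  # rounded docstring values
    losses = np.maximum(0, 1 - y_true * pred_decision)  # [0., 0., 0.91]
    return losses.mean()  # ~0.30, as in the docstring example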
def _check_binary_probabilistic_predictions(y_true, y_prob):
"""Check that y_true is binary and y_prob contains valid probabilities"""
check_consistent_length(y_true, y_prob)
labels = np.unique(y_true)
if len(labels) != 2:
raise ValueError("Only binary classification is supported. "
"Provided labels %s." % labels)
if y_prob.max() > 1:
raise ValueError("y_prob contains values greater than 1.")
if y_prob.min() < 0:
raise ValueError("y_prob contains values less than 0.")
return label_binarize(y_true, labels)[:, 0]
def brier_score_loss(y_true, y_prob, sample_weight=None, pos_label=None):
"""Compute the Brier score.
The smaller the Brier score, the better, hence the naming with "loss".
    Across all items in a set of N predictions, the Brier score measures the
mean squared difference between (1) the predicted probability assigned
to the possible outcomes for item i, and (2) the actual outcome.
Therefore, the lower the Brier score is for a set of predictions, the
better the predictions are calibrated. Note that the Brier score always
takes on a value between zero and one, since this is the largest
possible difference between a predicted probability (which must be
between zero and one) and the actual outcome (which can take on values
of only 0 and 1).
The Brier score is appropriate for binary and categorical outcomes that
can be structured as true or false, but is inappropriate for ordinal
variables which can take on three or more values (this is because the
Brier score assumes that all possible outcomes are equivalently
"distant" from one another). Which label is considered to be the positive
    label is controlled via the parameter pos_label, which defaults to the
    greater label if not specified.
Read more in the :ref:`User Guide <calibration>`.
Parameters
----------
y_true : array, shape (n_samples,)
True targets.
y_prob : array, shape (n_samples,)
Probabilities of the positive class.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
pos_label : int (default: None)
Label of the positive class. If None, the maximum label is used as
positive class
Returns
-------
score : float
Brier score
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import brier_score_loss
>>> y_true = np.array([0, 1, 1, 0])
>>> y_true_categorical = np.array(["spam", "ham", "ham", "spam"])
>>> y_prob = np.array([0.1, 0.9, 0.8, 0.3])
>>> brier_score_loss(y_true, y_prob) # doctest: +ELLIPSIS
0.037...
>>> brier_score_loss(y_true, 1-y_prob, pos_label=0) # doctest: +ELLIPSIS
0.037...
>>> brier_score_loss(y_true_categorical, y_prob, \
pos_label="ham") # doctest: +ELLIPSIS
0.037...
>>> brier_score_loss(y_true, np.array(y_prob) > 0.5)
0.0
References
----------
http://en.wikipedia.org/wiki/Brier_score
"""
y_true = column_or_1d(y_true)
y_prob = column_or_1d(y_prob)
if pos_label is None:
pos_label = y_true.max()
y_true = np.array(y_true == pos_label, int)
y_true = _check_binary_probabilistic_predictions(y_true, y_prob)
return np.average((y_true - y_prob) ** 2, weights=sample_weight)
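# A by-hand check of the docstring example above: the Brier score is the mean
# squared difference between the predicted probability and the 0/1 outcome.
def _brier_by_hand_sketch():
    import numpy as np
    y_true = np.array([0, 1, 1, 0])
    y_prob = np.array([0.1, 0.9, 0.8, 0.3])
    return np.mean((y_true - y_prob) ** 2)  # 0.0375, i.e. the 0.037... doctest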
| bsd-3-clause |
rhiever/sklearn-benchmarks | model_code/grid_search/evaluate_model.py | 2 | 2557 | import sys
import itertools
import pandas as pd
from sklearn.model_selection import cross_val_predict, StratifiedKFold
from sklearn.metrics import accuracy_score, f1_score
from sklearn.pipeline import make_pipeline
from tpot_metrics import balanced_accuracy_score
import warnings
def evaluate_model(dataset, pipeline_components, pipeline_parameters):
input_data = pd.read_csv(dataset, compression='gzip', sep='\t')
features = input_data.drop('class', axis=1).values.astype(float)
labels = input_data['class'].values
pipelines = [dict(zip(pipeline_parameters.keys(), list(parameter_combination)))
for parameter_combination in itertools.product(*pipeline_parameters.values())]
with warnings.catch_warnings():
# Squash warning messages. Turn this off when debugging!
warnings.simplefilter('ignore')
for pipe_parameters in pipelines:
pipeline = []
for component in pipeline_components:
if component in pipe_parameters:
args = pipe_parameters[component]
pipeline.append(component(**args))
else:
pipeline.append(component())
try:
clf = make_pipeline(*pipeline)
cv_predictions = cross_val_predict(estimator=clf, X=features, y=labels, cv=StratifiedKFold(n_splits=10, shuffle=True, random_state=90483257))
accuracy = accuracy_score(labels, cv_predictions)
macro_f1 = f1_score(labels, cv_predictions, average='macro')
balanced_accuracy = balanced_accuracy_score(labels, cv_predictions)
except KeyboardInterrupt:
sys.exit(1)
# This is a catch-all to make sure that the evaluation won't crash due to a bad parameter
# combination or bad data. Turn this off when debugging!
            except Exception:
continue
classifier_class = pipeline_components[-1]
param_string = ','.join(['{}={}'.format(parameter, value)
for parameter, value in pipe_parameters[classifier_class].items()])
out_text = '\t'.join([dataset.split('/')[-1][:-7],
classifier_class.__name__,
param_string,
str(accuracy),
str(macro_f1),
str(balanced_accuracy)])
print(out_text)
sys.stdout.flush()
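# Hypothetical invocation sketch: the dataset path, pipeline components and
# parameter grid below are illustrative placeholders only, showing the
# expected {component_class: [param_dict, ...]} structure.
if __name__ == '__main__':
    from sklearn.preprocessing import StandardScaler
    from sklearn.linear_model import LogisticRegression
    evaluate_model('datasets/example-dataset.tsv.gz',
                   pipeline_components=[StandardScaler, LogisticRegression],
                   pipeline_parameters={LogisticRegression: [{'C': 0.1},
                                                             {'C': 1.0}]})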
| mit |
hammerlab/immuno | test/test_group_epitopes.py | 1 | 2479 | import pandas as pd
from immuno.group_epitopes import group_epitopes_dataframe
from immuno.peptide_binding_measure import (
IC50_FIELD_NAME, PERCENTILE_RANK_FIELD_NAME
)
"""
We're constructing a DataFrame with the following fields:
- chr
- pos
- ref
- alt
- TranscriptId
- SourceSequence
- MutationStart
- MutationEnd
- GeneMutationInfo
- PeptideMutationInfo
- Gene
- GeneInfo
- Epitope
- EpitopeStart
- EpitopeEnd
- Allele
- MHC_IC50
- MHC_PercentileRank
"""
epitopes_df = pd.DataFrame()
epitopes_df['chr'] = ['1', '1', 'X', 'X',]
epitopes_df['pos'] = [10, 10, 2000, 2000]
epitopes_df['ref'] = ['A', 'A', '', '']
epitopes_df['alt'] = ['T', 'T', 'C', 'C']
epitopes_df['TranscriptId'] = [
'ENST00000528762', 'ENST00000528762',
'ENST00000544455', 'ENST00000544455'
]
epitopes_df['SourceSequence'] = [
'ASIINFKELA', 'ASIINFKELA',
'ASILLLVFYW', 'ASILLLVFYW',
]
epitopes_df['MutationStart'] = [3, 3, 5, 5]
epitopes_df['MutationEnd'] = [4, 4, 6, 6]
epitopes_df['GeneMutationInfo'] = ['A>T', 'A>T', 'insC', 'insC']
epitopes_df['PeptideMutationInfo'] = ['L>I', 'L>I', 'fs', 'fs']
epitopes_df['Gene'] = ['SMAD4', 'SMAD4', 'TP53', 'TP53']
epitopes_df['GeneInfo'] = [None, None, None, None]
epitopes_df['Epitope'] = ['SIINFKEL', 'SIINFKEL', 'SILLLVFY', 'SILLLVFY']
epitopes_df['EpitopeStart'] = [1, 1, 1, 1]
epitopes_df['EpitopeEnd'] = [10, 10, 10, 10]
epitopes_df['Allele'] = [
'HLA-A*02:01',
'HLA-B*08:02',
'HLA-A*02:01',
'HLA-B*08:02'
]
epitopes_df[IC50_FIELD_NAME] = [0.9, 205.9, 5039.0, 112.9]
epitopes_df[PERCENTILE_RANK_FIELD_NAME] = [0.1, 9.2, 25.2, 3.4]
def test_group_epitopes_dataframe():
grouped = group_epitopes_dataframe(epitopes_df)
assert len(grouped) == 2
assert isinstance(grouped, list)
for elt in grouped:
assert isinstance(elt, dict), \
"Wrong type for %s : %s" % (elt, type(elt))
assert elt['Gene'] in ("SMAD4", "TP53")
assert "Epitopes" in elt, elt.keys()
assert len(elt['Epitopes']) == 1
epitopes = elt['Epitopes']
assert isinstance(epitopes, list)
epitope = epitopes[0]
assert isinstance(epitope, dict), "Wrong type for epitope %s : %s" % (
epitope, type(epitope))
assert 'MHC_Allele_Scores' in epitope, epitope.keys()
assert len(epitope['MHC_Allele_Scores']) == 2
if __name__ == '__main__':
test_group_epitopes_dataframe() | apache-2.0 |
jwlawson/tensorflow | tensorflow/contrib/labeled_tensor/python/ops/ops.py | 77 | 46403 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Non-core ops for LabeledTensor."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import types
import numpy as np
from six import string_types
from tensorflow.contrib.labeled_tensor.python.ops import _typecheck as tc
from tensorflow.contrib.labeled_tensor.python.ops import core
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import numerics
from tensorflow.python.ops import random_ops
from tensorflow.python.training import input # pylint: disable=redefined-builtin
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensor, ops.Tensor, core.Axis,
tc.Optional(string_types))
def _gather_1d_on_axis(labeled_tensor, indexer, axis, name=None):
with ops.name_scope(name, 'lt_take', [labeled_tensor]) as scope:
temp_axes = core.Axes([axis] + list(
labeled_tensor.axes.remove(axis.name).values()))
transposed = core.transpose(labeled_tensor, temp_axes.keys())
indexed = core.LabeledTensor(
array_ops.gather(transposed.tensor, indexer), temp_axes)
return core.transpose(indexed, labeled_tensor.axes.keys(), name=scope)
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike,
tc.Mapping(string_types,
tc.Union(slice, collections.Hashable, list)),
tc.Optional(string_types))
def select(labeled_tensor, selection, name=None):
"""Slice out a subset of the tensor.
Args:
labeled_tensor: The input tensor.
selection: A dictionary mapping an axis name to a scalar, slice or list of
values to select. Currently supports two types of selections:
(a) Any number of scalar and/or slice selections.
(b) Exactly one list selection, without any scalars or slices.
name: Optional op name.
Returns:
The selection as a `LabeledTensor`.
Raises:
ValueError: If the tensor doesn't have an axis in the selection or if
that axis lacks labels.
KeyError: If any labels in a selection are not found in the original axis.
NotImplementedError: If you attempt to combine a list selection with
scalar selection or another list selection.
"""
with ops.name_scope(name, 'lt_select', [labeled_tensor]) as scope:
labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
slices = {}
indexers = {}
for axis_name, value in selection.items():
if axis_name not in labeled_tensor.axes:
raise ValueError(
'The tensor does not have an axis named %s. Its axes are: %r' %
(axis_name, labeled_tensor.axes.keys()))
axis = labeled_tensor.axes[axis_name]
if axis.labels is None:
raise ValueError(
'The axis named %s does not have labels. The axis is: %r' %
(axis_name, axis))
if isinstance(value, slice):
# TODO(shoyer): consider deprecating using slices in favor of lists
if value.start is None:
start = None
else:
start = axis.index(value.start)
if value.stop is None:
stop = None
else:
# For now, follow the pandas convention of making labeled slices
# inclusive of both bounds.
stop = axis.index(value.stop) + 1
if value.step is not None:
raise NotImplementedError('slicing with a step is not yet supported')
slices[axis_name] = slice(start, stop)
# Needs to be after checking for slices, since slice objects claim to be
# instances of collections.Hashable but hash() on them fails.
elif isinstance(value, collections.Hashable):
slices[axis_name] = axis.index(value)
elif isinstance(value, list):
if indexers:
raise NotImplementedError(
'select does not yet support more than one list selection at '
'the same time')
indexer = [axis.index(v) for v in value]
indexers[axis_name] = ops.convert_to_tensor(indexer, dtype=dtypes.int64)
else:
# If type checking is working properly, this shouldn't be possible.
raise TypeError('cannot handle arbitrary types')
if indexers and slices:
raise NotImplementedError(
'select does not yet support combined scalar and list selection')
# For now, handle array selection separately, because tf.gather_nd does
# not support gradients yet. Later, using gather_nd will let us combine
# these paths.
if indexers:
(axis_name, indexer), = indexers.items()
axis = core.Axis(axis_name, selection[axis_name])
return _gather_1d_on_axis(labeled_tensor, indexer, axis, name=scope)
else:
return core.slice_function(labeled_tensor, slices, name=scope)
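# Hedged usage sketch for `select` (hypothetical data; relies on the `core`
# and `ops` modules imported at the top of this file). Label-based slices are
# inclusive of both bounds, as implemented above.
def _select_usage_sketch():
  lt = core.LabeledTensor(
      ops.convert_to_tensor([[1, 2], [3, 4]]),
      [('row', ['a', 'b']), ('col', ['x', 'y'])])
  by_scalar = select(lt, {'row': 'a'})             # remaining axes: ['col']
  by_slice = select(lt, {'col': slice('x', 'y')})  # keeps both columns
  by_list = select(lt, {'row': ['b']})             # keeps a length-1 'row' axis
  return by_scalar, by_slice, by_list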
@tc.returns(core.LabeledTensor)
@tc.accepts(
tc.Collection(core.LabeledTensorLike), string_types,
tc.Optional(string_types))
def concat(labeled_tensors, axis_name, name=None):
"""Concatenate tensors along a dimension.
See tf.concat.
Args:
labeled_tensors: A list of input LabeledTensors.
axis_name: The name of the axis along which to concatenate.
name: Optional op name.
Returns:
The concatenated tensor.
The coordinate labels for the concatenation dimension are also concatenated,
if they are available for every tensor.
Raises:
    ValueError: If fewer than one tensor input is provided, if the tensors
have incompatible axes, or if `axis_name` isn't the name of an axis.
"""
with ops.name_scope(name, 'lt_concat', labeled_tensors) as scope:
labeled_tensors = [
core.convert_to_labeled_tensor(lt) for lt in labeled_tensors
]
if len(labeled_tensors) < 1:
raise ValueError('concat expects at least 1 tensor, but received %s' %
labeled_tensors)
# All tensors must have these axes.
axes_0 = labeled_tensors[0].axes
axis_names = list(axes_0.keys())
if axis_name not in axis_names:
raise ValueError('%s not in %s' % (axis_name, axis_names))
shared_axes = axes_0.remove(axis_name)
tensors = [labeled_tensors[0].tensor]
concat_axis_list = [axes_0[axis_name]]
for labeled_tensor in labeled_tensors[1:]:
current_shared_axes = labeled_tensor.axes.remove(axis_name)
if current_shared_axes != shared_axes:
# TODO(shoyer): add more specific checks about what went wrong,
# including raising AxisOrderError when appropriate
raise ValueError('Mismatched shared axes: the first tensor '
'had axes %r but this tensor has axes %r.' %
(shared_axes, current_shared_axes))
# Accumulate the axis labels, if they're available.
concat_axis_list.append(labeled_tensor.axes[axis_name])
tensors.append(labeled_tensor.tensor)
concat_axis = core.concat_axes(concat_axis_list)
concat_dimension = axis_names.index(axis_name)
concat_tensor = array_ops.concat(tensors, concat_dimension, name=scope)
values = list(axes_0.values())
concat_axes = (values[:concat_dimension] + [concat_axis] +
values[concat_dimension + 1:])
return core.LabeledTensor(concat_tensor, concat_axes)
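# Hedged usage sketch for `concat` (hypothetical data): the non-concatenated
# axes must match exactly, and the labels along the concatenation axis are
# themselves concatenated.
def _concat_usage_sketch():
  first = core.LabeledTensor(
      ops.convert_to_tensor([[1, 2]]), [('row', ['a']), ('col', ['x', 'y'])])
  second = core.LabeledTensor(
      ops.convert_to_tensor([[3, 4]]), [('row', ['b']), ('col', ['x', 'y'])])
  return concat([first, second], 'row')  # 'row' now has labels ['a', 'b']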
# TODO(shoyer): rename pack/unpack to stack/unstack
@tc.returns(core.LabeledTensor)
@tc.accepts(
tc.Collection(core.LabeledTensorLike),
tc.Union(string_types, core.AxisLike), int, tc.Optional(string_types))
def pack(labeled_tensors, new_axis, axis_position=0, name=None):
"""Pack tensors along a new axis.
See tf.pack.
Args:
labeled_tensors: The input tensors, which must have identical axes.
new_axis: The name of the new axis, or a tuple containing the name
and coordinate labels.
axis_position: Optional integer position at which to insert the new axis.
name: Optional op name.
Returns:
The packed tensors as a single LabeledTensor, with `new_axis` in the given
`axis_position`.
Raises:
    ValueError: If fewer than one input tensor is provided, or if the tensors
don't have identical axes.
"""
with ops.name_scope(name, 'lt_pack', labeled_tensors) as scope:
labeled_tensors = [
core.convert_to_labeled_tensor(lt) for lt in labeled_tensors
]
if len(labeled_tensors) < 1:
      raise ValueError('pack expects at least 1 tensor, but received %s' %
labeled_tensors)
axes_0 = labeled_tensors[0].axes
for t in labeled_tensors:
if t.axes != axes_0:
raise ValueError('Non-identical axes. Expected %s but got %s' %
(axes_0, t.axes))
pack_op = array_ops.stack(
[t.tensor for t in labeled_tensors], axis=axis_position, name=scope)
axes = list(axes_0.values())
axes.insert(axis_position, new_axis)
return core.LabeledTensor(pack_op, axes)
@tc.returns(tc.List(core.LabeledTensor))
@tc.accepts(core.LabeledTensorLike,
tc.Optional(string_types), tc.Optional(string_types))
def unpack(labeled_tensor, axis_name=None, name=None):
"""Unpack the tensor.
See tf.unpack.
Args:
labeled_tensor: The input tensor.
axis_name: Optional name of axis to unpack. By default, the first axis is
used.
name: Optional op name.
Returns:
The list of unpacked LabeledTensors.
Raises:
ValueError: If `axis_name` is not an axis on the input.
"""
with ops.name_scope(name, 'lt_unpack', [labeled_tensor]) as scope:
labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
axis_names = list(labeled_tensor.axes.keys())
if axis_name is None:
axis_name = axis_names[0]
if axis_name not in axis_names:
raise ValueError('%s not in %s' % (axis_name, axis_names))
axis = axis_names.index(axis_name)
unpack_ops = array_ops.unstack(labeled_tensor.tensor, axis=axis, name=scope)
axes = [a for i, a in enumerate(labeled_tensor.axes.values()) if i != axis]
return [core.LabeledTensor(t, axes) for t in unpack_ops]
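# Hedged usage sketch for `pack` and `unpack` (hypothetical data): `pack`
# stacks tensors with identical axes under a new axis, and `unpack` reverses
# the operation.
def _pack_unpack_usage_sketch():
  first = core.LabeledTensor(
      ops.convert_to_tensor([1, 2]), [('col', ['x', 'y'])])
  second = core.LabeledTensor(
      ops.convert_to_tensor([3, 4]), [('col', ['x', 'y'])])
  packed = pack([first, second], 'batch')  # axes: ['batch', 'col']
  unpacked = unpack(packed, 'batch')       # a list of two LabeledTensors
  return packed, unpacked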
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike,
tc.Collection(string_types),
tc.Collection(tc.Union(string_types, core.AxisLike)),
tc.Optional(string_types))
def reshape(labeled_tensor, existing_axes, new_axes, name=None):
"""Reshape specific axes of a LabeledTensor.
Non-indicated axes remain in their original locations.
Args:
labeled_tensor: The input tensor.
existing_axes: List of axis names found on the input tensor. These must
appear sequentially in the list of axis names on the input. In other
words, they must be a valid slice of `list(labeled_tensor.axes.keys())`.
new_axes: List of strings, tuples of (axis_name, axis_value) or Axis objects
providing new axes with which to replace `existing_axes` in the reshaped
result. At most one element of `new_axes` may be a string, indicating an
axis with unknown size.
name: Optional op name.
Returns:
The reshaped LabeledTensor.
Raises:
ValueError: If `existing_axes` are not all axes on the input, or if more
than one of `new_axes` has unknown size.
AxisOrderError: If `existing_axes` are not a slice of axis names on the
input.
"""
with ops.name_scope(name, 'lt_reshape', [labeled_tensor]) as scope:
labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
original_axis_names = list(labeled_tensor.axes.keys())
existing_axes = list(existing_axes)
if not set(existing_axes) <= set(original_axis_names):
raise ValueError('existing_axes %r are not contained in the set of axis '
'names %r on the input labeled tensor' %
(existing_axes, original_axis_names))
start = original_axis_names.index(existing_axes[0])
stop = original_axis_names.index(existing_axes[-1]) + 1
if existing_axes != original_axis_names[start:stop]:
# We could support existing_axes that aren't a slice by using transpose,
# but that could lead to unpredictable performance consequences because
# transposes are not free in TensorFlow. If we did transpose
# automatically, the user might never realize that their data is being
      # produced with the wrong order. (The latter will occur with some frequency
      # because of how broadcasting automatically chooses axis order.)
# So for now we've taken the strict approach.
raise core.AxisOrderError(
'existing_axes %r are not a slice of axis names %r on the input '
'labeled tensor. Use `transpose` or `impose_axis_order` to reorder '
'axes on the input explicitly.' %
(existing_axes, original_axis_names))
if sum(isinstance(axis, string_types) for axis in new_axes) > 1:
raise ValueError(
'at most one axis in new_axes can have unknown size. All other '
'axes must have an indicated integer size or labels: %r' % new_axes)
original_values = list(labeled_tensor.axes.values())
axis_size = lambda axis: -1 if axis.size is None else axis.size
shape = [axis_size(axis) for axis in original_values[:start]]
for axis_ref in new_axes:
if isinstance(axis_ref, string_types):
shape.append(-1)
else:
axis = core.as_axis(axis_ref)
shape.append(axis_size(axis))
shape.extend(axis_size(axis) for axis in original_values[stop:])
reshaped_tensor = array_ops.reshape(
labeled_tensor.tensor, shape, name=scope)
axes = original_values[:start] + list(new_axes) + original_values[stop:]
return core.LabeledTensor(reshaped_tensor, axes)
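# Example usage (illustrative sketch): assuming `image_lt` has axes
# ['batch', 'row', 'column'] with sizes [2, 3, 4], the trailing two axes can be
# collapsed into a single 'pixel' axis:
#
#   flat_lt = reshape(image_lt, ['row', 'column'], [('pixel', 12)])
#   # flat_lt now has axes ['batch', 'pixel']; 'row' and 'column' form a valid
#   # contiguous slice of the original axis order, as required above.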
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike, string_types, string_types,
tc.Optional(string_types))
def rename_axis(labeled_tensor, existing_name, new_name, name=None):
"""Rename an axis of LabeledTensor.
Args:
labeled_tensor: The input tensor.
existing_name: Name for an existing axis on the input.
new_name: Desired replacement name.
name: Optional op name.
Returns:
LabeledTensor with renamed axis.
Raises:
ValueError: If `existing_name` is not an axis on the input.
"""
with ops.name_scope(name, 'lt_rename_axis', [labeled_tensor]) as scope:
if existing_name not in labeled_tensor.axes:
      raise ValueError('existing_name %r is not contained in the set of axis '
'names %r on the input labeled tensor' %
(existing_name, labeled_tensor.axes.keys()))
new_axis = core.Axis(new_name, labeled_tensor.axes[existing_name].value)
return reshape(labeled_tensor, [existing_name], [new_axis], name=scope)
@tc.returns(tc.List(core.LabeledTensor))
@tc.accepts(string_types, collections.Callable, int, bool,
tc.Collection(core.LabeledTensorLike), bool,
tc.Optional(string_types))
def _batch_helper(default_name,
batch_fn,
batch_size,
enqueue_many,
labeled_tensors,
allow_smaller_final_batch,
name=None):
with ops.name_scope(name, default_name, labeled_tensors) as scope:
labeled_tensors = [
core.convert_to_labeled_tensor(lt) for lt in labeled_tensors
]
batch_ops = batch_fn([t.tensor for t in labeled_tensors], scope)
# TODO(shoyer): Remove this when they sanitize the TF API.
if not isinstance(batch_ops, list):
assert isinstance(batch_ops, ops.Tensor)
batch_ops = [batch_ops]
if allow_smaller_final_batch:
batch_size = None
@tc.returns(core.Axes)
@tc.accepts(core.Axes)
def output_axes(axes):
if enqueue_many:
if 'batch' not in axes or list(axes.keys()).index('batch') != 0:
raise ValueError(
'When enqueue_many is True, input tensors must have an axis '
'called "batch" as their first dimension, '
'but axes were %s' % axes)
culled_axes = axes.remove('batch')
return core.Axes([('batch', batch_size)] + list(culled_axes.values()))
else:
return core.Axes([('batch', batch_size)] + list(axes.values()))
output_labeled_tensors = []
for i, tensor in enumerate(batch_ops):
axes = output_axes(labeled_tensors[i].axes)
output_labeled_tensors.append(core.LabeledTensor(tensor, axes))
return output_labeled_tensors
@tc.returns(tc.List(core.LabeledTensor))
@tc.accepts(
tc.Collection(core.LabeledTensorLike), int, int, int, bool, bool,
tc.Optional(string_types))
def batch(labeled_tensors,
batch_size,
num_threads=1,
capacity=32,
enqueue_many=False,
allow_smaller_final_batch=False,
name=None):
"""Rebatch a tensor.
See tf.batch.
Args:
labeled_tensors: The input tensors.
batch_size: The output batch size.
num_threads: See tf.batch.
capacity: See tf.batch.
enqueue_many: If true, the input tensors must contain a 'batch' axis as
their first axis.
If false, the input tensors must not contain a 'batch' axis.
See tf.batch.
allow_smaller_final_batch: See tf.batch.
name: Optional op name.
Returns:
The rebatched tensors.
If enqueue_many is false, the output tensors will have a new 'batch' axis
as their first axis.
Raises:
ValueError: If enqueue_many is True and the first axis of the tensors
isn't "batch".
"""
def fn(tensors, scope):
return input.batch(
tensors,
batch_size=batch_size,
num_threads=num_threads,
capacity=capacity,
enqueue_many=enqueue_many,
allow_smaller_final_batch=allow_smaller_final_batch,
name=scope)
return _batch_helper('lt_batch', fn, batch_size, enqueue_many,
labeled_tensors, allow_smaller_final_batch, name)
@tc.returns(tc.List(core.LabeledTensor))
@tc.accepts(
tc.Collection(core.LabeledTensorLike), int, int, int, bool, int,
tc.Optional(int), bool, tc.Optional(string_types))
def shuffle_batch(labeled_tensors,
batch_size,
num_threads=1,
capacity=32,
enqueue_many=False,
min_after_dequeue=0,
seed=None,
allow_smaller_final_batch=False,
name=None):
"""Rebatch a tensor, with shuffling.
See tf.batch.
Args:
labeled_tensors: The input tensors.
batch_size: The output batch size.
num_threads: See tf.batch.
capacity: See tf.batch.
enqueue_many: If true, the input tensors must contain a 'batch' axis as
their first axis.
If false, the input tensors must not contain a 'batch' axis.
See tf.batch.
min_after_dequeue: Minimum number of elements in the queue after a dequeue,
used to ensure mixing.
seed: Optional random seed.
allow_smaller_final_batch: See tf.batch.
name: Optional op name.
Returns:
The rebatched tensors.
If enqueue_many is false, the output tensors will have a new 'batch' axis
as their first axis.
Raises:
ValueError: If enqueue_many is True and the first axis of the tensors
isn't "batch".
"""
def fn(tensors, scope):
return input.shuffle_batch(
tensors,
batch_size=batch_size,
num_threads=num_threads,
capacity=capacity,
enqueue_many=enqueue_many,
min_after_dequeue=min_after_dequeue,
seed=seed,
allow_smaller_final_batch=allow_smaller_final_batch,
name=scope)
return _batch_helper('lt_shuffle_batch', fn, batch_size, enqueue_many,
labeled_tensors, allow_smaller_final_batch, name)
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike,
tc.Mapping(string_types, int),
tc.Optional(int), tc.Optional(string_types))
def random_crop(labeled_tensor, shape_map, seed=None, name=None):
"""Randomly crops a tensor to a given size.
See tf.random_crop.
Args:
labeled_tensor: The input tensor.
shape_map: A dictionary mapping axis names to the size of the random crop
for that dimension.
seed: An optional random seed.
name: An optional op name.
Returns:
A tensor of the same rank as `labeled_tensor`, cropped randomly in the
selected dimensions.
Raises:
ValueError: If the shape map contains an axis name not in the input tensor.
"""
with ops.name_scope(name, 'lt_random_crop', [labeled_tensor]) as scope:
labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
for axis_name in shape_map:
if axis_name not in labeled_tensor.axes:
raise ValueError('Selection axis %s not in axes %s' %
(axis_name, labeled_tensor.axes))
shape = []
axes = []
for axis in labeled_tensor.axes.values():
if axis.name in shape_map:
size = shape_map[axis.name]
shape.append(size)
# We lose labels for the axes we crop, leaving just the size.
axes.append((axis.name, size))
else:
shape.append(len(axis))
axes.append(axis)
crop_op = random_ops.random_crop(
labeled_tensor.tensor, shape, seed=seed, name=scope)
return core.LabeledTensor(crop_op, axes)
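# Example usage (illustrative sketch): assuming `image_lt` has axes
# ['row', 'column', 'channel']:
#
#   patch_lt = random_crop(image_lt, {'row': 100, 'column': 100}, seed=42)
#   # 'row' and 'column' are cropped to size 100; 'channel' is left untouched.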
# TODO(shoyer): Allow the user to select the axis over which to map.
@tc.returns(core.LabeledTensor)
@tc.accepts(collections.Callable, core.LabeledTensorLike,
tc.Optional(string_types))
def map_fn(fn, labeled_tensor, name=None):
"""Map on the list of tensors unpacked from labeled_tensor.
See tf.map_fn.
Args:
fn: The function to apply to each unpacked LabeledTensor.
It should have type LabeledTensor -> LabeledTensor.
labeled_tensor: The input tensor.
name: Optional op name.
Returns:
A tensor that packs the results of applying fn to the list of tensors
unpacked from labeled_tensor.
"""
with ops.name_scope(name, 'lt_map_fn', [labeled_tensor]) as scope:
labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
unpack_lts = unpack(labeled_tensor)
# TODO(ericmc): Fix this upstream.
if labeled_tensor.dtype == dtypes.string:
# We must construct the full graph here, because functional_ops.map_fn
# doesn't work for string-valued tensors.
# Constructing the full graph may be slow.
map_lts = [fn(t) for t in unpack_lts]
return pack(map_lts, list(labeled_tensor.axes.values())[0], name=scope)
else:
# Figure out what the axis labels should be, but use tf.map_fn to
# construct the graph because it's efficient.
# It may be slow to construct the full graph, so we infer the labels from
# the first element.
# TODO(ericmc): This builds a subgraph which then gets thrown away.
# Find a more elegant solution.
first_map_lt = fn(unpack_lts[0])
final_axes = list(labeled_tensor.axes.values())[:1] + list(
first_map_lt.axes.values())
@tc.returns(ops.Tensor)
@tc.accepts(ops.Tensor)
def tf_fn(tensor):
original_axes = list(labeled_tensor.axes.values())[1:]
tensor_lt = core.LabeledTensor(tensor, original_axes)
return fn(tensor_lt).tensor
map_op = functional_ops.map_fn(tf_fn, labeled_tensor.tensor)
map_lt = core.LabeledTensor(map_op, final_axes)
return core.identity(map_lt, name=scope)
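# Example usage (illustrative sketch): assuming `batch_lt` has axes
# ['batch', 'x'] and `normalize` is a user-supplied function mapping an ['x']
# LabeledTensor to another ['x'] LabeledTensor:
#
#   normalized_lt = map_fn(normalize, batch_lt)   # axes remain ['batch', 'x']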
@tc.returns(core.LabeledTensor)
@tc.accepts(collections.Callable, core.LabeledTensorLike,
core.LabeledTensorLike, tc.Optional(string_types))
def foldl(fn, labeled_tensor, initial_value, name=None):
"""Left fold on the list of tensors unpacked from labeled_tensor.
See tf.foldl.
Args:
fn: The function to apply to each unpacked LabeledTensor.
It should have type (LabeledTensor, LabeledTensor) -> LabeledTensor.
Its arguments are (accumulated_value, next_value).
labeled_tensor: The input tensor.
initial_value: The initial value of the accumulator.
name: Optional op name.
Returns:
The accumulated value.
"""
with ops.name_scope(name, 'lt_foldl',
[labeled_tensor, initial_value]) as scope:
labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
initial_value = core.convert_to_labeled_tensor(initial_value)
@tc.returns(ops.Tensor)
@tc.accepts(ops.Tensor, ops.Tensor)
def tf_fn(accumulator, next_element):
accumulator_lt = core.LabeledTensor(accumulator, initial_value.axes)
next_element_lt = core.LabeledTensor(
next_element, list(labeled_tensor.axes.values())[1:])
return fn(accumulator_lt, next_element_lt).tensor
foldl_op = functional_ops.foldl(
tf_fn, labeled_tensor.tensor, initializer=initial_value.tensor)
foldl_lt = core.LabeledTensor(foldl_op, initial_value.axes)
return core.identity(foldl_lt, name=scope)
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike,
tc.Optional(tc.Collection(string_types)), tc.Optional(string_types))
def squeeze(labeled_tensor, axis_names=None, name=None):
"""Remove size-1 dimensions.
See tf.squeeze.
Args:
labeled_tensor: The input tensor.
axis_names: The names of the dimensions to remove, or None to remove
all size-1 dimensions.
name: Optional op name.
Returns:
A tensor with the specified dimensions removed.
Raises:
ValueError: If the named axes are not in the tensor, or if they are
not size-1.
"""
with ops.name_scope(name, 'lt_squeeze', [labeled_tensor]) as scope:
labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
if axis_names is None:
axis_names = [a.name for a in labeled_tensor.axes.values() if len(a) == 1]
for axis_name in axis_names:
if axis_name not in labeled_tensor.axes:
raise ValueError('axis %s is not in tensor axes %s' %
(axis_name, labeled_tensor.axes))
elif len(labeled_tensor.axes[axis_name]) != 1:
raise ValueError(
'cannot squeeze axis with size greater than 1: (%s, %s)' %
(axis_name, labeled_tensor.axes[axis_name]))
squeeze_dimensions = []
axes = []
for i, axis in enumerate(labeled_tensor.axes.values()):
if axis.name in axis_names:
squeeze_dimensions.append(i)
else:
axes.append(axis)
if squeeze_dimensions:
squeeze_op = array_ops.squeeze(
labeled_tensor.tensor, squeeze_dimensions, name=scope)
else:
squeeze_op = array_ops.identity(labeled_tensor.tensor, name=scope)
return core.LabeledTensor(squeeze_op, axes)
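# Example usage (illustrative sketch): assuming `probs_lt` has axes
# ['batch', 'class', 'dummy'] where 'dummy' has size 1:
#
#   squeezed_lt = squeeze(probs_lt, ['dummy'])   # or squeeze(probs_lt) to drop every size-1 axis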
# pylint: disable=invalid-name
ReduceAxis = tc.Union(string_types,
tc.Tuple(string_types, collections.Hashable))
ReduceAxes = tc.Optional(tc.Union(ReduceAxis, tc.Collection(ReduceAxis)))
# pylint: enable=invalid-name
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike, core.LabeledTensorLike,
tc.Optional(string_types))
def matmul(a, b, name=None):
"""Matrix multiply two tensors with rank 1 or 2.
If both tensors have rank 2, a matrix-matrix product is performed.
If one tensor has rank 1 and the other has rank 2, then a matrix-vector
product is performed.
If both tensors have rank 1, then a vector dot-product is performed.
(This behavior matches that of `numpy.dot`.)
Both tensors must share exactly one dimension in common, which is the
dimension the operation is summed along. The inputs will be automatically
transposed if necessary as part of the matmul op.
We intend to eventually support `matmul` on higher rank input, and also
  eventually support summing over any number of shared dimensions (via an `axis`
argument), but neither of these features has been implemented yet.
Args:
a: First LabeledTensor.
b: Second LabeledTensor.
name: Optional op name.
Returns:
LabeledTensor with the result of matrix multiplication. Axes are ordered by
    the current axis_order_scope, if set, or in order of appearance on the
inputs.
Raises:
NotImplementedError: If inputs have rank >2 or share multiple axes.
ValueError: If the inputs have rank 0 or do not share any axes.
"""
with ops.name_scope(name, 'lt_matmul', [a, b]) as scope:
a = core.convert_to_labeled_tensor(a)
b = core.convert_to_labeled_tensor(b)
if len(a.axes) > 2 or len(b.axes) > 2:
# We could pass batched inputs to tf.matmul to make this work, but we
# would also need to use tf.tile and/or tf.transpose. These are more
# expensive than doing reshapes, so it's not clear if it's a good idea to
# do this automatically.
raise NotImplementedError(
'matmul currently requires inputs with rank 2 or less, but '
'inputs have ranks %r and %r' % (len(a.axes), len(b.axes)))
if not a.axes or not b.axes:
raise ValueError(
'matmul currently requires inputs with at least rank 1, but '
'inputs have ranks %r and %r' % (len(a.axes), len(b.axes)))
shared_axes = set(a.axes) & set(b.axes)
if len(shared_axes) > 1:
raise NotImplementedError(
'matmul does not yet support summing over multiple shared axes: %r. '
'Use transpose and reshape to create a single shared axis to sum '
'over.' % shared_axes)
if not shared_axes:
      raise ValueError('there must be exactly one axis in common between the '
                       'inputs to matmul: %r, %r' %
(a.axes.keys(), b.axes.keys()))
shared_axis, = shared_axes
if a.axes[shared_axis] != b.axes[shared_axis]:
raise ValueError('axis %r does not match on input arguments: %r vs %r' %
(shared_axis, a.axes[shared_axis].value,
b.axes[shared_axis].value))
result_axes = []
for axes in [a.axes, b.axes]:
for axis in axes.values():
if axis.name != shared_axis:
result_axes.append(axis)
axis_scope_order = core.get_axis_order()
if axis_scope_order is not None:
result_axis_names = [axis.name for axis in result_axes]
new_axis_names = [
name for name in axis_scope_order if name in result_axis_names
]
if new_axis_names != result_axis_names:
# switch a and b
b, a = a, b
# result_axes is a list of length 1 or 2
result_axes = result_axes[::-1]
squeeze_dims = []
if len(a.axes) == 1:
a_tensor = array_ops.reshape(a.tensor, (1, -1))
squeeze_dims.append(0)
transpose_a = False
else:
a_tensor = a.tensor
transpose_a = list(a.axes.keys()).index(shared_axis) == 0
if len(b.axes) == 1:
b_tensor = array_ops.reshape(b.tensor, (-1, 1))
squeeze_dims.append(1)
transpose_b = False
else:
b_tensor = b.tensor
transpose_b = list(b.axes.keys()).index(shared_axis) == 1
result_op = math_ops.matmul(
a_tensor, b_tensor, transpose_a=transpose_a, transpose_b=transpose_b)
if squeeze_dims:
result_op = array_ops.squeeze(result_op, squeeze_dims)
result_op = array_ops.identity(result_op, name=scope)
return core.LabeledTensor(result_op, result_axes)
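# Example usage (illustrative sketch): assuming `weights_lt` has axes
# ['out', 'in'] and `features_lt` has the single axis ['in'], the shared 'in'
# axis is summed over (a matrix-vector product):
#
#   logits_lt = matmul(weights_lt, features_lt)   # result has the single axis ['out']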
@tc.returns(types.FunctionType)
@tc.accepts(string_types, collections.Callable)
def define_reduce_op(op_name, reduce_fn):
"""Define a reduction op for labeled tensors.
Args:
op_name: string name of the TensorFlow op.
reduce_fn: function to call to evaluate the op on a tf.Tensor.
Returns:
Function defining the given reduction op that acts on a LabeledTensor.
"""
default_name = 'lt_%s' % op_name
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike, ReduceAxes, tc.Optional(string_types))
def op(labeled_tensor, axes=None, name=None):
"""Computes the given reduction across the given axes of a LabeledTensor.
See `tf.{op_name}` for full details.
Args:
labeled_tensor: The input tensor.
axes: A set of axes or None.
If None, all axes will be reduced.
Axes must all be strings, in which case those dimensions will be
removed, or pairs of (name, None) or (name, label), in which case those
dimensions will be kept.
name: Optional op name.
Returns:
The reduced LabeledTensor.
Raises:
ValueError: if any of the axes to reduce over are not found on
`labeled_tensor`.
"""
with ops.name_scope(name, default_name, [labeled_tensor]) as scope:
labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
if axes is None:
axes = labeled_tensor.axes.keys()
if isinstance(axes, (string_types, tuple)):
axes = [axes]
reduction_axes = {}
axes_to_squeeze = []
for a in axes:
if isinstance(a, string_types):
# We squeeze out this axis.
reduction_axes[a] = a
axes_to_squeeze.append(a)
else:
# We keep this axis, with the user-provided labels.
(axis_name, label) = a
if label is not None:
# The input was a single label, so make it a list so it can be
# turned into an Axis.
label = [label]
reduction_axes[axis_name] = (axis_name, label)
for axis_name in reduction_axes:
if axis_name not in labeled_tensor.axes:
raise ValueError('Axis %s not in axes %s' %
(axis_name, labeled_tensor.axes))
intermediate_axes = []
reduction_dimensions = []
for i, axis in enumerate(labeled_tensor.axes.values()):
if axis.name in reduction_axes:
intermediate_axes.append(reduction_axes[axis.name])
reduction_dimensions.append(i)
else:
intermediate_axes.append(axis)
reduce_op = reduce_fn(
labeled_tensor.tensor, reduction_dimensions, keep_dims=True)
reduce_lt = core.LabeledTensor(reduce_op, intermediate_axes)
return squeeze(reduce_lt, axes_to_squeeze, name=scope)
op.__doc__ = op.__doc__.format(op_name=op_name)
op.__name__ = op_name
return op
reduce_all = define_reduce_op('reduce_all', math_ops.reduce_all)
reduce_any = define_reduce_op('reduce_any', math_ops.reduce_any)
reduce_logsumexp = define_reduce_op('reduce_logsumexp',
math_ops.reduce_logsumexp)
reduce_max = define_reduce_op('reduce_max', math_ops.reduce_max)
reduce_mean = define_reduce_op('reduce_mean', math_ops.reduce_mean)
reduce_min = define_reduce_op('reduce_min', math_ops.reduce_min)
reduce_prod = define_reduce_op('reduce_prod', math_ops.reduce_prod)
reduce_sum = define_reduce_op('reduce_sum', math_ops.reduce_sum)
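# Example usage (illustrative sketch): assuming `loss_lt` has axes
# ['batch', 'class'], reductions are specified by axis name:
#
#   per_example_lt = reduce_sum(loss_lt, 'class')   # axes: ['batch']
#   mean_loss_lt = reduce_mean(loss_lt)             # all axes reduced to a scalar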
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike,
tc.Mapping(str, tc.Union(int, ops.Tensor)),
tc.Optional(string_types))
def tile(labeled_tensor, multiples, name=None):
"""Constructs a tensor by tiling a given tensor.
Only axes without tick-labels can be tiled. (Otherwise, axis labels on tiled
tensors would no longer be unique.)
  See tf.tile.
Args:
labeled_tensor: The input tensor.
multiples: A mapping where the keys are axis names and the values are the
integer number of times to tile along that axis. Only axes with a multiple
different than 1 need be included.
name: Optional op name.
Returns:
A tensor with the indicated axes tiled.
Raises:
ValueError: If the tiled axes are not axes in the input tensor, or if any
axes in multiples have tick labels.
"""
with ops.name_scope(name, 'lt_tile', [labeled_tensor]) as scope:
labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
if not set(multiples.keys()) <= set(labeled_tensor.axes.keys()):
raise ValueError('tile axes %r are not contained in the set of axis '
'names %r on the input labeled tensor' %
(multiples.keys(), labeled_tensor.axes))
labeled_axes = [
name for name in multiples
if labeled_tensor.axes[name].labels is not None
]
if labeled_axes:
raise ValueError('cannot tile axes with tick labels: %r' % labeled_axes)
multiples_list = [multiples.get(name, 1) for name in labeled_tensor.axes]
tile_op = array_ops.tile(labeled_tensor.tensor, multiples_list, name=scope)
new_axes = [
axis.name if axis.labels is None else axis
for axis in labeled_tensor.axes.values()
]
return core.LabeledTensor(tile_op, new_axes)
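# Example usage (illustrative sketch): assuming `row_lt` has axes
# [('batch', 4), ('x', 3)] and neither axis carries tick labels:
#
#   tiled_lt = tile(row_lt, {'x': 2})   # axes become [('batch', 4), ('x', 6)]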
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike,
tc.Mapping(str, tc.Tuple(core.AxisValue, core.AxisValue)),
string_types, tc.Optional(string_types))
def pad(labeled_tensor, paddings, mode='CONSTANT', name=None):
"""Pads a tensor.
See tf.pad.
Args:
labeled_tensor: The input tensor.
paddings: A mapping where the keys are axis names and the values are
tuples where the first element is the padding to insert at the beginning
of the axis and the second is the padding to insert at the end of the
axis.
mode: One of "CONSTANT", "REFLECT", or "SYMMETRIC".
name: Optional op name.
Returns:
A tensor with the indicated axes padded, optionally with those axes extended
with the provided labels.
Raises:
ValueError: If the padded axes are not axes in the input tensor.
"""
with ops.name_scope(name, 'lt_pad', [labeled_tensor]) as scope:
labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
if not set(paddings.keys()) <= set(labeled_tensor.axes.keys()):
raise ValueError('pad axes %r are not contained in the set of axis '
'names %r on the input labeled tensor' %
(paddings.keys(), labeled_tensor.axes))
new_axes = []
padding_pairs = []
for name, axis in labeled_tensor.axes.items():
if name in paddings:
padding_before, padding_after = paddings[name]
axis_before = core.Axis(name, padding_before)
axis_after = core.Axis(name, padding_after)
new_axes.append(core.concat_axes([axis_before, axis, axis_after]))
padding_pairs.append((len(axis_before), len(axis_after)))
else:
new_axes.append(axis)
padding_pairs.append((0, 0))
pad_op = array_ops.pad(labeled_tensor.tensor,
padding_pairs,
mode,
name=scope)
return core.LabeledTensor(pad_op, new_axes)
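# Example usage (illustrative sketch): assuming `signal_lt` has axes
# [('batch', 4), ('time', 10)]:
#
#   padded_lt = pad(signal_lt, {'time': (1, 1)})   # one constant value on each end; 'time' now has size 12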
@tc.returns(core.LabeledTensor)
@tc.accepts(
tc.Union(np.ndarray, list, tuple, core.Scalar),
tc.Optional(dtypes.DType),
tc.Optional(
tc.Union(core.Axes, tc.Collection(
tc.Union(string_types, core.AxisLike)))), tc.Optional(string_types))
def constant(value, dtype=None, axes=None, name=None):
"""Creates a constant tensor.
If `axes` includes any strings, shape is inferred from `value`. Otherwise,
the sizes of the given `axes` are used to set `shape` for `tf.constant`.
See tf.constant for more details.
Args:
value: The input tensor.
dtype: The type of the returned tensor.
axes: Optional Axes, list of strings or list of objects coercible to Axis
objects. By default, axes are assumed to be an empty list (i.e., `value`
is treated as a scalar).
name: Optional op name.
Returns:
    The constant LabeledTensor.
"""
with ops.name_scope(name, 'lt_constant', [value]) as scope:
if axes is None:
axes = []
if isinstance(axes, core.Axes):
axes = axes.values()
if any(isinstance(ax, string_types) for ax in axes):
# need to infer shape
shape = None
else:
# axes already indicate shape
axes = [core.as_axis(a) for a in axes]
shape = [a.size for a in axes]
op = array_ops.constant(value, dtype=dtype, shape=shape, name=scope)
return core.LabeledTensor(op, axes)
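# Example usage (illustrative sketch): axes may carry explicit tick labels, in
# which case the shape is taken from them:
#
#   rgb_lt = constant([0.2, 0.5, 0.3], axes=[('channel', ['red', 'green', 'blue'])])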
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike,
tc.Optional(dtypes.DType), tc.Optional(string_types))
def zeros_like(labeled_tensor, dtype=None, name=None):
"""Creates an identical tensor with all elements set to zero.
Args:
labeled_tensor: The input tensor.
dtype: The type of the returned tensor.
name: Optional op name.
Returns:
The tensor with elements set to zero.
"""
with ops.name_scope(name, 'lt_zeros_like', [labeled_tensor]) as scope:
labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
op = array_ops.zeros_like(labeled_tensor.tensor, dtype=dtype, name=scope)
return core.LabeledTensor(op, labeled_tensor.axes)
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike,
tc.Optional(dtypes.DType), tc.Optional(string_types))
def ones_like(labeled_tensor, dtype=None, name=None):
"""Creates an identical tensor with all elements set to one.
Args:
labeled_tensor: The input tensor.
dtype: The type of the returned tensor.
name: Optional op name.
Returns:
The tensor with elements set to one.
"""
with ops.name_scope(name, 'lt_ones_like', [labeled_tensor]) as scope:
labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
op = array_ops.ones_like(labeled_tensor.tensor, dtype=dtype, name=scope)
return core.LabeledTensor(op, labeled_tensor.axes)
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike,
tc.Optional(dtypes.DType), tc.Optional(string_types))
def cast(labeled_tensor, dtype=None, name=None):
"""Casts a labeled tensor to a new type.
Args:
labeled_tensor: The input tensor.
dtype: The type of the returned tensor.
name: Optional op name.
Returns:
A labeled tensor with the new dtype.
"""
with ops.name_scope(name, 'lt_cast', [labeled_tensor]) as scope:
labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
op = math_ops.cast(labeled_tensor.tensor, dtype=dtype, name=scope)
return core.LabeledTensor(op, labeled_tensor.axes)
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike, string_types, tc.Optional(string_types))
def verify_tensor_all_finite(labeled_tensor, message, name=None):
"""Asserts a tensor doesn't contain NaNs or Infs.
See tf.verify_tensor_all_finite.
Args:
labeled_tensor: The input tensor.
message: Message to log on failure.
name: Optional op name.
Returns:
The input tensor.
"""
with ops.name_scope(name, 'lt_verify_tensor_all_finite',
[labeled_tensor]) as scope:
labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
op = numerics.verify_tensor_all_finite(
labeled_tensor.tensor, msg=message, name=scope)
return core.LabeledTensor(op, labeled_tensor.axes)
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike, core.LabeledTensorLike,
tc.Optional(string_types))
def boolean_mask(labeled_tensor, mask, name=None):
"""Apply a boolean mask to a labeled tensor.
Unlike `tf.boolean_mask`, this currently only works on 1-dimensional masks.
The mask is applied to the first axis of `labeled_tensor`. Labels on the first
axis are removed, because True indices in `mask` may not be known dynamically.
Args:
labeled_tensor: The input tensor.
    mask: A 1-dimensional boolean LabeledTensor whose single axis matches the
      first axis of `labeled_tensor`.
name: Optional op name.
Returns:
The masked labeled tensor.
Raises:
    ValueError: if the mask's axis does not match the first axis of the input.
    NotImplementedError: if the mask has more than one dimension.
"""
with ops.name_scope(name, 'lt_boolean_mask', [labeled_tensor, mask]) as scope:
labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
mask = core.convert_to_labeled_tensor(mask)
if len(mask.axes) > 1:
raise NotImplementedError(
"LabeledTensor's boolean_mask currently only supports 1D masks")
mask_axis = list(mask.axes.values())[0]
lt_axis = list(labeled_tensor.axes.values())[0]
if mask_axis != lt_axis:
raise ValueError('the first axis of the labeled tensor and the mask '
'are not equal:\n%r\n%r' % (lt_axis, mask_axis))
op = array_ops.boolean_mask(labeled_tensor.tensor, mask.tensor, name=scope)
# TODO(shoyer): attempt to infer labels for the masked values, by calling
# tf.contrib.util.constant_value on the mask?
axes = [lt_axis.name] + list(labeled_tensor.axes.values())[1:]
return core.LabeledTensor(op, axes)
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike, core.LabeledTensorLike,
core.LabeledTensorLike, tc.Optional(string_types))
def where(condition, x, y, name=None):
"""Return elements from x or y depending on condition.
See `tf.where` for more details. This function currently only implements the
three argument version of where.
Args:
condition: LabeledTensor of type `bool`.
x: LabeledTensor for values where condition is true.
y: LabeledTensor for values where condition is false.
name: Optional op name.
Returns:
The labeled tensor with values according to condition.
Raises:
    ValueError: if `condition`, `x`, and `y` do not have exactly the same axes.
"""
with ops.name_scope(name, 'lt_where', [condition, x, y]) as scope:
condition = core.convert_to_labeled_tensor(condition)
x = core.convert_to_labeled_tensor(x)
y = core.convert_to_labeled_tensor(y)
if not condition.axes == x.axes == y.axes:
raise ValueError('all inputs to `where` must have equal axes')
op = array_ops.where(condition.tensor, x.tensor, y.tensor, name=scope)
return core.LabeledTensor(op, x.axes)
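# Example usage (illustrative sketch): assuming `scores_lt`, `fallback_lt` and
# the boolean `valid_lt` all share the single axis ['batch']:
#
#   picked_lt = where(valid_lt, scores_lt, fallback_lt)   # elementwise selection, axes ['batch']
#   kept_lt = boolean_mask(scores_lt, valid_lt)           # keeps True rows; 'batch' labels are dropped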
| apache-2.0 |
droantree/cancersimulation | simpleCancerSim.py | 1 | 10354 | # -*- coding: utf-8 -*-
"""
Created on Wed Oct 26 18:52:50 2016
@author: donal
"""
#
#Simulation explanation:
# Based on the incidence rate parameter, cancer starts in a population according to a poisson process.
# The cancer progresses according to a Gompertz model (i.e. the probability of the sufferer dying from it
# at any particular stage is modeled as a Gompertz distribution.) The distribution is specified
# by providing the Median stage (i.e. number of days) and also the probability of death before half this
# median. (See misc.py) i.e. MEDmort is such that CDF(MEDmort)=0.5 and PHALFmort = CDFmort(MEDmort/2)
# In this model the two parameters are picked as follows:
# - MEDmort will be picked from a uniform distribution from 60 to 700 (approx 2 months to 2 years)
# - PHALFmort will be fixed at 0.02 (experiments suggest this as generating a reasonable shape)
# The cancer is only diagnosed/noticed by the sufferer at some stage after it has developed. It is
# assumed that once it is noticed it is immediately diagnosed by a doctor. A more complex
#  simulation later can model the scenario where the sufferer has to make an appointment to be seen
# by their doctor after noticing the cancer in order to have it diagnosed.
# The probability of noticing the cancer follows a Gompertz model which is related to the progress model
# as follows:
# The Median number of days is picked from a uniform distribution from CDFINVmort(0.01) to MEDmort
# The ProbHalfMedian is picked from a uniform distribution from 0.02 to 0.2
# This is a discrete simulation, progressing on a day-to-day basis.
# The term "stage" refers to the number of days since a cancer started in a sufferer.
# There are a fixed number of treatment appointment slots per day and a sufferer is always booked
# into the next available slot when cancer is diagnosed and sufferers never miss or
# postpone their appointments.
#  A sufferer always survives until their appointment but, of course, the probability of successful
#  treatment keeps decreasing, possibly all the way to zero.
#  When a sufferer is treated, the treatment is either immediately successful or they die immediately, i.e.
# this simulation does not model time to recover or time during which the cancer progresses to its
# terminal stage.
# The probability of the treatment being successful is based simply on a probability derived from the
# stage in the mortality model as follows: at stage(day) x, CDFmort(x) = p, then the prob of
# successful treatment is 1-p.
# The incidence rate of cancer is relatively low so the simulation does not model the decrease in
# the population size due to cancer deaths.
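# Illustrative sketch of the key quantities in this model, using the helpers in
# misc.py (imported below as gp). Here `stage` is the number of days since the
# cancer started, and noticingParams is built the same way as mortalityParams:
#
#   mortalityParams = gp.gompertzParams(MEDmort, PHALFmort)   # CDFmort(MEDmort) = 0.5
#   pNoticedToday = gp.ProbEventBeforeT2GivenNoEventBeforeTime1(stage - 1, stage, noticingParams)
#   pTreatmentSuccess = 1 - gp.CDFGompertz(mortalityParams, stage)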
import numpy as np
import misc as gp
#import matplotlib.pyplot as plt
from tqdm import tqdm
#Simulation parameters
POPULATION = 500000
SLOTS_PER_DAY = 2 #number of appointment slots that can be allocated each day
DAYS_TO_RUN = 365 * 5 # years
CANCER_START_DAILY_INCIDENCE_RATE = float(1) / (2 * 70 * 365) #corresponds to a person having a 50/50 chance of developing cancer in their lifetime, assuming an average lifespan of 70 years
#Status values
STATUS_NOT_TREATED = "NT"
STATUS_TREATMENT_SUCCEEDED = "SC"
STATUS_TREATMENT_FAILED = "FL"
#Simulation tracking variables
class Sufferer:
def __init__(self, today, apptSchedule):
self.apptSchedule = apptSchedule
self.dayCancerStarted = today
self.appt = None #when created the cancer has not been diagnosed and so there is no appointment
self.status = STATUS_NOT_TREATED
MEDmort = float(np.random.randint(60, 700))
PHALFmort = 0.02
self.mortalityParams = gp.gompertzParams(MEDmort, PHALFmort)
MEDnotice = randomUniformFloat(gp.CDFInverseGompertz(self.mortalityParams, 0.01), MEDmort)
PHALFnotice = randomUniformFloat(0.02, 0.2)
self.noticingParams = gp.gompertzParams(MEDnotice, PHALFnotice)
def cancerStage(self, day):
return day - self.dayCancerStarted
def progressOneDay(self, today, simResults):
#3 cases - no appt yet (because cancer not noticed)
# - waiting for an appointment
# - had an appt and either success or failure
cancerStageToday = self.cancerStage(today)
if self.appt == None:
suffererNoticesCancerToday = self.isNoticed(cancerStageToday)
if suffererNoticesCancerToday:
self.appt = self.apptSchedule.getNextApptSlot(today)
#print "appt day=" + str(self.appt) + ", today=" + str(int(today))
waitingDays = self.appt - today
simResults.addApptSet(waitingDays, cancerStageToday + waitingDays)
#Note: if the sufferer does not notice the cancer there is no progress to process
elif today < self.appt:
pass #sufferer continues to wait for their appointment
elif today >= self.appt:
if self.isTreatmentSuccessful(cancerStageToday):
self.status = STATUS_TREATMENT_SUCCEEDED
else:
self.status = STATUS_TREATMENT_FAILED
def isNoticed(self, cancerStageToday):
cancerStageYesterday = cancerStageToday -1;
probNoticing = gp.ProbEventBeforeT2GivenNoEventBeforeTime1(cancerStageYesterday, cancerStageToday, self.noticingParams)
return pickRandomTF(probNoticing)
def isTreatmentSuccessful(self, cancerStageToday):
probTreatmentSuccessful = 1 - gp.CDFGompertz(self.mortalityParams, cancerStageToday)
return pickRandomTF(probTreatmentSuccessful)
def isStatusSuccess(self):
return self.status == STATUS_TREATMENT_SUCCEEDED
def isStatusFailed(self):
return self.status == STATUS_TREATMENT_FAILED
def hasBeenTreated(self):
        return self.status != STATUS_NOT_TREATED
######## End of class Sufferer
class ApptSchedule:
def __init__(self):
self.nextApptDay = 1
self.apptsRemainingNextApptDay = SLOTS_PER_DAY #this should never get down to zero
def resetNextApptDayToDay(self, day):
self.nextApptDay = day
self.apptsRemainingNextApptDay = SLOTS_PER_DAY
def decrementSlotsAvailableNextApptDay(self):
self.apptsRemainingNextApptDay -= 1
if self.apptsRemainingNextApptDay == 0:
self.resetNextApptDayToDay(self.nextApptDay + 1)
def getNextApptSlot(self, today):
if today > self.nextApptDay:
self.resetNextApptDayToDay(today)
nextSlotDay = self.nextApptDay
self.decrementSlotsAvailableNextApptDay()
#print "(Slots per day=" + str(int(SLOTS_PER_DAY)) + ") Next slot on day=" + str(int(today)) + ", next appt day=" + str(int(self.nextApptDay)) + ", appts remaining next day=" + str(int(self.apptsRemainingNextApptDay))
return nextSlotDay
######## End of class ApptSchedule
class Model:
def __init__(self, simResults):
self.sufferers = [] #a list of Sufferer objects
self.today = 0
self.apptSchedule = ApptSchedule()
self.simResults = simResults
def progressToNextDay(self):
self.today += 1
#create new sufferers randomly
        numberOfNewCancerSufferers = numberOfNewCancerStartsToday()
        self.createNewSufferers(numberOfNewCancerSufferers)
#go through all sufferers and "progress" them to today
sufferersTreated = [] #record those sufferers that have been treated so they can be removed later
for sufferer in self.sufferers:
sufferer.progressOneDay(self.today, self.simResults)
if sufferer.hasBeenTreated():
sufferersTreated.append(sufferer) #don't remove a sufferer from self.sufferers here because that would interfere with the for loop
if sufferer.isStatusFailed():
self.simResults.addDeath(self.today)
else:
self.simResults.addCure(self.today)
for sufferer in sufferersTreated:
self.sufferers.remove(sufferer)
    def createNewSufferers(self, numberOfNewSufferers):
for i in range(numberOfNewSufferers):
newSufferer = Sufferer(self.today, self.apptSchedule)
self.sufferers.append(newSufferer)
######## End of class Model
class ModelResults:
def __init__(self):
self.deaths = np.zeros(DAYS_TO_RUN + 1)
self.cured = np.zeros(DAYS_TO_RUN + 1)
#from these next 3 we can calculate avg days to wait for an appt and avg stage when treated
self.apptsWaitingDaysTotal = 0
self.apptsTotal = 0
self.treatmentStageTotalDays = 0
def addDeath(self, day):
self.deaths[day] += 1
def addApptSet(self, daysToWait, stageAtAppt):
self.apptsWaitingDaysTotal += daysToWait
self.treatmentStageTotalDays += stageAtAppt
self.apptsTotal += 1
def addCure(self, day):
self.cured[day] += 1
def avgApptWaitDays(self):
if self.apptsTotal == 0:
return "--"
else:
return float(self.apptsWaitingDaysTotal) / float(self.apptsTotal)
def avgStageTreated(self):
if self.apptsTotal == 0:
return "--"
else:
return float(self.treatmentStageTotalDays) / float(self.apptsTotal)
def numberOfNewCancerStartsToday():
#Assume poisson process with mean rate*population.
averageStartsPerDay = CANCER_START_DAILY_INCIDENCE_RATE * POPULATION
return np.random.poisson(averageStartsPerDay)
def pickRandomTF(probOfTrue):
randomFrom0To1 = np.random.uniform()
return randomFrom0To1 <= probOfTrue
def randomUniformFloat(min, max):
return np.random.random_sample() * (max - min) + min
#Finally, actually run the model:
print "{:>10} {:>10} {:>10} {:>10}".format("slots", "deaths", "avg waits", "avg stage")
for SLOTS_PER_DAY in tqdm(reversed(range(5, 16))):
simResults = ModelResults();
simModel = Model(simResults);
for day in tqdm(range(DAYS_TO_RUN)):
simModel.progressToNextDay()
deaths = int(np.sum(simResults.deaths))
print "{:>10} {:>10} {:>10.2f} {:>10.2f}".format(SLOTS_PER_DAY, deaths, simResults.avgApptWaitDays(), simResults.avgStageTreated())
| gpl-3.0 |
daniel20162016/my-first | read_xml_all/calcul_matrix_good_de_192_matrix.py | 1 | 6349 | # -*- coding: utf-8 -*-
"""
Created on Mon Oct 31 15:45:22 2016
@author: wang
"""
#from matplotlib import pylab as plt
#from numpy import fft, fromstring, int16, linspace
#import wave
from read_wav_xml_good_1 import*
from matrix_24_2 import*
from max_matrix_norm import*
import numpy as np
# open a wave file
filename = 'francois_filon_pure_1.wav'
filename_1 ='francois_filon_pure_1.xml'
word ='de'
wave_signal_float,framerate, word_start_point, word_length_point, word_end_point= read_wav_xml_good_1(filename,filename_1,word)
#print 'word_start_point=',word_start_point
#print 'word_length_point=',word_length_point
#print 'word_end_point=',word_end_point
XJ_1 =wave_signal_float
t_step=1920;
t_entre_step=1440;
t_du_1_1 = int(word_start_point[0]);
t_du_1_2 = int(word_end_point[0]);
t_du_2_1 = int(word_start_point[1]);
t_du_2_2 = int(word_end_point[1]);
t_du_3_1 = int(word_start_point[2]);
t_du_3_2 = int(word_end_point[2]);
t_du_4_1 = int(word_start_point[3]);
t_du_4_2 = int(word_end_point[3]);
t_du_5_1 = int(word_start_point[4]);
t_du_5_2 = int(word_end_point[4]);
fs=framerate
#XJ_du_1 = wave_signal_float[(t_du_1_1-1):t_du_1_2];
#length_XJ_du_1 = int(word_length_point[0]+1);
#x1,y1,z1=matrix_24_2(XJ_du_1,fs)
#x1=max_matrix_norm(x1)
#==============================================================================
# this part calculates the first matrix
#==============================================================================
XJ_du_1_2 = XJ_1[(t_du_1_1-1):(t_du_1_1+t_step)];
x1_1,y1_1,z1_1=matrix_24_2(XJ_du_1_2 ,fs)
x1_1=max_matrix_norm(x1_1)
matrix_all_step_new_1 = np.zeros([192])
for i in range(0,24):
matrix_all_step_new_1[i]=x1_1[i]
#==============================================================================
# the remaining columns hold the FFT features of the subsequent windows
#==============================================================================
for i in range(1,8):
XJ_du_1_total = XJ_1[(t_du_1_1+t_entre_step*(i)-1):(t_du_1_1+t_step+t_entre_step*(i) )];
x1_all,y1_all,z1_all=matrix_24_2(XJ_du_1_total,fs)
x1_all=max_matrix_norm(x1_all)
for j in range(0,24):
matrix_all_step_new_1[24*i+j]=x1_all[j]
#==============================================================================
# this part calculates the second matrix
#==============================================================================
for k in range (1,2):
t_start=t_du_2_1
XJ_du_1_2 = XJ_1[(t_start-1):(t_start+t_step)];
x1_1,y1_1,z1_1=matrix_24_2(XJ_du_1_2 ,fs)
x1_1=max_matrix_norm(x1_1)
matrix_all_step_new_2 = np.zeros([192])
for i in range(0,24):
matrix_all_step_new_2[i]=x1_1[i]
#==============================================================================
# the remaining columns hold the FFT features of the subsequent windows
#==============================================================================
for i in range(1,8):
XJ_du_1_total = XJ_1[(t_start+t_entre_step*(i)-1):(t_start+t_step+t_entre_step*(i) )];
x1_all,y1_all,z1_all=matrix_24_2(XJ_du_1_total,fs)
x1_all=max_matrix_norm(x1_all)
for j in range(0,24):
matrix_all_step_new_2[24*i+j]=x1_all[j]
#==============================================================================
# this part calculates the third matrix
#==============================================================================
for k in range (1,2):
t_start=t_du_3_1
XJ_du_1_2 = XJ_1[(t_start-1):(t_start+t_step)];
x1_1,y1_1,z1_1=matrix_24_2(XJ_du_1_2 ,fs)
x1_1=max_matrix_norm(x1_1)
matrix_all_step_new_3 = np.zeros([192])
for i in range(0,24):
matrix_all_step_new_3[i]=x1_1[i]
#==============================================================================
# the remaining columns hold the FFT features of the subsequent windows
#==============================================================================
for i in range(1,8):
XJ_du_1_total = XJ_1[(t_start+t_entre_step*(i)-1):(t_start+t_step+t_entre_step*(i) )];
x1_all,y1_all,z1_all=matrix_24_2(XJ_du_1_total,fs)
x1_all=max_matrix_norm(x1_all)
for j in range(0,24):
matrix_all_step_new_3[24*i+j]=x1_all[j]
#==============================================================================
# this part calculates the fourth matrix
#==============================================================================
for k in range (1,2):
t_start=t_du_4_1
XJ_du_1_2 = XJ_1[(t_start-1):(t_start+t_step)];
x1_1,y1_1,z1_1=matrix_24_2(XJ_du_1_2 ,fs)
x1_1=max_matrix_norm(x1_1)
matrix_all_step_new_4 = np.zeros([192])
for i in range(0,24):
matrix_all_step_new_4[i]=x1_1[i]
#==============================================================================
# the remaining columns hold the FFT features of the subsequent windows
#==============================================================================
for i in range(1,8):
# print i
XJ_du_1_total = XJ_1[(t_start+t_entre_step*(i)-1):(t_start+t_step+t_entre_step*(i) )];
x1_all,y1_all,z1_all=matrix_24_2(XJ_du_1_total,fs)
x1_all=max_matrix_norm(x1_all)
for j in range(0,24):
matrix_all_step_new_4[24*i+j]=x1_all[j]
#print 'matrix_all_step_4=',matrix_all_step_4
#==============================================================================
# this part calculates the fifth matrix
#==============================================================================
for k in range (1,2):
t_start=t_du_5_1
XJ_du_1_2 = XJ_1[(t_start-1):(t_start+t_step)];
x1_1,y1_1,z1_1=matrix_24_2(XJ_du_1_2 ,fs)
x1_1=max_matrix_norm(x1_1)
matrix_all_step_new_5 = np.zeros([192])
for i in range(0,24):
matrix_all_step_new_5[i]=x1_1[i]
#==============================================================================
# the remaining columns hold the FFT features of the subsequent windows
#==============================================================================
for i in range(1,8):
# print i
XJ_du_1_total = XJ_1[(t_start+t_entre_step*(i)-1):(t_start+t_step+t_entre_step*(i) )];
x1_all,y1_all,z1_all=matrix_24_2(XJ_du_1_total,fs)
x1_all=max_matrix_norm(x1_all)
for j in range(0,24):
matrix_all_step_new_5[24*i+j]=x1_all[j]
#print 'matrix_all_step_5=',matrix_all_step_5
np.savez('de_192_matrix.npz',matrix_all_step_new_1,matrix_all_step_new_2,matrix_all_step_new_3,matrix_all_step_new_4,matrix_all_step_new_5)
| mit |
lsiemens/iprocess-projects | psipy/analytic.py | 1 | 5194 | ####
#
# Copyright (c) 2015, Luke Siemens
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
####
"""
analytic solutions to the Schrodinger equation
AUTHOR: Luke Siemens
"""
import cmath
import numpy as np
import numpy.polynomial.hermite as hermite
from matplotlib import pyplot
class analytic_solution:
def __init__(self, x, m=1.0, dt=0.01, L=None):
self.x = np.asarray(x)
self.dx = self.x[1]-self.x[0]
        if L is None:
self.L = float(self.x[-1] - self.x[0])
else:
self.L = float(L)
self.t = 0.0
self.dt = dt
self.m = float(m)
self.N = len(x)
self.V_x = np.zeros(self.x.shape)
self.Cns = np.array([], dtype=complex)
self._cache = [(None,0)] #empty first element
def clear_cache(self):
self._cache = [(None,0)]
def get_psi_n(self, n):
raise NotImplementedError("get_psi_n not implimented")
def get_energy_n(self, n):
raise NotImplementedError("get_energy_n not implimented")
def time_step(self):
self.t += self.dt
def get_axis(self):
return self.x
def get_psi(self):
psi_x = np.zeros(self.x.shape, dtype=complex)
for n, Cn in enumerate(self.Cns):
if n != 0:
if len(self._cache) - 1 >= n:
psi_x = psi_x + Cn*self._cache[n][0]*np.exp(-1j*self._cache[n][1]*self.t)
else:
psi_n = self.get_psi_n(n)
energy_n = self.get_energy_n(n)
self._cache.append((psi_n, energy_n))
psi_x = psi_x + Cn*psi_n*np.exp(-1j*energy_n*self.t)
return psi_x
def add_eigenstate(self, n, Cn):
try:
if n + 1 > len(self.Cns):
                new_Cns = np.zeros((n + 1,), dtype=complex)  # keep complex dtype so complex coefficients survive
new_Cns[:len(self.Cns)] = self.Cns
self.Cns = new_Cns
self.Cns[n] = Cn
except TypeError:
if np.max(n) + 1 > len(self.Cns):
new_Cns = np.zeros((np.max(n) + 1,), dtype=complex)
new_Cns[:len(self.Cns)] = self.Cns
self.Cns = new_Cns
for i in xrange(len(Cn)):
self.Cns[n[i]] = Cn[i]
self.Cns *= 1/np.sqrt(np.sum(np.conj(self.Cns)*self.Cns))
def eigenbasis(self, n_max, psi_x):
assert psi_x.shape == self.x.shape
self.Cns = np.zeros((n_max,), dtype=complex)
for n in xrange(n_max):
if n!=0:
integral = self.dx*np.sum(np.multiply(np.conj(self.get_psi_n(n)), psi_x))
self.Cns[n] = integral
self.Cns *= 1/np.sqrt(np.sum(np.conj(self.Cns)*self.Cns))
class inf_square_well(analytic_solution):
def get_psi_n(self, n):
if n%2 == 1:
psi = np.sqrt(2/(self.L))*np.cos(n*np.pi*self.x/self.L)
else:
psi = np.sqrt(2/(self.L))*np.sin(n*np.pi*self.x/self.L)
psi[self.x > self.L/2.0] = 0
psi[self.x < -self.L/2.0] = 0
return psi
def get_energy_n(self, n):
return (n*np.pi/self.L)**2/(2*self.m)
class harmonic_well(analytic_solution):
def __init__(self, x, k=1.0, m=1.0, dt=0.01, L=None):
analytic_solution.__init__(self, x, m, dt, L)
self.k = k
self.omega = np.sqrt(self.k/self.m)
def get_psi_n(self, n):
psi = np.sqrt(1/float(np.math.factorial(n-1)*2**(n-1)))*(self.m*self.omega/np.pi)**(0.25)*np.exp(-self.m*self.omega*self.x**2/2)*hermite.hermval(np.sqrt(self.m*self.omega)*self.x,[0]*(n-1)+[1.0])
psi[self.x > self.L/2.0] = 0
psi[self.x < -self.L/2.0] = 0
return psi
def get_energy_n(self, n):
return self.omega*(n-0.5)
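# Minimal usage sketch (added example): build an infinite square well, superpose
# its two lowest eigenstates and plot the probability density at t = 0. Guarded
# by __main__ so importing this module stays side-effect free.
if __name__ == "__main__":
    x = np.linspace(-1.0, 1.0, 500)
    well = inf_square_well(x, m=1.0, dt=0.01, L=2.0)
    well.add_eigenstate([1, 2], [1.0, 1.0j])  # equal-weight superposition of n=1 and n=2
    psi = well.get_psi()
    pyplot.plot(x, np.abs(psi) ** 2)
    pyplot.xlabel("x")
    pyplot.ylabel("|psi(x, 0)|^2")
    pyplot.show()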
| bsd-3-clause |
rvraghav93/scikit-learn | examples/cluster/plot_mini_batch_kmeans.py | 53 | 4096 | """
====================================================================
Comparison of the K-Means and MiniBatchKMeans clustering algorithms
====================================================================
We want to compare the performance of the MiniBatchKMeans and KMeans:
the MiniBatchKMeans is faster, but gives slightly different results (see
:ref:`mini_batch_kmeans`).
We will cluster a set of data, first with KMeans and then with
MiniBatchKMeans, and plot the results.
We will also plot the points that are labelled differently between the two
algorithms.
"""
print(__doc__)
import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import MiniBatchKMeans, KMeans
from sklearn.metrics.pairwise import pairwise_distances_argmin
from sklearn.datasets.samples_generator import make_blobs
# #############################################################################
# Generate sample data
np.random.seed(0)
batch_size = 45
centers = [[1, 1], [-1, -1], [1, -1]]
n_clusters = len(centers)
X, labels_true = make_blobs(n_samples=3000, centers=centers, cluster_std=0.7)
# #############################################################################
# Compute clustering with Means
k_means = KMeans(init='k-means++', n_clusters=3, n_init=10)
t0 = time.time()
k_means.fit(X)
t_batch = time.time() - t0
# #############################################################################
# Compute clustering with MiniBatchKMeans
mbk = MiniBatchKMeans(init='k-means++', n_clusters=3, batch_size=batch_size,
n_init=10, max_no_improvement=10, verbose=0)
t0 = time.time()
mbk.fit(X)
t_mini_batch = time.time() - t0
# #############################################################################
# Plot result
fig = plt.figure(figsize=(8, 3))
fig.subplots_adjust(left=0.02, right=0.98, bottom=0.05, top=0.9)
colors = ['#4EACC5', '#FF9C34', '#4E9A06']
# We want to have the same colors for the same cluster from the
# MiniBatchKMeans and the KMeans algorithm. Let's pair each cluster center with
# its closest counterpart.
k_means_cluster_centers = np.sort(k_means.cluster_centers_, axis=0)
mbk_means_cluster_centers = np.sort(mbk.cluster_centers_, axis=0)
k_means_labels = pairwise_distances_argmin(X, k_means_cluster_centers)
mbk_means_labels = pairwise_distances_argmin(X, mbk_means_cluster_centers)
order = pairwise_distances_argmin(k_means_cluster_centers,
mbk_means_cluster_centers)
# KMeans
ax = fig.add_subplot(1, 3, 1)
for k, col in zip(range(n_clusters), colors):
my_members = k_means_labels == k
cluster_center = k_means_cluster_centers[k]
ax.plot(X[my_members, 0], X[my_members, 1], 'w',
markerfacecolor=col, marker='.')
ax.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=6)
ax.set_title('KMeans')
ax.set_xticks(())
ax.set_yticks(())
plt.text(-3.5, 1.8, 'train time: %.2fs\ninertia: %f' % (
t_batch, k_means.inertia_))
# MiniBatchKMeans
ax = fig.add_subplot(1, 3, 2)
for k, col in zip(range(n_clusters), colors):
my_members = mbk_means_labels == order[k]
cluster_center = mbk_means_cluster_centers[order[k]]
ax.plot(X[my_members, 0], X[my_members, 1], 'w',
markerfacecolor=col, marker='.')
ax.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=6)
ax.set_title('MiniBatchKMeans')
ax.set_xticks(())
ax.set_yticks(())
plt.text(-3.5, 1.8, 'train time: %.2fs\ninertia: %f' %
(t_mini_batch, mbk.inertia_))
# Initialise the different array to all False (labels only take values 0-2,
# so comparing against 4 yields all False)
different = (mbk_means_labels == 4)
ax = fig.add_subplot(1, 3, 3)
for k in range(n_clusters):
different += ((k_means_labels == k) != (mbk_means_labels == order[k]))
identic = np.logical_not(different)
ax.plot(X[identic, 0], X[identic, 1], 'w',
markerfacecolor='#bbbbbb', marker='.')
ax.plot(X[different, 0], X[different, 1], 'w',
markerfacecolor='m', marker='.')
ax.set_title('Difference')
ax.set_xticks(())
ax.set_yticks(())
plt.show()
| bsd-3-clause |
pjryan126/solid-start-careers | store/api/zillow/venv/lib/python2.7/site-packages/pandas/tests/frame/test_mutate_columns.py | 2 | 7589 | # -*- coding: utf-8 -*-
from __future__ import print_function
from pandas.compat import range, lrange
import numpy as np
from pandas import DataFrame, Series
from pandas.util.testing import (assert_series_equal,
assert_frame_equal,
assertRaisesRegexp)
import pandas.util.testing as tm
from pandas.tests.frame.common import TestData
# Column add, remove, delete.
class TestDataFrameMutateColumns(tm.TestCase, TestData):
_multiprocess_can_split_ = True
def test_assign(self):
df = DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]})
original = df.copy()
result = df.assign(C=df.B / df.A)
expected = df.copy()
expected['C'] = [4, 2.5, 2]
assert_frame_equal(result, expected)
# lambda syntax
result = df.assign(C=lambda x: x.B / x.A)
assert_frame_equal(result, expected)
# original is unmodified
assert_frame_equal(df, original)
# Non-Series array-like
result = df.assign(C=[4, 2.5, 2])
assert_frame_equal(result, expected)
# original is unmodified
assert_frame_equal(df, original)
result = df.assign(B=df.B / df.A)
expected = expected.drop('B', axis=1).rename(columns={'C': 'B'})
assert_frame_equal(result, expected)
# overwrite
result = df.assign(A=df.A + df.B)
expected = df.copy()
expected['A'] = [5, 7, 9]
assert_frame_equal(result, expected)
# lambda
result = df.assign(A=lambda x: x.A + x.B)
assert_frame_equal(result, expected)
def test_assign_multiple(self):
df = DataFrame([[1, 4], [2, 5], [3, 6]], columns=['A', 'B'])
result = df.assign(C=[7, 8, 9], D=df.A, E=lambda x: x.B)
expected = DataFrame([[1, 4, 7, 1, 4], [2, 5, 8, 2, 5],
[3, 6, 9, 3, 6]], columns=list('ABCDE'))
assert_frame_equal(result, expected)
def test_assign_alphabetical(self):
# GH 9818
df = DataFrame([[1, 2], [3, 4]], columns=['A', 'B'])
result = df.assign(D=df.A + df.B, C=df.A - df.B)
expected = DataFrame([[1, 2, -1, 3], [3, 4, -1, 7]],
columns=list('ABCD'))
assert_frame_equal(result, expected)
result = df.assign(C=df.A - df.B, D=df.A + df.B)
assert_frame_equal(result, expected)
def test_assign_bad(self):
df = DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]})
# non-keyword argument
with tm.assertRaises(TypeError):
df.assign(lambda x: x.A)
with tm.assertRaises(AttributeError):
df.assign(C=df.A, D=df.A + df.C)
with tm.assertRaises(KeyError):
df.assign(C=lambda df: df.A, D=lambda df: df['A'] + df['C'])
with tm.assertRaises(KeyError):
df.assign(C=df.A, D=lambda x: x['A'] + x['C'])
def test_insert_error_msmgs(self):
# GH 7432
df = DataFrame({'foo': ['a', 'b', 'c'], 'bar': [
1, 2, 3], 'baz': ['d', 'e', 'f']}).set_index('foo')
s = DataFrame({'foo': ['a', 'b', 'c', 'a'], 'fiz': [
'g', 'h', 'i', 'j']}).set_index('foo')
msg = 'cannot reindex from a duplicate axis'
with assertRaisesRegexp(ValueError, msg):
df['newcol'] = s
# GH 4107, more descriptive error message
df = DataFrame(np.random.randint(0, 2, (4, 4)),
columns=['a', 'b', 'c', 'd'])
msg = 'incompatible index of inserted column with frame index'
with assertRaisesRegexp(TypeError, msg):
df['gr'] = df.groupby(['b', 'c']).count()
def test_insert_benchmark(self):
# from the vb_suite/frame_methods/frame_insert_columns
N = 10
K = 5
df = DataFrame(index=lrange(N))
new_col = np.random.randn(N)
for i in range(K):
df[i] = new_col
expected = DataFrame(np.repeat(new_col, K).reshape(N, K),
index=lrange(N))
assert_frame_equal(df, expected)
def test_insert(self):
df = DataFrame(np.random.randn(5, 3), index=np.arange(5),
columns=['c', 'b', 'a'])
df.insert(0, 'foo', df['a'])
self.assert_numpy_array_equal(df.columns, ['foo', 'c', 'b', 'a'])
tm.assert_series_equal(df['a'], df['foo'], check_names=False)
df.insert(2, 'bar', df['c'])
self.assert_numpy_array_equal(df.columns,
['foo', 'c', 'bar', 'b', 'a'])
tm.assert_almost_equal(df['c'], df['bar'], check_names=False)
# diff dtype
# new item
df['x'] = df['a'].astype('float32')
result = Series(dict(float64=5, float32=1))
self.assertTrue((df.get_dtype_counts() == result).all())
# replacing current (in different block)
df['a'] = df['a'].astype('float32')
result = Series(dict(float64=4, float32=2))
self.assertTrue((df.get_dtype_counts() == result).all())
df['y'] = df['a'].astype('int32')
result = Series(dict(float64=4, float32=2, int32=1))
self.assertTrue((df.get_dtype_counts() == result).all())
with assertRaisesRegexp(ValueError, 'already exists'):
df.insert(1, 'a', df['b'])
self.assertRaises(ValueError, df.insert, 1, 'c', df['b'])
df.columns.name = 'some_name'
# preserve columns name field
df.insert(0, 'baz', df['c'])
self.assertEqual(df.columns.name, 'some_name')
def test_delitem(self):
del self.frame['A']
self.assertNotIn('A', self.frame)
def test_pop(self):
self.frame.columns.name = 'baz'
self.frame.pop('A')
self.assertNotIn('A', self.frame)
self.frame['foo'] = 'bar'
self.frame.pop('foo')
self.assertNotIn('foo', self.frame)
# TODO self.assertEqual(self.frame.columns.name, 'baz')
# 10912
# inplace ops cause caching issue
a = DataFrame([[1, 2, 3], [4, 5, 6]], columns=[
'A', 'B', 'C'], index=['X', 'Y'])
b = a.pop('B')
b += 1
# original frame
expected = DataFrame([[1, 3], [4, 6]], columns=[
'A', 'C'], index=['X', 'Y'])
assert_frame_equal(a, expected)
# result
expected = Series([2, 5], index=['X', 'Y'], name='B') + 1
assert_series_equal(b, expected)
def test_pop_non_unique_cols(self):
df = DataFrame({0: [0, 1], 1: [0, 1], 2: [4, 5]})
df.columns = ["a", "b", "a"]
res = df.pop("a")
self.assertEqual(type(res), DataFrame)
self.assertEqual(len(res), 2)
self.assertEqual(len(df.columns), 1)
self.assertTrue("b" in df.columns)
self.assertFalse("a" in df.columns)
self.assertEqual(len(df.index), 2)
def test_insert_column_bug_4032(self):
# GH4032, inserting a column and renaming causing errors
df = DataFrame({'b': [1.1, 2.2]})
df = df.rename(columns={})
df.insert(0, 'a', [1, 2])
result = df.rename(columns={})
str(result)
expected = DataFrame([[1, 1.1], [2, 2.2]], columns=['a', 'b'])
assert_frame_equal(result, expected)
df.insert(0, 'c', [1.3, 2.3])
result = df.rename(columns={})
str(result)
expected = DataFrame([[1.3, 1, 1.1], [2.3, 2, 2.2]],
columns=['c', 'a', 'b'])
assert_frame_equal(result, expected)
| gpl-2.0 |
Barmaley-exe/scikit-learn | sklearn/utils/tests/test_shortest_path.py | 42 | 2894 | from collections import defaultdict
import numpy as np
from numpy.testing import assert_array_almost_equal
from sklearn.utils.graph import (graph_shortest_path,
single_source_shortest_path_length)
def floyd_warshall_slow(graph, directed=False):
N = graph.shape[0]
    #set zero entries (missing edges) to infinity
graph[np.where(graph == 0)] = np.inf
#set diagonal to zero
graph.flat[::N + 1] = 0
if not directed:
graph = np.minimum(graph, graph.T)
for k in range(N):
for i in range(N):
for j in range(N):
graph[i, j] = min(graph[i, j], graph[i, k] + graph[k, j])
graph[np.where(np.isinf(graph))] = 0
return graph
def generate_graph(N=20):
#sparse grid of distances
rng = np.random.RandomState(0)
dist_matrix = rng.random_sample((N, N))
#make symmetric: distances are not direction-dependent
dist_matrix += dist_matrix.T
#make graph sparse
i = (rng.randint(N, size=N * N // 2), rng.randint(N, size=N * N // 2))
dist_matrix[i] = 0
#set diagonal to zero
dist_matrix.flat[::N + 1] = 0
return dist_matrix
def test_floyd_warshall():
dist_matrix = generate_graph(20)
for directed in (True, False):
graph_FW = graph_shortest_path(dist_matrix, directed, 'FW')
graph_py = floyd_warshall_slow(dist_matrix.copy(), directed)
assert_array_almost_equal(graph_FW, graph_py)
def test_dijkstra():
dist_matrix = generate_graph(20)
for directed in (True, False):
graph_D = graph_shortest_path(dist_matrix, directed, 'D')
graph_py = floyd_warshall_slow(dist_matrix.copy(), directed)
assert_array_almost_equal(graph_D, graph_py)
def test_shortest_path():
dist_matrix = generate_graph(20)
# We compare path length and not costs (-> set distances to 0 or 1)
dist_matrix[dist_matrix != 0] = 1
for directed in (True, False):
if not directed:
dist_matrix = np.minimum(dist_matrix, dist_matrix.T)
graph_py = floyd_warshall_slow(dist_matrix.copy(), directed)
for i in range(dist_matrix.shape[0]):
# Non-reachable nodes have distance 0 in graph_py
dist_dict = defaultdict(int)
dist_dict.update(single_source_shortest_path_length(dist_matrix,
i))
for j in range(graph_py[i].shape[0]):
assert_array_almost_equal(dist_dict[j], graph_py[i, j])
def test_dijkstra_bug_fix():
X = np.array([[0., 0., 4.],
[1., 0., 2.],
[0., 5., 0.]])
dist_FW = graph_shortest_path(X, directed=False, method='FW')
dist_D = graph_shortest_path(X, directed=False, method='D')
assert_array_almost_equal(dist_D, dist_FW)
if __name__ == '__main__':
import nose
nose.runmodule()
| bsd-3-clause |
matrogers/pylearn2 | pylearn2/packaged_dependencies/theano_linear/unshared_conv/test_localdot.py | 44 | 5013 | from __future__ import print_function
import nose
import unittest
import numpy as np
from theano.compat.six.moves import xrange
import theano
from .localdot import LocalDot
from ..test_matrixmul import SymbolicSelfTestMixin
class TestLocalDot32x32(unittest.TestCase, SymbolicSelfTestMixin):
channels = 3
bsize = 10 # batch size
imshp = (32, 32)
ksize = 5
nkern_per_group = 16
subsample_stride = 1
ngroups = 1
def rand(self, shp):
return np.random.rand(*shp).astype('float32')
def setUp(self):
np.random.seed(234)
assert self.imshp[0] == self.imshp[1]
fModulesR = (self.imshp[0] - self.ksize + 1) // self.subsample_stride
#fModulesR += 1 # XXX GpuImgActs crashes w/o this??
fModulesC = fModulesR
self.fshape = (fModulesR, fModulesC, self.channels // self.ngroups,
self.ksize, self.ksize, self.ngroups, self.nkern_per_group)
self.ishape = (self.ngroups, self.channels // self.ngroups,
self.imshp[0], self.imshp[1], self.bsize)
self.hshape = (self.ngroups, self.nkern_per_group, fModulesR, fModulesC,
self.bsize)
filters = theano.shared(self.rand(self.fshape))
self.A = LocalDot(filters, self.imshp[0], self.imshp[1],
subsample=(self.subsample_stride, self.subsample_stride))
self.xlval = self.rand((self.hshape[-1],) + self.hshape[:-1])
self.xrval = self.rand(self.ishape)
self.xl = theano.shared(self.xlval)
self.xr = theano.shared(self.xrval)
# N.B. the tests themselves come from SymbolicSelfTestMixin
class TestLocalDotLargeGray(TestLocalDot32x32):
channels = 1
bsize = 128
imshp = (256, 256)
ksize = 9
nkern_per_group = 16
subsample_stride = 2
ngroups = 1
n_patches = 3000
def rand(self, shp):
return np.random.rand(*shp).astype('float32')
# not really a test, but important code to support
# Currently exposes error, by e.g.:
# CUDA_LAUNCH_BLOCKING=1
# THEANO_FLAGS=device=gpu,mode=DEBUG_MODE
# nosetests -sd test_localdot.py:TestLocalDotLargeGray.run_autoencoder
def run_autoencoder(
self,
n_train_iter=10000, # -- make this small to be a good unit test
rf_shape=(9, 9),
n_filters=1024,
dtype='float32',
module_stride=2,
lr=0.01,
show_filters=True,
):
if show_filters:
# import here to fail right away
import matplotlib.pyplot as plt
try:
import skdata.vanhateren.dataset
except ImportError:
raise nose.SkipTest()
# 1. Get a set of image patches from the van Hateren data set
print('Loading van Hateren images')
n_images = 50
vh = skdata.vanhateren.dataset.Calibrated(n_images)
patches = vh.raw_patches((self.n_patches,) + self.imshp,
items=vh.meta[:n_images],
rng=np.random.RandomState(123),
)
patches = patches.astype('float32')
patches /= patches.reshape(self.n_patches, self.imshp[0] * self.imshp[1])\
.max(axis=1)[:, None, None]
# TODO: better local contrast normalization
if 0 and show_filters:
plt.subplot(2, 2, 1); plt.imshow(patches[0], cmap='gray')
plt.subplot(2, 2, 2); plt.imshow(patches[1], cmap='gray')
plt.subplot(2, 2, 3); plt.imshow(patches[2], cmap='gray')
plt.subplot(2, 2, 4); plt.imshow(patches[3], cmap='gray')
plt.show()
# -- Convert patches to localdot format:
# groups x colors x rows x cols x images
patches5 = patches[:, :, :, None, None].transpose(3, 4, 1, 2, 0)
print('Patches shape', patches.shape, self.n_patches, patches5.shape)
# 2. Set up an autoencoder
print('Setting up autoencoder')
hid = theano.tensor.tanh(self.A.rmul(self.xl))
out = self.A.rmul_T(hid)
cost = ((out - self.xl) ** 2).sum()
params = self.A.params()
gparams = theano.tensor.grad(cost, params)
train_updates = [(p, p - lr / self.bsize * gp)
for (p, gp) in zip(params, gparams)]
if 1:
train_fn = theano.function([], [cost], updates=train_updates)
else:
train_fn = theano.function([], [], updates=train_updates)
theano.printing.debugprint(train_fn)
# 3. Train it
params[0].set_value(0.001 * params[0].get_value())
for ii in xrange(0, self.n_patches, self.bsize):
self.xl.set_value(patches5[:, :, :, :, ii:ii + self.bsize], borrow=True)
cost_ii, = train_fn()
print('Cost', ii, cost_ii)
if 0 and show_filters:
self.A.imshow_gray()
plt.show()
assert cost_ii < 0 # TODO: determine a threshold for detecting regression bugs
| bsd-3-clause |
hrjn/scikit-learn | sklearn/feature_extraction/image.py | 19 | 17614 | """
The :mod:`sklearn.feature_extraction.image` submodule gathers utilities to
extract features from images.
"""
# Authors: Emmanuelle Gouillart <[email protected]>
# Gael Varoquaux <[email protected]>
# Olivier Grisel
# Vlad Niculae
# License: BSD 3 clause
from itertools import product
import numbers
import numpy as np
from scipy import sparse
from numpy.lib.stride_tricks import as_strided
from ..utils import check_array, check_random_state
from ..utils.fixes import astype
from ..base import BaseEstimator
__all__ = ['PatchExtractor',
'extract_patches_2d',
'grid_to_graph',
'img_to_graph',
'reconstruct_from_patches_2d']
###############################################################################
# From an image to a graph
def _make_edges_3d(n_x, n_y, n_z=1):
"""Returns a list of edges for a 3D image.
Parameters
    ----------
n_x : integer
The size of the grid in the x direction.
n_y : integer
The size of the grid in the y direction.
n_z : integer, optional
The size of the grid in the z direction, defaults to 1
"""
vertices = np.arange(n_x * n_y * n_z).reshape((n_x, n_y, n_z))
edges_deep = np.vstack((vertices[:, :, :-1].ravel(),
vertices[:, :, 1:].ravel()))
edges_right = np.vstack((vertices[:, :-1].ravel(),
vertices[:, 1:].ravel()))
edges_down = np.vstack((vertices[:-1].ravel(), vertices[1:].ravel()))
edges = np.hstack((edges_deep, edges_right, edges_down))
return edges
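# --- Illustrative sketch (added for clarity; not part of the original
# --- scikit-learn source). The helper name below is an assumption.
# Hedged example of the edge list `_make_edges_3d` produces for a 2x2x1 grid.
def _sketch_make_edges_3d_2x2():
    """Return the 4 undirected edges of a 2x2 pixel grid (nodes numbered 0..3)."""
    edges = _make_edges_3d(2, 2)
    # `edges` has shape (2, 4); its columns are the node pairs
    # (0, 1), (2, 3)  -> "right" neighbours, and
    # (0, 2), (1, 3)  -> "down" neighbours.
    return edges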
def _compute_gradient_3d(edges, img):
n_x, n_y, n_z = img.shape
gradient = np.abs(img[edges[0] // (n_y * n_z),
(edges[0] % (n_y * n_z)) // n_z,
(edges[0] % (n_y * n_z)) % n_z] -
img[edges[1] // (n_y * n_z),
(edges[1] % (n_y * n_z)) // n_z,
(edges[1] % (n_y * n_z)) % n_z])
return gradient
# XXX: Why mask the image after computing the weights?
def _mask_edges_weights(mask, edges, weights=None):
"""Apply a mask to edges (weighted or not)"""
inds = np.arange(mask.size)
inds = inds[mask.ravel()]
ind_mask = np.logical_and(np.in1d(edges[0], inds),
np.in1d(edges[1], inds))
edges = edges[:, ind_mask]
if weights is not None:
weights = weights[ind_mask]
if len(edges.ravel()):
maxval = edges.max()
else:
maxval = 0
order = np.searchsorted(np.unique(edges.ravel()), np.arange(maxval + 1))
edges = order[edges]
if weights is None:
return edges
else:
return edges, weights
def _to_graph(n_x, n_y, n_z, mask=None, img=None,
return_as=sparse.coo_matrix, dtype=None):
"""Auxiliary function for img_to_graph and grid_to_graph
"""
edges = _make_edges_3d(n_x, n_y, n_z)
if dtype is None:
if img is None:
dtype = np.int
else:
dtype = img.dtype
if img is not None:
img = np.atleast_3d(img)
weights = _compute_gradient_3d(edges, img)
if mask is not None:
edges, weights = _mask_edges_weights(mask, edges, weights)
diag = img.squeeze()[mask]
else:
diag = img.ravel()
n_voxels = diag.size
else:
if mask is not None:
mask = astype(mask, dtype=np.bool, copy=False)
mask = np.asarray(mask, dtype=np.bool)
edges = _mask_edges_weights(mask, edges)
n_voxels = np.sum(mask)
else:
n_voxels = n_x * n_y * n_z
weights = np.ones(edges.shape[1], dtype=dtype)
diag = np.ones(n_voxels, dtype=dtype)
diag_idx = np.arange(n_voxels)
i_idx = np.hstack((edges[0], edges[1]))
j_idx = np.hstack((edges[1], edges[0]))
graph = sparse.coo_matrix((np.hstack((weights, weights, diag)),
(np.hstack((i_idx, diag_idx)),
np.hstack((j_idx, diag_idx)))),
(n_voxels, n_voxels),
dtype=dtype)
if return_as is np.ndarray:
return graph.toarray()
return return_as(graph)
def img_to_graph(img, mask=None, return_as=sparse.coo_matrix, dtype=None):
"""Graph of the pixel-to-pixel gradient connections
Edges are weighted with the gradient values.
Read more in the :ref:`User Guide <image_feature_extraction>`.
Parameters
----------
img : ndarray, 2D or 3D
2D or 3D image
mask : ndarray of booleans, optional
An optional mask of the image, to consider only part of the
pixels.
return_as : np.ndarray or a sparse matrix class, optional
The class to use to build the returned adjacency matrix.
dtype : None or dtype, optional
The data of the returned sparse matrix. By default it is the
dtype of img
Notes
-----
For scikit-learn versions 0.14.1 and prior, return_as=np.ndarray was
handled by returning a dense np.matrix instance. Going forward, np.ndarray
returns an np.ndarray, as expected.
For compatibility, user code relying on this method should wrap its
calls in ``np.asarray`` to avoid type issues.
"""
img = np.atleast_3d(img)
n_x, n_y, n_z = img.shape
return _to_graph(n_x, n_y, n_z, mask, img, return_as, dtype)
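# --- Illustrative sketch (added for clarity; not part of the original
# --- scikit-learn source). The toy image and helper name are assumptions.
# Hedged example of the adjacency matrix `img_to_graph` builds for a 2x2 image.
def _sketch_img_to_graph_2x2():
    """Pixel-gradient graph of a 2x2 toy image, returned as a dense array."""
    toy_img = np.array([[0., 1.],
                        [2., 3.]])
    graph = img_to_graph(toy_img)
    # 4 pixels -> 4x4 matrix: off-diagonal weights are |intensity difference|
    # between 4-connected pixels, the diagonal stores the pixel values.
    return graph.toarray()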
def grid_to_graph(n_x, n_y, n_z=1, mask=None, return_as=sparse.coo_matrix,
dtype=np.int):
"""Graph of the pixel-to-pixel connections
Edges exist if 2 voxels are connected.
Parameters
----------
n_x : int
Dimension in x axis
n_y : int
Dimension in y axis
n_z : int, optional, default 1
Dimension in z axis
mask : ndarray of booleans, optional
An optional mask of the image, to consider only part of the
pixels.
return_as : np.ndarray or a sparse matrix class, optional
The class to use to build the returned adjacency matrix.
dtype : dtype, optional, default int
The data of the returned sparse matrix. By default it is int
Notes
-----
For scikit-learn versions 0.14.1 and prior, return_as=np.ndarray was
handled by returning a dense np.matrix instance. Going forward, np.ndarray
returns an np.ndarray, as expected.
For compatibility, user code relying on this method should wrap its
calls in ``np.asarray`` to avoid type issues.
"""
return _to_graph(n_x, n_y, n_z, mask=mask, return_as=return_as,
dtype=dtype)
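# --- Illustrative sketch (added for clarity; not part of the original
# --- scikit-learn source). The helper name is an assumption.
# Hedged example of the plain connectivity matrix built by `grid_to_graph`.
def _sketch_grid_to_graph_2x3():
    """Connectivity of a 2x3 pixel grid as a dense 6x6 array of 0/1 entries."""
    connectivity = grid_to_graph(2, 3, return_as=np.ndarray)
    # symmetric, with ones on the diagonal and for each pair of 4-connected pixels
    return connectivity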
###############################################################################
# From an image to a set of small image patches
def _compute_n_patches(i_h, i_w, p_h, p_w, max_patches=None):
"""Compute the number of patches that will be extracted in an image.
Read more in the :ref:`User Guide <image_feature_extraction>`.
Parameters
----------
i_h : int
The image height
i_w : int
        The image width
p_h : int
The height of a patch
p_w : int
The width of a patch
max_patches : integer or float, optional default is None
The maximum number of patches to extract. If max_patches is a float
between 0 and 1, it is taken to be a proportion of the total number
of patches.
"""
n_h = i_h - p_h + 1
n_w = i_w - p_w + 1
all_patches = n_h * n_w
if max_patches:
if (isinstance(max_patches, (numbers.Integral))
and max_patches < all_patches):
return max_patches
elif (isinstance(max_patches, (numbers.Real))
and 0 < max_patches < 1):
return int(max_patches * all_patches)
else:
raise ValueError("Invalid value for max_patches: %r" % max_patches)
else:
return all_patches
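# --- Illustrative sketch (added for clarity; not part of the original
# --- scikit-learn source). The helper name is an assumption.
# Hedged example of how `max_patches` caps the patch count (int vs. fraction).
def _sketch_compute_n_patches():
    """For a 10x10 image and 3x3 patches: 64 total, capped to 10, or half (32)."""
    total = _compute_n_patches(10, 10, 3, 3)                    # 8 * 8 == 64
    capped = _compute_n_patches(10, 10, 3, 3, max_patches=10)   # -> 10
    half = _compute_n_patches(10, 10, 3, 3, max_patches=0.5)    # -> int(0.5 * 64) == 32
    return total, capped, half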
def extract_patches(arr, patch_shape=8, extraction_step=1):
"""Extracts patches of any n-dimensional array in place using strides.
Given an n-dimensional array it will return a 2n-dimensional array with
the first n dimensions indexing patch position and the last n indexing
the patch content. This operation is immediate (O(1)). A reshape
performed on the first n dimensions will cause numpy to copy data, leading
to a list of extracted patches.
Read more in the :ref:`User Guide <image_feature_extraction>`.
Parameters
----------
arr : ndarray
n-dimensional array of which patches are to be extracted
patch_shape : integer or tuple of length arr.ndim
Indicates the shape of the patches to be extracted. If an
integer is given, the shape will be a hypercube of
sidelength given by its value.
extraction_step : integer or tuple of length arr.ndim
Indicates step size at which extraction shall be performed.
If integer is given, then the step is uniform in all dimensions.
Returns
-------
patches : strided ndarray
2n-dimensional array indexing patches on first n dimensions and
containing patches on the last n dimensions. These dimensions
are fake, but this way no data is copied. A simple reshape invokes
a copying operation to obtain a list of patches:
result.reshape([-1] + list(patch_shape))
"""
arr_ndim = arr.ndim
if isinstance(patch_shape, numbers.Number):
patch_shape = tuple([patch_shape] * arr_ndim)
if isinstance(extraction_step, numbers.Number):
extraction_step = tuple([extraction_step] * arr_ndim)
patch_strides = arr.strides
slices = [slice(None, None, st) for st in extraction_step]
indexing_strides = arr[slices].strides
patch_indices_shape = ((np.array(arr.shape) - np.array(patch_shape)) //
np.array(extraction_step)) + 1
shape = tuple(list(patch_indices_shape) + list(patch_shape))
strides = tuple(list(indexing_strides) + list(patch_strides))
patches = as_strided(arr, shape=shape, strides=strides)
return patches
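# --- Illustrative sketch (added for clarity; not part of the original
# --- scikit-learn source). Array contents and helper name are assumptions.
# Hedged example of the strided (zero-copy) views returned by `extract_patches`.
def _sketch_extract_patches_strided():
    """Extract all overlapping 3x3 views of a 5x5 array without copying data."""
    arr = np.arange(25).reshape(5, 5)
    views = extract_patches(arr, patch_shape=3, extraction_step=1)
    # views.shape == (3, 3, 3, 3): the first two axes index the patch position,
    # the last two hold the 3x3 patch content (still a view on `arr`).
    return views.shape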
def extract_patches_2d(image, patch_size, max_patches=None, random_state=None):
"""Reshape a 2D image into a collection of patches
The resulting patches are allocated in a dedicated array.
Read more in the :ref:`User Guide <image_feature_extraction>`.
Parameters
----------
image : array, shape = (image_height, image_width) or
(image_height, image_width, n_channels)
The original image data. For color images, the last dimension specifies
the channel: a RGB image would have `n_channels=3`.
patch_size : tuple of ints (patch_height, patch_width)
the dimensions of one patch
max_patches : integer or float, optional default is None
The maximum number of patches to extract. If max_patches is a float
between 0 and 1, it is taken to be a proportion of the total number
of patches.
random_state : int or RandomState
Pseudo number generator state used for random sampling to use if
`max_patches` is not None.
Returns
-------
patches : array, shape = (n_patches, patch_height, patch_width) or
(n_patches, patch_height, patch_width, n_channels)
The collection of patches extracted from the image, where `n_patches`
is either `max_patches` or the total number of patches that can be
extracted.
Examples
--------
>>> from sklearn.feature_extraction import image
>>> one_image = np.arange(16).reshape((4, 4))
>>> one_image
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
>>> patches = image.extract_patches_2d(one_image, (2, 2))
>>> print(patches.shape)
(9, 2, 2)
>>> patches[0]
array([[0, 1],
[4, 5]])
>>> patches[1]
array([[1, 2],
[5, 6]])
>>> patches[8]
array([[10, 11],
[14, 15]])
"""
i_h, i_w = image.shape[:2]
p_h, p_w = patch_size
if p_h > i_h:
raise ValueError("Height of the patch should be less than the height"
" of the image.")
if p_w > i_w:
raise ValueError("Width of the patch should be less than the width"
" of the image.")
image = check_array(image, allow_nd=True)
image = image.reshape((i_h, i_w, -1))
n_colors = image.shape[-1]
extracted_patches = extract_patches(image,
patch_shape=(p_h, p_w, n_colors),
extraction_step=1)
n_patches = _compute_n_patches(i_h, i_w, p_h, p_w, max_patches)
if max_patches:
rng = check_random_state(random_state)
i_s = rng.randint(i_h - p_h + 1, size=n_patches)
j_s = rng.randint(i_w - p_w + 1, size=n_patches)
patches = extracted_patches[i_s, j_s, 0]
else:
patches = extracted_patches
patches = patches.reshape(-1, p_h, p_w, n_colors)
# remove the color dimension if useless
if patches.shape[-1] == 1:
return patches.reshape((n_patches, p_h, p_w))
else:
return patches
def reconstruct_from_patches_2d(patches, image_size):
"""Reconstruct the image from all of its patches.
Patches are assumed to overlap and the image is constructed by filling in
the patches from left to right, top to bottom, averaging the overlapping
regions.
Read more in the :ref:`User Guide <image_feature_extraction>`.
Parameters
----------
patches : array, shape = (n_patches, patch_height, patch_width) or
(n_patches, patch_height, patch_width, n_channels)
The complete set of patches. If the patches contain colour information,
channels are indexed along the last dimension: RGB patches would
have `n_channels=3`.
image_size : tuple of ints (image_height, image_width) or
(image_height, image_width, n_channels)
the size of the image that will be reconstructed
Returns
-------
image : array, shape = image_size
the reconstructed image
"""
i_h, i_w = image_size[:2]
p_h, p_w = patches.shape[1:3]
img = np.zeros(image_size)
# compute the dimensions of the patches array
n_h = i_h - p_h + 1
n_w = i_w - p_w + 1
for p, (i, j) in zip(patches, product(range(n_h), range(n_w))):
img[i:i + p_h, j:j + p_w] += p
for i in range(i_h):
for j in range(i_w):
# divide by the amount of overlap
# XXX: is this the most efficient way? memory-wise yes, cpu wise?
img[i, j] /= float(min(i + 1, p_h, i_h - i) *
min(j + 1, p_w, i_w - j))
return img
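# --- Illustrative sketch (added for clarity; not part of the original
# --- scikit-learn source). The helper name is an assumption.
# Hedged round-trip example: extracting every patch and averaging the overlaps
# reassembles the original image exactly.
def _sketch_patch_round_trip():
    """Extract all 2x2 patches of a 4x4 image and rebuild the original."""
    image = np.arange(16, dtype=np.float64).reshape(4, 4)
    patches = extract_patches_2d(image, (2, 2))            # shape (9, 2, 2)
    rebuilt = reconstruct_from_patches_2d(patches, (4, 4))
    return np.allclose(rebuilt, image)                      # True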
class PatchExtractor(BaseEstimator):
"""Extracts patches from a collection of images
Read more in the :ref:`User Guide <image_feature_extraction>`.
Parameters
----------
patch_size : tuple of ints (patch_height, patch_width)
the dimensions of one patch
max_patches : integer or float, optional default is None
The maximum number of patches per image to extract. If max_patches is a
float in (0, 1), it is taken to mean a proportion of the total number
of patches.
random_state : int or RandomState
Pseudo number generator state used for random sampling.
"""
def __init__(self, patch_size=None, max_patches=None, random_state=None):
self.patch_size = patch_size
self.max_patches = max_patches
self.random_state = random_state
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
"""
return self
def transform(self, X):
"""Transforms the image samples in X into a matrix of patch data.
Parameters
----------
X : array, shape = (n_samples, image_height, image_width) or
(n_samples, image_height, image_width, n_channels)
Array of images from which to extract patches. For color images,
the last dimension specifies the channel: a RGB image would have
`n_channels=3`.
Returns
-------
patches : array, shape = (n_patches, patch_height, patch_width) or
(n_patches, patch_height, patch_width, n_channels)
The collection of patches extracted from the images, where
`n_patches` is either `n_samples * max_patches` or the total
number of patches that can be extracted.
"""
self.random_state = check_random_state(self.random_state)
n_images, i_h, i_w = X.shape[:3]
X = np.reshape(X, (n_images, i_h, i_w, -1))
n_channels = X.shape[-1]
if self.patch_size is None:
patch_size = i_h // 10, i_w // 10
else:
patch_size = self.patch_size
# compute the dimensions of the patches array
p_h, p_w = patch_size
n_patches = _compute_n_patches(i_h, i_w, p_h, p_w, self.max_patches)
patches_shape = (n_images * n_patches,) + patch_size
if n_channels > 1:
patches_shape += (n_channels,)
# extract the patches
patches = np.empty(patches_shape)
for ii, image in enumerate(X):
patches[ii * n_patches:(ii + 1) * n_patches] = extract_patches_2d(
image, patch_size, self.max_patches, self.random_state)
return patches
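# --- Illustrative sketch (added for clarity; not part of the original
# --- scikit-learn source). Image sizes and helper name are assumptions.
# Hedged example of PatchExtractor on a batch of single-channel images.
def _sketch_patch_extractor_usage():
    """Sample 4 random 8x8 patches from each of five 32x32 grayscale images."""
    rng = np.random.RandomState(0)
    images = rng.rand(5, 32, 32)
    extractor = PatchExtractor(patch_size=(8, 8), max_patches=4, random_state=0)
    patches = extractor.fit(images).transform(images)
    # 5 images * 4 patches each -> patches.shape == (20, 8, 8)
    return patches.shape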
| bsd-3-clause |
BonexGu/Blik2D-SDK | Blik2D/addon/tensorflow-1.2.1_for_blik/tensorflow/examples/learn/multiple_gpu.py | 49 | 3078 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of using Estimator with multiple GPUs to distribute one model.
This example only runs if you have multiple GPUs to assign to.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from sklearn import cross_validation
from sklearn import datasets
from sklearn import metrics
import tensorflow as tf
layers = tf.contrib.layers
learn = tf.contrib.learn
def my_model(features, target):
"""DNN with three hidden layers, and dropout of 0.1 probability.
Note: If you want to run this example with multiple GPUs, Cuda Toolkit 7.0 and
CUDNN 6.5 V2 from NVIDIA need to be installed beforehand.
Args:
features: `Tensor` of input features.
target: `Tensor` of targets.
Returns:
Tuple of predictions, loss and training op.
"""
  # Convert the target to a one-hot tensor of shape (length of features, 3)
  # with an on-value of 1 for each one-hot vector of length 3.
target = tf.one_hot(target, 3, 1, 0)
  # Create three fully connected layers respectively of size 10, 20, and 10,
  # each followed by dropout with keep_prob=0.5 (i.e. a dropout probability
  # of 0.5).
normalizer_fn = layers.dropout
normalizer_params = {'keep_prob': 0.5}
with tf.device('/gpu:1'):
features = layers.stack(
features,
layers.fully_connected, [10, 20, 10],
normalizer_fn=normalizer_fn,
normalizer_params=normalizer_params)
with tf.device('/gpu:2'):
# Compute logits (1 per class) and compute loss.
logits = layers.fully_connected(features, 3, activation_fn=None)
loss = tf.losses.softmax_cross_entropy(target, logits)
# Create a tensor for training op.
train_op = tf.contrib.layers.optimize_loss(
loss,
tf.contrib.framework.get_global_step(),
optimizer='Adagrad',
learning_rate=0.1)
return ({
'class': tf.argmax(logits, 1),
'prob': tf.nn.softmax(logits)
}, loss, train_op)
def main(unused_argv):
iris = datasets.load_iris()
x_train, x_test, y_train, y_test = cross_validation.train_test_split(
iris.data, iris.target, test_size=0.2, random_state=42)
classifier = learn.Estimator(model_fn=my_model)
classifier.fit(x_train, y_train, steps=1000)
y_predicted = [
p['class'] for p in classifier.predict(
x_test, as_iterable=True)
]
score = metrics.accuracy_score(y_test, y_predicted)
print('Accuracy: {0:f}'.format(score))
if __name__ == '__main__':
tf.app.run()
| mit |
buckbaskin/stopsign | src/v1/manually_annotate_images.py | 1 | 5998 | #!/usr/bin/env python
import rospkg
import cv2
import numpy as np
from matplotlib import pyplot as plt
rospack = rospkg.RosPack()
pkg_path = rospack.get_path('stopsign')
IMAGE_RATE = 11 # hz
EXACT_FILE = '%s/data/003_manual_labels/exact.csv' % (pkg_path,)
start_image_id = 1785
end_image_id = 2189
IMAGE_BASE_STRING = '%s/data/002_original_images/%s' % (pkg_path, 'frame%04d.jpg')
def get_image(image_id):
filename = IMAGE_BASE_STRING % (image_id,)
return cv2.imread(filename, cv2.IMREAD_COLOR)
def flatten_kp(kp):
    """Flatten a cv2.KeyPoint into 7 numbers; angle and response are scaled up
    so that precision survives the integer formatting used when saving rows.
    """
v = np.array(np.zeros((7,)))
v[0] = kp.angle * 1000
v[1] = kp.class_id
v[2] = kp.octave
v[3] = kp.pt[0]
v[4] = kp.pt[1]
v[5] = kp.response * 100000000
v[6] = kp.size
return v
minx = 0
miny = 0
maxx = 10000
maxy = 10000
contour = []
def rebuild_contour():
global minx, miny, maxx, maxy
x1 = minx
x2 = int(2.0/3 * minx + 1.0/3 * maxx)
x3 = int(1.0/3 * minx + 2.0/3 * maxx)
x4 = maxx
y1 = miny
y2 = int(2.0/3 * miny + 1.0/3 * maxy)
y3 = int(1.0/3 * miny + 2.0/3 * maxy)
y4 = maxy
global contour
contour = np.array([[x2, y1], [x3, y1], [x4, y2], [x4, y3],
[x3, y4], [x2, y4], [x1, y3], [x1, y2]], np.int32)
rebuild_contour()
def click_and_crop(event, x, y, flags, param):
global minx, miny, maxx, maxy
if event == cv2.EVENT_LBUTTONDOWN:
minx = x
miny = y
elif event == cv2.EVENT_LBUTTONUP:
maxx = x
maxy = y
rebuild_contour()
def kp_des2vector(klass, image_id, kp, des):
    """Pack one keypoint into a flat 41-element row:
    [class * 1000, 32 ORB descriptor bytes, 7 flattened keypoint fields, image_id].
    """
vector = np.zeros((32+7+1+1,))
vector[:1] = np.array([klass]) * 1000
vector[-1] = np.array([image_id])
vector[-8:-1] = np.array(flatten_kp(kp))
vector[1:33] = des
return vector
def hand_label_image(image_id):
global minx, miny, maxx, maxy, contour
results = []
img = get_image(image_id)
# Initiate STAR detector
orb = cv2.ORB()
# find the keypoints with ORB
kp = orb.detect(img,None)
# compute the descriptors with ORB
kp, des = orb.compute(img, kp)
print('=====\npreview %04d\n' % (image_id,))
print('s -> image has a stopsign.\nUse mouse to select stopsign.')
print('\nOR\n')
print('n -> image does not have a stopsign')
print('---')
cv2.imshow('preview', img)
cv2.setMouseCallback('preview', click_and_crop)
val = cv2.waitKey(0) % 256
test_kp = val == ord('s')
cv2.destroyAllWindows()
if test_kp:
for i in range(20):
print('s -> accept polyline as region\n\nOR\n')
print('Use mouse to reselect the region')
print('n -> refresh polyline as region')
print('---')
imgur = cv2.drawKeypoints(
img,
filter(lambda x: cv2.pointPolygonTest(contour, x.pt, False) >= 0, kp),
color=(0,255,0),
flags=0)
cv2.polylines(imgur, [contour], True, (79*i % 255, 0, 255))
cv2.imshow('preview', imgur)
cv2.setMouseCallback('preview', click_and_crop)
            val = cv2.waitKey(0)
            # 1048691 == 0x100000 | ord('s'): the 's' key as reported by some
            # OpenCV/GTK builds that keep modifier bits set in waitKey() codes
            if val == 1048691:
                break
cv2.destroyAllWindows()
for index, keypoint in enumerate(kp):
descriptor = des[index]
if test_kp:
skip_because_of_radius = cv2.pointPolygonTest(contour, kp[index].pt, False) < 0
if not skip_because_of_radius:
img2 = cv2.drawKeypoints(img, [kp[index]], color=(0,255,0), flags=4)
                if val == 1048691:    # 's' (with high modifier bits set)
                    klass = 1
                elif val == 1048686:  # 'n' (with high modifier bits set)
                    klass = 0
else:
cv2.destroyAllWindows()
raise NotImplementedError('Use s or n please.')
else:
klass = 0
else:
klass = 0
vector = kp_des2vector(klass, image_id, kp[index], descriptor)
results.append(vector)
cv2.destroyAllWindows()
minx = 0
miny = 0
maxx = 10000
maxy = 10000
return results, test_kp
def auto_label_image(image_id, klass):
results = []
img = get_image(image_id)
# Initiate STAR detector
orb = cv2.ORB()
# find the keypoints with ORB
kp = orb.detect(img,None)
# compute the descriptors with ORB
kp, des = orb.compute(img, kp)
for index, keypoint in enumerate(kp):
descriptor = des[index]
vector = kp_des2vector(klass, image_id, kp[index], descriptor)
results.append(vector)
return results
def extend_file(file, new_vectors):
for vector in new_vectors:
file.write(','.join(['%7.2f' % num for num in vector]) + '\n')
def expand_to_string(new_vectors):
for vec in new_vectors:
yield ','.join(['%7d' % num for num in vec])
### Begin the whole process ###
# Generate the first line from data
line0 = []
line0.append('class'.ljust(7))
for i in range(32):
line0.append('descr%02d' % (i,))
# line0.extend(['Keypoint Angle', 'Keypoint Class Id', 'Keypoint Octave', 'Keypoint X', 'Keypoint Y', 'Keypoint Response x 10^6', 'Keypoint Size'])
line0.extend(['angle'.ljust(7), 'classid', 'octave'.ljust(7), 'x'.ljust(7), 'y'.ljust(7), 'respons', 'size'.ljust(7)])
line0.append('imageid')
line0 = ','.join(line0)
exact_lines = [line0]
# Label all images before first stopsign as not-stopsign
print('Prefilling data')
for auto_image_id in range(start_image_id):
if auto_image_id % 100 == 0:
print('%d / %d' % (auto_image_id, start_image_id,))
new_vectors = auto_label_image(auto_image_id, 0)
exact_lines.extend(expand_to_string(new_vectors))
print('Done Prefilling Data')
# Hand label sampled images and auto fill the rest
for image_id in range(start_image_id, end_image_id, 1):
new_vectors, is_stopsign = hand_label_image(image_id)
exact_lines.extend(expand_to_string(new_vectors))
print('Write to EXACT_FILE')
with open(EXACT_FILE, 'w') as f:
for line in exact_lines:
f.write('%s\n' % (line,))
| mit |
kazemakase/scikit-learn | sklearn/metrics/scorer.py | 211 | 13141 | """
The :mod:`sklearn.metrics.scorer` submodule implements a flexible
interface for model selection and evaluation using
arbitrary score functions.
A scorer object is a callable that can be passed to
:class:`sklearn.grid_search.GridSearchCV` or
:func:`sklearn.cross_validation.cross_val_score` as the ``scoring`` parameter,
to specify how a model should be evaluated.
The signature of the call is ``(estimator, X, y)`` where ``estimator``
is the model to be evaluated, ``X`` is the test data and ``y`` is the
ground truth labeling (or ``None`` in the case of unsupervised models).
"""
# Authors: Andreas Mueller <[email protected]>
# Lars Buitinck <[email protected]>
# Arnaud Joly <[email protected]>
# License: Simplified BSD
from abc import ABCMeta, abstractmethod
from functools import partial
import numpy as np
from . import (r2_score, median_absolute_error, mean_absolute_error,
mean_squared_error, accuracy_score, f1_score,
roc_auc_score, average_precision_score,
precision_score, recall_score, log_loss)
from .cluster import adjusted_rand_score
from ..utils.multiclass import type_of_target
from ..externals import six
from ..base import is_regressor
class _BaseScorer(six.with_metaclass(ABCMeta, object)):
def __init__(self, score_func, sign, kwargs):
self._kwargs = kwargs
self._score_func = score_func
self._sign = sign
@abstractmethod
def __call__(self, estimator, X, y, sample_weight=None):
pass
def __repr__(self):
kwargs_string = "".join([", %s=%s" % (str(k), str(v))
for k, v in self._kwargs.items()])
return ("make_scorer(%s%s%s%s)"
% (self._score_func.__name__,
"" if self._sign > 0 else ", greater_is_better=False",
self._factory_args(), kwargs_string))
def _factory_args(self):
"""Return non-default make_scorer arguments for repr."""
return ""
class _PredictScorer(_BaseScorer):
def __call__(self, estimator, X, y_true, sample_weight=None):
"""Evaluate predicted target values for X relative to y_true.
Parameters
----------
estimator : object
            Trained estimator to use for scoring. Must have a predict
            method; the output of that is used to compute the score.
X : array-like or sparse matrix
Test data that will be fed to estimator.predict.
y_true : array-like
Gold standard target values for X.
sample_weight : array-like, optional (default=None)
Sample weights.
Returns
-------
score : float
Score function applied to prediction of estimator on X.
"""
y_pred = estimator.predict(X)
if sample_weight is not None:
return self._sign * self._score_func(y_true, y_pred,
sample_weight=sample_weight,
**self._kwargs)
else:
return self._sign * self._score_func(y_true, y_pred,
**self._kwargs)
class _ProbaScorer(_BaseScorer):
def __call__(self, clf, X, y, sample_weight=None):
"""Evaluate predicted probabilities for X relative to y_true.
Parameters
----------
clf : object
Trained classifier to use for scoring. Must have a predict_proba
method; the output of that is used to compute the score.
X : array-like or sparse matrix
Test data that will be fed to clf.predict_proba.
y : array-like
Gold standard target values for X. These must be class labels,
not probabilities.
sample_weight : array-like, optional (default=None)
Sample weights.
Returns
-------
score : float
Score function applied to prediction of estimator on X.
"""
y_pred = clf.predict_proba(X)
if sample_weight is not None:
return self._sign * self._score_func(y, y_pred,
sample_weight=sample_weight,
**self._kwargs)
else:
return self._sign * self._score_func(y, y_pred, **self._kwargs)
def _factory_args(self):
return ", needs_proba=True"
class _ThresholdScorer(_BaseScorer):
def __call__(self, clf, X, y, sample_weight=None):
"""Evaluate decision function output for X relative to y_true.
Parameters
----------
clf : object
Trained classifier to use for scoring. Must have either a
decision_function method or a predict_proba method; the output of
that is used to compute the score.
X : array-like or sparse matrix
Test data that will be fed to clf.decision_function or
clf.predict_proba.
y : array-like
Gold standard target values for X. These must be class labels,
not decision function values.
sample_weight : array-like, optional (default=None)
Sample weights.
Returns
-------
score : float
Score function applied to prediction of estimator on X.
"""
y_type = type_of_target(y)
if y_type not in ("binary", "multilabel-indicator"):
raise ValueError("{0} format is not supported".format(y_type))
if is_regressor(clf):
y_pred = clf.predict(X)
else:
try:
y_pred = clf.decision_function(X)
# For multi-output multi-class estimator
if isinstance(y_pred, list):
y_pred = np.vstack(p for p in y_pred).T
except (NotImplementedError, AttributeError):
y_pred = clf.predict_proba(X)
if y_type == "binary":
y_pred = y_pred[:, 1]
elif isinstance(y_pred, list):
y_pred = np.vstack([p[:, -1] for p in y_pred]).T
if sample_weight is not None:
return self._sign * self._score_func(y, y_pred,
sample_weight=sample_weight,
**self._kwargs)
else:
return self._sign * self._score_func(y, y_pred, **self._kwargs)
def _factory_args(self):
return ", needs_threshold=True"
def get_scorer(scoring):
if isinstance(scoring, six.string_types):
try:
scorer = SCORERS[scoring]
except KeyError:
raise ValueError('%r is not a valid scoring value. '
'Valid options are %s'
% (scoring, sorted(SCORERS.keys())))
else:
scorer = scoring
return scorer
def _passthrough_scorer(estimator, *args, **kwargs):
"""Function that wraps estimator.score"""
return estimator.score(*args, **kwargs)
def check_scoring(estimator, scoring=None, allow_none=False):
"""Determine scorer from user options.
A TypeError will be thrown if the estimator cannot be scored.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
allow_none : boolean, optional, default: False
If no scoring is specified and the estimator has no score function, we
can either return None or raise an exception.
Returns
-------
scoring : callable
A scorer callable object / function with signature
``scorer(estimator, X, y)``.
"""
has_scoring = scoring is not None
if not hasattr(estimator, 'fit'):
raise TypeError("estimator should a be an estimator implementing "
"'fit' method, %r was passed" % estimator)
elif has_scoring:
return get_scorer(scoring)
elif hasattr(estimator, 'score'):
return _passthrough_scorer
elif allow_none:
return None
else:
raise TypeError(
"If no scoring is specified, the estimator passed should "
"have a 'score' method. The estimator %r does not." % estimator)
def make_scorer(score_func, greater_is_better=True, needs_proba=False,
needs_threshold=False, **kwargs):
"""Make a scorer from a performance metric or loss function.
This factory function wraps scoring functions for use in GridSearchCV
and cross_val_score. It takes a score function, such as ``accuracy_score``,
``mean_squared_error``, ``adjusted_rand_index`` or ``average_precision``
and returns a callable that scores an estimator's output.
Read more in the :ref:`User Guide <scoring>`.
Parameters
----------
score_func : callable,
Score function (or loss function) with signature
``score_func(y, y_pred, **kwargs)``.
greater_is_better : boolean, default=True
Whether score_func is a score function (default), meaning high is good,
or a loss function, meaning low is good. In the latter case, the
scorer object will sign-flip the outcome of the score_func.
needs_proba : boolean, default=False
Whether score_func requires predict_proba to get probability estimates
out of a classifier.
needs_threshold : boolean, default=False
Whether score_func takes a continuous decision certainty.
This only works for binary classification using estimators that
have either a decision_function or predict_proba method.
For example ``average_precision`` or the area under the roc curve
can not be computed using discrete predictions alone.
**kwargs : additional arguments
Additional parameters to be passed to score_func.
Returns
-------
scorer : callable
Callable object that returns a scalar score; greater is better.
Examples
--------
>>> from sklearn.metrics import fbeta_score, make_scorer
>>> ftwo_scorer = make_scorer(fbeta_score, beta=2)
>>> ftwo_scorer
make_scorer(fbeta_score, beta=2)
>>> from sklearn.grid_search import GridSearchCV
>>> from sklearn.svm import LinearSVC
>>> grid = GridSearchCV(LinearSVC(), param_grid={'C': [1, 10]},
... scoring=ftwo_scorer)
"""
sign = 1 if greater_is_better else -1
if needs_proba and needs_threshold:
raise ValueError("Set either needs_proba or needs_threshold to True,"
" but not both.")
if needs_proba:
cls = _ProbaScorer
elif needs_threshold:
cls = _ThresholdScorer
else:
cls = _PredictScorer
return cls(score_func, sign, kwargs)
# Standard regression scores
r2_scorer = make_scorer(r2_score)
mean_squared_error_scorer = make_scorer(mean_squared_error,
greater_is_better=False)
mean_absolute_error_scorer = make_scorer(mean_absolute_error,
greater_is_better=False)
median_absolute_error_scorer = make_scorer(median_absolute_error,
greater_is_better=False)
# Standard Classification Scores
accuracy_scorer = make_scorer(accuracy_score)
f1_scorer = make_scorer(f1_score)
# Score functions that need decision values
roc_auc_scorer = make_scorer(roc_auc_score, greater_is_better=True,
needs_threshold=True)
average_precision_scorer = make_scorer(average_precision_score,
needs_threshold=True)
precision_scorer = make_scorer(precision_score)
recall_scorer = make_scorer(recall_score)
# Score function for probabilistic classification
log_loss_scorer = make_scorer(log_loss, greater_is_better=False,
needs_proba=True)
# Clustering scores
adjusted_rand_scorer = make_scorer(adjusted_rand_score)
SCORERS = dict(r2=r2_scorer,
median_absolute_error=median_absolute_error_scorer,
mean_absolute_error=mean_absolute_error_scorer,
mean_squared_error=mean_squared_error_scorer,
accuracy=accuracy_scorer, roc_auc=roc_auc_scorer,
average_precision=average_precision_scorer,
log_loss=log_loss_scorer,
adjusted_rand_score=adjusted_rand_scorer)
for name, metric in [('precision', precision_score),
('recall', recall_score), ('f1', f1_score)]:
SCORERS[name] = make_scorer(metric)
for average in ['macro', 'micro', 'samples', 'weighted']:
qualified_name = '{0}_{1}'.format(name, average)
SCORERS[qualified_name] = make_scorer(partial(metric, pos_label=None,
average=average))
| bsd-3-clause |
NeuralEnsemble/elephant | elephant/signal_processing.py | 2 | 38717 | # -*- coding: utf-8 -*-
"""
Basic processing procedures for time series (e.g., performing a z-score of a
signal, or filtering a signal).
.. autosummary::
:toctree: _toctree/signal_processing
zscore
cross_correlation_function
butter
wavelet_transform
hilbert
rauc
derivative
:copyright: Copyright 2014-2020 by the Elephant team, see `doc/authors.rst`.
:license: Modified BSD, see LICENSE.txt for details.
"""
from __future__ import division, print_function, unicode_literals
import neo
import numpy as np
import quantities as pq
import scipy.signal
from elephant.utils import deprecated_alias, check_same_units
__all__ = [
"zscore",
"cross_correlation_function",
"butter",
"wavelet_transform",
"hilbert",
"rauc",
"derivative"
]
def zscore(signal, inplace=True):
r"""
Apply a z-score operation to one or several `neo.AnalogSignal` objects.
The z-score operation subtracts the mean :math:`\mu` of the signal, and
divides by its standard deviation :math:`\sigma`:
.. math::
Z(x(t)) = \frac{x(t)-\mu}{\sigma}
If a `neo.AnalogSignal` object containing multiple signals is provided,
the z-transform is always calculated for each signal individually.
If a list of `neo.AnalogSignal` objects is supplied, the mean and standard
deviation are calculated across all objects of the list. Thus, all list
    elements are z-transformed by the same values of :math:`\mu` and
:math:`\sigma`. For a `neo.AnalogSignal` that contains multiple signals,
each signal of the array is treated separately across list elements.
Therefore, the number of signals must be identical for each
`neo.AnalogSignal` object of the list.
Parameters
----------
signal : neo.AnalogSignal or list of neo.AnalogSignal
Signals for which to calculate the z-score.
inplace : bool, optional
If True, the contents of the input `signal` is replaced by the
z-transformed signal, if possible, i.e when the signal type is float.
If False, a copy of the original `signal` is returned.
Default: True
Returns
-------
signal_ztransofrmed : neo.AnalogSignal or list of neo.AnalogSignal
The output format matches the input format: for each input
`neo.AnalogSignal`, a corresponding `neo.AnalogSignal` is returned,
containing the z-transformed signal with dimensionless unit.
Notes
-----
You may supply a list of `neo.AnalogSignal` objects, where each object in
the list contains the data of one trial of the experiment, and each signal
of the `neo.AnalogSignal` corresponds to the recordings from one specific
electrode in a particular trial. In this scenario, you will z-transform
the signal of each electrode separately, but transform all trials of a
given electrode in the same way.
Examples
--------
Z-transform a single `neo.AnalogSignal`, containing only a single signal.
>>> import neo
>>> import numpy as np
>>> import quantities as pq
>>> from elephant.signal_processing import zscore
...
>>> a = neo.AnalogSignal(
... np.array([1, 2, 3, 4, 5, 6]).reshape(-1,1) * pq.mV,
... t_start=0*pq.s, sampling_rate=1000*pq.Hz)
>>> zscore(a).as_quantity()
[[-1.46385011]
[-0.87831007]
[-0.29277002]
[ 0.29277002]
[ 0.87831007]
[ 1.46385011]] dimensionless
Z-transform a single `neo.AnalogSignal` containing multiple signals.
>>> b = neo.AnalogSignal(
... np.transpose([[1, 2, 3, 4, 5, 6],
... [11, 12, 13, 14, 15, 16]]) * pq.mV,
... t_start=0*pq.s, sampling_rate=1000*pq.Hz)
>>> zscore(b).as_quantity()
[[-1.46385011 -1.46385011]
[-0.87831007 -0.87831007]
[-0.29277002 -0.29277002]
[ 0.29277002 0.29277002]
[ 0.87831007 0.87831007]
[ 1.46385011 1.46385011]] dimensionless
Z-transform a list of `neo.AnalogSignal`, each one containing more than
one signal:
>>> c = neo.AnalogSignal(
... np.transpose([[21, 22, 23, 24, 25, 26],
... [31, 32, 33, 34, 35, 36]]) * pq.mV,
... t_start=0*pq.s, sampling_rate=1000*pq.Hz)
>>> zscore([b, c])
[<AnalogSignal(array([[-1.11669108, -1.08361877],
[-1.0672076 , -1.04878252],
[-1.01772411, -1.01394628],
[-0.96824063, -0.97911003],
[-0.91875714, -0.94427378],
[-0.86927366, -0.90943753]]) * dimensionless, [0.0 s, 0.006 s],
sampling rate: 1000.0 Hz)>,
<AnalogSignal(array([[ 0.78170952, 0.84779261],
[ 0.86621866, 0.90728682],
[ 0.9507278 , 0.96678104],
[ 1.03523694, 1.02627526],
[ 1.11974608, 1.08576948],
[ 1.20425521, 1.1452637 ]]) * dimensionless, [0.0 s, 0.006 s],
sampling rate: 1000.0 Hz)>]
"""
# Transform input to a list
if isinstance(signal, neo.AnalogSignal):
signal = [signal]
check_same_units(signal, object_type=neo.AnalogSignal)
# Calculate mean and standard deviation
signal_stacked = np.vstack(signal).magnitude
mean = signal_stacked.mean(axis=0)
std = signal_stacked.std(axis=0)
signal_ztransofrmed = []
for sig in signal:
sig_normalized = sig.magnitude.astype(mean.dtype, copy=not inplace)
sig_normalized -= mean
# items where std is zero are already zero
np.divide(sig_normalized, std, out=sig_normalized, where=std != 0)
sig_dimless = neo.AnalogSignal(signal=sig_normalized,
units=pq.dimensionless,
dtype=sig_normalized.dtype,
copy=False,
t_start=sig.t_start,
sampling_rate=sig.sampling_rate,
name=sig.name,
file_origin=sig.file_origin,
description=sig.description,
array_annotations=sig.array_annotations,
**sig.annotations)
signal_ztransofrmed.append(sig_dimless)
# Return single object, or list of objects
if len(signal_ztransofrmed) == 1:
signal_ztransofrmed = signal_ztransofrmed[0]
return signal_ztransofrmed
@deprecated_alias(ch_pairs='channel_pairs', nlags='n_lags',
env='hilbert_envelope')
def cross_correlation_function(signal, channel_pairs, hilbert_envelope=False,
n_lags=None, scaleopt='unbiased'):
r"""
Computes an estimator of the cross-correlation function
:cite:`signal-Stoica2005`.
.. math::
        R(\tau) = \frac{1}{N-|\tau|} R'(\tau)
where :math:`R'(\tau) = \left<x(t)y(t+\tau)\right>` in a pairwise
manner, i.e.:
`signal[channel_pairs[0,0]]` vs `signal[channel_pairs[0,1]]`,
`signal[channel_pairs[1,0]]` vs `signal[channel_pairs[1,1]]`,
and so on.
The input time series are z-scored beforehand. `scaleopt` controls the
choice of :math:`R_{xy}(\tau)` normalizer. Alternatively, returns the
Hilbert envelope of :math:`R_{xy}(\tau)`, which is useful to determine the
correlation length of oscillatory signals.
Parameters
----------
signal : (nt, nch) neo.AnalogSignal
Signal with `nt` number of samples that contains `nch` LFP channels.
channel_pairs : list or (n, 2) np.ndarray
List with `n` channel pairs for which to compute cross-correlation.
Each element of the list must contain 2 channel indices.
If `np.ndarray`, the second axis must have dimension 2.
hilbert_envelope : bool, optional
If True, returns the Hilbert envelope of cross-correlation function
result.
Default: False
n_lags : int, optional
        Defines the number of lags for the cross-correlation function. Must be
        a positive integer. The number of samples of the output is
        `2*n_lags+1`.
If None, the number of samples of the output is equal to the number of
samples of the input signal (namely `nt`).
Default: None
scaleopt : {'none', 'biased', 'unbiased', 'normalized', 'coeff'}, optional
Normalization option, equivalent to matlab `xcorr(..., scaleopt)`.
Specified as one of the following.
* 'none': raw, unscaled cross-correlation
.. math::
R_{xy}(\tau)
* 'biased': biased estimate of the cross-correlation:
.. math::
R_{xy,biased}(\tau) = \frac{1}{N} R_{xy}(\tau)
* 'unbiased': unbiased estimate of the cross-correlation:
.. math::
R_{xy,unbiased}(\tau) = \frac{1}{N-\tau} R_{xy}(\tau)
* 'normalized' or 'coeff': normalizes the sequence so that the
autocorrelations at zero lag equal 1:
.. math::
R_{xy,coeff}(\tau) = \frac{1}{\sqrt{R_{xx}(0) R_{yy}(0)}}
R_{xy}(\tau)
Default: 'unbiased'
Returns
-------
cross_corr : neo.AnalogSignal
Shape: `[2*n_lags+1, n]`
Pairwise cross-correlation functions for channel pairs given by
`channel_pairs`. If `hilbert_envelope` is True, the output is the
Hilbert envelope of the pairwise cross-correlation function. This is
helpful to compute the correlation length for oscillating
cross-correlation functions.
Raises
------
ValueError
If input `signal` is not a `neo.AnalogSignal`.
If `channel_pairs` is not a list of channel pair indices with shape
`(n,2)`.
If `hilbert_envelope` is not a boolean.
If `n_lags` is not a positive integer.
If `scaleopt` is not one of the predefined above keywords.
Examples
--------
>>> import neo
>>> import quantities as pq
>>> import matplotlib.pyplot as plt
>>> from elephant.signal_processing import cross_correlation_function
>>> dt = 0.02
>>> N = 2018
>>> f = 0.5
>>> t = np.arange(N)*dt
>>> x = np.zeros((N,2))
>>> x[:,0] = 0.2 * np.sin(2.*np.pi*f*t)
>>> x[:,1] = 5.3 * np.cos(2.*np.pi*f*t)
Generate neo.AnalogSignals from x and find cross-correlation
>>> signal = neo.AnalogSignal(x, units='mV', t_start=0.*pq.ms,
    ...     sampling_rate=1/dt*pq.Hz, dtype=float)
>>> rho = cross_correlation_function(signal, [0,1], n_lags=150)
>>> env = cross_correlation_function(signal, [0,1], n_lags=150,
... hilbert_envelope=True)
...
>>> plt.plot(rho.times, rho)
>>> plt.plot(env.times, env) # should be equal to one
>>> plt.show()
"""
# Make channel_pairs a 2D array
pairs = np.asarray(channel_pairs)
if pairs.ndim == 1:
pairs = np.expand_dims(pairs, axis=0)
# Check input
if not isinstance(signal, neo.AnalogSignal):
raise ValueError('Input signal must be of type neo.AnalogSignal')
if pairs.shape[1] != 2:
raise ValueError("'channel_pairs' is not a list of channel pair "
"indices. Cannot define pairs for cross-correlation.")
if not isinstance(hilbert_envelope, bool):
raise ValueError("'hilbert_envelope' must be a boolean value")
if n_lags is not None:
if not isinstance(n_lags, int) or n_lags <= 0:
            raise ValueError('n_lags must be a positive integer')
# z-score analog signal and store channel time series in different arrays
# Cross-correlation will be calculated between xsig and ysig
z_transformed = signal.magnitude - signal.magnitude.mean(axis=0)
z_transformed = np.divide(z_transformed, signal.magnitude.std(axis=0),
out=z_transformed,
where=z_transformed != 0)
# transpose (nch, xy, nt) -> (xy, nt, nch)
xsig, ysig = np.transpose(z_transformed.T[pairs], (1, 2, 0))
# Define vector of lags tau
nt, nch = xsig.shape
tau = np.arange(nt) - nt // 2
# Calculate cross-correlation by taking Fourier transform of signal,
# multiply in Fourier space, and transform back. Correct for bias due
# to zero-padding
xcorr = scipy.signal.fftconvolve(xsig, ysig[::-1], mode='same', axes=0)
if scaleopt == 'biased':
xcorr /= nt
elif scaleopt == 'unbiased':
normalizer = np.expand_dims(nt - np.abs(tau), axis=1)
xcorr /= normalizer
elif scaleopt in ('normalized', 'coeff'):
normalizer = np.sqrt((xsig ** 2).sum(axis=0) * (ysig ** 2).sum(axis=0))
xcorr /= normalizer
elif scaleopt != 'none':
raise ValueError("Invalid scaleopt mode: '{}'".format(scaleopt))
# Calculate envelope of cross-correlation function with Hilbert transform.
# This is useful for transient oscillatory signals.
if hilbert_envelope:
xcorr = np.abs(scipy.signal.hilbert(xcorr, axis=0))
# Cut off lags outside the desired range
if n_lags is not None:
tau0 = np.argwhere(tau == 0).item()
xcorr = xcorr[tau0 - n_lags: tau0 + n_lags + 1, :]
# Return neo.AnalogSignal
cross_corr = neo.AnalogSignal(xcorr,
units='',
t_start=tau[0] * signal.sampling_period,
t_stop=tau[-1] * signal.sampling_period,
sampling_rate=signal.sampling_rate,
dtype=float)
return cross_corr
@deprecated_alias(highpass_freq='highpass_frequency',
lowpass_freq='lowpass_frequency',
fs='sampling_frequency')
def butter(signal, highpass_frequency=None, lowpass_frequency=None, order=4,
filter_function='filtfilt', sampling_frequency=1.0, axis=-1):
"""
Butterworth filtering function for `neo.AnalogSignal`.
Filter type is determined according to how values of `highpass_frequency`
and `lowpass_frequency` are given (see "Parameters" section for details).
Parameters
----------
signal : neo.AnalogSignal or pq.Quantity or np.ndarray
Time series data to be filtered.
If `pq.Quantity` or `np.ndarray`, the sampling frequency should be
given through the keyword argument `fs`.
highpass_frequency : pq.Quantity of float, optional
High-pass cut-off frequency. If `float`, the given value is taken as
frequency in Hz.
Default: None
lowpass_frequency : pq.Quantity or float, optional
Low-pass cut-off frequency. If `float`, the given value is taken as
frequency in Hz.
Filter type is determined depending on the values of
`lowpass_frequency` and `highpass_frequency`:
* `highpass_frequency` only (`lowpass_frequency` is None):
highpass filter
* `lowpass_frequency` only (`highpass_frequency` is None):
lowpass filter
* `highpass_frequency` < `lowpass_frequency`: bandpass filter
* `highpass_frequency` > `lowpass_frequency`: bandstop filter
Default: None
order : int, optional
Order of the Butterworth filter.
Default: 4
filter_function : {'filtfilt', 'lfilter', 'sosfiltfilt'}, optional
Filtering function to be used. Available filters:
* 'filtfilt': `scipy.signal.filtfilt`;
* 'lfilter': `scipy.signal.lfilter`;
* 'sosfiltfilt': `scipy.signal.sosfiltfilt`.
In most applications 'filtfilt' should be used, because it doesn't
bring about phase shift due to filtering. For numerically stable
filtering, in particular higher order filters, use 'sosfiltfilt'
(see https://github.com/NeuralEnsemble/elephant/issues/220).
Default: 'filtfilt'
sampling_frequency : pq.Quantity or float, optional
The sampling frequency of the input time series. When given as
`float`, its value is taken as frequency in Hz. When `signal` is given
as `neo.AnalogSignal`, its attribute is used to specify the sampling
frequency and this parameter is ignored.
Default: 1.0
axis : int, optional
Axis along which filter is applied.
Default: last axis (-1)
Returns
-------
filtered_signal : neo.AnalogSignal or pq.Quantity or np.ndarray
Filtered input data. The shape and type is identical to those of the
input `signal`.
Raises
------
ValueError
If `filter_function` is not one of 'lfilter', 'filtfilt',
or 'sosfiltfilt'.
If both `highpass_frequency` and `lowpass_frequency` are None.
Examples
--------
>>> import neo
>>> import numpy as np
>>> import quantities as pq
>>> from elephant.signal_processing import butter
>>> noise = neo.AnalogSignal(np.random.normal(size=5000),
... sampling_rate=1000 * pq.Hz, units='mV')
>>> filtered_noise = butter(noise, highpass_frequency=250.0 * pq.Hz)
>>> filtered_noise
AnalogSignal with 1 channels of length 5000; units mV; datatype float64
sampling rate: 1000.0 Hz
time: 0.0 s to 5.0 s
Let's check that the normal noise power spectrum at zero frequency is close
to zero.
>>> from elephant.spectral import welch_psd
>>> freq, psd = welch_psd(filtered_noise, fs=1000.0)
>>> psd.shape
(1, 556)
>>> freq[0], psd[0, 0]
(array(0.) * Hz, array(7.21464674e-08) * mV**2/Hz)
"""
available_filters = 'lfilter', 'filtfilt', 'sosfiltfilt'
if filter_function not in available_filters:
raise ValueError("Invalid `filter_function`: {filter_function}. "
"Available filters: {available_filters}".format(
filter_function=filter_function,
available_filters=available_filters))
# design filter
if hasattr(signal, 'sampling_rate'):
sampling_frequency = signal.sampling_rate.rescale(pq.Hz).magnitude
if isinstance(highpass_frequency, pq.quantity.Quantity):
highpass_frequency = highpass_frequency.rescale(pq.Hz).magnitude
if isinstance(lowpass_frequency, pq.quantity.Quantity):
lowpass_frequency = lowpass_frequency.rescale(pq.Hz).magnitude
Fn = sampling_frequency / 2.
# filter type is determined according to the values of cut-off
# frequencies
if lowpass_frequency and highpass_frequency:
if highpass_frequency < lowpass_frequency:
Wn = (highpass_frequency / Fn, lowpass_frequency / Fn)
btype = 'bandpass'
else:
Wn = (lowpass_frequency / Fn, highpass_frequency / Fn)
btype = 'bandstop'
elif lowpass_frequency:
Wn = lowpass_frequency / Fn
btype = 'lowpass'
elif highpass_frequency:
Wn = highpass_frequency / Fn
btype = 'highpass'
else:
raise ValueError(
"Either highpass_frequency or lowpass_frequency must be given"
)
if filter_function == 'sosfiltfilt':
output = 'sos'
else:
output = 'ba'
designed_filter = scipy.signal.butter(order, Wn, btype=btype,
output=output)
# When the input is AnalogSignal, the axis for time index (i.e. the
# first axis) needs to be rolled to the last
data = np.asarray(signal)
if isinstance(signal, neo.AnalogSignal):
data = np.rollaxis(data, 0, len(data.shape))
# apply filter
if filter_function == 'lfilter':
b, a = designed_filter
filtered_data = scipy.signal.lfilter(b=b, a=a, x=data, axis=axis)
elif filter_function == 'filtfilt':
b, a = designed_filter
filtered_data = scipy.signal.filtfilt(b=b, a=a, x=data, axis=axis)
else:
filtered_data = scipy.signal.sosfiltfilt(sos=designed_filter,
x=data, axis=axis)
if isinstance(signal, neo.AnalogSignal):
filtered_data = np.rollaxis(filtered_data, -1, 0)
signal_out = signal.duplicate_with_new_data(filtered_data)
        # TODO: use the flag once the issue below is fixed:
        #  https://github.com/NeuralEnsemble/python-neo/issues/752
signal_out.array_annotate(**signal.array_annotations)
return signal_out
elif isinstance(signal, pq.quantity.Quantity):
return filtered_data * signal.units
else:
return filtered_data
@deprecated_alias(nco='n_cycles', freq='frequency', fs='sampling_frequency')
def wavelet_transform(signal, frequency, n_cycles=6.0, sampling_frequency=1.0,
zero_padding=True):
r"""
Compute the wavelet transform of a given signal with Morlet mother
wavelet. The parametrization of the wavelet is based on
:cite:`signal-Le2001_83`.
Parameters
----------
signal : (Nt, Nch) neo.AnalogSignal or np.ndarray or list
Time series data to be wavelet-transformed. When multi-dimensional
`np.ndarray` or list is given, the time axis must be the last
dimension. If `neo.AnalogSignal`, `Nt` is the number of time points
and `Nch` is the number of channels.
frequency : float or list of float
Center frequency of the Morlet wavelet in Hz. Multiple center
frequencies can be given as a list, in which case the function
computes the wavelet transforms for all the given frequencies at once.
n_cycles : float, optional
Size of the mother wavelet (approximate number of oscillation cycles
within a wavelet). Corresponds to :math:`nco` in
:cite:`signal-Le2001_83`. A larger `n_cycles` value leads to a higher
frequency resolution and a lower temporal resolution, and vice versa.
Typically used values are in a range of 3–8, but one should be cautious
when using a value smaller than ~ 6, in which case the admissibility of
the wavelet is not ensured :cite:`signal-Farge1992_395`.
Default: 6.0
sampling_frequency : float, optional
Sampling rate of the input data in Hz.
When `signal` is given as a `neo.AnalogSignal`, the sampling frequency
is taken from its attribute and this parameter is ignored.
Default: 1.0
zero_padding : bool, optional
        Specifies whether the data length is extended to the least power of
        2 greater than the original length, by padding zeros to the tail, to
        speed up the computation.
        If True, the extended part is cut out from the final result before it
        is returned, so that the output has the same length as the input.
        Default: True
Returns
-------
signal_wt : np.ndarray
        Wavelet transform of the input data. When `frequency` is given as a
        list, how the wavelet transforms for the different frequencies are
        returned depends on the input type:
* when the input was a `neo.AnalogSignal`, the returned array has
shape (`Nt`, `Nch`, `Nf`), where `Nf` = `len(freq)`, such that the
last dimension indexes the frequencies;
* when the input was a `np.ndarray` or list of shape
(`a`, `b`, ..., `c`, `Nt`), the returned array has a shape
(`a`, `b`, ..., `c`, `Nf`, `Nt`), such that the second last
dimension indexes the frequencies.
To summarize, `signal_wt.ndim` = `signal.ndim` + 1, with the
additional dimension in the last axis (for `neo.AnalogSignal` input)
or the second last axis (`np.ndarray` or list input) indexing the
frequencies.
Raises
------
ValueError
        If `frequency` (or one of the values in `frequency` when it is a list)
        is greater than or equal to half of `sampling_frequency`.
If `n_cycles` is not positive.
Notes
-----
`n_cycles` is related to the wavelet number :math:`w` as
:math:`w \sim 2 \pi \frac{n_{\text{cycles}}}{6}` as defined in
:cite:`signal-Le2001_83`.
Examples
--------
>>> import neo
>>> import numpy as np
>>> import quantities as pq
>>> from elephant.signal_processing import wavelet_transform
>>> noise = neo.AnalogSignal(np.random.normal(size=7),
... sampling_rate=11 * pq.Hz, units='mV')
    The wavelet frequency must be less than half of the sampling rate;
    here we pick 5 Hz.
>>> wavelet_transform(noise, frequency=5)
array([[-1.00890049+3.003473j ],
[-1.43664254-2.8389273j ],
[ 3.02499511+0.96534578j],
[-2.79543976+1.4581079j ],
[ 0.94387304-2.98159518j],
[ 1.41476471+2.77389985j],
[-2.95996766-0.9872236j ]])
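    When several center frequencies are given as a list, the transforms are
    stacked along an additional axis (a minimal sketch reusing ``noise`` from
    above; both frequencies lie below the Nyquist frequency of 5.5 Hz):
    >>> wt = wavelet_transform(noise, frequency=[2, 5])
    >>> wt.shape  # (Nt, Nch, Nf) for neo.AnalogSignal input
    (7, 1, 2)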
"""
def _morlet_wavelet_ft(freq, n_cycles, fs, n):
# Generate the Fourier transform of Morlet wavelet as defined
# in Le van Quyen et al. J Neurosci Meth 111:83-98 (2001).
sigma = n_cycles / (6. * freq)
freqs = np.fft.fftfreq(n, 1.0 / fs)
        heaviside = np.array(freqs > 0., dtype=float)
ft_real = np.sqrt(2 * np.pi * freq) * sigma * np.exp(
-2 * (np.pi * sigma * (freqs - freq)) ** 2) * heaviside * fs
ft_imag = np.zeros_like(ft_real)
return ft_real + 1.0j * ft_imag
data = np.asarray(signal)
# When the input is AnalogSignal, the axis for time index (i.e. the
# first axis) needs to be rolled to the last
if isinstance(signal, neo.AnalogSignal):
data = np.rollaxis(data, 0, data.ndim)
# When the input is AnalogSignal, use its attribute to specify the
# sampling frequency
if hasattr(signal, 'sampling_rate'):
sampling_frequency = signal.sampling_rate
if isinstance(sampling_frequency, pq.quantity.Quantity):
sampling_frequency = sampling_frequency.rescale('Hz').magnitude
if isinstance(frequency, (list, tuple, np.ndarray)):
freqs = np.asarray(frequency)
else:
freqs = np.array([frequency, ])
if isinstance(freqs[0], pq.quantity.Quantity):
freqs = [f.rescale('Hz').magnitude for f in freqs]
# check whether the given central frequencies are less than the
# Nyquist frequency of the signal
if np.any(freqs >= sampling_frequency / 2):
raise ValueError("'frequency' elements must be less than the half of "
"the 'sampling_frequency' ({}) Hz"
.format(sampling_frequency))
# check if n_cycles is positive
if n_cycles <= 0:
raise ValueError("`n_cycles` must be positive")
n_orig = data.shape[-1]
if zero_padding:
n = 2 ** (int(np.log2(n_orig)) + 1)
else:
n = n_orig
# generate Morlet wavelets (in the frequency domain)
    wavelet_fts = np.empty([len(freqs), n], dtype=complex)
for i, f in enumerate(freqs):
wavelet_fts[i] = _morlet_wavelet_ft(f, n_cycles, sampling_frequency, n)
# perform wavelet transform by convoluting the signal with the wavelets
if data.ndim == 1:
data = np.expand_dims(data, 0)
data = np.expand_dims(data, data.ndim - 1)
data = np.fft.ifft(np.fft.fft(data, n) * wavelet_fts)
signal_wt = data[..., 0:n_orig]
# reshape the result array according to the input
if isinstance(signal, neo.AnalogSignal):
signal_wt = np.rollaxis(signal_wt, -1)
if not isinstance(frequency, (list, tuple, np.ndarray)):
signal_wt = signal_wt[..., 0]
else:
if signal.ndim == 1:
signal_wt = signal_wt[0]
if not isinstance(frequency, (list, tuple, np.ndarray)):
signal_wt = signal_wt[..., 0, :]
return signal_wt
@deprecated_alias(N='padding')
def hilbert(signal, padding='nextpow'):
"""
Apply a Hilbert transform to a `neo.AnalogSignal` object in order to
obtain its (complex) analytic signal.
The time series of the instantaneous angle and amplitude can be obtained
as the angle (`np.angle` function) and absolute value (`np.abs` function)
of the complex analytic signal, respectively.
    By default, the function will zero-pad the signal to a length
    corresponding to the next higher power of 2. This provides higher
    computational efficiency at the expense of memory. In addition, it
    circumvents a situation where, for some specific choices of the input
    length, the `scipy.signal.hilbert` function does not terminate.
Parameters
----------
signal : neo.AnalogSignal
Signal(s) to transform.
padding : int, {'none', 'nextpow'}, or None, optional
Defines whether the signal is zero-padded.
The `padding` argument corresponds to `N` in
`scipy.signal.hilbert(signal, N=padding)` function.
If 'none' or None, no padding.
If 'nextpow', zero-pad to the next length that is a power of 2.
If it is an `int`, directly specify the length to zero-pad to
(indicates the number of Fourier components).
Default: 'nextpow'
Returns
-------
neo.AnalogSignal
Contains the complex analytic signal(s) corresponding to the input
`signal`. The unit of the returned `neo.AnalogSignal` is
dimensionless.
Raises
------
ValueError:
If `padding` is not an integer or neither 'nextpow' nor 'none' (None).
Examples
--------
Create a sine signal at 5 Hz with increasing amplitude and calculate the
instantaneous phases:
>>> import neo
>>> import numpy as np
>>> import quantities as pq
>>> import matplotlib.pyplot as plt
>>> from elephant.signal_processing import hilbert
>>> t = np.arange(0, 5000) * pq.ms
>>> f = 5. * pq.Hz
>>> a = neo.AnalogSignal(
... np.array(
... (1 + t.magnitude / t[-1].magnitude) * np.sin(
... 2. * np.pi * f * t.rescale(pq.s))).reshape(
... (-1,1)) * pq.mV,
... t_start=0*pq.s,
... sampling_rate=1000*pq.Hz)
...
>>> analytic_signal = hilbert(a, padding='nextpow')
>>> angles = np.angle(analytic_signal)
>>> amplitudes = np.abs(analytic_signal)
>>> print(angles)
[[-1.57079633]
[-1.51334228]
[-1.46047675]
...,
[-1.73112977]
[-1.68211683]
[-1.62879501]]
>>> plt.plot(t, angles)
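    The amount of zero-padding can also be given explicitly as an integer
    number of Fourier components (a sketch; 8192 is an arbitrary value larger
    than the signal length):
    >>> analytic_signal_int_pad = hilbert(a, padding=8192)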
"""
# Length of input signals
n_org = signal.shape[0]
    # Zero-pad the signal on the right to the desired length
if isinstance(padding, int):
# User defined padding
n = padding
elif padding == 'nextpow':
# To speed up calculation of the Hilbert transform, make sure we change
# the signal to be of a length that is a power of two. Failure to do so
# results in computations of certain signal lengths to not finish (or
# finish in absurd time). This might be a bug in scipy (0.16), e.g.,
# the following code will not terminate for this value of k:
#
# import numpy
# import scipy.signal
# k=679346
# t = np.arange(0, k) / 1000.
# a = (1 + t / t[-1]) * np.sin(2 * np.pi * 5 * t)
# analytic_signal = scipy.signal.hilbert(a)
#
# For this reason, nextpow is the default setting for now.
n = 2 ** (int(np.log2(n_org - 1)) + 1)
elif padding == 'none' or padding is None:
# No padding
n = n_org
else:
raise ValueError("Invalid padding '{}'.".format(padding))
output = signal.duplicate_with_new_data(
scipy.signal.hilbert(signal.magnitude, N=n, axis=0)[:n_org])
    # TODO: use the flag once the issue below is fixed:
    #  https://github.com/NeuralEnsemble/python-neo/issues/752
output.array_annotate(**signal.array_annotations)
return output / output.units
def rauc(signal, baseline=None, bin_duration=None, t_start=None, t_stop=None):
"""
Calculate the rectified area under the curve (RAUC) for a
`neo.AnalogSignal`.
The signal is optionally divided into bins with duration `bin_duration`,
and the rectified signal (absolute value) is integrated within each bin to
find the area under the curve. The mean or median of the signal or an
arbitrary baseline may optionally be subtracted before rectification.
Parameters
----------
signal : neo.AnalogSignal
The signal to integrate. If `signal` contains more than one channel,
each is integrated separately.
baseline : pq.Quantity or {'mean', 'median'}, optional
A factor to subtract from the signal before rectification.
If 'mean', the mean value of the entire `signal` is subtracted on a
channel-by-channel basis.
If 'median', the median value of the entire `signal` is subtracted on
a channel-by-channel basis.
Default: None
bin_duration : pq.Quantity, optional
The length of time that each integration should span.
If None, there will be only one bin spanning the entire signal
duration.
If `bin_duration` does not divide evenly into the signal duration, the
        end of the signal is padded with zeros to accommodate the final,
overextending bin.
Default: None
t_start : pq.Quantity, optional
Time to start the algorithm.
If None, starts at the beginning of `signal`.
Default: None
t_stop : pq.Quantity, optional
Time to end the algorithm.
If None, ends at the last time of `signal`.
The signal is cropped using `signal.time_slice(t_start, t_stop)` after
baseline removal. Useful if you want the RAUC for a short section of
the signal but want the mean or median calculation (`baseline`='mean'
or `baseline`='median') to use the entire signal for better baseline
estimation.
Default: None
Returns
-------
pq.Quantity or neo.AnalogSignal
If the number of bins is 1, the returned object is a scalar or
vector `pq.Quantity` containing a single RAUC value for each channel.
Otherwise, the returned object is a `neo.AnalogSignal` containing the
RAUC(s) for each bin stored as a sample, with times corresponding to
the center of each bin. The output signal will have the same number
of channels as the input signal.
Raises
------
ValueError
If `signal` is not `neo.AnalogSignal`.
        If `bin_duration` is neither None nor a `pq.Quantity`.
        If `baseline` is not one of None, 'mean', 'median', or a `pq.Quantity`.
See Also
--------
neo.AnalogSignal.time_slice : how `t_start` and `t_stop` are used
Examples
--------
>>> import neo
>>> import numpy as np
>>> import quantities as pq
>>> from elephant.signal_processing import rauc
>>> signal = neo.AnalogSignal(np.arange(10), sampling_rate=20 * pq.Hz,
... units='mV')
>>> rauc(signal)
array(2.025) * mV/Hz
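    With `bin_duration` given, one RAUC value per bin is returned as a
    `neo.AnalogSignal` (a minimal sketch reusing the ``signal`` from above;
    the variable name is illustrative only):
    >>> rauc_binned = rauc(signal, bin_duration=0.25 * pq.s)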
"""
if not isinstance(signal, neo.AnalogSignal):
raise ValueError('Input signal is not a neo.AnalogSignal!')
if baseline is None:
pass
elif baseline == 'mean':
# subtract mean from each channel
signal = signal - signal.mean(axis=0)
elif baseline == 'median':
# subtract median from each channel
signal = signal - np.median(signal.as_quantity(), axis=0)
elif isinstance(baseline, pq.Quantity):
# subtract arbitrary baseline
signal = signal - baseline
else:
raise ValueError("baseline must be either None, 'mean', 'median', or "
"a Quantity. Got {}".format(baseline))
# slice the signal after subtracting baseline
signal = signal.time_slice(t_start, t_stop)
if bin_duration is not None:
# from bin duration, determine samples per bin and number of bins
if isinstance(bin_duration, pq.Quantity):
samples_per_bin = int(
np.round(
bin_duration.rescale('s') /
signal.sampling_period.rescale('s')))
n_bins = int(np.ceil(signal.shape[0] / samples_per_bin))
else:
raise ValueError("bin_duration must be a Quantity. Got {}".format(
bin_duration))
else:
# all samples in one bin
samples_per_bin = signal.shape[0]
n_bins = 1
# store the actual bin duration
bin_duration = samples_per_bin * signal.sampling_period
# reshape into equal size bins, padding the end with zeros if necessary
n_channels = signal.shape[1]
sig_binned = signal.as_quantity().copy()
sig_binned.resize(n_bins * samples_per_bin, n_channels, refcheck=False)
sig_binned = sig_binned.reshape(n_bins, samples_per_bin, n_channels)
# rectify and integrate over each bin
rauc = np.trapz(np.abs(sig_binned), dx=signal.sampling_period, axis=1)
if n_bins == 1:
# return a single value for each channel
return rauc.squeeze()
else:
# return an AnalogSignal with times corresponding to center of each bin
t_start = signal.t_start.rescale(bin_duration.units) + bin_duration / 2
rauc_sig = neo.AnalogSignal(rauc, t_start=t_start,
sampling_period=bin_duration)
return rauc_sig
def derivative(signal):
"""
Calculate the derivative of a `neo.AnalogSignal`.
Parameters
----------
signal : neo.AnalogSignal
The signal to differentiate. If `signal` contains more than one
channel, each is differentiated separately.
Returns
-------
derivative_sig : neo.AnalogSignal
The returned object is a `neo.AnalogSignal` containing the differences
between each successive sample value of the input signal divided by
the sampling period. Times are centered between the successive samples
of the input. The output signal will have the same number of channels
as the input signal.
Raises
------
TypeError
If `signal` is not a `neo.AnalogSignal`.
Examples
--------
>>> import neo
>>> import numpy as np
>>> import quantities as pq
>>> from elephant.signal_processing import derivative
>>> signal = neo.AnalogSignal([0, 3, 4, 11, -1], sampling_rate=1 * pq.Hz,
... units='mV')
>>> print(derivative(signal))
[[ 3.]
[ 1.]
[ 7.]
[-12.]] mV*Hz
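    The output starts half a sampling period after the input, i.e. each
    derivative sample sits midway between two input samples (a quick sketch;
    the variable name is illustrative only):
    >>> d = derivative(signal)  # d.t_start equals 0.5 s for this signal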
"""
if not isinstance(signal, neo.AnalogSignal):
raise TypeError('Input signal is not a neo.AnalogSignal!')
derivative_sig = neo.AnalogSignal(
np.diff(signal.as_quantity(), axis=0) / signal.sampling_period,
t_start=signal.t_start + signal.sampling_period / 2,
sampling_period=signal.sampling_period)
return derivative_sig
| bsd-3-clause |
jjx02230808/project0223 | sklearn/cluster/tests/test_spectral.py | 262 | 7954 | """Testing for Spectral Clustering methods"""
from sklearn.externals.six.moves import cPickle
dumps, loads = cPickle.dumps, cPickle.loads
import numpy as np
from scipy import sparse
from sklearn.utils import check_random_state
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_warns_message
from sklearn.cluster import SpectralClustering, spectral_clustering
from sklearn.cluster.spectral import spectral_embedding
from sklearn.cluster.spectral import discretize
from sklearn.metrics import pairwise_distances
from sklearn.metrics import adjusted_rand_score
from sklearn.metrics.pairwise import kernel_metrics, rbf_kernel
from sklearn.datasets.samples_generator import make_blobs
def test_spectral_clustering():
S = np.array([[1.0, 1.0, 1.0, 0.2, 0.0, 0.0, 0.0],
[1.0, 1.0, 1.0, 0.2, 0.0, 0.0, 0.0],
[1.0, 1.0, 1.0, 0.2, 0.0, 0.0, 0.0],
[0.2, 0.2, 0.2, 1.0, 1.0, 1.0, 1.0],
[0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0],
[0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0],
[0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0]])
for eigen_solver in ('arpack', 'lobpcg'):
for assign_labels in ('kmeans', 'discretize'):
for mat in (S, sparse.csr_matrix(S)):
model = SpectralClustering(random_state=0, n_clusters=2,
affinity='precomputed',
eigen_solver=eigen_solver,
assign_labels=assign_labels
).fit(mat)
labels = model.labels_
if labels[0] == 0:
labels = 1 - labels
assert_array_equal(labels, [1, 1, 1, 0, 0, 0, 0])
model_copy = loads(dumps(model))
assert_equal(model_copy.n_clusters, model.n_clusters)
assert_equal(model_copy.eigen_solver, model.eigen_solver)
assert_array_equal(model_copy.labels_, model.labels_)
def test_spectral_amg_mode():
# Test the amg mode of SpectralClustering
centers = np.array([
[0., 0., 0.],
[10., 10., 10.],
[20., 20., 20.],
])
X, true_labels = make_blobs(n_samples=100, centers=centers,
cluster_std=1., random_state=42)
D = pairwise_distances(X) # Distance matrix
S = np.max(D) - D # Similarity matrix
S = sparse.coo_matrix(S)
try:
from pyamg import smoothed_aggregation_solver
amg_loaded = True
except ImportError:
amg_loaded = False
if amg_loaded:
labels = spectral_clustering(S, n_clusters=len(centers),
random_state=0, eigen_solver="amg")
# We don't care too much that it's good, just that it *worked*.
# There does have to be some lower limit on the performance though.
assert_greater(np.mean(labels == true_labels), .3)
else:
assert_raises(ValueError, spectral_embedding, S,
n_components=len(centers),
random_state=0, eigen_solver="amg")
def test_spectral_unknown_mode():
# Test that SpectralClustering fails with an unknown mode set.
centers = np.array([
[0., 0., 0.],
[10., 10., 10.],
[20., 20., 20.],
])
X, true_labels = make_blobs(n_samples=100, centers=centers,
cluster_std=1., random_state=42)
D = pairwise_distances(X) # Distance matrix
S = np.max(D) - D # Similarity matrix
S = sparse.coo_matrix(S)
assert_raises(ValueError, spectral_clustering, S, n_clusters=2,
random_state=0, eigen_solver="<unknown>")
def test_spectral_unknown_assign_labels():
# Test that SpectralClustering fails with an unknown assign_labels set.
centers = np.array([
[0., 0., 0.],
[10., 10., 10.],
[20., 20., 20.],
])
X, true_labels = make_blobs(n_samples=100, centers=centers,
cluster_std=1., random_state=42)
D = pairwise_distances(X) # Distance matrix
S = np.max(D) - D # Similarity matrix
S = sparse.coo_matrix(S)
assert_raises(ValueError, spectral_clustering, S, n_clusters=2,
random_state=0, assign_labels="<unknown>")
def test_spectral_clustering_sparse():
X, y = make_blobs(n_samples=20, random_state=0,
centers=[[1, 1], [-1, -1]], cluster_std=0.01)
S = rbf_kernel(X, gamma=1)
S = np.maximum(S - 1e-4, 0)
S = sparse.coo_matrix(S)
labels = SpectralClustering(random_state=0, n_clusters=2,
affinity='precomputed').fit(S).labels_
assert_equal(adjusted_rand_score(y, labels), 1)
def test_affinities():
# Note: in the following, random_state has been selected to have
# a dataset that yields a stable eigen decomposition both when built
# on OSX and Linux
X, y = make_blobs(n_samples=20, random_state=0,
centers=[[1, 1], [-1, -1]], cluster_std=0.01
)
# nearest neighbors affinity
sp = SpectralClustering(n_clusters=2, affinity='nearest_neighbors',
random_state=0)
assert_warns_message(UserWarning, 'not fully connected', sp.fit, X)
assert_equal(adjusted_rand_score(y, sp.labels_), 1)
sp = SpectralClustering(n_clusters=2, gamma=2, random_state=0)
labels = sp.fit(X).labels_
assert_equal(adjusted_rand_score(y, labels), 1)
X = check_random_state(10).rand(10, 5) * 10
kernels_available = kernel_metrics()
for kern in kernels_available:
# Additive chi^2 gives a negative similarity matrix which
# doesn't make sense for spectral clustering
if kern != 'additive_chi2':
sp = SpectralClustering(n_clusters=2, affinity=kern,
random_state=0)
labels = sp.fit(X).labels_
assert_equal((X.shape[0],), labels.shape)
sp = SpectralClustering(n_clusters=2, affinity=lambda x, y: 1,
random_state=0)
labels = sp.fit(X).labels_
assert_equal((X.shape[0],), labels.shape)
def histogram(x, y, **kwargs):
# Histogram kernel implemented as a callable.
assert_equal(kwargs, {}) # no kernel_params that we didn't ask for
return np.minimum(x, y).sum()
sp = SpectralClustering(n_clusters=2, affinity=histogram, random_state=0)
labels = sp.fit(X).labels_
assert_equal((X.shape[0],), labels.shape)
# raise error on unknown affinity
sp = SpectralClustering(n_clusters=2, affinity='<unknown>')
assert_raises(ValueError, sp.fit, X)
def test_discretize(seed=8):
# Test the discretize using a noise assignment matrix
random_state = np.random.RandomState(seed)
for n_samples in [50, 100, 150, 500]:
for n_class in range(2, 10):
# random class labels
y_true = random_state.random_integers(0, n_class, n_samples)
y_true = np.array(y_true, np.float)
# noise class assignment matrix
y_indicator = sparse.coo_matrix((np.ones(n_samples),
(np.arange(n_samples),
y_true)),
shape=(n_samples,
n_class + 1))
y_true_noisy = (y_indicator.toarray()
+ 0.1 * random_state.randn(n_samples,
n_class + 1))
y_pred = discretize(y_true_noisy, random_state)
assert_greater(adjusted_rand_score(y_true, y_pred), 0.8)
| bsd-3-clause |
datapythonista/pandas | pandas/tests/io/parser/test_index_col.py | 4 | 8443 | """
Tests that the specified index column (a.k.a "index_col")
is properly handled or inferred during parsing for all of
the parsers defined in parsers.py
"""
from io import StringIO
import numpy as np
import pytest
from pandas import (
DataFrame,
Index,
MultiIndex,
)
import pandas._testing as tm
@pytest.mark.parametrize("with_header", [True, False])
def test_index_col_named(all_parsers, with_header):
parser = all_parsers
no_header = """\
KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""
header = "ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir\n"
if with_header:
data = header + no_header
result = parser.read_csv(StringIO(data), index_col="ID")
expected = parser.read_csv(StringIO(data), header=0).set_index("ID")
tm.assert_frame_equal(result, expected)
else:
data = no_header
msg = "Index ID invalid"
with pytest.raises(ValueError, match=msg):
parser.read_csv(StringIO(data), index_col="ID")
def test_index_col_named2(all_parsers):
parser = all_parsers
data = """\
1,2,3,4,hello
5,6,7,8,world
9,10,11,12,foo
"""
expected = DataFrame(
{"a": [1, 5, 9], "b": [2, 6, 10], "c": [3, 7, 11], "d": [4, 8, 12]},
index=Index(["hello", "world", "foo"], name="message"),
)
names = ["a", "b", "c", "d", "message"]
result = parser.read_csv(StringIO(data), names=names, index_col=["message"])
tm.assert_frame_equal(result, expected)
def test_index_col_is_true(all_parsers):
# see gh-9798
data = "a,b\n1,2"
parser = all_parsers
msg = "The value of index_col couldn't be 'True'"
with pytest.raises(ValueError, match=msg):
parser.read_csv(StringIO(data), index_col=True)
def test_infer_index_col(all_parsers):
data = """A,B,C
foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
parser = all_parsers
result = parser.read_csv(StringIO(data))
expected = DataFrame(
[[1, 2, 3], [4, 5, 6], [7, 8, 9]],
index=["foo", "bar", "baz"],
columns=["A", "B", "C"],
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"index_col,kwargs",
[
(None, {"columns": ["x", "y", "z"]}),
(False, {"columns": ["x", "y", "z"]}),
(0, {"columns": ["y", "z"], "index": Index([], name="x")}),
(1, {"columns": ["x", "z"], "index": Index([], name="y")}),
("x", {"columns": ["y", "z"], "index": Index([], name="x")}),
("y", {"columns": ["x", "z"], "index": Index([], name="y")}),
(
[0, 1],
{
"columns": ["z"],
"index": MultiIndex.from_arrays([[]] * 2, names=["x", "y"]),
},
),
(
["x", "y"],
{
"columns": ["z"],
"index": MultiIndex.from_arrays([[]] * 2, names=["x", "y"]),
},
),
(
[1, 0],
{
"columns": ["z"],
"index": MultiIndex.from_arrays([[]] * 2, names=["y", "x"]),
},
),
(
["y", "x"],
{
"columns": ["z"],
"index": MultiIndex.from_arrays([[]] * 2, names=["y", "x"]),
},
),
],
)
def test_index_col_empty_data(all_parsers, index_col, kwargs):
data = "x,y,z"
parser = all_parsers
result = parser.read_csv(StringIO(data), index_col=index_col)
expected = DataFrame(**kwargs)
tm.assert_frame_equal(result, expected)
def test_empty_with_index_col_false(all_parsers):
# see gh-10413
data = "x,y"
parser = all_parsers
result = parser.read_csv(StringIO(data), index_col=False)
expected = DataFrame(columns=["x", "y"])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"index_names",
[
["", ""],
["foo", ""],
["", "bar"],
["foo", "bar"],
["NotReallyUnnamed", "Unnamed: 0"],
],
)
def test_multi_index_naming(all_parsers, index_names):
parser = all_parsers
# We don't want empty index names being replaced with "Unnamed: 0"
data = ",".join(index_names + ["col\na,c,1\na,d,2\nb,c,3\nb,d,4"])
result = parser.read_csv(StringIO(data), index_col=[0, 1])
expected = DataFrame(
{"col": [1, 2, 3, 4]}, index=MultiIndex.from_product([["a", "b"], ["c", "d"]])
)
expected.index.names = [name if name else None for name in index_names]
tm.assert_frame_equal(result, expected)
def test_multi_index_naming_not_all_at_beginning(all_parsers):
parser = all_parsers
data = ",Unnamed: 2,\na,c,1\na,d,2\nb,c,3\nb,d,4"
result = parser.read_csv(StringIO(data), index_col=[0, 2])
expected = DataFrame(
{"Unnamed: 2": ["c", "d", "c", "d"]},
index=MultiIndex(
levels=[["a", "b"], [1, 2, 3, 4]], codes=[[0, 0, 1, 1], [0, 1, 2, 3]]
),
)
tm.assert_frame_equal(result, expected)
def test_no_multi_index_level_names_empty(all_parsers):
# GH 10984
parser = all_parsers
midx = MultiIndex.from_tuples([("A", 1, 2), ("A", 1, 2), ("B", 1, 2)])
expected = DataFrame(np.random.randn(3, 3), index=midx, columns=["x", "y", "z"])
with tm.ensure_clean() as path:
expected.to_csv(path)
result = parser.read_csv(path, index_col=[0, 1, 2])
tm.assert_frame_equal(result, expected)
def test_header_with_index_col(all_parsers):
# GH 33476
parser = all_parsers
data = """
I11,A,A
I12,B,B
I2,1,3
"""
midx = MultiIndex.from_tuples([("A", "B"), ("A", "B.1")], names=["I11", "I12"])
idx = Index(["I2"])
expected = DataFrame([[1, 3]], index=idx, columns=midx)
result = parser.read_csv(StringIO(data), index_col=0, header=[0, 1])
tm.assert_frame_equal(result, expected)
col_idx = Index(["A", "A.1"])
idx = Index(["I12", "I2"], name="I11")
expected = DataFrame([["B", "B"], ["1", "3"]], index=idx, columns=col_idx)
result = parser.read_csv(StringIO(data), index_col="I11", header=0)
tm.assert_frame_equal(result, expected)
@pytest.mark.slow
def test_index_col_large_csv(all_parsers):
# https://github.com/pandas-dev/pandas/issues/37094
parser = all_parsers
N = 1_000_001
df = DataFrame({"a": range(N), "b": np.random.randn(N)})
with tm.ensure_clean() as path:
df.to_csv(path, index=False)
result = parser.read_csv(path, index_col=[0])
tm.assert_frame_equal(result, df.set_index("a"))
def test_index_col_multiindex_columns_no_data(all_parsers):
# GH#38292
parser = all_parsers
result = parser.read_csv(
StringIO("a0,a1,a2\nb0,b1,b2\n"), header=[0, 1], index_col=0
)
expected = DataFrame(
[],
columns=MultiIndex.from_arrays(
[["a1", "a2"], ["b1", "b2"]], names=["a0", "b0"]
),
)
tm.assert_frame_equal(result, expected)
def test_index_col_header_no_data(all_parsers):
# GH#38292
parser = all_parsers
result = parser.read_csv(StringIO("a0,a1,a2\n"), header=[0], index_col=0)
expected = DataFrame(
[],
columns=["a1", "a2"],
index=Index([], name="a0"),
)
tm.assert_frame_equal(result, expected)
def test_multiindex_columns_no_data(all_parsers):
# GH#38292
parser = all_parsers
result = parser.read_csv(StringIO("a0,a1,a2\nb0,b1,b2\n"), header=[0, 1])
expected = DataFrame(
[], columns=MultiIndex.from_arrays([["a0", "a1", "a2"], ["b0", "b1", "b2"]])
)
tm.assert_frame_equal(result, expected)
def test_multiindex_columns_index_col_with_data(all_parsers):
# GH#38292
parser = all_parsers
result = parser.read_csv(
StringIO("a0,a1,a2\nb0,b1,b2\ndata,data,data"), header=[0, 1], index_col=0
)
expected = DataFrame(
[["data", "data"]],
columns=MultiIndex.from_arrays(
[["a1", "a2"], ["b1", "b2"]], names=["a0", "b0"]
),
index=Index(["data"]),
)
tm.assert_frame_equal(result, expected)
| bsd-3-clause |
denera/MATH6840 | Homework 2/prob2.py | 1 | 1944 | import numpy as np
import scipy.sparse as sparse
import matplotlib
matplotlib.use('GTKAgg')
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
from matplotlib.ticker import LinearLocator, FormatStrFormatter
import matplotlib.pyplot as plt
# plotting options
subplots = False
surface = True
# time domain settings
dt = 0.02 #0.03875
tmax = 0.9 #50.0
N = int((tmax/dt) + 1)
t = np.linspace(0, tmax, N)
# spatial domain settings
M = 10
dx = 1./(M-1)
xmin = 0.
xmax = 1.
x = np.linspace(xmin, xmax, M)
# heat conduction coefficient
nu = 1./6.
# preallocate the solution using the initial condition
U = np.zeros((N, M))
U[0, :] = np.sin(2*np.pi*x)
# create a banded tri-diagonal [1, -2, 1] matrix in sparse format
diag = np.ones(M-2)
off = np.ones(M-3)
tri_diag = np.array([off, diag, off])
stencil = [-1, 0, 1]
coeffs = np.array([1., -2., 1.])
data = coeffs*tri_diag
B121 = sparse.diags(data, stencil)
# step through the time domain
# at each time step, we compute the next one
for n in xrange(N-1):
# NOTE: spatial solution is calculated only on the interior
# the Dirichlet boundary terms are zero and do not affect the stencil
if n == 0:
# in the beginning, use the exact solution
U[n+1, :] = np.exp(-1*((2*np.pi*nu)**2)*t[n+1])*np.sin(2*np.pi*x)
else:
# after the first time step, start leapfrogging
U[n+1, 1:-1] = U[n-1, 1:-1] + (2*nu*dt/(dx**2))*(B121.dot(U[n, 1:-1]))
# plot the 2D spatial solution as a 3D surface along the time domain
plt.close('all')
if surface:
X, T = np.meshgrid(x, t)
fig2 = plt.figure()
ax = fig2.add_subplot(1,1,1, projection='3d')
surf = ax.plot_surface(
X, T, U, rstride=1, cstride=1, cmap=cm.coolwarm,
linewidth=0.1, antialiased=True)
ax.set_xlabel('x')
ax.set_ylabel('t')
ax.set_zlabel('u')
ax.set_zlim(-1, 1)
fig2.colorbar(surf, shrink=0.5, aspect=5)
ax.view_init(30,45)
plt.show()
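# Optional accuracy check (a sketch, not required by the assignment): compare
# the final numerical time step against the closed-form expression used above
# to bootstrap the first step.
u_exact = np.exp(-1*((2*np.pi*nu)**2)*t[-1])*np.sin(2*np.pi*x)
max_err = np.max(np.abs(U[-1, :] - u_exact))
print("max abs error at t = %g: %g" % (tmax, max_err))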
| mit |
mne-tools/mne-python | examples/visualization/evoked_topomap.py | 14 | 5554 | # -*- coding: utf-8 -*-
"""
.. _ex-evoked-topomap:
========================================
Plotting topographic maps of evoked data
========================================
Load evoked data and plot topomaps for selected time points using multiple
additional options.
"""
# Authors: Christian Brodbeck <[email protected]>
# Tal Linzen <[email protected]>
# Denis A. Engeman <[email protected]>
# Mikołaj Magnuski <[email protected]>
# Eric Larson <[email protected]>
#
# License: BSD (3-clause)
# sphinx_gallery_thumbnail_number = 5
import numpy as np
import matplotlib.pyplot as plt
from mne.datasets import sample
from mne import read_evokeds
print(__doc__)
path = sample.data_path()
fname = path + '/MEG/sample/sample_audvis-ave.fif'
# load evoked corresponding to a specific condition
# from the fif file and subtract baseline
condition = 'Left Auditory'
evoked = read_evokeds(fname, condition=condition, baseline=(None, 0))
###############################################################################
# Basic :func:`~mne.viz.plot_topomap` options
# -------------------------------------------
#
# We plot evoked topographies using :func:`mne.Evoked.plot_topomap`. The first
# argument, ``times``, allows us to specify the time instants (in seconds!)
# for which topographies will be shown. We select timepoints from 50 to 150 ms
# with a step of 20 ms and plot magnetometer data:
times = np.arange(0.05, 0.151, 0.02)
evoked.plot_topomap(times, ch_type='mag', time_unit='s')
###############################################################################
# If ``times`` is set to ``None``, at most 10 regularly spaced topographies
# will be shown:
evoked.plot_topomap(ch_type='mag', time_unit='s')
###############################################################################
# We can use the ``nrows`` and ``ncols`` parameters to create multi-line plots
# with more timepoints.
all_times = np.arange(-0.2, 0.5, 0.03)
evoked.plot_topomap(all_times, ch_type='mag', time_unit='s',
ncols=8, nrows='auto')
###############################################################################
# Instead of showing topographies at specific time points we can compute
# averages of 50 ms bins centered on these time points to reduce the noise in
# the topographies:
evoked.plot_topomap(times, ch_type='mag', average=0.05, time_unit='s')
###############################################################################
# We can plot gradiometer data (shown as the RMS for each pair of gradiometers):
evoked.plot_topomap(times, ch_type='grad', time_unit='s')
###############################################################################
# Additional :func:`~mne.viz.plot_topomap` options
# ------------------------------------------------
#
# We can also use a range of various :func:`mne.viz.plot_topomap` arguments
# that control how the topography is drawn. For example:
#
# * ``cmap`` - to specify the color map
# * ``res`` - to control the resolution of the topographies (lower resolution
# means faster plotting)
# * ``outlines='skirt'`` to see the topography stretched beyond the head circle
# * ``contours`` to define how many contour lines should be plotted
evoked.plot_topomap(times, ch_type='mag', cmap='Spectral_r', res=32,
outlines='skirt', contours=4, time_unit='s')
###############################################################################
# If you look at the edges of the head circle of a single topomap you'll see
# the effect of extrapolation. There are three extrapolation modes:
#
# - ``extrapolate='local'`` extrapolates only to points close to the sensors.
# - ``extrapolate='head'`` extrapolates out to the head circle.
# - ``extrapolate='box'`` extrapolates to a large box stretching beyond the
# head circle.
#
# The default value ``extrapolate='auto'`` will use ``'local'`` for MEG sensors
# and ``'head'`` otherwise. Here we show each option:
extrapolations = ['local', 'head', 'box']
fig, axes = plt.subplots(figsize=(7.5, 4.5), nrows=2, ncols=3)
# Here we look at EEG channels, and use a custom head sphere to get all the
# sensors to be well within the drawn head surface
for axes_row, ch_type in zip(axes, ('mag', 'eeg')):
for ax, extr in zip(axes_row, extrapolations):
evoked.plot_topomap(0.1, ch_type=ch_type, size=2, extrapolate=extr,
axes=ax, show=False, colorbar=False,
sphere=(0., 0., 0., 0.09))
ax.set_title('%s %s' % (ch_type.upper(), extr), fontsize=14)
fig.tight_layout()
###############################################################################
# More advanced usage
# -------------------
#
# Now we plot magnetometer data as topomap at a single time point: 100 ms
# post-stimulus, add channel labels, title and adjust plot margins:
evoked.plot_topomap(0.1, ch_type='mag', show_names=True, colorbar=False,
size=6, res=128, title='Auditory response',
time_unit='s')
plt.subplots_adjust(left=0.01, right=0.99, bottom=0.01, top=0.88)
###############################################################################
# Animating the topomap
# ---------------------
#
# Instead of using a still image we can plot magnetometer data as an animation,
# which animates properly only in matplotlib interactive mode.
# sphinx_gallery_thumbnail_number = 9
times = np.arange(0.05, 0.151, 0.01)
fig, anim = evoked.animate_topomap(
times=times, ch_type='mag', frame_rate=2, time_unit='s', blit=False)
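###############################################################################
# The returned ``anim`` object is a regular matplotlib animation, so it can be
# written to disk with its ``save`` method if a suitable writer is installed
# (a sketch; the file name and the 'pillow' writer below are illustrative
# assumptions, not requirements of this example):
# anim.save('topomap_animation.gif', writer='pillow')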
| bsd-3-clause |
100star/h2o | py/testdir_single_jvm/test_summary2_unifiles.py | 9 | 10223 | import unittest, time, sys, random, math, getpass
sys.path.extend(['.','..','../..','py'])
import h2o, h2o_cmd, h2o_import as h2i, h2o_util, h2o_browse as h2b, h2o_print as h2p
import h2o_summ
DO_TRY_SCIPY = False
if getpass.getuser()=='kevin' or getpass.getuser()=='jenkins':
DO_TRY_SCIPY = True
DO_MEDIAN = True
# FIX! We seem to lose accuracy with fewer bins -> more iterations. Maybe we're leaking, or ??
# this test failed (if run as user kevin) with 10 bins
MAX_QBINS = 1000 # pass
MAX_QBINS = 1000 # pass
# this one doesn't fail with 10 bins
# this failed. interestingly got same number as 1000 bin summary2 (the 7.433..
# on runifA.csv (2nd col?)
# MAX_QBINS = 20
# Exception: h2o quantile multipass is not approx. same as sort algo. h2o_util.assertApproxEqual failed comparing 7.43337413296 and 8.26268245. {'tol': 2e-07}.
MAX_QBINS = 27
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
global SEED
SEED = h2o.setup_random_seed()
h2o.init()
@classmethod
def tearDownClass(cls):
# h2o.sleep(3600)
h2o.tear_down_cloud()
def test_summary2_unifiles(self):
SYNDATASETS_DIR = h2o.make_syn_dir()
# new with 1000 bins. copy expected from R
tryList = [
('cars.csv', 'c.hex', [
(None, None,None,None,None,None),
('economy (mpg)', None,None,None,None,None),
('cylinders', None,None,None,None,None),
],
),
('runifA.csv', 'A.hex', [
(None, 1.00, 25.00, 50.00, 75.00, 100.0),
('x', -99.9, -44.7, 8.26, 58.00, 91.7),
],
),
# colname, (min, 25th, 50th, 75th, max)
('runif.csv', 'x.hex', [
(None, 1.00, 5000.0, 10000.0, 15000.0, 20000.00),
('D', -5000.00, -3735.0, -2443, -1187.0, 99.8),
('E', -100000.0, -49208.0, 1783.8, 50621.9, 100000.0),
('F', -1.00, -0.4886, 0.00868, 0.5048, 1.00),
],
),
('runifB.csv', 'B.hex', [
(None, 1.00, 2501.00, 5001.00, 7501.00, 10000.00),
                ('x', -100.00, -50.1, 0.974, 51.7, 100.00),
],
),
('runifC.csv', 'C.hex', [
(None, 1.00, 25002.00, 50002.00, 75002.00, 100000.00),
('x', -100.00, -50.45, -1.135, 49.28, 100.00),
],
),
]
timeoutSecs = 15
trial = 1
n = h2o.nodes[0]
lenNodes = len(h2o.nodes)
timeoutSecs = 60
for (csvFilename, hex_key, expectedCols) in tryList:
csvPathname = csvFilename
csvPathnameFull = h2i.find_folder_and_filename('smalldata', csvPathname, returnFullPath=True)
parseResult = h2i.import_parse(bucket='smalldata', path=csvPathname,
schema='put', hex_key=hex_key, timeoutSecs=10, doSummary=False)
print "Parse result['destination_key']:", parseResult['destination_key']
# We should be able to see the parse result?
inspect = h2o_cmd.runInspect(None, parseResult['destination_key'])
print "\n" + csvFilename
numRows = inspect["numRows"]
numCols = inspect["numCols"]
# okay to get more cols than we want
# okay to vary MAX_QBINS because we adjust the expected accuracy
summaryResult = h2o_cmd.runSummary(key=hex_key, max_qbins=MAX_QBINS)
h2o.verboseprint("summaryResult:", h2o.dump_json(summaryResult))
summaries = summaryResult['summaries']
scipyCol = 0
for expected, column in zip(expectedCols, summaries):
colname = column['colname']
if expected[0]:
                    self.assertEqual(colname, expected[0])
else:
# if the colname is None, skip it (so we don't barf on strings on the h2o quantile page
scipyCol += 1
continue
quantile = 0.5 if DO_MEDIAN else .999
# h2o has problem if a list of columns (or dictionary) is passed to 'column' param
q = h2o.nodes[0].quantiles(source_key=hex_key, column=column['colname'],
quantile=quantile, max_qbins=MAX_QBINS, multiple_pass=2, interpolation_type=7) # for comparing to summary2
qresult = q['result']
qresult_single = q['result_single']
h2p.blue_print("h2o quantiles result:", qresult)
h2p.blue_print("h2o quantiles result_single:", qresult_single)
h2p.blue_print("h2o quantiles iterations:", q['iterations'])
h2p.blue_print("h2o quantiles interpolated:", q['interpolated'])
print h2o.dump_json(q)
# ('', '1.00', '25002.00', '50002.00', '75002.00', '100000.00'),
coltype = column['type']
nacnt = column['nacnt']
stats = column['stats']
stattype= stats['type']
print stattype
# FIX! we should compare mean and sd to expected?
# enums don't have mean or sd?
if stattype!='Enum':
mean = stats['mean']
sd = stats['sd']
zeros = stats['zeros']
mins = stats['mins']
maxs = stats['maxs']
print "colname:", colname, "mean (2 places):", h2o_util.twoDecimals(mean)
print "colname:", colname, "std dev. (2 places):", h2o_util.twoDecimals(sd)
pct = stats['pct']
print "pct:", pct
print ""
# the thresholds h2o used, should match what we expected
expectedPct= [0.01, 0.05, 0.1, 0.25, 0.33, 0.5, 0.66, 0.75, 0.9, 0.95, 0.99]
pctile = stats['pctile']
# figure out the expected max error
# use this for comparing to sklearn/sort
if expected[1] and expected[5]:
expectedRange = expected[5] - expected[1]
                    # because of floor and ceil effects we can lose up to 2 bins (worst case)
# the extra bin for the max value, is an extra bin..ignore
expectedBin = expectedRange/(MAX_QBINS-2)
maxErr = 0.5 * expectedBin # should we have some fuzz for fp?
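                    # Worked example (illustrative): for the 'x' column of
                    # runifB.csv the expected range is 100.00 - (-100.00) = 200.0;
                    # with MAX_QBINS = 27 the usable bin width is 200.0 / 25 = 8.0,
                    # so maxErr = 0.5 * 8.0 = 4.0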
else:
print "Test won't calculate max expected error"
maxErr = 0
# hack..assume just one None is enough to ignore for cars.csv
if expected[1]:
h2o_util.assertApproxEqual(mins[0], expected[1], tol=maxErr, msg='min is not approx. expected')
if expected[2]:
h2o_util.assertApproxEqual(pctile[3], expected[2], tol=maxErr, msg='25th percentile is not approx. expected')
if expected[3]:
h2o_util.assertApproxEqual(pctile[5], expected[3], tol=maxErr, msg='50th percentile (median) is not approx. expected')
if expected[4]:
h2o_util.assertApproxEqual(pctile[7], expected[4], tol=maxErr, msg='75th percentile is not approx. expected')
if expected[5]:
h2o_util.assertApproxEqual(maxs[0], expected[5], tol=maxErr, msg='max is not approx. expected')
hstart = column['hstart']
hstep = column['hstep']
hbrk = column['hbrk']
hcnt = column['hcnt']
for b in hcnt:
# should we be able to check for a uniform distribution in the files?
e = .1 * numRows
# self.assertAlmostEqual(b, .1 * rowCount, delta=.01*rowCount,
# msg="Bins not right. b: %s e: %s" % (b, e))
if stattype!='Enum':
pt = h2o_util.twoDecimals(pctile)
print "colname:", colname, "pctile (2 places):", pt
mx = h2o_util.twoDecimals(maxs)
mn = h2o_util.twoDecimals(mins)
print "colname:", colname, "maxs: (2 places):", mx
print "colname:", colname, "mins: (2 places):", mn
# FIX! we should do an exec and compare using the exec quantile too
actual = mn[0], pt[3], pt[5], pt[7], mx[0]
print "min/25/50/75/max colname:", colname, "(2 places):", actual
print "maxs colname:", colname, "(2 places):", mx
print "mins colname:", colname, "(2 places):", mn
# don't check if colname is empty..means it's a string and scipy doesn't parse right?
# need to ignore the car names
if colname!='' and expected[scipyCol]:
# don't do for enums
# also get the median with a sort (h2o_summ.percentileOnSortedlist()
h2o_summ.quantile_comparisons(
csvPathnameFull,
skipHeader=True,
col=scipyCol,
datatype='float',
quantile=0.5 if DO_MEDIAN else 0.999,
# FIX! ignore for now
h2oSummary2=pctile[5 if DO_MEDIAN else 10],
h2oQuantilesApprox=qresult_single,
h2oQuantilesExact=qresult,
h2oSummary2MaxErr=maxErr,
)
if False and h2o_util.approxEqual(pctile[5], 0.990238116744, tol=0.002, msg='stop here'):
raise Exception("stopping to look")
scipyCol += 1
trial += 1
if __name__ == '__main__':
h2o.unit_main()
| apache-2.0 |
toobaz/pandas | pandas/tests/window/test_grouper.py | 2 | 5595 | import numpy as np
import pytest
import pandas as pd
from pandas import DataFrame, Series
import pandas.util.testing as tm
class TestGrouperGrouping:
def setup_method(self, method):
self.series = Series(np.arange(10))
self.frame = DataFrame({"A": [1] * 20 + [2] * 12 + [3] * 8, "B": np.arange(40)})
def test_mutated(self):
msg = r"group\(\) got an unexpected keyword argument 'foo'"
with pytest.raises(TypeError, match=msg):
self.frame.groupby("A", foo=1)
g = self.frame.groupby("A")
assert not g.mutated
g = self.frame.groupby("A", mutated=True)
assert g.mutated
def test_getitem(self):
g = self.frame.groupby("A")
g_mutated = self.frame.groupby("A", mutated=True)
expected = g_mutated.B.apply(lambda x: x.rolling(2).mean())
result = g.rolling(2).mean().B
tm.assert_series_equal(result, expected)
result = g.rolling(2).B.mean()
tm.assert_series_equal(result, expected)
result = g.B.rolling(2).mean()
tm.assert_series_equal(result, expected)
result = self.frame.B.groupby(self.frame.A).rolling(2).mean()
tm.assert_series_equal(result, expected)
def test_getitem_multiple(self):
# GH 13174
g = self.frame.groupby("A")
r = g.rolling(2)
g_mutated = self.frame.groupby("A", mutated=True)
expected = g_mutated.B.apply(lambda x: x.rolling(2).count())
result = r.B.count()
tm.assert_series_equal(result, expected)
result = r.B.count()
tm.assert_series_equal(result, expected)
def test_rolling(self):
g = self.frame.groupby("A")
r = g.rolling(window=4)
for f in ["sum", "mean", "min", "max", "count", "kurt", "skew"]:
result = getattr(r, f)()
expected = g.apply(lambda x: getattr(x.rolling(4), f)())
tm.assert_frame_equal(result, expected)
for f in ["std", "var"]:
result = getattr(r, f)(ddof=1)
expected = g.apply(lambda x: getattr(x.rolling(4), f)(ddof=1))
tm.assert_frame_equal(result, expected)
result = r.quantile(0.5)
expected = g.apply(lambda x: x.rolling(4).quantile(0.5))
tm.assert_frame_equal(result, expected)
def test_rolling_corr_cov(self):
g = self.frame.groupby("A")
r = g.rolling(window=4)
for f in ["corr", "cov"]:
result = getattr(r, f)(self.frame)
def func(x):
return getattr(x.rolling(4), f)(self.frame)
expected = g.apply(func)
tm.assert_frame_equal(result, expected)
result = getattr(r.B, f)(pairwise=True)
def func(x):
return getattr(x.B.rolling(4), f)(pairwise=True)
expected = g.apply(func)
tm.assert_series_equal(result, expected)
def test_rolling_apply(self, raw):
g = self.frame.groupby("A")
r = g.rolling(window=4)
# reduction
result = r.apply(lambda x: x.sum(), raw=raw)
expected = g.apply(lambda x: x.rolling(4).apply(lambda y: y.sum(), raw=raw))
tm.assert_frame_equal(result, expected)
def test_rolling_apply_mutability(self):
# GH 14013
df = pd.DataFrame({"A": ["foo"] * 3 + ["bar"] * 3, "B": [1] * 6})
g = df.groupby("A")
mi = pd.MultiIndex.from_tuples(
[("bar", 3), ("bar", 4), ("bar", 5), ("foo", 0), ("foo", 1), ("foo", 2)]
)
mi.names = ["A", None]
# Grouped column should not be a part of the output
expected = pd.DataFrame([np.nan, 2.0, 2.0] * 2, columns=["B"], index=mi)
result = g.rolling(window=2).sum()
tm.assert_frame_equal(result, expected)
# Call an arbitrary function on the groupby
g.sum()
# Make sure nothing has been mutated
result = g.rolling(window=2).sum()
tm.assert_frame_equal(result, expected)
def test_expanding(self):
g = self.frame.groupby("A")
r = g.expanding()
for f in ["sum", "mean", "min", "max", "count", "kurt", "skew"]:
result = getattr(r, f)()
expected = g.apply(lambda x: getattr(x.expanding(), f)())
tm.assert_frame_equal(result, expected)
for f in ["std", "var"]:
result = getattr(r, f)(ddof=0)
expected = g.apply(lambda x: getattr(x.expanding(), f)(ddof=0))
tm.assert_frame_equal(result, expected)
result = r.quantile(0.5)
expected = g.apply(lambda x: x.expanding().quantile(0.5))
tm.assert_frame_equal(result, expected)
def test_expanding_corr_cov(self):
g = self.frame.groupby("A")
r = g.expanding()
for f in ["corr", "cov"]:
result = getattr(r, f)(self.frame)
def func(x):
return getattr(x.expanding(), f)(self.frame)
expected = g.apply(func)
tm.assert_frame_equal(result, expected)
result = getattr(r.B, f)(pairwise=True)
def func(x):
return getattr(x.B.expanding(), f)(pairwise=True)
expected = g.apply(func)
tm.assert_series_equal(result, expected)
def test_expanding_apply(self, raw):
g = self.frame.groupby("A")
r = g.expanding()
# reduction
result = r.apply(lambda x: x.sum(), raw=raw)
expected = g.apply(lambda x: x.expanding().apply(lambda y: y.sum(), raw=raw))
tm.assert_frame_equal(result, expected)
| bsd-3-clause |
Yadnyawalkya/integration_tests | cfme/utils/smem_memory_monitor.py | 2 | 67671 | """Monitor Memory on a CFME/Miq appliance and builds report&graphs displaying usage per process."""
import json
import os
import time
import traceback
from collections import OrderedDict
from datetime import datetime
from threading import Thread
import yaml
from yaycl import AttrDict
from cfme.utils.conf import cfme_performance
from cfme.utils.log import logger
from cfme.utils.path import results_path
from cfme.utils.version import current_version
from cfme.utils.version import get_version
miq_workers = [
'MiqGenericWorker',
'MiqPriorityWorker',
'MiqScheduleWorker',
'MiqUiWorker',
'MiqWebServiceWorker',
'MiqWebsocketWorker',
'MiqReportingWorker',
'MiqReplicationWorker',
'MiqSmartProxyWorker',
'MiqVimBrokerWorker',
'MiqEmsRefreshCoreWorker',
# Refresh Workers:
'ManageIQ::Providers::Microsoft::InfraManager::RefreshWorker',
'ManageIQ::Providers::Openstack::InfraManager::RefreshWorker',
'ManageIQ::Providers::Redhat::InfraManager::RefreshWorker',
'ManageIQ::Providers::Vmware::InfraManager::RefreshWorker',
'MiqEmsRefreshWorkerMicrosoft', # 5.4
'MiqEmsRefreshWorkerRedhat', # 5.4
'MiqEmsRefreshWorkerVmware', # 5.4
'ManageIQ::Providers::Amazon::CloudManager::RefreshWorker',
'ManageIQ::Providers::Azure::CloudManager::RefreshWorker',
'ManageIQ::Providers::Google::CloudManager::RefreshWorker',
'ManageIQ::Providers::Openstack::CloudManager::RefreshWorker',
'MiqEmsRefreshWorkerAmazon', # 5.4
'MiqEmsRefreshWorkerOpenstack', # 5.4
'ManageIQ::Providers::AnsibleTower::ConfigurationManager::RefreshWorker',
'ManageIQ::Providers::Foreman::ConfigurationManager::RefreshWorker',
'ManageIQ::Providers::Foreman::ProvisioningManager::RefreshWorker',
'MiqEmsRefreshWorkerForemanConfiguration', # 5.4
'MiqEmsRefreshWorkerForemanProvisioning', # 5.4
'ManageIQ::Providers::Atomic::ContainerManager::RefreshWorker',
'ManageIQ::Providers::AtomicEnterprise::ContainerManager::RefreshWorker',
'ManageIQ::Providers::Kubernetes::ContainerManager::RefreshWorker',
'ManageIQ::Providers::Openshift::ContainerManager::RefreshWorker',
'ManageIQ::Providers::OpenshiftEnterprise::ContainerManager::RefreshWorker',
'ManageIQ::Providers::StorageManager::CinderManager::RefreshWorker',
'ManageIQ::Providers::StorageManager::SwiftManager::RefreshWorker',
'ManageIQ::Providers::Amazon::NetworkManager::RefreshWorker',
'ManageIQ::Providers::Azure::NetworkManager::RefreshWorker',
'ManageIQ::Providers::Google::NetworkManager::RefreshWorker',
'ManageIQ::Providers::Openstack::NetworkManager::RefreshWorker',
'MiqNetappRefreshWorker',
'MiqSmisRefreshWorker',
# Event Workers:
'MiqEventHandler',
'ManageIQ::Providers::Openstack::InfraManager::EventCatcher',
'ManageIQ::Providers::StorageManager::CinderManager::EventCatcher',
'ManageIQ::Providers::Redhat::InfraManager::EventCatcher',
'ManageIQ::Providers::Vmware::InfraManager::EventCatcher',
'MiqEventCatcherRedhat', # 5.4
'MiqEventCatcherVmware', # 5.4
'ManageIQ::Providers::Amazon::CloudManager::EventCatcher',
'ManageIQ::Providers::Azure::CloudManager::EventCatcher',
'ManageIQ::Providers::Google::CloudManager::EventCatcher',
'ManageIQ::Providers::Openstack::CloudManager::EventCatcher',
'MiqEventCatcherAmazon', # 5.4
'MiqEventCatcherOpenstack', # 5.4
'ManageIQ::Providers::Atomic::ContainerManager::EventCatcher',
'ManageIQ::Providers::AtomicEnterprise::ContainerManager::EventCatcher',
'ManageIQ::Providers::Kubernetes::ContainerManager::EventCatcher',
'ManageIQ::Providers::Openshift::ContainerManager::EventCatcher',
'ManageIQ::Providers::OpenshiftEnterprise::ContainerManager::EventCatcher',
'ManageIQ::Providers::Openstack::NetworkManager::EventCatcher',
# Metrics Processor/Collector Workers
'MiqEmsMetricsProcessorWorker',
'ManageIQ::Providers::Openstack::InfraManager::MetricsCollectorWorker',
'ManageIQ::Providers::Redhat::InfraManager::MetricsCollectorWorker',
'ManageIQ::Providers::Vmware::InfraManager::MetricsCollectorWorker',
'MiqEmsMetricsCollectorWorkerRedhat', # 5.4
'MiqEmsMetricsCollectorWorkerVmware', # 5.4
'ManageIQ::Providers::Amazon::CloudManager::MetricsCollectorWorker',
'ManageIQ::Providers::Azure::CloudManager::MetricsCollectorWorker',
'ManageIQ::Providers::Openstack::CloudManager::MetricsCollectorWorker',
'MiqEmsMetricsCollectorWorkerAmazon', # 5.4
'MiqEmsMetricsCollectorWorkerOpenstack', # 5.4
'ManageIQ::Providers::Atomic::ContainerManager::MetricsCollectorWorker',
'ManageIQ::Providers::AtomicEnterprise::ContainerManager::MetricsCollectorWorker',
'ManageIQ::Providers::Kubernetes::ContainerManager::MetricsCollectorWorker',
'ManageIQ::Providers::Openshift::ContainerManager::MetricsCollectorWorker',
'ManageIQ::Providers::OpenshiftEnterprise::ContainerManager::MetricsCollectorWorker',
'ManageIQ::Providers::Openstack::NetworkManager::MetricsCollectorWorker',
'MiqStorageMetricsCollectorWorker',
'MiqVmdbStorageBridgeWorker']
ruby_processes = list(miq_workers)
ruby_processes.extend(['evm:dbsync:replicate', 'MIQ Server (evm_server.rb)', 'evm_watchdog.rb',
'appliance_console.rb'])
process_order = list(ruby_processes)
process_order.extend(['memcached', 'postgres', 'httpd', 'collectd'])
# Timestamp created at first import, thus grouping all reports of like workload
test_ts = time.strftime('%Y%m%d%H%M%S')
# 10s sample interval (occasionally sampling can take almost 4s on an appliance doing a lot of work)
SAMPLE_INTERVAL = 10
class SmemMemoryMonitor(Thread):
def __init__(self, ssh_client, scenario_data):
super(SmemMemoryMonitor, self).__init__()
self.ssh_client = ssh_client
self.scenario_data = scenario_data
self.grafana_urls = {}
self.miq_server_id = ''
self.use_slab = False
self.signal = True
def create_process_result(self, process_results, starttime, process_pid, process_name,
memory_by_pid):
if process_pid in list(memory_by_pid.keys()):
if process_name not in process_results:
process_results[process_name] = OrderedDict()
process_results[process_name][process_pid] = OrderedDict()
if process_pid not in process_results[process_name]:
process_results[process_name][process_pid] = OrderedDict()
process_results[process_name][process_pid][starttime] = {}
rss_mem = memory_by_pid[process_pid]['rss']
pss_mem = memory_by_pid[process_pid]['pss']
uss_mem = memory_by_pid[process_pid]['uss']
vss_mem = memory_by_pid[process_pid]['vss']
swap_mem = memory_by_pid[process_pid]['swap']
process_results[process_name][process_pid][starttime]['rss'] = rss_mem
process_results[process_name][process_pid][starttime]['pss'] = pss_mem
process_results[process_name][process_pid][starttime]['uss'] = uss_mem
process_results[process_name][process_pid][starttime]['vss'] = vss_mem
process_results[process_name][process_pid][starttime]['swap'] = swap_mem
del memory_by_pid[process_pid]
else:
            logger.warning('Process {} with PID {} not found in smem output'.format(
                process_name, process_pid))
def get_appliance_memory(self, appliance_results, plottime):
# 5.5/5.6 - RHEL 7 / Centos 7
# Application Memory Used : MemTotal - (MemFree + Slab + Cached)
# 5.4 - RHEL 6 / Centos 6
# Application Memory Used : MemTotal - (MemFree + Buffers + Cached)
# Available memory could potentially be better metric
appliance_results[plottime] = {}
result = self.ssh_client.run_command('cat /proc/meminfo')
if result.failed:
logger.error('Exit_status nonzero in get_appliance_memory: {}, {}'
.format(result.rc, result.output))
del appliance_results[plottime]
else:
meminfo_raw = result.output.replace('kB', '').strip()
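            # Parse each "Key:   value" line of /proc/meminfo into an ordered
            # mapping of name -> value (values are still in kB at this point).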
meminfo = OrderedDict((k.strip(), v.strip()) for k, v in
(value.strip().split(':') for value in meminfo_raw.split('\n')))
appliance_results[plottime]['total'] = float(meminfo['MemTotal']) / 1024
appliance_results[plottime]['free'] = float(meminfo['MemFree']) / 1024
if 'MemAvailable' in meminfo: # 5.5, RHEL 7/Centos 7
self.use_slab = True
mem_used = (float(meminfo['MemTotal']) - (float(meminfo['MemFree']) + float(
meminfo['Slab']) + float(meminfo['Cached']))) / 1024
else: # 5.4, RHEL 6/Centos 6
mem_used = (float(meminfo['MemTotal']) - (float(meminfo['MemFree']) + float(
meminfo['Buffers']) + float(meminfo['Cached']))) / 1024
appliance_results[plottime]['used'] = mem_used
appliance_results[plottime]['buffers'] = float(meminfo['Buffers']) / 1024
appliance_results[plottime]['cached'] = float(meminfo['Cached']) / 1024
appliance_results[plottime]['slab'] = float(meminfo['Slab']) / 1024
appliance_results[plottime]['swap_total'] = float(meminfo['SwapTotal']) / 1024
appliance_results[plottime]['swap_free'] = float(meminfo['SwapFree']) / 1024
def get_evm_workers(self):
result = self.ssh_client.run_command(
'psql -t -q -d vmdb_production -c '
'\"select pid,type from miq_workers where miq_server_id = \'{}\'\"'.format(
self.miq_server_id))
if result.output.strip():
workers = {}
for worker in result.output.strip().split('\n'):
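                # Each psql row has the form "pid | worker_type"; malformed rows
                # are logged below instead of aborting the collection.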
pid_worker = worker.strip().split('|')
if len(pid_worker) == 2:
workers[pid_worker[0].strip()] = pid_worker[1].strip()
else:
logger.error('Unexpected output from psql: {}'.format(worker))
return workers
else:
return {}
# Old method of obtaining per process memory (Appliances without smem)
# def get_pids_memory(self):
# result = self.ssh_client.run_command(
# 'ps -A -o pid,rss,vsz,comm,cmd | sed 1d')
# pids_memory = result.output.strip().split('\n')
# memory_by_pid = {}
# for line in pids_memory:
# values = [s for s in line.strip().split(' ') if s]
# pid = values[0]
# memory_by_pid[pid] = {}
# memory_by_pid[pid]['rss'] = float(values[1]) / 1024
# memory_by_pid[pid]['vss'] = float(values[2]) / 1024
# memory_by_pid[pid]['name'] = values[3]
# memory_by_pid[pid]['cmd'] = ' '.join(values[4:])
# return memory_by_pid
def get_miq_server_id(self):
# Obtain the Miq Server GUID:
result = self.ssh_client.run_command('cat /var/www/miq/vmdb/GUID')
logger.info('Obtained appliance GUID: {}'.format(result.output.strip()))
# Get server id:
result = self.ssh_client.run_command(
'psql -t -q -d vmdb_production -c "select id from miq_servers where guid = \'{}\'"'
''.format(result.output.strip()))
logger.info('Obtained miq_server_id: {}'.format(result.output.strip()))
self.miq_server_id = result.output.strip()
def get_pids_memory(self):
result = self.ssh_client.run_command(
'smem -c \'pid rss pss uss vss swap name command\' | sed 1d')
pids_memory = result.output.strip().split('\n')
memory_by_pid = {}
for line in pids_memory:
if line.strip():
try:
values = [s for s in line.strip().split(' ') if s]
pid = values[0]
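                    # int() raises ValueError for a non-numeric pid field, so
                    # malformed lines are skipped via the except block below.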
int(pid)
memory_by_pid[pid] = {}
memory_by_pid[pid]['rss'] = float(values[1]) / 1024
memory_by_pid[pid]['pss'] = float(values[2]) / 1024
memory_by_pid[pid]['uss'] = float(values[3]) / 1024
memory_by_pid[pid]['vss'] = float(values[4]) / 1024
memory_by_pid[pid]['swap'] = float(values[5]) / 1024
memory_by_pid[pid]['name'] = values[6]
memory_by_pid[pid]['cmd'] = ' '.join(values[7:])
except Exception as e:
                    logger.error('Processing smem output error: {}: {}'.format(
                        e.__class__.__name__, e))
logger.error('Issue with pid: {} line: {}'.format(pid, line))
logger.error('Complete smem output: {}'.format(result.output))
return memory_by_pid
def _real_run(self):
""" Result dictionaries:
appliance_results[timestamp][measurement] = value
appliance_results[timestamp]['total'] = value
appliance_results[timestamp]['free'] = value
appliance_results[timestamp]['used'] = value
appliance_results[timestamp]['buffers'] = value
appliance_results[timestamp]['cached'] = value
appliance_results[timestamp]['slab'] = value
appliance_results[timestamp]['swap_total'] = value
appliance_results[timestamp]['swap_free'] = value
appliance measurements: total/free/used/buffers/cached/slab/swap_total/swap_free
process_results[name][pid][timestamp][measurement] = value
process_results[name][pid][timestamp]['rss'] = value
process_results[name][pid][timestamp]['pss'] = value
process_results[name][pid][timestamp]['uss'] = value
process_results[name][pid][timestamp]['vss'] = value
process_results[name][pid][timestamp]['swap'] = value
"""
appliance_results = OrderedDict()
process_results = OrderedDict()
install_smem(self.ssh_client)
self.get_miq_server_id()
logger.info('Starting Monitoring Thread.')
while self.signal:
starttime = time.time()
plottime = datetime.now()
self.get_appliance_memory(appliance_results, plottime)
workers = self.get_evm_workers()
memory_by_pid = self.get_pids_memory()
for worker_pid in workers:
self.create_process_result(process_results, plottime, worker_pid,
workers[worker_pid], memory_by_pid)
for pid in sorted(memory_by_pid.keys()):
if memory_by_pid[pid]['name'] == 'httpd':
self.create_process_result(process_results, plottime, pid, 'httpd',
memory_by_pid)
elif memory_by_pid[pid]['name'] == 'postgres':
self.create_process_result(process_results, plottime, pid, 'postgres',
memory_by_pid)
elif memory_by_pid[pid]['name'] == 'postmaster':
self.create_process_result(process_results, plottime, pid, 'postgres',
memory_by_pid)
elif memory_by_pid[pid]['name'] == 'memcached':
self.create_process_result(process_results, plottime, pid, 'memcached',
memory_by_pid)
elif memory_by_pid[pid]['name'] == 'collectd':
self.create_process_result(process_results, plottime, pid, 'collectd',
memory_by_pid)
elif memory_by_pid[pid]['name'] == 'ruby':
if 'evm_server.rb' in memory_by_pid[pid]['cmd']:
self.create_process_result(process_results, plottime, pid,
'MIQ Server (evm_server.rb)', memory_by_pid)
elif 'MIQ Server' in memory_by_pid[pid]['cmd']:
self.create_process_result(process_results, plottime, pid,
'MIQ Server (evm_server.rb)', memory_by_pid)
elif 'evm_watchdog.rb' in memory_by_pid[pid]['cmd']:
self.create_process_result(process_results, plottime, pid,
'evm_watchdog.rb', memory_by_pid)
elif 'appliance_console.rb' in memory_by_pid[pid]['cmd']:
self.create_process_result(process_results, plottime, pid,
'appliance_console.rb', memory_by_pid)
elif 'evm:dbsync:replicate' in memory_by_pid[pid]['cmd']:
self.create_process_result(process_results, plottime, pid,
'evm:dbsync:replicate', memory_by_pid)
else:
logger.debug('Unaccounted for ruby pid: {}'.format(pid))
timediff = time.time() - starttime
logger.debug('Monitoring sampled in {}s'.format(round(timediff, 4)))
# Sleep Monitoring interval
# Roughly 10s samples, accounts for collection of memory measurements
time_to_sleep = abs(SAMPLE_INTERVAL - timediff)
time.sleep(time_to_sleep)
logger.info('Monitoring CFME Memory Terminating')
create_report(self.scenario_data, appliance_results, process_results, self.use_slab,
self.grafana_urls)
def run(self):
try:
self._real_run()
except Exception as e:
logger.error('Error in Monitoring Thread: {}'.format(e))
logger.error('{}'.format(traceback.format_exc()))
def install_smem(ssh_client):
# smem is included by default in 5.6 appliances
logger.info('Installing smem.')
ver = get_version()
if ver == '55':
ssh_client.run_command('rpm -i {}'.format(cfme_performance['tools']['rpms']['epel7_rpm']))
ssh_client.run_command('yum install -y smem')
# Patch smem to display longer command line names
logger.info('Patching smem')
ssh_client.run_command(r'sed -i s/\.27s/\.200s/g /usr/bin/smem')
def create_report(scenario_data, appliance_results, process_results, use_slab, grafana_urls):
logger.info('Creating Memory Monitoring Report.')
ver = current_version()
provider_names = 'No Providers'
if 'providers' in scenario_data['scenario']:
provider_names = ', '.join(scenario_data['scenario']['providers'])
workload_path = results_path.join('{}-{}-{}'.format(test_ts, scenario_data['test_dir'], ver))
if not os.path.exists(str(workload_path)):
os.makedirs(str(workload_path))
scenario_path = workload_path.join(scenario_data['scenario']['name'])
if os.path.exists(str(scenario_path)):
logger.warning('Duplicate Workload-Scenario Name: {}'.format(scenario_path))
scenario_path = workload_path.join('{}-{}'.format(time.strftime('%Y%m%d%H%M%S'),
scenario_data['scenario']['name']))
logger.warning('Using: {}'.format(scenario_path))
os.mkdir(str(scenario_path))
mem_graphs_path = scenario_path.join('graphs')
if not os.path.exists(str(mem_graphs_path)):
os.mkdir(str(mem_graphs_path))
mem_rawdata_path = scenario_path.join('rawdata')
if not os.path.exists(str(mem_rawdata_path)):
os.mkdir(str(mem_rawdata_path))
graph_appliance_measurements(mem_graphs_path, ver, appliance_results, use_slab, provider_names)
graph_individual_process_measurements(mem_graphs_path, process_results, provider_names)
graph_same_miq_workers(mem_graphs_path, process_results, provider_names)
graph_all_miq_workers(mem_graphs_path, process_results, provider_names)
# Dump scenario Yaml:
with open(str(scenario_path.join('scenario.yml')), 'w') as scenario_file:
yaml.safe_dump(dict(scenario_data['scenario']), scenario_file, default_flow_style=False)
generate_summary_csv(scenario_path.join('{}-summary.csv'.format(ver)), appliance_results,
process_results, provider_names, ver)
generate_raw_data_csv(mem_rawdata_path, appliance_results, process_results)
generate_summary_html(scenario_path, ver, appliance_results, process_results, scenario_data,
provider_names, grafana_urls)
generate_workload_html(scenario_path, ver, scenario_data, provider_names, grafana_urls)
logger.info('Finished Creating Report')
def compile_per_process_results(procs_to_compile, process_results, ts_end):
alive_pids = 0
recycled_pids = 0
total_running_rss = 0
total_running_pss = 0
total_running_uss = 0
total_running_vss = 0
total_running_swap = 0
for process in procs_to_compile:
if process in process_results:
for pid in process_results[process]:
if ts_end in process_results[process][pid]:
alive_pids += 1
total_running_rss += process_results[process][pid][ts_end]['rss']
total_running_pss += process_results[process][pid][ts_end]['pss']
total_running_uss += process_results[process][pid][ts_end]['uss']
total_running_vss += process_results[process][pid][ts_end]['vss']
total_running_swap += process_results[process][pid][ts_end]['swap']
else:
recycled_pids += 1
return alive_pids, recycled_pids, total_running_rss, total_running_pss, total_running_uss, \
total_running_vss, total_running_swap
def generate_raw_data_csv(directory, appliance_results, process_results):
starttime = time.time()
file_name = str(directory.join('appliance.csv'))
with open(file_name, 'w') as csv_file:
csv_file.write('TimeStamp,Total,Free,Used,Buffers,Cached,Slab,Swap_Total,Swap_Free\n')
for ts in appliance_results:
csv_file.write('{},{},{},{},{},{},{},{},{}\n'.format(ts,
appliance_results[ts]['total'], appliance_results[ts]['free'],
appliance_results[ts]['used'], appliance_results[ts]['buffers'],
appliance_results[ts]['cached'], appliance_results[ts]['slab'],
appliance_results[ts]['swap_total'], appliance_results[ts]['swap_free']))
for process_name in process_results:
for process_pid in process_results[process_name]:
file_name = str(directory.join('{}-{}.csv'.format(process_pid, process_name)))
with open(file_name, 'w') as csv_file:
csv_file.write('TimeStamp,RSS,PSS,USS,VSS,SWAP\n')
for ts in process_results[process_name][process_pid]:
csv_file.write('{},{},{},{},{},{}\n'.format(ts,
process_results[process_name][process_pid][ts]['rss'],
process_results[process_name][process_pid][ts]['pss'],
process_results[process_name][process_pid][ts]['uss'],
process_results[process_name][process_pid][ts]['vss'],
process_results[process_name][process_pid][ts]['swap']))
timediff = time.time() - starttime
logger.info('Generated Raw Data CSVs in: {}'.format(timediff))
def generate_summary_csv(file_name, appliance_results, process_results, provider_names,
version_string):
starttime = time.time()
with open(str(file_name), 'w') as csv_file:
csv_file.write('Version: {}, Provider(s): {}\n'.format(version_string, provider_names))
csv_file.write('Measurement,Start of test,End of test\n')
start = list(appliance_results.keys())[0]
end = list(appliance_results.keys())[-1]
csv_file.write('Appliance Total Memory,{},{}\n'.format(
round(appliance_results[start]['total'], 2), round(appliance_results[end]['total'], 2)))
csv_file.write('Appliance Free Memory,{},{}\n'.format(
round(appliance_results[start]['free'], 2), round(appliance_results[end]['free'], 2)))
csv_file.write('Appliance Used Memory,{},{}\n'.format(
round(appliance_results[start]['used'], 2), round(appliance_results[end]['used'], 2)))
csv_file.write('Appliance Buffers,{},{}\n'.format(
round(appliance_results[start]['buffers'], 2),
round(appliance_results[end]['buffers'], 2)))
csv_file.write('Appliance Cached,{},{}\n'.format(
round(appliance_results[start]['cached'], 2),
round(appliance_results[end]['cached'], 2)))
csv_file.write('Appliance Slab,{},{}\n'.format(
round(appliance_results[start]['slab'], 2),
round(appliance_results[end]['slab'], 2)))
csv_file.write('Appliance Total Swap,{},{}\n'.format(
round(appliance_results[start]['swap_total'], 2),
round(appliance_results[end]['swap_total'], 2)))
csv_file.write('Appliance Free Swap,{},{}\n'.format(
round(appliance_results[start]['swap_free'], 2),
round(appliance_results[end]['swap_free'], 2)))
summary_csv_measurement_dump(csv_file, process_results, 'rss')
summary_csv_measurement_dump(csv_file, process_results, 'pss')
summary_csv_measurement_dump(csv_file, process_results, 'uss')
summary_csv_measurement_dump(csv_file, process_results, 'vss')
summary_csv_measurement_dump(csv_file, process_results, 'swap')
timediff = time.time() - starttime
logger.info('Generated Summary CSV in: {}'.format(timediff))
def generate_summary_html(directory, version_string, appliance_results, process_results,
scenario_data, provider_names, grafana_urls):
starttime = time.time()
file_name = str(directory.join('index.html'))
with open(file_name, 'w') as html_file:
html_file.write('<html>\n')
html_file.write('<head><title>{} - {} Memory Usage Performance</title></head>'.format(
version_string, provider_names))
html_file.write('<body>\n')
html_file.write('<b>CFME {} {} Test Results</b><br>\n'.format(version_string,
scenario_data['test_name'].title()))
html_file.write('<b>Appliance Roles:</b> {}<br>\n'.format(
scenario_data['appliance_roles'].replace(',', ', ')))
html_file.write('<b>Provider(s):</b> {}<br>\n'.format(provider_names))
html_file.write('<b><a href=\'https://{}/\' target="_blank">{}</a></b>\n'.format(
scenario_data['appliance_ip'], scenario_data['appliance_name']))
if grafana_urls:
for g_name in sorted(grafana_urls.keys()):
html_file.write(
' : <b><a href=\'{}\' target="_blank">{}</a></b>'.format(grafana_urls[g_name],
g_name))
html_file.write('<br>\n')
html_file.write('<b><a href=\'{}-summary.csv\'>Summary CSV</a></b>'.format(version_string))
html_file.write(' : <b><a href=\'workload.html\'>Workload Info</a></b>')
html_file.write(' : <b><a href=\'graphs/\'>Graphs directory</a></b>\n')
html_file.write(' : <b><a href=\'rawdata/\'>CSVs directory</a></b><br>\n')
start = list(appliance_results.keys())[0]
end = list(appliance_results.keys())[-1]
timediff = end - start
total_proc_count = 0
for proc_name in process_results:
total_proc_count += len(list(process_results[proc_name].keys()))
growth = appliance_results[end]['used'] - appliance_results[start]['used']
max_used_memory = 0
for ts in appliance_results:
if appliance_results[ts]['used'] > max_used_memory:
max_used_memory = appliance_results[ts]['used']
html_file.write('<table border="1">\n')
html_file.write('<tr><td>\n')
# Appliance Wide Results
html_file.write('<table style="width:100%" border="1">\n')
html_file.write('<tr>\n')
html_file.write('<td><b>Version</b></td>\n')
html_file.write('<td><b>Start Time</b></td>\n')
html_file.write('<td><b>End Time</b></td>\n')
html_file.write('<td><b>Total Test Time</b></td>\n')
html_file.write('<td><b>Total Memory</b></td>\n')
html_file.write('<td><b>Start Used Memory</b></td>\n')
html_file.write('<td><b>End Used Memory</b></td>\n')
html_file.write('<td><b>Used Memory Growth</b></td>\n')
html_file.write('<td><b>Max Used Memory</b></td>\n')
html_file.write('<td><b>Total Tracked Processes</b></td>\n')
html_file.write('</tr>\n')
html_file.write('<td><a href=\'rawdata/appliance.csv\'>{}</a></td>\n'.format(
version_string))
html_file.write('<td>{}</td>\n'.format(start.replace(microsecond=0)))
html_file.write('<td>{}</td>\n'.format(end.replace(microsecond=0)))
html_file.write('<td>{}</td>\n'.format(str(timediff).partition('.')[0]))
html_file.write('<td>{}</td>\n'.format(round(appliance_results[end]['total'], 2)))
html_file.write('<td>{}</td>\n'.format(round(appliance_results[start]['used'], 2)))
html_file.write('<td>{}</td>\n'.format(round(appliance_results[end]['used'], 2)))
html_file.write('<td>{}</td>\n'.format(round(growth, 2)))
html_file.write('<td>{}</td>\n'.format(round(max_used_memory, 2)))
html_file.write('<td>{}</td>\n'.format(total_proc_count))
html_file.write('</table>\n')
# CFME/Miq Worker Results
html_file.write('<table style="width:100%" border="1">\n')
html_file.write('<tr>\n')
html_file.write('<td><b>Total CFME/Miq Workers</b></td>\n')
html_file.write('<td><b>End Running Workers</b></td>\n')
html_file.write('<td><b>Recycled Workers</b></td>\n')
html_file.write('<td><b>End Total Worker RSS</b></td>\n')
html_file.write('<td><b>End Total Worker PSS</b></td>\n')
html_file.write('<td><b>End Total Worker USS</b></td>\n')
html_file.write('<td><b>End Total Worker VSS</b></td>\n')
html_file.write('<td><b>End Total Worker SWAP</b></td>\n')
html_file.write('</tr>\n')
a_pids, r_pids, t_rss, t_pss, t_uss, t_vss, t_swap = compile_per_process_results(
miq_workers, process_results, end)
html_file.write('<tr>\n')
html_file.write('<td>{}</td>\n'.format(a_pids + r_pids))
html_file.write('<td>{}</td>\n'.format(a_pids))
html_file.write('<td>{}</td>\n'.format(r_pids))
html_file.write('<td>{}</td>\n'.format(round(t_rss, 2)))
html_file.write('<td>{}</td>\n'.format(round(t_pss, 2)))
html_file.write('<td>{}</td>\n'.format(round(t_uss, 2)))
html_file.write('<td>{}</td>\n'.format(round(t_vss, 2)))
html_file.write('<td>{}</td>\n'.format(round(t_swap, 2)))
html_file.write('</tr>\n')
html_file.write('</table>\n')
# Per Process Summaries:
html_file.write('<table style="width:100%" border="1">\n')
html_file.write('<tr>\n')
html_file.write('<td><b>Application/Process Group</b></td>\n')
html_file.write('<td><b>Total Processes</b></td>\n')
html_file.write('<td><b>End Running Processes</b></td>\n')
html_file.write('<td><b>Recycled Processes</b></td>\n')
html_file.write('<td><b>End Total Process RSS</b></td>\n')
html_file.write('<td><b>End Total Process PSS</b></td>\n')
html_file.write('<td><b>End Total Process USS</b></td>\n')
html_file.write('<td><b>End Total Process VSS</b></td>\n')
html_file.write('<td><b>End Total Process SWAP</b></td>\n')
html_file.write('</tr>\n')
a_pids, r_pids, t_rss, t_pss, t_uss, t_vss, t_swap = compile_per_process_results(
ruby_processes, process_results, end)
t_a_pids = a_pids
t_r_pids = r_pids
tt_rss = t_rss
tt_pss = t_pss
tt_uss = t_uss
tt_vss = t_vss
tt_swap = t_swap
html_file.write('<tr>\n')
html_file.write('<td>ruby</td>\n')
html_file.write('<td>{}</td>\n'.format(a_pids + r_pids))
html_file.write('<td>{}</td>\n'.format(a_pids))
html_file.write('<td>{}</td>\n'.format(r_pids))
html_file.write('<td>{}</td>\n'.format(round(t_rss, 2)))
html_file.write('<td>{}</td>\n'.format(round(t_pss, 2)))
html_file.write('<td>{}</td>\n'.format(round(t_uss, 2)))
html_file.write('<td>{}</td>\n'.format(round(t_vss, 2)))
html_file.write('<td>{}</td>\n'.format(round(t_swap, 2)))
html_file.write('</tr>\n')
# memcached Summary
a_pids, r_pids, t_rss, t_pss, t_uss, t_vss, t_swap = compile_per_process_results(
['memcached'], process_results, end)
t_a_pids += a_pids
t_r_pids += r_pids
tt_rss += t_rss
tt_pss += t_pss
tt_uss += t_uss
tt_vss += t_vss
tt_swap += t_swap
html_file.write('<tr>\n')
html_file.write('<td>memcached</td>\n')
html_file.write('<td>{}</td>\n'.format(a_pids + r_pids))
html_file.write('<td>{}</td>\n'.format(a_pids))
html_file.write('<td>{}</td>\n'.format(r_pids))
html_file.write('<td>{}</td>\n'.format(round(t_rss, 2)))
html_file.write('<td>{}</td>\n'.format(round(t_pss, 2)))
html_file.write('<td>{}</td>\n'.format(round(t_uss, 2)))
html_file.write('<td>{}</td>\n'.format(round(t_vss, 2)))
html_file.write('<td>{}</td>\n'.format(round(t_swap, 2)))
html_file.write('</tr>\n')
# Postgres Summary
a_pids, r_pids, t_rss, t_pss, t_uss, t_vss, t_swap = compile_per_process_results(
['postgres'], process_results, end)
t_a_pids += a_pids
t_r_pids += r_pids
tt_rss += t_rss
tt_pss += t_pss
tt_uss += t_uss
tt_vss += t_vss
tt_swap += t_swap
html_file.write('<tr>\n')
html_file.write('<td>postgres</td>\n')
html_file.write('<td>{}</td>\n'.format(a_pids + r_pids))
html_file.write('<td>{}</td>\n'.format(a_pids))
html_file.write('<td>{}</td>\n'.format(r_pids))
html_file.write('<td>{}</td>\n'.format(round(t_rss, 2)))
html_file.write('<td>{}</td>\n'.format(round(t_pss, 2)))
html_file.write('<td>{}</td>\n'.format(round(t_uss, 2)))
html_file.write('<td>{}</td>\n'.format(round(t_vss, 2)))
html_file.write('<td>{}</td>\n'.format(round(t_swap, 2)))
html_file.write('</tr>\n')
# httpd Summary
a_pids, r_pids, t_rss, t_pss, t_uss, t_vss, t_swap = compile_per_process_results(['httpd'],
process_results, end)
t_a_pids += a_pids
t_r_pids += r_pids
tt_rss += t_rss
tt_pss += t_pss
tt_uss += t_uss
tt_vss += t_vss
tt_swap += t_swap
html_file.write('<tr>\n')
html_file.write('<td>httpd</td>\n')
html_file.write('<td>{}</td>\n'.format(a_pids + r_pids))
html_file.write('<td>{}</td>\n'.format(a_pids))
html_file.write('<td>{}</td>\n'.format(r_pids))
html_file.write('<td>{}</td>\n'.format(round(t_rss, 2)))
html_file.write('<td>{}</td>\n'.format(round(t_pss, 2)))
html_file.write('<td>{}</td>\n'.format(round(t_uss, 2)))
html_file.write('<td>{}</td>\n'.format(round(t_vss, 2)))
html_file.write('<td>{}</td>\n'.format(round(t_swap, 2)))
html_file.write('</tr>\n')
# collectd Summary
a_pids, r_pids, t_rss, t_pss, t_uss, t_vss, t_swap = compile_per_process_results(
['collectd'], process_results, end)
t_a_pids += a_pids
t_r_pids += r_pids
tt_rss += t_rss
tt_pss += t_pss
tt_uss += t_uss
tt_vss += t_vss
tt_swap += t_swap
html_file.write('<tr>\n')
html_file.write('<td>collectd</td>\n')
html_file.write('<td>{}</td>\n'.format(a_pids + r_pids))
html_file.write('<td>{}</td>\n'.format(a_pids))
html_file.write('<td>{}</td>\n'.format(r_pids))
html_file.write('<td>{}</td>\n'.format(round(t_rss, 2)))
html_file.write('<td>{}</td>\n'.format(round(t_pss, 2)))
html_file.write('<td>{}</td>\n'.format(round(t_uss, 2)))
html_file.write('<td>{}</td>\n'.format(round(t_vss, 2)))
html_file.write('<td>{}</td>\n'.format(round(t_swap, 2)))
html_file.write('</tr>\n')
html_file.write('<tr>\n')
html_file.write('<td>total</td>\n')
html_file.write('<td>{}</td>\n'.format(t_a_pids + t_r_pids))
html_file.write('<td>{}</td>\n'.format(t_a_pids))
html_file.write('<td>{}</td>\n'.format(t_r_pids))
html_file.write('<td>{}</td>\n'.format(round(tt_rss, 2)))
html_file.write('<td>{}</td>\n'.format(round(tt_pss, 2)))
html_file.write('<td>{}</td>\n'.format(round(tt_uss, 2)))
html_file.write('<td>{}</td>\n'.format(round(tt_vss, 2)))
html_file.write('<td>{}</td>\n'.format(round(tt_swap, 2)))
html_file.write('</tr>\n')
html_file.write('</table>\n')
# Appliance Graph
html_file.write('</td></tr><tr><td>\n')
file_name = '{}-appliance_memory.png'.format(version_string)
html_file.write('<img src=\'graphs/{}\'>\n'.format(file_name))
file_name = '{}-appliance_swap.png'.format(version_string)
# Check for swap usage through out time frame:
max_swap_used = 0
for ts in appliance_results:
swap_used = appliance_results[ts]['swap_total'] - appliance_results[ts]['swap_free']
if swap_used > max_swap_used:
max_swap_used = swap_used
if max_swap_used < 10: # Less than 10MiB Max, then hide graph
html_file.write('<br><a href=\'graphs/{}\'>Swap Graph '.format(file_name))
html_file.write('(Hidden, max_swap_used < 10 MiB)</a>\n')
else:
html_file.write('<img src=\'graphs/{}\'>\n'.format(file_name))
html_file.write('</td></tr><tr><td>\n')
# Per Process Results
html_file.write('<table style="width:100%" border="1"><tr>\n')
html_file.write('<td><b>Process Name</b></td>\n')
html_file.write('<td><b>Process Pid</b></td>\n')
html_file.write('<td><b>Start Time</b></td>\n')
html_file.write('<td><b>End Time</b></td>\n')
html_file.write('<td><b>Time Alive</b></td>\n')
html_file.write('<td><b>RSS Mem Start</b></td>\n')
html_file.write('<td><b>RSS Mem End</b></td>\n')
html_file.write('<td><b>RSS Mem Change</b></td>\n')
html_file.write('<td><b>PSS Mem Start</b></td>\n')
html_file.write('<td><b>PSS Mem End</b></td>\n')
html_file.write('<td><b>PSS Mem Change</b></td>\n')
html_file.write('<td><b>CSV</b></td>\n')
html_file.write('</tr>\n')
# By Worker Type Memory Used
for ordered_name in process_order:
if ordered_name in process_results:
for pid in process_results[ordered_name]:
start = list(process_results[ordered_name][pid].keys())[0]
end = list(process_results[ordered_name][pid].keys())[-1]
timediff = end - start
html_file.write('<tr>\n')
if len(process_results[ordered_name]) > 1:
html_file.write('<td><a href=\'#{}\'>{}</a></td>\n'.format(ordered_name,
ordered_name))
html_file.write('<td><a href=\'graphs/{}-{}.png\'>{}</a></td>\n'.format(
ordered_name, pid, pid))
else:
html_file.write('<td>{}</td>\n'.format(ordered_name))
html_file.write('<td><a href=\'#{}-{}.png\'>{}</a></td>\n'.format(
ordered_name, pid, pid))
html_file.write('<td>{}</td>\n'.format(start.replace(microsecond=0)))
html_file.write('<td>{}</td>\n'.format(end.replace(microsecond=0)))
html_file.write('<td>{}</td>\n'.format(str(timediff).partition('.')[0]))
rss_change = process_results[ordered_name][pid][end]['rss'] - \
process_results[ordered_name][pid][start]['rss']
html_file.write('<td>{}</td>\n'.format(
round(process_results[ordered_name][pid][start]['rss'], 2)))
html_file.write('<td>{}</td>\n'.format(
round(process_results[ordered_name][pid][end]['rss'], 2)))
html_file.write('<td>{}</td>\n'.format(round(rss_change, 2)))
pss_change = process_results[ordered_name][pid][end]['pss'] - \
process_results[ordered_name][pid][start]['pss']
html_file.write('<td>{}</td>\n'.format(
round(process_results[ordered_name][pid][start]['pss'], 2)))
html_file.write('<td>{}</td>\n'.format(
round(process_results[ordered_name][pid][end]['pss'], 2)))
html_file.write('<td>{}</td>\n'.format(round(pss_change, 2)))
html_file.write('<td><a href=\'rawdata/{}-{}.csv\'>csv</a></td>\n'.format(
pid, ordered_name))
html_file.write('</tr>\n')
else:
logger.debug('Process/Worker not part of test: {}'.format(ordered_name))
html_file.write('</table>\n')
# Worker Graphs
for ordered_name in process_order:
if ordered_name in process_results:
html_file.write('<tr><td>\n')
html_file.write('<div id=\'{}\'>Process name: {}</div><br>\n'.format(
ordered_name, ordered_name))
if len(process_results[ordered_name]) > 1:
file_name = '{}-all.png'.format(ordered_name)
html_file.write('<img id=\'{}\' src=\'graphs/{}\'><br>\n'.format(file_name,
file_name))
else:
for pid in sorted(process_results[ordered_name]):
file_name = '{}-{}.png'.format(ordered_name, pid)
html_file.write('<img id=\'{}\' src=\'graphs/{}\'><br>\n'.format(
file_name, file_name))
html_file.write('</td></tr>\n')
html_file.write('</table>\n')
html_file.write('</body>\n')
html_file.write('</html>\n')
timediff = time.time() - starttime
logger.info('Generated Summary html in: {}'.format(timediff))
def generate_workload_html(directory, ver, scenario_data, provider_names, grafana_urls):
starttime = time.time()
file_name = str(directory.join('workload.html'))
with open(file_name, 'w') as html_file:
html_file.write('<html>\n')
html_file.write('<head><title>{} - {}</title></head>'.format(
scenario_data['test_name'], provider_names))
html_file.write('<body>\n')
html_file.write('<b>CFME {} {} Test Results</b><br>\n'.format(ver,
scenario_data['test_name'].title()))
html_file.write('<b>Appliance Roles:</b> {}<br>\n'.format(
scenario_data['appliance_roles'].replace(',', ', ')))
html_file.write('<b>Provider(s):</b> {}<br>\n'.format(provider_names))
html_file.write('<b><a href=\'https://{}/\' target="_blank">{}</a></b>\n'.format(
scenario_data['appliance_ip'], scenario_data['appliance_name']))
if grafana_urls:
for g_name in sorted(grafana_urls.keys()):
html_file.write(
' : <b><a href=\'{}\' target="_blank">{}</a></b>'.format(grafana_urls[g_name],
g_name))
html_file.write('<br>\n')
html_file.write('<b><a href=\'{}-summary.csv\'>Summary CSV</a></b>'.format(ver))
html_file.write(' : <b><a href=\'index.html\'>Memory Info</a></b>')
html_file.write(' : <b><a href=\'graphs/\'>Graphs directory</a></b>\n')
html_file.write(' : <b><a href=\'rawdata/\'>CSVs directory</a></b><br>\n')
html_file.write('<br><b>Scenario Data: </b><br>\n')
yaml_html = get_scenario_html(scenario_data['scenario'])
html_file.write(yaml_html + '\n')
html_file.write('<br>\n<br>\n<br>\n<b>Quantifier Data: </b>\n<br>\n<br>\n<br>\n<br>\n')
html_file.write('<table border="1">\n')
html_file.write('<tr>\n')
html_file.write('<td><b><font size="4"> System Information</font></b></td>\n')
html_file.write('</tr>\n')
html_file.write('<tr>\n')
html_file.write('<td>\n')
system_path = ('../version_info/system.csv')
html_file.write('<a href="{}" download="System_Versions-{}-{}"> System Versions</a>'
.format(system_path, test_ts, scenario_data['scenario']['name']))
html_file.write('</td>\n')
html_file.write('</tr>\n')
html_file.write('<tr>\n')
html_file.write('<td> </td>\n')
html_file.write('</tr>\n')
html_file.write('<tr>\n')
html_file.write('<td> </td>\n')
html_file.write('</tr>\n')
html_file.write('<tr>\n')
html_file.write('<td><b><font size="4"> Process Information</font></b></td>\n')
html_file.write('</tr>\n')
html_file.write('<tr>\n')
html_file.write('<td>\n')
process_path = ('../version_info/processes.csv')
html_file.write('<a href="{}" download="Process_Versions-{}-{}"> Process Versions</a>'
.format(process_path, test_ts, scenario_data['scenario']['name']))
html_file.write('</td>\n')
html_file.write('</tr>\n')
html_file.write('<tr>\n')
html_file.write('<td> </td>\n')
html_file.write('</tr>\n')
html_file.write('<tr>\n')
html_file.write('<td> </td>\n')
html_file.write('</tr>\n')
html_file.write('<tr>\n')
html_file.write('<td><b><font size="4"> Ruby Gem Information</font></b></td>\n')
html_file.write('</tr>\n')
html_file.write('<tr>\n')
html_file.write('<td>\n')
gems_path = ('../version_info/gems.csv')
html_file.write('<a href="{}" download="Gem_Versions-{}-{}"> Ruby Gem Versions</a>'
.format(gems_path, test_ts, scenario_data['scenario']['name']))
html_file.write('</td>\n')
html_file.write('</tr>\n')
html_file.write('<tr>\n')
html_file.write('<td> </td>\n')
html_file.write('</tr>\n')
html_file.write('<tr>\n')
html_file.write('<td> </td>\n')
html_file.write('</tr>\n')
html_file.write('<tr>\n')
html_file.write('<td><b><font size="4"> RPM Information</font></b></td>\n')
html_file.write('</tr>\n')
html_file.write('<tr>\n')
html_file.write('<td>\n')
rpms_path = ('../version_info/rpms.csv')
html_file.write('<a href="{}" download="RPM_Versions-{}-{}"> RPM Versions</a>'
.format(rpms_path, test_ts, scenario_data['scenario']['name']))
html_file.write('</td>\n')
html_file.write('</tr>\n')
html_file.write('</table>\n')
html_file.write('</body>\n')
html_file.write('</html>\n')
timediff = time.time() - starttime
logger.info('Generated Workload html in: {}'.format(timediff))
def add_workload_quantifiers(quantifiers, scenario_data):
starttime = time.time()
ver = current_version()
workload_path = results_path.join('{}-{}-{}'.format(test_ts, scenario_data['test_dir'], ver))
directory = workload_path.join(scenario_data['scenario']['name'])
file_name = str(directory.join('workload.html'))
marker = '<b>Quantifier Data: </b>'
yaml_dict = quantifiers
yaml_string = str(json.dumps(yaml_dict, indent=4))
yaml_html = yaml_string.replace('\n', '<br>\n')
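    # Splice the quantifier JSON into workload.html directly after the marker
    # line, preserving everything that follows it.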
with open(file_name, 'r+') as html_file:
line = ''
while marker not in line:
line = html_file.readline()
marker_pos = html_file.tell()
remainder = html_file.read()
html_file.seek(marker_pos)
html_file.write('{} \n'.format(yaml_html))
html_file.write(remainder)
timediff = time.time() - starttime
logger.info('Added quantifiers in: {}'.format(timediff))
def get_scenario_html(scenario_data):
scenario_dict = create_dict(scenario_data)
scenario_yaml = yaml.safe_dump(scenario_dict)
scenario_html = scenario_yaml.replace('\n', '<br>\n')
scenario_html = scenario_html.replace(', ', '<br>\n - ')
scenario_html = scenario_html.replace(' ', ' ')
scenario_html = scenario_html.replace('[', '<br>\n - ')
scenario_html = scenario_html.replace(']', '\n')
return scenario_html
def create_dict(attr_dict):
main_dict = dict(attr_dict)
for key, value in main_dict.items():
if type(value) == AttrDict:
main_dict[key] = create_dict(value)
return main_dict
def graph_appliance_measurements(graphs_path, ver, appliance_results, use_slab, provider_names):
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.dates as mdates
import matplotlib.pyplot as plt
from cycler import cycler
starttime = time.time()
dates = list(appliance_results.keys())
total_memory_list = list(appliance_results[ts]['total']
for ts in appliance_results.keys())
free_memory_list = list(appliance_results[ts]['free']
for ts in appliance_results.keys())
used_memory_list = list(appliance_results[ts]['used']
for ts in appliance_results.keys())
buffers_memory_list = list(appliance_results[ts]['buffers']
for ts in appliance_results.keys())
cache_memory_list = list(appliance_results[ts]['cached']
for ts in appliance_results.keys())
slab_memory_list = list(appliance_results[ts]['slab']
for ts in appliance_results.keys())
swap_total_list = list(appliance_results[ts]['swap_total']
for ts in appliance_results.keys())
swap_free_list = list(appliance_results[ts]['swap_free']
for ts in appliance_results.keys())
# Stack Plot Memory Usage
file_name = graphs_path.join('{}-appliance_memory.png'.format(ver))
mpl.rcParams['axes.prop_cycle'] = cycler('color', ['firebrick', 'coral', 'steelblue',
'forestgreen'])
fig, ax = plt.subplots()
plt.title('Provider(s): {}\nAppliance Memory'.format(provider_names))
plt.xlabel('Date / Time')
plt.ylabel('Memory (MiB)')
if use_slab:
y = [used_memory_list, slab_memory_list, cache_memory_list, free_memory_list]
else:
y = [used_memory_list, buffers_memory_list, cache_memory_list, free_memory_list]
plt.stackplot(dates, *y, baseline='zero')
ax.annotate(str(round(total_memory_list[0], 2)), xy=(dates[0], total_memory_list[0]),
xytext=(4, 4), textcoords='offset points')
ax.annotate(str(round(total_memory_list[-1], 2)), xy=(dates[-1], total_memory_list[-1]),
xytext=(4, -4), textcoords='offset points')
if use_slab:
ax.annotate(str(round(slab_memory_list[0], 2)), xy=(dates[0], used_memory_list[0] +
slab_memory_list[0]), xytext=(4, 4), textcoords='offset points')
ax.annotate(str(round(slab_memory_list[-1], 2)), xy=(dates[-1], used_memory_list[-1] +
slab_memory_list[-1]), xytext=(4, -4), textcoords='offset points')
ax.annotate(str(round(cache_memory_list[0], 2)), xy=(dates[0], used_memory_list[0] +
slab_memory_list[0] + cache_memory_list[0]), xytext=(4, 4),
textcoords='offset points')
ax.annotate(str(round(cache_memory_list[-1], 2)), xy=(
dates[-1], used_memory_list[-1] + slab_memory_list[-1] + cache_memory_list[-1]),
xytext=(4, -4), textcoords='offset points')
else:
ax.annotate(str(round(buffers_memory_list[0], 2)), xy=(
dates[0], used_memory_list[0] + buffers_memory_list[0]), xytext=(4, 4),
textcoords='offset points')
ax.annotate(str(round(buffers_memory_list[-1], 2)), xy=(dates[-1],
used_memory_list[-1] + buffers_memory_list[-1]), xytext=(4, -4),
textcoords='offset points')
ax.annotate(str(round(cache_memory_list[0], 2)), xy=(dates[0], used_memory_list[0] +
buffers_memory_list[0] + cache_memory_list[0]), xytext=(4, 4),
textcoords='offset points')
ax.annotate(str(round(cache_memory_list[-1], 2)), xy=(
dates[-1], used_memory_list[-1] + buffers_memory_list[-1] + cache_memory_list[-1]),
xytext=(4, -4), textcoords='offset points')
ax.annotate(str(round(used_memory_list[0], 2)), xy=(dates[0], used_memory_list[0]),
xytext=(4, 4), textcoords='offset points')
ax.annotate(str(round(used_memory_list[-1], 2)), xy=(dates[-1], used_memory_list[-1]),
xytext=(4, -4), textcoords='offset points')
datefmt = mdates.DateFormatter('%m-%d %H-%M')
ax.xaxis.set_major_formatter(datefmt)
ax.grid(True)
p1 = plt.Rectangle((0, 0), 1, 1, fc='firebrick')
p2 = plt.Rectangle((0, 0), 1, 1, fc='coral')
p3 = plt.Rectangle((0, 0), 1, 1, fc='steelblue')
p4 = plt.Rectangle((0, 0), 1, 1, fc='forestgreen')
if use_slab:
ax.legend([p1, p2, p3, p4], ['Used', 'Slab', 'Cached', 'Free'],
bbox_to_anchor=(1.45, 0.22), fancybox=True)
else:
ax.legend([p1, p2, p3, p4], ['Used', 'Buffers', 'Cached', 'Free'],
bbox_to_anchor=(1.45, 0.22), fancybox=True)
fig.autofmt_xdate()
plt.savefig(str(file_name), bbox_inches='tight')
plt.close()
# Stack Plot Swap usage
mpl.rcParams['axes.prop_cycle'] = cycler('color', ['firebrick', 'forestgreen'])
file_name = graphs_path.join('{}-appliance_swap.png'.format(ver))
fig, ax = plt.subplots()
plt.title('Provider(s): {}\nAppliance Swap'.format(provider_names))
plt.xlabel('Date / Time')
plt.ylabel('Swap (MiB)')
swap_used_list = [t - f for f, t in zip(swap_free_list, swap_total_list)]
y = [swap_used_list, swap_free_list]
plt.stackplot(dates, *y, baseline='zero')
ax.annotate(str(round(swap_total_list[0], 2)), xy=(dates[0], swap_total_list[0]),
xytext=(4, 4), textcoords='offset points')
ax.annotate(str(round(swap_total_list[-1], 2)), xy=(dates[-1], swap_total_list[-1]),
xytext=(4, -4), textcoords='offset points')
ax.annotate(str(round(swap_used_list[0], 2)), xy=(dates[0], swap_used_list[0]),
xytext=(4, 4), textcoords='offset points')
ax.annotate(str(round(swap_used_list[-1], 2)), xy=(dates[-1], swap_used_list[-1]),
xytext=(4, -4), textcoords='offset points')
datefmt = mdates.DateFormatter('%m-%d %H-%M')
ax.xaxis.set_major_formatter(datefmt)
ax.grid(True)
p1 = plt.Rectangle((0, 0), 1, 1, fc='firebrick')
p2 = plt.Rectangle((0, 0), 1, 1, fc='forestgreen')
ax.legend([p1, p2], ['Used Swap', 'Free Swap'], bbox_to_anchor=(1.45, 0.22), fancybox=True)
fig.autofmt_xdate()
plt.savefig(str(file_name), bbox_inches='tight')
plt.close()
# Reset Colors
mpl.rcdefaults()
timediff = time.time() - starttime
logger.info('Plotted Appliance Memory in: {}'.format(timediff))
def graph_all_miq_workers(graph_file_path, process_results, provider_names):
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.dates as mdates
import matplotlib.pyplot as plt
starttime = time.time()
file_name = graph_file_path.join('all-processes.png')
fig, ax = plt.subplots()
plt.title('Provider(s): {}\nAll Workers/Monitored Processes'.format(provider_names))
plt.xlabel('Date / Time')
plt.ylabel('Memory (MiB)')
for process_name in process_results:
if 'Worker' in process_name or 'Handler' in process_name or 'Catcher' in process_name:
for process_pid in process_results[process_name]:
dates = list(process_results[process_name][process_pid].keys())
rss_samples = list(process_results[process_name][process_pid][ts]['rss']
for ts in process_results[process_name][process_pid].keys())
vss_samples = list(process_results[process_name][process_pid][ts]['vss']
for ts in process_results[process_name][process_pid].keys())
plt.plot(dates, rss_samples, linewidth=1, label='{} {} RSS'.format(process_pid,
process_name))
plt.plot(dates, vss_samples, linewidth=1, label='{} {} VSS'.format(
process_pid, process_name))
datefmt = mdates.DateFormatter('%m-%d %H-%M')
ax.xaxis.set_major_formatter(datefmt)
ax.grid(True)
plt.legend(loc='upper center', bbox_to_anchor=(1.2, 0.1), fancybox=True)
fig.autofmt_xdate()
plt.savefig(str(file_name), bbox_inches='tight')
plt.close()
timediff = time.time() - starttime
logger.info('Plotted All Type/Process Memory in: {}'.format(timediff))
def graph_individual_process_measurements(graph_file_path, process_results, provider_names):
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.dates as mdates
import matplotlib.pyplot as plt
starttime = time.time()
for process_name in process_results:
for process_pid in process_results[process_name]:
file_name = graph_file_path.join('{}-{}.png'.format(process_name, process_pid))
dates = list(process_results[process_name][process_pid].keys())
rss_samples = list(process_results[process_name][process_pid][ts]['rss']
for ts in process_results[process_name][process_pid].keys())
pss_samples = list(process_results[process_name][process_pid][ts]['pss']
for ts in process_results[process_name][process_pid].keys())
uss_samples = list(process_results[process_name][process_pid][ts]['uss']
for ts in process_results[process_name][process_pid].keys())
vss_samples = list(process_results[process_name][process_pid][ts]['vss']
for ts in process_results[process_name][process_pid].keys())
swap_samples = list(process_results[process_name][process_pid][ts]['swap']
for ts in process_results[process_name][process_pid].keys())
fig, ax = plt.subplots()
plt.title('Provider(s)/Size: {}\nProcess/Worker: {}\nPID: {}'.format(provider_names,
process_name, process_pid))
plt.xlabel('Date / Time')
plt.ylabel('Memory (MiB)')
plt.plot(dates, rss_samples, linewidth=1, label='RSS')
plt.plot(dates, pss_samples, linewidth=1, label='PSS')
plt.plot(dates, uss_samples, linewidth=1, label='USS')
plt.plot(dates, vss_samples, linewidth=1, label='VSS')
plt.plot(dates, swap_samples, linewidth=1, label='Swap')
if rss_samples:
ax.annotate(str(round(rss_samples[0], 2)), xy=(dates[0], rss_samples[0]),
xytext=(4, 4), textcoords='offset points')
ax.annotate(str(round(rss_samples[-1], 2)), xy=(dates[-1], rss_samples[-1]),
xytext=(4, -4), textcoords='offset points')
if pss_samples:
ax.annotate(str(round(pss_samples[0], 2)), xy=(dates[0], pss_samples[0]),
xytext=(4, 4), textcoords='offset points')
ax.annotate(str(round(pss_samples[-1], 2)), xy=(dates[-1], pss_samples[-1]),
xytext=(4, -4), textcoords='offset points')
if uss_samples:
ax.annotate(str(round(uss_samples[0], 2)), xy=(dates[0], uss_samples[0]),
xytext=(4, 4), textcoords='offset points')
ax.annotate(str(round(uss_samples[-1], 2)), xy=(dates[-1], uss_samples[-1]),
xytext=(4, -4), textcoords='offset points')
if vss_samples:
ax.annotate(str(round(vss_samples[0], 2)), xy=(dates[0], vss_samples[0]),
xytext=(4, 4), textcoords='offset points')
ax.annotate(str(round(vss_samples[-1], 2)), xy=(dates[-1], vss_samples[-1]),
xytext=(4, -4), textcoords='offset points')
if swap_samples:
ax.annotate(str(round(swap_samples[0], 2)), xy=(dates[0], swap_samples[0]),
xytext=(4, 4), textcoords='offset points')
ax.annotate(str(round(swap_samples[-1], 2)), xy=(dates[-1], swap_samples[-1]),
xytext=(4, -4), textcoords='offset points')
datefmt = mdates.DateFormatter('%m-%d %H-%M')
ax.xaxis.set_major_formatter(datefmt)
ax.grid(True)
plt.legend(loc='upper center', bbox_to_anchor=(1.2, 0.1), fancybox=True)
fig.autofmt_xdate()
plt.savefig(str(file_name), bbox_inches='tight')
plt.close()
timediff = time.time() - starttime
logger.info('Plotted Individual Process Memory in: {}'.format(timediff))
def graph_same_miq_workers(graph_file_path, process_results, provider_names):
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.dates as mdates
import matplotlib.pyplot as plt
starttime = time.time()
for process_name in process_results:
if len(process_results[process_name]) > 1:
logger.debug('Plotting {} {} processes on single graph.'.format(
len(process_results[process_name]), process_name))
file_name = graph_file_path.join('{}-all.png'.format(process_name))
fig, ax = plt.subplots()
pids = 'PIDs: '
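            # Build a 'PIDs: a, b, ...' title string, wrapping to a new line
            # after every sixth pid to keep the plot title readable.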
for i, pid in enumerate(process_results[process_name], 1):
pids = '{}{}'.format(pids, '{},{}'.format(pid, [' ', '\n'][i % 6 == 0]))
pids = pids[0:-2]
plt.title('Provider: {}\nProcess/Worker: {}\n{}'.format(provider_names,
process_name, pids))
plt.xlabel('Date / Time')
plt.ylabel('Memory (MiB)')
for process_pid in process_results[process_name]:
dates = list(process_results[process_name][process_pid].keys())
rss_samples = list(process_results[process_name][process_pid][ts]['rss']
for ts in process_results[process_name][process_pid].keys())
pss_samples = list(process_results[process_name][process_pid][ts]['pss']
for ts in process_results[process_name][process_pid].keys())
uss_samples = list(process_results[process_name][process_pid][ts]['uss']
for ts in process_results[process_name][process_pid].keys())
vss_samples = list(process_results[process_name][process_pid][ts]['vss']
for ts in process_results[process_name][process_pid].keys())
swap_samples = list(process_results[process_name][process_pid][ts]['swap']
for ts in process_results[process_name][process_pid].keys())
plt.plot(dates, rss_samples, linewidth=1, label='{} RSS'.format(process_pid))
plt.plot(dates, pss_samples, linewidth=1, label='{} PSS'.format(process_pid))
plt.plot(dates, uss_samples, linewidth=1, label='{} USS'.format(process_pid))
plt.plot(dates, vss_samples, linewidth=1, label='{} VSS'.format(process_pid))
plt.plot(dates, swap_samples, linewidth=1, label='{} SWAP'.format(process_pid))
if rss_samples:
ax.annotate(str(round(rss_samples[0], 2)), xy=(dates[0], rss_samples[0]),
xytext=(4, 4), textcoords='offset points')
ax.annotate(str(round(rss_samples[-1], 2)), xy=(dates[-1],
rss_samples[-1]), xytext=(4, -4), textcoords='offset points')
if pss_samples:
ax.annotate(str(round(pss_samples[0], 2)), xy=(dates[0],
pss_samples[0]), xytext=(4, 4), textcoords='offset points')
ax.annotate(str(round(pss_samples[-1], 2)), xy=(dates[-1],
pss_samples[-1]), xytext=(4, -4), textcoords='offset points')
if uss_samples:
ax.annotate(str(round(uss_samples[0], 2)), xy=(dates[0],
uss_samples[0]), xytext=(4, 4), textcoords='offset points')
ax.annotate(str(round(uss_samples[-1], 2)), xy=(dates[-1],
uss_samples[-1]), xytext=(4, -4), textcoords='offset points')
if vss_samples:
ax.annotate(str(round(vss_samples[0], 2)), xy=(dates[0],
vss_samples[0]), xytext=(4, 4), textcoords='offset points')
ax.annotate(str(round(vss_samples[-1], 2)), xy=(dates[-1],
vss_samples[-1]), xytext=(4, -4), textcoords='offset points')
if swap_samples:
ax.annotate(str(round(swap_samples[0], 2)), xy=(dates[0],
swap_samples[0]), xytext=(4, 4), textcoords='offset points')
ax.annotate(str(round(swap_samples[-1], 2)), xy=(dates[-1],
swap_samples[-1]), xytext=(4, -4), textcoords='offset points')
datefmt = mdates.DateFormatter('%m-%d %H-%M')
ax.xaxis.set_major_formatter(datefmt)
ax.grid(True)
plt.legend(loc='upper center', bbox_to_anchor=(1.2, 0.1), fancybox=True)
fig.autofmt_xdate()
plt.savefig(str(file_name), bbox_inches='tight')
plt.close()
timediff = time.time() - starttime
logger.info('Plotted Same Type/Process Memory in: {}'.format(timediff))
def summary_csv_measurement_dump(csv_file, process_results, measurement):
csv_file.write('---------------------------------------------\n')
csv_file.write('Per Process {} Memory Usage\n'.format(measurement.upper()))
csv_file.write('---------------------------------------------\n')
csv_file.write('Process/Worker Type,PID,Start of test,End of test\n')
for ordered_name in process_order:
if ordered_name in process_results:
for process_pid in sorted(process_results[ordered_name]):
start = list(process_results[ordered_name][process_pid].keys())[0]
end = list(process_results[ordered_name][process_pid].keys())[-1]
csv_file.write('{},{},{},{}\n'.format(ordered_name, process_pid,
round(process_results[ordered_name][process_pid][start][measurement], 2),
round(process_results[ordered_name][process_pid][end][measurement], 2)))
| gpl-2.0 |
scikit-optimize/scikit-optimize.github.io | 0.7/_downloads/fafa416932f350631f99d023396799bd/sklearn-gridsearchcv-replacement.py | 1 | 6397 | """
==========================================
Scikit-learn hyperparameter search wrapper
==========================================
Iaroslav Shcherbatyi, Tim Head and Gilles Louppe. June 2017.
Reformatted by Holger Nahrstaedt 2020
.. currentmodule:: skopt
Introduction
============
This example assumes basic familiarity with
`scikit-learn <http://scikit-learn.org/stable/index.html>`_.
Searching for the machine learning model parameters that yield the best
cross-validation performance is necessary in almost all practical
cases to obtain a model with the best generalization estimate. The standard
approach in scikit-learn is the :obj:`sklearn.model_selection.GridSearchCV` class, which takes
a set of values to try for every parameter and simply enumerates all
combinations of parameter values. The complexity of such a search grows
exponentially as new parameters are added. A more scalable
approach is :obj:`sklearn.model_selection.RandomizedSearchCV`, which however does not take
advantage of the structure of the search space.
Scikit-optimize provides a drop-in replacement for :obj:`sklearn.model_selection.GridSearchCV`,
which uses Bayesian optimization: a predictive model, referred to as a
"surrogate", models the search space and is used to arrive at a good
combination of parameter values as soon as possible.
Note: for a manual hyperparameter optimization example, see the
"Hyperparameter Optimization" notebook.
"""
print(__doc__)
import numpy as np
#############################################################################
# Minimal example
# ===============
#
# A minimal example of optimizing the hyperparameters of SVC (Support Vector Machine Classifier) is given below.
from skopt import BayesSearchCV
from sklearn.datasets import load_digits
from sklearn.svm import SVC
from sklearn.model_selection import train_test_split
X, y = load_digits(10, True)
X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.75, test_size=.25, random_state=0)
# log-uniform: understand as search over p = exp(x) by varying x
opt = BayesSearchCV(
SVC(),
{
'C': (1e-6, 1e+6, 'log-uniform'),
'gamma': (1e-6, 1e+1, 'log-uniform'),
'degree': (1, 8), # integer valued parameter
'kernel': ['linear', 'poly', 'rbf'], # categorical parameter
},
n_iter=32,
cv=3
)
opt.fit(X_train, y_train)
print("val. score: %s" % opt.best_score_)
print("test score: %s" % opt.score(X_test, y_test))
#############################################################################
# Advanced example
# ================
#
# In practice, one wants to enumerate over multiple predictive model classes,
# with different search spaces and numbers of evaluations per class. An
# example of such a search over the parameters of a Linear SVM and a Kernel
# SVM is given below.
from skopt import BayesSearchCV
from skopt.space import Real, Categorical, Integer
from sklearn.datasets import load_digits
from sklearn.svm import LinearSVC, SVC
from sklearn.pipeline import Pipeline
from sklearn.model_selection import train_test_split
X, y = load_digits(10, True)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
# pipeline class is used as estimator to enable
# search over different model types
pipe = Pipeline([
('model', SVC())
])
# a single categorical value of the 'model' parameter
# sets the model class
# We will get ConvergenceWarnings because the problem is not well-conditioned.
# But that's fine, this is just an example.
linsvc_search = {
'model': [LinearSVC(max_iter=1000)],
'model__C': (1e-6, 1e+6, 'log-uniform'),
}
# explicit dimension classes can be specified like this
svc_search = {
'model': Categorical([SVC()]),
'model__C': Real(1e-6, 1e+6, prior='log-uniform'),
'model__gamma': Real(1e-6, 1e+1, prior='log-uniform'),
'model__degree': Integer(1,8),
'model__kernel': Categorical(['linear', 'poly', 'rbf']),
}
opt = BayesSearchCV(
pipe,
[(svc_search, 20), (linsvc_search, 16)], # (parameter space, # of evaluations)
cv=3
)
opt.fit(X_train, y_train)
print("val. score: %s" % opt.best_score_)
print("test score: %s" % opt.score(X_test, y_test))
#############################################################################
# Progress monitoring and control using `callback` argument of `fit` method
# =========================================================================
#
# It is possible to monitor the progress of :class:`BayesSearchCV` with an event
# handler that is called on every step of subspace exploration. For single job
# mode, this is called on every evaluation of a model configuration, and for
# parallel mode, this is called when n_jobs model configurations are evaluated
# in parallel.
#
# Additionally, exploration can be stopped if the callback returns `True`.
# This can be used to stop the exploration early, for instance when the
# accuracy that you get is sufficiently high.
#
# An example usage is shown below.
from skopt import BayesSearchCV
from sklearn.datasets import load_iris
from sklearn.svm import SVC
X, y = load_iris(True)
searchcv = BayesSearchCV(
SVC(gamma='scale'),
search_spaces={'C': (0.01, 100.0, 'log-uniform')},
n_iter=10,
cv=3
)
# callback handler
def on_step(optim_result):
score = searchcv.best_score_
print("best score: %s" % score)
if score >= 0.98:
print('Interrupting!')
return True
searchcv.fit(X, y, callback=on_step)
#############################################################################
# Counting total iterations that will be used to explore all subspaces
# ====================================================================
#
# Subspaces in previous examples can further increase in complexity if you add
# new model subspaces or dimensions for feature extraction pipelines. For
# monitoring of progress, you would like to know the total number of
# iterations it will take to explore all subspaces. This can be
# calculated with the `total_iterations` property, as in the code below.
from skopt import BayesSearchCV
from sklearn.datasets import load_iris
from sklearn.svm import SVC
X, y = load_iris(True)
searchcv = BayesSearchCV(
SVC(),
search_spaces=[
({'C': (0.1, 1.0)}, 19), # 19 iterations for this subspace
{'gamma':(0.1, 1.0)}
],
n_iter=23
)
print(searchcv.total_iterations)
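# With these settings this is expected to print 42: the explicit 19 evaluations
# for the first subspace plus n_iter=23 for the second.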
| bsd-3-clause |
samzhang111/scikit-learn | examples/mixture/plot_gmm_pdf.py | 284 | 1528 | """
=============================================
Density Estimation for a mixture of Gaussians
=============================================
Plot the density estimation of a mixture of two Gaussians. Data is
generated from two Gaussians with different centers and covariance
matrices.
"""
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
from sklearn import mixture
n_samples = 300
# generate random sample, two components
np.random.seed(0)
# generate spherical data centered on (20, 20)
shifted_gaussian = np.random.randn(n_samples, 2) + np.array([20, 20])
# generate zero centered stretched Gaussian data
C = np.array([[0., -0.7], [3.5, .7]])
stretched_gaussian = np.dot(np.random.randn(n_samples, 2), C)
# concatenate the two datasets into the final training set
X_train = np.vstack([shifted_gaussian, stretched_gaussian])
# fit a Gaussian Mixture Model with two components
clf = mixture.GMM(n_components=2, covariance_type='full')
clf.fit(X_train)
# display predicted scores by the model as a contour plot
x = np.linspace(-20.0, 30.0)
y = np.linspace(-20.0, 40.0)
X, Y = np.meshgrid(x, y)
XX = np.array([X.ravel(), Y.ravel()]).T
Z = -clf.score_samples(XX)[0]
Z = Z.reshape(X.shape)
CS = plt.contour(X, Y, Z, norm=LogNorm(vmin=1.0, vmax=1000.0),
levels=np.logspace(0, 3, 10))
CB = plt.colorbar(CS, shrink=0.8, extend='both')
plt.scatter(X_train[:, 0], X_train[:, 1], .8)
plt.title('Negative log-likelihood predicted by a GMM')
plt.axis('tight')
plt.show()
| bsd-3-clause |
rstebbing/subdivision-regression | python/generate_example_doosabin_regression_problem.py | 1 | 3248 | #########################################################
# File: generate_example_doosabin_regression_problem.py #
# Copyright Richard Stebbing 2015. #
# Distributed under the MIT License. #
# (See accompany file LICENSE or copy at #
# http://opensource.org/licenses/MIT) #
#########################################################
# Imports
import argparse
import numpy as np
import json
from sklearn.neighbors import NearestNeighbors
# Requires `subdivision`.
from subdivision import doosabin
# Requires `rscommon`.
from rscommon.face_array import sequence_to_raw_face_array
# main
def main():
parser = argparse.ArgumentParser()
parser.add_argument('num_data_points', type=int)
parser.add_argument('output_path')
parser.add_argument('--radius-std-dev', type=float, default=0.05)
parser.add_argument('--hemisphere', default=False, action='store_true')
parser.add_argument('--num-subdivisions', type=int, default=0)
parser.add_argument('--initialisation-sample-density', type=int,
default=16)
parser.add_argument('--seed', type=int)
args = parser.parse_args()
# Generate matrix `Y` of points uniformly sampled on a sphere then
# displaced radially with zero-mean Gaussian noise.
if args.seed is not None:
np.random.seed(args.seed)
Y = np.random.randn(args.num_data_points * 3).reshape(-1, 3)
n = np.linalg.norm(Y, axis=1)
n[n <= 0.0] = 1.0
r = (1.0 + args.radius_std_dev * np.random.randn(args.num_data_points))
Y *= (r / n)[:, np.newaxis]
if args.hemisphere:
Y[Y[:, 2] < 0.0, 2] *= -1.0
# Initial mesh and geometry is the cube from:
# subdivision/doosabin/examples/visualise_doosabin_subdivision.py.
T = [[0, 1, 3, 2],
[4, 6, 7, 5],
[1, 5, 7, 3],
[6, 4, 0, 2],
[0, 4, 5, 1],
[3, 7, 6, 2]]
X = np.array([[0, 0, 0],
[1, 0, 0],
[0, 1, 0],
[1, 1, 0],
[0, 0, 1],
[1, 0, 1],
[0, 1, 1],
[1, 1, 1]], dtype=np.float64)
X -= np.mean(X, axis=0)
num_subdivisions = max(args.num_subdivisions,
doosabin.is_initial_subdivision_required(T))
for i in xrange(num_subdivisions):
T, X = doosabin.subdivide(T, X)
# Initialise preimages.
surface = doosabin.surface(T)
pd, Ud, Td = surface.uniform_parameterisation(
args.initialisation_sample_density)
M = surface.M(pd, Ud, X)
nn = NearestNeighbors(n_neighbors=1)
nn.fit(M)
i = nn.kneighbors(Y, return_distance=False).ravel()
p, U = pd[i], Ud[i]
# Output.
z = {}
def z_setitem(k, A):
z[k] = np.asarray(A).ravel().tolist()
z_setitem('Y', Y)
z_setitem('raw_face_array', sequence_to_raw_face_array(T))
z_setitem('X', X)
z_setitem('p', p)
z_setitem('U', U)
print 'Output:', args.output_path
with open(args.output_path, 'wb') as fp:
json.dump(z, fp, indent=4)
if __name__ == '__main__':
main()
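# Hedged usage sketch (the output file name and option values below are
# illustrative only):
#   python generate_example_doosabin_regression_problem.py 1000 sphere.json \
#       --radius-std-dev 0.05 --hemisphere --seed 0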
| mit |
plissonf/scikit-learn | examples/gaussian_process/plot_gp_regression.py | 253 | 4054 | #!/usr/bin/python
# -*- coding: utf-8 -*-
r"""
=========================================================
Gaussian Processes regression: basic introductory example
=========================================================
A simple one-dimensional regression exercise computed in two different ways:
1. A noise-free case with a cubic correlation model
2. A noisy case with a squared Euclidean correlation model
In both cases, the model parameters are estimated using the maximum
likelihood principle.
The figures illustrate the interpolating property of the Gaussian Process
model as well as its probabilistic nature in the form of a pointwise 95%
confidence interval.
Note that the parameter ``nugget`` is applied as a Tikhonov regularization
of the assumed covariance between the training points. In the special case
of the squared euclidean correlation model, nugget is mathematically equivalent
to a normalized variance: That is
.. math::
\mathrm{nugget}_i = \left[\frac{\sigma_i}{y_i}\right]^2
"""
print(__doc__)
# Author: Vincent Dubourg <[email protected]>
# Jake Vanderplas <[email protected]>
# Licence: BSD 3 clause
import numpy as np
from sklearn.gaussian_process import GaussianProcess
from matplotlib import pyplot as pl
np.random.seed(1)
def f(x):
"""The function to predict."""
return x * np.sin(x)
#----------------------------------------------------------------------
# First the noiseless case
X = np.atleast_2d([1., 3., 5., 6., 7., 8.]).T
# Observations
y = f(X).ravel()
# Mesh the input space for evaluations of the real function, the prediction and
# its MSE
x = np.atleast_2d(np.linspace(0, 10, 1000)).T
# Instantiate a Gaussian Process model
gp = GaussianProcess(corr='cubic', theta0=1e-2, thetaL=1e-4, thetaU=1e-1,
random_start=100)
# Fit to data using Maximum Likelihood Estimation of the parameters
gp.fit(X, y)
# Make the prediction on the meshed x-axis (ask for MSE as well)
y_pred, MSE = gp.predict(x, eval_MSE=True)
sigma = np.sqrt(MSE)
# Plot the function, the prediction and the 95% confidence interval based on
# the MSE
fig = pl.figure()
pl.plot(x, f(x), 'r:', label=u'$f(x) = x\,\sin(x)$')
pl.plot(X, y, 'r.', markersize=10, label=u'Observations')
pl.plot(x, y_pred, 'b-', label=u'Prediction')
pl.fill(np.concatenate([x, x[::-1]]),
np.concatenate([y_pred - 1.9600 * sigma,
(y_pred + 1.9600 * sigma)[::-1]]),
alpha=.5, fc='b', ec='None', label='95% confidence interval')
pl.xlabel('$x$')
pl.ylabel('$f(x)$')
pl.ylim(-10, 20)
pl.legend(loc='upper left')
#----------------------------------------------------------------------
# now the noisy case
X = np.linspace(0.1, 9.9, 20)
X = np.atleast_2d(X).T
# Observations and noise
y = f(X).ravel()
dy = 0.5 + 1.0 * np.random.random(y.shape)
noise = np.random.normal(0, dy)
y += noise
# Mesh the input space for evaluations of the real function, the prediction and
# its MSE
x = np.atleast_2d(np.linspace(0, 10, 1000)).T
# Instantiate a Gaussian Process model
gp = GaussianProcess(corr='squared_exponential', theta0=1e-1,
thetaL=1e-3, thetaU=1,
nugget=(dy / y) ** 2,
random_start=100)
# Fit to data using Maximum Likelihood Estimation of the parameters
gp.fit(X, y)
# Make the prediction on the meshed x-axis (ask for MSE as well)
y_pred, MSE = gp.predict(x, eval_MSE=True)
sigma = np.sqrt(MSE)
# Plot the function, the prediction and the 95% confidence interval based on
# the MSE
fig = pl.figure()
pl.plot(x, f(x), 'r:', label=u'$f(x) = x\,\sin(x)$')
pl.errorbar(X.ravel(), y, dy, fmt='r.', markersize=10, label=u'Observations')
pl.plot(x, y_pred, 'b-', label=u'Prediction')
pl.fill(np.concatenate([x, x[::-1]]),
np.concatenate([y_pred - 1.9600 * sigma,
(y_pred + 1.9600 * sigma)[::-1]]),
alpha=.5, fc='b', ec='None', label='95% confidence interval')
pl.xlabel('$x$')
pl.ylabel('$f(x)$')
pl.ylim(-10, 20)
pl.legend(loc='upper left')
pl.show()
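# Optional extra (not in the original example): query the fitted noisy-case
# model at a single new input and report the predictive standard deviation.
x_new = np.atleast_2d([5.0])
y_new, mse_new = gp.predict(x_new, eval_MSE=True)
print(y_new, np.sqrt(mse_new))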
| bsd-3-clause |
marcsans/cnn-physics-perception | phy/lib/python2.7/site-packages/sklearn/tests/test_multiclass.py | 5 | 26249 | import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_equal, assert_raises_regex
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raise_message
from sklearn.multiclass import OneVsRestClassifier
from sklearn.multiclass import OneVsOneClassifier
from sklearn.multiclass import OutputCodeClassifier
from sklearn.utils.multiclass import check_classification_targets, type_of_target
from sklearn.utils import shuffle
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.svm import LinearSVC, SVC
from sklearn.naive_bayes import MultinomialNB
from sklearn.linear_model import (LinearRegression, Lasso, ElasticNet, Ridge,
Perceptron, LogisticRegression,
SGDClassifier)
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.model_selection import GridSearchCV, cross_val_score
from sklearn.pipeline import Pipeline
from sklearn import svm
from sklearn import datasets
from sklearn.externals.six.moves import zip
iris = datasets.load_iris()
rng = np.random.RandomState(0)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
n_classes = 3
def test_ovr_exceptions():
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
assert_raises(ValueError, ovr.predict, [])
# Fail on multioutput data
assert_raises(ValueError, OneVsRestClassifier(MultinomialNB()).fit,
np.array([[1, 0], [0, 1]]),
np.array([[1, 2], [3, 1]]))
assert_raises(ValueError, OneVsRestClassifier(MultinomialNB()).fit,
np.array([[1, 0], [0, 1]]),
np.array([[1.5, 2.4], [3.1, 0.8]]))
def test_check_classification_targets():
# Test that check_classification_target return correct type. #5782
y = np.array([0.0, 1.1, 2.0, 3.0])
msg = type_of_target(y)
assert_raise_message(ValueError, msg, check_classification_targets, y)
def test_ovr_fit_predict():
# A classifier which implements decision_function.
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
pred = ovr.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovr.estimators_), n_classes)
clf = LinearSVC(random_state=0)
pred2 = clf.fit(iris.data, iris.target).predict(iris.data)
assert_equal(np.mean(iris.target == pred), np.mean(iris.target == pred2))
# A classifier which implements predict_proba.
ovr = OneVsRestClassifier(MultinomialNB())
pred = ovr.fit(iris.data, iris.target).predict(iris.data)
assert_greater(np.mean(iris.target == pred), 0.65)
def test_ovr_partial_fit():
    # Test if partial_fit is working as intended
X, y = shuffle(iris.data, iris.target, random_state=0)
ovr = OneVsRestClassifier(MultinomialNB())
ovr.partial_fit(X[:100], y[:100], np.unique(y))
ovr.partial_fit(X[100:], y[100:])
pred = ovr.predict(X)
ovr2 = OneVsRestClassifier(MultinomialNB())
pred2 = ovr2.fit(X, y).predict(X)
assert_almost_equal(pred, pred2)
assert_equal(len(ovr.estimators_), len(np.unique(y)))
assert_greater(np.mean(y == pred), 0.65)
    # Test when mini-batches don't have all classes
# with SGDClassifier
X = np.abs(np.random.randn(14, 2))
y = [1, 1, 1, 1, 2, 3, 3, 0, 0, 2, 3, 1, 2, 3]
ovr = OneVsRestClassifier(SGDClassifier(n_iter=1, shuffle=False,
random_state=0))
ovr.partial_fit(X[:7], y[:7], np.unique(y))
ovr.partial_fit(X[7:], y[7:])
pred = ovr.predict(X)
ovr1 = OneVsRestClassifier(SGDClassifier(n_iter=1, shuffle=False,
random_state=0))
pred1 = ovr1.fit(X, y).predict(X)
assert_equal(np.mean(pred == y), np.mean(pred1 == y))
def test_ovr_partial_fit_exceptions():
ovr = OneVsRestClassifier(MultinomialNB())
X = np.abs(np.random.randn(14, 2))
y = [1, 1, 1, 1, 2, 3, 3, 0, 0, 2, 3, 1, 2, 3]
ovr.partial_fit(X[:7], y[:7], np.unique(y))
# A new class value which was not in the first call of partial_fit
# It should raise ValueError
y1 = [5] + y[7:-1]
assert_raises_regex(ValueError, "Mini-batch contains \[.+\] while classes"
" must be subset of \[.+\]",
ovr.partial_fit, X=X[7:], y=y1)
def test_ovr_ovo_regressor():
# test that ovr and ovo work on regressors which don't have a decision_
# function
ovr = OneVsRestClassifier(DecisionTreeRegressor())
pred = ovr.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovr.estimators_), n_classes)
assert_array_equal(np.unique(pred), [0, 1, 2])
# we are doing something sensible
assert_greater(np.mean(pred == iris.target), .9)
ovr = OneVsOneClassifier(DecisionTreeRegressor())
pred = ovr.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovr.estimators_), n_classes * (n_classes - 1) / 2)
assert_array_equal(np.unique(pred), [0, 1, 2])
# we are doing something sensible
assert_greater(np.mean(pred == iris.target), .9)
def test_ovr_fit_predict_sparse():
for sparse in [sp.csr_matrix, sp.csc_matrix, sp.coo_matrix, sp.dok_matrix,
sp.lil_matrix]:
base_clf = MultinomialNB(alpha=1)
X, Y = datasets.make_multilabel_classification(n_samples=100,
n_features=20,
n_classes=5,
n_labels=3,
length=50,
allow_unlabeled=True,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
Y_pred = clf.predict(X_test)
clf_sprs = OneVsRestClassifier(base_clf).fit(X_train, sparse(Y_train))
Y_pred_sprs = clf_sprs.predict(X_test)
assert_true(clf.multilabel_)
assert_true(sp.issparse(Y_pred_sprs))
assert_array_equal(Y_pred_sprs.toarray(), Y_pred)
# Test predict_proba
Y_proba = clf_sprs.predict_proba(X_test)
# predict assigns a label if the probability that the
# sample has the label is greater than 0.5.
pred = Y_proba > .5
assert_array_equal(pred, Y_pred_sprs.toarray())
# Test decision_function
clf_sprs = OneVsRestClassifier(svm.SVC()).fit(X_train, sparse(Y_train))
dec_pred = (clf_sprs.decision_function(X_test) > 0).astype(int)
assert_array_equal(dec_pred, clf_sprs.predict(X_test).toarray())
def test_ovr_always_present():
# Test that ovr works with classes that are always present or absent.
    # Note: this tests the case where _ConstantPredictor is utilised
X = np.ones((10, 2))
X[:5, :] = 0
# Build an indicator matrix where two features are always on.
# As list of lists, it would be: [[int(i >= 5), 2, 3] for i in range(10)]
y = np.zeros((10, 3))
y[5:, 0] = 1
y[:, 1] = 1
y[:, 2] = 1
ovr = OneVsRestClassifier(LogisticRegression())
assert_warns(UserWarning, ovr.fit, X, y)
y_pred = ovr.predict(X)
assert_array_equal(np.array(y_pred), np.array(y))
y_pred = ovr.decision_function(X)
assert_equal(np.unique(y_pred[:, -2:]), 1)
y_pred = ovr.predict_proba(X)
assert_array_equal(y_pred[:, -1], np.ones(X.shape[0]))
# y has a constantly absent label
y = np.zeros((10, 2))
y[5:, 0] = 1 # variable label
ovr = OneVsRestClassifier(LogisticRegression())
assert_warns(UserWarning, ovr.fit, X, y)
y_pred = ovr.predict_proba(X)
assert_array_equal(y_pred[:, -1], np.zeros(X.shape[0]))
def test_ovr_multiclass():
# Toy dataset where features correspond directly to labels.
X = np.array([[0, 0, 5], [0, 5, 0], [3, 0, 0], [0, 0, 6], [6, 0, 0]])
y = ["eggs", "spam", "ham", "eggs", "ham"]
Y = np.array([[0, 0, 1],
[0, 1, 0],
[1, 0, 0],
[0, 0, 1],
[1, 0, 0]])
classes = set("ham eggs spam".split())
for base_clf in (MultinomialNB(), LinearSVC(random_state=0),
LinearRegression(), Ridge(),
ElasticNet()):
clf = OneVsRestClassifier(base_clf).fit(X, y)
assert_equal(set(clf.classes_), classes)
y_pred = clf.predict(np.array([[0, 0, 4]]))[0]
assert_equal(set(y_pred), set("eggs"))
# test input as label indicator matrix
clf = OneVsRestClassifier(base_clf).fit(X, Y)
y_pred = clf.predict([[0, 0, 4]])[0]
assert_array_equal(y_pred, [0, 0, 1])
def test_ovr_binary():
# Toy dataset where features correspond directly to labels.
X = np.array([[0, 0, 5], [0, 5, 0], [3, 0, 0], [0, 0, 6], [6, 0, 0]])
y = ["eggs", "spam", "spam", "eggs", "spam"]
Y = np.array([[0, 1, 1, 0, 1]]).T
classes = set("eggs spam".split())
def conduct_test(base_clf, test_predict_proba=False):
clf = OneVsRestClassifier(base_clf).fit(X, y)
assert_equal(set(clf.classes_), classes)
y_pred = clf.predict(np.array([[0, 0, 4]]))[0]
assert_equal(set(y_pred), set("eggs"))
if test_predict_proba:
X_test = np.array([[0, 0, 4]])
probabilities = clf.predict_proba(X_test)
assert_equal(2, len(probabilities[0]))
assert_equal(clf.classes_[np.argmax(probabilities, axis=1)],
clf.predict(X_test))
# test input as label indicator matrix
clf = OneVsRestClassifier(base_clf).fit(X, Y)
y_pred = clf.predict([[3, 0, 0]])[0]
assert_equal(y_pred, 1)
for base_clf in (LinearSVC(random_state=0), LinearRegression(),
Ridge(), ElasticNet()):
conduct_test(base_clf)
for base_clf in (MultinomialNB(), SVC(probability=True),
LogisticRegression()):
conduct_test(base_clf, test_predict_proba=True)
def test_ovr_multilabel():
# Toy dataset where features correspond directly to labels.
X = np.array([[0, 4, 5], [0, 5, 0], [3, 3, 3], [4, 0, 6], [6, 0, 0]])
y = np.array([[0, 1, 1],
[0, 1, 0],
[1, 1, 1],
[1, 0, 1],
[1, 0, 0]])
for base_clf in (MultinomialNB(), LinearSVC(random_state=0),
LinearRegression(), Ridge(),
ElasticNet(), Lasso(alpha=0.5)):
clf = OneVsRestClassifier(base_clf).fit(X, y)
y_pred = clf.predict([[0, 4, 4]])[0]
assert_array_equal(y_pred, [0, 1, 1])
assert_true(clf.multilabel_)
def test_ovr_fit_predict_svc():
ovr = OneVsRestClassifier(svm.SVC())
ovr.fit(iris.data, iris.target)
assert_equal(len(ovr.estimators_), 3)
assert_greater(ovr.score(iris.data, iris.target), .9)
def test_ovr_multilabel_dataset():
base_clf = MultinomialNB(alpha=1)
for au, prec, recall in zip((True, False), (0.51, 0.66), (0.51, 0.80)):
X, Y = datasets.make_multilabel_classification(n_samples=100,
n_features=20,
n_classes=5,
n_labels=2,
length=50,
allow_unlabeled=au,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test, Y_test = X[80:], Y[80:]
clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
Y_pred = clf.predict(X_test)
assert_true(clf.multilabel_)
assert_almost_equal(precision_score(Y_test, Y_pred, average="micro"),
prec,
decimal=2)
assert_almost_equal(recall_score(Y_test, Y_pred, average="micro"),
recall,
decimal=2)
def test_ovr_multilabel_predict_proba():
base_clf = MultinomialNB(alpha=1)
for au in (False, True):
X, Y = datasets.make_multilabel_classification(n_samples=100,
n_features=20,
n_classes=5,
n_labels=3,
length=50,
allow_unlabeled=au,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
# decision function only estimator. Fails in current implementation.
decision_only = OneVsRestClassifier(svm.SVR()).fit(X_train, Y_train)
assert_raises(AttributeError, decision_only.predict_proba, X_test)
# Estimator with predict_proba disabled, depending on parameters.
decision_only = OneVsRestClassifier(svm.SVC(probability=False))
decision_only.fit(X_train, Y_train)
assert_raises(AttributeError, decision_only.predict_proba, X_test)
Y_pred = clf.predict(X_test)
Y_proba = clf.predict_proba(X_test)
# predict assigns a label if the probability that the
# sample has the label is greater than 0.5.
pred = Y_proba > .5
assert_array_equal(pred, Y_pred)
def test_ovr_single_label_predict_proba():
base_clf = MultinomialNB(alpha=1)
X, Y = iris.data, iris.target
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
# decision function only estimator. Fails in current implementation.
decision_only = OneVsRestClassifier(svm.SVR()).fit(X_train, Y_train)
assert_raises(AttributeError, decision_only.predict_proba, X_test)
Y_pred = clf.predict(X_test)
Y_proba = clf.predict_proba(X_test)
assert_almost_equal(Y_proba.sum(axis=1), 1.0)
# predict assigns a label if the probability that the
# sample has the label is greater than 0.5.
pred = np.array([l.argmax() for l in Y_proba])
assert_false((pred - Y_pred).any())
def test_ovr_multilabel_decision_function():
X, Y = datasets.make_multilabel_classification(n_samples=100,
n_features=20,
n_classes=5,
n_labels=3,
length=50,
allow_unlabeled=True,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(svm.SVC()).fit(X_train, Y_train)
assert_array_equal((clf.decision_function(X_test) > 0).astype(int),
clf.predict(X_test))
def test_ovr_single_label_decision_function():
X, Y = datasets.make_classification(n_samples=100,
n_features=20,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(svm.SVC()).fit(X_train, Y_train)
assert_array_equal(clf.decision_function(X_test).ravel() > 0,
clf.predict(X_test))
def test_ovr_gridsearch():
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
Cs = [0.1, 0.5, 0.8]
cv = GridSearchCV(ovr, {'estimator__C': Cs})
cv.fit(iris.data, iris.target)
best_C = cv.best_estimator_.estimators_[0].C
assert_true(best_C in Cs)
def test_ovr_pipeline():
# Test with pipeline of length one
# This test is needed because the multiclass estimators may fail to detect
# the presence of predict_proba or decision_function.
clf = Pipeline([("tree", DecisionTreeClassifier())])
ovr_pipe = OneVsRestClassifier(clf)
ovr_pipe.fit(iris.data, iris.target)
ovr = OneVsRestClassifier(DecisionTreeClassifier())
ovr.fit(iris.data, iris.target)
assert_array_equal(ovr.predict(iris.data), ovr_pipe.predict(iris.data))
def test_ovr_coef_():
for base_classifier in [SVC(kernel='linear', random_state=0), LinearSVC(random_state=0)]:
# SVC has sparse coef with sparse input data
ovr = OneVsRestClassifier(base_classifier)
for X in [iris.data, sp.csr_matrix(iris.data)]:
# test with dense and sparse coef
ovr.fit(X, iris.target)
shape = ovr.coef_.shape
assert_equal(shape[0], n_classes)
assert_equal(shape[1], iris.data.shape[1])
# don't densify sparse coefficients
assert_equal(sp.issparse(ovr.estimators_[0].coef_), sp.issparse(ovr.coef_))
def test_ovr_coef_exceptions():
# Not fitted exception!
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
# lambda is needed because we don't want coef_ to be evaluated right away
assert_raises(ValueError, lambda x: ovr.coef_, None)
# Doesn't have coef_ exception!
ovr = OneVsRestClassifier(DecisionTreeClassifier())
ovr.fit(iris.data, iris.target)
assert_raises(AttributeError, lambda x: ovr.coef_, None)
def test_ovo_exceptions():
ovo = OneVsOneClassifier(LinearSVC(random_state=0))
assert_raises(ValueError, ovo.predict, [])
def test_ovo_fit_on_list():
# Test that OneVsOne fitting works with a list of targets and yields the
# same output as predict from an array
ovo = OneVsOneClassifier(LinearSVC(random_state=0))
prediction_from_array = ovo.fit(iris.data, iris.target).predict(iris.data)
iris_data_list = [list(a) for a in iris.data]
prediction_from_list = ovo.fit(iris_data_list,
list(iris.target)).predict(iris_data_list)
assert_array_equal(prediction_from_array, prediction_from_list)
def test_ovo_fit_predict():
# A classifier which implements decision_function.
ovo = OneVsOneClassifier(LinearSVC(random_state=0))
ovo.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovo.estimators_), n_classes * (n_classes - 1) / 2)
# A classifier which implements predict_proba.
ovo = OneVsOneClassifier(MultinomialNB())
ovo.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovo.estimators_), n_classes * (n_classes - 1) / 2)
def test_ovo_partial_fit_predict():
X, y = shuffle(iris.data, iris.target)
ovo1 = OneVsOneClassifier(MultinomialNB())
ovo1.partial_fit(X[:100], y[:100], np.unique(y))
ovo1.partial_fit(X[100:], y[100:])
pred1 = ovo1.predict(X)
ovo2 = OneVsOneClassifier(MultinomialNB())
ovo2.fit(X, y)
pred2 = ovo2.predict(X)
assert_equal(len(ovo1.estimators_), n_classes * (n_classes - 1) / 2)
assert_greater(np.mean(y == pred1), 0.65)
assert_almost_equal(pred1, pred2)
# Test when mini-batches don't have all target classes
ovo1 = OneVsOneClassifier(MultinomialNB())
ovo1.partial_fit(iris.data[:60], iris.target[:60], np.unique(iris.target))
ovo1.partial_fit(iris.data[60:], iris.target[60:])
pred1 = ovo1.predict(iris.data)
ovo2 = OneVsOneClassifier(MultinomialNB())
pred2 = ovo2.fit(iris.data, iris.target).predict(iris.data)
assert_almost_equal(pred1, pred2)
assert_equal(len(ovo1.estimators_), len(np.unique(iris.target)))
assert_greater(np.mean(iris.target == pred1), 0.65)
def test_ovo_decision_function():
n_samples = iris.data.shape[0]
ovo_clf = OneVsOneClassifier(LinearSVC(random_state=0))
ovo_clf.fit(iris.data, iris.target)
decisions = ovo_clf.decision_function(iris.data)
assert_equal(decisions.shape, (n_samples, n_classes))
assert_array_equal(decisions.argmax(axis=1), ovo_clf.predict(iris.data))
# Compute the votes
votes = np.zeros((n_samples, n_classes))
k = 0
for i in range(n_classes):
for j in range(i + 1, n_classes):
pred = ovo_clf.estimators_[k].predict(iris.data)
votes[pred == 0, i] += 1
votes[pred == 1, j] += 1
k += 1
# Extract votes and verify
assert_array_equal(votes, np.round(decisions))
for class_idx in range(n_classes):
        # For each sample and each class, there are only 3 possible vote levels
        # because there are only 3 distinct class pairs and thus 3 distinct
# binary classifiers.
# Therefore, sorting predictions based on votes would yield
# mostly tied predictions:
assert_true(set(votes[:, class_idx]).issubset(set([0., 1., 2.])))
# The OVO decision function on the other hand is able to resolve
# most of the ties on this data as it combines both the vote counts
# and the aggregated confidence levels of the binary classifiers
# to compute the aggregate decision function. The iris dataset
# has 150 samples with a couple of duplicates. The OvO decisions
# can resolve most of the ties:
assert_greater(len(np.unique(decisions[:, class_idx])), 146)
def test_ovo_gridsearch():
ovo = OneVsOneClassifier(LinearSVC(random_state=0))
Cs = [0.1, 0.5, 0.8]
cv = GridSearchCV(ovo, {'estimator__C': Cs})
cv.fit(iris.data, iris.target)
best_C = cv.best_estimator_.estimators_[0].C
assert_true(best_C in Cs)
def test_ovo_ties():
# Test that ties are broken using the decision function,
# not defaulting to the smallest label
X = np.array([[1, 2], [2, 1], [-2, 1], [-2, -1]])
y = np.array([2, 0, 1, 2])
multi_clf = OneVsOneClassifier(Perceptron(shuffle=False))
ovo_prediction = multi_clf.fit(X, y).predict(X)
ovo_decision = multi_clf.decision_function(X)
# Classifiers are in order 0-1, 0-2, 1-2
# Use decision_function to compute the votes and the normalized
# sum_of_confidences, which is used to disambiguate when there is a tie in
# votes.
votes = np.round(ovo_decision)
normalized_confidences = ovo_decision - votes
# For the first point, there is one vote per class
assert_array_equal(votes[0, :], 1)
# For the rest, there is no tie and the prediction is the argmax
assert_array_equal(np.argmax(votes[1:], axis=1), ovo_prediction[1:])
# For the tie, the prediction is the class with the highest score
assert_equal(ovo_prediction[0], normalized_confidences[0].argmax())
def test_ovo_ties2():
    # test that ties can be won by labels other than the first two
X = np.array([[1, 2], [2, 1], [-2, 1], [-2, -1]])
y_ref = np.array([2, 0, 1, 2])
# cycle through labels so that each label wins once
for i in range(3):
y = (y_ref + i) % 3
multi_clf = OneVsOneClassifier(Perceptron(shuffle=False))
ovo_prediction = multi_clf.fit(X, y).predict(X)
assert_equal(ovo_prediction[0], i % 3)
def test_ovo_string_y():
# Test that the OvO doesn't mess up the encoding of string labels
X = np.eye(4)
y = np.array(['a', 'b', 'c', 'd'])
ovo = OneVsOneClassifier(LinearSVC())
ovo.fit(X, y)
assert_array_equal(y, ovo.predict(X))
def test_ecoc_exceptions():
ecoc = OutputCodeClassifier(LinearSVC(random_state=0))
assert_raises(ValueError, ecoc.predict, [])
def test_ecoc_fit_predict():
# A classifier which implements decision_function.
ecoc = OutputCodeClassifier(LinearSVC(random_state=0),
code_size=2, random_state=0)
ecoc.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ecoc.estimators_), n_classes * 2)
# A classifier which implements predict_proba.
ecoc = OutputCodeClassifier(MultinomialNB(), code_size=2, random_state=0)
ecoc.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ecoc.estimators_), n_classes * 2)
def test_ecoc_gridsearch():
ecoc = OutputCodeClassifier(LinearSVC(random_state=0),
random_state=0)
Cs = [0.1, 0.5, 0.8]
cv = GridSearchCV(ecoc, {'estimator__C': Cs})
cv.fit(iris.data, iris.target)
best_C = cv.best_estimator_.estimators_[0].C
assert_true(best_C in Cs)
def test_pairwise_indices():
clf_precomputed = svm.SVC(kernel='precomputed')
X, y = iris.data, iris.target
ovr_false = OneVsOneClassifier(clf_precomputed)
linear_kernel = np.dot(X, X.T)
ovr_false.fit(linear_kernel, y)
n_estimators = len(ovr_false.estimators_)
precomputed_indices = ovr_false.pairwise_indices_
for idx in precomputed_indices:
assert_equal(idx.shape[0] * n_estimators / (n_estimators - 1),
linear_kernel.shape[0])
def test_pairwise_attribute():
clf_precomputed = svm.SVC(kernel='precomputed')
clf_notprecomputed = svm.SVC()
for MultiClassClassifier in [OneVsRestClassifier, OneVsOneClassifier]:
ovr_false = MultiClassClassifier(clf_notprecomputed)
assert_false(ovr_false._pairwise)
ovr_true = MultiClassClassifier(clf_precomputed)
assert_true(ovr_true._pairwise)
def test_pairwise_cross_val_score():
clf_precomputed = svm.SVC(kernel='precomputed')
clf_notprecomputed = svm.SVC(kernel='linear')
X, y = iris.data, iris.target
for MultiClassClassifier in [OneVsRestClassifier, OneVsOneClassifier]:
ovr_false = MultiClassClassifier(clf_notprecomputed)
ovr_true = MultiClassClassifier(clf_precomputed)
linear_kernel = np.dot(X, X.T)
score_precomputed = cross_val_score(ovr_true, linear_kernel, y)
score_linear = cross_val_score(ovr_false, X, y)
assert_array_equal(score_precomputed, score_linear)
| mit |
Adai0808/scikit-learn | sklearn/linear_model/tests/test_base.py | 101 | 12205 | # Author: Alexandre Gramfort <[email protected]>
# Fabian Pedregosa <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.linear_model.base import LinearRegression
from sklearn.linear_model.base import center_data, sparse_center_data, _rescale_data
from sklearn.utils import check_random_state
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_greater
from sklearn.datasets.samples_generator import make_sparse_uncorrelated
from sklearn.datasets.samples_generator import make_regression
def test_linear_regression():
# Test LinearRegression on a simple dataset.
# a simple dataset
X = [[1], [2]]
Y = [1, 2]
clf = LinearRegression()
clf.fit(X, Y)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(clf.intercept_, [0])
assert_array_almost_equal(clf.predict(X), [1, 2])
# test it also for degenerate input
X = [[1]]
Y = [0]
clf = LinearRegression()
clf.fit(X, Y)
assert_array_almost_equal(clf.coef_, [0])
assert_array_almost_equal(clf.intercept_, [0])
assert_array_almost_equal(clf.predict(X), [0])
def test_linear_regression_sample_weights():
rng = np.random.RandomState(0)
for n_samples, n_features in ((6, 5), (5, 10)):
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
sample_weight = 1.0 + rng.rand(n_samples)
clf = LinearRegression()
clf.fit(X, y, sample_weight)
coefs1 = clf.coef_
assert_equal(clf.coef_.shape, (X.shape[1], ))
assert_greater(clf.score(X, y), 0.9)
assert_array_almost_equal(clf.predict(X), y)
# Sample weight can be implemented via a simple rescaling
# for the square loss.
scaled_y = y * np.sqrt(sample_weight)
scaled_X = X * np.sqrt(sample_weight)[:, np.newaxis]
clf.fit(X, y)
coefs2 = clf.coef_
assert_array_almost_equal(coefs1, coefs2)
def test_raises_value_error_if_sample_weights_greater_than_1d():
# Sample weights must be either scalar or 1D
n_sampless = [2, 3]
n_featuress = [3, 2]
rng = np.random.RandomState(42)
for n_samples, n_features in zip(n_sampless, n_featuress):
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples)
sample_weights_OK = rng.randn(n_samples) ** 2 + 1
sample_weights_OK_1 = 1.
sample_weights_OK_2 = 2.
clf = LinearRegression()
# make sure the "OK" sample weights actually work
clf.fit(X, y, sample_weights_OK)
clf.fit(X, y, sample_weights_OK_1)
clf.fit(X, y, sample_weights_OK_2)
def test_fit_intercept():
# Test assertions on betas shape.
X2 = np.array([[0.38349978, 0.61650022],
[0.58853682, 0.41146318]])
X3 = np.array([[0.27677969, 0.70693172, 0.01628859],
[0.08385139, 0.20692515, 0.70922346]])
y = np.array([1, 1])
lr2_without_intercept = LinearRegression(fit_intercept=False).fit(X2, y)
lr2_with_intercept = LinearRegression(fit_intercept=True).fit(X2, y)
lr3_without_intercept = LinearRegression(fit_intercept=False).fit(X3, y)
lr3_with_intercept = LinearRegression(fit_intercept=True).fit(X3, y)
assert_equal(lr2_with_intercept.coef_.shape,
lr2_without_intercept.coef_.shape)
assert_equal(lr3_with_intercept.coef_.shape,
lr3_without_intercept.coef_.shape)
assert_equal(lr2_without_intercept.coef_.ndim,
lr3_without_intercept.coef_.ndim)
def test_linear_regression_sparse(random_state=0):
"Test that linear regression also works with sparse data"
random_state = check_random_state(random_state)
for i in range(10):
n = 100
X = sparse.eye(n, n)
beta = random_state.rand(n)
y = X * beta[:, np.newaxis]
ols = LinearRegression()
ols.fit(X, y.ravel())
assert_array_almost_equal(beta, ols.coef_ + ols.intercept_)
assert_array_almost_equal(ols.residues_, 0)
def test_linear_regression_multiple_outcome(random_state=0):
"Test multiple-outcome linear regressions"
X, y = make_regression(random_state=random_state)
Y = np.vstack((y, y)).T
n_features = X.shape[1]
clf = LinearRegression(fit_intercept=True)
clf.fit((X), Y)
assert_equal(clf.coef_.shape, (2, n_features))
Y_pred = clf.predict(X)
clf.fit(X, y)
y_pred = clf.predict(X)
assert_array_almost_equal(np.vstack((y_pred, y_pred)).T, Y_pred, decimal=3)
def test_linear_regression_sparse_multiple_outcome(random_state=0):
"Test multiple-outcome linear regressions with sparse data"
random_state = check_random_state(random_state)
X, y = make_sparse_uncorrelated(random_state=random_state)
X = sparse.coo_matrix(X)
Y = np.vstack((y, y)).T
n_features = X.shape[1]
ols = LinearRegression()
ols.fit(X, Y)
assert_equal(ols.coef_.shape, (2, n_features))
Y_pred = ols.predict(X)
ols.fit(X, y.ravel())
y_pred = ols.predict(X)
assert_array_almost_equal(np.vstack((y_pred, y_pred)).T, Y_pred, decimal=3)
def test_center_data():
n_samples = 200
n_features = 2
rng = check_random_state(0)
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples)
expected_X_mean = np.mean(X, axis=0)
# XXX: currently scaled to variance=n_samples
expected_X_std = np.std(X, axis=0) * np.sqrt(X.shape[0])
expected_y_mean = np.mean(y, axis=0)
Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=False,
normalize=False)
assert_array_almost_equal(X_mean, np.zeros(n_features))
assert_array_almost_equal(y_mean, 0)
assert_array_almost_equal(X_std, np.ones(n_features))
assert_array_almost_equal(Xt, X)
assert_array_almost_equal(yt, y)
Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=True,
normalize=False)
assert_array_almost_equal(X_mean, expected_X_mean)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(X_std, np.ones(n_features))
assert_array_almost_equal(Xt, X - expected_X_mean)
assert_array_almost_equal(yt, y - expected_y_mean)
Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=True,
normalize=True)
assert_array_almost_equal(X_mean, expected_X_mean)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(X_std, expected_X_std)
assert_array_almost_equal(Xt, (X - expected_X_mean) / expected_X_std)
assert_array_almost_equal(yt, y - expected_y_mean)
def test_center_data_multioutput():
n_samples = 200
n_features = 3
n_outputs = 2
rng = check_random_state(0)
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples, n_outputs)
expected_y_mean = np.mean(y, axis=0)
args = [(center_data, X), (sparse_center_data, sparse.csc_matrix(X))]
for center, X in args:
_, yt, _, y_mean, _ = center(X, y, fit_intercept=False,
normalize=False)
assert_array_almost_equal(y_mean, np.zeros(n_outputs))
assert_array_almost_equal(yt, y)
_, yt, _, y_mean, _ = center(X, y, fit_intercept=True,
normalize=False)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(yt, y - y_mean)
_, yt, _, y_mean, _ = center(X, y, fit_intercept=True,
normalize=True)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(yt, y - y_mean)
def test_center_data_weighted():
n_samples = 200
n_features = 2
rng = check_random_state(0)
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples)
sample_weight = rng.rand(n_samples)
expected_X_mean = np.average(X, axis=0, weights=sample_weight)
expected_y_mean = np.average(y, axis=0, weights=sample_weight)
# XXX: if normalize=True, should we expect a weighted standard deviation?
# Currently not weighted, but calculated with respect to weighted mean
# XXX: currently scaled to variance=n_samples
expected_X_std = (np.sqrt(X.shape[0]) *
np.mean((X - expected_X_mean) ** 2, axis=0) ** .5)
Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=True,
normalize=False,
sample_weight=sample_weight)
assert_array_almost_equal(X_mean, expected_X_mean)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(X_std, np.ones(n_features))
assert_array_almost_equal(Xt, X - expected_X_mean)
assert_array_almost_equal(yt, y - expected_y_mean)
Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=True,
normalize=True,
sample_weight=sample_weight)
assert_array_almost_equal(X_mean, expected_X_mean)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(X_std, expected_X_std)
assert_array_almost_equal(Xt, (X - expected_X_mean) / expected_X_std)
assert_array_almost_equal(yt, y - expected_y_mean)
def test_sparse_center_data():
n_samples = 200
n_features = 2
rng = check_random_state(0)
# random_state not supported yet in sparse.rand
X = sparse.rand(n_samples, n_features, density=.5) # , random_state=rng
X = X.tolil()
y = rng.rand(n_samples)
XA = X.toarray()
# XXX: currently scaled to variance=n_samples
expected_X_std = np.std(XA, axis=0) * np.sqrt(X.shape[0])
Xt, yt, X_mean, y_mean, X_std = sparse_center_data(X, y,
fit_intercept=False,
normalize=False)
assert_array_almost_equal(X_mean, np.zeros(n_features))
assert_array_almost_equal(y_mean, 0)
assert_array_almost_equal(X_std, np.ones(n_features))
assert_array_almost_equal(Xt.A, XA)
assert_array_almost_equal(yt, y)
Xt, yt, X_mean, y_mean, X_std = sparse_center_data(X, y,
fit_intercept=True,
normalize=False)
assert_array_almost_equal(X_mean, np.mean(XA, axis=0))
assert_array_almost_equal(y_mean, np.mean(y, axis=0))
assert_array_almost_equal(X_std, np.ones(n_features))
assert_array_almost_equal(Xt.A, XA)
assert_array_almost_equal(yt, y - np.mean(y, axis=0))
Xt, yt, X_mean, y_mean, X_std = sparse_center_data(X, y,
fit_intercept=True,
normalize=True)
assert_array_almost_equal(X_mean, np.mean(XA, axis=0))
assert_array_almost_equal(y_mean, np.mean(y, axis=0))
assert_array_almost_equal(X_std, expected_X_std)
assert_array_almost_equal(Xt.A, XA / expected_X_std)
assert_array_almost_equal(yt, y - np.mean(y, axis=0))
def test_csr_sparse_center_data():
# Test output format of sparse_center_data, when input is csr
X, y = make_regression()
X[X < 2.5] = 0.0
csr = sparse.csr_matrix(X)
csr_, y, _, _, _ = sparse_center_data(csr, y, True)
assert_equal(csr_.getformat(), 'csr')
def test_rescale_data():
n_samples = 200
n_features = 2
rng = np.random.RandomState(0)
sample_weight = 1.0 + rng.rand(n_samples)
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples)
rescaled_X, rescaled_y = _rescale_data(X, y, sample_weight)
rescaled_X2 = X * np.sqrt(sample_weight)[:, np.newaxis]
rescaled_y2 = y * np.sqrt(sample_weight)
assert_array_almost_equal(rescaled_X, rescaled_X2)
assert_array_almost_equal(rescaled_y, rescaled_y2)
| bsd-3-clause |
mikebenfield/scikit-learn | sklearn/naive_bayes.py | 20 | 30830 | # -*- coding: utf-8 -*-
"""
The :mod:`sklearn.naive_bayes` module implements Naive Bayes algorithms. These
are supervised learning methods based on applying Bayes' theorem with strong
(naive) feature independence assumptions.
"""
# Author: Vincent Michel <[email protected]>
# Minor fixes by Fabian Pedregosa
# Amit Aides <[email protected]>
# Yehuda Finkelstein <[email protected]>
# Lars Buitinck
# Jan Hendrik Metzen <[email protected]>
# (parts based on earlier work by Mathieu Blondel)
#
# License: BSD 3 clause
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import issparse
from .base import BaseEstimator, ClassifierMixin
from .preprocessing import binarize
from .preprocessing import LabelBinarizer
from .preprocessing import label_binarize
from .utils import check_X_y, check_array, check_consistent_length
from .utils.extmath import safe_sparse_dot, logsumexp
from .utils.multiclass import _check_partial_fit_first_call
from .utils.fixes import in1d
from .utils.validation import check_is_fitted
from .externals import six
__all__ = ['BernoulliNB', 'GaussianNB', 'MultinomialNB']
class BaseNB(six.with_metaclass(ABCMeta, BaseEstimator, ClassifierMixin)):
"""Abstract base class for naive Bayes estimators"""
@abstractmethod
def _joint_log_likelihood(self, X):
"""Compute the unnormalized posterior log probability of X
I.e. ``log P(c) + log P(x|c)`` for all rows x of X, as an array-like of
shape [n_classes, n_samples].
Input is passed to _joint_log_likelihood as-is by predict,
predict_proba and predict_log_proba.
"""
def predict(self, X):
"""
Perform classification on an array of test vectors X.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = [n_samples]
Predicted target values for X
"""
jll = self._joint_log_likelihood(X)
return self.classes_[np.argmax(jll, axis=1)]
def predict_log_proba(self, X):
"""
Return log-probability estimates for the test vector X.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array-like, shape = [n_samples, n_classes]
Returns the log-probability of the samples for each class in
the model. The columns correspond to the classes in sorted
order, as they appear in the attribute `classes_`.
"""
jll = self._joint_log_likelihood(X)
# normalize by P(x) = P(f_1, ..., f_n)
log_prob_x = logsumexp(jll, axis=1)
return jll - np.atleast_2d(log_prob_x).T
def predict_proba(self, X):
"""
Return probability estimates for the test vector X.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array-like, shape = [n_samples, n_classes]
Returns the probability of the samples for each class in
the model. The columns correspond to the classes in sorted
order, as they appear in the attribute `classes_`.
"""
return np.exp(self.predict_log_proba(X))
class GaussianNB(BaseNB):
"""
Gaussian Naive Bayes (GaussianNB)
Can perform online updates to model parameters via `partial_fit` method.
For details on algorithm used to update feature means and variance online,
see Stanford CS tech report STAN-CS-79-773 by Chan, Golub, and LeVeque:
http://i.stanford.edu/pub/cstr/reports/cs/tr/79/773/CS-TR-79-773.pdf
Read more in the :ref:`User Guide <gaussian_naive_bayes>`.
Parameters
----------
priors : array-like, shape (n_classes,)
Prior probabilities of the classes. If specified the priors are not
adjusted according to the data.
Attributes
----------
class_prior_ : array, shape (n_classes,)
probability of each class.
class_count_ : array, shape (n_classes,)
number of training samples observed in each class.
theta_ : array, shape (n_classes, n_features)
mean of each feature per class
sigma_ : array, shape (n_classes, n_features)
variance of each feature per class
Examples
--------
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> Y = np.array([1, 1, 1, 2, 2, 2])
>>> from sklearn.naive_bayes import GaussianNB
>>> clf = GaussianNB()
>>> clf.fit(X, Y)
GaussianNB(priors=None)
>>> print(clf.predict([[-0.8, -1]]))
[1]
>>> clf_pf = GaussianNB()
>>> clf_pf.partial_fit(X, Y, np.unique(Y))
GaussianNB(priors=None)
>>> print(clf_pf.predict([[-0.8, -1]]))
[1]
"""
def __init__(self, priors=None):
self.priors = priors
def fit(self, X, y, sample_weight=None):
"""Fit Gaussian Naive Bayes according to X, y
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape (n_samples,)
Target values.
sample_weight : array-like, shape (n_samples,), optional (default=None)
Weights applied to individual samples (1. for unweighted).
.. versionadded:: 0.17
Gaussian Naive Bayes supports fitting with *sample_weight*.
Returns
-------
self : object
Returns self.
"""
X, y = check_X_y(X, y)
return self._partial_fit(X, y, np.unique(y), _refit=True,
sample_weight=sample_weight)
@staticmethod
def _update_mean_variance(n_past, mu, var, X, sample_weight=None):
"""Compute online update of Gaussian mean and variance.
Given starting sample count, mean, and variance, a new set of
points X, and optionally sample weights, return the updated mean and
variance. (NB - each dimension (column) in X is treated as independent
-- you get variance, not covariance).
Can take scalar mean and variance, or vector mean and variance to
simultaneously update a number of independent Gaussians.
See Stanford CS tech report STAN-CS-79-773 by Chan, Golub, and LeVeque:
http://i.stanford.edu/pub/cstr/reports/cs/tr/79/773/CS-TR-79-773.pdf
Parameters
----------
n_past : int
Number of samples represented in old mean and variance. If sample
weights were given, this should contain the sum of sample
weights represented in old mean and variance.
mu : array-like, shape (number of Gaussians,)
Means for Gaussians in original set.
var : array-like, shape (number of Gaussians,)
            Variances for Gaussians in original set.
        X : array-like, shape (n_samples, number of Gaussians)
            New points to incorporate into the running mean and variance.
sample_weight : array-like, shape (n_samples,), optional (default=None)
Weights applied to individual samples (1. for unweighted).
Returns
-------
total_mu : array-like, shape (number of Gaussians,)
Updated mean for each Gaussian over the combined set.
total_var : array-like, shape (number of Gaussians,)
Updated variance for each Gaussian over the combined set.
"""
if X.shape[0] == 0:
return mu, var
# Compute (potentially weighted) mean and variance of new datapoints
if sample_weight is not None:
n_new = float(sample_weight.sum())
new_mu = np.average(X, axis=0, weights=sample_weight / n_new)
new_var = np.average((X - new_mu) ** 2, axis=0,
weights=sample_weight / n_new)
else:
n_new = X.shape[0]
new_var = np.var(X, axis=0)
new_mu = np.mean(X, axis=0)
if n_past == 0:
return new_mu, new_var
n_total = float(n_past + n_new)
# Combine mean of old and new data, taking into consideration
# (weighted) number of observations
total_mu = (n_new * new_mu + n_past * mu) / n_total
# Combine variance of old and new data, taking into consideration
# (weighted) number of observations. This is achieved by combining
# the sum-of-squared-differences (ssd)
old_ssd = n_past * var
new_ssd = n_new * new_var
total_ssd = (old_ssd + new_ssd +
(n_past / float(n_new * n_total)) *
(n_new * mu - n_new * new_mu) ** 2)
total_var = total_ssd / n_total
return total_mu, total_var
def partial_fit(self, X, y, classes=None, sample_weight=None):
"""Incremental fit on a batch of samples.
This method is expected to be called several times consecutively
on different chunks of a dataset so as to implement out-of-core
or online learning.
This is especially useful when the whole dataset is too big to fit in
memory at once.
This method has some performance and numerical stability overhead,
hence it is better to call partial_fit on chunks of data that are
as large as possible (as long as fitting in the memory budget) to
hide the overhead.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples,)
Target values.
classes : array-like, shape (n_classes,), optional (default=None)
List of all the classes that can possibly appear in the y vector.
Must be provided at the first call to partial_fit, can be omitted
in subsequent calls.
sample_weight : array-like, shape (n_samples,), optional (default=None)
Weights applied to individual samples (1. for unweighted).
.. versionadded:: 0.17
Returns
-------
self : object
Returns self.
"""
return self._partial_fit(X, y, classes, _refit=False,
sample_weight=sample_weight)
def _partial_fit(self, X, y, classes=None, _refit=False,
sample_weight=None):
"""Actual implementation of Gaussian NB fitting.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples,)
Target values.
classes : array-like, shape (n_classes,), optional (default=None)
List of all the classes that can possibly appear in the y vector.
Must be provided at the first call to partial_fit, can be omitted
in subsequent calls.
_refit: bool, optional (default=False)
If true, act as though this were the first time we called
_partial_fit (ie, throw away any past fitting and start over).
sample_weight : array-like, shape (n_samples,), optional (default=None)
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : object
Returns self.
"""
X, y = check_X_y(X, y)
if sample_weight is not None:
sample_weight = check_array(sample_weight, ensure_2d=False)
check_consistent_length(y, sample_weight)
# If the ratio of data variance between dimensions is too small, it
# will cause numerical errors. To address this, we artificially
        # boost the variance by epsilon, a small fraction of the largest
        # per-feature variance (matching the computation below).
epsilon = 1e-9 * np.var(X, axis=0).max()
if _refit:
self.classes_ = None
if _check_partial_fit_first_call(self, classes):
# This is the first call to partial_fit:
# initialize various cumulative counters
n_features = X.shape[1]
n_classes = len(self.classes_)
self.theta_ = np.zeros((n_classes, n_features))
self.sigma_ = np.zeros((n_classes, n_features))
self.class_count_ = np.zeros(n_classes, dtype=np.float64)
# Initialise the class prior
n_classes = len(self.classes_)
# Take into account the priors
if self.priors is not None:
priors = np.asarray(self.priors)
                # Check that the provided priors match the number of classes
if len(priors) != n_classes:
raise ValueError('Number of priors must match number of'
' classes.')
# Check that the sum is 1
if priors.sum() != 1.0:
raise ValueError('The sum of the priors should be 1.')
                # Check that the priors are non-negative
if (priors < 0).any():
raise ValueError('Priors must be non-negative.')
self.class_prior_ = priors
else:
# Initialize the priors to zeros for each class
self.class_prior_ = np.zeros(len(self.classes_),
dtype=np.float64)
else:
if X.shape[1] != self.theta_.shape[1]:
msg = "Number of features %d does not match previous data %d."
raise ValueError(msg % (X.shape[1], self.theta_.shape[1]))
            # Take out the epsilon that was added at the end of the previous call
self.sigma_[:, :] -= epsilon
classes = self.classes_
unique_y = np.unique(y)
unique_y_in_classes = in1d(unique_y, classes)
if not np.all(unique_y_in_classes):
raise ValueError("The target label(s) %s in y do not exist in the "
"initial classes %s" %
(unique_y[~unique_y_in_classes], classes))
for y_i in unique_y:
i = classes.searchsorted(y_i)
X_i = X[y == y_i, :]
if sample_weight is not None:
sw_i = sample_weight[y == y_i]
N_i = sw_i.sum()
else:
sw_i = None
N_i = X_i.shape[0]
new_theta, new_sigma = self._update_mean_variance(
self.class_count_[i], self.theta_[i, :], self.sigma_[i, :],
X_i, sw_i)
self.theta_[i, :] = new_theta
self.sigma_[i, :] = new_sigma
self.class_count_[i] += N_i
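        # Re-apply the variance floor now that the per-class variances are updated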
self.sigma_[:, :] += epsilon
        # Update the class prior only if no priors were provided
if self.priors is None:
# Empirical prior, with sample_weight taken into account
self.class_prior_ = self.class_count_ / self.class_count_.sum()
return self
def _joint_log_likelihood(self, X):
check_is_fitted(self, "classes_")
X = check_array(X)
joint_log_likelihood = []
for i in range(np.size(self.classes_)):
jointi = np.log(self.class_prior_[i])
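            # The next two lines accumulate the per-feature Gaussian log
            # densities, i.e. log N(x | theta_c, sigma_c) under the naive
            # independence assumption.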
n_ij = - 0.5 * np.sum(np.log(2. * np.pi * self.sigma_[i, :]))
n_ij -= 0.5 * np.sum(((X - self.theta_[i, :]) ** 2) /
(self.sigma_[i, :]), 1)
joint_log_likelihood.append(jointi + n_ij)
joint_log_likelihood = np.array(joint_log_likelihood).T
return joint_log_likelihood
class BaseDiscreteNB(BaseNB):
"""Abstract base class for naive Bayes on discrete/categorical data
Any estimator based on this class should provide:
__init__
_joint_log_likelihood(X) as per BaseNB
"""
def _update_class_log_prior(self, class_prior=None):
n_classes = len(self.classes_)
if class_prior is not None:
if len(class_prior) != n_classes:
raise ValueError("Number of priors must match number of"
" classes.")
self.class_log_prior_ = np.log(class_prior)
elif self.fit_prior:
# empirical prior, with sample_weight taken into account
self.class_log_prior_ = (np.log(self.class_count_) -
np.log(self.class_count_.sum()))
else:
self.class_log_prior_ = np.zeros(n_classes) - np.log(n_classes)
def partial_fit(self, X, y, classes=None, sample_weight=None):
"""Incremental fit on a batch of samples.
This method is expected to be called several times consecutively
on different chunks of a dataset so as to implement out-of-core
or online learning.
This is especially useful when the whole dataset is too big to fit in
memory at once.
This method has some performance overhead hence it is better to call
partial_fit on chunks of data that are as large as possible
        (as long as they fit in the memory budget) to hide the overhead.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target values.
classes : array-like, shape = [n_classes], optional (default=None)
List of all the classes that can possibly appear in the y vector.
Must be provided at the first call to partial_fit, can be omitted
in subsequent calls.
sample_weight : array-like, shape = [n_samples], optional (default=None)
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : object
Returns self.
"""
X = check_array(X, accept_sparse='csr', dtype=np.float64)
_, n_features = X.shape
if _check_partial_fit_first_call(self, classes):
# This is the first call to partial_fit:
# initialize various cumulative counters
n_effective_classes = len(classes) if len(classes) > 1 else 2
self.class_count_ = np.zeros(n_effective_classes, dtype=np.float64)
self.feature_count_ = np.zeros((n_effective_classes, n_features),
dtype=np.float64)
elif n_features != self.coef_.shape[1]:
msg = "Number of features %d does not match previous data %d."
raise ValueError(msg % (n_features, self.coef_.shape[-1]))
Y = label_binarize(y, classes=self.classes_)
if Y.shape[1] == 1:
Y = np.concatenate((1 - Y, Y), axis=1)
n_samples, n_classes = Y.shape
if X.shape[0] != Y.shape[0]:
msg = "X.shape[0]=%d and y.shape[0]=%d are incompatible."
raise ValueError(msg % (X.shape[0], y.shape[0]))
# label_binarize() returns arrays with dtype=np.int64.
# We convert it to np.float64 to support sample_weight consistently
Y = Y.astype(np.float64)
if sample_weight is not None:
sample_weight = np.atleast_2d(sample_weight)
Y *= check_array(sample_weight).T
class_prior = self.class_prior
# Count raw events from data before updating the class log prior
# and feature log probas
self._count(X, Y)
# XXX: OPTIM: we could introduce a public finalization method to
# be called by the user explicitly just once after several consecutive
        # calls to partial_fit and prior to any call to predict[_[log_]proba]
# to avoid computing the smooth log probas at each call to partial fit
self._update_feature_log_prob()
self._update_class_log_prior(class_prior=class_prior)
return self
def fit(self, X, y, sample_weight=None):
"""Fit Naive Bayes classifier according to X, y
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target values.
sample_weight : array-like, shape = [n_samples], optional (default=None)
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : object
Returns self.
"""
X, y = check_X_y(X, y, 'csr')
_, n_features = X.shape
labelbin = LabelBinarizer()
Y = labelbin.fit_transform(y)
self.classes_ = labelbin.classes_
if Y.shape[1] == 1:
Y = np.concatenate((1 - Y, Y), axis=1)
# LabelBinarizer().fit_transform() returns arrays with dtype=np.int64.
# We convert it to np.float64 to support sample_weight consistently;
# this means we also don't have to cast X to floating point
Y = Y.astype(np.float64)
if sample_weight is not None:
sample_weight = np.atleast_2d(sample_weight)
Y *= check_array(sample_weight).T
class_prior = self.class_prior
# Count raw events from data before updating the class log prior
# and feature log probas
n_effective_classes = Y.shape[1]
self.class_count_ = np.zeros(n_effective_classes, dtype=np.float64)
self.feature_count_ = np.zeros((n_effective_classes, n_features),
dtype=np.float64)
self._count(X, Y)
self._update_feature_log_prob()
self._update_class_log_prior(class_prior=class_prior)
return self
# XXX The following is a stopgap measure; we need to set the dimensions
# of class_log_prior_ and feature_log_prob_ correctly.
def _get_coef(self):
return (self.feature_log_prob_[1:]
if len(self.classes_) == 2 else self.feature_log_prob_)
def _get_intercept(self):
return (self.class_log_prior_[1:]
if len(self.classes_) == 2 else self.class_log_prior_)
coef_ = property(_get_coef)
intercept_ = property(_get_intercept)
class MultinomialNB(BaseDiscreteNB):
"""
Naive Bayes classifier for multinomial models
The multinomial Naive Bayes classifier is suitable for classification with
discrete features (e.g., word counts for text classification). The
multinomial distribution normally requires integer feature counts. However,
in practice, fractional counts such as tf-idf may also work.
Read more in the :ref:`User Guide <multinomial_naive_bayes>`.
Parameters
----------
alpha : float, optional (default=1.0)
Additive (Laplace/Lidstone) smoothing parameter
(0 for no smoothing).
fit_prior : boolean, optional (default=True)
Whether to learn class prior probabilities or not.
If false, a uniform prior will be used.
class_prior : array-like, size (n_classes,), optional (default=None)
Prior probabilities of the classes. If specified the priors are not
adjusted according to the data.
Attributes
----------
class_log_prior_ : array, shape (n_classes, )
Smoothed empirical log probability for each class.
intercept_ : property
Mirrors ``class_log_prior_`` for interpreting MultinomialNB
as a linear model.
feature_log_prob_ : array, shape (n_classes, n_features)
Empirical log probability of features
given a class, ``P(x_i|y)``.
coef_ : property
Mirrors ``feature_log_prob_`` for interpreting MultinomialNB
as a linear model.
class_count_ : array, shape (n_classes,)
Number of samples encountered for each class during fitting. This
value is weighted by the sample weight when provided.
feature_count_ : array, shape (n_classes, n_features)
Number of samples encountered for each (class, feature)
during fitting. This value is weighted by the sample weight when
provided.
Examples
--------
>>> import numpy as np
>>> X = np.random.randint(5, size=(6, 100))
>>> y = np.array([1, 2, 3, 4, 5, 6])
>>> from sklearn.naive_bayes import MultinomialNB
>>> clf = MultinomialNB()
>>> clf.fit(X, y)
MultinomialNB(alpha=1.0, class_prior=None, fit_prior=True)
>>> print(clf.predict(X[2:3]))
[3]
Notes
-----
For the rationale behind the names `coef_` and `intercept_`, i.e.
naive Bayes as a linear classifier, see J. Rennie et al. (2003),
Tackling the poor assumptions of naive Bayes text classifiers, ICML.
References
----------
C.D. Manning, P. Raghavan and H. Schuetze (2008). Introduction to
Information Retrieval. Cambridge University Press, pp. 234-265.
http://nlp.stanford.edu/IR-book/html/htmledition/naive-bayes-text-classification-1.html
"""
def __init__(self, alpha=1.0, fit_prior=True, class_prior=None):
self.alpha = alpha
self.fit_prior = fit_prior
self.class_prior = class_prior
def _count(self, X, Y):
"""Count and smooth feature occurrences."""
if np.any((X.data if issparse(X) else X) < 0):
raise ValueError("Input X must be non-negative")
self.feature_count_ += safe_sparse_dot(Y.T, X)
self.class_count_ += Y.sum(axis=0)
def _update_feature_log_prob(self):
"""Apply smoothing to raw counts and recompute log probabilities"""
smoothed_fc = self.feature_count_ + self.alpha
smoothed_cc = smoothed_fc.sum(axis=1)
self.feature_log_prob_ = (np.log(smoothed_fc) -
np.log(smoothed_cc.reshape(-1, 1)))
def _joint_log_likelihood(self, X):
"""Calculate the posterior log probability of the samples X"""
check_is_fitted(self, "classes_")
X = check_array(X, accept_sparse='csr')
return (safe_sparse_dot(X, self.feature_log_prob_.T) +
self.class_log_prior_)
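# --- Illustrative sketch (hypothetical example, not part of scikit-learn) ---
# The _count/_update_feature_log_prob methods above turn raw event counts plus
# Laplace smoothing into feature_log_prob_, and _joint_log_likelihood is affine
# in X, which is what lets coef_/intercept_ mirror feature_log_prob_ and
# class_log_prior_.  The guarded snippet below re-derives those quantities by
# hand on a small invented dataset (X_demo/y_demo are hypothetical names).
if __name__ == "__main__":
    import numpy as np
    from sklearn.naive_bayes import MultinomialNB
    rng = np.random.RandomState(0)
    X_demo = rng.randint(5, size=(6, 4)).astype(np.float64)
    y_demo = np.array([0, 0, 1, 1, 2, 2])
    clf_demo = MultinomialNB(alpha=1.0).fit(X_demo, y_demo)
    # Hand-rolled smoothing: (count + alpha), normalised per class, in log space.
    counts = np.array([X_demo[y_demo == c].sum(axis=0) for c in clf_demo.classes_])
    smoothed = counts + clf_demo.alpha
    manual_log_prob = np.log(smoothed) - np.log(smoothed.sum(axis=1, keepdims=True))
    assert np.allclose(manual_log_prob, clf_demo.feature_log_prob_)
    # The joint log likelihood is dot(X, feature_log_prob_.T) + class_log_prior_.
    jll_manual = np.dot(X_demo, clf_demo.feature_log_prob_.T) + clf_demo.class_log_prior_
    assert np.allclose(jll_manual, clf_demo._joint_log_likelihood(X_demo))
    # Counts are additive, so batch-wise partial_fit matches a single fit on the
    # concatenated data (see the partial_fit comments above).
    clf_pf = MultinomialNB(alpha=1.0).partial_fit(X_demo[:3], y_demo[:3],
                                                  classes=[0, 1, 2])
    clf_pf.partial_fit(X_demo[3:], y_demo[3:])
    assert np.allclose(clf_pf.feature_log_prob_, clf_demo.feature_log_prob_)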
class BernoulliNB(BaseDiscreteNB):
"""Naive Bayes classifier for multivariate Bernoulli models.
Like MultinomialNB, this classifier is suitable for discrete data. The
difference is that while MultinomialNB works with occurrence counts,
BernoulliNB is designed for binary/boolean features.
Read more in the :ref:`User Guide <bernoulli_naive_bayes>`.
Parameters
----------
alpha : float, optional (default=1.0)
Additive (Laplace/Lidstone) smoothing parameter
(0 for no smoothing).
binarize : float or None, optional (default=0.0)
Threshold for binarizing (mapping to booleans) of sample features.
If None, input is presumed to already consist of binary vectors.
fit_prior : boolean, optional (default=True)
Whether to learn class prior probabilities or not.
If false, a uniform prior will be used.
class_prior : array-like, size=[n_classes,], optional (default=None)
Prior probabilities of the classes. If specified the priors are not
adjusted according to the data.
Attributes
----------
class_log_prior_ : array, shape = [n_classes]
Log probability of each class (smoothed).
feature_log_prob_ : array, shape = [n_classes, n_features]
Empirical log probability of features given a class, P(x_i|y).
class_count_ : array, shape = [n_classes]
Number of samples encountered for each class during fitting. This
value is weighted by the sample weight when provided.
feature_count_ : array, shape = [n_classes, n_features]
Number of samples encountered for each (class, feature)
during fitting. This value is weighted by the sample weight when
provided.
Examples
--------
>>> import numpy as np
>>> X = np.random.randint(2, size=(6, 100))
>>> Y = np.array([1, 2, 3, 4, 4, 5])
>>> from sklearn.naive_bayes import BernoulliNB
>>> clf = BernoulliNB()
>>> clf.fit(X, Y)
BernoulliNB(alpha=1.0, binarize=0.0, class_prior=None, fit_prior=True)
>>> print(clf.predict(X[2:3]))
[3]
References
----------
C.D. Manning, P. Raghavan and H. Schuetze (2008). Introduction to
Information Retrieval. Cambridge University Press, pp. 234-265.
http://nlp.stanford.edu/IR-book/html/htmledition/the-bernoulli-model-1.html
A. McCallum and K. Nigam (1998). A comparison of event models for naive
Bayes text classification. Proc. AAAI/ICML-98 Workshop on Learning for
Text Categorization, pp. 41-48.
V. Metsis, I. Androutsopoulos and G. Paliouras (2006). Spam filtering with
naive Bayes -- Which naive Bayes? 3rd Conf. on Email and Anti-Spam (CEAS).
"""
def __init__(self, alpha=1.0, binarize=.0, fit_prior=True,
class_prior=None):
self.alpha = alpha
self.binarize = binarize
self.fit_prior = fit_prior
self.class_prior = class_prior
def _count(self, X, Y):
"""Count and smooth feature occurrences."""
if self.binarize is not None:
X = binarize(X, threshold=self.binarize)
self.feature_count_ += safe_sparse_dot(Y.T, X)
self.class_count_ += Y.sum(axis=0)
def _update_feature_log_prob(self):
"""Apply smoothing to raw counts and recompute log probabilities"""
smoothed_fc = self.feature_count_ + self.alpha
smoothed_cc = self.class_count_ + self.alpha * 2
self.feature_log_prob_ = (np.log(smoothed_fc) -
np.log(smoothed_cc.reshape(-1, 1)))
def _joint_log_likelihood(self, X):
"""Calculate the posterior log probability of the samples X"""
check_is_fitted(self, "classes_")
X = check_array(X, accept_sparse='csr')
if self.binarize is not None:
X = binarize(X, threshold=self.binarize)
n_classes, n_features = self.feature_log_prob_.shape
n_samples, n_features_X = X.shape
if n_features_X != n_features:
raise ValueError("Expected input with %d features, got %d instead"
% (n_features, n_features_X))
neg_prob = np.log(1 - np.exp(self.feature_log_prob_))
# Compute neg_prob · (1 - X).T as ∑neg_prob - X · neg_prob
jll = safe_sparse_dot(X, (self.feature_log_prob_ - neg_prob).T)
jll += self.class_log_prior_ + neg_prob.sum(axis=1)
return jll
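# --- Illustrative sketch (hypothetical example, not part of scikit-learn) ---
# BernoulliNB._joint_log_likelihood above rewrites
#     dot(X, log(p).T) + dot(1 - X, log(1 - p).T)
# as  dot(X, (log(p) - log(1 - p)).T) + sum(log(1 - p)),
# which avoids materialising (1 - X) (important for sparse input).  The guarded
# snippet below checks that identity on a small invented binary dataset
# (Xb/yb are hypothetical names).
if __name__ == "__main__":
    import numpy as np
    from sklearn.naive_bayes import BernoulliNB
    rng = np.random.RandomState(0)
    Xb = rng.randint(2, size=(8, 5)).astype(np.float64)
    yb = np.array([0, 0, 0, 0, 1, 1, 1, 1])
    clf_b = BernoulliNB(alpha=1.0).fit(Xb, yb)
    log_p = clf_b.feature_log_prob_
    log_1mp = np.log(1 - np.exp(log_p))
    # Direct per-feature Bernoulli likelihood ...
    jll_direct = (np.dot(Xb, log_p.T) + np.dot(1 - Xb, log_1mp.T)
                  + clf_b.class_log_prior_)
    # ... and the rearranged form used in the implementation above.
    jll_fast = (np.dot(Xb, (log_p - log_1mp).T)
                + log_1mp.sum(axis=1) + clf_b.class_log_prior_)
    assert np.allclose(jll_direct, jll_fast)
    assert np.allclose(jll_fast, clf_b._joint_log_likelihood(Xb))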
| bsd-3-clause |
HolgerPeters/scikit-learn | sklearn/semi_supervised/label_propagation.py | 39 | 16726 | # coding=utf8
"""
Label propagation in the context of this module refers to a set of
semi-supervised classification algorithms. At a high level, these algorithms
work by forming a fully-connected graph between all points given and solving
for the steady-state distribution of labels at each point.
These algorithms perform very well in practice, but they can be very
expensive to run, at approximately O(N^3) where N is the number of (labeled and
unlabeled) points. The theory (why they perform so well) is motivated by
intuitions from random walk algorithms and geometric relationships in the data.
For more information see the references below.
Model Features
--------------
Label clamping:
The algorithm tries to learn distributions of labels over the dataset. In the
"Hard Clamp" mode, the true ground labels are never allowed to change. They
are clamped into position. In the "Soft Clamp" mode, they are allowed some
wiggle room, but a fraction alpha of their original value will always be retained.
Hard clamp is the same as soft clamping with alpha set to 1.
Kernel:
A function which projects a vector into some higher dimensional space. This
implementation supports RBF and KNN kernels. Using the RBF kernel generates
a dense matrix of size O(N^2). KNN kernel will generate a sparse matrix of
size O(k*N) which will run much faster. See the documentation for SVMs for
more info on kernels.
Examples
--------
>>> from sklearn import datasets
>>> from sklearn.semi_supervised import LabelPropagation
>>> label_prop_model = LabelPropagation()
>>> iris = datasets.load_iris()
>>> random_unlabeled_points = np.where(np.random.randint(0, 2,
... size=len(iris.target)))
>>> labels = np.copy(iris.target)
>>> labels[random_unlabeled_points] = -1
>>> label_prop_model.fit(iris.data, labels)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
LabelPropagation(...)
Notes
-----
References:
[1] Yoshua Bengio, Olivier Delalleau, Nicolas Le Roux. In Semi-Supervised
Learning (2006), pp. 193-216
[2] Olivier Delalleau, Yoshua Bengio, Nicolas Le Roux. Efficient
Non-Parametric Function Induction in Semi-Supervised Learning. AISTAT 2005
"""
# Authors: Clay Woolam <[email protected]>
# License: BSD
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy import sparse
from ..base import BaseEstimator, ClassifierMixin
from ..externals import six
from ..metrics.pairwise import rbf_kernel
from ..neighbors.unsupervised import NearestNeighbors
from ..utils.extmath import safe_sparse_dot
from ..utils.graph import graph_laplacian
from ..utils.multiclass import check_classification_targets
from ..utils.validation import check_X_y, check_is_fitted, check_array
# Helper functions
def _not_converged(y_truth, y_prediction, tol=1e-3):
"""basic convergence check"""
return np.abs(y_truth - y_prediction).sum() > tol
class BaseLabelPropagation(six.with_metaclass(ABCMeta, BaseEstimator,
ClassifierMixin)):
"""Base class for label propagation module.
Parameters
----------
kernel : {'knn', 'rbf', callable}
String identifier for kernel function to use or the kernel function
itself. Only 'rbf' and 'knn' strings are valid inputs. The function
passed should take two inputs, each of shape [n_samples, n_features],
and return a [n_samples, n_samples] shaped weight matrix
gamma : float
Parameter for rbf kernel
alpha : float
Clamping factor
max_iter : float
Change maximum number of iterations allowed
tol : float
Convergence tolerance: threshold to consider the system at steady
state
n_neighbors : integer > 0
Parameter for knn kernel
n_jobs : int, optional (default = 1)
The number of parallel jobs to run.
If ``-1``, then the number of jobs is set to the number of CPU cores.
"""
def __init__(self, kernel='rbf', gamma=20, n_neighbors=7,
alpha=1, max_iter=30, tol=1e-3, n_jobs=1):
self.max_iter = max_iter
self.tol = tol
# kernel parameters
self.kernel = kernel
self.gamma = gamma
self.n_neighbors = n_neighbors
# clamping factor
self.alpha = alpha
self.n_jobs = n_jobs
def _get_kernel(self, X, y=None):
if self.kernel == "rbf":
if y is None:
return rbf_kernel(X, X, gamma=self.gamma)
else:
return rbf_kernel(X, y, gamma=self.gamma)
elif self.kernel == "knn":
if self.nn_fit is None:
self.nn_fit = NearestNeighbors(self.n_neighbors,
n_jobs=self.n_jobs).fit(X)
if y is None:
return self.nn_fit.kneighbors_graph(self.nn_fit._fit_X,
self.n_neighbors,
mode='connectivity')
else:
return self.nn_fit.kneighbors(y, return_distance=False)
elif callable(self.kernel):
if y is None:
return self.kernel(X, X)
else:
return self.kernel(X, y)
else:
raise ValueError("%s is not a valid kernel. Only rbf and knn"
" or an explicit function "
" are supported at this time." % self.kernel)
@abstractmethod
def _build_graph(self):
raise NotImplementedError("Graph construction must be implemented"
" to fit a label propagation model.")
def predict(self, X):
"""Performs inductive inference across the model.
Parameters
----------
X : array_like, shape = [n_samples, n_features]
Returns
-------
y : array_like, shape = [n_samples]
Predictions for input data
"""
probas = self.predict_proba(X)
return self.classes_[np.argmax(probas, axis=1)].ravel()
def predict_proba(self, X):
"""Predict probability for each possible outcome.
Compute the probability estimates for each single sample in X
and each possible outcome seen during training (categorical
distribution).
Parameters
----------
X : array_like, shape = [n_samples, n_features]
Returns
-------
probabilities : array, shape = [n_samples, n_classes]
Normalized probability distributions across
class labels
"""
check_is_fitted(self, 'X_')
X_2d = check_array(X, accept_sparse=['csc', 'csr', 'coo', 'dok',
'bsr', 'lil', 'dia'])
weight_matrices = self._get_kernel(self.X_, X_2d)
if self.kernel == 'knn':
probabilities = []
for weight_matrix in weight_matrices:
ine = np.sum(self.label_distributions_[weight_matrix], axis=0)
probabilities.append(ine)
probabilities = np.array(probabilities)
else:
weight_matrices = weight_matrices.T
probabilities = np.dot(weight_matrices, self.label_distributions_)
normalizer = np.atleast_2d(np.sum(probabilities, axis=1)).T
probabilities /= normalizer
return probabilities
def fit(self, X, y):
"""Fit a semi-supervised label propagation model based
All the input data is provided matrix X (labeled and unlabeled)
and corresponding label matrix y with a dedicated marker value for
unlabeled samples.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
A matrix of shape [n_samples, n_samples] will be created from this
y : array_like, shape = [n_samples]
n_labeled_samples (unlabeled points are marked as -1)
All unlabeled samples will be transductively assigned labels
Returns
-------
self : returns an instance of self.
"""
X, y = check_X_y(X, y)
self.X_ = X
check_classification_targets(y)
# actual graph construction (implementations should override this)
graph_matrix = self._build_graph()
# label construction
# construct a categorical distribution for classification only
classes = np.unique(y)
classes = (classes[classes != -1])
self.classes_ = classes
n_samples, n_classes = len(y), len(classes)
y = np.asarray(y)
unlabeled = y == -1
clamp_weights = np.ones((n_samples, 1))
clamp_weights[unlabeled, 0] = self.alpha
# initialize distributions
self.label_distributions_ = np.zeros((n_samples, n_classes))
for label in classes:
self.label_distributions_[y == label, classes == label] = 1
y_static = np.copy(self.label_distributions_)
if self.alpha > 0.:
y_static *= 1 - self.alpha
y_static[unlabeled] = 0
l_previous = np.zeros((self.X_.shape[0], n_classes))
remaining_iter = self.max_iter
if sparse.isspmatrix(graph_matrix):
graph_matrix = graph_matrix.tocsr()
while (_not_converged(self.label_distributions_, l_previous, self.tol)
and remaining_iter > 1):
l_previous = self.label_distributions_
self.label_distributions_ = safe_sparse_dot(
graph_matrix, self.label_distributions_)
# clamp
self.label_distributions_ = np.multiply(
clamp_weights, self.label_distributions_) + y_static
remaining_iter -= 1
normalizer = np.sum(self.label_distributions_, axis=1)[:, np.newaxis]
self.label_distributions_ /= normalizer
# set the transduction item
transduction = self.classes_[np.argmax(self.label_distributions_,
axis=1)]
self.transduction_ = transduction.ravel()
self.n_iter_ = self.max_iter - remaining_iter
return self
class LabelPropagation(BaseLabelPropagation):
"""Label Propagation classifier
Read more in the :ref:`User Guide <label_propagation>`.
Parameters
----------
kernel : {'knn', 'rbf', callable}
String identifier for kernel function to use or the kernel function
itself. Only 'rbf' and 'knn' strings are valid inputs. The function
passed should take two inputs, each of shape [n_samples, n_features],
and return a [n_samples, n_samples] shaped weight matrix.
gamma : float
Parameter for rbf kernel
n_neighbors : integer > 0
Parameter for knn kernel
alpha : float
Clamping factor
max_iter : float
Change maximum number of iterations allowed
tol : float
Convergence tolerance: threshold to consider the system at steady
state
Attributes
----------
X_ : array, shape = [n_samples, n_features]
Input array.
classes_ : array, shape = [n_classes]
The distinct labels used in classifying instances.
label_distributions_ : array, shape = [n_samples, n_classes]
Categorical distribution for each item.
transduction_ : array, shape = [n_samples]
Label assigned to each item via the transduction.
n_iter_ : int
Number of iterations run.
Examples
--------
>>> from sklearn import datasets
>>> from sklearn.semi_supervised import LabelPropagation
>>> label_prop_model = LabelPropagation()
>>> iris = datasets.load_iris()
>>> random_unlabeled_points = np.where(np.random.randint(0, 2,
... size=len(iris.target)))
>>> labels = np.copy(iris.target)
>>> labels[random_unlabeled_points] = -1
>>> label_prop_model.fit(iris.data, labels)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
LabelPropagation(...)
References
----------
Xiaojin Zhu and Zoubin Ghahramani. Learning from labeled and unlabeled data
with label propagation. Technical Report CMU-CALD-02-107, Carnegie Mellon
University, 2002 http://pages.cs.wisc.edu/~jerryzhu/pub/CMU-CALD-02-107.pdf
See Also
--------
LabelSpreading : Alternate label propagation strategy more robust to noise
"""
def _build_graph(self):
"""Matrix representing a fully connected graph between each sample
This basic implementation creates a non-stochastic affinity matrix, so
class distributions will exceed 1 (normalization may be desired).
"""
if self.kernel == 'knn':
self.nn_fit = None
affinity_matrix = self._get_kernel(self.X_)
normalizer = affinity_matrix.sum(axis=0)
if sparse.isspmatrix(affinity_matrix):
affinity_matrix.data /= np.diag(np.array(normalizer))
else:
affinity_matrix /= normalizer[:, np.newaxis]
return affinity_matrix
class LabelSpreading(BaseLabelPropagation):
"""LabelSpreading model for semi-supervised learning
This model is similar to the basic Label Propagation algorithm,
but uses affinity matrix based on the normalized graph Laplacian
and soft clamping across the labels.
Read more in the :ref:`User Guide <label_propagation>`.
Parameters
----------
kernel : {'knn', 'rbf', callable}
String identifier for kernel function to use or the kernel function
itself. Only 'rbf' and 'knn' strings are valid inputs. The function
passed should take two inputs, each of shape [n_samples, n_features],
and return a [n_samples, n_samples] shaped weight matrix
gamma : float
parameter for rbf kernel
n_neighbors : integer > 0
parameter for knn kernel
alpha : float
clamping factor
max_iter : float
maximum number of iterations allowed
tol : float
Convergence tolerance: threshold to consider the system at steady
state
n_jobs : int, optional (default = 1)
The number of parallel jobs to run.
If ``-1``, then the number of jobs is set to the number of CPU cores.
Attributes
----------
X_ : array, shape = [n_samples, n_features]
Input array.
classes_ : array, shape = [n_classes]
The distinct labels used in classifying instances.
label_distributions_ : array, shape = [n_samples, n_classes]
Categorical distribution for each item.
transduction_ : array, shape = [n_samples]
Label assigned to each item via the transduction.
n_iter_ : int
Number of iterations run.
Examples
--------
>>> from sklearn import datasets
>>> from sklearn.semi_supervised import LabelSpreading
>>> label_prop_model = LabelSpreading()
>>> iris = datasets.load_iris()
>>> random_unlabeled_points = np.where(np.random.randint(0, 2,
... size=len(iris.target)))
>>> labels = np.copy(iris.target)
>>> labels[random_unlabeled_points] = -1
>>> label_prop_model.fit(iris.data, labels)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
LabelSpreading(...)
References
----------
Dengyong Zhou, Olivier Bousquet, Thomas Navin Lal, Jason Weston,
Bernhard Schoelkopf. Learning with local and global consistency (2004)
http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.115.3219
See Also
--------
LabelPropagation : Unregularized graph based semi-supervised learning
"""
def __init__(self, kernel='rbf', gamma=20, n_neighbors=7, alpha=0.2,
max_iter=30, tol=1e-3, n_jobs=1):
# this one has different base parameters
super(LabelSpreading, self).__init__(kernel=kernel, gamma=gamma,
n_neighbors=n_neighbors,
alpha=alpha, max_iter=max_iter,
tol=tol,
n_jobs=n_jobs)
def _build_graph(self):
"""Graph matrix for Label Spreading computes the graph laplacian"""
# compute affinity matrix (or gram matrix)
if self.kernel == 'knn':
self.nn_fit = None
n_samples = self.X_.shape[0]
affinity_matrix = self._get_kernel(self.X_)
laplacian = graph_laplacian(affinity_matrix, normed=True)
laplacian = -laplacian
if sparse.isspmatrix(laplacian):
diag_mask = (laplacian.row == laplacian.col)
laplacian.data[diag_mask] = 0.0
else:
laplacian.flat[::n_samples + 1] = 0.0 # set diag to 0.0
return laplacian
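# --- Illustrative sketch (hypothetical example, not part of scikit-learn) ---
# The fit loop in BaseLabelPropagation above repeatedly propagates label
# distributions through the graph and clamps the labelled points.  The guarded
# snippet below is a minimal dense NumPy rendition of the soft-clamping update
# F <- alpha * dot(S, F) + (1 - alpha) * Y from the Zhou et al. (2004) reference
# cited in LabelSpreading; a row-normalised random affinity matrix stands in for
# the normalised graph weights, and every name and value here is hypothetical.
if __name__ == "__main__":
    import numpy as np
    rng = np.random.RandomState(0)
    n_points, n_classes, alpha = 6, 2, 0.2
    W = rng.rand(n_points, n_points)
    W = (W + W.T) / 2.                      # symmetric affinity
    S = W / W.sum(axis=1)[:, np.newaxis]    # stand-in for the normalised graph
    Y = np.zeros((n_points, n_classes))
    Y[0, 0] = Y[1, 1] = 1.0                 # two labelled points, rest unlabelled
    F = Y.copy()
    for _ in range(30):                     # iterate towards the steady state
        F = alpha * np.dot(S, F) + (1 - alpha) * Y
    print(F.argmax(axis=1))                 # propagated hard labels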
| bsd-3-clause |
adsass/wwt-frontend | cats/merge.py | 1 | 1120 | from glob import glob
import pandas as pd
import numpy as np
import json
def ten(x):
denom = 1.
result = 0
neg = -1 if x[0].startswith('-') else 1
if len(x) == 1:
return float(x[0])
if len(x) == 2:
return float(x[0]) + float(x[1]) / 60 * neg
return float(x[0]) + float(x[1]) / 60 * neg + float(x[2]) / 3600 * neg
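# Usage note: ten() converts split sexagesimal strings into decimal values,
# applying the sign of the leading field to the minute and second terms,
# e.g. ten(['12', '30']) -> 12.5 and ten(['-12', '30', '00']) -> -12.5.
# (The RA values are converted from hours to degrees further down via * 15.)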
files = glob('Npix*tsv')
# some tiles are empty
dne = set('Npix%i.tsv' % i for i in
[1037, 1073, 1973, 2661, 2922, 2933, 2997, 3005, 38, 389])
files = [f for f in files if f not in dne]
tables = [pd.read_csv(f, delimiter='\t')
for f in files]
table = pd.concat(tables, ignore_index=True)
ra = table['RA'].str.split(' ')
dec = table['DEC'].str.split(' ')
ra = np.array([ten(r) for r in ra])
dec = np.array([ten(d) for d in dec])
table['RA'] = ra
table['DEC'] = dec
table = table.dropna(subset=['RA', 'DEC'])
table = table[['MAIN_ID', 'RA', 'DEC']]
data = [dict(name=row['MAIN_ID'], ra=row['RA'] * 15, dec=row['DEC'])
for i, row in table.iterrows()]
with open('merged.json', 'w') as outfile:
json.dump(data, outfile, indent=1)
| mit |
Akshay0724/scikit-learn | sklearn/model_selection/tests/test_split.py | 12 | 47658 | """Test the split module"""
from __future__ import division
import warnings
import numpy as np
from scipy.sparse import coo_matrix, csc_matrix, csr_matrix
from scipy import stats
from scipy.misc import comb
from itertools import combinations
from itertools import combinations_with_replacement
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.validation import _num_samples
from sklearn.utils.mocking import MockDataFrame
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import KFold
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import GroupKFold
from sklearn.model_selection import TimeSeriesSplit
from sklearn.model_selection import LeaveOneOut
from sklearn.model_selection import LeaveOneGroupOut
from sklearn.model_selection import LeavePOut
from sklearn.model_selection import LeavePGroupsOut
from sklearn.model_selection import ShuffleSplit
from sklearn.model_selection import GroupShuffleSplit
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.model_selection import PredefinedSplit
from sklearn.model_selection import check_cv
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.linear_model import Ridge
from sklearn.model_selection._split import _validate_shuffle_split
from sklearn.model_selection._split import _CVIterableWrapper
from sklearn.model_selection._split import _build_repr
from sklearn.datasets import load_digits
from sklearn.datasets import make_classification
from sklearn.externals import six
from sklearn.externals.six.moves import zip
from sklearn.svm import SVC
X = np.ones(10)
y = np.arange(10) // 2
P_sparse = coo_matrix(np.eye(5))
test_groups = (
np.array([1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3]),
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]),
np.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2]),
np.array([1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4]),
[1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3],
['1', '1', '1', '1', '2', '2', '2', '3', '3', '3', '3', '3'])
digits = load_digits()
class MockClassifier(object):
"""Dummy classifier to test the cross-validation"""
def __init__(self, a=0, allow_nd=False):
self.a = a
self.allow_nd = allow_nd
def fit(self, X, Y=None, sample_weight=None, class_prior=None,
sparse_sample_weight=None, sparse_param=None, dummy_int=None,
dummy_str=None, dummy_obj=None, callback=None):
"""The dummy arguments are to test that this fit function can
accept non-array arguments through cross-validation, such as:
- int
- str (this is actually array-like)
- object
- function
"""
self.dummy_int = dummy_int
self.dummy_str = dummy_str
self.dummy_obj = dummy_obj
if callback is not None:
callback(self)
if self.allow_nd:
X = X.reshape(len(X), -1)
if X.ndim >= 3 and not self.allow_nd:
raise ValueError('X cannot be d')
if sample_weight is not None:
assert_true(sample_weight.shape[0] == X.shape[0],
'MockClassifier extra fit_param sample_weight.shape[0]'
' is {0}, should be {1}'.format(sample_weight.shape[0],
X.shape[0]))
if class_prior is not None:
assert_true(class_prior.shape[0] == len(np.unique(y)),
'MockClassifier extra fit_param class_prior.shape[0]'
' is {0}, should be {1}'.format(class_prior.shape[0],
len(np.unique(y))))
if sparse_sample_weight is not None:
fmt = ('MockClassifier extra fit_param sparse_sample_weight'
'.shape[0] is {0}, should be {1}')
assert_true(sparse_sample_weight.shape[0] == X.shape[0],
fmt.format(sparse_sample_weight.shape[0], X.shape[0]))
if sparse_param is not None:
fmt = ('MockClassifier extra fit_param sparse_param.shape '
'is ({0}, {1}), should be ({2}, {3})')
assert_true(sparse_param.shape == P_sparse.shape,
fmt.format(sparse_param.shape[0],
sparse_param.shape[1],
P_sparse.shape[0], P_sparse.shape[1]))
return self
def predict(self, T):
if self.allow_nd:
T = T.reshape(len(T), -1)
return T[:, 0]
def score(self, X=None, Y=None):
return 1. / (1 + np.abs(self.a))
def get_params(self, deep=False):
return {'a': self.a, 'allow_nd': self.allow_nd}
@ignore_warnings
def test_cross_validator_with_default_params():
n_samples = 4
n_unique_groups = 4
n_splits = 2
p = 2
n_shuffle_splits = 10 # (the default value)
X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
X_1d = np.array([1, 2, 3, 4])
y = np.array([1, 1, 2, 2])
groups = np.array([1, 2, 3, 4])
loo = LeaveOneOut()
lpo = LeavePOut(p)
kf = KFold(n_splits)
skf = StratifiedKFold(n_splits)
lolo = LeaveOneGroupOut()
lopo = LeavePGroupsOut(p)
ss = ShuffleSplit(random_state=0)
ps = PredefinedSplit([1, 1, 2, 2]) # n_splits = no. of unique folds = 2
loo_repr = "LeaveOneOut()"
lpo_repr = "LeavePOut(p=2)"
kf_repr = "KFold(n_splits=2, random_state=None, shuffle=False)"
skf_repr = "StratifiedKFold(n_splits=2, random_state=None, shuffle=False)"
lolo_repr = "LeaveOneGroupOut()"
lopo_repr = "LeavePGroupsOut(n_groups=2)"
ss_repr = ("ShuffleSplit(n_splits=10, random_state=0, test_size=0.1, "
"train_size=None)")
ps_repr = "PredefinedSplit(test_fold=array([1, 1, 2, 2]))"
n_splits_expected = [n_samples, comb(n_samples, p), n_splits, n_splits,
n_unique_groups, comb(n_unique_groups, p),
n_shuffle_splits, 2]
for i, (cv, cv_repr) in enumerate(zip(
[loo, lpo, kf, skf, lolo, lopo, ss, ps],
[loo_repr, lpo_repr, kf_repr, skf_repr, lolo_repr, lopo_repr,
ss_repr, ps_repr])):
# Test if get_n_splits works correctly
assert_equal(n_splits_expected[i], cv.get_n_splits(X, y, groups))
# Test if the cross-validator works as expected even if
# the data is 1d
np.testing.assert_equal(list(cv.split(X, y, groups)),
list(cv.split(X_1d, y, groups)))
# Test that train, test indices returned are integers
for train, test in cv.split(X, y, groups):
assert_equal(np.asarray(train).dtype.kind, 'i')
assert_equal(np.asarray(test).dtype.kind, 'i')
# Test if the repr works without any errors
assert_equal(cv_repr, repr(cv))
def check_valid_split(train, test, n_samples=None):
# Use python sets to get more informative assertion failure messages
train, test = set(train), set(test)
# Train and test split should not overlap
assert_equal(train.intersection(test), set())
if n_samples is not None:
# Check that the union of the train and test splits covers all the indices
assert_equal(train.union(test), set(range(n_samples)))
def check_cv_coverage(cv, X, y, groups, expected_n_splits=None):
n_samples = _num_samples(X)
# Check that all the samples appear at least once in a test fold
if expected_n_splits is not None:
assert_equal(cv.get_n_splits(X, y, groups), expected_n_splits)
else:
expected_n_splits = cv.get_n_splits(X, y, groups)
collected_test_samples = set()
iterations = 0
for train, test in cv.split(X, y, groups):
check_valid_split(train, test, n_samples=n_samples)
iterations += 1
collected_test_samples.update(test)
# Check that the accumulated test samples cover the whole dataset
assert_equal(iterations, expected_n_splits)
if n_samples is not None:
assert_equal(collected_test_samples, set(range(n_samples)))
def test_kfold_valueerrors():
X1 = np.array([[1, 2], [3, 4], [5, 6]])
X2 = np.array([[1, 2], [3, 4], [5, 6], [7, 8], [9, 10]])
# Check that errors are raised if there is not enough samples
assert_raises(ValueError, next, KFold(4).split(X1))
# Check that a warning is raised if the least populated class has too few
# members.
y = np.array([3, 3, -1, -1, 3])
skf_3 = StratifiedKFold(3)
assert_warns_message(Warning, "The least populated class",
next, skf_3.split(X2, y))
# Check that despite the warning the folds are still computed even
# though all the classes are not necessarily represented on each
# side of the split at each split
with warnings.catch_warnings():
warnings.simplefilter("ignore")
check_cv_coverage(skf_3, X2, y, groups=None, expected_n_splits=3)
# Check that errors are raised if all n_groups for individual
# classes are less than n_splits.
y = np.array([3, 3, -1, -1, 2])
assert_raises(ValueError, next, skf_3.split(X2, y))
# Error when number of folds is <= 1
assert_raises(ValueError, KFold, 0)
assert_raises(ValueError, KFold, 1)
error_string = ("k-fold cross-validation requires at least one"
" train/test split")
assert_raise_message(ValueError, error_string,
StratifiedKFold, 0)
assert_raise_message(ValueError, error_string,
StratifiedKFold, 1)
# When n_splits is not integer:
assert_raises(ValueError, KFold, 1.5)
assert_raises(ValueError, KFold, 2.0)
assert_raises(ValueError, StratifiedKFold, 1.5)
assert_raises(ValueError, StratifiedKFold, 2.0)
# When shuffle is not a bool:
assert_raises(TypeError, KFold, n_splits=4, shuffle=None)
def test_kfold_indices():
# Check all indices are returned in the test folds
X1 = np.ones(18)
kf = KFold(3)
check_cv_coverage(kf, X1, y=None, groups=None, expected_n_splits=3)
# Check all indices are returned in the test folds even when equal-sized
# folds are not possible
X2 = np.ones(17)
kf = KFold(3)
check_cv_coverage(kf, X2, y=None, groups=None, expected_n_splits=3)
# Check if get_n_splits returns the number of folds
assert_equal(5, KFold(5).get_n_splits(X2))
def test_kfold_no_shuffle():
# Manually check that KFold preserves the data ordering on toy datasets
X2 = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10]]
splits = KFold(2).split(X2[:-1])
train, test = next(splits)
assert_array_equal(test, [0, 1])
assert_array_equal(train, [2, 3])
train, test = next(splits)
assert_array_equal(test, [2, 3])
assert_array_equal(train, [0, 1])
splits = KFold(2).split(X2)
train, test = next(splits)
assert_array_equal(test, [0, 1, 2])
assert_array_equal(train, [3, 4])
train, test = next(splits)
assert_array_equal(test, [3, 4])
assert_array_equal(train, [0, 1, 2])
def test_stratified_kfold_no_shuffle():
# Manually check that StratifiedKFold preserves the data ordering as much
# as possible on toy datasets in order to avoid hiding sample dependencies
# when possible
X, y = np.ones(4), [1, 1, 0, 0]
splits = StratifiedKFold(2).split(X, y)
train, test = next(splits)
assert_array_equal(test, [0, 2])
assert_array_equal(train, [1, 3])
train, test = next(splits)
assert_array_equal(test, [1, 3])
assert_array_equal(train, [0, 2])
X, y = np.ones(7), [1, 1, 1, 0, 0, 0, 0]
splits = StratifiedKFold(2).split(X, y)
train, test = next(splits)
assert_array_equal(test, [0, 1, 3, 4])
assert_array_equal(train, [2, 5, 6])
train, test = next(splits)
assert_array_equal(test, [2, 5, 6])
assert_array_equal(train, [0, 1, 3, 4])
# Check if get_n_splits returns the number of folds
assert_equal(5, StratifiedKFold(5).get_n_splits(X, y))
# Make sure string labels are also supported
X = np.ones(7)
y1 = ['1', '1', '1', '0', '0', '0', '0']
y2 = [1, 1, 1, 0, 0, 0, 0]
np.testing.assert_equal(
list(StratifiedKFold(2).split(X, y1)),
list(StratifiedKFold(2).split(X, y2)))
def test_stratified_kfold_ratios():
# Check that stratified kfold preserves class ratios in individual splits
# Repeat with shuffling turned off and on
n_samples = 1000
X = np.ones(n_samples)
y = np.array([4] * int(0.10 * n_samples) +
[0] * int(0.89 * n_samples) +
[1] * int(0.01 * n_samples))
for shuffle in (False, True):
for train, test in StratifiedKFold(5, shuffle=shuffle).split(X, y):
assert_almost_equal(np.sum(y[train] == 4) / len(train), 0.10, 2)
assert_almost_equal(np.sum(y[train] == 0) / len(train), 0.89, 2)
assert_almost_equal(np.sum(y[train] == 1) / len(train), 0.01, 2)
assert_almost_equal(np.sum(y[test] == 4) / len(test), 0.10, 2)
assert_almost_equal(np.sum(y[test] == 0) / len(test), 0.89, 2)
assert_almost_equal(np.sum(y[test] == 1) / len(test), 0.01, 2)
def test_kfold_balance():
# Check that KFold returns folds with balanced sizes
for i in range(11, 17):
kf = KFold(5).split(X=np.ones(i))
sizes = []
for _, test in kf:
sizes.append(len(test))
assert_true((np.max(sizes) - np.min(sizes)) <= 1)
assert_equal(np.sum(sizes), i)
def test_stratifiedkfold_balance():
# Check that KFold returns folds with balanced sizes (only when
# stratification is possible)
# Repeat with shuffling turned off and on
X = np.ones(17)
y = [0] * 3 + [1] * 14
for shuffle in (True, False):
cv = StratifiedKFold(3, shuffle=shuffle)
for i in range(11, 17):
skf = cv.split(X[:i], y[:i])
sizes = []
for _, test in skf:
sizes.append(len(test))
assert_true((np.max(sizes) - np.min(sizes)) <= 1)
assert_equal(np.sum(sizes), i)
def test_shuffle_kfold():
# Check the indices are shuffled properly
kf = KFold(3)
kf2 = KFold(3, shuffle=True, random_state=0)
kf3 = KFold(3, shuffle=True, random_state=1)
X = np.ones(300)
all_folds = np.zeros(300)
for (tr1, te1), (tr2, te2), (tr3, te3) in zip(
kf.split(X), kf2.split(X), kf3.split(X)):
for tr_a, tr_b in combinations((tr1, tr2, tr3), 2):
# Assert that there is no complete overlap
assert_not_equal(len(np.intersect1d(tr_a, tr_b)), len(tr1))
# Set all test indices in successive iterations of kf2 to 1
all_folds[te2] = 1
# Check that all indices are returned in the different test folds
assert_equal(sum(all_folds), 300)
def test_shuffle_kfold_stratifiedkfold_reproducibility():
# Check that when the shuffle is True multiple split calls produce the
# same split when random_state is set
X = np.ones(15) # Divisible by 3
y = [0] * 7 + [1] * 8
X2 = np.ones(16) # Not divisible by 3
y2 = [0] * 8 + [1] * 8
kf = KFold(3, shuffle=True, random_state=0)
skf = StratifiedKFold(3, shuffle=True, random_state=0)
for cv in (kf, skf):
np.testing.assert_equal(list(cv.split(X, y)), list(cv.split(X, y)))
np.testing.assert_equal(list(cv.split(X2, y2)), list(cv.split(X2, y2)))
kf = KFold(3, shuffle=True)
skf = StratifiedKFold(3, shuffle=True)
for cv in (kf, skf):
for data in zip((X, X2), (y, y2)):
try:
np.testing.assert_equal(list(cv.split(*data)),
list(cv.split(*data)))
except AssertionError:
pass
else:
raise AssertionError("The splits for data, %s, are same even "
"when random state is not set" % data)
def test_shuffle_stratifiedkfold():
# Check that shuffling is happening when requested, and for proper
# sample coverage
X_40 = np.ones(40)
y = [0] * 20 + [1] * 20
kf0 = StratifiedKFold(5, shuffle=True, random_state=0)
kf1 = StratifiedKFold(5, shuffle=True, random_state=1)
for (_, test0), (_, test1) in zip(kf0.split(X_40, y),
kf1.split(X_40, y)):
assert_not_equal(set(test0), set(test1))
check_cv_coverage(kf0, X_40, y, groups=None, expected_n_splits=5)
def test_kfold_can_detect_dependent_samples_on_digits(): # see #2372
# The digits samples are dependent: they are apparently grouped by authors
# although we don't have any information on the groups segment locations
# for this data. We can highlight this fact by computing k-fold cross-
# validation with and without shuffling: we observe that the shuffling case
# wrongly makes the IID assumption and is therefore too optimistic: it
# estimates a much higher accuracy (around 0.93) than the non-shuffling
# variant (around 0.81).
X, y = digits.data[:600], digits.target[:600]
model = SVC(C=10, gamma=0.005)
n_splits = 3
cv = KFold(n_splits=n_splits, shuffle=False)
mean_score = cross_val_score(model, X, y, cv=cv).mean()
assert_greater(0.92, mean_score)
assert_greater(mean_score, 0.80)
# Shuffling the data artificially breaks the dependency and hides the
# overfitting of the model with regards to the writing style of the authors
# by yielding a seriously overestimated score:
cv = KFold(n_splits, shuffle=True, random_state=0)
mean_score = cross_val_score(model, X, y, cv=cv).mean()
assert_greater(mean_score, 0.92)
cv = KFold(n_splits, shuffle=True, random_state=1)
mean_score = cross_val_score(model, X, y, cv=cv).mean()
assert_greater(mean_score, 0.92)
# Similarly, StratifiedKFold should try to shuffle the data as little
# as possible (while respecting the balanced class constraints)
# and thus be able to detect the dependency by not overestimating
# the CV score either. As the digits dataset is approximately balanced
# the estimated mean score is close to the score measured with
# non-shuffled KFold
cv = StratifiedKFold(n_splits)
mean_score = cross_val_score(model, X, y, cv=cv).mean()
assert_greater(0.93, mean_score)
assert_greater(mean_score, 0.80)
def test_shuffle_split():
ss1 = ShuffleSplit(test_size=0.2, random_state=0).split(X)
ss2 = ShuffleSplit(test_size=2, random_state=0).split(X)
ss3 = ShuffleSplit(test_size=np.int32(2), random_state=0).split(X)
for typ in six.integer_types:
ss4 = ShuffleSplit(test_size=typ(2), random_state=0).split(X)
for t1, t2, t3, t4 in zip(ss1, ss2, ss3, ss4):
assert_array_equal(t1[0], t2[0])
assert_array_equal(t2[0], t3[0])
assert_array_equal(t3[0], t4[0])
assert_array_equal(t1[1], t2[1])
assert_array_equal(t2[1], t3[1])
assert_array_equal(t3[1], t4[1])
def test_stratified_shuffle_split_init():
X = np.arange(7)
y = np.asarray([0, 1, 1, 1, 2, 2, 2])
# Check that error is raised if there is a class with only one sample
assert_raises(ValueError, next,
StratifiedShuffleSplit(3, 0.2).split(X, y))
# Check that error is raised if the test set size is smaller than n_classes
assert_raises(ValueError, next, StratifiedShuffleSplit(3, 2).split(X, y))
# Check that error is raised if the train set size is smaller than
# n_classes
assert_raises(ValueError, next,
StratifiedShuffleSplit(3, 3, 2).split(X, y))
X = np.arange(9)
y = np.asarray([0, 0, 0, 1, 1, 1, 2, 2, 2])
# Check that errors are raised if there is not enough samples
assert_raises(ValueError, StratifiedShuffleSplit, 3, 0.5, 0.6)
assert_raises(ValueError, next,
StratifiedShuffleSplit(3, 8, 0.6).split(X, y))
assert_raises(ValueError, next,
StratifiedShuffleSplit(3, 0.6, 8).split(X, y))
# Train size or test size too small
assert_raises(ValueError, next,
StratifiedShuffleSplit(train_size=2).split(X, y))
assert_raises(ValueError, next,
StratifiedShuffleSplit(test_size=2).split(X, y))
def test_stratified_shuffle_split_respects_test_size():
y = np.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2])
test_size = 5
train_size = 10
sss = StratifiedShuffleSplit(6, test_size=test_size, train_size=train_size,
random_state=0).split(np.ones(len(y)), y)
for train, test in sss:
assert_equal(len(train), train_size)
assert_equal(len(test), test_size)
def test_stratified_shuffle_split_iter():
ys = [np.array([1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3]),
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]),
np.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2] * 2),
np.array([1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4]),
np.array([-1] * 800 + [1] * 50),
np.concatenate([[i] * (100 + i) for i in range(11)]),
[1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3],
['1', '1', '1', '1', '2', '2', '2', '3', '3', '3', '3', '3'],
]
for y in ys:
sss = StratifiedShuffleSplit(6, test_size=0.33,
random_state=0).split(np.ones(len(y)), y)
y = np.asanyarray(y) # To make it indexable for y[train]
# this is how test-size is computed internally
# in _validate_shuffle_split
test_size = np.ceil(0.33 * len(y))
train_size = len(y) - test_size
for train, test in sss:
assert_array_equal(np.unique(y[train]), np.unique(y[test]))
# Checks if folds keep classes proportions
p_train = (np.bincount(np.unique(y[train],
return_inverse=True)[1]) /
float(len(y[train])))
p_test = (np.bincount(np.unique(y[test],
return_inverse=True)[1]) /
float(len(y[test])))
assert_array_almost_equal(p_train, p_test, 1)
assert_equal(len(train) + len(test), y.size)
assert_equal(len(train), train_size)
assert_equal(len(test), test_size)
assert_array_equal(np.lib.arraysetops.intersect1d(train, test), [])
def test_stratified_shuffle_split_even():
# Test that the StratifiedShuffleSplit indices are drawn with
# equal chance
n_folds = 5
n_splits = 1000
def assert_counts_are_ok(idx_counts, p):
# Here we test that the distribution of the counts
# per index is close enough to a binomial
threshold = 0.05 / n_splits
bf = stats.binom(n_splits, p)
for count in idx_counts:
prob = bf.pmf(count)
assert_true(prob > threshold,
"An index is not drawn with chance corresponding "
"to even draws")
for n_samples in (6, 22):
groups = np.array((n_samples // 2) * [0, 1])
splits = StratifiedShuffleSplit(n_splits=n_splits,
test_size=1. / n_folds,
random_state=0)
train_counts = [0] * n_samples
test_counts = [0] * n_samples
n_splits_actual = 0
for train, test in splits.split(X=np.ones(n_samples), y=groups):
n_splits_actual += 1
for counter, ids in [(train_counts, train), (test_counts, test)]:
for id in ids:
counter[id] += 1
assert_equal(n_splits_actual, n_splits)
n_train, n_test = _validate_shuffle_split(
n_samples, test_size=1. / n_folds, train_size=1. - (1. / n_folds))
assert_equal(len(train), n_train)
assert_equal(len(test), n_test)
assert_equal(len(set(train).intersection(test)), 0)
group_counts = np.unique(groups)
assert_equal(splits.test_size, 1.0 / n_folds)
assert_equal(n_train + n_test, len(groups))
assert_equal(len(group_counts), 2)
ex_test_p = float(n_test) / n_samples
ex_train_p = float(n_train) / n_samples
assert_counts_are_ok(train_counts, ex_train_p)
assert_counts_are_ok(test_counts, ex_test_p)
def test_stratified_shuffle_split_overlap_train_test_bug():
# See https://github.com/scikit-learn/scikit-learn/issues/6121 for
# the original bug report
y = [0, 1, 2, 3] * 3 + [4, 5] * 5
X = np.ones_like(y)
sss = StratifiedShuffleSplit(n_splits=1,
test_size=0.5, random_state=0)
train, test = next(iter(sss.split(X=X, y=y)))
assert_array_equal(np.intersect1d(train, test), [])
def test_predefinedsplit_with_kfold_split():
# Check that PredefinedSplit can reproduce a split generated by KFold.
folds = -1 * np.ones(10)
kf_train = []
kf_test = []
for i, (train_ind, test_ind) in enumerate(KFold(5, shuffle=True).split(X)):
kf_train.append(train_ind)
kf_test.append(test_ind)
folds[test_ind] = i
ps_train = []
ps_test = []
ps = PredefinedSplit(folds)
# n_splits is simply the no of unique folds
assert_equal(len(np.unique(folds)), ps.get_n_splits())
for train_ind, test_ind in ps.split():
ps_train.append(train_ind)
ps_test.append(test_ind)
assert_array_equal(ps_train, kf_train)
assert_array_equal(ps_test, kf_test)
def test_group_shuffle_split():
for groups_i in test_groups:
X = y = np.ones(len(groups_i))
n_splits = 6
test_size = 1./3
slo = GroupShuffleSplit(n_splits, test_size=test_size, random_state=0)
# Make sure the repr works
repr(slo)
# Test that the length is correct
assert_equal(slo.get_n_splits(X, y, groups=groups_i), n_splits)
l_unique = np.unique(groups_i)
l = np.asarray(groups_i)
for train, test in slo.split(X, y, groups=groups_i):
# First test: no train group is in the test set and vice versa
l_train_unique = np.unique(l[train])
l_test_unique = np.unique(l[test])
assert_false(np.any(np.in1d(l[train], l_test_unique)))
assert_false(np.any(np.in1d(l[test], l_train_unique)))
# Second test: train and test add up to all the data
assert_equal(l[train].size + l[test].size, l.size)
# Third test: train and test are disjoint
assert_array_equal(np.intersect1d(train, test), [])
# Fourth test:
# unique train and test groups are correct, +- 1 for rounding error
assert_true(abs(len(l_test_unique) -
round(test_size * len(l_unique))) <= 1)
assert_true(abs(len(l_train_unique) -
round((1.0 - test_size) * len(l_unique))) <= 1)
def test_leave_one_p_group_out():
logo = LeaveOneGroupOut()
lpgo_1 = LeavePGroupsOut(n_groups=1)
lpgo_2 = LeavePGroupsOut(n_groups=2)
# Make sure the repr works
assert_equal(repr(logo), 'LeaveOneGroupOut()')
assert_equal(repr(lpgo_1), 'LeavePGroupsOut(n_groups=1)')
assert_equal(repr(lpgo_2), 'LeavePGroupsOut(n_groups=2)')
assert_equal(repr(LeavePGroupsOut(n_groups=3)),
'LeavePGroupsOut(n_groups=3)')
for j, (cv, p_groups_out) in enumerate(((logo, 1), (lpgo_1, 1),
(lpgo_2, 2))):
for i, groups_i in enumerate(test_groups):
n_groups = len(np.unique(groups_i))
n_splits = (n_groups if p_groups_out == 1
else n_groups * (n_groups - 1) / 2)
X = y = np.ones(len(groups_i))
# Test that the length is correct
assert_equal(cv.get_n_splits(X, y, groups=groups_i), n_splits)
groups_arr = np.asarray(groups_i)
# Split using the original list / array / list of string groups_i
for train, test in cv.split(X, y, groups=groups_i):
# First test: no train group is in the test set and vice versa
assert_array_equal(np.intersect1d(groups_arr[train],
groups_arr[test]).tolist(),
[])
# Second test: train and test add up to all the data
assert_equal(len(train) + len(test), len(groups_i))
# Third test:
# The number of groups in test must be equal to p_groups_out
assert_true(np.unique(groups_arr[test]).shape[0], p_groups_out)
def test_leave_group_out_changing_groups():
# Check that LeaveOneGroupOut and LeavePGroupsOut work normally if
# the groups variable is changed before calling split
groups = np.array([0, 1, 2, 1, 1, 2, 0, 0])
X = np.ones(len(groups))
groups_changing = np.array(groups, copy=True)
lolo = LeaveOneGroupOut().split(X, groups=groups)
lolo_changing = LeaveOneGroupOut().split(X, groups=groups)
lplo = LeavePGroupsOut(n_groups=2).split(X, groups=groups)
lplo_changing = LeavePGroupsOut(n_groups=2).split(X, groups=groups)
groups_changing[:] = 0
for llo, llo_changing in [(lolo, lolo_changing), (lplo, lplo_changing)]:
for (train, test), (train_chan, test_chan) in zip(llo, llo_changing):
assert_array_equal(train, train_chan)
assert_array_equal(test, test_chan)
# n_splits = no of 2 (p) group combinations of the unique groups = 3C2 = 3
assert_equal(
3, LeavePGroupsOut(n_groups=2).get_n_splits(X, y=X,
groups=groups))
# n_splits = no of unique groups (C(uniq_lbls, 1) = n_unique_groups)
assert_equal(3, LeaveOneGroupOut().get_n_splits(X, y=X,
groups=groups))
def test_leave_one_p_group_out_error_on_fewer_number_of_groups():
X = y = groups = np.ones(0)
assert_raise_message(ValueError, "Found array with 0 sample(s)", next,
LeaveOneGroupOut().split(X, y, groups))
X = y = groups = np.ones(1)
msg = ("The groups parameter contains fewer than 2 unique groups ([ 1.]). "
"LeaveOneGroupOut expects at least 2.")
assert_raise_message(ValueError, msg, next,
LeaveOneGroupOut().split(X, y, groups))
X = y = groups = np.ones(1)
msg = ("The groups parameter contains fewer than (or equal to) n_groups "
"(3) numbers of unique groups ([ 1.]). LeavePGroupsOut expects "
"that at least n_groups + 1 (4) unique groups be present")
assert_raise_message(ValueError, msg, next,
LeavePGroupsOut(n_groups=3).split(X, y, groups))
X = y = groups = np.arange(3)
msg = ("The groups parameter contains fewer than (or equal to) n_groups "
"(3) numbers of unique groups ([0 1 2]). LeavePGroupsOut expects "
"that at least n_groups + 1 (4) unique groups be present")
assert_raise_message(ValueError, msg, next,
LeavePGroupsOut(n_groups=3).split(X, y, groups))
def test_train_test_split_errors():
assert_raises(ValueError, train_test_split)
assert_raises(ValueError, train_test_split, range(3), train_size=1.1)
assert_raises(ValueError, train_test_split, range(3), test_size=0.6,
train_size=0.6)
assert_raises(ValueError, train_test_split, range(3),
test_size=np.float32(0.6), train_size=np.float32(0.6))
assert_raises(ValueError, train_test_split, range(3),
test_size="wrong_type")
assert_raises(ValueError, train_test_split, range(3), test_size=2,
train_size=4)
assert_raises(TypeError, train_test_split, range(3),
some_argument=1.1)
assert_raises(ValueError, train_test_split, range(3), range(42))
def test_train_test_split():
X = np.arange(100).reshape((10, 10))
X_s = coo_matrix(X)
y = np.arange(10)
# simple test
split = train_test_split(X, y, test_size=None, train_size=.5)
X_train, X_test, y_train, y_test = split
assert_equal(len(y_test), len(y_train))
# test correspondence of X and y
assert_array_equal(X_train[:, 0], y_train * 10)
assert_array_equal(X_test[:, 0], y_test * 10)
# don't convert lists to anything else by default
split = train_test_split(X, X_s, y.tolist())
X_train, X_test, X_s_train, X_s_test, y_train, y_test = split
assert_true(isinstance(y_train, list))
assert_true(isinstance(y_test, list))
# allow nd-arrays
X_4d = np.arange(10 * 5 * 3 * 2).reshape(10, 5, 3, 2)
y_3d = np.arange(10 * 7 * 11).reshape(10, 7, 11)
split = train_test_split(X_4d, y_3d)
assert_equal(split[0].shape, (7, 5, 3, 2))
assert_equal(split[1].shape, (3, 5, 3, 2))
assert_equal(split[2].shape, (7, 7, 11))
assert_equal(split[3].shape, (3, 7, 11))
# test stratification option
y = np.array([1, 1, 1, 1, 2, 2, 2, 2])
for test_size, exp_test_size in zip([2, 4, 0.25, 0.5, 0.75],
[2, 4, 2, 4, 6]):
train, test = train_test_split(y, test_size=test_size,
stratify=y,
random_state=0)
assert_equal(len(test), exp_test_size)
assert_equal(len(test) + len(train), len(y))
# check the 1:1 ratio of ones and twos in the data is preserved
assert_equal(np.sum(train == 1), np.sum(train == 2))
@ignore_warnings
def train_test_split_pandas():
# check train_test_split doesn't destroy pandas dataframe
types = [MockDataFrame]
try:
from pandas import DataFrame
types.append(DataFrame)
except ImportError:
pass
for InputFeatureType in types:
# X dataframe
X_df = InputFeatureType(X)
X_train, X_test = train_test_split(X_df)
assert_true(isinstance(X_train, InputFeatureType))
assert_true(isinstance(X_test, InputFeatureType))
def train_test_split_sparse():
# check that train_test_split converts scipy sparse matrices
# to csr, as stated in the documentation
X = np.arange(100).reshape((10, 10))
sparse_types = [csr_matrix, csc_matrix, coo_matrix]
for InputFeatureType in sparse_types:
X_s = InputFeatureType(X)
X_train, X_test = train_test_split(X_s)
assert_true(isinstance(X_train, csr_matrix))
assert_true(isinstance(X_test, csr_matrix))
def train_test_split_mock_pandas():
# X mock dataframe
X_df = MockDataFrame(X)
X_train, X_test = train_test_split(X_df)
assert_true(isinstance(X_train, MockDataFrame))
assert_true(isinstance(X_test, MockDataFrame))
X_train_arr, X_test_arr = train_test_split(X_df)
def train_test_split_list_input():
# Check that when y is a list / list of string labels, it works.
X = np.ones(7)
y1 = ['1'] * 4 + ['0'] * 3
y2 = np.hstack((np.ones(4), np.zeros(3)))
y3 = y2.tolist()
for stratify in (True, False):
X_train1, X_test1, y_train1, y_test1 = train_test_split(
X, y1, stratify=y1 if stratify else None, random_state=0)
X_train2, X_test2, y_train2, y_test2 = train_test_split(
X, y2, stratify=y2 if stratify else None, random_state=0)
X_train3, X_test3, y_train3, y_test3 = train_test_split(
X, y3, stratify=y3 if stratify else None, random_state=0)
np.testing.assert_equal(X_train1, X_train2)
np.testing.assert_equal(y_train2, y_train3)
np.testing.assert_equal(X_test1, X_test3)
np.testing.assert_equal(y_test3, y_test2)
def test_shufflesplit_errors():
# When the {test|train}_size is a float/invalid, error is raised at init
assert_raises(ValueError, ShuffleSplit, test_size=None, train_size=None)
assert_raises(ValueError, ShuffleSplit, test_size=2.0)
assert_raises(ValueError, ShuffleSplit, test_size=1.0)
assert_raises(ValueError, ShuffleSplit, test_size=0.1, train_size=0.95)
assert_raises(ValueError, ShuffleSplit, train_size=1j)
# When the {test|train}_size is an int, validation is based on the input X
# and happens at split(...)
assert_raises(ValueError, next, ShuffleSplit(test_size=11).split(X))
assert_raises(ValueError, next, ShuffleSplit(test_size=10).split(X))
assert_raises(ValueError, next, ShuffleSplit(test_size=8,
train_size=3).split(X))
def test_shufflesplit_reproducible():
# Check that iterating twice on the ShuffleSplit gives the same
# sequence of train-test when the random_state is given
ss = ShuffleSplit(random_state=21)
assert_array_equal(list(a for a, b in ss.split(X)),
list(a for a, b in ss.split(X)))
def test_stratifiedshufflesplit_list_input():
# Check that when y is a list / list of string labels, it works.
sss = StratifiedShuffleSplit(test_size=2, random_state=42)
X = np.ones(7)
y1 = ['1'] * 4 + ['0'] * 3
y2 = np.hstack((np.ones(4), np.zeros(3)))
y3 = y2.tolist()
np.testing.assert_equal(list(sss.split(X, y1)),
list(sss.split(X, y2)))
np.testing.assert_equal(list(sss.split(X, y3)),
list(sss.split(X, y2)))
def test_train_test_split_allow_nans():
# Check that train_test_split allows input data with NaNs
X = np.arange(200, dtype=np.float64).reshape(10, -1)
X[2, :] = np.nan
y = np.repeat([0, 1], X.shape[0] / 2)
train_test_split(X, y, test_size=0.2, random_state=42)
def test_check_cv():
X = np.ones(9)
cv = check_cv(3, classifier=False)
# Use numpy.testing.assert_equal which recursively compares
# lists of lists
np.testing.assert_equal(list(KFold(3).split(X)), list(cv.split(X)))
y_binary = np.array([0, 1, 0, 1, 0, 0, 1, 1, 1])
cv = check_cv(3, y_binary, classifier=True)
np.testing.assert_equal(list(StratifiedKFold(3).split(X, y_binary)),
list(cv.split(X, y_binary)))
y_multiclass = np.array([0, 1, 0, 1, 2, 1, 2, 0, 2])
cv = check_cv(3, y_multiclass, classifier=True)
np.testing.assert_equal(list(StratifiedKFold(3).split(X, y_multiclass)),
list(cv.split(X, y_multiclass)))
X = np.ones(5)
y_multilabel = np.array([[0, 0, 0, 0], [0, 1, 1, 0], [0, 0, 0, 1],
[1, 1, 0, 1], [0, 0, 1, 0]])
cv = check_cv(3, y_multilabel, classifier=True)
np.testing.assert_equal(list(KFold(3).split(X)), list(cv.split(X)))
y_multioutput = np.array([[1, 2], [0, 3], [0, 0], [3, 1], [2, 0]])
cv = check_cv(3, y_multioutput, classifier=True)
np.testing.assert_equal(list(KFold(3).split(X)), list(cv.split(X)))
# Check if the old style classes are wrapped to have a split method
X = np.ones(9)
y_multiclass = np.array([0, 1, 0, 1, 2, 1, 2, 0, 2])
cv1 = check_cv(3, y_multiclass, classifier=True)
with warnings.catch_warnings(record=True):
from sklearn.cross_validation import StratifiedKFold as OldSKF
cv2 = check_cv(OldSKF(y_multiclass, n_folds=3))
np.testing.assert_equal(list(cv1.split(X, y_multiclass)),
list(cv2.split()))
assert_raises(ValueError, check_cv, cv="lolo")
def test_cv_iterable_wrapper():
y_multiclass = np.array([0, 1, 0, 1, 2, 1, 2, 0, 2])
with warnings.catch_warnings(record=True):
from sklearn.cross_validation import StratifiedKFold as OldSKF
cv = OldSKF(y_multiclass, n_folds=3)
wrapped_old_skf = _CVIterableWrapper(cv)
# Check if split works correctly
np.testing.assert_equal(list(cv), list(wrapped_old_skf.split()))
# Check if get_n_splits works correctly
assert_equal(len(cv), wrapped_old_skf.get_n_splits())
kf_iter = KFold(n_splits=5).split(X, y)
kf_iter_wrapped = check_cv(kf_iter)
# Since the wrapped iterable is converted to a list and stored,
# split can be called any number of times to produce
# consistent results.
np.testing.assert_equal(list(kf_iter_wrapped.split(X, y)),
list(kf_iter_wrapped.split(X, y)))
# If the splits are randomized, successive calls to split yields different
# results
kf_randomized_iter = KFold(n_splits=5, shuffle=True).split(X, y)
kf_randomized_iter_wrapped = check_cv(kf_randomized_iter)
np.testing.assert_equal(list(kf_randomized_iter_wrapped.split(X, y)),
list(kf_randomized_iter_wrapped.split(X, y)))
try:
np.testing.assert_equal(list(kf_iter_wrapped.split(X, y)),
list(kf_randomized_iter_wrapped.split(X, y)))
splits_are_equal = True
except AssertionError:
splits_are_equal = False
assert_false(splits_are_equal, "If the splits are randomized, "
"successive calls to split should yield different results")
def test_group_kfold():
rng = np.random.RandomState(0)
# Parameters of the test
n_groups = 15
n_samples = 1000
n_splits = 5
X = y = np.ones(n_samples)
# Construct the test data
tolerance = 0.05 * n_samples # 5 percent error allowed
groups = rng.randint(0, n_groups, n_samples)
ideal_n_groups_per_fold = n_samples // n_splits
len(np.unique(groups))
# Get the test fold indices from the test set indices of each fold
folds = np.zeros(n_samples)
lkf = GroupKFold(n_splits=n_splits)
for i, (_, test) in enumerate(lkf.split(X, y, groups)):
folds[test] = i
# Check that folds have approximately the same size
assert_equal(len(folds), len(groups))
for i in np.unique(folds):
assert_greater_equal(tolerance,
abs(sum(folds == i) - ideal_n_groups_per_fold))
# Check that each group appears only in 1 fold
for group in np.unique(groups):
assert_equal(len(np.unique(folds[groups == group])), 1)
# Check that no group is on both sides of the split
groups = np.asarray(groups, dtype=object)
for train, test in lkf.split(X, y, groups):
assert_equal(len(np.intersect1d(groups[train], groups[test])), 0)
# Construct the test data
groups = np.array(['Albert', 'Jean', 'Bertrand', 'Michel', 'Jean',
'Francis', 'Robert', 'Michel', 'Rachel', 'Lois',
'Michelle', 'Bernard', 'Marion', 'Laura', 'Jean',
'Rachel', 'Franck', 'John', 'Gael', 'Anna', 'Alix',
'Robert', 'Marion', 'David', 'Tony', 'Abel', 'Becky',
'Madmood', 'Cary', 'Mary', 'Alexandre', 'David',
'Francis', 'Barack', 'Abdoul', 'Rasha', 'Xi', 'Silvia'])
n_groups = len(np.unique(groups))
n_samples = len(groups)
n_splits = 5
tolerance = 0.05 * n_samples # 5 percent error allowed
ideal_n_groups_per_fold = n_samples // n_splits
X = y = np.ones(n_samples)
# Get the test fold indices from the test set indices of each fold
folds = np.zeros(n_samples)
for i, (_, test) in enumerate(lkf.split(X, y, groups)):
folds[test] = i
# Check that folds have approximately the same size
assert_equal(len(folds), len(groups))
for i in np.unique(folds):
assert_greater_equal(tolerance,
abs(sum(folds == i) - ideal_n_groups_per_fold))
# Check that each group appears only in 1 fold
with warnings.catch_warnings():
warnings.simplefilter("ignore", DeprecationWarning)
for group in np.unique(groups):
assert_equal(len(np.unique(folds[groups == group])), 1)
# Check that no group is on both sides of the split
groups = np.asarray(groups, dtype=object)
for train, test in lkf.split(X, y, groups):
assert_equal(len(np.intersect1d(groups[train], groups[test])), 0)
# groups can also be a list
cv_iter = list(lkf.split(X, y, groups.tolist()))
for (train1, test1), (train2, test2) in zip(lkf.split(X, y, groups),
cv_iter):
assert_array_equal(train1, train2)
assert_array_equal(test1, test2)
# Should fail if there are more folds than groups
groups = np.array([1, 1, 1, 2, 2])
X = y = np.ones(len(groups))
assert_raises_regexp(ValueError, "Cannot have number of splits.*greater",
next, GroupKFold(n_splits=3).split(X, y, groups))
def test_time_series_cv():
X = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12], [13, 14]]
# Should fail if there are more folds than samples
assert_raises_regexp(ValueError, "Cannot have number of folds.*greater",
next,
TimeSeriesSplit(n_splits=7).split(X))
tscv = TimeSeriesSplit(2)
# Manually check that Time Series CV preserves the data
# ordering on toy datasets
splits = tscv.split(X[:-1])
train, test = next(splits)
assert_array_equal(train, [0, 1])
assert_array_equal(test, [2, 3])
train, test = next(splits)
assert_array_equal(train, [0, 1, 2, 3])
assert_array_equal(test, [4, 5])
splits = TimeSeriesSplit(2).split(X)
train, test = next(splits)
assert_array_equal(train, [0, 1, 2])
assert_array_equal(test, [3, 4])
train, test = next(splits)
assert_array_equal(train, [0, 1, 2, 3, 4])
assert_array_equal(test, [5, 6])
# Check get_n_splits returns the correct number of splits
splits = TimeSeriesSplit(2).split(X)
n_splits_actual = len(list(splits))
assert_equal(n_splits_actual, tscv.get_n_splits())
assert_equal(n_splits_actual, 2)
def test_nested_cv():
# Test if nested cross validation works with different combinations of cv
rng = np.random.RandomState(0)
X, y = make_classification(n_samples=15, n_classes=2, random_state=0)
groups = rng.randint(0, 5, 15)
cvs = [LeaveOneGroupOut(), LeaveOneOut(), GroupKFold(), StratifiedKFold(),
StratifiedShuffleSplit(n_splits=3, random_state=0)]
for inner_cv, outer_cv in combinations_with_replacement(cvs, 2):
gs = GridSearchCV(Ridge(), param_grid={'alpha': [1, .1]},
cv=inner_cv)
cross_val_score(gs, X=X, y=y, groups=groups, cv=outer_cv,
fit_params={'groups': groups})
def test_build_repr():
class MockSplitter:
def __init__(self, a, b=0, c=None):
self.a = a
self.b = b
self.c = c
def __repr__(self):
return _build_repr(self)
assert_equal(repr(MockSplitter(5, 6)), "MockSplitter(a=5, b=6, c=None)")
| bsd-3-clause |
jmpolom/sti-wav | src/sti.py | 1 | 20252 | #!/usr/bin/python
"""
Speech Transmission Index (STI) from speech waveforms (real speech)
Copyright (C) 2011 Jon Polom <[email protected]>
Licensed under the GNU General Public License
"""
from datetime import date, datetime
from matplotlib.mlab import cohere,psd
from numpy import append,array,clip,log10,nonzero,ones,power,reshape
from numpy import searchsorted,shape,sqrt,sum,vstack,zeros
from numpy.ma import masked_array
from scipy.io import wavfile
from scipy.signal import butter,firwin,decimate,lfilter
from sys import stdout
from warnings import catch_warnings,simplefilter
__author__ = "Jonathan Polom <[email protected]>"
__date__ = date(2011, 04, 22)
__version__ = "0.5"
def thirdOctaves(minFreq, maxFreq):
"""
Calculates a list of frequencies spaced 1/3 octave apart in hertz
between minFreq and maxFreq
Input
-----
* minFreq : float or int
Must be non-zero and non-negative
* maxFreq : float or int
Must be non-zero and non-negative
Output
------
* freqs : ndarray
"""
if minFreq <= 0 or maxFreq <= 0:
raise ValueError("minFreq and maxFreq must be non-zero and non-negative")
else:
maxFreq = float(maxFreq)
f = float(minFreq)
freqs = array([f])
while f < maxFreq:
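            # each step multiplies f by 10**0.1 (~1.26), a tenth of a decade,
            # which is the usual base-10 approximation of a 1/3-octave step
            # (2**(1/3) ~ 1.26)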
f = f * 10**0.1
freqs = append(freqs, f)
return freqs
def fftWindowSize(freqRes, hz):
"""
Calculate power of 2 window length for FFT to achieve specified frequency
resolution. Useful for power spectra and coherence calculations.
Input
-----
* freqRes : float
Desired frequency resolution in hertz
* hz : int
Sample rate, in hertz, of signal undergoing FFT
Output
------
* window : int
"""
freqRes = float(freqRes) # make sure frequency res is a float
pwr = 1 # initial power of 2 to try
res = hz / float(2**pwr) # calculate frequency resolution
while res > freqRes:
pwr += 1
res = hz / float(2**pwr)
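    # e.g. fftWindowSize(0.5, 1000) returns 2048, the smallest power of two
    # N for which 1000 / N <= 0.5 Hz (1000/2048 ~= 0.49 Hz)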
return 2**pwr
def downsampleBands(audio, hz, downsampleFactor):
"""
Downsample audio by integer factor
Input
-----
* audio : array-like
Array of original audio samples
* hz : float or int
Original audio sample rate in hertz
* downsampleFactor : int
Factor to downsample audio by, if desired
Output
------
* dsAudio : ndarray
Downsampled audio array
* hz : int
Downsampled audio sample rate in hertz
"""
# calculate downsampled audio rate in hertz
downsampleFactor = int(downsampleFactor) # factor must be integer
hz = int(hz / downsampleFactor)
for band in audio:
ds = decimate(band, downsampleFactor, ftype='fir')
try:
dsAudio = append(dsAudio, ds)
except:
dsAudio = ds
return dsAudio, hz
def octaveBandFilter(audio, hz,
octaveBands=[125, 250, 500, 1000, 2000, 4000, 8000],
butterOrd=6, hammingTime=16.6):
"""
Octave band filter raw audio. The audio is filtered through butterworth
filters of order 6 (by default), squared to obtain the envelope and finally
low-pass filtered using a 'hammingTime' length Hamming filter at 25 Hz.
Input
-----
* audio : array-like
Array of raw audio samples
* hz : float or int
Audio sample rate in hertz
* octaveBands : array-like
list or array of octave band center frequencies
* butterOrd : int
butterworth filter order
* hammingTime : float or int
Hamming window length, in milliseconds relative to audio sample rate
Output
------
* octaveBandAudio : ndarray
Octave band filtered audio
"""
print "Butterworth filter order:",butterOrd
print "Hamming filter length: ",hammingTime,"milliseconds"
print "Audio sample rate: ",hz
# calculate the nyquist frequency
nyquist = hz * 0.5
# length of Hamming window for FIR low-pass at 25 Hz
hammingLength = (hammingTime / 1000.0) * hz
# process each octave band
for f in octaveBands:
bands = str(octaveBands[:octaveBands.index(f) + 1]).strip('[]')
statusStr = "Octave band filtering audio at: " + bands
unitStr = "Hz ".rjust(80 - len(statusStr))
stdout.write(statusStr)
stdout.write(unitStr)
stdout.write('\r')
stdout.flush()
# filter the output at the octave band f
f1 = f / sqrt(2)
f2 = f * sqrt(2)
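        # f1 and f2 sit a half-octave below and above f (f2/f1 = 2), so each
        # band-pass spans one octave geometrically centred on f,
        # e.g. ~707-1414 Hz for the 1000 Hz band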
# for some odd reason the band-pass butterworth doesn't work right
# when the filter order is high (above 3). likely a SciPy issue?
# also, butter likes to complain about possibly useless results when
# calculating filter coefficients for high order (above 4) low-pass
# filters with relatively low knee frequencies (relative to nyquist F).
# perhaps I just don't know how digital butterworth filters work and
# their limitations but I think this is odd.
# the issue described here will be sent to their mailing list
if f < max(octaveBands):
with catch_warnings(): # suppress the spurious warnings given
simplefilter('ignore') # under certain conditions
b1,a1 = butter(butterOrd, f1/nyquist, btype='high')
b2,a2 = butter(butterOrd, f2/nyquist, btype='low')
filtOut = lfilter(b1, a1, audio) # high-pass raw audio at f1
filtOut = lfilter(b2, a2, filtOut) # low-pass after high-pass at f1
else:
with catch_warnings():
simplefilter('ignore')
b1,a1 = butter(butterOrd, f/nyquist, btype='high')
filtOut = lfilter(b1, a1, audio)
filtOut = array(filtOut)**2
b = firwin(hammingLength, 25.0, nyq=nyquist)
filtOut = lfilter(b, 1, filtOut)
filtOut = filtOut * -1.0
# stack-up octave band filtered audio
try:
octaveBandAudio = vstack((octaveBandAudio, filtOut))
except:
octaveBandAudio = filtOut
print
return octaveBandAudio
def octaveBandSpectra(filteredAudioBands, hz, fftRes=0.06):
"""
Calculate octave band power spectras
Input
-----
* filteredAudioBands : array-like
Octave band filtered audio
* hz : float or int
Audio sample rate in hertz. Must be the same for clean and dirty audio
* fftRes : float or int
Desired FFT frequency resolution
Output
------
* spectras : ndarray
Power spectra values
* fftfreqs : ndarray
Frequencies for FFT points
"""
# FFT window size for PSD calculation: 32768 for ~0.06 Hz res at 2 kHz
psdWindow = fftWindowSize(fftRes, hz)
print "Calculating octave band power spectras",
print "(FFT length:",psdWindow,"samples)"
for band in filteredAudioBands:
spectra, freqs = psd(band, NFFT=psdWindow, Fs=hz)
spectra = reshape(spectra, len(freqs)) # change to row vector
spectra = spectra / max(spectra) # scale to [0,1]
# stack-up octave band spectras
try:
spectras = vstack((spectras, spectra))
fftfreqs = vstack((fftfreqs, freqs))
except:
spectras = spectra
fftfreqs = freqs
return spectras, fftfreqs
def octaveBandCoherence(degrAudioBands, refAudioBands,
hz, fftRes=0.122):
"""
Calculate coherence between clean and degraded octave band audio
Input
-----
* degrAudioBands : array-like
Degraded octave band audio
* refAudioBands : array-like
Reference (clean) octave band audio
* hz : float or int
Audio sample rate. Must be common between clean and dirty audio
* fftRes : float or int
Desired FFT frequency resolution
Output
------
* coherences : ndarray
Coherence values
* fftfreqs : ndarray
Frequencies for FFT points
"""
# FFT window size for PSD calculation: 32768 for ~0.06 Hz res at 2 kHz
# Beware that 'cohere' isn't as forgiving as 'psd' with FFT lengths
# larger than half the length of the signal
psdWindow = fftWindowSize(fftRes, hz)
print "Calculating degraded and reference audio coherence",
print "(FFT length:",psdWindow,"samples)"
for i,band in enumerate(degrAudioBands):
with catch_warnings(): # catch and ignore spurious warnings
simplefilter('ignore') # due to some irrelevant divide by 0's
coherence, freqs = cohere(band, refAudioBands[i],
NFFT=psdWindow, Fs=hz)
# stack-up octave band spectras
try:
coherences = vstack((coherences, coherence))
fftfreqs = vstack((fftfreqs, freqs))
except:
coherences = coherence
fftfreqs = freqs
return coherences, fftfreqs
def thirdOctaveRootSum(spectras, fftfreqs, minFreq=0.25, maxFreq=25.0):
"""
Calculates square root of sum of spectra over 1/3 octave bands
Input
-----
* spectras : array-like
Array or list of octave band spectras
* fftfreqs : array-like
Array or list of octave band FFT frequencies
* minFreq : float
Min frequency in 1/3 octave bands
* maxFreq : float
Max frequency in 1/3 octave bands
Output
------
* thirdOctaveRootSums : ndarray
Square root of spectra sums over 1/3 octave intervals
"""
print "Calculating 1/3 octave square-rooted sums from",
print minFreq,"to",maxFreq,"Hz"
thirdOctaveBands = thirdOctaves(minFreq, maxFreq)
# loop over the spectras contained in 'spectras' and calculate 1/3 oct MTF
for i,spectra in enumerate(spectras):
freqs = fftfreqs[i] # get fft frequencies for spectra
# calculate the third octave sums
for f13 in thirdOctaveBands:
f131 = f13 / power(2, 1.0/6.0) # band start
f132 = f13 * power(2, 1.0/6.0) # band end
li = searchsorted(freqs, f131)
ui = searchsorted(freqs, f132) + 1
s = sum(spectra[li:ui]) # sum the spectral components in band
s = sqrt(s) # take square root of summed components
try:
sums = append(sums, s)
except:
sums = array([s])
# stack-up third octave modulation transfer functions
try:
thirdOctaveSums = vstack((thirdOctaveSums, sums))
except:
thirdOctaveSums = sums
        # remove temp 'sums' accumulator before the next octave band
del(sums)
return thirdOctaveSums
def thirdOctaveRMS(spectras, fftfreqs, minFreq=0.25, maxFreq=25.0):
"""
Calculates RMS value of spectra over 1/3 octave bands
Input
-----
* spectras : array-like
Array or list of octave band spectras
* fftfreqs : array-like
Array or list of octave band FFT frequencies
* minFreq : float
Min frequency in 1/3 octave bands
* maxFreq : float
Max frequency in 1/3 octave bands
Output
------
* thirdOctaveRMSValues : ndarray
RMS value of spectra over 1/3 octave intervals
"""
print "Calculating 1/3 octave RMS values from",
print minFreq,"to",maxFreq,"Hz"
thirdOctaveBands = thirdOctaves(minFreq, maxFreq)
# loop over the spectras contained in 'spectras' and calculate 1/3 oct MTF
for i,spectra in enumerate(spectras):
freqs = fftfreqs[i] # get fft frequencies for spectra
# calculate the third octave sums
for f13 in thirdOctaveBands:
f131 = f13 / power(2, 1.0/6.0) # band start
f132 = f13 * power(2, 1.0/6.0) # band end
li = searchsorted(freqs, f131)
ui = searchsorted(freqs, f132) + 1
s = sum(spectra[li:ui]**2) # sum the spectral components in band
s = s / len(spectra[li:ui]) # divide by length of sum
s = sqrt(s) # square root
try:
sums = append(sums, s)
except:
sums = array([s])
# stack-up third octave modulation transfer functions
try:
thirdOctaveRMSValues = vstack((thirdOctaveRMSValues, sums))
except:
thirdOctaveRMSValues = sums
        # remove temp 'sums' accumulator before the next octave band
del(sums)
return thirdOctaveRMSValues
def sti(modulations, coherences, minCoherence=0.8):
"""
Calculate the speech transmission index from third octave modulation
indices. The indices are truncated after coherence between clean and dirty
audio falls below 'minCoherence' or 0.8, by default.
Input
-----
* modulations : array-like
Modulation indices spaced at 1/3 octaves within each octave band
* coherences : array-like
Coherence between clean and dirty octave band filtered audio
* minCoherence : float
The minimum coherence to include a mod index in the STI computation
Output
------
* index : float
The speech transmission index (STI)
"""
# create masking array of zeroes
snrMask = zeros(modulations.shape, dtype=int)
# sort through coherence array and mask corresponding SNRs where coherence
# values fall below 'minCoherence' (0.8 in most cases and by default)
for i,band in enumerate(coherences):
lessThanMin = nonzero(band < minCoherence)[0]
if len(lessThanMin) >= 1:
discardAfter = min(lessThanMin)
snrMask[i][discardAfter:] = ones((len(snrMask[i][discardAfter:])))
modulations = clip(modulations, 0, 0.99) # clip to [0, 0.99] (max: ~1)
snr = 10*log10(modulations/(1 - modulations)) # estimate SNR
snr = clip(snr, -15, 15) # clip to [-15,15]
snr = masked_array(snr, mask=snrMask) # exclude values from sum
snrCounts = (snr / snr).sum(axis=1) # count SNRs
snrCounts = snrCounts.data # remove masking
octaveBandSNR = snr.sum(axis=1) / snrCounts # calc average SNR
alpha = 7 * (snrCounts / snrCounts.sum()) # calc alpha weight
# octave band weighting factors, Steeneken and Houtgast (1985)
w = [0.129, 0.143, 0.114, 0.114, 0.186, 0.171, 0.143]
# calculate the STI measure
snrp = alpha * w * octaveBandSNR
snrp = snrp.sum()
index = (snrp + 15) / 30.0
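    # the affine map above rescales the clipped [-15, +15] dB apparent-SNR
    # range onto an STI value in [0, 1]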
print "Speech Transmission Index (STI):",index
return index
def stiFromAudio(reference, degraded, hz, calcref=False, downsample=None,
name="unnamed"):
"""
Calculate the speech transmission index (STI) from clean and dirty
(ie: distorted) audio samples. The clean and dirty audio samples must have
a common sample rate for successful use of this function.
Input
-----
* reference : array-like
Clean reference audio sample as an array of floating-point values
* degraded : array-like
Degraded audio sample as an array, or array of arrays for multiple
samples, of floating-point values
* hz : int
Audio sample rate in hertz
* calcref : boolean
Calculate STI for reference signal alone
* downsample : int or None
Downsampling integer factor
* name : string
Name of sample set, for output tracking in larger runs
Output
------
* sti : array-like or float
The calculated speech transmission index (STI) value(s)
"""
# put single sample degraded array into another array so the loop works
if type(degraded) is not type([]):
degraded = [degraded]
print "-"*80
print "Speech Transmission Index (STI) from speech waveforms".center(80)
print "-"*80
print
print "Sample set: ",name
print "Number of samples: ",len(degraded)
print "Date/time: ",datetime.now().isoformat()
print "Calculate reference STI:",
if calcref:
print "yes"
else:
print "no"
print
print " Reference Speech ".center(80,'*')
refOctaveBands = octaveBandFilter(reference, hz)
refRate = hz
# downsampling, if desired
if type(downsample) is type(1):
refOctaveBands, refRate = downsampleBands(refOctaveBands, refRate,
downsample)
# calculate STI for reference sample, if boolean set
if calcref:
# STI calc procedure
spectras, sfreqs = octaveBandSpectra(refOctaveBands, refRate)
coherences, cfreqs = octaveBandCoherence(refOctaveBands, refOctaveBands,
refRate)
thirdOctaveMTF = thirdOctaveRootSum(spectras, sfreqs)
thirdOctaveCoherences = thirdOctaveRMS(coherences, cfreqs)
# add to interim array for MTFs and coherences
try:
thirdOctaveTemps.append([thirdOctaveMTF, thirdOctaveCoherences])
except:
thirdOctaveTemps = [[thirdOctaveMTF, thirdOctaveCoherences]]
print
# loop over degraded audio samples and calculate STIs
for j,sample in enumerate(degraded):
print " Degraded Speech: Sample {0} ".format(j + 1).center(80,'*')
degrOctaveBands = octaveBandFilter(sample, hz)
degrRate = hz
# downsampling, if desired
if type(downsample) is type(1):
degrOctaveBands, degrRate = downsampleBands(degrOctaveBands,
degrRate, downsample)
# STI calc procedure
spectras, sfreqs = octaveBandSpectra(degrOctaveBands, degrRate)
coherences, cfreqs = octaveBandCoherence(refOctaveBands,
degrOctaveBands, refRate)
thirdOctaveMTF = thirdOctaveRootSum(spectras, sfreqs)
thirdOctaveCoherences = thirdOctaveRMS(coherences, cfreqs)
# add to interim array for MTFs and coherences
try:
thirdOctaveTemps.append([thirdOctaveMTF, thirdOctaveCoherences])
except:
thirdOctaveTemps = [[thirdOctaveMTF, thirdOctaveCoherences]]
print
# calculate the STI values
print " Speech Transmission Index ".center(80,'*')
for i in range(0,len(thirdOctaveTemps)):
sampleSTI = sti(thirdOctaveTemps[i][0], thirdOctaveTemps[i][1])
# add to STI output array
try:
stiValues.append(sampleSTI)
except:
stiValues = [sampleSTI]
# unpack single value
if len(stiValues) == 1:
stiValues = stiValues[0]
print
return stiValues
def readwav(path):
"""
    Reads Microsoft WAV format audio files and scales integer sample values
    to [0,1]. Returns a tuple consisting of scaled WAV samples and sample rate
in hertz.
Input
-----
* path : string
Valid system path to file
Output
------
* audio : array-like
        Array of scaled samples
* rate : int
Audio sample rate in hertz
"""
wav = wavfile.read(path)
rate = wav[0]
audio = array(wav[1])
scale = float(max(audio))
audio = audio / scale
return audio, rate
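# Minimal usage sketch: the WAV file names below are hypothetical placeholders.
# Substitute real recordings; the reference and degraded files must share a
# common sample rate, as required by stiFromAudio().
if __name__ == '__main__':
    refAudio, refRate = readwav('reference.wav')    # hypothetical clean recording
    degrAudio, degrRate = readwav('degraded.wav')   # hypothetical degraded recording
    if refRate != degrRate:
        raise ValueError("reference and degraded sample rates must match")
    print(stiFromAudio(refAudio, degrAudio, refRate, name="example"))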
| gpl-3.0 |
nkhuyu/blaze | blaze/compute/tests/test_spark.py | 3 | 7842 | from __future__ import absolute_import, division, print_function
import pytest
pyspark = pytest.importorskip('pyspark')
import pandas as pd
from blaze import compute, symbol, summary, exp, by, join, merge
from toolz import identity
data = [['Alice', 100, 1],
['Bob', 200, 2],
['Alice', 50, 3]]
data2 = [['Alice', 'Austin'],
['Bob', 'Boston']]
df = pd.DataFrame(data, columns=['name', 'amount', 'id'])
# this only exists because we need to have a single session scoped spark
# context, otherwise these would simply be global variables
@pytest.fixture
def rdd(sc):
return sc.parallelize(data)
@pytest.fixture
def rdd2(sc):
return sc.parallelize(data2)
t = symbol('t', 'var * {name: string, amount: int, id: int}')
t2 = symbol('t2', 'var * {name: string, city: string}')
# Web Commons Graph Example data
data_idx = [['A', 1],
['B', 2],
['C', 3]]
data_arc = [[1, 3],
[2, 3],
[3, 1]]
t_idx = symbol('idx', 'var * {name: string, node_id: int32}')
t_arc = symbol('arc', 'var * {node_out: int32, node_id: int32}')
def test_spark_symbol(rdd):
assert compute(t, rdd) == rdd
def test_spark_projection(rdd):
assert compute(t['name'], rdd).collect() == [row[0] for row in data]
def test_spark_multicols_projection(rdd):
result = compute(t[['amount', 'name']], rdd).collect()
expected = [(100, 'Alice'), (200, 'Bob'), (50, 'Alice')]
print(result)
print(expected)
assert result == expected
inc = lambda x: x + 1
reduction_exprs = [
t['amount'].sum(),
t['amount'].min(),
t['amount'].max(),
t['amount'].nunique(),
t['name'].nunique(),
t['amount'].count(),
(t['amount'] > 150).any(),
(t['amount'] > 150).all(),
t['amount'].mean(),
t['amount'].var(),
summary(a=t.amount.sum(), b=t.id.count()),
t['amount'].std()]
def test_spark_reductions(rdd):
for expr in reduction_exprs:
result = compute(expr, rdd)
expected = compute(expr, data)
if not result == expected:
print(result)
print(expected)
if isinstance(result, float):
assert abs(result - expected) < 0.001
else:
assert result == expected
exprs = [
t['amount'],
t['amount'] == 100,
t['amount'].truncate(150),
t[t['name'] == 'Alice'],
t[t['amount'] == 0],
t[t['amount'] > 150],
t['amount'] + t['id'],
t['amount'] % t['id'],
exp(t['amount']),
by(t['name'], total=t['amount'].sum()),
by(t['name'], total=(t['amount'] + 1).sum()),
(t['amount'] * 1).label('foo'),
t.map(lambda tup: tup[1] + tup[2], 'real'),
t.like(name='Alice'),
t['amount'].apply(identity, 'var * real', splittable=True),
t['amount'].map(inc, 'int')]
def test_spark_basic(rdd):
check_exprs_against_python(exprs, data, rdd)
def check_exprs_against_python(exprs, data, rdd):
any_bad = False
for expr in exprs:
result = compute(expr, rdd).collect()
expected = list(compute(expr, data))
if not result == expected:
any_bad = True
print("Expression:", expr)
print("Spark:", result)
print("Python:", expected)
assert not any_bad
def test_spark_big_by(sc):
tbig = symbol(
'tbig', 'var * {name: string, sex: string[1], amount: int, id: int}')
big_exprs = [
by(tbig[['name', 'sex']], total=tbig['amount'].sum()),
by(tbig[['name', 'sex']], total=(tbig['id'] + tbig['amount']).sum())]
databig = [['Alice', 'F', 100, 1],
['Alice', 'F', 100, 3],
['Drew', 'F', 100, 4],
['Drew', 'M', 100, 5],
['Drew', 'M', 200, 5]]
rddbig = sc.parallelize(databig)
check_exprs_against_python(big_exprs, databig, rddbig)
def test_head(rdd):
assert list(compute(t.head(1), rdd)) == list(compute(t.head(1), data))
def test_sort(rdd):
check_exprs_against_python([
t.sort('amount'),
t.sort('amount', ascending=True),
t.sort(t['amount'], ascending=True),
t.sort(-t['amount'].label('foo') + 1, ascending=True),
t.sort(['amount', 'id'])], data, rdd)
def test_distinct(rdd):
assert set(compute(t['name'].distinct(), rdd).collect()) == \
set(['Alice', 'Bob'])
@pytest.mark.xfail(
raises=NotImplementedError,
reason='cannot specify columns to distinct on yet',
)
def test_distinct_on(rdd):
compute(t.distinct('name'), rdd)
def test_join(rdd, rdd2):
joined = join(t, t2, 'name')
expected = [('Alice', 100, 1, 'Austin'),
('Bob', 200, 2, 'Boston'),
('Alice', 50, 3, 'Austin')]
result = compute(joined, {t: rdd, t2: rdd2}).collect()
assert all(i in expected for i in result)
def test_multi_column_join(sc):
left = [(1, 2, 3),
(2, 3, 4),
(1, 3, 5)]
right = [(1, 2, 30),
(1, 3, 50),
(1, 3, 150)]
rleft = sc.parallelize(left)
rright = sc.parallelize(right)
L = symbol('L', 'var * {x: int, y: int, z: int}')
R = symbol('R', 'var * {x: int, y: int, w: int}')
j = join(L, R, ['x', 'y'])
result = compute(j, {L: rleft, R: rright})
expected = [(1, 2, 3, 30),
(1, 3, 5, 50),
(1, 3, 5, 150)]
assert set(result.collect()) == set(expected)
def test_groupby(sc):
rddidx = sc.parallelize(data_idx)
rddarc = sc.parallelize(data_arc)
joined = join(t_arc, t_idx, "node_id")
t = by(joined['name'], count=joined['node_id'].count())
a = compute(t, {t_arc: rddarc, t_idx: rddidx})
in_degree = dict(a.collect())
assert in_degree == {'A': 1, 'C': 2}
def test_multi_level_rowfunc_works(rdd):
expr = t['amount'].map(lambda x: x + 1, 'int')
assert compute(expr, rdd).collect() == [x[1] + 1 for x in data]
def test_merge(rdd):
col = (t['amount'] * 2).label('new')
expr = merge(t['name'], col)
assert compute(expr, rdd).collect() == [
(row[0], row[1] * 2) for row in data]
def test_selection_out_of_order(rdd):
expr = t['name'][t['amount'] < 100]
assert compute(expr, rdd).collect() == ['Alice']
def test_recursive_rowfunc_is_used(rdd):
expr = by(t['name'], total=(2 * (t['amount'] + t['id'])).sum())
expected = [('Alice', 2 * (101 + 53)),
('Bob', 2 * (202))]
assert set(compute(expr, rdd).collect()) == set(expected)
def test_outer_join(sc):
left = [(1, 'Alice', 100),
(2, 'Bob', 200),
(4, 'Dennis', 400)]
left = sc.parallelize(left)
right = [('NYC', 1),
('Boston', 1),
('LA', 3),
('Moscow', 4)]
right = sc.parallelize(right)
L = symbol('L', 'var * {id: int, name: string, amount: real}')
R = symbol('R', 'var * {city: string, id: int}')
assert set(compute(join(L, R), {L: left, R: right}).collect()) == set(
[(1, 'Alice', 100, 'NYC'),
(1, 'Alice', 100, 'Boston'),
(4, 'Dennis', 400, 'Moscow')])
assert set(compute(join(L, R, how='left'), {L: left, R: right}).collect()) == set(
[(1, 'Alice', 100, 'NYC'),
(1, 'Alice', 100, 'Boston'),
(2, 'Bob', 200, None),
(4, 'Dennis', 400, 'Moscow')])
assert set(compute(join(L, R, how='right'), {L: left, R: right}).collect()) == set(
[(1, 'Alice', 100, 'NYC'),
(1, 'Alice', 100, 'Boston'),
(3, None, None, 'LA'),
(4, 'Dennis', 400, 'Moscow')])
    # Full outer join
assert set(compute(join(L, R, how='outer'), {L: left, R: right}).collect()) == set(
[(1, 'Alice', 100, 'NYC'),
(1, 'Alice', 100, 'Boston'),
(2, 'Bob', 200, None),
(3, None, None, 'LA'),
(4, 'Dennis', 400, 'Moscow')])
| bsd-3-clause |
vivekmishra1991/scikit-learn | examples/calibration/plot_calibration_curve.py | 225 | 5903 | """
==============================
Probability Calibration curves
==============================
When performing classification one often wants to predict not only the class
label, but also the associated probability. This probability gives some
kind of confidence on the prediction. This example demonstrates how to display
how well calibrated the predicted probabilities are and how to calibrate an
uncalibrated classifier.
The experiment is performed on an artificial dataset for binary classification
with 100,000 samples (1,000 of them are used for model fitting) with 20
features. Of the 20 features, only 2 are informative and 10 are redundant. The
first figure shows the estimated probabilities obtained with logistic
regression, Gaussian naive Bayes, and Gaussian naive Bayes with both isotonic
calibration and sigmoid calibration. The calibration performance is evaluated
with Brier score, reported in the legend (the smaller the better). One can
observe here that logistic regression is well calibrated while raw Gaussian
naive Bayes performs very badly. This is because of the redundant features
which violate the assumption of feature-independence and result in an overly
confident classifier, which is indicated by the typical transposed-sigmoid
curve.
Calibration of the probabilities of Gaussian naive Bayes with isotonic
regression can fix this issue as can be seen from the nearly diagonal
calibration curve. Sigmoid calibration also improves the Brier score slightly,
albeit not as strongly as the non-parametric isotonic regression. This can be
attributed to the fact that we have plenty of calibration data such that the
greater flexibility of the non-parametric model can be exploited.
The second figure shows the calibration curve of a linear support-vector
classifier (LinearSVC). LinearSVC shows the opposite behavior to Gaussian
naive Bayes: the calibration curve has a sigmoid shape, which is typical for
an under-confident classifier. In the case of LinearSVC, this is caused by the
margin property of the hinge loss, which lets the model focus on hard samples
that are close to the decision boundary (the support vectors).
Both kinds of calibration can fix this issue and yield nearly identical
results. This shows that sigmoid calibration can deal with situations where
the calibration curve of the base classifier is sigmoid (e.g., for LinearSVC)
but not where it is transposed-sigmoid (e.g., Gaussian naive Bayes).
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# Jan Hendrik Metzen <[email protected]>
# License: BSD Style.
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import LinearSVC
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import (brier_score_loss, precision_score, recall_score,
f1_score)
from sklearn.calibration import CalibratedClassifierCV, calibration_curve
from sklearn.cross_validation import train_test_split
# Create dataset of classification task with many redundant and few
# informative features
X, y = datasets.make_classification(n_samples=100000, n_features=20,
n_informative=2, n_redundant=10,
random_state=42)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.99,
random_state=42)
def plot_calibration_curve(est, name, fig_index):
"""Plot calibration curve for est w/o and with calibration. """
# Calibrated with isotonic calibration
isotonic = CalibratedClassifierCV(est, cv=2, method='isotonic')
# Calibrated with sigmoid calibration
sigmoid = CalibratedClassifierCV(est, cv=2, method='sigmoid')
# Logistic regression with no calibration as baseline
lr = LogisticRegression(C=1., solver='lbfgs')
fig = plt.figure(fig_index, figsize=(10, 10))
ax1 = plt.subplot2grid((3, 1), (0, 0), rowspan=2)
ax2 = plt.subplot2grid((3, 1), (2, 0))
ax1.plot([0, 1], [0, 1], "k:", label="Perfectly calibrated")
for clf, name in [(lr, 'Logistic'),
(est, name),
(isotonic, name + ' + Isotonic'),
(sigmoid, name + ' + Sigmoid')]:
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
if hasattr(clf, "predict_proba"):
prob_pos = clf.predict_proba(X_test)[:, 1]
else: # use decision function
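            # decision_function scores are not probabilities, so min-max
            # scale them to [0, 1] below before binning with calibration_curve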
prob_pos = clf.decision_function(X_test)
prob_pos = \
(prob_pos - prob_pos.min()) / (prob_pos.max() - prob_pos.min())
clf_score = brier_score_loss(y_test, prob_pos, pos_label=y.max())
print("%s:" % name)
print("\tBrier: %1.3f" % (clf_score))
print("\tPrecision: %1.3f" % precision_score(y_test, y_pred))
print("\tRecall: %1.3f" % recall_score(y_test, y_pred))
print("\tF1: %1.3f\n" % f1_score(y_test, y_pred))
fraction_of_positives, mean_predicted_value = \
calibration_curve(y_test, prob_pos, n_bins=10)
ax1.plot(mean_predicted_value, fraction_of_positives, "s-",
label="%s (%1.3f)" % (name, clf_score))
ax2.hist(prob_pos, range=(0, 1), bins=10, label=name,
histtype="step", lw=2)
ax1.set_ylabel("Fraction of positives")
ax1.set_ylim([-0.05, 1.05])
ax1.legend(loc="lower right")
ax1.set_title('Calibration plots (reliability curve)')
ax2.set_xlabel("Mean predicted value")
ax2.set_ylabel("Count")
ax2.legend(loc="upper center", ncol=2)
plt.tight_layout()
# Plot calibration curve for Gaussian Naive Bayes
plot_calibration_curve(GaussianNB(), "Naive Bayes", 1)
# Plot calibration curve for Linear SVC
plot_calibration_curve(LinearSVC(), "SVC", 2)
plt.show()
| bsd-3-clause |
joernhees/scikit-learn | sklearn/gaussian_process/gaussian_process.py | 17 | 34869 | # -*- coding: utf-8 -*-
# Author: Vincent Dubourg <[email protected]>
# (mostly translation, see implementation details)
# License: BSD 3 clause
from __future__ import print_function
import numpy as np
from scipy import linalg, optimize
from ..base import BaseEstimator, RegressorMixin
from ..metrics.pairwise import manhattan_distances
from ..utils import check_random_state, check_array, check_X_y
from ..utils.validation import check_is_fitted
from . import regression_models as regression
from . import correlation_models as correlation
from ..utils import deprecated
MACHINE_EPSILON = np.finfo(np.double).eps
@deprecated("l1_cross_distances was deprecated in version 0.18 "
"and will be removed in 0.20.")
def l1_cross_distances(X):
"""
Computes the nonzero componentwise L1 cross-distances between the vectors
in X.
Parameters
----------
X : array_like
An array with shape (n_samples, n_features)
Returns
-------
D : array with shape (n_samples * (n_samples - 1) / 2, n_features)
The array of componentwise L1 cross-distances.
ij : arrays with shape (n_samples * (n_samples - 1) / 2, 2)
The indices i and j of the vectors in X associated to the cross-
distances in D: D[k] = np.abs(X[ij[k, 0]] - Y[ij[k, 1]]).
"""
X = check_array(X)
n_samples, n_features = X.shape
n_nonzero_cross_dist = n_samples * (n_samples - 1) // 2
ij = np.zeros((n_nonzero_cross_dist, 2), dtype=np.int)
D = np.zeros((n_nonzero_cross_dist, n_features))
ll_1 = 0
for k in range(n_samples - 1):
ll_0 = ll_1
ll_1 = ll_0 + n_samples - k - 1
ij[ll_0:ll_1, 0] = k
ij[ll_0:ll_1, 1] = np.arange(k + 1, n_samples)
D[ll_0:ll_1] = np.abs(X[k] - X[(k + 1):n_samples])
return D, ij
@deprecated("GaussianProcess was deprecated in version 0.18 and will be "
"removed in 0.20. Use the GaussianProcessRegressor instead.")
class GaussianProcess(BaseEstimator, RegressorMixin):
"""The legacy Gaussian Process model class.
.. deprecated:: 0.18
This class will be removed in 0.20.
Use the :class:`GaussianProcessRegressor` instead.
Read more in the :ref:`User Guide <gaussian_process>`.
Parameters
----------
regr : string or callable, optional
A regression function returning an array of outputs of the linear
regression functional basis. The number of observations n_samples
should be greater than the size p of this basis.
Default assumes a simple constant regression trend.
Available built-in regression models are::
'constant', 'linear', 'quadratic'
corr : string or callable, optional
A stationary autocorrelation function returning the autocorrelation
between two points x and x'.
Default assumes a squared-exponential autocorrelation model.
Built-in correlation models are::
'absolute_exponential', 'squared_exponential',
'generalized_exponential', 'cubic', 'linear'
beta0 : double array_like, optional
The regression weight vector to perform Ordinary Kriging (OK).
Default assumes Universal Kriging (UK) so that the vector beta of
regression weights is estimated using the maximum likelihood
principle.
storage_mode : string, optional
A string specifying whether the Cholesky decomposition of the
correlation matrix should be stored in the class (storage_mode =
'full') or not (storage_mode = 'light').
Default assumes storage_mode = 'full', so that the
Cholesky decomposition of the correlation matrix is stored.
This might be a useful parameter when one is not interested in the
MSE and only plan to estimate the BLUP, for which the correlation
matrix is not required.
verbose : boolean, optional
A boolean specifying the verbose level.
Default is verbose = False.
theta0 : double array_like, optional
An array with shape (n_features, ) or (1, ).
The parameters in the autocorrelation model.
If thetaL and thetaU are also specified, theta0 is considered as
the starting point for the maximum likelihood estimation of the
best set of parameters.
Default assumes isotropic autocorrelation model with theta0 = 1e-1.
thetaL : double array_like, optional
An array with shape matching theta0's.
Lower bound on the autocorrelation parameters for maximum
likelihood estimation.
Default is None, so that it skips maximum likelihood estimation and
it uses theta0.
thetaU : double array_like, optional
An array with shape matching theta0's.
Upper bound on the autocorrelation parameters for maximum
likelihood estimation.
Default is None, so that it skips maximum likelihood estimation and
it uses theta0.
normalize : boolean, optional
Input X and observations y are centered and reduced wrt
means and standard deviations estimated from the n_samples
observations provided.
Default is normalize = True so that data is normalized to ease
maximum likelihood estimation.
nugget : double or ndarray, optional
Introduce a nugget effect to allow smooth predictions from noisy
data. If nugget is an ndarray, it must be the same length as the
number of data points used for the fit.
The nugget is added to the diagonal of the assumed training covariance;
in this way it acts as a Tikhonov regularization in the problem. In
the special case of the squared exponential correlation function, the
nugget mathematically represents the variance of the input values.
Default assumes a nugget close to machine precision for the sake of
robustness (nugget = 10. * MACHINE_EPSILON).
optimizer : string, optional
A string specifying the optimization algorithm to be used.
Default uses 'fmin_cobyla' algorithm from scipy.optimize.
Available optimizers are::
'fmin_cobyla', 'Welch'
        'Welch' optimizer is due to Welch et al., see reference [WBSWM1992]_.
It consists in iterating over several one-dimensional optimizations
instead of running one single multi-dimensional optimization.
random_start : int, optional
The number of times the Maximum Likelihood Estimation should be
performed from a random starting point.
The first MLE always uses the specified starting point (theta0),
the next starting points are picked at random according to an
exponential distribution (log-uniform on [thetaL, thetaU]).
Default does not use random starting point (random_start = 1).
random_state : int, RandomState instance or None, optional (default=None)
The generator used to shuffle the sequence of coordinates of theta in
the Welch optimizer. If int, random_state is the seed used by the
random number generator; If RandomState instance, random_state is the
random number generator; If None, the random number generator is the
RandomState instance used by `np.random`.
Attributes
----------
theta_ : array
Specified theta OR the best set of autocorrelation parameters (the \
sought maximizer of the reduced likelihood function).
reduced_likelihood_function_value_ : array
The optimal reduced likelihood function value.
Examples
--------
>>> import numpy as np
>>> from sklearn.gaussian_process import GaussianProcess
>>> X = np.array([[1., 3., 5., 6., 7., 8.]]).T
>>> y = (X * np.sin(X)).ravel()
>>> gp = GaussianProcess(theta0=0.1, thetaL=.001, thetaU=1.)
>>> gp.fit(X, y) # doctest: +ELLIPSIS
GaussianProcess(beta0=None...
...
Notes
-----
    The present implementation is based on a translation of the DACE
Matlab toolbox, see reference [NLNS2002]_.
References
----------
.. [NLNS2002] `H.B. Nielsen, S.N. Lophaven, H. B. Nielsen and J.
Sondergaard. DACE - A MATLAB Kriging Toolbox.` (2002)
http://imedea.uib-csic.es/master/cambioglobal/Modulo_V_cod101615/Lab/lab_maps/krigging/DACE-krigingsoft/dace/dace.pdf
.. [WBSWM1992] `W.J. Welch, R.J. Buck, J. Sacks, H.P. Wynn, T.J. Mitchell,
and M.D. Morris (1992). Screening, predicting, and computer
experiments. Technometrics, 34(1) 15--25.`
http://www.jstor.org/stable/1269548
"""
_regression_types = {
'constant': regression.constant,
'linear': regression.linear,
'quadratic': regression.quadratic}
_correlation_types = {
'absolute_exponential': correlation.absolute_exponential,
'squared_exponential': correlation.squared_exponential,
'generalized_exponential': correlation.generalized_exponential,
'cubic': correlation.cubic,
'linear': correlation.linear}
_optimizer_types = [
'fmin_cobyla',
'Welch']
def __init__(self, regr='constant', corr='squared_exponential', beta0=None,
storage_mode='full', verbose=False, theta0=1e-1,
thetaL=None, thetaU=None, optimizer='fmin_cobyla',
random_start=1, normalize=True,
nugget=10. * MACHINE_EPSILON, random_state=None):
self.regr = regr
self.corr = corr
self.beta0 = beta0
self.storage_mode = storage_mode
self.verbose = verbose
self.theta0 = theta0
self.thetaL = thetaL
self.thetaU = thetaU
self.normalize = normalize
self.nugget = nugget
self.optimizer = optimizer
self.random_start = random_start
self.random_state = random_state
def fit(self, X, y):
"""
The Gaussian Process model fitting method.
Parameters
----------
X : double array_like
An array with shape (n_samples, n_features) with the input at which
observations were made.
y : double array_like
An array with shape (n_samples, ) or shape (n_samples, n_targets)
with the observations of the output to be predicted.
Returns
-------
gp : self
A fitted Gaussian Process model object awaiting data to perform
predictions.
"""
# Run input checks
self._check_params()
self.random_state = check_random_state(self.random_state)
# Force data to 2D numpy.array
X, y = check_X_y(X, y, multi_output=True, y_numeric=True)
self.y_ndim_ = y.ndim
if y.ndim == 1:
y = y[:, np.newaxis]
# Check shapes of DOE & observations
n_samples, n_features = X.shape
_, n_targets = y.shape
# Run input checks
self._check_params(n_samples)
# Normalize data or don't
if self.normalize:
X_mean = np.mean(X, axis=0)
X_std = np.std(X, axis=0)
y_mean = np.mean(y, axis=0)
y_std = np.std(y, axis=0)
X_std[X_std == 0.] = 1.
y_std[y_std == 0.] = 1.
# center and scale X if necessary
X = (X - X_mean) / X_std
y = (y - y_mean) / y_std
else:
X_mean = np.zeros(1)
X_std = np.ones(1)
y_mean = np.zeros(1)
y_std = np.ones(1)
# Calculate matrix of distances D between samples
D, ij = l1_cross_distances(X)
if (np.min(np.sum(D, axis=1)) == 0.
and self.corr != correlation.pure_nugget):
raise Exception("Multiple input features cannot have the same"
" target value.")
# Regression matrix and parameters
F = self.regr(X)
n_samples_F = F.shape[0]
if F.ndim > 1:
p = F.shape[1]
else:
p = 1
if n_samples_F != n_samples:
raise Exception("Number of rows in F and X do not match. Most "
"likely something is going wrong with the "
"regression model.")
if p > n_samples_F:
            raise Exception(("Ordinary least squares problem is underdetermined "
"n_samples=%d must be greater than the "
"regression model size p=%d.") % (n_samples, p))
if self.beta0 is not None:
if self.beta0.shape[0] != p:
raise Exception("Shapes of beta0 and F do not match.")
# Set attributes
self.X = X
self.y = y
self.D = D
self.ij = ij
self.F = F
self.X_mean, self.X_std = X_mean, X_std
self.y_mean, self.y_std = y_mean, y_std
# Determine Gaussian Process model parameters
if self.thetaL is not None and self.thetaU is not None:
# Maximum Likelihood Estimation of the parameters
if self.verbose:
print("Performing Maximum Likelihood Estimation of the "
"autocorrelation parameters...")
self.theta_, self.reduced_likelihood_function_value_, par = \
self._arg_max_reduced_likelihood_function()
if np.isinf(self.reduced_likelihood_function_value_):
raise Exception("Bad parameter region. "
"Try increasing upper bound")
else:
# Given parameters
if self.verbose:
print("Given autocorrelation parameters. "
"Computing Gaussian Process model parameters...")
self.theta_ = self.theta0
self.reduced_likelihood_function_value_, par = \
self.reduced_likelihood_function()
if np.isinf(self.reduced_likelihood_function_value_):
raise Exception("Bad point. Try increasing theta0.")
self.beta = par['beta']
self.gamma = par['gamma']
self.sigma2 = par['sigma2']
self.C = par['C']
self.Ft = par['Ft']
self.G = par['G']
if self.storage_mode == 'light':
# Delete heavy data (it will be computed again if required)
# (it is required only when MSE is wanted in self.predict)
if self.verbose:
print("Light storage mode specified. "
"Flushing autocorrelation matrix...")
self.D = None
self.ij = None
self.F = None
self.C = None
self.Ft = None
self.G = None
return self
def predict(self, X, eval_MSE=False, batch_size=None):
"""
This function evaluates the Gaussian Process model at x.
Parameters
----------
X : array_like
An array with shape (n_eval, n_features) giving the point(s) at
which the prediction(s) should be made.
eval_MSE : boolean, optional
A boolean specifying whether the Mean Squared Error should be
evaluated or not.
Default assumes evalMSE = False and evaluates only the BLUP (mean
prediction).
batch_size : integer, optional
An integer giving the maximum number of points that can be
evaluated simultaneously (depending on the available memory).
Default is None so that all given points are evaluated at the same
time.
Returns
-------
y : array_like, shape (n_samples, ) or (n_samples, n_targets)
An array with shape (n_eval, ) if the Gaussian Process was trained
on an array of shape (n_samples, ) or an array with shape
(n_eval, n_targets) if the Gaussian Process was trained on an array
of shape (n_samples, n_targets) with the Best Linear Unbiased
Prediction at x.
MSE : array_like, optional (if eval_MSE == True)
An array with shape (n_eval, ) or (n_eval, n_targets) as with y,
with the Mean Squared Error at x.
"""
check_is_fitted(self, "X")
# Check input shapes
X = check_array(X)
n_eval, _ = X.shape
n_samples, n_features = self.X.shape
n_samples_y, n_targets = self.y.shape
# Run input checks
self._check_params(n_samples)
if X.shape[1] != n_features:
raise ValueError(("The number of features in X (X.shape[1] = %d) "
"should match the number of features used "
"for fit() "
"which is %d.") % (X.shape[1], n_features))
if batch_size is None:
# No memory management
# (evaluates all given points in a single batch run)
# Normalize input
X = (X - self.X_mean) / self.X_std
# Initialize output
y = np.zeros(n_eval)
if eval_MSE:
MSE = np.zeros(n_eval)
# Get pairwise componentwise L1-distances to the input training set
dx = manhattan_distances(X, Y=self.X, sum_over_features=False)
# Get regression function and correlation
f = self.regr(X)
r = self.corr(self.theta_, dx).reshape(n_eval, n_samples)
# Scaled predictor
y_ = np.dot(f, self.beta) + np.dot(r, self.gamma)
# Predictor
y = (self.y_mean + self.y_std * y_).reshape(n_eval, n_targets)
if self.y_ndim_ == 1:
y = y.ravel()
# Mean Squared Error
if eval_MSE:
C = self.C
if C is None:
# Light storage mode (need to recompute C, F, Ft and G)
if self.verbose:
print("This GaussianProcess used 'light' storage mode "
"at instantiation. Need to recompute "
"autocorrelation matrix...")
reduced_likelihood_function_value, par = \
self.reduced_likelihood_function()
self.C = par['C']
self.Ft = par['Ft']
self.G = par['G']
rt = linalg.solve_triangular(self.C, r.T, lower=True)
if self.beta0 is None:
# Universal Kriging
u = linalg.solve_triangular(self.G.T,
np.dot(self.Ft.T, rt) - f.T,
lower=True)
else:
# Ordinary Kriging
u = np.zeros((n_targets, n_eval))
MSE = np.dot(self.sigma2.reshape(n_targets, 1),
(1. - (rt ** 2.).sum(axis=0)
+ (u ** 2.).sum(axis=0))[np.newaxis, :])
MSE = np.sqrt((MSE ** 2.).sum(axis=0) / n_targets)
# Mean Squared Error might be slightly negative depending on
# machine precision: force to zero!
MSE[MSE < 0.] = 0.
if self.y_ndim_ == 1:
MSE = MSE.ravel()
return y, MSE
else:
return y
else:
# Memory management
if type(batch_size) is not int or batch_size <= 0:
raise Exception("batch_size must be a positive integer")
if eval_MSE:
y, MSE = np.zeros(n_eval), np.zeros(n_eval)
for k in range(max(1, int(n_eval / batch_size))):
batch_from = k * batch_size
batch_to = min([(k + 1) * batch_size + 1, n_eval + 1])
y[batch_from:batch_to], MSE[batch_from:batch_to] = \
self.predict(X[batch_from:batch_to],
eval_MSE=eval_MSE, batch_size=None)
return y, MSE
else:
y = np.zeros(n_eval)
for k in range(max(1, int(n_eval / batch_size))):
batch_from = k * batch_size
batch_to = min([(k + 1) * batch_size + 1, n_eval + 1])
y[batch_from:batch_to] = \
self.predict(X[batch_from:batch_to],
eval_MSE=eval_MSE, batch_size=None)
return y
def reduced_likelihood_function(self, theta=None):
"""
This function determines the BLUP parameters and evaluates the reduced
likelihood function for the given autocorrelation parameters theta.
Maximizing this function wrt the autocorrelation parameters theta is
equivalent to maximizing the likelihood of the assumed joint Gaussian
distribution of the observations y evaluated onto the design of
experiments X.
Parameters
----------
theta : array_like, optional
An array containing the autocorrelation parameters at which the
Gaussian Process model parameters should be determined.
Default uses the built-in autocorrelation parameters
(ie ``theta = self.theta_``).
Returns
-------
reduced_likelihood_function_value : double
The value of the reduced likelihood function associated to the
given autocorrelation parameters theta.
par : dict
A dictionary containing the requested Gaussian Process model
parameters:
sigma2
Gaussian Process variance.
beta
Generalized least-squares regression weights for
Universal Kriging or given beta0 for Ordinary
Kriging.
gamma
Gaussian Process weights.
C
Cholesky decomposition of the correlation matrix [R].
Ft
Solution of the linear equation system : [R] x Ft = F
G
QR decomposition of the matrix Ft.
"""
check_is_fitted(self, "X")
if theta is None:
# Use built-in autocorrelation parameters
theta = self.theta_
# Initialize output
reduced_likelihood_function_value = - np.inf
par = {}
# Retrieve data
n_samples = self.X.shape[0]
D = self.D
ij = self.ij
F = self.F
if D is None:
# Light storage mode (need to recompute D, ij and F)
D, ij = l1_cross_distances(self.X)
if (np.min(np.sum(D, axis=1)) == 0.
and self.corr != correlation.pure_nugget):
raise Exception("Multiple X are not allowed")
F = self.regr(self.X)
# Set up R
r = self.corr(theta, D)
R = np.eye(n_samples) * (1. + self.nugget)
R[ij[:, 0], ij[:, 1]] = r
R[ij[:, 1], ij[:, 0]] = r
# Cholesky decomposition of R
try:
C = linalg.cholesky(R, lower=True)
except linalg.LinAlgError:
return reduced_likelihood_function_value, par
# Get generalized least squares solution
Ft = linalg.solve_triangular(C, F, lower=True)
Q, G = linalg.qr(Ft, mode='economic')
sv = linalg.svd(G, compute_uv=False)
rcondG = sv[-1] / sv[0]
if rcondG < 1e-10:
# Check F
sv = linalg.svd(F, compute_uv=False)
condF = sv[0] / sv[-1]
if condF > 1e15:
raise Exception("F is too ill conditioned. Poor combination "
"of regression model and observations.")
else:
# Ft is too ill conditioned, get out (try different theta)
return reduced_likelihood_function_value, par
Yt = linalg.solve_triangular(C, self.y, lower=True)
if self.beta0 is None:
# Universal Kriging
beta = linalg.solve_triangular(G, np.dot(Q.T, Yt))
else:
# Ordinary Kriging
beta = np.array(self.beta0)
rho = Yt - np.dot(Ft, beta)
sigma2 = (rho ** 2.).sum(axis=0) / n_samples
# The determinant of R is equal to the squared product of the diagonal
# elements of its Cholesky decomposition C
detR = (np.diag(C) ** (2. / n_samples)).prod()
# Compute/Organize output
reduced_likelihood_function_value = - sigma2.sum() * detR
par['sigma2'] = sigma2 * self.y_std ** 2.
par['beta'] = beta
par['gamma'] = linalg.solve_triangular(C.T, rho)
par['C'] = C
par['Ft'] = Ft
par['G'] = G
return reduced_likelihood_function_value, par
def _arg_max_reduced_likelihood_function(self):
"""
This function estimates the autocorrelation parameters theta as the
maximizer of the reduced likelihood function.
(Minimization of the opposite reduced likelihood function is used for
convenience)
Parameters
----------
self : All parameters are stored in the Gaussian Process model object.
Returns
-------
optimal_theta : array_like
The best set of autocorrelation parameters (the sought maximizer of
the reduced likelihood function).
optimal_reduced_likelihood_function_value : double
The optimal reduced likelihood function value.
optimal_par : dict
The BLUP parameters associated to thetaOpt.
"""
# Initialize output
best_optimal_theta = []
best_optimal_rlf_value = []
best_optimal_par = []
if self.verbose:
print("The chosen optimizer is: " + str(self.optimizer))
if self.random_start > 1:
print(str(self.random_start) + " random starts are required.")
percent_completed = 0.
# Force optimizer to fmin_cobyla if the model is meant to be isotropic
if self.optimizer == 'Welch' and self.theta0.size == 1:
self.optimizer = 'fmin_cobyla'
if self.optimizer == 'fmin_cobyla':
def minus_reduced_likelihood_function(log10t):
return - self.reduced_likelihood_function(
theta=10. ** log10t)[0]
constraints = []
for i in range(self.theta0.size):
constraints.append(lambda log10t, i=i:
log10t[i] - np.log10(self.thetaL[0, i]))
constraints.append(lambda log10t, i=i:
np.log10(self.thetaU[0, i]) - log10t[i])
for k in range(self.random_start):
if k == 0:
# Use specified starting point as first guess
theta0 = self.theta0
else:
# Generate a random starting point log10-uniformly
# distributed between bounds
log10theta0 = (np.log10(self.thetaL)
+ self.random_state.rand(*self.theta0.shape)
* np.log10(self.thetaU / self.thetaL))
theta0 = 10. ** log10theta0
# Run Cobyla
try:
log10_optimal_theta = \
optimize.fmin_cobyla(minus_reduced_likelihood_function,
np.log10(theta0).ravel(), constraints,
iprint=0)
except ValueError as ve:
print("Optimization failed. Try increasing the ``nugget``")
raise ve
optimal_theta = 10. ** log10_optimal_theta
optimal_rlf_value, optimal_par = \
self.reduced_likelihood_function(theta=optimal_theta)
# Compare the new optimizer to the best previous one
if k > 0:
if optimal_rlf_value > best_optimal_rlf_value:
best_optimal_rlf_value = optimal_rlf_value
best_optimal_par = optimal_par
best_optimal_theta = optimal_theta
else:
best_optimal_rlf_value = optimal_rlf_value
best_optimal_par = optimal_par
best_optimal_theta = optimal_theta
if self.verbose and self.random_start > 1:
if (20 * k) / self.random_start > percent_completed:
percent_completed = (20 * k) / self.random_start
print("%s completed" % (5 * percent_completed))
optimal_rlf_value = best_optimal_rlf_value
optimal_par = best_optimal_par
optimal_theta = best_optimal_theta
elif self.optimizer == 'Welch':
# Backup of the given attributes
theta0, thetaL, thetaU = self.theta0, self.thetaL, self.thetaU
corr = self.corr
verbose = self.verbose
# This will iterate over fmin_cobyla optimizer
self.optimizer = 'fmin_cobyla'
self.verbose = False
# Initialize under isotropy assumption
if verbose:
print("Initialize under isotropy assumption...")
self.theta0 = check_array(self.theta0.min())
self.thetaL = check_array(self.thetaL.min())
self.thetaU = check_array(self.thetaU.max())
theta_iso, optimal_rlf_value_iso, par_iso = \
self._arg_max_reduced_likelihood_function()
optimal_theta = theta_iso + np.zeros(theta0.shape)
# Iterate over all dimensions of theta allowing for anisotropy
if verbose:
print("Now improving allowing for anisotropy...")
for i in self.random_state.permutation(theta0.size):
if verbose:
print("Proceeding along dimension %d..." % (i + 1))
self.theta0 = check_array(theta_iso)
self.thetaL = check_array(thetaL[0, i])
self.thetaU = check_array(thetaU[0, i])
def corr_cut(t, d):
return corr(check_array(np.hstack([optimal_theta[0][0:i],
t[0],
optimal_theta[0][(i +
1)::]])),
d)
self.corr = corr_cut
optimal_theta[0, i], optimal_rlf_value, optimal_par = \
self._arg_max_reduced_likelihood_function()
# Restore the given attributes
self.theta0, self.thetaL, self.thetaU = theta0, thetaL, thetaU
self.corr = corr
self.optimizer = 'Welch'
self.verbose = verbose
else:
raise NotImplementedError("This optimizer ('%s') is not "
"implemented yet. Please contribute!"
% self.optimizer)
return optimal_theta, optimal_rlf_value, optimal_par
def _check_params(self, n_samples=None):
# Check regression model
if not callable(self.regr):
if self.regr in self._regression_types:
self.regr = self._regression_types[self.regr]
else:
raise ValueError("regr should be one of %s or callable, "
"%s was given."
% (self._regression_types.keys(), self.regr))
# Check regression weights if given (Ordinary Kriging)
if self.beta0 is not None:
self.beta0 = np.atleast_2d(self.beta0)
if self.beta0.shape[1] != 1:
# Force to column vector
self.beta0 = self.beta0.T
# Check correlation model
if not callable(self.corr):
if self.corr in self._correlation_types:
self.corr = self._correlation_types[self.corr]
else:
raise ValueError("corr should be one of %s or callable, "
"%s was given."
% (self._correlation_types.keys(), self.corr))
# Check storage mode
if self.storage_mode != 'full' and self.storage_mode != 'light':
raise ValueError("Storage mode should either be 'full' or "
"'light', %s was given." % self.storage_mode)
# Check correlation parameters
self.theta0 = np.atleast_2d(self.theta0)
lth = self.theta0.size
if self.thetaL is not None and self.thetaU is not None:
self.thetaL = np.atleast_2d(self.thetaL)
self.thetaU = np.atleast_2d(self.thetaU)
if self.thetaL.size != lth or self.thetaU.size != lth:
raise ValueError("theta0, thetaL and thetaU must have the "
"same length.")
if np.any(self.thetaL <= 0) or np.any(self.thetaU < self.thetaL):
                raise ValueError("The bounds must satisfy 0 < thetaL <= "
"thetaU.")
elif self.thetaL is None and self.thetaU is None:
if np.any(self.theta0 <= 0):
raise ValueError("theta0 must be strictly positive.")
elif self.thetaL is None or self.thetaU is None:
raise ValueError("thetaL and thetaU should either be both or "
"neither specified.")
# Force verbose type to bool
self.verbose = bool(self.verbose)
# Force normalize type to bool
self.normalize = bool(self.normalize)
# Check nugget value
self.nugget = np.asarray(self.nugget)
        if np.any(self.nugget < 0.):
raise ValueError("nugget must be positive or zero.")
if (n_samples is not None
and self.nugget.shape not in [(), (n_samples,)]):
raise ValueError("nugget must be either a scalar "
"or array of length n_samples.")
# Check optimizer
if self.optimizer not in self._optimizer_types:
raise ValueError("optimizer should be one of %s"
% self._optimizer_types)
# Force random_start type to int
self.random_start = int(self.random_start)
| bsd-3-clause |
jaidevd/scikit-learn | sklearn/datasets/lfw.py | 15 | 18695 | """Loader for the Labeled Faces in the Wild (LFW) dataset
This dataset is a collection of JPEG pictures of famous people collected
over the internet, all details are available on the official website:
http://vis-www.cs.umass.edu/lfw/
Each picture is centered on a single face. The typical task is called
Face Verification: given a pair of pictures, a binary classifier
must predict whether the two images are from the same person.
An alternative task, Face Recognition or Face Identification is:
given the picture of the face of an unknown person, identify the name
of the person by referring to a gallery of previously seen pictures of
identified persons.
Both Face Verification and Face Recognition are tasks that are typically
performed on the output of a model trained to perform Face Detection. The
most popular model for Face Detection is called Viola-Jones and is
implemented in the OpenCV library. The LFW faces were extracted by this face
detector from various online websites.
"""
# Copyright (c) 2011 Olivier Grisel <[email protected]>
# License: BSD 3 clause
from os import listdir, makedirs, remove, rename
from os.path import join, exists, isdir
import logging
import numpy as np
try:
import urllib.request as urllib # for backwards compatibility
except ImportError:
import urllib
from .base import get_data_home, Bunch
from ..externals.joblib import Memory
from ..externals.six import b
logger = logging.getLogger(__name__)
BASE_URL = "http://vis-www.cs.umass.edu/lfw/"
ARCHIVE_NAME = "lfw.tgz"
FUNNELED_ARCHIVE_NAME = "lfw-funneled.tgz"
TARGET_FILENAMES = [
'pairsDevTrain.txt',
'pairsDevTest.txt',
'pairs.txt',
]
def scale_face(face):
"""Scale back to 0-1 range in case of normalization for plotting"""
scaled = face - face.min()
scaled /= scaled.max()
return scaled
#
# Common private utilities for data fetching from the original LFW website
# local disk caching, and image decoding.
#
def check_fetch_lfw(data_home=None, funneled=True, download_if_missing=True):
"""Helper function to download any missing LFW data"""
data_home = get_data_home(data_home=data_home)
lfw_home = join(data_home, "lfw_home")
if funneled:
archive_path = join(lfw_home, FUNNELED_ARCHIVE_NAME)
data_folder_path = join(lfw_home, "lfw_funneled")
archive_url = BASE_URL + FUNNELED_ARCHIVE_NAME
else:
archive_path = join(lfw_home, ARCHIVE_NAME)
data_folder_path = join(lfw_home, "lfw")
archive_url = BASE_URL + ARCHIVE_NAME
if not exists(lfw_home):
makedirs(lfw_home)
for target_filename in TARGET_FILENAMES:
target_filepath = join(lfw_home, target_filename)
if not exists(target_filepath):
if download_if_missing:
url = BASE_URL + target_filename
logger.warning("Downloading LFW metadata: %s", url)
urllib.urlretrieve(url, target_filepath)
else:
raise IOError("%s is missing" % target_filepath)
if not exists(data_folder_path):
if not exists(archive_path):
if download_if_missing:
archive_path_temp = archive_path + ".tmp"
logger.warning("Downloading LFW data (~200MB): %s",
archive_url)
urllib.urlretrieve(archive_url, archive_path_temp)
rename(archive_path_temp, archive_path)
else:
raise IOError("%s is missing" % target_filepath)
import tarfile
logger.info("Decompressing the data archive to %s", data_folder_path)
tarfile.open(archive_path, "r:gz").extractall(path=lfw_home)
remove(archive_path)
return lfw_home, data_folder_path
def _load_imgs(file_paths, slice_, color, resize):
"""Internally used to load images"""
# Try to import imread and imresize from PIL. We do this here to prevent
# the whole sklearn.datasets module from depending on PIL.
try:
try:
from scipy.misc import imread
except ImportError:
from scipy.misc.pilutil import imread
from scipy.misc import imresize
except ImportError:
raise ImportError("The Python Imaging Library (PIL)"
" is required to load data from jpeg files")
# compute the portion of the images to load to respect the slice_ parameter
# given by the caller
default_slice = (slice(0, 250), slice(0, 250))
if slice_ is None:
slice_ = default_slice
else:
slice_ = tuple(s or ds for s, ds in zip(slice_, default_slice))
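    # infer the output image height / width implied by the requested slices
    # (accounting for a possible step) before any optional resizing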
h_slice, w_slice = slice_
h = (h_slice.stop - h_slice.start) // (h_slice.step or 1)
w = (w_slice.stop - w_slice.start) // (w_slice.step or 1)
if resize is not None:
resize = float(resize)
h = int(resize * h)
w = int(resize * w)
# allocate some contiguous memory to host the decoded image slices
n_faces = len(file_paths)
if not color:
faces = np.zeros((n_faces, h, w), dtype=np.float32)
else:
faces = np.zeros((n_faces, h, w, 3), dtype=np.float32)
# iterate over the collected file path to load the jpeg files as numpy
# arrays
for i, file_path in enumerate(file_paths):
if i % 1000 == 0:
logger.info("Loading face #%05d / %05d", i + 1, n_faces)
# Checks if jpeg reading worked. Refer to issue #3594 for more
# details.
img = imread(file_path)
        if img.ndim == 0:
raise RuntimeError("Failed to read the image file %s, "
"Please make sure that libjpeg is installed"
% file_path)
face = np.asarray(img[slice_], dtype=np.float32)
face /= 255.0 # scale uint8 coded colors to the [0.0, 1.0] floats
if resize is not None:
face = imresize(face, resize)
if not color:
# average the color channels to compute a gray levels
# representation
face = face.mean(axis=2)
faces[i, ...] = face
return faces
#
# Task #1: Face Identification on picture with names
#
def _fetch_lfw_people(data_folder_path, slice_=None, color=False, resize=None,
min_faces_per_person=0):
"""Perform the actual data loading for the lfw people dataset
This operation is meant to be cached by a joblib wrapper.
"""
    # scan the data folder content to retain people with more than
# `min_faces_per_person` face pictures
person_names, file_paths = [], []
for person_name in sorted(listdir(data_folder_path)):
folder_path = join(data_folder_path, person_name)
if not isdir(folder_path):
continue
paths = [join(folder_path, f) for f in sorted(listdir(folder_path))]
n_pictures = len(paths)
if n_pictures >= min_faces_per_person:
person_name = person_name.replace('_', ' ')
person_names.extend([person_name] * n_pictures)
file_paths.extend(paths)
n_faces = len(file_paths)
if n_faces == 0:
raise ValueError("min_faces_per_person=%d is too restrictive" %
min_faces_per_person)
target_names = np.unique(person_names)
target = np.searchsorted(target_names, person_names)
faces = _load_imgs(file_paths, slice_, color, resize)
# shuffle the faces with a deterministic RNG scheme to avoid having
# all faces of the same person in a row, as it would break some
# cross validation and learning algorithms such as SGD and online
# k-means that make an IID assumption
indices = np.arange(n_faces)
np.random.RandomState(42).shuffle(indices)
faces, target = faces[indices], target[indices]
return faces, target, target_names
def fetch_lfw_people(data_home=None, funneled=True, resize=0.5,
min_faces_per_person=0, color=False,
slice_=(slice(70, 195), slice(78, 172)),
download_if_missing=True):
"""Loader for the Labeled Faces in the Wild (LFW) people dataset
This dataset is a collection of JPEG pictures of famous people
collected on the internet, all details are available on the
official website:
http://vis-www.cs.umass.edu/lfw/
Each picture is centered on a single face. Each pixel of each channel
(color in RGB) is encoded by a float in range 0.0 - 1.0.
The task is called Face Recognition (or Identification): given the
picture of a face, find the name of the person given a training set
(gallery).
The original images are 250 x 250 pixels, but the default slice and resize
arguments reduce them to 62 x 47.
Parameters
----------
data_home : optional, default: None
Specify another download and cache folder for the datasets. By default
all scikit learn data is stored in '~/scikit_learn_data' subfolders.
funneled : boolean, optional, default: True
Download and use the funneled variant of the dataset.
resize : float, optional, default 0.5
        Ratio used to resize each face picture.
    min_faces_per_person : int, optional, default 0
The extracted dataset will only retain pictures of people that have at
least `min_faces_per_person` different pictures.
color : boolean, optional, default False
Keep the 3 RGB channels instead of averaging them to a single
gray level channel. If color is True the shape of the data has
one more dimension than the shape with color = False.
slice_ : optional
Provide a custom 2D slice (height, width) to extract the
        'interesting' part of the jpeg files and avoid spurious statistical
correlation from the background
download_if_missing : optional, True by default
        If False, raise an IOError if the data is not locally available
instead of trying to download the data from the source site.
Returns
-------
dataset : dict-like object with the following attributes:
dataset.data : numpy array of shape (13233, 2914)
Each row corresponds to a ravelled face image of original size 62 x 47
pixels. Changing the ``slice_`` or resize parameters will change the
shape of the output.
dataset.images : numpy array of shape (13233, 62, 47)
Each row is a face image corresponding to one of the 5749 people in
the dataset. Changing the ``slice_`` or resize parameters will change
the shape of the output.
dataset.target : numpy array of shape (13233,)
        Labels associated with each face image. Those labels range from 0-5748
and correspond to the person IDs.
dataset.DESCR : string
Description of the Labeled Faces in the Wild (LFW) dataset.
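    Examples
    --------
    A minimal usage sketch (the archive is downloaded on first call; the
    shapes shown are indicative for ``min_faces_per_person=70`` and
    ``resize=0.4``):
    >>> from sklearn.datasets import fetch_lfw_people
    >>> lfw_people = fetch_lfw_people(min_faces_per_person=70,
    ...                               resize=0.4)   # doctest: +SKIP
    >>> lfw_people.images.shape  # doctest: +SKIP
    (1288, 50, 37)
    >>> lfw_people.target_names.shape  # doctest: +SKIP
    (7,)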
"""
lfw_home, data_folder_path = check_fetch_lfw(
data_home=data_home, funneled=funneled,
download_if_missing=download_if_missing)
logger.info('Loading LFW people faces from %s', lfw_home)
# wrap the loader in a memoizing function that will return memmaped data
# arrays for optimal memory usage
m = Memory(cachedir=lfw_home, compress=6, verbose=0)
load_func = m.cache(_fetch_lfw_people)
# load and memoize the pairs as np arrays
faces, target, target_names = load_func(
data_folder_path, resize=resize,
min_faces_per_person=min_faces_per_person, color=color, slice_=slice_)
# pack the results as a Bunch instance
return Bunch(data=faces.reshape(len(faces), -1), images=faces,
target=target, target_names=target_names,
DESCR="LFW faces dataset")
#
# Task #2: Face Verification on pairs of face pictures
#
def _fetch_lfw_pairs(index_file_path, data_folder_path, slice_=None,
color=False, resize=None):
"""Perform the actual data loading for the LFW pairs dataset
This operation is meant to be cached by a joblib wrapper.
"""
# parse the index file to find the number of pairs to be able to allocate
# the right amount of memory before starting to decode the jpeg files
with open(index_file_path, 'rb') as index_file:
split_lines = [ln.strip().split(b('\t')) for ln in index_file]
pair_specs = [sl for sl in split_lines if len(sl) > 2]
n_pairs = len(pair_specs)
# iterating over the metadata lines for each pair to find the filename to
# decode and load in memory
target = np.zeros(n_pairs, dtype=np.int)
file_paths = list()
for i, components in enumerate(pair_specs):
if len(components) == 3:
target[i] = 1
pair = (
(components[0], int(components[1]) - 1),
(components[0], int(components[2]) - 1),
)
elif len(components) == 4:
target[i] = 0
pair = (
(components[0], int(components[1]) - 1),
(components[2], int(components[3]) - 1),
)
else:
raise ValueError("invalid line %d: %r" % (i + 1, components))
for j, (name, idx) in enumerate(pair):
try:
person_folder = join(data_folder_path, name)
except TypeError:
person_folder = join(data_folder_path, str(name, 'UTF-8'))
filenames = list(sorted(listdir(person_folder)))
file_path = join(person_folder, filenames[idx])
file_paths.append(file_path)
pairs = _load_imgs(file_paths, slice_, color, resize)
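    # Reshape the flat (2 * n_pairs, ...) stack of decoded images into
    # (n_pairs, 2, ...) so that each row holds the two faces of one pair.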
shape = list(pairs.shape)
n_faces = shape.pop(0)
shape.insert(0, 2)
shape.insert(0, n_faces // 2)
pairs.shape = shape
return pairs, target, np.array(['Different persons', 'Same person'])
def fetch_lfw_pairs(subset='train', data_home=None, funneled=True, resize=0.5,
color=False, slice_=(slice(70, 195), slice(78, 172)),
download_if_missing=True):
"""Loader for the Labeled Faces in the Wild (LFW) pairs dataset
This dataset is a collection of JPEG pictures of famous people
collected on the internet, all details are available on the
official website:
http://vis-www.cs.umass.edu/lfw/
Each picture is centered on a single face. Each pixel of each channel
(color in RGB) is encoded by a float in range 0.0 - 1.0.
    The task is called Face Verification: given a pair of pictures,
a binary classifier must predict whether the two images are from
the same person.
    In the official `README.txt`_ this task is described as the
    "Restricted" task. As I am not sure how to implement the
    "Unrestricted" variant correctly, I have left it unsupported for now.
.. _`README.txt`: http://vis-www.cs.umass.edu/lfw/README.txt
The original images are 250 x 250 pixels, but the default slice and resize
arguments reduce them to 62 x 47.
Read more in the :ref:`User Guide <labeled_faces_in_the_wild>`.
Parameters
----------
subset : optional, default: 'train'
Select the dataset to load: 'train' for the development training
set, 'test' for the development test set, and '10_folds' for the
official evaluation set that is meant to be used with a 10-folds
cross validation.
data_home : optional, default: None
Specify another download and cache folder for the datasets. By
default all scikit learn data is stored in '~/scikit_learn_data'
subfolders.
funneled : boolean, optional, default: True
Download and use the funneled variant of the dataset.
resize : float, optional, default 0.5
        Ratio used to resize each face picture.
color : boolean, optional, default False
Keep the 3 RGB channels instead of averaging them to a single
gray level channel. If color is True the shape of the data has
one more dimension than the shape with color = False.
slice_ : optional
Provide a custom 2D slice (height, width) to extract the
        'interesting' part of the jpeg files and avoid spurious statistical
correlation from the background
download_if_missing : optional, True by default
        If False, raise an IOError if the data is not locally available
instead of trying to download the data from the source site.
Returns
-------
The data is returned as a Bunch object with the following attributes:
data : numpy array of shape (2200, 5828). Shape depends on ``subset``.
Each row corresponds to 2 ravel'd face images of original size 62 x 47
pixels. Changing the ``slice_``, ``resize`` or ``subset`` parameters
will change the shape of the output.
pairs : numpy array of shape (2200, 2, 62, 47). Shape depends on
``subset``.
        Each row has 2 face images corresponding to the same or to different
        persons from the dataset containing 5749 people. Changing the ``slice_``,
``resize`` or ``subset`` parameters will change the shape of the
output.
target : numpy array of shape (2200,). Shape depends on ``subset``.
        Labels associated with each pair of images: the two label values
        indicate different persons or the same person.
DESCR : string
Description of the Labeled Faces in the Wild (LFW) dataset.
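    Examples
    --------
    A minimal usage sketch (the archive is downloaded on first call; the
    shapes are those documented above for the default 'train' subset):
    >>> from sklearn.datasets import fetch_lfw_pairs
    >>> lfw_pairs_train = fetch_lfw_pairs(subset='train')  # doctest: +SKIP
    >>> lfw_pairs_train.pairs.shape  # doctest: +SKIP
    (2200, 2, 62, 47)
    >>> lfw_pairs_train.target.shape  # doctest: +SKIP
    (2200,)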
"""
lfw_home, data_folder_path = check_fetch_lfw(
data_home=data_home, funneled=funneled,
download_if_missing=download_if_missing)
logger.info('Loading %s LFW pairs from %s', subset, lfw_home)
# wrap the loader in a memoizing function that will return memmaped data
# arrays for optimal memory usage
m = Memory(cachedir=lfw_home, compress=6, verbose=0)
load_func = m.cache(_fetch_lfw_pairs)
# select the right metadata file according to the requested subset
label_filenames = {
'train': 'pairsDevTrain.txt',
'test': 'pairsDevTest.txt',
'10_folds': 'pairs.txt',
}
if subset not in label_filenames:
raise ValueError("subset='%s' is invalid: should be one of %r" % (
subset, list(sorted(label_filenames.keys()))))
index_file_path = join(lfw_home, label_filenames[subset])
# load and memoize the pairs as np arrays
pairs, target, target_names = load_func(
index_file_path, data_folder_path, resize=resize, color=color,
slice_=slice_)
# pack the results as a Bunch instance
return Bunch(data=pairs.reshape(len(pairs), -1), pairs=pairs,
target=target, target_names=target_names,
DESCR="'%s' segment of the LFW pairs dataset" % subset)
| bsd-3-clause |
nrego/westpa | lib/examples/stringmethodexamples/examples/DicksonRingPotential/generate_figures/rate_order_mag.py | 2 | 2380 | import numpy as np
import yaml
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.font_manager as fm
import os
from error_plot import calc_target_rate, calc_we, calc_bruteforce
basedir = '..'
# Font settings
### rcParams are the default parameters for matplotlib
mpl.rcParams['font.size'] = 10.
mpl.rcParams['font.family'] = 'Arial'
mpl.rcParams['axes.labelsize'] = 10.
mpl.rcParams['xtick.labelsize'] = 10.
mpl.rcParams['ytick.labelsize'] = 10.
legfont = fm.FontProperties(size=10)
fsize = (3.375,3.0)
sims = ['we_1.0','we_1.5','we_2.0','we_2.5','we_3.0']
fig = plt.figure(1,figsize=fsize)
ax = fig.add_subplot(111)
betas = np.zeros((len(sims),))
oom = np.zeros_like(betas)
factx = np.zeros_like(betas)
oom_bf = np.zeros_like(betas)
factx_bf = np.zeros_like(betas)
for si,sname in enumerate(sims):
config_file = os.path.join(basedir,'configs/{}_run_config.yaml'.format(sname))
with open(config_file,'r') as f:
config_data = [grp for grp in yaml.load_all(f)]
config_data[:] = [grp for grp in config_data if grp['name'] in sims]
config_data = config_data[0]
betas[si] = config_data['beta']
log_target_rate = calc_target_rate(config_data,basedir)
we_err_avg, we_t = calc_we(config_data,basedir,log_target_rate)
    # Find the time at which WE last crosses the order-of-magnitude estimate of the rate
ii = np.argwhere(we_err_avg[0,:] > 1.0)
jj = 1 if np.max(ii) < 1 else np.max(ii)
we_oom = we_t[jj]
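    # the mean first passage time is the reciprocal of the reference rate
    # (log_target_rate stores log10 of the rate)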
mfpt = 1.0/(10.0**log_target_rate[0])
oom[si] = we_oom/mfpt
    # Find the time at which WE last crosses the factor-of-3 estimate of the rate
ii = np.argwhere(we_err_avg[0,:] > 0.5)
jj = 1 if np.max(ii) < 1 else np.max(ii)
we_factx = we_t[jj]
mfpt = 1.0/(10.0**log_target_rate[0])
factx[si] = we_factx/mfpt
data, = ax.semilogy(betas,oom,marker='o',ls='-',zorder=1000,color='black',label='$T_1$')
data.set_clip_on(False)
data, = ax.semilogy(betas,factx,marker='v',ls='-',zorder=1000,color='black',label='$T_{0.3}$')
data.set_clip_on(False)
ax.semilogy(betas,np.ones_like(oom),ls=':',lw=2.0,color='black')
ax.set_xlabel('${\\beta}$')
ax.set_ylabel('$T_X$/MFPT')
ax.legend(loc=1,frameon=False,prop=legfont)
ax.axis([1.0,3.0,1.0E-5,100])
fig.set_size_inches(fsize)
plt.tight_layout()
plt.savefig('rate_order_mag.eps',dpi=600,format='eps',bbox_inches='tight')
plt.show()
| gpl-3.0 |
theoryno3/scikit-learn | sklearn/tests/test_cross_validation.py | 5 | 41949 | """Test the cross_validation module"""
from __future__ import division
import warnings
import numpy as np
from scipy.sparse import coo_matrix
from scipy import stats
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.mocking import CheckingClassifier, MockDataFrame
from sklearn import cross_validation as cval
from sklearn.datasets import make_regression
from sklearn.datasets import load_boston
from sklearn.datasets import load_digits
from sklearn.datasets import load_iris
from sklearn.metrics import explained_variance_score
from sklearn.metrics import make_scorer
from sklearn.metrics import precision_score
from sklearn.externals import six
from sklearn.externals.six.moves import zip
from sklearn.linear_model import Ridge
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.cluster import KMeans
from sklearn.preprocessing import Imputer, LabelBinarizer
from sklearn.pipeline import Pipeline
class MockClassifier(object):
"""Dummy classifier to test the cross-validation"""
def __init__(self, a=0, allow_nd=False):
self.a = a
self.allow_nd = allow_nd
def fit(self, X, Y=None, sample_weight=None, class_prior=None,
sparse_sample_weight=None, sparse_param=None, dummy_int=None,
dummy_str=None, dummy_obj=None, callback=None):
"""The dummy arguments are to test that this fit function can
accept non-array arguments through cross-validation, such as:
- int
- str (this is actually array-like)
- object
- function
"""
self.dummy_int = dummy_int
self.dummy_str = dummy_str
self.dummy_obj = dummy_obj
if callback is not None:
callback(self)
if self.allow_nd:
X = X.reshape(len(X), -1)
if X.ndim >= 3 and not self.allow_nd:
            raise ValueError('X cannot have more than 2 dimensions')
if sample_weight is not None:
assert_true(sample_weight.shape[0] == X.shape[0],
'MockClassifier extra fit_param sample_weight.shape[0]'
' is {0}, should be {1}'.format(sample_weight.shape[0],
X.shape[0]))
if class_prior is not None:
assert_true(class_prior.shape[0] == len(np.unique(y)),
'MockClassifier extra fit_param class_prior.shape[0]'
' is {0}, should be {1}'.format(class_prior.shape[0],
len(np.unique(y))))
if sparse_sample_weight is not None:
fmt = ('MockClassifier extra fit_param sparse_sample_weight'
'.shape[0] is {0}, should be {1}')
assert_true(sparse_sample_weight.shape[0] == X.shape[0],
fmt.format(sparse_sample_weight.shape[0], X.shape[0]))
if sparse_param is not None:
fmt = ('MockClassifier extra fit_param sparse_param.shape '
'is ({0}, {1}), should be ({2}, {3})')
assert_true(sparse_param.shape == P_sparse.shape,
fmt.format(sparse_param.shape[0],
sparse_param.shape[1],
P_sparse.shape[0], P_sparse.shape[1]))
return self
def predict(self, T):
if self.allow_nd:
T = T.reshape(len(T), -1)
return T[:, 0]
def score(self, X=None, Y=None):
return 1. / (1 + np.abs(self.a))
def get_params(self, deep=False):
return {'a': self.a, 'allow_nd': self.allow_nd}
X = np.ones((10, 2))
X_sparse = coo_matrix(X)
W_sparse = coo_matrix((np.array([1]), (np.array([1]), np.array([0]))),
shape=(10, 1))
P_sparse = coo_matrix(np.eye(5))
y = np.arange(10) // 2
##############################################################################
# Tests
def check_valid_split(train, test, n_samples=None):
# Use python sets to get more informative assertion failure messages
train, test = set(train), set(test)
# Train and test split should not overlap
assert_equal(train.intersection(test), set())
if n_samples is not None:
        # Check that the union of the train and test split covers all the indices
assert_equal(train.union(test), set(range(n_samples)))
def check_cv_coverage(cv, expected_n_iter=None, n_samples=None):
    # Check that all the samples appear at least once in a test fold
if expected_n_iter is not None:
assert_equal(len(cv), expected_n_iter)
else:
expected_n_iter = len(cv)
collected_test_samples = set()
iterations = 0
for train, test in cv:
check_valid_split(train, test, n_samples=n_samples)
iterations += 1
collected_test_samples.update(test)
# Check that the accumulated test samples cover the whole dataset
assert_equal(iterations, expected_n_iter)
if n_samples is not None:
assert_equal(collected_test_samples, set(range(n_samples)))
def test_kfold_valueerrors():
# Check that errors are raised if there is not enough samples
assert_raises(ValueError, cval.KFold, 3, 4)
# Check that a warning is raised if the least populated class has too few
# members.
y = [3, 3, -1, -1, 2]
cv = assert_warns_message(Warning, "The least populated class",
cval.StratifiedKFold, y, 3)
# Check that despite the warning the folds are still computed even
    # though all the classes are not necessarily represented on each
# side of the split at each split
check_cv_coverage(cv, expected_n_iter=3, n_samples=len(y))
# Error when number of folds is <= 1
assert_raises(ValueError, cval.KFold, 2, 0)
assert_raises(ValueError, cval.KFold, 2, 1)
assert_raises(ValueError, cval.StratifiedKFold, y, 0)
assert_raises(ValueError, cval.StratifiedKFold, y, 1)
# When n is not integer:
assert_raises(ValueError, cval.KFold, 2.5, 2)
# When n_folds is not integer:
assert_raises(ValueError, cval.KFold, 5, 1.5)
assert_raises(ValueError, cval.StratifiedKFold, y, 1.5)
def test_kfold_indices():
# Check all indices are returned in the test folds
kf = cval.KFold(300, 3)
check_cv_coverage(kf, expected_n_iter=3, n_samples=300)
# Check all indices are returned in the test folds even when equal-sized
# folds are not possible
kf = cval.KFold(17, 3)
check_cv_coverage(kf, expected_n_iter=3, n_samples=17)
def test_kfold_no_shuffle():
# Manually check that KFold preserves the data ordering on toy datasets
splits = iter(cval.KFold(4, 2))
train, test = next(splits)
assert_array_equal(test, [0, 1])
assert_array_equal(train, [2, 3])
train, test = next(splits)
assert_array_equal(test, [2, 3])
assert_array_equal(train, [0, 1])
splits = iter(cval.KFold(5, 2))
train, test = next(splits)
assert_array_equal(test, [0, 1, 2])
assert_array_equal(train, [3, 4])
train, test = next(splits)
assert_array_equal(test, [3, 4])
assert_array_equal(train, [0, 1, 2])
def test_stratified_kfold_no_shuffle():
# Manually check that StratifiedKFold preserves the data ordering as much
# as possible on toy datasets in order to avoid hiding sample dependencies
# when possible
splits = iter(cval.StratifiedKFold([1, 1, 0, 0], 2))
train, test = next(splits)
assert_array_equal(test, [0, 2])
assert_array_equal(train, [1, 3])
train, test = next(splits)
assert_array_equal(test, [1, 3])
assert_array_equal(train, [0, 2])
splits = iter(cval.StratifiedKFold([1, 1, 1, 0, 0, 0, 0], 2))
train, test = next(splits)
assert_array_equal(test, [0, 1, 3, 4])
assert_array_equal(train, [2, 5, 6])
train, test = next(splits)
assert_array_equal(test, [2, 5, 6])
assert_array_equal(train, [0, 1, 3, 4])
def test_stratified_kfold_ratios():
# Check that stratified kfold preserves label ratios in individual splits
# Repeat with shuffling turned off and on
n_samples = 1000
labels = np.array([4] * int(0.10 * n_samples) +
[0] * int(0.89 * n_samples) +
[1] * int(0.01 * n_samples))
for shuffle in [False, True]:
for train, test in cval.StratifiedKFold(labels, 5, shuffle=shuffle):
assert_almost_equal(np.sum(labels[train] == 4) / len(train), 0.10,
2)
assert_almost_equal(np.sum(labels[train] == 0) / len(train), 0.89,
2)
assert_almost_equal(np.sum(labels[train] == 1) / len(train), 0.01,
2)
assert_almost_equal(np.sum(labels[test] == 4) / len(test), 0.10, 2)
assert_almost_equal(np.sum(labels[test] == 0) / len(test), 0.89, 2)
assert_almost_equal(np.sum(labels[test] == 1) / len(test), 0.01, 2)
def test_kfold_balance():
# Check that KFold returns folds with balanced sizes
for kf in [cval.KFold(i, 5) for i in range(11, 17)]:
sizes = []
for _, test in kf:
sizes.append(len(test))
assert_true((np.max(sizes) - np.min(sizes)) <= 1)
assert_equal(np.sum(sizes), kf.n)
def test_stratifiedkfold_balance():
    # Check that StratifiedKFold returns folds with balanced sizes (only when
# stratification is possible)
# Repeat with shuffling turned off and on
labels = [0] * 3 + [1] * 14
for shuffle in [False, True]:
for skf in [cval.StratifiedKFold(labels[:i], 3, shuffle=shuffle)
for i in range(11, 17)]:
sizes = []
for _, test in skf:
sizes.append(len(test))
assert_true((np.max(sizes) - np.min(sizes)) <= 1)
assert_equal(np.sum(sizes), skf.n)
def test_shuffle_kfold():
# Check the indices are shuffled properly, and that all indices are
# returned in the different test folds
kf = cval.KFold(300, 3, shuffle=True, random_state=0)
ind = np.arange(300)
all_folds = None
for train, test in kf:
sorted_array = np.arange(100)
assert_true(np.any(sorted_array != ind[train]))
sorted_array = np.arange(101, 200)
assert_true(np.any(sorted_array != ind[train]))
sorted_array = np.arange(201, 300)
assert_true(np.any(sorted_array != ind[train]))
if all_folds is None:
all_folds = ind[test].copy()
else:
all_folds = np.concatenate((all_folds, ind[test]))
all_folds.sort()
assert_array_equal(all_folds, ind)
def test_shuffle_stratifiedkfold():
# Check that shuffling is happening when requested, and for proper
# sample coverage
labels = [0] * 20 + [1] * 20
kf0 = list(cval.StratifiedKFold(labels, 5, shuffle=True, random_state=0))
kf1 = list(cval.StratifiedKFold(labels, 5, shuffle=True, random_state=1))
for (_, test0), (_, test1) in zip(kf0, kf1):
assert_true(set(test0) != set(test1))
check_cv_coverage(kf0, expected_n_iter=5, n_samples=40)
def test_kfold_can_detect_dependent_samples_on_digits(): # see #2372
# The digits samples are dependent: they are apparently grouped by authors
# although we don't have any information on the groups segment locations
    # for this data. We can highlight this fact by computing k-fold cross-
# validation with and without shuffling: we observe that the shuffling case
# wrongly makes the IID assumption and is therefore too optimistic: it
    # estimates a much higher accuracy (around 0.96) than the non-
# shuffling variant (around 0.86).
digits = load_digits()
X, y = digits.data[:800], digits.target[:800]
model = SVC(C=10, gamma=0.005)
n = len(y)
cv = cval.KFold(n, 5, shuffle=False)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(0.88, mean_score)
assert_greater(mean_score, 0.85)
# Shuffling the data artificially breaks the dependency and hides the
    # overfitting of the model with regard to the writing style of the authors
# by yielding a seriously overestimated score:
cv = cval.KFold(n, 5, shuffle=True, random_state=0)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(mean_score, 0.95)
cv = cval.KFold(n, 5, shuffle=True, random_state=1)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(mean_score, 0.95)
# Similarly, StratifiedKFold should try to shuffle the data as little
# as possible (while respecting the balanced class constraints)
# and thus be able to detect the dependency by not overestimating
# the CV score either. As the digits dataset is approximately balanced
# the estimated mean score is close to the score measured with
# non-shuffled KFold
cv = cval.StratifiedKFold(y, 5)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(0.88, mean_score)
assert_greater(mean_score, 0.85)
def test_shuffle_split():
ss1 = cval.ShuffleSplit(10, test_size=0.2, random_state=0)
ss2 = cval.ShuffleSplit(10, test_size=2, random_state=0)
ss3 = cval.ShuffleSplit(10, test_size=np.int32(2), random_state=0)
for typ in six.integer_types:
ss4 = cval.ShuffleSplit(10, test_size=typ(2), random_state=0)
for t1, t2, t3, t4 in zip(ss1, ss2, ss3, ss4):
assert_array_equal(t1[0], t2[0])
assert_array_equal(t2[0], t3[0])
assert_array_equal(t3[0], t4[0])
assert_array_equal(t1[1], t2[1])
assert_array_equal(t2[1], t3[1])
assert_array_equal(t3[1], t4[1])
def test_stratified_shuffle_split_init():
y = np.asarray([0, 1, 1, 1, 2, 2, 2])
# Check that error is raised if there is a class with only one sample
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.2)
# Check that error is raised if the test set size is smaller than n_classes
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 2)
# Check that error is raised if the train set size is smaller than
# n_classes
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 3, 2)
y = np.asarray([0, 0, 0, 1, 1, 1, 2, 2, 2])
# Check that errors are raised if there is not enough samples
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.5, 0.6)
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 8, 0.6)
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.6, 8)
# Train size or test size too small
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, train_size=2)
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, test_size=2)
def test_stratified_shuffle_split_iter():
ys = [np.array([1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3]),
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]),
np.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2]),
np.array([1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4]),
np.array([-1] * 800 + [1] * 50)
]
for y in ys:
sss = cval.StratifiedShuffleSplit(y, 6, test_size=0.33,
random_state=0)
for train, test in sss:
assert_array_equal(np.unique(y[train]), np.unique(y[test]))
            # Check that the folds keep the class proportions
p_train = (np.bincount(np.unique(y[train], return_inverse=True)[1])
/ float(len(y[train])))
p_test = (np.bincount(np.unique(y[test], return_inverse=True)[1])
/ float(len(y[test])))
assert_array_almost_equal(p_train, p_test, 1)
assert_equal(y[train].size + y[test].size, y.size)
assert_array_equal(np.lib.arraysetops.intersect1d(train, test), [])
def test_stratified_shuffle_split_even():
    # Test the StratifiedShuffleSplit, indices are drawn with an
# equal chance
n_folds = 5
n_iter = 1000
def assert_counts_are_ok(idx_counts, p):
# Here we test that the distribution of the counts
# per index is close enough to a binomial
threshold = 0.05 / n_splits
bf = stats.binom(n_splits, p)
for count in idx_counts:
p = bf.pmf(count)
assert_true(p > threshold,
"An index is not drawn with chance corresponding "
"to even draws")
for n_samples in (6, 22):
labels = np.array((n_samples // 2) * [0, 1])
splits = cval.StratifiedShuffleSplit(labels, n_iter=n_iter,
test_size=1. / n_folds,
random_state=0)
train_counts = [0] * n_samples
test_counts = [0] * n_samples
n_splits = 0
for train, test in splits:
n_splits += 1
for counter, ids in [(train_counts, train), (test_counts, test)]:
for id in ids:
counter[id] += 1
assert_equal(n_splits, n_iter)
assert_equal(len(train), splits.n_train)
assert_equal(len(test), splits.n_test)
assert_equal(len(set(train).intersection(test)), 0)
label_counts = np.unique(labels)
assert_equal(splits.test_size, 1.0 / n_folds)
assert_equal(splits.n_train + splits.n_test, len(labels))
assert_equal(len(label_counts), 2)
ex_test_p = float(splits.n_test) / n_samples
ex_train_p = float(splits.n_train) / n_samples
assert_counts_are_ok(train_counts, ex_train_p)
assert_counts_are_ok(test_counts, ex_test_p)
def test_predefinedsplit_with_kfold_split():
    # Check that PredefinedSplit can reproduce a split generated by KFold.
folds = -1 * np.ones(10)
kf_train = []
kf_test = []
for i, (train_ind, test_ind) in enumerate(cval.KFold(10, 5, shuffle=True)):
kf_train.append(train_ind)
kf_test.append(test_ind)
folds[test_ind] = i
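    # at this point each sample's entry in `folds` records the KFold iteration
    # in which that sample was placed in the test set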
ps_train = []
ps_test = []
ps = cval.PredefinedSplit(folds)
for train_ind, test_ind in ps:
ps_train.append(train_ind)
ps_test.append(test_ind)
assert_array_equal(ps_train, kf_train)
assert_array_equal(ps_test, kf_test)
def test_leave_label_out_changing_labels():
# Check that LeaveOneLabelOut and LeavePLabelOut work normally if
# the labels variable is changed before calling __iter__
labels = np.array([0, 1, 2, 1, 1, 2, 0, 0])
labels_changing = np.array(labels, copy=True)
lolo = cval.LeaveOneLabelOut(labels)
lolo_changing = cval.LeaveOneLabelOut(labels_changing)
lplo = cval.LeavePLabelOut(labels, p=2)
lplo_changing = cval.LeavePLabelOut(labels_changing, p=2)
labels_changing[:] = 0
for llo, llo_changing in [(lolo, lolo_changing), (lplo, lplo_changing)]:
for (train, test), (train_chan, test_chan) in zip(llo, llo_changing):
assert_array_equal(train, train_chan)
assert_array_equal(test, test_chan)
def test_cross_val_score():
clf = MockClassifier()
for a in range(-10, 10):
clf.a = a
# Smoke test
scores = cval.cross_val_score(clf, X, y)
assert_array_equal(scores, clf.score(X, y))
# test with multioutput y
scores = cval.cross_val_score(clf, X_sparse, X)
assert_array_equal(scores, clf.score(X_sparse, X))
scores = cval.cross_val_score(clf, X_sparse, y)
assert_array_equal(scores, clf.score(X_sparse, y))
# test with multioutput y
scores = cval.cross_val_score(clf, X_sparse, X)
assert_array_equal(scores, clf.score(X_sparse, X))
# test with X and y as list
list_check = lambda x: isinstance(x, list)
clf = CheckingClassifier(check_X=list_check)
scores = cval.cross_val_score(clf, X.tolist(), y.tolist())
clf = CheckingClassifier(check_y=list_check)
scores = cval.cross_val_score(clf, X, y.tolist())
assert_raises(ValueError, cval.cross_val_score, clf, X, y,
scoring="sklearn")
    # test with 3d X
X_3d = X[:, :, np.newaxis]
clf = MockClassifier(allow_nd=True)
scores = cval.cross_val_score(clf, X_3d, y)
clf = MockClassifier(allow_nd=False)
assert_raises(ValueError, cval.cross_val_score, clf, X_3d, y)
def test_cross_val_score_pandas():
# check cross_val_score doesn't destroy pandas dataframe
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((Series, DataFrame))
except ImportError:
pass
for TargetType, InputFeatureType in types:
# X dataframe, y series
X_df, y_ser = InputFeatureType(X), TargetType(y)
check_df = lambda x: isinstance(x, InputFeatureType)
check_series = lambda x: isinstance(x, TargetType)
clf = CheckingClassifier(check_X=check_df, check_y=check_series)
cval.cross_val_score(clf, X_df, y_ser)
def test_cross_val_score_mask():
# test that cross_val_score works with boolean masks
svm = SVC(kernel="linear")
iris = load_iris()
X, y = iris.data, iris.target
cv_indices = cval.KFold(len(y), 5)
scores_indices = cval.cross_val_score(svm, X, y, cv=cv_indices)
cv_indices = cval.KFold(len(y), 5)
cv_masks = []
for train, test in cv_indices:
mask_train = np.zeros(len(y), dtype=np.bool)
mask_test = np.zeros(len(y), dtype=np.bool)
mask_train[train] = 1
mask_test[test] = 1
        cv_masks.append((mask_train, mask_test))
scores_masks = cval.cross_val_score(svm, X, y, cv=cv_masks)
assert_array_equal(scores_indices, scores_masks)
def test_cross_val_score_precomputed():
# test for svm with precomputed kernel
svm = SVC(kernel="precomputed")
iris = load_iris()
X, y = iris.data, iris.target
linear_kernel = np.dot(X, X.T)
score_precomputed = cval.cross_val_score(svm, linear_kernel, y)
svm = SVC(kernel="linear")
score_linear = cval.cross_val_score(svm, X, y)
assert_array_equal(score_precomputed, score_linear)
# Error raised for non-square X
svm = SVC(kernel="precomputed")
assert_raises(ValueError, cval.cross_val_score, svm, X, y)
# test error is raised when the precomputed kernel is not array-like
# or sparse
assert_raises(ValueError, cval.cross_val_score, svm,
linear_kernel.tolist(), y)
def test_cross_val_score_fit_params():
clf = MockClassifier()
n_samples = X.shape[0]
n_classes = len(np.unique(y))
DUMMY_INT = 42
DUMMY_STR = '42'
DUMMY_OBJ = object()
def assert_fit_params(clf):
# Function to test that the values are passed correctly to the
# classifier arguments for non-array type
assert_equal(clf.dummy_int, DUMMY_INT)
assert_equal(clf.dummy_str, DUMMY_STR)
assert_equal(clf.dummy_obj, DUMMY_OBJ)
fit_params = {'sample_weight': np.ones(n_samples),
'class_prior': np.ones(n_classes) / n_classes,
'sparse_sample_weight': W_sparse,
'sparse_param': P_sparse,
'dummy_int': DUMMY_INT,
'dummy_str': DUMMY_STR,
'dummy_obj': DUMMY_OBJ,
'callback': assert_fit_params}
cval.cross_val_score(clf, X, y, fit_params=fit_params)
def test_cross_val_score_score_func():
clf = MockClassifier()
_score_func_args = []
def score_func(y_test, y_predict):
_score_func_args.append((y_test, y_predict))
return 1.0
with warnings.catch_warnings(record=True):
scoring = make_scorer(score_func)
score = cval.cross_val_score(clf, X, y, scoring=scoring)
assert_array_equal(score, [1.0, 1.0, 1.0])
assert len(_score_func_args) == 3
def test_cross_val_score_errors():
class BrokenEstimator:
pass
assert_raises(TypeError, cval.cross_val_score, BrokenEstimator(), X)
def test_train_test_split_errors():
assert_raises(ValueError, cval.train_test_split)
assert_raises(ValueError, cval.train_test_split, range(3), train_size=1.1)
assert_raises(ValueError, cval.train_test_split, range(3), test_size=0.6,
train_size=0.6)
assert_raises(ValueError, cval.train_test_split, range(3),
test_size=np.float32(0.6), train_size=np.float32(0.6))
assert_raises(ValueError, cval.train_test_split, range(3),
test_size="wrong_type")
assert_raises(ValueError, cval.train_test_split, range(3), test_size=2,
train_size=4)
assert_raises(TypeError, cval.train_test_split, range(3),
some_argument=1.1)
assert_raises(ValueError, cval.train_test_split, range(3), range(42))
def test_train_test_split():
X = np.arange(100).reshape((10, 10))
X_s = coo_matrix(X)
y = np.arange(10)
# simple test
split = cval.train_test_split(X, y, test_size=None, train_size=.5)
X_train, X_test, y_train, y_test = split
assert_equal(len(y_test), len(y_train))
# test correspondence of X and y
assert_array_equal(X_train[:, 0], y_train * 10)
assert_array_equal(X_test[:, 0], y_test * 10)
# conversion of lists to arrays (deprecated?)
with warnings.catch_warnings(record=True):
split = cval.train_test_split(X, X_s, y.tolist(), allow_lists=False)
X_train, X_test, X_s_train, X_s_test, y_train, y_test = split
assert_array_equal(X_train, X_s_train.toarray())
assert_array_equal(X_test, X_s_test.toarray())
# don't convert lists to anything else by default
split = cval.train_test_split(X, X_s, y.tolist())
X_train, X_test, X_s_train, X_s_test, y_train, y_test = split
assert_true(isinstance(y_train, list))
assert_true(isinstance(y_test, list))
# allow nd-arrays
X_4d = np.arange(10 * 5 * 3 * 2).reshape(10, 5, 3, 2)
y_3d = np.arange(10 * 7 * 11).reshape(10, 7, 11)
split = cval.train_test_split(X_4d, y_3d)
assert_equal(split[0].shape, (7, 5, 3, 2))
assert_equal(split[1].shape, (3, 5, 3, 2))
assert_equal(split[2].shape, (7, 7, 11))
assert_equal(split[3].shape, (3, 7, 11))
# test stratification option
y = np.array([1, 1, 1, 1, 2, 2, 2, 2])
for test_size, exp_test_size in zip([2, 4, 0.25, 0.5, 0.75],
[2, 4, 2, 4, 6]):
train, test = cval.train_test_split(y,
test_size=test_size,
stratify=y,
random_state=0)
assert_equal(len(test), exp_test_size)
assert_equal(len(test) + len(train), len(y))
# check the 1:1 ratio of ones and twos in the data is preserved
assert_equal(np.sum(train == 1), np.sum(train == 2))
def train_test_split_pandas():
    # check train_test_split doesn't destroy pandas dataframe
types = [MockDataFrame]
try:
from pandas import DataFrame
types.append(DataFrame)
except ImportError:
pass
for InputFeatureType in types:
# X dataframe
X_df = InputFeatureType(X)
X_train, X_test = cval.train_test_split(X_df)
assert_true(isinstance(X_train, InputFeatureType))
assert_true(isinstance(X_test, InputFeatureType))
def train_test_split_mock_pandas():
# X mock dataframe
X_df = MockDataFrame(X)
X_train, X_test = cval.train_test_split(X_df)
assert_true(isinstance(X_train, MockDataFrame))
assert_true(isinstance(X_test, MockDataFrame))
X_train_arr, X_test_arr = cval.train_test_split(X_df, allow_lists=False)
assert_true(isinstance(X_train_arr, np.ndarray))
assert_true(isinstance(X_test_arr, np.ndarray))
def test_cross_val_score_with_score_func_classification():
iris = load_iris()
clf = SVC(kernel='linear')
# Default score (should be the accuracy score)
scores = cval.cross_val_score(clf, iris.data, iris.target, cv=5)
assert_array_almost_equal(scores, [0.97, 1., 0.97, 0.97, 1.], 2)
# Correct classification score (aka. zero / one score) - should be the
# same as the default estimator score
zo_scores = cval.cross_val_score(clf, iris.data, iris.target,
scoring="accuracy", cv=5)
assert_array_almost_equal(zo_scores, [0.97, 1., 0.97, 0.97, 1.], 2)
    # F1 score (classes are balanced so f1_score should be equal to zero/one
    # score)
f1_scores = cval.cross_val_score(clf, iris.data, iris.target,
scoring="f1_weighted", cv=5)
assert_array_almost_equal(f1_scores, [0.97, 1., 0.97, 0.97, 1.], 2)
def test_cross_val_score_with_score_func_regression():
X, y = make_regression(n_samples=30, n_features=20, n_informative=5,
random_state=0)
reg = Ridge()
# Default score of the Ridge regression estimator
scores = cval.cross_val_score(reg, X, y, cv=5)
assert_array_almost_equal(scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
    # R2 score (aka. coefficient of determination) - should be the
# same as the default estimator score
r2_scores = cval.cross_val_score(reg, X, y, scoring="r2", cv=5)
assert_array_almost_equal(r2_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
# Mean squared error; this is a loss function, so "scores" are negative
mse_scores = cval.cross_val_score(reg, X, y, cv=5,
scoring="mean_squared_error")
expected_mse = np.array([-763.07, -553.16, -274.38, -273.26, -1681.99])
assert_array_almost_equal(mse_scores, expected_mse, 2)
# Explained variance
scoring = make_scorer(explained_variance_score)
ev_scores = cval.cross_val_score(reg, X, y, cv=5, scoring=scoring)
assert_array_almost_equal(ev_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
def test_permutation_score():
iris = load_iris()
X = iris.data
X_sparse = coo_matrix(X)
y = iris.target
svm = SVC(kernel='linear')
cv = cval.StratifiedKFold(y, 2)
score, scores, pvalue = cval.permutation_test_score(
svm, X, y, n_permutations=30, cv=cv, scoring="accuracy")
assert_greater(score, 0.9)
assert_almost_equal(pvalue, 0.0, 1)
score_label, _, pvalue_label = cval.permutation_test_score(
svm, X, y, n_permutations=30, cv=cv, scoring="accuracy",
labels=np.ones(y.size), random_state=0)
assert_true(score_label == score)
assert_true(pvalue_label == pvalue)
# check that we obtain the same results with a sparse representation
svm_sparse = SVC(kernel='linear')
cv_sparse = cval.StratifiedKFold(y, 2)
score_label, _, pvalue_label = cval.permutation_test_score(
svm_sparse, X_sparse, y, n_permutations=30, cv=cv_sparse,
scoring="accuracy", labels=np.ones(y.size), random_state=0)
assert_true(score_label == score)
assert_true(pvalue_label == pvalue)
# test with custom scoring object
def custom_score(y_true, y_pred):
return (((y_true == y_pred).sum() - (y_true != y_pred).sum())
/ y_true.shape[0])
scorer = make_scorer(custom_score)
score, _, pvalue = cval.permutation_test_score(
svm, X, y, n_permutations=100, scoring=scorer, cv=cv, random_state=0)
assert_almost_equal(score, .93, 2)
assert_almost_equal(pvalue, 0.01, 3)
# set random y
y = np.mod(np.arange(len(y)), 3)
score, scores, pvalue = cval.permutation_test_score(
svm, X, y, n_permutations=30, cv=cv, scoring="accuracy")
assert_less(score, 0.5)
assert_greater(pvalue, 0.2)
def test_cross_val_generator_with_indices():
X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
y = np.array([1, 1, 2, 2])
labels = np.array([1, 2, 3, 4])
# explicitly passing indices value is deprecated
loo = cval.LeaveOneOut(4)
lpo = cval.LeavePOut(4, 2)
kf = cval.KFold(4, 2)
skf = cval.StratifiedKFold(y, 2)
lolo = cval.LeaveOneLabelOut(labels)
lopo = cval.LeavePLabelOut(labels, 2)
ps = cval.PredefinedSplit([1, 1, 2, 2])
ss = cval.ShuffleSplit(2)
for cv in [loo, lpo, kf, skf, lolo, lopo, ss, ps]:
for train, test in cv:
assert_not_equal(np.asarray(train).dtype.kind, 'b')
assert_not_equal(np.asarray(train).dtype.kind, 'b')
X[train], X[test]
y[train], y[test]
@ignore_warnings
def test_cross_val_generator_with_default_indices():
X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
y = np.array([1, 1, 2, 2])
labels = np.array([1, 2, 3, 4])
loo = cval.LeaveOneOut(4)
lpo = cval.LeavePOut(4, 2)
kf = cval.KFold(4, 2)
skf = cval.StratifiedKFold(y, 2)
lolo = cval.LeaveOneLabelOut(labels)
lopo = cval.LeavePLabelOut(labels, 2)
ss = cval.ShuffleSplit(2)
ps = cval.PredefinedSplit([1, 1, 2, 2])
for cv in [loo, lpo, kf, skf, lolo, lopo, ss, ps]:
for train, test in cv:
assert_not_equal(np.asarray(train).dtype.kind, 'b')
assert_not_equal(np.asarray(train).dtype.kind, 'b')
X[train], X[test]
y[train], y[test]
def test_shufflesplit_errors():
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=2.0)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=1.0)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=0.1,
train_size=0.95)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=11)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=10)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=8, train_size=3)
assert_raises(ValueError, cval.ShuffleSplit, 10, train_size=1j)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=None,
train_size=None)
def test_shufflesplit_reproducible():
# Check that iterating twice on the ShuffleSplit gives the same
# sequence of train-test when the random_state is given
ss = cval.ShuffleSplit(10, random_state=21)
assert_array_equal(list(a for a, b in ss), list(a for a, b in ss))
def test_safe_split_with_precomputed_kernel():
clf = SVC()
clfp = SVC(kernel="precomputed")
iris = load_iris()
X, y = iris.data, iris.target
K = np.dot(X, X.T)
cv = cval.ShuffleSplit(X.shape[0], test_size=0.25, random_state=0)
tr, te = list(cv)[0]
X_tr, y_tr = cval._safe_split(clf, X, y, tr)
K_tr, y_tr2 = cval._safe_split(clfp, K, y, tr)
assert_array_almost_equal(K_tr, np.dot(X_tr, X_tr.T))
X_te, y_te = cval._safe_split(clf, X, y, te, tr)
K_te, y_te2 = cval._safe_split(clfp, K, y, te, tr)
assert_array_almost_equal(K_te, np.dot(X_te, X_tr.T))
def test_cross_val_score_allow_nans():
# Check that cross_val_score allows input data with NaNs
X = np.arange(200, dtype=np.float64).reshape(10, -1)
X[2, :] = np.nan
y = np.repeat([0, 1], X.shape[0] / 2)
p = Pipeline([
('imputer', Imputer(strategy='mean', missing_values='NaN')),
('classifier', MockClassifier()),
])
cval.cross_val_score(p, X, y, cv=5)
def test_train_test_split_allow_nans():
# Check that train_test_split allows input data with NaNs
X = np.arange(200, dtype=np.float64).reshape(10, -1)
X[2, :] = np.nan
y = np.repeat([0, 1], X.shape[0] / 2)
cval.train_test_split(X, y, test_size=0.2, random_state=42)
def test_permutation_test_score_allow_nans():
# Check that permutation_test_score allows input data with NaNs
X = np.arange(200, dtype=np.float64).reshape(10, -1)
X[2, :] = np.nan
y = np.repeat([0, 1], X.shape[0] / 2)
p = Pipeline([
('imputer', Imputer(strategy='mean', missing_values='NaN')),
('classifier', MockClassifier()),
])
cval.permutation_test_score(p, X, y, cv=5)
def test_check_cv_return_types():
X = np.ones((9, 2))
cv = cval._check_cv(3, X, classifier=False)
assert_true(isinstance(cv, cval.KFold))
y_binary = np.array([0, 1, 0, 1, 0, 0, 1, 1, 1])
cv = cval._check_cv(3, X, y_binary, classifier=True)
assert_true(isinstance(cv, cval.StratifiedKFold))
y_multiclass = np.array([0, 1, 0, 1, 2, 1, 2, 0, 2])
cv = cval._check_cv(3, X, y_multiclass, classifier=True)
assert_true(isinstance(cv, cval.StratifiedKFold))
X = np.ones((5, 2))
y_seq_of_seqs = [[], [1, 2], [3], [0, 1, 3], [2]]
with warnings.catch_warnings(record=True):
# deprecated sequence of sequence format
cv = cval._check_cv(3, X, y_seq_of_seqs, classifier=True)
assert_true(isinstance(cv, cval.KFold))
y_indicator_matrix = LabelBinarizer().fit_transform(y_seq_of_seqs)
cv = cval._check_cv(3, X, y_indicator_matrix, classifier=True)
assert_true(isinstance(cv, cval.KFold))
y_multioutput = np.array([[1, 2], [0, 3], [0, 0], [3, 1], [2, 0]])
cv = cval._check_cv(3, X, y_multioutput, classifier=True)
assert_true(isinstance(cv, cval.KFold))
def test_cross_val_score_multilabel():
X = np.array([[-3, 4], [2, 4], [3, 3], [0, 2], [-3, 1],
[-2, 1], [0, 0], [-2, -1], [-1, -2], [1, -2]])
y = np.array([[1, 1], [0, 1], [0, 1], [0, 1], [1, 1],
[0, 1], [1, 0], [1, 1], [1, 0], [0, 0]])
clf = KNeighborsClassifier(n_neighbors=1)
scoring_micro = make_scorer(precision_score, average='micro')
scoring_macro = make_scorer(precision_score, average='macro')
scoring_samples = make_scorer(precision_score, average='samples')
score_micro = cval.cross_val_score(clf, X, y, scoring=scoring_micro, cv=5)
score_macro = cval.cross_val_score(clf, X, y, scoring=scoring_macro, cv=5)
score_samples = cval.cross_val_score(clf, X, y,
scoring=scoring_samples, cv=5)
assert_almost_equal(score_micro, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 3])
assert_almost_equal(score_macro, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 4])
assert_almost_equal(score_samples, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 4])
def test_cross_val_predict():
boston = load_boston()
X, y = boston.data, boston.target
cv = cval.KFold(len(boston.target))
est = Ridge()
# Naive loop (should be same as cross_val_predict):
preds2 = np.zeros_like(y)
for train, test in cv:
est.fit(X[train], y[train])
preds2[test] = est.predict(X[test])
preds = cval.cross_val_predict(est, X, y, cv=cv)
assert_array_almost_equal(preds, preds2)
preds = cval.cross_val_predict(est, X, y)
assert_equal(len(preds), len(y))
cv = cval.LeaveOneOut(len(y))
preds = cval.cross_val_predict(est, X, y, cv=cv)
assert_equal(len(preds), len(y))
Xsp = X.copy()
Xsp *= (Xsp > np.median(Xsp))
Xsp = coo_matrix(Xsp)
preds = cval.cross_val_predict(est, Xsp, y)
assert_array_almost_equal(len(preds), len(y))
preds = cval.cross_val_predict(KMeans(), X)
assert_equal(len(preds), len(y))
def bad_cv():
for i in range(4):
yield np.array([0, 1, 2, 3]), np.array([4, 5, 6, 7, 8])
assert_raises(ValueError, cval.cross_val_predict, est, X, y, cv=bad_cv())
def test_cross_val_predict_input_types():
clf = Ridge()
# Smoke test
predictions = cval.cross_val_predict(clf, X, y)
assert_equal(predictions.shape, (10,))
# test with multioutput y
predictions = cval.cross_val_predict(clf, X_sparse, X)
assert_equal(predictions.shape, (10, 2))
predictions = cval.cross_val_predict(clf, X_sparse, y)
assert_array_equal(predictions.shape, (10,))
# test with multioutput y
predictions = cval.cross_val_predict(clf, X_sparse, X)
assert_array_equal(predictions.shape, (10, 2))
# test with X and y as list
list_check = lambda x: isinstance(x, list)
clf = CheckingClassifier(check_X=list_check)
predictions = cval.cross_val_predict(clf, X.tolist(), y.tolist())
clf = CheckingClassifier(check_y=list_check)
predictions = cval.cross_val_predict(clf, X, y.tolist())
    # test with 3d X
X_3d = X[:, :, np.newaxis]
check_3d = lambda x: x.ndim == 3
clf = CheckingClassifier(check_X=check_3d)
predictions = cval.cross_val_predict(clf, X_3d, y)
assert_array_equal(predictions.shape, (10,))
def test_cross_val_predict_pandas():
    # check cross_val_predict doesn't destroy pandas dataframe
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((Series, DataFrame))
except ImportError:
pass
for TargetType, InputFeatureType in types:
# X dataframe, y series
X_df, y_ser = InputFeatureType(X), TargetType(y)
check_df = lambda x: isinstance(x, InputFeatureType)
check_series = lambda x: isinstance(x, TargetType)
clf = CheckingClassifier(check_X=check_df, check_y=check_series)
cval.cross_val_predict(clf, X_df, y_ser)
def test_sparse_fit_params():
iris = load_iris()
X, y = iris.data, iris.target
clf = MockClassifier()
fit_params = {'sparse_sample_weight': coo_matrix(np.eye(X.shape[0]))}
a = cval.cross_val_score(clf, X, y, fit_params=fit_params)
assert_array_equal(a, np.ones(3))
def test_check_is_partition():
p = np.arange(100)
assert_true(cval._check_is_partition(p, 100))
assert_false(cval._check_is_partition(np.delete(p, 23), 100))
p[0] = 23
assert_false(cval._check_is_partition(p, 100))
| bsd-3-clause |
nooperpudd/trading-with-python | lib/yahooFinance.py | 76 | 8290 | # -*- coding: utf-8 -*-
# Author: Jev Kuznetsov <[email protected]>
# License: BSD
"""
Toolset for working with Yahoo Finance data
This module includes functions for easy access to Yahoo Finance data
Functions
----------
- `getHistoricData` get historic data for a single symbol or a list of symbols
- `getQuote` get current quote for a symbol
- `getScreenerSymbols` load symbols from a yahoo stock screener file
Classes
---------
- `HistData` a class for working with multiple symbols
"""
from datetime import datetime, date
import urllib2
from pandas import DataFrame, Index, HDFStore, WidePanel
import numpy as np
import os
from extra import ProgressBar
def parseStr(s):
''' convert string to a float or string '''
f = s.strip()
if f[0] == '"':
return f.strip('"')
elif f=='N/A':
return np.nan
else:
try: # try float conversion
prefixes = {'M':1e6, 'B': 1e9}
prefix = f[-1]
if prefix in prefixes: # do we have a Billion/Million character?
return float(f[:-1])*prefixes[prefix]
else: # no, convert to float directly
return float(f)
except ValueError: # failed, return original string
return s
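# Illustrative behaviour of parseStr (added for clarity; the field strings are
# hypothetical Yahoo CSV values, not fetched data):
#   parseStr('"AAPL"')  -> 'AAPL'        (quoted strings are unquoted)
#   parseStr('N/A')     -> nan
#   parseStr('12.5M')   -> 12500000.0    (M/B suffixes expand to 1e6/1e9)
#   parseStr('0.97')    -> 0.97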
class HistData(object):
''' a class for working with yahoo finance data '''
def __init__(self, autoAdjust=True):
self.startDate = (2008,1,1)
self.autoAdjust=autoAdjust
self.wp = WidePanel()
def load(self,dataFile):
"""load data from HDF"""
if os.path.exists(dataFile):
store = HDFStore(dataFile)
symbols = [str(s).strip('/') for s in store.keys() ]
data = dict(zip(symbols,[store[symbol] for symbol in symbols]))
self.wp = WidePanel(data)
store.close()
else:
raise IOError('Data file does not exist')
def save(self,dataFile):
""" save data to HDF"""
print 'Saving data to', dataFile
store = HDFStore(dataFile)
for symbol in self.wp.items:
store[symbol] = self.wp[symbol]
store.close()
def downloadData(self,symbols='all'):
''' get data from yahoo '''
if symbols == 'all':
symbols = self.symbols
#store = HDFStore(self.dataFile)
p = ProgressBar(len(symbols))
for idx,symbol in enumerate(symbols):
try:
df = getSymbolData(symbol,sDate=self.startDate,verbose=False)
if self.autoAdjust:
df = _adjust(df,removeOrig=True)
if len(self.symbols)==0:
self.wp = WidePanel({symbol:df})
else:
self.wp[symbol] = df
except Exception,e:
print e
p.animate(idx+1)
def getDataFrame(self,field='close'):
''' return a slice on wide panel for a given field '''
return self.wp.minor_xs(field)
@property
def symbols(self):
return self.wp.items.tolist()
def __repr__(self):
return str(self.wp)
def getQuote(symbols):
''' get current yahoo quote, return a DataFrame '''
# for codes see: http://www.gummy-stuff.org/Yahoo-data.htm
if not isinstance(symbols,list):
symbols = [symbols]
header = ['symbol','last','change_pct','PE','time','short_ratio','prev_close','eps','market_cap']
request = str.join('', ['s', 'l1', 'p2' , 'r', 't1', 's7', 'p', 'e' , 'j1'])
data = dict(zip(header,[[] for i in range(len(header))]))
urlStr = 'http://finance.yahoo.com/d/quotes.csv?s=%s&f=%s' % (str.join('+',symbols), request)
    try:
        lines = urllib2.urlopen(urlStr).readlines()
    except Exception, e:
        s = "Failed to download:\n{0}".format(e)
        print s
        return None
for line in lines:
fields = line.strip().split(',')
#print fields, len(fields)
for i,field in enumerate(fields):
data[header[i]].append( parseStr(field))
idx = data.pop('symbol')
return DataFrame(data,index=idx)
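# Minimal usage sketch for getQuote (added for illustration; it needs network
# access and assumes the CSV quote endpoint above is still served by Yahoo):
#
#   quotes = getQuote(['SPY', 'XLE'])
#   print quotes[['last', 'change_pct', 'PE']]
#
# The returned DataFrame is indexed by symbol, with one column per field
# listed in `header` above.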
def _historicDataUrl(symbol, sDate=(1990,1,1),eDate=date.today().timetuple()[0:3]):
"""
generate url
    symbol: Yahoo Finance symbol
sDate: start date (y,m,d)
eDate: end date (y,m,d)
"""
urlStr = 'http://ichart.finance.yahoo.com/table.csv?s={0}&a={1}&b={2}&c={3}&d={4}&e={5}&f={6}'.\
format(symbol.upper(),sDate[1]-1,sDate[2],sDate[0],eDate[1]-1,eDate[2],eDate[0])
return urlStr
def getHistoricData(symbols, **options):
'''
get data from Yahoo finance and return pandas dataframe
    Will return an OHLCV data frame if a single symbol is provided.
    If multiple symbols are provided, it will return a wide panel.
Parameters
------------
    symbols: Yahoo Finance symbol or a list of symbols
sDate: start date (y,m,d)
eDate: end date (y,m,d)
adjust : T/[F] adjust data based on adj_close
'''
assert isinstance(symbols,(list,str)), 'Input must be a string symbol or a list of symbols'
if isinstance(symbols,str):
return getSymbolData(symbols,**options)
else:
data = {}
print 'Downloading data:'
p = ProgressBar(len(symbols))
for idx,symbol in enumerate(symbols):
p.animate(idx+1)
data[symbol] = getSymbolData(symbol,verbose=False,**options)
return WidePanel(data)
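# Note (added): for a single symbol getHistoricData returns a DataFrame with
# open/high/low/close/volume/adj_close columns; for a list of symbols it
# returns a WidePanel keyed by symbol.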
def getSymbolData(symbol, sDate=(1990,1,1),eDate=date.today().timetuple()[0:3], adjust=False, verbose=True):
"""
get data from Yahoo finance and return pandas dataframe
    symbol: Yahoo Finance symbol
sDate: start date (y,m,d)
eDate: end date (y,m,d)
"""
urlStr = 'http://ichart.finance.yahoo.com/table.csv?s={0}&a={1}&b={2}&c={3}&d={4}&e={5}&f={6}'.\
format(symbol.upper(),sDate[1]-1,sDate[2],sDate[0],eDate[1]-1,eDate[2],eDate[0])
try:
lines = urllib2.urlopen(urlStr).readlines()
except Exception, e:
s = "Failed to download:\n{0}".format(e);
print s
return None
dates = []
data = [[] for i in range(6)]
#high
# header : Date,Open,High,Low,Close,Volume,Adj Close
for line in lines[1:]:
#print line
fields = line.rstrip().split(',')
dates.append(datetime.strptime( fields[0],'%Y-%m-%d'))
for i,field in enumerate(fields[1:]):
data[i].append(float(field))
idx = Index(dates)
data = dict(zip(['open','high','low','close','volume','adj_close'],data))
# create a pandas dataframe structure
df = DataFrame(data,index=idx).sort()
if verbose:
print 'Got %i days of data' % len(df)
if adjust:
return _adjust(df,removeOrig=True)
else:
return df
def _adjust(df, removeOrig=False):
'''
    adjust historical data based on the adj_close field
'''
c = df['close']/df['adj_close']
df['adj_open'] = df['open']/c
df['adj_high'] = df['high']/c
df['adj_low'] = df['low']/c
if removeOrig:
df=df.drop(['open','close','high','low'],axis=1)
renames = dict(zip(['adj_open','adj_close','adj_high','adj_low'],['open','close','high','low']))
df=df.rename(columns=renames)
return df
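# Worked example of the adjustment above (made-up numbers): if close=100 and
# adj_close=50 (e.g. after a 2:1 split), then c=2 and each OHLC column is
# divided by 2, so an open of 102 becomes an adjusted open of 51. Volume and
# adj_close themselves are left untouched.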
def getScreenerSymbols(fileName):
''' read symbols from a .csv saved by yahoo stock screener '''
with open(fileName,'r') as fid:
lines = fid.readlines()
symbols = []
for line in lines[3:]:
fields = line.strip().split(',')
field = fields[0].strip()
if len(field) > 0:
symbols.append(field)
return symbols
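if __name__ == '__main__':
    # Illustrative smoke test, added for documentation purposes only. It
    # requires network access and assumes the Yahoo CSV endpoints used above
    # are still reachable; the ticker symbols are arbitrary examples.
    print getQuote(['SPY', 'QQQ'])
    df = getHistoricData('SPY', sDate=(2012, 1, 1))
    print df.tail()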
| bsd-3-clause |
thilbern/scikit-learn | examples/text/document_classification_20newsgroups.py | 2 | 10746 | """
======================================================
Classification of text documents using sparse features
======================================================
This is an example showing how scikit-learn can be used to classify documents
by topics using a bag-of-words approach. This example uses a scipy.sparse
matrix to store the features and demonstrates various classifiers that can
efficiently handle sparse matrices.
The dataset used in this example is the 20 newsgroups dataset. It will be
automatically downloaded, then cached.
The bar plot indicates the f1-score, training time (normalized) and test time
(normalized) of each classifier.
"""
# Author: Peter Prettenhofer <[email protected]>
# Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
# Lars Buitinck <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
import logging
import numpy as np
from optparse import OptionParser
import sys
from time import time
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_selection import SelectKBest, chi2
from sklearn.linear_model import RidgeClassifier
from sklearn.svm import LinearSVC
from sklearn.linear_model import SGDClassifier
from sklearn.linear_model import Perceptron
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.naive_bayes import BernoulliNB, MultinomialNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neighbors import NearestCentroid
from sklearn.ensemble import RandomForestClassifier
from sklearn.utils.extmath import density
from sklearn import metrics
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
# parse commandline arguments
op = OptionParser()
op.add_option("--report",
action="store_true", dest="print_report",
help="Print a detailed classification report.")
op.add_option("--chi2_select",
action="store", type="int", dest="select_chi2",
help="Select some number of features using a chi-squared test")
op.add_option("--confusion_matrix",
action="store_true", dest="print_cm",
help="Print the confusion matrix.")
op.add_option("--top10",
action="store_true", dest="print_top10",
help="Print ten most discriminative terms per class"
" for every classifier.")
op.add_option("--all_categories",
action="store_true", dest="all_categories",
help="Whether to use all categories or not.")
op.add_option("--use_hashing",
action="store_true",
help="Use a hashing vectorizer.")
op.add_option("--n_features",
action="store", type=int, default=2 ** 16,
help="n_features when using the hashing vectorizer.")
op.add_option("--filtered",
action="store_true",
help="Remove newsgroup information that is easily overfit: "
"headers, signatures, and quoting.")
(opts, args) = op.parse_args()
if len(args) > 0:
op.error("this script takes no arguments.")
sys.exit(1)
print(__doc__)
op.print_help()
print()
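# Example invocations (illustrative; the flags are the options defined above):
#   python document_classification_20newsgroups.py --report --top10
#   python document_classification_20newsgroups.py --all_categories --use_hashing
#   python document_classification_20newsgroups.py --filtered --chi2_select=1000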
###############################################################################
# Load some categories from the training set
if opts.all_categories:
categories = None
else:
categories = [
'alt.atheism',
'talk.religion.misc',
'comp.graphics',
'sci.space',
]
if opts.filtered:
remove = ('headers', 'footers', 'quotes')
else:
remove = ()
print("Loading 20 newsgroups dataset for categories:")
print(categories if categories else "all")
data_train = fetch_20newsgroups(subset='train', categories=categories,
shuffle=True, random_state=42,
remove=remove)
data_test = fetch_20newsgroups(subset='test', categories=categories,
shuffle=True, random_state=42,
remove=remove)
print('data loaded')
categories = data_train.target_names # for case categories == None
def size_mb(docs):
return sum(len(s.encode('utf-8')) for s in docs) / 1e6
data_train_size_mb = size_mb(data_train.data)
data_test_size_mb = size_mb(data_test.data)
print("%d documents - %0.3fMB (training set)" % (
len(data_train.data), data_train_size_mb))
print("%d documents - %0.3fMB (test set)" % (
len(data_test.data), data_test_size_mb))
print("%d categories" % len(categories))
print()
# split a training set and a test set
y_train, y_test = data_train.target, data_test.target
print("Extracting features from the training dataset using a sparse vectorizer")
t0 = time()
if opts.use_hashing:
vectorizer = HashingVectorizer(stop_words='english', non_negative=True,
n_features=opts.n_features)
X_train = vectorizer.transform(data_train.data)
else:
vectorizer = TfidfVectorizer(sublinear_tf=True, max_df=0.5,
stop_words='english')
X_train = vectorizer.fit_transform(data_train.data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_train_size_mb / duration))
print("n_samples: %d, n_features: %d" % X_train.shape)
print()
print("Extracting features from the test dataset using the same vectorizer")
t0 = time()
X_test = vectorizer.transform(data_test.data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_test_size_mb / duration))
print("n_samples: %d, n_features: %d" % X_test.shape)
print()
# mapping from integer feature name to original token string
if opts.use_hashing:
feature_names = None
else:
feature_names = vectorizer.get_feature_names()
if opts.select_chi2:
print("Extracting %d best features by a chi-squared test" %
opts.select_chi2)
t0 = time()
ch2 = SelectKBest(chi2, k=opts.select_chi2)
X_train = ch2.fit_transform(X_train, y_train)
X_test = ch2.transform(X_test)
if feature_names:
# keep selected feature names
feature_names = [feature_names[i] for i
in ch2.get_support(indices=True)]
print("done in %fs" % (time() - t0))
print()
if feature_names:
feature_names = np.asarray(feature_names)
def trim(s):
"""Trim string to fit on terminal (assuming 80-column display)"""
return s if len(s) <= 80 else s[:77] + "..."
###############################################################################
# Benchmark classifiers
def benchmark(clf):
print('_' * 80)
print("Training: ")
print(clf)
t0 = time()
clf.fit(X_train, y_train)
train_time = time() - t0
print("train time: %0.3fs" % train_time)
t0 = time()
pred = clf.predict(X_test)
test_time = time() - t0
print("test time: %0.3fs" % test_time)
score = metrics.f1_score(y_test, pred)
print("f1-score: %0.3f" % score)
if hasattr(clf, 'coef_'):
print("dimensionality: %d" % clf.coef_.shape[1])
print("density: %f" % density(clf.coef_))
if opts.print_top10 and feature_names is not None:
print("top 10 keywords per class:")
for i, category in enumerate(categories):
top10 = np.argsort(clf.coef_[i])[-10:]
print(trim("%s: %s"
% (category, " ".join(feature_names[top10]))))
print()
if opts.print_report:
print("classification report:")
print(metrics.classification_report(y_test, pred,
target_names=categories))
if opts.print_cm:
print("confusion matrix:")
print(metrics.confusion_matrix(y_test, pred))
print()
clf_descr = str(clf).split('(')[0]
return clf_descr, score, train_time, test_time
results = []
for clf, name in (
(RidgeClassifier(tol=1e-2, solver="lsqr"), "Ridge Classifier"),
(Perceptron(n_iter=50), "Perceptron"),
(PassiveAggressiveClassifier(n_iter=50), "Passive-Aggressive"),
(KNeighborsClassifier(n_neighbors=10), "kNN"),
(RandomForestClassifier(n_estimators=100), "Random forest")):
print('=' * 80)
print(name)
results.append(benchmark(clf))
for penalty in ["l2", "l1"]:
print('=' * 80)
print("%s penalty" % penalty.upper())
# Train Liblinear model
results.append(benchmark(LinearSVC(loss='l2', penalty=penalty,
dual=False, tol=1e-3)))
# Train SGD model
results.append(benchmark(SGDClassifier(alpha=.0001, n_iter=50,
penalty=penalty)))
# Train SGD with Elastic Net penalty
print('=' * 80)
print("Elastic-Net penalty")
results.append(benchmark(SGDClassifier(alpha=.0001, n_iter=50,
penalty="elasticnet")))
# Train NearestCentroid without threshold
print('=' * 80)
print("NearestCentroid (aka Rocchio classifier)")
results.append(benchmark(NearestCentroid()))
# Train sparse Naive Bayes classifiers
print('=' * 80)
print("Naive Bayes")
results.append(benchmark(MultinomialNB(alpha=.01)))
results.append(benchmark(BernoulliNB(alpha=.01)))
class L1LinearSVC(LinearSVC):
def fit(self, X, y):
# The smaller C, the stronger the regularization.
# The more regularization, the more sparsity.
self.transformer_ = LinearSVC(penalty="l1",
dual=False, tol=1e-3)
X = self.transformer_.fit_transform(X, y)
return LinearSVC.fit(self, X, y)
def predict(self, X):
X = self.transformer_.transform(X)
return LinearSVC.predict(self, X)
print('=' * 80)
print("LinearSVC with L1-based feature selection")
results.append(benchmark(L1LinearSVC()))
# make some plots
indices = np.arange(len(results))
results = [[x[i] for x in results] for i in range(4)]
clf_names, score, training_time, test_time = results
training_time = np.array(training_time) / np.max(training_time)
test_time = np.array(test_time) / np.max(test_time)
plt.figure(figsize=(12, 8))
plt.title("Score")
plt.barh(indices, score, .2, label="score", color='r')
plt.barh(indices + .3, training_time, .2, label="training time", color='g')
plt.barh(indices + .6, test_time, .2, label="test time", color='b')
plt.yticks(())
plt.legend(loc='best')
plt.subplots_adjust(left=.25)
plt.subplots_adjust(top=.95)
plt.subplots_adjust(bottom=.05)
for i, c in zip(indices, clf_names):
plt.text(-.3, i, c)
plt.show()
| bsd-3-clause |
yosssi/scipy_2015_sklearn_tutorial | notebooks/figures/plot_interactive_forest.py | 40 | 1279 | import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_blobs
from sklearn.ensemble import RandomForestClassifier
X, y = make_blobs(centers=[[0, 0], [1, 1]], random_state=61526, n_samples=50)
def plot_forest(max_depth=1):
plt.figure()
ax = plt.gca()
h = 0.02
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
if max_depth != 0:
forest = RandomForestClassifier(n_estimators=20, max_depth=max_depth,
random_state=1).fit(X, y)
Z = forest.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:, 1]
Z = Z.reshape(xx.shape)
ax.contourf(xx, yy, Z, alpha=.4)
ax.set_title("max_depth = %d" % max_depth)
else:
ax.set_title("data set")
ax.scatter(X[:, 0], X[:, 1], c=np.array(['b', 'r'])[y], s=60)
ax.set_xlim(x_min, x_max)
ax.set_ylim(y_min, y_max)
ax.set_xticks(())
ax.set_yticks(())
def plot_forest_interactive():
from IPython.html.widgets import interactive, IntSlider
slider = IntSlider(min=0, max=8, step=1, value=0)
return interactive(plot_forest, max_depth=slider)
| cc0-1.0 |
chrsrds/scikit-learn | sklearn/preprocessing/tests/test_data.py | 1 | 93995 | # Authors:
#
# Giorgio Patrini
#
# License: BSD 3 clause
import warnings
import itertools
import numpy as np
import numpy.linalg as la
from scipy import sparse, stats
from scipy.sparse import random as sparse_random
import pytest
from sklearn.utils import gen_batches
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_less
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regex
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.testing import assert_allclose
from sklearn.utils.testing import assert_allclose_dense_sparse
from sklearn.utils.testing import skip_if_32bit
from sklearn.utils.sparsefuncs import mean_variance_axis
from sklearn.preprocessing.data import _handle_zeros_in_scale
from sklearn.preprocessing.data import Binarizer
from sklearn.preprocessing.data import KernelCenterer
from sklearn.preprocessing.data import Normalizer
from sklearn.preprocessing.data import normalize
from sklearn.preprocessing.data import StandardScaler
from sklearn.preprocessing.data import scale
from sklearn.preprocessing.data import MinMaxScaler
from sklearn.preprocessing.data import minmax_scale
from sklearn.preprocessing.data import QuantileTransformer
from sklearn.preprocessing.data import quantile_transform
from sklearn.preprocessing.data import MaxAbsScaler
from sklearn.preprocessing.data import maxabs_scale
from sklearn.preprocessing.data import RobustScaler
from sklearn.preprocessing.data import robust_scale
from sklearn.preprocessing.data import add_dummy_feature
from sklearn.preprocessing.data import PolynomialFeatures
from sklearn.preprocessing.data import PowerTransformer
from sklearn.preprocessing.data import power_transform
from sklearn.preprocessing.data import BOUNDS_THRESHOLD
from sklearn.exceptions import NotFittedError
from sklearn.base import clone
from sklearn.pipeline import Pipeline
from sklearn.model_selection import cross_val_predict
from sklearn.svm import SVR
from sklearn.utils import shuffle
from sklearn import datasets
iris = datasets.load_iris()
# Make some data to be used many times
rng = np.random.RandomState(0)
n_features = 30
n_samples = 1000
offsets = rng.uniform(-1, 1, size=n_features)
scales = rng.uniform(1, 10, size=n_features)
X_2d = rng.randn(n_samples, n_features) * scales + offsets
X_1row = X_2d[0, :].reshape(1, n_features)
X_1col = X_2d[:, 0].reshape(n_samples, 1)
X_list_1row = X_1row.tolist()
X_list_1col = X_1col.tolist()
def toarray(a):
if hasattr(a, "toarray"):
a = a.toarray()
return a
def _check_dim_1axis(a):
if isinstance(a, list):
return np.array(a).shape[0]
return a.shape[0]
def assert_correct_incr(i, batch_start, batch_stop, n, chunk_size,
n_samples_seen):
if batch_stop != n:
assert (i + 1) * chunk_size == n_samples_seen
else:
assert (i * chunk_size + (batch_stop - batch_start) ==
n_samples_seen)
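# Worked example for the helper above (illustrative numbers): with n=10 and
# chunk_size=4, gen_batches yields batches of sizes 4, 4 and 2. After the
# second batch (i=1, batch_stop=8 != n) we expect (1 + 1) * 4 == 8 samples
# seen; after the last batch (batch_stop == n == 10) we expect
# 2 * 4 + (10 - 8) == 10 samples seen.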
def test_polynomial_features():
# Test Polynomial Features
X1 = np.arange(6)[:, np.newaxis]
P1 = np.hstack([np.ones_like(X1),
X1, X1 ** 2, X1 ** 3])
deg1 = 3
X2 = np.arange(6).reshape((3, 2))
x1 = X2[:, :1]
x2 = X2[:, 1:]
P2 = np.hstack([x1 ** 0 * x2 ** 0,
x1 ** 1 * x2 ** 0,
x1 ** 0 * x2 ** 1,
x1 ** 2 * x2 ** 0,
x1 ** 1 * x2 ** 1,
x1 ** 0 * x2 ** 2])
deg2 = 2
for (deg, X, P) in [(deg1, X1, P1), (deg2, X2, P2)]:
P_test = PolynomialFeatures(deg, include_bias=True).fit_transform(X)
assert_array_almost_equal(P_test, P)
P_test = PolynomialFeatures(deg, include_bias=False).fit_transform(X)
assert_array_almost_equal(P_test, P[:, 1:])
interact = PolynomialFeatures(2, interaction_only=True, include_bias=True)
X_poly = interact.fit_transform(X)
assert_array_almost_equal(X_poly, P2[:, [0, 1, 2, 4]])
assert interact.powers_.shape == (interact.n_output_features_,
interact.n_input_features_)
def test_polynomial_feature_names():
X = np.arange(30).reshape(10, 3)
poly = PolynomialFeatures(degree=2, include_bias=True).fit(X)
feature_names = poly.get_feature_names()
assert_array_equal(['1', 'x0', 'x1', 'x2', 'x0^2', 'x0 x1',
'x0 x2', 'x1^2', 'x1 x2', 'x2^2'],
feature_names)
poly = PolynomialFeatures(degree=3, include_bias=False).fit(X)
feature_names = poly.get_feature_names(["a", "b", "c"])
assert_array_equal(['a', 'b', 'c', 'a^2', 'a b', 'a c', 'b^2',
'b c', 'c^2', 'a^3', 'a^2 b', 'a^2 c',
'a b^2', 'a b c', 'a c^2', 'b^3', 'b^2 c',
'b c^2', 'c^3'], feature_names)
# test some unicode
poly = PolynomialFeatures(degree=1, include_bias=True).fit(X)
feature_names = poly.get_feature_names(
["\u0001F40D", "\u262E", "\u05D0"])
assert_array_equal(["1", "\u0001F40D", "\u262E", "\u05D0"],
feature_names)
def test_polynomial_feature_array_order():
X = np.arange(10).reshape(5, 2)
def is_c_contiguous(a):
return np.isfortran(a.T)
assert is_c_contiguous(PolynomialFeatures().fit_transform(X))
assert is_c_contiguous(PolynomialFeatures(order='C').fit_transform(X))
assert np.isfortran(PolynomialFeatures(order='F').fit_transform(X))
@pytest.mark.parametrize(['deg', 'include_bias', 'interaction_only', 'dtype'],
[(1, True, False, int),
(2, True, False, int),
(2, True, False, np.float32),
(2, True, False, np.float64),
(3, False, False, np.float64),
(3, False, True, np.float64),
(4, False, False, np.float64),
(4, False, True, np.float64)])
def test_polynomial_features_csc_X(deg, include_bias, interaction_only, dtype):
rng = np.random.RandomState(0)
X = rng.randint(0, 2, (100, 2))
X_csc = sparse.csc_matrix(X)
est = PolynomialFeatures(deg, include_bias=include_bias,
interaction_only=interaction_only)
Xt_csc = est.fit_transform(X_csc.astype(dtype))
Xt_dense = est.fit_transform(X.astype(dtype))
assert isinstance(Xt_csc, sparse.csc_matrix)
assert Xt_csc.dtype == Xt_dense.dtype
assert_array_almost_equal(Xt_csc.A, Xt_dense)
@pytest.mark.parametrize(['deg', 'include_bias', 'interaction_only', 'dtype'],
[(1, True, False, int),
(2, True, False, int),
(2, True, False, np.float32),
(2, True, False, np.float64),
(3, False, False, np.float64),
(3, False, True, np.float64)])
def test_polynomial_features_csr_X(deg, include_bias, interaction_only, dtype):
rng = np.random.RandomState(0)
X = rng.randint(0, 2, (100, 2))
X_csr = sparse.csr_matrix(X)
est = PolynomialFeatures(deg, include_bias=include_bias,
interaction_only=interaction_only)
Xt_csr = est.fit_transform(X_csr.astype(dtype))
Xt_dense = est.fit_transform(X.astype(dtype, copy=False))
assert isinstance(Xt_csr, sparse.csr_matrix)
assert Xt_csr.dtype == Xt_dense.dtype
assert_array_almost_equal(Xt_csr.A, Xt_dense)
@pytest.mark.parametrize(['deg', 'include_bias', 'interaction_only', 'dtype'],
[(2, True, False, np.float32),
(2, True, False, np.float64),
(3, False, False, np.float64),
(3, False, True, np.float64)])
def test_polynomial_features_csr_X_floats(deg, include_bias,
interaction_only, dtype):
X_csr = sparse_random(1000, 10, 0.5, random_state=0).tocsr()
X = X_csr.toarray()
est = PolynomialFeatures(deg, include_bias=include_bias,
interaction_only=interaction_only)
Xt_csr = est.fit_transform(X_csr.astype(dtype))
Xt_dense = est.fit_transform(X.astype(dtype))
assert isinstance(Xt_csr, sparse.csr_matrix)
assert Xt_csr.dtype == Xt_dense.dtype
assert_array_almost_equal(Xt_csr.A, Xt_dense)
@pytest.mark.parametrize(['zero_row_index', 'deg', 'interaction_only'],
[(0, 2, True), (1, 2, True), (2, 2, True),
(0, 3, True), (1, 3, True), (2, 3, True),
(0, 2, False), (1, 2, False), (2, 2, False),
(0, 3, False), (1, 3, False), (2, 3, False)])
def test_polynomial_features_csr_X_zero_row(zero_row_index, deg,
interaction_only):
X_csr = sparse_random(3, 10, 1.0, random_state=0).tocsr()
X_csr[zero_row_index, :] = 0.0
X = X_csr.toarray()
est = PolynomialFeatures(deg, include_bias=False,
interaction_only=interaction_only)
Xt_csr = est.fit_transform(X_csr)
Xt_dense = est.fit_transform(X)
assert isinstance(Xt_csr, sparse.csr_matrix)
assert Xt_csr.dtype == Xt_dense.dtype
assert_array_almost_equal(Xt_csr.A, Xt_dense)
# This degree should always be one more than the highest degree supported by
# _csr_expansion.
@pytest.mark.parametrize(['include_bias', 'interaction_only'],
[(True, True), (True, False),
(False, True), (False, False)])
def test_polynomial_features_csr_X_degree_4(include_bias, interaction_only):
X_csr = sparse_random(1000, 10, 0.5, random_state=0).tocsr()
X = X_csr.toarray()
est = PolynomialFeatures(4, include_bias=include_bias,
interaction_only=interaction_only)
Xt_csr = est.fit_transform(X_csr)
Xt_dense = est.fit_transform(X)
assert isinstance(Xt_csr, sparse.csr_matrix)
assert Xt_csr.dtype == Xt_dense.dtype
assert_array_almost_equal(Xt_csr.A, Xt_dense)
@pytest.mark.parametrize(['deg', 'dim', 'interaction_only'],
[(2, 1, True),
(2, 2, True),
(3, 1, True),
(3, 2, True),
(3, 3, True),
(2, 1, False),
(2, 2, False),
(3, 1, False),
(3, 2, False),
(3, 3, False)])
def test_polynomial_features_csr_X_dim_edges(deg, dim, interaction_only):
X_csr = sparse_random(1000, dim, 0.5, random_state=0).tocsr()
X = X_csr.toarray()
est = PolynomialFeatures(deg, interaction_only=interaction_only)
Xt_csr = est.fit_transform(X_csr)
Xt_dense = est.fit_transform(X)
assert isinstance(Xt_csr, sparse.csr_matrix)
assert Xt_csr.dtype == Xt_dense.dtype
assert_array_almost_equal(Xt_csr.A, Xt_dense)
def test_standard_scaler_1d():
# Test scaling of dataset along single axis
for X in [X_1row, X_1col, X_list_1row, X_list_1row]:
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=True)
if isinstance(X, list):
X = np.array(X) # cast only after scaling done
if _check_dim_1axis(X) == 1:
assert_almost_equal(scaler.mean_, X.ravel())
assert_almost_equal(scaler.scale_, np.ones(n_features))
assert_array_almost_equal(X_scaled.mean(axis=0),
np.zeros_like(n_features))
assert_array_almost_equal(X_scaled.std(axis=0),
np.zeros_like(n_features))
else:
assert_almost_equal(scaler.mean_, X.mean())
assert_almost_equal(scaler.scale_, X.std())
assert_array_almost_equal(X_scaled.mean(axis=0),
np.zeros_like(n_features))
assert_array_almost_equal(X_scaled.mean(axis=0), .0)
assert_array_almost_equal(X_scaled.std(axis=0), 1.)
assert scaler.n_samples_seen_ == X.shape[0]
# check inverse transform
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_array_almost_equal(X_scaled_back, X)
# Constant feature
X = np.ones((5, 1))
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=True)
assert_almost_equal(scaler.mean_, 1.)
assert_almost_equal(scaler.scale_, 1.)
assert_array_almost_equal(X_scaled.mean(axis=0), .0)
assert_array_almost_equal(X_scaled.std(axis=0), .0)
assert scaler.n_samples_seen_ == X.shape[0]
def test_standard_scaler_dtype():
# Ensure scaling does not affect dtype
rng = np.random.RandomState(0)
n_samples = 10
n_features = 3
for dtype in [np.float16, np.float32, np.float64]:
X = rng.randn(n_samples, n_features).astype(dtype)
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X)
assert X.dtype == X_scaled.dtype
assert scaler.mean_.dtype == np.float64
assert scaler.scale_.dtype == np.float64
def test_scale_1d():
# 1-d inputs
X_list = [1., 3., 5., 0.]
X_arr = np.array(X_list)
for X in [X_list, X_arr]:
X_scaled = scale(X)
assert_array_almost_equal(X_scaled.mean(), 0.0)
assert_array_almost_equal(X_scaled.std(), 1.0)
assert_array_equal(scale(X, with_mean=False, with_std=False), X)
@skip_if_32bit
def test_standard_scaler_numerical_stability():
# Test numerical stability of scaling
    # np.log(1e-5) is used because its floating point representation
    # was empirically found to cause numerical problems with np.mean & np.std.
x = np.full(8, np.log(1e-5), dtype=np.float64)
# This does not raise a warning as the number of samples is too low
# to trigger the problem in recent numpy
x_scaled = assert_no_warnings(scale, x)
assert_array_almost_equal(scale(x), np.zeros(8))
# with 2 more samples, the std computation run into numerical issues:
x = np.full(10, np.log(1e-5), dtype=np.float64)
w = "standard deviation of the data is probably very close to 0"
x_scaled = assert_warns_message(UserWarning, w, scale, x)
assert_array_almost_equal(x_scaled, np.zeros(10))
x = np.full(10, 1e-100, dtype=np.float64)
x_small_scaled = assert_no_warnings(scale, x)
assert_array_almost_equal(x_small_scaled, np.zeros(10))
# Large values can cause (often recoverable) numerical stability issues:
x_big = np.full(10, 1e100, dtype=np.float64)
w = "Dataset may contain too large values"
x_big_scaled = assert_warns_message(UserWarning, w, scale, x_big)
assert_array_almost_equal(x_big_scaled, np.zeros(10))
assert_array_almost_equal(x_big_scaled, x_small_scaled)
x_big_centered = assert_warns_message(UserWarning, w, scale, x_big,
with_std=False)
assert_array_almost_equal(x_big_centered, np.zeros(10))
assert_array_almost_equal(x_big_centered, x_small_scaled)
def test_scaler_2d_arrays():
# Test scaling of 2d array along first axis
rng = np.random.RandomState(0)
n_features = 5
n_samples = 4
X = rng.randn(n_samples, n_features)
    X[:, 0] = 0.0  # first feature is always zero
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=True)
assert not np.any(np.isnan(X_scaled))
assert scaler.n_samples_seen_ == n_samples
assert_array_almost_equal(X_scaled.mean(axis=0), n_features * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has been copied
assert X_scaled is not X
# check inverse transform
X_scaled_back = scaler.inverse_transform(X_scaled)
assert X_scaled_back is not X
assert X_scaled_back is not X_scaled
assert_array_almost_equal(X_scaled_back, X)
X_scaled = scale(X, axis=1, with_std=False)
assert not np.any(np.isnan(X_scaled))
assert_array_almost_equal(X_scaled.mean(axis=1), n_samples * [0.0])
X_scaled = scale(X, axis=1, with_std=True)
assert not np.any(np.isnan(X_scaled))
assert_array_almost_equal(X_scaled.mean(axis=1), n_samples * [0.0])
assert_array_almost_equal(X_scaled.std(axis=1), n_samples * [1.0])
# Check that the data hasn't been modified
assert X_scaled is not X
X_scaled = scaler.fit(X).transform(X, copy=False)
assert not np.any(np.isnan(X_scaled))
assert_array_almost_equal(X_scaled.mean(axis=0), n_features * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has not been copied
assert X_scaled is X
X = rng.randn(4, 5)
X[:, 0] = 1.0 # first feature is a constant, non zero feature
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=True)
assert not np.any(np.isnan(X_scaled))
assert_array_almost_equal(X_scaled.mean(axis=0), n_features * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has not been copied
assert X_scaled is not X
def test_scaler_float16_overflow():
# Test if the scaler will not overflow on float16 numpy arrays
rng = np.random.RandomState(0)
    # float16 has a maximum of 65500.0. In the worst case 5 * 200000 = 1000000,
    # which is enough to overflow the data type
X = rng.uniform(5, 10, [200000, 1]).astype(np.float16)
with np.errstate(over='raise'):
scaler = StandardScaler().fit(X)
X_scaled = scaler.transform(X)
# Calculate the float64 equivalent to verify result
X_scaled_f64 = StandardScaler().fit_transform(X.astype(np.float64))
# Overflow calculations may cause -inf, inf, or nan. Since there is no nan
# input, all of the outputs should be finite. This may be redundant since a
# FloatingPointError exception will be thrown on overflow above.
assert np.all(np.isfinite(X_scaled))
# The normal distribution is very unlikely to go above 4. At 4.0-8.0 the
# float16 precision is 2^-8 which is around 0.004. Thus only 2 decimals are
# checked to account for precision differences.
assert_array_almost_equal(X_scaled, X_scaled_f64, decimal=2)
def test_handle_zeros_in_scale():
s1 = np.array([0, 1, 2, 3])
s2 = _handle_zeros_in_scale(s1, copy=True)
assert not s1[0] == s2[0]
assert_array_equal(s1, np.array([0, 1, 2, 3]))
assert_array_equal(s2, np.array([1, 1, 2, 3]))
def test_minmax_scaler_partial_fit():
# Test if partial_fit run over many batches of size 1 and 50
# gives the same results as fit
X = X_2d
n = X.shape[0]
for chunk_size in [1, 2, 50, n, n + 42]:
# Test mean at the end of the process
scaler_batch = MinMaxScaler().fit(X)
scaler_incr = MinMaxScaler()
for batch in gen_batches(n_samples, chunk_size):
scaler_incr = scaler_incr.partial_fit(X[batch])
assert_array_almost_equal(scaler_batch.data_min_,
scaler_incr.data_min_)
assert_array_almost_equal(scaler_batch.data_max_,
scaler_incr.data_max_)
assert scaler_batch.n_samples_seen_ == scaler_incr.n_samples_seen_
assert_array_almost_equal(scaler_batch.data_range_,
scaler_incr.data_range_)
assert_array_almost_equal(scaler_batch.scale_, scaler_incr.scale_)
assert_array_almost_equal(scaler_batch.min_, scaler_incr.min_)
# Test std after 1 step
batch0 = slice(0, chunk_size)
scaler_batch = MinMaxScaler().fit(X[batch0])
scaler_incr = MinMaxScaler().partial_fit(X[batch0])
assert_array_almost_equal(scaler_batch.data_min_,
scaler_incr.data_min_)
assert_array_almost_equal(scaler_batch.data_max_,
scaler_incr.data_max_)
assert scaler_batch.n_samples_seen_ == scaler_incr.n_samples_seen_
assert_array_almost_equal(scaler_batch.data_range_,
scaler_incr.data_range_)
assert_array_almost_equal(scaler_batch.scale_, scaler_incr.scale_)
assert_array_almost_equal(scaler_batch.min_, scaler_incr.min_)
        # Test std until the end of partial fits
scaler_batch = MinMaxScaler().fit(X)
scaler_incr = MinMaxScaler() # Clean estimator
for i, batch in enumerate(gen_batches(n_samples, chunk_size)):
scaler_incr = scaler_incr.partial_fit(X[batch])
assert_correct_incr(i, batch_start=batch.start,
batch_stop=batch.stop, n=n,
chunk_size=chunk_size,
n_samples_seen=scaler_incr.n_samples_seen_)
def test_standard_scaler_partial_fit():
# Test if partial_fit run over many batches of size 1 and 50
# gives the same results as fit
X = X_2d
n = X.shape[0]
for chunk_size in [1, 2, 50, n, n + 42]:
# Test mean at the end of the process
scaler_batch = StandardScaler(with_std=False).fit(X)
scaler_incr = StandardScaler(with_std=False)
for batch in gen_batches(n_samples, chunk_size):
scaler_incr = scaler_incr.partial_fit(X[batch])
assert_array_almost_equal(scaler_batch.mean_, scaler_incr.mean_)
assert scaler_batch.var_ == scaler_incr.var_ # Nones
assert scaler_batch.n_samples_seen_ == scaler_incr.n_samples_seen_
# Test std after 1 step
batch0 = slice(0, chunk_size)
scaler_incr = StandardScaler().partial_fit(X[batch0])
if chunk_size == 1:
assert_array_almost_equal(np.zeros(n_features, dtype=np.float64),
scaler_incr.var_)
assert_array_almost_equal(np.ones(n_features, dtype=np.float64),
scaler_incr.scale_)
else:
assert_array_almost_equal(np.var(X[batch0], axis=0),
scaler_incr.var_)
assert_array_almost_equal(np.std(X[batch0], axis=0),
scaler_incr.scale_) # no constants
        # Test std until the end of partial fits
scaler_batch = StandardScaler().fit(X)
scaler_incr = StandardScaler() # Clean estimator
for i, batch in enumerate(gen_batches(n_samples, chunk_size)):
scaler_incr = scaler_incr.partial_fit(X[batch])
assert_correct_incr(i, batch_start=batch.start,
batch_stop=batch.stop, n=n,
chunk_size=chunk_size,
n_samples_seen=scaler_incr.n_samples_seen_)
assert_array_almost_equal(scaler_batch.var_, scaler_incr.var_)
assert scaler_batch.n_samples_seen_ == scaler_incr.n_samples_seen_
def test_standard_scaler_partial_fit_numerical_stability():
    # Test if the incremental computation introduces significant errors
    # for large datasets with values of large magnitude
rng = np.random.RandomState(0)
n_features = 2
n_samples = 100
offsets = rng.uniform(-1e15, 1e15, size=n_features)
scales = rng.uniform(1e3, 1e6, size=n_features)
X = rng.randn(n_samples, n_features) * scales + offsets
scaler_batch = StandardScaler().fit(X)
scaler_incr = StandardScaler()
for chunk in X:
scaler_incr = scaler_incr.partial_fit(chunk.reshape(1, n_features))
    # Regardless of absolute values, they must not differ by more than 6
    # significant digits
tol = 10 ** (-6)
assert_allclose(scaler_incr.mean_, scaler_batch.mean_, rtol=tol)
assert_allclose(scaler_incr.var_, scaler_batch.var_, rtol=tol)
assert_allclose(scaler_incr.scale_, scaler_batch.scale_, rtol=tol)
# NOTE Be aware that for much larger offsets std is very unstable (last
# assert) while mean is OK.
# Sparse input
size = (100, 3)
scale = 1e20
X = rng.randint(0, 2, size).astype(np.float64) * scale
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
for X in [X_csr, X_csc]:
# with_mean=False is required with sparse input
scaler = StandardScaler(with_mean=False).fit(X)
scaler_incr = StandardScaler(with_mean=False)
for chunk in X:
# chunk = sparse.csr_matrix(data_chunks)
scaler_incr = scaler_incr.partial_fit(chunk)
        # Regardless of magnitude, they must not differ by more than 6
        # significant digits
tol = 10 ** (-6)
assert scaler.mean_ is not None
assert_allclose(scaler_incr.var_, scaler.var_, rtol=tol)
assert_allclose(scaler_incr.scale_, scaler.scale_, rtol=tol)
def test_partial_fit_sparse_input():
# Check that sparsity is not destroyed
X = np.array([[1.], [0.], [0.], [5.]])
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
null_transform = StandardScaler(with_mean=False, with_std=False, copy=True)
for X in [X_csr, X_csc]:
X_null = null_transform.partial_fit(X).transform(X)
assert_array_equal(X_null.data, X.data)
X_orig = null_transform.inverse_transform(X_null)
assert_array_equal(X_orig.data, X_null.data)
assert_array_equal(X_orig.data, X.data)
def test_standard_scaler_transform_with_partial_fit():
# Check some postconditions after applying partial_fit and transform
X = X_2d[:100, :]
scaler_incr = StandardScaler()
for i, batch in enumerate(gen_batches(X.shape[0], 1)):
X_sofar = X[:(i + 1), :]
chunks_copy = X_sofar.copy()
scaled_batch = StandardScaler().fit_transform(X_sofar)
scaler_incr = scaler_incr.partial_fit(X[batch])
scaled_incr = scaler_incr.transform(X_sofar)
assert_array_almost_equal(scaled_batch, scaled_incr)
assert_array_almost_equal(X_sofar, chunks_copy) # No change
right_input = scaler_incr.inverse_transform(scaled_incr)
assert_array_almost_equal(X_sofar, right_input)
zero = np.zeros(X.shape[1])
epsilon = np.finfo(float).eps
assert_array_less(zero, scaler_incr.var_ + epsilon) # as less or equal
assert_array_less(zero, scaler_incr.scale_ + epsilon)
        # (i+1) because the scaler has already been fitted
assert (i + 1) == scaler_incr.n_samples_seen_
def test_min_max_scaler_iris():
X = iris.data
scaler = MinMaxScaler()
# default params
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(X_trans.min(axis=0), 0)
assert_array_almost_equal(X_trans.max(axis=0), 1)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# not default params: min=1, max=2
scaler = MinMaxScaler(feature_range=(1, 2))
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(X_trans.min(axis=0), 1)
assert_array_almost_equal(X_trans.max(axis=0), 2)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# min=-.5, max=.6
scaler = MinMaxScaler(feature_range=(-.5, .6))
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(X_trans.min(axis=0), -.5)
assert_array_almost_equal(X_trans.max(axis=0), .6)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# raises on invalid range
scaler = MinMaxScaler(feature_range=(2, 1))
assert_raises(ValueError, scaler.fit, X)
def test_min_max_scaler_zero_variance_features():
# Check min max scaler on toy data with zero variance features
X = [[0., 1., +0.5],
[0., 1., -0.1],
[0., 1., +1.1]]
X_new = [[+0., 2., 0.5],
[-1., 1., 0.0],
[+0., 1., 1.5]]
# default params
scaler = MinMaxScaler()
X_trans = scaler.fit_transform(X)
X_expected_0_1 = [[0., 0., 0.5],
[0., 0., 0.0],
[0., 0., 1.0]]
assert_array_almost_equal(X_trans, X_expected_0_1)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
X_trans_new = scaler.transform(X_new)
X_expected_0_1_new = [[+0., 1., 0.500],
[-1., 0., 0.083],
[+0., 0., 1.333]]
assert_array_almost_equal(X_trans_new, X_expected_0_1_new, decimal=2)
# not default params
scaler = MinMaxScaler(feature_range=(1, 2))
X_trans = scaler.fit_transform(X)
X_expected_1_2 = [[1., 1., 1.5],
[1., 1., 1.0],
[1., 1., 2.0]]
assert_array_almost_equal(X_trans, X_expected_1_2)
# function interface
X_trans = minmax_scale(X)
assert_array_almost_equal(X_trans, X_expected_0_1)
X_trans = minmax_scale(X, feature_range=(1, 2))
assert_array_almost_equal(X_trans, X_expected_1_2)
def test_minmax_scale_axis1():
X = iris.data
X_trans = minmax_scale(X, axis=1)
assert_array_almost_equal(np.min(X_trans, axis=1), 0)
assert_array_almost_equal(np.max(X_trans, axis=1), 1)
def test_min_max_scaler_1d():
# Test scaling of dataset along single axis
for X in [X_1row, X_1col, X_list_1row, X_list_1row]:
scaler = MinMaxScaler(copy=True)
X_scaled = scaler.fit(X).transform(X)
if isinstance(X, list):
X = np.array(X) # cast only after scaling done
if _check_dim_1axis(X) == 1:
assert_array_almost_equal(X_scaled.min(axis=0),
np.zeros(n_features))
assert_array_almost_equal(X_scaled.max(axis=0),
np.zeros(n_features))
else:
assert_array_almost_equal(X_scaled.min(axis=0), .0)
assert_array_almost_equal(X_scaled.max(axis=0), 1.)
assert scaler.n_samples_seen_ == X.shape[0]
# check inverse transform
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_array_almost_equal(X_scaled_back, X)
# Constant feature
X = np.ones((5, 1))
scaler = MinMaxScaler()
X_scaled = scaler.fit(X).transform(X)
assert X_scaled.min() >= 0.
assert X_scaled.max() <= 1.
assert scaler.n_samples_seen_ == X.shape[0]
# Function interface
X_1d = X_1row.ravel()
min_ = X_1d.min()
max_ = X_1d.max()
assert_array_almost_equal((X_1d - min_) / (max_ - min_),
minmax_scale(X_1d, copy=True))
def test_scaler_without_centering():
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
    X[:, 0] = 0.0  # first feature is always zero
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
assert_raises(ValueError, StandardScaler().fit, X_csr)
assert_raises(ValueError, StandardScaler().fit, X_csc)
null_transform = StandardScaler(with_mean=False, with_std=False, copy=True)
X_null = null_transform.fit_transform(X_csr)
assert_array_equal(X_null.data, X_csr.data)
X_orig = null_transform.inverse_transform(X_null)
assert_array_equal(X_orig.data, X_csr.data)
scaler = StandardScaler(with_mean=False).fit(X)
X_scaled = scaler.transform(X, copy=True)
assert not np.any(np.isnan(X_scaled))
scaler_csr = StandardScaler(with_mean=False).fit(X_csr)
X_csr_scaled = scaler_csr.transform(X_csr, copy=True)
assert not np.any(np.isnan(X_csr_scaled.data))
scaler_csc = StandardScaler(with_mean=False).fit(X_csc)
X_csc_scaled = scaler_csc.transform(X_csc, copy=True)
assert not np.any(np.isnan(X_csc_scaled.data))
assert_array_almost_equal(scaler.mean_, scaler_csr.mean_)
assert_array_almost_equal(scaler.var_, scaler_csr.var_)
assert_array_almost_equal(scaler.scale_, scaler_csr.scale_)
assert_array_almost_equal(scaler.mean_, scaler_csc.mean_)
assert_array_almost_equal(scaler.var_, scaler_csc.var_)
assert_array_almost_equal(scaler.scale_, scaler_csc.scale_)
assert_array_almost_equal(
X_scaled.mean(axis=0), [0., -0.01, 2.24, -0.35, -0.78], 2)
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis(X_csr_scaled, 0)
assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0))
assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0))
# Check that X has not been modified (copy)
assert X_scaled is not X
assert X_csr_scaled is not X_csr
X_scaled_back = scaler.inverse_transform(X_scaled)
assert X_scaled_back is not X
assert X_scaled_back is not X_scaled
assert_array_almost_equal(X_scaled_back, X)
X_csr_scaled_back = scaler_csr.inverse_transform(X_csr_scaled)
assert X_csr_scaled_back is not X_csr
assert X_csr_scaled_back is not X_csr_scaled
assert_array_almost_equal(X_csr_scaled_back.toarray(), X)
X_csc_scaled_back = scaler_csr.inverse_transform(X_csc_scaled.tocsc())
assert X_csc_scaled_back is not X_csc
assert X_csc_scaled_back is not X_csc_scaled
assert_array_almost_equal(X_csc_scaled_back.toarray(), X)
@pytest.mark.parametrize("with_mean", [True, False])
@pytest.mark.parametrize("with_std", [True, False])
@pytest.mark.parametrize("array_constructor",
[np.asarray, sparse.csc_matrix, sparse.csr_matrix])
def test_scaler_n_samples_seen_with_nan(with_mean, with_std,
array_constructor):
X = np.array([[0, 1, 3],
[np.nan, 6, 10],
[5, 4, np.nan],
[8, 0, np.nan]],
dtype=np.float64)
X = array_constructor(X)
if sparse.issparse(X) and with_mean:
pytest.skip("'with_mean=True' cannot be used with sparse matrix.")
transformer = StandardScaler(with_mean=with_mean, with_std=with_std)
transformer.fit(X)
assert_array_equal(transformer.n_samples_seen_, np.array([3, 4, 2]))
def _check_identity_scalers_attributes(scaler_1, scaler_2):
assert scaler_1.mean_ is scaler_2.mean_ is None
assert scaler_1.var_ is scaler_2.var_ is None
assert scaler_1.scale_ is scaler_2.scale_ is None
assert scaler_1.n_samples_seen_ == scaler_2.n_samples_seen_
def test_scaler_return_identity():
# test that the scaler return identity when with_mean and with_std are
# False
X_dense = np.array([[0, 1, 3],
[5, 6, 0],
[8, 0, 10]],
dtype=np.float64)
X_csr = sparse.csr_matrix(X_dense)
X_csc = X_csr.tocsc()
transformer_dense = StandardScaler(with_mean=False, with_std=False)
X_trans_dense = transformer_dense.fit_transform(X_dense)
transformer_csr = clone(transformer_dense)
X_trans_csr = transformer_csr.fit_transform(X_csr)
transformer_csc = clone(transformer_dense)
X_trans_csc = transformer_csc.fit_transform(X_csc)
assert_allclose_dense_sparse(X_trans_csr, X_csr)
assert_allclose_dense_sparse(X_trans_csc, X_csc)
assert_allclose(X_trans_dense, X_dense)
for trans_1, trans_2 in itertools.combinations([transformer_dense,
transformer_csr,
transformer_csc],
2):
_check_identity_scalers_attributes(trans_1, trans_2)
transformer_dense.partial_fit(X_dense)
transformer_csr.partial_fit(X_csr)
transformer_csc.partial_fit(X_csc)
for trans_1, trans_2 in itertools.combinations([transformer_dense,
transformer_csr,
transformer_csc],
2):
_check_identity_scalers_attributes(trans_1, trans_2)
transformer_dense.fit(X_dense)
transformer_csr.fit(X_csr)
transformer_csc.fit(X_csc)
for trans_1, trans_2 in itertools.combinations([transformer_dense,
transformer_csr,
transformer_csc],
2):
_check_identity_scalers_attributes(trans_1, trans_2)
def test_scaler_int():
# test that scaler converts integer input to floating
# for both sparse and dense matrices
rng = np.random.RandomState(42)
X = rng.randint(20, size=(4, 5))
    X[:, 0] = 0  # first feature is always zero
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
null_transform = StandardScaler(with_mean=False, with_std=False, copy=True)
with warnings.catch_warnings(record=True):
X_null = null_transform.fit_transform(X_csr)
assert_array_equal(X_null.data, X_csr.data)
X_orig = null_transform.inverse_transform(X_null)
assert_array_equal(X_orig.data, X_csr.data)
with warnings.catch_warnings(record=True):
scaler = StandardScaler(with_mean=False).fit(X)
X_scaled = scaler.transform(X, copy=True)
assert not np.any(np.isnan(X_scaled))
with warnings.catch_warnings(record=True):
scaler_csr = StandardScaler(with_mean=False).fit(X_csr)
X_csr_scaled = scaler_csr.transform(X_csr, copy=True)
assert not np.any(np.isnan(X_csr_scaled.data))
with warnings.catch_warnings(record=True):
scaler_csc = StandardScaler(with_mean=False).fit(X_csc)
X_csc_scaled = scaler_csc.transform(X_csc, copy=True)
assert not np.any(np.isnan(X_csc_scaled.data))
assert_array_almost_equal(scaler.mean_, scaler_csr.mean_)
assert_array_almost_equal(scaler.var_, scaler_csr.var_)
assert_array_almost_equal(scaler.scale_, scaler_csr.scale_)
assert_array_almost_equal(scaler.mean_, scaler_csc.mean_)
assert_array_almost_equal(scaler.var_, scaler_csc.var_)
assert_array_almost_equal(scaler.scale_, scaler_csc.scale_)
assert_array_almost_equal(
X_scaled.mean(axis=0),
[0., 1.109, 1.856, 21., 1.559], 2)
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis(
X_csr_scaled.astype(np.float), 0)
assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0))
assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0))
# Check that X has not been modified (copy)
assert X_scaled is not X
assert X_csr_scaled is not X_csr
X_scaled_back = scaler.inverse_transform(X_scaled)
assert X_scaled_back is not X
assert X_scaled_back is not X_scaled
assert_array_almost_equal(X_scaled_back, X)
X_csr_scaled_back = scaler_csr.inverse_transform(X_csr_scaled)
assert X_csr_scaled_back is not X_csr
assert X_csr_scaled_back is not X_csr_scaled
assert_array_almost_equal(X_csr_scaled_back.toarray(), X)
X_csc_scaled_back = scaler_csr.inverse_transform(X_csc_scaled.tocsc())
assert X_csc_scaled_back is not X_csc
assert X_csc_scaled_back is not X_csc_scaled
assert_array_almost_equal(X_csc_scaled_back.toarray(), X)
def test_scaler_without_copy():
# Check that StandardScaler.fit does not change input
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
    X[:, 0] = 0.0  # first feature is always zero
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
X_copy = X.copy()
StandardScaler(copy=False).fit(X)
assert_array_equal(X, X_copy)
X_csr_copy = X_csr.copy()
StandardScaler(with_mean=False, copy=False).fit(X_csr)
assert_array_equal(X_csr.toarray(), X_csr_copy.toarray())
X_csc_copy = X_csc.copy()
StandardScaler(with_mean=False, copy=False).fit(X_csc)
assert_array_equal(X_csc.toarray(), X_csc_copy.toarray())
def test_scale_sparse_with_mean_raise_exception():
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
# check scaling and fit with direct calls on sparse data
assert_raises(ValueError, scale, X_csr, with_mean=True)
assert_raises(ValueError, StandardScaler(with_mean=True).fit, X_csr)
assert_raises(ValueError, scale, X_csc, with_mean=True)
assert_raises(ValueError, StandardScaler(with_mean=True).fit, X_csc)
# check transform and inverse_transform after a fit on a dense array
scaler = StandardScaler(with_mean=True).fit(X)
assert_raises(ValueError, scaler.transform, X_csr)
assert_raises(ValueError, scaler.transform, X_csc)
X_transformed_csr = sparse.csr_matrix(scaler.transform(X))
assert_raises(ValueError, scaler.inverse_transform, X_transformed_csr)
X_transformed_csc = sparse.csc_matrix(scaler.transform(X))
assert_raises(ValueError, scaler.inverse_transform, X_transformed_csc)
def test_scale_input_finiteness_validation():
# Check if non finite inputs raise ValueError
X = [[np.inf, 5, 6, 7, 8]]
assert_raises_regex(ValueError,
"Input contains infinity or a value too large",
scale, X)
def test_robust_scaler_error_sparse():
X_sparse = sparse.rand(1000, 10)
scaler = RobustScaler(with_centering=True)
err_msg = "Cannot center sparse matrices"
with pytest.raises(ValueError, match=err_msg):
scaler.fit(X_sparse)
@pytest.mark.parametrize("with_centering", [True, False])
@pytest.mark.parametrize("with_scaling", [True, False])
@pytest.mark.parametrize("X", [np.random.randn(10, 3),
sparse.rand(10, 3, density=0.5)])
def test_robust_scaler_attributes(X, with_centering, with_scaling):
# check consistent type of attributes
if with_centering and sparse.issparse(X):
pytest.skip("RobustScaler cannot center sparse matrix")
scaler = RobustScaler(with_centering=with_centering,
with_scaling=with_scaling)
scaler.fit(X)
if with_centering:
assert isinstance(scaler.center_, np.ndarray)
else:
assert scaler.center_ is None
if with_scaling:
assert isinstance(scaler.scale_, np.ndarray)
else:
assert scaler.scale_ is None
def test_robust_scaler_col_zero_sparse():
    # check that the scaler works when there is no data materialized in a
    # column of a sparse matrix
X = np.random.randn(10, 5)
X[:, 0] = 0
X = sparse.csr_matrix(X)
scaler = RobustScaler(with_centering=False)
scaler.fit(X)
assert scaler.scale_[0] == pytest.approx(1)
X_trans = scaler.transform(X)
assert_allclose(X[:, 0].toarray(), X_trans[:, 0].toarray())
def test_robust_scaler_2d_arrays():
# Test robust scaling of 2d array along first axis
rng = np.random.RandomState(0)
X = rng.randn(4, 5)
    X[:, 0] = 0.0  # first feature is always zero
scaler = RobustScaler()
X_scaled = scaler.fit(X).transform(X)
assert_array_almost_equal(np.median(X_scaled, axis=0), 5 * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0)[0], 0)
@pytest.mark.parametrize("density", [0, 0.05, 0.1, 0.5, 1])
@pytest.mark.parametrize("strictly_signed",
['positive', 'negative', 'zeros', None])
def test_robust_scaler_equivalence_dense_sparse(density, strictly_signed):
# Check the equivalence of the fitting with dense and sparse matrices
X_sparse = sparse.rand(1000, 5, density=density).tocsc()
if strictly_signed == 'positive':
X_sparse.data = np.abs(X_sparse.data)
elif strictly_signed == 'negative':
X_sparse.data = - np.abs(X_sparse.data)
elif strictly_signed == 'zeros':
X_sparse.data = np.zeros(X_sparse.data.shape, dtype=np.float64)
X_dense = X_sparse.toarray()
scaler_sparse = RobustScaler(with_centering=False)
scaler_dense = RobustScaler(with_centering=False)
scaler_sparse.fit(X_sparse)
scaler_dense.fit(X_dense)
assert_allclose(scaler_sparse.scale_, scaler_dense.scale_)
def test_robust_scaler_transform_one_row_csr():
# Check RobustScaler on transforming csr matrix with one row
rng = np.random.RandomState(0)
X = rng.randn(4, 5)
single_row = np.array([[0.1, 1., 2., 0., -1.]])
scaler = RobustScaler(with_centering=False)
scaler = scaler.fit(X)
row_trans = scaler.transform(sparse.csr_matrix(single_row))
row_expected = single_row / scaler.scale_
assert_array_almost_equal(row_trans.toarray(), row_expected)
row_scaled_back = scaler.inverse_transform(row_trans)
assert_array_almost_equal(single_row, row_scaled_back.toarray())
def test_robust_scaler_iris():
X = iris.data
scaler = RobustScaler()
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(np.median(X_trans, axis=0), 0)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
q = np.percentile(X_trans, q=(25, 75), axis=0)
iqr = q[1] - q[0]
assert_array_almost_equal(iqr, 1)
def test_robust_scaler_iris_quantiles():
X = iris.data
scaler = RobustScaler(quantile_range=(10, 90))
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(np.median(X_trans, axis=0), 0)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
q = np.percentile(X_trans, q=(10, 90), axis=0)
q_range = q[1] - q[0]
assert_array_almost_equal(q_range, 1)
def test_quantile_transform_iris():
X = iris.data
# uniform output distribution
transformer = QuantileTransformer(n_quantiles=30)
X_trans = transformer.fit_transform(X)
X_trans_inv = transformer.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# normal output distribution
transformer = QuantileTransformer(n_quantiles=30,
output_distribution='normal')
X_trans = transformer.fit_transform(X)
X_trans_inv = transformer.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# make sure it is possible to take the inverse of a sparse matrix
# which contain negative value; this is the case in the iris dataset
X_sparse = sparse.csc_matrix(X)
X_sparse_tran = transformer.fit_transform(X_sparse)
X_sparse_tran_inv = transformer.inverse_transform(X_sparse_tran)
assert_array_almost_equal(X_sparse.A, X_sparse_tran_inv.A)
def test_quantile_transform_check_error():
X = np.transpose([[0, 25, 50, 0, 0, 0, 75, 0, 0, 100],
[2, 4, 0, 0, 6, 8, 0, 10, 0, 0],
[0, 0, 2.6, 4.1, 0, 0, 2.3, 0, 9.5, 0.1]])
X = sparse.csc_matrix(X)
X_neg = np.transpose([[0, 25, 50, 0, 0, 0, 75, 0, 0, 100],
[-2, 4, 0, 0, 6, 8, 0, 10, 0, 0],
[0, 0, 2.6, 4.1, 0, 0, 2.3, 0, 9.5, 0.1]])
X_neg = sparse.csc_matrix(X_neg)
assert_raises_regex(ValueError, "Invalid value for 'n_quantiles': 0.",
QuantileTransformer(n_quantiles=0).fit, X)
assert_raises_regex(ValueError, "Invalid value for 'subsample': 0.",
QuantileTransformer(subsample=0).fit, X)
assert_raises_regex(ValueError, "The number of quantiles cannot be"
" greater than the number of samples used. Got"
" 1000 quantiles and 10 samples.",
QuantileTransformer(subsample=10).fit, X)
transformer = QuantileTransformer(n_quantiles=10)
assert_raises_regex(ValueError, "QuantileTransformer only accepts "
"non-negative sparse matrices.",
transformer.fit, X_neg)
transformer.fit(X)
assert_raises_regex(ValueError, "QuantileTransformer only accepts "
"non-negative sparse matrices.",
transformer.transform, X_neg)
X_bad_feat = np.transpose([[0, 25, 50, 0, 0, 0, 75, 0, 0, 100],
[0, 0, 2.6, 4.1, 0, 0, 2.3, 0, 9.5, 0.1]])
assert_raises_regex(ValueError, "X does not have the same number of "
"features as the previously fitted data. Got 2"
" instead of 3.",
transformer.transform, X_bad_feat)
assert_raises_regex(ValueError, "X does not have the same number of "
"features as the previously fitted data. Got 2"
" instead of 3.",
transformer.inverse_transform, X_bad_feat)
transformer = QuantileTransformer(n_quantiles=10,
output_distribution='rnd')
# check that an error is raised at fit time
assert_raises_regex(ValueError, "'output_distribution' has to be either"
" 'normal' or 'uniform'. Got 'rnd' instead.",
transformer.fit, X)
# check that an error is raised at transform time
transformer.output_distribution = 'uniform'
transformer.fit(X)
X_tran = transformer.transform(X)
transformer.output_distribution = 'rnd'
assert_raises_regex(ValueError, "'output_distribution' has to be either"
" 'normal' or 'uniform'. Got 'rnd' instead.",
transformer.transform, X)
# check that an error is raised at inverse_transform time
assert_raises_regex(ValueError, "'output_distribution' has to be either"
" 'normal' or 'uniform'. Got 'rnd' instead.",
transformer.inverse_transform, X_tran)
# check that an error is raised if input is scalar
assert_raise_message(ValueError,
'Expected 2D array, got scalar array instead',
transformer.transform, 10)
    # check that a warning is raised if n_quantiles > n_samples
transformer = QuantileTransformer(n_quantiles=100)
warn_msg = "n_quantiles is set to n_samples"
with pytest.warns(UserWarning, match=warn_msg) as record:
transformer.fit(X)
assert len(record) == 1
assert transformer.n_quantiles_ == X.shape[0]
def test_quantile_transform_sparse_ignore_zeros():
X = np.array([[0, 1],
[0, 0],
[0, 2],
[0, 2],
[0, 1]])
X_sparse = sparse.csc_matrix(X)
transformer = QuantileTransformer(ignore_implicit_zeros=True,
n_quantiles=5)
    # dense case -> warning raised
assert_warns_message(UserWarning, "'ignore_implicit_zeros' takes effect"
" only with sparse matrix. This parameter has no"
" effect.", transformer.fit, X)
X_expected = np.array([[0, 0],
[0, 0],
[0, 1],
[0, 1],
[0, 0]])
X_trans = transformer.fit_transform(X_sparse)
assert_almost_equal(X_expected, X_trans.A)
# consider the case where sparse entries are missing values and user-given
# zeros are to be considered
X_data = np.array([0, 0, 1, 0, 2, 2, 1, 0, 1, 2, 0])
X_col = np.array([0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1])
X_row = np.array([0, 4, 0, 1, 2, 3, 4, 5, 6, 7, 8])
X_sparse = sparse.csc_matrix((X_data, (X_row, X_col)))
X_trans = transformer.fit_transform(X_sparse)
X_expected = np.array([[0., 0.5],
[0., 0.],
[0., 1.],
[0., 1.],
[0., 0.5],
[0., 0.],
[0., 0.5],
[0., 1.],
[0., 0.]])
assert_almost_equal(X_expected, X_trans.A)
transformer = QuantileTransformer(ignore_implicit_zeros=True,
n_quantiles=5)
X_data = np.array([-1, -1, 1, 0, 0, 0, 1, -1, 1])
X_col = np.array([0, 0, 1, 1, 1, 1, 1, 1, 1])
X_row = np.array([0, 4, 0, 1, 2, 3, 4, 5, 6])
X_sparse = sparse.csc_matrix((X_data, (X_row, X_col)))
X_trans = transformer.fit_transform(X_sparse)
X_expected = np.array([[0, 1],
[0, 0.375],
[0, 0.375],
[0, 0.375],
[0, 1],
[0, 0],
[0, 1]])
assert_almost_equal(X_expected, X_trans.A)
assert_almost_equal(X_sparse.A, transformer.inverse_transform(X_trans).A)
# check in conjunction with subsampling
transformer = QuantileTransformer(ignore_implicit_zeros=True,
n_quantiles=5,
subsample=8,
random_state=0)
X_trans = transformer.fit_transform(X_sparse)
assert_almost_equal(X_expected, X_trans.A)
assert_almost_equal(X_sparse.A, transformer.inverse_transform(X_trans).A)
def test_quantile_transform_dense_toy():
X = np.array([[0, 2, 2.6],
[25, 4, 4.1],
[50, 6, 2.3],
[75, 8, 9.5],
[100, 10, 0.1]])
transformer = QuantileTransformer(n_quantiles=5)
transformer.fit(X)
    # using a uniform output, each entry of X should be mapped between 0 and 1
# and equally spaced
X_trans = transformer.fit_transform(X)
X_expected = np.tile(np.linspace(0, 1, num=5), (3, 1)).T
assert_almost_equal(np.sort(X_trans, axis=0), X_expected)
X_test = np.array([
[-1, 1, 0],
[101, 11, 10],
])
X_expected = np.array([
[0, 0, 0],
[1, 1, 1],
])
assert_array_almost_equal(transformer.transform(X_test), X_expected)
X_trans_inv = transformer.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
def test_quantile_transform_subsampling():
    # Test that subsampling the input yields consistent results. We check
    # that the computed quantiles are almost mapped to a [0, 1] vector where
    # values are equally spaced. The infinity norm is checked to be smaller
    # than a given threshold. This is repeated 5 times.
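    # Concretely (added note), the criterion below is
    #     max_i |linspace(0, 1, n_quantiles)[i] - quantiles_[i]| < tol
    # with tol = 1e-2 in the dense case and 1e-1 in the sparse case.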
# dense support
n_samples = 1000000
n_quantiles = 1000
X = np.sort(np.random.sample((n_samples, 1)), axis=0)
ROUND = 5
inf_norm_arr = []
for random_state in range(ROUND):
transformer = QuantileTransformer(random_state=random_state,
n_quantiles=n_quantiles,
subsample=n_samples // 10)
transformer.fit(X)
diff = (np.linspace(0, 1, n_quantiles) -
np.ravel(transformer.quantiles_))
inf_norm = np.max(np.abs(diff))
assert inf_norm < 1e-2
inf_norm_arr.append(inf_norm)
    # each random subsampling yields a unique approximation to the expected
    # linspace CDF
assert len(np.unique(inf_norm_arr)) == len(inf_norm_arr)
# sparse support
X = sparse.rand(n_samples, 1, density=.99, format='csc', random_state=0)
inf_norm_arr = []
for random_state in range(ROUND):
transformer = QuantileTransformer(random_state=random_state,
n_quantiles=n_quantiles,
subsample=n_samples // 10)
transformer.fit(X)
diff = (np.linspace(0, 1, n_quantiles) -
np.ravel(transformer.quantiles_))
inf_norm = np.max(np.abs(diff))
assert inf_norm < 1e-1
inf_norm_arr.append(inf_norm)
    # each random subsampling yields a unique approximation to the expected
    # linspace CDF
assert len(np.unique(inf_norm_arr)) == len(inf_norm_arr)
def test_quantile_transform_sparse_toy():
X = np.array([[0., 2., 0.],
[25., 4., 0.],
[50., 0., 2.6],
[0., 0., 4.1],
[0., 6., 0.],
[0., 8., 0.],
[75., 0., 2.3],
[0., 10., 0.],
[0., 0., 9.5],
[100., 0., 0.1]])
X = sparse.csc_matrix(X)
transformer = QuantileTransformer(n_quantiles=10)
transformer.fit(X)
X_trans = transformer.fit_transform(X)
assert_array_almost_equal(np.min(X_trans.toarray(), axis=0), 0.)
assert_array_almost_equal(np.max(X_trans.toarray(), axis=0), 1.)
X_trans_inv = transformer.inverse_transform(X_trans)
assert_array_almost_equal(X.toarray(), X_trans_inv.toarray())
transformer_dense = QuantileTransformer(n_quantiles=10).fit(
X.toarray())
X_trans = transformer_dense.transform(X)
assert_array_almost_equal(np.min(X_trans.toarray(), axis=0), 0.)
assert_array_almost_equal(np.max(X_trans.toarray(), axis=0), 1.)
X_trans_inv = transformer_dense.inverse_transform(X_trans)
assert_array_almost_equal(X.toarray(), X_trans_inv.toarray())
@pytest.mark.filterwarnings("ignore: The default value of `copy`") # 0.23
def test_quantile_transform_axis1():
X = np.array([[0, 25, 50, 75, 100],
[2, 4, 6, 8, 10],
[2.6, 4.1, 2.3, 9.5, 0.1]])
X_trans_a0 = quantile_transform(X.T, axis=0, n_quantiles=5)
X_trans_a1 = quantile_transform(X, axis=1, n_quantiles=5)
assert_array_almost_equal(X_trans_a0, X_trans_a1.T)
def test_quantile_transform_bounds():
    # Lower and upper bounds are manually mapped. We check that in the case
    # of a constant feature and a binary feature, the bounds are properly
    # mapped.
X_dense = np.array([[0, 0],
[0, 0],
[1, 0]])
X_sparse = sparse.csc_matrix(X_dense)
# check sparse and dense are consistent
X_trans = QuantileTransformer(n_quantiles=3,
random_state=0).fit_transform(X_dense)
assert_array_almost_equal(X_trans, X_dense)
X_trans_sp = QuantileTransformer(n_quantiles=3,
random_state=0).fit_transform(X_sparse)
assert_array_almost_equal(X_trans_sp.A, X_dense)
assert_array_almost_equal(X_trans, X_trans_sp.A)
# check the consistency of the bounds by learning on 1 matrix
# and transforming another
X = np.array([[0, 1],
[0, 0.5],
[1, 0]])
X1 = np.array([[0, 0.1],
[0, 0.5],
[1, 0.1]])
transformer = QuantileTransformer(n_quantiles=3).fit(X)
X_trans = transformer.transform(X1)
assert_array_almost_equal(X_trans, X1)
# check that values outside of the range learned will be mapped properly.
X = np.random.random((1000, 1))
transformer = QuantileTransformer()
transformer.fit(X)
assert (transformer.transform([[-10]]) ==
transformer.transform([[np.min(X)]]))
assert (transformer.transform([[10]]) ==
transformer.transform([[np.max(X)]]))
assert (transformer.inverse_transform([[-10]]) ==
transformer.inverse_transform(
[[np.min(transformer.references_)]]))
assert (transformer.inverse_transform([[10]]) ==
transformer.inverse_transform(
[[np.max(transformer.references_)]]))
def test_quantile_transform_and_inverse():
X_1 = iris.data
X_2 = np.array([[0.], [BOUNDS_THRESHOLD / 10], [1.5], [2], [3], [3], [4]])
for X in [X_1, X_2]:
transformer = QuantileTransformer(n_quantiles=1000, random_state=0)
X_trans = transformer.fit_transform(X)
X_trans_inv = transformer.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv, decimal=9)
def test_quantile_transform_nan():
X = np.array([[np.nan, 0, 0, 1],
[np.nan, np.nan, 0, 0.5],
[np.nan, 1, 1, 0]])
transformer = QuantileTransformer(n_quantiles=10, random_state=42)
transformer.fit_transform(X)
    # check that the quantiles of the first column are all NaN
assert np.isnan(transformer.quantiles_[:, 0]).all()
    # all other columns should not contain NaN
assert not np.isnan(transformer.quantiles_[:, 1:]).any()
def test_deprecated_quantile_transform_copy():
future_message = ("The default value of `copy` will change from False to "
"True in 0.23 in order to make it more consistent with "
"the default `copy` values of other functions in "
":mod:`sklearn.preprocessing.data` and prevent "
"unexpected side effects by modifying the value of `X` "
"inplace. To avoid inplace modifications of `X`, it is "
"recommended to explicitly set `copy=True`")
assert_warns_message(FutureWarning, future_message, quantile_transform,
np.array([[0, 1], [0, 0.5], [1, 0]]))
def test_robust_scaler_invalid_range():
for range_ in [
(-1, 90),
(-2, -3),
(10, 101),
(100.5, 101),
(90, 50),
]:
scaler = RobustScaler(quantile_range=range_)
assert_raises_regex(ValueError, r'Invalid quantile range: \(',
scaler.fit, iris.data)
def test_scale_function_without_centering():
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
    X[:, 0] = 0.0  # first feature is always zero
X_csr = sparse.csr_matrix(X)
X_scaled = scale(X, with_mean=False)
assert not np.any(np.isnan(X_scaled))
X_csr_scaled = scale(X_csr, with_mean=False)
assert not np.any(np.isnan(X_csr_scaled.data))
# test csc has same outcome
X_csc_scaled = scale(X_csr.tocsc(), with_mean=False)
assert_array_almost_equal(X_scaled, X_csc_scaled.toarray())
# raises value error on axis != 0
assert_raises(ValueError, scale, X_csr, with_mean=False, axis=1)
assert_array_almost_equal(X_scaled.mean(axis=0),
[0., -0.01, 2.24, -0.35, -0.78], 2)
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
    # Check that X_scaled is a copy (the original X is left untouched)
assert X_scaled is not X
X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis(X_csr_scaled, 0)
assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0))
assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0))
# null scale
X_csr_scaled = scale(X_csr, with_mean=False, with_std=False, copy=True)
assert_array_almost_equal(X_csr.toarray(), X_csr_scaled.toarray())
def test_robust_scale_axis1():
X = iris.data
X_trans = robust_scale(X, axis=1)
assert_array_almost_equal(np.median(X_trans, axis=1), 0)
q = np.percentile(X_trans, q=(25, 75), axis=1)
iqr = q[1] - q[0]
assert_array_almost_equal(iqr, 1)
def test_robust_scale_1d_array():
X = iris.data[:, 1]
X_trans = robust_scale(X)
assert_array_almost_equal(np.median(X_trans), 0)
q = np.percentile(X_trans, q=(25, 75))
iqr = q[1] - q[0]
assert_array_almost_equal(iqr, 1)
def test_robust_scaler_zero_variance_features():
# Check RobustScaler on toy data with zero variance features
X = [[0., 1., +0.5],
[0., 1., -0.1],
[0., 1., +1.1]]
scaler = RobustScaler()
X_trans = scaler.fit_transform(X)
# NOTE: for such a small sample size, what we expect in the third column
# depends HEAVILY on the method used to calculate quantiles. The values
    # here were calculated to fit the quantiles produced by np.percentile
    # using numpy 1.9. Calculating quantiles with
# scipy.stats.mstats.scoreatquantile or scipy.stats.mstats.mquantiles
# would yield very different results!
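    # Concretely (added note): for the third column [0.5, -0.1, 1.1],
    # np.percentile gives q25 = 0.2, median = 0.5 and q75 = 0.8, so the
    # scaled values are (x - 0.5) / 0.6, i.e. 0.0, -1.0 and +1.0.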
X_expected = [[0., 0., +0.0],
[0., 0., -1.0],
[0., 0., +1.0]]
assert_array_almost_equal(X_trans, X_expected)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# make sure new data gets transformed correctly
X_new = [[+0., 2., 0.5],
[-1., 1., 0.0],
[+0., 1., 1.5]]
X_trans_new = scaler.transform(X_new)
X_expected_new = [[+0., 1., +0.],
[-1., 0., -0.83333],
[+0., 0., +1.66667]]
assert_array_almost_equal(X_trans_new, X_expected_new, decimal=3)
def test_maxabs_scaler_zero_variance_features():
# Check MaxAbsScaler on toy data with zero variance features
X = [[0., 1., +0.5],
[0., 1., -0.3],
[0., 1., +1.5],
[0., 0., +0.0]]
scaler = MaxAbsScaler()
X_trans = scaler.fit_transform(X)
X_expected = [[0., 1., 1.0 / 3.0],
[0., 1., -0.2],
[0., 1., 1.0],
[0., 0., 0.0]]
assert_array_almost_equal(X_trans, X_expected)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# make sure new data gets transformed correctly
X_new = [[+0., 2., 0.5],
[-1., 1., 0.0],
[+0., 1., 1.5]]
X_trans_new = scaler.transform(X_new)
X_expected_new = [[+0., 2.0, 1.0 / 3.0],
[-1., 1.0, 0.0],
[+0., 1.0, 1.0]]
assert_array_almost_equal(X_trans_new, X_expected_new, decimal=2)
# function interface
X_trans = maxabs_scale(X)
assert_array_almost_equal(X_trans, X_expected)
# sparse data
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
X_trans_csr = scaler.fit_transform(X_csr)
X_trans_csc = scaler.fit_transform(X_csc)
X_expected = [[0., 1., 1.0 / 3.0],
[0., 1., -0.2],
[0., 1., 1.0],
[0., 0., 0.0]]
assert_array_almost_equal(X_trans_csr.A, X_expected)
assert_array_almost_equal(X_trans_csc.A, X_expected)
X_trans_csr_inv = scaler.inverse_transform(X_trans_csr)
X_trans_csc_inv = scaler.inverse_transform(X_trans_csc)
assert_array_almost_equal(X, X_trans_csr_inv.A)
assert_array_almost_equal(X, X_trans_csc_inv.A)
def test_maxabs_scaler_large_negative_value():
# Check MaxAbsScaler on toy data with a large negative value
X = [[0., 1., +0.5, -1.0],
[0., 1., -0.3, -0.5],
[0., 1., -100.0, 0.0],
[0., 0., +0.0, -2.0]]
scaler = MaxAbsScaler()
X_trans = scaler.fit_transform(X)
X_expected = [[0., 1., 0.005, -0.5],
[0., 1., -0.003, -0.25],
[0., 1., -1.0, 0.0],
[0., 0., 0.0, -1.0]]
assert_array_almost_equal(X_trans, X_expected)
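# Illustrative sketch (an addition, not part of the original suite): for
# columns that are not identically zero, MaxAbsScaler is equivalent to
# dividing each column by its maximum absolute value.
def _maxabs_scaler_manual_sketch():
    X = np.array([[1.0, -2.0],
                  [-4.0, 1.0],
                  [2.0, 0.5]])
    manual = X / np.abs(X).max(axis=0)
    assert_array_almost_equal(MaxAbsScaler().fit_transform(X), manual)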
def test_maxabs_scaler_transform_one_row_csr():
# Check MaxAbsScaler on transforming csr matrix with one row
X = sparse.csr_matrix([[0.5, 1., 1.]])
scaler = MaxAbsScaler()
scaler = scaler.fit(X)
X_trans = scaler.transform(X)
X_expected = sparse.csr_matrix([[1., 1., 1.]])
assert_array_almost_equal(X_trans.toarray(), X_expected.toarray())
X_scaled_back = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X.toarray(), X_scaled_back.toarray())
def test_maxabs_scaler_1d():
# Test scaling of dataset along single axis
    for X in [X_1row, X_1col, X_list_1row, X_list_1col]:
scaler = MaxAbsScaler(copy=True)
X_scaled = scaler.fit(X).transform(X)
if isinstance(X, list):
X = np.array(X) # cast only after scaling done
if _check_dim_1axis(X) == 1:
assert_array_almost_equal(np.abs(X_scaled.max(axis=0)),
np.ones(n_features))
else:
assert_array_almost_equal(np.abs(X_scaled.max(axis=0)), 1.)
assert scaler.n_samples_seen_ == X.shape[0]
# check inverse transform
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_array_almost_equal(X_scaled_back, X)
# Constant feature
X = np.ones((5, 1))
scaler = MaxAbsScaler()
X_scaled = scaler.fit(X).transform(X)
assert_array_almost_equal(np.abs(X_scaled.max(axis=0)), 1.)
assert scaler.n_samples_seen_ == X.shape[0]
# function interface
X_1d = X_1row.ravel()
max_abs = np.abs(X_1d).max()
assert_array_almost_equal(X_1d / max_abs, maxabs_scale(X_1d, copy=True))
def test_maxabs_scaler_partial_fit():
    # Test that partial_fit, run over many batches of size 1 and 50,
    # gives the same results as a single call to fit
X = X_2d[:100, :]
n = X.shape[0]
for chunk_size in [1, 2, 50, n, n + 42]:
        # Check max_abs_ at the end of the process
scaler_batch = MaxAbsScaler().fit(X)
scaler_incr = MaxAbsScaler()
scaler_incr_csr = MaxAbsScaler()
scaler_incr_csc = MaxAbsScaler()
for batch in gen_batches(n, chunk_size):
scaler_incr = scaler_incr.partial_fit(X[batch])
X_csr = sparse.csr_matrix(X[batch])
scaler_incr_csr = scaler_incr_csr.partial_fit(X_csr)
X_csc = sparse.csc_matrix(X[batch])
scaler_incr_csc = scaler_incr_csc.partial_fit(X_csc)
assert_array_almost_equal(scaler_batch.max_abs_, scaler_incr.max_abs_)
assert_array_almost_equal(scaler_batch.max_abs_,
scaler_incr_csr.max_abs_)
assert_array_almost_equal(scaler_batch.max_abs_,
scaler_incr_csc.max_abs_)
assert scaler_batch.n_samples_seen_ == scaler_incr.n_samples_seen_
assert (scaler_batch.n_samples_seen_ ==
scaler_incr_csr.n_samples_seen_)
assert (scaler_batch.n_samples_seen_ ==
scaler_incr_csc.n_samples_seen_)
assert_array_almost_equal(scaler_batch.scale_, scaler_incr.scale_)
assert_array_almost_equal(scaler_batch.scale_, scaler_incr_csr.scale_)
assert_array_almost_equal(scaler_batch.scale_, scaler_incr_csc.scale_)
assert_array_almost_equal(scaler_batch.transform(X),
scaler_incr.transform(X))
        # Check max_abs_ after a single step
batch0 = slice(0, chunk_size)
scaler_batch = MaxAbsScaler().fit(X[batch0])
scaler_incr = MaxAbsScaler().partial_fit(X[batch0])
assert_array_almost_equal(scaler_batch.max_abs_, scaler_incr.max_abs_)
assert scaler_batch.n_samples_seen_ == scaler_incr.n_samples_seen_
assert_array_almost_equal(scaler_batch.scale_, scaler_incr.scale_)
assert_array_almost_equal(scaler_batch.transform(X),
scaler_incr.transform(X))
        # Check the incremental count of samples seen across the partial fits
scaler_batch = MaxAbsScaler().fit(X)
scaler_incr = MaxAbsScaler() # Clean estimator
for i, batch in enumerate(gen_batches(n, chunk_size)):
scaler_incr = scaler_incr.partial_fit(X[batch])
assert_correct_incr(i, batch_start=batch.start,
batch_stop=batch.stop, n=n,
chunk_size=chunk_size,
n_samples_seen=scaler_incr.n_samples_seen_)
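# Illustrative sketch (an addition): the typical out-of-core usage pattern of
# partial_fit, feeding the data chunk by chunk. The chunking scheme below is
# only an assumption made for the demonstration.
def _maxabs_partial_fit_usage_sketch():
    rng = np.random.RandomState(0)
    chunks = [rng.randn(10, 3) for _ in range(5)]
    scaler = MaxAbsScaler()
    for chunk in chunks:
        scaler.partial_fit(chunk)
    full = np.vstack(chunks)
    assert_array_almost_equal(scaler.max_abs_, np.abs(full).max(axis=0))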
def test_normalizer_l1():
rng = np.random.RandomState(0)
X_dense = rng.randn(4, 5)
X_sparse_unpruned = sparse.csr_matrix(X_dense)
# set the row number 3 to zero
X_dense[3, :] = 0.0
# set the row number 3 to zero without pruning (can happen in real life)
indptr_3 = X_sparse_unpruned.indptr[3]
indptr_4 = X_sparse_unpruned.indptr[4]
X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0
# build the pruned variant using the regular constructor
X_sparse_pruned = sparse.csr_matrix(X_dense)
# check inputs that support the no-copy optim
for X in (X_dense, X_sparse_pruned, X_sparse_unpruned):
normalizer = Normalizer(norm='l1', copy=True)
X_norm = normalizer.transform(X)
assert X_norm is not X
X_norm1 = toarray(X_norm)
normalizer = Normalizer(norm='l1', copy=False)
X_norm = normalizer.transform(X)
assert X_norm is X
X_norm2 = toarray(X_norm)
for X_norm in (X_norm1, X_norm2):
row_sums = np.abs(X_norm).sum(axis=1)
for i in range(3):
assert_almost_equal(row_sums[i], 1.0)
assert_almost_equal(row_sums[3], 0.0)
# check input for which copy=False won't prevent a copy
for init in (sparse.coo_matrix, sparse.csc_matrix, sparse.lil_matrix):
X = init(X_dense)
        X_norm = Normalizer(norm='l2', copy=False).transform(X)
assert X_norm is not X
assert isinstance(X_norm, sparse.csr_matrix)
X_norm = toarray(X_norm)
for i in range(3):
assert_almost_equal(row_sums[i], 1.0)
assert_almost_equal(la.norm(X_norm[3]), 0.0)
def test_normalizer_l2():
rng = np.random.RandomState(0)
X_dense = rng.randn(4, 5)
X_sparse_unpruned = sparse.csr_matrix(X_dense)
# set the row number 3 to zero
X_dense[3, :] = 0.0
# set the row number 3 to zero without pruning (can happen in real life)
indptr_3 = X_sparse_unpruned.indptr[3]
indptr_4 = X_sparse_unpruned.indptr[4]
X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0
# build the pruned variant using the regular constructor
X_sparse_pruned = sparse.csr_matrix(X_dense)
# check inputs that support the no-copy optim
for X in (X_dense, X_sparse_pruned, X_sparse_unpruned):
normalizer = Normalizer(norm='l2', copy=True)
X_norm1 = normalizer.transform(X)
assert X_norm1 is not X
X_norm1 = toarray(X_norm1)
normalizer = Normalizer(norm='l2', copy=False)
X_norm2 = normalizer.transform(X)
assert X_norm2 is X
X_norm2 = toarray(X_norm2)
for X_norm in (X_norm1, X_norm2):
for i in range(3):
assert_almost_equal(la.norm(X_norm[i]), 1.0)
assert_almost_equal(la.norm(X_norm[3]), 0.0)
# check input for which copy=False won't prevent a copy
for init in (sparse.coo_matrix, sparse.csc_matrix, sparse.lil_matrix):
X = init(X_dense)
        X_norm = Normalizer(norm='l2', copy=False).transform(X)
assert X_norm is not X
assert isinstance(X_norm, sparse.csr_matrix)
X_norm = toarray(X_norm)
for i in range(3):
assert_almost_equal(la.norm(X_norm[i]), 1.0)
assert_almost_equal(la.norm(X_norm[3]), 0.0)
def test_normalizer_max():
rng = np.random.RandomState(0)
X_dense = rng.randn(4, 5)
X_sparse_unpruned = sparse.csr_matrix(X_dense)
# set the row number 3 to zero
X_dense[3, :] = 0.0
# set the row number 3 to zero without pruning (can happen in real life)
indptr_3 = X_sparse_unpruned.indptr[3]
indptr_4 = X_sparse_unpruned.indptr[4]
X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0
# build the pruned variant using the regular constructor
X_sparse_pruned = sparse.csr_matrix(X_dense)
# check inputs that support the no-copy optim
for X in (X_dense, X_sparse_pruned, X_sparse_unpruned):
normalizer = Normalizer(norm='max', copy=True)
X_norm1 = normalizer.transform(X)
assert X_norm1 is not X
X_norm1 = toarray(X_norm1)
normalizer = Normalizer(norm='max', copy=False)
X_norm2 = normalizer.transform(X)
assert X_norm2 is X
X_norm2 = toarray(X_norm2)
for X_norm in (X_norm1, X_norm2):
row_maxs = X_norm.max(axis=1)
for i in range(3):
assert_almost_equal(row_maxs[i], 1.0)
assert_almost_equal(row_maxs[3], 0.0)
# check input for which copy=False won't prevent a copy
for init in (sparse.coo_matrix, sparse.csc_matrix, sparse.lil_matrix):
X = init(X_dense)
        X_norm = Normalizer(norm='l2', copy=False).transform(X)
assert X_norm is not X
assert isinstance(X_norm, sparse.csr_matrix)
X_norm = toarray(X_norm)
for i in range(3):
assert_almost_equal(row_maxs[i], 1.0)
assert_almost_equal(la.norm(X_norm[3]), 0.0)
def test_normalize():
# Test normalize function
# Only tests functionality not used by the tests for Normalizer.
X = np.random.RandomState(37).randn(3, 2)
assert_array_equal(normalize(X, copy=False),
normalize(X.T, axis=0, copy=False).T)
assert_raises(ValueError, normalize, [[0]], axis=2)
assert_raises(ValueError, normalize, [[0]], norm='l3')
rs = np.random.RandomState(0)
X_dense = rs.randn(10, 5)
X_sparse = sparse.csr_matrix(X_dense)
ones = np.ones((10))
for X in (X_dense, X_sparse):
for dtype in (np.float32, np.float64):
for norm in ('l1', 'l2'):
X = X.astype(dtype)
X_norm = normalize(X, norm=norm)
assert X_norm.dtype == dtype
X_norm = toarray(X_norm)
if norm == 'l1':
row_sums = np.abs(X_norm).sum(axis=1)
else:
X_norm_squared = X_norm**2
row_sums = X_norm_squared.sum(axis=1)
assert_array_almost_equal(row_sums, ones)
# Test return_norm
X_dense = np.array([[3.0, 0, 4.0], [1.0, 0.0, 0.0], [2.0, 3.0, 0.0]])
for norm in ('l1', 'l2', 'max'):
_, norms = normalize(X_dense, norm=norm, return_norm=True)
if norm == 'l1':
assert_array_almost_equal(norms, np.array([7.0, 1.0, 5.0]))
elif norm == 'l2':
assert_array_almost_equal(norms, np.array([5.0, 1.0, 3.60555127]))
else:
assert_array_almost_equal(norms, np.array([4.0, 1.0, 3.0]))
X_sparse = sparse.csr_matrix(X_dense)
for norm in ('l1', 'l2'):
assert_raises(NotImplementedError, normalize, X_sparse,
norm=norm, return_norm=True)
_, norms = normalize(X_sparse, norm='max', return_norm=True)
assert_array_almost_equal(norms, np.array([4.0, 1.0, 3.0]))
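# Illustrative sketch (an addition, not part of the original suite): row-wise
# l2 normalization is just a division of each row by its euclidean norm.
def _normalize_manual_sketch():
    X = np.array([[3.0, 4.0],
                  [1.0, 1.0]])
    manual = X / np.linalg.norm(X, axis=1, keepdims=True)
    assert_array_almost_equal(normalize(X, norm='l2'), manual)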
def test_binarizer():
X_ = np.array([[1, 0, 5], [2, 3, -1]])
for init in (np.array, list, sparse.csr_matrix, sparse.csc_matrix):
X = init(X_.copy())
binarizer = Binarizer(threshold=2.0, copy=True)
X_bin = toarray(binarizer.transform(X))
assert np.sum(X_bin == 0) == 4
assert np.sum(X_bin == 1) == 2
X_bin = binarizer.transform(X)
assert sparse.issparse(X) == sparse.issparse(X_bin)
binarizer = Binarizer(copy=True).fit(X)
X_bin = toarray(binarizer.transform(X))
assert X_bin is not X
assert np.sum(X_bin == 0) == 2
assert np.sum(X_bin == 1) == 4
binarizer = Binarizer(copy=True)
X_bin = binarizer.transform(X)
assert X_bin is not X
X_bin = toarray(X_bin)
assert np.sum(X_bin == 0) == 2
assert np.sum(X_bin == 1) == 4
binarizer = Binarizer(copy=False)
X_bin = binarizer.transform(X)
if init is not list:
assert X_bin is X
binarizer = Binarizer(copy=False)
X_float = np.array([[1, 0, 5], [2, 3, -1]], dtype=np.float64)
X_bin = binarizer.transform(X_float)
if init is not list:
assert X_bin is X_float
X_bin = toarray(X_bin)
assert np.sum(X_bin == 0) == 2
assert np.sum(X_bin == 1) == 4
binarizer = Binarizer(threshold=-0.5, copy=True)
for init in (np.array, list):
X = init(X_.copy())
X_bin = toarray(binarizer.transform(X))
assert np.sum(X_bin == 0) == 1
assert np.sum(X_bin == 1) == 5
X_bin = binarizer.transform(X)
# Cannot use threshold < 0 for sparse
assert_raises(ValueError, binarizer.transform, sparse.csc_matrix(X))
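# Illustrative sketch (an addition): Binarizer maps values strictly greater
# than `threshold` to 1 and everything else to 0.
def _binarizer_manual_sketch():
    X = np.array([[1.0, -0.5, 2.0],
                  [0.0, 3.0, 0.5]])
    expected = (X > 0.5).astype(X.dtype)
    assert_array_almost_equal(Binarizer(threshold=0.5).fit_transform(X),
                              expected)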
def test_center_kernel():
# Test that KernelCenterer is equivalent to StandardScaler
# in feature space
rng = np.random.RandomState(0)
X_fit = rng.random_sample((5, 4))
scaler = StandardScaler(with_std=False)
scaler.fit(X_fit)
X_fit_centered = scaler.transform(X_fit)
K_fit = np.dot(X_fit, X_fit.T)
# center fit time matrix
centerer = KernelCenterer()
K_fit_centered = np.dot(X_fit_centered, X_fit_centered.T)
K_fit_centered2 = centerer.fit_transform(K_fit)
assert_array_almost_equal(K_fit_centered, K_fit_centered2)
# center predict time matrix
X_pred = rng.random_sample((2, 4))
K_pred = np.dot(X_pred, X_fit.T)
X_pred_centered = scaler.transform(X_pred)
K_pred_centered = np.dot(X_pred_centered, X_fit_centered.T)
K_pred_centered2 = centerer.transform(K_pred)
assert_array_almost_equal(K_pred_centered, K_pred_centered2)
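# Illustrative sketch (an addition, not part of the original suite): at fit
# time, kernel centering amounts to the double-centering formula
# K - 1n.K - K.1n + 1n.K.1n, where 1n is the n x n matrix filled with 1/n.
def _kernel_centerer_manual_sketch():
    rng = np.random.RandomState(1)
    X = rng.random_sample((6, 3))
    K = np.dot(X, X.T)
    n = K.shape[0]
    one_n = np.full((n, n), 1.0 / n)
    K_manual = (K - np.dot(one_n, K) - np.dot(K, one_n)
                + np.dot(np.dot(one_n, K), one_n))
    assert_array_almost_equal(KernelCenterer().fit_transform(K), K_manual)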
def test_cv_pipeline_precomputed():
# Cross-validate a regression on four coplanar points with the same
# value. Use precomputed kernel to ensure Pipeline with KernelCenterer
# is treated as a _pairwise operation.
X = np.array([[3, 0, 0], [0, 3, 0], [0, 0, 3], [1, 1, 1]])
y_true = np.ones((4,))
K = X.dot(X.T)
kcent = KernelCenterer()
pipeline = Pipeline([("kernel_centerer", kcent), ("svr", SVR())])
# did the pipeline set the _pairwise attribute?
assert pipeline._pairwise
# test cross-validation, score should be almost perfect
# NB: this test is pretty vacuous -- it's mainly to test integration
# of Pipeline and KernelCenterer
y_pred = cross_val_predict(pipeline, K, y_true, cv=2)
assert_array_almost_equal(y_true, y_pred)
def test_fit_transform():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
for obj in ((StandardScaler(), Normalizer(), Binarizer())):
X_transformed = obj.fit(X).transform(X)
X_transformed2 = obj.fit_transform(X)
assert_array_equal(X_transformed, X_transformed2)
def test_add_dummy_feature():
X = [[1, 0], [0, 1], [0, 1]]
X = add_dummy_feature(X)
assert_array_equal(X, [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_add_dummy_feature_coo():
X = sparse.coo_matrix([[1, 0], [0, 1], [0, 1]])
X = add_dummy_feature(X)
assert sparse.isspmatrix_coo(X), X
assert_array_equal(X.toarray(), [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_add_dummy_feature_csc():
X = sparse.csc_matrix([[1, 0], [0, 1], [0, 1]])
X = add_dummy_feature(X)
assert sparse.isspmatrix_csc(X), X
assert_array_equal(X.toarray(), [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_add_dummy_feature_csr():
X = sparse.csr_matrix([[1, 0], [0, 1], [0, 1]])
X = add_dummy_feature(X)
assert sparse.isspmatrix_csr(X), X
assert_array_equal(X.toarray(), [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_fit_cold_start():
X = iris.data
X_2d = X[:, :2]
# Scalers that have a partial_fit method
scalers = [StandardScaler(with_mean=False, with_std=False),
MinMaxScaler(),
MaxAbsScaler()]
for scaler in scalers:
scaler.fit_transform(X)
# with a different shape, this may break the scaler unless the internal
# state is reset
scaler.fit_transform(X_2d)
@pytest.mark.filterwarnings("ignore: The default value of `copy`") # 0.23
def test_quantile_transform_valid_axis():
X = np.array([[0, 25, 50, 75, 100],
[2, 4, 6, 8, 10],
[2.6, 4.1, 2.3, 9.5, 0.1]])
assert_raises_regex(ValueError, "axis should be either equal to 0 or 1"
". Got axis=2", quantile_transform, X.T, axis=2)
@pytest.mark.parametrize("method", ['box-cox', 'yeo-johnson'])
def test_power_transformer_notfitted(method):
pt = PowerTransformer(method=method)
X = np.abs(X_1col)
assert_raises(NotFittedError, pt.transform, X)
assert_raises(NotFittedError, pt.inverse_transform, X)
@pytest.mark.parametrize('method', ['box-cox', 'yeo-johnson'])
@pytest.mark.parametrize('standardize', [True, False])
@pytest.mark.parametrize('X', [X_1col, X_2d])
def test_power_transformer_inverse(method, standardize, X):
# Make sure we get the original input when applying transform and then
# inverse transform
X = np.abs(X) if method == 'box-cox' else X
pt = PowerTransformer(method=method, standardize=standardize)
X_trans = pt.fit_transform(X)
assert_almost_equal(X, pt.inverse_transform(X_trans))
def test_power_transformer_1d():
X = np.abs(X_1col)
for standardize in [True, False]:
pt = PowerTransformer(method='box-cox', standardize=standardize)
X_trans = pt.fit_transform(X)
X_trans_func = power_transform(
X, method='box-cox',
standardize=standardize
)
X_expected, lambda_expected = stats.boxcox(X.flatten())
if standardize:
X_expected = scale(X_expected)
assert_almost_equal(X_expected.reshape(-1, 1), X_trans)
assert_almost_equal(X_expected.reshape(-1, 1), X_trans_func)
assert_almost_equal(X, pt.inverse_transform(X_trans))
assert_almost_equal(lambda_expected, pt.lambdas_[0])
assert len(pt.lambdas_) == X.shape[1]
assert isinstance(pt.lambdas_, np.ndarray)
def test_power_transformer_2d():
X = np.abs(X_2d)
for standardize in [True, False]:
pt = PowerTransformer(method='box-cox', standardize=standardize)
X_trans_class = pt.fit_transform(X)
X_trans_func = power_transform(
X, method='box-cox',
standardize=standardize
)
for X_trans in [X_trans_class, X_trans_func]:
for j in range(X_trans.shape[1]):
X_expected, lmbda = stats.boxcox(X[:, j].flatten())
if standardize:
X_expected = scale(X_expected)
assert_almost_equal(X_trans[:, j], X_expected)
assert_almost_equal(lmbda, pt.lambdas_[j])
# Test inverse transformation
X_inv = pt.inverse_transform(X_trans)
assert_array_almost_equal(X_inv, X)
assert len(pt.lambdas_) == X.shape[1]
assert isinstance(pt.lambdas_, np.ndarray)
def test_power_transformer_boxcox_strictly_positive_exception():
# Exceptions should be raised for negative arrays and zero arrays when
# method is boxcox
pt = PowerTransformer(method='box-cox')
pt.fit(np.abs(X_2d))
X_with_negatives = X_2d
not_positive_message = 'strictly positive'
assert_raise_message(ValueError, not_positive_message,
pt.transform, X_with_negatives)
assert_raise_message(ValueError, not_positive_message,
pt.fit, X_with_negatives)
assert_raise_message(ValueError, not_positive_message,
power_transform, X_with_negatives, 'box-cox')
assert_raise_message(ValueError, not_positive_message,
pt.transform, np.zeros(X_2d.shape))
assert_raise_message(ValueError, not_positive_message,
pt.fit, np.zeros(X_2d.shape))
assert_raise_message(ValueError, not_positive_message,
power_transform, np.zeros(X_2d.shape), 'box-cox')
@pytest.mark.parametrize('X', [X_2d, np.abs(X_2d), -np.abs(X_2d),
np.zeros(X_2d.shape)])
def test_power_transformer_yeojohnson_any_input(X):
# Yeo-Johnson method should support any kind of input
power_transform(X, method='yeo-johnson')
@pytest.mark.parametrize("method", ['box-cox', 'yeo-johnson'])
def test_power_transformer_shape_exception(method):
pt = PowerTransformer(method=method)
X = np.abs(X_2d)
pt.fit(X)
    # Exceptions should be raised for arrays with a different number of
    # columns than seen during fitting
wrong_shape_message = 'Input data has a different number of features'
assert_raise_message(ValueError, wrong_shape_message,
pt.transform, X[:, 0:1])
assert_raise_message(ValueError, wrong_shape_message,
pt.inverse_transform, X[:, 0:1])
def test_power_transformer_method_exception():
pt = PowerTransformer(method='monty-python')
X = np.abs(X_2d)
# An exception should be raised if PowerTransformer.method isn't valid
bad_method_message = "'method' must be one of"
assert_raise_message(ValueError, bad_method_message,
pt.fit, X)
def test_power_transformer_lambda_zero():
pt = PowerTransformer(method='box-cox', standardize=False)
X = np.abs(X_2d)[:, 0:1]
# Test the lambda = 0 case
pt.lambdas_ = np.array([0])
X_trans = pt.transform(X)
assert_array_almost_equal(pt.inverse_transform(X_trans), X)
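# Illustrative sketch (an addition, not part of the original suite): with
# lambda = 0 and standardize=False, the Box-Cox transform reduces to the
# natural logarithm of the data.
def _box_cox_lambda_zero_log_sketch():
    X = np.array([[0.5], [1.0], [2.0], [4.0]])
    pt = PowerTransformer(method='box-cox', standardize=False)
    pt.lambdas_ = np.array([0.0])
    assert_array_almost_equal(pt.transform(X), np.log(X))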
def test_power_transformer_lambda_one():
# Make sure lambda = 1 corresponds to the identity for yeo-johnson
pt = PowerTransformer(method='yeo-johnson', standardize=False)
X = np.abs(X_2d)[:, 0:1]
pt.lambdas_ = np.array([1])
X_trans = pt.transform(X)
assert_array_almost_equal(X_trans, X)
@pytest.mark.parametrize("method, lmbda", [('box-cox', .1),
('box-cox', .5),
('yeo-johnson', .1),
('yeo-johnson', .5),
('yeo-johnson', 1.),
])
def test_optimization_power_transformer(method, lmbda):
# Test the optimization procedure:
# - set a predefined value for lambda
# - apply inverse_transform to a normal dist (we get X_inv)
# - apply fit_transform to X_inv (we get X_inv_trans)
# - check that X_inv_trans is roughly equal to X
rng = np.random.RandomState(0)
n_samples = 20000
X = rng.normal(loc=0, scale=1, size=(n_samples, 1))
pt = PowerTransformer(method=method, standardize=False)
pt.lambdas_ = [lmbda]
X_inv = pt.inverse_transform(X)
pt = PowerTransformer(method=method, standardize=False)
X_inv_trans = pt.fit_transform(X_inv)
assert_almost_equal(0, np.linalg.norm(X - X_inv_trans) / n_samples,
decimal=2)
assert_almost_equal(0, X_inv_trans.mean(), decimal=1)
assert_almost_equal(1, X_inv_trans.std(), decimal=1)
def test_yeo_johnson_darwin_example():
# test from original paper "A new family of power transformations to
# improve normality or symmetry" by Yeo and Johnson.
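    # For reference (added note), the Yeo-Johnson transform is piecewise:
    #   ((x + 1)**lmbda - 1) / lmbda                    if lmbda != 0, x >= 0
    #   log(x + 1)                                      if lmbda == 0, x >= 0
    #   -((-x + 1)**(2 - lmbda) - 1) / (2 - lmbda)      if lmbda != 2, x < 0
    #   -log(-x + 1)                                    if lmbda == 2, x < 0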
X = [6.1, -8.4, 1.0, 2.0, 0.7, 2.9, 3.5, 5.1, 1.8, 3.6, 7.0, 3.0, 9.3,
7.5, -6.0]
X = np.array(X).reshape(-1, 1)
lmbda = PowerTransformer(method='yeo-johnson').fit(X).lambdas_
assert np.allclose(lmbda, 1.305, atol=1e-3)
@pytest.mark.parametrize('method', ['box-cox', 'yeo-johnson'])
def test_power_transformer_nans(method):
# Make sure lambda estimation is not influenced by NaN values
# and that transform() supports NaN silently
X = np.abs(X_1col)
pt = PowerTransformer(method=method)
pt.fit(X)
lmbda_no_nans = pt.lambdas_[0]
# concat nans at the end and check lambda stays the same
X = np.concatenate([X, np.full_like(X, np.nan)])
X = shuffle(X, random_state=0)
pt.fit(X)
lmbda_nans = pt.lambdas_[0]
assert_almost_equal(lmbda_no_nans, lmbda_nans, decimal=5)
X_trans = pt.transform(X)
assert_array_equal(np.isnan(X_trans), np.isnan(X))
@pytest.mark.parametrize('method', ['box-cox', 'yeo-johnson'])
@pytest.mark.parametrize('standardize', [True, False])
def test_power_transformer_fit_transform(method, standardize):
# check that fit_transform() and fit().transform() return the same values
X = X_1col
if method == 'box-cox':
X = np.abs(X)
pt = PowerTransformer(method, standardize)
assert_array_almost_equal(pt.fit(X).transform(X), pt.fit_transform(X))
@pytest.mark.parametrize('method', ['box-cox', 'yeo-johnson'])
@pytest.mark.parametrize('standardize', [True, False])
def test_power_transformer_copy_True(method, standardize):
# Check that neither fit, transform, fit_transform nor inverse_transform
# modify X inplace when copy=True
X = X_1col
if method == 'box-cox':
X = np.abs(X)
X_original = X.copy()
assert X is not X_original # sanity checks
assert_array_almost_equal(X, X_original)
pt = PowerTransformer(method, standardize, copy=True)
pt.fit(X)
assert_array_almost_equal(X, X_original)
X_trans = pt.transform(X)
assert X_trans is not X
X_trans = pt.fit_transform(X)
assert_array_almost_equal(X, X_original)
assert X_trans is not X
X_inv_trans = pt.inverse_transform(X_trans)
assert X_trans is not X_inv_trans
@pytest.mark.parametrize('method', ['box-cox', 'yeo-johnson'])
@pytest.mark.parametrize('standardize', [True, False])
def test_power_transformer_copy_False(method, standardize):
# check that when copy=False fit doesn't change X inplace but transform,
# fit_transform and inverse_transform do.
X = X_1col
if method == 'box-cox':
X = np.abs(X)
X_original = X.copy()
assert X is not X_original # sanity checks
assert_array_almost_equal(X, X_original)
pt = PowerTransformer(method, standardize, copy=False)
pt.fit(X)
assert_array_almost_equal(X, X_original) # fit didn't change X
X_trans = pt.transform(X)
assert X_trans is X
if method == 'box-cox':
X = np.abs(X)
X_trans = pt.fit_transform(X)
assert X_trans is X
X_inv_trans = pt.inverse_transform(X_trans)
assert X_trans is X_inv_trans
def test_power_transform_default_method():
X = np.abs(X_2d)
future_warning_message = (
"The default value of 'method' "
"will change from 'box-cox'"
)
assert_warns_message(FutureWarning, future_warning_message,
power_transform, X)
with warnings.catch_warnings():
warnings.simplefilter('ignore')
X_trans_default = power_transform(X)
X_trans_boxcox = power_transform(X, method='box-cox')
assert_array_equal(X_trans_boxcox, X_trans_default)
| bsd-3-clause |
numpy/numpy | numpy/core/fromnumeric.py | 2 | 122777 | """Module containing non-deprecated functions borrowed from Numeric.
"""
import functools
import types
import warnings
import numpy as np
from . import multiarray as mu
from . import overrides
from . import umath as um
from . import numerictypes as nt
from .multiarray import asarray, array, asanyarray, concatenate
from . import _methods
_dt_ = nt.sctype2char
# functions that are methods
__all__ = [
'alen', 'all', 'alltrue', 'amax', 'amin', 'any', 'argmax',
'argmin', 'argpartition', 'argsort', 'around', 'choose', 'clip',
'compress', 'cumprod', 'cumproduct', 'cumsum', 'diagonal', 'mean',
'ndim', 'nonzero', 'partition', 'prod', 'product', 'ptp', 'put',
'ravel', 'repeat', 'reshape', 'resize', 'round_',
'searchsorted', 'shape', 'size', 'sometrue', 'sort', 'squeeze',
'std', 'sum', 'swapaxes', 'take', 'trace', 'transpose', 'var',
]
_gentype = types.GeneratorType
# save away Python sum
_sum_ = sum
array_function_dispatch = functools.partial(
overrides.array_function_dispatch, module='numpy')
# functions that are now methods
def _wrapit(obj, method, *args, **kwds):
try:
wrap = obj.__array_wrap__
except AttributeError:
wrap = None
result = getattr(asarray(obj), method)(*args, **kwds)
if wrap:
if not isinstance(result, mu.ndarray):
result = asarray(result)
result = wrap(result)
return result
def _wrapfunc(obj, method, *args, **kwds):
bound = getattr(obj, method, None)
if bound is None:
return _wrapit(obj, method, *args, **kwds)
try:
return bound(*args, **kwds)
except TypeError:
# A TypeError occurs if the object does have such a method in its
# class, but its signature is not identical to that of NumPy's. This
# situation has occurred in the case of a downstream library like
# 'pandas'.
#
# Call _wrapit from within the except clause to ensure a potential
# exception has a traceback chain.
return _wrapit(obj, method, *args, **kwds)
def _wrapreduction(obj, ufunc, method, axis, dtype, out, **kwargs):
passkwargs = {k: v for k, v in kwargs.items()
if v is not np._NoValue}
if type(obj) is not mu.ndarray:
try:
reduction = getattr(obj, method)
except AttributeError:
pass
else:
# This branch is needed for reductions like any which don't
# support a dtype.
if dtype is not None:
return reduction(axis=axis, dtype=dtype, out=out, **passkwargs)
else:
return reduction(axis=axis, out=out, **passkwargs)
return ufunc.reduce(obj, axis, dtype, out, **passkwargs)
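# Added note (not in the original source): ``_wrapreduction`` is what lets
# reductions such as ``np.sum`` or ``np.any`` defer to a duck-typed object's
# own method when one exists, falling back to ``ufunc.reduce`` only for plain
# ndarrays or objects without such a method.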
def _take_dispatcher(a, indices, axis=None, out=None, mode=None):
return (a, out)
@array_function_dispatch(_take_dispatcher)
def take(a, indices, axis=None, out=None, mode='raise'):
"""
Take elements from an array along an axis.
When axis is not None, this function does the same thing as "fancy"
indexing (indexing arrays using arrays); however, it can be easier to use
if you need elements along a given axis. A call such as
``np.take(arr, indices, axis=3)`` is equivalent to
``arr[:,:,:,indices,...]``.
Explained without fancy indexing, this is equivalent to the following use
of `ndindex`, which sets each of ``ii``, ``jj``, and ``kk`` to a tuple of
indices::
Ni, Nk = a.shape[:axis], a.shape[axis+1:]
Nj = indices.shape
for ii in ndindex(Ni):
for jj in ndindex(Nj):
for kk in ndindex(Nk):
out[ii + jj + kk] = a[ii + (indices[jj],) + kk]
Parameters
----------
a : array_like (Ni..., M, Nk...)
The source array.
indices : array_like (Nj...)
The indices of the values to extract.
.. versionadded:: 1.8.0
Also allow scalars for indices.
axis : int, optional
The axis over which to select values. By default, the flattened
input array is used.
out : ndarray, optional (Ni..., Nj..., Nk...)
If provided, the result will be placed in this array. It should
be of the appropriate shape and dtype. Note that `out` is always
buffered if `mode='raise'`; use other modes for better performance.
mode : {'raise', 'wrap', 'clip'}, optional
Specifies how out-of-bounds indices will behave.
* 'raise' -- raise an error (default)
* 'wrap' -- wrap around
* 'clip' -- clip to the range
'clip' mode means that all indices that are too large are replaced
by the index that addresses the last element along that axis. Note
that this disables indexing with negative numbers.
Returns
-------
out : ndarray (Ni..., Nj..., Nk...)
The returned array has the same type as `a`.
See Also
--------
compress : Take elements using a boolean mask
ndarray.take : equivalent method
take_along_axis : Take elements by matching the array and the index arrays
Notes
-----
By eliminating the inner loop in the description above, and using `s_` to
build simple slice objects, `take` can be expressed in terms of applying
fancy indexing to each 1-d slice::
Ni, Nk = a.shape[:axis], a.shape[axis+1:]
for ii in ndindex(Ni):
            for kk in ndindex(Nk):
out[ii + s_[...,] + kk] = a[ii + s_[:,] + kk][indices]
For this reason, it is equivalent to (but faster than) the following use
of `apply_along_axis`::
out = np.apply_along_axis(lambda a_1d: a_1d[indices], axis, a)
Examples
--------
>>> a = [4, 3, 5, 7, 6, 8]
>>> indices = [0, 1, 4]
>>> np.take(a, indices)
array([4, 3, 6])
In this example if `a` is an ndarray, "fancy" indexing can be used.
>>> a = np.array(a)
>>> a[indices]
array([4, 3, 6])
If `indices` is not one dimensional, the output also has these dimensions.
>>> np.take(a, [[0, 1], [2, 3]])
array([[4, 3],
[5, 7]])
"""
return _wrapfunc(a, 'take', indices, axis=axis, out=out, mode=mode)
def _reshape_dispatcher(a, newshape, order=None):
return (a,)
# not deprecated --- copy if necessary, view otherwise
@array_function_dispatch(_reshape_dispatcher)
def reshape(a, newshape, order='C'):
"""
Gives a new shape to an array without changing its data.
Parameters
----------
a : array_like
Array to be reshaped.
newshape : int or tuple of ints
The new shape should be compatible with the original shape. If
an integer, then the result will be a 1-D array of that length.
One shape dimension can be -1. In this case, the value is
inferred from the length of the array and remaining dimensions.
order : {'C', 'F', 'A'}, optional
Read the elements of `a` using this index order, and place the
elements into the reshaped array using this index order. 'C'
means to read / write the elements using C-like index order,
with the last axis index changing fastest, back to the first
axis index changing slowest. 'F' means to read / write the
elements using Fortran-like index order, with the first index
changing fastest, and the last index changing slowest. Note that
the 'C' and 'F' options take no account of the memory layout of
the underlying array, and only refer to the order of indexing.
'A' means to read / write the elements in Fortran-like index
order if `a` is Fortran *contiguous* in memory, C-like order
otherwise.
Returns
-------
reshaped_array : ndarray
This will be a new view object if possible; otherwise, it will
be a copy. Note there is no guarantee of the *memory layout* (C- or
Fortran- contiguous) of the returned array.
See Also
--------
ndarray.reshape : Equivalent method.
Notes
-----
It is not always possible to change the shape of an array without
copying the data. If you want an error to be raised when the data is copied,
you should assign the new shape to the shape attribute of the array::
>>> a = np.zeros((10, 2))
# A transpose makes the array non-contiguous
>>> b = a.T
# Taking a view makes it possible to modify the shape without modifying
# the initial object.
>>> c = b.view()
>>> c.shape = (20)
Traceback (most recent call last):
...
AttributeError: Incompatible shape for in-place modification. Use
`.reshape()` to make a copy with the desired shape.
The `order` keyword gives the index ordering both for *fetching* the values
from `a`, and then *placing* the values into the output array.
For example, let's say you have an array:
>>> a = np.arange(6).reshape((3, 2))
>>> a
array([[0, 1],
[2, 3],
[4, 5]])
You can think of reshaping as first raveling the array (using the given
index order), then inserting the elements from the raveled array into the
new array using the same kind of index ordering as was used for the
raveling.
>>> np.reshape(a, (2, 3)) # C-like index ordering
array([[0, 1, 2],
[3, 4, 5]])
>>> np.reshape(np.ravel(a), (2, 3)) # equivalent to C ravel then C reshape
array([[0, 1, 2],
[3, 4, 5]])
>>> np.reshape(a, (2, 3), order='F') # Fortran-like index ordering
array([[0, 4, 3],
[2, 1, 5]])
>>> np.reshape(np.ravel(a, order='F'), (2, 3), order='F')
array([[0, 4, 3],
[2, 1, 5]])
Examples
--------
>>> a = np.array([[1,2,3], [4,5,6]])
>>> np.reshape(a, 6)
array([1, 2, 3, 4, 5, 6])
>>> np.reshape(a, 6, order='F')
array([1, 4, 2, 5, 3, 6])
>>> np.reshape(a, (3,-1)) # the unspecified value is inferred to be 2
array([[1, 2],
[3, 4],
[5, 6]])
"""
return _wrapfunc(a, 'reshape', newshape, order=order)
def _choose_dispatcher(a, choices, out=None, mode=None):
yield a
yield from choices
yield out
@array_function_dispatch(_choose_dispatcher)
def choose(a, choices, out=None, mode='raise'):
"""
Construct an array from an index array and a list of arrays to choose from.
First of all, if confused or uncertain, definitely look at the Examples -
in its full generality, this function is less simple than it might
seem from the following code description (below ndi =
`numpy.lib.index_tricks`):
``np.choose(a,c) == np.array([c[a[I]][I] for I in ndi.ndindex(a.shape)])``.
But this omits some subtleties. Here is a fully general summary:
Given an "index" array (`a`) of integers and a sequence of ``n`` arrays
(`choices`), `a` and each choice array are first broadcast, as necessary,
to arrays of a common shape; calling these *Ba* and *Bchoices[i], i =
0,...,n-1* we have that, necessarily, ``Ba.shape == Bchoices[i].shape``
for each ``i``. Then, a new array with shape ``Ba.shape`` is created as
follows:
* if ``mode='raise'`` (the default), then, first of all, each element of
``a`` (and thus ``Ba``) must be in the range ``[0, n-1]``; now, suppose
that ``i`` (in that range) is the value at the ``(j0, j1, ..., jm)``
position in ``Ba`` - then the value at the same position in the new array
is the value in ``Bchoices[i]`` at that same position;
* if ``mode='wrap'``, values in `a` (and thus `Ba`) may be any (signed)
integer; modular arithmetic is used to map integers outside the range
`[0, n-1]` back into that range; and then the new array is constructed
as above;
* if ``mode='clip'``, values in `a` (and thus ``Ba``) may be any (signed)
integer; negative integers are mapped to 0; values greater than ``n-1``
are mapped to ``n-1``; and then the new array is constructed as above.
Parameters
----------
a : int array
This array must contain integers in ``[0, n-1]``, where ``n`` is the
number of choices, unless ``mode=wrap`` or ``mode=clip``, in which
cases any integers are permissible.
choices : sequence of arrays
Choice arrays. `a` and all of the choices must be broadcastable to the
same shape. If `choices` is itself an array (not recommended), then
its outermost dimension (i.e., the one corresponding to
``choices.shape[0]``) is taken as defining the "sequence".
out : array, optional
If provided, the result will be inserted into this array. It should
be of the appropriate shape and dtype. Note that `out` is always
buffered if ``mode='raise'``; use other modes for better performance.
mode : {'raise' (default), 'wrap', 'clip'}, optional
Specifies how indices outside ``[0, n-1]`` will be treated:
* 'raise' : an exception is raised
* 'wrap' : value becomes value mod ``n``
* 'clip' : values < 0 are mapped to 0, values > n-1 are mapped to n-1
Returns
-------
merged_array : array
The merged result.
Raises
------
ValueError: shape mismatch
If `a` and each choice array are not all broadcastable to the same
shape.
See Also
--------
ndarray.choose : equivalent method
numpy.take_along_axis : Preferable if `choices` is an array
Notes
-----
To reduce the chance of misinterpretation, even though the following
"abuse" is nominally supported, `choices` should neither be, nor be
thought of as, a single array, i.e., the outermost sequence-like container
should be either a list or a tuple.
Examples
--------
>>> choices = [[0, 1, 2, 3], [10, 11, 12, 13],
... [20, 21, 22, 23], [30, 31, 32, 33]]
>>> np.choose([2, 3, 1, 0], choices
... # the first element of the result will be the first element of the
... # third (2+1) "array" in choices, namely, 20; the second element
... # will be the second element of the fourth (3+1) choice array, i.e.,
... # 31, etc.
... )
array([20, 31, 12, 3])
>>> np.choose([2, 4, 1, 0], choices, mode='clip') # 4 goes to 3 (4-1)
array([20, 31, 12, 3])
>>> # because there are 4 choice arrays
>>> np.choose([2, 4, 1, 0], choices, mode='wrap') # 4 goes to (4 mod 4)
array([20, 1, 12, 3])
>>> # i.e., 0
A couple examples illustrating how choose broadcasts:
>>> a = [[1, 0, 1], [0, 1, 0], [1, 0, 1]]
>>> choices = [-10, 10]
>>> np.choose(a, choices)
array([[ 10, -10, 10],
[-10, 10, -10],
[ 10, -10, 10]])
>>> # With thanks to Anne Archibald
>>> a = np.array([0, 1]).reshape((2,1,1))
>>> c1 = np.array([1, 2, 3]).reshape((1,3,1))
>>> c2 = np.array([-1, -2, -3, -4, -5]).reshape((1,1,5))
>>> np.choose(a, (c1, c2)) # result is 2x3x5, res[0,:,:]=c1, res[1,:,:]=c2
array([[[ 1, 1, 1, 1, 1],
[ 2, 2, 2, 2, 2],
[ 3, 3, 3, 3, 3]],
[[-1, -2, -3, -4, -5],
[-1, -2, -3, -4, -5],
[-1, -2, -3, -4, -5]]])
"""
return _wrapfunc(a, 'choose', choices, out=out, mode=mode)
def _repeat_dispatcher(a, repeats, axis=None):
return (a,)
@array_function_dispatch(_repeat_dispatcher)
def repeat(a, repeats, axis=None):
"""
Repeat elements of an array.
Parameters
----------
a : array_like
Input array.
repeats : int or array of ints
The number of repetitions for each element. `repeats` is broadcasted
to fit the shape of the given axis.
axis : int, optional
The axis along which to repeat values. By default, use the
flattened input array, and return a flat output array.
Returns
-------
repeated_array : ndarray
Output array which has the same shape as `a`, except along
the given axis.
See Also
--------
tile : Tile an array.
unique : Find the unique elements of an array.
Examples
--------
>>> np.repeat(3, 4)
array([3, 3, 3, 3])
>>> x = np.array([[1,2],[3,4]])
>>> np.repeat(x, 2)
array([1, 1, 2, 2, 3, 3, 4, 4])
>>> np.repeat(x, 3, axis=1)
array([[1, 1, 1, 2, 2, 2],
[3, 3, 3, 4, 4, 4]])
>>> np.repeat(x, [1, 2], axis=0)
array([[1, 2],
[3, 4],
[3, 4]])
"""
return _wrapfunc(a, 'repeat', repeats, axis=axis)
def _put_dispatcher(a, ind, v, mode=None):
return (a, ind, v)
@array_function_dispatch(_put_dispatcher)
def put(a, ind, v, mode='raise'):
"""
Replaces specified elements of an array with given values.
The indexing works on the flattened target array. `put` is roughly
equivalent to:
::
a.flat[ind] = v
Parameters
----------
a : ndarray
Target array.
ind : array_like
Target indices, interpreted as integers.
v : array_like
Values to place in `a` at target indices. If `v` is shorter than
`ind` it will be repeated as necessary.
mode : {'raise', 'wrap', 'clip'}, optional
Specifies how out-of-bounds indices will behave.
* 'raise' -- raise an error (default)
* 'wrap' -- wrap around
* 'clip' -- clip to the range
'clip' mode means that all indices that are too large are replaced
by the index that addresses the last element along that axis. Note
that this disables indexing with negative numbers. In 'raise' mode,
if an exception occurs the target array may still be modified.
See Also
--------
putmask, place
put_along_axis : Put elements by matching the array and the index arrays
Examples
--------
>>> a = np.arange(5)
>>> np.put(a, [0, 2], [-44, -55])
>>> a
array([-44, 1, -55, 3, 4])
>>> a = np.arange(5)
>>> np.put(a, 22, -5, mode='clip')
>>> a
array([ 0, 1, 2, 3, -5])
"""
try:
put = a.put
except AttributeError as e:
raise TypeError("argument 1 must be numpy.ndarray, "
"not {name}".format(name=type(a).__name__)) from e
return put(ind, v, mode=mode)
def _swapaxes_dispatcher(a, axis1, axis2):
return (a,)
@array_function_dispatch(_swapaxes_dispatcher)
def swapaxes(a, axis1, axis2):
"""
Interchange two axes of an array.
Parameters
----------
a : array_like
Input array.
axis1 : int
First axis.
axis2 : int
Second axis.
Returns
-------
a_swapped : ndarray
For NumPy >= 1.10.0, if `a` is an ndarray, then a view of `a` is
returned; otherwise a new array is created. For earlier NumPy
versions a view of `a` is returned only if the order of the
axes is changed, otherwise the input array is returned.
Examples
--------
>>> x = np.array([[1,2,3]])
>>> np.swapaxes(x,0,1)
array([[1],
[2],
[3]])
>>> x = np.array([[[0,1],[2,3]],[[4,5],[6,7]]])
>>> x
array([[[0, 1],
[2, 3]],
[[4, 5],
[6, 7]]])
>>> np.swapaxes(x,0,2)
array([[[0, 4],
[2, 6]],
[[1, 5],
[3, 7]]])
"""
return _wrapfunc(a, 'swapaxes', axis1, axis2)
def _transpose_dispatcher(a, axes=None):
return (a,)
@array_function_dispatch(_transpose_dispatcher)
def transpose(a, axes=None):
"""
Reverse or permute the axes of an array; returns the modified array.
For an array a with two axes, transpose(a) gives the matrix transpose.
Refer to `numpy.ndarray.transpose` for full documentation.
Parameters
----------
a : array_like
Input array.
axes : tuple or list of ints, optional
If specified, it must be a tuple or list which contains a permutation of
[0,1,..,N-1] where N is the number of axes of a. The i'th axis of the
returned array will correspond to the axis numbered ``axes[i]`` of the
input. If not specified, defaults to ``range(a.ndim)[::-1]``, which
reverses the order of the axes.
Returns
-------
p : ndarray
`a` with its axes permuted. A view is returned whenever
possible.
See Also
--------
ndarray.transpose : Equivalent method
moveaxis
argsort
Notes
-----
Use `transpose(a, argsort(axes))` to invert the transposition of tensors
when using the `axes` keyword argument.
Transposing a 1-D array returns an unchanged view of the original array.
Examples
--------
>>> x = np.arange(4).reshape((2,2))
>>> x
array([[0, 1],
[2, 3]])
>>> np.transpose(x)
array([[0, 2],
[1, 3]])
>>> x = np.ones((1, 2, 3))
>>> np.transpose(x, (1, 0, 2)).shape
(2, 1, 3)
>>> x = np.ones((2, 3, 4, 5))
>>> np.transpose(x).shape
(5, 4, 3, 2)
"""
return _wrapfunc(a, 'transpose', axes)
def _partition_dispatcher(a, kth, axis=None, kind=None, order=None):
return (a,)
@array_function_dispatch(_partition_dispatcher)
def partition(a, kth, axis=-1, kind='introselect', order=None):
"""
Return a partitioned copy of an array.
Creates a copy of the array with its elements rearranged in such a
way that the value of the element in k-th position is in the
position it would be in a sorted array. All elements smaller than
the k-th element are moved before this element and all equal or
greater are moved behind it. The ordering of the elements in the two
partitions is undefined.
.. versionadded:: 1.8.0
Parameters
----------
a : array_like
Array to be sorted.
kth : int or sequence of ints
Element index to partition by. The k-th value of the element
will be in its final sorted position and all smaller elements
will be moved before it and all equal or greater elements behind
it. The order of all elements in the partitions is undefined. If
provided with a sequence of k-th values, it will partition all
elements indexed by those values into their sorted positions at once.
axis : int or None, optional
Axis along which to sort. If None, the array is flattened before
sorting. The default is -1, which sorts along the last axis.
kind : {'introselect'}, optional
Selection algorithm. Default is 'introselect'.
order : str or list of str, optional
When `a` is an array with fields defined, this argument
specifies which fields to compare first, second, etc. A single
field can be specified as a string. Not all fields need be
specified, but unspecified fields will still be used, in the
order in which they come up in the dtype, to break ties.
Returns
-------
partitioned_array : ndarray
Array of the same type and shape as `a`.
See Also
--------
ndarray.partition : Method to sort an array in-place.
argpartition : Indirect partition.
sort : Full sorting
Notes
-----
The various selection algorithms are characterized by their average
speed, worst case performance, work space size, and whether they are
stable. A stable sort keeps items with the same key in the same
relative order. The available algorithms have the following
properties:
================= ======= ============= ============ =======
kind              speed   worst case    work space   stable
================= ======= ============= ============ =======
'introselect'     1       O(n)          0            no
================= ======= ============= ============ =======
All the partition algorithms make temporary copies of the data when
partitioning along any but the last axis. Consequently,
partitioning along the last axis is faster and uses less space than
partitioning along any other axis.
The sort order for complex numbers is lexicographic. If both the
real and imaginary parts are non-nan then the order is determined by
the real parts except when they are equal, in which case the order
is determined by the imaginary parts.
Examples
--------
>>> a = np.array([3, 4, 2, 1])
>>> np.partition(a, 3)
array([2, 1, 3, 4])
>>> np.partition(a, (1, 3))
array([1, 2, 3, 4])
"""
if axis is None:
# flatten returns (1, N) for np.matrix, so always use the last axis
a = asanyarray(a).flatten()
axis = -1
else:
a = asanyarray(a).copy(order="K")
a.partition(kth, axis=axis, kind=kind, order=order)
return a
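# A common use of partition, following directly from the semantics documented
# above: selecting the k smallest values without paying for a full sort. The
# first k entries are the k smallest in unspecified order, so a final sort is
# only needed over those k values.
# >>> x = np.array([7, 1, 5, 3, 9])
# >>> np.sort(np.partition(x, 2)[:3])     # the three smallest values
# array([1, 3, 5])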
def _argpartition_dispatcher(a, kth, axis=None, kind=None, order=None):
return (a,)
@array_function_dispatch(_argpartition_dispatcher)
def argpartition(a, kth, axis=-1, kind='introselect', order=None):
"""
Perform an indirect partition along the given axis using the
algorithm specified by the `kind` keyword. It returns an array of
indices of the same shape as `a` that index data along the given
axis in partitioned order.
.. versionadded:: 1.8.0
Parameters
----------
a : array_like
Array to sort.
kth : int or sequence of ints
Element index to partition by. The k-th element will be in its
final sorted position and all smaller elements will be moved
before it and all larger elements behind it. The order of all
elements in the partitions is undefined. If provided with a
sequence of k-th values, it will partition all of them into their
sorted positions at once.
axis : int or None, optional
Axis along which to sort. The default is -1 (the last axis). If
None, the flattened array is used.
kind : {'introselect'}, optional
Selection algorithm. Default is 'introselect'
order : str or list of str, optional
When `a` is an array with fields defined, this argument
specifies which fields to compare first, second, etc. A single
field can be specified as a string, and not all fields need be
specified, but unspecified fields will still be used, in the
order in which they come up in the dtype, to break ties.
Returns
-------
index_array : ndarray, int
Array of indices that partition `a` along the specified axis.
If `a` is one-dimensional, ``a[index_array]`` yields a partitioned `a`.
More generally, ``np.take_along_axis(a, index_array, axis=axis)`` always
yields the partitioned `a`, irrespective of dimensionality.
See Also
--------
partition : Describes partition algorithms used.
ndarray.partition : Inplace partition.
argsort : Full indirect sort.
take_along_axis : Apply ``index_array`` from argpartition
to an array as if by calling partition.
Notes
-----
See `partition` for notes on the different selection algorithms.
Examples
--------
One dimensional array:
>>> x = np.array([3, 4, 2, 1])
>>> x[np.argpartition(x, 3)]
array([2, 1, 3, 4])
>>> x[np.argpartition(x, (1, 3))]
array([1, 2, 3, 4])
>>> x = [3, 4, 2, 1]
>>> np.array(x)[np.argpartition(x, 3)]
array([2, 1, 3, 4])
Multi-dimensional array:
>>> x = np.array([[3, 4, 2], [1, 3, 1]])
>>> index_array = np.argpartition(x, kth=1, axis=-1)
>>> np.take_along_axis(x, index_array, axis=-1) # same as np.partition(x, kth=1)
array([[2, 3, 4],
[1, 1, 3]])
"""
return _wrapfunc(a, 'argpartition', kth, axis=axis, kind=kind, order=order)
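# The same top-k idea works with indices: after partitioning at position k,
# the index positions at and beyond k point at the largest values (in
# unspecified order), as implied by the semantics documented above.
# >>> x = np.array([10, 40, 30, 20])
# >>> idx = np.argpartition(x, 2)[2:]     # indices of the two largest values
# >>> np.sort(x[idx])
# array([30, 40])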
def _sort_dispatcher(a, axis=None, kind=None, order=None):
return (a,)
@array_function_dispatch(_sort_dispatcher)
def sort(a, axis=-1, kind=None, order=None):
"""
Return a sorted copy of an array.
Parameters
----------
a : array_like
Array to be sorted.
axis : int or None, optional
Axis along which to sort. If None, the array is flattened before
sorting. The default is -1, which sorts along the last axis.
kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional
Sorting algorithm. The default is 'quicksort'. Note that both 'stable'
and 'mergesort' use timsort or radix sort under the covers and, in general,
the actual implementation will vary with data type. The 'mergesort' option
is retained for backwards compatibility.
.. versionchanged:: 1.15.0
The 'stable' option was added.
order : str or list of str, optional
When `a` is an array with fields defined, this argument specifies
which fields to compare first, second, etc. A single field can
be specified as a string, and not all fields need be specified,
but unspecified fields will still be used, in the order in which
they come up in the dtype, to break ties.
Returns
-------
sorted_array : ndarray
Array of the same type and shape as `a`.
See Also
--------
ndarray.sort : Method to sort an array in-place.
argsort : Indirect sort.
lexsort : Indirect stable sort on multiple keys.
searchsorted : Find elements in a sorted array.
partition : Partial sort.
Notes
-----
The various sorting algorithms are characterized by their average speed,
worst case performance, work space size, and whether they are stable. A
stable sort keeps items with the same key in the same relative
order. The four algorithms implemented in NumPy have the following
properties:
=========== ======= ============= ============ ========
kind        speed   worst case    work space   stable
=========== ======= ============= ============ ========
'quicksort' 1       O(n^2)        0            no
'heapsort'  3       O(n*log(n))   0            no
'mergesort' 2       O(n*log(n))   ~n/2         yes
'timsort'   2       O(n*log(n))   ~n/2         yes
=========== ======= ============= ============ ========
.. note:: The datatype determines which of 'mergesort' or 'timsort'
is actually used, even if 'mergesort' is specified. User selection
at a finer scale is not currently available.
All the sort algorithms make temporary copies of the data when
sorting along any but the last axis. Consequently, sorting along
the last axis is faster and uses less space than sorting along
any other axis.
The sort order for complex numbers is lexicographic. If both the real
and imaginary parts are non-nan then the order is determined by the
real parts except when they are equal, in which case the order is
determined by the imaginary parts.
Previous to numpy 1.4.0 sorting real and complex arrays containing nan
values led to undefined behaviour. In numpy versions >= 1.4.0 nan
values are sorted to the end. The extended sort order is:
* Real: [R, nan]
* Complex: [R + Rj, R + nanj, nan + Rj, nan + nanj]
where R is a non-nan real value. Complex values with the same nan
placements are sorted according to the non-nan part if it exists.
Non-nan values are sorted as before.
.. versionadded:: 1.12.0
quicksort has been changed to `introsort <https://en.wikipedia.org/wiki/Introsort>`_.
When sorting does not make enough progress it switches to
`heapsort <https://en.wikipedia.org/wiki/Heapsort>`_.
This implementation makes quicksort O(n*log(n)) in the worst case.
'stable' automatically chooses the best stable sorting algorithm
for the data type being sorted.
It, along with 'mergesort', is currently mapped to
`timsort <https://en.wikipedia.org/wiki/Timsort>`_
or `radix sort <https://en.wikipedia.org/wiki/Radix_sort>`_
depending on the data type.
API forward compatibility currently limits the
ability to select the implementation and it is hardwired for the different
data types.
.. versionadded:: 1.17.0
Timsort is added for better performance on already or nearly
sorted data. On random data timsort is almost identical to
mergesort. It is now used for stable sort while quicksort is still the
default sort if none is chosen. For timsort details, refer to
`CPython listsort.txt <https://github.com/python/cpython/blob/3.7/Objects/listsort.txt>`_.
'mergesort' and 'stable' are mapped to radix sort for integer data types. Radix sort is an
O(n) sort instead of O(n log n).
.. versionchanged:: 1.18.0
NaT now sorts to the end of arrays for consistency with NaN.
Examples
--------
>>> a = np.array([[1,4],[3,1]])
>>> np.sort(a) # sort along the last axis
array([[1, 4],
[1, 3]])
>>> np.sort(a, axis=None) # sort the flattened array
array([1, 1, 3, 4])
>>> np.sort(a, axis=0) # sort along the first axis
array([[1, 1],
[3, 4]])
Use the `order` keyword to specify a field to use when sorting a
structured array:
>>> dtype = [('name', 'S10'), ('height', float), ('age', int)]
>>> values = [('Arthur', 1.8, 41), ('Lancelot', 1.9, 38),
... ('Galahad', 1.7, 38)]
>>> a = np.array(values, dtype=dtype) # create a structured array
>>> np.sort(a, order='height') # doctest: +SKIP
array([('Galahad', 1.7, 38), ('Arthur', 1.8, 41),
('Lancelot', 1.8999999999999999, 38)],
dtype=[('name', '|S10'), ('height', '<f8'), ('age', '<i4')])
Sort by age, then height if ages are equal:
>>> np.sort(a, order=['age', 'height']) # doctest: +SKIP
array([('Galahad', 1.7, 38), ('Lancelot', 1.8999999999999999, 38),
('Arthur', 1.8, 41)],
dtype=[('name', '|S10'), ('height', '<f8'), ('age', '<i4')])
"""
if axis is None:
# flatten returns (1, N) for np.matrix, so always use the last axis
a = asanyarray(a).flatten()
axis = -1
else:
a = asanyarray(a).copy(order="K")
a.sort(axis=axis, kind=kind, order=order)
return a
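# A small sketch of the extended sort order described in the Notes: NaN values
# are sorted to the end of the array. (Exact repr spacing may vary slightly
# between NumPy versions.)
# >>> np.sort(np.array([3.0, np.nan, 1.0]))
# array([ 1.,  3., nan])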
def _argsort_dispatcher(a, axis=None, kind=None, order=None):
return (a,)
@array_function_dispatch(_argsort_dispatcher)
def argsort(a, axis=-1, kind=None, order=None):
"""
Returns the indices that would sort an array.
Perform an indirect sort along the given axis using the algorithm specified
by the `kind` keyword. It returns an array of indices of the same shape as
`a` that index data along the given axis in sorted order.
Parameters
----------
a : array_like
Array to sort.
axis : int or None, optional
Axis along which to sort. The default is -1 (the last axis). If None,
the flattened array is used.
kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional
Sorting algorithm. The default is 'quicksort'. Note that both 'stable'
and 'mergesort' use timsort under the covers and, in general, the
actual implementation will vary with data type. The 'mergesort' option
is retained for backwards compatibility.
.. versionchanged:: 1.15.0
The 'stable' option was added.
order : str or list of str, optional
When `a` is an array with fields defined, this argument specifies
which fields to compare first, second, etc. A single field can
be specified as a string, and not all fields need be specified,
but unspecified fields will still be used, in the order in which
they come up in the dtype, to break ties.
Returns
-------
index_array : ndarray, int
Array of indices that sort `a` along the specified `axis`.
If `a` is one-dimensional, ``a[index_array]`` yields a sorted `a`.
More generally, ``np.take_along_axis(a, index_array, axis=axis)``
always yields the sorted `a`, irrespective of dimensionality.
See Also
--------
sort : Describes sorting algorithms used.
lexsort : Indirect stable sort with multiple keys.
ndarray.sort : Inplace sort.
argpartition : Indirect partial sort.
take_along_axis : Apply ``index_array`` from argsort
to an array as if by calling sort.
Notes
-----
See `sort` for notes on the different sorting algorithms.
As of NumPy 1.4.0 `argsort` works with real/complex arrays containing
nan values. The enhanced sort order is documented in `sort`.
Examples
--------
One dimensional array:
>>> x = np.array([3, 1, 2])
>>> np.argsort(x)
array([1, 2, 0])
Two-dimensional array:
>>> x = np.array([[0, 3], [2, 2]])
>>> x
array([[0, 3],
[2, 2]])
>>> ind = np.argsort(x, axis=0) # sorts along first axis (down)
>>> ind
array([[0, 1],
[1, 0]])
>>> np.take_along_axis(x, ind, axis=0) # same as np.sort(x, axis=0)
array([[0, 2],
[2, 3]])
>>> ind = np.argsort(x, axis=1) # sorts along last axis (across)
>>> ind
array([[0, 1],
[0, 1]])
>>> np.take_along_axis(x, ind, axis=1) # same as np.sort(x, axis=1)
array([[0, 3],
[2, 2]])
Indices of the sorted elements of a N-dimensional array:
>>> ind = np.unravel_index(np.argsort(x, axis=None), x.shape)
>>> ind
(array([0, 1, 1, 0]), array([0, 0, 1, 1]))
>>> x[ind] # same as np.sort(x, axis=None)
array([0, 2, 2, 3])
Sorting with keys:
>>> x = np.array([(1, 0), (0, 1)], dtype=[('x', '<i4'), ('y', '<i4')])
>>> x
array([(1, 0), (0, 1)],
dtype=[('x', '<i4'), ('y', '<i4')])
>>> np.argsort(x, order=('x','y'))
array([1, 0])
>>> np.argsort(x, order=('y','x'))
array([0, 1])
"""
return _wrapfunc(a, 'argsort', axis=axis, kind=kind, order=order)
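# argsort returns a permutation, so applying argsort to that permutation gives
# its inverse; the inverse maps sorted order back to the original order.
# >>> x = np.array([30, 10, 20])
# >>> order = np.argsort(x)
# >>> order
# array([1, 2, 0])
# >>> inv = np.argsort(order)
# >>> np.array_equal(x[order][inv], x)
# True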
def _argmax_dispatcher(a, axis=None, out=None):
return (a, out)
@array_function_dispatch(_argmax_dispatcher)
def argmax(a, axis=None, out=None):
"""
Returns the indices of the maximum values along an axis.
Parameters
----------
a : array_like
Input array.
axis : int, optional
By default, the index is into the flattened array, otherwise
along the specified axis.
out : array, optional
If provided, the result will be inserted into this array. It should
be of the appropriate shape and dtype.
Returns
-------
index_array : ndarray of ints
Array of indices into the array. It has the same shape as `a.shape`
with the dimension along `axis` removed.
See Also
--------
ndarray.argmax, argmin
amax : The maximum value along a given axis.
unravel_index : Convert a flat index into an index tuple.
take_along_axis : Apply ``np.expand_dims(index_array, axis)``
from argmax to an array as if by calling max.
Notes
-----
In case of multiple occurrences of the maximum values, the indices
corresponding to the first occurrence are returned.
Examples
--------
>>> a = np.arange(6).reshape(2,3) + 10
>>> a
array([[10, 11, 12],
[13, 14, 15]])
>>> np.argmax(a)
5
>>> np.argmax(a, axis=0)
array([1, 1, 1])
>>> np.argmax(a, axis=1)
array([2, 2])
Indexes of the maximal elements of a N-dimensional array:
>>> ind = np.unravel_index(np.argmax(a, axis=None), a.shape)
>>> ind
(1, 2)
>>> a[ind]
15
>>> b = np.arange(6)
>>> b[1] = 5
>>> b
array([0, 5, 2, 3, 4, 5])
>>> np.argmax(b) # Only the first occurrence is returned.
1
>>> x = np.array([[4,2,3], [1,0,3]])
>>> index_array = np.argmax(x, axis=-1)
>>> # Same as np.max(x, axis=-1, keepdims=True)
>>> np.take_along_axis(x, np.expand_dims(index_array, axis=-1), axis=-1)
array([[4],
[3]])
>>> # Same as np.max(x, axis=-1)
>>> np.take_along_axis(x, np.expand_dims(index_array, axis=-1), axis=-1).squeeze(axis=-1)
array([4, 3])
"""
return _wrapfunc(a, 'argmax', axis=axis, out=out)
def _argmin_dispatcher(a, axis=None, out=None):
return (a, out)
@array_function_dispatch(_argmin_dispatcher)
def argmin(a, axis=None, out=None):
"""
Returns the indices of the minimum values along an axis.
Parameters
----------
a : array_like
Input array.
axis : int, optional
By default, the index is into the flattened array, otherwise
along the specified axis.
out : array, optional
If provided, the result will be inserted into this array. It should
be of the appropriate shape and dtype.
Returns
-------
index_array : ndarray of ints
Array of indices into the array. It has the same shape as `a.shape`
with the dimension along `axis` removed.
See Also
--------
ndarray.argmin, argmax
amin : The minimum value along a given axis.
unravel_index : Convert a flat index into an index tuple.
take_along_axis : Apply ``np.expand_dims(index_array, axis)``
from argmin to an array as if by calling min.
Notes
-----
In case of multiple occurrences of the minimum values, the indices
corresponding to the first occurrence are returned.
Examples
--------
>>> a = np.arange(6).reshape(2,3) + 10
>>> a
array([[10, 11, 12],
[13, 14, 15]])
>>> np.argmin(a)
0
>>> np.argmin(a, axis=0)
array([0, 0, 0])
>>> np.argmin(a, axis=1)
array([0, 0])
Indices of the minimum elements of a N-dimensional array:
>>> ind = np.unravel_index(np.argmin(a, axis=None), a.shape)
>>> ind
(0, 0)
>>> a[ind]
10
>>> b = np.arange(6) + 10
>>> b[4] = 10
>>> b
array([10, 11, 12, 13, 10, 15])
>>> np.argmin(b) # Only the first occurrence is returned.
0
>>> x = np.array([[4,2,3], [1,0,3]])
>>> index_array = np.argmin(x, axis=-1)
>>> # Same as np.min(x, axis=-1, keepdims=True)
>>> np.take_along_axis(x, np.expand_dims(index_array, axis=-1), axis=-1)
array([[2],
[0]])
>>> # Same as np.min(x, axis=-1)
>>> np.take_along_axis(x, np.expand_dims(index_array, axis=-1), axis=-1).squeeze(axis=-1)
array([2, 0])
"""
return _wrapfunc(a, 'argmin', axis=axis, out=out)
def _searchsorted_dispatcher(a, v, side=None, sorter=None):
return (a, v, sorter)
@array_function_dispatch(_searchsorted_dispatcher)
def searchsorted(a, v, side='left', sorter=None):
"""
Find indices where elements should be inserted to maintain order.
Find the indices into a sorted array `a` such that, if the
corresponding elements in `v` were inserted before the indices, the
order of `a` would be preserved.
Assuming that `a` is sorted:
====== ============================
`side` returned index `i` satisfies
====== ============================
left   ``a[i-1] < v <= a[i]``
right  ``a[i-1] <= v < a[i]``
====== ============================
Parameters
----------
a : 1-D array_like
Input array. If `sorter` is None, then it must be sorted in
ascending order, otherwise `sorter` must be an array of indices
that sort it.
v : array_like
Values to insert into `a`.
side : {'left', 'right'}, optional
If 'left', the index of the first suitable location found is given.
If 'right', return the last such index. If there is no suitable
index, return either 0 or N (where N is the length of `a`).
sorter : 1-D array_like, optional
Optional array of integer indices that sort array a into ascending
order. They are typically the result of argsort.
.. versionadded:: 1.7.0
Returns
-------
indices : array of ints
Array of insertion points with the same shape as `v`.
See Also
--------
sort : Return a sorted copy of an array.
histogram : Produce histogram from 1-D data.
Notes
-----
Binary search is used to find the required insertion points.
As of NumPy 1.4.0 `searchsorted` works with real/complex arrays containing
`nan` values. The enhanced sort order is documented in `sort`.
This function uses the same algorithm as the builtin python `bisect.bisect_left`
(``side='left'``) and `bisect.bisect_right` (``side='right'``) functions,
which is also vectorized in the `v` argument.
Examples
--------
>>> np.searchsorted([1,2,3,4,5], 3)
2
>>> np.searchsorted([1,2,3,4,5], 3, side='right')
3
>>> np.searchsorted([1,2,3,4,5], [-10, 10, 2, 3])
array([0, 5, 1, 2])
"""
return _wrapfunc(a, 'searchsorted', v, side=side, sorter=sorter)
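# The ``sorter`` argument, described above but not shown in the Examples, lets
# searchsorted operate on an unsorted array by supplying its argsort
# permutation; the returned position refers to the sorted order.
# >>> a = np.array([40, 10, 30, 20])
# >>> np.searchsorted(a, 25, sorter=np.argsort(a))
# 2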
def _resize_dispatcher(a, new_shape):
return (a,)
@array_function_dispatch(_resize_dispatcher)
def resize(a, new_shape):
"""
Return a new array with the specified shape.
If the new array is larger than the original array, then the new
array is filled with repeated copies of `a`. Note that this behavior
is different from a.resize(new_shape) which fills with zeros instead
of repeated copies of `a`.
Parameters
----------
a : array_like
Array to be resized.
new_shape : int or tuple of int
Shape of resized array.
Returns
-------
reshaped_array : ndarray
The new array is formed from the data in the old array, repeated
if necessary to fill out the required number of elements. The
data are repeated iterating over the array in C-order.
See Also
--------
np.reshape : Reshape an array without changing the total size.
np.pad : Enlarge and pad an array.
np.repeat : Repeat elements of an array.
ndarray.resize : resize an array in-place.
Notes
-----
When the total size of the array does not change `~numpy.reshape` should
be used. In most other cases either indexing (to reduce the size)
or padding (to increase the size) may be a more appropriate solution.
Warning: This functionality does **not** consider axes separately,
i.e. it does not apply interpolation/extrapolation.
It fills the return array with the required number of elements, iterating
over `a` in C-order, disregarding axes (and cycling back from the start if
the new shape is larger). This functionality is therefore not suitable to
resize images, or data where each axis represents a separate and distinct
entity.
Examples
--------
>>> a=np.array([[0,1],[2,3]])
>>> np.resize(a,(2,3))
array([[0, 1, 2],
[3, 0, 1]])
>>> np.resize(a,(1,4))
array([[0, 1, 2, 3]])
>>> np.resize(a,(2,4))
array([[0, 1, 2, 3],
[0, 1, 2, 3]])
"""
if isinstance(new_shape, (int, nt.integer)):
new_shape = (new_shape,)
a = ravel(a)
new_size = 1
for dim_length in new_shape:
new_size *= dim_length
if dim_length < 0:
raise ValueError('all elements of `new_shape` must be non-negative')
if a.size == 0 or new_size == 0:
# First case must zero fill. The second would have repeats == 0.
return np.zeros_like(a, shape=new_shape)
repeats = -(-new_size // a.size) # ceil division
a = concatenate((a,) * repeats)[:new_size]
return reshape(a, new_shape)
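# A quick contrast with the in-place method mentioned in the docstring:
# np.resize repeats the data, while ndarray.resize zero-fills. (Run as a
# script; in an interactive session the in-place resize may require
# ``refcheck=False``.)
# >>> a = np.array([[0, 1], [2, 3]])
# >>> np.resize(a, (3, 2))
# array([[0, 1],
#        [2, 3],
#        [0, 1]])
# >>> b = np.array([[0, 1], [2, 3]])
# >>> b.resize((3, 2))
# >>> b
# array([[0, 1],
#        [2, 3],
#        [0, 0]])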
def _squeeze_dispatcher(a, axis=None):
return (a,)
@array_function_dispatch(_squeeze_dispatcher)
def squeeze(a, axis=None):
"""
Remove axes of length one from `a`.
Parameters
----------
a : array_like
Input data.
axis : None or int or tuple of ints, optional
.. versionadded:: 1.7.0
Selects a subset of the entries of length one in the
shape. If an axis is selected with shape entry greater than
one, an error is raised.
Returns
-------
squeezed : ndarray
The input array, but with all or a subset of the
dimensions of length 1 removed. This is always `a` itself
or a view into `a`. Note that if all axes are squeezed,
the result is a 0d array and not a scalar.
Raises
------
ValueError
If `axis` is not None, and an axis being squeezed is not of length 1
See Also
--------
expand_dims : The inverse operation, adding entries of length one
reshape : Insert, remove, and combine dimensions, and resize existing ones
Examples
--------
>>> x = np.array([[[0], [1], [2]]])
>>> x.shape
(1, 3, 1)
>>> np.squeeze(x).shape
(3,)
>>> np.squeeze(x, axis=0).shape
(3, 1)
>>> np.squeeze(x, axis=1).shape
Traceback (most recent call last):
...
ValueError: cannot select an axis to squeeze out which has size not equal to one
>>> np.squeeze(x, axis=2).shape
(1, 3)
>>> x = np.array([[1234]])
>>> x.shape
(1, 1)
>>> np.squeeze(x)
array(1234) # 0d array
>>> np.squeeze(x).shape
()
>>> np.squeeze(x)[()]
1234
"""
try:
squeeze = a.squeeze
except AttributeError:
return _wrapit(a, 'squeeze', axis=axis)
if axis is None:
return squeeze()
else:
return squeeze(axis=axis)
def _diagonal_dispatcher(a, offset=None, axis1=None, axis2=None):
return (a,)
@array_function_dispatch(_diagonal_dispatcher)
def diagonal(a, offset=0, axis1=0, axis2=1):
"""
Return specified diagonals.
If `a` is 2-D, returns the diagonal of `a` with the given offset,
i.e., the collection of elements of the form ``a[i, i+offset]``. If
`a` has more than two dimensions, then the axes specified by `axis1`
and `axis2` are used to determine the 2-D sub-array whose diagonal is
returned. The shape of the resulting array can be determined by
removing `axis1` and `axis2` and appending an index to the right equal
to the size of the resulting diagonals.
In versions of NumPy prior to 1.7, this function always returned a new,
independent array containing a copy of the values in the diagonal.
In NumPy 1.7 and 1.8, it continues to return a copy of the diagonal,
but depending on this fact is deprecated. Writing to the resulting
array continues to work as it used to, but a FutureWarning is issued.
Starting in NumPy 1.9 it returns a read-only view on the original array.
Attempting to write to the resulting array will produce an error.
In some future release, it will return a read/write view and writing to
the returned array will alter your original array. The returned array
will have the same type as the input array.
If you don't write to the array returned by this function, then you can
just ignore all of the above.
If you depend on the current behavior, then we suggest copying the
returned array explicitly, i.e., use ``np.diagonal(a).copy()`` instead
of just ``np.diagonal(a)``. This will work with both past and future
versions of NumPy.
Parameters
----------
a : array_like
Array from which the diagonals are taken.
offset : int, optional
Offset of the diagonal from the main diagonal. Can be positive or
negative. Defaults to main diagonal (0).
axis1 : int, optional
Axis to be used as the first axis of the 2-D sub-arrays from which
the diagonals should be taken. Defaults to first axis (0).
axis2 : int, optional
Axis to be used as the second axis of the 2-D sub-arrays from
which the diagonals should be taken. Defaults to second axis (1).
Returns
-------
array_of_diagonals : ndarray
If `a` is 2-D, then a 1-D array containing the diagonal and of the
same type as `a` is returned unless `a` is a `matrix`, in which case
a 1-D array rather than a (2-D) `matrix` is returned in order to
maintain backward compatibility.
If ``a.ndim > 2``, then the dimensions specified by `axis1` and `axis2`
are removed, and a new axis inserted at the end corresponding to the
diagonal.
Raises
------
ValueError
If the dimension of `a` is less than 2.
See Also
--------
diag : MATLAB work-a-like for 1-D and 2-D arrays.
diagflat : Create diagonal arrays.
trace : Sum along diagonals.
Examples
--------
>>> a = np.arange(4).reshape(2,2)
>>> a
array([[0, 1],
[2, 3]])
>>> a.diagonal()
array([0, 3])
>>> a.diagonal(1)
array([1])
A 3-D example:
>>> a = np.arange(8).reshape(2,2,2); a
array([[[0, 1],
[2, 3]],
[[4, 5],
[6, 7]]])
>>> a.diagonal(0, # Main diagonals of two arrays created by skipping
... 0, # across the outer(left)-most axis last and
... 1) # the "middle" (row) axis first.
array([[0, 6],
[1, 7]])
The sub-arrays whose main diagonals we just obtained; note that each
corresponds to fixing the right-most (column) axis, and that the
diagonals are "packed" in rows.
>>> a[:,:,0] # main diagonal is [0 6]
array([[0, 2],
[4, 6]])
>>> a[:,:,1] # main diagonal is [1 7]
array([[1, 3],
[5, 7]])
The anti-diagonal can be obtained by reversing the order of elements
using either `numpy.flipud` or `numpy.fliplr`.
>>> a = np.arange(9).reshape(3, 3)
>>> a
array([[0, 1, 2],
[3, 4, 5],
[6, 7, 8]])
>>> np.fliplr(a).diagonal() # Horizontal flip
array([2, 4, 6])
>>> np.flipud(a).diagonal() # Vertical flip
array([6, 4, 2])
Note that the order in which the diagonal is retrieved varies depending
on the flip function.
"""
if isinstance(a, np.matrix):
# Make diagonal of matrix 1-D to preserve backward compatibility.
return asarray(a).diagonal(offset=offset, axis1=axis1, axis2=axis2)
else:
return asanyarray(a).diagonal(offset=offset, axis1=axis1, axis2=axis2)
def _trace_dispatcher(
a, offset=None, axis1=None, axis2=None, dtype=None, out=None):
return (a, out)
@array_function_dispatch(_trace_dispatcher)
def trace(a, offset=0, axis1=0, axis2=1, dtype=None, out=None):
"""
Return the sum along diagonals of the array.
If `a` is 2-D, the sum along its diagonal with the given offset
is returned, i.e., the sum of elements ``a[i,i+offset]`` for all i.
If `a` has more than two dimensions, then the axes specified by axis1 and
axis2 are used to determine the 2-D sub-arrays whose traces are returned.
The shape of the resulting array is the same as that of `a` with `axis1`
and `axis2` removed.
Parameters
----------
a : array_like
Input array, from which the diagonals are taken.
offset : int, optional
Offset of the diagonal from the main diagonal. Can be both positive
and negative. Defaults to 0.
axis1, axis2 : int, optional
Axes to be used as the first and second axis of the 2-D sub-arrays
from which the diagonals should be taken. Defaults are the first two
axes of `a`.
dtype : dtype, optional
Determines the data-type of the returned array and of the accumulator
where the elements are summed. If dtype has the value None and `a` is
of integer type of precision less than the default integer
precision, then the default integer precision is used. Otherwise,
the precision is the same as that of `a`.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and
it must be of the right shape to hold the output.
Returns
-------
sum_along_diagonals : ndarray
If `a` is 2-D, the sum along the diagonal is returned. If `a` has
larger dimensions, then an array of sums along diagonals is returned.
See Also
--------
diag, diagonal, diagflat
Examples
--------
>>> np.trace(np.eye(3))
3.0
>>> a = np.arange(8).reshape((2,2,2))
>>> np.trace(a)
array([6, 8])
>>> a = np.arange(24).reshape((2,2,2,3))
>>> np.trace(a).shape
(2, 3)
"""
if isinstance(a, np.matrix):
# Get trace of matrix via an array to preserve backward compatibility.
return asarray(a).trace(offset=offset, axis1=axis1, axis2=axis2, dtype=dtype, out=out)
else:
return asanyarray(a).trace(offset=offset, axis1=axis1, axis2=axis2, dtype=dtype, out=out)
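# The ``offset`` argument, documented above but not exercised in the Examples,
# sums a diagonal above (positive) or below (negative) the main one.
# >>> a = np.arange(9).reshape(3, 3)
# >>> np.trace(a, offset=1)     # 1 + 5
# 6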
def _ravel_dispatcher(a, order=None):
return (a,)
@array_function_dispatch(_ravel_dispatcher)
def ravel(a, order='C'):
"""Return a contiguous flattened array.
A 1-D array, containing the elements of the input, is returned. A copy is
made only if needed.
As of NumPy 1.10, the returned array will have the same type as the input
array. (for example, a masked array will be returned for a masked array
input)
Parameters
----------
a : array_like
Input array. The elements in `a` are read in the order specified by
`order`, and packed as a 1-D array.
order : {'C','F', 'A', 'K'}, optional
The elements of `a` are read using this index order. 'C' means
to index the elements in row-major, C-style order,
with the last axis index changing fastest, back to the first
axis index changing slowest. 'F' means to index the elements
in column-major, Fortran-style order, with the
first index changing fastest, and the last index changing
slowest. Note that the 'C' and 'F' options take no account of
the memory layout of the underlying array, and only refer to
the order of axis indexing. 'A' means to read the elements in
Fortran-like index order if `a` is Fortran *contiguous* in
memory, C-like order otherwise. 'K' means to read the
elements in the order they occur in memory, except for
reversing the data when strides are negative. By default, 'C'
index order is used.
Returns
-------
y : array_like
y is an array of the same subtype as `a`, with shape ``(a.size,)``.
Note that matrices are special cased for backward compatibility, if `a`
is a matrix, then y is a 1-D ndarray.
See Also
--------
ndarray.flat : 1-D iterator over an array.
ndarray.flatten : 1-D array copy of the elements of an array
in row-major order.
ndarray.reshape : Change the shape of an array without changing its data.
Notes
-----
In row-major, C-style order, in two dimensions, the row index
varies the slowest, and the column index the quickest. This can
be generalized to multiple dimensions, where row-major order
implies that the index along the first axis varies slowest, and
the index along the last quickest. The opposite holds for
column-major, Fortran-style index ordering.
When a view is desired in as many cases as possible, ``arr.reshape(-1)``
may be preferable.
Examples
--------
It is equivalent to ``reshape(-1, order=order)``.
>>> x = np.array([[1, 2, 3], [4, 5, 6]])
>>> np.ravel(x)
array([1, 2, 3, 4, 5, 6])
>>> x.reshape(-1)
array([1, 2, 3, 4, 5, 6])
>>> np.ravel(x, order='F')
array([1, 4, 2, 5, 3, 6])
When ``order`` is 'A', it will preserve the array's 'C' or 'F' ordering:
>>> np.ravel(x.T)
array([1, 4, 2, 5, 3, 6])
>>> np.ravel(x.T, order='A')
array([1, 2, 3, 4, 5, 6])
When ``order`` is 'K', it will preserve orderings that are neither 'C'
nor 'F', but won't reverse axes:
>>> a = np.arange(3)[::-1]; a
array([2, 1, 0])
>>> a.ravel(order='C')
array([2, 1, 0])
>>> a.ravel(order='K')
array([2, 1, 0])
>>> a = np.arange(12).reshape(2,3,2).swapaxes(1,2); a
array([[[ 0, 2, 4],
[ 1, 3, 5]],
[[ 6, 8, 10],
[ 7, 9, 11]]])
>>> a.ravel(order='C')
array([ 0, 2, 4, 1, 3, 5, 6, 8, 10, 7, 9, 11])
>>> a.ravel(order='K')
array([ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11])
"""
if isinstance(a, np.matrix):
return asarray(a).ravel(order=order)
else:
return asanyarray(a).ravel(order=order)
def _nonzero_dispatcher(a):
return (a,)
@array_function_dispatch(_nonzero_dispatcher)
def nonzero(a):
"""
Return the indices of the elements that are non-zero.
Returns a tuple of arrays, one for each dimension of `a`,
containing the indices of the non-zero elements in that
dimension. The values in `a` are always tested and returned in
row-major, C-style order.
To group the indices by element, rather than dimension, use `argwhere`,
which returns a row for each non-zero element.
.. note::
When called on a zero-d array or scalar, ``nonzero(a)`` is treated
as ``nonzero(atleast_1d(a))``.
.. deprecated:: 1.17.0
Use `atleast_1d` explicitly if this behavior is deliberate.
Parameters
----------
a : array_like
Input array.
Returns
-------
tuple_of_arrays : tuple
Indices of elements that are non-zero.
See Also
--------
flatnonzero :
Return indices that are non-zero in the flattened version of the input
array.
ndarray.nonzero :
Equivalent ndarray method.
count_nonzero :
Counts the number of non-zero elements in the input array.
Notes
-----
While the nonzero values can be obtained with ``a[nonzero(a)]``, it is
recommended to use ``x[x.astype(bool)]`` or ``x[x != 0]`` instead, which
will correctly handle 0-d arrays.
Examples
--------
>>> x = np.array([[3, 0, 0], [0, 4, 0], [5, 6, 0]])
>>> x
array([[3, 0, 0],
[0, 4, 0],
[5, 6, 0]])
>>> np.nonzero(x)
(array([0, 1, 2, 2]), array([0, 1, 0, 1]))
>>> x[np.nonzero(x)]
array([3, 4, 5, 6])
>>> np.transpose(np.nonzero(x))
array([[0, 0],
[1, 1],
[2, 0],
[2, 1]])
A common use for ``nonzero`` is to find the indices of an array, where
a condition is True. Given an array `a`, the condition `a` > 3 is a
boolean array and since False is interpreted as 0, np.nonzero(a > 3)
yields the indices of the `a` where the condition is true.
>>> a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
>>> a > 3
array([[False, False, False],
[ True, True, True],
[ True, True, True]])
>>> np.nonzero(a > 3)
(array([1, 1, 1, 2, 2, 2]), array([0, 1, 2, 0, 1, 2]))
Using this result to index `a` is equivalent to using the mask directly:
>>> a[np.nonzero(a > 3)]
array([4, 5, 6, 7, 8, 9])
>>> a[a > 3] # prefer this spelling
array([4, 5, 6, 7, 8, 9])
``nonzero`` can also be called as a method of the array.
>>> (a > 3).nonzero()
(array([1, 1, 1, 2, 2, 2]), array([0, 1, 2, 0, 1, 2]))
"""
return _wrapfunc(a, 'nonzero')
def _shape_dispatcher(a):
return (a,)
@array_function_dispatch(_shape_dispatcher)
def shape(a):
"""
Return the shape of an array.
Parameters
----------
a : array_like
Input array.
Returns
-------
shape : tuple of ints
The elements of the shape tuple give the lengths of the
corresponding array dimensions.
See Also
--------
len
ndarray.shape : Equivalent array method.
Examples
--------
>>> np.shape(np.eye(3))
(3, 3)
>>> np.shape([[1, 2]])
(1, 2)
>>> np.shape([0])
(1,)
>>> np.shape(0)
()
>>> a = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')])
>>> np.shape(a)
(2,)
>>> a.shape
(2,)
"""
try:
result = a.shape
except AttributeError:
result = asarray(a).shape
return result
def _compress_dispatcher(condition, a, axis=None, out=None):
return (condition, a, out)
@array_function_dispatch(_compress_dispatcher)
def compress(condition, a, axis=None, out=None):
"""
Return selected slices of an array along given axis.
When working along a given axis, a slice along that axis is returned in
`output` for each index where `condition` evaluates to True. When
working on a 1-D array, `compress` is equivalent to `extract`.
Parameters
----------
condition : 1-D array of bools
Array that selects which entries to return. If len(condition)
is less than the size of `a` along the given axis, then output is
truncated to the length of the condition array.
a : array_like
Array from which to extract a part.
axis : int, optional
Axis along which to take slices. If None (default), work on the
flattened array.
out : ndarray, optional
Output array. Its type is preserved and it must be of the right
shape to hold the output.
Returns
-------
compressed_array : ndarray
A copy of `a` without the slices along axis for which `condition`
is false.
See Also
--------
take, choose, diag, diagonal, select
ndarray.compress : Equivalent method in ndarray
extract : Equivalent method when working on 1-D arrays
:ref:`ufuncs-output-type`
Examples
--------
>>> a = np.array([[1, 2], [3, 4], [5, 6]])
>>> a
array([[1, 2],
[3, 4],
[5, 6]])
>>> np.compress([0, 1], a, axis=0)
array([[3, 4]])
>>> np.compress([False, True, True], a, axis=0)
array([[3, 4],
[5, 6]])
>>> np.compress([False, True], a, axis=1)
array([[2],
[4],
[6]])
Working on the flattened array does not return slices along an axis but
selects elements.
>>> np.compress([False, True], a)
array([2])
"""
return _wrapfunc(a, 'compress', condition, axis=axis, out=out)
def _clip_dispatcher(a, a_min, a_max, out=None, **kwargs):
return (a, a_min, a_max)
@array_function_dispatch(_clip_dispatcher)
def clip(a, a_min, a_max, out=None, **kwargs):
"""
Clip (limit) the values in an array.
Given an interval, values outside the interval are clipped to
the interval edges. For example, if an interval of ``[0, 1]``
is specified, values smaller than 0 become 0, and values larger
than 1 become 1.
Equivalent to but faster than ``np.minimum(a_max, np.maximum(a, a_min))``.
No check is performed to ensure ``a_min < a_max``.
Parameters
----------
a : array_like
Array containing elements to clip.
a_min, a_max : array_like or None
Minimum and maximum value. If ``None``, clipping is not performed on
the corresponding edge. Only one of `a_min` and `a_max` may be
``None``. Both are broadcast against `a`.
out : ndarray, optional
The results will be placed in this array. It may be the input
array for in-place clipping. `out` must be of the right shape
to hold the output. Its type is preserved.
**kwargs
For other keyword-only arguments, see the
:ref:`ufunc docs <ufuncs.kwargs>`.
.. versionadded:: 1.17.0
Returns
-------
clipped_array : ndarray
An array with the elements of `a`, but where values
< `a_min` are replaced with `a_min`, and those > `a_max`
with `a_max`.
See Also
--------
:ref:`ufuncs-output-type`
Notes
-----
When `a_min` is greater than `a_max`, `clip` returns an
array in which all values are equal to `a_max`,
as shown in the second example.
Examples
--------
>>> a = np.arange(10)
>>> a
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
>>> np.clip(a, 1, 8)
array([1, 1, 2, 3, 4, 5, 6, 7, 8, 8])
>>> np.clip(a, 8, 1)
array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1])
>>> np.clip(a, 3, 6, out=a)
array([3, 3, 3, 3, 4, 5, 6, 6, 6, 6])
>>> a
array([3, 3, 3, 3, 4, 5, 6, 6, 6, 6])
>>> a = np.arange(10)
>>> a
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
>>> np.clip(a, [3, 4, 1, 1, 1, 4, 4, 4, 4, 4], 8)
array([3, 4, 2, 3, 4, 5, 6, 7, 8, 8])
"""
return _wrapfunc(a, 'clip', a_min, a_max, out=out, **kwargs)
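# As noted above, one of the two bounds may be None, giving a one-sided clip.
# >>> a = np.arange(10)
# >>> np.clip(a, None, 5)       # upper bound only
# array([0, 1, 2, 3, 4, 5, 5, 5, 5, 5])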
def _sum_dispatcher(a, axis=None, dtype=None, out=None, keepdims=None,
initial=None, where=None):
return (a, out)
@array_function_dispatch(_sum_dispatcher)
def sum(a, axis=None, dtype=None, out=None, keepdims=np._NoValue,
initial=np._NoValue, where=np._NoValue):
"""
Sum of array elements over a given axis.
Parameters
----------
a : array_like
Elements to sum.
axis : None or int or tuple of ints, optional
Axis or axes along which a sum is performed. The default,
axis=None, will sum all of the elements of the input array. If
axis is negative it counts from the last to the first axis.
.. versionadded:: 1.7.0
If axis is a tuple of ints, a sum is performed on all of the axes
specified in the tuple instead of a single axis or all the axes as
before.
dtype : dtype, optional
The type of the returned array and of the accumulator in which the
elements are summed. The dtype of `a` is used by default unless `a`
has an integer dtype of less precision than the default platform
integer. In that case, if `a` is signed then the platform integer
is used while if `a` is unsigned then an unsigned integer of the
same precision as the platform integer is used.
out : ndarray, optional
Alternative output array in which to place the result. It must have
the same shape as the expected output, but the type of the output
values will be cast if necessary.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the input array.
If the default value is passed, then `keepdims` will not be
passed through to the `sum` method of sub-classes of
`ndarray`, however any non-default value will be. If the
sub-class' method does not implement `keepdims` any
exceptions will be raised.
initial : scalar, optional
Starting value for the sum. See `~numpy.ufunc.reduce` for details.
.. versionadded:: 1.15.0
where : array_like of bool, optional
Elements to include in the sum. See `~numpy.ufunc.reduce` for details.
.. versionadded:: 1.17.0
Returns
-------
sum_along_axis : ndarray
An array with the same shape as `a`, with the specified
axis removed. If `a` is a 0-d array, or if `axis` is None, a scalar
is returned. If an output array is specified, a reference to
`out` is returned.
See Also
--------
ndarray.sum : Equivalent method.
add.reduce : Equivalent functionality of `add`.
cumsum : Cumulative sum of array elements.
trapz : Integration of array values using the composite trapezoidal rule.
mean, average
Notes
-----
Arithmetic is modular when using integer types, and no error is
raised on overflow.
The sum of an empty array is the neutral element 0:
>>> np.sum([])
0.0
For floating point numbers the numerical precision of sum (and
``np.add.reduce``) is in general limited by directly adding each number
individually to the result causing rounding errors in every step.
However, often numpy will use a numerically better approach (partial
pairwise summation) leading to improved precision in many use-cases.
This improved precision is always provided when no ``axis`` is given.
When ``axis`` is given, it will depend on which axis is summed.
Technically, to provide the best speed possible, the improved precision
is only used when the summation is along the fast axis in memory.
Note that the exact precision may vary depending on other parameters.
In contrast to NumPy, Python's ``math.fsum`` function uses a slower but
more precise approach to summation.
Especially when summing a large number of lower precision floating point
numbers, such as ``float32``, numerical errors can become significant.
In such cases it can be advisable to use `dtype="float64"` to use a higher
precision for the output.
Examples
--------
>>> np.sum([0.5, 1.5])
2.0
>>> np.sum([0.5, 0.7, 0.2, 1.5], dtype=np.int32)
1
>>> np.sum([[0, 1], [0, 5]])
6
>>> np.sum([[0, 1], [0, 5]], axis=0)
array([0, 6])
>>> np.sum([[0, 1], [0, 5]], axis=1)
array([1, 5])
>>> np.sum([[0, 1], [np.nan, 5]], where=[False, True], axis=1)
array([1., 5.])
If the accumulator is too small, overflow occurs:
>>> np.ones(128, dtype=np.int8).sum(dtype=np.int8)
-128
You can also start the sum with a value other than zero:
>>> np.sum([10], initial=5)
15
"""
if isinstance(a, _gentype):
# 2018-02-25, 1.15.0
warnings.warn(
"Calling np.sum(generator) is deprecated, and in the future will give a different result. "
"Use np.sum(np.fromiter(generator)) or the python sum builtin instead.",
DeprecationWarning, stacklevel=3)
res = _sum_(a)
if out is not None:
out[...] = res
return out
return res
return _wrapreduction(a, np.add, 'sum', axis, dtype, out, keepdims=keepdims,
initial=initial, where=where)
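# A small illustration of the keepdims behaviour described above: keeping the
# reduced axis as size one lets the result broadcast back against the input,
# e.g. for row-wise normalization.
# >>> a = np.array([[1., 2.], [3., 5.]])
# >>> row_sums = a.sum(axis=1, keepdims=True)
# >>> row_sums
# array([[3.],
#        [8.]])
# >>> a / row_sums
# array([[0.33333333, 0.66666667],
#        [0.375     , 0.625     ]])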
def _any_dispatcher(a, axis=None, out=None, keepdims=None, *,
where=np._NoValue):
return (a, where, out)
@array_function_dispatch(_any_dispatcher)
def any(a, axis=None, out=None, keepdims=np._NoValue, *, where=np._NoValue):
"""
Test whether any array element along a given axis evaluates to True.
Returns single boolean unless `axis` is not ``None``
Parameters
----------
a : array_like
Input array or object that can be converted to an array.
axis : None or int or tuple of ints, optional
Axis or axes along which a logical OR reduction is performed.
The default (``axis=None``) is to perform a logical OR over all
the dimensions of the input array. `axis` may be negative, in
which case it counts from the last to the first axis.
.. versionadded:: 1.7.0
If this is a tuple of ints, a reduction is performed on multiple
axes, instead of a single axis or all the axes as before.
out : ndarray, optional
Alternate output array in which to place the result. It must have
the same shape as the expected output and its type is preserved
(e.g., if it is of type float, then it will remain so, returning
1.0 for True and 0.0 for False, regardless of the type of `a`).
See :ref:`ufuncs-output-type` for more details.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the input array.
If the default value is passed, then `keepdims` will not be
passed through to the `any` method of sub-classes of
`ndarray`, however any non-default value will be. If the
sub-class' method does not implement `keepdims` any
exceptions will be raised.
where : array_like of bool, optional
Elements to include in checking for any `True` values.
See `~numpy.ufunc.reduce` for details.
.. versionadded:: 1.20.0
Returns
-------
any : bool or ndarray
A new boolean or `ndarray` is returned unless `out` is specified,
in which case a reference to `out` is returned.
See Also
--------
ndarray.any : equivalent method
all : Test whether all elements along a given axis evaluate to True.
Notes
-----
Not a Number (NaN), positive infinity and negative infinity evaluate
to `True` because these are not equal to zero.
Examples
--------
>>> np.any([[True, False], [True, True]])
True
>>> np.any([[True, False], [False, False]], axis=0)
array([ True, False])
>>> np.any([-1, 0, 5])
True
>>> np.any(np.nan)
True
>>> np.any([[True, False], [False, False]], where=[[False], [True]])
False
>>> o=np.array(False)
>>> z=np.any([-1, 4, 5], out=o)
>>> z, o
(array(True), array(True))
>>> # Check now that z is a reference to o
>>> z is o
True
>>> id(z), id(o) # identity of z and o # doctest: +SKIP
(191614240, 191614240)
"""
return _wrapreduction(a, np.logical_or, 'any', axis, None, out,
keepdims=keepdims, where=where)
def _all_dispatcher(a, axis=None, out=None, keepdims=None, *,
where=None):
return (a, where, out)
@array_function_dispatch(_all_dispatcher)
def all(a, axis=None, out=None, keepdims=np._NoValue, *, where=np._NoValue):
"""
Test whether all array elements along a given axis evaluate to True.
Parameters
----------
a : array_like
Input array or object that can be converted to an array.
axis : None or int or tuple of ints, optional
Axis or axes along which a logical AND reduction is performed.
The default (``axis=None``) is to perform a logical AND over all
the dimensions of the input array. `axis` may be negative, in
which case it counts from the last to the first axis.
.. versionadded:: 1.7.0
If this is a tuple of ints, a reduction is performed on multiple
axes, instead of a single axis or all the axes as before.
out : ndarray, optional
Alternate output array in which to place the result.
It must have the same shape as the expected output and its
type is preserved (e.g., if ``dtype(out)`` is float, the result
will consist of 0.0's and 1.0's). See :ref:`ufuncs-output-type` for more
details.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the input array.
If the default value is passed, then `keepdims` will not be
passed through to the `all` method of sub-classes of
`ndarray`, however any non-default value will be. If the
sub-class' method does not implement `keepdims` any
exceptions will be raised.
where : array_like of bool, optional
Elements to include in checking for all `True` values.
See `~numpy.ufunc.reduce` for details.
.. versionadded:: 1.20.0
Returns
-------
all : ndarray, bool
A new boolean or array is returned unless `out` is specified,
in which case a reference to `out` is returned.
See Also
--------
ndarray.all : equivalent method
any : Test whether any element along a given axis evaluates to True.
Notes
-----
Not a Number (NaN), positive infinity and negative infinity
evaluate to `True` because these are not equal to zero.
Examples
--------
>>> np.all([[True,False],[True,True]])
False
>>> np.all([[True,False],[True,True]], axis=0)
array([ True, False])
>>> np.all([-1, 4, 5])
True
>>> np.all([1.0, np.nan])
True
>>> np.all([[True, True], [False, True]], where=[[True], [False]])
True
>>> o=np.array(False)
>>> z=np.all([-1, 4, 5], out=o)
>>> id(z), id(o), z
(28293632, 28293632, array(True)) # may vary
"""
return _wrapreduction(a, np.logical_and, 'all', axis, None, out,
keepdims=keepdims, where=where)
def _cumsum_dispatcher(a, axis=None, dtype=None, out=None):
return (a, out)
@array_function_dispatch(_cumsum_dispatcher)
def cumsum(a, axis=None, dtype=None, out=None):
"""
Return the cumulative sum of the elements along a given axis.
Parameters
----------
a : array_like
Input array.
axis : int, optional
Axis along which the cumulative sum is computed. The default
(None) is to compute the cumsum over the flattened array.
dtype : dtype, optional
Type of the returned array and of the accumulator in which the
elements are summed. If `dtype` is not specified, it defaults
to the dtype of `a`, unless `a` has an integer dtype with a
precision less than that of the default platform integer. In
that case, the default platform integer is used.
out : ndarray, optional
Alternative output array in which to place the result. It must
have the same shape and buffer length as the expected output
but the type will be cast if necessary. See :ref:`ufuncs-output-type` for
more details.
Returns
-------
cumsum_along_axis : ndarray.
A new array holding the result is returned unless `out` is
specified, in which case a reference to `out` is returned. The
result has the same size as `a`, and the same shape as `a` if
`axis` is not None or `a` is a 1-d array.
See Also
--------
sum : Sum array elements.
trapz : Integration of array values using the composite trapezoidal rule.
diff : Calculate the n-th discrete difference along given axis.
Notes
-----
Arithmetic is modular when using integer types, and no error is
raised on overflow.
``cumsum(a)[-1]`` may not be equal to ``sum(a)`` for floating-point
values since ``sum`` may use a pairwise summation routine, reducing
the roundoff-error. See `sum` for more information.
Examples
--------
>>> a = np.array([[1,2,3], [4,5,6]])
>>> a
array([[1, 2, 3],
[4, 5, 6]])
>>> np.cumsum(a)
array([ 1, 3, 6, 10, 15, 21])
>>> np.cumsum(a, dtype=float) # specifies type of output value(s)
array([ 1., 3., 6., 10., 15., 21.])
>>> np.cumsum(a,axis=0) # sum over rows for each of the 3 columns
array([[1, 2, 3],
[5, 7, 9]])
>>> np.cumsum(a,axis=1) # sum over columns for each of the 2 rows
array([[ 1, 3, 6],
[ 4, 9, 15]])
``cumsum(b)[-1]`` may not be equal to ``sum(b)``
>>> b = np.array([1, 2e-9, 3e-9] * 1000000)
>>> b.cumsum()[-1]
1000000.0050045159
>>> b.sum()
1000000.0050000029
"""
return _wrapfunc(a, 'cumsum', axis=axis, dtype=dtype, out=out)
def _ptp_dispatcher(a, axis=None, out=None, keepdims=None):
return (a, out)
@array_function_dispatch(_ptp_dispatcher)
def ptp(a, axis=None, out=None, keepdims=np._NoValue):
"""
Range of values (maximum - minimum) along an axis.
The name of the function comes from the acronym for 'peak to peak'.
.. warning::
`ptp` preserves the data type of the array. This means the
return value for an input of signed integers with n bits
(e.g. `np.int8`, `np.int16`, etc) is also a signed integer
with n bits. In that case, peak-to-peak values greater than
``2**(n-1)-1`` will be returned as negative values. An example
with a work-around is shown below.
Parameters
----------
a : array_like
Input values.
axis : None or int or tuple of ints, optional
Axis along which to find the peaks. By default, flatten the
array. `axis` may be negative, in
which case it counts from the last to the first axis.
.. versionadded:: 1.15.0
If this is a tuple of ints, a reduction is performed on multiple
axes, instead of a single axis or all the axes as before.
out : array_like
Alternative output array in which to place the result. It must
have the same shape and buffer length as the expected output,
but the type of the output values will be cast if necessary.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the input array.
If the default value is passed, then `keepdims` will not be
passed through to the `ptp` method of sub-classes of
`ndarray`, however any non-default value will be. If the
sub-class' method does not implement `keepdims` any
exceptions will be raised.
Returns
-------
ptp : ndarray
A new array holding the result, unless `out` was
specified, in which case a reference to `out` is returned.
Examples
--------
>>> x = np.array([[4, 9, 2, 10],
... [6, 9, 7, 12]])
>>> np.ptp(x, axis=1)
array([8, 6])
>>> np.ptp(x, axis=0)
array([2, 0, 5, 2])
>>> np.ptp(x)
10
This example shows that a negative value can be returned when
the input is an array of signed integers.
>>> y = np.array([[1, 127],
... [0, 127],
... [-1, 127],
... [-2, 127]], dtype=np.int8)
>>> np.ptp(y, axis=1)
array([ 126, 127, -128, -127], dtype=int8)
A work-around is to use the `view()` method to view the result as
unsigned integers with the same bit width:
>>> np.ptp(y, axis=1).view(np.uint8)
array([126, 127, 128, 129], dtype=uint8)
"""
kwargs = {}
if keepdims is not np._NoValue:
kwargs['keepdims'] = keepdims
if type(a) is not mu.ndarray:
try:
ptp = a.ptp
except AttributeError:
pass
else:
return ptp(axis=axis, out=out, **kwargs)
return _methods._ptp(a, axis=axis, out=out, **kwargs)
def _amax_dispatcher(a, axis=None, out=None, keepdims=None, initial=None,
where=None):
return (a, out)
@array_function_dispatch(_amax_dispatcher)
def amax(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue,
where=np._NoValue):
"""
Return the maximum of an array or maximum along an axis.
Parameters
----------
a : array_like
Input data.
axis : None or int or tuple of ints, optional
Axis or axes along which to operate. By default, flattened input is
used.
.. versionadded:: 1.7.0
If this is a tuple of ints, the maximum is selected over multiple axes,
instead of a single axis or all the axes as before.
out : ndarray, optional
Alternative output array in which to place the result. Must
be of the same shape and buffer length as the expected output.
See :ref:`ufuncs-output-type` for more details.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the input array.
If the default value is passed, then `keepdims` will not be
passed through to the `amax` method of sub-classes of
`ndarray`, however any non-default value will be. If the
sub-class' method does not implement `keepdims` any
exceptions will be raised.
initial : scalar, optional
        The minimum value of an output element. Must be present to allow
        computation on an empty slice. See `~numpy.ufunc.reduce` for details.
.. versionadded:: 1.15.0
where : array_like of bool, optional
Elements to compare for the maximum. See `~numpy.ufunc.reduce`
for details.
.. versionadded:: 1.17.0
Returns
-------
amax : ndarray or scalar
Maximum of `a`. If `axis` is None, the result is a scalar value.
If `axis` is given, the result is an array of dimension
``a.ndim - 1``.
See Also
--------
amin :
The minimum value of an array along a given axis, propagating any NaNs.
nanmax :
The maximum value of an array along a given axis, ignoring any NaNs.
maximum :
Element-wise maximum of two arrays, propagating any NaNs.
fmax :
Element-wise maximum of two arrays, ignoring any NaNs.
argmax :
Return the indices of the maximum values.
nanmin, minimum, fmin
Notes
-----
NaN values are propagated, that is if at least one item is NaN, the
corresponding max value will be NaN as well. To ignore NaN values
(MATLAB behavior), please use nanmax.
Don't use `amax` for element-wise comparison of 2 arrays; when
``a.shape[0]`` is 2, ``maximum(a[0], a[1])`` is faster than
``amax(a, axis=0)``.
Examples
--------
>>> a = np.arange(4).reshape((2,2))
>>> a
array([[0, 1],
[2, 3]])
>>> np.amax(a) # Maximum of the flattened array
3
>>> np.amax(a, axis=0) # Maxima along the first axis
array([2, 3])
>>> np.amax(a, axis=1) # Maxima along the second axis
array([1, 3])
>>> np.amax(a, where=[False, True], initial=-1, axis=0)
array([-1, 3])
>>> b = np.arange(5, dtype=float)
>>> b[2] = np.NaN
>>> np.amax(b)
nan
>>> np.amax(b, where=~np.isnan(b), initial=-1)
4.0
>>> np.nanmax(b)
4.0
You can use an initial value to compute the maximum of an empty slice, or
to initialize it to a different value:
>>> np.max([[-50], [10]], axis=-1, initial=0)
array([ 0, 10])
Notice that the initial value is used as one of the elements for which the
    maximum is determined, unlike for the default argument of Python's max
function, which is only used for empty iterables.
>>> np.max([5], initial=6)
6
>>> max([5], default=6)
5
"""
return _wrapreduction(a, np.maximum, 'max', axis, None, out,
keepdims=keepdims, initial=initial, where=where)
def _amin_dispatcher(a, axis=None, out=None, keepdims=None, initial=None,
where=None):
return (a, out)
@array_function_dispatch(_amin_dispatcher)
def amin(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue,
where=np._NoValue):
"""
Return the minimum of an array or minimum along an axis.
Parameters
----------
a : array_like
Input data.
axis : None or int or tuple of ints, optional
Axis or axes along which to operate. By default, flattened input is
used.
.. versionadded:: 1.7.0
If this is a tuple of ints, the minimum is selected over multiple axes,
instead of a single axis or all the axes as before.
out : ndarray, optional
Alternative output array in which to place the result. Must
be of the same shape and buffer length as the expected output.
See :ref:`ufuncs-output-type` for more details.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the input array.
If the default value is passed, then `keepdims` will not be
passed through to the `amin` method of sub-classes of
`ndarray`, however any non-default value will be. If the
sub-class' method does not implement `keepdims` any
exceptions will be raised.
initial : scalar, optional
        The maximum value of an output element. Must be present to allow
        computation on an empty slice. See `~numpy.ufunc.reduce` for details.
.. versionadded:: 1.15.0
where : array_like of bool, optional
Elements to compare for the minimum. See `~numpy.ufunc.reduce`
for details.
.. versionadded:: 1.17.0
Returns
-------
amin : ndarray or scalar
Minimum of `a`. If `axis` is None, the result is a scalar value.
If `axis` is given, the result is an array of dimension
``a.ndim - 1``.
See Also
--------
amax :
The maximum value of an array along a given axis, propagating any NaNs.
nanmin :
The minimum value of an array along a given axis, ignoring any NaNs.
minimum :
Element-wise minimum of two arrays, propagating any NaNs.
fmin :
Element-wise minimum of two arrays, ignoring any NaNs.
argmin :
Return the indices of the minimum values.
nanmax, maximum, fmax
Notes
-----
NaN values are propagated, that is if at least one item is NaN, the
corresponding min value will be NaN as well. To ignore NaN values
(MATLAB behavior), please use nanmin.
Don't use `amin` for element-wise comparison of 2 arrays; when
``a.shape[0]`` is 2, ``minimum(a[0], a[1])`` is faster than
``amin(a, axis=0)``.
Examples
--------
>>> a = np.arange(4).reshape((2,2))
>>> a
array([[0, 1],
[2, 3]])
>>> np.amin(a) # Minimum of the flattened array
0
>>> np.amin(a, axis=0) # Minima along the first axis
array([0, 1])
>>> np.amin(a, axis=1) # Minima along the second axis
array([0, 2])
>>> np.amin(a, where=[False, True], initial=10, axis=0)
array([10, 1])
>>> b = np.arange(5, dtype=float)
>>> b[2] = np.NaN
>>> np.amin(b)
nan
>>> np.amin(b, where=~np.isnan(b), initial=10)
0.0
>>> np.nanmin(b)
0.0
>>> np.min([[-50], [10]], axis=-1, initial=0)
array([-50, 0])
Notice that the initial value is used as one of the elements for which the
    minimum is determined, unlike for the default argument of Python's min
function, which is only used for empty iterables.
Notice that this isn't the same as Python's ``default`` argument.
>>> np.min([6], initial=5)
5
>>> min([6], default=5)
6
"""
return _wrapreduction(a, np.minimum, 'min', axis, None, out,
keepdims=keepdims, initial=initial, where=where)
def _alen_dispatcher(a):
return (a,)
@array_function_dispatch(_alen_dispatcher)
def alen(a):
"""
Return the length of the first dimension of the input array.
.. deprecated:: 1.18
`numpy.alen` is deprecated, use `len` instead.
Parameters
----------
a : array_like
Input array.
Returns
-------
alen : int
Length of the first dimension of `a`.
See Also
--------
shape, size
Examples
--------
>>> a = np.zeros((7,4,5))
>>> a.shape[0]
7
>>> np.alen(a)
7
"""
# NumPy 1.18.0, 2019-08-02
warnings.warn(
"`np.alen` is deprecated, use `len` instead",
DeprecationWarning, stacklevel=2)
try:
return len(a)
except TypeError:
return len(array(a, ndmin=1))
def _prod_dispatcher(a, axis=None, dtype=None, out=None, keepdims=None,
initial=None, where=None):
return (a, out)
@array_function_dispatch(_prod_dispatcher)
def prod(a, axis=None, dtype=None, out=None, keepdims=np._NoValue,
initial=np._NoValue, where=np._NoValue):
"""
Return the product of array elements over a given axis.
Parameters
----------
a : array_like
Input data.
axis : None or int or tuple of ints, optional
Axis or axes along which a product is performed. The default,
axis=None, will calculate the product of all the elements in the
input array. If axis is negative it counts from the last to the
first axis.
.. versionadded:: 1.7.0
If axis is a tuple of ints, a product is performed on all of the
axes specified in the tuple instead of a single axis or all the
axes as before.
dtype : dtype, optional
The type of the returned array, as well as of the accumulator in
which the elements are multiplied. The dtype of `a` is used by
default unless `a` has an integer dtype of less precision than the
default platform integer. In that case, if `a` is signed then the
platform integer is used while if `a` is unsigned then an unsigned
integer of the same precision as the platform integer is used.
out : ndarray, optional
Alternative output array in which to place the result. It must have
the same shape as the expected output, but the type of the output
values will be cast if necessary.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left in the
result as dimensions with size one. With this option, the result
will broadcast correctly against the input array.
If the default value is passed, then `keepdims` will not be
passed through to the `prod` method of sub-classes of
`ndarray`, however any non-default value will be. If the
sub-class' method does not implement `keepdims` any
exceptions will be raised.
initial : scalar, optional
The starting value for this product. See `~numpy.ufunc.reduce` for details.
.. versionadded:: 1.15.0
where : array_like of bool, optional
Elements to include in the product. See `~numpy.ufunc.reduce` for details.
.. versionadded:: 1.17.0
Returns
-------
product_along_axis : ndarray, see `dtype` parameter above.
An array shaped as `a` but with the specified axis removed.
Returns a reference to `out` if specified.
See Also
--------
ndarray.prod : equivalent method
:ref:`ufuncs-output-type`
Notes
-----
Arithmetic is modular when using integer types, and no error is
raised on overflow. That means that, on a 32-bit platform:
>>> x = np.array([536870910, 536870910, 536870910, 536870910])
>>> np.prod(x)
16 # may vary
The product of an empty array is the neutral element 1:
>>> np.prod([])
1.0
Examples
--------
By default, calculate the product of all elements:
>>> np.prod([1.,2.])
2.0
Even when the input array is two-dimensional:
>>> np.prod([[1.,2.],[3.,4.]])
24.0
But we can also specify the axis over which to multiply:
>>> np.prod([[1.,2.],[3.,4.]], axis=1)
array([ 2., 12.])
Or select specific elements to include:
>>> np.prod([1., np.nan, 3.], where=[True, False, True])
3.0
If the type of `x` is unsigned, then the output type is
the unsigned platform integer:
>>> x = np.array([1, 2, 3], dtype=np.uint8)
>>> np.prod(x).dtype == np.uint
True
If `x` is of a signed integer type, then the output type
is the default platform integer:
>>> x = np.array([1, 2, 3], dtype=np.int8)
>>> np.prod(x).dtype == int
True
You can also start the product with a value other than one:
>>> np.prod([1, 2], initial=5)
10
"""
return _wrapreduction(a, np.multiply, 'prod', axis, dtype, out,
keepdims=keepdims, initial=initial, where=where)
def _cumprod_dispatcher(a, axis=None, dtype=None, out=None):
return (a, out)
@array_function_dispatch(_cumprod_dispatcher)
def cumprod(a, axis=None, dtype=None, out=None):
"""
Return the cumulative product of elements along a given axis.
Parameters
----------
a : array_like
Input array.
axis : int, optional
Axis along which the cumulative product is computed. By default
the input is flattened.
dtype : dtype, optional
Type of the returned array, as well as of the accumulator in which
the elements are multiplied. If *dtype* is not specified, it
defaults to the dtype of `a`, unless `a` has an integer dtype with
a precision less than that of the default platform integer. In
that case, the default platform integer is used instead.
out : ndarray, optional
Alternative output array in which to place the result. It must
have the same shape and buffer length as the expected output
but the type of the resulting values will be cast if necessary.
Returns
-------
cumprod : ndarray
A new array holding the result is returned unless `out` is
specified, in which case a reference to out is returned.
See Also
--------
:ref:`ufuncs-output-type`
Notes
-----
Arithmetic is modular when using integer types, and no error is
raised on overflow.
Examples
--------
>>> a = np.array([1,2,3])
>>> np.cumprod(a) # intermediate results 1, 1*2
... # total product 1*2*3 = 6
array([1, 2, 6])
>>> a = np.array([[1, 2, 3], [4, 5, 6]])
>>> np.cumprod(a, dtype=float) # specify type of output
array([ 1., 2., 6., 24., 120., 720.])
The cumulative product for each column (i.e., over the rows) of `a`:
>>> np.cumprod(a, axis=0)
array([[ 1, 2, 3],
[ 4, 10, 18]])
The cumulative product for each row (i.e. over the columns) of `a`:
>>> np.cumprod(a,axis=1)
array([[ 1, 2, 6],
[ 4, 20, 120]])
"""
return _wrapfunc(a, 'cumprod', axis=axis, dtype=dtype, out=out)
def _ndim_dispatcher(a):
return (a,)
@array_function_dispatch(_ndim_dispatcher)
def ndim(a):
"""
Return the number of dimensions of an array.
Parameters
----------
a : array_like
Input array. If it is not already an ndarray, a conversion is
attempted.
Returns
-------
number_of_dimensions : int
The number of dimensions in `a`. Scalars are zero-dimensional.
See Also
--------
ndarray.ndim : equivalent method
shape : dimensions of array
ndarray.shape : dimensions of array
Examples
--------
>>> np.ndim([[1,2,3],[4,5,6]])
2
>>> np.ndim(np.array([[1,2,3],[4,5,6]]))
2
>>> np.ndim(1)
0
"""
try:
return a.ndim
except AttributeError:
return asarray(a).ndim
def _size_dispatcher(a, axis=None):
return (a,)
@array_function_dispatch(_size_dispatcher)
def size(a, axis=None):
"""
Return the number of elements along a given axis.
Parameters
----------
a : array_like
Input data.
axis : int, optional
Axis along which the elements are counted. By default, give
the total number of elements.
Returns
-------
element_count : int
Number of elements along the specified axis.
See Also
--------
shape : dimensions of array
ndarray.shape : dimensions of array
ndarray.size : number of elements in array
Examples
--------
>>> a = np.array([[1,2,3],[4,5,6]])
>>> np.size(a)
6
>>> np.size(a,1)
3
>>> np.size(a,0)
2
"""
if axis is None:
try:
return a.size
except AttributeError:
return asarray(a).size
else:
try:
return a.shape[axis]
except AttributeError:
return asarray(a).shape[axis]
def _around_dispatcher(a, decimals=None, out=None):
return (a, out)
@array_function_dispatch(_around_dispatcher)
def around(a, decimals=0, out=None):
"""
Evenly round to the given number of decimals.
Parameters
----------
a : array_like
Input data.
decimals : int, optional
Number of decimal places to round to (default: 0). If
decimals is negative, it specifies the number of positions to
the left of the decimal point.
out : ndarray, optional
Alternative output array in which to place the result. It must have
the same shape as the expected output, but the type of the output
values will be cast if necessary. See :ref:`ufuncs-output-type` for more
details.
Returns
-------
rounded_array : ndarray
An array of the same type as `a`, containing the rounded values.
Unless `out` was specified, a new array is created. A reference to
the result is returned.
The real and imaginary parts of complex numbers are rounded
separately. The result of rounding a float is a float.
See Also
--------
ndarray.round : equivalent method
ceil, fix, floor, rint, trunc
Notes
-----
For values exactly halfway between rounded decimal values, NumPy
rounds to the nearest even value. Thus 1.5 and 2.5 round to 2.0,
-0.5 and 0.5 round to 0.0, etc.
``np.around`` uses a fast but sometimes inexact algorithm to round
floating-point datatypes. For positive `decimals` it is equivalent to
``np.true_divide(np.rint(a * 10**decimals), 10**decimals)``, which has
error due to the inexact representation of decimal fractions in the IEEE
floating point standard [1]_ and errors introduced when scaling by powers
of ten. For instance, note the extra "1" in the following:
>>> np.round(56294995342131.5, 3)
56294995342131.51
If your goal is to print such values with a fixed number of decimals, it is
preferable to use numpy's float printing routines to limit the number of
printed decimals:
>>> np.format_float_positional(56294995342131.5, precision=3)
'56294995342131.5'
The float printing routines use an accurate but much more computationally
demanding algorithm to compute the number of digits after the decimal
point.
Alternatively, Python's builtin `round` function uses a more accurate
but slower algorithm for 64-bit floating point values:
>>> round(56294995342131.5, 3)
56294995342131.5
>>> np.round(16.055, 2), round(16.055, 2) # equals 16.0549999999999997
(16.06, 16.05)
References
----------
.. [1] "Lecture Notes on the Status of IEEE 754", William Kahan,
https://people.eecs.berkeley.edu/~wkahan/ieee754status/IEEE754.PDF
.. [2] "How Futile are Mindless Assessments of
Roundoff in Floating-Point Computation?", William Kahan,
https://people.eecs.berkeley.edu/~wkahan/Mindless.pdf
Examples
--------
>>> np.around([0.37, 1.64])
array([0., 2.])
>>> np.around([0.37, 1.64], decimals=1)
array([0.4, 1.6])
>>> np.around([.5, 1.5, 2.5, 3.5, 4.5]) # rounds to nearest even value
array([0., 2., 2., 4., 4.])
>>> np.around([1,2,3,11], decimals=1) # ndarray of ints is returned
array([ 1, 2, 3, 11])
>>> np.around([1,2,3,11], decimals=-1)
array([ 0, 0, 0, 10])
"""
return _wrapfunc(a, 'round', decimals=decimals, out=out)
def _mean_dispatcher(a, axis=None, dtype=None, out=None, keepdims=None, *,
where=None):
return (a, where, out)
@array_function_dispatch(_mean_dispatcher)
def mean(a, axis=None, dtype=None, out=None, keepdims=np._NoValue, *,
where=np._NoValue):
"""
Compute the arithmetic mean along the specified axis.
Returns the average of the array elements. The average is taken over
the flattened array by default, otherwise over the specified axis.
`float64` intermediate and return values are used for integer inputs.
Parameters
----------
a : array_like
Array containing numbers whose mean is desired. If `a` is not an
array, a conversion is attempted.
axis : None or int or tuple of ints, optional
Axis or axes along which the means are computed. The default is to
compute the mean of the flattened array.
.. versionadded:: 1.7.0
If this is a tuple of ints, a mean is performed over multiple axes,
instead of a single axis or all the axes as before.
dtype : data-type, optional
Type to use in computing the mean. For integer inputs, the default
is `float64`; for floating point inputs, it is the same as the
input dtype.
out : ndarray, optional
Alternate output array in which to place the result. The default
is ``None``; if provided, it must have the same shape as the
expected output, but the type will be cast if necessary.
See :ref:`ufuncs-output-type` for more details.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the input array.
If the default value is passed, then `keepdims` will not be
passed through to the `mean` method of sub-classes of
`ndarray`, however any non-default value will be. If the
sub-class' method does not implement `keepdims` any
exceptions will be raised.
where : array_like of bool, optional
Elements to include in the mean. See `~numpy.ufunc.reduce` for details.
.. versionadded:: 1.20.0
Returns
-------
m : ndarray, see dtype parameter above
If `out=None`, returns a new array containing the mean values,
otherwise a reference to the output array is returned.
See Also
--------
average : Weighted average
std, var, nanmean, nanstd, nanvar
Notes
-----
The arithmetic mean is the sum of the elements along the axis divided
by the number of elements.
Note that for floating-point input, the mean is computed using the
same precision the input has. Depending on the input data, this can
cause the results to be inaccurate, especially for `float32` (see
example below). Specifying a higher-precision accumulator using the
`dtype` keyword can alleviate this issue.
By default, `float16` results are computed using `float32` intermediates
for extra precision.
Examples
--------
>>> a = np.array([[1, 2], [3, 4]])
>>> np.mean(a)
2.5
>>> np.mean(a, axis=0)
array([2., 3.])
>>> np.mean(a, axis=1)
array([1.5, 3.5])
In single precision, `mean` can be inaccurate:
>>> a = np.zeros((2, 512*512), dtype=np.float32)
>>> a[0, :] = 1.0
>>> a[1, :] = 0.1
>>> np.mean(a)
0.54999924
Computing the mean in float64 is more accurate:
>>> np.mean(a, dtype=np.float64)
0.55000000074505806 # may vary
Specifying a where argument:
>>> a = np.array([[5, 9, 13], [14, 10, 12], [11, 15, 19]])
>>> np.mean(a)
12.0
>>> np.mean(a, where=[[True], [False], [False]])
9.0
"""
kwargs = {}
if keepdims is not np._NoValue:
kwargs['keepdims'] = keepdims
if where is not np._NoValue:
kwargs['where'] = where
if type(a) is not mu.ndarray:
try:
mean = a.mean
except AttributeError:
pass
else:
return mean(axis=axis, dtype=dtype, out=out, **kwargs)
return _methods._mean(a, axis=axis, dtype=dtype,
out=out, **kwargs)
def _std_dispatcher(a, axis=None, dtype=None, out=None, ddof=None,
keepdims=None, *, where=None):
return (a, where, out)
@array_function_dispatch(_std_dispatcher)
def std(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue, *,
where=np._NoValue):
"""
Compute the standard deviation along the specified axis.
Returns the standard deviation, a measure of the spread of a distribution,
of the array elements. The standard deviation is computed for the
flattened array by default, otherwise over the specified axis.
Parameters
----------
a : array_like
Calculate the standard deviation of these values.
axis : None or int or tuple of ints, optional
Axis or axes along which the standard deviation is computed. The
default is to compute the standard deviation of the flattened array.
.. versionadded:: 1.7.0
If this is a tuple of ints, a standard deviation is performed over
multiple axes, instead of a single axis or all the axes as before.
dtype : dtype, optional
Type to use in computing the standard deviation. For arrays of
integer type the default is float64, for arrays of float types it is
the same as the array type.
out : ndarray, optional
Alternative output array in which to place the result. It must have
the same shape as the expected output but the type (of the calculated
values) will be cast if necessary.
ddof : int, optional
Means Delta Degrees of Freedom. The divisor used in calculations
is ``N - ddof``, where ``N`` represents the number of elements.
By default `ddof` is zero.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the input array.
If the default value is passed, then `keepdims` will not be
passed through to the `std` method of sub-classes of
`ndarray`, however any non-default value will be. If the
sub-class' method does not implement `keepdims` any
exceptions will be raised.
where : array_like of bool, optional
Elements to include in the standard deviation.
See `~numpy.ufunc.reduce` for details.
.. versionadded:: 1.20.0
Returns
-------
standard_deviation : ndarray, see dtype parameter above.
If `out` is None, return a new array containing the standard deviation,
otherwise return a reference to the output array.
See Also
--------
var, mean, nanmean, nanstd, nanvar
:ref:`ufuncs-output-type`
Notes
-----
The standard deviation is the square root of the average of the squared
deviations from the mean, i.e., ``std = sqrt(mean(x))``, where
``x = abs(a - a.mean())**2``.
The average squared deviation is typically calculated as ``x.sum() / N``,
where ``N = len(x)``. If, however, `ddof` is specified, the divisor
``N - ddof`` is used instead. In standard statistical practice, ``ddof=1``
provides an unbiased estimator of the variance of the infinite population.
``ddof=0`` provides a maximum likelihood estimate of the variance for
normally distributed variables. The standard deviation computed in this
function is the square root of the estimated variance, so even with
``ddof=1``, it will not be an unbiased estimate of the standard deviation
per se.
Note that, for complex numbers, `std` takes the absolute
value before squaring, so that the result is always real and nonnegative.
For floating-point input, the *std* is computed using the same
precision the input has. Depending on the input data, this can cause
the results to be inaccurate, especially for float32 (see example below).
Specifying a higher-accuracy accumulator using the `dtype` keyword can
alleviate this issue.
Examples
--------
>>> a = np.array([[1, 2], [3, 4]])
>>> np.std(a)
1.1180339887498949 # may vary
>>> np.std(a, axis=0)
array([1., 1.])
>>> np.std(a, axis=1)
array([0.5, 0.5])
In single precision, std() can be inaccurate:
>>> a = np.zeros((2, 512*512), dtype=np.float32)
>>> a[0, :] = 1.0
>>> a[1, :] = 0.1
>>> np.std(a)
0.45000005
Computing the standard deviation in float64 is more accurate:
>>> np.std(a, dtype=np.float64)
0.44999999925494177 # may vary
Specifying a where argument:
>>> a = np.array([[14, 8, 11, 10], [7, 9, 10, 11], [10, 15, 5, 10]])
>>> np.std(a)
2.614064523559687 # may vary
>>> np.std(a, where=[[True], [True], [False]])
2.0
"""
kwargs = {}
if keepdims is not np._NoValue:
kwargs['keepdims'] = keepdims
if where is not np._NoValue:
kwargs['where'] = where
if type(a) is not mu.ndarray:
try:
std = a.std
except AttributeError:
pass
else:
return std(axis=axis, dtype=dtype, out=out, ddof=ddof, **kwargs)
return _methods._std(a, axis=axis, dtype=dtype, out=out, ddof=ddof,
**kwargs)
def _var_dispatcher(a, axis=None, dtype=None, out=None, ddof=None,
keepdims=None, *, where=None):
return (a, where, out)
@array_function_dispatch(_var_dispatcher)
def var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue, *,
where=np._NoValue):
"""
Compute the variance along the specified axis.
Returns the variance of the array elements, a measure of the spread of a
distribution. The variance is computed for the flattened array by
default, otherwise over the specified axis.
Parameters
----------
a : array_like
Array containing numbers whose variance is desired. If `a` is not an
array, a conversion is attempted.
axis : None or int or tuple of ints, optional
Axis or axes along which the variance is computed. The default is to
compute the variance of the flattened array.
.. versionadded:: 1.7.0
If this is a tuple of ints, a variance is performed over multiple axes,
instead of a single axis or all the axes as before.
dtype : data-type, optional
Type to use in computing the variance. For arrays of integer type
the default is `float64`; for arrays of float types it is the same as
the array type.
out : ndarray, optional
Alternate output array in which to place the result. It must have
the same shape as the expected output, but the type is cast if
necessary.
ddof : int, optional
"Delta Degrees of Freedom": the divisor used in the calculation is
``N - ddof``, where ``N`` represents the number of elements. By
default `ddof` is zero.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the input array.
If the default value is passed, then `keepdims` will not be
passed through to the `var` method of sub-classes of
`ndarray`, however any non-default value will be. If the
sub-class' method does not implement `keepdims` any
exceptions will be raised.
where : array_like of bool, optional
Elements to include in the variance. See `~numpy.ufunc.reduce` for
details.
.. versionadded:: 1.20.0
Returns
-------
variance : ndarray, see dtype parameter above
If ``out=None``, returns a new array containing the variance;
otherwise, a reference to the output array is returned.
See Also
--------
std, mean, nanmean, nanstd, nanvar
:ref:`ufuncs-output-type`
Notes
-----
The variance is the average of the squared deviations from the mean,
i.e., ``var = mean(x)``, where ``x = abs(a - a.mean())**2``.
The mean is typically calculated as ``x.sum() / N``, where ``N = len(x)``.
If, however, `ddof` is specified, the divisor ``N - ddof`` is used
instead. In standard statistical practice, ``ddof=1`` provides an
unbiased estimator of the variance of a hypothetical infinite population.
``ddof=0`` provides a maximum likelihood estimate of the variance for
normally distributed variables.
Note that for complex numbers, the absolute value is taken before
squaring, so that the result is always real and nonnegative.
For floating-point input, the variance is computed using the same
precision the input has. Depending on the input data, this can cause
the results to be inaccurate, especially for `float32` (see example
below). Specifying a higher-accuracy accumulator using the ``dtype``
keyword can alleviate this issue.
Examples
--------
>>> a = np.array([[1, 2], [3, 4]])
>>> np.var(a)
1.25
>>> np.var(a, axis=0)
array([1., 1.])
>>> np.var(a, axis=1)
array([0.25, 0.25])
In single precision, var() can be inaccurate:
>>> a = np.zeros((2, 512*512), dtype=np.float32)
>>> a[0, :] = 1.0
>>> a[1, :] = 0.1
>>> np.var(a)
0.20250003
Computing the variance in float64 is more accurate:
>>> np.var(a, dtype=np.float64)
0.20249999932944759 # may vary
>>> ((1-0.55)**2 + (0.1-0.55)**2)/2
0.2025
Specifying a where argument:
>>> a = np.array([[14, 8, 11, 10], [7, 9, 10, 11], [10, 15, 5, 10]])
>>> np.var(a)
6.833333333333333 # may vary
>>> np.var(a, where=[[True], [True], [False]])
4.0
"""
kwargs = {}
if keepdims is not np._NoValue:
kwargs['keepdims'] = keepdims
if where is not np._NoValue:
kwargs['where'] = where
if type(a) is not mu.ndarray:
try:
var = a.var
except AttributeError:
pass
else:
return var(axis=axis, dtype=dtype, out=out, ddof=ddof, **kwargs)
return _methods._var(a, axis=axis, dtype=dtype, out=out, ddof=ddof,
**kwargs)
# Aliases of other functions. These have their own definitions only so that
# they can have unique docstrings.
@array_function_dispatch(_around_dispatcher)
def round_(a, decimals=0, out=None):
"""
Round an array to the given number of decimals.
See Also
--------
around : equivalent function; see for details.
"""
return around(a, decimals=decimals, out=out)
@array_function_dispatch(_prod_dispatcher, verify=False)
def product(*args, **kwargs):
"""
Return the product of array elements over a given axis.
See Also
--------
prod : equivalent function; see for details.
"""
return prod(*args, **kwargs)
@array_function_dispatch(_cumprod_dispatcher, verify=False)
def cumproduct(*args, **kwargs):
"""
Return the cumulative product over the given axis.
See Also
--------
cumprod : equivalent function; see for details.
"""
return cumprod(*args, **kwargs)
@array_function_dispatch(_any_dispatcher, verify=False)
def sometrue(*args, **kwargs):
"""
Check whether some values are true.
Refer to `any` for full documentation.
See Also
--------
any : equivalent function; see for details.
"""
return any(*args, **kwargs)
@array_function_dispatch(_all_dispatcher, verify=False)
def alltrue(*args, **kwargs):
"""
Check if all elements of input array are true.
See Also
--------
numpy.all : Equivalent function; see for details.
"""
return all(*args, **kwargs)
| bsd-3-clause |
BridgitD/school-dropout-predictions | pipeline/__1_and_2_Read_and_Describe_Data.py | 1 | 6001 | """
Joshua Mausolf - CAPP 30254 Assignment pa3.
Welcome to this relatively consumer friendly Python Script.
This file contains the source code to run the summary statistics.
The summary statistics output has been saved in the folder SUMMARY REPORT.
To analyze your data, there are two steps:
1. First, choose your dataset.
2. Second, run the following command in your terminal:
python __1_and_2_Read_and_Describe_Data.py
"""
#1 #SET DATASET
#Examples:
#dataset = 'data/cs-training.csv' #Original data
#dataset = 'cs-training#3B.csv' #Post-impute data
dataset = 'data/cohort1_all.csv'
#2 RUN THIS COMMAND (Do not use quotes or #.)
# "python __1_and_2_Read_and_Describe_Data.py"
#_______________________________________________________________________________________#
## ******-- SOURCE CODE -- DO NOT MODIFY --***** ##
#_______________________________________________________________________________________#
"""
Below is the source code to run the summary statistics.
To the consumer: please do not modify.
"""
import sys, os
import csv
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import re
#______________ FUNCTIONS __________________________#
def camel_to_snake(column_name):
"""
Converts a string that is camelCase into snake_case
"""
s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', column_name)
return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()
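# Illustrative examples (column names as in the credit-scoring csv):
#   camel_to_snake("SeriousDlqin2yrs")   -> "serious_dlqin2yrs"
#   camel_to_snake("NumberOfDependents") -> "number_of_dependents"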
#Draw Histogram Function
def bar(variable, dataset):
#Define Data
data = pd.read_csv(dataset, index_col=0, low_memory=False)
data.columns = [camel_to_snake(col) for col in data.columns]
#Generate Graph
fig =data.groupby(variable).size().plot(kind='bar')
fig.set_xlabel(variable) #defines the x axis label
fig.set_ylabel('Number of Observations') #defines y axis label
fig.set_title(variable+' Distribution') #defines graph title
plt.draw()
plt.savefig("output/histograms/"+variable+"_bar.jpg")
plt.close('all')
def histogram1(variable, dataset, color, bins):
#Define Data
data = pd.read_csv(dataset, index_col=0, low_memory=False)
data.columns = [camel_to_snake(col) for col in data.columns]
#Generate Graph
fig = data[variable].hist(bins=bins, color=color)
fig.set_xlabel(variable) #defines the x axis label
fig.set_ylabel('Number of Observations') #defines y axis label
fig.set_title(variable+' Distribution') #defines graph title
plt.draw()
plt.savefig("output/histograms/"+variable+"_histogram1_"+str(bins)+".jpg")
plt.clf()
def histogram2(variable, dataset, color, np1, np2):
#Define Data
data = pd.read_csv(dataset, index_col=0, low_memory=False)
data.columns = [camel_to_snake(col) for col in data.columns]
#Generate Graph
fig = data[variable].hist(bins=np.arange(np1, np2), color=color)
fig.set_xlabel(variable) #defines the x axis label
fig.set_ylabel('Number of Observations') #defines y axis label
fig.set_title(variable+' Distribution') #defines graph title
plt.draw()
plt.savefig("output/histograms/"+variable+"_histogram2_"+str(np1)+".jpg")
plt.clf()
def line_count(dataset):
with open(dataset, 'rU') as data_file:
reader = csv.reader(data_file)
lines = list(reader)
#Total File Rows
XR = len(lines)
return XR
def dataset_describe(dataset):
with open(dataset, 'rU') as data_file:
reader = csv.reader(data_file)
lines = list(reader)
#Total File Rows
XR = len(lines)
print "Total requested lines:", XR-1
#Total Number of Variables
variables = lines[0]
#print variables[1]
numVar = len(variables)
print "Total number of variables: ", numVar
non_ID_var = variables[1: 12]
def summarize_dataset(dataset):
"""Select dataset to summarize. Use this function to summarize a dataset.
To focus on specific variables, please use summary_statistics instead."""
#Define Data
data = pd.read_csv(dataset, index_col=0, low_memory=False)
data.columns = [camel_to_snake(col) for col in data.columns]
for variable in data.columns:
print "_"*50
print "Summary Statistics "+str(variable)+": "
count = (data[str(variable)].count())
Number_variable_lines = line_count(dataset)-1
print "Missing values: ", (Number_variable_lines - count)
print "Describe "+str(variable)+": ", '\n', (data[str(variable)].describe())
print "Mode: ", (data[str(variable)].mode())
#Histogram
if count > 1:
histogram1(str(variable), dataset, 'c', 5)
histogram1(str(variable), dataset, 'g', 10)
histogram2(str(variable), dataset, 'b', 1.5, 10)
histogram2(str(variable), dataset, 'r', 1, 10)
def summary_statistics(variable, dataset, bin1=5, bin2=10):
"""Select variable to summarize. Please input the dataset.
Histogram bins can be modified. Default is 5 and 10."""
#Define Data
data = pd.read_csv(dataset, index_col=0, low_memory=False)
data.columns = [camel_to_snake(col) for col in data.columns]
print "_"*50
print "Summary Statistics "+str(variable)+": "
count = (data[str(variable)].count())
Number_variable_lines = line_count(dataset)-1
print "Missing values: ", (Number_variable_lines - count)
print "Describe "+str(variable)+": ", '\n', (data[str(variable)].describe())
print "Mode: ", (data[str(variable)].mode())
#Histogram
try:
if count > 1:
histogram1(str(variable), dataset, 'c', bin1)
histogram1(str(variable), dataset, 'g', bin2)
histogram2(str(variable), dataset, 'b', (bin1/float(4)), bin2)
histogram2(str(variable), dataset, 'r', (bin1/float(5)), bin2)
except:
pass
#______________ LOAD and DESCRIBE DATA __________________________#
# Describe Dataset Lines and Variables
dataset_describe(dataset)
# Load Data to Pandas
data = pd.read_csv(dataset, index_col=0, low_memory=False)
data.columns = [camel_to_snake(col) for col in data.columns]
# Generate Summary Statistics
for col in data.columns:
summary_statistics(col, 'data/cohort1_all.csv', 5, 10)
#summarize_dataset('data/cs-training.csv')
#bar('serious_dlqin2yrs', 'data/cs-training.csv')
#bar('serious_dlqin2yrs', dataset)
| mit |
xiaojingyi/tushare | tushare/datayes/fund.py | 17 | 7543 | # -*- coding:utf-8 -*-
"""
DataYes (通联数据) data interface
Created on 2015/08/24
@author: Jimmy Liu
@group : waditu
@contact: [email protected]
"""
from pandas.compat import StringIO
import pandas as pd
from tushare.util import vars as vs
from tushare.util.common import Client
from tushare.util import upass as up
class Fund():
def __init__(self, client=None):
if client is None:
self.client = Client(up.get_token())
else:
self.client = client
def Fund(self, etfLof='', listStatusCd='', secID='', ticker='', category='',
operationMode='', field=''):
"""
        Get basic fund profile information, including fund name, ticker symbol,
        structured (graded) fund status, category, principal-protection status,
        listing information, related institutions, investment description, etc.
        Covers historical data since 2005; updated at irregular intervals.
"""
code, result = self.client.getData(vs.FUND%(etfLof, listStatusCd, secID,
ticker, category, operationMode, field))
return _ret_data(code, result)
def FundNav(self, dataDate='', secID='', ticker='', beginDate='', endDate='', field=''):
"""
        Get the historical net asset value (NAV) of a fund (money-market and
        short-term wealth-management bond funds excluded), including unit NAV,
        accumulated NAV and adjusted NAV.
        Covers historical data since 2005; updated daily. If no dates are given,
        roughly the past year of data is returned by default.
"""
code, result = self.client.getData(vs.FUNDNAV%(dataDate, secID, ticker,
beginDate, endDate, field))
return _ret_data(code, result)
def FundDivm(self, dataDate='', secID='', ticker='', beginDate='', endDate='', field=''):
"""
        Get the historical returns of a money-market fund or short-term
        wealth-management bond fund, including income per 10,000 fund units and
        the 7-day annualized yield.
        Covers historical data since 2005; updated daily. If no dates are given,
        roughly the past year of data is returned by default.
"""
code, result = self.client.getData(vs.FUNDDIVM%(dataDate, secID, ticker,
beginDate, endDate, field))
return _ret_data(code, result)
def FundDiv(self, secID='', ticker='', adjustedType='', beginDate='', endDate='', field=''):
"""
        Get fund NAV adjustment information, covering the two adjustment types:
        dividends and share splits. Dividend records include the dividend per
        unit, ex-dividend date and reinvestment date; split records include the
        share conversion ratio and the split date.
        Covers historical data since 2005; updated at irregular intervals.
"""
code, result = self.client.getData(vs.FUNDDIV%(secID, ticker, adjustedType,
beginDate, endDate, field))
return _ret_data(code, result)
def FundAssets(self, reportDate='', secID='', ticker='', beginDate='', endDate='', field=''):
"""
        Get the asset allocation periodically disclosed by funds, including
        total assets, net assets, and the market value and share of equities,
        fixed income, cash and other assets within total assets.
        Covers historical data since 2005; updated quarterly. Two query modes
        are supported:
        1) pass one or more secID/ticker values together with beginDate and
        endDate to get the asset allocation of the given funds over a period;
        2) pass only reportDate to get the asset allocation of all funds on
        that date.
"""
code, result = self.client.getData(vs.FUNDASSETS%(reportDate, secID, ticker,
beginDate, endDate, field))
return _ret_data(code, result)
def FundHoldings(self, reportDate='', secID='', ticker='', beginDate='', endDate='',
secType='', field=''):
"""
        Get the holdings periodically disclosed by funds, with detailed
        positions in stocks, bonds and other funds. Covers historical data
        since 2005; updated quarterly. Two query modes are supported:
        1) pass one or more secID/ticker values together with beginDate and
        endDate to get the holdings of the given funds over a period;
        2) pass only reportDate to get the holdings of all funds on that date.
"""
code, result = self.client.getData(vs.FUNDHOLDINGS%(reportDate, secID, ticker,
beginDate, endDate, secType, field))
return _ret_data(code, result)
def FundETFPRList(self, secID='', ticker='', beginDate='', endDate='', field=''):
"""
        Get the basic creation/redemption list of an ETF for a trading day,
        including the underlying index name, the previous trading day's cash
        difference, NAV per minimum creation/redemption unit and unit NAV, plus
        the current day's estimated cash difference, minimum creation/redemption
        unit, cash substitution ratio cap, whether creation/redemption is
        allowed and whether the IOPV is published.
        Covers historical data since 2005; updated daily. If no dates are given,
        roughly the last two days of data are returned by default.
"""
code, result = self.client.getData(vs.FUNDETFPRLIST%(secID, ticker, beginDate, endDate, field))
return _ret_data(code, result)
def FundETFCons(self, secID='', ticker='', beginDate='', endDate='', field=''):
"""
        Get the constituent list of the underlying index tracked by an ETF for
        each trading day, including constituent codes, short names, share
        counts, cash substitution premium ratios and fixed substitution amounts.
        Covers historical data since 2005; updated daily. If no dates are given,
        roughly the last two days of data are returned by default.
"""
code, result = self.client.getData(vs.FUNDETFCONS%(secID, ticker, beginDate, endDate, field))
return _ret_data(code, result)
def FundRating(self, secID='', ticker='', beginDate='', endDate='', field=''):
"""
        Get Shanghai Securities fund ratings. Covers historical data since 2010;
        updated monthly. If no dates are given, roughly the past year of data is
        returned by default.
"""
code, result = self.client.getData(vs.FUNDRATING%(secID, ticker, beginDate, endDate, field))
return _ret_data(code, result)
def FundSharesChg(self, secID='', ticker='', beginDate='', endDate='', field=''):
"""
        Get share-change information for exchange-listed funds, including fund
        name, ticker, exchange, cut-off date and outstanding shares.
        Covers historical data since 2005; updated daily.
"""
code, result = self.client.getData(vs.FUNDSHARESCHG%(secID, ticker, beginDate, endDate, field))
return _ret_data(code, result)
def FundLeverageInfo(self, exchangeCDLeverage='', secID='', ticker='', field=''):
"""
        Get basic information on structured (graded) funds, including the names
        and tickers of the parent and child funds, the split ratio and
        conversion details.
"""
code, result = self.client.getData(vs.FUNDLEVERAGEINFO%(exchangeCDLeverage, secID, ticker, field))
return _ret_data(code, result)
def _ret_data(code, result):
if code==200:
result = result.decode('utf-8') if vs.PY3 else result
df = pd.read_csv(StringIO(result))
return df
else:
print(result)
return None
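# A minimal usage sketch (not part of the original module): it assumes a valid
# DataYes token has already been stored via tushare.util.upass, and the
# ticker/date values below are placeholders (dates assumed as 'YYYYMMDD').
if __name__ == '__main__':
    fd = Fund()
    nav = fd.FundNav(ticker='184688', beginDate='20150101', endDate='20150801')
    if nav is not None:
        print(nav.head())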
| bsd-3-clause |
ahadmushir/whatsCooking | attemp1.py | 1 | 3074 | #trying to predict the classification by the highest number match..
#first by taking the recipe with the most ingredients for each cuisine, then scoring test recipes by ingredient overlap against those references
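#rough outline of the steps implemented below:
#  1. collect the unique cuisine labels from train.json
#  2. for each cuisine, keep the id of its recipe with the most ingredients
#  3. store those reference recipes as [cuisine, ingredient, ...] lists
#  4. for each test recipe, count ingredient overlaps with every reference and
#     predict the cuisine of the best match, writing (id, cuisine) rows to
#     submission1.csv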
import json
import pandas
import csv
trainHandle = open('train.json').read()
testHandle = open('test.json').read()
j = json.loads(trainHandle)
jTest = json.loads(testHandle)
dataTest = pandas.read_json('test.json')
############################
#getting unique cusines
unqCusines = list()
count1 = 0
try:
while True:
a = j[count1]['cuisine']
if a not in unqCusines:
unqCusines.append(a)
count1 = count1 + 1
except:
print unqCusines
##########################
check1 = list()
getIDMax = list()
try:
for cus in unqCusines:
count2 = 0
small = -1
try:
while True:
c = j[count2]['cuisine']
if cus == c:
ingL = j[count2]['ingredients']
if len(ingL) > small:
if len(check1) != 0:
check1.pop()
small = len(ingL)
idd = j[count2]['id']
check1.append(idd)
count2 = count2 + 1
except:
print cus, small
getIDMax.append(idd)
continue
except:
print 'please chal ja'
print check1
#got ID of toppers
print getIDMax
##############################
overallList = list()
for idz in getIDMax:
c3 = 0
try:
while True:
idd1 = j[c3]['id']
idinIng = j[c3]['ingredients']
idCus = j[c3]['cuisine']
if idz == idd1:
nameO = str(idCus)
nameO = list()
nameO.append(idCus)
for itz in idinIng:
nameO.append(itz)
overallList.append(nameO)
c3 = c3 + 1
except:
continue
#got the selected training tuples, all in a cute little list
print overallList[0][1:]
##############################
#print json.dumps(jTest, indent = 4)
alphaList = list()
with open('submission1.csv', 'a') as nw:
try:
testCounter = 0
smallAgain = 0
namee = 'None'
while True:
omegaList = list()
tId = jTest[testCounter]['id']
tIng = jTest[testCounter]['ingredients']
c5 = 0
while c5 != 20:
tempList = list()
for itt in tIng:
checkingList = overallList[c5][1:]
for isItThere in checkingList:
if itt == isItThere:
tempList.append(itt)
if len(tempList) >= smallAgain:
smallAgain = len(tempList)
namee = str(overallList[c5][0])
while len(tempList) != 0:
tempList.pop()
c5 = c5 + 1
omegaList.append(tId)
omegaList.append(namee)
alphaList.append([tId,namee])
print omegaList
#if testCounter == 200:
# break
#alphaList.append(omegaList)
testCounter = testCounter + 1
#b = csv.writer(nw)
#b.writerows(omegaList)
while len(omegaList) != 0:
omegaList.pop()
except:
print 'fingers crossed'
def enterInFile(Alist):
checkinggstr = list()
for itm in Alist:
ii = str(itm)
checkinggstr.append(ii)
nw = open('submission1.csv', 'a')
b = csv.writer(nw)
b.writerows(checkinggstr)
for ch in alphaList:
print ch
nw = open('submission1.csv', 'a')
b = csv.writer(nw)
b.writerows(alphaList)
print len(alphaList)
print len(dataTest)
| apache-2.0 |
yaoli/sklearn-theano | examples/plot_single_localization.py | 9 | 1558 | """
=======================================
Drawing bounding boxes for localization
=======================================
Drawing a bounding box on a detected object is crucial to properly finding
objects in images. One very simple way to do this is by simply finding all
points with a matching classification, then creating a box using the minimum
and maximum values for X and Y of the matching points.
For more detail about how this works, see the
``plot_localization_tutorial`` example.
"""
print(__doc__)
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle
from sklearn_theano.datasets import load_sample_image
from sklearn_theano.feature_extraction import OverfeatLocalizer
from sklearn_theano.feature_extraction import get_all_overfeat_labels
def convert_points_to_box(points, color, alpha):
upper_left_point = (points[:, 0].min(), points[:, 1].min())
width = points[:, 0].max() - points[:, 0].min()
height = points[:, 1].max() - points[:, 1].min()
return Rectangle(upper_left_point, width, height, ec=color,
fc=color, alpha=alpha)
X = load_sample_image("sloth.jpg")
sloth_label = [label for label in get_all_overfeat_labels()
if 'three-toed sloth' in label][0]
clf = OverfeatLocalizer(match_strings=[sloth_label])
sloth_points = clf.predict(X)[0]
sloth_box = convert_points_to_box(sloth_points, 'orange', .4)
plt.imshow(X)
ax = plt.gca()
ax.autoscale(enable=False)
ax.add_patch(sloth_box)
ax.get_xaxis().set_ticks([])
ax.get_yaxis().set_ticks([])
plt.show()
| bsd-3-clause |
franzpl/sweep | lin_sweep_kaiser_window_bandlimited_script5/merge_scripts.py | 2 | 1696 | #!/usr/bin/env python3
""" Script to merge scripts"""
import numpy as np
import matplotlib.pyplot as plt
script5 = np.genfromtxt('lin_sweep_kaiser_window_bandlimited_script5.txt')
script5_1 = np.genfromtxt('lin_sweep_kaiser_window_bandlimited_script5_1.txt')
fade_in_list = script5[:, 0]
# Script5
pnr_list = script5[:, 1]
spectrum_distance_list = script5[:, 2]
# Script5_1 (unwindowed deconvolution)
pnr_unwindowed_deconvolution_list = script5_1[:, 1]
spectrum_distance_unwindowed_deconvolution_list = script5_1[:, 2]
plt.plot(fade_in_list, pnr_list, label='Deconvolution: Excitation windowed')
plt.plot(
fade_in_list,
pnr_unwindowed_deconvolution_list,
label='Deconvolution: Excitation unwindowed')
plt.grid()
plt.title('Peak to noise ratio depending on Fade in')
plt.xlabel('Fade in / ms')
plt.ylabel('Peak to noise ratio / dB')
plt.ticklabel_format(useOffset=False)
plt.legend(loc='lower left')
plt.xlim([-10, 1000])
plt.savefig('pnr.png')
plt.close()
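# The two constants below are assumed to come from the measurement setup:
# NFFT_dirac is presumably the FFT length of the deconvolved impulse response
# and max_measurement the largest raw spectrum distance; they normalize the
# distances before the -10*log10(...) conversion to dB further down.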
NFFT_dirac = 88201
max_measurement = 5974410.59739
plt.plot(fade_in_list, -10 * np.log10(1 / NFFT_dirac *
np.asarray(spectrum_distance_list) / max_measurement), label='Deconvolution: Excitation windowed')
plt.plot(fade_in_list,
-10 * np.log10(1 / NFFT_dirac * np.asarray(spectrum_distance_unwindowed_deconvolution_list) /
max_measurement), label='Deconvolution: Excitation unwindowed')
plt.grid()
plt.title('Spectrum Distance depending on Fade in')
plt.xlabel('Fade in / ms')
plt.ylabel('(Spectrum Distance / max(Spectrum Distance)) / dB')
plt.ticklabel_format(useOffset=False)
plt.legend(loc='lower right')
plt.xlim([-10, 1000])
plt.savefig('spectral_distance.png')
| mit |
jldinh/multicell | src/multicell/rendering.py | 1 | 10347 | # -*- coding: utf-8 -*-
"""
Created on Sun Jul 17 22:38:13 2016
@author: jl
"""
#from mpl_toolkits.mplot3d import Axes3D
from mpl_toolkits.mplot3d.art3d import Poly3DCollection, Line3DCollection
import matplotlib.pyplot as plt
import matplotlib
import numpy as np
import time
from multicell.utilities import print_flush
from openalea.container import topomesh_algo
import os
from openalea.tissueshape import centroid
def unit_vector(vector):
return vector / np.linalg.norm(vector)
def border_vector(corner, opposite, centroid):
# adjacent = opposite - corner
hypothenuse = centroid - corner
# projection_length = np.dot(adjacent, hypothenuse) / np.linalg.norm(adjacent)
# opposite_length = np.sqrt(np.linalg.norm(hypothenuse)**2 - projection_length**2)
# result = hypothenuse / opposite_length
# print result
# return result
return unit_vector(hypothenuse)
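# Illustrative example (not used by the renderer): with the face corner at
# (1, 1, 0) and the cell centroid at the origin, the border vector points back
# toward the centroid with unit length; note the `opposite` argument is
# ignored by the current implementation.
#   border_vector(np.array([1., 1., 0.]), np.array([1., -1., 0.]),
#                 np.array([0., 0., 0.]))
#   -> array([-0.70710678, -0.70710678,  0.        ])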
class MatplotlibRenderer(object):
"""A class handling the rendering of a Simulation object in Matplotlib"""
def __init__(self, sim, max_cmap=None, view_size=None, view=(None, None), axes=True, clipping=((0,0,0), (0,0,0))):
"""
Creates a Matplotlib renderer using a Simulation object
Parameters
----------
sim : Simulation
The Simulation object containing the data to render.
"""
self.sim = sim
self.max_cmap = max_cmap
self.view_size = view_size
self.view = view
self.axes = axes
self.clipping = clipping
def _render(self, name=None, save=False, max_percentile=None):
"""
Low level method to render a tissue, colored by concentrations.
Concentrations are taken from the table of concentrations of the
Simulation. Uses Jet as ColorMap.
Parameters
----------
name : string
Name of the species whose concentrations must be
rendered
save : bool
Whether to save a picture or not
"""
if not any(self.clipping[1]):
outer_fids = [fid for fid in self.sim.mesh.wisps(2) if self.sim.mesh.nb_regions(2, fid) == 1]
non_clipped_cids = set(self.sim.mesh.wisps(3))
else:
non_clipped_cids = set()
reference = np.array(self.clipping[0])
normal = np.array(self.clipping[1])
for cid in self.sim.mesh.wisps(3):
vector = centroid(self.sim.mesh, self.sim.get_pos(), 3, cid) - reference
if np.dot(vector, normal) > 0:
non_clipped_cids.add(cid)
outer_fids = []
for fid in self.sim.mesh.wisps(2):
neighbors = set(self.sim.mesh.regions(2, fid))
count = 0
for cid in neighbors:
if cid in non_clipped_cids:
count += 1
if count == 1:
outer_fids.append(fid)
outer_eids = set()
for fid in outer_fids:
outer_eids.update(self.sim.mesh.borders(2, fid))
cell_delimiting_eids = set()
for eid in outer_eids:
delimited_cells = set(self.sim.mesh.regions(1, eid, 2)) & non_clipped_cids
if len(delimited_cells) > 1:
cell_delimiting_eids.add(eid)
if name == None:
face_values = [0.5 for fid in outer_fids]
array = np.zeros(self.sim.n_cells)
max_cmap = 1
else:
environment = self.sim.compute_environment()
associated_cids = [(set(self.sim.mesh.regions(2, fid)) & non_clipped_cids).pop() for fid in outer_fids]
array = environment[name]
face_values = [array.get_species(array.variables_list[0], cid) for cid in associated_cids]
if self.max_cmap is not None:
max_cmap = self.max_cmap
elif max_percentile is None:
max_cmap = np.max(array)
else:
max_cmap = np.percentile(array, max_percentile)
if max_cmap == 0:
max_cmap = 1
sm = matplotlib.cm.ScalarMappable(matplotlib.colors.Normalize(vmin=0, vmax=max_cmap, clip=True), "jet")
sm.set_array(face_values)
facecolors = [sm.to_rgba(x) for x in face_values]
# poly_pids = [[pid for pid in topomesh_algo.ordered_pids(self.sim.mesh, fid)] for fid in outer_fids]
min_coords = np.min(self.sim.pos.values(), axis=0)
max_coords = np.max(self.sim.pos.values(), axis=0)
if self.view_size is not None:
max_half_amplitude = self.view_size / 2
else:
max_half_amplitude = max(max_coords - min_coords) / 2
shrinkage = 0.005 * max_half_amplitude
cell_delimiting_pids = set()
for eid in cell_delimiting_eids:
cell_delimiting_pids.update(set(self.sim.mesh.borders(1, eid)))
polys = []
for fid in outer_fids:
ordered_pids = list(topomesh_algo.ordered_pids(self.sim.mesh, fid))
original_pos = [self.sim.pos[pid] for pid in ordered_pids]
cid = (set(self.sim.mesh.regions(2, fid)) & non_clipped_cids).pop()
bary = centroid(self.sim.mesh, self.sim.get_pos(), 3, cid)
shrinked_pos = [original_pos[i] + (shrinkage * border_vector(original_pos[i], original_pos[(i+1)%len(ordered_pids)], bary)) for i in xrange(len(ordered_pids))]
polys.append(shrinked_pos)
# polys.append(original_pos)
for eid in cell_delimiting_eids:
delimited_cids = list(set(self.sim.mesh.regions(1, eid, 2)) & non_clipped_cids)
centroids = [centroid(self.sim.mesh, self.sim.get_pos(), 3, cid) for cid in delimited_cids]
edge_pids = list(self.sim.mesh.borders(1, eid))
edge_poly = [self.sim.get_pos()[edge_pids[0]],
self.sim.get_pos()[edge_pids[0]] + shrinkage * border_vector(self.sim.get_pos()[edge_pids[0]], self.sim.get_pos()[edge_pids[1]], centroids[0]),
self.sim.get_pos()[edge_pids[1]] + shrinkage * border_vector(self.sim.get_pos()[edge_pids[1]], self.sim.get_pos()[edge_pids[0]], centroids[0]),
self.sim.get_pos()[edge_pids[1]],
self.sim.get_pos()[edge_pids[1]] + shrinkage * border_vector(self.sim.get_pos()[edge_pids[1]], self.sim.get_pos()[edge_pids[0]], centroids[1]),
self.sim.get_pos()[edge_pids[0]] + shrinkage * border_vector(self.sim.get_pos()[edge_pids[0]], self.sim.get_pos()[edge_pids[1]], centroids[1])]
polys.append(edge_poly)
facecolors.append((0,0,0,1))
#
# cell_delimiting_eids_as_pid_sets = set()
# for eid in cell_delimiting_eids:
# edge = tuple(self.sim.mesh.borders(1, eid))
# cell_delimiting_eids_as_pid_sets.update([edge, tuple(reversed(edge))])
# linewidths = []
# for poly in poly_pids:
# if np.any(poly[-1] != poly[0]):
# poly.append(poly[0])
# n = len(poly)
# for i in xrange(n-1):
# a = poly[i]
# if i < n - 1:
# b = poly[i+1]
# else:
# b = poly[0]
# edge = (a, b)
## print edge
# if edge in cell_delimiting_eids_as_pid_sets:
# linewidths.append(1)
# else:
# linewidths.append(0)
# segments = [[self.sim.pos[pid] for pid in self.sim.mesh.borders(1, eid)] for eid in cell_delimiting_eids]
fig = plt.figure()
ax = fig.gca(projection='3d')
ax.set_aspect('equal')
ax.view_init(*self.view)
if not self.axes:
ax.set_axis_off()
# print linewidths
# print np.mean(linewidths)
# linewidths+=[0.5]*10
poly = Poly3DCollection(polys, facecolors=facecolors, linewidth=0)
# edge_collection = Line3DCollection(segments, colors=((0,0,0,1),), linewidth=1.)
ax.add_collection3d(poly)
# ax.add_collection3d(edge_collection)
center = (max_coords + min_coords) / 2
boundaries = np.tile(center[:, np.newaxis], (1, 2))
boundaries[:, 0] -= max_half_amplitude
boundaries[:, 1] += max_half_amplitude
ax.set_xlabel('X')
ax.set_xlim3d(boundaries[0][0], boundaries[0][1])
ax.set_ylabel('Y')
ax.set_ylim3d(boundaries[1][0], boundaries[1][1])
ax.set_zlabel('Z')
ax.set_zlim3d(boundaries[2][0], boundaries[2][1])
if name is not None:
fig.colorbar(sm, shrink=0.5, aspect=10)
ax.set_title(name)
# plt.subplots_adjust(left=0., right=1., top=1., bottom=0.)
plt.tight_layout()
plt.show()
if save:
directory = "figures"
if not os.path.exists(directory):
os.makedirs(directory)
timestamp = time.strftime('%Y%m%d_%H%M%S') + "_" + str(time.time() * 1000 % 1)[2:]
plt.savefig("figures/" + name + "-" + timestamp + ".png")
# Viewer.frameGL.saveImage("figures/" + name + "-" + timestamp + ".png")
return max_cmap
def display(self, name=None, save=False, max_percentile=None):
"""
Display the tissue with cells colored based on their concentrations.
Also displays the lowest and highest concentrations in the tissue for
all species
Parameters
----------
name : string
Name of the species to display
save : bool
Whether a picture should be saved or not
"""
max_cmap = self._render(name, save=save, max_percentile=max_percentile)
print_flush("Time point: %s" % self.sim.current_t)
        if name is not None:
environment = self.sim.compute_environment()
for s in self.sim.intensive_cell_variables:
concs = environment[s][0]
print_flush("%s: from %s to %s" % (s, min(concs), max(concs)))
        if max_percentile is not None:
print_flush("Max value displayed for %s: %s" % (name, max_cmap))
| mit |
cython-testbed/pandas | pandas/tests/frame/test_dtypes.py | 2 | 41374 | # -*- coding: utf-8 -*-
from __future__ import print_function
import pytest
from datetime import timedelta
import numpy as np
from pandas import (DataFrame, Series, date_range, Timedelta, Timestamp,
Categorical, compat, concat, option_context)
from pandas.compat import u
from pandas import _np_version_under1p14
from pandas.core.arrays import integer_array
from pandas.core.dtypes.dtypes import DatetimeTZDtype, CategoricalDtype
from pandas.tests.frame.common import TestData
from pandas.util.testing import (assert_series_equal,
assert_frame_equal,
makeCustomDataframe as mkdf)
import pandas.util.testing as tm
import pandas as pd
@pytest.fixture(params=[str, compat.text_type])
def text_dtype(request):
return request.param
class TestDataFrameDataTypes(TestData):
def test_concat_empty_dataframe_dtypes(self):
df = DataFrame(columns=list("abc"))
df['a'] = df['a'].astype(np.bool_)
df['b'] = df['b'].astype(np.int32)
df['c'] = df['c'].astype(np.float64)
result = pd.concat([df, df])
assert result['a'].dtype == np.bool_
assert result['b'].dtype == np.int32
assert result['c'].dtype == np.float64
result = pd.concat([df, df.astype(np.float64)])
assert result['a'].dtype == np.object_
assert result['b'].dtype == np.float64
assert result['c'].dtype == np.float64
def test_empty_frame_dtypes_ftypes(self):
empty_df = pd.DataFrame()
assert_series_equal(empty_df.dtypes, pd.Series(dtype=np.object))
assert_series_equal(empty_df.ftypes, pd.Series(dtype=np.object))
nocols_df = pd.DataFrame(index=[1, 2, 3])
assert_series_equal(nocols_df.dtypes, pd.Series(dtype=np.object))
assert_series_equal(nocols_df.ftypes, pd.Series(dtype=np.object))
norows_df = pd.DataFrame(columns=list("abc"))
assert_series_equal(norows_df.dtypes, pd.Series(
np.object, index=list("abc")))
assert_series_equal(norows_df.ftypes, pd.Series(
'object:dense', index=list("abc")))
norows_int_df = pd.DataFrame(columns=list("abc")).astype(np.int32)
assert_series_equal(norows_int_df.dtypes, pd.Series(
np.dtype('int32'), index=list("abc")))
assert_series_equal(norows_int_df.ftypes, pd.Series(
'int32:dense', index=list("abc")))
odict = compat.OrderedDict
df = pd.DataFrame(odict([('a', 1), ('b', True), ('c', 1.0)]),
index=[1, 2, 3])
ex_dtypes = pd.Series(odict([('a', np.int64),
('b', np.bool),
('c', np.float64)]))
ex_ftypes = pd.Series(odict([('a', 'int64:dense'),
('b', 'bool:dense'),
('c', 'float64:dense')]))
assert_series_equal(df.dtypes, ex_dtypes)
assert_series_equal(df.ftypes, ex_ftypes)
# same but for empty slice of df
assert_series_equal(df[:0].dtypes, ex_dtypes)
assert_series_equal(df[:0].ftypes, ex_ftypes)
def test_datetime_with_tz_dtypes(self):
tzframe = DataFrame({'A': date_range('20130101', periods=3),
'B': date_range('20130101', periods=3,
tz='US/Eastern'),
'C': date_range('20130101', periods=3, tz='CET')})
tzframe.iloc[1, 1] = pd.NaT
tzframe.iloc[1, 2] = pd.NaT
result = tzframe.dtypes.sort_index()
expected = Series([np.dtype('datetime64[ns]'),
DatetimeTZDtype('datetime64[ns, US/Eastern]'),
DatetimeTZDtype('datetime64[ns, CET]')],
['A', 'B', 'C'])
assert_series_equal(result, expected)
def test_dtypes_are_correct_after_column_slice(self):
# GH6525
df = pd.DataFrame(index=range(5), columns=list("abc"), dtype=np.float_)
odict = compat.OrderedDict
assert_series_equal(df.dtypes,
pd.Series(odict([('a', np.float_),
('b', np.float_),
('c', np.float_)])))
assert_series_equal(df.iloc[:, 2:].dtypes,
pd.Series(odict([('c', np.float_)])))
assert_series_equal(df.dtypes,
pd.Series(odict([('a', np.float_),
('b', np.float_),
('c', np.float_)])))
def test_select_dtypes_include_using_list_like(self):
df = DataFrame({'a': list('abc'),
'b': list(range(1, 4)),
'c': np.arange(3, 6).astype('u1'),
'd': np.arange(4.0, 7.0, dtype='float64'),
'e': [True, False, True],
'f': pd.Categorical(list('abc')),
'g': pd.date_range('20130101', periods=3),
'h': pd.date_range('20130101', periods=3,
tz='US/Eastern'),
'i': pd.date_range('20130101', periods=3,
tz='CET'),
'j': pd.period_range('2013-01', periods=3,
freq='M'),
'k': pd.timedelta_range('1 day', periods=3)})
ri = df.select_dtypes(include=[np.number])
ei = df[['b', 'c', 'd', 'k']]
assert_frame_equal(ri, ei)
ri = df.select_dtypes(include=[np.number], exclude=['timedelta'])
ei = df[['b', 'c', 'd']]
assert_frame_equal(ri, ei)
ri = df.select_dtypes(include=[np.number, 'category'],
exclude=['timedelta'])
ei = df[['b', 'c', 'd', 'f']]
assert_frame_equal(ri, ei)
ri = df.select_dtypes(include=['datetime'])
ei = df[['g']]
assert_frame_equal(ri, ei)
ri = df.select_dtypes(include=['datetime64'])
ei = df[['g']]
assert_frame_equal(ri, ei)
ri = df.select_dtypes(include=['datetimetz'])
ei = df[['h', 'i']]
assert_frame_equal(ri, ei)
pytest.raises(NotImplementedError,
lambda: df.select_dtypes(include=['period']))
def test_select_dtypes_exclude_using_list_like(self):
df = DataFrame({'a': list('abc'),
'b': list(range(1, 4)),
'c': np.arange(3, 6).astype('u1'),
'd': np.arange(4.0, 7.0, dtype='float64'),
'e': [True, False, True]})
re = df.select_dtypes(exclude=[np.number])
ee = df[['a', 'e']]
assert_frame_equal(re, ee)
def test_select_dtypes_exclude_include_using_list_like(self):
df = DataFrame({'a': list('abc'),
'b': list(range(1, 4)),
'c': np.arange(3, 6).astype('u1'),
'd': np.arange(4.0, 7.0, dtype='float64'),
'e': [True, False, True],
'f': pd.date_range('now', periods=3).values})
exclude = np.datetime64,
include = np.bool_, 'integer'
r = df.select_dtypes(include=include, exclude=exclude)
e = df[['b', 'c', 'e']]
assert_frame_equal(r, e)
exclude = 'datetime',
include = 'bool', 'int64', 'int32'
r = df.select_dtypes(include=include, exclude=exclude)
e = df[['b', 'e']]
assert_frame_equal(r, e)
def test_select_dtypes_include_using_scalars(self):
df = DataFrame({'a': list('abc'),
'b': list(range(1, 4)),
'c': np.arange(3, 6).astype('u1'),
'd': np.arange(4.0, 7.0, dtype='float64'),
'e': [True, False, True],
'f': pd.Categorical(list('abc')),
'g': pd.date_range('20130101', periods=3),
'h': pd.date_range('20130101', periods=3,
tz='US/Eastern'),
'i': pd.date_range('20130101', periods=3,
tz='CET'),
'j': pd.period_range('2013-01', periods=3,
freq='M'),
'k': pd.timedelta_range('1 day', periods=3)})
ri = df.select_dtypes(include=np.number)
ei = df[['b', 'c', 'd', 'k']]
assert_frame_equal(ri, ei)
ri = df.select_dtypes(include='datetime')
ei = df[['g']]
assert_frame_equal(ri, ei)
ri = df.select_dtypes(include='datetime64')
ei = df[['g']]
assert_frame_equal(ri, ei)
ri = df.select_dtypes(include='category')
ei = df[['f']]
assert_frame_equal(ri, ei)
pytest.raises(NotImplementedError,
lambda: df.select_dtypes(include='period'))
def test_select_dtypes_exclude_using_scalars(self):
df = DataFrame({'a': list('abc'),
'b': list(range(1, 4)),
'c': np.arange(3, 6).astype('u1'),
'd': np.arange(4.0, 7.0, dtype='float64'),
'e': [True, False, True],
'f': pd.Categorical(list('abc')),
'g': pd.date_range('20130101', periods=3),
'h': pd.date_range('20130101', periods=3,
tz='US/Eastern'),
'i': pd.date_range('20130101', periods=3,
tz='CET'),
'j': pd.period_range('2013-01', periods=3,
freq='M'),
'k': pd.timedelta_range('1 day', periods=3)})
ri = df.select_dtypes(exclude=np.number)
ei = df[['a', 'e', 'f', 'g', 'h', 'i', 'j']]
assert_frame_equal(ri, ei)
ri = df.select_dtypes(exclude='category')
ei = df[['a', 'b', 'c', 'd', 'e', 'g', 'h', 'i', 'j', 'k']]
assert_frame_equal(ri, ei)
pytest.raises(NotImplementedError,
lambda: df.select_dtypes(exclude='period'))
def test_select_dtypes_include_exclude_using_scalars(self):
df = DataFrame({'a': list('abc'),
'b': list(range(1, 4)),
'c': np.arange(3, 6).astype('u1'),
'd': np.arange(4.0, 7.0, dtype='float64'),
'e': [True, False, True],
'f': pd.Categorical(list('abc')),
'g': pd.date_range('20130101', periods=3),
'h': pd.date_range('20130101', periods=3,
tz='US/Eastern'),
'i': pd.date_range('20130101', periods=3,
tz='CET'),
'j': pd.period_range('2013-01', periods=3,
freq='M'),
'k': pd.timedelta_range('1 day', periods=3)})
ri = df.select_dtypes(include=np.number, exclude='floating')
ei = df[['b', 'c', 'k']]
assert_frame_equal(ri, ei)
def test_select_dtypes_include_exclude_mixed_scalars_lists(self):
df = DataFrame({'a': list('abc'),
'b': list(range(1, 4)),
'c': np.arange(3, 6).astype('u1'),
'd': np.arange(4.0, 7.0, dtype='float64'),
'e': [True, False, True],
'f': pd.Categorical(list('abc')),
'g': pd.date_range('20130101', periods=3),
'h': pd.date_range('20130101', periods=3,
tz='US/Eastern'),
'i': pd.date_range('20130101', periods=3,
tz='CET'),
'j': pd.period_range('2013-01', periods=3,
freq='M'),
'k': pd.timedelta_range('1 day', periods=3)})
ri = df.select_dtypes(include=np.number,
exclude=['floating', 'timedelta'])
ei = df[['b', 'c']]
assert_frame_equal(ri, ei)
ri = df.select_dtypes(include=[np.number, 'category'],
exclude='floating')
ei = df[['b', 'c', 'f', 'k']]
assert_frame_equal(ri, ei)
def test_select_dtypes_duplicate_columns(self):
# GH20839
odict = compat.OrderedDict
df = DataFrame(odict([('a', list('abc')),
('b', list(range(1, 4))),
('c', np.arange(3, 6).astype('u1')),
('d', np.arange(4.0, 7.0, dtype='float64')),
('e', [True, False, True]),
('f', pd.date_range('now', periods=3).values)]))
df.columns = ['a', 'a', 'b', 'b', 'b', 'c']
expected = DataFrame({'a': list(range(1, 4)),
'b': np.arange(3, 6).astype('u1')})
result = df.select_dtypes(include=[np.number], exclude=['floating'])
assert_frame_equal(result, expected)
def test_select_dtypes_not_an_attr_but_still_valid_dtype(self):
df = DataFrame({'a': list('abc'),
'b': list(range(1, 4)),
'c': np.arange(3, 6).astype('u1'),
'd': np.arange(4.0, 7.0, dtype='float64'),
'e': [True, False, True],
'f': pd.date_range('now', periods=3).values})
df['g'] = df.f.diff()
assert not hasattr(np, 'u8')
r = df.select_dtypes(include=['i8', 'O'], exclude=['timedelta'])
e = df[['a', 'b']]
assert_frame_equal(r, e)
r = df.select_dtypes(include=['i8', 'O', 'timedelta64[ns]'])
e = df[['a', 'b', 'g']]
assert_frame_equal(r, e)
def test_select_dtypes_empty(self):
df = DataFrame({'a': list('abc'), 'b': list(range(1, 4))})
with tm.assert_raises_regex(ValueError, 'at least one of '
'include or exclude '
'must be nonempty'):
df.select_dtypes()
def test_select_dtypes_bad_datetime64(self):
df = DataFrame({'a': list('abc'),
'b': list(range(1, 4)),
'c': np.arange(3, 6).astype('u1'),
'd': np.arange(4.0, 7.0, dtype='float64'),
'e': [True, False, True],
'f': pd.date_range('now', periods=3).values})
with tm.assert_raises_regex(ValueError, '.+ is too specific'):
df.select_dtypes(include=['datetime64[D]'])
with tm.assert_raises_regex(ValueError, '.+ is too specific'):
df.select_dtypes(exclude=['datetime64[as]'])
def test_select_dtypes_datetime_with_tz(self):
df2 = DataFrame(dict(A=Timestamp('20130102', tz='US/Eastern'),
B=Timestamp('20130603', tz='CET')),
index=range(5))
df3 = pd.concat([df2.A.to_frame(), df2.B.to_frame()], axis=1)
result = df3.select_dtypes(include=['datetime64[ns]'])
expected = df3.reindex(columns=[])
assert_frame_equal(result, expected)
@pytest.mark.parametrize("dtype", [
str, "str", np.string_, "S1", "unicode", np.unicode_, "U1",
compat.text_type
])
@pytest.mark.parametrize("arg", ["include", "exclude"])
def test_select_dtypes_str_raises(self, dtype, arg):
df = DataFrame({"a": list("abc"),
"g": list(u("abc")),
"b": list(range(1, 4)),
"c": np.arange(3, 6).astype("u1"),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
"f": pd.date_range("now", periods=3).values})
msg = "string dtypes are not allowed"
kwargs = {arg: [dtype]}
with tm.assert_raises_regex(TypeError, msg):
df.select_dtypes(**kwargs)
def test_select_dtypes_bad_arg_raises(self):
df = DataFrame({'a': list('abc'),
'g': list(u('abc')),
'b': list(range(1, 4)),
'c': np.arange(3, 6).astype('u1'),
'd': np.arange(4.0, 7.0, dtype='float64'),
'e': [True, False, True],
'f': pd.date_range('now', periods=3).values})
with tm.assert_raises_regex(TypeError, 'data type.'
'*not understood'):
df.select_dtypes(['blargy, blarg, blarg'])
def test_select_dtypes_typecodes(self):
# GH 11990
df = mkdf(30, 3, data_gen_f=lambda x, y: np.random.random())
expected = df
FLOAT_TYPES = list(np.typecodes['AllFloat'])
assert_frame_equal(df.select_dtypes(FLOAT_TYPES), expected)
def test_dtypes_gh8722(self):
self.mixed_frame['bool'] = self.mixed_frame['A'] > 0
result = self.mixed_frame.dtypes
expected = Series({k: v.dtype
for k, v in compat.iteritems(self.mixed_frame)},
index=result.index)
assert_series_equal(result, expected)
# compat, GH 8722
with option_context('use_inf_as_na', True):
df = DataFrame([[1]])
result = df.dtypes
assert_series_equal(result, Series({0: np.dtype('int64')}))
def test_ftypes(self):
frame = self.mixed_float
expected = Series(dict(A='float32:dense',
B='float32:dense',
C='float16:dense',
D='float64:dense')).sort_values()
result = frame.ftypes.sort_values()
assert_series_equal(result, expected)
def test_astype(self):
casted = self.frame.astype(int)
expected = DataFrame(self.frame.values.astype(int),
index=self.frame.index,
columns=self.frame.columns)
assert_frame_equal(casted, expected)
casted = self.frame.astype(np.int32)
expected = DataFrame(self.frame.values.astype(np.int32),
index=self.frame.index,
columns=self.frame.columns)
assert_frame_equal(casted, expected)
self.frame['foo'] = '5'
casted = self.frame.astype(int)
expected = DataFrame(self.frame.values.astype(int),
index=self.frame.index,
columns=self.frame.columns)
assert_frame_equal(casted, expected)
# mixed casting
def _check_cast(df, v):
assert (list({s.dtype.name for
_, s in compat.iteritems(df)})[0] == v)
mn = self.all_mixed._get_numeric_data().copy()
mn['little_float'] = np.array(12345., dtype='float16')
mn['big_float'] = np.array(123456789101112., dtype='float64')
casted = mn.astype('float64')
_check_cast(casted, 'float64')
casted = mn.astype('int64')
_check_cast(casted, 'int64')
casted = self.mixed_float.reindex(columns=['A', 'B']).astype('float32')
_check_cast(casted, 'float32')
casted = mn.reindex(columns=['little_float']).astype('float16')
_check_cast(casted, 'float16')
casted = self.mixed_float.reindex(columns=['A', 'B']).astype('float16')
_check_cast(casted, 'float16')
casted = mn.astype('float32')
_check_cast(casted, 'float32')
casted = mn.astype('int32')
_check_cast(casted, 'int32')
# to object
casted = mn.astype('O')
_check_cast(casted, 'object')
def test_astype_with_exclude_string(self):
df = self.frame.copy()
expected = self.frame.astype(int)
df['string'] = 'foo'
casted = df.astype(int, errors='ignore')
expected['string'] = 'foo'
assert_frame_equal(casted, expected)
df = self.frame.copy()
expected = self.frame.astype(np.int32)
df['string'] = 'foo'
casted = df.astype(np.int32, errors='ignore')
expected['string'] = 'foo'
assert_frame_equal(casted, expected)
def test_astype_with_view(self):
tf = self.mixed_float.reindex(columns=['A', 'B', 'C'])
casted = tf.astype(np.int64)
casted = tf.astype(np.float32)
# this is the only real reason to do it this way
tf = np.round(self.frame).astype(np.int32)
casted = tf.astype(np.float32, copy=False)
# TODO(wesm): verification?
tf = self.frame.astype(np.float64)
casted = tf.astype(np.int64, copy=False) # noqa
@pytest.mark.parametrize("dtype", [np.int32, np.int64])
@pytest.mark.parametrize("val", [np.nan, np.inf])
def test_astype_cast_nan_inf_int(self, val, dtype):
# see gh-14265
#
# Check NaN and inf --> raise error when converting to int.
msg = "Cannot convert non-finite values \\(NA or inf\\) to integer"
df = DataFrame([val])
with tm.assert_raises_regex(ValueError, msg):
df.astype(dtype)
def test_astype_str(self, text_dtype):
# see gh-9757
a = Series(date_range("2010-01-04", periods=5))
b = Series(date_range("3/6/2012 00:00", periods=5, tz="US/Eastern"))
c = Series([Timedelta(x, unit="d") for x in range(5)])
d = Series(range(5))
e = Series([0.0, 0.2, 0.4, 0.6, 0.8])
df = DataFrame({"a": a, "b": b, "c": c, "d": d, "e": e})
# Datetime-like
# Test str and unicode on Python 2.x and just str on Python 3.x
result = df.astype(text_dtype)
expected = DataFrame({
"a": list(map(text_dtype,
map(lambda x: Timestamp(x)._date_repr, a._values))),
"b": list(map(text_dtype, map(Timestamp, b._values))),
"c": list(map(text_dtype,
map(lambda x: Timedelta(x)._repr_base(format="all"),
c._values))),
"d": list(map(text_dtype, d._values)),
"e": list(map(text_dtype, e._values)),
})
assert_frame_equal(result, expected)
def test_astype_str_float(self, text_dtype):
# see gh-11302
result = DataFrame([np.NaN]).astype(text_dtype)
expected = DataFrame(["nan"])
assert_frame_equal(result, expected)
result = DataFrame([1.12345678901234567890]).astype(text_dtype)
# < 1.14 truncates
# >= 1.14 preserves the full repr
val = ("1.12345678901" if _np_version_under1p14
else "1.1234567890123457")
expected = DataFrame([val])
assert_frame_equal(result, expected)
@pytest.mark.parametrize("dtype_class", [dict, Series])
def test_astype_dict_like(self, dtype_class):
# GH7271 & GH16717
a = Series(date_range('2010-01-04', periods=5))
b = Series(range(5))
c = Series([0.0, 0.2, 0.4, 0.6, 0.8])
d = Series(['1.0', '2', '3.14', '4', '5.4'])
df = DataFrame({'a': a, 'b': b, 'c': c, 'd': d})
original = df.copy(deep=True)
# change type of a subset of columns
dt1 = dtype_class({'b': 'str', 'd': 'float32'})
result = df.astype(dt1)
expected = DataFrame({
'a': a,
'b': Series(['0', '1', '2', '3', '4']),
'c': c,
'd': Series([1.0, 2.0, 3.14, 4.0, 5.4], dtype='float32')})
assert_frame_equal(result, expected)
assert_frame_equal(df, original)
dt2 = dtype_class({'b': np.float32, 'c': 'float32', 'd': np.float64})
result = df.astype(dt2)
expected = DataFrame({
'a': a,
'b': Series([0.0, 1.0, 2.0, 3.0, 4.0], dtype='float32'),
'c': Series([0.0, 0.2, 0.4, 0.6, 0.8], dtype='float32'),
'd': Series([1.0, 2.0, 3.14, 4.0, 5.4], dtype='float64')})
assert_frame_equal(result, expected)
assert_frame_equal(df, original)
# change all columns
dt3 = dtype_class({'a': str, 'b': str, 'c': str, 'd': str})
assert_frame_equal(df.astype(dt3),
df.astype(str))
assert_frame_equal(df, original)
# error should be raised when using something other than column labels
# in the keys of the dtype dict
dt4 = dtype_class({'b': str, 2: str})
dt5 = dtype_class({'e': str})
pytest.raises(KeyError, df.astype, dt4)
pytest.raises(KeyError, df.astype, dt5)
assert_frame_equal(df, original)
# if the dtypes provided are the same as the original dtypes, the
# resulting DataFrame should be the same as the original DataFrame
dt6 = dtype_class({col: df[col].dtype for col in df.columns})
equiv = df.astype(dt6)
assert_frame_equal(df, equiv)
assert_frame_equal(df, original)
# GH 16717
# if dtypes provided is empty, the resulting DataFrame
# should be the same as the original DataFrame
dt7 = dtype_class({})
result = df.astype(dt7)
assert_frame_equal(df, equiv)
assert_frame_equal(df, original)
def test_astype_duplicate_col(self):
a1 = Series([1, 2, 3, 4, 5], name='a')
b = Series([0.1, 0.2, 0.4, 0.6, 0.8], name='b')
a2 = Series([0, 1, 2, 3, 4], name='a')
df = concat([a1, b, a2], axis=1)
result = df.astype(str)
a1_str = Series(['1', '2', '3', '4', '5'], dtype='str', name='a')
b_str = Series(['0.1', '0.2', '0.4', '0.6', '0.8'], dtype=str,
name='b')
a2_str = Series(['0', '1', '2', '3', '4'], dtype='str', name='a')
expected = concat([a1_str, b_str, a2_str], axis=1)
assert_frame_equal(result, expected)
result = df.astype({'a': 'str'})
expected = concat([a1_str, b, a2_str], axis=1)
assert_frame_equal(result, expected)
@pytest.mark.parametrize('dtype', [
'category',
CategoricalDtype(),
CategoricalDtype(ordered=True),
CategoricalDtype(ordered=False),
CategoricalDtype(categories=list('abcdef')),
CategoricalDtype(categories=list('edba'), ordered=False),
CategoricalDtype(categories=list('edcb'), ordered=True)], ids=repr)
def test_astype_categorical(self, dtype):
# GH 18099
d = {'A': list('abbc'), 'B': list('bccd'), 'C': list('cdde')}
df = DataFrame(d)
result = df.astype(dtype)
expected = DataFrame({k: Categorical(d[k], dtype=dtype) for k in d})
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("cls", [
pd.api.types.CategoricalDtype,
pd.api.types.DatetimeTZDtype,
pd.api.types.IntervalDtype
])
def test_astype_categoricaldtype_class_raises(self, cls):
df = DataFrame({"A": ['a', 'a', 'b', 'c']})
xpr = "Expected an instance of {}".format(cls.__name__)
with tm.assert_raises_regex(TypeError, xpr):
df.astype({"A": cls})
with tm.assert_raises_regex(TypeError, xpr):
df['A'].astype(cls)
@pytest.mark.parametrize("dtype", ['Int64', 'Int32', 'Int16'])
def test_astype_extension_dtypes(self, dtype):
# GH 22578
df = pd.DataFrame([[1., 2.], [3., 4.], [5., 6.]], columns=['a', 'b'])
expected1 = pd.DataFrame({'a': integer_array([1, 3, 5],
dtype=dtype),
'b': integer_array([2, 4, 6],
dtype=dtype)})
tm.assert_frame_equal(df.astype(dtype), expected1)
tm.assert_frame_equal(df.astype('int64').astype(dtype), expected1)
tm.assert_frame_equal(df.astype(dtype).astype('float64'), df)
df = pd.DataFrame([[1., 2.], [3., 4.], [5., 6.]], columns=['a', 'b'])
df['b'] = df['b'].astype(dtype)
expected2 = pd.DataFrame({'a': [1., 3., 5.],
'b': integer_array([2, 4, 6],
dtype=dtype)})
tm.assert_frame_equal(df, expected2)
tm.assert_frame_equal(df.astype(dtype), expected1)
tm.assert_frame_equal(df.astype('int64').astype(dtype), expected1)
@pytest.mark.parametrize("dtype", ['Int64', 'Int32', 'Int16'])
def test_astype_extension_dtypes_1d(self, dtype):
# GH 22578
df = pd.DataFrame({'a': [1., 2., 3.]})
expected1 = pd.DataFrame({'a': integer_array([1, 2, 3],
dtype=dtype)})
tm.assert_frame_equal(df.astype(dtype), expected1)
tm.assert_frame_equal(df.astype('int64').astype(dtype), expected1)
df = pd.DataFrame({'a': [1., 2., 3.]})
df['a'] = df['a'].astype(dtype)
expected2 = pd.DataFrame({'a': integer_array([1, 2, 3],
dtype=dtype)})
tm.assert_frame_equal(df, expected2)
tm.assert_frame_equal(df.astype(dtype), expected1)
tm.assert_frame_equal(df.astype('int64').astype(dtype), expected1)
@pytest.mark.parametrize('dtype', [
{100: 'float64', 200: 'uint64'}, 'category', 'float64'])
def test_astype_column_metadata(self, dtype):
# GH 19920
columns = pd.UInt64Index([100, 200, 300], name='foo')
df = DataFrame(np.arange(15).reshape(5, 3), columns=columns)
df = df.astype(dtype)
tm.assert_index_equal(df.columns, columns)
@pytest.mark.parametrize("dtype", ["M8", "m8"])
@pytest.mark.parametrize("unit", ['ns', 'us', 'ms', 's', 'h', 'm', 'D'])
    def test_astype_from_datetimelike_to_object(self, dtype, unit):
# tests astype to object dtype
# gh-19223 / gh-12425
dtype = "{}[{}]".format(dtype, unit)
arr = np.array([[1, 2, 3]], dtype=dtype)
df = DataFrame(arr)
result = df.astype(object)
assert (result.dtypes == object).all()
if dtype.startswith('M8'):
assert result.iloc[0, 0] == pd.to_datetime(1, unit=unit)
else:
assert result.iloc[0, 0] == pd.to_timedelta(1, unit=unit)
@pytest.mark.parametrize("arr_dtype", [np.int64, np.float64])
@pytest.mark.parametrize("dtype", ["M8", "m8"])
@pytest.mark.parametrize("unit", ['ns', 'us', 'ms', 's', 'h', 'm', 'D'])
def test_astype_to_datetimelike_unit(self, arr_dtype, dtype, unit):
# tests all units from numeric origination
# gh-19223 / gh-12425
dtype = "{}[{}]".format(dtype, unit)
arr = np.array([[1, 2, 3]], dtype=arr_dtype)
df = DataFrame(arr)
result = df.astype(dtype)
expected = DataFrame(arr.astype(dtype))
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("unit", ['ns', 'us', 'ms', 's', 'h', 'm', 'D'])
def test_astype_to_datetime_unit(self, unit):
# tests all units from datetime origination
# gh-19223
dtype = "M8[{}]".format(unit)
arr = np.array([[1, 2, 3]], dtype=dtype)
df = DataFrame(arr)
result = df.astype(dtype)
expected = DataFrame(arr.astype(dtype))
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("unit", ['ns'])
def test_astype_to_timedelta_unit_ns(self, unit):
        # preserve the timedelta conversion
# gh-19223
dtype = "m8[{}]".format(unit)
arr = np.array([[1, 2, 3]], dtype=dtype)
df = DataFrame(arr)
result = df.astype(dtype)
expected = DataFrame(arr.astype(dtype))
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("unit", ['us', 'ms', 's', 'h', 'm', 'D'])
def test_astype_to_timedelta_unit(self, unit):
# coerce to float
# gh-19223
dtype = "m8[{}]".format(unit)
arr = np.array([[1, 2, 3]], dtype=dtype)
df = DataFrame(arr)
result = df.astype(dtype)
expected = DataFrame(df.values.astype(dtype).astype(float))
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("unit", ['ns', 'us', 'ms', 's', 'h', 'm', 'D'])
def test_astype_to_incorrect_datetimelike(self, unit):
# trying to astype a m to a M, or vice-versa
# gh-19224
dtype = "M8[{}]".format(unit)
other = "m8[{}]".format(unit)
df = DataFrame(np.array([[1, 2, 3]], dtype=dtype))
with pytest.raises(TypeError):
df.astype(other)
df = DataFrame(np.array([[1, 2, 3]], dtype=other))
with pytest.raises(TypeError):
df.astype(dtype)
def test_timedeltas(self):
df = DataFrame(dict(A=Series(date_range('2012-1-1', periods=3,
freq='D')),
B=Series([timedelta(days=i) for i in range(3)])))
result = df.get_dtype_counts().sort_index()
expected = Series(
{'datetime64[ns]': 1, 'timedelta64[ns]': 1}).sort_index()
assert_series_equal(result, expected)
df['C'] = df['A'] + df['B']
expected = Series(
{'datetime64[ns]': 2, 'timedelta64[ns]': 1}).sort_values()
result = df.get_dtype_counts().sort_values()
assert_series_equal(result, expected)
# mixed int types
df['D'] = 1
expected = Series({'datetime64[ns]': 2,
'timedelta64[ns]': 1,
'int64': 1}).sort_values()
result = df.get_dtype_counts().sort_values()
assert_series_equal(result, expected)
def test_arg_for_errors_in_astype(self):
# issue #14878
df = DataFrame([1, 2, 3])
with pytest.raises(ValueError):
df.astype(np.float64, errors=True)
with tm.assert_produces_warning(FutureWarning):
df.astype(np.int8, raise_on_error=False)
df.astype(np.int8, errors='ignore')
@pytest.mark.parametrize('input_vals', [
([1, 2]),
(['1', '2']),
(list(pd.date_range('1/1/2011', periods=2, freq='H'))),
(list(pd.date_range('1/1/2011', periods=2, freq='H',
tz='US/Eastern'))),
([pd.Interval(left=0, right=5)]),
])
def test_constructor_list_str(self, input_vals, string_dtype):
# GH 16605
# Ensure that data elements are converted to strings when
# dtype is str, 'str', or 'U'
result = DataFrame({'A': input_vals}, dtype=string_dtype)
expected = DataFrame({'A': input_vals}).astype({'A': string_dtype})
assert_frame_equal(result, expected)
def test_constructor_list_str_na(self, string_dtype):
result = DataFrame({"A": [1.0, 2.0, None]}, dtype=string_dtype)
expected = DataFrame({"A": ['1.0', '2.0', None]}, dtype=object)
assert_frame_equal(result, expected)
@pytest.mark.parametrize("data, expected", [
# empty
(DataFrame(), True),
# multi-same
(DataFrame({"A": [1, 2], "B": [1, 2]}), True),
# multi-object
(DataFrame({"A": np.array([1, 2], dtype=object),
"B": np.array(["a", "b"], dtype=object)}), True),
# multi-extension
(DataFrame({"A": pd.Categorical(['a', 'b']),
"B": pd.Categorical(['a', 'b'])}), True),
# differ types
(DataFrame({"A": [1, 2], "B": [1., 2.]}), False),
# differ sizes
(DataFrame({"A": np.array([1, 2], dtype=np.int32),
"B": np.array([1, 2], dtype=np.int64)}), False),
# multi-extension differ
(DataFrame({"A": pd.Categorical(['a', 'b']),
"B": pd.Categorical(['b', 'c'])}), False),
])
def test_is_homogeneous_type(self, data, expected):
assert data._is_homogeneous_type is expected
def test_asarray_homogenous(self):
df = pd.DataFrame({"A": pd.Categorical([1, 2]),
"B": pd.Categorical([1, 2])})
result = np.asarray(df)
# may change from object in the future
expected = np.array([[1, 1], [2, 2]], dtype='object')
tm.assert_numpy_array_equal(result, expected)
class TestDataFrameDatetimeWithTZ(TestData):
def test_interleave(self):
# interleave with object
result = self.tzframe.assign(D='foo').values
expected = np.array([[Timestamp('2013-01-01 00:00:00'),
Timestamp('2013-01-02 00:00:00'),
Timestamp('2013-01-03 00:00:00')],
[Timestamp('2013-01-01 00:00:00-0500',
tz='US/Eastern'),
pd.NaT,
Timestamp('2013-01-03 00:00:00-0500',
tz='US/Eastern')],
[Timestamp('2013-01-01 00:00:00+0100', tz='CET'),
pd.NaT,
Timestamp('2013-01-03 00:00:00+0100', tz='CET')],
['foo', 'foo', 'foo']], dtype=object).T
tm.assert_numpy_array_equal(result, expected)
# interleave with only datetime64[ns]
result = self.tzframe.values
expected = np.array([[Timestamp('2013-01-01 00:00:00'),
Timestamp('2013-01-02 00:00:00'),
Timestamp('2013-01-03 00:00:00')],
[Timestamp('2013-01-01 00:00:00-0500',
tz='US/Eastern'),
pd.NaT,
Timestamp('2013-01-03 00:00:00-0500',
tz='US/Eastern')],
[Timestamp('2013-01-01 00:00:00+0100', tz='CET'),
pd.NaT,
Timestamp('2013-01-03 00:00:00+0100',
tz='CET')]], dtype=object).T
tm.assert_numpy_array_equal(result, expected)
def test_astype(self):
# astype
expected = np.array([[Timestamp('2013-01-01 00:00:00'),
Timestamp('2013-01-02 00:00:00'),
Timestamp('2013-01-03 00:00:00')],
[Timestamp('2013-01-01 00:00:00-0500',
tz='US/Eastern'),
pd.NaT,
Timestamp('2013-01-03 00:00:00-0500',
tz='US/Eastern')],
[Timestamp('2013-01-01 00:00:00+0100', tz='CET'),
pd.NaT,
Timestamp('2013-01-03 00:00:00+0100',
tz='CET')]],
dtype=object).T
result = self.tzframe.astype(object)
assert_frame_equal(result, DataFrame(
expected, index=self.tzframe.index, columns=self.tzframe.columns))
result = self.tzframe.astype('datetime64[ns]')
expected = DataFrame({'A': date_range('20130101', periods=3),
'B': (date_range('20130101', periods=3,
tz='US/Eastern')
.tz_convert('UTC')
.tz_localize(None)),
'C': (date_range('20130101', periods=3,
tz='CET')
.tz_convert('UTC')
.tz_localize(None))})
expected.iloc[1, 1] = pd.NaT
expected.iloc[1, 2] = pd.NaT
assert_frame_equal(result, expected)
def test_astype_str(self):
# str formatting
result = self.tzframe.astype(str)
expected = DataFrame([['2013-01-01', '2013-01-01 00:00:00-05:00',
'2013-01-01 00:00:00+01:00'],
['2013-01-02', 'NaT', 'NaT'],
['2013-01-03', '2013-01-03 00:00:00-05:00',
'2013-01-03 00:00:00+01:00']],
columns=self.tzframe.columns)
tm.assert_frame_equal(result, expected)
with option_context('display.max_columns', 20):
result = str(self.tzframe)
assert ('0 2013-01-01 2013-01-01 00:00:00-05:00 '
'2013-01-01 00:00:00+01:00') in result
assert ('1 2013-01-02 '
'NaT NaT') in result
assert ('2 2013-01-03 2013-01-03 00:00:00-05:00 '
'2013-01-03 00:00:00+01:00') in result
| bsd-3-clause |
SciLifeLab/bcbio-nextgen | bcbio/install.py | 1 | 28284 | """Handle installation and updates of bcbio-nextgen, third party software and data.
Enables automated installation tool and in-place updates to install additional
data and software.
"""
import argparse
import collections
import contextlib
import datetime
from distutils.version import LooseVersion
import os
import shutil
import string
import subprocess
import sys
import requests
import yaml
from bcbio import broad, utils
from bcbio.pipeline import genome
from bcbio.variation import effects
from bcbio.provenance import programs
from bcbio.distributed.transaction import file_transaction
REMOTES = {
"requirements": "https://raw.github.com/chapmanb/bcbio-nextgen/master/requirements.txt",
"gitrepo": "git://github.com/chapmanb/bcbio-nextgen.git",
"cloudbiolinux": "https://github.com/chapmanb/cloudbiolinux.git",
"genome_resources": "https://raw.github.com/chapmanb/bcbio-nextgen/master/config/genomes/%s-resources.yaml",
"snpeff_dl_url": ("http://downloads.sourceforge.net/project/snpeff/databases/v{snpeff_ver}/"
"snpEff_v{snpeff_ver}_{genome}.zip")}
SUPPORTED_GENOMES = ["GRCh37", "hg19", "mm10", "mm9", "rn5", "canFam3", "dm3",
"Zv9", "phix", "sacCer3", "xenTro3", "TAIR10", "WBcel235",
"pseudomonas_aeruginosa_ucbpp_pa14"]
SUPPORTED_INDEXES = ["bowtie", "bowtie2", "bwa", "novoalign", "snap", "star", "ucsc", "seq"]
Tool = collections.namedtuple("Tool", ["name", "fname"])
def upgrade_bcbio(args):
"""Perform upgrade of bcbio to latest release, or from GitHub development version.
Handles bcbio, third party tools and data.
"""
args = add_install_defaults(args)
pip_bin = os.path.join(os.path.dirname(sys.executable), "pip")
if args.upgrade in ["skip"]:
pass
elif args.upgrade in ["stable", "system"]:
_update_conda_packages()
print("Upgrading bcbio-nextgen to latest stable version")
sudo_cmd = [] if args.upgrade == "stable" else ["sudo"]
subprocess.check_call(sudo_cmd + [pip_bin, "install", "-r", REMOTES["requirements"]])
print("Upgrade of bcbio-nextgen code complete.")
elif args.upgrade in ["deps"]:
_update_conda_packages()
else:
_update_conda_packages()
print("Upgrading bcbio-nextgen to latest development version")
subprocess.check_call([pip_bin, "install", "git+%s#egg=bcbio-nextgen" % REMOTES["gitrepo"]])
subprocess.check_call([pip_bin, "install", "--upgrade", "--no-deps",
"git+%s#egg=bcbio-nextgen" % REMOTES["gitrepo"]])
print("Upgrade of bcbio-nextgen development code complete.")
try:
_set_matplotlib_default_backend()
except OSError:
pass
if args.tooldir:
with bcbio_tmpdir():
print("Upgrading third party tools to latest versions")
_symlink_bcbio(args, script="bcbio_nextgen.py")
_symlink_bcbio(args, script="bcbio_setup_genome.py")
upgrade_thirdparty_tools(args, REMOTES)
print("Third party tools upgrade complete.")
if args.install_data:
if len(args.genomes) == 0:
print("Data not installed, no genomes provided with `--genomes` flag")
elif len(args.aligners) == 0:
print("Data not installed, no aligners provided with `--aligners` flag")
else:
with bcbio_tmpdir():
print("Upgrading bcbio-nextgen data files")
upgrade_bcbio_data(args, REMOTES)
print("bcbio-nextgen data upgrade complete.")
if args.isolate and args.tooldir:
print("Installation directory not added to current PATH")
print(" Add:\n {t}/bin to PATH\n {t}/lib to LD_LIBRARY_PATH\n"
" {t}/lib/perl5:{t}/lib/perl5/site_perl to PERL5LIB".format(t=args.tooldir))
save_install_defaults(args)
args.datadir = _get_data_dir()
_install_container_bcbio_system(args.datadir)
print("Upgrade completed successfully.")
return args
def _set_matplotlib_default_backend():
"""
    matplotlib will try to print to a display if one is available, but we don't
    want to run it in interactive mode. We tried setting the backend to 'Agg'
    before importing, but it was still resulting in issues, so we replace the
    existing backend with 'agg' in the default matplotlibrc. This is a hack
    until we can find a better solution.
"""
if _matplotlib_installed():
import matplotlib
matplotlib.use('Agg', force=True)
config = matplotlib.matplotlib_fname()
with file_transaction(config) as tx_out_file:
with open(config) as in_file, open(tx_out_file, "w") as out_file:
for line in in_file:
if line.split(":")[0].strip() == "backend":
out_file.write("backend: agg\n")
else:
out_file.write(line)
def _matplotlib_installed():
try:
import matplotlib
    except ImportError:
return False
return True
def _symlink_bcbio(args, script="bcbio_nextgen.py"):
"""Ensure a bcbio-nextgen script symlink in final tool directory.
"""
bcbio_anaconda = os.path.join(os.path.dirname(sys.executable), script)
bcbio_final = os.path.join(args.tooldir, "bin", script)
sudo_cmd = ["sudo"] if args.sudo else []
if not os.path.exists(bcbio_final):
if os.path.lexists(bcbio_final):
subprocess.check_call(sudo_cmd + ["rm", "-f", bcbio_final])
subprocess.check_call(sudo_cmd + ["ln", "-s", bcbio_anaconda, bcbio_final])
def _install_container_bcbio_system(datadir):
"""Install limited bcbio_system.yaml file for setting core and memory usage.
Adds any non-specific programs to the exposed bcbio_system.yaml file, only
when upgrade happening inside a docker container.
"""
base_file = os.path.join(datadir, "config", "bcbio_system.yaml")
if not os.path.exists(base_file):
return
expose_file = os.path.join(datadir, "galaxy", "bcbio_system.yaml")
expose = set(["memory", "cores", "jvm_opts"])
with open(base_file) as in_handle:
config = yaml.load(in_handle)
if os.path.exists(expose_file):
with open(expose_file) as in_handle:
expose_config = yaml.load(in_handle)
else:
expose_config = {"resources": {}}
for pname, vals in config["resources"].iteritems():
expose_vals = {}
for k, v in vals.iteritems():
if k in expose:
expose_vals[k] = v
if len(expose_vals) > 0 and pname not in expose_config["resources"]:
expose_config["resources"][pname] = expose_vals
with open(expose_file, "w") as out_handle:
yaml.safe_dump(expose_config, out_handle, default_flow_style=False, allow_unicode=False)
return expose_file
def _default_deploy_args(args):
toolplus = {"data": {"bio_nextgen": []}}
custom_add = collections.defaultdict(list)
for x in args.toolplus:
if not x.fname:
for k, vs in toolplus.get(x.name, {}).iteritems():
custom_add[k].extend(vs)
return {"flavor": "ngs_pipeline_minimal",
"custom_add": dict(custom_add),
"vm_provider": "novm",
"hostname": "localhost",
"fabricrc_overrides": {"edition": "minimal",
"use_sudo": args.sudo,
"keep_isolated": args.isolate,
"distribution": args.distribution or "__auto__",
"dist_name": "__auto__"}}
def _update_conda_packages():
"""If installed in an anaconda directory, upgrade conda packages.
"""
conda_bin = os.path.join(os.path.dirname(sys.executable), "conda")
pkgs = ["biopython", "boto", "cnvkit", "cpat", "cython", "ipython", "lxml",
"matplotlib", "msgpack-python", "nose", "numpy", "pandas", "patsy", "pycrypto",
"pip", "pysam", "pyvcf", "pyyaml", "pyzmq", "reportlab", "requests", "scikit-learn",
"scipy", "seaborn", "setuptools", "sqlalchemy", "statsmodels", "toolz", "tornado"]
channels = ["-c", "https://conda.binstar.org/bcbio"]
if os.path.exists(conda_bin):
subprocess.check_call([conda_bin, "install", "--yes", "numpy"])
subprocess.check_call([conda_bin, "install", "--yes"] + channels + pkgs)
def _get_data_dir():
base_dir = os.path.realpath(os.path.dirname(os.path.dirname(sys.executable)))
if "anaconda" not in os.path.basename(base_dir) and "virtualenv" not in os.path.basename(base_dir):
raise ValueError("Cannot update data for bcbio-nextgen not installed by installer.\n"
"bcbio-nextgen needs to be installed inside an anaconda environment \n"
"located in the same directory as `galaxy` `genomes` and `gemini_data` directories.")
return os.path.dirname(base_dir)
def get_gemini_dir():
try:
data_dir = _get_data_dir()
return os.path.join(data_dir, "gemini_data")
except ValueError:
return None
def upgrade_bcbio_data(args, remotes):
"""Upgrade required genome data files in place.
"""
data_dir = _get_data_dir()
s = _default_deploy_args(args)
s["actions"] = ["setup_biodata"]
tooldir = args.tooldir or get_defaults().get("tooldir")
if tooldir:
s["fabricrc_overrides"]["system_install"] = tooldir
s["fabricrc_overrides"]["data_files"] = data_dir
s["fabricrc_overrides"]["galaxy_home"] = os.path.join(data_dir, "galaxy")
cbl = get_cloudbiolinux(remotes)
s["genomes"] = _get_biodata(cbl["biodata"], args)
sys.path.insert(0, cbl["dir"])
cbl_deploy = __import__("cloudbio.deploy", fromlist=["deploy"])
cbl_deploy.deploy(s)
_upgrade_genome_resources(s["fabricrc_overrides"]["galaxy_home"],
remotes["genome_resources"])
_upgrade_snpeff_data(s["fabricrc_overrides"]["galaxy_home"], args, remotes)
_upgrade_vep_data(s["fabricrc_overrides"]["galaxy_home"], tooldir)
toolplus = set([x.name for x in args.toolplus])
if 'data' in toolplus:
gemini = os.path.join(os.path.dirname(sys.executable), "gemini")
extras = []
if "cadd" in toolplus:
extras.extend(["--extra", "cadd_score"])
subprocess.check_call([gemini, "update", "--dataonly"] + extras)
def _upgrade_genome_resources(galaxy_dir, base_url):
"""Retrieve latest version of genome resource YAML configuration files.
"""
for dbkey, ref_file in genome.get_builds(galaxy_dir):
# Check for a remote genome resources file
remote_url = base_url % dbkey
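        # e.g. dbkey "GRCh37" expands to .../config/genomes/GRCh37-resources.yaml (dbkey shown is illustrative)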
r = requests.get(remote_url)
if r.status_code == requests.codes.ok:
local_file = os.path.join(os.path.dirname(ref_file), os.path.basename(remote_url))
if os.path.exists(local_file):
with open(local_file) as in_handle:
local_config = yaml.load(in_handle)
remote_config = yaml.load(r.text)
needs_update = remote_config["version"] > local_config.get("version", 0)
if needs_update:
shutil.move(local_file, local_file + ".old%s" % local_config.get("version", 0))
else:
needs_update = True
if needs_update:
print("Updating %s genome resources configuration" % dbkey)
with open(local_file, "w") as out_handle:
out_handle.write(r.text)
def _upgrade_vep_data(galaxy_dir, tooldir):
for dbkey, ref_file in genome.get_builds(galaxy_dir):
effects.prep_vep_cache(dbkey, ref_file, tooldir)
def _upgrade_snpeff_data(galaxy_dir, args, remotes):
"""Install or upgrade snpEff databases, localized to reference directory.
"""
for dbkey, ref_file in genome.get_builds(galaxy_dir):
resource_file = os.path.join(os.path.dirname(ref_file), "%s-resources.yaml" % dbkey)
if os.path.exists(resource_file):
with open(resource_file) as in_handle:
resources = yaml.load(in_handle)
snpeff_db, snpeff_base_dir = effects.get_db({"genome_resources": resources,
"reference": {"fasta": {"base": ref_file}}})
if snpeff_db:
snpeff_db_dir = os.path.join(snpeff_base_dir, snpeff_db)
if not os.path.exists(snpeff_db_dir):
print("Installing snpEff database %s in %s" % (snpeff_db, snpeff_base_dir))
dl_url = remotes["snpeff_dl_url"].format(snpeff_ver=_get_snpeff_version(args), genome=snpeff_db)
dl_file = os.path.basename(dl_url)
with utils.chdir(snpeff_base_dir):
subprocess.check_call(["wget", "-c", "-O", dl_file, dl_url])
subprocess.check_call(["unzip", dl_file])
os.remove(dl_file)
dl_dir = os.path.join(snpeff_base_dir, "data", snpeff_db)
os.rename(dl_dir, snpeff_db_dir)
os.rmdir(os.path.join(snpeff_base_dir, "data"))
def _get_snpeff_version(args):
tooldir = args.tooldir or get_defaults()["tooldir"]
raw_version = programs.get_version_manifest("snpeff")
if not raw_version:
config = {"resources": {"snpeff": {"jvm_opts": ["-Xms500m", "-Xmx1g"],
"dir": os.path.join(tooldir, "share", "java", "snpeff")}}}
raw_version = programs.java_versioner("snpeff", "snpEff",
stdout_flag="snpEff version SnpEff")(config)
snpeff_version = "".join([x for x in raw_version
if x in set(string.digits + ".")]).replace(".", "_")
assert snpeff_version, "Did not find snpEff version information"
return snpeff_version
def _get_biodata(base_file, args):
with open(base_file) as in_handle:
config = yaml.load(in_handle)
config["install_liftover"] = False
config["genome_indexes"] = args.aligners
config["genomes"] = [_add_biodata_flags(g, args) for g in config["genomes"] if g["dbkey"] in args.genomes]
return config
def _add_biodata_flags(g, args):
toolplus = set([x.name for x in args.toolplus])
if g["dbkey"] in ["hg19", "GRCh37"]:
for flag in ["dbnsfp"]:
if flag in toolplus:
g[flag] = True
return g
def upgrade_thirdparty_tools(args, remotes):
"""Install and update third party tools used in the pipeline.
Creates a manifest directory with installed programs on the system.
"""
s = {"fabricrc_overrides": {"system_install": args.tooldir,
"local_install": os.path.join(args.tooldir, "local_install"),
"distribution": args.distribution,
"use_sudo": args.sudo,
"edition": "minimal"}}
s = _default_deploy_args(args)
s["actions"] = ["install_biolinux"]
s["fabricrc_overrides"]["system_install"] = args.tooldir
s["fabricrc_overrides"]["local_install"] = os.path.join(args.tooldir, "local_install")
cbl = get_cloudbiolinux(remotes)
sys.path.insert(0, cbl["dir"])
cbl_deploy = __import__("cloudbio.deploy", fromlist=["deploy"])
cbl_deploy.deploy(s)
manifest_dir = os.path.join(_get_data_dir(), "manifest")
print("Creating manifest of installed packages in %s" % manifest_dir)
cbl_manifest = __import__("cloudbio.manifest", fromlist=["manifest"])
if os.path.exists(manifest_dir):
for fname in os.listdir(manifest_dir):
if not fname.startswith("toolplus"):
os.remove(os.path.join(manifest_dir, fname))
cbl_manifest.create(manifest_dir, args.tooldir)
print("Installing additional tools")
_install_toolplus(args, manifest_dir)
def _install_toolplus(args, manifest_dir):
"""Install additional tools we cannot distribute, updating local manifest.
"""
toolplus_manifest = os.path.join(manifest_dir, "toolplus-packages.yaml")
system_config = os.path.join(_get_data_dir(), "galaxy", "bcbio_system.yaml")
toolplus_dir = os.path.join(_get_data_dir(), "toolplus")
for tool in args.toolplus:
if tool.name == "data":
_install_gemini(args.tooldir, _get_data_dir(), args)
elif tool.name == "kraken":
_install_kraken_db(_get_data_dir(), args)
elif tool.name in set(["gatk", "mutect"]):
_install_gatk_jar(tool.name, tool.fname, toolplus_manifest, system_config, toolplus_dir)
elif tool.name in set(["protected"]): # back compatibility
pass
elif tool.name in set(["cadd", "dbnsfp"]): # larger data targets
pass
else:
raise ValueError("Unexpected toolplus argument: %s %s" (tool.name, tool.fname))
def get_gatk_jar_version(name, fname):
if name == "gatk":
return broad.get_gatk_version(fname)
elif name == "mutect":
return broad.get_mutect_version(fname)
else:
raise ValueError("Unexpected GATK input: %s" % name)
def _install_gatk_jar(name, fname, manifest, system_config, toolplus_dir):
"""Install a jar for GATK or associated tools like MuTect.
"""
if not fname.endswith(".jar"):
raise ValueError("--toolplus argument for %s expects a jar file: %s" % (name, fname))
version = get_gatk_jar_version(name, fname)
store_dir = utils.safe_makedir(os.path.join(toolplus_dir, name, version))
shutil.copyfile(fname, os.path.join(store_dir, os.path.basename(fname)))
_update_system_file(system_config, name, {"dir": store_dir})
_update_manifest(manifest, name, version)
def _update_manifest(manifest_file, name, version):
"""Update the toolplus manifest file with updated name and version
"""
if os.path.exists(manifest_file):
with open(manifest_file) as in_handle:
manifest = yaml.load(in_handle)
else:
manifest = {}
manifest[name] = {"name": name, "version": version}
with open(manifest_file, "w") as out_handle:
yaml.safe_dump(manifest, out_handle, default_flow_style=False, allow_unicode=False)
def _update_system_file(system_file, name, new_kvs):
"""Update the bcbio_system.yaml file with new resource information.
"""
bak_file = system_file + ".bak%s" % datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S")
shutil.copyfile(system_file, bak_file)
with open(system_file) as in_handle:
config = yaml.load(in_handle)
new_rs = {}
for rname, r_kvs in config.get("resources", {}).iteritems():
if rname == name:
for k, v in new_kvs.iteritems():
r_kvs[k] = v
new_rs[rname] = r_kvs
config["resources"] = new_rs
with open(system_file, "w") as out_handle:
yaml.safe_dump(config, out_handle, default_flow_style=False, allow_unicode=False)
def _install_gemini(tooldir, datadir, args):
"""Install gemini layered on top of bcbio-nextgen, sharing anaconda framework.
"""
# check if we have an up to date version, upgrading if needed
gemini = os.path.join(os.path.dirname(sys.executable), "gemini")
if os.path.exists(gemini):
vurl = "https://raw.github.com/arq5x/gemini/master/requirements.txt"
r = requests.get(vurl)
for line in r.text.split():
if line.startswith("gemini=="):
latest_version = line.split("==")[-1]
cur_version = subprocess.check_output([gemini, "-v"], stderr=subprocess.STDOUT).strip().split()[-1]
if LooseVersion(latest_version) > LooseVersion(cur_version):
subprocess.check_call([gemini, "update"])
# install from scratch inside existing Anaconda python
else:
url = "https://raw.github.com/arq5x/gemini/master/gemini/scripts/gemini_install.py"
script = os.path.basename(url)
subprocess.check_call(["wget", "-O", script, url, "--no-check-certificate"])
cmd = [sys.executable, "-E", script, tooldir, datadir, "--notools", "--nodata", "--sharedpy"]
if not args.sudo:
cmd.append("--nosudo")
subprocess.check_call(cmd)
os.remove(script)
def _install_kraken_db(datadir, args):
"""Install kraken minimal DB in genome folder.
"""
kraken = os.path.join(datadir, "genomes/kraken")
url = "https://ccb.jhu.edu/software/kraken/dl/minikraken.tgz"
compress = os.path.join(kraken, os.path.basename(url))
base, ext = utils.splitext_plus(os.path.basename(url))
db = os.path.join(kraken, base)
tooldir = args.tooldir or get_defaults()["tooldir"]
if os.path.exists(os.path.join(tooldir, "bin", "kraken")):
if not os.path.exists(kraken):
utils.safe_makedir(kraken)
if not os.path.exists(db):
if not os.path.exists(compress):
subprocess.check_call(["wget", "-O", compress, url, "--no-check-certificate"])
cmd = ["tar", "-xzvf", compress, "-C", kraken]
subprocess.check_call(cmd)
shutil.move(os.path.join(kraken, "minikraken_20140330"), os.path.join(kraken, "minikraken"))
utils.remove_safe(compress)
else:
raise argparse.ArgumentTypeError("kraken not installed in tooldir %s." %
os.path.join(tooldir, "bin", "kraken"))
# ## Store a local configuration file with upgrade details
def _get_install_config():
"""Return the YAML configuration file used to store upgrade information.
"""
try:
data_dir = _get_data_dir()
except ValueError:
return None
config_dir = utils.safe_makedir(os.path.join(data_dir, "config"))
return os.path.join(config_dir, "install-params.yaml")
def save_install_defaults(args):
"""Save installation information to make future upgrades easier.
"""
install_config = _get_install_config()
if install_config is None:
return
if utils.file_exists(install_config):
with open(install_config) as in_handle:
cur_config = yaml.load(in_handle)
else:
cur_config = {}
if args.tooldir:
cur_config["tooldir"] = args.tooldir
cur_config["sudo"] = args.sudo
cur_config["isolate"] = args.isolate
for attr in ["genomes", "aligners"]:
if not cur_config.get(attr):
cur_config[attr] = []
for x in getattr(args, attr):
if x not in cur_config[attr]:
cur_config[attr].append(x)
# toolplus -- save non-filename inputs
attr = "toolplus"
if not cur_config.get(attr):
cur_config[attr] = []
for x in getattr(args, attr):
if not x.fname:
if x.name not in cur_config[attr]:
cur_config[attr].append(x.name)
with open(install_config, "w") as out_handle:
yaml.safe_dump(cur_config, out_handle, default_flow_style=False, allow_unicode=False)
def add_install_defaults(args):
"""Add any saved installation defaults to the upgrade.
"""
# Ensure we install data if we've specified any secondary installation targets
if len(args.genomes) > 0 or len(args.aligners) > 0 or len(args.toolplus) > 0:
args.install_data = True
install_config = _get_install_config()
if install_config is None or not utils.file_exists(install_config):
return args
with open(install_config) as in_handle:
default_args = yaml.load(in_handle)
# if we are upgrading to development, also upgrade the tools
if args.upgrade in ["development"]:
args.tools = True
if args.tools and args.tooldir is None:
if "tooldir" in default_args:
args.tooldir = str(default_args["tooldir"])
else:
raise ValueError("Default tool directory not yet saved in config defaults. "
"Specify the '--tooldir=/path/to/tools' to upgrade tools. "
"After a successful upgrade, the '--tools' parameter will "
"work for future upgrades.")
for attr in ["genomes", "aligners", "toolplus"]:
for x in default_args.get(attr, []):
x = Tool(x, None) if attr == "toolplus" else str(x)
new_val = getattr(args, attr)
if x not in getattr(args, attr):
new_val.append(x)
setattr(args, attr, new_val)
if "sudo" in default_args and not args.sudo is False:
args.sudo = default_args["sudo"]
if "isolate" in default_args and not args.isolate is True:
args.isolate = default_args["isolate"]
return args
def get_defaults():
install_config = _get_install_config()
if install_config is None or not utils.file_exists(install_config):
return {}
with open(install_config) as in_handle:
return yaml.load(in_handle)
def _check_toolplus(x):
"""Parse options for adding non-standard/commercial tools like GATK and MuTecT.
"""
std_choices = set(["data", "cadd", "dbnsfp", "kraken"])
if x in std_choices:
return Tool(x, None)
elif "=" in x and len(x.split("=")) == 2:
name, fname = x.split("=")
fname = os.path.normpath(os.path.realpath(fname))
if not os.path.exists(fname):
raise argparse.ArgumentTypeError("Unexpected --toolplus argument for %s. File does not exist: %s"
% (name, fname))
return Tool(name, fname)
else:
raise argparse.ArgumentTypeError("Unexpected --toolplus argument. Expect toolname=filename.")
def add_subparser(subparsers):
parser = subparsers.add_parser("upgrade", help="Install or upgrade bcbio-nextgen")
parser.add_argument("--tooldir",
help="Directory to install 3rd party software tools. Leave unspecified for no tools",
type=lambda x: (os.path.abspath(os.path.expanduser(x))), default=None)
parser.add_argument("--tools",
help="Boolean argument specifying upgrade of tools. Uses previously saved install directory",
action="store_true", default=False)
parser.add_argument("-u", "--upgrade", help="Code version to upgrade",
choices=["stable", "development", "system", "deps", "skip"], default="skip")
parser.add_argument("--toolplus", help="Specify additional tool categories to install",
action="append", default=[], type=_check_toolplus)
parser.add_argument("--genomes", help="Genomes to download",
action="append", default=[], choices=SUPPORTED_GENOMES)
parser.add_argument("--aligners", help="Aligner indexes to download",
action="append", default=[],
choices = SUPPORTED_INDEXES)
parser.add_argument("--data", help="Upgrade data dependencies",
dest="install_data", action="store_true", default=False)
parser.add_argument("--sudo", help="Use sudo for the installation, enabling install of system packages",
dest="sudo", action="store_true", default=False)
parser.add_argument("--isolate", help="Created an isolated installation without PATH updates",
dest="isolate", action="store_true", default=False)
parser.add_argument("--distribution", help="Operating system distribution",
default="",
choices=["ubuntu", "debian", "centos", "scientificlinux", "macosx"])
return parser
def get_cloudbiolinux(remotes):
base_dir = os.path.join(os.getcwd(), "cloudbiolinux")
if not os.path.exists(base_dir):
subprocess.check_call(["git", "clone", remotes["cloudbiolinux"]])
return {"biodata": os.path.join(base_dir, "config", "biodata.yaml"),
"dir": base_dir}
@contextlib.contextmanager
def bcbio_tmpdir():
orig_dir = os.getcwd()
work_dir = os.path.join(os.getcwd(), "tmpbcbio-install")
if not os.path.exists(work_dir):
os.makedirs(work_dir)
os.chdir(work_dir)
yield work_dir
os.chdir(orig_dir)
shutil.rmtree(work_dir)
| mit |
huaxz1986/git_book | chapters/Ensemble/adaboost_classifier.py | 1 | 6389 | # -*- coding: utf-8 -*-
"""
    Ensemble Learning
    ~~~~~~~~~~~~~~~~~
AdaBoostClassifier
:copyright: (c) 2016 by the huaxz1986.
:license: lgpl-3.0, see LICENSE for more details.
"""
import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets,cross_validation,ensemble
def load_data_classification():
'''
    Load the dataset used for the classification problem.
    :return: a tuple for classification. Its elements are, in order: the training sample set, the test sample set, the labels of the training samples, and the labels of the test samples.
    '''
    digits=datasets.load_digits() # use the digits dataset bundled with scikit-learn
    return cross_validation.train_test_split(digits.data,digits.target,
        test_size=0.25,random_state=0,stratify=digits.target) # stratified split into training and test sets; the test set is 1/4 of the original data
def test_AdaBoostClassifier(*data):
'''
    Test AdaBoostClassifier and plot how its predictive performance changes with the number of base estimators.
    :param data: variable-length argument. It is a tuple whose elements are, in order: the training sample set, the test sample set, the labels of the training samples, and the labels of the test samples.
:return: None
'''
X_train,X_test,y_train,y_test=data
clf=ensemble.AdaBoostClassifier(learning_rate=0.1)
clf.fit(X_train,y_train)
    ## plotting
    fig=plt.figure()
    ax=fig.add_subplot(1,1,1)
    estimators_num=len(clf.estimators_)
    X=range(1,estimators_num+1)
    ax.plot(list(X),list(clf.staged_score(X_train,y_train)),label="Training score")
ax.plot(list(X),list(clf.staged_score(X_test,y_test)),label="Testing score")
ax.set_xlabel("estimator num")
ax.set_ylabel("score")
ax.legend(loc="best")
ax.set_title("AdaBoostClassifier")
plt.show()
def test_AdaBoostClassifier_base_classifier(*data):
'''
    Test how the predictive performance of AdaBoostClassifier changes with the number and the type of base estimators.
    :param data: variable-length argument. It is a tuple whose elements are, in order: the training sample set, the test sample set, the labels of the training samples, and the labels of the test samples.
:return: None
'''
from sklearn.naive_bayes import GaussianNB
X_train,X_test,y_train,y_test=data
fig=plt.figure()
ax=fig.add_subplot(2,1,1)
    ########### default base estimator #############
    clf=ensemble.AdaBoostClassifier(learning_rate=0.1)
    clf.fit(X_train,y_train)
    ## plotting
    estimators_num=len(clf.estimators_)
    X=range(1,estimators_num+1)
    ax.plot(list(X),list(clf.staged_score(X_train,y_train)),label="Training score")
ax.plot(list(X),list(clf.staged_score(X_test,y_test)),label="Testing score")
ax.set_xlabel("estimator num")
ax.set_ylabel("score")
ax.legend(loc="lower right")
ax.set_ylim(0,1)
ax.set_title("AdaBoostClassifier with Decision Tree")
    ####### Gaussian Naive Bayes base estimator ########
    ax=fig.add_subplot(2,1,2)
    clf=ensemble.AdaBoostClassifier(learning_rate=0.1,base_estimator=GaussianNB())
    clf.fit(X_train,y_train)
    ## plotting
    estimators_num=len(clf.estimators_)
    X=range(1,estimators_num+1)
    ax.plot(list(X),list(clf.staged_score(X_train,y_train)),label="Training score")
ax.plot(list(X),list(clf.staged_score(X_test,y_test)),label="Testing score")
ax.set_xlabel("estimator num")
ax.set_ylabel("score")
ax.legend(loc="lower right")
ax.set_ylim(0,1)
ax.set_title("AdaBoostClassifier with Gaussian Naive Bayes")
plt.show()
def test_AdaBoostClassifier_learning_rate(*data):
'''
    Test how the predictive performance of AdaBoostClassifier changes with the learning rate.
    :param data: variable-length argument. It is a tuple whose elements are, in order: the training sample set, the test sample set, the labels of the training samples, and the labels of the test samples.
:return: None
'''
X_train,X_test,y_train,y_test=data
learning_rates=np.linspace(0.01,1)
fig=plt.figure()
ax=fig.add_subplot(1,1,1)
    training_scores=[]
    testing_scores=[]
    for learning_rate in learning_rates:
        clf=ensemble.AdaBoostClassifier(learning_rate=learning_rate,n_estimators=500)
        clf.fit(X_train,y_train)
        training_scores.append(clf.score(X_train,y_train))
        testing_scores.append(clf.score(X_test,y_test))
    ax.plot(learning_rates,training_scores,label="Training score")
ax.plot(learning_rates,testing_scores,label="Testing score")
ax.set_xlabel("learning rate")
ax.set_ylabel("score")
ax.legend(loc="best")
ax.set_title("AdaBoostClassifier")
plt.show()
def test_AdaBoostClassifier_algorithm(*data):
'''
    Test how the predictive performance of AdaBoostClassifier changes with the learning rate and the algorithm parameter.
    :param data: variable-length argument. It is a tuple whose elements are, in order: the training sample set, the test sample set, the labels of the training samples, and the labels of the test samples.
:return: None
'''
X_train,X_test,y_train,y_test=data
algorithms=['SAMME.R','SAMME']
fig=plt.figure()
learning_rates=[0.05,0.1,0.5,0.9]
for i,learning_rate in enumerate(learning_rates):
ax=fig.add_subplot(2,2,i+1)
        for j, algorithm in enumerate(algorithms):
            clf=ensemble.AdaBoostClassifier(learning_rate=learning_rate,
            algorithm=algorithm)
            clf.fit(X_train,y_train)
            ## plotting
            estimators_num=len(clf.estimators_)
            X=range(1,estimators_num+1)
            ax.plot(list(X),list(clf.staged_score(X_train,y_train)),
                label="%s:Training score"%algorithms[j])
            ax.plot(list(X),list(clf.staged_score(X_test,y_test)),
                label="%s:Testing score"%algorithms[j])
            ax.set_xlabel("estimator num")
            ax.set_ylabel("score")
            ax.legend(loc="lower right")
            ax.set_title("learning rate:%f"%learning_rate)
fig.suptitle("AdaBoostClassifier")
plt.show()
if __name__=='__main__':
    X_train,X_test,y_train,y_test=load_data_classification() # load the classification data
    test_AdaBoostClassifier(X_train,X_test,y_train,y_test) # call test_AdaBoostClassifier
    # test_AdaBoostClassifier_base_classifier(X_train,X_test,y_train,y_test) # call test_AdaBoostClassifier_base_classifier
    # test_AdaBoostClassifier_learning_rate(X_train,X_test,y_train,y_test) # call test_AdaBoostClassifier_learning_rate
    # test_AdaBoostClassifier_algorithm(X_train,X_test,y_train,y_test) # call test_AdaBoostClassifier_algorithm
| gpl-3.0 |
jmmease/pandas | asv_bench/benchmarks/strings.py | 6 | 2873 | from .pandas_vb_common import *
import string
import itertools as IT
import pandas.util.testing as testing
class StringMethods(object):
goal_time = 0.2
def make_series(self, letters, strlen, size):
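# Cycle ``letters`` to fill size * strlen single bytes, then reinterpret (view)
# the buffer as ``size`` fixed-width |S{strlen} strings and wrap them in a Series.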
return Series([str(x) for x in np.fromiter(IT.cycle(letters), count=(size * strlen), dtype='|S1').view('|S{}'.format(strlen))])
def setup(self):
self.many = self.make_series(('matchthis' + string.ascii_uppercase), strlen=19, size=10000)
self.few = self.make_series(('matchthis' + (string.ascii_uppercase * 42)), strlen=19, size=10000)
self.s = self.make_series(string.ascii_uppercase, strlen=10, size=10000).str.join('|')
def time_cat(self):
self.many.str.cat(sep=',')
def time_center(self):
self.many.str.center(100)
def time_contains_few(self):
self.few.str.contains('matchthis')
def time_contains_few_noregex(self):
self.few.str.contains('matchthis', regex=False)
def time_contains_many(self):
self.many.str.contains('matchthis')
def time_contains_many_noregex(self):
self.many.str.contains('matchthis', regex=False)
def time_count(self):
self.many.str.count('matchthis')
def time_endswith(self):
self.many.str.endswith('matchthis')
def time_extract(self):
self.many.str.extract('(\\w*)matchthis(\\w*)')
def time_findall(self):
self.many.str.findall('[A-Z]+')
def time_get(self):
self.many.str.get(0)
def time_join_split(self):
self.many.str.join('--').str.split('--')
def time_join_split_expand(self):
self.many.str.join('--').str.split('--', expand=True)
def time_len(self):
self.many.str.len()
def time_match(self):
self.many.str.match('mat..this')
def time_pad(self):
self.many.str.pad(100, side='both')
def time_repeat(self):
self.many.str.repeat(list(IT.islice(IT.cycle(range(1, 4)), len(self.many))))
def time_replace(self):
self.many.str.replace('(matchthis)', '\x01\x01')
def time_slice(self):
self.many.str.slice(5, 15, 2)
def time_startswith(self):
self.many.str.startswith('matchthis')
def time_strip(self):
self.many.str.strip('matchthis')
def time_rstrip(self):
self.many.str.rstrip('matchthis')
def time_lstrip(self):
self.many.str.lstrip('matchthis')
def time_title(self):
self.many.str.title()
def time_upper(self):
self.many.str.upper()
def time_lower(self):
self.many.str.lower()
def time_get_dummies(self):
self.s.str.get_dummies('|')
class StringEncode(object):
goal_time = 0.2
def setup(self):
self.ser = Series(testing.makeUnicodeIndex())
def time_encode_decode(self):
self.ser.str.encode('utf-8').str.decode('utf-8')
| bsd-3-clause |
wkfwkf/statsmodels | tools/code_maintenance.py | 37 | 2307 | """
Code maintenance script modified from PyMC
"""
#!/usr/bin/env python
import sys
import os
# This is a function, not a test case, because it has to be run from inside
# the source tree to work well.
mod_strs = ['IPython', 'pylab', 'matplotlib', 'scipy','Pdb']
dep_files = {}
for mod_str in mod_strs:
dep_files[mod_str] = []
def remove_whitespace(fname):
# Remove trailing whitespace
fd = open(fname,mode='U') # open in universal newline mode
lines = []
for line in fd.readlines():
lines.append( line.rstrip() )
fd.close()
fd = open(fname,mode='w')
fd.seek(0)
for line in lines:
fd.write(line+'\n')
fd.close()
# print 'Removed whitespace from %s'%fname
def find_whitespace(fname):
fd = open(fname, mode='U')
for line in fd.readlines():
#print repr(line)
if ' \n' in line:
print fname
break
# print
print_only = True
# ====================
# = Strip whitespace =
# ====================
for dirname, dirs, files in os.walk('.'):
if dirname[1:].find('.')==-1:
# print dirname
for fname in files:
if fname[-2:] in ['.c', '.f'] or fname[-3:]=='.py' or fname[-4:] in ['.pyx', '.txt', '.tex', '.sty', '.cls'] or fname.find('.')==-1:
# print fname
if print_only:
find_whitespace(dirname + '/' + fname)
else:
remove_whitespace(dirname + '/' + fname)
"""
# ==========================
# = Check for dependencies =
# ==========================
for dirname, dirs, files in os.walk('pymc'):
for fname in files:
if fname[-3:]=='.py' or fname[-4:]=='.pyx':
if dirname.find('sandbox')==-1 and fname != 'test_dependencies.py'\
and dirname.find('examples')==-1:
for mod_str in mod_strs:
if file(dirname+'/'+fname).read().find(mod_str)>=0:
dep_files[mod_str].append(dirname+'/'+fname)
print 'Instances of optional dependencies found are:'
for mod_str in mod_strs:
print '\t'+mod_str+':'
for fname in dep_files[mod_str]:
print '\t\t'+fname
if len(dep_files['Pdb'])>0:
raise ValueError, 'Looks like Pdb was not commented out in '+', '.join(dep_files[mod_str])
"""
| bsd-3-clause |
ptorrey/torrey_cmf | examples/plot_illustris_cmf.py | 2 | 1236 | import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import torrey_cmf
tc = torrey_cmf.number_density()
redshift_list = np.arange(7)
n_m_bin = 100
l_min_m = 7.0
l_max_m = 12.0
r = l_max_m - l_min_m
mass_array = np.arange(l_min_m, l_max_m, r / n_m_bin)
fontsize=14
cm = plt.get_cmap('nipy_spectral')
fig = plt.figure(figsize=(5,5))
ax = fig.add_subplot(1,1,1)
fig.subplots_adjust(left=0.16, right=0.99, top=0.99, bottom=0.12)
ax.set_yscale('log')
ax.set_xscale('log')
ax.set_xlim([10.0**l_min_m, 10.0**l_max_m])
ax.set_ylim([3e-6, 2e-1])
ax.set_xlabel(r'M${}_*$ (M${}_\odot$)', fontsize=14)
ax.set_ylabel(r'N(>M) (Mpc${}^{-3}$)', fontsize=14)
for tick in ax.xaxis.get_major_ticks():
tick.label.set_fontsize(fontsize)
for tick in ax.yaxis.get_major_ticks():
tick.label.set_fontsize(fontsize)
for index,z in enumerate(redshift_list):
color = cm(1.*index/(1.0*redshift_list.shape[0]))
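# cmf_fit appears to return log10 of the cumulative number density, hence the 10** below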
nd = 10.0**tc.cmf_fit(mass_array, z)
valid_range = (mass_array > 7.0) & (mass_array < 12.0) & (nd > 3e-5)
ax.plot( 10.0**mass_array[valid_range], nd[valid_range],
lw=2, label='z={:.1f}'.format(z))
ax.legend(loc=0, prop={'size':fontsize-3})
fig.savefig('./illustris_cmf.pdf')
| gpl-2.0 |
neet-ai/sources | lstm_first_test/stock_lstm.py | 1 | 2788 | import pandas as pd
import numpy as np
import math
import random
from keras.models import Sequential
from keras.layers.core import Dense, Activation
from keras.layers.recurrent import LSTM
#% matplotlib inline
import matplotlib.pyplot as plt
random.seed(0)
# coefficient for the random noise
random_factor = 0.05
# number of steps per cycle
steps_per_cycle = 80
# number of cycles to generate
number_of_cycles = 50
# length of the input sequence
length_of_sequences = 100
# dimensionality of the input and output values
in_out_neurons = 1
# number of neurons in the hidden layer
hidden_neurons = 300
np_ary = np.arange(steps_per_cycle * number_of_cycles + 1);
data = np.load(input("Enter the path to the stock price data (.npy): "))
arg_data = data[1:] / data[:len(data)-1] - 1
df = pd.DataFrame(arg_data, columns=["stock"])
#df = pd.DataFrame(np_ary, columns=["x"])
#pi_t = 2 * math.pi / steps_per_cycle
#df["sin(x)"] = df.x.apply(lambda t: math.sin(t * pi_t + random.uniform(-1.0, +1.0) * random_factor))
df[["stock"]].head(steps_per_cycle * 2).plot()
################################################################
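# Input_Ans_Extract builds sliding windows over the return series: for each index i,
# the ``input_num`` consecutive returns starting at i form one input sequence and the
# return immediately after that window is the target. Data_Split applies this to a
# chronological train/test split.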
def Input_Ans_Extract(data, input_num = 100):
InputList, AnsList = [], []
for i in range(len(data) - input_num):
InputData = data.iloc[i:i+input_num].as_matrix()
AnsData = data.iloc[i+input_num].as_matrix()
InputList.append(InputData)
AnsList.append(AnsData)
InputList_np = np.array(InputList)
AnsList_np = np.array(AnsList)
return InputList_np, AnsList_np
def Data_Split(df, test_size=0.1, input_num = 100):
train_size = round(len(df) * (1 - test_size))
train_size = int(train_size)
Input_train, Ans_train = Input_Ans_Extract(df.iloc[0:train_size], input_num)
Input_test, Ans_test = Input_Ans_Extract(df.iloc[train_size:], input_num)
return (Input_train, Ans_train), (Input_test, Ans_test)
(Input_train, Ans_train), (Input_test, Ans_test) = Data_Split(df[["stock"]], input_num = length_of_sequences)
################################################################
model = Sequential()
model.add(LSTM(hidden_neurons, batch_input_shape=(None, length_of_sequences, in_out_neurons), return_sequences=False))
model.add(Dense(in_out_neurons))
model.add(Activation("linear"))
model.compile(loss="mean_squared_error", optimizer="rmsprop")
model.fit(Input_train, Ans_train, batch_size=60, nb_epoch=3, validation_split=0.05)
################################################################
predicted = model.predict(Input_test)
################################################################
dataf = pd.DataFrame(predicted[:200])
dataf.columns = ["predict"]
dataf.plot()
dataf["answer"] = Ans_test[:200]
dataf.plot()
plt.show() | mit |
DmitryOdinoky/sms-tools | lectures/06-Harmonic-model/plots-code/piano-autocorrelation.py | 26 | 1114 | import matplotlib.pyplot as plt
import numpy as np
import math
import time, os, sys
import essentia.standard as ess
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/models/'))
import utilFunctions as UF
(fs, x) = UF.wavread('../../../sounds/piano.wav')
start = 13860
M = 800
xp = x[start:start+M]/float(max(x[start:start+M]))
r = ess.AutoCorrelation(normalization = 'standard')(xp)
r = r / max(r)
peaks = ess.PeakDetection(threshold =.11, interpolate = False, minPosition = .01)(r)
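# The first prominent autocorrelation peak (excluding lag 0) marks the fundamental
# period of the note; the pitch frequency is the inverse of that lag in seconds.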
plt.figure(1, figsize=(9, 7))
plt.subplot(211)
plt.plot(np.arange(M)/float(fs), xp, lw=1.5)
plt.axis([0, (M-1)/float(fs), min(xp), max(xp)])
plt.xlabel('time (sec)')
plt.ylabel('amplitude')
plt.title('x (piano.wav)')
plt.subplot(212)
plt.plot(np.arange(M)/float(fs), r, 'r', lw=1.5)
plt.plot(peaks[0]*(M-1)/float(fs),peaks[1], 'x', color='k', markeredgewidth=1.5)
plt.axis([0, (M-1)/float(fs), min(r), max(r)])
plt.title('autocorrelation function + peaks')
plt.xlabel('lag time (sec)')
plt.ylabel('correlation')
plt.tight_layout()
plt.savefig('piano-autocorrelation.png')
plt.show() | agpl-3.0 |
qifeigit/scikit-learn | sklearn/ensemble/gradient_boosting.py | 126 | 65552 | """Gradient Boosted Regression Trees
This module contains methods for fitting gradient boosted regression trees for
both classification and regression.
The module structure is the following:
- The ``BaseGradientBoosting`` base class implements a common ``fit`` method
for all the estimators in the module. Regression and classification
only differ in the concrete ``LossFunction`` used.
- ``GradientBoostingClassifier`` implements gradient boosting for
classification problems.
- ``GradientBoostingRegressor`` implements gradient boosting for
regression problems.
"""
# Authors: Peter Prettenhofer, Scott White, Gilles Louppe, Emanuele Olivetti,
# Arnaud Joly
# License: BSD 3 clause
from __future__ import print_function
from __future__ import division
from abc import ABCMeta, abstractmethod
from time import time
import numbers
import numpy as np
from scipy import stats
from .base import BaseEnsemble
from ..base import BaseEstimator
from ..base import ClassifierMixin
from ..base import RegressorMixin
from ..utils import check_random_state, check_array, check_X_y, column_or_1d
from ..utils import check_consistent_length, deprecated
from ..utils.extmath import logsumexp
from ..utils.fixes import expit, bincount
from ..utils.stats import _weighted_percentile
from ..utils.validation import check_is_fitted, NotFittedError
from ..externals import six
from ..feature_selection.from_model import _LearntSelectorMixin
from ..tree.tree import DecisionTreeRegressor
from ..tree._tree import DTYPE, TREE_LEAF
from ..tree._tree import PresortBestSplitter
from ..tree._tree import FriedmanMSE
from ._gradient_boosting import predict_stages
from ._gradient_boosting import predict_stage
from ._gradient_boosting import _random_sample_mask
class QuantileEstimator(BaseEstimator):
"""An estimator predicting the alpha-quantile of the training targets."""
def __init__(self, alpha=0.9):
if not 0 < alpha < 1.0:
raise ValueError("`alpha` must be in (0, 1.0) but was %r" % alpha)
self.alpha = alpha
def fit(self, X, y, sample_weight=None):
if sample_weight is None:
self.quantile = stats.scoreatpercentile(y, self.alpha * 100.0)
else:
self.quantile = _weighted_percentile(y, sample_weight, self.alpha * 100.0)
def predict(self, X):
check_is_fitted(self, 'quantile')
y = np.empty((X.shape[0], 1), dtype=np.float64)
y.fill(self.quantile)
return y
class MeanEstimator(BaseEstimator):
"""An estimator predicting the mean of the training targets."""
def fit(self, X, y, sample_weight=None):
if sample_weight is None:
self.mean = np.mean(y)
else:
self.mean = np.average(y, weights=sample_weight)
def predict(self, X):
check_is_fitted(self, 'mean')
y = np.empty((X.shape[0], 1), dtype=np.float64)
y.fill(self.mean)
return y
class LogOddsEstimator(BaseEstimator):
"""An estimator predicting the log odds ratio."""
scale = 1.0
def fit(self, X, y, sample_weight=None):
# pre-cond: pos, neg are encoded as 1, 0
if sample_weight is None:
pos = np.sum(y)
neg = y.shape[0] - pos
else:
pos = np.sum(sample_weight * y)
neg = np.sum(sample_weight * (1 - y))
if neg == 0 or pos == 0:
raise ValueError('y contains non binary labels.')
self.prior = self.scale * np.log(pos / neg)
def predict(self, X):
check_is_fitted(self, 'prior')
y = np.empty((X.shape[0], 1), dtype=np.float64)
y.fill(self.prior)
return y
class ScaledLogOddsEstimator(LogOddsEstimator):
"""Log odds ratio scaled by 0.5 -- for exponential loss. """
scale = 0.5
class PriorProbabilityEstimator(BaseEstimator):
"""An estimator predicting the probability of each
class in the training data.
"""
def fit(self, X, y, sample_weight=None):
if sample_weight is None:
sample_weight = np.ones_like(y, dtype=np.float64)
class_counts = bincount(y, weights=sample_weight)
self.priors = class_counts / class_counts.sum()
def predict(self, X):
check_is_fitted(self, 'priors')
y = np.empty((X.shape[0], self.priors.shape[0]), dtype=np.float64)
y[:] = self.priors
return y
class ZeroEstimator(BaseEstimator):
"""An estimator that simply predicts zero. """
def fit(self, X, y, sample_weight=None):
if np.issubdtype(y.dtype, int):
# classification
self.n_classes = np.unique(y).shape[0]
if self.n_classes == 2:
self.n_classes = 1
else:
# regression
self.n_classes = 1
def predict(self, X):
check_is_fitted(self, 'n_classes')
y = np.empty((X.shape[0], self.n_classes), dtype=np.float64)
y.fill(0.0)
return y
class LossFunction(six.with_metaclass(ABCMeta, object)):
"""Abstract base class for various loss functions.
Attributes
----------
K : int
The number of regression trees to be induced;
1 for regression and binary classification;
``n_classes`` for multi-class classification.
"""
is_multi_class = False
def __init__(self, n_classes):
self.K = n_classes
def init_estimator(self):
"""Default ``init`` estimator for loss function. """
raise NotImplementedError()
@abstractmethod
def __call__(self, y, pred, sample_weight=None):
"""Compute the loss of prediction ``pred`` and ``y``. """
@abstractmethod
def negative_gradient(self, y, y_pred, **kargs):
"""Compute the negative gradient.
Parameters
---------
y : np.ndarray, shape=(n,)
The target labels.
y_pred : np.ndarray, shape=(n,):
The predictions.
"""
def update_terminal_regions(self, tree, X, y, residual, y_pred,
sample_weight, sample_mask,
learning_rate=1.0, k=0):
"""Update the terminal regions (=leaves) of the given tree and
updates the current predictions of the model. Traverses tree
and invokes template method `_update_terminal_region`.
Parameters
----------
tree : tree.Tree
The tree object.
X : ndarray, shape=(n, m)
The data array.
y : ndarray, shape=(n,)
The target labels.
residual : ndarray, shape=(n,)
The residuals (usually the negative gradient).
y_pred : ndarray, shape=(n,)
The predictions.
sample_weight : ndarray, shape=(n,)
The weight of each sample.
sample_mask : ndarray, shape=(n,)
The sample mask to be used.
learning_rate : float, default=0.1
learning rate shrinks the contribution of each tree by
``learning_rate``.
k : int, default 0
The index of the estimator being updated.
"""
# compute leaf for each sample in ``X``.
terminal_regions = tree.apply(X)
# mask all which are not in sample mask.
masked_terminal_regions = terminal_regions.copy()
masked_terminal_regions[~sample_mask] = -1
# update each leaf (= perform line search)
for leaf in np.where(tree.children_left == TREE_LEAF)[0]:
self._update_terminal_region(tree, masked_terminal_regions,
leaf, X, y, residual,
y_pred[:, k], sample_weight)
# update predictions (both in-bag and out-of-bag)
y_pred[:, k] += (learning_rate
* tree.value[:, 0, 0].take(terminal_regions, axis=0))
@abstractmethod
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
"""Template method for updating terminal regions (=leaves). """
class RegressionLossFunction(six.with_metaclass(ABCMeta, LossFunction)):
"""Base class for regression loss functions. """
def __init__(self, n_classes):
if n_classes != 1:
raise ValueError("``n_classes`` must be 1 for regression but "
"was %r" % n_classes)
super(RegressionLossFunction, self).__init__(n_classes)
class LeastSquaresError(RegressionLossFunction):
"""Loss function for least squares (LS) estimation.
Terminal regions need not to be updated for least squares. """
def init_estimator(self):
return MeanEstimator()
def __call__(self, y, pred, sample_weight=None):
if sample_weight is None:
return np.mean((y - pred.ravel()) ** 2.0)
else:
return (1.0 / sample_weight.sum() *
np.sum(sample_weight * ((y - pred.ravel()) ** 2.0)))
def negative_gradient(self, y, pred, **kargs):
return y - pred.ravel()
def update_terminal_regions(self, tree, X, y, residual, y_pred,
sample_weight, sample_mask,
learning_rate=1.0, k=0):
"""Least squares does not need to update terminal regions.
But it has to update the predictions.
"""
# update predictions
y_pred[:, k] += learning_rate * tree.predict(X).ravel()
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
pass
class LeastAbsoluteError(RegressionLossFunction):
"""Loss function for least absolute deviation (LAD) regression. """
def init_estimator(self):
return QuantileEstimator(alpha=0.5)
def __call__(self, y, pred, sample_weight=None):
if sample_weight is None:
return np.abs(y - pred.ravel()).mean()
else:
return (1.0 / sample_weight.sum() *
np.sum(sample_weight * np.abs(y - pred.ravel())))
def negative_gradient(self, y, pred, **kargs):
"""1.0 if y - pred > 0.0 else -1.0"""
pred = pred.ravel()
return 2.0 * (y - pred > 0.0) - 1.0
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
"""LAD updates terminal regions to median estimates. """
terminal_region = np.where(terminal_regions == leaf)[0]
sample_weight = sample_weight.take(terminal_region, axis=0)
diff = y.take(terminal_region, axis=0) - pred.take(terminal_region, axis=0)
tree.value[leaf, 0, 0] = _weighted_percentile(diff, sample_weight, percentile=50)
class HuberLossFunction(RegressionLossFunction):
"""Huber loss function for robust regression.
M-Regression proposed in Friedman 2001.
References
----------
J. Friedman, Greedy Function Approximation: A Gradient Boosting
Machine, The Annals of Statistics, Vol. 29, No. 5, 2001.
"""
def __init__(self, n_classes, alpha=0.9):
super(HuberLossFunction, self).__init__(n_classes)
self.alpha = alpha
self.gamma = None
def init_estimator(self):
return QuantileEstimator(alpha=0.5)
def __call__(self, y, pred, sample_weight=None):
pred = pred.ravel()
diff = y - pred
gamma = self.gamma
if gamma is None:
if sample_weight is None:
gamma = stats.scoreatpercentile(np.abs(diff), self.alpha * 100)
else:
gamma = _weighted_percentile(np.abs(diff), sample_weight, self.alpha * 100)
gamma_mask = np.abs(diff) <= gamma
if sample_weight is None:
sq_loss = np.sum(0.5 * diff[gamma_mask] ** 2.0)
lin_loss = np.sum(gamma * (np.abs(diff[~gamma_mask]) - gamma / 2.0))
loss = (sq_loss + lin_loss) / y.shape[0]
else:
sq_loss = np.sum(0.5 * sample_weight[gamma_mask] * diff[gamma_mask] ** 2.0)
lin_loss = np.sum(gamma * sample_weight[~gamma_mask] *
(np.abs(diff[~gamma_mask]) - gamma / 2.0))
loss = (sq_loss + lin_loss) / sample_weight.sum()
return loss
def negative_gradient(self, y, pred, sample_weight=None, **kargs):
pred = pred.ravel()
diff = y - pred
if sample_weight is None:
gamma = stats.scoreatpercentile(np.abs(diff), self.alpha * 100)
else:
gamma = _weighted_percentile(np.abs(diff), sample_weight, self.alpha * 100)
gamma_mask = np.abs(diff) <= gamma
residual = np.zeros((y.shape[0],), dtype=np.float64)
residual[gamma_mask] = diff[gamma_mask]
residual[~gamma_mask] = gamma * np.sign(diff[~gamma_mask])
self.gamma = gamma
return residual
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
terminal_region = np.where(terminal_regions == leaf)[0]
sample_weight = sample_weight.take(terminal_region, axis=0)
gamma = self.gamma
diff = (y.take(terminal_region, axis=0)
- pred.take(terminal_region, axis=0))
median = _weighted_percentile(diff, sample_weight, percentile=50)
diff_minus_median = diff - median
tree.value[leaf, 0] = median + np.mean(
np.sign(diff_minus_median) *
np.minimum(np.abs(diff_minus_median), gamma))
class QuantileLossFunction(RegressionLossFunction):
"""Loss function for quantile regression.
Quantile regression estimates the percentiles
of the conditional distribution of the target.
"""
def __init__(self, n_classes, alpha=0.9):
super(QuantileLossFunction, self).__init__(n_classes)
assert 0 < alpha < 1.0
self.alpha = alpha
self.percentile = alpha * 100.0
def init_estimator(self):
return QuantileEstimator(self.alpha)
def __call__(self, y, pred, sample_weight=None):
pred = pred.ravel()
diff = y - pred
alpha = self.alpha
mask = y > pred
if sample_weight is None:
loss = (alpha * diff[mask].sum() +
(1.0 - alpha) * diff[~mask].sum()) / y.shape[0]
else:
loss = ((alpha * np.sum(sample_weight[mask] * diff[mask]) +
(1.0 - alpha) * np.sum(sample_weight[~mask] * diff[~mask])) /
sample_weight.sum())
return loss
def negative_gradient(self, y, pred, **kargs):
alpha = self.alpha
pred = pred.ravel()
mask = y > pred
return (alpha * mask) - ((1.0 - alpha) * ~mask)
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
terminal_region = np.where(terminal_regions == leaf)[0]
diff = (y.take(terminal_region, axis=0)
- pred.take(terminal_region, axis=0))
sample_weight = sample_weight.take(terminal_region, axis=0)
val = _weighted_percentile(diff, sample_weight, self.percentile)
tree.value[leaf, 0] = val
class ClassificationLossFunction(six.with_metaclass(ABCMeta, LossFunction)):
"""Base class for classification loss functions. """
def _score_to_proba(self, score):
"""Template method to convert scores to probabilities.
If the loss does not support probabilities, a ``TypeError`` is raised.
"""
raise TypeError('%s does not support predict_proba' % type(self).__name__)
@abstractmethod
def _score_to_decision(self, score):
"""Template method to convert scores to decisions.
Returns int arrays.
"""
class BinomialDeviance(ClassificationLossFunction):
"""Binomial deviance loss function for binary classification.
Binary classification is a special case; here, we only need to
fit one tree instead of ``n_classes`` trees.
"""
def __init__(self, n_classes):
if n_classes != 2:
raise ValueError("{0:s} requires 2 classes.".format(
self.__class__.__name__))
# we only need to fit one tree for binary clf.
super(BinomialDeviance, self).__init__(1)
def init_estimator(self):
return LogOddsEstimator()
def __call__(self, y, pred, sample_weight=None):
"""Compute the deviance (= 2 * negative log-likelihood). """
# logaddexp(0, v) == log(1.0 + exp(v))
pred = pred.ravel()
if sample_weight is None:
return -2.0 * np.mean((y * pred) - np.logaddexp(0.0, pred))
else:
return (-2.0 / sample_weight.sum() *
np.sum(sample_weight * ((y * pred) - np.logaddexp(0.0, pred))))
def negative_gradient(self, y, pred, **kargs):
"""Compute the residual (= negative gradient). """
return y - expit(pred.ravel())
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
"""Make a single Newton-Raphson step.
our node estimate is given by:
sum(w * (y - prob)) / sum(w * prob * (1 - prob))
we take advantage that: y - prob = residual
"""
terminal_region = np.where(terminal_regions == leaf)[0]
residual = residual.take(terminal_region, axis=0)
y = y.take(terminal_region, axis=0)
sample_weight = sample_weight.take(terminal_region, axis=0)
numerator = np.sum(sample_weight * residual)
denominator = np.sum(sample_weight * (y - residual) * (1 - y + residual))
if denominator == 0.0:
tree.value[leaf, 0, 0] = 0.0
else:
tree.value[leaf, 0, 0] = numerator / denominator
def _score_to_proba(self, score):
proba = np.ones((score.shape[0], 2), dtype=np.float64)
proba[:, 1] = expit(score.ravel())
proba[:, 0] -= proba[:, 1]
return proba
def _score_to_decision(self, score):
proba = self._score_to_proba(score)
return np.argmax(proba, axis=1)
class MultinomialDeviance(ClassificationLossFunction):
"""Multinomial deviance loss function for multi-class classification.
For multi-class classification we need to fit ``n_classes`` trees at
each stage.
"""
is_multi_class = True
def __init__(self, n_classes):
if n_classes < 3:
raise ValueError("{0:s} requires more than 2 classes.".format(
self.__class__.__name__))
super(MultinomialDeviance, self).__init__(n_classes)
def init_estimator(self):
return PriorProbabilityEstimator()
def __call__(self, y, pred, sample_weight=None):
# create one-hot label encoding
Y = np.zeros((y.shape[0], self.K), dtype=np.float64)
for k in range(self.K):
Y[:, k] = y == k
if sample_weight is None:
return np.sum(-1 * (Y * pred).sum(axis=1) +
logsumexp(pred, axis=1))
else:
return np.sum(-1 * sample_weight * (Y * pred).sum(axis=1) +
logsumexp(pred, axis=1))
def negative_gradient(self, y, pred, k=0, **kwargs):
"""Compute negative gradient for the ``k``-th class. """
return y - np.nan_to_num(np.exp(pred[:, k] -
logsumexp(pred, axis=1)))
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
"""Make a single Newton-Raphson step. """
terminal_region = np.where(terminal_regions == leaf)[0]
residual = residual.take(terminal_region, axis=0)
y = y.take(terminal_region, axis=0)
sample_weight = sample_weight.take(terminal_region, axis=0)
numerator = np.sum(sample_weight * residual)
numerator *= (self.K - 1) / self.K
denominator = np.sum(sample_weight * (y - residual) *
(1.0 - y + residual))
if denominator == 0.0:
tree.value[leaf, 0, 0] = 0.0
else:
tree.value[leaf, 0, 0] = numerator / denominator
def _score_to_proba(self, score):
return np.nan_to_num(
np.exp(score - (logsumexp(score, axis=1)[:, np.newaxis])))
def _score_to_decision(self, score):
proba = self._score_to_proba(score)
return np.argmax(proba, axis=1)
class ExponentialLoss(ClassificationLossFunction):
"""Exponential loss function for binary classification.
Same loss as AdaBoost.
References
----------
Greg Ridgeway, Generalized Boosted Models: A guide to the gbm package, 2007
"""
def __init__(self, n_classes):
if n_classes != 2:
raise ValueError("{0:s} requires 2 classes.".format(
self.__class__.__name__))
# we only need to fit one tree for binary clf.
super(ExponentialLoss, self).__init__(1)
def init_estimator(self):
return ScaledLogOddsEstimator()
def __call__(self, y, pred, sample_weight=None):
pred = pred.ravel()
if sample_weight is None:
return np.mean(np.exp(-(2. * y - 1.) * pred))
else:
return (1.0 / sample_weight.sum() *
np.sum(sample_weight * np.exp(-(2 * y - 1) * pred)))
def negative_gradient(self, y, pred, **kargs):
y_ = -(2. * y - 1.)
return y_ * np.exp(y_ * pred.ravel())
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
terminal_region = np.where(terminal_regions == leaf)[0]
pred = pred.take(terminal_region, axis=0)
y = y.take(terminal_region, axis=0)
sample_weight = sample_weight.take(terminal_region, axis=0)
y_ = 2. * y - 1.
numerator = np.sum(y_ * sample_weight * np.exp(-y_ * pred))
denominator = np.sum(sample_weight * np.exp(-y_ * pred))
if denominator == 0.0:
tree.value[leaf, 0, 0] = 0.0
else:
tree.value[leaf, 0, 0] = numerator / denominator
def _score_to_proba(self, score):
proba = np.ones((score.shape[0], 2), dtype=np.float64)
proba[:, 1] = expit(2.0 * score.ravel())
proba[:, 0] -= proba[:, 1]
return proba
def _score_to_decision(self, score):
return (score.ravel() >= 0.0).astype(np.int)
LOSS_FUNCTIONS = {'ls': LeastSquaresError,
'lad': LeastAbsoluteError,
'huber': HuberLossFunction,
'quantile': QuantileLossFunction,
'deviance': None, # for both, multinomial and binomial
'exponential': ExponentialLoss,
}
INIT_ESTIMATORS = {'zero': ZeroEstimator}
class VerboseReporter(object):
"""Reports verbose output to stdout.
If ``verbose==1`` output is printed once in a while (when iteration mod
verbose_mod is zero); if larger than 1 then output is printed for
each update.
"""
def __init__(self, verbose):
self.verbose = verbose
def init(self, est, begin_at_stage=0):
# header fields and line format str
header_fields = ['Iter', 'Train Loss']
verbose_fmt = ['{iter:>10d}', '{train_score:>16.4f}']
# do oob?
if est.subsample < 1:
header_fields.append('OOB Improve')
verbose_fmt.append('{oob_impr:>16.4f}')
header_fields.append('Remaining Time')
verbose_fmt.append('{remaining_time:>16s}')
# print the header line
print(('%10s ' + '%16s ' *
(len(header_fields) - 1)) % tuple(header_fields))
self.verbose_fmt = ' '.join(verbose_fmt)
# plot verbose info each time i % verbose_mod == 0
self.verbose_mod = 1
self.start_time = time()
self.begin_at_stage = begin_at_stage
def update(self, j, est):
"""Update reporter with new iteration. """
do_oob = est.subsample < 1
# we need to take into account if we fit additional estimators.
i = j - self.begin_at_stage # iteration relative to the start iter
if (i + 1) % self.verbose_mod == 0:
oob_impr = est.oob_improvement_[j] if do_oob else 0
remaining_time = ((est.n_estimators - (j + 1)) *
(time() - self.start_time) / float(i + 1))
if remaining_time > 60:
remaining_time = '{0:.2f}m'.format(remaining_time / 60.0)
else:
remaining_time = '{0:.2f}s'.format(remaining_time)
print(self.verbose_fmt.format(iter=j + 1,
train_score=est.train_score_[j],
oob_impr=oob_impr,
remaining_time=remaining_time))
if self.verbose == 1 and ((i + 1) // (self.verbose_mod * 10) > 0):
# adjust verbose frequency (powers of 10)
self.verbose_mod *= 10
class BaseGradientBoosting(six.with_metaclass(ABCMeta, BaseEnsemble,
_LearntSelectorMixin)):
"""Abstract base class for Gradient Boosting. """
@abstractmethod
def __init__(self, loss, learning_rate, n_estimators, min_samples_split,
min_samples_leaf, min_weight_fraction_leaf,
max_depth, init, subsample, max_features,
random_state, alpha=0.9, verbose=0, max_leaf_nodes=None,
warm_start=False):
self.n_estimators = n_estimators
self.learning_rate = learning_rate
self.loss = loss
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.subsample = subsample
self.max_features = max_features
self.max_depth = max_depth
self.init = init
self.random_state = random_state
self.alpha = alpha
self.verbose = verbose
self.max_leaf_nodes = max_leaf_nodes
self.warm_start = warm_start
self.estimators_ = np.empty((0, 0), dtype=np.object)
def _fit_stage(self, i, X, y, y_pred, sample_weight, sample_mask,
criterion, splitter, random_state):
"""Fit another stage of ``n_classes_`` trees to the boosting model. """
assert sample_mask.dtype == np.bool
loss = self.loss_
original_y = y
for k in range(loss.K):
if loss.is_multi_class:
y = np.array(original_y == k, dtype=np.float64)
residual = loss.negative_gradient(y, y_pred, k=k,
sample_weight=sample_weight)
# induce regression tree on residuals
tree = DecisionTreeRegressor(
criterion=criterion,
splitter=splitter,
max_depth=self.max_depth,
min_samples_split=self.min_samples_split,
min_samples_leaf=self.min_samples_leaf,
min_weight_fraction_leaf=self.min_weight_fraction_leaf,
max_features=self.max_features,
max_leaf_nodes=self.max_leaf_nodes,
random_state=random_state)
if self.subsample < 1.0:
# no inplace multiplication!
sample_weight = sample_weight * sample_mask.astype(np.float64)
tree.fit(X, residual, sample_weight=sample_weight,
check_input=False)
# update tree leaves
loss.update_terminal_regions(tree.tree_, X, y, residual, y_pred,
sample_weight, sample_mask,
self.learning_rate, k=k)
# add tree to ensemble
self.estimators_[i, k] = tree
return y_pred
def _check_params(self):
"""Check validity of parameters and raise ValueError if not valid. """
if self.n_estimators <= 0:
raise ValueError("n_estimators must be greater than 0 but "
"was %r" % self.n_estimators)
if self.learning_rate <= 0.0:
raise ValueError("learning_rate must be greater than 0 but "
"was %r" % self.learning_rate)
if (self.loss not in self._SUPPORTED_LOSS
or self.loss not in LOSS_FUNCTIONS):
raise ValueError("Loss '{0:s}' not supported. ".format(self.loss))
if self.loss == 'deviance':
loss_class = (MultinomialDeviance
if len(self.classes_) > 2
else BinomialDeviance)
else:
loss_class = LOSS_FUNCTIONS[self.loss]
if self.loss in ('huber', 'quantile'):
self.loss_ = loss_class(self.n_classes_, self.alpha)
else:
self.loss_ = loss_class(self.n_classes_)
if not (0.0 < self.subsample <= 1.0):
raise ValueError("subsample must be in (0,1] but "
"was %r" % self.subsample)
if self.init is not None:
if isinstance(self.init, six.string_types):
if self.init not in INIT_ESTIMATORS:
raise ValueError('init="%s" is not supported' % self.init)
else:
if (not hasattr(self.init, 'fit')
or not hasattr(self.init, 'predict')):
raise ValueError("init=%r must be valid BaseEstimator "
"and support both fit and "
"predict" % self.init)
if not (0.0 < self.alpha < 1.0):
raise ValueError("alpha must be in (0.0, 1.0) but "
"was %r" % self.alpha)
if isinstance(self.max_features, six.string_types):
if self.max_features == "auto":
# if is_classification
if self.n_classes_ > 1:
max_features = max(1, int(np.sqrt(self.n_features)))
else:
# is regression
max_features = self.n_features
elif self.max_features == "sqrt":
max_features = max(1, int(np.sqrt(self.n_features)))
elif self.max_features == "log2":
max_features = max(1, int(np.log2(self.n_features)))
else:
raise ValueError("Invalid value for max_features: %r. "
"Allowed string values are 'auto', 'sqrt' "
"or 'log2'." % self.max_features)
elif self.max_features is None:
max_features = self.n_features
elif isinstance(self.max_features, (numbers.Integral, np.integer)):
max_features = self.max_features
else: # float
if 0. < self.max_features <= 1.:
max_features = max(int(self.max_features * self.n_features), 1)
else:
raise ValueError("max_features must be in (0, n_features]")
self.max_features_ = max_features
def _init_state(self):
"""Initialize model state and allocate model state data structures. """
if self.init is None:
self.init_ = self.loss_.init_estimator()
elif isinstance(self.init, six.string_types):
self.init_ = INIT_ESTIMATORS[self.init]()
else:
self.init_ = self.init
self.estimators_ = np.empty((self.n_estimators, self.loss_.K),
dtype=np.object)
self.train_score_ = np.zeros((self.n_estimators,), dtype=np.float64)
# do oob?
if self.subsample < 1.0:
self.oob_improvement_ = np.zeros((self.n_estimators),
dtype=np.float64)
def _clear_state(self):
"""Clear the state of the gradient boosting model. """
if hasattr(self, 'estimators_'):
self.estimators_ = np.empty((0, 0), dtype=np.object)
if hasattr(self, 'train_score_'):
del self.train_score_
if hasattr(self, 'oob_improvement_'):
del self.oob_improvement_
if hasattr(self, 'init_'):
del self.init_
def _resize_state(self):
"""Add additional ``n_estimators`` entries to all attributes. """
# self.n_estimators is the number of additional est to fit
total_n_estimators = self.n_estimators
if total_n_estimators < self.estimators_.shape[0]:
raise ValueError('resize with smaller n_estimators %d < %d' %
(total_n_estimators, self.estimators_.shape[0]))
self.estimators_.resize((total_n_estimators, self.loss_.K))
self.train_score_.resize(total_n_estimators)
if (self.subsample < 1 or hasattr(self, 'oob_improvement_')):
# if do oob resize arrays or create new if not available
if hasattr(self, 'oob_improvement_'):
self.oob_improvement_.resize(total_n_estimators)
else:
self.oob_improvement_ = np.zeros((total_n_estimators,),
dtype=np.float64)
def _is_initialized(self):
return len(getattr(self, 'estimators_', [])) > 0
def fit(self, X, y, sample_weight=None, monitor=None):
"""Fit the gradient boosting model.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape = [n_samples]
Target values (integers in classification, real numbers in
regression)
For classification, labels must correspond to classes.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. In the case of
classification, splits are also ignored if they would result in any
single class carrying a negative weight in either child node.
monitor : callable, optional
The monitor is called after each iteration with the current
iteration, a reference to the estimator and the local variables of
``_fit_stages`` as keyword arguments ``callable(i, self,
locals())``. If the callable returns ``True`` the fitting procedure
is stopped. The monitor can be used for various things such as
computing held-out estimates, early stopping, model introspection, and
snapshotting.
Returns
-------
self : object
Returns self.
"""
# if not warmstart - clear the estimator state
if not self.warm_start:
self._clear_state()
# Check input
X, y = check_X_y(X, y, dtype=DTYPE)
n_samples, self.n_features = X.shape
if sample_weight is None:
sample_weight = np.ones(n_samples, dtype=np.float32)
else:
sample_weight = column_or_1d(sample_weight, warn=True)
check_consistent_length(X, y, sample_weight)
y = self._validate_y(y)
random_state = check_random_state(self.random_state)
self._check_params()
if not self._is_initialized():
# init state
self._init_state()
# fit initial model - FIXME make sample_weight optional
self.init_.fit(X, y, sample_weight)
# init predictions
y_pred = self.init_.predict(X)
begin_at_stage = 0
else:
# add more estimators to fitted model
# invariant: warm_start = True
if self.n_estimators < self.estimators_.shape[0]:
raise ValueError('n_estimators=%d must be larger or equal to '
'estimators_.shape[0]=%d when '
'warm_start==True'
% (self.n_estimators,
self.estimators_.shape[0]))
begin_at_stage = self.estimators_.shape[0]
y_pred = self._decision_function(X)
self._resize_state()
# fit the boosting stages
n_stages = self._fit_stages(X, y, y_pred, sample_weight, random_state,
begin_at_stage, monitor)
# change shape of arrays after fit (early-stopping or additional ests)
if n_stages != self.estimators_.shape[0]:
self.estimators_ = self.estimators_[:n_stages]
self.train_score_ = self.train_score_[:n_stages]
if hasattr(self, 'oob_improvement_'):
self.oob_improvement_ = self.oob_improvement_[:n_stages]
return self
def _fit_stages(self, X, y, y_pred, sample_weight, random_state,
begin_at_stage=0, monitor=None):
"""Iteratively fits the stages.
For each stage it computes the progress (OOB, train score)
and delegates to ``_fit_stage``.
Returns the number of stages fit; might differ from ``n_estimators``
due to early stopping.
"""
n_samples = X.shape[0]
do_oob = self.subsample < 1.0
sample_mask = np.ones((n_samples, ), dtype=np.bool)
n_inbag = max(1, int(self.subsample * n_samples))
loss_ = self.loss_
# Set min_weight_leaf from min_weight_fraction_leaf
if self.min_weight_fraction_leaf != 0. and sample_weight is not None:
min_weight_leaf = (self.min_weight_fraction_leaf *
np.sum(sample_weight))
else:
min_weight_leaf = 0.
# init criterion and splitter
criterion = FriedmanMSE(1)
splitter = PresortBestSplitter(criterion,
self.max_features_,
self.min_samples_leaf,
min_weight_leaf,
random_state)
if self.verbose:
verbose_reporter = VerboseReporter(self.verbose)
verbose_reporter.init(self, begin_at_stage)
# perform boosting iterations
i = begin_at_stage
for i in range(begin_at_stage, self.n_estimators):
# subsampling
if do_oob:
sample_mask = _random_sample_mask(n_samples, n_inbag,
random_state)
# OOB score before adding this stage
old_oob_score = loss_(y[~sample_mask],
y_pred[~sample_mask],
sample_weight[~sample_mask])
# fit next stage of trees
y_pred = self._fit_stage(i, X, y, y_pred, sample_weight,
sample_mask, criterion, splitter,
random_state)
# track deviance (= loss)
if do_oob:
self.train_score_[i] = loss_(y[sample_mask],
y_pred[sample_mask],
sample_weight[sample_mask])
self.oob_improvement_[i] = (
old_oob_score - loss_(y[~sample_mask],
y_pred[~sample_mask],
sample_weight[~sample_mask]))
else:
# no need to fancy index w/ no subsampling
self.train_score_[i] = loss_(y, y_pred, sample_weight)
if self.verbose > 0:
verbose_reporter.update(i, self)
if monitor is not None:
early_stopping = monitor(i, self, locals())
if early_stopping:
break
return i + 1
def _make_estimator(self, append=True):
# we don't need _make_estimator
raise NotImplementedError()
def _init_decision_function(self, X):
"""Check input and compute prediction of ``init``. """
if self.estimators_ is None or len(self.estimators_) == 0:
raise NotFittedError("Estimator not fitted, call `fit`"
" before making predictions`.")
if X.shape[1] != self.n_features:
raise ValueError("X.shape[1] should be {0:d}, not {1:d}.".format(
self.n_features, X.shape[1]))
score = self.init_.predict(X).astype(np.float64)
return score
def _decision_function(self, X):
# for use in inner loop, not raveling the output in single-class case,
# not doing input validation.
score = self._init_decision_function(X)
predict_stages(self.estimators_, X, self.learning_rate, score)
return score
@deprecated(" and will be removed in 0.19")
def decision_function(self, X):
"""Compute the decision function of ``X``.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
score : array, shape = [n_samples, n_classes] or [n_samples]
The decision function of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
Regression and binary classification produce an array of shape
[n_samples].
"""
X = check_array(X, dtype=DTYPE, order="C")
score = self._decision_function(X)
if score.shape[1] == 1:
return score.ravel()
return score
def _staged_decision_function(self, X):
"""Compute decision function of ``X`` for each iteration.
This method allows monitoring (i.e. determining the error on a test set)
after each stage.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
score : generator of array, shape = [n_samples, k]
The decision function of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
Regression and binary classification are special cases with
``k == 1``, otherwise ``k==n_classes``.
"""
X = check_array(X, dtype=DTYPE, order="C")
score = self._init_decision_function(X)
for i in range(self.estimators_.shape[0]):
predict_stage(self.estimators_, i, X, self.learning_rate, score)
yield score.copy()
@deprecated(" and will be removed in 0.19")
def staged_decision_function(self, X):
"""Compute decision function of ``X`` for each iteration.
This method allows monitoring (i.e. determining the error on a test set)
after each stage.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
score : generator of array, shape = [n_samples, k]
The decision function of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
Regression and binary classification are special cases with
``k == 1``, otherwise ``k==n_classes``.
"""
for dec in self._staged_decision_function(X):
# no yield from in Python2.X
yield dec
@property
def feature_importances_(self):
"""Return the feature importances (the higher, the more important the
feature).
Returns
-------
feature_importances_ : array, shape = [n_features]
"""
if self.estimators_ is None or len(self.estimators_) == 0:
raise NotFittedError("Estimator not fitted, call `fit` before"
" `feature_importances_`.")
total_sum = np.zeros((self.n_features, ), dtype=np.float64)
for stage in self.estimators_:
stage_sum = sum(tree.feature_importances_
for tree in stage) / len(stage)
total_sum += stage_sum
importances = total_sum / len(self.estimators_)
return importances
def _validate_y(self, y):
self.n_classes_ = 1
if y.dtype.kind == 'O':
y = y.astype(np.float64)
# Default implementation
return y
class GradientBoostingClassifier(BaseGradientBoosting, ClassifierMixin):
"""Gradient Boosting for classification.
GB builds an additive model in a
forward stage-wise fashion; it allows for the optimization of
arbitrary differentiable loss functions. In each stage ``n_classes_``
regression trees are fit on the negative gradient of the
binomial or multinomial deviance loss function. Binary classification
is a special case where only a single regression tree is induced.
Read more in the :ref:`User Guide <gradient_boosting>`.
Parameters
----------
loss : {'deviance', 'exponential'}, optional (default='deviance')
loss function to be optimized. 'deviance' refers to
deviance (= logistic regression) for classification
with probabilistic outputs. For loss 'exponential' gradient
boosting recovers the AdaBoost algorithm.
learning_rate : float, optional (default=0.1)
learning rate shrinks the contribution of each tree by `learning_rate`.
There is a trade-off between learning_rate and n_estimators.
n_estimators : int (default=100)
The number of boosting stages to perform. Gradient boosting
is fairly robust to over-fitting so a large number usually
results in better performance.
max_depth : integer, optional (default=3)
maximum depth of the individual regression estimators. The maximum
depth limits the number of nodes in the tree. Tune this parameter
for best performance; the best value depends on the interaction
of the input variables.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : integer, optional (default=2)
The minimum number of samples required to split an internal node.
min_samples_leaf : integer, optional (default=1)
The minimum number of samples required to be at a leaf node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
subsample : float, optional (default=1.0)
The fraction of samples to be used for fitting the individual base
learners. If smaller than 1.0 this results in Stochastic Gradient
Boosting. `subsample` interacts with the parameter `n_estimators`.
Choosing `subsample < 1.0` leads to a reduction of variance
and an increase in bias.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Choosing `max_features < n_features` leads to a reduction of variance
and an increase in bias.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
init : BaseEstimator, None, optional (default=None)
An estimator object that is used to compute the initial
predictions. ``init`` has to provide ``fit`` and ``predict``.
If None it uses ``loss.init_estimator``.
verbose : int, default: 0
Enable verbose output. If 1 then it prints progress and performance
once in a while (the more trees the lower the frequency). If greater
than 1 then it prints progress and performance for every tree.
warm_start : bool, default: False
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just erase the
previous solution.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
feature_importances_ : array, shape = [n_features]
The feature importances (the higher, the more important the feature).
oob_improvement_ : array, shape = [n_estimators]
The improvement in loss (= deviance) on the out-of-bag samples
relative to the previous iteration.
``oob_improvement_[0]`` is the improvement in
loss of the first stage over the ``init`` estimator.
train_score_ : array, shape = [n_estimators]
The i-th score ``train_score_[i]`` is the deviance (= loss) of the
model at iteration ``i`` on the in-bag sample.
If ``subsample == 1`` this is the deviance on the training data.
loss_ : LossFunction
The concrete ``LossFunction`` object.
init : BaseEstimator
The estimator that provides the initial predictions.
Set via the ``init`` argument or ``loss.init_estimator``.
estimators_ : ndarray of DecisionTreeRegressor, shape = [n_estimators, loss_.K]
The collection of fitted sub-estimators. ``loss_.K`` is 1 for binary
classification, otherwise n_classes.
See also
--------
sklearn.tree.DecisionTreeClassifier, RandomForestClassifier
AdaBoostClassifier
References
----------
J. Friedman, Greedy Function Approximation: A Gradient Boosting
Machine, The Annals of Statistics, Vol. 29, No. 5, 2001.
J. Friedman, Stochastic Gradient Boosting, 1999
T. Hastie, R. Tibshirani and J. Friedman.
Elements of Statistical Learning Ed. 2, Springer, 2009.
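Examples
--------
A minimal, illustrative sketch using toy data from
``sklearn.datasets.make_hastie_10_2``::
from sklearn.datasets import make_hastie_10_2
from sklearn.ensemble import GradientBoostingClassifier
X, y = make_hastie_10_2(random_state=0)
clf = GradientBoostingClassifier(n_estimators=100, learning_rate=0.1,
max_depth=3, random_state=0).fit(X, y)
pred = clf.predict(X[:2])
proba = clf.predict_proba(X[:2])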
"""
_SUPPORTED_LOSS = ('deviance', 'exponential')
def __init__(self, loss='deviance', learning_rate=0.1, n_estimators=100,
subsample=1.0, min_samples_split=2,
min_samples_leaf=1, min_weight_fraction_leaf=0.,
max_depth=3, init=None, random_state=None,
max_features=None, verbose=0,
max_leaf_nodes=None, warm_start=False):
super(GradientBoostingClassifier, self).__init__(
loss=loss, learning_rate=learning_rate, n_estimators=n_estimators,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_depth=max_depth, init=init, subsample=subsample,
max_features=max_features,
random_state=random_state, verbose=verbose,
max_leaf_nodes=max_leaf_nodes, warm_start=warm_start)
def _validate_y(self, y):
self.classes_, y = np.unique(y, return_inverse=True)
self.n_classes_ = len(self.classes_)
return y
def decision_function(self, X):
"""Compute the decision function of ``X``.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
score : array, shape = [n_samples, n_classes] or [n_samples]
The decision function of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
Regression and binary classification produce an array of shape
[n_samples].
"""
X = check_array(X, dtype=DTYPE, order="C")
score = self._decision_function(X)
if score.shape[1] == 1:
return score.ravel()
return score
def staged_decision_function(self, X):
"""Compute decision function of ``X`` for each iteration.
This method allows monitoring (i.e. determining the error on a test set)
after each stage.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
score : generator of array, shape = [n_samples, k]
The decision function of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
Regression and binary classification are special cases with
``k == 1``, otherwise ``k==n_classes``.
"""
for dec in self._staged_decision_function(X):
# no yield from in Python2.X
yield dec
def predict(self, X):
"""Predict class for X.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
y : array of shape = [n_samples]
The predicted values.
"""
score = self.decision_function(X)
decisions = self.loss_._score_to_decision(score)
return self.classes_.take(decisions, axis=0)
def staged_predict(self, X):
"""Predict class at each stage for X.
This method allows monitoring (i.e. determining the error on a test set)
after each stage.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
y : generator of array of shape = [n_samples]
The predicted value of the input samples.
"""
for score in self._staged_decision_function(X):
decisions = self.loss_._score_to_decision(score)
yield self.classes_.take(decisions, axis=0)
def predict_proba(self, X):
"""Predict class probabilities for X.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Raises
------
AttributeError
If the ``loss`` does not support probabilities.
Returns
-------
p : array of shape = [n_samples, n_classes]
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
score = self.decision_function(X)
try:
return self.loss_._score_to_proba(score)
except NotFittedError:
raise
except AttributeError:
raise AttributeError('loss=%r does not support predict_proba' %
self.loss)
def predict_log_proba(self, X):
"""Predict class log-probabilities for X.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Raises
------
AttributeError
If the ``loss`` does not support probabilities.
Returns
-------
p : array of shape = [n_samples, n_classes]
The class log-probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
proba = self.predict_proba(X)
return np.log(proba)
def staged_predict_proba(self, X):
"""Predict class probabilities at each stage for X.
This method allows monitoring (i.e. determining the error on a test set)
after each stage.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
y : generator of array of shape = [n_samples]
The predicted value of the input samples.
"""
try:
for score in self._staged_decision_function(X):
yield self.loss_._score_to_proba(score)
except NotFittedError:
raise
except AttributeError:
raise AttributeError('loss=%r does not support predict_proba' %
self.loss)
class GradientBoostingRegressor(BaseGradientBoosting, RegressorMixin):
"""Gradient Boosting for regression.
GB builds an additive model in a forward stage-wise fashion;
it allows for the optimization of arbitrary differentiable loss functions.
In each stage a regression tree is fit on the negative gradient of the
given loss function.
Read more in the :ref:`User Guide <gradient_boosting>`.
Parameters
----------
loss : {'ls', 'lad', 'huber', 'quantile'}, optional (default='ls')
loss function to be optimized. 'ls' refers to least squares
regression. 'lad' (least absolute deviation) is a highly robust
loss function solely based on order information of the input
variables. 'huber' is a combination of the two. 'quantile'
allows quantile regression (use `alpha` to specify the quantile).
learning_rate : float, optional (default=0.1)
learning rate shrinks the contribution of each tree by `learning_rate`.
There is a trade-off between learning_rate and n_estimators.
n_estimators : int (default=100)
The number of boosting stages to perform. Gradient boosting
is fairly robust to over-fitting so a large number usually
results in better performance.
max_depth : integer, optional (default=3)
maximum depth of the individual regression estimators. The maximum
depth limits the number of nodes in the tree. Tune this parameter
for best performance; the best value depends on the interaction
of the input variables.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : integer, optional (default=2)
The minimum number of samples required to split an internal node.
min_samples_leaf : integer, optional (default=1)
The minimum number of samples required to be at a leaf node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
subsample : float, optional (default=1.0)
The fraction of samples to be used for fitting the individual base
learners. If smaller than 1.0 this results in Stochastic Gradient
Boosting. `subsample` interacts with the parameter `n_estimators`.
Choosing `subsample < 1.0` leads to a reduction of variance
and an increase in bias.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Choosing `max_features < n_features` leads to a reduction of variance
and an increase in bias.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
alpha : float (default=0.9)
The alpha-quantile of the huber loss function and the quantile
loss function. Only if ``loss='huber'`` or ``loss='quantile'``.
init : BaseEstimator, None, optional (default=None)
An estimator object that is used to compute the initial
predictions. ``init`` has to provide ``fit`` and ``predict``.
If None it uses ``loss.init_estimator``.
verbose : int, default: 0
Enable verbose output. If 1 then it prints progress and performance
once in a while (the more trees the lower the frequency). If greater
than 1 then it prints progress and performance for every tree.
warm_start : bool, default: False
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just erase the
previous solution.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
feature_importances_ : array, shape = [n_features]
The feature importances (the higher, the more important the feature).
oob_improvement_ : array, shape = [n_estimators]
The improvement in loss (= deviance) on the out-of-bag samples
relative to the previous iteration.
``oob_improvement_[0]`` is the improvement in
loss of the first stage over the ``init`` estimator.
train_score_ : array, shape = [n_estimators]
The i-th score ``train_score_[i]`` is the deviance (= loss) of the
model at iteration ``i`` on the in-bag sample.
If ``subsample == 1`` this is the deviance on the training data.
loss_ : LossFunction
The concrete ``LossFunction`` object.
`init` : BaseEstimator
The estimator that provides the initial predictions.
Set via the ``init`` argument or ``loss.init_estimator``.
estimators_ : ndarray of DecisionTreeRegressor, shape = [n_estimators, 1]
The collection of fitted sub-estimators.
See also
--------
DecisionTreeRegressor, RandomForestRegressor
References
----------
J. Friedman, Greedy Function Approximation: A Gradient Boosting
Machine, The Annals of Statistics, Vol. 29, No. 5, 2001.
J. Friedman, Stochastic Gradient Boosting, 1999
T. Hastie, R. Tibshirani and J. Friedman.
Elements of Statistical Learning Ed. 2, Springer, 2009.
"""
_SUPPORTED_LOSS = ('ls', 'lad', 'huber', 'quantile')
def __init__(self, loss='ls', learning_rate=0.1, n_estimators=100,
subsample=1.0, min_samples_split=2,
min_samples_leaf=1, min_weight_fraction_leaf=0.,
max_depth=3, init=None, random_state=None,
max_features=None, alpha=0.9, verbose=0, max_leaf_nodes=None,
warm_start=False):
super(GradientBoostingRegressor, self).__init__(
loss=loss, learning_rate=learning_rate, n_estimators=n_estimators,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_depth=max_depth, init=init, subsample=subsample,
max_features=max_features,
random_state=random_state, alpha=alpha, verbose=verbose,
max_leaf_nodes=max_leaf_nodes, warm_start=warm_start)
def predict(self, X):
"""Predict regression target for X.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
y : array of shape = [n_samples]
The predicted values.
"""
X = check_array(X, dtype=DTYPE, order="C")
return self._decision_function(X).ravel()
def staged_predict(self, X):
"""Predict regression target at each stage for X.
This method allows monitoring (i.e. determine error on testing set)
after each stage.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
y : generator of array of shape = [n_samples]
The predicted value of the input samples.
"""
for y in self._staged_decision_function(X):
yield y.ravel()
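# Usage sketch (hypothetical example, not part of the scikit-learn API):
# fit a GradientBoostingRegressor and use ``staged_predict`` to monitor the
# held-out error after every boosting stage, as described in the docstrings
# above.  The dataset and parameter values below are arbitrary.
def _example_staged_monitoring():
    from sklearn.datasets import make_friedman1
    from sklearn.metrics import mean_squared_error

    X, y = make_friedman1(n_samples=1200, noise=1.0, random_state=0)
    X_train, y_train = X[:600], y[:600]
    X_test, y_test = X[600:], y[600:]

    est = GradientBoostingRegressor(n_estimators=200, learning_rate=0.1,
                                    max_depth=3, subsample=0.8,
                                    random_state=0)
    est.fit(X_train, y_train)

    # test-set error after each stage; its argmin suggests a reasonable
    # stopping point for ``n_estimators``
    test_error = [mean_squared_error(y_test, y_pred)
                  for y_pred in est.staged_predict(X_test)]
    return np.argmin(test_error), min(test_error)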
| bsd-3-clause |
gbrammer/unicorn | survey_paper.py | 2 | 170922 | import os
#import pyfits
import astropy.io.fits as pyfits
import numpy as np
import glob
import shutil
import matplotlib.pyplot as plt
USE_PLOT_GUI=False
from matplotlib.figure import Figure
from matplotlib.backends.backend_agg import FigureCanvasAgg
import matplotlib
import threedhst
import threedhst.eazyPy as eazy
import threedhst.catIO as catIO
import unicorn
import re
root = None
def throughput():
os.chdir('/research/HST/GRISM/3DHST/ANALYSIS/SURVEY_PAPER')
xg141, yg141 = np.loadtxt('g141.dat', unpack=True)
xf140, yf140 = np.loadtxt('f140w.dat', unpack=True)
xf814, yf814 = np.loadtxt('f814w.dat', unpack=True)
xg800l, yg800l = np.loadtxt('g800l.dat', unpack=True)
plt.rcParams['text.usetex'] = True
plt.rcParams['font.family'] = 'serif'
plt.rcParams['font.serif'] = 'Times'
fig = unicorn.catalogs.plot_init(square=True, xs=8, aspect=1./3, left=0.105, bottom=0.08, top=0.01, right=0.01)
ax = fig.add_subplot(111)
ax.plot(xg141, yg141, color='black', linewidth=2, alpha=0.5)
ax.fill(xg141, yg141, color='red', linewidth=2, alpha=0.1)
ax.plot(xf140, yf140, color='black', linewidth=2, alpha=0.7)
ax.plot(xg800l, yg800l, color='black', linewidth=2, alpha=0.5)
ax.fill(xg800l, yg800l, color='blue', linewidth=2, alpha=0.1)
ax.plot(xf814, yf814, color='black', linewidth=2, alpha=0.7)
em_lines = [3727, 4861, 4959, 5007, 6563.]
offset = np.array([0,-0.05,0,0,0])
offset = 0.25+np.array([-0.2,-0.05,0,-0.01,-0.1])
xoffset = np.array([0,-120,200,150,0])
line_scale = np.array([1,1,1./2.98,1,1])
em_names = ['[OII]',r'H$\beta$','','[OIII]',r'H$\alpha$']
dlam = 30
zi = 1
show_spectra = True
colors=['blue','green','red']
if show_spectra:
for zi in [1,2,3]:
sedx, sedy = np.loadtxt('templates/EAZY_v1.0_lines/eazy_v1.0_sed4_nolines.dat', unpack=True)
sedy *= 1.*sedy.max()
dl = dlam/(1+zi)
#dl = dlam
for i,em_line in enumerate(em_lines):
em_gauss = 1./np.sqrt(2*np.pi*dl**2)*np.exp(-1*(sedx-em_line)**2/2/dl**2)
sedy += em_gauss/em_gauss.max()*0.6*line_scale[i]
ax.plot(sedx*(1+zi), sedy*0.4+0.5, color=colors[zi-1], alpha=0.7, linewidth=2)
ax.text(5500.,1.18-zi*0.13,r'$z=%d$' %(zi), color=colors[zi-1], fontsize=11)
for i in range(len(em_lines)):
ax.text(em_lines[i]*(1+1)+xoffset[i], 1+offset[i], em_names[i], horizontalalignment='center', fontsize=10)
show_continuous = False
if show_continuous:
em_lines = [3727, 5007, 6563.]
zgrid = np.arange(1000)/1000.*4
for line in em_lines:
ax.plot(line*(1+zgrid), zgrid/4.*0.8+0.5, linewidth=2, alpha=0.5, color='black')
for zi in [0,1,2,3]:
ax.plot([0.1,2.e4],np.array([zi,zi])/4.*0.8+0.5, linestyle='--', color='black', alpha=0.2)
ax.text(5800, 0.08,'G800L',rotation=33., color='black', alpha=0.7)
ax.text(5800, 0.08,'G800L',rotation=33., color='blue', alpha=0.4)
ax.text(7100, 0.03,'F814W',rotation=80., color='black', alpha=0.9)
ax.text(1.115e4, 0.17,'G141',rotation=15., color='black', alpha=0.7)
ax.text(1.115e4, 0.17,'G141',rotation=15., color='red', alpha=0.4)
ax.text(1.21e4, 0.03,'F140W',rotation=88., color='black', alpha=0.9)
ax.set_xlim(4500, 1.79e4)
ax.set_ylim(0,1.4)
ax.set_xlabel(r'$\lambda$ [\AA]')
ax.set_ylabel('throughput')
#ax.set_yticklabels([]);
ytick = ax.set_yticks([0,0.25,0.5,0.75,1.0])
fig.savefig('throughput.pdf')
plt.rcParams['text.usetex'] = False
#
def throughput_v2():
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
#os.chdir('/research/HST/GRISM/3DHST/ANALYSIS/SURVEY_PAPER')
# xg141, yg141 = np.loadtxt('g141.dat', unpack=True)
# xf140, yf140 = np.loadtxt('f140w.dat', unpack=True)
# xf814, yf814 = np.loadtxt('f814w.dat', unpack=True)
# xg800l, yg800l = np.loadtxt('g800l.dat', unpack=True)
import pysynphot as S
bp = S.ObsBandpass('wfc3,ir,g141')
xg141, yg141 = bp.wave, bp.throughput
bp = S.ObsBandpass('wfc3,ir,g102')
xg102, yg102 = bp.wave, bp.throughput
bp = S.ObsBandpass('wfc3,ir,f140w')
xf140, yf140 = bp.wave, bp.throughput
bp = S.ObsBandpass('acs,wfc1,f814w')
xf814, yf814 = bp.wave, bp.throughput
bp = S.ObsBandpass('acs,wfc1,g800l')
xg800l, yg800l = bp.wave, bp.throughput
plt.rcParams['text.usetex'] = True
plt.rcParams['font.family'] = 'Serif'
plt.rcParams['font.serif'] = 'Times'
plt.ioff()
fig = unicorn.catalogs.plot_init(square=True, xs=8, aspect=1./3, left=0.095, bottom=0.1, top=0.095, right=0.01)
#ax = fig.add_subplot(111)
#ax = fig.add_axes(((x0+(dx+x0)*0), y0+0.5, dx, 0.5-top_panel-y0))
ysplit = 0.65
ax = fig.add_axes((0.06, 0.135, 0.935, (ysplit-0.135)))
ax.plot(xg141, yg141, color='black', linewidth=2, alpha=0.5)
ax.fill(xg141, yg141, color='red', linewidth=2, alpha=0.1)
ax.plot(xf140, yf140, color='black', linewidth=2, alpha=0.7)
ax.plot(xg102, yg102, color='black', linewidth=2, alpha=0.5)
ax.fill(xg102, yg102, color='orange', linewidth=2, alpha=0.1)
ax.plot(xg800l, yg800l, color='black', linewidth=2, alpha=0.5)
ax.fill(xg800l, yg800l, color='blue', linewidth=2, alpha=0.1)
ax.plot(xf814, yf814, color='black', linewidth=2, alpha=0.7)
em_names = ['[OII]',r'H$\beta$','','[OIII]',r'H$\alpha$']
dlam = 30
zi = 1
yy = 0.1
ax.text(5800, 0.12+yy,'G800L',rotation=48., color='black', alpha=0.7)
ax.text(5800, 0.12+yy,'G800L',rotation=48., color='blue', alpha=0.4)
ax.text(7100, 0.14+yy,'F814W',rotation=80., color='black', alpha=0.9)
# ax.text(1.115e4, 0.17+yy,'G141',rotation=15., color='black', alpha=0.7)
# ax.text(1.115e4, 0.17+yy,'G141',rotation=15., color='red', alpha=0.4)
ax.text(1.3e4, 0.29+yy,'G141',rotation=10., color='black', alpha=0.7)
ax.text(1.3e4, 0.29+yy,'G141',rotation=10., color='red', alpha=0.4)
ax.text(1.e4, 0.24+yy,'G102',rotation=15., color='black', alpha=0.7)
ax.text(1.e4, 0.24+yy,'G102',rotation=15., color='orange', alpha=0.4)
ax.text(1.21e4, 0.14+yy,'F140W',rotation=88., color='black', alpha=0.9)
#ax.set_yticklabels([]);
#ax2 = ax.twiny()
ax2 = fig.add_axes((0.06, ysplit+0.02, 0.935, (0.86-ysplit)))
ax2.xaxis.set_label_position('top')
ax2.xaxis.set_ticks_position('top')
### H-alpha
xbox = np.array([0,1,1,0,0])
dy= 0.333333
y0= 1.0
ybox = np.array([0,0,1,1,0])*dy
width_acs = np.array([6000.,9000.])
width_wfc3 = np.array([1.1e4,1.65e4])
width_g102 = np.array([0.82e4,1.13e4])
line_names = [r'H$\alpha$',r'H$\beta$ / [OIII]','[OII]']
for i, l0 in enumerate([6563.,4934.,3727]):
zline = width_acs/l0-1
ax2.fill(xbox*(zline[1]-zline[0])+zline[0],y0-ybox-i*dy, color='blue', alpha=0.1)
zline = width_wfc3/l0-1
ax2.fill(xbox*(zline[1]-zline[0])+zline[0],y0-ybox-i*dy, color='red', alpha=0.1)
zline = width_g102/l0-1
ax2.fill(xbox*(zline[1]-zline[0])+zline[0],y0-ybox-i*dy, color='orange', alpha=0.1)
ax2.plot([0,4],np.array([0,0])+y0-(i+1)*dy, color='black')
ax2.text(3.7,y0-(i+0.5)*dy, line_names[i], horizontalalignment='right', verticalalignment='center')
ax.set_xlim(4500, 1.79e4)
ax.set_ylim(0,0.65)
ytick = ax.set_yticks([0,0.2,0.4,0.6])
ax.set_xlabel(r'$\lambda$ [\AA]')
ax.set_ylabel('Throughput')
minorLocator = MultipleLocator(1000)
ax.xaxis.set_minor_locator(minorLocator)
minorLocator = MultipleLocator(0.1)
ax.yaxis.set_minor_locator(minorLocator)
ax2.set_xlim(0,3.8)
ytick = ax2.set_yticks([])
ax2.set_ylim(0,1)
minorLocator = MultipleLocator(0.1)
ax2.xaxis.set_minor_locator(minorLocator)
ax2.set_xlabel(r'$z_\mathrm{line}$')
#fig.savefig('throughput.eps')
fig.savefig('throughput.pdf')
plt.rcParams['text.usetex'] = False
def orbit_structure():
"""
Show the POSTARG offsets in WFC3 / ACS
"""
os.chdir('/research/HST/GRISM/3DHST/ANALYSIS/SURVEY_PAPER')
plt.rcParams['text.usetex'] = True
plt.rcParams['font.family'] = 'serif'
plt.rcParams['font.serif'] = 'Times'
wfc3_color, acs_color = 'red','blue'
wfc3_color, acs_color = 'blue','green'
fig = unicorn.catalogs.plot_init(square=True, xs=4.4, aspect=1, left=0.09, bottom=0.08)
ax = fig.add_subplot(111)
a11 = 0.1355
b10 = 0.1211 # arcsec / pix, from instrument HB
#dxs = np.array([0,-20,-13,7]) + np.int(np.round(xsh[0]))*0
#dys = np.array([0,-7,-20,-13]) + np.int(np.round(ysh[0]))*0
x3dhst = np.array([0, 1.355, 0.881, -0.474])/a11
y3dhst = np.array([0, 0.424, 1.212, 0.788])/b10
xgoodsn = np.array([0,0.6075, 0.270, -0.3375])/a11
ygoodsn = np.array([0,0.1815, 0.6655, 0.484])/b10
#### SN fields:
test = """
files=`ls ibfuw1*flt.fits.gz`
for file in $files; do result=`dfitsgz $file |fitsort FILTER APERTURE POSTARG1 POSTARG2 | grep -v POST`; echo "${file} ${result}"; done
"""
xmarshall = np.array([-0.34, -0.540, 0.0, 0.608, 0.273])/a11
ymarshall = np.array([-0.34, -0.243, 0.0, 0.244, 0.302])/b10
xgeorge = np.array([0.0, -0.608, 0.273, -0.340, 0.540])/a11
ygeorge = np.array([0.0, 0.244, 0.302, -0.340, -0.243])/b10
x41 = np.array([0.273, -0.608, 0.540, -0.340, -0.340, 0.540])/a11
y41 = np.array([0.302, 0.244, -0.243, -0.301, -0.301, -0.243])/b10
xprimo = np.array([0.0, 0.474, 0.290, 0.764, -0.290, 0.184])/a11
yprimo = np.array([0.0, 0.424, -0.290, 0.134, 0.290, 0.714])/b10
xers = np.array([-10.012, 9.988, 9.971, -9.958])/a11
yers = np.array([5.058, 5.050, -5.045, -5.045])/b10
xcooper = np.array([0, 0.6075, 0.270 ,-0.3375])/a11
ycooper = np.array([0, 0.1815, 0.6655, 0.484])/b10
xstanford = np.array([-0.169, 0.372, 0.169, -0.372])/a11
ystanford = np.array([-0.242, 0.06064, 0.242, 0.06064])/b10
xstanford += 0.2
xoff = x3dhst
yoff = y3dhst
print np.round(xoff*10)/10.*2
print np.round(yoff*10)/10.*2
plt.plot(np.round(xoff*10)/10. % 1, np.round(yoff*10)/10. % 1)
plt.xlim(-0.1,1.1); plt.ylim(-0.1,1.1)
ax.plot(xoff, yoff, marker='o', markersize=10, color=wfc3_color, alpha=0.8, zorder=10)
if 1 == 1:
for i in range(4):
ax.text(xoff[i], yoff[i]+0.5, 'F140W + G141', horizontalalignment='center', backgroundcolor='white', zorder=20)
ax.set_xlabel(r'$x$ offset [pix]')
ax.set_ylabel(r'$y$ offset [pix]')
scale = 4
x0 = -2.9
y0 = -5.5
ax.fill(np.array([0,1,1,0])*scale+x0, np.array([0,0,1,1])*scale+y0, color='white', zorder=10)
ax.fill(np.array([0,1,1,0])*scale+x0, np.array([0,0,1,1])*scale+y0, color='black', alpha=0.1, zorder=11)
ax.plot(np.array([0.5,0.5])*scale+x0, np.array([0,1])*scale+y0, color='black', alpha=0.2, zorder=12)
ax.plot(np.array([0,1])*scale+x0, np.array([0.5,0.5])*scale+y0, color='black', alpha=0.2, zorder=12)
ax.plot(np.abs(xoff-np.cast[int](xoff))*scale+x0, np.abs(yoff-np.cast[int](yoff))*scale+y0, marker='o', markersize=10, color=wfc3_color, alpha=0.8, zorder=13)
ax.text(x0+scale/2., y0-1, 'WFC3 Primary', horizontalalignment='center')
#plt.xlim(-5,11)
#plt.ylim(-5,11)
#### ACS:
# XPOS = x*a11 + y*a10
# YPOS = x*b11 + y*b10
#
# a10 a11 b10 b11
# WFC: 0.0000 0.0494 0.0494 0.0040
a10, a11, b10, b11 = 0.0000,0.0494,0.0494,0.0040
    #### WFC3 POSTARG offsets in arcsec (the same values used to compute
    #### x3dhst/y3dhst above), defined here so the ACS conversion below runs
    xpostarg = np.array([0., 1.355, 0.881, -0.474])
    ypostarg = np.array([0., 0.424, 1.212, 0.788])
    #xoff = np.cumsum(xpostarg)
    #yoff = np.cumsum(ypostarg)
acsang = 45.
    acsang = 92.16 - (-45.123)
xpos_acs, ypos_acs = threedhst.utils.xyrot(xpostarg, ypostarg, acsang)
xpix_acs = xpos_acs / a11
ypix_acs = (ypos_acs-xpix_acs*b11)/b10
x0 = 5.5
#y0 = -4.5
ax.fill(np.array([0,1,1,0])*scale+x0, np.array([0,0,1,1])*scale+y0, color='white', zorder=10)
ax.fill(np.array([0,1,1,0])*scale+x0, np.array([0,0,1,1])*scale+y0, color='black', alpha=0.1, zorder=11)
ax.plot(np.array([0.5,0.5])*scale+x0, np.array([0,1])*scale+y0, color='black', alpha=0.2, zorder=12)
ax.plot(np.array([0,1])*scale+x0, np.array([0.5,0.5])*scale+y0, color='black', alpha=0.2, zorder=12)
ax.plot(np.abs(xpix_acs-np.cast[int](xpix_acs))*scale+x0, np.abs(ypix_acs-np.cast[int](ypix_acs))*scale+y0, marker='o', markersize=10, color=acs_color, alpha=0.8, zorder=13)
#ax.plot(np.array([0,0.5,1,0.5])*scale+x0, np.array([0,0.5,0.5,1])*scale+y0, marker='o', marker='None', color=acs_color, linestyle='--', alpha=0.6, zorder=13)
ax.plot(np.array([0,0.5,0.5,1])*scale+x0, np.array([0,1,0.5,0.5])*scale+y0, marker='o', color=acs_color, linestyle='--', alpha=0.6, zorder=13)
ax.text(x0+scale/2., y0-1, 'ACS Parallel', horizontalalignment='center')
#plt.grid(alpha=0.5, zorder=1, markevery=5)
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
ax.set_xlim(-5.9,12.5)
#ax.set_ylim(-5.9,12.5)
ax.set_ylim(-6.9,11.5)
majorLocator = MultipleLocator(5)
majorFormatter = FormatStrFormatter('%d')
minorLocator = MultipleLocator(1)
ax.xaxis.set_major_locator(majorLocator)
ax.xaxis.set_minor_locator(minorLocator)
ax.xaxis.set_major_formatter(majorFormatter)
ax.xaxis.grid(alpha=0.5, zorder=1, which='major')
ax.xaxis.grid(alpha=0.2, zorder=1, which='minor')
ax.yaxis.set_major_locator(majorLocator)
ax.yaxis.set_minor_locator(minorLocator)
ax.yaxis.set_major_formatter(majorFormatter)
ax.yaxis.grid(alpha=0.5, zorder=1, which='major')
ax.yaxis.grid(alpha=0.2, zorder=1, which='minor')
fig.savefig('dither_box.pdf')
plt.rcParams['text.usetex'] = False
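#### Hypothetical helper (illustrative sketch, not called anywhere above):
#### convert WFC3/IR POSTARG offsets in arcsec to detector pixels using the
#### plate-scale terms quoted in orbit_structure (a11, b10).  The default
#### arguments are one of the 3D-HST dither offsets.
def example_postarg_to_pixels(postarg1=1.355, postarg2=0.424):
    a11 = 0.1355   # arcsec / pix along x
    b10 = 0.1211   # arcsec / pix along y
    return postarg1/a11, postarg2/b10   # -> (10.0, ~3.5) pixels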
def exptimes():
"""
Extract the range of exposure times from the formatted Phase-II files
"""
os.system('grep F814W 12???.pro |grep " S " > f814w.exptime')
os.system('grep G800L 12???.pro |grep " S " > g800l.exptime')
os.system('grep F140W *.pro |grep MULTIAC |grep NSAMP > f140w.exptime')
os.system('grep G141 *.pro |grep MULTIAC |grep NSAMP > g141.exptime')
### G141
fp = open('g141.exptime')
lines = fp.readlines()
fp.close()
nsamp = []
object = []
for line in lines:
nsamp.append(line.split('NSAMP=')[1][0:2])
object.append(line.split()[1])
expsamp = np.zeros(17)
expsamp[12] = 1002.94
expsamp[13] = 1102.94
expsamp[14] = 1202.94
expsamp[15] = 1302.94
expsamp[16] = 1402.94
nsamp = np.cast[int](nsamp)
nsamp+=1 ### this seems to be the case for all actual observations !!
objects = object[::4]
NOBJ = len(nsamp)/4
texp = np.zeros(NOBJ)
for i in range(NOBJ):
texp[i] = np.sum(expsamp[nsamp[i*4:(i+1)*4]])
print objects[i], texp[i]
print 'G141: %.1f - %.1f' %(texp.min(), texp.max())
def spectral_features():
wmin, wmax = 1.1e4, 1.65e4
print '%-8s %.1f -- %.1f' %('Halpha', wmin/6563.-1, wmax/6563.-1)
print '%-8s %.1f -- %.1f' %('OIII', wmin/5007.-1, wmax/5007.-1)
print '%-8s %.1f -- %.1f' %('OII', wmin/3727.-1, wmax/3727.-1)
print '%-8s %.1f -- %.1f' %('4000', wmin/4000.-1, wmax/4000.-1)
wmin, wmax = 0.55e4, 1.0e4
print '\n\nACS\n\n'
print '%-8s %.1f -- %.1f' %('Halpha', wmin/6563.-1, wmax/6563.-1)
print '%-8s %.1f -- %.1f' %('OIII', wmin/5007.-1, wmax/5007.-1)
print '%-8s %.1f -- %.1f' %('OII', wmin/3727.-1, wmax/3727.-1)
print '%-8s %.1f -- %.1f' %('4000', wmin/4000.-1, wmax/4000.-1)
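#### Illustrative helper (example only, not used elsewhere): the redshift
#### range over which a line of rest wavelength l0 lands in (wmin, wmax),
#### i.e. z = lambda_obs/l0 - 1.  For Halpha in G141 this gives ~0.68-1.51.
def example_line_redshift_range(l0=6563., wmin=1.1e4, wmax=1.65e4):
    return wmin/l0 - 1, wmax/l0 - 1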
def aXe_model():
import copy
import scipy.ndimage as nd
os.chdir('/research/HST/GRISM/3DHST/ANALYSIS/SURVEY_PAPER')
dir = pyfits.open('/research/HST/GRISM/3DHST/GOODS-S/PREP_FLT/UDF-F140W_drz.fits')
gri = pyfits.open('/research/HST/GRISM/3DHST/GOODS-S/PREP_FLT/UDF-FC-G141_drz.fits')
mod = pyfits.open('/research/HST/GRISM/3DHST/GOODS-S/PREP_FLT/UDF-FC-G141CONT_drz.fits')
#### rotate all the images so that dispersion axis is along X
angle = gri[1].header['PA_APER']#+180
direct = nd.rotate(dir[1].data, angle, reshape=False)
grism = nd.rotate(gri[1].data, angle, reshape=False)
model = nd.rotate(mod[1].data, angle, reshape=False)
xc, yc = 1877, 2175
NX, NY = 1270, 365
aspect = 3.*NY/NX
xc, yc = 1731, 977
NX, NY = 882, 467
aspect = 1.*NY/NX
plt.gray()
plt.rcParams['lines.linewidth'] = 2
plt.rcParams['text.usetex'] = True
plt.rcParams['font.family'] = 'serif'
plt.rcParams['font.serif'] = 'Times'
fig = unicorn.catalogs.plot_init(square=True, xs=8, aspect=aspect, left=0.12)
fig.subplots_adjust(wspace=0.0,hspace=0.0,left=0.01,
bottom=0.005,right=0.99,top=0.995)
fs1 = 12 ### Font size of label
xlab, ylab = 0.04*NX/3., NY-0.02*NY-0.02*NY
xd, yd = 65, 412 ### my direct images don't line up any more (deleted old ones?)
xd, yd = 412, 65*2
ax = fig.add_subplot(221)
ax.imshow(0-direct[yc-NY/2+yd:yc+NY/2+yd, xc-NX/2+xd:xc+NX/2+xd], vmin=-0.2, vmax=0.02, interpolation='nearest')
ax.set_yticklabels([]); ax.set_xticklabels([])
xtick = ax.set_xticks([0,NX]); ytick = ax.set_yticks([0,NY])
ax.text(xlab, ylab, 'a) Direct F140W', fontsize=fs1, verticalalignment='top')
#ax.text(xlab, ylab, r'$%d\times\ $$%d^{\prime\prime}$' %(NX*0.06, NY*0.06), fontsize=18, backgroundcolor='white', verticalalignment='top')
ax = fig.add_subplot(222)
ax.imshow(0-grism[yc-NY/2:yc+NY/2, xc-NX/2:xc+NX/2], vmin=-0.04, vmax=0.004, interpolation='nearest')
ax.set_yticklabels([]); ax.set_xticklabels([])
xtick = ax.set_xticks([0,NX]); ytick = ax.set_yticks([0,NY])
ax.text(xlab, ylab, 'b) Grism G141', fontsize=fs1, verticalalignment='top')
ax = fig.add_subplot(224)
diff = grism-model
### Flag em lines and 0th order
dy0 = 20
emx, emy = [223, 272, 487, 754, 520, 850, 565, 558, 51, 834, 345, 495], [122, 189, 83, 240, 148, 124, 336, 418, 338, 225, 197, 268]
ax.plot(np.array(emx)+(xc-1731), np.array(emy)-dy0+(yc-977), marker='^', markersize=6, linestyle='None', color='blue', alpha=0.9)
zx, zy = [301, 393, 648], [183, 321, 446]
ax.plot(np.array(zx)+(xc-1731), np.array(zy)-dy0+(yc-977), marker='^', markersize=6, linestyle='None', markeredgecolor='black', markerfacecolor='None', alpha=0.9, markeredgewidth=1.2)
fonts = matplotlib.font_manager.FontProperties()
fonts.set_size(9)
ax.legend(['Emission',r'0th order'], numpoints=1, prop=fonts, handletextpad=0.001, borderaxespad=0.001)
# ax.text(0.04*NX/3., 0.02*NY, 'Em.', fontsize=fs1*0.8, backgroundcolor='white', verticalalignment='bottom', color='green')
# ax.text(0.3*NX/3., 0.02*NY, r'0th', fontsize=fs1*0.8, backgroundcolor='white', verticalalignment='bottom', color='red')
ax.imshow(0-diff[yc-NY/2:yc+NY/2, xc-NX/2:xc+NX/2], vmin=-0.02, vmax=0.002, interpolation='nearest')
ax.set_yticklabels([]); ax.set_xticklabels([])
xtick = ax.set_xticks([0,NX]); ytick = ax.set_yticks([0,NY])
ax.text(xlab, ylab, r'd) Model-subtracted grism', fontsize=fs1, verticalalignment='top')
ax = fig.add_subplot(223)
ax.imshow(0-model[yc-NY/2:yc+NY/2, xc-NX/2:xc+NX/2], vmin=-0.04, vmax=0.004, interpolation='nearest')
ax.set_yticklabels([]); ax.set_xticklabels([])
xtick = ax.set_xticks([0,NX]); ytick = ax.set_yticks([0,NY])
ax.text(xlab, ylab, r'c) aXe Model', fontsize=fs1, verticalalignment='top')
fig.savefig('grism_model.pdf')
plt.rcParams['text.usetex'] = False
def sync():
pass
"""
paths="RAW HTML/scripts PREP_FLT"
dirs="AEGIS COSMOS ERS GOODS-N GOODS-S SN-GEORGE SN-MARSHALL SN-PRIMO UDS"
for dir in $dirs; do
mkdir ${dir}
mkdir ${dir}/HTML
for path in $paths; do
# du -sh ${dir}/${path}
mkdir ${dir}/${path}
rsync -avz --progress $UNICORN:/3DHST/Spectra/Work/${dir}/${path} ${dir}/${path}
done
done
"""
def all_pointings():
pointings(ROOT='GOODS-SOUTH')
pointings(ROOT='COSMOS')
pointings(ROOT='AEGIS')
pointings(ROOT='UDS')
pointings(ROOT='GOODS-N')
def all_pointings_width():
"""
This is the mosaic figure from the paper. The individual fields are combined
manually with Adobe Illustrator.
"""
from unicorn.survey_paper import pointings
fs, left = 10, 0.22
fs, left = 12, 0.25
fs, left = 14, 0.28
fs, left = 16, 0.32
pointings(ROOT='GOODS-SOUTH', width=7, corner='ll', fontsize=fs, left=left)
pointings(ROOT='COSMOS', width=6, corner='lr', fontsize=fs, left=left, right=0.03, bottom=0.115)
pointings(ROOT='AEGIS', width=7, corner='ll', fontsize=fs, left=left, right=0.045)
pointings(ROOT='UDS', width=9, corner='lr', fontsize=fs, left=left-0.02, right=0.04, top=0.02)
pointings(ROOT='GOODS-N', width=6, corner='ur', fontsize=fs, left=left, bottom=0.115)
def pointings_with_status():
"""
Highlight pointings that have been observed (status == 'Archived')
"""
from unicorn.survey_paper import pointings
pointings(ROOT='GOODS-SOUTH', width=7, corner='ll', use_status=True)
pointings(ROOT='GOODS-SOUTH', width=7, corner='ll', use_status=True, show_acs=False)
pointings(ROOT='GOODS-SOUTH', width=7, corner='ll', use_status=True, show_wfc3=False)
pointings(ROOT='COSMOS', width=6, corner='lr', use_status=True)
pointings(ROOT='COSMOS', width=6, corner='lr', use_status=True, show_acs=False)
pointings(ROOT='COSMOS', width=6, corner='lr', use_status=True, show_wfc3=False)
pointings(ROOT='AEGIS', width=7, corner='ll', use_status=True)
pointings(ROOT='AEGIS', width=7, corner='ll', use_status=True, show_acs=False)
pointings(ROOT='AEGIS', width=7, corner='ll', use_status=True, show_wfc3=False)
pointings(ROOT='UDS', width=9, corner='lr', use_status=True)
pointings(ROOT='UDS', width=9, corner='lr', use_status=True, show_acs=False)
pointings(ROOT='UDS', width=9, corner='lr', use_status=True, show_wfc3=False)
pointings(ROOT='UDS', width=9, corner='lr', show_sn_fields=True, use_status=True)
pointings(ROOT='GOODS-SOUTH', width=7, corner='ll', show_sn_fields=True, use_status=True)
def pointings(ROOT='GOODS-SOUTH', width=None, corner='lr', use_status=False, show_acs=True, show_wfc3=True, show_sn_fields=False, fontsize=10, left=0.22, right=0.02, top=0.01, bottom=0.11):
"""
    Make a figure showing the 3D-HST pointing positions, read from region files
"""
import unicorn.survey_paper as sup
plt.rcParams['lines.linewidth'] = 0.3
wfc3_color = 'blue'
acs_color = 'green'
os.chdir(unicorn.GRISM_HOME+'ANALYSIS/SURVEY_PAPER')
yticklab = None
dx_ref = (53.314005633802822-52.886197183098595)*np.cos(-27.983151830808076/360*2*np.pi)
CANDELS_PATH = '/research/HST/GRISM/3DHST/REGIONS/CANDELS/'
candels_files = []
candels_alpha = 0.3
candels_color = '0.1'
if use_status:
pointing_list, pointing_status = np.loadtxt('/research/HST/GRISM/3DHST/REGIONS/pointing_status.dat', dtype=np.str, unpack=True)
pointing_list, pointing_status = np.array(pointing_list), np.array(pointing_status)
else:
pointing_list, pointing_status = np.array([]), np.array([])
#### GOODS-S
if ROOT=='GOODS-SOUTH':
x0, x1 = 53.314005633802822, 52.886197183098595
y0, y1 = -27.983151830808076, -27.654474431818176
xticklab = [r'$3^\mathrm{h}33^\mathrm{m}00^\mathrm{s}$', r'$3^\mathrm{h}32^\mathrm{m}30^\mathrm{s}$', r'$3^\mathrm{h}32^\mathrm{m}00^\mathrm{s}$']
xtickv = [degrees(3,33,00, hours=True), degrees(3,32,30, hours=True), degrees(3,32,00, hours=True)]
yticklab = [r'$-27^\circ40^\prime00^{\prime\prime}$', r'$45^\prime00^{\prime\prime}$', r'$-27^\circ50^\prime00^{\prime\prime}$', r'$55^\prime00^{\prime\prime}$']
ytickv = [degrees(-27, 40, 00, hours=False), degrees(-27, 45, 00, hours=False), degrees(-27, 50, 00, hours=False), degrees(-27, 55, 00, hours=False)]
candels_files = glob.glob(CANDELS_PATH+'/GOODS-S*reg')
candels_files.extend(glob.glob(CANDELS_PATH+'/GOODS-W*reg'))
#### COSMOS
if ROOT=='COSMOS':
x1, x0 = 149.99120563380279, 150.23823661971829
y0, y1 = 2.1678109478476815, 2.5996973302980129
xticklab = [r'$10^\mathrm{h}00^\mathrm{m}30^\mathrm{s}$', r'$00^\mathrm{m}00^\mathrm{s}$']
xtickv = [degrees(10,00,30, hours=True), degrees(10,00,00, hours=True)]
yticklab = [r'$+2^\circ15^\prime00^{\prime\prime}$', r'$25^\prime00^{\prime\prime}$', r'$35^\prime00^{\prime\prime}$']
ytickv = [degrees(02, 15, 00, hours=False), degrees(02, 25, 00, hours=False), degrees(02, 35, 00, hours=False)]
candels_files = glob.glob(CANDELS_PATH+'/COSMOS*reg')
#### AEGIS
if ROOT=='AEGIS':
x1, x0 = 214.49707154104345, 215.12704734584406
y0, y1 = 52.680946433013482, 53.01597137966467
xticklab = [r'$18^\mathrm{m}00^\mathrm{s}$', r'$14^\mathrm{h}19^\mathrm{m}00^\mathrm{s}$', r'$20^\mathrm{m}00^\mathrm{s}$']
xticklab = [r'$18^\mathrm{m}$', r'$14^\mathrm{h}19^\mathrm{m}$', r'$20^\mathrm{m}$']
xtickv = [degrees(14,18,00, hours=True), degrees(14,19,00, hours=True), degrees(14,20,00, hours=True)]
yticklab = [r'$+52^\circ45^\prime00^{\prime\prime}$', r'$50^\prime00^{\prime\prime}$', r'$55^\prime00^{\prime\prime}$']
ytickv = [degrees(52, 45, 00, hours=False), degrees(52, 50, 00, hours=False), degrees(52, 55, 00, hours=False)]
candels_files = glob.glob(CANDELS_PATH+'/EGS*reg')
#### UDS
if ROOT=='UDS':
x1, x0 = 34.116935194128146, 34.51871547581829
y0, y1 = -5.2957542206957582, -5.0834327182123147+2./3600
xticklab = [r'$18^\mathrm{m}00^\mathrm{s}$', r'$2^\mathrm{h}17^\mathrm{m}30^\mathrm{s}$', r'$17^\mathrm{m}00^\mathrm{s}$', r'$16^\mathrm{m}30^\mathrm{s}$']
xtickv = [degrees(2,18,00, hours=True), degrees(2,17,30, hours=True), degrees(2,17,00, hours=True), degrees(2,16,30, hours=True)]
yticklab = [r'$05^\prime00^{\prime\prime}$', r'$-5^\circ10^\prime00^{\prime\prime}$', r'$15^\prime00^{\prime\prime}$']
ytickv = [degrees(-5, 05, 00, hours=False), degrees(-5, 10, 00, hours=False), degrees(-5, 15, 00, hours=False)]
candels_files = glob.glob(CANDELS_PATH+'/UDS*reg')
#### GOODS-N
if ROOT=='GOODS-N':
wfc3_color = 'orange'
acs_color = None
x1, x0 = 188.9139017749491, 189.44688055895648
y0, y1 = 62.093791549511998, 62.384068625281309
xticklab = [r'$12^\mathrm{h}37^\mathrm{m}30^\mathrm{s}$', r'$37^\mathrm{m}00^\mathrm{s}$', r'$36^\mathrm{m}30^\mathrm{s}$', r'$36^\mathrm{m}00^\mathrm{s}$']
xtickv = [degrees(12,37,30, hours=True), degrees(12,37,00, hours=True), degrees(12,36,30, hours=True), degrees(12,36,00, hours=True)]
yticklab = [r'$+62^\circ10^\prime00^{\prime\prime}$', r'$15^\prime00^{\prime\prime}$', r'$20^\prime00^{\prime\prime}$']
ytickv = [degrees(62, 10, 00, hours=False), degrees(62, 15, 00, hours=False), degrees(62, 20, 00, hours=False)]
candels_files = glob.glob(CANDELS_PATH+'/GOODSN-OR*reg')
candels_files.extend(glob.glob(CANDELS_PATH+'/GOODSN-SK*reg'))
#### Make square for given plot dimensions
dx = np.abs(x1-x0)*np.cos(y0/360*2*np.pi)
dy = (y1-y0)
if width is None:
width = 7*dx/dx_ref
print '%s: plot width = %.2f\n' %(ROOT, width)
fig = unicorn.catalogs.plot_init(square=True, xs=width, aspect=dy/dx, fontsize=fontsize, left=left, right=right, top=top, bottom=bottom)
#fig = unicorn.catalogs.plot_init(square=True)
ax = fig.add_subplot(111)
polys = []
for file in candels_files:
fp = open(file)
lines = fp.readlines()
fp.close()
#
polys.append(sup.polysplit(lines[1], get_shapely=True))
#fi = ax.fill(wfcx, wfcy, alpha=candels_alpha, color=candels_color)
sup.polys = polys
un = polys[0]
for pp in polys[1:]:
un = un.union(pp)
    if un.geometryType() == 'MultiPolygon':
for sub_poly in un.geoms:
x,y = sub_poly.exterior.xy
ax.plot(x,y, alpha=candels_alpha, color=candels_color, linewidth=1)
ax.fill(x,y, alpha=0.1, color='0.7')
else:
x,y = un.exterior.xy
ax.plot(x,y, alpha=candels_alpha, color=candels_color, linewidth=1)
ax.fill(x,y, alpha=0.1, color='0.7')
files=glob.glob(unicorn.GRISM_HOME+'REGIONS/'+ROOT+'-[0-9]*reg')
if ROOT == 'UDS':
p18 = files.pop(9)
print '\n\nPOP %s\n\n' %(p18)
if show_sn_fields:
files.extend(glob.glob(unicorn.GRISM_HOME+'REGIONS/SN*reg'))
files.extend(glob.glob(unicorn.GRISM_HOME+'REGIONS/ERS*reg'))
wfc3_polys = []
acs_polys = []
for file in files:
#
base = os.path.basename(file.split('.reg')[0])
#print base, base in pointing_list
if base in pointing_list:
status = pointing_status[pointing_list == base][0] == 'Archived'
else:
status = False
if not use_status:
status = True
field = re.split('-[0-9]', file)[0]
pointing = file.split(field+'-')[1].split('.reg')[0]
fp = open(file)
lines = fp.readlines()
fp.close()
if base.startswith('SN') | base.startswith('ERS'):
wfc3_color = 'purple'
acs_color = None
pointing = os.path.basename(field)
status = True
#
wfc3_polys.append(sup.polysplit(lines[1], get_shapely=True))
acs_polys.append(sup.polysplit(lines[2], get_shapely=True))
acs_polys.append(sup.polysplit(lines[3], get_shapely=True))
#
wfcx, wfcy = sup.polysplit(lines[1])
if show_wfc3:
if status:
fi = ax.fill(wfcx, wfcy, alpha=0.2, color=wfc3_color)
fi = ax.plot(wfcx, wfcy, alpha=0.8, color=wfc3_color)
else:
fi = ax.fill(wfcx, wfcy, alpha=0.05, color=wfc3_color)
fi = ax.plot(wfcx, wfcy, alpha=0.8, color=wfc3_color)
#
if acs_color is not None:
acsx1, acsy1 = sup.polysplit(lines[2])
acsx2, acsy2 = sup.polysplit(lines[3])
#
if show_acs:
if show_wfc3:
afact = 3
else:
afact = 3
if status:
fi = ax.fill(acsx1, acsy1, alpha=0.05*afact, color=acs_color)
fi = ax.fill(acsx2, acsy2, alpha=0.05*afact, color=acs_color)
#
pl = ax.plot(acsx1, acsy1, alpha=0.1*afact, color=acs_color)
pl = ax.plot(acsx2, acsy2, alpha=0.1*afact, color=acs_color)
else:
pl = ax.plot(acsx1, acsy1, alpha=0.3*afact, color=acs_color)
pl = ax.plot(acsx2, acsy2, alpha=0.3*afact, color=acs_color)
#
xoff, yoff = 0.0, 0.0
if ROOT=='GOODS-SOUTH':
#print pointing
if pointing == '36':
xoff, yoff = 0.002,0.0075
if pointing == '37':
xoff, yoff = -0.005,-0.007
if pointing == '38':
xoff, yoff = 0.007,-0.007
#
if show_wfc3:
te = ax.text(np.mean(wfcx[:-1])+xoff, np.mean(wfcy[:-1])+yoff, pointing, va='center', ha='center', fontsize=13)
#### Get field area from full WFC3 polygons
un_wfc3 = wfc3_polys[0]
for pp in wfc3_polys[1:]:
un_wfc3 = un_wfc3.union(pp)
#
wfc3_union= []
    if un_wfc3.geometryType() == 'MultiPolygon':
total_area = 0
xavg, yavg, wht = 0, 0, 0
for sub_poly in un_wfc3.geoms:
area_i = sub_poly.area*np.cos(y0/360.*2*np.pi)
total_area += area_i
x,y = sub_poly.exterior.xy
wfc3_union.append(sub_poly)
xavg += np.mean(x)*area_i**2
yavg += np.mean(y)*area_i**2
wht += area_i**2
#ax.plot(x,y, alpha=0.8, color='orange', linewidth=1)
xavg, yavg = xavg/wht, yavg/wht
else:
total_area = un_wfc3.area*np.cos(y0/360.*2*np.pi)
x,y = un_wfc3.exterior.xy
wfc3_union.append(un_wfc3)
#ax.plot(x,y, alpha=0.8, color='orange', linewidth=1)
xavg, yavg = np.mean(x), np.mean(y)
#plt.plot([xavg,xavg],[yavg,yavg], marker='x', markersize=20)
#### Get ACS overlap fraction
if ROOT != 'GOODS-N':
un_acs = acs_polys[0]
for pp in acs_polys[1:]:
un_acs = un_acs.union(pp)
acs_union = []
        if un_acs.geometryType() == 'MultiPolygon':
for sub_poly in un_acs.geoms:
x,y = sub_poly.exterior.xy
acs_union.append(sub_poly)
else:
x,y = un_acs.exterior.xy
acs_union.append(un_acs)
wfc3_area = 0.
acs_overlap_area = 0.
for wun in wfc3_union:
wfc3_area += wun.area*np.cos(y0/360.*2*np.pi)*3600.
for aun in acs_union:
overlap = wun.intersection(aun)
acs_overlap_area += overlap.area*np.cos(y0/360.*2*np.pi)*3600.
print '== Combined areas ==\nWFC3, ACS, frac: %.1f %.1f %.1f' %(wfc3_area, acs_overlap_area ,acs_overlap_area/wfc3_area*100)
dummy = """
wf = 147.3 + 122.2 + 121.9 + 114.0
ac = 134.6 + 112.7 + 102.4 + 102.8
print ac/wf*100.
"""
#
if yticklab is not None:
ax.set_xticklabels(xticklab)
xtick = ax.set_xticks(xtickv)
ax.set_yticklabels(yticklab)
ytick = ax.set_yticks(ytickv)
ax.set_xlabel(r'$\alpha$')
ax.set_ylabel(r'$\delta$')
fsi = '20'
if ROOT == 'GOODS-SOUTH':
field_label = 'GOODS-S'
else:
field_label = ROOT
if corner=='lr':
ax.text(0.95, 0.05,r'$\mathit{%s}$' %(field_label),
horizontalalignment='right',
verticalalignment='bottom',
transform = ax.transAxes, fontsize=fsi)
#
if corner=='ll':
ax.text(0.05, 0.05,r'$\mathit{%s}$' %(field_label),
horizontalalignment='left',
verticalalignment='bottom',
transform = ax.transAxes, fontsize=fsi)
#
if corner=='ur':
ax.text(0.95, 0.95,r'$\mathit{%s}$' %(field_label),
horizontalalignment='right',
verticalalignment='top',
transform = ax.transAxes, fontsize=fsi)
#
if corner=='ul':
ax.text(0.05, 0.95,r'$\mathit{%s}$' %(field_label),
horizontalalignment='left',
verticalalignment='top',
transform = ax.transAxes, fontsize=fsi)
ax.set_xlim(x0, x1)
ax.set_ylim(y0, y1)
# print 'RA - ', hexagesimal(x0), hexagesimal(x1)
# print 'Dec - ', hexagesimal(y0, hours=False), hexagesimal(y1, hours=False)
print 'RA - ', hexagesimal(xavg)
print 'Dec - ', hexagesimal(yavg, hours=False)
print 'Area: %.1f\n' %(total_area*3600.)
tag = ''
if not show_acs:
tag += '_noacs'
if not show_wfc3:
tag += '_nowfc3'
if show_sn_fields:
tag += '_sn'
if use_status:
plt.savefig('%s_pointings_status%s.pdf' %(ROOT, tag))
else:
plt.savefig('%s_pointings%s.pdf' %(ROOT, tag))
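#### Illustrative sketch of the area bookkeeping used in `pointings` (example
#### helper only): union a list of shapely polygons in RA/Dec degrees, apply
#### the cos(Dec) correction to the naive RA x Dec area, and convert deg**2 to
#### arcmin**2.  `polys` would come from polysplit(..., get_shapely=True) and
#### `dec0` is a reference declination in degrees.
def example_field_area(polys, dec0):
    un = polys[0]
    for pp in polys[1:]:
        un = un.union(pp)
    return un.area * np.cos(dec0/360.*2*np.pi) * 3600.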
def get_UDF_center():
file='/research/HST/GRISM/3DHST/REGIONS/GOODS-SOUTH-38.reg'
field = re.split('-[0-9]', file)[0]
pointing = file.split(field+'-')[1].split('.reg')[0]
fp = open(file)
lines = fp.readlines()
fp.close()
#
px, py = sup.polysplit(lines[1], get_shapely=False)
print 'UDF: ', hexagesimal(np.mean(px[:-1]), hours=True), hexagesimal(np.mean(py[:-1]), hours=False)
def degrees(deg, min, sec, hours=True):
adeg = np.abs(deg)
degrees = adeg + min/60. + sec/3600.
if deg < 0:
degrees *= -1
if hours:
degrees *= 360./24
return degrees
def hexagesimal(degrees, hours=True, string=True):
if hours:
degrees *= 24/360.
if degrees < 0:
sign = -1
si = '-'
else:
sign = 1
si = ''
degrees = np.abs(degrees)
deg = np.int(degrees)
min = np.int((degrees-deg)*60)
sec = (degrees-deg-min/60.)*3600
if string:
return '%s%02d:%02d:%05.2f' %(si, deg, min, sec)
else:
return sign*deg, min, sec
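#### Quick illustrative round-trip check of the two converters above
#### (example only): 3h32m30s -> 53.125 deg -> '03:32:30.00'.
def example_coordinate_roundtrip():
    ra = degrees(3, 32, 30, hours=True)        # 53.125
    return ra, hexagesimal(ra, hours=True)     # (53.125, '03:32:30.00')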
def polysplit(region='polygon(150.099223,2.391097,150.086084,2.422515,150.050573,2.407277,150.064586,2.376241)', get_shapely=False):
spl = region[region.find('(')+1:region.find(')')].split(',')
px = spl[0::2]
py = spl[1::2]
px.append(px[0])
py.append(py[0])
px, py = np.cast[float](px), np.cast[float](py)
if get_shapely:
from shapely.geometry import Polygon
list = []
for i in range(len(px)):
list.append((px[i], py[i]))
poly = Polygon(tuple(list))
return poly
else:
return px, py
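#### Illustrative usage sketch (example only): parse the default DS9 polygon
#### region string both as coordinate arrays and as a shapely Polygon (the
#### polygon area here is in deg**2 with no cos(Dec) correction applied).
def example_polysplit_usage():
    px, py = polysplit()
    poly = polysplit(get_shapely=True)
    return px, py, poly.area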
#
def demo_background_subtract(root='COSMOS-13'):
"""
Make a figure demonstrating the background subtraction of the grism images
"""
import threedhst
import threedhst.prep_flt_files
import threedhst.grism_sky as bg
import unicorn
import unicorn.survey_paper as sup
import pyfits
path = unicorn.analysis.get_grism_path(root)
os.chdir(path)
if not os.path.exists('EXAMPLE'):
os.system('mkdir EXAMPLE')
os.chdir('EXAMPLE')
files = glob.glob('../PREP_FLT/%s-G141*' %(root))
files.append('../PREP_FLT/%s-F140W_tweak.fits' %(root))
for file in files:
if 'drz.fits' not in file:
os.system('cp %s .' %(file))
print file
threedhst.process_grism.fresh_flt_files(root+'-G141_asn.fits')
asn = threedhst.utils.ASNFile(root+'-G141_asn.fits')
flt = pyfits.open(asn.exposures[0]+'_flt.fits')
angle = flt[1].header['PA_APER']
#### First run on uncorrected images
threedhst.prep_flt_files.startMultidrizzle(root+'-G141_asn.fits',
use_shiftfile=True, skysub=False,
final_scale=0.128254, pixfrac=0.8, driz_cr=True,
updatewcs=True, median=True, clean=True, final_rot=angle)
os.system('mv %s-G141_drz.fits %s-G141_drz_first.fits' %(root, root))
sup.root = root
sup.first_prof = []
for exp in asn.exposures:
xp, yp = threedhst.grism_sky.profile(exp+'_flt.fits', flatcorr=False, biweight=True)
sup.first_prof.append(yp)
# for i,exp in enumerate(asn.exposures):
# plt.plot(xp, prof[i])
#### Now divide by the flat
threedhst.process_grism.fresh_flt_files(root+'-G141_asn.fits')
sup.flat_prof = []
for exp in asn.exposures:
bg.remove_grism_sky(flt=exp+'_flt.fits', list=['sky_cosmos.fits', 'sky_goodsn_lo.fits', 'sky_goodsn_hi.fits', 'sky_goodsn_vhi.fits'], path_to_sky = '../CONF/', out_path='./', verbose=False, plot=False, flat_correct=True, sky_subtract=False, second_pass=False, overall=False)
xp, yp = threedhst.grism_sky.profile(exp+'_flt.fits', flatcorr=False, biweight=True)
sup.flat_prof.append(yp)
threedhst.prep_flt_files.startMultidrizzle(root+'-G141_asn.fits',
use_shiftfile=True, skysub=False,
final_scale=0.128254, pixfrac=0.8, driz_cr=True,
updatewcs=True, median=True, clean=True, final_rot=angle)
os.system('mv %s-G141_drz.fits %s-G141_drz_flat.fits' %(root, root))
#### Divide by the sky
threedhst.process_grism.fresh_flt_files(root+'-G141_asn.fits')
sup.sky_prof = []
for exp in asn.exposures:
print exp
bg.remove_grism_sky(flt=exp+'_flt.fits', list=['sky_cosmos.fits', 'sky_goodsn_lo.fits', 'sky_goodsn_hi.fits', 'sky_goodsn_vhi.fits'], path_to_sky = '../CONF/', out_path='./', verbose=False, plot=False, flat_correct=True, sky_subtract=True, second_pass=False, overall=False)
xp, yp = threedhst.grism_sky.profile(exp+'_flt.fits', flatcorr=False, biweight=True)
sup.sky_prof.append(yp)
threedhst.prep_flt_files.startMultidrizzle(root+'-G141_asn.fits',
use_shiftfile=True, skysub=False,
final_scale=0.128254, pixfrac=0.8, driz_cr=True,
updatewcs=True, median=True, clean=True, final_rot=angle)
os.system('mv %s-G141_drz.fits %s-G141_drz_sky.fits' %(root, root))
#### Last pass along columns
threedhst.process_grism.fresh_flt_files(root+'-G141_asn.fits')
sup.final_prof = []
for exp in asn.exposures:
print exp
bg.remove_grism_sky(flt=exp+'_flt.fits', list=['sky_cosmos.fits', 'sky_goodsn_lo.fits', 'sky_goodsn_hi.fits', 'sky_goodsn_vhi.fits'], path_to_sky = '../CONF/', out_path='./', verbose=False, plot=False, flat_correct=True, sky_subtract=True, second_pass=True, overall=True)
xp, yp = threedhst.grism_sky.profile(exp+'_flt.fits', flatcorr=False, biweight=True)
sup.final_prof.append(yp)
threedhst.prep_flt_files.startMultidrizzle(root+'-G141_asn.fits',
use_shiftfile=True, skysub=False,
final_scale=0.128254, pixfrac=0.8, driz_cr=True,
updatewcs=True, median=True, clean=True, final_rot=angle)
# ##### Make segmentation images
# run = threedhst.prep_flt_files.MultidrizzleRun(root+'-G141')
# for i,exp in enumerate(asn.exposures):
# run.blot_back(ii=i, copy_new=(i is 0))
# threedhst.prep_flt_files.make_segmap(run.flt[i])
os.system('mv %s-G141_drz.fits %s-G141_drz_final.fits' %(root, root))
make_background_demo(root=root)
def make_background_demo(root='AEGIS-11', range1=(0.90,1.08), range2=(-0.02, 0.02)):
import unicorn.survey_paper as sup
if sup.root != root:
print "Need to run sup.demo_background_subtract(root='%s')." %(root)
path = unicorn.analysis.get_grism_path(root)
os.chdir(path+'EXAMPLE')
first = pyfits.open('%s-G141_drz_first.fits' %(root))
flat = pyfits.open('%s-G141_drz_flat.fits' %(root))
sky = pyfits.open('%s-G141_drz_sky.fits' %(root))
final = pyfits.open('%s-G141_drz_final.fits' %(root))
im_shape = first[1].data.shape
sup.im_shape = im_shape
med = threedhst.utils.biweight(flat[1].data, mean=True)
ysize = 3.
#fig = plt.figure(figsize=[ysize*im_shape[1]*1./im_shape[0]*4,ysize], dpi=100)
top_panel = 0.2
NPANEL = 4
#plt.hot()
plt.gray()
plt.close()
plt.rcParams['image.origin'] = 'lower'
plt.rcParams['image.interpolation'] = 'nearest'
if USE_PLOT_GUI:
fig = plt.figure(figsize=[ysize*im_shape[1]*1./im_shape[0]*NPANEL*(1-top_panel)/2,ysize*2],dpi=100)
else:
fig = Figure(figsize=[ysize*im_shape[1]*1./im_shape[0]*NPANEL*(1-top_panel)/2.,ysize*2], dpi=100)
fig.subplots_adjust(wspace=0.02,hspace=0.02,left=0.02,
bottom=0.07,right=0.99,top=0.97)
#
plt.rcParams['lines.linewidth'] = 1
vmin, vmax = -0.15, 0.075
vmin, vmax= -0.08, 0.08
x0 = 0.005*2
y0 = x0/2.
dx = (1.-(NPANEL+1)*x0)/NPANEL*2
top_panel/=2.
#ax = fig.add_subplot(141)
ax = fig.add_axes(((x0+(dx+x0)*0), y0+0.5, dx, 0.5-top_panel-y0))
ax.imshow((first[1].data-threedhst.utils.biweight(first[1].data, mean=True)), interpolation='nearest',aspect='auto',vmin=vmin-0.1*0,vmax=vmax+0.15*0)
sup.axis_imshow(ax, text='a)\ Raw')
ax.text(0.12, 0.85, r'$\mathrm{%s}$' %(root), horizontalalignment='left', verticalalignment='center',
transform = ax.transAxes, color='black', fontsize=14)
#
#show_limits(ax, -(vmax+0.15)+med, -(vmin-0.1)+med)
#### Show profiles
ax = fig.add_axes(((x0+(dx+x0)*0), (0.5-top_panel)+0.5, dx, top_panel-2*y0))
pp = sup.first_prof[0]*0.
for i in range(4):
#ax.plot(sup.first_prof[i])
pp += sup.first_prof[i]
ax.plot(pp/4., color='black')
sup.axis_profile(ax, yrange=range1, text='a)\ Raw')
#ax = fig.add_subplot(142)
ax = fig.add_axes(((x0+(dx+x0)*1)+x0, y0+0.5, dx, 0.5-top_panel-y0))
ax.imshow((flat[1].data-med), interpolation='nearest',aspect='auto',vmin=vmin,vmax=vmax)
sup.axis_imshow(ax, text='b)\ Flat')
#show_limits(ax, -vmax+med, -vmin+med)
#### Show profiles
ax = fig.add_axes(((x0+(dx+x0)*1)+x0, (0.5-top_panel)+0.5, dx, top_panel-2*y0))
pp = sup.flat_prof[0]*0.
for i in range(4):
#ax.plot(sup.flat_prof[i])
pp += sup.flat_prof[i]
ax.plot(pp/4.+1, color='black')
sup.axis_profile(ax, yrange=range1, text='b)\ Flat')
###########
#ax = fig.add_subplot(143)
ax = fig.add_axes(((x0+(dx+x0)*0), y0, dx, 0.5-top_panel-y0))
ax.imshow((sky[1].data-med), interpolation='nearest',aspect='auto',vmin=vmin,vmax=vmax)
sup.axis_imshow(ax, text='c)\ Background')
#show_limits(ax, -vmax+med, -vmin+med)
#### Show profiles
ax = fig.add_axes(((x0+(dx+x0)*0), (0.5-top_panel), dx, top_panel-2*y0))
pp = sup.sky_prof[0]*0.
for i in range(4):
#ax.plot(sup.sky_prof[i])
pp += sup.sky_prof[i]
ax.plot(pp/4., color='black')
sup.axis_profile(ax, yrange=range2, text='c)\ Background')
#ax = fig.add_subplot(144)
ax = fig.add_axes(((x0+(dx+x0)*1)+x0, y0, dx, 0.5-top_panel-y0))
ax.imshow((final[1].data), interpolation='nearest',aspect='auto',vmin=vmin,vmax=vmax)
sup.axis_imshow(ax, text='d)\ Final')
#show_limits(ax, -vmax, -vmin)
#### Show profiles
ax = fig.add_axes(((x0+(dx+x0)*1)+x0, (0.5-top_panel), dx, top_panel-2*y0))
pp = sup.final_prof[0]*0.
for i in range(4):
#ax.plot(sup.final_prof[i])
pp += sup.final_prof[i]
ax.plot(pp/4., color='black')
sup.axis_profile(ax, yrange=range2, text='d)\ Final')
outfile = '%s-G141_demo.pdf' %(root)
if USE_PLOT_GUI:
fig.savefig(outfile,dpi=100,transparent=False)
else:
canvas = FigureCanvasAgg(fig)
canvas.print_figure(outfile, dpi=100, transparent=False)
def show_limits(ax, vmin, vmax):
ax.text(0.98, -0.02,r'$\mathrm{[%.1f,\ %.1f]}$' %(vmin, vmax),
horizontalalignment='right',
verticalalignment='top',
transform = ax.transAxes, color='black', fontsize=12)
def axis_profile(ax, yrange=None, prof=None, text=''):
ax.set_yticklabels([])
ax.set_xticklabels([])
ax.set_xlim(0,1000)
if yrange is not None:
ax.set_ylim(yrange[0], yrange[1])
ylimits = yrange
else:
ylimits = ax.get_ylim()
#
    if text != '':
ax.text(0.5, 0.06,r'$\mathrm{'+text+'}$',
horizontalalignment='center',
verticalalignment='bottom',
transform = ax.transAxes, color='black', fontsize=10)
ax.text(0.98, 0.08,r'$\mathrm{[%.2f,\ %.2f]}$' %(ylimits[0], ylimits[1]),
horizontalalignment='right',
verticalalignment='bottom',
transform = ax.transAxes, color='black', fontsize=8)
def axis_imshow(ax, text='', shape=None):
import numpy as np
import unicorn.survey_paper as sup
if shape is None:
shape = sup.im_shape
ax.set_yticklabels([])
xtick = ax.set_xticks([0,shape[1]])
ax.set_xticklabels([])
ytick = ax.set_yticks([0,shape[0]])
#
# if text is not '':
# ax.text(0.5, 1.02,r'$\mathrm{'+text+'}$',
# horizontalalignment='center',
# verticalalignment='bottom',
# transform = ax.transAxes, color='black', fontsize=12)
def compare_sky():
"""
Make a figure showing the aXe default and derived sky images.
"""
import unicorn.survey_paper as sup
sco = pyfits.open('../CONF/sky_cosmos.fits')
shi = pyfits.open('../CONF/sky_goodsn_hi.fits')
slo = pyfits.open('../CONF/sky_goodsn_lo.fits')
svh = pyfits.open('../CONF/sky_goodsn_vhi.fits')
shape = sco[0].data.shape
figsize = 6
fig = Figure(figsize=[figsize,figsize], dpi=200)
#fig = plt.figure(figsize=[figsize,figsize], dpi=100)
fig.subplots_adjust(wspace=0.02,hspace=0.02,left=0.02,
bottom=0.02,right=0.98,top=0.98)
####### COSMOS
ax = fig.add_subplot(221)
ax.imshow(sco[0].data, interpolation='nearest',aspect='auto',vmin=0.95,vmax=1.05)
sup.axis_imshow(ax, shape=shape)
ax.fill_between([850,950],[50,50],[150,150], color='white', alpha=0.8)
ax.text(900/1014., 100./1014,r'$\mathrm{a)}$', horizontalalignment='center', verticalalignment='center', transform = ax.transAxes, color='black', fontsize=12)
####### GOODS-N Lo
ax = fig.add_subplot(222)
ax.imshow(slo[0].data, interpolation='nearest',aspect='auto',vmin=0.95,vmax=1.05)
sup.axis_imshow(ax, shape=shape)
ax.fill_between([850,950],[50,50],[150,150], color='white', alpha=0.8)
ax.text(900/1014., 100./1014,r'$\mathrm{b)}$', horizontalalignment='center', verticalalignment='center', transform = ax.transAxes, color='black', fontsize=12)
####### GOODS-N Hi
ax = fig.add_subplot(223)
ax.imshow(shi[0].data, interpolation='nearest',aspect='auto',vmin=0.95,vmax=1.05)
sup.axis_imshow(ax, shape=shape)
ax.fill_between([850,950],[50,50],[150,150], color='white', alpha=0.8)
ax.text(900/1014., 100./1014,r'$\mathrm{c)}$', horizontalalignment='center', verticalalignment='center', transform = ax.transAxes, color='black', fontsize=12)
####### GOODS-N Very hi
ax = fig.add_subplot(224)
ax.imshow(svh[0].data, interpolation='nearest',aspect='auto',vmin=0.95,vmax=1.05)
sup.axis_imshow(ax, shape=shape)
ax.fill_between([850,950],[50,50],[150,150], color='white', alpha=0.8)
ax.text(900/1014., 100./1014,r'$\mathrm{d)}$', horizontalalignment='center', verticalalignment='center', transform = ax.transAxes, color='black', fontsize=12)
#### Done
canvas = FigureCanvasAgg(fig)
canvas.print_figure('sky_backgrounds.pdf', dpi=100, transparent=False)
def axeFlat(flat_file='/research/HST/GRISM/3DHST/CONF/WFC3.IR.G141.flat.2.fits', wave=1.4e4):
"""
Compute the aXe flat-field image at a specified wavelength.
"""
flat = pyfits.open(flat_file)
wmin = flat[0].header['WMIN']
wmax = flat[0].header['WMAX']
x = (wave-wmin)/(wmax-wmin)
img = np.zeros((1014,1014), dtype='float')
for i in range(len(flat)):
img += flat[i].data*x**i
return img
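#### Illustrative sketch (assumes the flat-cube FITS path used by axeFlat
#### exists locally): evaluate the aXe flat field at the blue and red ends of
#### G141 and take the ratio, as in grism_flat_dependence below.
def example_axeFlat_ratio():
    blue = axeFlat(wave=1.1e4)
    red = axeFlat(wave=1.6e4)
    return blue/red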
def get_flat_function(x=507, y=507, wave=np.arange(1.1e4,1.6e4,500), flat_file='/research/HST/GRISM/3DHST/CONF/WFC3.IR.G141.flat.2.fits'):
#wave = np.arange(1.1e4, 1.6e4, .5e3)
flat = pyfits.open(flat_file)
wmin = flat[0].header['WMIN']
wmax = flat[0].header['WMAX']
xx = (wave-wmin)/(wmax-wmin)
flat_func = xx*0.
for i in range(len(flat)):
flat_func += flat[i].data[y,x]*xx**i
return flat_func
def show_flat_function():
wave = np.arange(1.05e4, 1.7e4, 250.)
color='blue'
for xi in range(50,951,50):
print unicorn.noNewLine+'%d' %(xi)
for yi in range(50,951,50):
ffunc = unicorn.survey_paper.get_flat_function(x=xi, y=yi, wave=wave)
ffunc /= np.interp(1.4e4, wave, ffunc)
p = plt.plot(wave, ffunc , alpha=0.05, color=color)
def grism_flat_dependence():
"""
Compute the higher order terms for the grism flat-field
"""
import unicorn
import threedhst
# f140 = threedhst.grism_sky.flat_f140[1].data[5:-5, 5:-5]
#
# flat = pyfits.open(unicorn.GRISM_HOME+'CONF/WFC3.IR.G141.flat.2.fits')
# wmin, wmax = flat[0].header['WMIN'], flat[0].header['WMAX']
#
# a0 = flat[0].data
#
# lam = 1.1e4
# x = (lam-wmin)/(wmax-wmin)
#
# aX = a0*0.
# for i,ext in enumerate(flat[1:]):
# print i
# aX += ext.data*x**(i+1)
f105 = pyfits.open(os.getenv('iref')+'/uc72113oi_pfl.fits')[1].data[5:-5,5:-5]
#f105 = pyfits.open(os.getenv('iref')+'/uc72113ni_pfl.fits')[1].data[5:-5,5:-5] # F098M
f140 = pyfits.open(os.getenv('iref')+'/uc721143i_pfl.fits')[1].data[5:-5,5:-5]
yi, xi= np.indices(f140.shape)
death_star = (f140 < 0.65) & (xi < 390) & (yi < 80) & (xi > 330) & (yi > 30)
REF = 'F140W'
f160 = pyfits.open(os.getenv('iref')+'/uc721145i_pfl.fits')[1].data[5:-5,5:-5]
#### Narrow bands
#f140 = pyfits.open(os.getenv('iref')+'/PFL/uc72113si_pfl.fits')[1].data[5:-5,5:-5]
#REF = 'F127M'
#f140 = pyfits.open(os.getenv('iref')+'/PFL/uc721140i_pfl.fits')[1].data[5:-5,5:-5]
#f160 = pyfits.open(os.getenv('iref')+'/PFL/uc721146i_pfl.fits')[1].data[5:-5,5:-5]
plt.rcParams['patch.edgecolor'] = 'None'
#plt.rcParams['font.size'] = 12
plt.rcParams['image.origin'] = 'lower'
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['text.usetex'] = True
plt.rcParams['font.family'] = 'serif'
plt.rcParams['font.serif'] = 'Times'
xs = 8
fig = plt.figure(figsize=(xs,xs/3.), dpi=100)
fig.subplots_adjust(wspace=0.01,hspace=0.01,left=0.01, bottom=0.01,right=0.99,top=0.99)
vmin, vmax = 0.95, 1.05
#vmin, vmax = 0.9, 1.1
### put scale within the box
NX = 100
y0, y1 = 1014-1.5*NX, 1014-1*NX
y0 -= NX; y1 -= NX
### F140W flat
textbbox = dict(facecolor='white', alpha=0.6, edgecolor='white')
## correct for pixel area map
PIXEL_AREA = True
if PIXEL_AREA:
pam = pyfits.open(os.getenv('iref')+'/ir_wfc3_map.fits')[1].data
else:
pam = np.ones((1014,1014),dtype='float')
ax = fig.add_subplot(131)
ax.imshow(f140/pam, vmin=vmin, vmax=vmax, interpolation='nearest')
ax.text(50,950, REF, verticalalignment='top', fontsize=14, bbox=textbbox)
#ax.text(50,950, REF, verticalalignment='top', fontsize=14)
ax.set_xlim(0,1014)
ax.set_ylim(0,1014)
ax.set_yticklabels([])
ax.set_xticklabels([])
### F105W/F140W, with label
ratio = f105/f140
label = 'F105W / ' + REF
ratio = unicorn.survey_paper.axeFlat(wave=1.1e4)/unicorn.survey_paper.axeFlat(wave=1.6e4)
label = r'aXe 1.1 $\mu$m / 1.6 $\mu$m'
ratio[death_star] = 0.
#### Color bar for label
x0 = 300
# ratio[y0:y1,x0-2.5*NX:x0-1.5*NX] = vmin
# ratio[y0:y1,x0-1.5*NX:x0-0.5*NX] = (vmin+1)/2.
# ratio[y0:y1,x0-0.5*NX:x0+0.5*NX] = 1.
# ratio[y0:y1,x0+0.5*NX:x0+1.5*NX] = (vmax+1)/2.
# ratio[y0:y1,x0+1.5*NX:x0+2.5*NX] = vmax
NSPLIT = 5
NXi = NX*2./NSPLIT
for i in range(1,NSPLIT+1):
#print i,NXi, 1+(vmin-1)*i/NSPLIT, x0-(i-0.5)*NXi
ratio[y0:y1,x0-(i+0.5)*NXi:x0-(i-0.5)*NXi] = 1+(vmin-1)*i/NSPLIT
#
ratio[y0:y1,x0-0.5*NXi:x0+0.5*NXi] = 1
for i in range(1,NSPLIT+1):
#print i,NXi, 1+(vmin-1)*i/NSPLIT, x0-(i-0.5)*NXi
ratio[y0:y1,x0+(i-0.5)*NXi:x0+(i+0.5)*NXi] = 1+(vmax-1)*i/NSPLIT
xbox = np.array([0,1,1,0,0])*NXi
ybox = np.array([0,0,1,1,0])*NX/2
ax = fig.add_subplot(132)
ax.imshow(ratio, vmin=vmin, vmax=vmax, interpolation='nearest')
ax.plot(xbox+x0-0.5*NXi, ybox+y1-0.5*NX, color='0.6', alpha=0.1)
fs = 9
ax.text(x0-2*NX, y0-0.5*NX, '%.2f' %(vmin), horizontalalignment='center', verticalalignment='center', fontsize=fs)
ax.text(x0-0*NX, y0-0.5*NX, '%.2f' %(1), horizontalalignment='center', verticalalignment='center', fontsize=fs)
ax.text(x0+2*NX, y0-0.5*NX, '%.2f' %(vmax), horizontalalignment='center', verticalalignment='center', fontsize=fs)
ax.text(50,950, label, verticalalignment='top', fontsize=14, bbox=textbbox)
ax.set_xlim(0,1014)
ax.set_ylim(0,1014)
ax.set_yticklabels([])
ax.set_xticklabels([])
### F160W/F140W
ax = fig.add_subplot(133)
ratio = f160/f140
label = 'F160W / '+REF
ratio = f105/f160
label = 'F105W / F160W'
# ratio = unicorn.survey_paper.axeFlat(wave=1.0552e4)/unicorn.survey_paper.axeFlat(wave=1.392e4)
ratio[death_star] = 0.
ax.imshow(ratio, vmin=vmin, vmax=vmax, interpolation='nearest')
ax.text(50,950, label, verticalalignment='top', fontsize=14,bbox=textbbox)
ax.set_xlim(0,1014)
ax.set_ylim(0,1014)
ax.set_yticklabels([])
ax.set_xticklabels([])
fig.savefig('compare_flats_v2.pdf')
def process_sky_background():
import threedhst.catIO as catIO
import re
info = catIO.Readfile('/research/HST/GRISM/3DHST/ANALYSIS/SURVEY_PAPER/sky_background.dat')
field = []
for targ in info.targname:
targ = targ.replace('GNGRISM','GOODS-NORTH-')
field.append(re.split('-[1-9]',targ)[0].upper())
field = np.array(field)
is_UDF = field == 'xxx'
for i,targ in enumerate(info.targname):
m = re.match('GOODS-SOUTH-3[4678]',targ)
if m is not None:
is_UDF[i] = True
fields = ['AEGIS','COSMOS','GOODS-SOUTH','GOODS-NORTH','UDS']
colors = ['red','blue','green','purple','orange']
for i in range(len(fields)):
match = (field == fields[i]) & (info.filter == 'G141')
h = plt.hist(info.bg_mean[match], range=(0,5), bins=50, color=colors[i], alpha=0.5)
bg = threedhst.utils.biweight(info.bg_mean[match], both=True)
print '%-14s %.3f %.3f ' %(fields[i], bg[0], bg[1])
match = is_UDF & (info.filter == 'G141')
bg = threedhst.utils.biweight(info.bg_mean[match], both=True)
print '%-14s %.3f %.3f ' %('HUDF09', bg[0], bg[1])
plt.xlim(0,3.5)
def get_background_level():
"""
    Get the sky background levels from the raw FLT images, masking detected objects and flagged DQ (data quality) pixels
"""
xi, yi = np.indices((1014, 1014))
DPIX = 300
flat = pyfits.open(os.getenv('iref')+'uc721143i_pfl.fits')[1].data[5:-5,5:-5]
fp = open(unicorn.GRISM_HOME+'ANALYSIS/sky_background.dat','w')
fp.write('# file targname filter date_obs bg_mean bg_sigma\n')
for path in ['AEGIS','COSMOS','GOODS-S','GOODS-N','UDS']:
os.chdir(unicorn.GRISM_HOME+path+'/PREP_FLT')
info = catIO.Readfile('files.info')
print '\n\n%s\n\n' %(path)
for ii, file in enumerate(info.file):
file = file.replace('.gz','')
print file
#
try:
flt_raw = pyfits.open(threedhst.utils.find_fits_gz('../RAW/'+file))
flt_fix = pyfits.open(threedhst.utils.find_fits_gz(file))
seg = pyfits.open(threedhst.utils.find_fits_gz(file.replace('fits','seg.fits')))
except:
continue
#
mask = (seg[0].data == 0) & ((flt_fix[3].data & (4+32+16+512+2048+4096)) == 0) & (np.abs(xi-507) < DPIX) & (np.abs(yi-507) < DPIX)
#
if info.filter[ii].startswith('G'):
flt_raw[1].data /= flat
#
background = threedhst.utils.biweight(flt_raw[1].data[mask], both=True)
fp.write('%s %16s %5s %s %.3f %.3f\n' %(file, info.targname[ii], info.filter[ii], info.date_obs[ii], background[0], background[1]))
fp.close()
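#### Illustrative sketch of the clean-pixel selection used above (example
#### helper only): keep pixels with no SExtractor segmentation, none of the
#### flagged DQ bits set, and within DPIX pixels of the detector center.
def example_clean_pixel_mask(seg_data, dq_data, xi, yi, DPIX=300):
    bits = 4+32+16+512+2048+4096
    return (seg_data == 0) & ((dq_data & bits) == 0) & \
           (np.abs(xi-507) < DPIX) & (np.abs(yi-507) < DPIX)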
def get_spec_signal_to_noise():
"""
Measure the S/N directly from the spectrum of all objects.
"""
fp = open(unicorn.GRISM_HOME+'ANALYSIS/spec_signal_to_noise.dat','w')
fp.write('# object mag_auto flux_radius sig_noise n_bins\n')
for path in ['AEGIS','COSMOS','GOODS-S','GOODS-N','UDS']:
os.chdir(unicorn.GRISM_HOME+path)
SPC_files = glob.glob('DRIZZLE_G141/*opt.SPC.fits')
for file in SPC_files:
pointing = os.path.basename(file).split('_2_opt')[0]
#
SPC = threedhst.plotting.SPCFile(file,axe_drizzle_dir='./')
cat = threedhst.sex.mySexCat('DATA/%s_drz.cat' %(pointing))
try:
mag_auto = np.cast[float](cat.MAG_F1392W)
flux_radius = np.cast[float](cat.FLUX_RADIUS)
except:
continue
#
for id in SPC._ext_map:
print unicorn.noNewLine+'%s_%05d' %(pointing, id)
#
spec = SPC.getSpec(id)
mask = (spec.LAMBDA > 1.15e4) & (spec.LAMBDA < 1.6e4) & (spec.CONTAM/spec.FLUX < 0.1) & np.isfinite(spec.FLUX)
if len(mask[mask]) > 1:
signal_noise = threedhst.utils.biweight(spec.FLUX[mask]/spec.FERROR[mask], mean=True)
mat = cat.id == id
fp.write('%s_%05d %8.3f %8.3f %13.3e %-0d\n' %(pointing, id, mag_auto[mat][0], flux_radius[mat][0], signal_noise, len(mask[mask])))
#
else:
continue
fp.close()
def process_signal_to_noise():
import threedhst.catIO as catIO
import re
os.chdir('/research/HST/GRISM/3DHST/ANALYSIS/SURVEY_PAPER/')
info = catIO.Readfile('spec_signal_to_noise.dat')
field = []
for targ in info.object:
targ = targ.replace('GNGRISM','GOODS-NORTH-')
field.append(re.split('-[1-9]',targ)[0].upper())
field = np.array(field)
snscale = 2.5 ### aXe errors too large
ma = 'o'
ms = 5
##### S/N vs mag.
plt.rcParams['text.usetex'] = True
plt.rcParams['font.family'] = 'serif'
plt.rcParams['font.serif'] = 'Times'
fig = unicorn.catalogs.plot_init(square=True, xs=5, aspect=1, left=0.10)
fig.subplots_adjust(wspace=0.2,hspace=0.24,left=0.09, bottom=0.08,right=0.975,top=0.99)
ax = fig.add_subplot(211)
ff = (field == 'COSMOS') & (info.sig_noise > 0)
ax.plot(info.mag_auto[ff], info.sig_noise[ff]*snscale, marker=ma, markersize=ms, color='red', alpha=0.1, linestyle='None')
xm, ym, ys, ns = threedhst.utils.runmed(info.mag_auto[ff], info.sig_noise[ff]*snscale, NBIN=30)
ff = (field == 'AEGIS') & (info.sig_noise > 0)
ax.plot(info.mag_auto[ff], info.sig_noise[ff]*snscale, marker=ma, markersize=ms, color='blue', alpha=0.1, linestyle='None')
ax.plot(xm,ym,color='white',linewidth=6,alpha=0.5)
ax.plot(xm,ym,color='red',linewidth=3,alpha=0.9)
xm, ym, ys, ns = threedhst.utils.runmed(info.mag_auto[ff], info.sig_noise[ff]*snscale, NBIN=30)
ax.plot(xm,ym,color='white',linewidth=6,alpha=0.5)
ax.plot(xm,ym,color='blue',linewidth=3,alpha=0.9)
ax.semilogy()
ax.plot([12,30],[3,3], linewidth=3, alpha=0.4, color='black', linestyle='--')
ax.set_ylim(0.2,300)
ax.set_xlim(16,25)
ax.set_yticklabels(['1','3','10','100']) #; ax.set_xticklabels([])
ytick = ax.set_yticks([1,3,10,100]) #; xtick = ax.set_xticks([0,NX]);
if plt.rcParams['text.usetex']:
ax.set_xlabel('MAG\_AUTO (F140W)')
else:
ax.set_xlabel('MAG_AUTO (F140W)')
ax.set_ylabel('S / N')
ax.text(16.5,1,'AEGIS',color='blue', fontsize=12)
ax.text(16.5,0.5,'COSMOS',color='red', fontsize=12)
##### S/N vs size for a mag bin
ax = fig.add_subplot(212)
m0, m1 = 22., 22.5
mm = (info.mag_auto > m0) & (info.mag_auto < m1) & (info.sig_noise > 0)
ax.plot(info.flux_radius[mm & (field == 'COSMOS')], info.sig_noise[mm & (field == 'COSMOS')]*snscale, marker=ma, markersize=ms, color='red', alpha=0.3, linestyle='None')
xm, ym, ys, ns = threedhst.utils.runmed(info.flux_radius[mm & (field == 'COSMOS')], info.sig_noise[mm & (field == 'COSMOS')]*snscale, NBIN=5)
ax.plot(info.flux_radius[mm & (field == 'AEGIS')], info.sig_noise[mm & (field == 'AEGIS')]*snscale, marker=ma, markersize=ms, color='blue', alpha=0.3, linestyle='None')
ax.plot(xm,ym,color='white',linewidth=6,alpha=0.5)
ax.plot(xm,ym,color='red',linewidth=3,alpha=0.9)
xm, ym, ys, ns = threedhst.utils.runmed(info.flux_radius[mm & (field == 'AEGIS')], info.sig_noise[mm & (field == 'AEGIS')]*snscale, NBIN=5)
ax.plot(xm,ym,color='white',linewidth=6,alpha=0.5)
ax.plot(xm,ym,color='blue',linewidth=3,alpha=0.9)
#plt.plot(xm,ym/np.sqrt(2),color='blue',linewidth=3,alpha=0.5)
ax.semilogy()
#ax.plot([0,30],[3,3], linewidth=3, alpha=0.5, color='black')
ax.set_ylim(2,15)
ax.set_xlim(1.5,12)
ax.set_yticklabels(['3','5','10']) #; ax.set_xticklabels([])
ytick = ax.set_yticks([3,5,10]) #; xtick = ax.set_xticks([0,NX]);
ax.set_xlabel(r'R$_{50}$ [$0.06^{\prime\prime}$ pix]')
ax.set_ylabel('S / N')
if plt.rcParams['text.usetex']:
ax.text(11.8,13, r'$%.1f < H_{140} < %.1f$' %(m0, m1), horizontalalignment='right', verticalalignment='top')
else:
ax.text(11.8,13, r'%.1f < $m_{140}$ < %.1f' %(m0, m1), horizontalalignment='right', verticalalignment='top')
fig.savefig('spec_signal_to_noise.pdf')
plt.rcParams['text.usetex'] = False
def clash_empty_apertures():
for cluster in ['a2261','a383','macs1149','macs1206','macs2129']:
os.chdir('/Users/gbrammer/CLASH/%s' %(cluster))
images = glob.glob('*drz.fits')
for image in images:
wht = image.replace('drz','wht')
head = pyfits.getheader(image)
zp=-2.5*np.log10(head['PHOTFLAM']) - 21.10 - 5 *np.log10(head['PHOTPLAM']) + 18.6921
unicorn.candels.clash_make_rms_map(image=wht, include_poisson=False)
unicorn.survey_paper.empty_apertures(SCI_IMAGE=image, SCI_EXT=0, WHT_IMAGE=wht.replace('wht','rms'), WHT_EXT=0, aper_params=(0.4/0.065/2.,0.4/0.065/2.+1,2), ZP=zp, make_plot=False, NSIM=1000, MAP_TYPE='MAP_RMS')
#### Sequence of apertures for measuring Beta
for cluster in ['a2261','a383','macs1149','macs1206','macs2129']:
os.chdir('/Users/gbrammer/CLASH/%s' %(cluster))
images = glob.glob('*_f160w*_drz.fits')
for image in images:
wht = image.replace('drz','wht')
head = pyfits.getheader(image)
zp=-2.5*np.log10(head['PHOTFLAM']) - 21.10 - 5 *np.log10(head['PHOTPLAM']) + 18.6921
#unicorn.candels.clash_make_rms_map(image=wht, include_poisson=False)
unicorn.survey_paper.empty_apertures(SCI_IMAGE=image, SCI_EXT=0, WHT_IMAGE=wht.replace('wht','rms'), WHT_EXT=0, aper_params=(0.2/0.065,3.05/0.065,0.2/0.065), ZP=zp, make_plot=False, NSIM=500, MAP_TYPE='MAP_RMS')
for cluster in ['a2261','a383','macs1149','macs1206','macs2129'][:-1]:
os.chdir('/Users/gbrammer/CLASH/%s' %(cluster))
files = glob.glob('*empty.fits')
print '\n--------------\n%s\n--------------\n' %(cluster.center(14))
for file in files:
head = pyfits.getheader(file.replace('_empty',''))
zp=-2.5*np.log10(head['PHOTFLAM']) - 21.10 - 5 *np.log10(head['PHOTPLAM']) + 18.6921
em = pyfits.open(file)
print '%-7s %.2f' %(file.split('_')[5], zp-2.5*np.log10(5*np.std(em[2].data)))
def run_empty_apertures_fields():
import glob
import os
import unicorn
os.chdir(unicorn.GRISM_HOME+'ANALYSIS/EMPTY_APERTURES/')
files = glob.glob('/3DHST/Spectra/Work/COSMOS/PREP_FLT/COSMOS-*-F140W_drz.fits')
files = glob.glob('/3DHST/Spectra/Work/GOODS-N/PREP_FLT/GOODS-N-*-F140W_drz.fits')
for file in files[1:]:
unicorn.survey_paper.empty_apertures(SCI_IMAGE=file, SCI_EXT=1, WHT_IMAGE=file, WHT_EXT=2, aper_params=(1,17,1), NSIM=1000, ZP=26.46, make_plot=True)
def grism_apertures_plot(SHOW_FLUX=False):
test = """
The following derives the correction, R, needed to scale the pixel standard
deviations in the drizzled images with small pixels, to the noise in
"nominal" pixels.
files = glob.glob('/research/HST/GRISM/3DHST/ANALYSIS/SURVEY_PAPER/EMPTY_APERTURES/*pix*empty.fits')
ratio = []
for file in files:
impix = pyfits.open(file)
im = pyfits.open(file.replace('pix',''))
print threedhst.utils.biweight(im[2].data[:,2]) / threedhst.utils.biweight(impix[2].data[:,2])
ratio.append(threedhst.utils.biweight(im[2].data[:,2]) / threedhst.utils.biweight(impix[2].data[:,2]))
print 'R ~ %.2f' %(1./np.mean(ratio))
# Emission line flux
texp, Nexp, R, dark, sky, ee_fraction = 1277, 4, 2, 0.05, 1.5, 0.75
area = np.pi*R**2
total_counts_resel = (xarr+dark)*texp*area*Nexp #
eq1_cts = np.sqrt(total_counts_resel+rn**2*area*Nexp)
eq1_cps = eq1_cts/texp/Nexp
#eq1_flam = eq1_cps/(sens_14um*46.5)
print eq1_cps
"""
os.chdir(unicorn.GRISM_HOME+'ANALYSIS/SURVEY_PAPER')
# files = glob.glob('/research/HST/GRISM/3DHST/ANALYSIS/SURVEY_PAPER/EMPTY_APERTURES_CIRCULAR/*G141_drz_empty.fits')
# aper_use = 0
files = glob.glob('/research/HST/GRISM/3DHST/ANALYSIS/SURVEY_PAPER/EMPTY_APERTURES/*G141_drz_empty.fits')
aper_use = 1
#### Parameters of the plot
lam_int = 1.4e4
SN_show = 5
if SHOW_FLUX:
aper_use = 0
bg = threedhst.catIO.Readfile('/research/HST/GRISM/3DHST/ANALYSIS/SURVEY_PAPER/sky_background.dat')
sens = pyfits.open(unicorn.GRISM_HOME+'CONF/WFC3.IR.G141.1st.sens.2.fits')[1].data
sens_14um = np.interp(lam_int,sens.WAVELENGTH,sens.SENSITIVITY)
colors = {'AEGIS':'red','COSMOS':'blue','UDS':'purple', 'GNGRISM':'orange','GOODSS':'green'}
plt.rcParams['text.usetex'] = True
plt.rcParams['font.family'] = 'serif'
plt.rcParams['font.serif'] = 'Times'
fig = unicorn.catalogs.plot_init(xs=4, left=0.125, bottom=0.085, right=0.01, top=0.01, square=True, fontsize=12)
ax = fig.add_subplot(111)
for file in files:
root = os.path.basename(file).split('-G141')[0].replace('GOODS-N-','GNGRISM').replace('S-S','S-SOUTH')
print root
mat = (bg.targname == root) & (bg.filter == 'G141')
if len(mat[mat]) == 0:
continue
mean_bg = np.mean(bg.bg_mean[mat])
err_bg = np.std(bg.bg_mean[mat])
aps = pyfits.open(file)
#
fluxes = aps[2].data
stats = threedhst.utils.biweight(fluxes, both=True)
sigmas = fluxes[0,:].flatten()*0
for i in range(len(sigmas)):
sigmas[i] = threedhst.utils.biweight(fluxes[:,i])
#sigmas[i] = threedhst.utils.nmad(fluxes[:,i])
#sigmas[i] = np.std(fluxes[:,i])
#sigmas *= 1.34 ### scale factor for correlated pixels, determined empirically
inv_sens_flam = sigmas/(sens_14um*22*4)
field = os.path.basename(file).replace('GOODS-N','GNGRISM').replace('GOODS-S','GOODSS').split('-')[0]
print field, aps[1].data[aper_use]
if SHOW_FLUX:
sig3_flux = inv_sens_flam*SN_show
sig3_flux /= 0.75**2 ### Include for encircled energy within 3 nominal spatial pixels
sig3_ab = sig3_flux*2*46.5/1.e-17
else:
inv_sens_fnu = inv_sens_flam*lam_int**2/3.e18
sig3_flux = inv_sens_fnu*SN_show
sig3_flux /= 0.75 ### Include for encircled energy within 3 nominal spatial pixels
sig3_ab = -2.5*np.log10(sig3_flux)-48.6#-2.5*np.log10(np.sqrt(2))
p = ax.errorbar(mean_bg,sig3_ab[aper_use], xerr=err_bg, marker='o', ms=8, alpha=0.4, color=colors[field], ecolor=colors[field])
xarr = np.arange(0,5,0.02) ### background rate, cts / s
yarr = 22.9-2.5*np.log10(np.sqrt(xarr/2.))
#### Eq. 1 from paper
scale = 0.06/0.128254
Nexp, texp, rn, R, dark = 4, 1277, 20, 2*scale, 0.05
#dark += 0.181/3.
#area = np.pi*R**2
area = 3
ee_fraction = 0.75 # for 3 pix aperture in spatial direction, nominal pixels
total_counts_resel = (xarr+dark)*texp*area*2*Nexp #
eq1_cts = np.sqrt(total_counts_resel+rn**2*area*Nexp)
eq1_cps = eq1_cts/texp/Nexp/ee_fraction/2
eq1_flam = eq1_cps/(sens_14um*46.5)
eq1_fnu = eq1_flam*lam_int**2/3.e18
eq1_ab = -2.5*np.log10(eq1_fnu*SN_show)-48.6#-2.5*np.log10(np.sqrt(0.5))
plt.plot(xarr, eq1_ab+2.5*np.log10(1.35), color='black', alpha=0.4, linewidth=2, linestyle='--')
plt.plot(xarr, eq1_ab, color='black', alpha=0.4, linewidth=2)
plt.text(1.,24.21-2.5*np.log10(SN_show/3.),'ETC', rotation=-44, color='black', alpha=0.4,horizontalalignment='center')
plt.arrow(1.5, 24.1-2.5*np.log10(SN_show/3.), 0, 0.1, color='0.6', alpha=1, fill=True, width=0.02, head_width=0.06, head_length=0.02, overhang=0.05)
plt.text(1.5+0.05, 24.1-2.5*np.log10(SN_show/3.)+0.05,r'$R_\mathrm{driz}$', verticalalignment='center', alpha=0.4)
#plt.plot(xarr, eq1_ab-2.5*np.log10(np.sqrt(2*46.5/22.5)), color='purple', linewidth=2)
### Predict source counts
mag = 23
source_fnu = 10**(-0.4*(mag+48.6))
source_flam = source_fnu/lam_int**2*3.e18
source_cps = source_flam*sens_14um*46.5*ee_fraction
source_counts = source_cps*Nexp*texp*2
print np.interp(1.88,xarr,total_counts_resel), np.interp(1.88,xarr,np.sqrt(total_counts_resel+rn**2*area*Nexp)), np.interp(1.88,xarr,eq1_ab), source_counts, source_counts / np.interp(1.88,xarr,np.sqrt(total_counts_resel+rn**2*area*Nexp))
#ax.plot(xarr, yarr, color='black', alpha=0.2, linewidth=3)
ax.set_xlim(0.6,3.2)
if SHOW_FLUX:
ax.set_ylim(1,5.1)
ax.set_ylabel(r'$%0d\sigma$ emission line sensitivity ($10^{-17}\,\mathrm{erg\,s^{-1}\,cm^{-2}}$)' %(SN_show))
#ax.semilogy()
else:
ax.set_ylim(23.8-2.5*np.log10(SN_show/3.),25.0-2.5*np.log10(SN_show/3.))
ax.set_ylabel(r'$%0d\sigma$ continuum depth (1.4$\mu$m, $\Delta=92\,$\AA)' %(SN_show))
ax.set_xlabel(r'Background level [electrons / s]')
#ax.set_ylabel(r'$3\sigma$ continuum depth @ 1.4$\mu$m, $D_\mathrm{ap}=0.24^{\prime\prime}/\,90\,$\AA')
x0, y0, dy = 3, 24.8-2.5*np.log10(SN_show/3.), 0.1
for i,field in enumerate(colors.keys()):
field_txt = field.replace('GNGRISM','GOODS-N').replace('GOODSS','GOODS-S')
ax.text(x0, y0-i*dy, field_txt, color=colors[field], horizontalalignment='right')
plt.savefig('grism_empty_apertures.pdf')
plt.close()
def grism_empty_apertures():
"""
Run the simple empty-apertures routine to measure the depth of the grism exposures,
for comparison with the values measured directly from the spectra.
"""
unicorn.survey_paper.empty_apertures(SCI_IMAGE='GOODS-S-34-G141_drz.fits', WHT_IMAGE='GOODS-S-34-G141_drz.fits', aper_params=(2,8.1,2), NSIM=500, ZP=25, make_plot=False, verbose=True, threshold=0.8, is_grism=True)
for field in ['AEGIS','GOODS-S','UDS']: #,'COSMOS','GOODS-N']:
os.chdir(unicorn.GRISM_HOME+field+'/PREP_FLT/')
images = glob.glob(field+'*[0-9]-G141_drz.fits')
print images
for image in images[1:]:
unicorn.survey_paper.empty_apertures(SCI_IMAGE=image, WHT_IMAGE=image, aper_params=(2,8.1,2), NSIM=1000, ZP=25, make_plot=False, verbose=True, threshold=0.8, is_grism=True, rectangle_apertures = [(4,4),(2,6),(4,6)])
#
#### Make a version with nominal pixels for testing
threedhst.shifts.make_grism_shiftfile(image.replace('drz','asn').replace('G141','F140W'), image.replace('drz','asn'))
threedhst.utils.combine_asn_shifts([image.replace('drz','asn')], out_root=image.split('_drz')[0]+'pix')
threedhst.prep_flt_files.startMultidrizzle(image.split('_drz')[0] +'pix_asn.fits',
use_shiftfile=True, skysub=False,
final_scale=0.128254, pixfrac=0.01, driz_cr=False,
updatewcs=False, clean=True, median=False)
new = image.replace('G141','G141pix')
unicorn.survey_paper.empty_apertures(SCI_IMAGE=new, WHT_IMAGE=new, aper_params=(2,8.1,2), NSIM=500, ZP=25, make_plot=False, verbose=True, threshold=1.0, is_grism=True, rectangle_apertures = [(2,2),(1,3),(2,3),(4,4)])
aps = pyfits.open('GOODS-S-34-G141_drz_empty.fits')
fluxes = aps[2].data.flatten()
stats = threedhst.utils.biweight(fluxes, both=True)
sens = pyfits.open('../../CONF/WFC3.IR.G141.1st.sens.2.fits')[1].data
wave = sens.WAVELENGTH
inv_sens_flam = 1./(sens.SENSITIVITY*0.06/0.128254*46.5)
inv_sens_fnu = inv_sens_flam*wave**2/3.e18
sig3 = inv_sens_fnu*3*stats[1]
sig3_ab = -2.5*np.log10(sig3)-48.6
plt.plot(wave, sig3_ab)
mag = sig3_ab*0+23
input_fnu = 10**(-0.4*(mag+48.6))
input_flam = input_fnu*3.e18/wave**2
input_counts = input_flam * sens.SENSITIVITY * 46.5
#plt.plot(sens.WAVELENGTH, sens.SENSITIVITY)
def empty_apertures(SCI_IMAGE='PRIMO_F125W_drz.fits', SCI_EXT=1, WHT_IMAGE='PRIMO_F125W_drz.fits', WHT_EXT=2, aper_params=(1,17,0.5), NSIM=1000, ZP=26.25, make_plot=True, verbose=True, MAP_TYPE='MAP_WEIGHT', threshold=1.5, is_grism=False, rectangle_apertures = None):
"""
1) Run SExtractor on the input image to generate a segmentation map.
2) Place `NSIM` empty apertures on the image, avoiding objects and areas with
zero weight as defined in the `WHT_IMAGE`.
The list of aperture radii used is np.arange(*`aper_params`), unless `rectangle_apertures` is given.
3) Store the results in a FITS file `SCI_IMAGE`_empty.fits.
Circular apertures are placed on the science image with the fractional pixel
coverage determined with a polygon approximation, and this can take a while.
"""
from shapely.geometry import Point, Polygon
if SCI_EXT == 0:
SCI_EXT_SEX = 1
else:
SCI_EXT_SEX = SCI_EXT
if WHT_EXT == 0:
WHT_EXT_SEX = 1
else:
WHT_EXT_SEX = WHT_EXT
ROOT = os.path.basename(SCI_IMAGE).split('.fits')[0]
#### Open the science image
if verbose:
print 'Read images...'
img = pyfits.open(SCI_IMAGE)
img_data = img[SCI_EXT].data
img_head = img[SCI_EXT].header
img_shape = img_data.shape
#### Setup SExtractor and run to generate a segmentation image
if is_grism:
threedhst.sex.USE_CONVFILE = 'grism.conv'
else:
threedhst.sex.USE_CONVFILE = 'gauss_4.0_7x7.conv'
se = threedhst.sex.SExtractor()
se.aXeParams()
se.copyConvFile()
se.overwrite = True
se.options['CATALOG_NAME'] = '%s_empty.cat' %(ROOT)
se.options['CHECKIMAGE_NAME'] = '%s_empty_seg.fits' %(ROOT)
se.options['CHECKIMAGE_TYPE'] = 'SEGMENTATION'
if WHT_IMAGE is None:
se.options['WEIGHT_TYPE'] = 'NONE'
img_wht = img_data*0.
img_wht[img_data != 0] = 1
else:
se.options['WEIGHT_TYPE'] = MAP_TYPE
se.options['WEIGHT_IMAGE'] = '%s[%d]' %(WHT_IMAGE, WHT_EXT_SEX-1)
wht = pyfits.open(WHT_IMAGE)
img_wht = wht[WHT_EXT].data
##### Needed for very faint limits
se.options['MEMORY_OBJSTACK'] = '8000'
se.options['MEMORY_PIXSTACK'] = '800000'
se.options['FILTER'] = 'Y'
se.options['DETECT_THRESH'] = '%.2f' %(threshold)
se.options['ANALYSIS_THRESH'] = '%.2f' %(threshold)
se.options['MAG_ZEROPOINT'] = '%.2f' %(ZP) ### arbitrary, actual mags don't matter
status = se.sextractImage('%s[%d]' %(SCI_IMAGE, SCI_EXT_SEX-1))
#### Read the Segmentation image
segim = pyfits.open('%s_empty_seg.fits' %(ROOT))
seg = segim[0].data
segim.close()
#### Set up the apertures
#NSIM = 1000
#apertures = np.arange(1,17,0.5)
apertures = np.arange(aper_params[0], aper_params[1], aper_params[2])
if rectangle_apertures is not None:
IS_RECTANGLE = True
apertures = rectangle_apertures ### list of tuples with (dx,dy) sizes
else:
IS_RECTANGLE = False
fluxes = np.zeros((NSIM, len(apertures)))
centers = np.zeros((NSIM, len(apertures), 2))
#### Loop through the desired apertures and randomly place NSIM of them
aper = np.zeros(img_shape, dtype=np.float)
for iap, ap in enumerate(apertures):
#aper_image = np.zeros(img_shape)
icount = 0
if IS_RECTANGLE:
print 'Aperture %.2f x %.2f pix\n' %(ap[0], ap[1])
rap = (ap[0]/2.,ap[1]/2.)
else:
print 'Aperture radius: %.2f pix\n' %(ap)
rap = (ap, ap)
while icount < NSIM:
#### Random coordinate
xc = np.random.rand()*(img_shape[1]-4*rap[0])+2*rap[0]
yc = np.random.rand()*(img_shape[0]-4*rap[1])+2*rap[1]
#### Quick test to see if the coordinate is within an object or
#### where weight is zero
if (seg[int(yc), int(xc)] != 0) | (img_wht[int(yc), int(xc)] <= 0) | (img_data[int(yc), int(xc)] == 0):
continue
#### Shapely point + buffer to define the circular aperture
if IS_RECTANGLE:
aperture_polygon = Polygon(((xc-ap[0]/2.,yc-ap[1]/2.), (xc+ap[0]/2.,yc-ap[1]/2.), (xc+ap[0]/2.,yc+ap[1]/2.), (xc-ap[0]/2.,yc+ap[1]/2.)))
else:
point = Point(xc, yc)
aperture_polygon = point.buffer(ap, resolution=16)
#### initialize the aperture
aper*=0
#### Loop through pixels to compute fractional pixel coverage within
#### the circular aperture using the intersection of Shapely polygons
smax = 0
wmin = 1.e10
for i in range(int(np.floor(xc-rap[0])),int(np.ceil(xc+rap[0]))):
for j in range(int(np.floor(yc-rap[1])),int(np.ceil(yc+rap[1]))):
pix = Polygon(((i+0.5,j+0.5), (i+1.5,j+0.5), (i+1.5,j+1.5), (i+0.5,j+1.5)))
isect = pix.intersection(aperture_polygon)
aper[j,i] = isect.area
if isect.area > 0:
smax = np.array([smax, seg[j,i]]).max()
wmin = np.array([wmin, img_wht[j,i]]).min()
#### Only keep the result if the aperture doesn't intersect with an object
#### as defined in the segmention image and if all weights within the
#### aperture are greater than zero
if (smax == 0) & (wmin > 0):
fluxes[icount, iap] = (aper*img_data).sum()
centers[icount, iap, : ] = np.array([xc, yc])
#aper_image += aper
print unicorn.noNewLine+'%d' %(icount)
icount += 1
else:
print unicorn.noNewLine+'Skip: %f %f' %((seg*aper).max(), (img_wht*aper).min())
continue
#### Make the output FITS file. List of aperture radii in extension 1, aperture
#### fluxes in extension 2.
ap_head = pyfits.Header()
ap_head.update('NSIM',NSIM, comment='Number of empty apertures per size')
ap_head.update('SCI_IMG',SCI_IMAGE, comment='Science image')
ap_head.update('SCI_EXT',SCI_EXT, comment='Science extension')
ap_head.update('WHT_IMG',WHT_IMAGE, comment='Weight image')
ap_head.update('WHT_EXT',WHT_EXT, comment='Weight extension')
prim = pyfits.PrimaryHDU(header=ap_head)
ap_hdu = pyfits.ImageHDU(data=np.array(apertures))
fl_hdu = pyfits.ImageHDU(data=fluxes)
ce_hdu = pyfits.ImageHDU(data=centers)
pyfits.HDUList([prim, ap_hdu, fl_hdu, ce_hdu]).writeto('%s_empty.fits' %(ROOT), clobber=True)
if make_plot is True:
make_empty_apertures_plot(empty_file='%s_empty.fits' %(ROOT), ZP=ZP)
def make_empty_apertures_plot(empty_file='PRIMO_F125W_drz_empty.fits', ZP=26.25, NSIG=5):
"""
Plot the results from the `empty_apertures` routine.
"""
import matplotlib.pyplot as plt
import numpy as np
from scipy import polyfit, polyval
import threedhst
import unicorn
im = pyfits.open(empty_file)
apertures = im[1].data
fluxes = im[2].data
ROOT = empty_file.split('_empty')[0]
sigma = apertures*0.
means = apertures*0.
for iap, ap in enumerate(apertures):
sigma[iap] = threedhst.utils.biweight(fluxes[:,iap])
means[iap] = threedhst.utils.biweight(fluxes[:,iap], mean=True)
#plt.plot(apertures, means/np.pi/apertures**2)
#plt.ylim(0,1.5*np.max(means/np.pi/apertures**2))
#threedhst.utils.biweight(img_data[(seg == 0) & (img_wht > 0)], both=True)
fig = unicorn.catalogs.plot_init(xs=6, aspect=0.5, left=0.12)
fig.subplots_adjust(wspace=0.24,hspace=0.0,left=0.095,
bottom=0.17,right=0.97,top=0.97)
ax = fig.add_subplot(121)
################################## plot sigma vs radius
ax.plot(apertures, sigma, marker='o', linestyle='None', color='black', alpha=0.8, markersize=5)
coeffs = polyfit(np.log10(apertures), np.log10(sigma), 1)
ax.plot(apertures, 10**polyval(coeffs, np.log10(apertures)), color='red')
xint = 1
y2 = apertures**2
y2 = y2*np.interp(xint,apertures,sigma) / np.interp(xint,apertures,y2)
ax.plot(apertures, y2, linestyle='--', color='black', alpha=0.3)
y1 = apertures**1
y1 = y1*np.interp(xint,apertures,sigma) / np.interp(xint,apertures,y1)
ax.plot(apertures, y1, linestyle='--', color='black', alpha=0.3)
ax.set_xlabel(r'$R_\mathrm{aper}$ [pix]')
ax.set_ylabel(r'$\sigma_\mathrm{biw}$')
#ax.text(apertures.max(), 0.1*sigma.max(), r'$\beta=%.2f$' %(coeffs[0]), horizontalalignment='right')
ax.text(0.08, 0.85, r'$N_\mathrm{ap}=%d$' %(im[0].header['NSIM']), transform=ax.transAxes)
ax.set_ylim(0,2)
################################# Plot AB depth
ax = fig.add_subplot(122)
ax.plot(apertures, ZP-2.5*np.log10(sigma*NSIG), marker='o', linestyle='None', color='black', alpha=0.8, markersize=5)
ax.plot(apertures, ZP-2.5*np.log10(10**polyval(coeffs, np.log10(apertures))*NSIG), color='red')
ax.plot(apertures, ZP-2.5*np.log10(y2*NSIG), linestyle='--', color='black', alpha=0.3)
ax.plot(apertures, ZP-2.5*np.log10(y1*NSIG), linestyle='--', color='black', alpha=0.3)
ax.set_xlabel(r'$R_\mathrm{aper}$ [pix]')
ax.set_ylabel(r'Depth AB mag (%d$\sigma$)' %(NSIG))
ax.text(0.95, 0.9, ROOT, horizontalalignment='right', verticalalignment='top', transform=ax.transAxes)
ax.text(0.95, 0.8, r'$\beta=%.2f$' %(coeffs[0]), horizontalalignment='right', verticalalignment='top', transform=ax.transAxes)
ax.set_ylim(23, 30)
################################## Save the result
outfile = ROOT+'_empty.pdf'
if USE_PLOT_GUI:
fig.savefig(outfile,dpi=100,transparent=False)
else:
canvas = FigureCanvasAgg(fig)
canvas.print_figure(outfile, dpi=100, transparent=False)
print ROOT+'_empty.pdf'
def show_background_flux_distribution():
"""
Extract the SKYSCALE parameter from the G141 FLT images and plot their distribution
by field.
"""
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
# for field in ['AEGIS','COSMOS','GOODS-S','UDS']:
# os.chdir(unicorn.GRISM_HOME+'%s/PREP_FLT' %(field))
# print field
# status = os.system('dfits *flt.fits |fitsort TARGNAME FILTER SKYSCALE |grep G141 > %s_skyscale.dat' %(field))
#
# os.chdir(unicorn.GRISM_HOME)
# status = os.system('cat AEGIS/PREP_FLT/AEGIS_skyscale.dat COSMOS/PREP_FLT/COSMOS_skyscale.dat GOODS-S/PREP_FLT/GOODS-S_skyscale.dat UDS/PREP_FLT/UDS_skyscale.dat > 3DHST_skyscale.dat')
###
os.chdir("/research/HST/GRISM/3DHST/SIMULATIONS")
sky = catIO.Readfile('3DHST_skyscale.dat')
pointings = np.unique(sky.pointing)
colors = {}
colors['UDS'] = 'purple'; colors['COSMOS'] = 'blue'; colors['AEGIS'] = 'red'; colors['GOODS-SOUTH'] = 'green'
off = {}
off['UDS'] = 1
nexp = np.arange(1,5)
fields = []
for pointing in pointings:
this = sky.pointing == pointing
field = '-'.join(pointing.split('-')[:-1])
fields.extend([field]*4)
#print pointing, field
#p = plt.plot(sky.skyscale[this], nexp, marker='o', linestyle='-', alpha=0.5, color=colors[field])
fields = np.array(fields)
plt.ylim(0,5)
fig = unicorn.catalogs.plot_init(xs=3.8, left=0.06, bottom=0.08, use_tex=True, fontsize=10)
ax = fig.add_subplot(111)
for i, field in enumerate(np.unique(fields)[::-1]):
this = fields == field
yh, xh = np.histogram(sky.skyscale[this], range=(0,4), bins=40)
p = ax.plot(xh[1:], yh*1./yh.max()+i*3.5, marker='None', linestyle='steps-', color=colors[field], alpha=0.5, linewidth=3)
#
pointings = np.unique(sky.pointing[fields == field])
for pointing in pointings:
this = sky.pointing == pointing
p = ax.plot(sky.skyscale[this], np.arange(4)/4.*2+1.5+i*3.5, marker='o', linestyle='-', alpha=0.3, color=colors[field], ms=4)
#
ax.text(0.1, i*3.5+1.5, field.replace('SOUTH','S'), horizontalalignment='left')
ax.set_xlim(0,3.4)
ax.yaxis.set_major_locator(MultipleLocator(3.5))
ax.set_yticklabels([])
ax.set_ylabel('Field')
ax.set_xlabel(r'Background per exposure [e$^-$/ s]')
unicorn.catalogs.savefig(fig, 'pointing_backgrounds.pdf')
def eqw_as_fn_mag():
"""
For a given line flux, plot the equivalent width
as a function of magnitude to show the equivalent width sensitivity.
"""
lam = np.arange(1.e4,1.8e4,0.1)
l0 = 1.4e4
dv = 120 # km/s
line = 1./np.sqrt(2*np.pi*(dv/3.e5*l0)**2)*np.exp(-(lam-l0)**2/2/(dv/3.e5*l0)**2)
continuum = lam*0.+line.max()*0.1
line_fnu = line*lam**2/3.e18
continuum_fnu = continuum*lam**2/3.e18
xfilt, yfilt = np.loadtxt(os.getenv('iref')+'/F140W.dat', unpack=True)
yfilt_int = np.interp(lam, xfilt, yfilt) #/np.trapz(yfilt, xfilt)
## filter width
piv = np.sqrt(np.trapz(yfilt*xfilt, xfilt)/np.trapz(yfilt/xfilt, xfilt))
INT, SQRT, LAM, THRU, LN = np.trapz, np.sqrt, xfilt, yfilt, np.log
BARLAM = INT(THRU * LN(LAM) / LAM, LAM) / INT(THRU / LAM, LAM)
BANDW = BARLAM * SQRT(INT(THRU * LN(LAM / BARLAM)**2 / LAM, LAM)) / INT(THRU / LAM, LAM)
barlam = np.trapz(yfilt*np.log(xfilt)/xfilt, xfilt) / np.trapz(yfilt/xfilt, xfilt)
bandw = barlam*np.sqrt(np.trapz(yfilt*np.log(xfilt/barlam)**2/xfilt, xfilt)) / np.trapz(yfilt/xfilt, xfilt)
nu = 3.e8/(lam*1.e-10)
bigL = -np.trapz(line_fnu*yfilt_int, nu)
bigC = -np.trapz(continuum_fnu*yfilt_int, nu)
bigF = -np.trapz(yfilt_int, nu)
bigW = np.trapz(line/continuum, lam)
xfilt_125, yfilt_125 = np.loadtxt(os.getenv('iref')+'/F125W.dat', unpack=True)
yfilt_int_125 = np.interp(lam, xfilt_125, yfilt_125) #/np.trapz(yfilt, xfilt)
bigL_125 = -np.trapz(line_fnu*yfilt_int_125, nu)
bigC_125 = -np.trapz(continuum_fnu*yfilt_int_125, nu)
bigF_125 = -np.trapz(yfilt_int_125, nu)
# integrated line flux
alpha = 5.e-17
### plot trend
mag = np.arange(20, 25.5, 0.05)
fnu = 10**(-0.4*(mag+48.6))
EQW = bigW*bigC / (bigF/alpha*fnu-bigL)
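#### Sketch of the algebra behind the line above: for an observed spectrum
#### alpha*line + c*continuum, the filter-averaged flux density is
####   fnu = (alpha*bigL + c*bigC) / bigF
#### so c = (fnu*bigF - alpha*bigL) / bigC, and the equivalent width is
####   EQW = (alpha/c)*bigW = bigW*bigC / (bigF/alpha*fnu - bigL)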
#plt.plot(mag, EQW)
#
# EQW2 = bigW*bigC*alpha/bigF / (fnu-alpha/bigF*bigL)
# plt.plot(mag, EQW2)
#
# EQW3 = 3.19e-27 / (fnu - 8.04e-31)
# plt.plot(mag, EQW3)
#
# EQW4 = 3.19e-27 / (10**(-0.4*mag)*3.63e-20 - 8.04e-31)
# plt.plot(mag, EQW4)
#
# EQW5 = 8.78e-8*(alpha/5.e-17) / (10**(-0.4*mag)-2.21e-11*(alpha/5.e-17))
# plt.plot(mag, EQW5)
#
# m0 = 23
# EQW6 = 8.78e-8*(alpha/5.e-17) / (10**(-0.4*m0)-2.21e-11*(alpha/5.e-17))
#### above equation reaches a limit when continuum = 0, mag can't be less than
#### that of just the integrated line
line_only = (line+continuum*0)
line_only_fnu = line_only*lam**2/3.e18
fnu_filt = np.trapz(line_only_fnu*yfilt_int, nu) / np.trapz(yfilt_int, nu)
mag_limit = -2.5*np.log10(alpha*fnu_filt)-48.6
mag_limit2 = -2.5*np.log10(alpha/5.e-17)-2.5*np.log10(5.e-17)-2.5*np.log10(fnu_filt)-48.6
mag_limit3 = -2.5*np.log10(alpha/5.e-17)+26.64
### test if test case comes out right
spec_obs = alpha*(line+continuum)
spec_obs_fnu = spec_obs*lam**2/3.e18
fnu_filt = np.trapz(spec_obs_fnu*yfilt_int, nu) / np.trapz(yfilt_int, nu)
mag_obs = -2.5*np.log10(fnu_filt)-48.6
eqw_obs = np.trapz(line/continuum, lam)
#plt.plot([mag_obs,mag_obs], [eqw_obs,eqw_obs], marker='o', ms=15)
#### Make figure
mag = np.arange(20, 26, 0.1)
fnu = 10**(-0.4*(mag+48.6))
#fig = unicorn.catalogs.plot_init(xs=3.8, left=0.10, bottom=0.08, use_tex=True, square=True, fontsize=11)
fig = unicorn.catalogs.plot_init(left=0.11, bottom=0.08, xs=3.8, right=0.09, top=0.01, use_tex=True)
ax = fig.add_subplot(111)
lst = ['-.','-','--',':']
#### Show Arjen's sample
elg = catIO.Readfile('elg_mag.txt')
elg.oiii = (5+15)*1./(5+15+23)*elg.ew_obs_tot
elg.avmag = -2.5*np.log10(0.5*10**(-0.4*elg.j)+0.5*10**(-0.4*elg.h))
ax.scatter(elg.j, elg.oiii, alpha=0.4, s=10, color='red', label=r'van der Wel et al. 2011 ($J_{125}$)')
EQW_125 = bigW*bigC_125 / (bigF_125/5.e-17*fnu-bigL_125)
ax.plot(mag, EQW_125, color='red', alpha=0.5)
for ii, limit in enumerate([1.e-17, 3.e-17, 5.e-17, 1.e-16][::-1]):
EQW = bigW*bigC / (bigF/limit*fnu-bigL)
ll = np.log10(limit)
l0 = np.round(10**(ll-np.floor(ll)))
l10 = np.floor(ll)
ax.plot(mag, EQW, label=r'$f_{\lambda,\mathrm{line}} = %0d\times10^{%0d}$' %(l0,l10), linestyle=lst[ii], color='black')
ax.semilogy()
ax.set_xlabel(r'$m_{140}$')
ax.set_ylabel(r'Equivalent width (\AA)')
ax.set_ylim(1,1.e4)
ax.set_xlim(20,26)
#8.78e-8*(alpha/5.e-17) / (10**(-0.4*mag)-2.21e-11*(alpha/5.e-17))
ax.text(23,1.7,r'$\mathrm{EQW} = \frac{8.78\times10^{-8}\left(f_\mathrm{line}/5\times10^{-17}\right)}{10^{-0.4\,m_{140}}-2.21\times10^{-11}\left(f_\mathrm{line}/5\times10^{-17}\right)}$', horizontalalignment='center')
ax.legend(prop=matplotlib.font_manager.FontProperties(size=8), loc=2, bbox_to_anchor=(0.02,0.98), frameon=False)
unicorn.catalogs.savefig(fig, 'eqw_as_fn_mag.pdf')
#### compute where poisson error of source counts (1.4 um) is similar to sky background error
sky, texp = 1.6, 1200.
var = sky*texp + 20**2
bg_error = np.sqrt(var)/1200.
sens = np.interp(1.4e4, unicorn.reduce.sens_files['A'].field('WAVELENGTH'), unicorn.reduce.sens_files['A'].field('SENSITIVITY'))*46.5
mag = np.arange(14,25,0.1)
fnu = 10**(-0.4*(mag+48.6))
ctrate = fnu*3.e18/1.4e4**2*sens
re = 3 # pix
peak = 1./np.sqrt(2*np.pi*re**2)
poisson = np.sqrt(ctrate*peak*texp)/texp
m_crit = np.interp(bg_error, poisson[::-1], mag[::-1])
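#### i.e. m_crit is the magnitude where the Poisson error on the peak source
#### counts, sqrt(ctrate*peak*texp)/texp, equals the per-pixel sky plus
#### read-noise error sqrt(sky*texp + rn**2)/texp (rn = 20 e- assumed above);
#### fainter than this the background dominates the per-pixel noise.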
#### SIMULATIONS
stats = catIO.Readfile('all_simspec.dat')
plt.scatter(stats.mag, stats.ha_eqw, marker='s', alpha=0.1, s=4)
def make_star_thumbnails():
"""
Extract thumbnails for isolated stars in COSMOS
"""
os.chdir(unicorn.GRISM_HOME+'ANALYSIS/SURVEY_PAPER')
######### Make full COSMOS catalog
file=unicorn.GRISM_HOME+'COSMOS/PREP_FLT/COSMOS-F140W_drz.fits'
ROOT_GRISM = os.path.basename(file).split('_drz.fits')[0]
se = threedhst.sex.SExtractor()
se.aXeParams()
se.copyConvFile()
se.overwrite = True
se.options['CATALOG_NAME'] = ROOT_GRISM+'_drz.cat'
se.options['CHECKIMAGE_NAME'] = ROOT_GRISM+'_seg.fits'
se.options['CHECKIMAGE_TYPE'] = 'SEGMENTATION'
se.options['WEIGHT_TYPE'] = 'MAP_WEIGHT'
se.options['WEIGHT_IMAGE'] = file+'[1]'
se.options['FILTER'] = 'Y'
se.options['DETECT_THRESH'] = '1.4'
se.options['ANALYSIS_THRESH'] = '1.4'
se.options['MAG_ZEROPOINT'] = '26.46'
status = se.sextractImage(file+'[0]', mode='direct')
cat = threedhst.sex.mySexCat('COSMOS-F140W_drz.cat')
mag, radius = np.cast[float](cat.MAG_AUTO), np.cast[float](cat.FLUX_RADIUS)
xpix, ypix = np.cast[float](cat.X_IMAGE), np.cast[float](cat.Y_IMAGE)
ra, dec = np.cast[float](cat.X_WORLD), np.cast[float](cat.Y_WORLD)
#### Find isolated point sources
points = (mag > 17) & (mag < 22) & (radius < 2.7)
# plt.plot(mag, radius, marker='o', linestyle='None', alpha=0.5, color='blue')
# plt.plot(mag[points], radius[points], marker='o', linestyle='None', alpha=0.8, color='red')
# plt.ylim(0,20)
# plt.xlim(14,26)
idx = np.arange(len(points))
isolated = mag > 1.e10
buff = 3 ## buffer, in arcsec
dmag = 2.5
scale = 0.06
for i in idx[points]:
dr = np.sqrt((xpix[i]-xpix)**2+(ypix[i]-ypix)**2)*scale
near = (dr > 0) & (dr < buff) & (mag < (mag[i]+dmag))
if len(near[near]) == 0:
isolated[i] = True
else:
isolated[i] = False
#### Make thumbnails
img = pyfits.open(unicorn.GRISM_HOME+'COSMOS/PREP_FLT/COSMOS-F140W_drz.fits')
img_data = img[1].data
img_wht = img[2].data
NPIX = int(np.ceil(buff/scale))
prim = pyfits.PrimaryHDU()
list_d = [prim]
list_w = [prim]
head = img[1].header
head['CRPIX1'], head['CRPIX2'] = NPIX, NPIX
for i in idx[points & isolated]:
print unicorn.noNewLine+'%d' %(i)
id = np.int(cat.NUMBER[i])
xi, yi = int(np.round(xpix[i])), int(np.round(ypix[i]))
sub_data = img_data[yi-NPIX:yi+NPIX, xi-NPIX: xi+NPIX]
sub_wht = img_wht[yi-NPIX:yi+NPIX, xi-NPIX: xi+NPIX]
#
head['CRVAL1'], head['CRVAL2'] = ra[i], dec[i]
head.update('MAG',mag[i])
head.update('RADIUS',radius[i])
head.update('XCENTER',xpix[i]-xi+NPIX)
head.update('YCENTER',ypix[i]-yi+NPIX)
#
list_d.append(pyfits.ImageHDU(sub_data, header=head))
list_w.append(pyfits.ImageHDU(sub_wht, header=head))
pyfits.HDUList(list_d).writeto('stars_sci.fits', clobber=True)
pyfits.HDUList(list_w).writeto('stars_wht.fits', clobber=True)
def curve_of_growth():
"""
Evaluate the curve of growth of the F140W PSF, the optimal aperture
(taken from the ratio of the CoG to the empty-aperture sigmas), and the
overall depth within a given aperture.
"""
import threedhst
import unicorn
sci = pyfits.open('stars_sci.fits')
wht = pyfits.open('stars_wht.fits')
apers = np.arange(1,25,0.5)
lstep = np.arange(0, np.log10(25), 0.05)
apers = 10**lstep
NOBJ = len(sci)-1
count = 0
average_fluxes = apers*0.
stack = sci[1].data*0.
for i in range(NOBJ):
print unicorn.noNewLine+'%d' %(i)
star = sci[i+1].data
yy, xx = np.indices(star.shape)
center = (np.abs(xx-50) < 5) & (np.abs(yy-50) < 5)
xc = np.sum((star*xx)[center])/np.sum(star[center])
yc = np.sum((star*yy)[center])/np.sum(star[center])
#xc, yc = sci[i+1].header['XCENTER'], sci[i+1].header['YCENTER']
#
bg = threedhst.utils.biweight(star, both=True)
bg = threedhst.utils.biweight(star[star < (bg[0]+4*bg[1])], both=True)
star = star-bg[0]
stack = stack + star
#
NAP = len(apers)
fluxes = np.zeros(apers.shape)
for i in range(NAP):
#print unicorn.noNewLine+'%.2f' %(apers[i])
fluxes[i] = unicorn.survey_paper.aper_phot(star, xc, yc, apers[i])
#
pp = plt.plot(apers, fluxes/fluxes[20], alpha=0.2, color='blue')
average_fluxes += fluxes/fluxes[20]
count = count + 1
stack = stack / count
stack_fluxes = np.zeros(apers.shape)
for i in range(NAP):
print unicorn.noNewLine+'%.2f' %(apers[i])
stack_fluxes[i] = unicorn.survey_paper.aper_phot(stack, xc, yc, apers[i])  ### measure the stacked star, not the last individual one
plt.xlabel(r'$R_\mathrm{aper}$')
plt.ylabel(r'$f/f_{10}$')
plt.text(15,0.4,'$N=%d$' %(count))
plt.savefig('curve_of_growth.pdf')
plt.close()
#plt.plot(apers, average_fluxes/count, color='black', linewidth=2)
#plt.plot(apers, stack_fluxes/stack_fluxes[20], color='red', linewidth=2)
# plt.plot(apers, average_fluxes/count / (stack_fluxes/stack_fluxes[20]))
fp = open('curve_of_growth.dat','w')
for i in range(len(apers)):
fp.write('%.2f %.3f\n' %(apers[i], (average_fluxes/count)[i]))
fp.close()
#### optimal color aperture:
empty = pyfits.open('../../EMPTY_APERTURES/COSMOS-1-F140W_drz_empty.fits')
files = glob.glob('../../EMPTY_APERTURES/*empty.fits')
for file in files:
empty = pyfits.open(file)
apertures = empty[1].data
fluxes = empty[2].data
#
sigma = apertures*0.
means = apertures*0.
for iap, ap in enumerate(apertures):
sigma[iap] = threedhst.utils.biweight(fluxes[:,iap])
means[iap] = threedhst.utils.biweight(fluxes[:,iap], mean=True)
#
ycog = np.interp(apertures, apers, (average_fluxes/count))
pp = plt.plot(apertures, ycog/(sigma/np.interp(6, apertures, sigma)), alpha=0.5)
plt.xlabel(r'$R_\mathrm{aper}$')
plt.ylabel(r'CoG / $\sigma$')
plt.plot(apertures, ycog/ycog.max(), color='black', linewidth=2)
plt.plot(apertures, (sigma/np.interp(6, apertures, sigma))*0.3, color='black', alpha=0.4, linewidth=2)
plt.savefig('optimal_aperture.pdf', dpi=100)
plt.close()
#### Actual calculation of the depth
from scipy import polyfit, polyval
APER = 0.5 # arcsec, diameter
ycog = average_fluxes/count
ycog = ycog / ycog.max()
print 'Aperture, D=%.2f"' %(APER)
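#### The 5-sigma depth reported below is ZP - 2.5*log10(5*sigma(R)) - apcorr,
#### with ZP = 26.46, sigma(R) from the power-law fit to the empty-aperture
#### noise, apcorr = -2.5*log10(CoG(R)) correcting the aperture flux to total,
#### and R = APER/2 converted to 0.06" pixels.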
files = glob.glob('../../EMPTY_APERTURES/[CG]*empty.fits')
for file in files:
empty = pyfits.open(file)
apertures = empty[1].data
fluxes = empty[2].data
#
sigma = apertures*0.
means = apertures*0.
for iap, ap in enumerate(apertures):
sigma[iap] = threedhst.utils.biweight(fluxes[:,iap])
means[iap] = threedhst.utils.biweight(fluxes[:,iap], mean=True)
#
#plt.plot(apertures, 26.46-2.5*np.log10(5*sigma), marker='o', color='black', linestyle='None')
coeffs = polyfit(np.log10(apertures), np.log10(sigma), 1)
yfit = 10**polyval(coeffs, np.log10(apertures))
pp = plt.plot(apertures, 26.46-2.5*np.log10(5*yfit), color='red')
#
apcorr = -2.5*np.log10(np.interp(APER/0.06/2, apers, ycog))
#
sig_at_aper = 10**polyval(coeffs, np.log10(APER/0.06/2))
depth = 26.46-2.5*np.log10(5*sig_at_aper)-apcorr
print '%s - %.2f' %(os.path.basename(file).split('-F14')[0], depth)
plt.ylim(23,30)
def aper_phot(array, xc, yc, aper_radius):
"""
Aperture photometry on an array
"""
from shapely.geometry import Point, Polygon
point = Point(xc, yc)
buff = point.buffer(aper_radius, resolution=16)
#### Make the aperture
im_aper = array*0.
yy, xx = np.indices(array.shape)
dr = np.sqrt((xx-xc)**2+(yy-yc)**2)
#### these are obviously in the aperture
solid = dr < (aper_radius-1.5)
im_aper[solid] = 1.
#### This is the edge
edge = (dr <= (aper_radius+1.5)) & (dr >= (aper_radius-1.5))
# for i in range(int(np.floor(xc-aper_radius)),int(np.ceil(xc+aper_radius))):
# for j in range(int(np.floor(yc-aper_radius)),int(np.ceil(yc+aper_radius))):
for i, j in zip(xx[edge], yy[edge]):
pix = Polygon(((i+0.5,j+0.5), (i+1.5,j+0.5), (i+1.5,j+1.5), (i+0.5,j+1.5)))
isect = pix.intersection(buff)
im_aper[j,i] = isect.area
return np.sum(array*im_aper)
def make_examples():
import unicorn
unicorn.survey_paper.redshift_fit_example(id='GOODS-N-33-G141_00946')
unicorn.survey_paper.redshift_fit_example(id='GOODS-N-17-G141_00573')
unicorn.survey_paper.redshift_fit_example(id='GOODS-N-33-G141_01028')
unicorn.survey_paper.redshift_fit_example(id='COSMOS-1-G141_00252')
unicorn.survey_paper.redshift_fit_example(id='AEGIS-4-G141_00266')
unicorn.survey_paper.redshift_fit_example(id='COSMOS-5-G141_00751')
unicorn.survey_paper.redshift_fit_example(id='PRIMO-1101-G141_01022')
unicorn.survey_paper.redshift_fit_example(id='GOODS-S-24-G141_00029')
#### Examples
unicorn.survey_paper.redshift_fit_example(id='COSMOS-14-G141_00100')
unicorn.survey_paper.redshift_fit_example(id='COSMOS-18-G141_00485')
import unicorn
import unicorn.catalogs
unicorn.catalogs.read_catalogs()
from unicorn.catalogs import zout, phot, mcat, lines, rest, gfit
mat = lines.id == 'COSMOS-14-G141_00100'
print lines.id[mat][0], lines.halpha_eqw[mat][0], lines.halpha_eqw_err[mat][0], lines.halpha_flux[mat][0]
mat = lines.id == 'COSMOS-18-G141_00485'
print lines.id[mat][0], lines.halpha_eqw[mat][0], lines.halpha_eqw_err[mat][0], lines.halpha_flux[mat][0]
def redshift_fit_example(id='COSMOS-18-G141_00485', force=False):
"""
Make a big plot showing how the whole redshift fitting works
"""
#id = 'COSMOS-14-G141_00100' ### aligned along dispersion axis, weak line
#id = 'GOODS-N-33-G141_00946' ### classic spiral
#id = 'GOODS-N-17-G141_00573'
#id = 'COSMOS-18-G141_00485' ### asymmetric line, spiral
os.chdir('/research/HST/GRISM/3DHST/ANALYSIS/SURVEY_PAPER/EXAMPLE_FITS')
#### Get the necessary files from unicorn
if (not os.path.exists('%s_thumb.fits.gz' %(id))) | force:
os.system('wget http://3dhst:[email protected]/P/GRISM_v1.6/images/%s_thumb.fits.gz' %(id))
os.system('wget http://3dhst:[email protected]/P/GRISM_v1.6/images/%s_2D.fits.gz' %(id))
os.system('wget http://3dhst:[email protected]/P/GRISM_v1.6/ascii/%s.dat' %(id))
os.system('rsync -avz --progress $UNICORN:/3DHST/Spectra/Work/ANALYSIS/REDSHIFT_FITS/OUTPUT/%s* OUTPUT/ ' %(id))
os.system('rsync -avz --progress $UNICORN:/3DHST/Spectra/Work/ANALYSIS/REDSHIFT_FITS/%s* ./ ' %(id))
zo = threedhst.catIO.Readfile('OUTPUT/%s.zout' %(id))
plt.rcParams['text.usetex'] = True
plt.rcParams['font.family'] = 'serif'
plt.rcParams['font.serif'] = 'Times'
fig = plt.figure(figsize=(6,6))
dsep = 0.05
xsep = 0.6
left = 0.085
bottom = 0.07
spec_color = 'purple'
dy2d = 0.13
#spec_color = 'blue'
spec_color = (8/255.,47/255.,101/255.)
spec_color = 'red'
phot_color = 'orange'
#phot_color = (78/255.,97/255.,131/255.)
#phot_color = '0.7'
spec_color = 'black'
phot_color = '0.7'
temp_color = (8/255.,47/255.,101/255.)
temp_color = 'red'
########### Full spectrum
ax = fig.add_axes((left, 0.5+bottom+dy2d, 0.99-left-(1-xsep), 0.49-bottom-dy2d))
lambdaz, temp_sed, lci, obs_sed, fobs, efobs = eazy.getEazySED(0, MAIN_OUTPUT_FILE='%s' %(id), OUTPUT_DIRECTORY='OUTPUT', CACHE_FILE = 'Same')
tempfilt, coeffs, temp_seds, pz = eazy.readEazyBinary(MAIN_OUTPUT_FILE=id, OUTPUT_DIRECTORY='OUTPUT', CACHE_FILE = 'Same')
dlam_spec = lci[-1]-lci[-2]
is_spec = np.append(np.abs(1-np.abs(lci[1:]-lci[0:-1])/dlam_spec) < 0.05,True)
obs_convert = 10**(-0.4*(25+48.6))*3.e18/lci**2/10.**-19*(lci/5500.)**2
temp_convert = 10**(-0.4*(25+48.6))*3.e18/lambdaz**2/10.**-19*(lambdaz/5500.)**2
fobs, efobs, obs_sed, temp_sed = fobs*obs_convert, efobs*obs_convert, obs_sed*obs_convert, temp_sed*temp_convert
ymax = max(fobs[is_spec & (fobs > 0)])
ax.semilogx([1],[1])
## photometry
ax.plot(lci[~is_spec], obs_sed[~is_spec], marker='o', color='black', linestyle='None', markersize=6, alpha=0.2)
## best-fit SED
## Spectrum + convolved fit
#ax.plot(lci[is_spec], obs_sed[is_spec], color='black', markersize=6, alpha=0.7, linewidth=1)
ax.plot(lci[is_spec], fobs[is_spec], marker='None', alpha=0.8, color=spec_color, linewidth=2)
ax.plot(lambdaz, temp_sed, color='white', linewidth=3, alpha=0.6)
ax.plot(lambdaz, temp_sed, color=temp_color, alpha=0.6)
ax.errorbar(lci[~is_spec], fobs[~is_spec], efobs[~is_spec], marker='o', linestyle='None', alpha=0.6, color=phot_color, markersize=10)
ax.set_yticklabels([])
ax.set_ylabel(r'$f_\lambda$')
ax.set_xlabel(r'$\lambda$')
xtick = ax.set_xticks(np.array([0.5, 1., 2, 4])*1.e4)
ax.set_xticklabels(np.array([0.5, 1., 2, 4]))
#ax.set_xlim(3000,9.e4)
ax.set_xlim(3290,2.5e4)
ax.set_ylim(-0.1*ymax, 1.2*ymax)
############# Sub spectrum
ax = fig.add_axes((left, bottom, 0.99-left, 0.49-bottom))
obs_sed_continuum = np.dot(tempfilt['tempfilt'][:,0:7,coeffs['izbest'][0]],coeffs['coeffs'][0:7,0])/(lci/5500.)**2*obs_convert
temp_sed_continuum = np.dot(temp_seds['temp_seds'][:,0:7],coeffs['coeffs'][0:7,0])/(1+zo.z_peak[0])**2*temp_convert
ymax = max(fobs[is_spec & (fobs > 0)]-obs_sed_continuum[is_spec & (fobs > 0)])
#ymin = min(fobs[is_spec & (fobs > 0)])
# ax.semilogx([1],[1])
## photometry
ax.plot(lci[~is_spec], obs_sed[~is_spec]-obs_sed_continuum[~is_spec], marker='o', color='black', linestyle='None', markersize=6, alpha=0.2, zorder=10)
## best-fit SED
ax.plot(lci[is_spec], fobs[is_spec]-obs_sed_continuum[is_spec], marker='None', alpha=0.8, color=spec_color, linewidth=2, zorder=10)
ax.plot(lambdaz, temp_sed-temp_sed_continuum, color=temp_color, alpha=0.3, zorder=10)
## Spectrum + convolved fit
ax.plot(lci[is_spec], obs_sed[is_spec]-obs_sed_continuum[is_spec], color='white', markersize=6, alpha=0.7, linewidth=4, zorder=10)
ax.plot(lci[is_spec], obs_sed[is_spec]-obs_sed_continuum[is_spec], color=temp_color, markersize=6, alpha=0.7, linewidth=1, zorder=10)
#ax.plot(lci[is_spec], obs_sed_continuum[is_spec]-obs_sed_continuum[is_spec], color='black', markersize=6, alpha=0.3, linewidth=2)
ax.errorbar(lci[~is_spec], fobs[~is_spec]-obs_sed_continuum[~is_spec], efobs[~is_spec], marker='o', linestyle='None', alpha=0.6, color=phot_color, markersize=10)
#ax.set_yticklabels([])
#ax.set_ylabel(r'$f_\lambda-\ \mathrm{continuum}$')
ax.set_ylabel(r'$f_\lambda - f_{\lambda,\ \mathrm{cont.}}\ [10^{-19}\ \mathrm{erg\ s^{-1}\ cm^{-2}\ \AA^{-1}}]$')
ax.set_xlabel(r'$\lambda\ [\mu\mathrm{m}]$')
xtick = ax.set_xticks(np.array([1.2, 1.4,1.6])*1.e4)
ax.set_xticklabels(np.array([1.2, 1.4,1.6]))
#ax.set_xlim(3000,9.e4)
ax.set_xlim(1.05e4,1.7e4)
ax.set_ylim(-0.2*ymax, 1.2*ymax)
########### p(z)
ax = fig.add_axes((xsep+left, 0.5+bottom+dy2d, 0.99-left-xsep, 0.49-bottom-dy2d))
colors = [spec_color,phot_color,'blue']
alpha = [0.5, 0.5, 0.2]
zmin = 4
zmax = 0
ymax = 0
for i in range(2):
zgrid, pz = eazy.getEazyPz(i, MAIN_OUTPUT_FILE='%s' %(id),
OUTPUT_DIRECTORY='./OUTPUT',
CACHE_FILE='Same')
ax.fill_between(zgrid, pz, pz*0., color=colors[i], alpha=alpha[i], edgecolor=colors[i])
ax.fill_between(zgrid, pz, pz*0., color=colors[i], alpha=alpha[i], edgecolor=colors[i])
#
if pz.max() > ymax:
ymax = pz.max()
#
if zgrid[pz > 1.e-3].min() < zmin:
zmin = zgrid[pz > 1.e-2].min()
#
if zgrid[pz > 1.e-6].max() > zmax:
zmax = zgrid[pz > 1.e-2].max()
ax.plot(zo.z_spec[0]*np.array([1,1]),[0,1.e4], color='green', linewidth=1)
ax.set_yticklabels([])
ax.set_xlabel(r'$z$')
ax.set_ylabel(r'$p(z)$')
ax.xaxis.set_major_locator(unicorn.analysis.MyLocator(4, prune='both'))
### Plot labels
#ax.text(0.5, 0.9, '%s' %(id), transform = ax.transAxes, horizontalalignment='center')
xtxt, align = 0.95,'right'
xtxt, align = 0.5,'right'
fs, dyt = 9, 0.1
fs, dyt = 10,0.13
ax.text(xtxt, 0.8, r'$z_\mathrm{phot}=$'+'%5.3f' %(zo.z_peak[1]), transform = ax.transAxes, horizontalalignment=align, fontsize=fs)
ax.text(xtxt, 0.8-dyt, r'$z_\mathrm{gris}=$'+'%5.3f' %(zo.z_peak[0]), transform = ax.transAxes, horizontalalignment=align, fontsize=fs)
if zo.z_spec[0] > 0:
ax.text(xtxt, 0.8-2*dyt, r'$z_\mathrm{spec}=$'+'%5.3f' %(zo.z_spec[0]), transform = ax.transAxes, horizontalalignment=align, fontsize=fs)
ax.set_xlim(zmin, zmax)
#ax.set_xlim(zgrid.min(), zgrid.max())
ax.set_ylim(0,1.1*ymax)
#################### 2D spectrum
thumb = pyfits.open('%s_thumb.fits.gz' %(id))
thumb_data = thumb[0].data
#thumb_data[10,:] = 1000
profile = np.sum(thumb_data, axis=1)
NSUB = int(np.round(0.5*thumb_data.shape[0]))/2
yc = thumb_data.shape[0]/2
dx = NSUB*2*22/(ax.get_xlim()[1]-ax.get_xlim()[0])*(0.98-left)
dx = dy2d
ax = fig.add_axes((left, 0.49, 0.99-left, dy2d))
#ax.errorbar(lci[~is_spec], fobs[~is_spec]-obs_sed_continuum[~is_spec], efobs[~is_spec], marker='o', linestyle='None', alpha=0.6, color=phot_color, markersize=10)
twod_file = '%s_2D.fits.gz' %(id)
twod = pyfits.open(twod_file)
spec2d = twod[1].data-twod[4].data
head = twod[1].header
lam_idx = np.arange(head['NAXIS1'])
lam = (lam_idx+1-head['CRPIX1'])*head['CDELT1']+head['CRVAL1']
lam_mima = np.cast[int](np.round(np.interp(np.array([1.05e4,1.7e4]), lam, lam_idx)))
tick_int = np.interp(np.array([1.2,1.4,1.6])*1.e4, lam, lam_idx)-np.interp(1.05e4, lam, lam_idx)
spec2d_sub = spec2d[yc-NSUB:yc+NSUB,lam_mima[0]:lam_mima[1]]
ax.imshow(0-spec2d_sub, aspect='auto', vmin=-0.1, vmax=0.01, interpolation='nearest')
ax.set_yticklabels([]); ax.set_xticklabels([])
xtick = ax.set_xticks(tick_int); ytick = ax.set_yticks([0,2*NSUB])
########### Thumbnail
#ax = fig.add_axes((left+left*0.3, 0.49-dx-left*0.3, dx, dx))
ax = fig.add_axes((left, 0.49, dx, dx))
#ax.imshow(thumb_data[yc-NSUB:yc+NSUB, yc-NSUB:yc+NSUB], vmin=-0.05, vmax=0.5, interpolation='nearest')
ax.imshow(0-thumb_data[yc-NSUB:yc+NSUB, yc-NSUB:yc+NSUB], vmin=-0.7, vmax=0.05, interpolation='nearest', zorder=2)
ax.set_yticklabels([])
ax.set_xticklabels([])
#ax = fig.add_axes((left+left*0.3*2+dx, 0.49-dx-left*0.3, dx, dx))
#profile = np.sum(thumb_data[yc-NSUB:yc+NSUB, yc-NSUB:yc+NSUB], axis=0)
#ax.plot(profile/profile.max(), color='black', alpha=0.4)
size = thumb[0].data.shape
twod_file = '%s_2D.fits.gz' %(id)
twod = pyfits.open(twod_file)
model1D = np.matrix(twod[5].data.sum(axis=1))
model1D /= np.max(model1D)
model2D = np.array(np.dot(np.transpose(model1D),np.ones((1,size[0]))))
thumb_data *= model2D
profile = np.sum(thumb_data[yc-NSUB:yc+NSUB, yc-NSUB:yc+NSUB], axis=0)
ax.plot(profile/profile.max()*2*NSUB*0.8, color='black', alpha=0.3, zorder=2)
ax.set_xlim(0,2*NSUB); ax.set_ylim(0,2*NSUB)
ax.set_yticklabels([])
ax.set_xticklabels([])
xtick = ax.set_xticks([0,2*NSUB]); ytick = ax.set_yticks([0,2*NSUB])
fig.savefig('%s_example.pdf' %(id))
def equivalent_width_errors():
"""
Compute the line S/N as a function of line flux and equivalent width to
illustrate the limiting equivalent widths of the grism spectra.
"""
import unicorn
import unicorn.catalogs
unicorn.catalogs.read_catalogs()
from unicorn.catalogs import zout, phot, mcat, lines, rest, gfit
os.chdir('/research/HST/GRISM/3DHST/ANALYSIS/SURVEY_PAPER')
keep = unicorn.catalogs.run_selection(zmin=0.8, zmax=5.5, fcontam=0.2, qzmin=0., qzmax=0.1, dr=1.0, has_zspec=False, fcovermin=0.9, fcovermax=1.0, massmin=8.5, massmax=15, magmin=17, magmax=23.5)
keep_22 = unicorn.catalogs.run_selection(zmin=0.8, zmax=5.5, fcontam=0.2, qzmin=0., qzmax=0.1, dr=1.0, has_zspec=False, fcovermin=0.9, fcovermax=1.0, massmin=8.5, massmax=15, magmin=21.7, magmax=22.3)
halpha_sn = lines.halpha_eqw / lines.halpha_eqw_err
#halpha_sn[(halpha_sn > 0) & (halpha_sn < 1)] = 2
keep_ha = keep & (halpha_sn[lines.idx] > 0)
oiii_sn = lines.oiii_eqw / lines.oiii_eqw_err
keep_oiii = keep & (oiii_sn[lines.idx] > 0)
#plt.scatter(phot.mag_f1392w[phot.idx][keep], phot.flux_radius[phot.idx][keep], marker='o')
marker_size = phot.flux_radius[phot.idx]**1.5
colors = 'purple'
# colors = (phot.mag_f1392w[phot.idx]-17)
# colors[colors < 0] = 0
# colors[colors > 5] = 5
##### FLux
plt.rcParams['text.usetex'] = True
plt.rcParams['font.family'] = 'serif'
plt.rcParams['font.serif'] = 'Times'
fig = unicorn.catalogs.plot_init(square=True, xs=5, aspect=5/4., left=0.12)
fig.subplots_adjust(wspace=0.20,hspace=0.24,left=0.12,
bottom=0.08,right=0.98,top=0.98)
plt.rcParams['patch.edgecolor'] = 'k'
ax = fig.add_subplot(211)
ax.scatter(lines.halpha_flux[lines.idx][keep_ha], halpha_sn[lines.idx][keep_ha], marker='o', c='purple', alpha=0.1, s=marker_size[keep_ha])
ax.scatter(lines.oiii_flux[lines.idx][keep_oiii], oiii_sn[lines.idx][keep_oiii], marker='o', c='orange', alpha=0.1, s=marker_size[keep_oiii])
xm, ym, ys, ns = threedhst.utils.runmed(lines.halpha_flux[lines.idx][keep_ha], halpha_sn[lines.idx][keep_ha], NBIN=20, median=True)
ax.plot(xm, ym, color='white', alpha=0.6, linewidth=4)
ax.plot(xm, ym, color='purple', alpha=0.8, linewidth=3)
xm, ym, ys, ns = threedhst.utils.runmed(lines.oiii_flux[lines.idx][keep_oiii], oiii_sn[lines.idx][keep_oiii], NBIN=20, median=True)
ax.plot(xm[:-1], ym[:-1], color='white', alpha=0.6, linewidth=4)
ax.plot(xm[:-1], ym[:-1], color='orange', alpha=0.8, linewidth=3)
## label
for si in [2,4,8,16]:
ax.scatter(np.array([1,1])*2.e-17, np.array([1,1])*25*si**0.4, s=si**1.5, color='black', alpha=0.2)
ax.text(2.e-17*1.3, 25*si**0.4, '%.1f' %(si*0.06), verticalalignment='center')
nha = len(lines.halpha_flux[lines.idx][keep_ha])
noiii = len(lines.halpha_flux[lines.idx][keep_oiii])
ax.text(2.e-17*1.15, 25*(0.5)**0.4, r'$N_\mathrm{H\alpha}=%d$' %(nha), color='purple', horizontalalignment='center')
ax.text(2.e-17*1.15, 25*(0.5/3)**0.4, r'$N_\mathrm{O III}=%d$' %(noiii), color='orange', horizontalalignment='center')
ax.semilogy()
ax.semilogx()
ax.set_ylim(1,100)
ax.set_xlim(1.e-17,2.e-15)
ax.set_yticklabels([1,3,10,30,100])
ytick = ax.set_yticks([1,3,10,30,100])
ax.set_ylabel('line S / N')
ax.set_xlabel(r'line flux $\mathrm{[ergs\ s^{-1}\ cm^{-2}]}$')
#### EqW
#fig = unicorn.catalogs.plot_init(square=True, xs=5, aspect=1, left=0.12)
#plt.rcParams['patch.edgecolor'] = 'k'
ax = fig.add_subplot(212)
marker_size = 10**(-0.4*(18-phot.mag_f1392w[phot.idx]))**0.8
zz = lines.z_grism[lines.idx]*0
zz = lines.z_grism[lines.idx]
ax.scatter(lines.halpha_eqw[lines.idx][keep_ha]*(1+zz[keep_ha]), halpha_sn[lines.idx][keep_ha], marker='o', c='purple', alpha=0.1, s=marker_size[keep_ha])
ax.scatter(lines.oiii_eqw[lines.idx][keep_oiii]*(1+zz[keep_oiii]), oiii_sn[lines.idx][keep_oiii], marker='o', c='orange', alpha=0.1, s=marker_size[keep_oiii])
xm, ym, ys, ns = threedhst.utils.runmed(lines.halpha_eqw[lines.idx][keep_ha]*(1+zz[keep_ha]), halpha_sn[lines.idx][keep_ha], NBIN=20, median=False)
ax.plot(xm, ym, color='white', alpha=0.6, linewidth=4)
ax.plot(xm, ym, color='purple', alpha=0.8, linewidth=3)
xm, ym, ys, ns = threedhst.utils.runmed(lines.oiii_eqw[lines.idx][keep_oiii]*(1+zz[keep_oiii]), oiii_sn[lines.idx][keep_oiii], NBIN=20, median=True)
ax.plot(xm, ym, color='white', alpha=0.6, linewidth=4)
ax.plot(xm, ym, color='orange', alpha=0.8, linewidth=3)
for si, mag in enumerate([19, 21, 23]):
ax.scatter(np.array([1,1])*10, np.array([1,1])*25*(2**(si+1))**0.4, s=10**(-0.4*(18-mag))**0.8, color='black', alpha=0.2)
ax.text(10*1.3, 25*(2**(si+1))**0.4, '%d' %(mag), verticalalignment='center')
ax.semilogy()
ax.semilogx()
ax.set_ylim(1,100)
ax.set_xlim(5,1000)
ax.set_yticklabels([1,3,10,30,100])
ytick = ax.set_yticks([1,3,10,30,100])
ax.set_xticklabels([5,10,100,500])
xtick = ax.set_xticks([5,10,100, 500])
ax.set_ylabel('line S / N')
if plt.rcParams['text.usetex']:
ax.set_xlabel(r'Equivalent width [\AA]')
else:
ax.set_xlabel(r'Equivalent width [$\AA$]')
fig.savefig('equivalent_width_errors.pdf')
plt.rcParams['text.usetex'] = False
def zphot_zspec_plot():
import unicorn
import unicorn.catalogs
import copy
os.chdir(unicorn.GRISM_HOME+'/ANALYSIS/SURVEY_PAPER')
unicorn.catalogs.read_catalogs()
from unicorn.catalogs import zout, phot, mcat, lines, rest, gfit
if unicorn.catalogs.zsp is None:
unicorn.catalogs.make_specz_catalog()
zsp = unicorn.catalogs.zsp
USE_NEW_FITS=True
if USE_NEW_FITS:
##### Refit redshifts gets rid of the offset
zout_new = catIO.Readfile('/research/HST/GRISM/3DHST/UDF/CATALOGS/LINE_TEMPLATES/full_redshift_fixed_noTilt.cat')
#zout_new = catIO.Readfile('/research/HST/GRISM/3DHST/UDF/CATALOGS/LINE_TEMPLATES/full_redshift_origTemp_noTilt.cat')
zout_new = catIO.Readfile('/research/HST/GRISM/3DHST/UDF/CATALOGS/LINE_TEMPLATES/full_redshift_scaleSpecErr3_noTilt.cat')
zout_new = catIO.Readfile('/research/HST/GRISM/3DHST/UDF/CATALOGS/LINE_TEMPLATES/full_redshift_scaleSpecErr2_noTilt.cat')
zout_new = catIO.Readfile('/research/HST/GRISM/3DHST/UDF/CATALOGS/LINE_TEMPLATES/full_redshift_scaleSpecErr2_yesTilt.cat')
refit = zout.id[0::3] == 'x'
refit_idx = zout.z_peak[0::3]*0.
for i in range(len(zout.id[0::3])):
print unicorn.noNewLine+'%d' %(i)
if zout.id[i*3] in zout_new.id:
refit[i] = True
refit_idx[i] = np.where(zout_new.id[0::3] == zout.id[i*3])[0][0]
refit_idx = np.cast[int](refit_idx)
zphot = zout_new.z_peak[0::3][refit_idx]
qz = zout_new.q_z[0::3][refit_idx]
qz2 = zout_new.q_z[2::3][refit_idx]
else:
zphot = zout.z_peak[0::3]
qz = zout.q_z[0::3]
qz2 = zout.q_z[2::3]
maglim = 24
qzmax = 0.2
contam_max = 0.05
stats_zmin = 0.7
keep = (phot.mag_f1392w[phot.idx] < maglim) & (phot.fcontam[phot.idx] < contam_max) & (qz < qzmax) & (phot.fcover[phot.idx] > 0.9) & (mcat.logm[mcat.idx] > 0) & (mcat.rmatch[mcat.idx] < 0.5) & (zsp.zspec[zsp.mat_idx] > 0) & (zsp.dr < 1)
#### Same selection but nothing on specz
keep_nospec = (phot.mag_f1392w[phot.idx] < maglim) & (phot.fcontam[phot.idx] < 0.05) & (qz < qzmax) & (phot.fcover[phot.idx] > 0.9) & (mcat.logm[mcat.idx] > 0) & (mcat.rmatch[mcat.idx] < 0.5) & (zout.z_peak[0::3] > stats_zmin)
keep_nospec_goods = (phot.mag_f1392w[phot.idx] < maglim) & (phot.fcontam[phot.idx] < 0.05) & (qz < qzmax) & (phot.fcover[phot.idx] > 0.9) & (mcat.logm[mcat.idx] > 0) & (mcat.rmatch[mcat.idx] < 0.5) & (zout.z_peak[0::3] > stats_zmin) & ((phot.field[phot.idx] == 'GOODS-N') | (phot.field[phot.idx] == 'GOODS-X'))
keep_hasspec = (phot.mag_f1392w[phot.idx] < maglim) & (phot.fcontam[phot.idx] < 0.05) & (qz < qzmax) & (phot.fcover[phot.idx] > 0.9) & (mcat.logm[mcat.idx] > 0) & (mcat.rmatch[mcat.idx] < 0.5) & (zout.z_peak[0::3] > stats_zmin) & (zsp.zspec[zsp.mat_idx] > 0) & (zsp.dr < 1)
keep_hasspec_goods = (phot.mag_f1392w[phot.idx] < maglim) & (phot.fcontam[phot.idx] < 0.05) & (qz < qzmax) & (phot.fcover[phot.idx] > 0.9) & (mcat.logm[mcat.idx] > 0) & (mcat.rmatch[mcat.idx] < 0.5) & (zout.z_peak[0::3] > stats_zmin) & (zsp.zspec[zsp.mat_idx] > 0) & (zsp.dr < 1) & ((phot.field[phot.idx] == 'GOODS-N') | (phot.field[phot.idx] == 'GOODS-X'))
#### Spectroscopic redshift ratio by field
for field in ['GOODS-N', 'GOODS-S', 'COSMOS', 'AEGIS']:
print '%s %.2f' %(field, len(keep[keep_hasspec & (phot.field[phot.idx] == field)])*1. / len(keep[keep_nospec & (phot.field[phot.idx] == field)]))
print len(keep[keep_hasspec])*1./len(keep[keep_nospec]), len(keep[keep_nospec])
#### Only way to get out a few objects where the photometry wasn't found for the fit
keep = keep & (qz != qz2)
if USE_NEW_FITS:
keep = keep & (refit_idx > 0)
plt.rcParams['text.usetex'] = True
plt.rcParams['font.family'] = 'serif'
plt.rcParams['font.serif'] = 'Times'
fig = unicorn.catalogs.plot_init(left=0.07, xs=3, bottom=0.07)
ax = fig.add_subplot(111)
zsplit = stats_zmin
ms=2
ax.plot(np.log10(1+zsp.zspec[zsp.mat_idx][keep & (zsp.zspec[zsp.mat_idx] > zsplit)]), np.log10(1+zphot[keep & (zsp.zspec[zsp.mat_idx] > zsplit)]), marker='o', linestyle='None', alpha=0.2, color='black', markersize=ms)
ax.plot(np.log10(1+zsp.zspec[zsp.mat_idx][keep & (zsp.zspec[zsp.mat_idx] < zsplit)]), np.log10(1+zphot[keep & (zsp.zspec[zsp.mat_idx] < zsplit)]), marker='o', linestyle='None', alpha=0.2, color='0.9', markersize=ms)
ax.plot([0,5],[0,5], color='white', alpha=0.2, linewidth=3)
#ax.plot([0,5],[0,5], color='black', alpha=0.3, linewidth=1)
zz = np.array([0,4])
ax.plot(np.log10(1+zz), np.log10(1+zz+0.1*(1+zz)), linestyle='--', color='0.8', alpha=0.5)
ax.plot(np.log10(1+zz), np.log10(1+zz-0.1*(1+zz)), linestyle='--', color='0.8', alpha=0.5)
ax.set_xticklabels(['0','1','2','3','4'])
ax.set_xticks(np.log10(1+np.array([0,1,2,3,4])))
ax.set_yticklabels(['0','1','2','3','4'])
ax.set_yticks(np.log10(1+np.array([0,1,2,3,4])))
ax.set_xlim(np.log10(1+0),np.log10(4+1))
ax.set_ylim(np.log10(1+0),np.log10(4+1))
ax.set_xlabel(r'$z_\mathrm{spec}$')
ax.set_ylabel(r'$z_\mathrm{G141+phot}$')
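# Redshift residuals dz = (z_phot - z_spec)/(1+z_spec); sigma_NMAD is the normalized median
# absolute deviation of dz, and 'clip' selects the non-outliers with |dz| < 0.1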
dz = (zphot - zsp.zspec[zsp.mat_idx])/(1+zsp.zspec[zsp.mat_idx])
clip = np.abs(dz) < 0.1
sigma_gt1 = threedhst.utils.nmad(dz[keep & (zout.z_spec[0::3] > 1)])
sigma_gt1_clip = threedhst.utils.nmad(dz[keep & (zout.z_spec[0::3] > 1) & clip])
sigma_gt0_biw = threedhst.utils.biweight(dz[keep & (zout.z_spec[0::3] > stats_zmin)])
sigma_gt0 = threedhst.utils.nmad(dz[keep & (zout.z_spec[0::3] > stats_zmin)])
sigma_gt0_clip = threedhst.utils.nmad(dz[keep & (zout.z_spec[0::3] > stats_zmin) & clip])
NOUT = len(dz[keep & (zout.z_spec[0::3] > stats_zmin) & ~clip])*1./len(dz[keep & (zout.z_spec[0::3] > stats_zmin)])
fs = 9
print sigma_gt0, sigma_gt0_clip, sigma_gt1, sigma_gt1_clip, NOUT
ax.text(0.1,0.9,r'$H_{140} <\ %.1f,\ z_\mathrm{spec} >\ %.1f,\ Q_z <\ %.2f$' %(maglim, stats_zmin, qzmax), transform=ax.transAxes, fontsize=fs)
ax.text(0.1,0.81,r'$N=%d$' %(len(dz[keep & (zout.z_spec[0::3] > stats_zmin)])), transform=ax.transAxes, fontsize=fs)
ax.text(0.1,0.72,r'$\sigma_\mathrm{NMAD}=%.4f$' %(sigma_gt0), transform=ax.transAxes, fontsize=fs)
pct = '\%'
ax.text(0.1,0.63,r'$f_\mathrm{>0.1}=%.1f%s$' %(NOUT*100,pct), transform=ax.transAxes, fontsize=fs)
# zbox = np.log10(1+stats_zmin)
# ax.fill_between([0,zbox],[0,0],[zbox,zbox], color='red', alpha=0.1)
ax.set_xlim(np.log10(0.0+1),np.log10(3.5+1))
ax.set_ylim(np.log10(0.0+1),np.log10(3.5+1))
fig.savefig('zphot_zspec.pdf')
plt.rcParams['text.usetex'] = False
##### Show line misidentifications
# zha = np.log10(np.array([1.05e4,1.68e4])/6563.)
# ax.plot(zha, zha+np.log10(6563./5007), color='green', alpha=0.5)
# ax.plot(zha, zha+np.log10(6563./3727), color='purple', alpha=0.5)
# ax.plot(zha, zha+np.log10(6563./4863), color='orange', alpha=0.5)
# zhb = np.log10(np.array([1.05e4,1.68e4])/4861.)
# ax.plot(zhb, zhb+np.log10(4861./3727), color='blue', alpha=0.5)
# zoiii = np.log10(np.array([1.05e4,1.68e4])/3727.)
# ax.plot(zoiii, zoiii+np.log10(3727./4861), color='blue', alpha=0.5)
# plt.xlim(0,np.log10(1+5))
# plt.ylim(0,np.log10(1+5))
#### Show dz as a function of parameters
if 1 == 0:
"""
Make plots to see how the redshift residuals depend on things like mag,
contamination fraction, Qz.
"""
keep = (phot.mag_f1392w[phot.idx] < 25) & (phot.fcontam[phot.idx] < 1) & (zout.q_z[0::3] < 1000) & (phot.fcover[phot.idx] > 0.9) & (mcat.logm[mcat.idx] > 0) & (mcat.rmatch[mcat.idx] < 0.5) & (zsp.zspec[zsp.mat_idx] > stats_zmin) & (zsp.dr < 1)
keep = keep & (zout.q_z[0::3] != zout.q_z[2::3])
dz = (zphot - zsp.zspec[zsp.mat_idx])/(1+zsp.zspec[zsp.mat_idx])
yr = (-0.5,0.5)
alpha, ms, color = 0.5, 2,'black'
fig = unicorn.catalogs.plot_init(xs=8,aspect=0.7,left=0.12)
#### Mag
ax = fig.add_subplot(221)
ax.plot(phot.mag_f1392w[phot.idx][keep], dz[keep], marker='o', alpha=alpha, linestyle='None', ms=ms, color=color)
xm, ym, ys, ns = threedhst.utils.runmed(phot.mag_f1392w[phot.idx][keep], dz[keep], NBIN=20)
ax.plot(xm, ys*10, color='red', linewidth=2)
ax.set_ylim(yr[0], yr[1])
ax.set_xlim(19,25)
ax.set_xlabel(r'$H_{140}$')
#### Contam
ax = fig.add_subplot(222)
ax.plot(phot.fcontam[phot.idx][keep], dz[keep], marker='o', alpha=alpha, linestyle='None', ms=ms, color=color)
xm, ym, ys, ns = threedhst.utils.runmed(phot.fcontam[phot.idx][keep], dz[keep], NBIN=20)
ax.plot(xm, ys*10, color='red', linewidth=2)
#ax.semilogx()
ax.set_ylim(yr[0], yr[1])
ax.set_xlim(0.01,1)
ax.set_xlabel(r'$f_\mathrm{cont}\ \mathrm{at}\ 1.4\ \mu m$')
#### Q_z
ax = fig.add_subplot(223)
ax.plot(zout.q_z[0::3][keep], dz[keep], marker='o', alpha=alpha, linestyle='None', ms=ms, color=color)
xm, ym, ys, ns = threedhst.utils.runmed(zout.q_z[0::3][keep], dz[keep], NBIN=20)
ax.plot(xm, ys*10, color='red', linewidth=2)
ax.semilogx()
ax.set_ylim(yr[0], yr[1])
ax.set_xlim(0.001,10)
ax.set_xlabel(r'$Q_z$')
#### Offset near z=1, appears to be due to the tilted slope of the spectrum being a bit too steep. If you just use a 0th order offset correction to the spectrum (TILT_ORDER=0), the redshifts for many of these objects become correct
#keep = keep & (np.array(zsp.source)[zsp.mat_idx] == 'Barger')
if (1==0):
dzlog = np.log10(1+zout.z_peak[0::3]) - np.log10(1+zsp.zspec[zsp.mat_idx])
bad = (dzlog > 0.027) & (dzlog < 0.047 ) & (np.log10(1+zsp.zspec[zsp.mat_idx]) > 0.2) & keep
bad = (dzlog > 0.18) & keep
bad = (np.abs(dz) > 0.1) & (zsp.zspec[zsp.mat_idx] > stats_zmin) & keep
print np.array(zsp.source)[zsp.mat_idx][bad]
print phot.id[phot.idx][bad]
for id in phot.id[phot.idx][bad]:
os.system('wget http://3dhst:[email protected]/P/GRISM_v1.6/EAZY/%s_eazy.png' %(id))
fig = unicorn.catalogs.plot_init(left=0.12)
ax = fig.add_subplot(111)
ax.plot(np.log10(1+zsp.zspec[zsp.mat_idx][keep]), dzlog[keep], marker='o', linestyle='None', alpha=0.5, color='black', markersize=5)
ax.set_xlim(0,np.log10(4+1))
ax.set_ylim(-1,1)
#### nearby offset at z~1 ~ 0.035 in log(1+z)
offset = 0.036
print 6563*10**(-offset), 5007*10**(-offset), 4861*10**(-offset), 3727*10**(-offset)
def zphot_zspec_lines():
"""
Investigate how the redshift errors depend on the emission line signal to noise.
"""
import unicorn
import unicorn.catalogs
import copy
os.chdir(unicorn.GRISM_HOME+'/ANALYSIS/SURVEY_PAPER')
unicorn.catalogs.read_catalogs()
from unicorn.catalogs import zout, phot, mcat, lines, rest, gfit
if unicorn.catalogs.zsp is None:
unicorn.catalogs.make_specz_catalog()
zsp = unicorn.catalogs.zsp
zphot = zout.z_peak[0::3]
##### Refitting the redshifts gets rid of the offset
zout_new = catIO.Readfile('/research/HST/GRISM/3DHST/UDF/CATALOGS/LINE_TEMPLATES/full_redshift_fixed_centering.cat')
refit = zout.id[0::3] == 'x'
refit_idx = zout.z_peak[0::3]*0.
for i in range(len(zout.id[0::3])):
print unicorn.noNewLine+'%d' %(i)
if zout.id[i*3] in zout_new.id:
refit[i] = True
refit_idx[i] = np.where(zout_new.id[0::3] == zout.id[i*3])[0][0]
refit_idx = np.cast[int](refit_idx)
zphot = zout_new.z_peak[0::3][refit_idx]
maglim = 24
qzmax = 0.2
contam_max = 0.05
keep = (phot.mag_f1392w[phot.idx] < maglim) & (phot.fcontam[phot.idx] < contam_max) & (zout.q_z[0::3] < qzmax) & (phot.fcover[phot.idx] > 0.9) & (mcat.logm[mcat.idx] > 0) & (mcat.rmatch[mcat.idx] < 0.5) & (zsp.zspec[zsp.mat_idx] > 0) & (zsp.dr < 1)
keep = keep & (zout.q_z[0::3] != zout.q_z[2::3])
lmin, lmax = 1.2e4, 1.6e4
z_ha = (zsp.zspec[zsp.mat_idx] > (lmin/6563.-1)) & (zsp.zspec[zsp.mat_idx] < (lmax/6563.-1))
z_oiii = (zsp.zspec[zsp.mat_idx] > (lmin/5007.-1)) & (zsp.zspec[zsp.mat_idx] < (lmax/5007.-1))
dz = (zphot - zsp.zspec[zsp.mat_idx])/(1+zsp.zspec[zsp.mat_idx])
halpha_eqw = lines.halpha_eqw*1.
oiii_eqw = lines.oiii_eqw*1.
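# Floor the equivalent widths at eqw_min (times a random factor between 1 and 3) so that
# low-EQW objects remain visible on a logarithmic axis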
eqw_min = 0.5
rnd_halpha = np.random.rand(len(halpha_eqw))*2+1
rnd_oiii = np.random.rand(len(oiii_eqw))*2+1
halpha_eqw[halpha_eqw < eqw_min] = eqw_min*rnd_halpha[halpha_eqw < eqw_min]
oiii_eqw[oiii_eqw < eqw_min] = eqw_min*rnd_oiii[oiii_eqw < eqw_min]
ha_color, oiii_color = 'black', 'orange'
fig = unicorn.catalogs.plot_init(left=0.15, bottom=0.075, xs=4, right=0.01, top=0.01, square=True)
plt.rcParams['text.usetex'] = True
plt.rcParams['font.family'] = 'serif'
plt.rcParams['font.serif'] = 'Times'
fig.subplots_adjust(wspace=0.0)
################ Ha eqw
# ax = fig.add_subplot(122)
# unicorn.survey_paper.dz_trend(halpha_eqw[lines.idx][keep & z_ha], dz[keep & z_ha], xrange=[0.8*eqw_min,1000], yrange=[-0.015, 0.02], xlog=True, ax=ax, xlabel=r'EQW H$\alpha$')
# yticks = [r'$0.01$',r'$0.1$',r'$1$',r'$10$',r'$10^{2}$']
# ax.set_yticklabels([])
# xtick = ax.set_xticks([1,10,100,1000])
# ax.set_xticklabels([1,10,100,1000])
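# H-alpha S/N from the line catalog, floored at sn_min (times a random factor) in the same way as the EQWs above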
halpha_sn = lines.halpha_eqw / lines.halpha_eqw_err
sn_min = 0.2
rnd_halpha = np.random.rand(len(halpha_sn))*2+1
halpha_sn[halpha_sn < sn_min] = sn_min*rnd_halpha[halpha_sn < sn_min]
ax = fig.add_subplot(122)
unicorn.survey_paper.dz_trend(halpha_sn[lines.idx][keep & z_ha], dz[keep & z_ha], xrange=[0.1,300], yrange=[-0.015, 0.015], xlog=True, ax=ax, xlabel=r'H$\alpha$ S/N')
yticks = [r'$0.01$',r'$0.1$',r'$1$',r'$10$',r'$10^{2}$']
ax.set_yticklabels([])
xtick = ax.set_xticks([1,10,100])
ax.set_xticklabels([1,10,100])
################ Mag F140W
ax = fig.add_subplot(121)
unicorn.survey_paper.dz_trend(phot.mag_f1392w[phot.idx][keep & z_ha], dz[keep & z_ha], xrange=[19,24], yrange=[-0.015, 0.015], ax=ax, xlabel=r'$m_{140}$')
ax.text(0.08,0.9,r'H$\alpha$, $%.1f < z < %.1f$' %((lmin/6563.-1), (lmax/6563.-1)), color='black', transform=ax.transAxes, fontsize=12)
ax.text(0.08,0.83,r'$N=%d$' %(len(z_ha[keep & z_ha])), color='black', transform=ax.transAxes, fontsize=12)
ax.text(0.08,0.12,r'$\sigma_\mathrm{NMAD}=0.0025$', color='black', transform=ax.transAxes, alpha=0.8)
ax.text(0.08,0.12,r'$\sigma_\mathrm{NMAD}=0.0025$', color='orange', transform=ax.transAxes, alpha=0.8)
ax.text(0.08,0.07,r'$\sigma_\mathrm{NMAD}=0.0050$', color='red', transform=ax.transAxes)
# ################ z
# ax = fig.add_subplot(131)
# unicorn.survey_paper.dz_trend(zsp.zspec[zsp.mat_idx][keep & z_ha], dz[keep & z_ha], xrange=[0.7,1.5], yrange=[-0.015, 0.02], ax=ax)
fig.savefig('zphot_zspec_lines.pdf')
def dz_trend(xin, yin, xrange=[0.7,1.5], yrange=[-0.015, 0.015], xlabel=r'$z_\mathrm{spec}$', xlog=False, ax=None, ms=3):
if ax is None:
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(xin, yin, linestyle='None', marker='o', alpha=0.2, color='white', zorder=200, ms=ms)
ax.plot(xin, yin, linestyle='None', marker='o', alpha=0.2, color='black', zorder=201, ms=ms)
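# Running median and NMAD of the residuals in bins of x; the first/last bins are padded out to
# 0.8x / 1.2x the plot limits so the shaded band spans the full axis range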
xm, ym, ys, ns = threedhst.utils.runmed(xin, yin, NBIN=12, use_nmad=True)
xm_, ym_, ys_, ns_ = threedhst.utils.runmed(xin, yin, NBIN=10, use_nmad=True)
xm[0], ym[0], ys[0], ns[0] = xrange[0]*0.8, ym_[0], ys_[0], ns_[0]
xm[1:11], ym[1:11], ys[1:11], ns[1:11] = xm_, ym_, ys_, ns_
xm[-1], ym[-1], ys[-1], ns[-1] = xrange[1]*1.2, ym_[-1], ys_[-1], ns_[-1]
ax.plot(xm, ym, color='black', alpha=0.9, zorder=101, marker='o', linewidth=3)
ax.fill_between(xm, ym+ys, ym-ys, color='black', alpha=0.4, zorder=100)
yx = ys*0+0.0025
ax.plot(xm, ym+yx, color='orange', alpha=0.9, zorder=101, linewidth=3)
ax.plot(xm, ym-yx, color='orange', alpha=0.9, zorder=101, linewidth=3)
yx = ys*0+0.005
ax.plot(xm, ym+yx, color='red', alpha=0.9, zorder=101, linewidth=3)
ax.plot(xm, ym-yx, color='red', alpha=0.9, zorder=101, linewidth=3)
ax.plot(xm, ym*0, color='white', alpha=0.8, zorder=301, linewidth=3, linestyle='--')
ax.plot(xm, ym*0, color='black', alpha=0.8, zorder=302, linewidth=3, linestyle='--')
if xlog:
ax.semilogx()
ax.set_xlim(xrange[0],xrange[1])
ax.set_ylim(yrange[0],yrange[1])
ax.set_ylabel(r'$\Delta z / (1+z)$')
ax.set_xlabel(xlabel)
def compute_SFR_limits():
import cosmocalc as cc
limiting_flux = 4.e-17
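# Convert the limiting line flux (erg/s/cm^2) to a luminosity via 4*pi*DL^2; the coefficients below
# are the standard Kennicutt (1998) H-alpha and [OII] SFR calibrations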
### z=1, H-alpha
cosmo = cc.cosmocalc(z=1.0)
SFR_ha = 7.9e-42 * limiting_flux * 4 * np.pi * cosmo['DL_cm']**2
### z=2, OII
cosmo = cc.cosmocalc(z=2.0)
SFR_oii = 1.4e-41 * limiting_flux * 4 * np.pi * cosmo['DL_cm']**2
print SFR_ha, SFR_oii
def number_counts():
import unicorn
import unicorn.catalogs
import copy
os.chdir(unicorn.GRISM_HOME+'/ANALYSIS/SURVEY_PAPER')
unicorn.catalogs.read_catalogs()
from unicorn.catalogs import zout, phot, mcat, lines, rest, gfit, zsp
keep = unicorn.catalogs.run_selection(zmin=0, zmax=8, fcontam=1, qzmin=0., qzmax=10, dr=1.0, has_zspec=False, fcovermin=0.5, fcovermax=1.0, massmin=0, massmax=15, magmin=12, magmax=27)
fields = (phot.field[phot.idx] == 'AEGIS') | (phot.field[phot.idx] == 'COSMOS') | (phot.field[phot.idx] == 'GOODS-N') | (phot.field[phot.idx] == 'GOODS-S')
#fields = (phot.field[phot.idx] == 'COSMOS') | (phot.field[phot.idx] == 'AEGIS') | (phot.field[phot.idx] == 'GOODS-S')
#fields = (phot.field[phot.idx] == 'COSMOS') | (phot.field[phot.idx] == 'AEGIS')
#fields = (phot.field[phot.idx] == 'GOODS-N')
fields = fields & (phot.fcover[phot.idx] > 0.5)
pointings = []
for field, pointing in zip(phot.field, phot.pointing):
pointings.append('%s-%d' %(field, pointing))
pointings = np.array(pointings)
NPOINT = len(np.unique(pointings[phot.idx][fields]))
xrange = (12,25)
nbin = np.int(np.round((xrange[1]-xrange[0])*10.))
binwidth = (xrange[1]-xrange[0])*1./nbin
normal = 1./binwidth/NPOINT
cumul = True
normal = 1./NPOINT
#normal = 1./NPOINT*148
##### OFFSET TO TOTAL!
m140 = phot.mag_f1392w - 0.22
#m140 = phot.mag_f1392w
#### Full histogram
y_full, x_full = np.histogram(m140[phot.idx][fields], bins=nbin, range=xrange)
x_full = (x_full[1:]+x_full[:-1])/2.
if cumul:
y_full = np.cumsum(y_full)
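# Small-number Poisson confidence limits on the (cumulative) counts, Gehrels-style upper/lower bounds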
lo_full, hi_full = threedhst.utils.gehrels(y_full)
#### Matched in photometric catalogs
matched = mcat.rmatch[mcat.idx] < 1.
matched = zout.z_peak[0::3] != zout.z_peak[2::3]
y_matched, x_matched = np.histogram(m140[phot.idx][fields & matched], bins=nbin, range=xrange)
x_matched = (x_matched[1:]+x_matched[:-1])/2.
if cumul:
y_matched = np.cumsum(y_matched)
#### point sources
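# Point sources: flux_radius below a magnitude-dependent threshold interpolated from a few (mag, radius) anchor points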
xpoint, ypoint = np.array([14,18,23]), np.array([6,3.18, 2.8])
ypoint_int = np.interp(m140, xpoint, ypoint)
points = (phot.flux_radius[phot.idx] < ypoint_int[phot.idx]) #& (m140[phot.idx] < 23)
y_points, x_points = np.histogram(m140[phot.idx][fields & matched & points], bins=nbin, range=xrange)
x_points = (x_points[1:]+x_points[:-1])/2.
if cumul:
y_points = np.cumsum(y_points)
#### Low contamination
contam = phot.fcontam[phot.idx] < 0.1
y_contam, x_contam = np.histogram(m140[phot.idx][fields & contam & matched], bins=nbin, range=xrange)
x_contam = (x_contam[1:]+x_contam[:-1])/2.
if cumul:
y_contam = np.cumsum(y_contam)
#### z > 1
z1 = (zout.z_peak[0::3] > 1) & (zout.q_z[0::3] < 50.5) & ~points
y_z1, x_z1 = np.histogram(m140[phot.idx][fields & matched & z1], bins=nbin, range=xrange)
x_z1 = (x_z1[1:]+x_z1[:-1])/2.
if cumul:
y_z1 = np.cumsum(y_z1)
lo_z1, hi_z1 = threedhst.utils.gehrels(y_z1)
#wx, wy = np.loadtxt('whitaker_completeness.dat', unpack=True)
#wscale = np.interp(x_z1, wx, wy)
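# Matched fraction per magnitude bin, guarded against zeros, NaNs and values > 1
# (the completeness correction itself is left commented out below)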
wscale = y_matched*1. / y_full
wscale[~np.isfinite(wscale)] = 1
wscale[wscale > 1] = 1
wscale[wscale == 0] = 1
#hi_z1 /= wscale
# lo_z1 /= wscale
# y_z1 /= wscale
#### No cut on Q_z
z1q = (zout.z_peak[0::3] > 1) & (zout.q_z[0::3] < 100) & ~points
y_z1q, x_z1q = np.histogram(m140[phot.idx][fields & matched & z1q], bins=nbin, range=xrange)
x_z1q = (x_z1q[1:]+x_z1q[:-1])/2.
if cumul:
y_z1q = np.cumsum(y_z1q)
#### Total number at z>1
print 'NPOINT: %d' %(NPOINT)
#z1q_mag = unicorn.catalogs.run_selection(zmin=1, zmax=5.5, fcontam=1, qzmin=0., qzmax=100, dr=1.0, has_zspec=False, fcovermin=0.5, fcovermax=1.0, massmin=0, massmax=15, magmin=0, magmax=23)
z1q_mag = z1q & fields & (m140[phot.idx] <= 23.8) & ~points
N_z1_total = len(z1q_mag[z1q_mag])*1./NPOINT*149.
N_total = len(z1q_mag[matched & fields & (m140[phot.idx] <= 23.8)])*1./NPOINT*149.
print 'N (z>1, m<23) = %d, N_total = %d' %(N_z1_total, N_total)
print 'N (z>1, m<23) = %d' %(np.interp(23.8, x_z1, y_z1*149./NPOINT))
#### z > 2
z2 = (zout.z_peak[0::3] > 2) & (zout.q_z[0::3] < 50.5) & ~points
y_z2, x_z2 = np.histogram(m140[phot.idx][fields & matched & z2], bins=nbin, range=xrange)
x_z2 = (x_z2[1:]+x_z2[:-1])/2.
if cumul:
y_z2 = np.cumsum(y_z2)
lo_z2, hi_z2 = threedhst.utils.gehrels(y_z2)
#hi_z2 /= wscale
#### Tail of bright objects in the z>2 set
tail = (zout.z_peak[0::3] > 2) & (zout.q_z[0::3] < 50.5) & ~points & fields & matched & (m140[phot.idx] < 21)
print 'z2 tail:', zout.id[0::3][tail], mcat.rmatch[mcat.idx][tail], phot.flux_radius[phot.idx][tail], np.interp(m140[phot.idx][tail], xpoint, ypoint)
#### No cut on Q_z
z2q = (zout.z_peak[0::3] > 2) & (zout.q_z[0::3] < 100) & ~points
y_z2q, x_z2q = np.histogram(m140[phot.idx][fields & matched & z2q], bins=nbin, range=xrange)
x_z2q = (x_z2q[1:]+x_z2q[:-1])/2.
if cumul:
y_z2q = np.cumsum(y_z2q)
#### NMBS comparison
cat_nmbs, zout_nmbs, fout_nmbs = unicorn.analysis.read_catalogs(root='COSMOS-1')
#nmbs_hmag = 25-2.5*np.log10(cat_nmbs.H1*cat_nmbs.Ktot/cat_nmbs.K)
#nmbs_hmag = 25-2.5*np.log10((cat_nmbs.H1+cat_nmbs.J3+cat_nmbs.J2+cat_nmbs.H2)/4.*cat_nmbs.Ktot/cat_nmbs.K)
nmbs_hmag = 25-2.5*np.log10((cat_nmbs.H1+cat_nmbs.J3)/2.*cat_nmbs.Ktot/cat_nmbs.K)
keep_nmbs = (cat_nmbs.wmin > 0.3)
y_nmbs, x_nmbs = np.histogram(nmbs_hmag[keep_nmbs], bins=nbin, range=xrange)
x_nmbs = (x_nmbs[1:]+x_nmbs[:-1])/2.
if cumul:
y_nmbs = np.cumsum(y_nmbs)
y_nmbs *= 1./(0.21*2*3600.)*4*NPOINT
z1_nmbs = (zout_nmbs.z_peak > 1) & (cat_nmbs.star_flag == 0)
y_nmbs_z1, x_nmbs_z1 = np.histogram(nmbs_hmag[keep_nmbs & z1_nmbs], bins=nbin, range=xrange)
x_nmbs_z1 = (x_nmbs_z1[1:]+x_nmbs_z1[:-1])/2.
if cumul:
y_nmbs_z1 = np.cumsum(y_nmbs_z1)
y_nmbs_z1 *= 1./(0.21*2*3600)*4*NPOINT
z2_nmbs = (zout_nmbs.z_peak > 2) & (cat_nmbs.star_flag == 0)
y_nmbs_z2, x_nmbs_z2 = np.histogram(nmbs_hmag[keep_nmbs & z2_nmbs], bins=nbin, range=xrange)
x_nmbs_z2 = (x_nmbs_z2[1:]+x_nmbs_z2[:-1])/2.
if cumul:
y_nmbs_z2 = np.cumsum(y_nmbs_z2)
y_nmbs_z2 *= 1./(0.21*2*3600)*4*NPOINT
#
nmbs_stars = (cat_nmbs.star_flag == 1)
y_nmbs_stars, x_nmbs_stars = np.histogram(nmbs_hmag[keep_nmbs & nmbs_stars], bins=nbin, range=xrange)
x_nmbs_stars = (x_nmbs_stars[1:]+x_nmbs_stars[:-1])/2.
if cumul:
y_nmbs_stars = np.cumsum(y_nmbs_stars)
y_nmbs_stars *= 1./(0.21*2*3600)*4*NPOINT
#### Make the plot
fig = unicorn.catalogs.plot_init(left=0.11, bottom=0.08, xs=3.8, right=0.09, top=0.01)
plt.rcParams['text.usetex'] = True
plt.rcParams['font.family'] = 'serif'
plt.rcParams['font.serif'] = 'Times'
ax = fig.add_subplot(111)
ax.plot(x_full, y_full*normal, color='black')
ax.fill_between(x_full,lo_full*normal, hi_full*normal, color='black', alpha=0.4)
ax.plot(x_matched, y_matched*normal, color='blue',alpha=0.8)
ax.plot(x_contam, y_contam*normal, color='green',alpha=0.8)
ax.plot(x_points[x_points <= 23], y_points[x_points <= 23]*normal, color='purple',alpha=0.8)
ax.plot(x_points[x_points >= 23], y_points[x_points >= 23]*normal, color='purple',alpha=0.8, linestyle=':')
ax.plot(x_z1, y_z1*normal, color='orange',alpha=0.7)
ax.fill_between(x_z1,lo_z1*normal, hi_z1*normal, color='orange', alpha=0.4)
ax.plot(x_z1q, y_z1q*normal, color='orange',alpha=0.7, linestyle='--')
ax.plot(x_z2, y_z2*normal, color='red',alpha=0.7)
ax.fill_between(x_z2,lo_z2*normal, hi_z2*normal, color='red', alpha=0.4)
ax.plot(x_z2q, y_z2q*normal, color='red',alpha=0.7, linestyle='--')
# ax.plot(x_nmbs, y_nmbs*normal, color='black',alpha=0.8, linewidth=3, alpha=0.2)
# ax.plot(x_nmbs_z1, y_nmbs_z1*normal, color='orange',alpha=0.8, linewidth=3, alpha=0.2)
# ax.plot(x_nmbs_z2, y_nmbs_z2*normal, color='red',alpha=0.8, linewidth=3, alpha=0.2)
# ax.plot(x_nmbs_stars, y_nmbs_stars*normal, color='purple',alpha=0.8, linewidth=3, alpha=0.2)
#ax.text(0.05,0.92,r'%s ($N=%d$)' %(', '.join(np.unique(phot.field[phot.idx][fields])), NPOINT), color='black', transform=ax.transAxes)
ax.text(0.05,0.92,r'Total, from $N=%d$ pointings' %(NPOINT), color='black', transform=ax.transAxes)
ax.text(0.05,0.87,r'matched', color='blue', transform=ax.transAxes)
ax.text(0.05,0.82,r'$f_\mathrm{contam} < 10\%$', color='green', transform=ax.transAxes)
ax.text(0.05,0.77,r'point sources', color='purple', transform=ax.transAxes)
ax.text(0.05,0.72,r'$z > 1$', color='orange', transform=ax.transAxes)
ax.text(0.05,0.67,r'$z > 2$', color='red', transform=ax.transAxes)
ax.set_xlabel('MAG\_AUTO (F140W $\sim$ $H$)')
if cumul:
ax.set_ylabel('N($<m$) per WFC3 pointing')
else:
ax.set_ylabel('N / pointing / mag')
ax.semilogy()
yticks = [r'$0.01$',r'$0.1$',r'$1$',r'$10$',r'$10^{2}$']
ax.set_yticklabels(yticks)
ytick = ax.set_yticks([0.01,0.1,1,10,100])
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
minorLocator = MultipleLocator(1)
ax.xaxis.set_minor_locator(minorLocator)
ax.set_xlim(xrange[0], xrange[1])
ax.set_ylim(0.01, 500)
ax2 = ax.twinx()
ax2.semilogy()
yticks = [r'$10$',r'$10^{2}$',r'$10^{3}$',r'$10^{4}$']
ax2.set_yticklabels(yticks)
ytick = ax2.set_yticks([10,100,1000,1.e4])
ax2.set_ylim(0.01*149, 500*149)
ax2.set_ylabel('N($<m$), full survey')
ax2.set_xlim(xrange[0], xrange[1])
ax2.xaxis.set_minor_locator(minorLocator)
### Grid
ax.xaxis.grid(alpha=0.35, zorder=1, which='major')
ax.xaxis.grid(alpha=0.2, zorder=1, which='minor')
ax2.yaxis.grid(alpha=0.35, zorder=1, which='major')
fig.savefig('number_counts.pdf')
plt.rcParams['text.usetex'] = False
def ancillary_matches():
"""
Get an idea of how the matching to the ancillary catalogs depends on mag:
matched fraction
multiple matches
"""
import unicorn
import unicorn.catalogs
import copy
os.chdir(unicorn.GRISM_HOME+'/ANALYSIS/SURVEY_PAPER')
unicorn.catalogs.read_catalogs()
from unicorn.catalogs import zout, phot, mcat, lines, rest, gfit, zsp
keep = unicorn.catalogs.run_selection(zmin=0, zmax=8, fcontam=1, qzmin=0., qzmax=10, dr=1.0, has_zspec=False, fcovermin=0.5, fcovermax=1.0, massmin=0, massmax=15, magmin=12, magmax=27)
fields = (phot.field[phot.idx] == 'AEGIS') | (phot.field[phot.idx] == 'COSMOS') | (phot.field[phot.idx] == 'GOODS-N') | (phot.field[phot.idx] == 'GOODS-S')
phot_dr = np.zeros(phot.field.shape)+100
phot_id = np.zeros(phot.field.shape)
phot_kmag = np.zeros(phot.field.shape)
idx = np.arange(phot.field.shape[0])
#### Do separate matching again on every object in photometric catalog
for field in ['COSMOS','AEGIS','GOODS-S','GOODS-N']:
this = phot.field == field
cat, zout, fout = unicorn.analysis.read_catalogs(field+'-1')
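# cos(dec)^2 factor corrects the RA separation for spherical geometry in the small-angle distance below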
cos_dec = np.cos(np.median(cat.dec)/360.*2*np.pi)**2
for i in idx[this]:
print unicorn.noNewLine+'%d / %d' %(i, idx[this][-1])
dr = np.sqrt((cat.ra-phot.x_world[i])**2*cos_dec+(cat.dec-phot.y_world[i])**2)*3600.
ma = dr == dr.min()
phot_dr[i] = dr.min()
phot_id[i] = cat.id[ma][0]
phot_kmag[i] = cat.kmag[ma][0]
#### Ask, "what fraction of F140W objects have multiple matches to the same ancillary object"
n_match = phot_dr*0
n_brighter = phot_dr*0.
i = 0
base_selection = (phot.fcover > 0.5) & (phot.has_spec == 1) & (phot_dr < 1.0)
for f,id,m,p in zip(phot.field, phot_id, phot.mag_f1392w,phot.pointing):
print unicorn.noNewLine+'%d / %d' %(i, len(phot_id))
mat = (phot.field == f) & (phot_id == id) & (phot.pointing == p) & base_selection
n_match[i] = mat.sum()
brighter = mat & (phot.mag_f1392w-m < 0.75)
n_brighter[i] = brighter.sum()-1
i = i+1
use = n_match > 0
yh_full, xh_full = np.histogram(phot.mag_f1392w[use], range=(12,26), bins=14*4)
fig = unicorn.plotting.plot_init(square=True, use_tex=True, left=0.09, bottom=0.07, xs=3.5)
ax = fig.add_subplot(111)
yh_n, xh_n = np.histogram(phot.mag_f1392w[use & (n_match > 1)], range=(12,26), bins=14*4)
ax.plot(xh_n[1:]-0.22, yh_n*1./yh_full, color='blue', linestyle='steps', label=r'$N_{\rm match} > 1$')
yh_n, xh_n = np.histogram(phot.mag_f1392w[use & (n_match > 1) & (n_brighter == 1)], range=(12,26), bins=14*4)
ax.plot(xh_n[1:]-0.22, yh_n*1./yh_full, color='red', linestyle='steps', label=r'$N_{\rm brighter} = 1$')
yh_n, xh_n = np.histogram(phot.mag_f1392w[use & (n_match > 1) & (n_brighter > 1)], range=(12,26), bins=14*4)
ax.plot(xh_n[1:]-0.22, yh_n*1./yh_full, color='orange', linestyle='steps', label=r'$N_{\rm brighter} > 1$')
ax.set_xlabel(r'$m_{140}$')
ax.set_ylabel('fraction')
ax.set_xlim(19,24.5)
ax.set_ylim(0,0.21)
ax.legend(loc='upper left', frameon=False)
unicorn.plotting.savefig(fig, 'ancillary_matched_from_f140w.pdf')
#### Check these cases of n_brighter == 1
test = (n_match > 0) & (n_brighter == 1)
idx = np.arange(len(n_match))[test]
i = 0
id = phot_id[idx][i]
mat = base_selection & (phot.field == phot.field[idx][i]) & (phot_id == phot_id[idx][i])
### Some pointings, such as the GOODS-S flanking fields, don't overlap with the photometric catalog
test_field_goodsn = (phot.field == 'GOODS-N')
test_field_goodss = (phot.field == 'GOODS-S') & (phot.pointing != 1) & (phot.pointing != 28)
test_field_cosmos = phot.field == 'COSMOS'
test_field_aegis = phot.field == 'AEGIS' ### out of NMBS
for i in [11,2,1,6]:
test_field_aegis = test_field_aegis & (phot.pointing != i)
fig = unicorn.plotting.plot_init(square=True, use_tex=True, left=0.09, bottom=0.07, xs=3.5)
ax = fig.add_subplot(111)
#### Make a plot showing the fraction of matched galaxies
for test_field, c in zip([test_field_goodsn, test_field_goodss, test_field_cosmos, test_field_aegis], ['orange','red','blue','green']):
base_selection = (phot.fcover > 0.5) & test_field & (phot.has_spec == 1)
has_match = phot_dr < 1.0
yh_full, xh_full = np.histogram(phot.mag_f1392w[base_selection], range=(12,26), bins=14*4)
yh_mat, xh_mat = np.histogram(phot.mag_f1392w[base_selection & has_match], range=(12,26), bins=14*4)
yh_full, yh_mat = np.maximum(yh_full, 0.01), np.maximum(yh_mat, 0.01)
# plt.plot(xh_full[1:], yh_full, linestyle='steps', color='blue', alpha=0.5)
# plt.plot(xh_mat[1:], yh_mat, linestyle='steps', color='red', alpha=0.5)
# plt.semilogy()
# plt.ylim(0.5,500)
#
ax.plot(xh_full[1:]-0.22, yh_mat/yh_full, linestyle='-', linewidth=3, color=c, alpha=0.5, label=np.unique(phot.field[test_field])[0])
ax.legend(loc='lower left')
ax.plot([0,100],[1,1], color='black', linestyle='-', alpha=0.8, linewidth=2)
ax.plot([0,100],[0.9,0.9], color='black', linestyle=':', alpha=0.8, linewidth=2)
ax.set_ylim(0,1.1)
ax.set_xlim(21,25.)
ax.set_xlabel(r'$m_{140}$')
ax.set_ylabel(r'Matched fraction')
unicorn.plotting.savefig(fig, 'ancillary_matched_fraction.pdf')
#### Look at multiple matches
base_selection = (phot.fcover > 0.5) & (phot.has_spec == 1)
use = base_selection & test_field_cosmos & (phot_dr < 1.0)
plt.scatter(phot.mag_f1392w[use], phot_kmag[use], color='blue', alpha=0.1, s=10)
matched_id = np.unique(phot_id[use])
kmag = matched_id*0.
dmag1 = matched_id*0.+100
dmag2 = matched_id*0.+100
N = matched_id*0
for ii, id in enumerate(matched_id):
print unicorn.noNewLine+'%d / %d' %(ii, len(matched_id))
this = (phot_id == id) & use
dmag = phot.mag_f1392w[this]-phot_kmag[this]
kmag[ii] = phot_kmag[this][0]
dmag1[ii] = dmag[0]
N[ii] = this.sum()
if this.sum() > 1:
so = np.argsort(dmag)
dmag2[ii] = dmag[so][1]
#
fig = unicorn.plotting.plot_init(square=True, use_tex=True, left=0.09, bottom=0.07, xs=3.5)
ax = fig.add_subplot(111)
ax.scatter(kmag, dmag1-0.22, color='blue', alpha=0.2, s=10, label='1st matched')
ax.scatter(kmag, dmag2-0.22, color='red', alpha=0.2, s=10, label='2nd matched')
ax.set_xlim(17,24)
ax.set_ylim(-1,5)
ax.legend(loc='upper left')
ax.set_xlabel(r'$K_\mathrm{matched}$')
ax.set_ylabel(r'$m_{140} - K_\mathrm{matched}$')
unicorn.plotting.savefig(fig,'ancillary_delta_mag.pdf')
### Show the fraction of ancillary objects that have multiple matches as a function of magnitude
fig = unicorn.plotting.plot_init(square=True, use_tex=True, left=0.09, bottom=0.07, xs=3.5)
ax = fig.add_subplot(111)
yh_full, xh_full = np.histogram(kmag, range=(17,24), bins=7*4)
yh, xh = np.histogram(kmag[dmag2 < 1.2], range=(17,24), bins=7*4)
ax.plot(xh[1:], yh*1./yh_full, linestyle='steps', color='red', linewidth=3, alpha=0.5, label=r'$\Delta 2^{\rm nd} < 1.2$')
yh, xh = np.histogram(kmag[N > 1], range=(17,24), bins=7*4)
ax.plot(xh[1:], yh*1./yh_full, linestyle='steps', color='red', linewidth=3, alpha=0.3, label=r'$N_\mathrm{match} > 1$')
yh, xh = np.histogram(kmag[N > 3], range=(17,24), bins=7*4)
ax.plot(xh[1:], yh*1./yh_full, linestyle='steps', color='blue', linewidth=3, alpha=0.5, label=r'$N_\mathrm{match} > 3$')
ax.set_xlabel(r'$K_\mathrm{matched}$')
ax.set_ylabel(r'fraction')
ax.legend(loc='upper left', prop=matplotlib.font_manager.FontProperties(size=9))
unicorn.plotting.savefig(fig,'ancillary_multiple_fraction.pdf')
def get_iband_mags():
"""
On Unicorn, loop through the ascii spectra to retrieve the i-band mags; these should all be at ZP=25.
"""
os.chdir(unicorn.GRISM_HOME+'ANALYSIS/')
unicorn.catalogs.read_catalogs()
from unicorn.catalogs import zout, phot, mcat, lines, rest, gfit, zsp
ids = zout.id[0::3]
fields = phot.field[phot.idx]
iflux = zout.z_peak[0::3]*0.-1
imod = iflux*1.
lc_i = iflux*1.
hflux = iflux*1
hmod = iflux*1.
lc_h = iflux*1
count = 0
for id, field in zip(ids, fields):
path = unicorn.GRISM_HOME+'ANALYSIS/REDSHIFT_FITS_v1.6/ASCII/%s/%s_obs_sed.dat' %(field, id)
if os.path.exists(path):
print unicorn.noNewLine+id
obs = catIO.Readfile(path)
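# Flag grism-spectrum points by their (nearly) uniform wavelength spacing, then take the
# fluxes closest to the i (7688 A) and H (1.63 micron) wavelengths from the photometric points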
dlam_spec = obs.lc[-1]-obs.lc[-2]
is_spec = np.append(np.abs(1-np.abs(obs.lc[1:]-obs.lc[0:-1])/dlam_spec) < 0.05,True)
dl_i = np.abs(obs.lc-7688.1)
dl_h = np.abs(obs.lc[~is_spec]-1.6315e4)
ix_i = np.where(dl_i == dl_i.min())[0][0]
ix_h = np.where(dl_h == dl_h.min())[0][0]
iflux[count] = obs.fnu[ix_i]
imod[count] = obs.obs_sed[ix_i]
lc_i[count] = obs.lc[ix_i]
hflux[count] = obs.fnu[ix_h]
hmod[count] = obs.obs_sed[ix_h]
lc_h[count] = obs.lc[ix_h]
#
count = count+1
fp = open('full_imag_hmag.dat','w')
fp.write('# id iflux imodel lc_i hflux hmodel lc_h\n')
for i in range(len(ids)):
fp.write('%s %.5e %.5e %.1f %.5e %.5e %.1f\n' %(ids[i], iflux[i], imod[i], lc_i[i], hflux[i], hmod[i], lc_h[i]))
fp.close()
def zspec_colors():
"""
Show, as a function of H and (i-H), where the galaxies with zspec fall
"""
import unicorn
import unicorn.catalogs
import copy
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
os.chdir(unicorn.GRISM_HOME+'/ANALYSIS/SURVEY_PAPER')
unicorn.catalogs.read_catalogs()
from unicorn.catalogs import zout, phot, mcat, lines, rest, gfit
if unicorn.catalogs.zsp is None:
unicorn.catalogs.make_specz_catalog()
zsp = unicorn.catalogs.zsp
maglim = 25
qzmax = 200
contam_max = 0.5
###### Selection criteria
keep = (phot.mag_f1392w[phot.idx] < maglim) & (phot.fcontam[phot.idx] < contam_max) & (zout.q_z[0::3] < qzmax) & (phot.fcover[phot.idx] > 0.9) & (mcat.logm[mcat.idx] > 0) & (mcat.rmatch[mcat.idx] < 0.5) #& (zsp.zspec[zsp.mat_idx] > 0) & (zsp.dr < 1)
keep = keep & (zout.q_z[0::3] != zout.q_z[2::3])
has_specz = (zsp.zspec[zsp.mat_idx] > 0) & (zsp.dr < 1)
#mag, radius = np.cast[float](cat.MAG_AUTO), np.cast[float](cat.FLUX_RADIUS)
#### Find isolated point sources
points = (phot.flux_radius[phot.idx] < 2.7)
keep = keep & (~points)
zphot = zout.z_peak[0::3]
##### H mags from F140W and matches
icat = catIO.Readfile('full_imag_hmag.dat')
IH = -2.5*np.log10(icat.iflux / icat.hflux)
phot_zp = zphot*0.+25
phot_zp[(phot.field[phot.idx] == 'GOODS-S') | (phot.field[phot.idx] == 'PRIMO') | (phot.field[phot.idx] == 'WFC3-ERSII-G01') | (phot.field[phot.idx] == 'GEORGE')] = 23.86
m140 = phot.mag_f1392w[phot.idx]-0.22 #### Offset to total in catalogs!
hmag = phot_zp-2.5*np.log10(icat.hflux)
fin = np.isfinite(hmag) & (icat.iflux > 0) & (mcat.rmatch[mcat.idx] < 1)
#### A few weird objects with very discrepant H mags in GOODS-N
bad = (zout.z_peak[0::3] < 1) & (IH > 3.5)
fin = fin & (~bad)
######### Compare h mags
# use = fin
#
# use = (phot.field[phot.idx] == 'GOODS-S') | (phot.field[phot.idx] == 'PRIMO') | (phot.field[phot.idx] == 'WFC3-ERSII-G01') | (phot.field[phot.idx] == 'GEORGE')
# use = phot.field[phot.idx] == 'GOODS-N'
#
# dmag = m140-hmag
# plt.plot(m140[use & fin], dmag[use & fin], marker='o', linestyle='None', alpha=0.5)
# plt.plot([0,30],[0,0], color='black', alpha=0.5)
# plt.xlim(15,25)
# plt.ylim(-2,2)
#
# plt.plot(phot.kron_radius[phot.idx][use & fin], dmag[use & fin], marker='o', linestyle='None', alpha=0.2)
# xm, ym, ys, ns = threedhst.utils.runmed(phot.kron_radius[phot.idx][use & fin & (m140 < 23)], dmag[use & fin & (m140 < 23)], NBIN=30)
# plt.plot(xm, ym, color='orange', linewidth=2)
#
# plt.xlim(3,8)
# plt.ylim(-2,2)
#
#
# plt.plot(phot.kron_radius[phot.idx][use & fin], m140[use & fin], marker='o', linestyle='None', alpha=0.2)
# plt.xlim(3,8)
# plt.ylim(16,25)
########## H vs I-H
field = phot.field[phot.idx] != 'xx'
fields = {'COSMOS': ['COSMOS'], 'AEGIS': ['AEGIS'], 'GOODS-N':['GOODS-N'], 'GOODS-S':['GOODS-S','PRIMO','WFC3-ERSII-G01','GEORGE']}
field_use = 'COSMOS'
ix = 220
fig = unicorn.catalogs.plot_init(square=True, xs=8, aspect=1./2, left=0.1, right=0.12, bottom=0.10, top=0.01, fontsize=10)
fig.subplots_adjust(wspace=0.01,hspace=0.02, left=0.05, right=0.94, bottom=0.10, top=0.98)
#ax = fig.add_subplot(111)
for field_use in ['AEGIS','COSMOS','GOODS-N','GOODS-S']:
ix += 1
ax = fig.add_subplot(ix)
field = phot.field[phot.idx] == 'xx'
print field_use
for mat in fields[field_use]:
field = field | (phot.field[phot.idx] == mat)
ms = 6
plt.rcParams['text.usetex'] = True
plt.rcParams['font.family'] = 'serif'
plt.rcParams['font.serif'] = 'Times'
ax2 = ax.twinx()
ax.plot(m140[fin & keep & ~has_specz & field], IH[fin & keep & ~has_specz & field], marker='.', linestyle='None', color='black', alpha=0.1, ms=ms)
ax.plot(m140[fin & keep & has_specz & field], IH[fin & keep & has_specz & field], marker='.', linestyle='None', color='green', alpha=0.5, ms=ms)
#ax.plot(m140[fin & keep & field & (zout.z_peak[0::3] > 1)], IH[fin & keep & field & (zout.z_peak[0::3] > 1)], marker='o', linestyle='None', color='orange', alpha=0.5, ms=ms)
ax.plot(np.array([10,30]), 22.5-np.array([10,30]), color='black', alpha=0.5, linewidth=3, linestyle='--')
#ax.plot(np.array([10,30]), 24-np.array([10,30]), color='orange', alpha=0.5, linewidth=3)
ax.plot(np.array([10,30]), [2.25, 2.25], color='purple', alpha=0.8, linewidth=3)
#### Fraction histograms
z1_red = IH > 2.25
yh_a, xh_a = np.histogram(m140[fin & keep & field & ~z1_red], range=(16,25), bins=18)
yh_z, xh_z = np.histogram(m140[fin & keep & field & has_specz & ~z1_red], range=(16,25), bins=18)
show = yh_a > 0
ax2.plot((xh_a[1:]+xh_a[:-1])[show]/2., (yh_z*1./yh_a)[show], color='white', linewidth=4, alpha=0.7, linestyle='steps-mid')
ax2.plot((xh_a[1:]+xh_a[:-1])[show]/2., (yh_z*1./yh_a)[show], color='blue', linewidth=3, alpha=0.8, linestyle='steps-mid')
yh_a, xh_a = np.histogram(m140[fin & keep & field & z1_red], range=(16,25), bins=18)
yh_z, xh_z = np.histogram(m140[fin & keep & field & has_specz & z1_red], range=(16,25), bins=18)
show = yh_a > 0
ax2.plot((xh_a[1:]+xh_a[:-1])[show]/2., (yh_z*1./yh_a)[show], color='white', linewidth=4, alpha=0.7, linestyle='steps-mid')
ax2.plot((xh_a[1:]+xh_a[:-1])[show]/2., (yh_z*1./yh_a)[show], color='red', linewidth=3, alpha=0.8, linestyle='steps-mid')
ax.text(0.95, 0.88, field_use, transform=ax.transAxes, fontsize=12, backgroundcolor='white', horizontalalignment='right')
if field_use == 'AEGIS':
ax.text(19,3.5, r'$i=22.5$', horizontalalignment='center', verticalalignment='bottom', rotation=-30)
if field_use == 'GOODS-N':
ax.text(17.5,2.4, r'$\uparrow\ z > 1$, red $\uparrow$', horizontalalignment='left', verticalalignment='bottom')
minorLocator = MultipleLocator(1)
ax.xaxis.set_minor_locator(minorLocator)
ax.set_xlim(17,24.5)
ax.set_ylim(-0.5,5.2)
minorLocator = MultipleLocator(0.1)
ax2.yaxis.set_minor_locator(minorLocator)
ax2.set_xlim(17.1,24.5)
ax2.set_ylim(-0.1,1.1)
if field_use in ['AEGIS','COSMOS']:
ax.set_xticklabels([])
else:
ax.set_xlabel(r'$m_{140}\ \sim\ H$')
if field_use in ['COSMOS','GOODS-S']:
ax.set_yticklabels([])
else:
ax.set_ylabel(r'$(i-H)$')
#
if field_use in ['AEGIS','GOODS-N']:
ax2.set_yticklabels([])
else:
ax2.set_ylabel(r'$f(z_\mathrm{spec})$')
#fig.savefig('zspec_fraction_%s.pdf' %(field_use))
fig.savefig('zspec_fraction_all.pdf')
#plt.plot(zout.z_peak[0::3][fin], IH[fin], marker='o', linestyle='None', color='black', alpha=0.1, ms=4)
# plt.plot(zout.z_peak[0::3][fin & keep & field], IH[fin & keep & field], marker='o', linestyle='None', color='black', alpha=0.1, ms=ms)
# plt.plot(zout.z_peak[0::3][fin & keep & has_specz & field], IH[fin & keep & has_specz & field], marker='o', linestyle='None', color='blue', alpha=0.5, ms=ms)
# plt.plot(np.array([0,30]), [2.25, 2.25], color='red', alpha=0.8, linewidth=3)
#
#
# z1_red = IH > 2.25
# yh_a, xh_a = np.histogram(zout.z_peak[0::3][fin & keep & field & ~z1_red], range=(0,4), bins=8)
# yh_z, xh_z = np.histogram(zout.z_peak[0::3][fin & keep & field & has_specz & ~z1_red], range=(0,4), bins=8)
# show = yh_a > 0
# plt.plot((xh_a[1:]+xh_a[:-1])[show]/2., (yh_z*1./yh_a)[show], color='blue', linewidth=3, alpha=0.8)
#
# yh_a, xh_a = np.histogram(zout.z_peak[0::3][fin & keep & field & z1_red], range=(0,4), bins=8)
# yh_z, xh_z = np.histogram(zout.z_peak[0::3][fin & keep & field & has_specz & z1_red], range=(0,4), bins=8)
# show = yh_a > 0
# plt.plot((xh_a[1:]+xh_a[:-1])[show]/2., (yh_z*1./yh_a)[show], color='red', linewidth=3, alpha=0.8)
#
# plt.xlim(0,4)
# plt.ylim(-0.5,5)
def find_brown_dwarf():
import unicorn
import unicorn.catalogs
import copy
os.chdir(unicorn.GRISM_HOME+'/ANALYSIS/SURVEY_PAPER')
unicorn.catalogs.read_catalogs()
from unicorn.catalogs import zout, phot, mcat, lines, rest, gfit, zsp
int_lam = np.array([0.77e4, 1.25e4, 2.1e4])
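# Approximate i, J, K effective wavelengths (Angstroms) used for the color-color selection below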
fp = open('stars_ijk.dat','w')
fp.write('# id ii jj kk\n')
##### Known Brown dwarf
object = 'AEGIS-3-G141_00195'
lambdaz, temp_sed, lci, obs_sed, fobs, efobs = eazy.getEazySED(0, MAIN_OUTPUT_FILE='%s' %(object), OUTPUT_DIRECTORY=unicorn.GRISM_HOME+'/ANALYSIS/REDSHIFT_FITS_v1.6/OUTPUT/', CACHE_FILE = 'Same')
dlam_spec = lci[-1]-lci[-2]
is_spec = np.append(np.abs(1-np.abs(lci[1:]-lci[0:-1])/dlam_spec) < 0.05,True)
so = np.argsort(lci[~is_spec])
yint = np.interp(int_lam, lci[~is_spec][so], fobs[~is_spec][so])/(int_lam/5500.)**2
fp.write('%s %.3e %.3e %.3e\n' %(object, yint[0], yint[1], yint[2]))
###### Loop through all point sources
stars = (phot.flux_radius[phot.idx] < 3) & (phot.mag_f1392w[phot.idx] < 24) & (mcat.rmatch[mcat.idx] < 0.5)
for object in phot.id[phot.idx][stars]:
print unicorn.noNewLine+'%s' %(object)
try:
lambdaz, temp_sed, lci, obs_sed, fobs, efobs = eazy.getEazySED(0, MAIN_OUTPUT_FILE='%s' %(object), OUTPUT_DIRECTORY=unicorn.GRISM_HOME+'/ANALYSIS/REDSHIFT_FITS_v1.6/OUTPUT/', CACHE_FILE = 'Same')
except:
pass
#
dlam_spec = lci[-1]-lci[-2]
is_spec = np.append(np.abs(1-np.abs(lci[1:]-lci[0:-1])/dlam_spec) < 0.05,True)
so = np.argsort(lci[~is_spec])
yint = np.interp(int_lam, lci[~is_spec][so], fobs[~is_spec][so])/(int_lam/5500.)**2
fp.write('%s %.3e %.3e %.3e\n' %(object, yint[0], yint[1], yint[2]))
fp.close()
if (1 == 0):
ijk = catIO.Readfile('stars_ijk.dat')
ij = -2.5*np.log10(ijk.ii/ijk.jj)
jk = -2.5*np.log10(ijk.jj/ijk.kk)
plt.plot(ij, jk, marker='o', markersize=3, color='black', alpha=0.8, linestyle='None')
plt.plot(ij[0], jk[0], marker='o', markersize=8, color='red', alpha=0.5, linestyle='None')
mat = ijk.id == 'AEGIS-11-G141_00314'
plt.plot(ij[mat], jk[mat], marker='o', markersize=8, color='orange', alpha=0.5, linestyle='None')
bd = phot.id[phot.idx] == 'x'
for i, obj in enumerate(ijk.id):
bd[phot.id[phot.idx] == obj] = (ij[i] > -0.) & (jk[i] < -1.7)
#
bdf = unicorn.analysis.BD_fit()
for obj in phot.id[phot.idx][bd]:
bdf.fit('/Users/gbrammer/Sites_GLOBAL/P/GRISM/ascii/%s.dat' %(obj), chi2_limit=100, trim_mtype=False, max_contam=0.8)
unicorn.catalogs.make_selection_catalog(bd, filename='massive_lines.cat', make_html=True)
os.system('rsync -avz massive_lines* ~/Sites_GLOBAL/P/GRISM_v1.6/ANALYSIS/')
| mit |
marshallmcdonnell/interactive_plotting | matplotlib/event_handling_legend.py | 1 | 1315 | #!/usr/bin/env python
"""
Enable picking on the legend to toggle the original line on and off
"""
import numpy as np
import matplotlib.pyplot as plt
t = np.arange(0.0, 0.2, 0.1)
y1 = 2*np.sin(2*np.pi*t)
y2 = 4*np.sin(2*np.pi*2*t)
fig, ax = plt.subplots()
ax.set_title('Click on legend line to toggle line on/off')
line1, = ax.plot(t, y1, lw=2, color='red', label='1 HZ')
line2, = ax.plot(t, y2, lw=2, color='blue', label='2 HZ')
leg = ax.legend(loc='upper left', fancybox=True, shadow=True)
leg.get_frame().set_alpha(0.4)
# we will set up a dict mapping legend line to orig line, and enable
# picking on the legend line
lines = [line1, line2]
lined = dict()
for legline, origline in zip(leg.get_lines(), lines):
legline.set_picker(5) # 5 pts tolerance
lined[legline] = origline
def onpick(event):
# on the pick event, find the orig line corresponding to the
# legend proxy line, and toggle the visibility
legline = event.artist
origline = lined[legline]
vis = not origline.get_visible()
origline.set_visible(vis)
# Change the alpha on the line in the legend so we can see what lines
# have been toggled
if vis:
legline.set_alpha(1.0)
else:
legline.set_alpha(0.2)
fig.canvas.draw()
fig.canvas.mpl_connect('pick_event', onpick)
plt.show()
| mit |
datapythonista/pandas | pandas/tests/extension/test_string.py | 2 | 5354 | """
This file contains a minimal set of tests for compliance with the extension
array interface test suite, and should contain no other tests.
The test suite for the full functionality of the array is located in
`pandas/tests/arrays/`.
The tests in this file are inherited from the BaseExtensionTests, and only
minimal tweaks should be applied to get the tests passing (by overwriting a
parent method).
Additional tests should either be added to one of the BaseExtensionTests
classes (if they are relevant for the extension interface for all dtypes), or
be added to the array-specific tests in `pandas/tests/arrays/`.
"""
import string
import numpy as np
import pytest
import pandas.util._test_decorators as td
import pandas as pd
from pandas.core.arrays.string_ import StringDtype
from pandas.core.arrays.string_arrow import ArrowStringDtype
from pandas.tests.extension import base
def split_array(arr):
if not isinstance(arr.dtype, ArrowStringDtype):
pytest.skip("chunked array n/a")
def _split_array(arr):
import pyarrow as pa
arrow_array = arr._data
split = len(arrow_array) // 2
arrow_array = pa.chunked_array(
[*arrow_array[:split].chunks, *arrow_array[split:].chunks]
)
assert arrow_array.num_chunks == 2
return type(arr)(arrow_array)
return _split_array(arr)
@pytest.fixture(params=[True, False])
def chunked(request):
return request.param
@pytest.fixture(
params=[
StringDtype,
pytest.param(
ArrowStringDtype, marks=td.skip_if_no("pyarrow", min_version="1.0.0")
),
]
)
def dtype(request):
return request.param()
@pytest.fixture
def data(dtype, chunked):
strings = np.random.choice(list(string.ascii_letters), size=100)
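# The base extension tests require data[0] != data[1], so redraw until the first two entries differ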
while strings[0] == strings[1]:
strings = np.random.choice(list(string.ascii_letters), size=100)
arr = dtype.construct_array_type()._from_sequence(strings)
return split_array(arr) if chunked else arr
@pytest.fixture
def data_missing(dtype, chunked):
"""Length 2 array with [NA, Valid]"""
arr = dtype.construct_array_type()._from_sequence([pd.NA, "A"])
return split_array(arr) if chunked else arr
@pytest.fixture
def data_for_sorting(dtype, chunked):
arr = dtype.construct_array_type()._from_sequence(["B", "C", "A"])
return split_array(arr) if chunked else arr
@pytest.fixture
def data_missing_for_sorting(dtype, chunked):
arr = dtype.construct_array_type()._from_sequence(["B", pd.NA, "A"])
return split_array(arr) if chunked else arr
@pytest.fixture
def na_value():
return pd.NA
@pytest.fixture
def data_for_grouping(dtype, chunked):
arr = dtype.construct_array_type()._from_sequence(
["B", "B", pd.NA, pd.NA, "A", "A", "B", "C"]
)
return split_array(arr) if chunked else arr
class TestDtype(base.BaseDtypeTests):
pass
class TestInterface(base.BaseInterfaceTests):
def test_view(self, data, request):
if isinstance(data.dtype, ArrowStringDtype):
mark = pytest.mark.xfail(reason="not implemented")
request.node.add_marker(mark)
super().test_view(data)
class TestConstructors(base.BaseConstructorsTests):
pass
class TestReshaping(base.BaseReshapingTests):
def test_transpose(self, data, dtype, request):
if isinstance(dtype, ArrowStringDtype):
mark = pytest.mark.xfail(reason="not implemented")
request.node.add_marker(mark)
super().test_transpose(data)
class TestGetitem(base.BaseGetitemTests):
pass
class TestSetitem(base.BaseSetitemTests):
def test_setitem_preserves_views(self, data, dtype, request):
if isinstance(dtype, ArrowStringDtype):
mark = pytest.mark.xfail(reason="not implemented")
request.node.add_marker(mark)
super().test_setitem_preserves_views(data)
class TestMissing(base.BaseMissingTests):
pass
class TestNoReduce(base.BaseNoReduceTests):
@pytest.mark.parametrize("skipna", [True, False])
def test_reduce_series_numeric(self, data, all_numeric_reductions, skipna):
op_name = all_numeric_reductions
if op_name in ["min", "max"]:
return None
s = pd.Series(data)
with pytest.raises(TypeError):
getattr(s, op_name)(skipna=skipna)
class TestMethods(base.BaseMethodsTests):
@pytest.mark.skip(reason="returns nullable")
def test_value_counts(self, all_data, dropna):
return super().test_value_counts(all_data, dropna)
@pytest.mark.skip(reason="returns nullable")
def test_value_counts_with_normalize(self, data):
pass
class TestCasting(base.BaseCastingTests):
pass
class TestComparisonOps(base.BaseComparisonOpsTests):
def _compare_other(self, s, data, op_name, other):
result = getattr(s, op_name)(other)
expected = getattr(s.astype(object), op_name)(other).astype("boolean")
self.assert_series_equal(result, expected)
def test_compare_scalar(self, data, all_compare_operators):
op_name = all_compare_operators
s = pd.Series(data)
self._compare_other(s, data, op_name, "abc")
class TestParsing(base.BaseParsingTests):
pass
class TestPrinting(base.BasePrintingTests):
pass
class TestGroupBy(base.BaseGroupbyTests):
pass
| bsd-3-clause |
russel1237/scikit-learn | sklearn/svm/tests/test_svm.py | 70 | 31674 | """
Testing for Support Vector Machine module (sklearn.svm)
TODO: remove hard coded numerical results when possible
"""
import numpy as np
import itertools
from numpy.testing import assert_array_equal, assert_array_almost_equal
from numpy.testing import assert_almost_equal
from scipy import sparse
from nose.tools import assert_raises, assert_true, assert_equal, assert_false
from sklearn.base import ChangedBehaviorWarning
from sklearn import svm, linear_model, datasets, metrics, base
from sklearn.cross_validation import train_test_split
from sklearn.datasets import make_classification, make_blobs
from sklearn.metrics import f1_score
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.utils import check_random_state
from sklearn.utils import ConvergenceWarning
from sklearn.utils.validation import NotFittedError
from sklearn.utils.testing import assert_greater, assert_in, assert_less
from sklearn.utils.testing import assert_raises_regexp, assert_warns
from sklearn.utils.testing import assert_warns_message, assert_raise_message
from sklearn.utils.testing import ignore_warnings
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
Y = [1, 1, 1, 2, 2, 2]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [1, 2, 2]
# also load the iris dataset
iris = datasets.load_iris()
rng = check_random_state(42)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
def test_libsvm_parameters():
# Test parameters on classes that make use of libsvm.
clf = svm.SVC(kernel='linear').fit(X, Y)
assert_array_equal(clf.dual_coef_, [[-0.25, .25]])
assert_array_equal(clf.support_, [1, 3])
assert_array_equal(clf.support_vectors_, (X[1], X[3]))
assert_array_equal(clf.intercept_, [0.])
assert_array_equal(clf.predict(X), Y)
def test_libsvm_iris():
# Check consistency on dataset iris.
# shuffle the dataset so that labels are not ordered
for k in ('linear', 'rbf'):
clf = svm.SVC(kernel=k).fit(iris.data, iris.target)
assert_greater(np.mean(clf.predict(iris.data) == iris.target), 0.9)
assert_array_equal(clf.classes_, np.sort(clf.classes_))
# check also the low-level API
model = svm.libsvm.fit(iris.data, iris.target.astype(np.float64))
pred = svm.libsvm.predict(iris.data, *model)
assert_greater(np.mean(pred == iris.target), .95)
model = svm.libsvm.fit(iris.data, iris.target.astype(np.float64),
kernel='linear')
pred = svm.libsvm.predict(iris.data, *model, kernel='linear')
assert_greater(np.mean(pred == iris.target), .95)
pred = svm.libsvm.cross_validation(iris.data,
iris.target.astype(np.float64), 5,
kernel='linear',
random_seed=0)
assert_greater(np.mean(pred == iris.target), .95)
# If random_seed >= 0, the libsvm rng is seeded (by calling `srand`), hence
# we should get deterministic results (assuming that no other thread is
# calling this wrapper, and hence `srand`, concurrently).
pred2 = svm.libsvm.cross_validation(iris.data,
iris.target.astype(np.float64), 5,
kernel='linear',
random_seed=0)
assert_array_equal(pred, pred2)
@ignore_warnings
def test_single_sample_1d():
# Test whether SVCs work on a single sample given as a 1-d array
clf = svm.SVC().fit(X, Y)
clf.predict(X[0])
clf = svm.LinearSVC(random_state=0).fit(X, Y)
clf.predict(X[0])
def test_precomputed():
# SVC with a precomputed kernel.
# We test it with a toy dataset and with iris.
clf = svm.SVC(kernel='precomputed')
# Gram matrix for train data (square matrix)
# (we use just a linear kernel)
K = np.dot(X, np.array(X).T)
clf.fit(K, Y)
# Gram matrix for test data (rectangular matrix)
KT = np.dot(T, np.array(X).T)
pred = clf.predict(KT)
assert_raises(ValueError, clf.predict, KT.T)
assert_array_equal(clf.dual_coef_, [[-0.25, .25]])
assert_array_equal(clf.support_, [1, 3])
assert_array_equal(clf.intercept_, [0])
assert_array_almost_equal(clf.support_, [1, 3])
assert_array_equal(pred, true_result)
# Gram matrix for test data but compute KT[i,j]
# for support vectors j only.
KT = np.zeros_like(KT)
for i in range(len(T)):
for j in clf.support_:
KT[i, j] = np.dot(T[i], X[j])
pred = clf.predict(KT)
assert_array_equal(pred, true_result)
# same as before, but using a callable function instead of the kernel
# matrix. kernel is just a linear kernel
kfunc = lambda x, y: np.dot(x, y.T)
clf = svm.SVC(kernel=kfunc)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_equal(clf.dual_coef_, [[-0.25, .25]])
assert_array_equal(clf.intercept_, [0])
assert_array_almost_equal(clf.support_, [1, 3])
assert_array_equal(pred, true_result)
# test a precomputed kernel with the iris dataset
# and check parameters against a linear SVC
clf = svm.SVC(kernel='precomputed')
clf2 = svm.SVC(kernel='linear')
K = np.dot(iris.data, iris.data.T)
clf.fit(K, iris.target)
clf2.fit(iris.data, iris.target)
pred = clf.predict(K)
assert_array_almost_equal(clf.support_, clf2.support_)
assert_array_almost_equal(clf.dual_coef_, clf2.dual_coef_)
assert_array_almost_equal(clf.intercept_, clf2.intercept_)
assert_almost_equal(np.mean(pred == iris.target), .99, decimal=2)
# Gram matrix for test data but compute KT[i,j]
# for support vectors j only.
K = np.zeros_like(K)
for i in range(len(iris.data)):
for j in clf.support_:
K[i, j] = np.dot(iris.data[i], iris.data[j])
pred = clf.predict(K)
assert_almost_equal(np.mean(pred == iris.target), .99, decimal=2)
clf = svm.SVC(kernel=kfunc)
clf.fit(iris.data, iris.target)
assert_almost_equal(np.mean(pred == iris.target), .99, decimal=2)
def test_svr():
# Test Support Vector Regression
diabetes = datasets.load_diabetes()
for clf in (svm.NuSVR(kernel='linear', nu=.4, C=1.0),
svm.NuSVR(kernel='linear', nu=.4, C=10.),
svm.SVR(kernel='linear', C=10.),
svm.LinearSVR(C=10.),
svm.LinearSVR(C=10.),
):
clf.fit(diabetes.data, diabetes.target)
assert_greater(clf.score(diabetes.data, diabetes.target), 0.02)
# non-regression test; previously, BaseLibSVM would check that
# len(np.unique(y)) < 2, which must only be done for SVC
svm.SVR().fit(diabetes.data, np.ones(len(diabetes.data)))
svm.LinearSVR().fit(diabetes.data, np.ones(len(diabetes.data)))
def test_linearsvr():
# check that SVR(kernel='linear') and LinearSVC() give
# comparable results
diabetes = datasets.load_diabetes()
lsvr = svm.LinearSVR(C=1e3).fit(diabetes.data, diabetes.target)
score1 = lsvr.score(diabetes.data, diabetes.target)
svr = svm.SVR(kernel='linear', C=1e3).fit(diabetes.data, diabetes.target)
score2 = svr.score(diabetes.data, diabetes.target)
assert np.linalg.norm(lsvr.coef_ - svr.coef_) / np.linalg.norm(svr.coef_) < .1
assert np.abs(score1 - score2) < 0.1
def test_svr_errors():
X = [[0.0], [1.0]]
y = [0.0, 0.5]
# Bad kernel
clf = svm.SVR(kernel=lambda x, y: np.array([[1.0]]))
clf.fit(X, y)
assert_raises(ValueError, clf.predict, X)
def test_oneclass():
# Test OneClassSVM
clf = svm.OneClassSVM()
clf.fit(X)
pred = clf.predict(T)
assert_array_almost_equal(pred, [-1, -1, -1])
assert_array_almost_equal(clf.intercept_, [-1.008], decimal=3)
assert_array_almost_equal(clf.dual_coef_,
[[0.632, 0.233, 0.633, 0.234, 0.632, 0.633]],
decimal=3)
assert_raises(ValueError, lambda: clf.coef_)
def test_oneclass_decision_function():
# Test OneClassSVM decision function
clf = svm.OneClassSVM()
rnd = check_random_state(2)
# Generate train data
X = 0.3 * rnd.randn(100, 2)
X_train = np.r_[X + 2, X - 2]
# Generate some regular novel observations
X = 0.3 * rnd.randn(20, 2)
X_test = np.r_[X + 2, X - 2]
# Generate some abnormal novel observations
X_outliers = rnd.uniform(low=-4, high=4, size=(20, 2))
# fit the model
clf = svm.OneClassSVM(nu=0.1, kernel="rbf", gamma=0.1)
clf.fit(X_train)
# predict things
y_pred_test = clf.predict(X_test)
assert_greater(np.mean(y_pred_test == 1), .9)
y_pred_outliers = clf.predict(X_outliers)
assert_greater(np.mean(y_pred_outliers == -1), .9)
dec_func_test = clf.decision_function(X_test)
assert_array_equal((dec_func_test > 0).ravel(), y_pred_test == 1)
dec_func_outliers = clf.decision_function(X_outliers)
assert_array_equal((dec_func_outliers > 0).ravel(), y_pred_outliers == 1)
def test_tweak_params():
# Make sure some tweaking of parameters works.
# We change clf.dual_coef_ at run time and expect .predict() to change
# accordingly. Notice that this is not trivial since it involves a lot
# of C/Python copying in the libsvm bindings.
# The success of this test ensures that the mapping between libsvm and
# the python classifier is complete.
clf = svm.SVC(kernel='linear', C=1.0)
clf.fit(X, Y)
assert_array_equal(clf.dual_coef_, [[-.25, .25]])
assert_array_equal(clf.predict([[-.1, -.1]]), [1])
clf._dual_coef_ = np.array([[.0, 1.]])
assert_array_equal(clf.predict([[-.1, -.1]]), [2])
def test_probability():
# Predict probabilities using SVC
# This uses cross validation, so we use a slightly bigger testing set.
for clf in (svm.SVC(probability=True, random_state=0, C=1.0),
svm.NuSVC(probability=True, random_state=0)):
clf.fit(iris.data, iris.target)
prob_predict = clf.predict_proba(iris.data)
assert_array_almost_equal(
np.sum(prob_predict, 1), np.ones(iris.data.shape[0]))
assert_true(np.mean(np.argmax(prob_predict, 1)
== clf.predict(iris.data)) > 0.9)
assert_almost_equal(clf.predict_proba(iris.data),
np.exp(clf.predict_log_proba(iris.data)), 8)
def test_decision_function():
# Test decision_function
# Sanity check, test that decision_function implemented in python
# returns the same as the one in libsvm
# multi class:
clf = svm.SVC(kernel='linear', C=0.1,
decision_function_shape='ovo').fit(iris.data, iris.target)
dec = np.dot(iris.data, clf.coef_.T) + clf.intercept_
assert_array_almost_equal(dec, clf.decision_function(iris.data))
# binary:
clf.fit(X, Y)
dec = np.dot(X, clf.coef_.T) + clf.intercept_
prediction = clf.predict(X)
assert_array_almost_equal(dec.ravel(), clf.decision_function(X))
assert_array_almost_equal(
prediction,
clf.classes_[(clf.decision_function(X) > 0).astype(np.int)])
expected = np.array([-1., -0.66, -1., 0.66, 1., 1.])
assert_array_almost_equal(clf.decision_function(X), expected, 2)
# kernel binary:
clf = svm.SVC(kernel='rbf', gamma=1, decision_function_shape='ovo')
clf.fit(X, Y)
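# Reconstruct the decision function by hand: f(x) = sum_i dual_coef_i * K(x, SV_i) + intercept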
rbfs = rbf_kernel(X, clf.support_vectors_, gamma=clf.gamma)
dec = np.dot(rbfs, clf.dual_coef_.T) + clf.intercept_
assert_array_almost_equal(dec.ravel(), clf.decision_function(X))
def test_decision_function_shape():
# check that decision_function_shape='ovr' gives
# correct shape and is consistent with predict
clf = svm.SVC(kernel='linear', C=0.1,
decision_function_shape='ovr').fit(iris.data, iris.target)
dec = clf.decision_function(iris.data)
assert_equal(dec.shape, (len(iris.data), 3))
assert_array_equal(clf.predict(iris.data), np.argmax(dec, axis=1))
# with five classes:
X, y = make_blobs(n_samples=80, centers=5, random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = svm.SVC(kernel='linear', C=0.1,
decision_function_shape='ovr').fit(X_train, y_train)
dec = clf.decision_function(X_test)
assert_equal(dec.shape, (len(X_test), 5))
assert_array_equal(clf.predict(X_test), np.argmax(dec, axis=1))
# check shape of the decision function with decision_function_shape='ovo'
clf = svm.SVC(kernel='linear', C=0.1,
decision_function_shape='ovo').fit(X_train, y_train)
dec = clf.decision_function(X_train)
assert_equal(dec.shape, (len(X_train), 10))
# check deprecation warning
clf.decision_function_shape = None
msg = "change the shape of the decision function"
dec = assert_warns_message(ChangedBehaviorWarning, msg,
clf.decision_function, X_train)
assert_equal(dec.shape, (len(X_train), 10))
def test_svr_decision_function():
# Test SVR's decision_function
# Sanity check, test that decision_function implemented in python
# returns the same as the one in libsvm
X = iris.data
y = iris.target
# linear kernel
reg = svm.SVR(kernel='linear', C=0.1).fit(X, y)
dec = np.dot(X, reg.coef_.T) + reg.intercept_
assert_array_almost_equal(dec.ravel(), reg.decision_function(X).ravel())
# rbf kernel
reg = svm.SVR(kernel='rbf', gamma=1).fit(X, y)
rbfs = rbf_kernel(X, reg.support_vectors_, gamma=reg.gamma)
dec = np.dot(rbfs, reg.dual_coef_.T) + reg.intercept_
assert_array_almost_equal(dec.ravel(), reg.decision_function(X).ravel())
def test_weight():
# Test class weights
clf = svm.SVC(class_weight={1: 0.1})
    # we give a small weight to class 1
clf.fit(X, Y)
# so all predicted values belong to class 2
assert_array_almost_equal(clf.predict(X), [2] * 6)
X_, y_ = make_classification(n_samples=200, n_features=10,
weights=[0.833, 0.167], random_state=2)
for clf in (linear_model.LogisticRegression(),
svm.LinearSVC(random_state=0), svm.SVC()):
clf.set_params(class_weight={0: .1, 1: 10})
clf.fit(X_[:100], y_[:100])
y_pred = clf.predict(X_[100:])
assert_true(f1_score(y_[100:], y_pred) > .3)
def test_sample_weights():
# Test weights on individual samples
# TODO: check on NuSVR, OneClass, etc.
clf = svm.SVC()
clf.fit(X, Y)
assert_array_equal(clf.predict([X[2]]), [1.])
sample_weight = [.1] * 3 + [10] * 3
clf.fit(X, Y, sample_weight=sample_weight)
assert_array_equal(clf.predict([X[2]]), [2.])
# test that rescaling all samples is the same as changing C
clf = svm.SVC()
clf.fit(X, Y)
dual_coef_no_weight = clf.dual_coef_
clf.set_params(C=100)
clf.fit(X, Y, sample_weight=np.repeat(0.01, len(X)))
assert_array_almost_equal(dual_coef_no_weight, clf.dual_coef_)
def test_auto_weight():
# Test class weights for imbalanced data
from sklearn.linear_model import LogisticRegression
# We take as dataset the two-dimensional projection of iris so
# that it is not separable and remove half of predictors from
# class 1.
# We add one to the targets as a non-regression test: class_weight="balanced"
    # used to work only when the labels were a range [0..K).
from sklearn.utils import compute_class_weight
X, y = iris.data[:, :2], iris.target + 1
unbalanced = np.delete(np.arange(y.size), np.where(y > 2)[0][::2])
classes = np.unique(y[unbalanced])
class_weights = compute_class_weight('balanced', classes, y[unbalanced])
assert_true(np.argmax(class_weights) == 2)
for clf in (svm.SVC(kernel='linear'), svm.LinearSVC(random_state=0),
LogisticRegression()):
# check that score is better when class='balanced' is set.
y_pred = clf.fit(X[unbalanced], y[unbalanced]).predict(X)
clf.set_params(class_weight='balanced')
y_pred_balanced = clf.fit(X[unbalanced], y[unbalanced],).predict(X)
assert_true(metrics.f1_score(y, y_pred, average='weighted')
<= metrics.f1_score(y, y_pred_balanced,
average='weighted'))
def test_bad_input():
# Test that it gives proper exception on deficient input
# impossible value of C
assert_raises(ValueError, svm.SVC(C=-1).fit, X, Y)
# impossible value of nu
clf = svm.NuSVC(nu=0.0)
assert_raises(ValueError, clf.fit, X, Y)
Y2 = Y[:-1] # wrong dimensions for labels
assert_raises(ValueError, clf.fit, X, Y2)
# Test with arrays that are non-contiguous.
for clf in (svm.SVC(), svm.LinearSVC(random_state=0)):
Xf = np.asfortranarray(X)
assert_false(Xf.flags['C_CONTIGUOUS'])
yf = np.ascontiguousarray(np.tile(Y, (2, 1)).T)
yf = yf[:, -1]
assert_false(yf.flags['F_CONTIGUOUS'])
assert_false(yf.flags['C_CONTIGUOUS'])
clf.fit(Xf, yf)
assert_array_equal(clf.predict(T), true_result)
    # error for precomputed kernels
clf = svm.SVC(kernel='precomputed')
assert_raises(ValueError, clf.fit, X, Y)
# sample_weight bad dimensions
clf = svm.SVC()
assert_raises(ValueError, clf.fit, X, Y, sample_weight=range(len(X) - 1))
# predict with sparse input when trained with dense
clf = svm.SVC().fit(X, Y)
assert_raises(ValueError, clf.predict, sparse.lil_matrix(X))
Xt = np.array(X).T
clf.fit(np.dot(X, Xt), Y)
assert_raises(ValueError, clf.predict, X)
clf = svm.SVC()
clf.fit(X, Y)
assert_raises(ValueError, clf.predict, Xt)
def test_sparse_precomputed():
clf = svm.SVC(kernel='precomputed')
sparse_gram = sparse.csr_matrix([[1, 0], [0, 1]])
try:
clf.fit(sparse_gram, [0, 1])
assert not "reached"
except TypeError as e:
assert_in("Sparse precomputed", str(e))
def test_linearsvc_parameters():
# Test possible parameter combinations in LinearSVC
# Generate list of possible parameter combinations
losses = ['hinge', 'squared_hinge', 'logistic_regression', 'foo']
penalties, duals = ['l1', 'l2', 'bar'], [True, False]
X, y = make_classification(n_samples=5, n_features=5)
for loss, penalty, dual in itertools.product(losses, penalties, duals):
clf = svm.LinearSVC(penalty=penalty, loss=loss, dual=dual)
if ((loss, penalty) == ('hinge', 'l1') or
(loss, penalty, dual) == ('hinge', 'l2', False) or
(penalty, dual) == ('l1', True) or
loss == 'foo' or penalty == 'bar'):
assert_raises_regexp(ValueError,
"Unsupported set of arguments.*penalty='%s.*"
"loss='%s.*dual=%s"
% (penalty, loss, dual),
clf.fit, X, y)
else:
clf.fit(X, y)
# Incorrect loss value - test if explicit error message is raised
assert_raises_regexp(ValueError, ".*loss='l3' is not supported.*",
svm.LinearSVC(loss="l3").fit, X, y)
# FIXME remove in 1.0
def test_linearsvx_loss_penalty_deprecations():
X, y = [[0.0], [1.0]], [0, 1]
msg = ("loss='%s' has been deprecated in favor of "
"loss='%s' as of 0.16. Backward compatibility"
" for the %s will be removed in %s")
# LinearSVC
# loss l1/L1 --> hinge
assert_warns_message(DeprecationWarning,
msg % ("l1", "hinge", "loss='l1'", "1.0"),
svm.LinearSVC(loss="l1").fit, X, y)
# loss l2/L2 --> squared_hinge
assert_warns_message(DeprecationWarning,
msg % ("L2", "squared_hinge", "loss='L2'", "1.0"),
svm.LinearSVC(loss="L2").fit, X, y)
# LinearSVR
# loss l1/L1 --> epsilon_insensitive
assert_warns_message(DeprecationWarning,
msg % ("L1", "epsilon_insensitive", "loss='L1'",
"1.0"),
svm.LinearSVR(loss="L1").fit, X, y)
# loss l2/L2 --> squared_epsilon_insensitive
assert_warns_message(DeprecationWarning,
msg % ("l2", "squared_epsilon_insensitive",
"loss='l2'", "1.0"),
svm.LinearSVR(loss="l2").fit, X, y)
# FIXME remove in 0.18
def test_linear_svx_uppercase_loss_penalty():
# Check if Upper case notation is supported by _fit_liblinear
# which is called by fit
X, y = [[0.0], [1.0]], [0, 1]
msg = ("loss='%s' has been deprecated in favor of "
"loss='%s' as of 0.16. Backward compatibility"
" for the uppercase notation will be removed in %s")
# loss SQUARED_hinge --> squared_hinge
assert_warns_message(DeprecationWarning,
msg % ("SQUARED_hinge", "squared_hinge", "0.18"),
svm.LinearSVC(loss="SQUARED_hinge").fit, X, y)
# penalty L2 --> l2
assert_warns_message(DeprecationWarning,
msg.replace("loss", "penalty")
% ("L2", "l2", "0.18"),
svm.LinearSVC(penalty="L2").fit, X, y)
# loss EPSILON_INSENSITIVE --> epsilon_insensitive
assert_warns_message(DeprecationWarning,
msg % ("EPSILON_INSENSITIVE", "epsilon_insensitive",
"0.18"),
svm.LinearSVR(loss="EPSILON_INSENSITIVE").fit, X, y)
def test_linearsvc():
# Test basic routines using LinearSVC
clf = svm.LinearSVC(random_state=0).fit(X, Y)
# by default should have intercept
assert_true(clf.fit_intercept)
assert_array_equal(clf.predict(T), true_result)
assert_array_almost_equal(clf.intercept_, [0], decimal=3)
# the same with l1 penalty
clf = svm.LinearSVC(penalty='l1', loss='squared_hinge', dual=False, random_state=0).fit(X, Y)
assert_array_equal(clf.predict(T), true_result)
# l2 penalty with dual formulation
clf = svm.LinearSVC(penalty='l2', dual=True, random_state=0).fit(X, Y)
assert_array_equal(clf.predict(T), true_result)
# l2 penalty, l1 loss
clf = svm.LinearSVC(penalty='l2', loss='hinge', dual=True, random_state=0)
clf.fit(X, Y)
assert_array_equal(clf.predict(T), true_result)
# test also decision function
dec = clf.decision_function(T)
res = (dec > 0).astype(np.int) + 1
assert_array_equal(res, true_result)
def test_linearsvc_crammer_singer():
# Test LinearSVC with crammer_singer multi-class svm
ovr_clf = svm.LinearSVC(random_state=0).fit(iris.data, iris.target)
cs_clf = svm.LinearSVC(multi_class='crammer_singer', random_state=0)
cs_clf.fit(iris.data, iris.target)
# similar prediction for ovr and crammer-singer:
assert_true((ovr_clf.predict(iris.data) ==
cs_clf.predict(iris.data)).mean() > .9)
# classifiers shouldn't be the same
assert_true((ovr_clf.coef_ != cs_clf.coef_).all())
# test decision function
assert_array_equal(cs_clf.predict(iris.data),
np.argmax(cs_clf.decision_function(iris.data), axis=1))
dec_func = np.dot(iris.data, cs_clf.coef_.T) + cs_clf.intercept_
assert_array_almost_equal(dec_func, cs_clf.decision_function(iris.data))
def test_crammer_singer_binary():
# Test Crammer-Singer formulation in the binary case
X, y = make_classification(n_classes=2, random_state=0)
for fit_intercept in (True, False):
acc = svm.LinearSVC(fit_intercept=fit_intercept,
multi_class="crammer_singer",
random_state=0).fit(X, y).score(X, y)
assert_greater(acc, 0.9)
def test_linearsvc_iris():
# Test that LinearSVC gives plausible predictions on the iris dataset
# Also, test symbolic class names (classes_).
target = iris.target_names[iris.target]
clf = svm.LinearSVC(random_state=0).fit(iris.data, target)
assert_equal(set(clf.classes_), set(iris.target_names))
assert_greater(np.mean(clf.predict(iris.data) == target), 0.8)
dec = clf.decision_function(iris.data)
pred = iris.target_names[np.argmax(dec, 1)]
assert_array_equal(pred, clf.predict(iris.data))
def test_dense_liblinear_intercept_handling(classifier=svm.LinearSVC):
# Test that dense liblinear honours intercept_scaling param
X = [[2, 1],
[3, 1],
[1, 3],
[2, 3]]
y = [0, 0, 1, 1]
clf = classifier(fit_intercept=True, penalty='l1', loss='squared_hinge',
dual=False, C=4, tol=1e-7, random_state=0)
assert_true(clf.intercept_scaling == 1, clf.intercept_scaling)
assert_true(clf.fit_intercept)
# when intercept_scaling is low the intercept value is highly "penalized"
# by regularization
clf.intercept_scaling = 1
clf.fit(X, y)
assert_almost_equal(clf.intercept_, 0, decimal=5)
# when intercept_scaling is sufficiently high, the intercept value
# is not affected by regularization
clf.intercept_scaling = 100
clf.fit(X, y)
intercept1 = clf.intercept_
assert_less(intercept1, -1)
# when intercept_scaling is sufficiently high, the intercept value
# doesn't depend on intercept_scaling value
clf.intercept_scaling = 1000
clf.fit(X, y)
intercept2 = clf.intercept_
assert_array_almost_equal(intercept1, intercept2, decimal=2)
def test_liblinear_set_coef():
# multi-class case
clf = svm.LinearSVC().fit(iris.data, iris.target)
values = clf.decision_function(iris.data)
clf.coef_ = clf.coef_.copy()
clf.intercept_ = clf.intercept_.copy()
values2 = clf.decision_function(iris.data)
assert_array_almost_equal(values, values2)
# binary-class case
X = [[2, 1],
[3, 1],
[1, 3],
[2, 3]]
y = [0, 0, 1, 1]
clf = svm.LinearSVC().fit(X, y)
values = clf.decision_function(X)
clf.coef_ = clf.coef_.copy()
clf.intercept_ = clf.intercept_.copy()
values2 = clf.decision_function(X)
assert_array_equal(values, values2)
def test_immutable_coef_property():
    # Check that primal coef modifications are not silently ignored
svms = [
svm.SVC(kernel='linear').fit(iris.data, iris.target),
svm.NuSVC(kernel='linear').fit(iris.data, iris.target),
svm.SVR(kernel='linear').fit(iris.data, iris.target),
svm.NuSVR(kernel='linear').fit(iris.data, iris.target),
svm.OneClassSVM(kernel='linear').fit(iris.data),
]
for clf in svms:
assert_raises(AttributeError, clf.__setattr__, 'coef_', np.arange(3))
assert_raises((RuntimeError, ValueError),
clf.coef_.__setitem__, (0, 0), 0)
def test_linearsvc_verbose():
# stdout: redirect
import os
stdout = os.dup(1) # save original stdout
os.dup2(os.pipe()[1], 1) # replace it
# actual call
clf = svm.LinearSVC(verbose=1)
clf.fit(X, Y)
# stdout: restore
os.dup2(stdout, 1) # restore original stdout
def test_svc_clone_with_callable_kernel():
# create SVM with callable linear kernel, check that results are the same
# as with built-in linear kernel
svm_callable = svm.SVC(kernel=lambda x, y: np.dot(x, y.T),
probability=True, random_state=0,
decision_function_shape='ovr')
# clone for checking clonability with lambda functions..
svm_cloned = base.clone(svm_callable)
svm_cloned.fit(iris.data, iris.target)
svm_builtin = svm.SVC(kernel='linear', probability=True, random_state=0,
decision_function_shape='ovr')
svm_builtin.fit(iris.data, iris.target)
assert_array_almost_equal(svm_cloned.dual_coef_,
svm_builtin.dual_coef_)
assert_array_almost_equal(svm_cloned.intercept_,
svm_builtin.intercept_)
assert_array_equal(svm_cloned.predict(iris.data),
svm_builtin.predict(iris.data))
assert_array_almost_equal(svm_cloned.predict_proba(iris.data),
svm_builtin.predict_proba(iris.data),
decimal=4)
assert_array_almost_equal(svm_cloned.decision_function(iris.data),
svm_builtin.decision_function(iris.data))
def test_svc_bad_kernel():
svc = svm.SVC(kernel=lambda x, y: x)
assert_raises(ValueError, svc.fit, X, Y)
def test_timeout():
a = svm.SVC(kernel=lambda x, y: np.dot(x, y.T), probability=True,
random_state=0, max_iter=1)
assert_warns(ConvergenceWarning, a.fit, X, Y)
def test_unfitted():
X = "foo!" # input validation not required when SVM not fitted
clf = svm.SVC()
assert_raises_regexp(Exception, r".*\bSVC\b.*\bnot\b.*\bfitted\b",
clf.predict, X)
clf = svm.NuSVR()
assert_raises_regexp(Exception, r".*\bNuSVR\b.*\bnot\b.*\bfitted\b",
clf.predict, X)
# ignore convergence warnings from max_iter=1
@ignore_warnings
def test_consistent_proba():
a = svm.SVC(probability=True, max_iter=1, random_state=0)
proba_1 = a.fit(X, Y).predict_proba(X)
a = svm.SVC(probability=True, max_iter=1, random_state=0)
proba_2 = a.fit(X, Y).predict_proba(X)
assert_array_almost_equal(proba_1, proba_2)
def test_linear_svc_convergence_warnings():
# Test that warnings are raised if model does not converge
lsvc = svm.LinearSVC(max_iter=2, verbose=1)
assert_warns(ConvergenceWarning, lsvc.fit, X, Y)
assert_equal(lsvc.n_iter_, 2)
def test_svr_coef_sign():
# Test that SVR(kernel="linear") has coef_ with the right sign.
# Non-regression test for #2933.
X = np.random.RandomState(21).randn(10, 3)
y = np.random.RandomState(12).randn(10)
for svr in [svm.SVR(kernel='linear'), svm.NuSVR(kernel='linear'),
svm.LinearSVR()]:
svr.fit(X, y)
assert_array_almost_equal(svr.predict(X),
np.dot(X, svr.coef_.ravel()) + svr.intercept_)
def test_linear_svc_intercept_scaling():
# Test that the right error message is thrown when intercept_scaling <= 0
for i in [-1, 0]:
lsvc = svm.LinearSVC(intercept_scaling=i)
msg = ('Intercept scaling is %r but needs to be greater than 0.'
' To disable fitting an intercept,'
' set fit_intercept=False.' % lsvc.intercept_scaling)
assert_raise_message(ValueError, msg, lsvc.fit, X, Y)
def test_lsvc_intercept_scaling_zero():
# Test that intercept_scaling is ignored when fit_intercept is False
lsvc = svm.LinearSVC(fit_intercept=False)
lsvc.fit(X, Y)
assert_equal(lsvc.intercept_, 0.)
def test_hasattr_predict_proba():
# Method must be (un)available before or after fit, switched by
# `probability` param
G = svm.SVC(probability=True)
assert_true(hasattr(G, 'predict_proba'))
G.fit(iris.data, iris.target)
assert_true(hasattr(G, 'predict_proba'))
G = svm.SVC(probability=False)
assert_false(hasattr(G, 'predict_proba'))
G.fit(iris.data, iris.target)
assert_false(hasattr(G, 'predict_proba'))
# Switching to `probability=True` after fitting should make
# predict_proba available, but calling it must not work:
G.probability = True
assert_true(hasattr(G, 'predict_proba'))
msg = "predict_proba is not available when fitted with probability=False"
assert_raise_message(NotFittedError, msg, G.predict_proba, iris.data)
| bsd-3-clause |
tomlof/scikit-learn | examples/semi_supervised/plot_label_propagation_digits_active_learning.py | 36 | 4076 | """
========================================
Label Propagation digits active learning
========================================
Demonstrates an active learning technique to learn handwritten digits
using label propagation.
We start by training a label propagation model with only 10 labeled points,
then we select the top five most uncertain points to label. Next, we train
with 15 labeled points (original 10 + 5 new ones). We repeat this process
four times to have a model trained with 30 labeled examples. Note you can
increase this to label more than 30 by changing `max_iterations`. Labeling
more than 30 can be useful to get a sense for the speed of convergence of
this active learning technique.
A plot will appear showing the top 5 most uncertain digits for each iteration
of training. These may or may not contain mistakes, but we will train the next
model with their true labels.
"""
print(__doc__)
# Authors: Clay Woolam <[email protected]>
# License: BSD
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from sklearn import datasets
from sklearn.semi_supervised import label_propagation
from sklearn.metrics import classification_report, confusion_matrix
digits = datasets.load_digits()
rng = np.random.RandomState(0)
indices = np.arange(len(digits.data))
rng.shuffle(indices)
X = digits.data[indices[:330]]
y = digits.target[indices[:330]]
images = digits.images[indices[:330]]
n_total_samples = len(y)
n_labeled_points = 10
max_iterations = 5
unlabeled_indices = np.arange(n_total_samples)[n_labeled_points:]
f = plt.figure()
for i in range(max_iterations):
if len(unlabeled_indices) == 0:
print("No unlabeled items left to label.")
break
y_train = np.copy(y)
y_train[unlabeled_indices] = -1
lp_model = label_propagation.LabelSpreading(gamma=0.25, max_iter=5)
lp_model.fit(X, y_train)
predicted_labels = lp_model.transduction_[unlabeled_indices]
true_labels = y[unlabeled_indices]
cm = confusion_matrix(true_labels, predicted_labels,
labels=lp_model.classes_)
print("Iteration %i %s" % (i, 70 * "_"))
print("Label Spreading model: %d labeled & %d unlabeled (%d total)"
% (n_labeled_points, n_total_samples - n_labeled_points, n_total_samples))
print(classification_report(true_labels, predicted_labels))
print("Confusion matrix")
print(cm)
# compute the entropies of transduced label distributions
pred_entropies = stats.distributions.entropy(
lp_model.label_distributions_.T)
# select up to 5 digit examples that the classifier is most uncertain about
uncertainty_index = np.argsort(pred_entropies)[::-1]
uncertainty_index = uncertainty_index[
np.in1d(uncertainty_index, unlabeled_indices)][:5]
# keep track of indices that we get labels for
delete_indices = np.array([])
# for more than 5 iterations, visualize the gain only on the first 5
if i < 5:
f.text(.05, (1 - (i + 1) * .183),
"model %d\n\nfit with\n%d labels" %
((i + 1), i * 5 + 10), size=10)
for index, image_index in enumerate(uncertainty_index):
image = images[image_index]
# for more than 5 iterations, visualize the gain only on the first 5
if i < 5:
sub = f.add_subplot(5, 5, index + 1 + (5 * i))
sub.imshow(image, cmap=plt.cm.gray_r)
sub.set_title("predict: %i\ntrue: %i" % (
lp_model.transduction_[image_index], y[image_index]), size=10)
sub.axis('off')
        # labeling 5 points, remove from labeled set
delete_index, = np.where(unlabeled_indices == image_index)
delete_indices = np.concatenate((delete_indices, delete_index))
unlabeled_indices = np.delete(unlabeled_indices, delete_indices)
n_labeled_points += len(uncertainty_index)
f.suptitle("Active learning with Label Propagation.\nRows show 5 most "
"uncertain labels to learn with the next model.")
plt.subplots_adjust(0.12, 0.03, 0.9, 0.8, 0.2, 0.45)
plt.show()
| bsd-3-clause |
joshua-cogliati-inl/moose | modules/tensor_mechanics/doc/tests/beam_cosserat.py | 13 | 4653 | #!/usr/bin/env python
import os
import sys
import numpy as np
import matplotlib.pyplot as plt
def expected(x):
ee = 1.2
nu = 0.3
t = 0.0002
ll = 10
z = 0.5
c = 0.5
gg = ee / 2 / (1+nu)
beta = 3 * t * (1 - nu * nu) / 4 / c / c / c / ee
dd = - beta * nu / (1 - nu)
delta = beta / 3
gamma = -3 * t / 4 / c / gg
alpha = dd / 3 + t / 4 / c / c / c / gg
ux = beta * x * z * (2 * ll - x) + alpha * pow(z, 3)
uz = delta * x * x * (x - 3 * ll) + gamma * x + dd * z * z * (ll - x)
return (ux, uz)
def expected2(x):
ee = 1.2
nu = 0.3
aa = 1.11E-2
ll = 10
z = 0.5
c = 0.5
y = 0
gg = ee / 2 / (1+nu)
dd = -nu * aa
ux = aa * x * z
uy = dd * z * y
uz = -0.5 * aa * x * x + 0.5 * dd * (z * z - y * y)
return (ux, uy, uz)
def expected_slippery(x):
ee = 1.2
nu = 0.3
ll = 10
h = 0.5
s = -2E-4
gg = ee / 2 / (1 + nu)
bb = ee * h * h / 12 / (1 - nu * nu)
thy = 0.5 * s * x * (x - 2 * ll) / bb
uz = 2 * s * x / gg + 2 * s * (1 - nu * nu) * x * x * (3 * ll - x) / ee / h / h
return (thy, uz)
def expected_slippery_h(h):
ee = 1.2
nu = 0.3
ll = 10
x = 10
s = -2E-4
gg = ee / 2 / (1 + nu)
bb = ee * h * h / 12 / (1 - nu * nu)
thy = 0.5 * s * x * (x - 2 * ll) / bb
uz = 2 * s * x / gg + 2 * s * (1 - nu * nu) * x * x * (3 * ll - x) / ee / h / h
return (thy, uz)
def solid_bar():
f = open("../../tests/static_deformations/gold/beam_cosserat_01_soln_0001.csv")
data = [map(float, line.strip().split(",")) for line in f.readlines()[1:12]]
f.close()
return data
def solid_bar2():
f = open("../../tests/static_deformations/gold/beam_cosserat_02_apply_stress_soln_0001.csv")
data = [map(float, line.strip().split(",")) for line in f.readlines()[1:12]]
f.close()
return data
def solid_bar_slippery():
f = open("../../tests/static_deformations/gold/beam_cosserat_01_slippery_soln_0001.csv")
data = [map(float, line.strip().split(",")) for line in f.readlines()[1:12]]
f.close()
return data
def solid_bar_slippery_h():
# these data were generated by hand using beam_cosserat_01_slippery.i with different values of layer_thickness (and nx=800)
data = [(0.1, -60.3), (0.2, -15.1), (0.3, -6.74), (0.4, -3.8), (0.5, -2.4), (0.9, -0.76)]
return data
xpoints = np.arange(0, 10.05, 0.1)
hpoints = np.arange(0.09, 1, 0.01)
moosex = [i for i in range(11)]
moose = solid_bar()
moose2 = solid_bar2()
moose_slippery = solid_bar_slippery()
mooseh = [0.1, 0.2, 0.3, 0.4, 0.5, 0.9]
moose_slippery_h = solid_bar_slippery_h()
plt.figure()
plt.plot(xpoints, expected(xpoints)[0], 'k-', linewidth = 1.0, label = 'expected u_x')
plt.plot(xpoints, expected(xpoints)[1], 'r-', linewidth = 1.0, label = 'expected u_z')
plt.plot(moosex, [d[4] for d in moose], 'ks', markersize = 10.0, label = 'MOOSE disp_x')
plt.plot(moosex, [d[5] for d in moose], 'r^', markersize = 10.0, label = 'MOOSE disp_z')
plt.legend(loc = 'lower left')
plt.xlabel("x (m)")
plt.ylabel("displacement (m)")
plt.title("Beam deformation")
#plt.savefig("cosserat_beam_disp.pdf")
plt.figure()
plt.plot(xpoints, expected2(xpoints)[0], 'k-', linewidth = 1.0, label = 'expected u_x')
plt.plot(xpoints, expected2(xpoints)[2], 'r-', linewidth = 1.0, label = 'expected u_z')
plt.plot(moosex, [d[9] for d in moose2], 'ks', markersize = 10.0, label = 'MOOSE disp_x')
plt.plot(moosex, [d[11] for d in moose2], 'r^', markersize = 10.0, label = 'MOOSE disp_z')
plt.legend(loc = 'lower left')
plt.xlabel("x (m)")
plt.ylabel("displacement (m)")
plt.title("Beam deformation")
#plt.savefig("cosserat_beam_disp_2.pdf")
plt.figure()
plt.plot(xpoints, expected_slippery(xpoints)[0], 'k-', linewidth = 1.0, label = 'expected th_y')
plt.plot(xpoints, expected_slippery(xpoints)[1], 'r-', linewidth = 1.0, label = 'expected u_z')
plt.plot(moosex, [d[11] for d in moose_slippery], 'ks', markersize = 10.0, label = 'MOOSE wc_y')
plt.plot(moosex, [d[5] for d in moose_slippery], 'r^', markersize = 10.0, label = 'MOOSE disp_z')
plt.legend(loc = 'lower left')
plt.xlabel("x (m)")
plt.ylabel("disp (m) and rot")
plt.title("Slippery beam deformation")
#plt.savefig("cosserat_beam_disp_slippery.pdf")
plt.figure()
plt.plot(hpoints, expected_slippery_h(hpoints)[1], 'k-', linewidth = 1.0, label = 'expected')
plt.plot(mooseh, [d[1] for d in moose_slippery_h], 'ks', markersize = 10.0, label = 'MOOSE')
plt.legend(loc = 'lower right')
plt.xlabel("h (m)")
plt.ylabel("deflection (m)")
plt.title("End-point deflection in slippery Cosserat bar")
plt.savefig("cosserat_beam_disp_slippery_h.pdf")
sys.exit(0)
| lgpl-2.1 |
turbomanage/training-data-analyst | blogs/goes16/maria/hurricanes/goes_to_jpeg.py | 2 | 6966 | #!/usr/bin/env python
"""
Copyright Google Inc. 2017
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
GOES_PUBLIC_BUCKET='gcp-public-data-goes-16'
def list_gcs(bucket, gcs_prefix, gcs_patterns):
import google.cloud.storage as gcs
bucket = gcs.Client().get_bucket(bucket)
blobs = bucket.list_blobs(prefix=gcs_prefix, delimiter='/')
result = []
if gcs_patterns == None or len(gcs_patterns) == 0:
for b in blobs:
result.append(b)
else:
for b in blobs:
match = True
for pattern in gcs_patterns:
if not pattern in b.path:
match = False
if match:
result.append(b)
return result
def copy_fromgcs(bucket, objectId, destdir):
import os.path
import logging
import google.cloud.storage as gcs
bucket = gcs.Client().get_bucket(bucket)
blob = bucket.blob(objectId)
basename = os.path.basename(objectId)
logging.info('Downloading {}'.format(basename))
dest = os.path.join(destdir, basename)
blob.download_to_filename(dest)
return dest
def copy_togcs(localfile, bucket_name, blob_name):
import logging
import google.cloud.storage as gcs
bucket = gcs.Client().get_bucket(bucket_name)
blob = bucket.blob(blob_name)
blob.upload_from_filename(localfile)
logging.info('{} uploaded to gs://{}/{}'.format(localfile,
bucket_name, blob_name))
return blob
def crop_image(nc, data, clat, clon):
import logging
import numpy as np
from pyproj import Proj
import pyresample as pr
# output grid centered on clat, clon in equal-lat-lon
lats = np.arange(clat-10,clat+10,0.01) # approx 1km resolution, 2000km extent
lons = np.arange(clon-10,clon+10,0.01) # approx 1km resolution, 2000km extent
lons, lats = np.meshgrid(lons, lats)
new_grid = pr.geometry.GridDefinition(lons=lons, lats=lats)
# Subsatellite_Longitude is where the GEO satellite is
lon_0 = nc.variables['nominal_satellite_subpoint_lon'][0]
ht_0 = nc.variables['nominal_satellite_height'][0] * 1000 # meters
x = nc.variables['x'][:] * ht_0 #/ 1000.0
y = nc.variables['y'][:] * ht_0 #/ 1000.0
nx = len(x)
ny = len(y)
max_x = x.max(); min_x = x.min(); max_y = y.max(); min_y = y.min()
half_x = (max_x - min_x) / nx / 2.
half_y = (max_y - min_y) / ny / 2.
extents = (min_x - half_x, min_y - half_y, max_x + half_x, max_y + half_y)
old_grid = pr.geometry.AreaDefinition('geos','goes_conus','geos',
{'proj':'geos', 'h':str(ht_0), 'lon_0':str(lon_0) ,'a':'6378169.0', 'b':'6356584.0'},
nx, ny, extents)
# now do remapping
logging.info('Remapping from {}'.format(old_grid))
return pr.kd_tree.resample_nearest(old_grid, data, new_grid, radius_of_influence=50000)
def plot_image(ncfilename, outfile, clat, clon):
import matplotlib, logging
matplotlib.use('Agg') # headless display
import numpy as np
from netCDF4 import Dataset
import matplotlib.pyplot as plt
with Dataset(ncfilename, 'r') as nc:
rad = nc.variables['Rad'][:]
# See http://www.goes-r.gov/products/ATBDs/baseline/Imagery_v2.0_no_color.pdf
ref = (rad * np.pi * 0.3) / 663.274497
ref = np.minimum( np.maximum(ref, 0.0), 1.0 )
# crop to area of interest
ref = crop_image(nc, ref, clat, clon)
# do gamma correction to stretch the values
ref = np.sqrt(ref)
# plotting to jpg file
fig = plt.figure()
plt.imsave(outfile, ref, vmin=0.0, vmax=1.0, cmap='gist_ncar_r') # or 'Greys_r' without color
plt.close('all')
logging.info('Created {}'.format(outfile))
return outfile
return None
def get_objectId_at(dt, product='ABI-L1b-RadF', channel='C14'):
import os, logging
# get first 11-micron band (C14) at this hour
# See: https://www.goes-r.gov/education/ABI-bands-quick-info.html
logging.info('Looking for data collected on {}'.format(dt))
dayno = dt.timetuple().tm_yday
gcs_prefix = '{}/{}/{:03d}/{:02d}/'.format(product, dt.year, dayno, dt.hour)
gcs_patterns = [channel,
's{}{:03d}{:02d}'.format(dt.year, dayno, dt.hour)]
blobs = list_gcs(GOES_PUBLIC_BUCKET, gcs_prefix, gcs_patterns)
if len(blobs) > 0:
objectId = blobs[0].path.replace('%2F','/').replace('/b/{}/o/'.format(GOES_PUBLIC_BUCKET),'')
logging.info('Found {} for {}'.format(objectId, dt))
return objectId
else:
logging.error('No matching files found for gs://{}/{}* containing {}'.format(GOES_PUBLIC_BUCKET, gcs_prefix, gcs_patterns))
return None
def parse_timestamp(timestamp):
from datetime import datetime
dt = datetime.strptime(timestamp[:19], '%Y-%m-%d %H:%M:%S')
return dt
def parse_line(line):
fields = line.split(',')
return parse_timestamp(fields[6]), float(fields[8]), float(fields[9])
def goes_to_jpeg(objectId, lat, lon, outbucket, outfilename):
import os, shutil, tempfile, subprocess, logging
import os.path
# if get_objectId_at fails, it returns None
if objectId == None:
logging.error('Skipping GOES object creation since no GCS file specified')
return
tmpdir = tempfile.mkdtemp()
local_file = copy_fromgcs('gcp-public-data-goes-16', objectId, tmpdir)
logging.info('Creating image from {} near {},{}'.format(os.path.basename(local_file), lat, lon))
# create image in temporary dir, then move over
jpgfile = os.path.join(tmpdir, os.path.basename(outfilename))
jpgfile = plot_image(local_file, jpgfile, lat, lon)
logging.info('Created {} from {}'.format(os.path.basename(jpgfile), os.path.basename(local_file)))
# move over
if outbucket != None:
copy_togcs(jpgfile, outbucket, outfilename)
outfilename = 'gs://{}/{}'.format(outbucket, outfilename)
else:
subprocess.check_call(['mv', jpgfile, outfilename])
# cleanup
shutil.rmtree(tmpdir)
logging.info('Created {} from {}'.format(outfilename, os.path.basename(local_file)))
return outfilename
def only_infrared(message):
import json, logging
try:
# message is a string in json format, so we need to parse it as json
#logging.debug(message)
result = json.loads(message)
# e.g. ABI-L2-CMIPF/2017/306/21/OR_ABI-L2-CMIPF-M4C01_G16_s20173062105222_e20173062110023_c20173062110102.nc
if 'C14_G16' in result['name']:
yield result['name'] #filename
except:
import sys
logging.warn(sys.exc_info()[0])
pass
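# --- Editor's usage sketch (assumption: not part of the original pipeline) ---
# Shows how only_infrared() filters a Pub/Sub-style JSON payload whose 'name'
# field holds the GCS object path; only 11-micron (C14) object paths are
# yielded. The object path below is a made-up example for illustration.
if __name__ == '__main__':
    import json
    sample_message = json.dumps({
        'name': 'ABI-L1b-RadF/2017/256/12/OR_ABI-L1b-RadF-M3C14_G16_s20172561200384_e20172561211162_c20172561211207.nc'
    })
    for name in only_infrared(sample_message):
        print(name)  # prints the matching C14 object path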
| apache-2.0 |
webmasterraj/FogOrNot | flask/lib/python2.7/site-packages/pandas/tseries/tests/test_tslib.py | 2 | 34231 | import nose
from distutils.version import LooseVersion
import numpy as np
from pandas import tslib
import pandas._period as period
import datetime
from pandas.core.api import Timestamp, Series, Timedelta, Period
from pandas.tslib import get_timezone
from pandas._period import period_asfreq, period_ordinal
from pandas.tseries.index import date_range
from pandas.tseries.frequencies import get_freq
import pandas.tseries.offsets as offsets
import pandas.util.testing as tm
from pandas.util.testing import assert_series_equal
class TestTimestamp(tm.TestCase):
def test_constructor(self):
base_str = '2014-07-01 09:00'
base_dt = datetime.datetime(2014, 7, 1, 9)
base_expected = 1404205200000000000
# confirm base representation is correct
import calendar
self.assertEqual(calendar.timegm(base_dt.timetuple()) * 1000000000, base_expected)
tests = [(base_str, base_dt, base_expected),
('2014-07-01 10:00', datetime.datetime(2014, 7, 1, 10),
base_expected + 3600 * 1000000000),
('2014-07-01 09:00:00.000008000',
datetime.datetime(2014, 7, 1, 9, 0, 0, 8), base_expected + 8000),
('2014-07-01 09:00:00.000000005',
Timestamp('2014-07-01 09:00:00.000000005'), base_expected + 5)]
tm._skip_if_no_pytz()
tm._skip_if_no_dateutil()
import pytz
import dateutil
timezones = [(None, 0), ('UTC', 0), (pytz.utc, 0),
('Asia/Tokyo', 9), ('US/Eastern', -4), ('dateutil/US/Pacific', -7),
(pytz.FixedOffset(-180), -3), (dateutil.tz.tzoffset(None, 18000), 5)]
for date_str, date, expected in tests:
for result in [Timestamp(date_str), Timestamp(date)]:
# only with timestring
self.assertEqual(result.value, expected)
self.assertEqual(tslib.pydt_to_i8(result), expected)
# re-creation shouldn't affect to internal value
result = Timestamp(result)
self.assertEqual(result.value, expected)
self.assertEqual(tslib.pydt_to_i8(result), expected)
# with timezone
for tz, offset in timezones:
for result in [Timestamp(date_str, tz=tz), Timestamp(date, tz=tz)]:
expected_tz = expected - offset * 3600 * 1000000000
self.assertEqual(result.value, expected_tz)
self.assertEqual(tslib.pydt_to_i8(result), expected_tz)
# should preserve tz
result = Timestamp(result)
self.assertEqual(result.value, expected_tz)
self.assertEqual(tslib.pydt_to_i8(result), expected_tz)
# should convert to UTC
result = Timestamp(result, tz='UTC')
expected_utc = expected - offset * 3600 * 1000000000
self.assertEqual(result.value, expected_utc)
self.assertEqual(tslib.pydt_to_i8(result), expected_utc)
def test_constructor_with_stringoffset(self):
# GH 7833
base_str = '2014-07-01 11:00:00+02:00'
base_dt = datetime.datetime(2014, 7, 1, 9)
base_expected = 1404205200000000000
# confirm base representation is correct
import calendar
self.assertEqual(calendar.timegm(base_dt.timetuple()) * 1000000000, base_expected)
tests = [(base_str, base_expected),
('2014-07-01 12:00:00+02:00', base_expected + 3600 * 1000000000),
('2014-07-01 11:00:00.000008000+02:00', base_expected + 8000),
('2014-07-01 11:00:00.000000005+02:00', base_expected + 5)]
tm._skip_if_no_pytz()
tm._skip_if_no_dateutil()
import pytz
import dateutil
timezones = [(None, 0), ('UTC', 0), (pytz.utc, 0),
('Asia/Tokyo', 9), ('US/Eastern', -4),
('dateutil/US/Pacific', -7),
(pytz.FixedOffset(-180), -3), (dateutil.tz.tzoffset(None, 18000), 5)]
for date_str, expected in tests:
for result in [Timestamp(date_str)]:
# only with timestring
self.assertEqual(result.value, expected)
self.assertEqual(tslib.pydt_to_i8(result), expected)
# re-creation shouldn't affect to internal value
result = Timestamp(result)
self.assertEqual(result.value, expected)
self.assertEqual(tslib.pydt_to_i8(result), expected)
# with timezone
for tz, offset in timezones:
result = Timestamp(date_str, tz=tz)
expected_tz = expected
self.assertEqual(result.value, expected_tz)
self.assertEqual(tslib.pydt_to_i8(result), expected_tz)
# should preserve tz
result = Timestamp(result)
self.assertEqual(result.value, expected_tz)
self.assertEqual(tslib.pydt_to_i8(result), expected_tz)
# should convert to UTC
result = Timestamp(result, tz='UTC')
expected_utc = expected
self.assertEqual(result.value, expected_utc)
self.assertEqual(tslib.pydt_to_i8(result), expected_utc)
# This should be 2013-11-01 05:00 in UTC -> converted to Chicago tz
result = Timestamp('2013-11-01 00:00:00-0500', tz='America/Chicago')
self.assertEqual(result.value, Timestamp('2013-11-01 05:00').value)
expected_repr = "Timestamp('2013-11-01 00:00:00-0500', tz='America/Chicago')"
self.assertEqual(repr(result), expected_repr)
self.assertEqual(result, eval(repr(result)))
# This should be 2013-11-01 05:00 in UTC -> converted to Tokyo tz (+09:00)
result = Timestamp('2013-11-01 00:00:00-0500', tz='Asia/Tokyo')
self.assertEqual(result.value, Timestamp('2013-11-01 05:00').value)
expected_repr = "Timestamp('2013-11-01 14:00:00+0900', tz='Asia/Tokyo')"
self.assertEqual(repr(result), expected_repr)
self.assertEqual(result, eval(repr(result)))
def test_constructor_invalid(self):
with tm.assertRaisesRegexp(TypeError, 'Cannot convert input'):
Timestamp(slice(2))
with tm.assertRaisesRegexp(ValueError, 'Cannot convert Period'):
Timestamp(Period('1000-01-01'))
def test_conversion(self):
# GH 9255
ts = Timestamp('2000-01-01')
result = ts.to_pydatetime()
expected = datetime.datetime(2000, 1, 1)
self.assertEqual(result, expected)
self.assertEqual(type(result), type(expected))
result = ts.to_datetime64()
expected = np.datetime64(ts.value, 'ns')
self.assertEqual(result, expected)
self.assertEqual(type(result), type(expected))
self.assertEqual(result.dtype, expected.dtype)
def test_repr(self):
tm._skip_if_no_pytz()
tm._skip_if_no_dateutil()
dates = ['2014-03-07', '2014-01-01 09:00', '2014-01-01 00:00:00.000000001']
# dateutil zone change (only matters for repr)
import dateutil
if dateutil.__version__ >= LooseVersion('2.3') and dateutil.__version__ <= LooseVersion('2.4'):
timezones = ['UTC', 'Asia/Tokyo', 'US/Eastern', 'dateutil/US/Pacific']
else:
timezones = ['UTC', 'Asia/Tokyo', 'US/Eastern', 'dateutil/America/Los_Angeles']
freqs = ['D', 'M', 'S', 'N']
for date in dates:
for tz in timezones:
for freq in freqs:
# avoid to match with timezone name
freq_repr = "'{0}'".format(freq)
if tz.startswith('dateutil'):
tz_repr = tz.replace('dateutil', '')
else:
tz_repr = tz
date_only = Timestamp(date)
self.assertIn(date, repr(date_only))
self.assertNotIn(tz_repr, repr(date_only))
self.assertNotIn(freq_repr, repr(date_only))
self.assertEqual(date_only, eval(repr(date_only)))
date_tz = Timestamp(date, tz=tz)
self.assertIn(date, repr(date_tz))
self.assertIn(tz_repr, repr(date_tz))
self.assertNotIn(freq_repr, repr(date_tz))
self.assertEqual(date_tz, eval(repr(date_tz)))
date_freq = Timestamp(date, offset=freq)
self.assertIn(date, repr(date_freq))
self.assertNotIn(tz_repr, repr(date_freq))
self.assertIn(freq_repr, repr(date_freq))
self.assertEqual(date_freq, eval(repr(date_freq)))
date_tz_freq = Timestamp(date, tz=tz, offset=freq)
self.assertIn(date, repr(date_tz_freq))
self.assertIn(tz_repr, repr(date_tz_freq))
self.assertIn(freq_repr, repr(date_tz_freq))
self.assertEqual(date_tz_freq, eval(repr(date_tz_freq)))
# this can cause the tz field to be populated, but it's redundant to information in the datestring
tm._skip_if_no_pytz()
import pytz
date_with_utc_offset = Timestamp('2014-03-13 00:00:00-0400', tz=None)
self.assertIn('2014-03-13 00:00:00-0400', repr(date_with_utc_offset))
self.assertNotIn('tzoffset', repr(date_with_utc_offset))
self.assertIn('pytz.FixedOffset(-240)', repr(date_with_utc_offset))
expr = repr(date_with_utc_offset).replace("'pytz.FixedOffset(-240)'",
'pytz.FixedOffset(-240)')
self.assertEqual(date_with_utc_offset, eval(expr))
def test_bounds_with_different_units(self):
out_of_bounds_dates = (
'1677-09-21',
'2262-04-12',
)
time_units = ('D', 'h', 'm', 's', 'ms', 'us')
for date_string in out_of_bounds_dates:
for unit in time_units:
self.assertRaises(
ValueError,
Timestamp,
np.datetime64(date_string, dtype='M8[%s]' % unit)
)
in_bounds_dates = (
'1677-09-23',
'2262-04-11',
)
for date_string in in_bounds_dates:
for unit in time_units:
Timestamp(
np.datetime64(date_string, dtype='M8[%s]' % unit)
)
def test_tz(self):
t = '2014-02-01 09:00'
ts = Timestamp(t)
local = ts.tz_localize('Asia/Tokyo')
self.assertEqual(local.hour, 9)
self.assertEqual(local, Timestamp(t, tz='Asia/Tokyo'))
conv = local.tz_convert('US/Eastern')
self.assertEqual(conv,
Timestamp('2014-01-31 19:00', tz='US/Eastern'))
self.assertEqual(conv.hour, 19)
# preserves nanosecond
ts = Timestamp(t) + offsets.Nano(5)
local = ts.tz_localize('Asia/Tokyo')
self.assertEqual(local.hour, 9)
self.assertEqual(local.nanosecond, 5)
conv = local.tz_convert('US/Eastern')
self.assertEqual(conv.nanosecond, 5)
self.assertEqual(conv.hour, 19)
def test_tz_localize_ambiguous(self):
ts = Timestamp('2014-11-02 01:00')
ts_dst = ts.tz_localize('US/Eastern', ambiguous=True)
ts_no_dst = ts.tz_localize('US/Eastern', ambiguous=False)
rng = date_range('2014-11-02', periods=3, freq='H', tz='US/Eastern')
self.assertEqual(rng[1], ts_dst)
self.assertEqual(rng[2], ts_no_dst)
self.assertRaises(ValueError, ts.tz_localize, 'US/Eastern', ambiguous='infer')
# GH 8025
with tm.assertRaisesRegexp(TypeError, 'Cannot localize tz-aware Timestamp, use '
'tz_convert for conversions'):
Timestamp('2011-01-01' ,tz='US/Eastern').tz_localize('Asia/Tokyo')
with tm.assertRaisesRegexp(TypeError, 'Cannot convert tz-naive Timestamp, use '
'tz_localize to localize'):
Timestamp('2011-01-01').tz_convert('Asia/Tokyo')
def test_tz_localize_roundtrip(self):
for tz in ['UTC', 'Asia/Tokyo', 'US/Eastern', 'dateutil/US/Pacific']:
for t in ['2014-02-01 09:00', '2014-07-08 09:00', '2014-11-01 17:00',
'2014-11-05 00:00']:
ts = Timestamp(t)
localized = ts.tz_localize(tz)
self.assertEqual(localized, Timestamp(t, tz=tz))
with tm.assertRaises(TypeError):
localized.tz_localize(tz)
reset = localized.tz_localize(None)
self.assertEqual(reset, ts)
self.assertTrue(reset.tzinfo is None)
def test_tz_convert_roundtrip(self):
for tz in ['UTC', 'Asia/Tokyo', 'US/Eastern', 'dateutil/US/Pacific']:
for t in ['2014-02-01 09:00', '2014-07-08 09:00', '2014-11-01 17:00',
'2014-11-05 00:00']:
ts = Timestamp(t, tz='UTC')
converted = ts.tz_convert(tz)
reset = converted.tz_convert(None)
self.assertEqual(reset, Timestamp(t))
self.assertTrue(reset.tzinfo is None)
self.assertEqual(reset, converted.tz_convert('UTC').tz_localize(None))
def test_barely_oob_dts(self):
one_us = np.timedelta64(1).astype('timedelta64[us]')
# By definition we can't go out of bounds in [ns], so we
# convert the datetime64s to [us] so we can go out of bounds
min_ts_us = np.datetime64(Timestamp.min).astype('M8[us]')
max_ts_us = np.datetime64(Timestamp.max).astype('M8[us]')
# No error for the min/max datetimes
Timestamp(min_ts_us)
Timestamp(max_ts_us)
# One us less than the minimum is an error
self.assertRaises(ValueError, Timestamp, min_ts_us - one_us)
# One us more than the maximum is an error
self.assertRaises(ValueError, Timestamp, max_ts_us + one_us)
def test_utc_z_designator(self):
self.assertEqual(get_timezone(Timestamp('2014-11-02 01:00Z').tzinfo), 'UTC')
def test_now(self):
# #9000
ts_from_string = Timestamp('now')
ts_from_method = Timestamp.now()
ts_datetime = datetime.datetime.now()
ts_from_string_tz = Timestamp('now', tz='US/Eastern')
ts_from_method_tz = Timestamp.now(tz='US/Eastern')
# Check that the delta between the times is less than 1s (arbitrarily small)
delta = Timedelta(seconds=1)
self.assertTrue(abs(ts_from_method - ts_from_string) < delta)
self.assertTrue(abs(ts_datetime - ts_from_method) < delta)
self.assertTrue(abs(ts_from_method_tz - ts_from_string_tz) < delta)
self.assertTrue(abs(ts_from_string_tz.tz_localize(None)
- ts_from_method_tz.tz_localize(None)) < delta)
def test_today(self):
ts_from_string = Timestamp('today')
ts_from_method = Timestamp.today()
ts_datetime = datetime.datetime.today()
ts_from_string_tz = Timestamp('today', tz='US/Eastern')
ts_from_method_tz = Timestamp.today(tz='US/Eastern')
# Check that the delta between the times is less than 1s (arbitrarily small)
delta = Timedelta(seconds=1)
self.assertTrue(abs(ts_from_method - ts_from_string) < delta)
self.assertTrue(abs(ts_datetime - ts_from_method) < delta)
self.assertTrue(abs(ts_from_method_tz - ts_from_string_tz) < delta)
self.assertTrue(abs(ts_from_string_tz.tz_localize(None)
- ts_from_method_tz.tz_localize(None)) < delta)
class TestDatetimeParsingWrappers(tm.TestCase):
def test_does_not_convert_mixed_integer(self):
bad_date_strings = (
'-50000',
'999',
'123.1234',
'm',
'T'
)
for bad_date_string in bad_date_strings:
self.assertFalse(
tslib._does_string_look_like_datetime(bad_date_string)
)
good_date_strings = (
'2012-01-01',
'01/01/2012',
'Mon Sep 16, 2013',
'01012012',
'0101',
'1-1',
)
for good_date_string in good_date_strings:
self.assertTrue(
tslib._does_string_look_like_datetime(good_date_string)
)
class TestArrayToDatetime(tm.TestCase):
def test_parsing_valid_dates(self):
arr = np.array(['01-01-2013', '01-02-2013'], dtype=object)
self.assert_numpy_array_equal(
tslib.array_to_datetime(arr),
np.array(
[
'2013-01-01T00:00:00.000000000-0000',
'2013-01-02T00:00:00.000000000-0000'
],
dtype='M8[ns]'
)
)
arr = np.array(['Mon Sep 16 2013', 'Tue Sep 17 2013'], dtype=object)
self.assert_numpy_array_equal(
tslib.array_to_datetime(arr),
np.array(
[
'2013-09-16T00:00:00.000000000-0000',
'2013-09-17T00:00:00.000000000-0000'
],
dtype='M8[ns]'
)
)
def test_number_looking_strings_not_into_datetime(self):
# #4601
        # These strings don't look like datetimes, so no conversion should be attempted
arr = np.array(['-352.737091', '183.575577'], dtype=object)
self.assert_numpy_array_equal(tslib.array_to_datetime(arr), arr)
arr = np.array(['1', '2', '3', '4', '5'], dtype=object)
self.assert_numpy_array_equal(tslib.array_to_datetime(arr), arr)
def test_coercing_dates_outside_of_datetime64_ns_bounds(self):
invalid_dates = [
datetime.date(1000, 1, 1),
datetime.datetime(1000, 1, 1),
'1000-01-01',
'Jan 1, 1000',
np.datetime64('1000-01-01'),
]
for invalid_date in invalid_dates:
self.assertRaises(
ValueError,
tslib.array_to_datetime,
np.array([invalid_date], dtype='object'),
coerce=False,
raise_=True,
)
self.assertTrue(
np.array_equal(
tslib.array_to_datetime(
np.array([invalid_date], dtype='object'), coerce=True
),
np.array([tslib.iNaT], dtype='M8[ns]')
)
)
arr = np.array(['1/1/1000', '1/1/2000'], dtype=object)
self.assert_numpy_array_equal(
tslib.array_to_datetime(arr, coerce=True),
np.array(
[
tslib.iNaT,
'2000-01-01T00:00:00.000000000-0000'
],
dtype='M8[ns]'
)
)
def test_coerce_of_invalid_datetimes(self):
arr = np.array(['01-01-2013', 'not_a_date', '1'], dtype=object)
# Without coercing, the presence of any invalid dates prevents
# any values from being converted
self.assert_numpy_array_equal(tslib.array_to_datetime(arr), arr)
        # With coercing, the invalid dates become iNaT
self.assert_numpy_array_equal(
tslib.array_to_datetime(arr, coerce=True),
np.array(
[
'2013-01-01T00:00:00.000000000-0000',
tslib.iNaT,
tslib.iNaT
],
dtype='M8[ns]'
)
)
def test_parsing_timezone_offsets(self):
# All of these datetime strings with offsets are equivalent
# to the same datetime after the timezone offset is added
dt_strings = [
'01-01-2013 08:00:00+08:00',
'2013-01-01T08:00:00.000000000+0800',
'2012-12-31T16:00:00.000000000-0800',
'12-31-2012 23:00:00-01:00',
]
expected_output = tslib.array_to_datetime(
np.array(['01-01-2013 00:00:00'], dtype=object)
)
for dt_string in dt_strings:
self.assert_numpy_array_equal(
tslib.array_to_datetime(
np.array([dt_string], dtype=object)
),
expected_output
)
class TestTimestampNsOperations(tm.TestCase):
def setUp(self):
self.timestamp = Timestamp(datetime.datetime.utcnow())
def assert_ns_timedelta(self, modified_timestamp, expected_value):
value = self.timestamp.value
modified_value = modified_timestamp.value
self.assertEqual(modified_value - value, expected_value)
def test_timedelta_ns_arithmetic(self):
self.assert_ns_timedelta(self.timestamp + np.timedelta64(-123, 'ns'), -123)
def test_timedelta_ns_based_arithmetic(self):
self.assert_ns_timedelta(self.timestamp + np.timedelta64(1234567898, 'ns'), 1234567898)
def test_timedelta_us_arithmetic(self):
self.assert_ns_timedelta(self.timestamp + np.timedelta64(-123, 'us'), -123000)
def test_timedelta_ms_arithmetic(self):
time = self.timestamp + np.timedelta64(-123, 'ms')
self.assert_ns_timedelta(time, -123000000)
def test_nanosecond_string_parsing(self):
ts = Timestamp('2013-05-01 07:15:45.123456789')
# GH 7878
expected_repr = '2013-05-01 07:15:45.123456789'
expected_value = 1367392545123456789
self.assertEqual(ts.value, expected_value)
self.assertIn(expected_repr, repr(ts))
ts = Timestamp('2013-05-01 07:15:45.123456789+09:00', tz='Asia/Tokyo')
self.assertEqual(ts.value, expected_value - 9 * 3600 * 1000000000)
self.assertIn(expected_repr, repr(ts))
ts = Timestamp('2013-05-01 07:15:45.123456789', tz='UTC')
self.assertEqual(ts.value, expected_value)
self.assertIn(expected_repr, repr(ts))
ts = Timestamp('2013-05-01 07:15:45.123456789', tz='US/Eastern')
self.assertEqual(ts.value, expected_value + 4 * 3600 * 1000000000)
self.assertIn(expected_repr, repr(ts))
def test_nanosecond_timestamp(self):
# GH 7610
expected = 1293840000000000005
t = Timestamp('2011-01-01') + offsets.Nano(5)
self.assertEqual(repr(t), "Timestamp('2011-01-01 00:00:00.000000005')")
self.assertEqual(t.value, expected)
self.assertEqual(t.nanosecond, 5)
t = Timestamp(t)
self.assertEqual(repr(t), "Timestamp('2011-01-01 00:00:00.000000005')")
self.assertEqual(t.value, expected)
self.assertEqual(t.nanosecond, 5)
t = Timestamp(np.datetime64('2011-01-01 00:00:00.000000005Z'))
self.assertEqual(repr(t), "Timestamp('2011-01-01 00:00:00.000000005')")
self.assertEqual(t.value, expected)
self.assertEqual(t.nanosecond, 5)
expected = 1293840000000000010
t = t + offsets.Nano(5)
self.assertEqual(repr(t), "Timestamp('2011-01-01 00:00:00.000000010')")
self.assertEqual(t.value, expected)
self.assertEqual(t.nanosecond, 10)
t = Timestamp(t)
self.assertEqual(repr(t), "Timestamp('2011-01-01 00:00:00.000000010')")
self.assertEqual(t.value, expected)
self.assertEqual(t.nanosecond, 10)
t = Timestamp(np.datetime64('2011-01-01 00:00:00.000000010Z'))
self.assertEqual(repr(t), "Timestamp('2011-01-01 00:00:00.000000010')")
self.assertEqual(t.value, expected)
self.assertEqual(t.nanosecond, 10)
def test_nat_arithmetic(self):
# GH 6873
nat = tslib.NaT
t = Timestamp('2014-01-01')
dt = datetime.datetime(2014, 1, 1)
delta = datetime.timedelta(3600)
# Timestamp / datetime
for (left, right) in [(nat, nat), (nat, t), (dt, nat)]:
# NaT + Timestamp-like should raise TypeError
with tm.assertRaises(TypeError):
left + right
with tm.assertRaises(TypeError):
right + left
# NaT - Timestamp-like (or inverse) returns NaT
self.assertTrue((left - right) is tslib.NaT)
self.assertTrue((right - left) is tslib.NaT)
# timedelta-like
# offsets are tested in test_offsets.py
for (left, right) in [(nat, delta)]:
# NaT + timedelta-like returns NaT
self.assertTrue((left + right) is tslib.NaT)
# timedelta-like + NaT should raise TypeError
with tm.assertRaises(TypeError):
right + left
self.assertTrue((left - right) is tslib.NaT)
with tm.assertRaises(TypeError):
right - left
class TestTslib(tm.TestCase):
def test_intraday_conversion_factors(self):
self.assertEqual(period_asfreq(1, get_freq('D'), get_freq('H'), False), 24)
self.assertEqual(period_asfreq(1, get_freq('D'), get_freq('T'), False), 1440)
self.assertEqual(period_asfreq(1, get_freq('D'), get_freq('S'), False), 86400)
self.assertEqual(period_asfreq(1, get_freq('D'), get_freq('L'), False), 86400000)
self.assertEqual(period_asfreq(1, get_freq('D'), get_freq('U'), False), 86400000000)
self.assertEqual(period_asfreq(1, get_freq('D'), get_freq('N'), False), 86400000000000)
self.assertEqual(period_asfreq(1, get_freq('H'), get_freq('T'), False), 60)
self.assertEqual(period_asfreq(1, get_freq('H'), get_freq('S'), False), 3600)
self.assertEqual(period_asfreq(1, get_freq('H'), get_freq('L'), False), 3600000)
self.assertEqual(period_asfreq(1, get_freq('H'), get_freq('U'), False), 3600000000)
self.assertEqual(period_asfreq(1, get_freq('H'), get_freq('N'), False), 3600000000000)
self.assertEqual(period_asfreq(1, get_freq('T'), get_freq('S'), False), 60)
self.assertEqual(period_asfreq(1, get_freq('T'), get_freq('L'), False), 60000)
self.assertEqual(period_asfreq(1, get_freq('T'), get_freq('U'), False), 60000000)
self.assertEqual(period_asfreq(1, get_freq('T'), get_freq('N'), False), 60000000000)
self.assertEqual(period_asfreq(1, get_freq('S'), get_freq('L'), False), 1000)
self.assertEqual(period_asfreq(1, get_freq('S'), get_freq('U'), False), 1000000)
self.assertEqual(period_asfreq(1, get_freq('S'), get_freq('N'), False), 1000000000)
self.assertEqual(period_asfreq(1, get_freq('L'), get_freq('U'), False), 1000)
self.assertEqual(period_asfreq(1, get_freq('L'), get_freq('N'), False), 1000000)
self.assertEqual(period_asfreq(1, get_freq('U'), get_freq('N'), False), 1000)
def test_period_ordinal_start_values(self):
# information for 1.1.1970
self.assertEqual(0, period_ordinal(1970, 1, 1, 0, 0, 0, 0, 0, get_freq('Y')))
self.assertEqual(0, period_ordinal(1970, 1, 1, 0, 0, 0, 0, 0, get_freq('M')))
self.assertEqual(1, period_ordinal(1970, 1, 1, 0, 0, 0, 0, 0, get_freq('W')))
self.assertEqual(0, period_ordinal(1970, 1, 1, 0, 0, 0, 0, 0, get_freq('D')))
self.assertEqual(0, period_ordinal(1970, 1, 1, 0, 0, 0, 0, 0, get_freq('B')))
def test_period_ordinal_week(self):
self.assertEqual(1, period_ordinal(1970, 1, 4, 0, 0, 0, 0, 0, get_freq('W')))
self.assertEqual(2, period_ordinal(1970, 1, 5, 0, 0, 0, 0, 0, get_freq('W')))
self.assertEqual(2284, period_ordinal(2013, 10, 6, 0, 0, 0, 0, 0, get_freq('W')))
self.assertEqual(2285, period_ordinal(2013, 10, 7, 0, 0, 0, 0, 0, get_freq('W')))
def test_period_ordinal_business_day(self):
# Thursday
self.assertEqual(11415, period_ordinal(2013, 10, 3, 0, 0, 0, 0, 0, get_freq('B')))
# Friday
self.assertEqual(11416, period_ordinal(2013, 10, 4, 0, 0, 0, 0, 0, get_freq('B')))
# Saturday
self.assertEqual(11417, period_ordinal(2013, 10, 5, 0, 0, 0, 0, 0, get_freq('B')))
# Sunday
self.assertEqual(11417, period_ordinal(2013, 10, 6, 0, 0, 0, 0, 0, get_freq('B')))
# Monday
self.assertEqual(11417, period_ordinal(2013, 10, 7, 0, 0, 0, 0, 0, get_freq('B')))
# Tuesday
self.assertEqual(11418, period_ordinal(2013, 10, 8, 0, 0, 0, 0, 0, get_freq('B')))
def test_tslib_tz_convert(self):
def compare_utc_to_local(tz_didx, utc_didx):
f = lambda x: tslib.tz_convert_single(x, 'UTC', tz_didx.tz)
result = tslib.tz_convert(tz_didx.asi8, 'UTC', tz_didx.tz)
result_single = np.vectorize(f)(tz_didx.asi8)
self.assert_numpy_array_equal(result, result_single)
def compare_local_to_utc(tz_didx, utc_didx):
f = lambda x: tslib.tz_convert_single(x, tz_didx.tz, 'UTC')
result = tslib.tz_convert(utc_didx.asi8, tz_didx.tz, 'UTC')
result_single = np.vectorize(f)(utc_didx.asi8)
self.assert_numpy_array_equal(result, result_single)
for tz in ['UTC', 'Asia/Tokyo', 'US/Eastern', 'Europe/Moscow']:
# US: 2014-03-09 - 2014-11-11
# MOSCOW: 2014-10-26 / 2014-12-31
tz_didx = date_range('2014-03-01', '2015-01-10', freq='H', tz=tz)
utc_didx = date_range('2014-03-01', '2015-01-10', freq='H')
compare_utc_to_local(tz_didx, utc_didx)
            # local tz to UTC can differ in hourly (or higher) freqs because of DST
compare_local_to_utc(tz_didx, utc_didx)
tz_didx = date_range('2000-01-01', '2020-01-01', freq='D', tz=tz)
utc_didx = date_range('2000-01-01', '2020-01-01', freq='D')
compare_utc_to_local(tz_didx, utc_didx)
compare_local_to_utc(tz_didx, utc_didx)
tz_didx = date_range('2000-01-01', '2100-01-01', freq='A', tz=tz)
utc_didx = date_range('2000-01-01', '2100-01-01', freq='A')
compare_utc_to_local(tz_didx, utc_didx)
compare_local_to_utc(tz_didx, utc_didx)
# Check empty array
result = tslib.tz_convert(np.array([], dtype=np.int64),
tslib.maybe_get_tz('US/Eastern'),
tslib.maybe_get_tz('Asia/Tokyo'))
self.assert_numpy_array_equal(result, np.array([], dtype=np.int64))
class TestTimestampOps(tm.TestCase):
def test_timestamp_and_datetime(self):
self.assertEqual((Timestamp(datetime.datetime(2013, 10, 13)) - datetime.datetime(2013, 10, 12)).days, 1)
self.assertEqual((datetime.datetime(2013, 10, 12) - Timestamp(datetime.datetime(2013, 10, 13))).days, -1)
def test_timestamp_and_series(self):
timestamp_series = Series(date_range('2014-03-17', periods=2, freq='D', tz='US/Eastern'))
first_timestamp = timestamp_series[0]
delta_series = Series([np.timedelta64(0, 'D'), np.timedelta64(1, 'D')])
assert_series_equal(timestamp_series - first_timestamp, delta_series)
assert_series_equal(first_timestamp - timestamp_series, -delta_series)
def test_addition_subtraction_types(self):
# Assert on the types resulting from Timestamp +/- various date/time objects
datetime_instance = datetime.datetime(2014, 3, 4)
timedelta_instance = datetime.timedelta(seconds=1)
# build a timestamp with a frequency, since then it supports addition/subtraction of integers
timestamp_instance = date_range(datetime_instance, periods=1, freq='D')[0]
self.assertEqual(type(timestamp_instance + 1), Timestamp)
self.assertEqual(type(timestamp_instance - 1), Timestamp)
# Timestamp + datetime not supported, though subtraction is supported and yields timedelta
# more tests in tseries/base/tests/test_base.py
self.assertEqual(type(timestamp_instance - datetime_instance), Timedelta)
self.assertEqual(type(timestamp_instance + timedelta_instance), Timestamp)
self.assertEqual(type(timestamp_instance - timedelta_instance), Timestamp)
# Timestamp +/- datetime64 not supported, so not tested (could possibly assert error raised?)
timedelta64_instance = np.timedelta64(1, 'D')
self.assertEqual(type(timestamp_instance + timedelta64_instance), Timestamp)
self.assertEqual(type(timestamp_instance - timedelta64_instance), Timestamp)
def test_addition_subtraction_preserve_frequency(self):
timestamp_instance = date_range('2014-03-05', periods=1, freq='D')[0]
timedelta_instance = datetime.timedelta(days=1)
original_freq = timestamp_instance.freq
self.assertEqual((timestamp_instance + 1).freq, original_freq)
self.assertEqual((timestamp_instance - 1).freq, original_freq)
self.assertEqual((timestamp_instance + timedelta_instance).freq, original_freq)
self.assertEqual((timestamp_instance - timedelta_instance).freq, original_freq)
timedelta64_instance = np.timedelta64(1, 'D')
self.assertEqual((timestamp_instance + timedelta64_instance).freq, original_freq)
self.assertEqual((timestamp_instance - timedelta64_instance).freq, original_freq)
def test_resolution(self):
for freq, expected in zip(['A', 'Q', 'M', 'D', 'H', 'T', 'S', 'L', 'U'],
[period.D_RESO, period.D_RESO, period.D_RESO, period.D_RESO,
period.H_RESO, period.T_RESO, period.S_RESO, period.MS_RESO, period.US_RESO]):
for tz in [None, 'Asia/Tokyo', 'US/Eastern', 'dateutil/US/Eastern']:
idx = date_range(start='2013-04-01', periods=30, freq=freq, tz=tz)
result = period.resolution(idx.asi8, idx.tz)
self.assertEqual(result, expected)
if __name__ == '__main__':
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
| gpl-2.0 |
djgagne/scikit-learn | sklearn/tests/test_naive_bayes.py | 70 | 17509 | import pickle
from io import BytesIO
import numpy as np
import scipy.sparse
from sklearn.datasets import load_digits, load_iris
from sklearn.cross_validation import cross_val_score, train_test_split
from sklearn.externals.six.moves import zip
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.naive_bayes import GaussianNB, BernoulliNB, MultinomialNB
# Data is just 6 separable points in the plane
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
y = np.array([1, 1, 1, 2, 2, 2])
# A bit more random tests
rng = np.random.RandomState(0)
X1 = rng.normal(size=(10, 3))
y1 = (rng.normal(size=(10)) > 0).astype(np.int)
# Data is 6 random integer points in a 100 dimensional space classified to
# three classes.
X2 = rng.randint(5, size=(6, 100))
y2 = np.array([1, 1, 2, 2, 3, 3])
def test_gnb():
# Gaussian Naive Bayes classification.
# This checks that GaussianNB implements fit and predict and returns
# correct values for a simple toy dataset.
clf = GaussianNB()
y_pred = clf.fit(X, y).predict(X)
assert_array_equal(y_pred, y)
y_pred_proba = clf.predict_proba(X)
y_pred_log_proba = clf.predict_log_proba(X)
assert_array_almost_equal(np.log(y_pred_proba), y_pred_log_proba, 8)
# Test whether label mismatch between target y and classes raises
# an Error
# FIXME Remove this test once the more general partial_fit tests are merged
assert_raises(ValueError, GaussianNB().partial_fit, X, y, classes=[0, 1])
def test_gnb_prior():
# Test whether class priors are properly set.
clf = GaussianNB().fit(X, y)
assert_array_almost_equal(np.array([3, 3]) / 6.0,
clf.class_prior_, 8)
clf.fit(X1, y1)
# Check that the class priors sum to 1
assert_array_almost_equal(clf.class_prior_.sum(), 1)
def test_gnb_sample_weight():
"""Test whether sample weights are properly used in GNB. """
# Sample weights all being 1 should not change results
sw = np.ones(6)
clf = GaussianNB().fit(X, y)
clf_sw = GaussianNB().fit(X, y, sw)
assert_array_almost_equal(clf.theta_, clf_sw.theta_)
assert_array_almost_equal(clf.sigma_, clf_sw.sigma_)
# Fitting twice with half sample-weights should result
# in same result as fitting once with full weights
sw = rng.rand(y.shape[0])
clf1 = GaussianNB().fit(X, y, sample_weight=sw)
clf2 = GaussianNB().partial_fit(X, y, classes=[1, 2], sample_weight=sw / 2)
clf2.partial_fit(X, y, sample_weight=sw / 2)
assert_array_almost_equal(clf1.theta_, clf2.theta_)
assert_array_almost_equal(clf1.sigma_, clf2.sigma_)
# Check that duplicate entries and correspondingly increased sample
# weights yield the same result
ind = rng.randint(0, X.shape[0], 20)
sample_weight = np.bincount(ind, minlength=X.shape[0])
clf_dupl = GaussianNB().fit(X[ind], y[ind])
clf_sw = GaussianNB().fit(X, y, sample_weight)
assert_array_almost_equal(clf_dupl.theta_, clf_sw.theta_)
assert_array_almost_equal(clf_dupl.sigma_, clf_sw.sigma_)
def test_discrete_prior():
# Test whether class priors are properly set.
for cls in [BernoulliNB, MultinomialNB]:
clf = cls().fit(X2, y2)
assert_array_almost_equal(np.log(np.array([2, 2, 2]) / 6.0),
clf.class_log_prior_, 8)
def test_mnnb():
# Test Multinomial Naive Bayes classification.
# This checks that MultinomialNB implements fit and predict and returns
# correct values for a simple toy dataset.
for X in [X2, scipy.sparse.csr_matrix(X2)]:
# Check the ability to predict the learning set.
clf = MultinomialNB()
assert_raises(ValueError, clf.fit, -X, y2)
y_pred = clf.fit(X, y2).predict(X)
assert_array_equal(y_pred, y2)
# Verify that np.log(clf.predict_proba(X)) gives the same results as
# clf.predict_log_proba(X)
y_pred_proba = clf.predict_proba(X)
y_pred_log_proba = clf.predict_log_proba(X)
assert_array_almost_equal(np.log(y_pred_proba), y_pred_log_proba, 8)
# Check that incremental fitting yields the same results
clf2 = MultinomialNB()
clf2.partial_fit(X[:2], y2[:2], classes=np.unique(y2))
clf2.partial_fit(X[2:5], y2[2:5])
clf2.partial_fit(X[5:], y2[5:])
y_pred2 = clf2.predict(X)
assert_array_equal(y_pred2, y2)
y_pred_proba2 = clf2.predict_proba(X)
y_pred_log_proba2 = clf2.predict_log_proba(X)
assert_array_almost_equal(np.log(y_pred_proba2), y_pred_log_proba2, 8)
assert_array_almost_equal(y_pred_proba2, y_pred_proba)
assert_array_almost_equal(y_pred_log_proba2, y_pred_log_proba)
# Partial fit on the whole data at once should be the same as fit too
clf3 = MultinomialNB()
clf3.partial_fit(X, y2, classes=np.unique(y2))
y_pred3 = clf3.predict(X)
assert_array_equal(y_pred3, y2)
y_pred_proba3 = clf3.predict_proba(X)
y_pred_log_proba3 = clf3.predict_log_proba(X)
assert_array_almost_equal(np.log(y_pred_proba3), y_pred_log_proba3, 8)
assert_array_almost_equal(y_pred_proba3, y_pred_proba)
assert_array_almost_equal(y_pred_log_proba3, y_pred_log_proba)
def check_partial_fit(cls):
clf1 = cls()
clf1.fit([[0, 1], [1, 0]], [0, 1])
clf2 = cls()
clf2.partial_fit([[0, 1], [1, 0]], [0, 1], classes=[0, 1])
assert_array_equal(clf1.class_count_, clf2.class_count_)
assert_array_equal(clf1.feature_count_, clf2.feature_count_)
clf3 = cls()
clf3.partial_fit([[0, 1]], [0], classes=[0, 1])
clf3.partial_fit([[1, 0]], [1])
assert_array_equal(clf1.class_count_, clf3.class_count_)
assert_array_equal(clf1.feature_count_, clf3.feature_count_)
def test_discretenb_partial_fit():
for cls in [MultinomialNB, BernoulliNB]:
yield check_partial_fit, cls
def test_gnb_partial_fit():
clf = GaussianNB().fit(X, y)
clf_pf = GaussianNB().partial_fit(X, y, np.unique(y))
assert_array_almost_equal(clf.theta_, clf_pf.theta_)
assert_array_almost_equal(clf.sigma_, clf_pf.sigma_)
assert_array_almost_equal(clf.class_prior_, clf_pf.class_prior_)
clf_pf2 = GaussianNB().partial_fit(X[0::2, :], y[0::2], np.unique(y))
clf_pf2.partial_fit(X[1::2], y[1::2])
assert_array_almost_equal(clf.theta_, clf_pf2.theta_)
assert_array_almost_equal(clf.sigma_, clf_pf2.sigma_)
assert_array_almost_equal(clf.class_prior_, clf_pf2.class_prior_)
def test_discretenb_pickle():
# Test picklability of discrete naive Bayes classifiers
for cls in [BernoulliNB, MultinomialNB, GaussianNB]:
clf = cls().fit(X2, y2)
y_pred = clf.predict(X2)
store = BytesIO()
pickle.dump(clf, store)
clf = pickle.load(BytesIO(store.getvalue()))
assert_array_equal(y_pred, clf.predict(X2))
if cls is not GaussianNB:
            # TODO: GaussianNB is skipped here even though partial_fit is now
            # implemented for it (see test_gnb_partial_fit above)
# Test pickling of estimator trained with partial_fit
clf2 = cls().partial_fit(X2[:3], y2[:3], classes=np.unique(y2))
clf2.partial_fit(X2[3:], y2[3:])
store = BytesIO()
pickle.dump(clf2, store)
clf2 = pickle.load(BytesIO(store.getvalue()))
assert_array_equal(y_pred, clf2.predict(X2))
def test_input_check_fit():
# Test input checks for the fit method
for cls in [BernoulliNB, MultinomialNB, GaussianNB]:
# check shape consistency for number of samples at fit time
assert_raises(ValueError, cls().fit, X2, y2[:-1])
# check shape consistency for number of input features at predict time
clf = cls().fit(X2, y2)
assert_raises(ValueError, clf.predict, X2[:, :-1])
def test_input_check_partial_fit():
for cls in [BernoulliNB, MultinomialNB]:
# check shape consistency
assert_raises(ValueError, cls().partial_fit, X2, y2[:-1],
classes=np.unique(y2))
# classes is required for first call to partial fit
assert_raises(ValueError, cls().partial_fit, X2, y2)
# check consistency of consecutive classes values
clf = cls()
clf.partial_fit(X2, y2, classes=np.unique(y2))
assert_raises(ValueError, clf.partial_fit, X2, y2,
classes=np.arange(42))
# check consistency of input shape for partial_fit
assert_raises(ValueError, clf.partial_fit, X2[:, :-1], y2)
# check consistency of input shape for predict
assert_raises(ValueError, clf.predict, X2[:, :-1])
def test_discretenb_predict_proba():
# Test discrete NB classes' probability scores
# The 100s below distinguish Bernoulli from multinomial.
# FIXME: write a test to show this.
X_bernoulli = [[1, 100, 0], [0, 1, 0], [0, 100, 1]]
X_multinomial = [[0, 1], [1, 3], [4, 0]]
# test binary case (1-d output)
y = [0, 0, 2] # 2 is regression test for binary case, 02e673
for cls, X in zip([BernoulliNB, MultinomialNB],
[X_bernoulli, X_multinomial]):
clf = cls().fit(X, y)
assert_equal(clf.predict(X[-1:]), 2)
assert_equal(clf.predict_proba([X[0]]).shape, (1, 2))
assert_array_almost_equal(clf.predict_proba(X[:2]).sum(axis=1),
np.array([1., 1.]), 6)
# test multiclass case (2-d output, must sum to one)
y = [0, 1, 2]
for cls, X in zip([BernoulliNB, MultinomialNB],
[X_bernoulli, X_multinomial]):
clf = cls().fit(X, y)
assert_equal(clf.predict_proba(X[0:1]).shape, (1, 3))
assert_equal(clf.predict_proba(X[:2]).shape, (2, 3))
assert_almost_equal(np.sum(clf.predict_proba([X[1]])), 1)
assert_almost_equal(np.sum(clf.predict_proba([X[-1]])), 1)
assert_almost_equal(np.sum(np.exp(clf.class_log_prior_)), 1)
assert_almost_equal(np.sum(np.exp(clf.intercept_)), 1)
def test_discretenb_uniform_prior():
# Test whether discrete NB classes fit a uniform prior
# when fit_prior=False and class_prior=None
for cls in [BernoulliNB, MultinomialNB]:
clf = cls()
clf.set_params(fit_prior=False)
clf.fit([[0], [0], [1]], [0, 0, 1])
prior = np.exp(clf.class_log_prior_)
assert_array_equal(prior, np.array([.5, .5]))
def test_discretenb_provide_prior():
# Test whether discrete NB classes use provided prior
for cls in [BernoulliNB, MultinomialNB]:
clf = cls(class_prior=[0.5, 0.5])
clf.fit([[0], [0], [1]], [0, 0, 1])
prior = np.exp(clf.class_log_prior_)
assert_array_equal(prior, np.array([.5, .5]))
# Inconsistent number of classes with prior
assert_raises(ValueError, clf.fit, [[0], [1], [2]], [0, 1, 2])
assert_raises(ValueError, clf.partial_fit, [[0], [1]], [0, 1],
classes=[0, 1, 1])
def test_discretenb_provide_prior_with_partial_fit():
# Test whether discrete NB classes use provided prior
# when using partial_fit
iris = load_iris()
iris_data1, iris_data2, iris_target1, iris_target2 = train_test_split(
iris.data, iris.target, test_size=0.4, random_state=415)
for cls in [BernoulliNB, MultinomialNB]:
for prior in [None, [0.3, 0.3, 0.4]]:
clf_full = cls(class_prior=prior)
clf_full.fit(iris.data, iris.target)
clf_partial = cls(class_prior=prior)
clf_partial.partial_fit(iris_data1, iris_target1,
classes=[0, 1, 2])
clf_partial.partial_fit(iris_data2, iris_target2)
assert_array_almost_equal(clf_full.class_log_prior_,
clf_partial.class_log_prior_)
def test_sample_weight_multiclass():
for cls in [BernoulliNB, MultinomialNB]:
# check shape consistency for number of samples at fit time
yield check_sample_weight_multiclass, cls
def check_sample_weight_multiclass(cls):
X = [
[0, 0, 1],
[0, 1, 1],
[0, 1, 1],
[1, 0, 0],
]
y = [0, 0, 1, 2]
sample_weight = np.array([1, 1, 2, 2], dtype=np.float)
sample_weight /= sample_weight.sum()
clf = cls().fit(X, y, sample_weight=sample_weight)
assert_array_equal(clf.predict(X), [0, 1, 1, 2])
# Check sample weight using the partial_fit method
clf = cls()
clf.partial_fit(X[:2], y[:2], classes=[0, 1, 2],
sample_weight=sample_weight[:2])
clf.partial_fit(X[2:3], y[2:3], sample_weight=sample_weight[2:3])
clf.partial_fit(X[3:], y[3:], sample_weight=sample_weight[3:])
assert_array_equal(clf.predict(X), [0, 1, 1, 2])
def test_sample_weight_mnb():
clf = MultinomialNB()
clf.fit([[1, 2], [1, 2], [1, 0]],
[0, 0, 1],
sample_weight=[1, 1, 4])
assert_array_equal(clf.predict([[1, 0]]), [1])
positive_prior = np.exp(clf.intercept_[0])
assert_array_almost_equal([1 - positive_prior, positive_prior],
[1 / 3., 2 / 3.])
def test_coef_intercept_shape():
# coef_ and intercept_ should have shapes as in other linear models.
# Non-regression test for issue #2127.
X = [[1, 0, 0], [1, 1, 1]]
y = [1, 2] # binary classification
for clf in [MultinomialNB(), BernoulliNB()]:
clf.fit(X, y)
assert_equal(clf.coef_.shape, (1, 3))
assert_equal(clf.intercept_.shape, (1,))
def test_check_accuracy_on_digits():
# Non regression test to make sure that any further refactoring / optim
# of the NB models do not harm the performance on a slightly non-linearly
# separable dataset
digits = load_digits()
X, y = digits.data, digits.target
binary_3v8 = np.logical_or(digits.target == 3, digits.target == 8)
X_3v8, y_3v8 = X[binary_3v8], y[binary_3v8]
# Multinomial NB
scores = cross_val_score(MultinomialNB(alpha=10), X, y, cv=10)
assert_greater(scores.mean(), 0.86)
scores = cross_val_score(MultinomialNB(alpha=10), X_3v8, y_3v8, cv=10)
assert_greater(scores.mean(), 0.94)
# Bernoulli NB
scores = cross_val_score(BernoulliNB(alpha=10), X > 4, y, cv=10)
assert_greater(scores.mean(), 0.83)
scores = cross_val_score(BernoulliNB(alpha=10), X_3v8 > 4, y_3v8, cv=10)
assert_greater(scores.mean(), 0.92)
# Gaussian NB
scores = cross_val_score(GaussianNB(), X, y, cv=10)
assert_greater(scores.mean(), 0.77)
scores = cross_val_score(GaussianNB(), X_3v8, y_3v8, cv=10)
assert_greater(scores.mean(), 0.86)
def test_feature_log_prob_bnb():
# Test for issue #4268.
# Tests that the feature log prob value computed by BernoulliNB when
# alpha=1.0 is equal to the expression given in Manning, Raghavan,
# and Schuetze's "Introduction to Information Retrieval" book:
# http://nlp.stanford.edu/IR-book/html/htmledition/the-bernoulli-model-1.html
X = np.array([[0, 0, 0], [1, 1, 0], [0, 1, 0], [1, 0, 1], [0, 1, 0]])
Y = np.array([0, 0, 1, 2, 2])
# Fit Bernoulli NB w/ alpha = 1.0
clf = BernoulliNB(alpha=1.0)
clf.fit(X, Y)
# Manually form the (log) numerator and denominator that
# constitute P(feature presence | class)
num = np.log(clf.feature_count_ + 1.0)
denom = np.tile(np.log(clf.class_count_ + 2.0), (X.shape[1], 1)).T
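    # With alpha=1.0 this is the book's Bernoulli-model estimate
    # P(x_i = 1 | c) = (N_ci + 1) / (N_c + 2), i.e. Laplace smoothing with one
    # extra "present" and one "absent" document per class.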
# Check manual estimate matches
assert_array_equal(clf.feature_log_prob_, (num - denom))
def test_bnb():
# Tests that BernoulliNB when alpha=1.0 gives the same values as
# those given for the toy example in Manning, Raghavan, and
# Schuetze's "Introduction to Information Retrieval" book:
# http://nlp.stanford.edu/IR-book/html/htmledition/the-bernoulli-model-1.html
# Training data points are:
# Chinese Beijing Chinese (class: China)
# Chinese Chinese Shanghai (class: China)
# Chinese Macao (class: China)
# Tokyo Japan Chinese (class: Japan)
# Features are Beijing, Chinese, Japan, Macao, Shanghai, and Tokyo
X = np.array([[1, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 1, 0, 1, 0, 0],
[0, 1, 1, 0, 0, 1]])
# Classes are China (0), Japan (1)
Y = np.array([0, 0, 0, 1])
# Fit BernoulliBN w/ alpha = 1.0
clf = BernoulliNB(alpha=1.0)
clf.fit(X, Y)
# Check the class prior is correct
class_prior = np.array([0.75, 0.25])
assert_array_almost_equal(np.exp(clf.class_log_prior_), class_prior)
# Check the feature probabilities are correct
feature_prob = np.array([[0.4, 0.8, 0.2, 0.4, 0.4, 0.2],
[1/3.0, 2/3.0, 2/3.0, 1/3.0, 1/3.0, 2/3.0]])
assert_array_almost_equal(np.exp(clf.feature_log_prob_), feature_prob)
# Testing data point is:
# Chinese Chinese Chinese Tokyo Japan
X_test = np.array([[0, 1, 1, 0, 0, 1]])
# Check the predictive probabilities are correct
unnorm_predict_proba = np.array([[0.005183999999999999,
0.02194787379972565]])
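    # e.g. for the China class: prior 0.75 times the Bernoulli likelihood
    # (1-0.4)*0.8*0.2*(1-0.4)*(1-0.4)*0.2 = 0.006912, so 0.75*0.006912 = 0.005184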
predict_proba = unnorm_predict_proba / np.sum(unnorm_predict_proba)
assert_array_almost_equal(clf.predict_proba(X_test), predict_proba)
| bsd-3-clause |
ThomasMiconi/htmresearch | projects/wavelet_dataAggregation/runDataAggregationExperiment.py | 11 | 21206 | from os.path import isfile, join, exists
import pandas as pd
import numpy as np
from scipy import signal
import numpy.matlib
import csv
import os
import time
os.environ['TZ'] = 'GMT'
time.tzset()
display = True
if display:
import matplotlib.pyplot as plt
plt.close('all')
plt.ion()
def plotWaveletPower(sig, cwtmatr, time_scale, x_range=None, title=''):
"""
Display wavelet transformations along with the original data
:param sig: original sigal
:param cwtmatr: cwt coefficients
:param time_scale: time scales of wavelets
:param x_range: x range of the plot
:param title: title of the plot
"""
if x_range is None:
x_range = range(0, cwtmatr.shape[1])
fig, ax = plt.subplots(nrows=2, ncols=1)
y_time_scale_tick = ['1-sec', '1mins', '5mins', '30mins', '60mins', '2hrs', '4hrs', '12hrs', '1day', '1week']
y_time_scale = [1, 60, 300, 1800, 3600, 7200, 14400, 43200, 86400, 604800]
y_tick = (np.log10(y_time_scale) - np.log10(time_scale[0]) ) / \
(np.log10(time_scale[-1]) - np.log10(time_scale[0])) * (len(time_scale)-1)
good_tick = np.where(np.logical_and(y_tick >= 0, y_tick < len(time_scale)))[0]
y_tick = y_tick[good_tick]
y_time_scale_tick = [y_time_scale_tick[i] for i in good_tick]
ax[0].imshow(np.abs(cwtmatr[:, x_range]), aspect='auto')
ax[0].set_yticks(y_tick)
ax[0].set_yticklabels(y_time_scale_tick)
ax[0].set_xlabel(' Time ')
ax[0].set_title(title)
ax[1].plot(sig[x_range])
ax[1].set_xlabel(' Time ')
ax[1].autoscale(tight=True)
plt.show()
def calculate_cwt(sampling_interval, sig, figDir='./', fileName='./', display=True):
"""
Calculate continuous wavelet transformation (CWT)
Return variance of the cwt coefficients overtime and its cumulative
distribution
:param sampling_interval: sampling interval of the time series
:param sig: value of the time series
:param figDir: directory of cwt plots
:param fileName: name of the dataset, used for determining figDir
:param display: whether to create the cwt plot
"""
t = np.array(range(len(sig)))*sampling_interval
widths = np.logspace(0, np.log10(len(sig)/20), 50)
T = int(widths[-1])
  # continuous wavelet transformation with a Ricker wavelet
cwtmatr = signal.cwt(sig, signal.ricker, widths)
cwtmatr = cwtmatr[:, 4*T:-4*T]
sig = sig[4*T:-4*T]
t = t[4*T:-4*T]
freq = 1/widths.astype('float') / sampling_interval / 4
time_scale = widths * sampling_interval * 4
# variance of wavelet power
cwt_var = np.var(np.abs(cwtmatr), axis=1)
cwt_var = cwt_var/np.sum(cwt_var)
cum_cwt_var = np.cumsum(cwt_var)
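  # cum_cwt_var is the cumulative fraction of wavelet-power variance across time
  # scales; callers threshold it (cum_cwt_var >= thresh) to choose the shortest
  # time scale that still needs to be resolved after aggregation.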
(useTimeOfDay, useDayOfWeek, local_min, local_max, strong_local_max) = get_local_maxima(cwt_var, time_scale)
if not exists(figDir):
os.makedirs(figDir)
if display:
# plot wavelet coefficients along with the raw signal
plt.close('all')
plotWaveletPower(sig, cwtmatr, time_scale)
plt.savefig(join(figDir, fileName + 'wavelet_transform.pdf'))
fig, axs = plt.subplots(nrows=2, ncols=1)
ax = axs[0]
ax.plot(time_scale, cwt_var, '-o')
ax.axvline(x=86400, color='c')
ax.axvline(x=604800, color='c')
for _ in xrange(len(local_max)):
ax.axvline(x=time_scale[local_max[_]], color='r')
for _ in xrange(len(strong_local_max)):
ax.axvline(x=time_scale[strong_local_max[_]], color='k')
for _ in xrange(len(local_min)):
ax.axvline(x=time_scale[local_min[_]], color='b')
ax.set_xscale('log')
ax.set_xlabel(' Time Scale (sec) ')
ax.set_ylabel(' Variance of Power')
    ax.autoscale(tight=True)
ax.set_title(fileName)
ax = axs[1]
ax.plot(time_scale, cum_cwt_var, '-o')
ax.set_xscale('log')
ax.set_xlabel(' Time Scale (sec) ')
ax.set_ylabel(' Accumulated Variance of Power')
    ax.autoscale(tight=True)
    plt.title('useTimeOfDay: ' + str(useTimeOfDay) + ' useDayOfWeek: ' + str(useDayOfWeek))
plt.savefig(join(figDir, fileName + 'aggregation_time_scale.pdf'))
return cum_cwt_var, cwt_var, time_scale
def get_local_maxima(cwt_var, time_scale):
"""
Find local maxima from the wavelet coefficient variance spectrum
  A strong maximum is defined as one that is
  (1) at least 10% higher than the nearest local minimum, and
  (2) above the baseline value
"""
# peak & valley detection
local_min = (np.diff(np.sign(np.diff(cwt_var))) > 0).nonzero()[0] + 1
local_max = (np.diff(np.sign(np.diff(cwt_var))) < 0).nonzero()[0] + 1
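  # np.diff(np.sign(np.diff(v))) is +2 where the slope switches from negative to
  # positive (a local minimum) and -2 where it switches from positive to negative
  # (a local maximum); the +1 offset compensates for the double differencing.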
baseline_value = 1.0/len(cwt_var)
dayPeriod = 86400.0
weekPeriod = 604800.0
cwt_var_at_dayPeriod = np.interp(dayPeriod, time_scale, cwt_var)
cwt_var_at_weekPeriod = np.interp(weekPeriod, time_scale, cwt_var)
useTimeOfDay = False
useDayOfWeek = False
strong_local_max = []
for i in xrange(len(local_max)):
left_local_min = np.where(np.less(local_min, local_max[i]))[0]
if len(left_local_min) == 0:
left_local_min = 0
left_local_min_value = cwt_var[0]
else:
left_local_min = local_min[left_local_min[-1]]
left_local_min_value = cwt_var[left_local_min]
right_local_min = np.where(np.greater(local_min, local_max[i]))[0]
if len(right_local_min) == 0:
right_local_min = len(cwt_var)-1
right_local_min_value = cwt_var[-1]
else:
right_local_min = local_min[right_local_min[0]]
right_local_min_value = cwt_var[right_local_min]
local_max_value = cwt_var[local_max[i]]
    # use builtin max: np.max's second positional argument is an axis, not a value
    nearest_local_min_value = max(left_local_min_value, right_local_min_value)
if ( (local_max_value - nearest_local_min_value)/nearest_local_min_value > 0.1 and
local_max_value > baseline_value):
strong_local_max.append(local_max[i])
if (time_scale[left_local_min] < dayPeriod and
dayPeriod < time_scale[right_local_min] and
cwt_var_at_dayPeriod > local_max_value/2.0):
# if np.abs(dayPeriod - time_scale[local_max[i]])/dayPeriod < 0.5:
useTimeOfDay = True
if (time_scale[left_local_min] < weekPeriod and
weekPeriod < time_scale[right_local_min] and
cwt_var_at_weekPeriod > local_max_value/2.0):
# if np.abs(weekPeriod - time_scale[local_max[i]])/weekPeriod < 0.5:
useDayOfWeek = True
return useTimeOfDay, useDayOfWeek, local_min, local_max, strong_local_max
def get_suggested_timescale_and_encoder(timestamp, sig, thresh=0.2):
dt = np.median(np.diff(timestamp))
dt_sec = dt.astype('float32')
# resample the data with homogeneous sampling intervals
(timestamp, sig) = resample_data(timestamp, sig, dt, display=True)
(cum_cwt_var, cwt_var, time_scale) = calculate_cwt(dt_sec, sig)
(useTimeOfDay, useDayOfWeek, local_min, local_max, strong_local_max) = get_local_maxima(cwt_var, time_scale)
cutoff_time_scale = time_scale[np.where(cum_cwt_var >= thresh)[0][0]]
aggregation_time_scale = cutoff_time_scale/10.0
if aggregation_time_scale < dt_sec*4:
aggregation_time_scale = dt_sec*4
new_sampling_interval = str(int(aggregation_time_scale/4))+'S'
return (new_sampling_interval, useTimeOfDay, useDayOfWeek)
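# Illustrative usage sketch (hypothetical file path; readCSVfiles is defined below):
#   (timestamps, values) = readCSVfiles('data/some_metric.csv')
#   interval, useTimeOfDay, useDayOfWeek = get_suggested_timescale_and_encoder(timestamps, values)
#   # 'interval' is a pandas-style offset string such as '300S', and the two flags
#   # suggest whether timeOfDay / dayOfWeek datetime encoders are worth enabling.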
def readCSVfiles(fileName):
"""
Read csv data file, the data file must have two columns
with header "timestamp", and "value"
"""
fileReader = csv.reader(open(fileName, 'r'))
fileReader.next() # skip header line
timestamps = []
values = []
for row in fileReader:
timestamps.append(row[0])
values.append(row[1])
timestamps = np.array(timestamps, dtype='datetime64')
values = np.array(values, dtype='float32')
return (timestamps, values)
def writeCSVfiles(fileName, timestamp, value):
"""
write data to csv file,
the data file will have two columns with header "timestamp", and "value"
"""
fileWriter = csv.writer(open(fileName, 'w'))
fileWriter.writerow(['timestamp', 'value'])
for i in xrange(len(timestamp)):
fileWriter.writerow([timestamp[i].astype('O').strftime("%Y-%m-%d %H:%M:%S"),
value[i]])
def resample_data(timestamp, sig, new_sampling_interval, display=False):
"""
Resample time series data at new sampling interval using linear interpolation
Note: the resampling function is using interpolation, it may not be appropriate for aggregation purpose
:param timestamp: timestamp in numpy datetime64 type
:param sig: value of the time series
  :param new_sampling_interval: new sampling interval
"""
nSampleNew = np.floor((timestamp[-1] - timestamp[0])/new_sampling_interval).astype('int') + 1
timestamp_new = np.empty(nSampleNew, dtype='datetime64[s]')
for sampleI in xrange(nSampleNew):
timestamp_new[sampleI] = timestamp[0] + sampleI * new_sampling_interval
sig_new = np.interp((timestamp_new-timestamp[0]).astype('float32'),
(timestamp-timestamp[0]).astype('float32'), sig)
if display:
plt.figure(3)
plt.plot(timestamp, sig)
plt.plot(timestamp_new, sig_new)
plt.legend(['before resampling', 'after resampling'])
return (timestamp_new, sig_new)
def aggregate_data(thresh_list, dataFile, aggregatedDataPath, waveletDir='./wavelet/', display=False, verbose=0):
"""
  Aggregate an individual dataset; the aggregated data will be saved under aggregatedDataPath
  :param thresh_list: list of aggregation thresholds
  :param dataFile: path of the original datafile
  :param aggregatedDataPath: directory where the aggregated datafiles are written
  :param waveletDir: path of wavelet transformations (for visual inspection)
"""
data_file_dir = dataFile.split('/')
(timestamp, sig) = readCSVfiles(dataFile)
# dt = (timestamp[len(sig)-1] - timestamp[0])/(len(sig)-1)
dt = np.median(np.diff(timestamp))
dt_sec = dt.astype('float32')
# resample the data with homogeneous sampling intervals
(timestamp, sig) = resample_data(timestamp, sig, dt, display=True)
(cum_cwt_var, cwt_var, time_scale) = calculate_cwt(dt_sec, sig,
display=display,
figDir=join(waveletDir, data_file_dir[-2]),
fileName=data_file_dir[-1])
for thresh in thresh_list:
new_data_dir = join(aggregatedDataPath, 'thresh='+str(thresh), data_file_dir[-2])
if not exists(new_data_dir):
os.makedirs(new_data_dir)
new_data_file = join(new_data_dir, data_file_dir[-1])
# determine aggregation time scale
cutoff_time_scale = time_scale[np.where(cum_cwt_var >= thresh)[0][0]]
aggregation_time_scale = cutoff_time_scale/10.0
if aggregation_time_scale < dt_sec*4:
aggregation_time_scale = dt_sec*4
new_sampling_interval = np.timedelta64(int(aggregation_time_scale/4 * 1000), 'ms')
nSampleNew = np.floor((timestamp[-1] - timestamp[0])/new_sampling_interval).astype('int') + 1
timestamp_new = np.empty(nSampleNew, dtype='datetime64[s]')
value_new = np.empty(nSampleNew, dtype='float32')
left_sampleI = 0
new_sampleI = 0
for sampleI in xrange(len(sig)):
if timestamp[sampleI] >= timestamp[0] + new_sampleI * new_sampling_interval:
timestamp_new[new_sampleI] = timestamp[0] + new_sampleI * new_sampling_interval
value_new[new_sampleI] = (np.mean(sig[left_sampleI:sampleI+1]))
left_sampleI = sampleI+1
new_sampleI += 1
writeCSVfiles(new_data_file, timestamp_new, value_new)
if verbose > 0:
print " original length: ", len(sig), "\t file: ", dataFile
print "\t\tthreshold: ", thresh, "\t new length: ", len(value_new)
def aggregate_nab_data(thresh_list, dataPath='data/',
aggregatedDataPath='data_aggregate/',
waveletDir='wavelet/',
verbose=0):
"""
Aggregate all NAB data using the wavelet transformation based algorithm
  :param thresh_list: list of aggregation thresholds, each a number in [0, 1)
:param dataPath: path of the original NAB data
:param aggregatedDataPath: path of the aggregated NAB data
:param waveletDir: path of wavelet transformations (for visual inspection)
"""
if not exists(aggregatedDataPath):
os.makedirs(aggregatedDataPath)
dataDirs = [join(dataPath, f) for f in os.listdir(dataPath) if not isfile(join(dataPath, f))]
for dir in dataDirs:
datafiles = [join(dir, f) for f in os.listdir(dir) if isfile(join(dir, f))]
for i in range(len(datafiles)):
aggregate_data(thresh_list, datafiles[i], aggregatedDataPath, waveletDir, verbose=verbose)
def get_pre_aggregated_anomaly_score(data_path, result_folder, result_folder_pre_aggregate):
"""
This function transforms anomaly scores on the aggregated data file (in result_folder)
to the original sampling rate of the data (in data_path) before aggregation. The new anomaly
score will be saved to result_folder_pre_aggregate
"""
dataDirs = [join(result_folder, f) for f in os.listdir(result_folder) if not isfile(join(result_folder, f))]
for dir in dataDirs:
resultfiles = [join(dir, f) for f in os.listdir(dir) if isfile(join(dir, f))]
for i in range(len(resultfiles)):
result_file_dir = resultfiles[i].split('/')
original_data_file = join(data_path, result_file_dir[-2], result_file_dir[-1][8:])
dat = pd.read_csv(original_data_file, header=0, names=['timestamp', 'value'])
result = pd.read_csv(resultfiles[i], header=0,
names=['timestamp', 'value', 'anomaly_score', 'raw_score', 'label'])
time_stamp_pre_aggregation = pd.to_datetime(dat.timestamp)
time_stamp_after_aggregation = pd.to_datetime(result.timestamp)
binary_anomaly_score_pre_aggregation = np.zeros(shape=(len(dat),))
binary_anomaly_score_after_aggregation = np.zeros(shape=(len(result),))
for j in range(len(result)):
if result.anomaly_score[j] > .5:
binary_anomaly_score_after_aggregation[j] = 1
idx_original = np.argmin(abs(time_stamp_pre_aggregation - time_stamp_after_aggregation[j]))
binary_anomaly_score_pre_aggregation[idx_original] = 1
value_pre_aggregation = dat.value.values
raw_score_pre_aggregation = np.zeros(shape=(len(dat),))
label_pre_aggregation = np.zeros(shape=(len(dat),))
# raw_score_pre_aggregation = np.interp(time_stamp_original, time_stamp_after_aggregation, result.raw_score.values)
result_pre_aggregate = pd.DataFrame(np.transpose(np.array([time_stamp_pre_aggregation,
value_pre_aggregation,
binary_anomaly_score_pre_aggregation,
raw_score_pre_aggregation,
label_pre_aggregation])),
columns=['timestamp', 'value', 'anomaly_score', 'raw_score', 'label'])
result_file_dir_pre_aggregate = join(result_folder_pre_aggregate, result_file_dir[-2])
if not exists(result_file_dir_pre_aggregate):
os.makedirs(result_file_dir_pre_aggregate)
result_file_pre_aggregate = join(result_file_dir_pre_aggregate, result_file_dir[-1])
result_pre_aggregate.to_csv(result_file_pre_aggregate, index=False)
print " write pre-aggregated file to ", result_file_pre_aggregate
# compare anomaly scores before and after aggregations for individual files
# plt.figure(2)
# plt.plot(time_stamp_after_aggregation, binary_anomaly_score_after_aggregation)
# plt.plot(time_stamp_pre_aggregation, binary_anomaly_score_pre_aggregation)
def runTimeVsDataLength(dataPath):
"""
Plot Data Aggregation Algorithm Runtime vs length of the data
"""
dataDirs = [join(dataPath, f) for f in os.listdir(dataPath) if not isfile(join(dataPath, f))]
thresh = 0.2
dataLength = []
runTime = []
for dir in dataDirs:
datafiles = [join(dir, f) for f in os.listdir(dir) if isfile(join(dir, f))]
for i in range(len(datafiles)):
(timestamp, sig) = readCSVfiles(datafiles[i])
dataLength.append(len(sig))
start_time = time.time()
aggregate_data([thresh], datafiles[i], aggregatedDataPath='data_aggregate/', display=False)
end_time = time.time()
print " length: ", len(sig), " file: ", datafiles[i], " Time: ", (end_time - start_time)
runTime.append(end_time - start_time)
plt.figure()
plt.plot(dataLength, runTime, '*')
plt.xlabel(' Dataset Size (# Record)')
plt.ylabel(' Runtime (seconds) ')
plt.savefig('RuntimeVsDatasetSize.pdf')
return (dataLength, runTime)
if __name__ == "__main__":
NABPath = '/Users/ycui/nta/NAB/'
currentPath = os.getcwd()
thresh_list = [0, 0.02, 0.04, 0.06, 0.08, 0.1, 0.12, 0.14, 0.16, 0.18, 0.2,
0.22, 0.24, 0.26, 0.28, 0.3, 0.32, 0.34, 0.36, 0.38, 0.40]
# step 1: aggregate NAB data with different threshold
print " aggregating NAB data ..."
aggregate_nab_data(thresh_list, dataPath=NABPath+'data/', verbose=2)
# step 2: run HTM on aggregated NAB data
for thresh in thresh_list:
resultsAggregatePath = currentPath + "/results_aggregate/thresh=" + str(thresh) + "/numenta"
if not os.path.exists(resultsAggregatePath):
      os.makedirs(resultsAggregatePath)
print " run HTM on aggregated data with threshold " + str(thresh)
os.system("python " + NABPath + "run.py -d numenta --detect --dataDir " + currentPath + "/data_aggregate/thresh=" + str(thresh) + \
"/ --resultsDir "+ currentPath + "/results_aggregate/thresh=" + str(thresh) + " --skipConfirmation")
# step 3: get pre-aggregated anomaly score
for thresh in thresh_list:
preresultAggregatePath = currentPath + "/results_pre_aggregate/thresh=" + str(thresh) + "/numenta"
if not os.path.exists(preresultAggregatePath):
      os.makedirs(preresultAggregatePath)
get_pre_aggregated_anomaly_score(data_path=NABPath+'data/',
result_folder='results_aggregate/thresh=' + str(thresh) + '/numenta',
result_folder_pre_aggregate='results_pre_aggregate/thresh=' + str(thresh) + '/numenta')
# step 4: run NAB scoring
for thresh in thresh_list:
print " run scoring on aggregated data with threshold " + str(thresh)
os.system("python " + NABPath + "run.py -d numenta --score --skipConfirmation " +
"--thresholdsFile " + NABPath + "config/thresholds.json " +
"--resultsDir " + currentPath + "/results_pre_aggregate/thresh="+str(thresh)+"/")
# step 5: read & compare scores
standard_score = []
data_length_all = []
for thresh in thresh_list:
scorefile = "./results_pre_aggregate/thresh=" + str(thresh) + "/numenta/numenta_standard_scores.csv"
scoredf = pd.read_csv(scorefile, header=0)
scoredf = scoredf.sort('File')
scoredf.index = range(len(scoredf))
standard_score.append(scoredf.Score.values[:-1])
data_length = []
for i in xrange(len(scoredf.File)-1):
datafile = './data_aggregate/thresh=' + str(thresh) + '/' + scoredf.File[i]
dat = pd.read_csv(datafile, header=0, names=['timestamp', 'value'])
data_length.append(len(dat))
data_length_all.append(data_length)
data_length_all = np.array(data_length_all)
standard_score = np.array(standard_score)
short_dat = np.where(data_length_all[0, :] < 1000)[0]
long_dat = np.where(data_length_all[0, :] > 1000)[0]
use_dat = np.array(range(data_length_all.shape[1]))
use_dat = long_dat
# plt.imshow(data_length_all, interpolation='nearest', aspect='auto')
# plot anomaly score vs aggregation threshold
anomaly_score_diff = standard_score[:, long_dat] - numpy.matlib.repmat(standard_score[0, long_dat], len(thresh_list), 1)
shortFileName = []
for i in range(len(scoredf.File.values[:-1])):
file = scoredf.File.values[i]
fileName = file.split('/')[-1]
fileName = fileName[:-4]
shortFileName.append(fileName)
fig=plt.figure()
plt.imshow(anomaly_score_diff, interpolation='nearest', aspect='auto')
ytickLoc = range(len(thresh_list))
plt.yticks(ytickLoc, thresh_list)
plt.xticks(range(len(scoredf.File)-1), shortFileName, rotation='vertical')
plt.subplots_adjust(bottom=0.6)
plt.ylabel(' Threshold')
plt.title(' Anomaly Score Relative to BaseLine')
plt.colorbar()
plt.clim(-2, 2)
plt.savefig('AnomalyScore_Vs_AggregationThreshold_EachFile.pdf')
plt.figure()
plt.subplot(2, 1, 1)
plt.plot(np.array(thresh_list)*100, np.median(standard_score[:, use_dat], 1), '-o')
plt.plot(np.array(thresh_list)*100, np.mean(standard_score[:, use_dat], 1), '-o')
plt.legend(['Median', 'Mean'])
plt.xlabel(' Threshold (%)')
plt.ylabel(' Median Anomaly Score ')
plt.subplot(2, 1, 2)
plt.plot(np.array(thresh_list)*100, np.median(data_length_all[:, use_dat], 1), '-o')
plt.plot(np.array(thresh_list)*100, np.mean(data_length_all[:, use_dat], 1), '-o')
plt.xlabel(' Threshold (%)')
plt.ylabel(' Data Length ')
plt.legend(['Median', 'Mean'])
plt.savefig('AnomalyScore_Vs_AggregationThreshold.pdf')
num_better_anomaly_score = []
for i in xrange(len(thresh_list)-1):
num_better_anomaly_score.append(len(np.where(standard_score[i+1, :] > standard_score[0, :])[0]))
(dataLength, runTime) = runTimeVsDataLength(dataPath=NABPath+'data/')
| agpl-3.0 |
BIDS-collaborative/EDAM | data/LH/oldpredict.py | 1 | 3716 | import pandas as pd
import sklearn
import sklearn.cross_validation  # explicit import: sklearn.cross_validation is used in chooseRandom below
import numpy as np
import warnings
from sklearn.cluster import KMeans
from sklearn.linear_model import LogisticRegression as LR
from sklearn.ensemble import RandomForestClassifier as RFC
from sklearn.ensemble import GradientBoostingClassifier as GBC
def load_data(fileName, dropFirstColumn = True):
df = pd.read_csv(fileName)
if dropFirstColumn:
df = df.drop(df[[0]], axis = 1)
return df.as_matrix()
def predictLR(X, y):
col_mean = np.nanmean(X,axis=0)
inds = np.where(np.isnan(X))
X[inds]=np.take(col_mean,inds[1])
lr = LR(multi_class = "multinomial", solver = "newton-cg")
X_train, X_test, y_train, y_test = chooseRandom(X, y)
lr.fit(X_train, y_train)
return lr.score(X_test, y_test)
def predictRF(X, y):
col_mean = np.nanmean(X,axis=0)
inds = np.where(np.isnan(X))
X[inds]=np.take(col_mean,inds[1])
RF = RFC(n_estimators = 100)
X_train, X_test, y_train, y_test = chooseRandom(X, y)
RF.fit(X_train, y_train)
return RF.score(X_test, y_test)
def predictGBC(X, y):
col_mean = np.nanmean(X,axis=0)
inds = np.where(np.isnan(X))
X[inds]=np.take(col_mean,inds[1])
gbc = GBC(n_estimators = 100)
X_train, X_test, y_train, y_test = chooseRandom(X, y)
gbc.fit(X_train, y_train)
return gbc.score(X_test, y_test)
def delete_unimportant(X, important):
return np.delete(X, important, 1)
def feature_combination(X, imp, category):
imp_len = len(imp)
cat_len = len(category)
newX = np.empty([X.shape[0], imp_len+cat_len-1])
for i in range(len(category)-1):
# print(category[i])
nextX = X[:,category[i]:category[i+1]]
# print(nextX.shape)
newX[:,i] = np.nansum(nextX, axis=1)
# print(newX.shape)
for i in range(imp_len):
newX[:,i+cat_len-1] = X[:,i]
return newX
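# Note: the returned matrix has (len(category) - 1) aggregated columns (one
# np.nansum per category block) followed by the first len(imp) columns of X,
# so its shape is (n_samples, len(imp) + len(category) - 1).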
def sampleAndAverage(method, method_string, iterations, X, y):
# print("[{}] start score calculation for {} iterations".format(method_string, iterations))
score = 0
for _ in range(iterations):
col_mean = np.nanmean(X,axis=0)
inds = np.where(np.isnan(X))
X[inds]=np.take(col_mean,inds[1])
score += method(X, y)
print("[{}] average score: {}".format(method_string, score/iterations))
def chooseRandom(x, y):
x_train, x_test, y_train, y_test = sklearn.cross_validation.train_test_split(x, y, test_size = 0.5)
return x_train, x_test, y_train, y_test
sample_size = 100
# From ecological literature- maybe feature 15
important = [0, 3, 6, 7, 15, 27, 35, 36, 37, 38, 39, 40, 41, 44]
category = [0, 3, 8, 13, 25, 29, 36, 44, 49]
warnings.filterwarnings('ignore')
X = load_data("pier_full_data.csv")
y = np.ravel(load_data("pier_full_labels.csv", False))
print("RUNNING ITERATION {} FOR AVERAGE SCORE".format(sample_size))
print("--------------PIER DATA SET LR-----------------")
sampleAndAverage(predictLR, "predictLR", sample_size, X, y)
sampleAndAverage(predictLR, "predictLR- important only", sample_size, delete_unimportant(X, important), y)
sampleAndAverage(predictLR, "predictLR- important + avg", sample_size, feature_combination(X, important, category), y)
print("--------------PIER DATA SET RF-----------------")
sampleAndAverage(predictRF, "predictRF", sample_size, X, y)
sampleAndAverage(predictRF, "predictRF- important only", sample_size, delete_unimportant(X, important), y)
sampleAndAverage(predictRF, "predictRF- important + avg", sample_size, feature_combination(X, important, category), y)
print("--------------PIER DATA SET GBC-----------------")
sampleAndAverage(predictGBC, "predictGBC", sample_size, X, y)
sampleAndAverage(predictGBC, "predictGBC- important only", sample_size, delete_unimportant(X, important), y)
sampleAndAverage(predictGBC, "predictGBC- important + avg", sample_size, feature_combination(X, important, category), y)
| bsd-2-clause |
xiaoxiaoyao/PythonApplication1 | PythonApplication1/爬虫练习/chromedriver/readmgr.py | 2 | 1396 | # -*- coding: utf-8 -*-
'''
Download the table data from the web page, then save it to the system; keep it simple, in readmgr
'''
__author__ = 'lai yao (lake.lai)'
import os, sys, time
from selenium import webdriver
URL = 'http://baidu.com/'
downloadDir = '/Users/laiyao/Downloads/download/'
os.chdir(downloadDir)
print('Current directory:', os.getcwd())
if True:
    option = webdriver.ChromeOptions()  # custom settings
prefs = {'profile.default_content_settings.popups': 0,
'download.default_directory': downloadDir}
option.add_experimental_option('prefs', prefs)
option.add_argument('--user-data-dir=/Users/laiyao/Library/Application Support/Google/Chrome')
    option.add_argument('--process-per-site')  # use a separate process for each site
    option.add_argument('--lang=zh-CN')  # set the browser language to Simplified Chinese
chromedriver='/usr/local/bin/chromedriver'
driver = webdriver.Chrome(chromedriver,chrome_options=option)
time.sleep(1)
driver.get(URL)
time.sleep(20)
# data section: remove before publishing to GitHub
url = []
# data section: remove before publishing to GitHub
for i in url:
time.sleep(11)
print(i)
driver.get(i)
# merge the downloaded CSV files
#%%
import glob
import pandas as pd
df = dict()
file_names = glob.glob("*.csv")
for file_name in file_names:
df[file_name] = pd.read_csv(file_name,error_bad_lines=False,encoding='gb2312')
#%%
for i in df:
df[i].to_csv('save.csv',mode='a', header=False, index=False)
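# Note: each downloaded CSV is appended to save.csv without headers or an index,
# so save.csv ends up as a plain concatenation of all the table rows.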
| unlicense |
briandrawert/pyurdme | examples/coral_reef/coral.py | 5 | 7211 | #!/usr/bin/env python
import math
import matplotlib.pyplot as plt
import numpy
import pyurdme
class CoralReef(pyurdme.URDMEModel):
""" Model developed by Briggs and Drawert 3/31/2014, based on a
non-spatial model by Briggs and Adam.
"""
def __init__(self, name="coral_reef", D_c=1.0, D_m=1.0, version=1):
pyurdme.URDMEModel.__init__(self, name)
# Species
Coral = pyurdme.Species(name="Coral",diffusion_constant=0.0)
Coral_m = pyurdme.Species(name="Coral_m",diffusion_constant=D_c)
MA = pyurdme.Species(name="MA", diffusion_constant=0.0)
MA_m = pyurdme.Species(name="MA_m", diffusion_constant=D_m)
Turf = pyurdme.Species(name="Turf", diffusion_constant=0.0)
self.add_species([Coral, MA, Coral_m, MA_m, Turf])
# Parameters
phi_c = pyurdme.Parameter(name="phi_c", expression=0.0011) #1/year
phi_m = pyurdme.Parameter(name="phi_m", expression=0.001) #1/year
g_tc = pyurdme.Parameter(name="g_tc", expression=0.1) #1/year
g_tm = pyurdme.Parameter(name="g_tm", expression=0.2) #1/year
Gamma = pyurdme.Parameter(name="Gamma", expression=0.05)
dc = pyurdme.Parameter(name="dc", expression=0.05) #1/year
dm = pyurdme.Parameter(name="dm", expression=1.0) #1/year
#dm = pyurdme.Parameter(name="dm", expression=0.2) #1/year
phi_g = pyurdme.Parameter(name="psi_g", expression=0.0)
        # Death rate of mobile propagules. Combine with diffusion to determine spread.
mu_c = pyurdme.Parameter(name="mu_c", expression=1.0) #1/year
mu_m = pyurdme.Parameter(name="mu_m", expression=1.0) #1/year
        # mobile propagules destroyed by established cover
alpha_c = pyurdme.Parameter(name="alpha_c", expression=0.1) #1/year
alpha_m = pyurdme.Parameter(name="alpha_m", expression=0.5) #1/year
        # Production of mobile propagules
R_c = pyurdme.Parameter(name="R_c", expression=1.0) #1/year
R_m = pyurdme.Parameter(name="R_m", expression=1.0) #1/year
self.add_parameter([phi_c, phi_m, g_tc, g_tm, Gamma, dc, dm, phi_g, mu_c, mu_m, alpha_c, alpha_m, R_c, R_m])
# Reactions:
# C -> T : dc
self.add_reaction(pyurdme.Reaction(name="R3", reactants={Coral:1}, products={Turf:1}, rate=dc))
# MA -> T : dm
self.add_reaction(pyurdme.Reaction(name="R4", reactants={MA:1}, products={Turf:1}, rate=dm))
# T + C_m -> C : phi_c
self.add_reaction(pyurdme.Reaction(name="R5", reactants={Turf:1, Coral_m:1}, products={Coral:1}, rate=phi_c))
# T + MA_m -> MA : phi_m
self.add_reaction(pyurdme.Reaction(name="R6", reactants={Turf:1, MA_m:1}, products={MA:1}, rate=phi_m))
        # C + T -> 2C : g_tc * exp(-1.0 * psi_g * MA / Space_per_voxel)
self.add_reaction(pyurdme.Reaction(name="R7", reactants={Turf:1, Coral:1}, products={Coral:2}, propensity_function="g_tc*Turf*Coral*exp(-1.0 * psi_g * MA / Space_per_voxel)/vol"))
# MA + T -> 2MA : g_tm
self.add_reaction(pyurdme.Reaction(name="R8", reactants={Turf:1, MA:1}, products={MA:2}, rate=g_tm))
# C + MA -> 2MA : Gamma * g_tm
self.add_reaction(pyurdme.Reaction(name="R9", reactants={Coral:1, MA:1}, products={MA:2}, propensity_function="g_tm*Gamma*Coral*MA/vol"))
# C -> C + C_m : R_c
self.add_reaction(pyurdme.Reaction(name="R10", reactants={Coral:1}, products={Coral:1, Coral_m:1}, rate=R_c))
# MA -> MA + MA_m : R_m
self.add_reaction(pyurdme.Reaction(name="R11", reactants={MA:1}, products={MA:1, MA_m:1}, rate=R_m))
# C_m -> 0 : mu_c
self.add_reaction(pyurdme.Reaction(name="R12", reactants={Coral_m:1}, products={}, rate=mu_c))
# MA_m -> 0 : mu_m
self.add_reaction(pyurdme.Reaction(name="R13", reactants={MA_m:1}, products={}, rate=mu_m))
# MA + C_m -> MA : alpha_c
self.add_reaction(pyurdme.Reaction(name="R14", reactants={MA:1, Coral_m:1}, products={MA:1}, rate=alpha_c))
# C + MA_m -> C : alpha_m
self.add_reaction(pyurdme.Reaction(name="R15", reactants={Coral:1, MA_m:1}, products={Coral:1}, rate=alpha_m))
# A unit square
# each grid point is 10cm x 10cm, domain is 5m x 5m
self.mesh = pyurdme.URDMEMesh.generate_square_mesh(L=5, nx=50, ny=50, periodic=True)
Space_per_voxel = 10
self.add_parameter(pyurdme.Parameter(name="Space_per_voxel", expression=Space_per_voxel)) #1/year
if True:
            # Start with two colonies
self.set_initial_condition_distribute_uniformly({Turf:Space_per_voxel})
self.set_initial_condition_place_near({Coral:Space_per_voxel}, point=[1,1])
self.set_initial_condition_place_near({Turf:0}, point=[1,1])
self.set_initial_condition_place_near({MA:Space_per_voxel}, point=[4,4])
self.set_initial_condition_place_near({Turf:0}, point=[4,4])
else:
# Every voxel is the same
self.set_initial_condition_distribute_uniformly({Turf:0})
self.set_initial_condition_distribute_uniformly({Coral:Space_per_voxel-1})
self.set_initial_condition_distribute_uniformly({MA:1})
for vndx in range(self.u0.shape[1]):
tot = 0
for sndx, sname in enumerate(self.listOfSpecies):
tot += self.u0[sndx][vndx]
if tot > 100:
for sndx, sname in enumerate(self.listOfSpecies):
print "u0[{0}][{1}] = {2}".format(sname, vndx, self.u0[sndx][vndx])
#self.timespan(numpy.linspace(0,500,501)) #500 years
#self.timespan(numpy.linspace(0,5,72)) #5 years, by months
self.timespan(numpy.linspace(0,11,66)) #10 years, by 2 months
if __name__ == "__main__":
model = CoralReef()
result = model.run(report_level=1)
print "Writing PavaView compatable output to 'output_coral' directory"
result.export_to_vtk(species='Coral',folder_name="output_coral")
x_vals = model.mesh.coordinates()[:, 0]
y_vals = model.mesh.coordinates()[:, 1]
C_vals = result.get_species("Coral")
MA_vals = result.get_species("MA")
Turf_vals = result.get_species("Turf")
num_vox = len(x_vals)
plt.figure(figsize=(12,6), dpi=100)
tndx = -1 #show end timepoint
tval = model.tspan[tndx]
plt.subplot(1,3,1)
heatmap, xedges, yedges = numpy.histogram2d(x=x_vals, y=y_vals, weights=C_vals[tndx,:], bins=int(math.sqrt(num_vox)))
plt.imshow(heatmap)
cb = plt.colorbar()
cb.set_label('Coral population')
plt.title('t={0}'.format(tval))
plt.subplot(1,3,2)
heatmap, xedges, yedges = numpy.histogram2d(x=x_vals, y=y_vals, weights=MA_vals[tndx,:], bins=int(math.sqrt(num_vox)))
plt.imshow(heatmap)
cb = plt.colorbar()
cb.set_label('MA population')
plt.title('t={0}'.format(tval))
plt.subplot(1,3,3)
heatmap, xedges, yedges = numpy.histogram2d(x=x_vals, y=y_vals, weights=Turf_vals[tndx,:], bins=int(math.sqrt(num_vox)))
plt.imshow(heatmap)
cb = plt.colorbar()
cb.set_label('Free Turf')
plt.title('t={0}'.format(tval))
plt.show()
| gpl-3.0 |
fdeheeger/mpld3 | examples/mpld3_logo.py | 19 | 3751 | """
mpld3 Logo Idea
===============
This example shows how mpld3 can be used to generate relatively intricate
vector graphics in the browser. This is an adaptation of a logo proposal by
github user debjan, in turn based on both the matplotlib and D3js logos.
"""
# Author: Jake VanderPlas
import matplotlib.pyplot as plt
from matplotlib import image, patches, colors
from matplotlib.colors import colorConverter
import numpy as np
import mpld3
imsize = np.array([319, 217])
center = [108.5, 108.5]
max_radius = 108.5
radii = np.linspace(16, max_radius, 5)
angles = np.arange(0, 360, 45)
fig = plt.figure(figsize=imsize / 50.)
ax = fig.add_axes([0, 0, 1, 1], frameon=False, xticks=[], yticks=[])
# Create a clip path for the elements
clip_path = patches.Rectangle((0, 0), imsize[0], imsize[1],
transform=ax.transData)
# Create the background gradient
x = np.array([0, 104, 196, 300])
y = np.linspace(150, 450, 86)[:, None]
c = np.cos(-np.pi / 4)
s = np.sin(-np.pi / 4)
X, Y = (c * x - s * y) - 116, (s * x + c * y)
C = np.arange(255).reshape((3, 85)).T
C = C[::-1, :]
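# The (X, Y) grid is rotated by -45 degrees, so the value ramp in C runs
# diagonally and the pcolormesh below fills the background with a diagonal
# gradient through the custom colormap.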
cmap = colors.LinearSegmentedColormap.from_list("mpld3",
[[0.97, 0.6, 0.29],
[0.97, 0.59, 0.27],
[0.97, 0.58, 0.25],
[0.95, 0.44, 0.34],
[0.92, 0.51, 0.29],
[0.68, 0.21, 0.20]])
mesh = ax.pcolormesh(X, Y, C, cmap=cmap, shading='gouraud', zorder=0)
mesh.set_clip_path(clip_path)
# cut-off the background to form the "D" and "3" using white patches
# (this could also be done with a clip path)
kwargs = dict(fc='white', ec='none', zorder=1)
ax.add_patch(patches.Rectangle([0, 0], center[0], imsize[1], **kwargs))
ax.add_patch(patches.Circle(center, radii[2], **kwargs))
ax.add_patch(patches.Wedge(center, 127, -90, 90, width=18.5, **kwargs))
ax.add_patch(patches.Circle((252, 66), 18, **kwargs))
ax.add_patch(patches.Rectangle([216, 48], 36, 36, **kwargs))
ax.add_patch(patches.Wedge((252, 66), 101, -90, 40.1, width=35, **kwargs))
ax.add_patch(patches.Circle((252, 151), 18, **kwargs))
ax.add_patch(patches.Rectangle([216, 133], 36, 36, **kwargs))
ax.add_patch(patches.Wedge((252, 151), 101, -40.1, 90, width=35, **kwargs))
ax.add_patch(patches.Rectangle([-200, -200], 719, 200, **kwargs))
ax.add_patch(patches.Rectangle([-200, -200], 200, 617, **kwargs))
ax.add_patch(patches.Rectangle([-200, imsize[1]], 719, 200, **kwargs))
ax.add_patch(patches.Rectangle([imsize[0], -200], 200, 617, **kwargs))
# plot circles and lines
for radius in radii:
ax.add_patch(patches.Circle(center, radius, lw=0.5,
ec='gray', fc='none', zorder=2))
for angle in angles:
dx, dy = np.sin(np.radians(angle)), np.cos(np.radians(angle))
ax.plot([max_radius * (1 - dx), max_radius * (1 + dx)],
[max_radius * (1 - dy), max_radius * (1 + dy)],
'-', color='gray', lw=0.5, zorder=2)
# plot wedges within the graph
wedges = [(98, 231, 258, '#FF6600'),
(85, 170, 205, '#FFC500'),
(60, 80, 103, '#7DFF78'),
(96, 45, 58, '#FD7C1A'),
(73, 291, 308, '#CCFF28'),
(47, 146, 155, '#28FFCC'),
(25, 340, 360, '#004AFF')]
for (radius, theta1, theta2, color) in wedges:
ax.add_patch(patches.Wedge(center, radius, theta1, theta2,
fc=color, ec='black', alpha=0.6, zorder=3))
for patch in ax.patches:
patch.set_clip_path(clip_path)
ax.set_xlim(0, imsize[0])
ax.set_ylim(imsize[1], 0)
#plt.savefig('mpld3.png')
mpld3.show()
| bsd-3-clause |
phoebe-project/phoebe2-docs | 2.0/tutorials/intens_weighting.py | 1 | 2760 | #!/usr/bin/env python
# coding: utf-8
# Intensity Weighting
# ============================
#
# Setup
# -----------------------------
# Let's first make sure we have the latest version of PHOEBE 2.0 installed. (You can comment out this line if you don't use pip for your installation or don't want to update to the latest release).
# In[ ]:
get_ipython().system('pip install -I "phoebe>=2.0,<2.1"')
# As always, let's do imports and initialize a logger and a new Bundle. See [Building a System](building_a_system.html) for more details.
# In[1]:
get_ipython().run_line_magic('matplotlib', 'inline')
# In[2]:
import phoebe
from phoebe import u # units
import numpy as np
import matplotlib.pyplot as plt
logger = phoebe.logger()
b = phoebe.default_binary()
# In[3]:
b.add_dataset('lc', times=np.linspace(0,1,101))
# In[4]:
b.add_dataset('mesh', times=[0])
# Relevant Parameters
# -------------------------------
# In[5]:
b['intens_weighting']
# In[6]:
print b['intens_weighting']
# Influence on Light Curves (fluxes)
# ---------------------------------------------
#
# Let's (roughly) reproduce Figure 5 from [Prsa et al. 2016](http://phoebe-project.org/publications/2016Prsa+) which shows the difference between photon and energy intensity weighting.
#
# <img src="prsa+2016_fig5.png" alt="Figure 8" width="600px"/>
# In[6]:
for teff_primary in [5000,7500,10000,12500,15000]:
b['teff@primary'] = teff_primary
b['teff@secondary'] = 0.9 * teff_primary
for weighting in ['energy', 'photon']:
b['intens_weighting'] = weighting
b.run_compute(irrad_method='none', model='{}_{}'.format(teff_primary, weighting))
# In[8]:
teff_colormap = {5000: 'm', 7500: 'r', 10000: 'g', 12500: 'c', 15000: 'b'}
fig = plt.figure()
ax1, ax2 = fig.add_subplot(211), fig.add_subplot(212)
for teff, color in teff_colormap.items():
fluxes_energy = b.get_value('fluxes@{}_energy'.format(teff))
fluxes_photon = b.get_value('fluxes@{}_photon'.format(teff))
phases = b.to_phase('times@lc@dataset')
# alias data from -0.6 to 0.6
fluxes_energy = np.append(fluxes_energy, fluxes_energy[abs(phases) > 0.4])
fluxes_photon = np.append(fluxes_photon, fluxes_photon[abs(phases) > 0.4])
phases = np.append(phases, phases[abs(phases)>0.4]+1.0)
phases[phases > 1.0] = phases[phases > 1.0] - 2.0
sort = phases.argsort()
phases = phases[sort]
fluxes_energy = fluxes_energy[sort]
fluxes_photon = fluxes_photon[sort]
ax1.plot(phases, fluxes_energy, color=color)
ax2.plot(phases, fluxes_photon-fluxes_energy, color=color)
lbl = ax1.set_xlabel('')
lbl = ax1.set_ylabel('flux')
lbl = ax2.set_xlabel('phase')
lbl = ax2.set_ylabel('flux diff')
# In[ ]:
| gpl-3.0 |
NMGRL/pychron | pychron/core/ui/qt/map_editor.py | 2 | 4675 | # ===============================================================================
# Copyright 2019 ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
import smopy
from PIL import Image
from pyface.qt.QtGui import QPainter, QFrame, QPixmap, QImage
from traitsui.qt4.basic_editor_factory import BasicEditorFactory
from traitsui.qt4.editor import Editor
class QMapWidget(QFrame):
def __init__(self, *args, **kw):
super(QMapWidget, self).__init__(*args, **kw)
self._pix_maps = []
# self._pix_map = QPixmap(self.width(), self.height())
# self._pix_map.fill(Qt.transparent)
def set_tile(self, image):
data = image.tobytes('raw', 'RGB')
im = QImage(data, image.size[0], image.size[1], QImage.Format_RGB888)
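        # PIL hands back raw RGB bytes; wrap them in a QImage (RGB888) and then a
        # QPixmap so paintEvent can draw each stored tile layer in order.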
pix = QPixmap.fromImage(im)
self._pix_maps.append(pix)
# print(self._pix_map)
self.update()
#
# def resizeEvent(self, event):
# print('asdf', event.size().width())
# esize = event.size()
# csize = self._pix_map.size()
#
# try:
#
# print(csize.width(), esize.width(), csize.height(), esize.height())
# self._pix_map = self._pix_map.scaled(csize.width()/esize.width(),
# csize.height()/esize.height(),)
# except ZeroDivisionError:
# pass
#
# # self.update()
# self.repaint()
def paintEvent(self, event):
super(QMapWidget, self).paintEvent(event)
qp = QPainter()
qp.begin(self)
for p in self._pix_maps:
qp.drawPixmap(0, 0, p)
qp.end()
# def set_screen(self):
# self._screen = QPixMap()
class _MapEditor(Editor):
def init(self, parent):
self.control = self._create_control(parent)
def update_editor(self):
if self.control:
self.control.update()
# def set_size_policy(self, direction, resizable, springy, stretch):
# pass
def _create_control(self, parent):
control = QMapWidget()
# control.setMaximumSize(200,200)
lat_min = 34.052999
lon_min = -106.924551
lat_max = 34.076752
lon_max = -106.885971
#
# lat_min = 34.052999
# lon_min = -106.81
# lat_max = 34.08
# lon_max = -106.83
rect = (lat_min, lon_min, lat_max, lon_max)
server = 'https://tiles.wmflabs.org/bw-mapnik/{z}/{x}/{y}.png'
server = 'http://c.tile.stamen.com/watercolor/{z}/{x}/{y}.png'
        # server = 'https://c.tiles.wmflabs.org/hillshading/{z}/{x}/{y}.png'
# server = 'https://mt1.google.com/vt/lyrs=y&x={x}&y={y}&z={z}' # satelite
# server = 'https://mt1.google.com/vt/lyrs=t&x={x}&y={y}&z={z}' # terrain
# server = 'https://mt1.google.com/vt/lyrs=r&x={x}&y={y}&z={z}' # maps
smopy.TILE_SERVER = server
m = smopy.Map(rect, z=10)
# m = smopy.Map(rect)
# m = smopy.Map(lat_min, lon_min, z=10, tileserver='http://c.tile.stamen.com/watercolor/{z}/{x}/{y}.png')
# m.show_ipython()
# control.set_tile(m.img)
base = m.img
base = base.convert('RGBA')
base.putalpha(200)
control.set_tile(base)
# return control
server = 'https://tiles.wmflabs.org/bw-mapnik/{z}/{x}/{y}.png'
# server = 'https://mt1.google.com/vt/lyrs=y&x={x}&y={y}&z={z}' # satelite
smopy.TILE_SERVER = server
m = smopy.Map(rect, z=10)
# img = m.img
# img = img.convert('RGBA')
# img.putalpha(128)
# img = img.convert('RGB')
img = m.img.convert('RGBA')
img.putalpha(128)
img = Image.alpha_composite(base, img)
control.set_tile(img)
# control.set_tile(Image.blend(base, l1, 129))
# m.show_mpl()
# from matplotlib.pyplot import show
# show()
# control.set_screen()
return control
class MapViewEditor(BasicEditorFactory):
klass = _MapEditor
# ============= EOF =============================================
| apache-2.0 |
ibis-project/ibis | ibis/backends/dask/execution/selection.py | 1 | 7512 | """Dispatching code for Selection operations.
"""
from __future__ import absolute_import
import functools
import operator
from typing import Optional
import dask.dataframe as dd
import pandas
from toolz import concatv
import ibis.expr.operations as ops
import ibis.expr.types as ir
from ibis.backends.pandas.execution.selection import (
compute_projection,
compute_projection_table_expr,
map_new_column_names_to_data,
remap_overlapping_column_names,
)
from ibis.expr.scope import Scope
from ibis.expr.typing import TimeContext
from ..core import execute
from ..dispatch import execute_node
from ..execution import constants
from ..execution.util import coerce_to_output, compute_sorted_frame
@compute_projection.register(ir.ScalarExpr, ops.Selection, dd.DataFrame)
def compute_projection_scalar_expr(
expr,
parent,
data,
scope: Scope = None,
timecontext: Optional[TimeContext] = None,
**kwargs,
):
name = expr._name
assert name is not None, 'Scalar selection name is None'
op = expr.op()
parent_table_op = parent.table.op()
data_columns = frozenset(data.columns)
scope = scope.merge_scopes(
Scope(
{
t: map_new_column_names_to_data(
remap_overlapping_column_names(
parent_table_op, t, data_columns
),
data,
)
},
timecontext,
)
for t in op.root_tables()
)
scalar = execute(expr, scope=scope, **kwargs)
return data.assign(**{name: scalar})[name]
@compute_projection.register(ir.ColumnExpr, ops.Selection, dd.DataFrame)
def compute_projection_column_expr(
expr,
parent,
data,
scope: Scope,
timecontext: Optional[TimeContext],
**kwargs,
):
result_name = getattr(expr, '_name', None)
op = expr.op()
parent_table_op = parent.table.op()
if isinstance(op, ops.TableColumn):
# slightly faster path for simple column selection
name = op.name
if name in data:
return data[name].rename(result_name or name)
if not isinstance(parent_table_op, ops.Join):
raise KeyError(name)
(root_table,) = op.root_tables()
left_root, right_root = ops.distinct_roots(
parent_table_op.left, parent_table_op.right
)
suffixes = {
left_root: constants.LEFT_JOIN_SUFFIX,
right_root: constants.RIGHT_JOIN_SUFFIX,
}
return data.loc[:, name + suffixes[root_table]].rename(
result_name or name
)
data_columns = frozenset(data.columns)
scope = scope.merge_scopes(
Scope(
{
t: map_new_column_names_to_data(
remap_overlapping_column_names(
parent_table_op, t, data_columns
),
data,
)
},
timecontext,
)
for t in op.root_tables()
)
result = execute(expr, scope=scope, timecontext=timecontext, **kwargs)
result = coerce_to_output(result, expr, data.index)
assert result_name is not None, 'Column selection name is None'
return result
compute_projection.register(ir.TableExpr, ops.Selection, dd.DataFrame)(
compute_projection_table_expr
)
@execute_node.register(ops.Selection, dd.DataFrame)
def execute_selection_dataframe(
op, data, scope: Scope, timecontext: Optional[TimeContext], **kwargs
):
selections = op.selections
predicates = op.predicates
sort_keys = op.sort_keys
result = data
# Build up the individual dask structures from column expressions
if selections:
data_pieces = []
for selection in selections:
dask_object = compute_projection(
selection,
op,
data,
scope=scope,
timecontext=timecontext,
**kwargs,
)
data_pieces.append(dask_object)
result = dd.concat(data_pieces, axis=1, ignore_unknown_divisions=True)
if predicates:
predicates = _compute_predicates(
op.table.op(), predicates, data, scope, timecontext, **kwargs
)
predicate = functools.reduce(operator.and_, predicates)
assert len(predicate) == len(
result
), 'Selection predicate length does not match underlying table'
result = result.loc[predicate]
if sort_keys:
if len(sort_keys) > 1:
raise NotImplementedError(
"""
Multi-key sorting is not implemented for the Dask backend
"""
)
sort_key = sort_keys[0]
ascending = getattr(sort_key.op(), 'ascending', True)
if not ascending:
raise NotImplementedError(
"Descending sort is not supported for the Dask backend"
)
result = compute_sorted_frame(
result,
order_by=sort_key,
scope=scope,
timecontext=timecontext,
**kwargs,
)
return result
else:
grouping_keys = ordering_keys = ()
# return early if we do not have any temporary grouping or ordering columns
assert not grouping_keys, 'group by should never show up in Selection'
if not ordering_keys:
return result
# create a sequence of columns that we need to drop
temporary_columns = pandas.Index(
concatv(grouping_keys, ordering_keys)
).difference(data.columns)
# no reason to call drop if we don't need to
if temporary_columns.empty:
return result
# drop every temporary column we created for ordering or grouping
return result.drop(temporary_columns, axis=1)
def _compute_predicates(
table_op,
predicates,
data,
scope: Scope,
timecontext: Optional[TimeContext],
**kwargs,
):
"""Compute the predicates for a table operation.
Parameters
----------
table_op : TableNode
predicates : List[ir.ColumnExpr]
data : pd.DataFrame
scope : Scope
timecontext: Optional[TimeContext]
kwargs : dict
Returns
-------
computed_predicate : pd.Series[bool]
Notes
-----
This handles the cases where the predicates are computed columns, in
addition to the simple case of named columns coming directly from the input
table.
"""
for predicate in predicates:
# Map each root table of the predicate to the data so that we compute
# predicates on the result instead of any left or right tables if the
        # Selection is on a Join. Project data to only include columns from
# the root table.
root_tables = predicate.op().root_tables()
# handle suffixes
data_columns = frozenset(data.columns)
additional_scope = Scope()
for root_table in root_tables:
mapping = remap_overlapping_column_names(
table_op, root_table, data_columns
)
if mapping is not None:
new_data = data.loc[:, mapping.keys()].rename(columns=mapping)
else:
new_data = data
additional_scope = additional_scope.merge_scope(
Scope({root_table: new_data}, timecontext)
)
scope = scope.merge_scope(additional_scope)
yield execute(predicate, scope=scope, **kwargs)
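# ---------------------------------------------------------------------------
# Usage sketch (comments only, nothing executed on import): the
# ``execute_node`` registration above is what runs when a dask-backed table
# expression containing a projection, filter, or sort is executed.  The
# connection call below assumes the dask backend exposes a ``connect`` entry
# point analogous to the pandas backend; the exact spelling may differ
# between ibis versions.
#
#     import dask.dataframe as dd
#     import pandas as pd
#     import ibis
#
#     pdf = pd.DataFrame({"a": [1, 2, 3], "b": list("xyz")})
#     ddf = dd.from_pandas(pdf, npartitions=1)
#     con = ibis.dask.connect({"t": ddf})      # assumed entry point
#     t = con.table("t")
#     expr = t[t.a > 1][["a", "b"]].sort_by("a")
#     expr.execute()  # dispatches to execute_selection_dataframe above
# ---------------------------------------------------------------------------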
| apache-2.0 |
YihaoLu/statsmodels | statsmodels/sandbox/examples/ex_gam_results.py | 37 | 1660 | # -*- coding: utf-8 -*-
"""Example results for GAM from tests
Created on Mon Nov 07 13:13:15 2011
Author: Josef Perktold
The example is loaded from a test module. The test still fails but the
results look relatively good.
I don't know yet why there is the small difference and why GAM doesn't
converge in this case
"""
from statsmodels.sandbox.tests.test_gam import _estGAMGaussianLogLink
tt = _estGAMGaussianLogLink()
comp, const = tt.res_gam.smoothed_demeaned(tt.mod_gam.exog)
comp_glm_ = tt.res2.model.exog * tt.res2.params
comp1 = comp_glm_[:,1:4].sum(1)
mean1 = comp1.mean()
comp1 -= mean1
comp2 = comp_glm_[:,4:].sum(1)
mean2 = comp2.mean()
comp2 -= mean2
comp1_true = tt.res2.model.exog[:,1:4].sum(1)
mean1 = comp1_true.mean()
comp1_true -= mean1
comp2_true = tt.res2.model.exog[:,4:].sum(1)
mean2 = comp2_true.mean()
comp2_true -= mean2
noise = tt.res2.model.endog - tt.mu_true
noise_eta = tt.family.link(tt.res2.model.endog) - tt.y_true
import matplotlib.pyplot as plt
plt.figure()
plt.plot(noise, 'k.')
plt.figure()
plt.plot(comp, 'r-')
plt.plot(comp1, 'b-')
plt.plot(comp2, 'b-')
plt.plot(comp1_true, 'k--', lw=2)
plt.plot(comp2_true, 'k--', lw=2)
#the next doesn't make sense - non-linear
#c1 = tt.family.link(tt.family.link.inverse(comp1_true) + noise)
#c2 = tt.family.link(tt.family.link.inverse(comp2_true) + noise)
#not nice in example/plot: noise variance is constant not proportional
plt.plot(comp1_true + noise_eta, 'g.', alpha=0.95)
plt.plot(comp2_true + noise_eta, 'r.', alpha=0.95)
#plt.plot(c1, 'g.', alpha=0.95)
#plt.plot(c2, 'r.', alpha=0.95)
plt.title('Gaussian loglink, GAM (red), GLM (blue), true (black)')
plt.show()
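# Quick numeric check (an illustrative sketch appended to this example, not
# part of the original test): correlate the summed GLM components with the
# "true" exog sums plotted above.
import numpy as np
print("corr(comp1, comp1_true):", np.corrcoef(comp1, comp1_true)[0, 1])
print("corr(comp2, comp2_true):", np.corrcoef(comp2, comp2_true)[0, 1])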
| bsd-3-clause |
eljost/pysisyphus | tests_staging/test_growingnt/test_growingnt.py | 1 | 6214 | #!/usr/bin/env python3
import copy
import matplotlib.pyplot as plt
import numpy as np
import pytest
from pysisyphus.helpers import geom_from_library
from pysisyphus.calculators.AnaPot import AnaPot
from pysisyphus.calculators.MullerBrownSympyPot import MullerBrownPot
from pysisyphus.calculators.FourWellAnaPot import FourWellAnaPot
from pysisyphus.Geometry import Geometry
from pysisyphus.cos.GrowingNT import GrowingNT
from pysisyphus.cos.GrowingString import GrowingString
from pysisyphus.plotters.AnimPlot import AnimPlot
def get_geoms(coords, calc_getter):
atoms = ("H")
geoms = [Geometry(atoms, c) for c in coords]
for geom in geoms:
geom.set_calculator(calc_getter())
return geoms
def plot(gnt, calc, levels=None):
calc.plot(levels)
conv = np.array(gnt.conv_points)
ax = calc.ax
if hasattr(gnt, "points"):
points = np.array(gnt.points)
px = points[:,0]
py = points[:,1]
ax.plot(px, py, "o-", c="r")
cx = conv[:,0]
cy = conv[:,1]
ax.plot(cx, cy, "X-", ms="8", c="k")
if hasattr(gnt, "tangents"):
tangents = gnt.tangents
tx = tangents[:,0]
ty = tangents[:,1]
ax.quiver(cx, cy, tx, ty)
# if hasattr(gnt, "cur_forces"):
# forces = gnt.cur_forces
# fx = forces[:,0]
# fy = forces[:,1]
# ax.quiver(cx, cy, fx, fy, color="b")
if hasattr(gnt, "perp_forces"):
perp_forces = gnt.perp_forces
px = perp_forces[:,0]
py = perp_forces[:,1]
ax.quiver(cx, cy, px, py, color="r")
plt.show()
def test_anapot_growingnt():
coords = (
(-1.05274, 1.02776, 0),
(1.94101, 3.85427, 0),
)
levels = np.linspace(-3, 4, 80)
calc_getter = AnaPot
eps = .05
damp = .05
gnt = GrowingNT(get_geoms(coords, calc_getter), calc_getter,
eps=eps, damp=damp)
gnt.run()
plot(gnt, calc_getter(), levels)
def test_mullerbrown_growingnt():
coords = (
(0.614, 0.031, 0),
(-.563, 1.43, 0),
)
levels=np.linspace(-150, -15, 40)
eps = .008
damp = .00065
calc_getter = MullerBrownPot
gnt = GrowingNT(get_geoms(coords, calc_getter), calc_getter,
eps=eps, damp=damp, max_nodes=23)
gnt.run()
plot(gnt, calc_getter(), levels)
def test_four_well_growingnt():
coords = (
(1.124, -1.485, 0.0),
(-1.174, 1.477, 0.0),
)
eps = .25
damp = .05
# See 10.1063/1.1885467 Sec. V.B., Eq. 11 and Fig 4a)
calc_getter = FourWellAnaPot
gnt = GrowingNT(get_geoms(coords, calc_getter), calc_getter,
eps=eps, damp=damp, max_nodes=22, readjust=False)
gnt.run()
plot(gnt, calc_getter())
def test_anapot_growingstring_opt():
coords = (
(-1.05274, 1.02776, 0),
(1.94101, 3.85427, 0),
)
calc_getter = AnaPot
eps = .05
damp = .05
images = get_geoms(coords, calc_getter)
gs_kwargs = {
"max_nodes": 10,
"perp_thresh": 0.5,
# "perp_thresh": 1,
}
gs = GrowingString(images, calc_getter, reparam_every=1)
# from pysisyphus.optimizers.QuickMin import QuickMin
# opt = QuickMin(gs)
# self.coords = [c.reshape(-1, 3) for c in self.gs.coords_list]
# self.tangents = self.gs.tangent_list
# self.perp_forces = self.gs.perp_force_list
from pysisyphus.optimizers.SteepestDescent import SteepestDescent
# opt = SteepestDescent(gs, alpha=0.05, bt_disable=True, max_cycles=175)
opt = SteepestDescent(gs, alpha=0.05, bt_disable=True, max_cycles=70)
opt.run()
xlim = (-2, 2.5)
ylim = (0, 5)
levels = (-3, 4, 80)
ap = AnimPlot(AnaPot(), opt, xlim=xlim, ylim=ylim, levels=levels)
ap.animate()
def test_mb_gs_opt():
coords = (
(0.614, 0.031, 0),
(-.563, 1.43, 0),
)
calc_getter = MullerBrownPot
# pot = calc_getter()
# pot.plot()
# plt.show()
images = get_geoms(coords, calc_getter)
gs_kwargs = {
"max_nodes": 16,
"perp_thresh": 50,
"fix_ends": True,
}
gs = GrowingString(images, calc_getter, **gs_kwargs)
from pysisyphus.optimizers.QuickMin import QuickMin
from pysisyphus.optimizers.SteepestDescent import SteepestDescent as SD
# opt = QuickMin(gs)
opt = SD(gs, alpha=0.4, bt_disable=True)
opt.run()
ap = AnimPlot(calc_getter(), opt)
ap.animate()
def test_gs():
from pysisyphus.calculators.XTB import XTB
educt = geom_from_library("ciscis_24hexadiene_xtbopt.xyz")
product = geom_from_library("trans34dimethylcyclobutene.xyz")
images = (educt, product)
def calc_getter():
return XTB(pal=4)
for img in images:
img.set_calculator(calc_getter())
gs_kwargs = {
"max_nodes": 9,
"reparam_every": 3,
}
gs = GrowingString(images, calc_getter, **gs_kwargs)
from pysisyphus.optimizers.StringOptimizer import StringOptimizer
opt_kwargs = {
"dump": True,
"max_cycles": 40,
"align": True,
}
opt = StringOptimizer(gs, **opt_kwargs)
opt.run()
def test_fs():
from pysisyphus.calculators.XTB import XTB
from pysisyphus.cos.FreezingString import FreezingString
# educt = geom_from_library("ciscis_24hexadiene_xtbopt.xyz")
# product = geom_from_library("trans34dimethylcyclobutene.xyz")
educt = AnaPot.get_geom((-1.05274, 1.02776, 0))
product = AnaPot.get_geom((1.94101, 3.85427, 0))
images = (educt, product)
def calc_getter():
return AnaPot()
fs = FreezingString(images, calc_getter, max_nodes=10)
from pysisyphus.optimizers.SteepestDescent import SteepestDescent
sd = SteepestDescent(fs)
sd.run()
pot = AnaPot()
pot.plot()
crds = fs.allcoords.reshape(-1, 3)
# pot.ax.plot(c[:,0], c[:,1], "o-")
pot.ax.plot(*crds[:,:2].T, "o-")
plt.show()
from pysisyphus.optimizers.StringOptimizer import StringOptimizer
if __name__ == "__main__":
# test_anapot_growingnt()
# test_mullerbrown_growingnt()
# test_four_well_growingnt()
test_anapot_growingstring_opt()
# test_mb_gs_opt()
plt.show()
# test_gs()
# test_fs()
| gpl-3.0 |
cpcloud/numpy | numpy/core/code_generators/ufunc_docstrings.py | 3 | 89352 | """
Docstrings for generated ufuncs
The syntax is designed to look like the function add_newdoc is being
called from numpy.lib, but in this file add_newdoc puts the docstrings
in a dictionary. This dictionary is used in
numpy/core/code_generators/generate_umath.py to generate the docstrings
for the ufuncs in numpy.core at the C level when the ufuncs are created
at compile time.
"""
from __future__ import division, absolute_import, print_function
docdict = {}
def get(name):
return docdict.get(name)
def add_newdoc(place, name, doc):
docdict['.'.join((place, name))] = doc
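# For illustration (not used by the generator itself): after a call such as
# ``add_newdoc('numpy.core.umath', 'absolute', "...")`` the text is stored in
# ``docdict`` under the dotted key 'numpy.core.umath.absolute', and
# ``get('numpy.core.umath.absolute')`` returns it; generate_umath.py looks the
# entries up the same way when emitting the C-level docstrings.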
add_newdoc('numpy.core.umath', 'absolute',
"""
Calculate the absolute value element-wise.
Parameters
----------
x : array_like
Input array.
Returns
-------
absolute : ndarray
An ndarray containing the absolute value of
each element in `x`. For complex input, ``a + ib``, the
absolute value is :math:`\\sqrt{ a^2 + b^2 }`.
Examples
--------
>>> x = np.array([-1.2, 1.2])
>>> np.absolute(x)
array([ 1.2, 1.2])
>>> np.absolute(1.2 + 1j)
1.5620499351813308
Plot the function over ``[-10, 10]``:
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(start=-10, stop=10, num=101)
>>> plt.plot(x, np.absolute(x))
>>> plt.show()
Plot the function over the complex plane:
>>> xx = x + 1j * x[:, np.newaxis]
>>> plt.imshow(np.abs(xx), extent=[-10, 10, -10, 10])
>>> plt.show()
""")
add_newdoc('numpy.core.umath', 'add',
"""
Add arguments element-wise.
Parameters
----------
x1, x2 : array_like
The arrays to be added. If ``x1.shape != x2.shape``, they must be
broadcastable to a common shape (which may be the shape of one or
the other).
Returns
-------
add : ndarray or scalar
The sum of `x1` and `x2`, element-wise. Returns a scalar if
both `x1` and `x2` are scalars.
Notes
-----
Equivalent to `x1` + `x2` in terms of array broadcasting.
Examples
--------
>>> np.add(1.0, 4.0)
5.0
>>> x1 = np.arange(9.0).reshape((3, 3))
>>> x2 = np.arange(3.0)
>>> np.add(x1, x2)
array([[ 0., 2., 4.],
[ 3., 5., 7.],
[ 6., 8., 10.]])
""")
add_newdoc('numpy.core.umath', 'arccos',
"""
Trigonometric inverse cosine, element-wise.
The inverse of `cos` so that, if ``y = cos(x)``, then ``x = arccos(y)``.
Parameters
----------
x : array_like
`x`-coordinate on the unit circle.
For real arguments, the domain is [-1, 1].
out : ndarray, optional
Array of the same shape as `a`, to store results in. See
`doc.ufuncs` (Section "Output arguments") for more details.
Returns
-------
angle : ndarray
The angle of the ray intersecting the unit circle at the given
`x`-coordinate in radians [0, pi]. If `x` is a scalar then a
scalar is returned, otherwise an array of the same shape as `x`
is returned.
See Also
--------
cos, arctan, arcsin, emath.arccos
Notes
-----
`arccos` is a multivalued function: for each `x` there are infinitely
many numbers `z` such that `cos(z) = x`. The convention is to return
the angle `z` whose real part lies in `[0, pi]`.
For real-valued input data types, `arccos` always returns real output.
For each value that cannot be expressed as a real number or infinity,
it yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `arccos` is a complex analytic function that
has branch cuts `[-inf, -1]` and `[1, inf]` and is continuous from
above on the former and from below on the latter.
The inverse `cos` is also known as `acos` or cos^-1.
References
----------
M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 79. http://www.math.sfu.ca/~cbm/aands/
Examples
--------
We expect the arccos of 1 to be 0, and of -1 to be pi:
>>> np.arccos([1, -1])
array([ 0. , 3.14159265])
Plot arccos:
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-1, 1, num=100)
>>> plt.plot(x, np.arccos(x))
>>> plt.axis('tight')
>>> plt.show()
""")
add_newdoc('numpy.core.umath', 'arccosh',
"""
Inverse hyperbolic cosine, element-wise.
Parameters
----------
x : array_like
Input array.
out : ndarray, optional
Array of the same shape as `x`, to store results in.
See `doc.ufuncs` (Section "Output arguments") for details.
Returns
-------
arccosh : ndarray
Array of the same shape as `x`.
See Also
--------
cosh, arcsinh, sinh, arctanh, tanh
Notes
-----
`arccosh` is a multivalued function: for each `x` there are infinitely
many numbers `z` such that `cosh(z) = x`. The convention is to return the
`z` whose imaginary part lies in `[-pi, pi]` and the real part in
``[0, inf]``.
For real-valued input data types, `arccosh` always returns real output.
For each value that cannot be expressed as a real number or infinity, it
yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `arccosh` is a complex analytical function that
has a branch cut `[-inf, 1]` and is continuous from above on it.
References
----------
.. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 86. http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Inverse hyperbolic function",
http://en.wikipedia.org/wiki/Arccosh
Examples
--------
>>> np.arccosh([np.e, 10.0])
array([ 1.65745445, 2.99322285])
>>> np.arccosh(1)
0.0
""")
add_newdoc('numpy.core.umath', 'arcsin',
"""
Inverse sine, element-wise.
Parameters
----------
x : array_like
`y`-coordinate on the unit circle.
out : ndarray, optional
Array of the same shape as `x`, in which to store the results.
See `doc.ufuncs` (Section "Output arguments") for more details.
Returns
-------
angle : ndarray
The inverse sine of each element in `x`, in radians and in the
closed interval ``[-pi/2, pi/2]``. If `x` is a scalar, a scalar
is returned, otherwise an array.
See Also
--------
sin, cos, arccos, tan, arctan, arctan2, emath.arcsin
Notes
-----
`arcsin` is a multivalued function: for each `x` there are infinitely
many numbers `z` such that :math:`sin(z) = x`. The convention is to
return the angle `z` whose real part lies in [-pi/2, pi/2].
For real-valued input data types, *arcsin* always returns real output.
For each value that cannot be expressed as a real number or infinity,
it yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `arcsin` is a complex analytic function that
has, by convention, the branch cuts [-inf, -1] and [1, inf] and is
continuous from above on the former and from below on the latter.
The inverse sine is also known as `asin` or sin^{-1}.
References
----------
Abramowitz, M. and Stegun, I. A., *Handbook of Mathematical Functions*,
10th printing, New York: Dover, 1964, pp. 79ff.
http://www.math.sfu.ca/~cbm/aands/
Examples
--------
>>> np.arcsin(1) # pi/2
1.5707963267948966
>>> np.arcsin(-1) # -pi/2
-1.5707963267948966
>>> np.arcsin(0)
0.0
""")
add_newdoc('numpy.core.umath', 'arcsinh',
"""
Inverse hyperbolic sine element-wise.
Parameters
----------
x : array_like
Input array.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See `doc.ufuncs`.
Returns
-------
out : ndarray
        Array of the same shape as `x`.
Notes
-----
`arcsinh` is a multivalued function: for each `x` there are infinitely
many numbers `z` such that `sinh(z) = x`. The convention is to return the
`z` whose imaginary part lies in `[-pi/2, pi/2]`.
For real-valued input data types, `arcsinh` always returns real output.
For each value that cannot be expressed as a real number or infinity, it
returns ``nan`` and sets the `invalid` floating point error flag.
    For complex-valued input, `arcsinh` is a complex analytical function that
has branch cuts `[1j, infj]` and `[-1j, -infj]` and is continuous from
the right on the former and from the left on the latter.
The inverse hyperbolic sine is also known as `asinh` or ``sinh^-1``.
References
----------
.. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 86. http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Inverse hyperbolic function",
http://en.wikipedia.org/wiki/Arcsinh
Examples
--------
>>> np.arcsinh(np.array([np.e, 10.0]))
array([ 1.72538256, 2.99822295])
""")
add_newdoc('numpy.core.umath', 'arctan',
"""
Trigonometric inverse tangent, element-wise.
The inverse of tan, so that if ``y = tan(x)`` then ``x = arctan(y)``.
Parameters
----------
x : array_like
Input values. `arctan` is applied to each element of `x`.
Returns
-------
out : ndarray
Out has the same shape as `x`. Its real part is in
``[-pi/2, pi/2]`` (``arctan(+/-inf)`` returns ``+/-pi/2``).
It is a scalar if `x` is a scalar.
See Also
--------
arctan2 : The "four quadrant" arctan of the angle formed by (`x`, `y`)
and the positive `x`-axis.
angle : Argument of complex values.
Notes
-----
`arctan` is a multi-valued function: for each `x` there are infinitely
many numbers `z` such that tan(`z`) = `x`. The convention is to return
the angle `z` whose real part lies in [-pi/2, pi/2].
For real-valued input data types, `arctan` always returns real output.
For each value that cannot be expressed as a real number or infinity,
it yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `arctan` is a complex analytic function that
has [`1j, infj`] and [`-1j, -infj`] as branch cuts, and is continuous
from the left on the former and from the right on the latter.
The inverse tangent is also known as `atan` or tan^{-1}.
References
----------
Abramowitz, M. and Stegun, I. A., *Handbook of Mathematical Functions*,
10th printing, New York: Dover, 1964, pp. 79.
http://www.math.sfu.ca/~cbm/aands/
Examples
--------
We expect the arctan of 0 to be 0, and of 1 to be pi/4:
>>> np.arctan([0, 1])
array([ 0. , 0.78539816])
>>> np.pi/4
0.78539816339744828
Plot arctan:
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-10, 10)
>>> plt.plot(x, np.arctan(x))
>>> plt.axis('tight')
>>> plt.show()
""")
add_newdoc('numpy.core.umath', 'arctan2',
"""
Element-wise arc tangent of ``x1/x2`` choosing the quadrant correctly.
The quadrant (i.e., branch) is chosen so that ``arctan2(x1, x2)`` is
the signed angle in radians between the ray ending at the origin and
passing through the point (1,0), and the ray ending at the origin and
passing through the point (`x2`, `x1`). (Note the role reversal: the
"`y`-coordinate" is the first function parameter, the "`x`-coordinate"
is the second.) By IEEE convention, this function is defined for
`x2` = +/-0 and for either or both of `x1` and `x2` = +/-inf (see
Notes for specific values).
This function is not defined for complex-valued arguments; for the
so-called argument of complex values, use `angle`.
Parameters
----------
x1 : array_like, real-valued
`y`-coordinates.
x2 : array_like, real-valued
`x`-coordinates. `x2` must be broadcastable to match the shape of
`x1` or vice versa.
Returns
-------
angle : ndarray
Array of angles in radians, in the range ``[-pi, pi]``.
See Also
--------
arctan, tan, angle
Notes
-----
*arctan2* is identical to the `atan2` function of the underlying
C library. The following special values are defined in the C
standard: [1]_
====== ====== ================
`x1` `x2` `arctan2(x1,x2)`
====== ====== ================
+/- 0 +0 +/- 0
+/- 0 -0 +/- pi
> 0 +/-inf +0 / +pi
< 0 +/-inf -0 / -pi
+/-inf +inf +/- (pi/4)
+/-inf -inf +/- (3*pi/4)
====== ====== ================
Note that +0 and -0 are distinct floating point numbers, as are +inf
and -inf.
References
----------
.. [1] ISO/IEC standard 9899:1999, "Programming language C."
Examples
--------
Consider four points in different quadrants:
>>> x = np.array([-1, +1, +1, -1])
>>> y = np.array([-1, -1, +1, +1])
>>> np.arctan2(y, x) * 180 / np.pi
array([-135., -45., 45., 135.])
Note the order of the parameters. `arctan2` is defined also when `x2` = 0
and at several other special points, obtaining values in
the range ``[-pi, pi]``:
>>> np.arctan2([1., -1.], [0., 0.])
array([ 1.57079633, -1.57079633])
>>> np.arctan2([0., 0., np.inf], [+0., -0., np.inf])
array([ 0. , 3.14159265, 0.78539816])
""")
add_newdoc('numpy.core.umath', '_arg',
"""
DO NOT USE, ONLY FOR TESTING
""")
add_newdoc('numpy.core.umath', 'arctanh',
"""
Inverse hyperbolic tangent element-wise.
Parameters
----------
x : array_like
Input array.
Returns
-------
out : ndarray
Array of the same shape as `x`.
See Also
--------
emath.arctanh
Notes
-----
`arctanh` is a multivalued function: for each `x` there are infinitely
many numbers `z` such that `tanh(z) = x`. The convention is to return
the `z` whose imaginary part lies in `[-pi/2, pi/2]`.
For real-valued input data types, `arctanh` always returns real output.
For each value that cannot be expressed as a real number or infinity,
it yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `arctanh` is a complex analytical function
that has branch cuts `[-1, -inf]` and `[1, inf]` and is continuous from
above on the former and from below on the latter.
The inverse hyperbolic tangent is also known as `atanh` or ``tanh^-1``.
References
----------
.. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 86. http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Inverse hyperbolic function",
http://en.wikipedia.org/wiki/Arctanh
Examples
--------
>>> np.arctanh([0, -0.5])
array([ 0. , -0.54930614])
""")
add_newdoc('numpy.core.umath', 'bitwise_and',
"""
Compute the bit-wise AND of two arrays element-wise.
Computes the bit-wise AND of the underlying binary representation of
the integers in the input arrays. This ufunc implements the C/Python
operator ``&``.
Parameters
----------
x1, x2 : array_like
Only integer and boolean types are handled.
Returns
-------
out : array_like
Result.
See Also
--------
logical_and
bitwise_or
bitwise_xor
binary_repr :
Return the binary representation of the input number as a string.
Examples
--------
The number 13 is represented by ``00001101``. Likewise, 17 is
represented by ``00010001``. The bit-wise AND of 13 and 17 is
    therefore ``00000001``, or 1:
>>> np.bitwise_and(13, 17)
1
>>> np.bitwise_and(14, 13)
12
>>> np.binary_repr(12)
'1100'
>>> np.bitwise_and([14,3], 13)
array([12, 1])
>>> np.bitwise_and([11,7], [4,25])
array([0, 1])
>>> np.bitwise_and(np.array([2,5,255]), np.array([3,14,16]))
array([ 2, 4, 16])
>>> np.bitwise_and([True, True], [False, True])
array([False, True], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'bitwise_or',
"""
Compute the bit-wise OR of two arrays element-wise.
Computes the bit-wise OR of the underlying binary representation of
the integers in the input arrays. This ufunc implements the C/Python
operator ``|``.
Parameters
----------
x1, x2 : array_like
Only integer and boolean types are handled.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See doc.ufuncs.
Returns
-------
out : array_like
Result.
See Also
--------
logical_or
bitwise_and
bitwise_xor
binary_repr :
Return the binary representation of the input number as a string.
Examples
--------
    The number 13 has the binary representation ``00001101``. Likewise,
    16 is represented by ``00010000``. The bit-wise OR of 13 and 16 is
    then ``00011101``, or 29:
>>> np.bitwise_or(13, 16)
29
>>> np.binary_repr(29)
'11101'
>>> np.bitwise_or(32, 2)
34
>>> np.bitwise_or([33, 4], 1)
array([33, 5])
>>> np.bitwise_or([33, 4], [1, 2])
array([33, 6])
>>> np.bitwise_or(np.array([2, 5, 255]), np.array([4, 4, 4]))
array([ 6, 5, 255])
>>> np.array([2, 5, 255]) | np.array([4, 4, 4])
array([ 6, 5, 255])
>>> np.bitwise_or(np.array([2, 5, 255, 2147483647L], dtype=np.int32),
... np.array([4, 4, 4, 2147483647L], dtype=np.int32))
array([ 6, 5, 255, 2147483647])
>>> np.bitwise_or([True, True], [False, True])
array([ True, True], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'bitwise_xor',
"""
Compute the bit-wise XOR of two arrays element-wise.
Computes the bit-wise XOR of the underlying binary representation of
the integers in the input arrays. This ufunc implements the C/Python
operator ``^``.
Parameters
----------
x1, x2 : array_like
Only integer and boolean types are handled.
Returns
-------
out : array_like
Result.
See Also
--------
logical_xor
bitwise_and
bitwise_or
binary_repr :
Return the binary representation of the input number as a string.
Examples
--------
The number 13 is represented by ``00001101``. Likewise, 17 is
represented by ``00010001``. The bit-wise XOR of 13 and 17 is
therefore ``00011100``, or 28:
>>> np.bitwise_xor(13, 17)
28
>>> np.binary_repr(28)
'11100'
>>> np.bitwise_xor(31, 5)
26
>>> np.bitwise_xor([31,3], 5)
array([26, 6])
>>> np.bitwise_xor([31,3], [5,6])
array([26, 5])
>>> np.bitwise_xor([True, True], [False, True])
array([ True, False], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'ceil',
"""
Return the ceiling of the input, element-wise.
The ceil of the scalar `x` is the smallest integer `i`, such that
`i >= x`. It is often denoted as :math:`\\lceil x \\rceil`.
Parameters
----------
x : array_like
Input data.
Returns
-------
y : {ndarray, scalar}
The ceiling of each element in `x`, with `float` dtype.
See Also
--------
floor, trunc, rint
Examples
--------
>>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
>>> np.ceil(a)
array([-1., -1., -0., 1., 2., 2., 2.])
""")
add_newdoc('numpy.core.umath', 'trunc',
"""
Return the truncated value of the input, element-wise.
The truncated value of the scalar `x` is the nearest integer `i` which
is closer to zero than `x` is. In short, the fractional part of the
signed number `x` is discarded.
Parameters
----------
x : array_like
Input data.
Returns
-------
y : {ndarray, scalar}
The truncated value of each element in `x`.
See Also
--------
ceil, floor, rint
Notes
-----
.. versionadded:: 1.3.0
Examples
--------
>>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
>>> np.trunc(a)
array([-1., -1., -0., 0., 1., 1., 2.])
""")
add_newdoc('numpy.core.umath', 'conjugate',
"""
Return the complex conjugate, element-wise.
The complex conjugate of a complex number is obtained by changing the
sign of its imaginary part.
Parameters
----------
x : array_like
Input value.
Returns
-------
y : ndarray
        The complex conjugate of `x`, with the same dtype as `x`.
Examples
--------
>>> np.conjugate(1+2j)
(1-2j)
>>> x = np.eye(2) + 1j * np.eye(2)
>>> np.conjugate(x)
array([[ 1.-1.j, 0.-0.j],
[ 0.-0.j, 1.-1.j]])
""")
add_newdoc('numpy.core.umath', 'cos',
"""
Cosine element-wise.
Parameters
----------
x : array_like
Input array in radians.
out : ndarray, optional
Output array of same shape as `x`.
Returns
-------
y : ndarray
The corresponding cosine values.
Raises
------
ValueError: invalid return array shape
if `out` is provided and `out.shape` != `x.shape` (See Examples)
Notes
-----
If `out` is provided, the function writes the result into it,
and returns a reference to `out`. (See Examples)
References
----------
M. Abramowitz and I. A. Stegun, Handbook of Mathematical Functions.
New York, NY: Dover, 1972.
Examples
--------
>>> np.cos(np.array([0, np.pi/2, np.pi]))
array([ 1.00000000e+00, 6.12303177e-17, -1.00000000e+00])
>>>
    >>> # Example of providing the optional output parameter
    >>> out1 = np.empty(1)
    >>> out2 = np.cos([0.1], out1)
>>> out2 is out1
True
>>>
>>> # Example of ValueError due to provision of shape mis-matched `out`
>>> np.cos(np.zeros((3,3)),np.zeros((2,2)))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ValueError: invalid return array shape
""")
add_newdoc('numpy.core.umath', 'cosh',
"""
Hyperbolic cosine, element-wise.
Equivalent to ``1/2 * (np.exp(x) + np.exp(-x))`` and ``np.cos(1j*x)``.
Parameters
----------
x : array_like
Input array.
Returns
-------
out : ndarray
Output array of same shape as `x`.
Examples
--------
>>> np.cosh(0)
1.0
The hyperbolic cosine describes the shape of a hanging cable:
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-4, 4, 1000)
>>> plt.plot(x, np.cosh(x))
>>> plt.show()
""")
add_newdoc('numpy.core.umath', 'degrees',
"""
Convert angles from radians to degrees.
Parameters
----------
x : array_like
Input array in radians.
out : ndarray, optional
Output array of same shape as x.
Returns
-------
y : ndarray of floats
The corresponding degree values; if `out` was supplied this is a
reference to it.
See Also
--------
rad2deg : equivalent function
Examples
--------
Convert a radian array to degrees
>>> rad = np.arange(12.)*np.pi/6
>>> np.degrees(rad)
array([ 0., 30., 60., 90., 120., 150., 180., 210., 240.,
270., 300., 330.])
>>> out = np.zeros((rad.shape))
    >>> r = np.degrees(rad, out)
>>> np.all(r == out)
True
""")
add_newdoc('numpy.core.umath', 'rad2deg',
"""
Convert angles from radians to degrees.
Parameters
----------
x : array_like
Angle in radians.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See doc.ufuncs.
Returns
-------
y : ndarray
The corresponding angle in degrees.
See Also
--------
deg2rad : Convert angles from degrees to radians.
unwrap : Remove large jumps in angle by wrapping.
Notes
-----
.. versionadded:: 1.3.0
rad2deg(x) is ``180 * x / pi``.
Examples
--------
>>> np.rad2deg(np.pi/2)
90.0
""")
add_newdoc('numpy.core.umath', 'divide',
"""
Divide arguments element-wise.
Parameters
----------
x1 : array_like
Dividend array.
x2 : array_like
Divisor array.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See doc.ufuncs.
Returns
-------
y : {ndarray, scalar}
The quotient `x1/x2`, element-wise. Returns a scalar if
both `x1` and `x2` are scalars.
See Also
--------
seterr : Set whether to raise or warn on overflow, underflow and
division by zero.
Notes
-----
Equivalent to `x1` / `x2` in terms of array-broadcasting.
Behavior on division by zero can be changed using `seterr`.
When both `x1` and `x2` are of an integer type, `divide` will return
integers and throw away the fractional part. Moreover, division by zero
always yields zero in integer arithmetic.
Examples
--------
>>> np.divide(2.0, 4.0)
0.5
>>> x1 = np.arange(9.0).reshape((3, 3))
>>> x2 = np.arange(3.0)
>>> np.divide(x1, x2)
array([[ NaN, 1. , 1. ],
[ Inf, 4. , 2.5],
[ Inf, 7. , 4. ]])
Note the behavior with integer types:
>>> np.divide(2, 4)
0
>>> np.divide(2, 4.)
0.5
Division by zero always yields zero in integer arithmetic, and does not
raise an exception or a warning:
>>> np.divide(np.array([0, 1], dtype=int), np.array([0, 0], dtype=int))
array([0, 0])
Division by zero can, however, be caught using `seterr`:
>>> old_err_state = np.seterr(divide='raise')
>>> np.divide(1, 0)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
FloatingPointError: divide by zero encountered in divide
>>> ignored_states = np.seterr(**old_err_state)
>>> np.divide(1, 0)
0
""")
add_newdoc('numpy.core.umath', 'equal',
"""
Return (x1 == x2) element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays of the same shape.
Returns
-------
out : {ndarray, bool}
Output array of bools, or a single bool if x1 and x2 are scalars.
See Also
--------
not_equal, greater_equal, less_equal, greater, less
Examples
--------
>>> np.equal([0, 1, 3], np.arange(3))
array([ True, True, False], dtype=bool)
What is compared are values, not types. So an int (1) and an array of
length one can evaluate as True:
>>> np.equal(1, np.ones(1))
array([ True], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'exp',
"""
Calculate the exponential of all elements in the input array.
Parameters
----------
x : array_like
Input values.
Returns
-------
out : ndarray
Output array, element-wise exponential of `x`.
See Also
--------
expm1 : Calculate ``exp(x) - 1`` for all elements in the array.
exp2 : Calculate ``2**x`` for all elements in the array.
Notes
-----
The irrational number ``e`` is also known as Euler's number. It is
approximately 2.718281, and is the base of the natural logarithm,
``ln`` (this means that, if :math:`x = \\ln y = \\log_e y`,
    then :math:`e^x = y`). For real input, ``exp(x)`` is always positive.
For complex arguments, ``x = a + ib``, we can write
:math:`e^x = e^a e^{ib}`. The first term, :math:`e^a`, is already
known (it is the real argument, described above). The second term,
:math:`e^{ib}`, is :math:`\\cos b + i \\sin b`, a function with
magnitude 1 and a periodic phase.
References
----------
.. [1] Wikipedia, "Exponential function",
http://en.wikipedia.org/wiki/Exponential_function
.. [2] M. Abramovitz and I. A. Stegun, "Handbook of Mathematical Functions
with Formulas, Graphs, and Mathematical Tables," Dover, 1964, p. 69,
http://www.math.sfu.ca/~cbm/aands/page_69.htm
Examples
--------
Plot the magnitude and phase of ``exp(x)`` in the complex plane:
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-2*np.pi, 2*np.pi, 100)
>>> xx = x + 1j * x[:, np.newaxis] # a + ib over complex plane
>>> out = np.exp(xx)
>>> plt.subplot(121)
>>> plt.imshow(np.abs(out),
... extent=[-2*np.pi, 2*np.pi, -2*np.pi, 2*np.pi])
>>> plt.title('Magnitude of exp(x)')
>>> plt.subplot(122)
>>> plt.imshow(np.angle(out),
... extent=[-2*np.pi, 2*np.pi, -2*np.pi, 2*np.pi])
>>> plt.title('Phase (angle) of exp(x)')
>>> plt.show()
""")
add_newdoc('numpy.core.umath', 'exp2',
"""
Calculate `2**p` for all `p` in the input array.
Parameters
----------
x : array_like
Input values.
out : ndarray, optional
Array to insert results into.
Returns
-------
out : ndarray
Element-wise 2 to the power `x`.
See Also
--------
power
Notes
-----
.. versionadded:: 1.3.0
Examples
--------
>>> np.exp2([2, 3])
array([ 4., 8.])
""")
add_newdoc('numpy.core.umath', 'expm1',
"""
Calculate ``exp(x) - 1`` for all elements in the array.
Parameters
----------
x : array_like
Input values.
Returns
-------
out : ndarray
Element-wise exponential minus one: ``out = exp(x) - 1``.
See Also
--------
log1p : ``log(1 + x)``, the inverse of expm1.
Notes
-----
This function provides greater precision than ``exp(x) - 1``
for small values of ``x``.
Examples
--------
The true value of ``exp(1e-10) - 1`` is ``1.00000000005e-10`` to
about 32 significant digits. This example shows the superiority of
expm1 in this case.
>>> np.expm1(1e-10)
1.00000000005e-10
>>> np.exp(1e-10) - 1
1.000000082740371e-10
""")
add_newdoc('numpy.core.umath', 'fabs',
"""
Compute the absolute values element-wise.
This function returns the absolute values (positive magnitude) of the
data in `x`. Complex values are not handled, use `absolute` to find the
absolute values of complex data.
Parameters
----------
x : array_like
The array of numbers for which the absolute values are required. If
`x` is a scalar, the result `y` will also be a scalar.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See doc.ufuncs.
Returns
-------
y : {ndarray, scalar}
The absolute values of `x`, the returned values are always floats.
See Also
--------
absolute : Absolute values including `complex` types.
Examples
--------
>>> np.fabs(-1)
1.0
>>> np.fabs([-1.2, 1.2])
array([ 1.2, 1.2])
""")
add_newdoc('numpy.core.umath', 'floor',
"""
Return the floor of the input, element-wise.
The floor of the scalar `x` is the largest integer `i`, such that
`i <= x`. It is often denoted as :math:`\\lfloor x \\rfloor`.
Parameters
----------
x : array_like
Input data.
Returns
-------
y : {ndarray, scalar}
The floor of each element in `x`.
See Also
--------
ceil, trunc, rint
Notes
-----
Some spreadsheet programs calculate the "floor-towards-zero", in other
words ``floor(-2.5) == -2``. NumPy instead uses the definition of
`floor` where `floor(-2.5) == -3`.
Examples
--------
>>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
>>> np.floor(a)
array([-2., -2., -1., 0., 1., 1., 2.])
""")
add_newdoc('numpy.core.umath', 'floor_divide',
"""
Return the largest integer smaller or equal to the division of the
inputs.
Parameters
----------
x1 : array_like
Numerator.
x2 : array_like
Denominator.
Returns
-------
y : ndarray
y = floor(`x1`/`x2`)
See Also
--------
divide : Standard division.
floor : Round a number to the nearest integer toward minus infinity.
ceil : Round a number to the nearest integer toward infinity.
Examples
--------
>>> np.floor_divide(7,3)
2
>>> np.floor_divide([1., 2., 3., 4.], 2.5)
array([ 0., 0., 1., 1.])
""")
add_newdoc('numpy.core.umath', 'fmod',
"""
Return the element-wise remainder of division.
    This is the NumPy implementation of the C library function fmod; the
remainder has the same sign as the dividend `x1`. It is equivalent to
the Matlab(TM) ``rem`` function and should not be confused with the
Python modulus operator ``x1 % x2``.
Parameters
----------
x1 : array_like
Dividend.
x2 : array_like
Divisor.
Returns
-------
y : array_like
The remainder of the division of `x1` by `x2`.
See Also
--------
remainder : Equivalent to the Python ``%`` operator.
divide
Notes
-----
The result of the modulo operation for negative dividend and divisors
is bound by conventions. For `fmod`, the sign of result is the sign of
the dividend, while for `remainder` the sign of the result is the sign
of the divisor. The `fmod` function is equivalent to the Matlab(TM)
``rem`` function.
Examples
--------
>>> np.fmod([-3, -2, -1, 1, 2, 3], 2)
array([-1, 0, -1, 1, 0, 1])
>>> np.remainder([-3, -2, -1, 1, 2, 3], 2)
array([1, 0, 1, 1, 0, 1])
>>> np.fmod([5, 3], [2, 2.])
array([ 1., 1.])
>>> a = np.arange(-3, 3).reshape(3, 2)
>>> a
array([[-3, -2],
[-1, 0],
[ 1, 2]])
>>> np.fmod(a, [2,2])
array([[-1, 0],
[-1, 0],
[ 1, 0]])
""")
add_newdoc('numpy.core.umath', 'greater',
"""
Return the truth value of (x1 > x2) element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays. If ``x1.shape != x2.shape``, they must be
broadcastable to a common shape (which may be the shape of one or
the other).
Returns
-------
out : bool or ndarray of bool
Array of bools, or a single bool if `x1` and `x2` are scalars.
See Also
--------
greater_equal, less, less_equal, equal, not_equal
Examples
--------
>>> np.greater([4,2],[2,2])
array([ True, False], dtype=bool)
If the inputs are ndarrays, then np.greater is equivalent to '>'.
>>> a = np.array([4,2])
>>> b = np.array([2,2])
>>> a > b
array([ True, False], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'greater_equal',
"""
Return the truth value of (x1 >= x2) element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays. If ``x1.shape != x2.shape``, they must be
broadcastable to a common shape (which may be the shape of one or
the other).
Returns
-------
out : bool or ndarray of bool
Array of bools, or a single bool if `x1` and `x2` are scalars.
See Also
--------
greater, less, less_equal, equal, not_equal
Examples
--------
>>> np.greater_equal([4, 2, 1], [2, 2, 2])
array([ True, True, False], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'hypot',
"""
Given the "legs" of a right triangle, return its hypotenuse.
Equivalent to ``sqrt(x1**2 + x2**2)``, element-wise. If `x1` or
`x2` is scalar_like (i.e., unambiguously cast-able to a scalar type),
it is broadcast for use with each element of the other argument.
(See Examples)
Parameters
----------
x1, x2 : array_like
Leg of the triangle(s).
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See doc.ufuncs.
Returns
-------
z : ndarray
The hypotenuse of the triangle(s).
Examples
--------
>>> np.hypot(3*np.ones((3, 3)), 4*np.ones((3, 3)))
array([[ 5., 5., 5.],
[ 5., 5., 5.],
[ 5., 5., 5.]])
Example showing broadcast of scalar_like argument:
>>> np.hypot(3*np.ones((3, 3)), [4])
array([[ 5., 5., 5.],
[ 5., 5., 5.],
[ 5., 5., 5.]])
""")
add_newdoc('numpy.core.umath', 'invert',
"""
Compute bit-wise inversion, or bit-wise NOT, element-wise.
Computes the bit-wise NOT of the underlying binary representation of
the integers in the input arrays. This ufunc implements the C/Python
operator ``~``.
For signed integer inputs, the two's complement is returned. In a
two's-complement system negative numbers are represented by the two's
complement of the absolute value. This is the most common method of
    representing signed integers on computers [1]_. An N-bit
two's-complement system can represent every integer in the range
:math:`-2^{N-1}` to :math:`+2^{N-1}-1`.
Parameters
----------
x1 : array_like
Only integer and boolean types are handled.
Returns
-------
out : array_like
Result.
See Also
--------
bitwise_and, bitwise_or, bitwise_xor
logical_not
binary_repr :
Return the binary representation of the input number as a string.
Notes
-----
`bitwise_not` is an alias for `invert`:
>>> np.bitwise_not is np.invert
True
References
----------
.. [1] Wikipedia, "Two's complement",
http://en.wikipedia.org/wiki/Two's_complement
Examples
--------
We've seen that 13 is represented by ``00001101``.
The invert or bit-wise NOT of 13 is then:
    >>> np.invert(np.array([13], dtype=np.uint8))
    array([242], dtype=uint8)
    >>> np.binary_repr(13, width=8)
'00001101'
>>> np.binary_repr(242, width=8)
'11110010'
The result depends on the bit-width:
    >>> np.invert(np.array([13], dtype=np.uint16))
    array([65522], dtype=uint16)
    >>> np.binary_repr(13, width=16)
'0000000000001101'
>>> np.binary_repr(65522, width=16)
'1111111111110010'
When using signed integer types the result is the two's complement of
the result for the unsigned type:
    >>> np.invert(np.array([13], dtype=np.int8))
array([-14], dtype=int8)
>>> np.binary_repr(-14, width=8)
'11110010'
Booleans are accepted as well:
    >>> np.invert(np.array([True, False]))
array([False, True], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'isfinite',
"""
Test element-wise for finiteness (not infinity or not Not a Number).
The result is returned as a boolean array.
Parameters
----------
x : array_like
Input values.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See `doc.ufuncs`.
Returns
-------
y : ndarray, bool
For scalar input, the result is a new boolean with value True
if the input is finite; otherwise the value is False (input is
either positive infinity, negative infinity or Not a Number).
For array input, the result is a boolean array with the same
dimensions as the input and the values are True if the
corresponding element of the input is finite; otherwise the values
are False (element is either positive infinity, negative infinity
or Not a Number).
See Also
--------
isinf, isneginf, isposinf, isnan
Notes
-----
Not a Number, positive infinity and negative infinity are considered
to be non-finite.
Numpy uses the IEEE Standard for Binary Floating-Point for Arithmetic
(IEEE 754). This means that Not a Number is not equivalent to infinity.
    Also, positive infinity is not equivalent to negative infinity, but
    infinity is equivalent to positive infinity. Errors result if the
second argument is also supplied when `x` is a scalar input, or if
first and second arguments have different shapes.
Examples
--------
>>> np.isfinite(1)
True
>>> np.isfinite(0)
True
>>> np.isfinite(np.nan)
False
>>> np.isfinite(np.inf)
False
>>> np.isfinite(np.NINF)
False
>>> np.isfinite([np.log(-1.),1.,np.log(0)])
array([False, True, False], dtype=bool)
>>> x = np.array([-np.inf, 0., np.inf])
>>> y = np.array([2, 2, 2])
>>> np.isfinite(x, y)
array([0, 1, 0])
>>> y
array([0, 1, 0])
""")
add_newdoc('numpy.core.umath', 'isinf',
"""
Test element-wise for positive or negative infinity.
Returns a boolean array of the same shape as `x`, True where ``x ==
+/-inf``, otherwise False.
Parameters
----------
x : array_like
Input values
out : array_like, optional
An array with the same shape as `x` to store the result.
Returns
-------
y : bool (scalar) or boolean ndarray
For scalar input, the result is a new boolean with value True if
the input is positive or negative infinity; otherwise the value is
False.
For array input, the result is a boolean array with the same shape
as the input and the values are True where the corresponding
element of the input is positive or negative infinity; elsewhere
the values are False. If a second argument was supplied the result
is stored there. If the type of that array is a numeric type the
result is represented as zeros and ones, if the type is boolean
then as False and True, respectively. The return value `y` is then
a reference to that array.
See Also
--------
isneginf, isposinf, isnan, isfinite
Notes
-----
Numpy uses the IEEE Standard for Binary Floating-Point for Arithmetic
(IEEE 754).
Errors result if the second argument is supplied when the first
argument is a scalar, or if the first and second arguments have
different shapes.
Examples
--------
>>> np.isinf(np.inf)
True
>>> np.isinf(np.nan)
False
>>> np.isinf(np.NINF)
True
>>> np.isinf([np.inf, -np.inf, 1.0, np.nan])
array([ True, True, False, False], dtype=bool)
>>> x = np.array([-np.inf, 0., np.inf])
>>> y = np.array([2, 2, 2])
>>> np.isinf(x, y)
array([1, 0, 1])
>>> y
array([1, 0, 1])
""")
add_newdoc('numpy.core.umath', 'isnan',
"""
Test element-wise for NaN and return result as a boolean array.
Parameters
----------
x : array_like
Input array.
Returns
-------
y : {ndarray, bool}
For scalar input, the result is a new boolean with value True if
the input is NaN; otherwise the value is False.
For array input, the result is a boolean array of the same
dimensions as the input and the values are True if the
corresponding element of the input is NaN; otherwise the values are
False.
See Also
--------
isinf, isneginf, isposinf, isfinite
Notes
-----
Numpy uses the IEEE Standard for Binary Floating-Point for Arithmetic
(IEEE 754). This means that Not a Number is not equivalent to infinity.
Examples
--------
>>> np.isnan(np.nan)
True
>>> np.isnan(np.inf)
False
>>> np.isnan([np.log(-1.),1.,np.log(0)])
array([ True, False, False], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'left_shift',
"""
Shift the bits of an integer to the left.
Bits are shifted to the left by appending `x2` 0s at the right of `x1`.
Since the internal representation of numbers is in binary format, this
operation is equivalent to multiplying `x1` by ``2**x2``.
Parameters
----------
x1 : array_like of integer type
Input values.
x2 : array_like of integer type
Number of zeros to append to `x1`. Has to be non-negative.
Returns
-------
out : array of integer type
Return `x1` with bits shifted `x2` times to the left.
See Also
--------
right_shift : Shift the bits of an integer to the right.
binary_repr : Return the binary representation of the input number
as a string.
Examples
--------
>>> np.binary_repr(5)
'101'
>>> np.left_shift(5, 2)
20
>>> np.binary_repr(20)
'10100'
>>> np.left_shift(5, [1,2,3])
array([10, 20, 40])
""")
add_newdoc('numpy.core.umath', 'less',
"""
Return the truth value of (x1 < x2) element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays. If ``x1.shape != x2.shape``, they must be
broadcastable to a common shape (which may be the shape of one or
the other).
Returns
-------
out : bool or ndarray of bool
Array of bools, or a single bool if `x1` and `x2` are scalars.
See Also
--------
greater, less_equal, greater_equal, equal, not_equal
Examples
--------
>>> np.less([1, 2], [2, 2])
array([ True, False], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'less_equal',
"""
    Return the truth value of (x1 <= x2) element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays. If ``x1.shape != x2.shape``, they must be
broadcastable to a common shape (which may be the shape of one or
the other).
Returns
-------
out : bool or ndarray of bool
Array of bools, or a single bool if `x1` and `x2` are scalars.
See Also
--------
greater, less, greater_equal, equal, not_equal
Examples
--------
>>> np.less_equal([4, 2, 1], [2, 2, 2])
array([False, True, True], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'log',
"""
Natural logarithm, element-wise.
The natural logarithm `log` is the inverse of the exponential function,
so that `log(exp(x)) = x`. The natural logarithm is logarithm in base
`e`.
Parameters
----------
x : array_like
Input value.
Returns
-------
y : ndarray
The natural logarithm of `x`, element-wise.
See Also
--------
log10, log2, log1p, emath.log
Notes
-----
Logarithm is a multivalued function: for each `x` there is an infinite
number of `z` such that `exp(z) = x`. The convention is to return the
`z` whose imaginary part lies in `[-pi, pi]`.
For real-valued input data types, `log` always returns real output. For
each value that cannot be expressed as a real number or infinity, it
yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `log` is a complex analytical function that
has a branch cut `[-inf, 0]` and is continuous from above on it. `log`
handles the floating-point negative zero as an infinitesimal negative
number, conforming to the C99 standard.
References
----------
.. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 67. http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Logarithm". http://en.wikipedia.org/wiki/Logarithm
Examples
--------
>>> np.log([1, np.e, np.e**2, 0])
array([ 0., 1., 2., -Inf])
""")
add_newdoc('numpy.core.umath', 'log10',
"""
Return the base 10 logarithm of the input array, element-wise.
Parameters
----------
x : array_like
Input values.
Returns
-------
y : ndarray
The logarithm to the base 10 of `x`, element-wise. NaNs are
returned where x is negative.
See Also
--------
emath.log10
Notes
-----
Logarithm is a multivalued function: for each `x` there is an infinite
number of `z` such that `10**z = x`. The convention is to return the
`z` whose imaginary part lies in `[-pi, pi]`.
For real-valued input data types, `log10` always returns real output.
For each value that cannot be expressed as a real number or infinity,
it yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `log10` is a complex analytical function that
has a branch cut `[-inf, 0]` and is continuous from above on it.
`log10` handles the floating-point negative zero as an infinitesimal
negative number, conforming to the C99 standard.
References
----------
.. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 67. http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Logarithm". http://en.wikipedia.org/wiki/Logarithm
Examples
--------
>>> np.log10([1e-15, -3.])
array([-15., NaN])
""")
add_newdoc('numpy.core.umath', 'log2',
"""
Base-2 logarithm of `x`.
Parameters
----------
x : array_like
Input values.
Returns
-------
y : ndarray
Base-2 logarithm of `x`.
See Also
--------
log, log10, log1p, emath.log2
Notes
-----
.. versionadded:: 1.3.0
Logarithm is a multivalued function: for each `x` there is an infinite
number of `z` such that `2**z = x`. The convention is to return the `z`
whose imaginary part lies in `[-pi, pi]`.
For real-valued input data types, `log2` always returns real output.
For each value that cannot be expressed as a real number or infinity,
it yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `log2` is a complex analytical function that
has a branch cut `[-inf, 0]` and is continuous from above on it. `log2`
handles the floating-point negative zero as an infinitesimal negative
number, conforming to the C99 standard.
Examples
--------
>>> x = np.array([0, 1, 2, 2**4])
>>> np.log2(x)
array([-Inf, 0., 1., 4.])
>>> xi = np.array([0+1.j, 1, 2+0.j, 4.j])
>>> np.log2(xi)
array([ 0.+2.26618007j, 0.+0.j , 1.+0.j , 2.+2.26618007j])
""")
add_newdoc('numpy.core.umath', 'logaddexp',
"""
Logarithm of the sum of exponentiations of the inputs.
Calculates ``log(exp(x1) + exp(x2))``. This function is useful in
statistics where the calculated probabilities of events may be so small
as to exceed the range of normal floating point numbers. In such cases
the logarithm of the calculated probability is stored. This function
allows adding probabilities stored in such a fashion.
Parameters
----------
x1, x2 : array_like
Input values.
Returns
-------
result : ndarray
Logarithm of ``exp(x1) + exp(x2)``.
See Also
--------
logaddexp2: Logarithm of the sum of exponentiations of inputs in base 2.
Notes
-----
.. versionadded:: 1.3.0
Examples
--------
>>> prob1 = np.log(1e-50)
>>> prob2 = np.log(2.5e-50)
>>> prob12 = np.logaddexp(prob1, prob2)
>>> prob12
-113.87649168120691
>>> np.exp(prob12)
3.5000000000000057e-50
""")
add_newdoc('numpy.core.umath', 'logaddexp2',
"""
Logarithm of the sum of exponentiations of the inputs in base-2.
Calculates ``log2(2**x1 + 2**x2)``. This function is useful in machine
learning when the calculated probabilities of events may be so small as
to exceed the range of normal floating point numbers. In such cases
the base-2 logarithm of the calculated probability can be used instead.
This function allows adding probabilities stored in such a fashion.
Parameters
----------
x1, x2 : array_like
Input values.
out : ndarray, optional
Array to store results in.
Returns
-------
result : ndarray
Base-2 logarithm of ``2**x1 + 2**x2``.
See Also
--------
logaddexp: Logarithm of the sum of exponentiations of the inputs.
Notes
-----
.. versionadded:: 1.3.0
Examples
--------
>>> prob1 = np.log2(1e-50)
>>> prob2 = np.log2(2.5e-50)
>>> prob12 = np.logaddexp2(prob1, prob2)
>>> prob1, prob2, prob12
(-166.09640474436813, -164.77447664948076, -164.28904982231052)
>>> 2**prob12
3.4999999999999914e-50
""")
add_newdoc('numpy.core.umath', 'log1p',
"""
Return the natural logarithm of one plus the input array, element-wise.
Calculates ``log(1 + x)``.
Parameters
----------
x : array_like
Input values.
Returns
-------
y : ndarray
Natural logarithm of `1 + x`, element-wise.
See Also
--------
expm1 : ``exp(x) - 1``, the inverse of `log1p`.
Notes
-----
For real-valued input, `log1p` is accurate also for `x` so small
that `1 + x == 1` in floating-point accuracy.
Logarithm is a multivalued function: for each `x` there is an infinite
number of `z` such that `exp(z) = 1 + x`. The convention is to return
the `z` whose imaginary part lies in `[-pi, pi]`.
For real-valued input data types, `log1p` always returns real output.
For each value that cannot be expressed as a real number or infinity,
it yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `log1p` is a complex analytical function that
has a branch cut `[-inf, -1]` and is continuous from above on it.
`log1p` handles the floating-point negative zero as an infinitesimal
negative number, conforming to the C99 standard.
References
----------
.. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 67. http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Logarithm". http://en.wikipedia.org/wiki/Logarithm
Examples
--------
>>> np.log1p(1e-99)
1e-99
>>> np.log(1 + 1e-99)
0.0
""")
add_newdoc('numpy.core.umath', 'logical_and',
"""
Compute the truth value of x1 AND x2 element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays. `x1` and `x2` must be of the same shape.
Returns
-------
y : {ndarray, bool}
Boolean result with the same shape as `x1` and `x2` of the logical
AND operation on corresponding elements of `x1` and `x2`.
See Also
--------
logical_or, logical_not, logical_xor
bitwise_and
Examples
--------
>>> np.logical_and(True, False)
False
>>> np.logical_and([True, False], [False, False])
array([False, False], dtype=bool)
>>> x = np.arange(5)
>>> np.logical_and(x>1, x<4)
array([False, False, True, True, False], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'logical_not',
"""
Compute the truth value of NOT x element-wise.
Parameters
----------
x : array_like
Logical NOT is applied to the elements of `x`.
Returns
-------
y : bool or ndarray of bool
Boolean result with the same shape as `x` of the NOT operation
on elements of `x`.
See Also
--------
logical_and, logical_or, logical_xor
Examples
--------
>>> np.logical_not(3)
False
>>> np.logical_not([True, False, 0, 1])
array([False, True, True, False], dtype=bool)
>>> x = np.arange(5)
>>> np.logical_not(x<3)
array([False, False, False, True, True], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'logical_or',
"""
Compute the truth value of x1 OR x2 element-wise.
Parameters
----------
x1, x2 : array_like
Logical OR is applied to the elements of `x1` and `x2`.
They have to be of the same shape.
Returns
-------
y : {ndarray, bool}
Boolean result with the same shape as `x1` and `x2` of the logical
OR operation on elements of `x1` and `x2`.
See Also
--------
logical_and, logical_not, logical_xor
bitwise_or
Examples
--------
>>> np.logical_or(True, False)
True
>>> np.logical_or([True, False], [False, False])
array([ True, False], dtype=bool)
>>> x = np.arange(5)
>>> np.logical_or(x < 1, x > 3)
array([ True, False, False, False, True], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'logical_xor',
"""
Compute the truth value of x1 XOR x2, element-wise.
Parameters
----------
x1, x2 : array_like
Logical XOR is applied to the elements of `x1` and `x2`. They must
be broadcastable to the same shape.
Returns
-------
y : bool or ndarray of bool
Boolean result of the logical XOR operation applied to the elements
of `x1` and `x2`; the shape is determined by whether or not
broadcasting of one or both arrays was required.
See Also
--------
logical_and, logical_or, logical_not, bitwise_xor
Examples
--------
>>> np.logical_xor(True, False)
True
>>> np.logical_xor([True, True, False, False], [True, False, True, False])
array([False, True, True, False], dtype=bool)
>>> x = np.arange(5)
>>> np.logical_xor(x < 1, x > 3)
array([ True, False, False, False, True], dtype=bool)
Simple example showing support of broadcasting
>>> np.logical_xor(0, np.eye(2))
array([[ True, False],
[False, True]], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'maximum',
"""
Element-wise maximum of array elements.
Compare two arrays and returns a new array containing the element-wise
maxima. If one of the elements being compared is a NaN, then that
element is returned. If both elements are NaNs then the first is
returned. The latter distinction is important for complex NaNs, which
are defined as at least one of the real or imaginary parts being a NaN.
The net effect is that NaNs are propagated.
Parameters
----------
x1, x2 : array_like
The arrays holding the elements to be compared. They must have
the same shape, or shapes that can be broadcast to a single shape.
Returns
-------
y : {ndarray, scalar}
The maximum of `x1` and `x2`, element-wise. Returns scalar if
both `x1` and `x2` are scalars.
See Also
--------
minimum :
Element-wise minimum of two arrays, propagates NaNs.
fmax :
Element-wise maximum of two arrays, ignores NaNs.
amax :
The maximum value of an array along a given axis, propagates NaNs.
nanmax :
The maximum value of an array along a given axis, ignores NaNs.
fmin, amin, nanmin
Notes
-----
The maximum is equivalent to ``np.where(x1 >= x2, x1, x2)`` when
neither x1 nor x2 are nans, but it is faster and does proper
broadcasting.
Examples
--------
>>> np.maximum([2, 3, 4], [1, 5, 2])
array([2, 5, 4])
>>> np.maximum(np.eye(2), [0.5, 2]) # broadcasting
array([[ 1. , 2. ],
[ 0.5, 2. ]])
>>> np.maximum([np.nan, 0, np.nan], [0, np.nan, np.nan])
array([ NaN, NaN, NaN])
>>> np.maximum(np.Inf, 1)
inf
""")
add_newdoc('numpy.core.umath', 'minimum',
"""
Element-wise minimum of array elements.
Compare two arrays and returns a new array containing the element-wise
minima. If one of the elements being compared is a NaN, then that
element is returned. If both elements are NaNs then the first is
returned. The latter distinction is important for complex NaNs, which
are defined as at least one of the real or imaginary parts being a NaN.
The net effect is that NaNs are propagated.
Parameters
----------
x1, x2 : array_like
The arrays holding the elements to be compared. They must have
the same shape, or shapes that can be broadcast to a single shape.
Returns
-------
y : {ndarray, scalar}
The minimum of `x1` and `x2`, element-wise. Returns scalar if
both `x1` and `x2` are scalars.
See Also
--------
maximum :
Element-wise maximum of two arrays, propagates NaNs.
fmin :
Element-wise minimum of two arrays, ignores NaNs.
amin :
The minimum value of an array along a given axis, propagates NaNs.
nanmin :
The minimum value of an array along a given axis, ignores NaNs.
fmax, amax, nanmax
Notes
-----
The minimum is equivalent to ``np.where(x1 <= x2, x1, x2)`` when
neither x1 nor x2 are NaNs, but it is faster and does proper
broadcasting.
Examples
--------
>>> np.minimum([2, 3, 4], [1, 5, 2])
array([1, 3, 2])
>>> np.minimum(np.eye(2), [0.5, 2]) # broadcasting
array([[ 0.5, 0. ],
[ 0. , 1. ]])
>>> np.minimum([np.nan, 0, np.nan],[0, np.nan, np.nan])
array([ NaN, NaN, NaN])
>>> np.minimum(-np.Inf, 1)
-inf
""")
add_newdoc('numpy.core.umath', 'fmax',
"""
Element-wise maximum of array elements.
Compare two arrays and returns a new array containing the element-wise
maxima. If one of the elements being compared is a NaN, then the
non-nan element is returned. If both elements are NaNs then the first
is returned. The latter distinction is important for complex NaNs,
which are defined as at least one of the real or imaginary parts being
a NaN. The net effect is that NaNs are ignored when possible.
Parameters
----------
x1, x2 : array_like
The arrays holding the elements to be compared. They must have
the same shape.
Returns
-------
y : {ndarray, scalar}
        The maximum of `x1` and `x2`, element-wise. Returns scalar if
both `x1` and `x2` are scalars.
See Also
--------
fmin :
Element-wise minimum of two arrays, ignores NaNs.
maximum :
Element-wise maximum of two arrays, propagates NaNs.
amax :
The maximum value of an array along a given axis, propagates NaNs.
nanmax :
The maximum value of an array along a given axis, ignores NaNs.
minimum, amin, nanmin
Notes
-----
.. versionadded:: 1.3.0
The fmax is equivalent to ``np.where(x1 >= x2, x1, x2)`` when neither
x1 nor x2 are NaNs, but it is faster and does proper broadcasting.
Examples
--------
>>> np.fmax([2, 3, 4], [1, 5, 2])
array([ 2., 5., 4.])
>>> np.fmax(np.eye(2), [0.5, 2])
array([[ 1. , 2. ],
[ 0.5, 2. ]])
>>> np.fmax([np.nan, 0, np.nan],[0, np.nan, np.nan])
array([ 0., 0., NaN])
""")
add_newdoc('numpy.core.umath', 'fmin',
"""
Element-wise minimum of array elements.
Compare two arrays and returns a new array containing the element-wise
minima. If one of the elements being compared is a NaN, then the
non-nan element is returned. If both elements are NaNs then the first
is returned. The latter distinction is important for complex NaNs,
which are defined as at least one of the real or imaginary parts being
a NaN. The net effect is that NaNs are ignored when possible.
Parameters
----------
x1, x2 : array_like
The arrays holding the elements to be compared. They must have
the same shape.
Returns
-------
y : {ndarray, scalar}
The minimum of `x1` and `x2`, element-wise. Returns scalar if
both `x1` and `x2` are scalars.
See Also
--------
fmax :
Element-wise maximum of two arrays, ignores NaNs.
minimum :
Element-wise minimum of two arrays, propagates NaNs.
amin :
The minimum value of an array along a given axis, propagates NaNs.
nanmin :
The minimum value of an array along a given axis, ignores NaNs.
maximum, amax, nanmax
Notes
-----
.. versionadded:: 1.3.0
The fmin is equivalent to ``np.where(x1 <= x2, x1, x2)`` when neither
x1 nor x2 are NaNs, but it is faster and does proper broadcasting.
Examples
--------
>>> np.fmin([2, 3, 4], [1, 5, 2])
    array([1, 3, 2])
>>> np.fmin(np.eye(2), [0.5, 2])
    array([[ 0.5,  0. ],
           [ 0. ,  1. ]])
>>> np.fmin([np.nan, 0, np.nan],[0, np.nan, np.nan])
array([ 0., 0., NaN])
""")
add_newdoc('numpy.core.umath', 'modf',
"""
Return the fractional and integral parts of an array, element-wise.
The fractional and integral parts are negative if the given number is
negative.
Parameters
----------
x : array_like
Input array.
Returns
-------
y1 : ndarray
Fractional part of `x`.
y2 : ndarray
Integral part of `x`.
Notes
-----
For integer input the return values are floats.
Examples
--------
>>> np.modf([0, 3.5])
(array([ 0. , 0.5]), array([ 0., 3.]))
>>> np.modf(-0.5)
(-0.5, -0)
""")
add_newdoc('numpy.core.umath', 'multiply',
"""
Multiply arguments element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays to be multiplied.
Returns
-------
y : ndarray
The product of `x1` and `x2`, element-wise. Returns a scalar if
both `x1` and `x2` are scalars.
Notes
-----
Equivalent to `x1` * `x2` in terms of array broadcasting.
Examples
--------
>>> np.multiply(2.0, 4.0)
8.0
>>> x1 = np.arange(9.0).reshape((3, 3))
>>> x2 = np.arange(3.0)
>>> np.multiply(x1, x2)
array([[ 0., 1., 4.],
[ 0., 4., 10.],
[ 0., 7., 16.]])
""")
add_newdoc('numpy.core.umath', 'negative',
"""
Numerical negative, element-wise.
Parameters
----------
x : array_like or scalar
Input array.
Returns
-------
y : ndarray or scalar
Returned array or scalar: `y = -x`.
Examples
--------
>>> np.negative([1.,-1.])
array([-1., 1.])
""")
add_newdoc('numpy.core.umath', 'not_equal',
"""
Return (x1 != x2) element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays.
out : ndarray, optional
A placeholder the same shape as `x1` to store the result.
See `doc.ufuncs` (Section "Output arguments") for more details.
Returns
-------
not_equal : ndarray bool, scalar bool
For each element in `x1, x2`, return True if `x1` is not equal
to `x2` and False otherwise.
See Also
--------
equal, greater, greater_equal, less, less_equal
Examples
--------
>>> np.not_equal([1.,2.], [1., 3.])
array([False, True], dtype=bool)
>>> np.not_equal([1, 2], [[1, 3],[1, 4]])
array([[False, True],
[False, True]], dtype=bool)
""")
add_newdoc('numpy.core.umath', '_ones_like',
"""
    This function used to be numpy.ones_like, but now a specific
function for that has been written for consistency with the other
*_like functions. It is only used internally in a limited fashion now.
See Also
--------
ones_like
""")
add_newdoc('numpy.core.umath', 'power',
"""
First array elements raised to powers from second array, element-wise.
Raise each base in `x1` to the positionally-corresponding power in
`x2`. `x1` and `x2` must be broadcastable to the same shape.
Parameters
----------
x1 : array_like
The bases.
x2 : array_like
The exponents.
Returns
-------
y : ndarray
The bases in `x1` raised to the exponents in `x2`.
Examples
--------
Cube each element in a list.
>>> x1 = range(6)
>>> x1
[0, 1, 2, 3, 4, 5]
>>> np.power(x1, 3)
array([ 0, 1, 8, 27, 64, 125])
Raise the bases to different exponents.
>>> x2 = [1.0, 2.0, 3.0, 3.0, 2.0, 1.0]
>>> np.power(x1, x2)
array([ 0., 1., 8., 27., 16., 5.])
The effect of broadcasting.
>>> x2 = np.array([[1, 2, 3, 3, 2, 1], [1, 2, 3, 3, 2, 1]])
>>> x2
array([[1, 2, 3, 3, 2, 1],
[1, 2, 3, 3, 2, 1]])
>>> np.power(x1, x2)
array([[ 0, 1, 8, 27, 16, 5],
[ 0, 1, 8, 27, 16, 5]])
""")
add_newdoc('numpy.core.umath', 'radians',
"""
Convert angles from degrees to radians.
Parameters
----------
x : array_like
Input array in degrees.
out : ndarray, optional
Output array of same shape as `x`.
Returns
-------
y : ndarray
The corresponding radian values.
See Also
--------
deg2rad : equivalent function
Examples
--------
Convert a degree array to radians
>>> deg = np.arange(12.) * 30.
>>> np.radians(deg)
array([ 0. , 0.52359878, 1.04719755, 1.57079633, 2.0943951 ,
2.61799388, 3.14159265, 3.66519143, 4.1887902 , 4.71238898,
5.23598776, 5.75958653])
>>> out = np.zeros((deg.shape))
>>> ret = np.radians(deg, out)
>>> ret is out
True
""")
add_newdoc('numpy.core.umath', 'deg2rad',
"""
Convert angles from degrees to radians.
Parameters
----------
x : array_like
Angles in degrees.
Returns
-------
y : ndarray
The corresponding angle in radians.
See Also
--------
rad2deg : Convert angles from radians to degrees.
unwrap : Remove large jumps in angle by wrapping.
Notes
-----
.. versionadded:: 1.3.0
``deg2rad(x)`` is ``x * pi / 180``.
Examples
--------
>>> np.deg2rad(180)
3.1415926535897931
""")
add_newdoc('numpy.core.umath', 'reciprocal',
"""
Return the reciprocal of the argument, element-wise.
Calculates ``1/x``.
Parameters
----------
x : array_like
Input array.
Returns
-------
y : ndarray
Return array.
Notes
-----
.. note::
This function is not designed to work with integers.
For integer arguments with absolute value larger than 1 the result is
always zero because of the way Python handles integer division. For
integer zero the result is an overflow.
Examples
--------
>>> np.reciprocal(2.)
0.5
>>> np.reciprocal([1, 2., 3.33])
array([ 1. , 0.5 , 0.3003003])
""")
add_newdoc('numpy.core.umath', 'remainder',
"""
Return element-wise remainder of division.
    Computes ``x1 - floor(x1 / x2) * x2``; the result has the same sign as
the divisor `x2`. It is equivalent to the Python modulus operator
``x1 % x2`` and should not be confused with the Matlab(TM) ``rem``
function.
Parameters
----------
x1 : array_like
Dividend array.
x2 : array_like
Divisor array.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See doc.ufuncs.
Returns
-------
y : ndarray
The remainder of the quotient ``x1/x2``, element-wise. Returns a
scalar if both `x1` and `x2` are scalars.
See Also
--------
fmod : Equivalent of the Matlab(TM) ``rem`` function.
divide, floor
Notes
-----
Returns 0 when `x2` is 0 and both `x1` and `x2` are (arrays of)
integers.
Examples
--------
>>> np.remainder([4, 7], [2, 3])
array([0, 1])
>>> np.remainder(np.arange(7), 5)
array([0, 1, 2, 3, 4, 0, 1])
""")
add_newdoc('numpy.core.umath', 'right_shift',
"""
Shift the bits of an integer to the right.
    Bits are shifted to the right by `x2`. Because the internal
representation of numbers is in binary format, this operation is
equivalent to dividing `x1` by ``2**x2``.
Parameters
----------
x1 : array_like, int
Input values.
x2 : array_like, int
Number of bits to remove at the right of `x1`.
Returns
-------
out : ndarray, int
Return `x1` with bits shifted `x2` times to the right.
See Also
--------
left_shift : Shift the bits of an integer to the left.
binary_repr : Return the binary representation of the input number
as a string.
Examples
--------
>>> np.binary_repr(10)
'1010'
>>> np.right_shift(10, 1)
5
>>> np.binary_repr(5)
'101'
>>> np.right_shift(10, [1,2,3])
array([5, 2, 1])
""")
add_newdoc('numpy.core.umath', 'rint',
"""
Round elements of the array to the nearest integer.
Parameters
----------
x : array_like
Input array.
Returns
-------
out : {ndarray, scalar}
Output array is same shape and type as `x`.
See Also
--------
ceil, floor, trunc
Examples
--------
>>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
>>> np.rint(a)
array([-2., -2., -0., 0., 2., 2., 2.])
""")
add_newdoc('numpy.core.umath', 'sign',
"""
Returns an element-wise indication of the sign of a number.
The `sign` function returns ``-1 if x < 0, 0 if x==0, 1 if x > 0``.
Parameters
----------
x : array_like
Input values.
Returns
-------
y : ndarray
The sign of `x`.
Examples
--------
>>> np.sign([-5., 4.5])
array([-1., 1.])
>>> np.sign(0)
0
""")
add_newdoc('numpy.core.umath', 'signbit',
"""
Returns element-wise True where signbit is set (less than zero).
Parameters
----------
x : array_like
The input value(s).
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See `doc.ufuncs`.
Returns
-------
result : ndarray of bool
Output array, or reference to `out` if that was supplied.
Examples
--------
>>> np.signbit(-1.2)
True
>>> np.signbit(np.array([1, -2.3, 2.1]))
array([False, True, False], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'copysign',
"""
Change the sign of x1 to that of x2, element-wise.
If both arguments are arrays or sequences, they have to be of the same
length. If `x2` is a scalar, its sign will be copied to all elements of
`x1`.
Parameters
----------
x1 : array_like
Values to change the sign of.
x2 : array_like
The sign of `x2` is copied to `x1`.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See doc.ufuncs.
Returns
-------
out : array_like
The values of `x1` with the sign of `x2`.
Examples
--------
>>> np.copysign(1.3, -1)
-1.3
>>> 1/np.copysign(0, 1)
inf
>>> 1/np.copysign(0, -1)
-inf
>>> np.copysign([-1, 0, 1], -1.1)
array([-1., -0., -1.])
>>> np.copysign([-1, 0, 1], np.arange(3)-1)
array([-1., 0., 1.])
""")
add_newdoc('numpy.core.umath', 'nextafter',
"""
Return the next floating-point value after x1 towards x2, element-wise.
Parameters
----------
x1 : array_like
Values to find the next representable value of.
x2 : array_like
The direction where to look for the next representable value of `x1`.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See `doc.ufuncs`.
Returns
-------
out : array_like
The next representable values of `x1` in the direction of `x2`.
Examples
--------
>>> eps = np.finfo(np.float64).eps
>>> np.nextafter(1, 2) == eps + 1
True
>>> np.nextafter([1, 2], [2, 1]) == [eps + 1, 2 - eps]
array([ True, True], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'spacing',
"""
Return the distance between x and the nearest adjacent number.
Parameters
----------
x1 : array_like
Values to find the spacing of.
Returns
-------
out : array_like
The spacing of values of `x1`.
Notes
-----
It can be considered as a generalization of EPS:
``spacing(np.float64(1)) == np.finfo(np.float64).eps``, and there
should not be any representable number between ``x + spacing(x)`` and
x for any finite x.
Spacing of +- inf and NaN is NaN.
Examples
--------
>>> np.spacing(1) == np.finfo(np.float64).eps
True
""")
add_newdoc('numpy.core.umath', 'sin',
"""
Trigonometric sine, element-wise.
Parameters
----------
x : array_like
Angle, in radians (:math:`2 \\pi` rad equals 360 degrees).
Returns
-------
y : array_like
The sine of each element of x.
See Also
--------
arcsin, sinh, cos
Notes
-----
The sine is one of the fundamental functions of trigonometry (the
mathematical study of triangles). Consider a circle of radius 1
centered on the origin. A ray comes in from the :math:`+x` axis, makes
an angle at the origin (measured counter-clockwise from that axis), and
departs from the origin. The :math:`y` coordinate of the outgoing
ray's intersection with the unit circle is the sine of that angle. It
ranges from -1 for :math:`x=3\\pi / 2` to +1 for :math:`\\pi / 2.` The
function has zeroes where the angle is a multiple of :math:`\\pi`.
Sines of angles between :math:`\\pi` and :math:`2\\pi` are negative.
The numerous properties of the sine and related functions are included
in any standard trigonometry text.
Examples
--------
Print sine of one angle:
>>> np.sin(np.pi/2.)
1.0
Print sines of an array of angles given in degrees:
>>> np.sin(np.array((0., 30., 45., 60., 90.)) * np.pi / 180. )
array([ 0. , 0.5 , 0.70710678, 0.8660254 , 1. ])
Plot the sine function:
>>> import matplotlib.pylab as plt
>>> x = np.linspace(-np.pi, np.pi, 201)
>>> plt.plot(x, np.sin(x))
>>> plt.xlabel('Angle [rad]')
>>> plt.ylabel('sin(x)')
>>> plt.axis('tight')
>>> plt.show()
""")
add_newdoc('numpy.core.umath', 'sinh',
"""
Hyperbolic sine, element-wise.
Equivalent to ``1/2 * (np.exp(x) - np.exp(-x))`` or
``-1j * np.sin(1j*x)``.
Parameters
----------
x : array_like
Input array.
out : ndarray, optional
Output array of same shape as `x`.
Returns
-------
y : ndarray
The corresponding hyperbolic sine values.
Raises
------
ValueError: invalid return array shape
if `out` is provided and `out.shape` != `x.shape` (See Examples)
Notes
-----
If `out` is provided, the function writes the result into it,
and returns a reference to `out`. (See Examples)
References
----------
M. Abramowitz and I. A. Stegun, Handbook of Mathematical Functions.
New York, NY: Dover, 1972, pg. 83.
Examples
--------
>>> np.sinh(0)
0.0
>>> np.sinh(np.pi*1j/2)
1j
>>> np.sinh(np.pi*1j) # (exact value is 0)
1.2246063538223773e-016j
>>> # Discrepancy due to vagaries of floating point arithmetic.
    >>> # Example of providing the optional output parameter
    >>> out1 = np.array([0], dtype='d')
    >>> out2 = np.sinh([0.1], out1)
>>> out2 is out1
True
>>> # Example of ValueError due to provision of shape mis-matched `out`
>>> np.sinh(np.zeros((3,3)),np.zeros((2,2)))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ValueError: invalid return array shape
""")
add_newdoc('numpy.core.umath', 'sqrt',
"""
Return the positive square-root of an array, element-wise.
Parameters
----------
x : array_like
The values whose square-roots are required.
out : ndarray, optional
Alternate array object in which to put the result; if provided, it
must have the same shape as `x`
Returns
-------
y : ndarray
An array of the same shape as `x`, containing the positive
square-root of each element in `x`. If any element in `x` is
complex, a complex array is returned (and the square-roots of
negative reals are calculated). If all of the elements in `x`
are real, so is `y`, with negative elements returning ``nan``.
If `out` was provided, `y` is a reference to it.
See Also
--------
lib.scimath.sqrt
A version which returns complex numbers when given negative reals.
Notes
-----
*sqrt* has--consistent with common convention--as its branch cut the
real "interval" [`-inf`, 0), and is continuous from above on it.
A branch cut is a curve in the complex plane across which a given
complex function fails to be continuous.
Examples
--------
>>> np.sqrt([1,4,9])
array([ 1., 2., 3.])
>>> np.sqrt([4, -1, -3+4J])
array([ 2.+0.j, 0.+1.j, 1.+2.j])
    >>> np.sqrt([4, -1, np.inf])
array([ 2., NaN, Inf])
""")
add_newdoc('numpy.core.umath', 'square',
"""
Return the element-wise square of the input.
Parameters
----------
x : array_like
Input data.
Returns
-------
out : ndarray
Element-wise `x*x`, of the same shape and dtype as `x`.
Returns scalar if `x` is a scalar.
See Also
--------
numpy.linalg.matrix_power
sqrt
power
Examples
--------
>>> np.square([-1j, 1])
array([-1.-0.j, 1.+0.j])
""")
add_newdoc('numpy.core.umath', 'subtract',
"""
Subtract arguments, element-wise.
Parameters
----------
x1, x2 : array_like
The arrays to be subtracted from each other.
Returns
-------
y : ndarray
The difference of `x1` and `x2`, element-wise. Returns a scalar if
both `x1` and `x2` are scalars.
Notes
-----
Equivalent to ``x1 - x2`` in terms of array broadcasting.
Examples
--------
>>> np.subtract(1.0, 4.0)
-3.0
>>> x1 = np.arange(9.0).reshape((3, 3))
>>> x2 = np.arange(3.0)
>>> np.subtract(x1, x2)
array([[ 0., 0., 0.],
[ 3., 3., 3.],
[ 6., 6., 6.]])
""")
add_newdoc('numpy.core.umath', 'tan',
"""
Compute tangent element-wise.
Equivalent to ``np.sin(x)/np.cos(x)`` element-wise.
Parameters
----------
x : array_like
Input array.
out : ndarray, optional
Output array of same shape as `x`.
Returns
-------
y : ndarray
The corresponding tangent values.
Raises
------
ValueError: invalid return array shape
if `out` is provided and `out.shape` != `x.shape` (See Examples)
Notes
-----
If `out` is provided, the function writes the result into it,
and returns a reference to `out`. (See Examples)
References
----------
M. Abramowitz and I. A. Stegun, Handbook of Mathematical Functions.
New York, NY: Dover, 1972.
Examples
--------
>>> from math import pi
>>> np.tan(np.array([-pi,pi/2,pi]))
array([ 1.22460635e-16, 1.63317787e+16, -1.22460635e-16])
>>>
    >>> # Example of providing the optional output parameter illustrating
    >>> # that what is returned is a reference to said parameter
    >>> out1 = np.array([0], dtype='d')
    >>> out2 = np.tan([0.1], out1)
>>> out2 is out1
True
>>>
>>> # Example of ValueError due to provision of shape mis-matched `out`
    >>> np.tan(np.zeros((3,3)),np.zeros((2,2)))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ValueError: invalid return array shape
""")
add_newdoc('numpy.core.umath', 'tanh',
"""
Compute hyperbolic tangent element-wise.
Equivalent to ``np.sinh(x)/np.cosh(x)`` or ``-1j * np.tan(1j*x)``.
Parameters
----------
x : array_like
Input array.
out : ndarray, optional
Output array of same shape as `x`.
Returns
-------
y : ndarray
The corresponding hyperbolic tangent values.
Raises
------
ValueError: invalid return array shape
if `out` is provided and `out.shape` != `x.shape` (See Examples)
Notes
-----
If `out` is provided, the function writes the result into it,
and returns a reference to `out`. (See Examples)
References
----------
.. [1] M. Abramowitz and I. A. Stegun, Handbook of Mathematical Functions.
New York, NY: Dover, 1972, pg. 83.
http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Hyperbolic function",
http://en.wikipedia.org/wiki/Hyperbolic_function
Examples
--------
>>> np.tanh((0, np.pi*1j, np.pi*1j/2))
array([ 0. +0.00000000e+00j, 0. -1.22460635e-16j, 0. +1.63317787e+16j])
    >>> # Example of providing the optional output parameter illustrating
    >>> # that what is returned is a reference to said parameter
    >>> out1 = np.array([0], dtype='d')
    >>> out2 = np.tanh([0.1], out1)
>>> out2 is out1
True
>>> # Example of ValueError due to provision of shape mis-matched `out`
>>> np.tanh(np.zeros((3,3)),np.zeros((2,2)))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ValueError: invalid return array shape
""")
add_newdoc('numpy.core.umath', 'true_divide',
"""
Returns a true division of the inputs, element-wise.
Instead of the Python traditional 'floor division', this returns a true
division. True division adjusts the output type to present the best
answer, regardless of input types.
Parameters
----------
x1 : array_like
Dividend array.
x2 : array_like
Divisor array.
Returns
-------
out : ndarray
Result is scalar if both inputs are scalar, ndarray otherwise.
Notes
-----
The floor division operator ``//`` was added in Python 2.2 making
``//`` and ``/`` equivalent operators. The default floor division
operation of ``/`` can be replaced by true division with ``from
__future__ import division``.
In Python 3.0, ``//`` is the floor division operator and ``/`` the
true division operator. The ``true_divide(x1, x2)`` function is
equivalent to true division in Python.
Examples
--------
>>> x = np.arange(5)
>>> np.true_divide(x, 4)
array([ 0. , 0.25, 0.5 , 0.75, 1. ])
>>> x/4
array([0, 0, 0, 0, 1])
>>> x//4
array([0, 0, 0, 0, 1])
>>> from __future__ import division
>>> x/4
array([ 0. , 0.25, 0.5 , 0.75, 1. ])
>>> x//4
array([0, 0, 0, 0, 1])
""")
add_newdoc('numpy.core.umath', 'frexp',
"""
Decompose the elements of x into mantissa and twos exponent.
    Returns (`mantissa`, `exponent`), where ``x = mantissa * 2**exponent``.
    The mantissa lies in the open interval (-1, 1), while the twos
    exponent is a signed integer.
Parameters
----------
x : array_like
Array of numbers to be decomposed.
out1: ndarray, optional
Output array for the mantissa. Must have the same shape as `x`.
out2: ndarray, optional
Output array for the exponent. Must have the same shape as `x`.
Returns
-------
(mantissa, exponent) : tuple of ndarrays, (float, int)
`mantissa` is a float array with values between -1 and 1.
`exponent` is an int array which represents the exponent of 2.
See Also
--------
ldexp : Compute ``y = x1 * 2**x2``, the inverse of `frexp`.
Notes
-----
Complex dtypes are not supported, they will raise a TypeError.
Examples
--------
>>> x = np.arange(9)
>>> y1, y2 = np.frexp(x)
>>> y1
array([ 0. , 0.5 , 0.5 , 0.75 , 0.5 , 0.625, 0.75 , 0.875,
0.5 ])
>>> y2
array([0, 1, 2, 2, 3, 3, 3, 3, 4])
>>> y1 * 2**y2
array([ 0., 1., 2., 3., 4., 5., 6., 7., 8.])
""")
add_newdoc('numpy.core.umath', 'ldexp',
"""
Returns x1 * 2**x2, element-wise.
The mantissas `x1` and twos exponents `x2` are used to construct
floating point numbers ``x1 * 2**x2``.
Parameters
----------
x1 : array_like
Array of multipliers.
x2 : array_like, int
Array of twos exponents.
out : ndarray, optional
Output array for the result.
Returns
-------
y : ndarray or scalar
The result of ``x1 * 2**x2``.
See Also
--------
frexp : Return (y1, y2) from ``x = y1 * 2**y2``, inverse to `ldexp`.
Notes
-----
Complex dtypes are not supported, they will raise a TypeError.
    `ldexp` is useful as the inverse of `frexp`; if used by itself it is
more clear to simply use the expression ``x1 * 2**x2``.
Examples
--------
>>> np.ldexp(5, np.arange(4))
array([ 5., 10., 20., 40.], dtype=float32)
>>> x = np.arange(6)
>>> np.ldexp(*np.frexp(x))
array([ 0., 1., 2., 3., 4., 5.])
""")
| bsd-3-clause |
DiamondLightSource/auto_tomo_calibration-experimental | old_code_scripts/lmfit-py/doc/sphinx/numpydoc/plot_directive.py | 17 | 19706 | """
A special directive for generating a matplotlib plot.
.. warning::
This is a hacked version of plot_directive.py from Matplotlib.
It's very much subject to change!
Usage
-----
Can be used like this::
.. plot:: examples/example.py
.. plot::
import matplotlib.pyplot as plt
plt.plot([1,2,3], [4,5,6])
.. plot::
A plotting example:
>>> import matplotlib.pyplot as plt
>>> plt.plot([1,2,3], [4,5,6])
The content is interpreted as doctest formatted if it has a line starting
with ``>>>``.
The ``plot`` directive supports the options
format : {'python', 'doctest'}
Specify the format of the input
include-source : bool
Whether to display the source code. Default can be changed in conf.py
and the ``image`` directive options ``alt``, ``height``, ``width``,
``scale``, ``align``, ``class``.
Configuration options
---------------------
The plot directive has the following configuration options:
plot_include_source
Default value for the include-source option
plot_pre_code
Code that should be executed before each plot.
plot_basedir
Base directory, to which plot:: file names are relative to.
    (If None or empty, file names are relative to the directory where
the file containing the directive is.)
plot_formats
File formats to generate. List of tuples or strings::
[(suffix, dpi), suffix, ...]
that determine the file format and the DPI. For entries whose
DPI was omitted, sensible defaults are chosen.
plot_html_show_formats
Whether to show links to the files in HTML.
TODO
----
* Refactor Latex output; now it's plain images, but it would be nice
to make them appear side-by-side, or in floats.
"""
import sys, os, glob, shutil, imp, warnings, cStringIO, re, textwrap, traceback
import sphinx
import warnings
warnings.warn("A plot_directive module is also available under "
"matplotlib.sphinxext; expect this numpydoc.plot_directive "
"module to be deprecated after relevant features have been "
"integrated there.",
FutureWarning, stacklevel=2)
#------------------------------------------------------------------------------
# Registration hook
#------------------------------------------------------------------------------
def setup(app):
setup.app = app
setup.config = app.config
setup.confdir = app.confdir
app.add_config_value('plot_pre_code', '', True)
app.add_config_value('plot_include_source', False, True)
app.add_config_value('plot_formats', ['png', 'hires.png', 'pdf'], True)
app.add_config_value('plot_basedir', None, True)
app.add_config_value('plot_html_show_formats', True, True)
app.add_directive('plot', plot_directive, True, (0, 1, False),
**plot_directive_options)
#------------------------------------------------------------------------------
# plot:: directive
#------------------------------------------------------------------------------
from docutils.parsers.rst import directives
from docutils import nodes
def plot_directive(name, arguments, options, content, lineno,
content_offset, block_text, state, state_machine):
return run(arguments, content, options, state_machine, state, lineno)
plot_directive.__doc__ = __doc__
def _option_boolean(arg):
if not arg or not arg.strip():
# no argument given, assume used as a flag
return True
elif arg.strip().lower() in ('no', '0', 'false'):
return False
elif arg.strip().lower() in ('yes', '1', 'true'):
return True
else:
raise ValueError('"%s" unknown boolean' % arg)
def _option_format(arg):
    return directives.choice(arg, ('python', 'doctest'))
def _option_align(arg):
return directives.choice(arg, ("top", "middle", "bottom", "left", "center",
"right"))
plot_directive_options = {'alt': directives.unchanged,
'height': directives.length_or_unitless,
'width': directives.length_or_percentage_or_unitless,
'scale': directives.nonnegative_int,
'align': _option_align,
'class': directives.class_option,
'include-source': _option_boolean,
'format': _option_format,
}
#------------------------------------------------------------------------------
# Generating output
#------------------------------------------------------------------------------
from docutils import nodes, utils
try:
# Sphinx depends on either Jinja or Jinja2
import jinja2
def format_template(template, **kw):
return jinja2.Template(template).render(**kw)
except ImportError:
import jinja
def format_template(template, **kw):
return jinja.from_string(template, **kw)
TEMPLATE = """
{{ source_code }}
{{ only_html }}
{% if source_link or (html_show_formats and not multi_image) %}
(
{%- if source_link -%}
`Source code <{{ source_link }}>`__
{%- endif -%}
{%- if html_show_formats and not multi_image -%}
{%- for img in images -%}
{%- for fmt in img.formats -%}
{%- if source_link or not loop.first -%}, {% endif -%}
`{{ fmt }} <{{ dest_dir }}/{{ img.basename }}.{{ fmt }}>`__
{%- endfor -%}
{%- endfor -%}
{%- endif -%}
)
{% endif %}
{% for img in images %}
.. figure:: {{ build_dir }}/{{ img.basename }}.png
{%- for option in options %}
{{ option }}
{% endfor %}
{% if html_show_formats and multi_image -%}
(
{%- for fmt in img.formats -%}
{%- if not loop.first -%}, {% endif -%}
`{{ fmt }} <{{ dest_dir }}/{{ img.basename }}.{{ fmt }}>`__
{%- endfor -%}
)
{%- endif -%}
{% endfor %}
{{ only_latex }}
{% for img in images %}
.. image:: {{ build_dir }}/{{ img.basename }}.pdf
{% endfor %}
"""
class ImageFile(object):
def __init__(self, basename, dirname):
self.basename = basename
self.dirname = dirname
self.formats = []
def filename(self, format):
return os.path.join(self.dirname, "%s.%s" % (self.basename, format))
def filenames(self):
return [self.filename(fmt) for fmt in self.formats]
def run(arguments, content, options, state_machine, state, lineno):
if arguments and content:
raise RuntimeError("plot:: directive can't have both args and content")
document = state_machine.document
config = document.settings.env.config
options.setdefault('include-source', config.plot_include_source)
# determine input
rst_file = document.attributes['source']
rst_dir = os.path.dirname(rst_file)
if arguments:
if not config.plot_basedir:
source_file_name = os.path.join(rst_dir,
directives.uri(arguments[0]))
else:
source_file_name = os.path.join(setup.confdir, config.plot_basedir,
directives.uri(arguments[0]))
code = open(source_file_name, 'r').read()
output_base = os.path.basename(source_file_name)
else:
source_file_name = rst_file
code = textwrap.dedent("\n".join(map(str, content)))
counter = document.attributes.get('_plot_counter', 0) + 1
document.attributes['_plot_counter'] = counter
base, ext = os.path.splitext(os.path.basename(source_file_name))
output_base = '%s-%d.py' % (base, counter)
base, source_ext = os.path.splitext(output_base)
if source_ext in ('.py', '.rst', '.txt'):
output_base = base
else:
source_ext = ''
# ensure that LaTeX includegraphics doesn't choke in foo.bar.pdf filenames
output_base = output_base.replace('.', '-')
# is it in doctest format?
is_doctest = contains_doctest(code)
if options.has_key('format'):
if options['format'] == 'python':
is_doctest = False
else:
is_doctest = True
# determine output directory name fragment
source_rel_name = relpath(source_file_name, setup.confdir)
source_rel_dir = os.path.dirname(source_rel_name)
while source_rel_dir.startswith(os.path.sep):
source_rel_dir = source_rel_dir[1:]
# build_dir: where to place output files (temporarily)
build_dir = os.path.join(os.path.dirname(setup.app.doctreedir),
'plot_directive',
source_rel_dir)
if not os.path.exists(build_dir):
os.makedirs(build_dir)
# output_dir: final location in the builder's directory
dest_dir = os.path.abspath(os.path.join(setup.app.builder.outdir,
source_rel_dir))
# how to link to files from the RST file
dest_dir_link = os.path.join(relpath(setup.confdir, rst_dir),
source_rel_dir).replace(os.path.sep, '/')
build_dir_link = relpath(build_dir, rst_dir).replace(os.path.sep, '/')
source_link = dest_dir_link + '/' + output_base + source_ext
# make figures
try:
results = makefig(code, source_file_name, build_dir, output_base,
config)
errors = []
except PlotError, err:
reporter = state.memo.reporter
sm = reporter.system_message(
2, "Exception occurred in plotting %s: %s" % (output_base, err),
line=lineno)
results = [(code, [])]
errors = [sm]
# generate output restructuredtext
total_lines = []
for j, (code_piece, images) in enumerate(results):
if options['include-source']:
if is_doctest:
lines = ['']
lines += [row.rstrip() for row in code_piece.split('\n')]
else:
lines = ['.. code-block:: python', '']
lines += [' %s' % row.rstrip()
for row in code_piece.split('\n')]
source_code = "\n".join(lines)
else:
source_code = ""
opts = [':%s: %s' % (key, val) for key, val in options.items()
if key in ('alt', 'height', 'width', 'scale', 'align', 'class')]
only_html = ".. only:: html"
only_latex = ".. only:: latex"
if j == 0:
src_link = source_link
else:
src_link = None
result = format_template(
TEMPLATE,
dest_dir=dest_dir_link,
build_dir=build_dir_link,
source_link=src_link,
multi_image=len(images) > 1,
only_html=only_html,
only_latex=only_latex,
options=opts,
images=images,
source_code=source_code,
html_show_formats=config.plot_html_show_formats)
total_lines.extend(result.split("\n"))
total_lines.extend("\n")
if total_lines:
state_machine.insert_input(total_lines, source=source_file_name)
# copy image files to builder's output directory
if not os.path.exists(dest_dir):
os.makedirs(dest_dir)
for code_piece, images in results:
for img in images:
for fn in img.filenames():
shutil.copyfile(fn, os.path.join(dest_dir,
os.path.basename(fn)))
# copy script (if necessary)
if source_file_name == rst_file:
target_name = os.path.join(dest_dir, output_base + source_ext)
f = open(target_name, 'w')
f.write(unescape_doctest(code))
f.close()
return errors
#------------------------------------------------------------------------------
# Run code and capture figures
#------------------------------------------------------------------------------
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.image as image
from matplotlib import _pylab_helpers
import exceptions
def contains_doctest(text):
try:
# check if it's valid Python as-is
compile(text, '<string>', 'exec')
return False
except SyntaxError:
pass
r = re.compile(r'^\s*>>>', re.M)
m = r.search(text)
return bool(m)
def unescape_doctest(text):
"""
Extract code from a piece of text, which contains either Python code
or doctests.
"""
if not contains_doctest(text):
return text
code = ""
for line in text.split("\n"):
m = re.match(r'^\s*(>>>|\.\.\.) (.*)$', line)
if m:
code += m.group(2) + "\n"
elif line.strip():
code += "# " + line.strip() + "\n"
else:
code += "\n"
return code
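# --- Editorial sketch (not part of the original module) ---------------------
# Shows what unescape_doctest does to doctest-formatted input: prompt lines
# become code, while narrative and output lines become comments. Hypothetical
# helper, never called by the directive machinery.
def _unescape_doctest_sketch():
    text = ">>> x = 1\nThe answer:\n>>> x + 1\n2"
    return unescape_doctest(text)
    # -> 'x = 1\n# The answer:\nx + 1\n# 2\n'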
def split_code_at_show(text):
"""
Split code at plt.show()
"""
parts = []
is_doctest = contains_doctest(text)
part = []
for line in text.split("\n"):
if (not is_doctest and line.strip() == 'plt.show()') or \
(is_doctest and line.strip() == '>>> plt.show()'):
part.append(line)
parts.append("\n".join(part))
part = []
else:
part.append(line)
if "\n".join(part).strip():
parts.append("\n".join(part))
return parts
class PlotError(RuntimeError):
pass
def run_code(code, code_path, ns=None):
# Change the working directory to the directory of the example, so
# it can get at its data files, if any.
pwd = os.getcwd()
old_sys_path = list(sys.path)
if code_path is not None:
dirname = os.path.abspath(os.path.dirname(code_path))
os.chdir(dirname)
sys.path.insert(0, dirname)
# Redirect stdout
stdout = sys.stdout
sys.stdout = cStringIO.StringIO()
# Reset sys.argv
old_sys_argv = sys.argv
sys.argv = [code_path]
try:
try:
code = unescape_doctest(code)
if ns is None:
ns = {}
if not ns:
exec setup.config.plot_pre_code in ns
exec code in ns
except (Exception, SystemExit), err:
raise PlotError(traceback.format_exc())
finally:
os.chdir(pwd)
sys.argv = old_sys_argv
sys.path[:] = old_sys_path
sys.stdout = stdout
return ns
#------------------------------------------------------------------------------
# Generating figures
#------------------------------------------------------------------------------
def out_of_date(original, derived):
"""
Returns True if derivative is out-of-date wrt original,
both of which are full file paths.
"""
return (not os.path.exists(derived)
or os.stat(derived).st_mtime < os.stat(original).st_mtime)
def makefig(code, code_path, output_dir, output_base, config):
"""
Run a pyplot script *code* and save the images under *output_dir*
with file names derived from *output_base*
"""
# -- Parse format list
default_dpi = {'png': 80, 'hires.png': 200, 'pdf': 50}
formats = []
for fmt in config.plot_formats:
if isinstance(fmt, str):
formats.append((fmt, default_dpi.get(fmt, 80)))
elif type(fmt) in (tuple, list) and len(fmt)==2:
formats.append((str(fmt[0]), int(fmt[1])))
else:
raise PlotError('invalid image format "%r" in plot_formats' % fmt)
# -- Try to determine if all images already exist
code_pieces = split_code_at_show(code)
# Look for single-figure output files first
all_exists = True
img = ImageFile(output_base, output_dir)
for format, dpi in formats:
if out_of_date(code_path, img.filename(format)):
all_exists = False
break
img.formats.append(format)
if all_exists:
return [(code, [img])]
# Then look for multi-figure output files
results = []
all_exists = True
for i, code_piece in enumerate(code_pieces):
images = []
for j in xrange(1000):
img = ImageFile('%s_%02d_%02d' % (output_base, i, j), output_dir)
for format, dpi in formats:
if out_of_date(code_path, img.filename(format)):
all_exists = False
break
img.formats.append(format)
# assume that if we have one, we have them all
if not all_exists:
all_exists = (j > 0)
break
images.append(img)
if not all_exists:
break
results.append((code_piece, images))
if all_exists:
return results
# -- We didn't find the files, so build them
results = []
ns = {}
for i, code_piece in enumerate(code_pieces):
# Clear between runs
plt.close('all')
# Run code
run_code(code_piece, code_path, ns)
# Collect images
images = []
fig_managers = _pylab_helpers.Gcf.get_all_fig_managers()
for j, figman in enumerate(fig_managers):
if len(fig_managers) == 1 and len(code_pieces) == 1:
img = ImageFile(output_base, output_dir)
else:
img = ImageFile("%s_%02d_%02d" % (output_base, i, j),
output_dir)
images.append(img)
for format, dpi in formats:
try:
figman.canvas.figure.savefig(img.filename(format), dpi=dpi)
except exceptions.BaseException, err:
raise PlotError(traceback.format_exc())
img.formats.append(format)
# Results
results.append((code_piece, images))
return results
#------------------------------------------------------------------------------
# Relative pathnames
#------------------------------------------------------------------------------
try:
from os.path import relpath
except ImportError:
def relpath(target, base=os.curdir):
"""
Return a relative path to the target from either the current
dir or an optional base dir. Base can be a directory
specified either as absolute or relative to current dir.
"""
if not os.path.exists(target):
raise OSError, 'Target does not exist: '+target
if not os.path.isdir(base):
raise OSError, 'Base is not a directory or does not exist: '+base
base_list = (os.path.abspath(base)).split(os.sep)
target_list = (os.path.abspath(target)).split(os.sep)
# On the windows platform the target may be on a completely
# different drive from the base.
if os.name in ['nt','dos','os2'] and base_list[0] <> target_list[0]:
raise OSError, 'Target is on a different drive to base. Target: '+target_list[0].upper()+', base: '+base_list[0].upper()
# Starting from the filepath root, work out how much of the
# filepath is shared by base and target.
for i in range(min(len(base_list), len(target_list))):
if base_list[i] <> target_list[i]: break
else:
# If we broke out of the loop, i is pointing to the first
# differing path elements. If we didn't break out of the
# loop, i is pointing to identical path elements.
# Increment i so that in all cases it points to the first
# differing path elements.
i+=1
rel_list = [os.pardir] * (len(base_list)-i) + target_list[i:]
return os.path.join(*rel_list)
| apache-2.0 |
alexsavio/scikit-learn | sklearn/mixture/tests/test_gmm.py | 11 | 20915 | # Important note for the deprecation cleaning of 0.20 :
# All the functions and classes of this file have been deprecated in 0.18.
# When you remove this file please remove the related files
# - 'sklearn/mixture/dpgmm.py'
# - 'sklearn/mixture/gmm.py'
# - 'sklearn/mixture/test_dpgmm.py'
import unittest
import copy
import sys
import numpy as np
from numpy.testing import (assert_array_equal, assert_array_almost_equal,
assert_raises)
from scipy import stats
from sklearn import mixture
from sklearn.datasets.samples_generator import make_spd_matrix
from sklearn.utils.testing import (assert_true, assert_greater,
assert_raise_message, assert_warns_message,
ignore_warnings)
from sklearn.metrics.cluster import adjusted_rand_score
from sklearn.externals.six.moves import cStringIO as StringIO
rng = np.random.RandomState(0)
def test_sample_gaussian():
# Test sample generation from mixture.sample_gaussian where covariance
# is diagonal, spherical and full
n_features, n_samples = 2, 300
axis = 1
mu = rng.randint(10) * rng.rand(n_features)
cv = (rng.rand(n_features) + 1.0) ** 2
samples = mixture.sample_gaussian(
mu, cv, covariance_type='diag', n_samples=n_samples)
assert_true(np.allclose(samples.mean(axis), mu, atol=1.3))
assert_true(np.allclose(samples.var(axis), cv, atol=1.5))
# the same for spherical covariances
cv = (rng.rand() + 1.0) ** 2
samples = mixture.sample_gaussian(
mu, cv, covariance_type='spherical', n_samples=n_samples)
assert_true(np.allclose(samples.mean(axis), mu, atol=1.5))
assert_true(np.allclose(
samples.var(axis), np.repeat(cv, n_features), atol=1.5))
# and for full covariances
A = rng.randn(n_features, n_features)
cv = np.dot(A.T, A) + np.eye(n_features)
samples = mixture.sample_gaussian(
mu, cv, covariance_type='full', n_samples=n_samples)
assert_true(np.allclose(samples.mean(axis), mu, atol=1.3))
assert_true(np.allclose(np.cov(samples), cv, atol=2.5))
# Numerical stability check: in SciPy 0.12.0 at least, eigh may return
# tiny negative values in its second return value.
from sklearn.mixture import sample_gaussian
x = sample_gaussian([0, 0], [[4, 3], [1, .1]],
covariance_type='full', random_state=42)
assert_true(np.isfinite(x).all())
def _naive_lmvnpdf_diag(X, mu, cv):
# slow and naive implementation of lmvnpdf
ref = np.empty((len(X), len(mu)))
stds = np.sqrt(cv)
for i, (m, std) in enumerate(zip(mu, stds)):
ref[:, i] = np.log(stats.norm.pdf(X, m, std)).sum(axis=1)
return ref
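# --- Editorial sketch (not part of the original test module) ----------------
# The closed form behind _naive_lmvnpdf_diag for one sample x, mean m and
# diagonal variances cv:
#     log N(x | m, diag(cv)) = -0.5 * sum(log(2*pi*cv) + (x - m)**2 / cv)
# Hypothetical helper, kept private so the test runner ignores it.
def _diag_log_density_closed_form_sketch():
    x = np.array([0.5, -1.0])
    m = np.array([0.0, 0.0])
    cv = np.array([1.0, 4.0])
    closed_form = -0.5 * np.sum(np.log(2 * np.pi * cv) + (x - m) ** 2 / cv)
    reference = np.log(stats.norm.pdf(x, m, np.sqrt(cv))).sum()
    return np.allclose(closed_form, reference)  # True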
def test_lmvnpdf_diag():
# test a slow and naive implementation of lmvnpdf and
# compare it to the vectorized version (mixture.lmvnpdf) to test
# for correctness
n_features, n_components, n_samples = 2, 3, 10
mu = rng.randint(10) * rng.rand(n_components, n_features)
cv = (rng.rand(n_components, n_features) + 1.0) ** 2
X = rng.randint(10) * rng.rand(n_samples, n_features)
ref = _naive_lmvnpdf_diag(X, mu, cv)
lpr = assert_warns_message(DeprecationWarning, "The function"
" log_multivariate_normal_density is "
"deprecated in 0.18 and will be removed in 0.20.",
mixture.log_multivariate_normal_density,
X, mu, cv, 'diag')
assert_array_almost_equal(lpr, ref)
def test_lmvnpdf_spherical():
n_features, n_components, n_samples = 2, 3, 10
mu = rng.randint(10) * rng.rand(n_components, n_features)
spherecv = rng.rand(n_components, 1) ** 2 + 1
X = rng.randint(10) * rng.rand(n_samples, n_features)
cv = np.tile(spherecv, (n_features, 1))
reference = _naive_lmvnpdf_diag(X, mu, cv)
lpr = assert_warns_message(DeprecationWarning, "The function"
" log_multivariate_normal_density is "
"deprecated in 0.18 and will be removed in 0.20.",
mixture.log_multivariate_normal_density,
X, mu, spherecv, 'spherical')
assert_array_almost_equal(lpr, reference)
def test_lmvnpdf_full():
n_features, n_components, n_samples = 2, 3, 10
mu = rng.randint(10) * rng.rand(n_components, n_features)
cv = (rng.rand(n_components, n_features) + 1.0) ** 2
X = rng.randint(10) * rng.rand(n_samples, n_features)
fullcv = np.array([np.diag(x) for x in cv])
reference = _naive_lmvnpdf_diag(X, mu, cv)
lpr = assert_warns_message(DeprecationWarning, "The function"
" log_multivariate_normal_density is "
"deprecated in 0.18 and will be removed in 0.20.",
mixture.log_multivariate_normal_density,
X, mu, fullcv, 'full')
assert_array_almost_equal(lpr, reference)
def test_lvmpdf_full_cv_non_positive_definite():
n_features, n_samples = 2, 10
rng = np.random.RandomState(0)
X = rng.randint(10) * rng.rand(n_samples, n_features)
mu = np.mean(X, 0)
cv = np.array([[[-1, 0], [0, 1]]])
expected_message = "'covars' must be symmetric, positive-definite"
assert_raise_message(ValueError, expected_message,
mixture.log_multivariate_normal_density,
X, mu, cv, 'full')
# This function tests the deprecated old GMM class
@ignore_warnings(category=DeprecationWarning)
def test_GMM_attributes():
n_components, n_features = 10, 4
covariance_type = 'diag'
g = mixture.GMM(n_components, covariance_type, random_state=rng)
weights = rng.rand(n_components)
weights = weights / weights.sum()
means = rng.randint(-20, 20, (n_components, n_features))
assert_true(g.n_components == n_components)
assert_true(g.covariance_type == covariance_type)
g.weights_ = weights
assert_array_almost_equal(g.weights_, weights)
g.means_ = means
assert_array_almost_equal(g.means_, means)
covars = (0.1 + 2 * rng.rand(n_components, n_features)) ** 2
g.covars_ = covars
assert_array_almost_equal(g.covars_, covars)
assert_raises(ValueError, g._set_covars, [])
assert_raises(ValueError, g._set_covars,
np.zeros((n_components - 2, n_features)))
assert_raises(ValueError, mixture.GMM, n_components=20,
covariance_type='badcovariance_type')
class GMMTester():
do_test_eval = True
def _setUp(self):
self.n_components = 10
self.n_features = 4
self.weights = rng.rand(self.n_components)
self.weights = self.weights / self.weights.sum()
self.means = rng.randint(-20, 20, (self.n_components, self.n_features))
self.threshold = -0.5
self.I = np.eye(self.n_features)
self.covars = {
'spherical': (0.1 + 2 * rng.rand(self.n_components,
self.n_features)) ** 2,
'tied': (make_spd_matrix(self.n_features, random_state=0)
+ 5 * self.I),
'diag': (0.1 + 2 * rng.rand(self.n_components,
self.n_features)) ** 2,
'full': np.array([make_spd_matrix(self.n_features, random_state=0)
+ 5 * self.I for x in range(self.n_components)])}
# This function tests the deprecated old GMM class
@ignore_warnings(category=DeprecationWarning)
def test_eval(self):
if not self.do_test_eval:
return # DPGMM does not support setting the means and
        # covariances before fitting. There is no way of fixing this
# due to the variational parameters being more expressive than
# covariance matrices
g = self.model(n_components=self.n_components,
covariance_type=self.covariance_type, random_state=rng)
# Make sure the means are far apart so responsibilities.argmax()
# picks the actual component used to generate the observations.
g.means_ = 20 * self.means
g.covars_ = self.covars[self.covariance_type]
g.weights_ = self.weights
gaussidx = np.repeat(np.arange(self.n_components), 5)
n_samples = len(gaussidx)
X = rng.randn(n_samples, self.n_features) + g.means_[gaussidx]
with ignore_warnings(category=DeprecationWarning):
ll, responsibilities = g.score_samples(X)
self.assertEqual(len(ll), n_samples)
self.assertEqual(responsibilities.shape,
(n_samples, self.n_components))
assert_array_almost_equal(responsibilities.sum(axis=1),
np.ones(n_samples))
assert_array_equal(responsibilities.argmax(axis=1), gaussidx)
# This function tests the deprecated old GMM class
@ignore_warnings(category=DeprecationWarning)
def test_sample(self, n=100):
g = self.model(n_components=self.n_components,
covariance_type=self.covariance_type,
random_state=rng)
# Make sure the means are far apart so responsibilities.argmax()
# picks the actual component used to generate the observations.
g.means_ = 20 * self.means
g.covars_ = np.maximum(self.covars[self.covariance_type], 0.1)
g.weights_ = self.weights
with ignore_warnings(category=DeprecationWarning):
samples = g.sample(n)
self.assertEqual(samples.shape, (n, self.n_features))
# This function tests the deprecated old GMM class
@ignore_warnings(category=DeprecationWarning)
def test_train(self, params='wmc'):
g = mixture.GMM(n_components=self.n_components,
covariance_type=self.covariance_type)
with ignore_warnings(category=DeprecationWarning):
g.weights_ = self.weights
g.means_ = self.means
g.covars_ = 20 * self.covars[self.covariance_type]
# Create a training set by sampling from the predefined distribution.
with ignore_warnings(category=DeprecationWarning):
X = g.sample(n_samples=100)
g = self.model(n_components=self.n_components,
covariance_type=self.covariance_type,
random_state=rng, min_covar=1e-1,
n_iter=1, init_params=params)
g.fit(X)
# Do one training iteration at a time so we can keep track of
# the log likelihood to make sure that it increases after each
# iteration.
trainll = []
with ignore_warnings(category=DeprecationWarning):
for _ in range(5):
g.params = params
g.init_params = ''
g.fit(X)
trainll.append(self.score(g, X))
g.n_iter = 10
g.init_params = ''
g.params = params
g.fit(X) # finish fitting
# Note that the log likelihood will sometimes decrease by a
# very small amount after it has more or less converged due to
# the addition of min_covar to the covariance (to prevent
# underflow). This is why the threshold is set to -0.5
# instead of 0.
with ignore_warnings(category=DeprecationWarning):
delta_min = np.diff(trainll).min()
self.assertTrue(
delta_min > self.threshold,
"The min nll increase is %f which is lower than the admissible"
" threshold of %f, for model %s. The likelihoods are %s."
% (delta_min, self.threshold, self.covariance_type, trainll))
# This function tests the deprecated old GMM class
@ignore_warnings(category=DeprecationWarning)
def test_train_degenerate(self, params='wmc'):
# Train on degenerate data with 0 in some dimensions
# Create a training set by sampling from the predefined
# distribution.
X = rng.randn(100, self.n_features)
X.T[1:] = 0
g = self.model(n_components=2,
covariance_type=self.covariance_type,
random_state=rng, min_covar=1e-3, n_iter=5,
init_params=params)
with ignore_warnings(category=DeprecationWarning):
g.fit(X)
trainll = g.score(X)
self.assertTrue(np.sum(np.abs(trainll / 100 / X.shape[1])) < 5)
# This function tests the deprecated old GMM class
@ignore_warnings(category=DeprecationWarning)
def test_train_1d(self, params='wmc'):
# Train on 1-D data
# Create a training set by sampling from the predefined
# distribution.
X = rng.randn(100, 1)
# X.T[1:] = 0
g = self.model(n_components=2,
covariance_type=self.covariance_type,
random_state=rng, min_covar=1e-7, n_iter=5,
init_params=params)
with ignore_warnings(category=DeprecationWarning):
g.fit(X)
trainll = g.score(X)
if isinstance(g, mixture.dpgmm._DPGMMBase):
self.assertTrue(np.sum(np.abs(trainll / 100)) < 5)
else:
self.assertTrue(np.sum(np.abs(trainll / 100)) < 2)
# This function tests the deprecated old GMM class
@ignore_warnings(category=DeprecationWarning)
def score(self, g, X):
with ignore_warnings(category=DeprecationWarning):
return g.score(X).sum()
class TestGMMWithSphericalCovars(unittest.TestCase, GMMTester):
covariance_type = 'spherical'
model = mixture.GMM
setUp = GMMTester._setUp
class TestGMMWithDiagonalCovars(unittest.TestCase, GMMTester):
covariance_type = 'diag'
model = mixture.GMM
setUp = GMMTester._setUp
class TestGMMWithTiedCovars(unittest.TestCase, GMMTester):
covariance_type = 'tied'
model = mixture.GMM
setUp = GMMTester._setUp
class TestGMMWithFullCovars(unittest.TestCase, GMMTester):
covariance_type = 'full'
model = mixture.GMM
setUp = GMMTester._setUp
# This function tests the deprecated old GMM class
@ignore_warnings(category=DeprecationWarning)
def test_multiple_init():
    # Test that multiple inits do not perform much worse than a single one
X = rng.randn(30, 5)
X[:10] += 2
g = mixture.GMM(n_components=2, covariance_type='spherical',
random_state=rng, min_covar=1e-7, n_iter=5)
with ignore_warnings(category=DeprecationWarning):
train1 = g.fit(X).score(X).sum()
g.n_init = 5
train2 = g.fit(X).score(X).sum()
assert_true(train2 >= train1 - 1.e-2)
# This function tests the deprecated old GMM class
@ignore_warnings(category=DeprecationWarning)
def test_n_parameters():
n_samples, n_dim, n_components = 7, 5, 2
X = rng.randn(n_samples, n_dim)
n_params = {'spherical': 13, 'diag': 21, 'tied': 26, 'full': 41}
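    # For n_components=2 and n_dim=5 these counts are: (k - 1) = 1 weights,
    # k * d = 10 means, plus the covariance parameters: k = 2 (spherical),
    # k * d = 10 (diag), d * (d + 1) / 2 = 15 (tied) or
    # k * d * (d + 1) / 2 = 30 (full).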
for cv_type in ['full', 'tied', 'diag', 'spherical']:
with ignore_warnings(category=DeprecationWarning):
g = mixture.GMM(n_components=n_components, covariance_type=cv_type,
random_state=rng, min_covar=1e-7, n_iter=1)
g.fit(X)
assert_true(g._n_parameters() == n_params[cv_type])
# This function tests the deprecated old GMM class
@ignore_warnings(category=DeprecationWarning)
def test_1d_1component():
    # Test that all of the covariance_types return the same BIC score for
# 1-dimensional, 1 component fits.
n_samples, n_dim, n_components = 100, 1, 1
X = rng.randn(n_samples, n_dim)
g_full = mixture.GMM(n_components=n_components, covariance_type='full',
random_state=rng, min_covar=1e-7, n_iter=1)
with ignore_warnings(category=DeprecationWarning):
g_full.fit(X)
g_full_bic = g_full.bic(X)
for cv_type in ['tied', 'diag', 'spherical']:
g = mixture.GMM(n_components=n_components, covariance_type=cv_type,
random_state=rng, min_covar=1e-7, n_iter=1)
g.fit(X)
assert_array_almost_equal(g.bic(X), g_full_bic)
def assert_fit_predict_correct(model, X):
model2 = copy.deepcopy(model)
predictions_1 = model.fit(X).predict(X)
predictions_2 = model2.fit_predict(X)
assert adjusted_rand_score(predictions_1, predictions_2) == 1.0
# This function tests the deprecated old GMM class
@ignore_warnings(category=DeprecationWarning)
def test_fit_predict():
"""
test that gmm.fit_predict is equivalent to gmm.fit + gmm.predict
"""
lrng = np.random.RandomState(101)
n_samples, n_dim, n_comps = 100, 2, 2
mu = np.array([[8, 8]])
component_0 = lrng.randn(n_samples, n_dim)
component_1 = lrng.randn(n_samples, n_dim) + mu
X = np.vstack((component_0, component_1))
for m_constructor in (mixture.GMM, mixture.VBGMM, mixture.DPGMM):
model = m_constructor(n_components=n_comps, covariance_type='full',
min_covar=1e-7, n_iter=5,
random_state=np.random.RandomState(0))
assert_fit_predict_correct(model, X)
model = mixture.GMM(n_components=n_comps, n_iter=0)
z = model.fit_predict(X)
assert np.all(z == 0), "Quick Initialization Failed!"
# This function tests the deprecated old GMM class
@ignore_warnings(category=DeprecationWarning)
def test_aic():
# Test the aic and bic criteria
n_samples, n_dim, n_components = 50, 3, 2
X = rng.randn(n_samples, n_dim)
SGH = 0.5 * (X.var() + np.log(2 * np.pi)) # standard gaussian entropy
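    # AIC = -2 * log-likelihood + 2 * n_parameters and
    # BIC = -2 * log-likelihood + log(n_samples) * n_parameters; for roughly
    # standard-normal data the negative log-likelihood is close to
    # n_samples * n_dim * SGH, which gives the reference values below.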
for cv_type in ['full', 'tied', 'diag', 'spherical']:
g = mixture.GMM(n_components=n_components, covariance_type=cv_type,
random_state=rng, min_covar=1e-7)
g.fit(X)
aic = 2 * n_samples * SGH * n_dim + 2 * g._n_parameters()
bic = (2 * n_samples * SGH * n_dim +
np.log(n_samples) * g._n_parameters())
bound = n_dim * 3. / np.sqrt(n_samples)
assert_true(np.abs(g.aic(X) - aic) / n_samples < bound)
assert_true(np.abs(g.bic(X) - bic) / n_samples < bound)
# This function tests the deprecated old GMM class
@ignore_warnings(category=DeprecationWarning)
def check_positive_definite_covars(covariance_type):
r"""Test that covariance matrices do not become non positive definite
Due to the accumulation of round-off errors, the computation of the
covariance matrices during the learning phase could lead to non-positive
definite covariance matrices. Namely the use of the formula:
.. math:: C = (\sum_i w_i x_i x_i^T) - \mu \mu^T
instead of:
.. math:: C = \sum_i w_i (x_i - \mu)(x_i - \mu)^T
    while mathematically equivalent, was observed to raise a ``LinAlgError``
    exception when computing a ``GMM`` with full covariance matrices and
    fixed mean.
This function ensures that some later optimization will not introduce the
problem again.
"""
rng = np.random.RandomState(1)
# we build a dataset with 2 2d component. The components are unbalanced
# (respective weights 0.9 and 0.1)
X = rng.randn(100, 2)
X[-10:] += (3, 3) # Shift the 10 last points
gmm = mixture.GMM(2, params="wc", covariance_type=covariance_type,
min_covar=1e-3)
# This is a non-regression test for issue #2640. The following call used
# to trigger:
# numpy.linalg.linalg.LinAlgError: 2-th leading minor not positive definite
gmm.fit(X)
if covariance_type == "diag" or covariance_type == "spherical":
assert_greater(gmm.covars_.min(), 0)
else:
if covariance_type == "tied":
covs = [gmm.covars_]
else:
covs = gmm.covars_
for c in covs:
assert_greater(np.linalg.det(c), 0)
def test_positive_definite_covars():
# Check positive definiteness for all covariance types
for covariance_type in ["full", "tied", "diag", "spherical"]:
yield check_positive_definite_covars, covariance_type
# This function tests the deprecated old GMM class
@ignore_warnings(category=DeprecationWarning)
def test_verbose_first_level():
# Create sample data
X = rng.randn(30, 5)
X[:10] += 2
g = mixture.GMM(n_components=2, n_init=2, verbose=1)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
g.fit(X)
finally:
sys.stdout = old_stdout
# This function tests the deprecated old GMM class
@ignore_warnings(category=DeprecationWarning)
def test_verbose_second_level():
# Create sample data
X = rng.randn(30, 5)
X[:10] += 2
g = mixture.GMM(n_components=2, n_init=2, verbose=2)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
g.fit(X)
finally:
sys.stdout = old_stdout
| bsd-3-clause |
jmetzen/scikit-learn | sklearn/decomposition/tests/test_sparse_pca.py | 160 | 6028 | # Author: Vlad Niculae
# License: BSD 3 clause
import sys
import numpy as np
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import if_safe_multiprocessing_with_blas
from sklearn.decomposition import SparsePCA, MiniBatchSparsePCA
from sklearn.utils import check_random_state
def generate_toy_data(n_components, n_samples, image_size, random_state=None):
n_features = image_size[0] * image_size[1]
rng = check_random_state(random_state)
U = rng.randn(n_samples, n_components)
V = rng.randn(n_components, n_features)
centers = [(3, 3), (6, 7), (8, 1)]
sz = [1, 2, 1]
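    # Each dictionary atom V[k] is a small square patch of ones centered at
    # centers[k] with half-width sz[k] on the image grid, so the components
    # are spatially localized (and hence sparse).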
for k in range(n_components):
img = np.zeros(image_size)
xmin, xmax = centers[k][0] - sz[k], centers[k][0] + sz[k]
ymin, ymax = centers[k][1] - sz[k], centers[k][1] + sz[k]
img[xmin:xmax][:, ymin:ymax] = 1.0
V[k, :] = img.ravel()
# Y is defined by : Y = UV + noise
Y = np.dot(U, V)
Y += 0.1 * rng.randn(Y.shape[0], Y.shape[1]) # Add noise
return Y, U, V
# SparsePCA can be a bit slow. To avoid having test times go up, we
# test different aspects of the code in the same test
def test_correct_shapes():
rng = np.random.RandomState(0)
X = rng.randn(12, 10)
spca = SparsePCA(n_components=8, random_state=rng)
U = spca.fit_transform(X)
assert_equal(spca.components_.shape, (8, 10))
assert_equal(U.shape, (12, 8))
# test overcomplete decomposition
spca = SparsePCA(n_components=13, random_state=rng)
U = spca.fit_transform(X)
assert_equal(spca.components_.shape, (13, 10))
assert_equal(U.shape, (12, 13))
def test_fit_transform():
alpha = 1
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) # wide array
spca_lars = SparsePCA(n_components=3, method='lars', alpha=alpha,
random_state=0)
spca_lars.fit(Y)
# Test that CD gives similar results
spca_lasso = SparsePCA(n_components=3, method='cd', random_state=0,
alpha=alpha)
spca_lasso.fit(Y)
assert_array_almost_equal(spca_lasso.components_, spca_lars.components_)
@if_safe_multiprocessing_with_blas
def test_fit_transform_parallel():
alpha = 1
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) # wide array
spca_lars = SparsePCA(n_components=3, method='lars', alpha=alpha,
random_state=0)
spca_lars.fit(Y)
U1 = spca_lars.transform(Y)
# Test multiple CPUs
spca = SparsePCA(n_components=3, n_jobs=2, method='lars', alpha=alpha,
random_state=0).fit(Y)
U2 = spca.transform(Y)
assert_true(not np.all(spca_lars.components_ == 0))
assert_array_almost_equal(U1, U2)
def test_transform_nan():
    # Test that SparsePCA won't return NaN when a feature is 0 in all
    # samples.
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) # wide array
Y[:, 0] = 0
estimator = SparsePCA(n_components=8)
assert_false(np.any(np.isnan(estimator.fit_transform(Y))))
def test_fit_transform_tall():
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 65, (8, 8), random_state=rng) # tall array
spca_lars = SparsePCA(n_components=3, method='lars',
random_state=rng)
U1 = spca_lars.fit_transform(Y)
spca_lasso = SparsePCA(n_components=3, method='cd', random_state=rng)
U2 = spca_lasso.fit(Y).transform(Y)
assert_array_almost_equal(U1, U2)
def test_initialization():
rng = np.random.RandomState(0)
U_init = rng.randn(5, 3)
V_init = rng.randn(3, 4)
model = SparsePCA(n_components=3, U_init=U_init, V_init=V_init, max_iter=0,
random_state=rng)
model.fit(rng.randn(5, 4))
assert_array_equal(model.components_, V_init)
def test_mini_batch_correct_shapes():
rng = np.random.RandomState(0)
X = rng.randn(12, 10)
pca = MiniBatchSparsePCA(n_components=8, random_state=rng)
U = pca.fit_transform(X)
assert_equal(pca.components_.shape, (8, 10))
assert_equal(U.shape, (12, 8))
# test overcomplete decomposition
pca = MiniBatchSparsePCA(n_components=13, random_state=rng)
U = pca.fit_transform(X)
assert_equal(pca.components_.shape, (13, 10))
assert_equal(U.shape, (12, 13))
def test_mini_batch_fit_transform():
raise SkipTest("skipping mini_batch_fit_transform.")
alpha = 1
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) # wide array
spca_lars = MiniBatchSparsePCA(n_components=3, random_state=0,
alpha=alpha).fit(Y)
U1 = spca_lars.transform(Y)
# Test multiple CPUs
if sys.platform == 'win32': # fake parallelism for win32
import sklearn.externals.joblib.parallel as joblib_par
_mp = joblib_par.multiprocessing
joblib_par.multiprocessing = None
try:
U2 = MiniBatchSparsePCA(n_components=3, n_jobs=2, alpha=alpha,
random_state=0).fit(Y).transform(Y)
finally:
joblib_par.multiprocessing = _mp
else: # we can efficiently use parallelism
U2 = MiniBatchSparsePCA(n_components=3, n_jobs=2, alpha=alpha,
random_state=0).fit(Y).transform(Y)
assert_true(not np.all(spca_lars.components_ == 0))
assert_array_almost_equal(U1, U2)
# Test that CD gives similar results
spca_lasso = MiniBatchSparsePCA(n_components=3, method='cd', alpha=alpha,
random_state=0).fit(Y)
assert_array_almost_equal(spca_lasso.components_, spca_lars.components_)
| bsd-3-clause |
18padx08/PPTex | PPTexEnv_x86_64/lib/python2.7/site-packages/matplotlib/streamplot.py | 11 | 19119 | """
Streamline plotting for 2D vector fields.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from six.moves import xrange
import numpy as np
import matplotlib
import matplotlib.cm as cm
import matplotlib.colors as mcolors
import matplotlib.collections as mcollections
import matplotlib.patches as patches
__all__ = ['streamplot']
def streamplot(axes, x, y, u, v, density=1, linewidth=None, color=None,
cmap=None, norm=None, arrowsize=1, arrowstyle='-|>',
minlength=0.1, transform=None, zorder=1):
"""Draws streamlines of a vector flow.
*x*, *y* : 1d arrays
an *evenly spaced* grid.
*u*, *v* : 2d arrays
x and y-velocities. Number of rows should match length of y, and
the number of columns should match x.
*density* : float or 2-tuple
Controls the closeness of streamlines. When `density = 1`, the domain
is divided into a 30x30 grid---*density* linearly scales this grid.
Each cell in the grid can have, at most, one traversing streamline.
For different densities in each direction, use [density_x, density_y].
*linewidth* : numeric or 2d array
vary linewidth when given a 2d array with the same shape as velocities.
*color* : matplotlib color code, or 2d array
Streamline color. When given an array with the same shape as
velocities, *color* values are converted to colors using *cmap*.
*cmap* : :class:`~matplotlib.colors.Colormap`
Colormap used to plot streamlines and arrows. Only necessary when using
an array input for *color*.
*norm* : :class:`~matplotlib.colors.Normalize`
Normalize object used to scale luminance data to 0, 1. If None, stretch
(min, max) to (0, 1). Only necessary when *color* is an array.
*arrowsize* : float
        Scaling factor for the arrow size.
*arrowstyle* : str
Arrow style specification.
See :class:`~matplotlib.patches.FancyArrowPatch`.
*minlength* : float
Minimum length of streamline in axes coordinates.
*zorder* : int
        Drawing order (zorder) for the streamlines and arrows.
Returns:
*stream_container* : StreamplotSet
Container object with attributes
- lines: `matplotlib.collections.LineCollection` of streamlines
- arrows: collection of `matplotlib.patches.FancyArrowPatch`
objects representing arrows half-way along stream
lines.
This container will probably change in the future to allow changes
to the colormap, alpha, etc. for both lines and arrows, but these
changes should be backward compatible.
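    Example:
        A minimal sketch, assuming an evenly spaced grid and an existing
        :class:`~matplotlib.axes.Axes`::
            import numpy as np
            import matplotlib.pyplot as plt
            Y, X = np.mgrid[-3:3:100j, -3:3:100j]
            U = -1 - X**2 + Y
            V = 1 + X - Y**2
            fig, ax = plt.subplots()
            streamplot(ax, X[0], Y[:, 0], U, V, density=2, color=U)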
"""
grid = Grid(x, y)
mask = StreamMask(density)
dmap = DomainMap(grid, mask)
# default to data coordinates
if transform is None:
transform = axes.transData
if color is None:
color = six.next(axes._get_lines.color_cycle)
if linewidth is None:
linewidth = matplotlib.rcParams['lines.linewidth']
line_kw = {}
arrow_kw = dict(arrowstyle=arrowstyle, mutation_scale=10 * arrowsize)
use_multicolor_lines = isinstance(color, np.ndarray)
if use_multicolor_lines:
assert color.shape == grid.shape
line_colors = []
color = np.ma.masked_invalid(color)
else:
line_kw['color'] = color
arrow_kw['color'] = color
if isinstance(linewidth, np.ndarray):
assert linewidth.shape == grid.shape
line_kw['linewidth'] = []
else:
line_kw['linewidth'] = linewidth
arrow_kw['linewidth'] = linewidth
line_kw['zorder'] = zorder
arrow_kw['zorder'] = zorder
## Sanity checks.
assert u.shape == grid.shape
assert v.shape == grid.shape
u = np.ma.masked_invalid(u)
v = np.ma.masked_invalid(v)
integrate = get_integrator(u, v, dmap, minlength)
trajectories = []
for xm, ym in _gen_starting_points(mask.shape):
if mask[ym, xm] == 0:
xg, yg = dmap.mask2grid(xm, ym)
t = integrate(xg, yg)
if t is not None:
trajectories.append(t)
if use_multicolor_lines:
if norm is None:
norm = mcolors.Normalize(color.min(), color.max())
if cmap is None:
cmap = cm.get_cmap(matplotlib.rcParams['image.cmap'])
else:
cmap = cm.get_cmap(cmap)
streamlines = []
arrows = []
for t in trajectories:
tgx = np.array(t[0])
tgy = np.array(t[1])
# Rescale from grid-coordinates to data-coordinates.
tx = np.array(t[0]) * grid.dx + grid.x_origin
ty = np.array(t[1]) * grid.dy + grid.y_origin
points = np.transpose([tx, ty]).reshape(-1, 1, 2)
streamlines.extend(np.hstack([points[:-1], points[1:]]))
# Add arrows half way along each trajectory.
s = np.cumsum(np.sqrt(np.diff(tx) ** 2 + np.diff(ty) ** 2))
n = np.searchsorted(s, s[-1] / 2.)
arrow_tail = (tx[n], ty[n])
arrow_head = (np.mean(tx[n:n + 2]), np.mean(ty[n:n + 2]))
if isinstance(linewidth, np.ndarray):
line_widths = interpgrid(linewidth, tgx, tgy)[:-1]
line_kw['linewidth'].extend(line_widths)
arrow_kw['linewidth'] = line_widths[n]
if use_multicolor_lines:
color_values = interpgrid(color, tgx, tgy)[:-1]
line_colors.append(color_values)
arrow_kw['color'] = cmap(norm(color_values[n]))
p = patches.FancyArrowPatch(arrow_tail,
arrow_head,
transform=transform,
**arrow_kw)
axes.add_patch(p)
arrows.append(p)
lc = mcollections.LineCollection(streamlines,
transform=transform,
**line_kw)
if use_multicolor_lines:
lc.set_array(np.ma.hstack(line_colors))
lc.set_cmap(cmap)
lc.set_norm(norm)
axes.add_collection(lc)
axes.autoscale_view()
ac = matplotlib.collections.PatchCollection(arrows)
stream_container = StreamplotSet(lc, ac)
return stream_container
class StreamplotSet(object):
def __init__(self, lines, arrows, **kwargs):
self.lines = lines
self.arrows = arrows
# Coordinate definitions
#========================
class DomainMap(object):
"""Map representing different coordinate systems.
Coordinate definitions:
* axes-coordinates goes from 0 to 1 in the domain.
* data-coordinates are specified by the input x-y coordinates.
* grid-coordinates goes from 0 to N and 0 to M for an N x M grid,
where N and M match the shape of the input data.
* mask-coordinates goes from 0 to N and 0 to M for an N x M mask,
where N and M are user-specified to control the density of streamlines.
This class also has methods for adding trajectories to the StreamMask.
Before adding a trajectory, run `start_trajectory` to keep track of regions
crossed by a given trajectory. Later, if you decide the trajectory is bad
(e.g., if the trajectory is very short) just call `undo_trajectory`.
"""
def __init__(self, grid, mask):
self.grid = grid
self.mask = mask
## Constants for conversion between grid- and mask-coordinates
self.x_grid2mask = float(mask.nx - 1) / grid.nx
self.y_grid2mask = float(mask.ny - 1) / grid.ny
self.x_mask2grid = 1. / self.x_grid2mask
self.y_mask2grid = 1. / self.y_grid2mask
self.x_data2grid = grid.nx / grid.width
self.y_data2grid = grid.ny / grid.height
def grid2mask(self, xi, yi):
"""Return nearest space in mask-coords from given grid-coords."""
return int((xi * self.x_grid2mask) + 0.5), \
int((yi * self.y_grid2mask) + 0.5)
def mask2grid(self, xm, ym):
return xm * self.x_mask2grid, ym * self.y_mask2grid
def data2grid(self, xd, yd):
return xd * self.x_data2grid, yd * self.y_data2grid
def start_trajectory(self, xg, yg):
xm, ym = self.grid2mask(xg, yg)
self.mask._start_trajectory(xm, ym)
def reset_start_point(self, xg, yg):
xm, ym = self.grid2mask(xg, yg)
self.mask._current_xy = (xm, ym)
def update_trajectory(self, xg, yg):
if not self.grid.within_grid(xg, yg):
raise InvalidIndexError
xm, ym = self.grid2mask(xg, yg)
self.mask._update_trajectory(xm, ym)
def undo_trajectory(self):
self.mask._undo_trajectory()
class Grid(object):
"""Grid of data."""
def __init__(self, x, y):
if len(x.shape) == 2:
x_row = x[0]
assert np.allclose(x_row, x)
x = x_row
else:
assert len(x.shape) == 1
if len(y.shape) == 2:
y_col = y[:, 0]
assert np.allclose(y_col, y.T)
y = y_col
else:
assert len(y.shape) == 1
self.nx = len(x)
self.ny = len(y)
self.dx = x[1] - x[0]
self.dy = y[1] - y[0]
self.x_origin = x[0]
self.y_origin = y[0]
self.width = x[-1] - x[0]
self.height = y[-1] - y[0]
@property
def shape(self):
return self.ny, self.nx
def within_grid(self, xi, yi):
"""Return True if point is a valid index of grid."""
# Note that xi/yi can be floats; so, for example, we can't simply check
# `xi < self.nx` since `xi` can be `self.nx - 1 < xi < self.nx`
return xi >= 0 and xi <= self.nx - 1 and yi >= 0 and yi <= self.ny - 1
class StreamMask(object):
"""Mask to keep track of discrete regions crossed by streamlines.
The resolution of this grid determines the approximate spacing between
trajectories. Streamlines are only allowed to pass through zeroed cells:
When a streamline enters a cell, that cell is set to 1, and no new
streamlines are allowed to enter.
"""
def __init__(self, density):
if np.isscalar(density):
assert density > 0
self.nx = self.ny = int(30 * density)
else:
assert len(density) == 2
self.nx = int(30 * density[0])
self.ny = int(30 * density[1])
self._mask = np.zeros((self.ny, self.nx))
self.shape = self._mask.shape
self._current_xy = None
def __getitem__(self, *args):
return self._mask.__getitem__(*args)
def _start_trajectory(self, xm, ym):
"""Start recording streamline trajectory"""
self._traj = []
self._update_trajectory(xm, ym)
def _undo_trajectory(self):
"""Remove current trajectory from mask"""
for t in self._traj:
self._mask.__setitem__(t, 0)
def _update_trajectory(self, xm, ym):
"""Update current trajectory position in mask.
If the new position has already been filled, raise `InvalidIndexError`.
"""
if self._current_xy != (xm, ym):
if self[ym, xm] == 0:
self._traj.append((ym, xm))
self._mask[ym, xm] = 1
self._current_xy = (xm, ym)
else:
raise InvalidIndexError
class InvalidIndexError(Exception):
pass
class TerminateTrajectory(Exception):
pass
# Integrator definitions
#========================
def get_integrator(u, v, dmap, minlength):
# rescale velocity onto grid-coordinates for integrations.
u, v = dmap.data2grid(u, v)
# speed (path length) will be in axes-coordinates
u_ax = u / dmap.grid.nx
v_ax = v / dmap.grid.ny
speed = np.ma.sqrt(u_ax ** 2 + v_ax ** 2)
def forward_time(xi, yi):
ds_dt = interpgrid(speed, xi, yi)
if ds_dt == 0:
raise TerminateTrajectory()
dt_ds = 1. / ds_dt
ui = interpgrid(u, xi, yi)
vi = interpgrid(v, xi, yi)
return ui * dt_ds, vi * dt_ds
def backward_time(xi, yi):
dxi, dyi = forward_time(xi, yi)
return -dxi, -dyi
def integrate(x0, y0):
"""Return x, y grid-coordinates of trajectory based on starting point.
Integrate both forward and backward in time from starting point in
grid coordinates.
Integration is terminated when a trajectory reaches a domain boundary
or when it crosses into an already occupied cell in the StreamMask. The
resulting trajectory is None if it is shorter than `minlength`.
"""
dmap.start_trajectory(x0, y0)
sf, xf_traj, yf_traj = _integrate_rk12(x0, y0, dmap, forward_time)
dmap.reset_start_point(x0, y0)
sb, xb_traj, yb_traj = _integrate_rk12(x0, y0, dmap, backward_time)
# combine forward and backward trajectories
stotal = sf + sb
x_traj = xb_traj[::-1] + xf_traj[1:]
y_traj = yb_traj[::-1] + yf_traj[1:]
if stotal > minlength:
return x_traj, y_traj
else: # reject short trajectories
dmap.undo_trajectory()
return None
return integrate
def _integrate_rk12(x0, y0, dmap, f):
"""2nd-order Runge-Kutta algorithm with adaptive step size.
This method is also referred to as the improved Euler's method, or Heun's
method. This method is favored over higher-order methods because:
1. To get decent looking trajectories and to sample every mask cell
on the trajectory we need a small timestep, so a lower order
solver doesn't hurt us unless the data is *very* high resolution.
In fact, for cases where the user inputs
data smaller or of similar grid size to the mask grid, the higher
order corrections are negligible because of the very fast linear
interpolation used in `interpgrid`.
2. For high resolution input data (i.e. beyond the mask
resolution), we must reduce the timestep. Therefore, an adaptive
timestep is more suited to the problem as this would be very hard
to judge automatically otherwise.
This integrator is about 1.5 - 2x as fast as both the RK4 and RK45
solvers in most setups on my machine. I would recommend removing the
other two to keep things simple.
"""
## This error is below that needed to match the RK4 integrator. It
## is set for visual reasons -- too low and corners start
## appearing ugly and jagged. Can be tuned.
maxerror = 0.003
## This limit is important (for all integrators) to avoid the
## trajectory skipping some mask cells. We could relax this
## condition if we use the code which is commented out below to
## increment the location gradually. However, due to the efficient
## nature of the interpolation, this doesn't boost speed by much
## for quite a bit of complexity.
maxds = min(1. / dmap.mask.nx, 1. / dmap.mask.ny, 0.1)
ds = maxds
stotal = 0
xi = x0
yi = y0
xf_traj = []
yf_traj = []
while dmap.grid.within_grid(xi, yi):
xf_traj.append(xi)
yf_traj.append(yi)
try:
k1x, k1y = f(xi, yi)
k2x, k2y = f(xi + ds * k1x,
yi + ds * k1y)
except IndexError:
# Out of the domain on one of the intermediate integration steps.
# Take an Euler step to the boundary to improve neatness.
ds, xf_traj, yf_traj = _euler_step(xf_traj, yf_traj, dmap, f)
stotal += ds
break
except TerminateTrajectory:
break
dx1 = ds * k1x
dy1 = ds * k1y
dx2 = ds * 0.5 * (k1x + k2x)
dy2 = ds * 0.5 * (k1y + k2y)
nx, ny = dmap.grid.shape
# Error is normalized to the axes coordinates
error = np.sqrt(((dx2 - dx1) / nx) ** 2 + ((dy2 - dy1) / ny) ** 2)
# Only save step if within error tolerance
if error < maxerror:
xi += dx2
yi += dy2
try:
dmap.update_trajectory(xi, yi)
except InvalidIndexError:
break
if (stotal + ds) > 2:
break
stotal += ds
# recalculate stepsize based on step error
if error == 0:
ds = maxds
else:
ds = min(maxds, 0.85 * ds * (maxerror / error) ** 0.5)
return stotal, xf_traj, yf_traj
def _euler_step(xf_traj, yf_traj, dmap, f):
"""Simple Euler integration step that extends streamline to boundary."""
ny, nx = dmap.grid.shape
xi = xf_traj[-1]
yi = yf_traj[-1]
cx, cy = f(xi, yi)
if cx == 0:
dsx = np.inf
elif cx < 0:
dsx = xi / -cx
else:
dsx = (nx - 1 - xi) / cx
if cy == 0:
dsy = np.inf
elif cy < 0:
dsy = yi / -cy
else:
dsy = (ny - 1 - yi) / cy
ds = min(dsx, dsy)
xf_traj.append(xi + cx * ds)
yf_traj.append(yi + cy * ds)
return ds, xf_traj, yf_traj
# Utility functions
#========================
def interpgrid(a, xi, yi):
"""Fast 2D, linear interpolation on an integer grid"""
Ny, Nx = np.shape(a)
if isinstance(xi, np.ndarray):
x = xi.astype(np.int)
y = yi.astype(np.int)
# Check that xn, yn don't exceed max index
xn = np.clip(x + 1, 0, Nx - 1)
yn = np.clip(y + 1, 0, Ny - 1)
else:
x = np.int(xi)
y = np.int(yi)
# conditional is faster than clipping for integers
if x == (Nx - 2):
xn = x
else:
xn = x + 1
if y == (Ny - 2):
yn = y
else:
yn = y + 1
a00 = a[y, x]
a01 = a[y, xn]
a10 = a[yn, x]
a11 = a[yn, xn]
xt = xi - x
yt = yi - y
a0 = a00 * (1 - xt) + a01 * xt
a1 = a10 * (1 - xt) + a11 * xt
ai = a0 * (1 - yt) + a1 * yt
if not isinstance(xi, np.ndarray):
if np.ma.is_masked(ai):
raise TerminateTrajectory
return ai
def _gen_starting_points(shape):
"""Yield starting points for streamlines.
Trying points on the boundary first gives higher quality streamlines.
This algorithm starts with a point on the mask corner and spirals inward.
This algorithm is inefficient, but fast compared to rest of streamplot.
"""
ny, nx = shape
xfirst = 0
yfirst = 1
xlast = nx - 1
ylast = ny - 1
x, y = 0, 0
i = 0
direction = 'right'
for i in xrange(nx * ny):
yield x, y
if direction == 'right':
x += 1
if x >= xlast:
xlast -= 1
direction = 'up'
elif direction == 'up':
y += 1
if y >= ylast:
ylast -= 1
direction = 'left'
elif direction == 'left':
x -= 1
if x <= xfirst:
xfirst += 1
direction = 'down'
elif direction == 'down':
y -= 1
if y <= yfirst:
yfirst += 1
direction = 'right'
| mit |
abhishekkrthakur/scikit-learn | sklearn/metrics/metrics.py | 233 | 1262 | import warnings
warnings.warn("sklearn.metrics.metrics is deprecated and will be removed in "
"0.18. Please import from sklearn.metrics",
DeprecationWarning)
from .ranking import auc
from .ranking import average_precision_score
from .ranking import label_ranking_average_precision_score
from .ranking import precision_recall_curve
from .ranking import roc_auc_score
from .ranking import roc_curve
from .classification import accuracy_score
from .classification import classification_report
from .classification import confusion_matrix
from .classification import f1_score
from .classification import fbeta_score
from .classification import hamming_loss
from .classification import hinge_loss
from .classification import jaccard_similarity_score
from .classification import log_loss
from .classification import matthews_corrcoef
from .classification import precision_recall_fscore_support
from .classification import precision_score
from .classification import recall_score
from .classification import zero_one_loss
from .regression import explained_variance_score
from .regression import mean_absolute_error
from .regression import mean_squared_error
from .regression import median_absolute_error
from .regression import r2_score
| bsd-3-clause |
sarahgrogan/scikit-learn | sklearn/covariance/tests/test_covariance.py | 69 | 11116 | # Author: Alexandre Gramfort <[email protected]>
# Gael Varoquaux <[email protected]>
# Virgile Fritsch <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn import datasets
from sklearn.covariance import empirical_covariance, EmpiricalCovariance, \
ShrunkCovariance, shrunk_covariance, \
LedoitWolf, ledoit_wolf, ledoit_wolf_shrinkage, OAS, oas
X = datasets.load_diabetes().data
X_1d = X[:, 0]
n_samples, n_features = X.shape
def test_covariance():
# Tests Covariance module on a simple dataset.
# test covariance fit from data
cov = EmpiricalCovariance()
cov.fit(X)
emp_cov = empirical_covariance(X)
assert_array_almost_equal(emp_cov, cov.covariance_, 4)
assert_almost_equal(cov.error_norm(emp_cov), 0)
assert_almost_equal(
cov.error_norm(emp_cov, norm='spectral'), 0)
assert_almost_equal(
cov.error_norm(emp_cov, norm='frobenius'), 0)
assert_almost_equal(
cov.error_norm(emp_cov, scaling=False), 0)
assert_almost_equal(
cov.error_norm(emp_cov, squared=False), 0)
assert_raises(NotImplementedError,
cov.error_norm, emp_cov, norm='foo')
# Mahalanobis distances computation test
mahal_dist = cov.mahalanobis(X)
print(np.amin(mahal_dist), np.amax(mahal_dist))
assert(np.amin(mahal_dist) > 0)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
cov = EmpiricalCovariance()
cov.fit(X_1d)
assert_array_almost_equal(empirical_covariance(X_1d), cov.covariance_, 4)
assert_almost_equal(cov.error_norm(empirical_covariance(X_1d)), 0)
assert_almost_equal(
cov.error_norm(empirical_covariance(X_1d), norm='spectral'), 0)
# test with one sample
# Create X with 1 sample and 5 features
X_1sample = np.arange(5).reshape(1, 5)
cov = EmpiricalCovariance()
assert_warns(UserWarning, cov.fit, X_1sample)
assert_array_almost_equal(cov.covariance_,
np.zeros(shape=(5, 5), dtype=np.float64))
# test integer type
X_integer = np.asarray([[0, 1], [1, 0]])
result = np.asarray([[0.25, -0.25], [-0.25, 0.25]])
assert_array_almost_equal(empirical_covariance(X_integer), result)
# test centered case
cov = EmpiricalCovariance(assume_centered=True)
cov.fit(X)
assert_array_equal(cov.location_, np.zeros(X.shape[1]))
def test_shrunk_covariance():
# Tests ShrunkCovariance module on a simple dataset.
# compare shrunk covariance obtained from data and from MLE estimate
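    # shrunk_covariance(S, shrinkage) returns the convex combination
    # (1 - shrinkage) * S + shrinkage * (trace(S) / n_features) * Identity.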
cov = ShrunkCovariance(shrinkage=0.5)
cov.fit(X)
assert_array_almost_equal(
shrunk_covariance(empirical_covariance(X), shrinkage=0.5),
cov.covariance_, 4)
# same test with shrinkage not provided
cov = ShrunkCovariance()
cov.fit(X)
assert_array_almost_equal(
shrunk_covariance(empirical_covariance(X)), cov.covariance_, 4)
# same test with shrinkage = 0 (<==> empirical_covariance)
cov = ShrunkCovariance(shrinkage=0.)
cov.fit(X)
assert_array_almost_equal(empirical_covariance(X), cov.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
cov = ShrunkCovariance(shrinkage=0.3)
cov.fit(X_1d)
assert_array_almost_equal(empirical_covariance(X_1d), cov.covariance_, 4)
# test shrinkage coeff on a simple data set (without saving precision)
cov = ShrunkCovariance(shrinkage=0.5, store_precision=False)
cov.fit(X)
assert(cov.precision_ is None)
def test_ledoit_wolf():
# Tests LedoitWolf module on a simple dataset.
# test shrinkage coeff on a simple data set
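    # LedoitWolf chooses the shrinkage coefficient with the closed-form
    # Ledoit-Wolf formula, which minimizes the expected squared error between
    # the shrunk estimator and the true covariance.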
X_centered = X - X.mean(axis=0)
lw = LedoitWolf(assume_centered=True)
lw.fit(X_centered)
shrinkage_ = lw.shrinkage_
score_ = lw.score(X_centered)
assert_almost_equal(ledoit_wolf_shrinkage(X_centered,
assume_centered=True),
shrinkage_)
assert_almost_equal(ledoit_wolf_shrinkage(X_centered, assume_centered=True,
block_size=6),
shrinkage_)
# compare shrunk covariance obtained from data and from MLE estimate
lw_cov_from_mle, lw_shinkrage_from_mle = ledoit_wolf(X_centered,
assume_centered=True)
assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
assert_almost_equal(lw_shinkrage_from_mle, lw.shrinkage_)
# compare estimates given by LW and ShrunkCovariance
scov = ShrunkCovariance(shrinkage=lw.shrinkage_, assume_centered=True)
scov.fit(X_centered)
assert_array_almost_equal(scov.covariance_, lw.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
lw = LedoitWolf(assume_centered=True)
lw.fit(X_1d)
lw_cov_from_mle, lw_shinkrage_from_mle = ledoit_wolf(X_1d,
assume_centered=True)
assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
assert_almost_equal(lw_shinkrage_from_mle, lw.shrinkage_)
assert_array_almost_equal((X_1d ** 2).sum() / n_samples, lw.covariance_, 4)
# test shrinkage coeff on a simple data set (without saving precision)
lw = LedoitWolf(store_precision=False, assume_centered=True)
lw.fit(X_centered)
assert_almost_equal(lw.score(X_centered), score_, 4)
assert(lw.precision_ is None)
# Same tests without assuming centered data
# test shrinkage coeff on a simple data set
lw = LedoitWolf()
lw.fit(X)
assert_almost_equal(lw.shrinkage_, shrinkage_, 4)
assert_almost_equal(lw.shrinkage_, ledoit_wolf_shrinkage(X))
assert_almost_equal(lw.shrinkage_, ledoit_wolf(X)[1])
assert_almost_equal(lw.score(X), score_, 4)
# compare shrunk covariance obtained from data and from MLE estimate
lw_cov_from_mle, lw_shinkrage_from_mle = ledoit_wolf(X)
assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
assert_almost_equal(lw_shinkrage_from_mle, lw.shrinkage_)
# compare estimates given by LW and ShrunkCovariance
scov = ShrunkCovariance(shrinkage=lw.shrinkage_)
scov.fit(X)
assert_array_almost_equal(scov.covariance_, lw.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
lw = LedoitWolf()
lw.fit(X_1d)
lw_cov_from_mle, lw_shinkrage_from_mle = ledoit_wolf(X_1d)
assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
assert_almost_equal(lw_shinkrage_from_mle, lw.shrinkage_)
assert_array_almost_equal(empirical_covariance(X_1d), lw.covariance_, 4)
# test with one sample
# warning should be raised when using only 1 sample
X_1sample = np.arange(5).reshape(1, 5)
lw = LedoitWolf()
assert_warns(UserWarning, lw.fit, X_1sample)
assert_array_almost_equal(lw.covariance_,
np.zeros(shape=(5, 5), dtype=np.float64))
# test shrinkage coeff on a simple data set (without saving precision)
lw = LedoitWolf(store_precision=False)
lw.fit(X)
assert_almost_equal(lw.score(X), score_, 4)
assert(lw.precision_ is None)
def test_ledoit_wolf_large():
# test that ledoit_wolf doesn't error on data that is wider than block_size
rng = np.random.RandomState(0)
# use a number of features that is larger than the block-size
X = rng.normal(size=(10, 20))
lw = LedoitWolf(block_size=10).fit(X)
# check that covariance is about diagonal (random normal noise)
assert_almost_equal(lw.covariance_, np.eye(20), 0)
cov = lw.covariance_
# check that the result is consistent with not splitting data into blocks.
lw = LedoitWolf(block_size=25).fit(X)
assert_almost_equal(lw.covariance_, cov)
def test_oas():
# Tests OAS module on a simple dataset.
# test shrinkage coeff on a simple data set
X_centered = X - X.mean(axis=0)
oa = OAS(assume_centered=True)
oa.fit(X_centered)
shrinkage_ = oa.shrinkage_
score_ = oa.score(X_centered)
# compare shrunk covariance obtained from data and from MLE estimate
oa_cov_from_mle, oa_shinkrage_from_mle = oas(X_centered,
assume_centered=True)
assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
assert_almost_equal(oa_shinkrage_from_mle, oa.shrinkage_)
# compare estimates given by OAS and ShrunkCovariance
scov = ShrunkCovariance(shrinkage=oa.shrinkage_, assume_centered=True)
scov.fit(X_centered)
assert_array_almost_equal(scov.covariance_, oa.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0:1]
oa = OAS(assume_centered=True)
oa.fit(X_1d)
oa_cov_from_mle, oa_shinkrage_from_mle = oas(X_1d, assume_centered=True)
assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
assert_almost_equal(oa_shinkrage_from_mle, oa.shrinkage_)
assert_array_almost_equal((X_1d ** 2).sum() / n_samples, oa.covariance_, 4)
# test shrinkage coeff on a simple data set (without saving precision)
oa = OAS(store_precision=False, assume_centered=True)
oa.fit(X_centered)
assert_almost_equal(oa.score(X_centered), score_, 4)
assert(oa.precision_ is None)
    # Same tests without assuming centered data
# test shrinkage coeff on a simple data set
oa = OAS()
oa.fit(X)
assert_almost_equal(oa.shrinkage_, shrinkage_, 4)
assert_almost_equal(oa.score(X), score_, 4)
# compare shrunk covariance obtained from data and from MLE estimate
oa_cov_from_mle, oa_shinkrage_from_mle = oas(X)
assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
assert_almost_equal(oa_shinkrage_from_mle, oa.shrinkage_)
# compare estimates given by OAS and ShrunkCovariance
scov = ShrunkCovariance(shrinkage=oa.shrinkage_)
scov.fit(X)
assert_array_almost_equal(scov.covariance_, oa.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
oa = OAS()
oa.fit(X_1d)
oa_cov_from_mle, oa_shinkrage_from_mle = oas(X_1d)
assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
assert_almost_equal(oa_shinkrage_from_mle, oa.shrinkage_)
assert_array_almost_equal(empirical_covariance(X_1d), oa.covariance_, 4)
# test with one sample
# warning should be raised when using only 1 sample
X_1sample = np.arange(5).reshape(1, 5)
oa = OAS()
assert_warns(UserWarning, oa.fit, X_1sample)
assert_array_almost_equal(oa.covariance_,
np.zeros(shape=(5, 5), dtype=np.float64))
# test shrinkage coeff on a simple data set (without saving precision)
oa = OAS(store_precision=False)
oa.fit(X)
assert_almost_equal(oa.score(X), score_, 4)
assert(oa.precision_ is None)
| bsd-3-clause |
wittawatj/nips-papers | pnips/download_papers.py | 1 | 5605 | """
The MIT License (MIT)
=====================
Copyright (c) 2015 Ben Hamner
Modified copyright 2016 Wittawat Jitkrittum
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from bs4 import BeautifulSoup
import json
import os
import pandas as pd
import re
import requests
import subprocess
import sys
def text_from_pdf(pdf_path, temp_path):
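    # Convert a PDF to plain text by shelling out to the external `pdftotext`
    # command-line tool (which must be installed and on the PATH), writing to
    # a temporary file and returning its contents.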
if os.path.exists(temp_path):
os.remove(temp_path)
subprocess.call(["pdftotext", pdf_path, temp_path])
f = open(temp_path)
text = f.read()
f.close()
os.remove(temp_path)
return text
def main(year):
year = int(year)
count = year - 1987
base_url = "http://papers.nips.cc"
index_url = "http://papers.nips.cc/book/advances-in-neural-information-processing-systems-%d-%d"%(count, year)
r = requests.get(index_url)
soup = BeautifulSoup(r.content)
paper_links = [link for link in soup.find_all('a') if link["href"][:7]=="/paper/"]
print("%d Papers Found" % len(paper_links))
nips_authors = set()
papers = list()
paper_authors = list()
output_base = 'output%d'%year
if not os.path.exists(output_base):
os.mkdir(output_base)
temp_path = os.path.join(output_base, "temp.txt")
#for link in paper_links[:5]:
pdfs_base = os.path.join(output_base, 'pdfs')
if not os.path.exists(pdfs_base):
os.mkdir(pdfs_base)
for i, link in enumerate(paper_links):
paper_title = link.contents[0]
info_link = base_url + link["href"]
pdf_link = info_link + ".pdf"
pdf_name = link["href"][7:] + ".pdf"
txt_name = link["href"][7:] + ".txt"
info_name = link["href"][7:] + ".info"
paper_id = re.findall(r"^(\d+)-", pdf_name)[0]
pdf_path = os.path.join(pdfs_base, pdf_name)
if not os.path.exists(pdf_path):
# Only request when the pdf was not downloaded before
pdf = requests.get(pdf_link)
pdf_file = open(pdf_path, "wb")
pdf_file.write(pdf.content)
pdf_file.close()
info_path = os.path.join(pdfs_base, info_name)
if not os.path.exists(info_path):
info_content = requests.get(info_link).content
with open(info_path, 'w') as info_file:
info_file.write(info_content)
else:
with open(info_path, 'r') as info_file:
info_content = info_file.read()
paper_soup = BeautifulSoup(info_content)
abstract = paper_soup.find('p', attrs={"class": "abstract"}).contents[0]
authors = [(re.findall(r"-(\d+)$", author.contents[0]["href"])[0],
author.contents[0].contents[0])
for author in paper_soup.find_all('li', attrs={"class": "author"})]
for author in authors:
nips_authors.add(author)
paper_authors.append([len(paper_authors)+1, paper_id, author[0]])
event_types = [h.contents[0][23:] for h in paper_soup.find_all('h3') if h.contents[0][:22]=="Conference Event Type:"]
if len(event_types) != 1:
# cannot determine the event type (poster, paper)
#print(event_types)
#print([h.contents for h in paper_soup.find_all('h3')])
#raise Exception("Bad Event Data")
event_type = 'unknown_event'
else:
event_type = event_types[0]
txt_path = os.path.join(pdfs_base, txt_name)
if not os.path.exists(txt_path):
paper_text = text_from_pdf(pdf_path, temp_path)
with open(txt_path, 'w') as txt_file:
txt_file.write(paper_text)
else:
with open(txt_path, 'r') as txt_file:
paper_text = txt_file.read()
print('(%3d/%3d): %s'%(i, len(paper_links), paper_title))
papers.append([paper_id, paper_title, event_type, pdf_name, abstract, paper_text])
pd.DataFrame(list(nips_authors),
columns=["Id","Name"]).to_csv(os.path.join(output_base,
"Authors.csv"), index=False, encoding='utf-8')
pd.DataFrame(papers, columns=["Id", "Title", "EventType", "PdfName",
"Abstract", "PaperText"]).to_csv(os.path.join(output_base, "Papers.csv"),
index=False, encoding='utf-8')
pd.DataFrame(paper_authors, columns=["Id", "PaperId",
"AuthorId"]).to_csv(os.path.join(output_base, "PaperAuthors.csv"), index=False,
encoding='utf-8')
if __name__ == '__main__':
if len(sys.argv) != 2:
print('usage: %s year'%(sys.argv[0]) )
print(' year is from 1988')
sys.exit(1)
year = sys.argv[1]
main(year)
| mit |
bthirion/scikit-learn | benchmarks/bench_sparsify.py | 323 | 3372 | """
Benchmark SGD prediction time with dense/sparse coefficients.
Invoke with
-----------
$ kernprof.py -l sparsity_benchmark.py
$ python -m line_profiler sparsity_benchmark.py.lprof
Typical output
--------------
input data sparsity: 0.050000
true coef sparsity: 0.000100
test data sparsity: 0.027400
model sparsity: 0.000024
r^2 on test data (dense model) : 0.233651
r^2 on test data (sparse model) : 0.233651
Wrote profile results to sparsity_benchmark.py.lprof
Timer unit: 1e-06 s
File: sparsity_benchmark.py
Function: benchmark_dense_predict at line 51
Total time: 0.532979 s
Line # Hits Time Per Hit % Time Line Contents
==============================================================
51 @profile
52 def benchmark_dense_predict():
53 301 640 2.1 0.1 for _ in range(300):
54 300 532339 1774.5 99.9 clf.predict(X_test)
File: sparsity_benchmark.py
Function: benchmark_sparse_predict at line 56
Total time: 0.39274 s
Line # Hits Time Per Hit % Time Line Contents
==============================================================
56 @profile
57 def benchmark_sparse_predict():
58 1 10854 10854.0 2.8 X_test_sparse = csr_matrix(X_test)
59 301 477 1.6 0.1 for _ in range(300):
60 300 381409 1271.4 97.1 clf.predict(X_test_sparse)
"""
from scipy.sparse.csr import csr_matrix
import numpy as np
from sklearn.linear_model.stochastic_gradient import SGDRegressor
from sklearn.metrics import r2_score
np.random.seed(42)
def sparsity_ratio(X):
return np.count_nonzero(X) / float(n_samples * n_features)
n_samples, n_features = 5000, 300
X = np.random.randn(n_samples, n_features)
inds = np.arange(n_samples)
np.random.shuffle(inds)
X[inds[int(n_features / 1.2):]] = 0 # sparsify input
print("input data sparsity: %f" % sparsity_ratio(X))
coef = 3 * np.random.randn(n_features)
inds = np.arange(n_features)
np.random.shuffle(inds)
coef[inds[n_features/2:]] = 0 # sparsify coef
print("true coef sparsity: %f" % sparsity_ratio(coef))
y = np.dot(X, coef)
# add noise
y += 0.01 * np.random.normal(size=(n_samples,))
# Split data in train set and test set
n_samples = X.shape[0]
X_train, y_train = X[:n_samples / 2], y[:n_samples / 2]
X_test, y_test = X[n_samples / 2:], y[n_samples / 2:]
print("test data sparsity: %f" % sparsity_ratio(X_test))
###############################################################################
clf = SGDRegressor(penalty='l1', alpha=.2, fit_intercept=True, n_iter=2000)
clf.fit(X_train, y_train)
print("model sparsity: %f" % sparsity_ratio(clf.coef_))
def benchmark_dense_predict():
for _ in range(300):
clf.predict(X_test)
def benchmark_sparse_predict():
X_test_sparse = csr_matrix(X_test)
for _ in range(300):
clf.predict(X_test_sparse)
def score(y_test, y_pred, case):
r2 = r2_score(y_test, y_pred)
print("r^2 on test data (%s) : %f" % (case, r2))
score(y_test, clf.predict(X_test), 'dense model')
benchmark_dense_predict()
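# sparsify() converts the fitted coef_ to a scipy.sparse matrix so that the
# following predictions use sparse dot products on the L1-sparsified weights.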
clf.sparsify()
score(y_test, clf.predict(X_test), 'sparse model')
benchmark_sparse_predict()
| bsd-3-clause |