repo_name | path | copies | size | content | license
---|---|---|---|---|---
CVML/pymc3 | setup.py | 12 | 2606 | #!/usr/bin/env python
from setuptools import setup
import sys
DISTNAME = 'pymc3'
DESCRIPTION = "PyMC3"
LONG_DESCRIPTION = """Bayesian estimation, particularly using Markov chain Monte Carlo (MCMC), is an increasingly relevant approach to statistical estimation. However, few statistical software packages implement MCMC samplers, and they are non-trivial to code by hand. ``pymc3`` is a python package that implements the Metropolis-Hastings algorithm as a python class, and is extremely flexible and applicable to a large suite of problems. ``pymc3`` includes methods for summarizing output, plotting, goodness-of-fit and convergence diagnostics."""
MAINTAINER = 'John Salvatier'
MAINTAINER_EMAIL = '[email protected]'
AUTHOR = 'John Salvatier and Christopher Fonnesbeck'
AUTHOR_EMAIL = '[email protected]'
URL = "http://github.com/pymc-devs/pymc"
LICENSE = "Apache License, Version 2.0"
VERSION = "3.0"
classifiers = ['Development Status :: 4 - Beta',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'License :: OSI Approved :: Apache Software License',
'Intended Audience :: Science/Research',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Mathematics',
'Operating System :: OS Independent']
install_reqs = ['numpy>=1.7.1', 'scipy>=0.12.0', 'matplotlib>=1.2.1',
'Theano<=0.7.1dev', 'pandas>=0.15.0']
test_reqs = ['nose']
if sys.version_info[0] == 2:  # py3 has mock in stdlib
    test_reqs.append('mock')
dep_links = ['https://github.com/Theano/Theano/tarball/master#egg=Theano-0.7.1dev']
if __name__ == "__main__":
    setup(name=DISTNAME,
          version=VERSION,
          maintainer=MAINTAINER,
          maintainer_email=MAINTAINER_EMAIL,
          description=DESCRIPTION,
          license=LICENSE,
          url=URL,
          long_description=LONG_DESCRIPTION,
          packages=['pymc3', 'pymc3.distributions',
                    'pymc3.step_methods', 'pymc3.tuning',
                    'pymc3.tests', 'pymc3.glm', 'pymc3.examples',
                    'pymc3.backends'],
          package_data={'pymc3.examples': ['data/*.*']},
          classifiers=classifiers,
          install_requires=install_reqs,
          dependency_links=dep_links,
          tests_require=test_reqs,
          test_suite='nose.collector')
| apache-2.0 |
akpetty/ArcticSeaIcePrediction2017 | Scripts/plot_skill.py | 1 | 4408 | ##############################################################
# Date: 01/10/17
# Name: plot_skill.py
# Author: Alek Petty
# Description: Script to plot skill values for different forecasts.
import matplotlib
matplotlib.use("AGG")
from mpl_toolkits.basemap import Basemap, shiftgrid
import numpy as np
from pylab import *
import numpy.ma as ma
from matplotlib import rc
rcParams['xtick.major.size'] = 2
rcParams['ytick.major.size'] = 2
rcParams['axes.linewidth'] = .5
rcParams['lines.linewidth'] = .5
rcParams['patch.linewidth'] = .5
rcParams['axes.labelsize'] = 8
rcParams['xtick.labelsize']=8
rcParams['ytick.labelsize']=8
rcParams['legend.fontsize']=8
rcParams['font.size']=8
rc('font',**{'family':'sans-serif','sans-serif':['Arial']})
#rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']})
skilldatapath='../DataOutput/SkillVals/'
#dataoutpathC='./Data_output/CONC_OUT/'
figpath='../Figures/'
endYear=2016
startYearPred=1985
skills=[]
skillsP=[]
# PUT SKILL ON ONE SIDE AND FERR ON OTHER AXIS.
varstrs=["conc", "melt", "melt_nan", "pond"]
skill_num=0
skills=np.zeros((6, 6, 2))
for m in xrange(2, 8):
    skillC = loadtxt(skilldatapath+'Skill_'+varstrs[0]+str(m)+str(startYearPred)+str(endYear)+'W1.txt', skiprows=1)
    skillCU = loadtxt(skilldatapath+'Skill_'+varstrs[0]+str(m)+str(startYearPred)+str(endYear)+'W0.txt', skiprows=1)
    skillM = loadtxt(skilldatapath+'Skill_'+varstrs[1]+str(m)+str(startYearPred)+str(endYear)+'W1.txt', skiprows=1)
    skillMU = loadtxt(skilldatapath+'Skill_'+varstrs[1]+str(m)+str(startYearPred)+str(endYear)+'W0.txt', skiprows=1)
    skillMN = loadtxt(skilldatapath+'Skill_'+varstrs[2]+str(m)+str(startYearPred)+str(endYear)+'W1.txt', skiprows=1)
    if ((m==4)|(m==5)):
        skillP = loadtxt(skilldatapath+'Skill_'+varstrs[3]+str(m)+str(startYearPred)+str(endYear)+'W1.txt', skiprows=1)
    else:
        skillP = [np.nan, np.nan, np.nan, np.nan]
    skills[:, m-2, 0] = (skillC[0], skillCU[0], skillM[0], skillMU[0], skillMN[0], skillP[0])
    skills[:, m-2, 1] = (skillC[3], skillCU[3], skillM[3], skillMU[3], skillMN[3], skillP[3])
Concdays=np.arange(90, 260, 30)
Meltdays=np.arange(90, 220, 30)
Combdays=np.arange(150, 200, 30)
fig = figure(figsize=(3.5,3.8))
ax1=subplot(2, 1, 1)
im1 = plot(Concdays, skills[0, :, 0], 'o',color='b', linestyle='-', markersize=5, alpha=0.8)
im2 = plot(Concdays, skills[1, :, 0], 'v',color='b', linestyle='--', markersize=3, alpha=0.8)
im3 = plot(Concdays, skills[2, :, 0], 'o',color='r', linestyle='-', markersize=5, alpha=0.8)
im4 = plot(Concdays, skills[3, :, 0], 'v',color='r', linestyle='--', markersize=3, alpha=0.8)
im5 = plot(Concdays, skills[4, :, 0], 'o',color='g', linestyle='-', markersize=5, alpha=0.8)
im6 = plot(Combdays, skills[5, 2:4, 0], 's',color='k', linestyle='-', markersize=5, alpha=0.8)
ax1.axhline(0, linestyle='--', color='k')
ylim(-0.5, 1.)
xlim(20, 250)
ax2=subplot(2, 1, 2)
im1 = plot(Concdays, skills[0, :, 1], 'o',color='b', linestyle='-', markersize=5, alpha=0.8)
im2 = plot(Concdays, skills[1, :, 1], 'v',color='b', linestyle='--', markersize=3, alpha=0.8)
im3 = plot(Concdays, skills[2, :, 1], 'o',color='r', linestyle='-', markersize=5, alpha=0.8)
im4 = plot(Concdays, skills[3, :, 1], 'v',color='r', linestyle='--', markersize=3, alpha=0.8)
im5 = plot(Concdays, skills[4, :, 1], 'o',color='g', linestyle='-', markersize=5, alpha=0.8)
im6 = plot(Combdays, skills[5, 2:4, 1], 's',color='k', linestyle='-', markersize=5, alpha=0.8)
ax2.axhline(0, linestyle='--', color='k')
ylim(-0.5, 1.)
xlim(20, 250)
ax1.set_ylabel('Skill', labelpad=3)
ax2.set_ylabel('Skill', labelpad=3)
ax2.set_xlabel('Forecast month')
ax1.set_xticks(np.arange(30, 250, 30))
ax1.set_xticklabels([])
ax2.set_xticks(np.arange(30, 250, 30))
ax2.set_xticklabels(['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug'])
ax1.annotate('(a) 1985-2016', xy=(0.03, 1.01), xycoords='axes fraction', verticalalignment='bottom')
ax2.annotate('(b) 2008-2016', xy=(0.03, 1.01), xycoords='axes fraction', verticalalignment='bottom')
#ax1.xaxis.grid(True)
#ax1.yaxis.grid(True)
plts_net = im1+im2+im3+im4+im5+im6
methods = ['SIC','SICuw','MO', 'MOuw', 'MOmask', 'MP']
leg = ax1.legend(plts_net, methods, loc=2, ncol=2,columnspacing=1., frameon=False,handletextpad=1, borderaxespad=0.1)
subplots_adjust(left=0.13, right=0.98, top=0.96, bottom=0.1, hspace=0.1)
savefig(figpath+'skill_monthsALL'+str(skill_num)+'N1.pdf', dpi=300)
close(fig)
| gpl-3.0 |
duyet-website/api.duyet.net | lib/numpy/doc/creation.py | 52 | 5507 | """
==============
Array Creation
==============
Introduction
============
There are 5 general mechanisms for creating arrays:
1) Conversion from other Python structures (e.g., lists, tuples)
2) Intrinsic numpy array array creation objects (e.g., arange, ones, zeros,
etc.)
3) Reading arrays from disk, either from standard or custom formats
4) Creating arrays from raw bytes through the use of strings or buffers
5) Use of special library functions (e.g., random)
This section will not cover means of replicating, joining, or otherwise
expanding or mutating existing arrays. Nor will it cover creating object
arrays or structured arrays. Both of those are covered in their own sections.
Converting Python array_like Objects to NumPy Arrays
====================================================
In general, numerical data arranged in an array-like structure in Python can
be converted to arrays through the use of the array() function. The most
obvious examples are lists and tuples. See the documentation for array() for
details for its use. Some objects may support the array-protocol and allow
conversion to arrays this way. A simple way to find out if the object can be
converted to a numpy array using array() is simply to try it interactively and
see if it works! (The Python Way).
Examples: ::
>>> x = np.array([2,3,1,0])
>>> x = np.array([2, 3, 1, 0])
>>> x = np.array([[1,2.0],[0,0],(1+1j,3.)]) # note mix of tuple and lists, and types
>>> x = np.array([[ 1.+0.j, 2.+0.j], [ 0.+0.j, 0.+0.j], [ 1.+1.j, 3.+0.j]])
Intrinsic NumPy Array Creation
==============================
NumPy has built-in functions for creating arrays from scratch:
zeros(shape) will create an array filled with 0 values with the specified
shape. The default dtype is float64. ::
    >>> np.zeros((2, 3))
    array([[ 0.,  0.,  0.],
           [ 0.,  0.,  0.]])
ones(shape) will create an array filled with 1 values. It is identical to
zeros in all other respects.
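A short example, analogous to the zeros example above (output shown in the
same style): ::
    >>> np.ones((2, 3))
    array([[ 1.,  1.,  1.],
           [ 1.,  1.,  1.]])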
arange() will create arrays with regularly incrementing values. Check the
docstring for complete information on the various ways it can be used. A few
examples will be given here: ::
>>> np.arange(10)
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
>>> np.arange(2, 10, dtype=np.float)
array([ 2., 3., 4., 5., 6., 7., 8., 9.])
>>> np.arange(2, 3, 0.1)
array([ 2. , 2.1, 2.2, 2.3, 2.4, 2.5, 2.6, 2.7, 2.8, 2.9])
Note that there are some subtleties regarding the last usage that the user
should be aware of that are described in the arange docstring.
linspace() will create arrays with a specified number of elements, and
spaced equally between the specified beginning and end values. For
example: ::
>>> np.linspace(1., 4., 6)
array([ 1. , 1.6, 2.2, 2.8, 3.4, 4. ])
The advantage of this creation function is that one can guarantee the
number of elements and the starting and end point, which arange()
generally will not do for arbitrary start, stop, and step values.
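For contrast, a sketch of the same spacing written with arange(); the end point
is not included and the element count is less obvious in advance (subject to
the floating point subtleties mentioned above): ::
    >>> np.arange(1., 4., 0.6)
    array([ 1. ,  1.6,  2.2,  2.8,  3.4])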
indices() will create a set of arrays (stacked as a one-higher dimensioned
array), one per dimension with each representing variation in that dimension.
An example illustrates much better than a verbal description: ::
>>> np.indices((3,3))
array([[[0, 0, 0], [1, 1, 1], [2, 2, 2]], [[0, 1, 2], [0, 1, 2], [0, 1, 2]]])
This is particularly useful for evaluating functions of multiple dimensions on
a regular grid.
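A brief sketch of that use, with addition standing in for an arbitrary function
of the grid coordinates: ::
    >>> i, j = np.indices((3, 3))
    >>> i + j
    array([[0, 1, 2],
           [1, 2, 3],
           [2, 3, 4]])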
Reading Arrays From Disk
========================
This is presumably the most common case of large array creation. The details,
of course, depend greatly on the format of data on disk and so this section
can only give general pointers on how to handle various formats.
Standard Binary Formats
-----------------------
Various fields have standard formats for array data. The following lists the
ones with known python libraries to read them and return numpy arrays (there
may be others for which it is possible to read and convert to numpy arrays so
check the last section as well)
::
HDF5: PyTables
FITS: PyFITS
Examples of formats that cannot be read directly but for which it is not hard to
convert are those formats supported by libraries like PIL (able to read and
write many image formats such as jpg, png, etc).
Common ASCII Formats
------------------------
Comma Separated Value files (CSV) are widely used (and an export and import
option for programs like Excel). There are a number of ways of reading these
files in Python. There are CSV functions in Python and functions in pylab
(part of matplotlib).
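As a minimal sketch (the file name and its contents are hypothetical): ::
    >>> # 'scores.csv' holds comma separated rows such as "1.0,2.0,3.0"
    >>> arr = np.genfromtxt('scores.csv', delimiter=',')  # doctest: +SKIP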
More generic ascii files can be read using the io package in scipy.
Custom Binary Formats
---------------------
There are a variety of approaches one can use. If the file has a relatively
simple format then one can write a simple I/O library and use the numpy
fromfile() function and .tofile() method to read and write numpy arrays
directly (mind your byteorder though!). If a good C or C++ library exists that
reads the data, one can wrap that library with a variety of techniques, though
that certainly is much more work and requires significantly more advanced
knowledge to interface with C or C++.
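A minimal sketch of such a round trip (the file name is hypothetical; the
dtype must be respecified when reading because the raw bytes carry no
metadata): ::
    >>> a = np.arange(4, dtype=np.float64)
    >>> a.tofile('data.bin')                         # doctest: +SKIP
    >>> np.fromfile('data.bin', dtype=np.float64)    # doctest: +SKIP
    array([ 0.,  1.,  2.,  3.])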
Use of Special Libraries
------------------------
There are libraries that can be used to generate arrays for special purposes
and it isn't possible to enumerate all of them. The most common use is of the
many array generation functions in random that can generate arrays of
random values, and some utility functions to generate special matrices (e.g.
diagonal).
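For instance (the random values will of course differ from run to run): ::
    >>> np.random.random((2, 2))  # doctest: +SKIP
    >>> np.diag([1, 2, 3])
    array([[1, 0, 0],
           [0, 2, 0],
           [0, 0, 3]])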
"""
from __future__ import division, absolute_import, print_function
| mit |
Debaq/Triada | FullAxis_GUI/DB/BASE DE DATOS EXPERIMENTO/experimento 3/isadora/medidor3.py | 27 | 3052 | import argparse
import sys
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec
import numpy as np
import json
parser = argparse.ArgumentParser(description="Does some awesome things.")
parser.add_argument('message', type=str, help="pass a message into the script")
args = parser.parse_args(sys.argv[1:])
data = []
New_data=[]
dt=[]
with open(args.message) as json_file:
    data = json.load(json_file)

def graph(grid,d_tiempo):
    plt.switch_backend('TkAgg')  # default on my system
    f = plt.figure(num=args.message, figsize=(20,15))
    mng = plt._pylab_helpers.Gcf.figs.get(f.number, None)
    print(New_data)
    mng = plt.get_current_fig_manager()
    mng.resize(*mng.window.maxsize())
    plt.title(args.message)
    if grid == 1:
        tempo = d_tiempo
        tempo_init = tempo[0]
        tempo_end = tempo[-1]
    gs1 = GridSpec(4, 1)
    gs1.update(left=0.05, right=0.95, wspace=0.5, hspace=0.3, bottom=0.08)
    ax1 = plt.subplot(gs1[0, :])
    ax1.grid()
    ax1.set_ylabel('Pitch',fontsize=8)
    if grid ==1:
        L1 = ax1.plot(d_tiempo,New_data['pitch'])
    else:
        L1 = ax1.plot(d_tiempo,data['pitch'])
    ax2 = plt.subplot(gs1[1, :])
    ax2.grid()
    ax2.set_ylabel('Roll',fontsize=8)
    if grid ==1:
        L1 = ax2.plot(d_tiempo,New_data['roll'])
    else:
        L1 = ax2.plot(d_tiempo,data['roll'])
    ax3 = plt.subplot(gs1[2, :])
    ax3.grid()
    ax3.set_ylabel('Yaw',fontsize=8)
    if grid ==1:
        L1 = ax3.plot(d_tiempo,New_data['yaw'])
    else:
        L1 = ax3.plot(d_tiempo,data['yaw'])
    ax4 = plt.subplot(gs1[3, :])
    ax4.grid()
    ax4.set_ylabel('Tiempo',fontsize=8)
    if grid ==1:
        L1 = ax4.plot(d_tiempo,New_data['ledblue'])
        L2 = ax4.plot(d_tiempo,New_data['ledred'])
    else:
        L1 = ax4.plot(d_tiempo,data['ledblue'])
        L2 = ax4.plot(d_tiempo,data['ledred'])
    plt.show()

def find_nearest(array,values):
    idx = np.abs(np.subtract.outer(array, values)).argmin(0)
    return idx

def corte(init_cut,end_cut,a,b,c,d,e,f,g,h,i):
    a = a[init_cut:end_cut]
    b = b[init_cut:end_cut]
    c = c[init_cut:end_cut]
    d = d[init_cut:end_cut]
    e = e[init_cut:end_cut]
    f = f[init_cut:end_cut]
    g = g[init_cut:end_cut]
    h = h[init_cut:end_cut]
    i = i[init_cut:end_cut]
    datos = {'roll':a,'pitch':b,'yaw':c, 'X':d, 'Y':e, 'Z':f,'time':g, 'ledblue':h, 'ledred':i}
    return datos

def reset_tempo(var_in,var_out):
    uni = var_in[0]
    for t in range(0,len(var_in)):
        var_out.append(round((var_in[t]-uni),3))
    return var_out
graph(0,data['time'])
init_cut = float(input("tiempo inicial: "))
init_cuty = find_nearest(data['time'],init_cut)
end_cut = float(input("tiempo final: "))
end_cuty = find_nearest(data['time'],end_cut)
New_data=corte(init_cuty,end_cuty,data['pitch'],data['roll'],data['yaw'],data['X'],data['Y'],data['Z'],data['time'],data['ledblue'],data['ledred'])
data = []
print(data)
data = New_data
print(data)
dt = reset_tempo(New_data['time'],dt)
graph(0,dt)
| gpl-3.0 |
rl-institut/reegis_hp | reegis_hp/berlin_hp/read_data.py | 3 | 2222 | import pandas as pd
import os
basic_path = '/home/uwe/chiba/RLI/data'
# wohn_gew_schul = pd.read_csv('/home/uwe/blubber.csv', ';')
# wohn_gew_schul.index += 1
# wohn_gew_schul.to_csv(os.path.join(basic_path, 'wohn_gew_schul.csv'))
#
# iwu_typen = pd.read_csv('/home/uwe/heiztyp2iwu.csv')
# iwu_typen.index += 1
# iwu_typen.to_csv(os.path.join(basic_path, 'iwu_typen.csv'))
#
# stadtstrukturtypen = pd.read_csv('/home/uwe/stadtstruk.csv', ';')
# stadtstrukturtypen.drop('heiztyp', 1, inplace=True)
# stadtstrukturtypen.index += 1
# stadtstrukturtypen.to_csv(os.path.join(basic_path, 'stadtstruktur.csv'))
iwu_typen = pd.read_csv(os.path.join(basic_path, 'iwu_typen.csv'), index_col=0)
wohn_gew_schul = pd.read_csv(
os.path.join(basic_path, 'wohn_gew_schul.csv'), index_col=0)
stadtstrukturtypen = pd.read_csv(
os.path.join(basic_path, 'stadtnutzung_erweitert.csv'), index_col=0)
# number_floors = pd.read_csv(
# os.path.join(basic_path, 'number_floors_by_city_structure.csv'),
# index_col=0)
#
# print(number_floors)
stadtstrukturtypen.fillna(0, inplace=True)
print(sum(stadtstrukturtypen.ew * stadtstrukturtypen.wohnflaeche_pro_ew))
print(sum(stadtstrukturtypen.ew))
# Todo: Script to add the city structure type as a number, with output of the
# types that are then left without a number
# Todo: Add the number of floors and other missing types (ods file) [RLI/data]
# ToDo: Consumption per building type from the Wärmetool
# ToDo: Join the "Flächentypen" info into the overall map
# Todo: Compare the living area with the Wärmetool
# Todo: Calculate the heat consumption following the Wärmetool method
# ToDo Age of building by "Flächentyp"
# ToDo Calculate the heat consumption following the Open_eQuarter method
iwu_typen['EFHv84'] *= wohn_gew_schul.Wohnungen
iwu_typen['EFHn84'] *= wohn_gew_schul.Wohnungen
iwu_typen['MFHv84'] *= wohn_gew_schul.Wohnungen
iwu_typen['MFHn84'] *= wohn_gew_schul.Wohnungen
iwu_typen['Platte'] *= wohn_gew_schul.Wohnungen
iwu_typen['Buero'] = wohn_gew_schul.Buero
iwu_typen['Schule'] = wohn_gew_schul.Schule
# heatingtypes = pd.read_csv("/home/uwe/heiztypen.csv", sep=';')
# result = result.merge(heatingtypes, on='gebaeudefu', how='inner')
# result.set_index('gid', drop=True, inplace=True)
| gpl-3.0 |
jmetzen/scikit-learn | sklearn/decomposition/pca.py | 20 | 23579 | """ Principal Component Analysis
"""
# Author: Alexandre Gramfort <[email protected]>
# Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
# Denis A. Engemann <[email protected]>
# Michael Eickenberg <[email protected]>
#
# License: BSD 3 clause
from math import log, sqrt
import numpy as np
from scipy import linalg
from scipy.special import gammaln
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_random_state, as_float_array
from ..utils import check_array
from ..utils.extmath import fast_dot, fast_logdet, randomized_svd
from ..utils.validation import check_is_fitted
def _assess_dimension_(spectrum, rank, n_samples, n_features):
"""Compute the likelihood of a rank ``rank`` dataset
The dataset is assumed to be embedded in gaussian noise of shape(n,
dimf) having spectrum ``spectrum``.
Parameters
----------
spectrum: array of shape (n)
Data spectrum.
rank: int
Tested rank value.
n_samples: int
Number of samples.
n_features: int
Number of features.
Returns
-------
ll: float,
The log-likelihood
Notes
-----
This implements the method of `Thomas P. Minka:
Automatic Choice of Dimensionality for PCA. NIPS 2000: 598-604`
"""
if rank > len(spectrum):
raise ValueError("The tested rank cannot exceed the rank of the"
" dataset")
pu = -rank * log(2.)
for i in range(rank):
pu += (gammaln((n_features - i) / 2.)
- log(np.pi) * (n_features - i) / 2.)
pl = np.sum(np.log(spectrum[:rank]))
pl = -pl * n_samples / 2.
if rank == n_features:
pv = 0
v = 1
else:
v = np.sum(spectrum[rank:]) / (n_features - rank)
pv = -np.log(v) * n_samples * (n_features - rank) / 2.
m = n_features * rank - rank * (rank + 1.) / 2.
pp = log(2. * np.pi) * (m + rank + 1.) / 2.
pa = 0.
spectrum_ = spectrum.copy()
spectrum_[rank:n_features] = v
for i in range(rank):
for j in range(i + 1, len(spectrum)):
pa += log((spectrum[i] - spectrum[j]) *
(1. / spectrum_[j] - 1. / spectrum_[i])) + log(n_samples)
ll = pu + pl + pv + pp - pa / 2. - rank * log(n_samples) / 2.
return ll
def _infer_dimension_(spectrum, n_samples, n_features):
"""Infers the dimension of a dataset of shape (n_samples, n_features)
The dataset is described by its spectrum `spectrum`.
"""
n_spectrum = len(spectrum)
ll = np.empty(n_spectrum)
for rank in range(n_spectrum):
ll[rank] = _assess_dimension_(spectrum, rank, n_samples, n_features)
return ll.argmax()
class PCA(BaseEstimator, TransformerMixin):
"""Principal component analysis (PCA)
Linear dimensionality reduction using Singular Value Decomposition of the
data and keeping only the most significant singular vectors to project the
data to a lower dimensional space.
This implementation uses the scipy.linalg implementation of the singular
value decomposition. It only works for dense arrays and is not scalable to
large dimensional data.
The time complexity of this implementation is ``O(n ** 3)`` assuming
n ~ n_samples ~ n_features.
Read more in the :ref:`User Guide <PCA>`.
Parameters
----------
n_components : int, None or string
Number of components to keep.
if n_components is not set all components are kept::
n_components == min(n_samples, n_features)
if n_components == 'mle', Minka\'s MLE is used to guess the dimension
if ``0 < n_components < 1``, select the number of components such that
the amount of variance that needs to be explained is greater than the
percentage specified by n_components
copy : bool
If False, data passed to fit are overwritten and running
fit(X).transform(X) will not yield the expected results,
use fit_transform(X) instead.
whiten : bool, optional
When True (False by default) the `components_` vectors are divided
by n_samples times singular values to ensure uncorrelated outputs
with unit component-wise variances.
Whitening will remove some information from the transformed signal
(the relative variance scales of the components) but can sometime
improve the predictive accuracy of the downstream estimators by
making their data respect some hard-wired assumptions.
Attributes
----------
components_ : array, [n_components, n_features]
Principal axes in feature space, representing the directions of
maximum variance in the data. The components are sorted by
explained_variance_.
explained_variance_ : array, [n_components]
The amount of variance explained by each of the selected components.
explained_variance_ratio_ : array, [n_components]
Percentage of variance explained by each of the selected components.
If ``n_components`` is not set then all components are stored and the
sum of explained variances is equal to 1.0.
mean_ : array, [n_features]
Per-feature empirical mean, estimated from the training set.
Equal to `X.mean(axis=1)`.
n_components_ : int
The estimated number of components. Relevant when `n_components` is set
to 'mle' or a number between 0 and 1 to select using explained
variance.
noise_variance_ : float
The estimated noise covariance following the Probabilistic PCA model
from Tipping and Bishop 1999. See "Pattern Recognition and
Machine Learning" by C. Bishop, 12.2.1 p. 574 or
http://www.miketipping.com/papers/met-mppca.pdf. It is required to
compute the estimated data covariance and score samples.
Notes
-----
For n_components='mle', this class uses the method of `Thomas P. Minka:
Automatic Choice of Dimensionality for PCA. NIPS 2000: 598-604`
Implements the probabilistic PCA model from:
M. Tipping and C. Bishop, Probabilistic Principal Component Analysis,
Journal of the Royal Statistical Society, Series B, 61, Part 3, pp. 611-622
via the score and score_samples methods.
See http://www.miketipping.com/papers/met-mppca.pdf
Due to implementation subtleties of the Singular Value Decomposition (SVD),
which is used in this implementation, running fit twice on the same matrix
can lead to principal components with signs flipped (change in direction).
For this reason, it is important to always use the same estimator object to
transform data in a consistent fashion.
Examples
--------
>>> import numpy as np
>>> from sklearn.decomposition import PCA
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> pca = PCA(n_components=2)
>>> pca.fit(X)
PCA(copy=True, n_components=2, whiten=False)
>>> print(pca.explained_variance_) # doctest: +ELLIPSIS
[ 6.6162... 0.05038...]
>>> print(pca.explained_variance_ratio_) # doctest: +ELLIPSIS
[ 0.99244... 0.00755...]
See also
--------
RandomizedPCA
KernelPCA
SparsePCA
TruncatedSVD
"""
def __init__(self, n_components=None, copy=True, whiten=False):
self.n_components = n_components
self.copy = copy
self.whiten = whiten
def fit(self, X, y=None):
"""Fit the model with X.
Parameters
----------
X: array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the instance itself.
"""
self._fit(X)
return self
def fit_transform(self, X, y=None):
"""Fit the model with X and apply the dimensionality reduction on X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
U, S, V = self._fit(X)
U = U[:, :self.n_components_]
if self.whiten:
# X_new = X * V / S * sqrt(n_samples) = U * sqrt(n_samples)
U *= sqrt(X.shape[0])
else:
# X_new = X * V = U * S * V^T * V = U * S
U *= S[:self.n_components_]
return U
def _fit(self, X):
"""Fit the model on X
Parameters
----------
X: array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
Returns
-------
U, s, V : ndarrays
The SVD of the input data, copied and centered when
requested.
"""
X = check_array(X)
n_samples, n_features = X.shape
X = as_float_array(X, copy=self.copy)
# Center data
self.mean_ = np.mean(X, axis=0)
X -= self.mean_
U, S, V = linalg.svd(X, full_matrices=False)
explained_variance_ = (S ** 2) / n_samples
explained_variance_ratio_ = (explained_variance_ /
explained_variance_.sum())
components_ = V
n_components = self.n_components
if n_components is None:
n_components = n_features
elif n_components == 'mle':
if n_samples < n_features:
raise ValueError("n_components='mle' is only supported "
"if n_samples >= n_features")
n_components = _infer_dimension_(explained_variance_,
n_samples, n_features)
elif not 0 <= n_components <= n_features:
raise ValueError("n_components=%r invalid for n_features=%d"
% (n_components, n_features))
if 0 < n_components < 1.0:
# number of components for which the cumulated explained variance
# percentage is superior to the desired threshold
ratio_cumsum = explained_variance_ratio_.cumsum()
n_components = np.sum(ratio_cumsum < n_components) + 1
# Compute noise covariance using Probabilistic PCA model
# The sigma2 maximum likelihood (cf. eq. 12.46)
if n_components < min(n_features, n_samples):
self.noise_variance_ = explained_variance_[n_components:].mean()
else:
self.noise_variance_ = 0.
# store n_samples to revert whitening when getting covariance
self.n_samples_ = n_samples
self.components_ = components_[:n_components]
self.explained_variance_ = explained_variance_[:n_components]
explained_variance_ratio_ = explained_variance_ratio_[:n_components]
self.explained_variance_ratio_ = explained_variance_ratio_
self.n_components_ = n_components
return (U, S, V)
def get_covariance(self):
"""Compute data covariance with the generative model.
``cov = components_.T * S**2 * components_ + sigma2 * eye(n_features)``
where S**2 contains the explained variances.
Returns
-------
cov : array, shape=(n_features, n_features)
Estimated covariance of data.
"""
components_ = self.components_
exp_var = self.explained_variance_
if self.whiten:
components_ = components_ * np.sqrt(exp_var[:, np.newaxis])
exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.)
cov = np.dot(components_.T * exp_var_diff, components_)
cov.flat[::len(cov) + 1] += self.noise_variance_ # modify diag inplace
return cov
def get_precision(self):
"""Compute data precision matrix with the generative model.
Equals the inverse of the covariance but computed with
the matrix inversion lemma for efficiency.
Returns
-------
precision : array, shape=(n_features, n_features)
Estimated precision of data.
"""
n_features = self.components_.shape[1]
# handle corner cases first
if self.n_components_ == 0:
return np.eye(n_features) / self.noise_variance_
if self.n_components_ == n_features:
return linalg.inv(self.get_covariance())
# Get precision using matrix inversion lemma
components_ = self.components_
exp_var = self.explained_variance_
exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.)
precision = np.dot(components_, components_.T) / self.noise_variance_
precision.flat[::len(precision) + 1] += 1. / exp_var_diff
precision = np.dot(components_.T,
np.dot(linalg.inv(precision), components_))
precision /= -(self.noise_variance_ ** 2)
precision.flat[::len(precision) + 1] += 1. / self.noise_variance_
return precision
def transform(self, X):
"""Apply the dimensionality reduction on X.
X is projected on the first principal components previous extracted
from a training set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
New data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
check_is_fitted(self, 'mean_')
X = check_array(X)
if self.mean_ is not None:
X = X - self.mean_
X_transformed = fast_dot(X, self.components_.T)
if self.whiten:
X_transformed /= np.sqrt(self.explained_variance_)
return X_transformed
def inverse_transform(self, X):
"""Transform data back to its original space using `n_components_`.
Returns an input X_original whose transform would be X.
Parameters
----------
X : array-like, shape (n_samples, n_components)
New data, where n_samples is the number of samples
and n_components is the number of components. X represents
data from the projection on to the principal components.
Returns
-------
X_original : array-like, shape (n_samples, n_features)
"""
check_is_fitted(self, 'mean_')
if self.whiten:
return fast_dot(
X,
np.sqrt(self.explained_variance_[:, np.newaxis]) *
self.components_) + self.mean_
else:
return fast_dot(X, self.components_) + self.mean_
def score_samples(self, X):
"""Return the log-likelihood of each sample.
See. "Pattern Recognition and Machine Learning"
by C. Bishop, 12.2.1 p. 574
or http://www.miketipping.com/papers/met-mppca.pdf
Parameters
----------
X: array, shape(n_samples, n_features)
The data.
Returns
-------
ll: array, shape (n_samples,)
Log-likelihood of each sample under the current model
"""
check_is_fitted(self, 'mean_')
X = check_array(X)
Xr = X - self.mean_
n_features = X.shape[1]
log_like = np.zeros(X.shape[0])
precision = self.get_precision()
log_like = -.5 * (Xr * (np.dot(Xr, precision))).sum(axis=1)
log_like -= .5 * (n_features * log(2. * np.pi)
- fast_logdet(precision))
return log_like
def score(self, X, y=None):
"""Return the average log-likelihood of all samples.
See. "Pattern Recognition and Machine Learning"
by C. Bishop, 12.2.1 p. 574
or http://www.miketipping.com/papers/met-mppca.pdf
Parameters
----------
X: array, shape(n_samples, n_features)
The data.
Returns
-------
ll: float
Average log-likelihood of the samples under the current model
"""
return np.mean(self.score_samples(X))
class RandomizedPCA(BaseEstimator, TransformerMixin):
"""Principal component analysis (PCA) using randomized SVD
Linear dimensionality reduction using approximated Singular Value
Decomposition of the data and keeping only the most significant
singular vectors to project the data to a lower dimensional space.
Read more in the :ref:`User Guide <RandomizedPCA>`.
Parameters
----------
n_components : int, optional
Maximum number of components to keep. When not given or None, this
is set to n_features (the second dimension of the training data).
copy : bool
If False, data passed to fit are overwritten and running
fit(X).transform(X) will not yield the expected results,
use fit_transform(X) instead.
iterated_power : int, optional
Number of iterations for the power method. 2 by default.
.. versionchanged:: 0.18
whiten : bool, optional
When True (False by default) the `components_` vectors are divided
by the singular values to ensure uncorrelated outputs with unit
component-wise variances.
Whitening will remove some information from the transformed signal
(the relative variance scales of the components) but can sometime
improve the predictive accuracy of the downstream estimators by
making their data respect some hard-wired assumptions.
random_state : int or RandomState instance or None (default)
Pseudo Random Number generator seed control. If None, use the
numpy.random singleton.
Attributes
----------
components_ : array, [n_components, n_features]
Components with maximum variance.
explained_variance_ratio_ : array, [n_components]
Percentage of variance explained by each of the selected components.
If n_components is not set then all components are stored and the sum of
explained variances is equal to 1.0
mean_ : array, [n_features]
Per-feature empirical mean, estimated from the training set.
Examples
--------
>>> import numpy as np
>>> from sklearn.decomposition import RandomizedPCA
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> pca = RandomizedPCA(n_components=2)
>>> pca.fit(X) # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
RandomizedPCA(copy=True, iterated_power=2, n_components=2,
random_state=None, whiten=False)
>>> print(pca.explained_variance_ratio_) # doctest: +ELLIPSIS
[ 0.99244... 0.00755...]
See also
--------
PCA
TruncatedSVD
References
----------
.. [Halko2009] `Finding structure with randomness: Stochastic algorithms
for constructing approximate matrix decompositions Halko, et al., 2009
(arXiv:0909.4061)`
.. [MRT] `A randomized algorithm for the decomposition of matrices
Per-Gunnar Martinsson, Vladimir Rokhlin and Mark Tygert`
"""
def __init__(self, n_components=None, copy=True, iterated_power=2,
whiten=False, random_state=None):
self.n_components = n_components
self.copy = copy
self.iterated_power = iterated_power
self.whiten = whiten
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model with X by extracting the first principal components.
Parameters
----------
X: array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the instance itself.
"""
self._fit(check_array(X))
return self
def _fit(self, X):
"""Fit the model to the data X.
Parameters
----------
X: array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
Returns
-------
X : ndarray, shape (n_samples, n_features)
The input data, copied, centered and whitened when requested.
"""
random_state = check_random_state(self.random_state)
X = np.atleast_2d(as_float_array(X, copy=self.copy))
n_samples = X.shape[0]
# Center data
self.mean_ = np.mean(X, axis=0)
X -= self.mean_
if self.n_components is None:
n_components = X.shape[1]
else:
n_components = self.n_components
U, S, V = randomized_svd(X, n_components,
n_iter=self.iterated_power,
random_state=random_state)
self.explained_variance_ = exp_var = (S ** 2) / n_samples
full_var = np.var(X, axis=0).sum()
self.explained_variance_ratio_ = exp_var / full_var
if self.whiten:
self.components_ = V / S[:, np.newaxis] * sqrt(n_samples)
else:
self.components_ = V
return X
def transform(self, X, y=None):
"""Apply dimensionality reduction on X.
X is projected on the first principal components previous extracted
from a training set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
New data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
check_is_fitted(self, 'mean_')
X = check_array(X)
if self.mean_ is not None:
X = X - self.mean_
X = fast_dot(X, self.components_.T)
return X
def fit_transform(self, X, y=None):
"""Fit the model with X and apply the dimensionality reduction on X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
New data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
X = check_array(X)
X = self._fit(X)
return fast_dot(X, self.components_.T)
def inverse_transform(self, X, y=None):
"""Transform data back to its original space.
Returns an array X_original whose transform would be X.
Parameters
----------
X : array-like, shape (n_samples, n_components)
New data, where n_samples is the number of samples
and n_components is the number of components.
Returns
-------
X_original : array-like, shape (n_samples, n_features)
Notes
-----
If whitening is enabled, inverse_transform does not compute the
exact inverse operation of transform.
"""
check_is_fitted(self, 'mean_')
X_original = fast_dot(X, self.components_)
if self.mean_ is not None:
X_original = X_original + self.mean_
return X_original
| bsd-3-clause |
stripe/topmodel | topmodel/plot_helpers.py | 1 | 6149 | from math import floor
from cStringIO import StringIO
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.ticker
from mpld3 import plugins, save_html
def make_points_far(xs, ys, thresholds, min_dist=0.03):
"""
Return a subset of points and thresholds such that the ys are at least
`min_dist` apart. 0.03 seems to be a readable value. Always includes the
first and last points in xs and ys.
"""
y_prev = None
new_xs = []
new_ys = []
new_thresholds = []
for x, y, threshold in zip(xs[:-1], ys[:-1], thresholds[:-1]):
if y_prev is None or (np.abs(y - y_prev) >= min_dist):
y_prev = y
new_xs.append(x)
new_ys.append(y)
new_thresholds.append(threshold)
# Always keep the last point (xs[-1], ys[-1])
new_xs.append(xs[-1])
new_ys.append(ys[-1])
new_thresholds.append(thresholds[-1])
return (new_xs, new_ys, new_thresholds)
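# A quick illustration with hypothetical values:
#   make_points_far([0.0, 0.5, 1.0], [0.0, 0.01, 0.5], [0.1, 0.2, 0.3])
# returns ([0.0, 1.0], [0.0, 0.5], [0.1, 0.3]): the middle point is dropped
# because |0.01 - 0.0| < 0.03, while the first and last points are always kept.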
def draw_labels(ax, xs, ys, thresholds, labels_left=False):
font = matplotlib.font_manager.FontProperties(family='Tahoma', size=6)
for x, y, threshold in zip(xs, ys, thresholds):
x_round = floor(x * 1000) / 1000
y_round = floor(y * 1000) / 1000
threshold_round = floor(threshold * 1000) / 1000
coords = (x + 0.01, y)
if labels_left:
coords = (x - 0.190, y - 0.01)
annotation = "{threshold}: [{x},{y}] ".format(x=x_round, y=y_round,
threshold=threshold_round)
ax.annotate(annotation, coords, fontproperties=font)
def plot_boxplot(vals, label):
fig, ax = plt.subplots()
ax.boxplot(vals)
plt.setp(ax, xticklabels=label)
return save_image()
def plot_scatter(x, y, xlabel, ylabel, ax=None):
if ax is None:
fig, ax = plt.subplots()
ax.scatter(x, y, marker="o", color="purple")
plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.xlim((0.0, 1.0))
plt.ylim((0.0, 1.0))
plt.tight_layout()
return save_image()
def plot_xy(xs, ys, thresholds, xlabel, ylabel, labels=True, labels_left=False,
ax=None, xlim=(0, 1), ylim=(0, 1), autofmt_xdate=False, **plot_kwargs):
if ax is None:
fig, ax = plt.subplots()
if autofmt_xdate:
fig.autofmt_xdate()
ax.plot(xs, ys, '-o', **plot_kwargs)
if xlim is not None:
plt.xlim(*xlim)
if ylim is not None:
plt.ylim(*ylim)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
if labels:
draw_labels(ax, xs, ys, thresholds, labels_left=labels_left)
plt.tight_layout()
return save_image()
def pretty_point(coord):
return "0.0" if (coord is None) else ("%.2f" % coord)
def plot_xy_bootstrapped(xs, ys, thresholds, xlabel, ylabel, labels=False, labels_left=False,
ax=None, fig=None, label=None, **plot_kwargs):
if ax is None or fig is None:
fig1, ax1 = plt.subplots()
if fig is None:
fig = fig1
if ax is None:
ax = ax1
for i in range(1, len(xs)):
ax.plot(xs[i], ys[i], '-', alpha=0.3)
(xs_, ys_, thresholds_) = make_points_far(xs[0], ys[0], thresholds)
label_text = ["Threshold: %s (%s, %s)" % (t, pretty_point(x), pretty_point(y))
for (x, y, t) in zip(xs_, ys_, thresholds_)]
if label is None:
scatter = ax.plot(xs_, ys_, '-o', **plot_kwargs)
plugins.connect(fig, plugins.PointHTMLTooltip(scatter[0], label_text))
else:
scatter = ax.plot(xs_, ys_, '-o', label=label, **plot_kwargs)
plugins.connect(fig, plugins.PointHTMLTooltip(scatter[0], label_text))
if labels:
draw_labels(ax, xs_, ys_, thresholds_, labels_left=labels_left)
plt.xlim(0, 1)
plt.ylim(0, 1)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.tight_layout()
if label is not None:
handles, labels = ax.get_legend_handles_labels()
ax.legend(handles[::-1], labels[::-1], loc='best')
return save_image()
def plot_scores_histogram_log(thresholds, all_counts, xlabel, true_counts=None, ax=None):
plt.figure()
# First graph
if ax is None:
_, ax = plt.subplots()
width = (thresholds[1] - thresholds[0]) / 2
offset = [i + width for i in thresholds]
if true_counts is not None:
falses = [i - j for i, j in zip(all_counts, true_counts)]
plt.bar(offset, falses, width=width,
log=True, label="False items")
plt.bar(thresholds, true_counts, width=width,
log=True, color="purple", label="True items")
else:
plt.bar(thresholds, all_counts, width=width,
log=True, color="purple", label="All items")
plt.grid(False)
ax.yaxis.set_major_formatter(matplotlib.ticker.ScalarFormatter())
ax.yaxis.get_major_formatter().set_scientific(False)
# Write to image
image_data = StringIO()
plt.xlim((0.0, 1.0))
plt.xlabel(xlabel)
plt.legend(loc='best')
plt.savefig(image_data, format='svg')
image_data.seek(0)
return image_data
def plot_absolute_score_histogram(thresholds, all_counts, xlabel, true_counts=None, ax=None):
plt.figure()
# First graph
if ax is None:
_, ax = plt.subplots()
width = (thresholds[1] - thresholds[0]) / 2
offset = [i + width for i in thresholds]
if true_counts is not None:
falses = [i - j for i, j in zip(all_counts, true_counts)]
plt.bar(offset, falses , width=width,
log=False, label="False items")
plt.bar(thresholds, true_counts, width=width,
log=False, color="purple", label="True items")
else:
plt.bar(thresholds, all_counts, width=width,
log=False, color="purple", label="All items")
plt.grid(False)
# Write to image
image_data = StringIO()
plt.xlim((0.0, 1.0))
plt.xlabel(xlabel)
plt.legend(loc='best')
plt.savefig(image_data, format='svg')
image_data.seek(0)
return image_data
def save_image(ax=None):
image_data = StringIO()
save_html(plt.gcf(), image_data)
image_data.seek(0)
return image_data
| mit |
arjoly/scikit-learn | sklearn/linear_model/tests/test_sgd.py | 30 | 44274 | import pickle
import unittest
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import raises
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_false, assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import ignore_warnings
from sklearn import linear_model, datasets, metrics
from sklearn.base import clone
from sklearn.linear_model import SGDClassifier, SGDRegressor
from sklearn.preprocessing import LabelEncoder, scale, MinMaxScaler
class SparseSGDClassifier(SGDClassifier):
def fit(self, X, y, *args, **kw):
X = sp.csr_matrix(X)
return super(SparseSGDClassifier, self).fit(X, y, *args, **kw)
def partial_fit(self, X, y, *args, **kw):
X = sp.csr_matrix(X)
return super(SparseSGDClassifier, self).partial_fit(X, y, *args, **kw)
def decision_function(self, X):
X = sp.csr_matrix(X)
return super(SparseSGDClassifier, self).decision_function(X)
def predict_proba(self, X):
X = sp.csr_matrix(X)
return super(SparseSGDClassifier, self).predict_proba(X)
class SparseSGDRegressor(SGDRegressor):
def fit(self, X, y, *args, **kw):
X = sp.csr_matrix(X)
return SGDRegressor.fit(self, X, y, *args, **kw)
def partial_fit(self, X, y, *args, **kw):
X = sp.csr_matrix(X)
return SGDRegressor.partial_fit(self, X, y, *args, **kw)
def decision_function(self, X, *args, **kw):
X = sp.csr_matrix(X)
return SGDRegressor.decision_function(self, X, *args, **kw)
# Test Data
# test sample 1
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
Y = [1, 1, 1, 2, 2, 2]
T = np.array([[-1, -1], [2, 2], [3, 2]])
true_result = [1, 2, 2]
# test sample 2; string class labels
X2 = np.array([[-1, 1], [-0.75, 0.5], [-1.5, 1.5],
[1, 1], [0.75, 0.5], [1.5, 1.5],
[-1, -1], [0, -0.5], [1, -1]])
Y2 = ["one"] * 3 + ["two"] * 3 + ["three"] * 3
T2 = np.array([[-1.5, 0.5], [1, 2], [0, -2]])
true_result2 = ["one", "two", "three"]
# test sample 3
X3 = np.array([[1, 1, 0, 0, 0, 0], [1, 1, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0], [0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 1, 1], [0, 0, 0, 0, 1, 1],
[0, 0, 0, 1, 0, 0], [0, 0, 0, 1, 0, 0]])
Y3 = np.array([1, 1, 1, 1, 2, 2, 2, 2])
# test sample 4 - two more or less redundant feature groups
X4 = np.array([[1, 0.9, 0.8, 0, 0, 0], [1, .84, .98, 0, 0, 0],
[1, .96, .88, 0, 0, 0], [1, .91, .99, 0, 0, 0],
[0, 0, 0, .89, .91, 1], [0, 0, 0, .79, .84, 1],
[0, 0, 0, .91, .95, 1], [0, 0, 0, .93, 1, 1]])
Y4 = np.array([1, 1, 1, 1, 2, 2, 2, 2])
iris = datasets.load_iris()
# test sample 5 - test sample 1 as binary classification problem
X5 = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
Y5 = [1, 1, 1, 2, 2, 2]
true_result5 = [0, 1, 1]
# Classification Test Case
class CommonTest(object):
def factory(self, **kwargs):
if "random_state" not in kwargs:
kwargs["random_state"] = 42
return self.factory_class(**kwargs)
# a simple implementation of ASGD to use for testing
# uses squared loss to find the gradient
def asgd(self, X, y, eta, alpha, weight_init=None, intercept_init=0.0):
if weight_init is None:
weights = np.zeros(X.shape[1])
else:
weights = weight_init
average_weights = np.zeros(X.shape[1])
intercept = intercept_init
average_intercept = 0.0
decay = 1.0
# sparse data has a fixed decay of .01
if (isinstance(self, SparseSGDClassifierTestCase) or
isinstance(self, SparseSGDRegressorTestCase)):
decay = .01
for i, entry in enumerate(X):
p = np.dot(entry, weights)
p += intercept
gradient = p - y[i]
weights *= 1.0 - (eta * alpha)
weights += -(eta * gradient * entry)
intercept += -(eta * gradient) * decay
average_weights *= i
average_weights += weights
average_weights /= i + 1.0
average_intercept *= i
average_intercept += intercept
average_intercept /= i + 1.0
return average_weights, average_intercept
def _test_warm_start(self, X, Y, lr):
# Test that explicit warm restart...
clf = self.factory(alpha=0.01, eta0=0.01, n_iter=5, shuffle=False,
learning_rate=lr)
clf.fit(X, Y)
clf2 = self.factory(alpha=0.001, eta0=0.01, n_iter=5, shuffle=False,
learning_rate=lr)
clf2.fit(X, Y,
coef_init=clf.coef_.copy(),
intercept_init=clf.intercept_.copy())
# ... and implicit warm restart are equivalent.
clf3 = self.factory(alpha=0.01, eta0=0.01, n_iter=5, shuffle=False,
warm_start=True, learning_rate=lr)
clf3.fit(X, Y)
assert_equal(clf3.t_, clf.t_)
assert_array_almost_equal(clf3.coef_, clf.coef_)
clf3.set_params(alpha=0.001)
clf3.fit(X, Y)
assert_equal(clf3.t_, clf2.t_)
assert_array_almost_equal(clf3.coef_, clf2.coef_)
def test_warm_start_constant(self):
self._test_warm_start(X, Y, "constant")
def test_warm_start_invscaling(self):
self._test_warm_start(X, Y, "invscaling")
def test_warm_start_optimal(self):
self._test_warm_start(X, Y, "optimal")
def test_input_format(self):
# Input format tests.
clf = self.factory(alpha=0.01, n_iter=5,
shuffle=False)
clf.fit(X, Y)
Y_ = np.array(Y)[:, np.newaxis]
Y_ = np.c_[Y_, Y_]
assert_raises(ValueError, clf.fit, X, Y_)
def test_clone(self):
# Test whether clone works ok.
clf = self.factory(alpha=0.01, n_iter=5, penalty='l1')
clf = clone(clf)
clf.set_params(penalty='l2')
clf.fit(X, Y)
clf2 = self.factory(alpha=0.01, n_iter=5, penalty='l2')
clf2.fit(X, Y)
assert_array_equal(clf.coef_, clf2.coef_)
def test_plain_has_no_average_attr(self):
clf = self.factory(average=True, eta0=.01)
clf.fit(X, Y)
assert_true(hasattr(clf, 'average_coef_'))
assert_true(hasattr(clf, 'average_intercept_'))
assert_true(hasattr(clf, 'standard_intercept_'))
assert_true(hasattr(clf, 'standard_coef_'))
clf = self.factory()
clf.fit(X, Y)
assert_false(hasattr(clf, 'average_coef_'))
assert_false(hasattr(clf, 'average_intercept_'))
assert_false(hasattr(clf, 'standard_intercept_'))
assert_false(hasattr(clf, 'standard_coef_'))
def test_late_onset_averaging_not_reached(self):
clf1 = self.factory(average=600)
clf2 = self.factory()
for _ in range(100):
if isinstance(clf1, SGDClassifier):
clf1.partial_fit(X, Y, classes=np.unique(Y))
clf2.partial_fit(X, Y, classes=np.unique(Y))
else:
clf1.partial_fit(X, Y)
clf2.partial_fit(X, Y)
assert_array_almost_equal(clf1.coef_, clf2.coef_, decimal=16)
assert_almost_equal(clf1.intercept_, clf2.intercept_, decimal=16)
def test_late_onset_averaging_reached(self):
eta0 = .001
alpha = .0001
Y_encode = np.array(Y)
Y_encode[Y_encode == 1] = -1.0
Y_encode[Y_encode == 2] = 1.0
clf1 = self.factory(average=7, learning_rate="constant",
loss='squared_loss', eta0=eta0,
alpha=alpha, n_iter=2, shuffle=False)
clf2 = self.factory(average=0, learning_rate="constant",
loss='squared_loss', eta0=eta0,
alpha=alpha, n_iter=1, shuffle=False)
clf1.fit(X, Y_encode)
clf2.fit(X, Y_encode)
average_weights, average_intercept = \
self.asgd(X, Y_encode, eta0, alpha,
weight_init=clf2.coef_.ravel(),
intercept_init=clf2.intercept_)
assert_array_almost_equal(clf1.coef_.ravel(),
average_weights.ravel(),
decimal=16)
assert_almost_equal(clf1.intercept_, average_intercept, decimal=16)
@raises(ValueError)
def test_sgd_bad_alpha_for_optimal_learning_rate(self):
# Check whether expected ValueError on bad alpha, i.e. 0
# since alpha is used to compute the optimal learning rate
self.factory(alpha=0, learning_rate="optimal")
class DenseSGDClassifierTestCase(unittest.TestCase, CommonTest):
"""Test suite for the dense representation variant of SGD"""
factory_class = SGDClassifier
def test_sgd(self):
# Check that SGD gives any results :-)
for loss in ("hinge", "squared_hinge", "log", "modified_huber"):
clf = self.factory(penalty='l2', alpha=0.01, fit_intercept=True,
loss=loss, n_iter=10, shuffle=True)
clf.fit(X, Y)
# assert_almost_equal(clf.coef_[0], clf.coef_[1], decimal=7)
assert_array_equal(clf.predict(T), true_result)
@raises(ValueError)
def test_sgd_bad_l1_ratio(self):
# Check whether expected ValueError on bad l1_ratio
self.factory(l1_ratio=1.1)
@raises(ValueError)
def test_sgd_bad_learning_rate_schedule(self):
# Check whether expected ValueError on bad learning_rate
self.factory(learning_rate="<unknown>")
@raises(ValueError)
def test_sgd_bad_eta0(self):
# Check whether expected ValueError on bad eta0
self.factory(eta0=0, learning_rate="constant")
@raises(ValueError)
def test_sgd_bad_alpha(self):
# Check whether expected ValueError on bad alpha
self.factory(alpha=-.1)
@raises(ValueError)
def test_sgd_bad_penalty(self):
# Check whether expected ValueError on bad penalty
self.factory(penalty='foobar', l1_ratio=0.85)
@raises(ValueError)
def test_sgd_bad_loss(self):
# Check whether expected ValueError on bad loss
self.factory(loss="foobar")
@raises(ValueError)
def test_sgd_n_iter_param(self):
# Test parameter validity check
self.factory(n_iter=-10000)
@raises(ValueError)
def test_sgd_shuffle_param(self):
# Test parameter validity check
self.factory(shuffle="false")
@raises(TypeError)
def test_argument_coef(self):
# Checks coef_init not allowed as model argument (only fit)
# Provided coef_ does not match dataset.
self.factory(coef_init=np.zeros((3,))).fit(X, Y)
@raises(ValueError)
def test_provide_coef(self):
# Checks coef_init shape for the warm starts
# Provided coef_ does not match dataset.
self.factory().fit(X, Y, coef_init=np.zeros((3,)))
@raises(ValueError)
def test_set_intercept(self):
# Checks intercept_ shape for the warm starts
# Provided intercept_ does not match dataset.
self.factory().fit(X, Y, intercept_init=np.zeros((3,)))
def test_set_intercept_binary(self):
# Checks intercept_ shape for the warm starts in binary case
self.factory().fit(X5, Y5, intercept_init=0)
def test_average_binary_computed_correctly(self):
# Checks the SGDClassifier correctly computes the average weights
eta = .1
alpha = 2.
n_samples = 20
n_features = 10
rng = np.random.RandomState(0)
X = rng.normal(size=(n_samples, n_features))
w = rng.normal(size=n_features)
clf = self.factory(loss='squared_loss',
learning_rate='constant',
eta0=eta, alpha=alpha,
fit_intercept=True,
n_iter=1, average=True, shuffle=False)
# simple linear function without noise
y = np.dot(X, w)
y = np.sign(y)
clf.fit(X, y)
average_weights, average_intercept = self.asgd(X, y, eta, alpha)
average_weights = average_weights.reshape(1, -1)
assert_array_almost_equal(clf.coef_,
average_weights,
decimal=14)
assert_almost_equal(clf.intercept_, average_intercept, decimal=14)
def test_set_intercept_to_intercept(self):
# Checks intercept_ shape consistency for the warm starts
# Inconsistent intercept_ shape.
clf = self.factory().fit(X5, Y5)
self.factory().fit(X5, Y5, intercept_init=clf.intercept_)
clf = self.factory().fit(X, Y)
self.factory().fit(X, Y, intercept_init=clf.intercept_)
@raises(ValueError)
def test_sgd_at_least_two_labels(self):
# Target must have at least two labels
self.factory(alpha=0.01, n_iter=20).fit(X2, np.ones(9))
def test_partial_fit_weight_class_balanced(self):
# partial_fit with class_weight='balanced' not supported
assert_raises_regexp(ValueError,
"class_weight 'balanced' is not supported for "
"partial_fit. In order to use 'balanced' weights, "
"use compute_class_weight\('balanced', classes, y\). "
"In place of y you can us a large enough sample "
"of the full training set target to properly "
"estimate the class frequency distributions. "
"Pass the resulting weights as the class_weight "
"parameter.",
self.factory(class_weight='balanced').partial_fit,
X, Y, classes=np.unique(Y))
def test_sgd_multiclass(self):
# Multi-class test case
clf = self.factory(alpha=0.01, n_iter=20).fit(X2, Y2)
assert_equal(clf.coef_.shape, (3, 2))
assert_equal(clf.intercept_.shape, (3,))
assert_equal(clf.decision_function([[0, 0]]).shape, (1, 3))
pred = clf.predict(T2)
assert_array_equal(pred, true_result2)
def test_sgd_multiclass_average(self):
eta = .001
alpha = .01
# Multi-class average test case
clf = self.factory(loss='squared_loss',
learning_rate='constant',
eta0=eta, alpha=alpha,
fit_intercept=True,
n_iter=1, average=True, shuffle=False)
np_Y2 = np.array(Y2)
clf.fit(X2, np_Y2)
classes = np.unique(np_Y2)
for i, cl in enumerate(classes):
y_i = np.ones(np_Y2.shape[0])
y_i[np_Y2 != cl] = -1
average_coef, average_intercept = self.asgd(X2, y_i, eta, alpha)
assert_array_almost_equal(average_coef, clf.coef_[i], decimal=16)
assert_almost_equal(average_intercept,
clf.intercept_[i],
decimal=16)
def test_sgd_multiclass_with_init_coef(self):
# Multi-class test case
clf = self.factory(alpha=0.01, n_iter=20)
clf.fit(X2, Y2, coef_init=np.zeros((3, 2)),
intercept_init=np.zeros(3))
assert_equal(clf.coef_.shape, (3, 2))
assert_true(clf.intercept_.shape, (3,))
pred = clf.predict(T2)
assert_array_equal(pred, true_result2)
def test_sgd_multiclass_njobs(self):
# Multi-class test case with multi-core support
clf = self.factory(alpha=0.01, n_iter=20, n_jobs=2).fit(X2, Y2)
assert_equal(clf.coef_.shape, (3, 2))
assert_equal(clf.intercept_.shape, (3,))
assert_equal(clf.decision_function([[0, 0]]).shape, (1, 3))
pred = clf.predict(T2)
assert_array_equal(pred, true_result2)
def test_set_coef_multiclass(self):
# Checks coef_init and intercept_init shape for for multi-class
# problems
# Provided coef_ does not match dataset
clf = self.factory()
assert_raises(ValueError, clf.fit, X2, Y2, coef_init=np.zeros((2, 2)))
# Provided coef_ does match dataset
clf = self.factory().fit(X2, Y2, coef_init=np.zeros((3, 2)))
# Provided intercept_ does not match dataset
clf = self.factory()
assert_raises(ValueError, clf.fit, X2, Y2,
intercept_init=np.zeros((1,)))
# Provided intercept_ does match dataset.
clf = self.factory().fit(X2, Y2, intercept_init=np.zeros((3,)))
def test_sgd_proba(self):
# Check SGD.predict_proba
# Hinge loss does not allow for conditional prob estimate.
# We cannot use the factory here, because it defines predict_proba
# anyway.
clf = SGDClassifier(loss="hinge", alpha=0.01, n_iter=10).fit(X, Y)
assert_false(hasattr(clf, "predict_proba"))
assert_false(hasattr(clf, "predict_log_proba"))
# log and modified_huber losses can output probability estimates
# binary case
for loss in ["log", "modified_huber"]:
clf = self.factory(loss="modified_huber", alpha=0.01, n_iter=10)
clf.fit(X, Y)
p = clf.predict_proba([[3, 2]])
assert_true(p[0, 1] > 0.5)
p = clf.predict_proba([[-1, -1]])
assert_true(p[0, 1] < 0.5)
p = clf.predict_log_proba([[3, 2]])
assert_true(p[0, 1] > p[0, 0])
p = clf.predict_log_proba([[-1, -1]])
assert_true(p[0, 1] < p[0, 0])
# log loss multiclass probability estimates
clf = self.factory(loss="log", alpha=0.01, n_iter=10).fit(X2, Y2)
d = clf.decision_function([[.1, -.1], [.3, .2]])
p = clf.predict_proba([[.1, -.1], [.3, .2]])
assert_array_equal(np.argmax(p, axis=1), np.argmax(d, axis=1))
assert_almost_equal(p[0].sum(), 1)
assert_true(np.all(p[0] >= 0))
p = clf.predict_proba([[-1, -1]])
d = clf.decision_function([[-1, -1]])
assert_array_equal(np.argsort(p[0]), np.argsort(d[0]))
l = clf.predict_log_proba([[3, 2]])
p = clf.predict_proba([[3, 2]])
assert_array_almost_equal(np.log(p), l)
l = clf.predict_log_proba([[-1, -1]])
p = clf.predict_proba([[-1, -1]])
assert_array_almost_equal(np.log(p), l)
# Modified Huber multiclass probability estimates; requires a separate
# test because the hard zero/one probabilities may destroy the
# ordering present in decision_function output.
clf = self.factory(loss="modified_huber", alpha=0.01, n_iter=10)
clf.fit(X2, Y2)
d = clf.decision_function([[3, 2]])
p = clf.predict_proba([[3, 2]])
if not isinstance(self, SparseSGDClassifierTestCase):
assert_equal(np.argmax(d, axis=1), np.argmax(p, axis=1))
else: # XXX the sparse test gets a different X2 (?)
assert_equal(np.argmin(d, axis=1), np.argmin(p, axis=1))
# the following sample produces decision_function values < -1,
# which would cause naive normalization to fail (see comment
# in SGDClassifier.predict_proba)
x = X.mean(axis=0)
d = clf.decision_function([x])
if np.all(d < -1): # XXX not true in sparse test case (why?)
p = clf.predict_proba([x])
assert_array_almost_equal(p[0], [1 / 3.] * 3)
def test_sgd_l1(self):
# Test L1 regularization
n = len(X4)
rng = np.random.RandomState(13)
idx = np.arange(n)
rng.shuffle(idx)
X = X4[idx, :]
Y = Y4[idx]
clf = self.factory(penalty='l1', alpha=.2, fit_intercept=False,
n_iter=2000, shuffle=False)
clf.fit(X, Y)
assert_array_equal(clf.coef_[0, 1:-1], np.zeros((4,)))
pred = clf.predict(X)
assert_array_equal(pred, Y)
# test sparsify with dense inputs
clf.sparsify()
assert_true(sp.issparse(clf.coef_))
pred = clf.predict(X)
assert_array_equal(pred, Y)
# pickle and unpickle with sparse coef_
clf = pickle.loads(pickle.dumps(clf))
assert_true(sp.issparse(clf.coef_))
pred = clf.predict(X)
assert_array_equal(pred, Y)
def test_class_weights(self):
# Test class weights.
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
clf = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False,
class_weight=None)
clf.fit(X, y)
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))
# we give a small weights to class 1
clf = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False,
class_weight={1: 0.001})
clf.fit(X, y)
# now the hyperplane should rotate clock-wise and
# the prediction on this point should shift
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([-1]))
def test_equal_class_weight(self):
# Test if equal class weights approx. equals no class weights.
X = [[1, 0], [1, 0], [0, 1], [0, 1]]
y = [0, 0, 1, 1]
clf = self.factory(alpha=0.1, n_iter=1000, class_weight=None)
clf.fit(X, y)
X = [[1, 0], [0, 1]]
y = [0, 1]
clf_weighted = self.factory(alpha=0.1, n_iter=1000,
class_weight={0: 0.5, 1: 0.5})
clf_weighted.fit(X, y)
# should be similar up to some epsilon due to learning rate schedule
assert_almost_equal(clf.coef_, clf_weighted.coef_, decimal=2)
@raises(ValueError)
def test_wrong_class_weight_label(self):
# ValueError due to not existing class label.
clf = self.factory(alpha=0.1, n_iter=1000, class_weight={0: 0.5})
clf.fit(X, Y)
@raises(ValueError)
def test_wrong_class_weight_format(self):
# ValueError due to wrong class_weight argument type.
clf = self.factory(alpha=0.1, n_iter=1000, class_weight=[0.5])
clf.fit(X, Y)
def test_weights_multiplied(self):
# Tests that class_weight and sample_weight are multiplicative
class_weights = {1: .6, 2: .3}
sample_weights = np.random.random(Y4.shape[0])
multiplied_together = np.copy(sample_weights)
multiplied_together[Y4 == 1] *= class_weights[1]
multiplied_together[Y4 == 2] *= class_weights[2]
clf1 = self.factory(alpha=0.1, n_iter=20, class_weight=class_weights)
clf2 = self.factory(alpha=0.1, n_iter=20)
clf1.fit(X4, Y4, sample_weight=sample_weights)
clf2.fit(X4, Y4, sample_weight=multiplied_together)
assert_almost_equal(clf1.coef_, clf2.coef_)
def test_balanced_weight(self):
        # Test class weights for imbalanced data
# compute reference metrics on iris dataset that is quite balanced by
# default
X, y = iris.data, iris.target
X = scale(X)
idx = np.arange(X.shape[0])
rng = np.random.RandomState(6)
rng.shuffle(idx)
X = X[idx]
y = y[idx]
clf = self.factory(alpha=0.0001, n_iter=1000,
class_weight=None, shuffle=False).fit(X, y)
assert_almost_equal(metrics.f1_score(y, clf.predict(X), average='weighted'), 0.96,
decimal=1)
# make the same prediction using balanced class_weight
clf_balanced = self.factory(alpha=0.0001, n_iter=1000,
class_weight="balanced",
shuffle=False).fit(X, y)
assert_almost_equal(metrics.f1_score(y, clf_balanced.predict(X), average='weighted'), 0.96,
decimal=1)
# Make sure that in the balanced case it does not change anything
# to use "balanced"
assert_array_almost_equal(clf.coef_, clf_balanced.coef_, 6)
# build an very very imbalanced dataset out of iris data
X_0 = X[y == 0, :]
y_0 = y[y == 0]
X_imbalanced = np.vstack([X] + [X_0] * 10)
y_imbalanced = np.concatenate([y] + [y_0] * 10)
# fit a model on the imbalanced data without class weight info
clf = self.factory(n_iter=1000, class_weight=None, shuffle=False)
clf.fit(X_imbalanced, y_imbalanced)
y_pred = clf.predict(X)
assert_less(metrics.f1_score(y, y_pred, average='weighted'), 0.96)
# fit a model with balanced class_weight enabled
clf = self.factory(n_iter=1000, class_weight="balanced", shuffle=False)
clf.fit(X_imbalanced, y_imbalanced)
y_pred = clf.predict(X)
assert_greater(metrics.f1_score(y, y_pred, average='weighted'), 0.96)
        # fit another model with the same balanced class_weight; the score should again exceed 0.96
clf = self.factory(n_iter=1000, class_weight="balanced", shuffle=False)
clf.fit(X_imbalanced, y_imbalanced)
y_pred = clf.predict(X)
assert_greater(metrics.f1_score(y, y_pred, average='weighted'), 0.96)
def test_sample_weights(self):
# Test weights on individual samples
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
clf = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False)
clf.fit(X, y)
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))
# we give a small weights to class 1
clf.fit(X, y, sample_weight=[0.001] * 3 + [1] * 2)
# now the hyperplane should rotate clock-wise and
# the prediction on this point should shift
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([-1]))
@raises(ValueError)
def test_wrong_sample_weights(self):
# Test if ValueError is raised if sample_weight has wrong shape
clf = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False)
# provided sample_weight too long
clf.fit(X, Y, sample_weight=np.arange(7))
@raises(ValueError)
def test_partial_fit_exception(self):
clf = self.factory(alpha=0.01)
# classes was not specified
clf.partial_fit(X3, Y3)
def test_partial_fit_binary(self):
third = X.shape[0] // 3
clf = self.factory(alpha=0.01)
classes = np.unique(Y)
clf.partial_fit(X[:third], Y[:third], classes=classes)
assert_equal(clf.coef_.shape, (1, X.shape[1]))
assert_equal(clf.intercept_.shape, (1,))
assert_equal(clf.decision_function([[0, 0]]).shape, (1, ))
id1 = id(clf.coef_.data)
clf.partial_fit(X[third:], Y[third:])
id2 = id(clf.coef_.data)
# check that coef_ haven't been re-allocated
assert_true(id1, id2)
y_pred = clf.predict(T)
assert_array_equal(y_pred, true_result)
def test_partial_fit_multiclass(self):
third = X2.shape[0] // 3
clf = self.factory(alpha=0.01)
classes = np.unique(Y2)
clf.partial_fit(X2[:third], Y2[:third], classes=classes)
assert_equal(clf.coef_.shape, (3, X2.shape[1]))
assert_equal(clf.intercept_.shape, (3,))
assert_equal(clf.decision_function([[0, 0]]).shape, (1, 3))
id1 = id(clf.coef_.data)
clf.partial_fit(X2[third:], Y2[third:])
id2 = id(clf.coef_.data)
# check that coef_ haven't been re-allocated
assert_true(id1, id2)
def test_partial_fit_multiclass_average(self):
third = X2.shape[0] // 3
clf = self.factory(alpha=0.01, average=X2.shape[0])
classes = np.unique(Y2)
clf.partial_fit(X2[:third], Y2[:third], classes=classes)
assert_equal(clf.coef_.shape, (3, X2.shape[1]))
assert_equal(clf.intercept_.shape, (3,))
clf.partial_fit(X2[third:], Y2[third:])
assert_equal(clf.coef_.shape, (3, X2.shape[1]))
assert_equal(clf.intercept_.shape, (3,))
def test_fit_then_partial_fit(self):
# Partial_fit should work after initial fit in the multiclass case.
# Non-regression test for #2496; fit would previously produce a
# Fortran-ordered coef_ that subsequent partial_fit couldn't handle.
clf = self.factory()
clf.fit(X2, Y2)
clf.partial_fit(X2, Y2) # no exception here
def _test_partial_fit_equal_fit(self, lr):
for X_, Y_, T_ in ((X, Y, T), (X2, Y2, T2)):
clf = self.factory(alpha=0.01, eta0=0.01, n_iter=2,
learning_rate=lr, shuffle=False)
clf.fit(X_, Y_)
y_pred = clf.decision_function(T_)
t = clf.t_
classes = np.unique(Y_)
clf = self.factory(alpha=0.01, eta0=0.01, learning_rate=lr,
shuffle=False)
for i in range(2):
clf.partial_fit(X_, Y_, classes=classes)
y_pred2 = clf.decision_function(T_)
assert_equal(clf.t_, t)
assert_array_almost_equal(y_pred, y_pred2, decimal=2)
def test_partial_fit_equal_fit_constant(self):
self._test_partial_fit_equal_fit("constant")
def test_partial_fit_equal_fit_optimal(self):
self._test_partial_fit_equal_fit("optimal")
def test_partial_fit_equal_fit_invscaling(self):
self._test_partial_fit_equal_fit("invscaling")
def test_regression_losses(self):
clf = self.factory(alpha=0.01, learning_rate="constant",
eta0=0.1, loss="epsilon_insensitive")
clf.fit(X, Y)
assert_equal(1.0, np.mean(clf.predict(X) == Y))
clf = self.factory(alpha=0.01, learning_rate="constant",
eta0=0.1, loss="squared_epsilon_insensitive")
clf.fit(X, Y)
assert_equal(1.0, np.mean(clf.predict(X) == Y))
clf = self.factory(alpha=0.01, loss="huber")
clf.fit(X, Y)
assert_equal(1.0, np.mean(clf.predict(X) == Y))
clf = self.factory(alpha=0.01, learning_rate="constant", eta0=0.01,
loss="squared_loss")
clf.fit(X, Y)
assert_equal(1.0, np.mean(clf.predict(X) == Y))
def test_warm_start_multiclass(self):
self._test_warm_start(X2, Y2, "optimal")
def test_multiple_fit(self):
# Test multiple calls of fit w/ different shaped inputs.
clf = self.factory(alpha=0.01, n_iter=5,
shuffle=False)
clf.fit(X, Y)
assert_true(hasattr(clf, "coef_"))
# Non-regression test: try fitting with a different label set.
y = [["ham", "spam"][i] for i in LabelEncoder().fit_transform(Y)]
clf.fit(X[:, :-1], y)
class SparseSGDClassifierTestCase(DenseSGDClassifierTestCase):
"""Run exactly the same tests using the sparse representation variant"""
factory_class = SparseSGDClassifier
###############################################################################
# Regression Test Case
class DenseSGDRegressorTestCase(unittest.TestCase, CommonTest):
"""Test suite for the dense representation variant of SGD"""
factory_class = SGDRegressor
def test_sgd(self):
# Check that SGD gives any results.
clf = self.factory(alpha=0.1, n_iter=2,
fit_intercept=False)
clf.fit([[0, 0], [1, 1], [2, 2]], [0, 1, 2])
assert_equal(clf.coef_[0], clf.coef_[1])
@raises(ValueError)
def test_sgd_bad_penalty(self):
# Check whether expected ValueError on bad penalty
self.factory(penalty='foobar', l1_ratio=0.85)
@raises(ValueError)
def test_sgd_bad_loss(self):
# Check whether expected ValueError on bad loss
self.factory(loss="foobar")
def test_sgd_averaged_computed_correctly(self):
# Tests the average regressor matches the naive implementation
eta = .001
alpha = .01
n_samples = 20
n_features = 10
rng = np.random.RandomState(0)
X = rng.normal(size=(n_samples, n_features))
w = rng.normal(size=n_features)
# simple linear function without noise
y = np.dot(X, w)
clf = self.factory(loss='squared_loss',
learning_rate='constant',
eta0=eta, alpha=alpha,
fit_intercept=True,
n_iter=1, average=True, shuffle=False)
clf.fit(X, y)
average_weights, average_intercept = self.asgd(X, y, eta, alpha)
assert_array_almost_equal(clf.coef_,
average_weights,
decimal=16)
assert_almost_equal(clf.intercept_, average_intercept, decimal=16)
def test_sgd_averaged_partial_fit(self):
# Tests whether the partial fit yields the same average as the fit
eta = .001
alpha = .01
n_samples = 20
n_features = 10
rng = np.random.RandomState(0)
X = rng.normal(size=(n_samples, n_features))
w = rng.normal(size=n_features)
# simple linear function without noise
y = np.dot(X, w)
clf = self.factory(loss='squared_loss',
learning_rate='constant',
eta0=eta, alpha=alpha,
fit_intercept=True,
n_iter=1, average=True, shuffle=False)
clf.partial_fit(X[:int(n_samples / 2)][:], y[:int(n_samples / 2)])
clf.partial_fit(X[int(n_samples / 2):][:], y[int(n_samples / 2):])
average_weights, average_intercept = self.asgd(X, y, eta, alpha)
assert_array_almost_equal(clf.coef_,
average_weights,
decimal=16)
assert_almost_equal(clf.intercept_[0], average_intercept, decimal=16)
def test_average_sparse(self):
# Checks the average weights on data with 0s
eta = .001
alpha = .01
clf = self.factory(loss='squared_loss',
learning_rate='constant',
eta0=eta, alpha=alpha,
fit_intercept=True,
n_iter=1, average=True, shuffle=False)
n_samples = Y3.shape[0]
clf.partial_fit(X3[:int(n_samples / 2)][:], Y3[:int(n_samples / 2)])
clf.partial_fit(X3[int(n_samples / 2):][:], Y3[int(n_samples / 2):])
average_weights, average_intercept = self.asgd(X3, Y3, eta, alpha)
assert_array_almost_equal(clf.coef_,
average_weights,
decimal=16)
assert_almost_equal(clf.intercept_, average_intercept, decimal=16)
def test_sgd_least_squares_fit(self):
xmin, xmax = -5, 5
n_samples = 100
rng = np.random.RandomState(0)
X = np.linspace(xmin, xmax, n_samples).reshape(n_samples, 1)
# simple linear function without noise
y = 0.5 * X.ravel()
clf = self.factory(loss='squared_loss', alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_greater(score, 0.99)
# simple linear function with noise
y = 0.5 * X.ravel() + rng.randn(n_samples, 1).ravel()
clf = self.factory(loss='squared_loss', alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_greater(score, 0.5)
def test_sgd_epsilon_insensitive(self):
xmin, xmax = -5, 5
n_samples = 100
X = np.linspace(xmin, xmax, n_samples).reshape(n_samples, 1)
# simple linear function without noise
y = 0.5 * X.ravel()
clf = self.factory(loss='epsilon_insensitive', epsilon=0.01,
alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_true(score > 0.99)
# simple linear function with noise
y = 0.5 * X.ravel() \
+ np.random.randn(n_samples, 1).ravel()
clf = self.factory(loss='epsilon_insensitive', epsilon=0.01,
alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_true(score > 0.5)
def test_sgd_huber_fit(self):
xmin, xmax = -5, 5
n_samples = 100
rng = np.random.RandomState(0)
X = np.linspace(xmin, xmax, n_samples).reshape(n_samples, 1)
# simple linear function without noise
y = 0.5 * X.ravel()
clf = self.factory(loss="huber", epsilon=0.1, alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_greater(score, 0.99)
# simple linear function with noise
y = 0.5 * X.ravel() + rng.randn(n_samples, 1).ravel()
clf = self.factory(loss="huber", epsilon=0.1, alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_greater(score, 0.5)
def test_elasticnet_convergence(self):
# Check that the SGD output is consistent with coordinate descent
n_samples, n_features = 1000, 5
rng = np.random.RandomState(0)
X = np.random.randn(n_samples, n_features)
# ground_truth linear model that generate y from X and to which the
# models should converge if the regularizer would be set to 0.0
ground_truth_coef = rng.randn(n_features)
y = np.dot(X, ground_truth_coef)
# XXX: alpha = 0.1 seems to cause convergence problems
for alpha in [0.01, 0.001]:
for l1_ratio in [0.5, 0.8, 1.0]:
cd = linear_model.ElasticNet(alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=False)
cd.fit(X, y)
sgd = self.factory(penalty='elasticnet', n_iter=50,
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=False)
sgd.fit(X, y)
err_msg = ("cd and sgd did not converge to comparable "
"results for alpha=%f and l1_ratio=%f"
% (alpha, l1_ratio))
assert_almost_equal(cd.coef_, sgd.coef_, decimal=2,
err_msg=err_msg)
@ignore_warnings
def test_partial_fit(self):
third = X.shape[0] // 3
clf = self.factory(alpha=0.01)
clf.partial_fit(X[:third], Y[:third])
assert_equal(clf.coef_.shape, (X.shape[1], ))
assert_equal(clf.intercept_.shape, (1,))
assert_equal(clf.predict([[0, 0]]).shape, (1, ))
id1 = id(clf.coef_.data)
clf.partial_fit(X[third:], Y[third:])
id2 = id(clf.coef_.data)
# check that coef_ haven't been re-allocated
assert_true(id1, id2)
def _test_partial_fit_equal_fit(self, lr):
clf = self.factory(alpha=0.01, n_iter=2, eta0=0.01,
learning_rate=lr, shuffle=False)
clf.fit(X, Y)
y_pred = clf.predict(T)
t = clf.t_
clf = self.factory(alpha=0.01, eta0=0.01,
learning_rate=lr, shuffle=False)
for i in range(2):
clf.partial_fit(X, Y)
y_pred2 = clf.predict(T)
assert_equal(clf.t_, t)
assert_array_almost_equal(y_pred, y_pred2, decimal=2)
def test_partial_fit_equal_fit_constant(self):
self._test_partial_fit_equal_fit("constant")
def test_partial_fit_equal_fit_optimal(self):
self._test_partial_fit_equal_fit("optimal")
def test_partial_fit_equal_fit_invscaling(self):
self._test_partial_fit_equal_fit("invscaling")
def test_loss_function_epsilon(self):
clf = self.factory(epsilon=0.9)
clf.set_params(epsilon=0.1)
assert clf.loss_functions['huber'][1] == 0.1
class SparseSGDRegressorTestCase(DenseSGDRegressorTestCase):
# Run exactly the same tests using the sparse representation variant
factory_class = SparseSGDRegressor
def test_l1_ratio():
# Test if l1 ratio extremes match L1 and L2 penalty settings.
X, y = datasets.make_classification(n_samples=1000,
n_features=100, n_informative=20,
random_state=1234)
# test if elasticnet with l1_ratio near 1 gives same result as pure l1
est_en = SGDClassifier(alpha=0.001, penalty='elasticnet',
l1_ratio=0.9999999999, random_state=42).fit(X, y)
est_l1 = SGDClassifier(alpha=0.001, penalty='l1', random_state=42).fit(X, y)
assert_array_almost_equal(est_en.coef_, est_l1.coef_)
# test if elasticnet with l1_ratio near 0 gives same result as pure l2
est_en = SGDClassifier(alpha=0.001, penalty='elasticnet',
l1_ratio=0.0000000001, random_state=42).fit(X, y)
est_l2 = SGDClassifier(alpha=0.001, penalty='l2', random_state=42).fit(X, y)
assert_array_almost_equal(est_en.coef_, est_l2.coef_)
def test_underflow_or_overlow():
with np.errstate(all='raise'):
# Generate some weird data with hugely unscaled features
rng = np.random.RandomState(0)
n_samples = 100
n_features = 10
X = rng.normal(size=(n_samples, n_features))
X[:, :2] *= 1e300
assert_true(np.isfinite(X).all())
# Use MinMaxScaler to scale the data without introducing a numerical
# instability (computing the standard deviation naively is not possible
# on this data)
X_scaled = MinMaxScaler().fit_transform(X)
assert_true(np.isfinite(X_scaled).all())
# Define a ground truth on the scaled data
ground_truth = rng.normal(size=n_features)
y = (np.dot(X_scaled, ground_truth) > 0.).astype(np.int32)
assert_array_equal(np.unique(y), [0, 1])
model = SGDClassifier(alpha=0.1, loss='squared_hinge', n_iter=500)
# smoke test: model is stable on scaled data
model.fit(X_scaled, y)
assert_true(np.isfinite(model.coef_).all())
# model is numerically unstable on unscaled data
msg_regxp = (r"Floating-point under-/overflow occurred at epoch #.*"
" Scaling input data with StandardScaler or MinMaxScaler"
" might help.")
assert_raises_regexp(ValueError, msg_regxp, model.fit, X, y)
def test_numerical_stability_large_gradient():
# Non regression test case for numerical stability on scaled problems
# where the gradient can still explode with some losses
model = SGDClassifier(loss='squared_hinge', n_iter=10, shuffle=True,
penalty='elasticnet', l1_ratio=0.3, alpha=0.01,
eta0=0.001, random_state=0)
with np.errstate(all='raise'):
model.fit(iris.data, iris.target)
assert_true(np.isfinite(model.coef_).all())
def test_large_regularization():
# Non regression tests for numerical stability issues caused by large
# regularization parameters
for penalty in ['l2', 'l1', 'elasticnet']:
model = SGDClassifier(alpha=1e5, learning_rate='constant', eta0=0.1,
n_iter=5, penalty=penalty, shuffle=False)
with np.errstate(all='raise'):
model.fit(iris.data, iris.target)
assert_array_almost_equal(model.coef_, np.zeros_like(model.coef_))
| bsd-3-clause |
okadate/romspy | romspy/hview/obsolate/his.py | 1 | 10872 | # coding: utf-8
# (c) 2015-09-26 Teruhisa Okada
import netCDF4
import matplotlib.pyplot as plt
import datetime
import numpy as np
from scipy.interpolate import Rbf
import romspy
class Dataset():
def __init__(self, ncfile, mapfile=None, grdfile=None):
self.ncfile = ncfile
if mapfile is not None:
self.mapfile = mapfile
else:
self.mapfile = 'deg_OsakaBayMap_okada.bln'
self.grdfile = grdfile
self.nc = netCDF4.Dataset(self.ncfile, 'a')
self.X = None
self.Y = None
self.X2 = None
self.Y2 = None
def print_time(self, which='ends', name='ocean_time'):
print "\nprint_time(which={}, name={}, tunit={})".format(which, name, romspy.JST)
nc = self.nc
if which == 'ends':
t = len(nc.dimensions[name])
start = nc.variables[name][0]
end = nc.variables[name][t-1]
print netCDF4.num2date(start, romspy.JST), 0
print netCDF4.num2date(end, romspy.JST), t-1
elif which == 'all':
time = nc.variables[name][:]
for t in range(len(time)):
print netCDF4.num2date(time[t], romspy.JST), t
else:
print 'You should select "ends" or "all"'
def print_varname(self, ndim=None):
print '\nprint_varname(ndim={})'.format(ndim)
if ndim is not None:
for vname in self.nc.variables.keys():
if self.nc.variables[vname].ndim == ndim:
print vname,
print ''
else:
print self.nc.variables.keys()
def get_varname(self, ndim=None):
if ndim is not None:
varnames = []
for vname in self.nc.variables.keys():
if self.nc.variables[vname].ndim == ndim:
varnames.append(vname)
return varnames
else:
return self.nc.variables.keys()
def get_xy(self, method, step=1):
"""
        Return the x/y grids: the half-cell-shifted grid for pcolor, and the
        unshifted (optionally subsampled) rho grid otherwise (quiver/contour).
        Created 2015-11-08
"""
if self.X is None:
if self.grdfile is not None:
grd = netCDF4.Dataset(self.grdfile, 'r')
else:
grd = self.nc
x_rho = grd.variables['lon_rho'][0,:]
y_rho = grd.variables['lat_rho'][:,0]
X, Y = np.meshgrid(x_rho, y_rho)
self.X = X - 0.5 * (x_rho[1] - x_rho[0])
self.Y = Y - 0.5 * (y_rho[1] - y_rho[0])
self.X2 = X
self.Y2 = Y
if method == 'pcolor':
return self.X, self.Y
else:
return self.X2[::step, ::step], self.Y2[::step, ::step]
def get_time(self, time):
if type(time) == datetime.datetime:
t = netCDF4.date2num(time, romspy.JST)
ocean_time = self.nc.variables['ocean_time'][:]
t = np.where(ocean_time==t)[0][0]
elif type(time) == int:
t = time
time = netCDF4.num2date(self.nc.variables['ocean_time'][t], romspy.JST)
else:
print 'ERROR: your type(time) is {}.\ntype(time) must be datetime.datetime or int\n'.format(type(time))
return t, time
def hview(self, vname, **kw):
"""
        2015-11-08: added ax_heatmap and ax_vecmap handling to support vector plots
"""
time = kw.pop('time', -1)
k = kw.pop('k', 20)
cff = kw.pop('cff', None)
method = kw.pop('method', 'pcolor')
unit = kw.pop('unit', 'g')
levels = kw.pop('levels', None)
if cff is None:
cff = romspy.unit2cff(vname, unit)
if levels is None:
levels = romspy.levels(vname, unit)
print 'cff={}'.format(cff)
if vname == 'velocity':
var = self.nc.variables['u']
else:
var = self.nc.variables[vname]
if var.ndim > 2:
t, dtime = self.get_time(time)
if vname == 'velocity':
self.add_quiver(vname, t, k, **kw)
elif method == 'limit':
self.add_pcolor_limit(vname, t, k, **kw)
else:
if 'rbf' in method:
self.add_contourf_rbf(vname, t, k, cff, levels, **kw)
if 'pcolor' in method:
self.add_pcolor(vname, t, k, cff, levels, **kw)
if 'contour' in method:
self.add_contour(vname, t, k, cff, levels, **kw)
if 'fill' in method:
self.add_contourf(vname, t, k, cff, levels, **kw)
if self.mapfile is not None:
romspy.basemap(self.mapfile)
if k == 20:
plt.text(135.25, 34.25, 'surface layer')
elif k == 1:
plt.text(135.25, 34.25, 'bottom layer')
if var.ndim == 2:
plt.title('Model domein & bathymetry')
elif ('avg' in self.ncfile) or ('dia' in self.ncfile):
time_string = datetime.datetime.strftime(dtime,'%Y-%m')
plt.title('Average ({})'.format(time_string))
else:
plt.title(datetime.datetime.strftime(dtime,'%Y-%m-%d %H:%M:%S'))
return plt.gca()
def add_pcolor(self, vname, t, k, cff, levels, **kw):
"""
        Draw the pcolor map and return its handle.
        Created 2015-11-08
"""
cblabel = kw.pop('cblabel', vname)
X, Y = self.get_xy('pcolor')
var = self.nc.variables[vname]
if var.ndim == 4:
var2d = var[t,k-1,:,:] * cff
elif var.ndim == 3:
var2d = var[t,:,:] * cff
else:
var2d = var[:,:] * cff
var2d = np.ma.array(var2d)
inf = var2d > 10**20
zero = var2d <= 0
var2d[inf] = np.ma.masked
var2d[zero] = np.ma.masked
ax = plt.gca()
if levels is not None:
P = ax.pcolor(X, Y, var2d, vmin=levels[0], vmax=levels[-1])
else:
P = ax.pcolor(X, Y, var2d)
cbar = plt.colorbar(P)
cbar.ax.set_ylabel(cblabel)
return P
def add_pcolor_limit(self, vname, t, k, **kw):
"""
        Plot the nutrient (PO4 or DIN) limitation factor (0-1) as a pcolor map.
        Created 2015-11-30
"""
X, Y = self.get_xy('pcolor')
if vname == 'PO4':
cblabel = kw.pop('cblabel', '{}-limitation'.format(vname))
K_PO4 = kw.pop('K_PO4')
var2d = self.nc.variables[vname][t,k-1,:,:] * K_PO4
var2d = 1.0 / (1.0 + var2d)
elif vname in ['NH4', 'NO3']:
cblabel = kw.pop('cblabel', 'DIN-limitation')
K_NH4 = kw.pop('K_NH4')
K_NO3 = kw.pop('K_NO3')
cff1 = self.nc.variables['NH4'][t,k-1,:,:] * K_NH4
cff2 = self.nc.variables['NO3'][t,k-1,:,:] * K_NO3
inhNH4 = 1.0 / (1.0 + cff1)
L_NH4 = cff1 / (1.0 + cff1)
L_NO3 = cff2 * inhNH4 / (1.0 + cff2)
var2d = L_NH4 + L_NO3
else:
raise AssertionError('vname must be NH4, NO3 or PO4.')
ax = plt.gca()
P = ax.pcolor(X, Y, var2d, vmin=0, vmax=1)
cbar = plt.colorbar(P)
cbar.ax.set_ylabel(cblabel)
return P
def add_contour(self, vname, t, k, cff, levels, **kw):
"""
        Draw the contour lines and return the contour-set handle.
        Created 2015-11-08
"""
fmt = kw.pop('fmt', '%i')
extend = kw.pop('extend', 'max')
X, Y = self.get_xy('contour')
var = self.nc.variables[vname]
if var.ndim == 4:
var2d = var[t,k-1,:,:] * cff
elif var.ndim == 3:
var2d = var[t,:,:] * cff
else:
var2d = var[:,:] * cff
ax = plt.gca()
if levels is not None:
C = ax.contour(X, Y, var2d, levels, colors='w', extend=extend)
else:
C = ax.contour(X, Y, var2d, colors='w')
        if fmt != 'off':
C.clabel(fmt=fmt, colors='k') # ,fontsize=9)
return C
def add_contourf(self, vname, t, k, cff, levels, **kw):
"""
        Draw the filled contours and return the contour-set handle.
        Created 2015-11-08
"""
cblabel = kw.pop('cblabel', vname)
extend = kw.pop('extend', 'max')
X, Y = self.get_xy('contour')
var = self.nc.variables[vname]
if var.ndim == 4:
var2d = var[t,k-1,:,:] * cff
elif var.ndim == 3:
var2d = var[t,:,:] * cff
else:
var2d = var[:,:] * cff
var2d = np.ma.array(var2d)
inf = var2d > 10**20
zero = var2d <= 0
var2d[inf] = np.ma.masked
var2d[zero] = np.ma.masked
print var2d
ax = plt.gca()
if levels is not None:
F = ax.contourf(X, Y, var2d, levels, extend=extend)
else:
F = ax.contourf(X, Y, var2d)
CB = plt.colorbar(F)
CB.ax.set_ylabel(cblabel)
return F
def add_contourf_rbf(self, vname, t, k, cff, levels, **kw):
"""
"""
cblabel = kw.pop('cblabel', vname)
extend = kw.pop('extend', 'max')
X, Y = self.get_xy('contour')
var = self.nc.variables[vname]
if var.ndim == 4:
var2d = var[t,k-1,:,:] * cff
elif var.ndim == 3:
var2d = var[t,:,:] * cff
else:
var2d = var[:,:] * cff
mask1 = (0 <= var2d)
mask2 = (var2d < 10**20)
var1d = var2d[mask1 & mask2].flatten()
x1d = X[mask1 & mask2].flatten()
y1d = Y[mask1 & mask2].flatten()
print var1d, x1d, y1d
rbf = Rbf(x1d, y1d, var1d)
var2d = rbf(X, Y)
ax = plt.gca()
if levels is not None:
F = ax.contourf(X, Y, var2d, levels, extend=extend)
else:
F = ax.contourf(X, Y, var2d)
CB = plt.colorbar(F)
CB.ax.set_ylabel(cblabel)
return F
def add_quiver(self, vname, t, k, **kw):
"""
        Draw the velocity vectors (quiver) and return the quiver handle.
        Created 2015-11-08
"""
step = kw.pop('step', 3)
scale = kw.pop('scale', 5)
X, Y = self.get_xy('quiver', step)
if 'u_eastward' in self.nc.variables.keys():
u = self.nc.variables['u_eastward'][t,k-1,::step,::step]
v = self.nc.variables['v_northward'][t,k-1,::step,::step]
else:
u = self.nc.variables['u'][t,k-1,::step,::step]
v = self.nc.variables['v'][t,k-1,::step,::step]
ax = plt.gca()
print X.shape, Y.shape, u.shape, v.shape
if 'u_eastward' in self.nc.variables.keys():
Q = ax.quiver(X, Y, u, v, units='width', angles='xy', scale=scale)
else:
Q = ax.quiver(X[:-1,:], Y[:-1,:], u[:-1,:], v, units='width', angles='xy', scale=scale)
plt.quiverkey(Q, 0.9, 0.1, 1.0/scale, '1 m/s')
return Q
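# -----------------------------------------------------------------------------
# Hedged usage sketch (added for illustration; not part of the original module).
# The file names and the variable name 'temp' are assumptions only - replace
# them with an actual ROMS history/grid file and variable.
if __name__ == '__main__':
    his = Dataset('ob500_his.nc', grdfile='ob500_grd.nc')  # hypothetical files
    his.print_time(which='ends')
    his.print_varname(ndim=4)
    # surface (k=20) map of 'temp' at the last output: pcolor with contour lines
    his.hview('temp', time=-1, k=20, method='pcolor+contour')
    plt.savefig('temp_surface.png', dpi=150)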
| mit |
low-sky/pyspeckit | pyspeckit/spectrum/models/gaussfitter.py | 4 | 10301 | """
===============
Gaussian fitter
===============
.. moduleauthor:: Adam Ginsburg <[email protected]>
Created 3/17/08
Original version available at http://code.google.com/p/agpy/source/browse/trunk/agpy/gaussfitter.py
(the version below uses a Class instead of independent functions)
"""
from __future__ import print_function
import numpy
from numpy.ma import median
from numpy import pi
from pyspeckit.mpfit import mpfit
import matplotlib.cbook as mpcb
from . import mpfit_messages
from . import model
from six.moves import xrange
class gaussian_fitter(model.SpectralModel):
"""
A rather complicated Gaussian fitter class. Inherits from, but overrides
most components of, :mod:`model.SpectralModel`
"""
def __init__(self):
self.npars = 3
self.npeaks = 1
self.onepeakgaussfit = self._fourparfitter(self.onepeakgaussian)
def __call__(self,*args,**kwargs):
return self.multigaussfit(*args,**kwargs)
def onepeakgaussian(self, x,H,A,dx,w):
"""
Returns a 1-dimensional gaussian of form
H+A*numpy.exp(-(x-dx)**2/(2*w**2))
[height,amplitude,center,width]
"""
x = numpy.array(x) # make sure xarr is no longer a spectroscopic axis
return H+A*numpy.exp(-(x-dx)**2/(2*w**2))
def multipeakgaussian(self, x, pars):
"""
Returns flux at position x due to contributions from multiple Gaussians.
"""
x = numpy.array(x) # make sure xarr is no longer a spectroscopic axis
pars = numpy.reshape(pars, (len(pars) / 3, 3))
result = 0
for fit in pars: result += self.onepeakgaussian(x, 0, fit[0], fit[1], fit[2])
return result
def slope(self, x):
"""
Return slope at position x for multicomponent Gaussian fit. Need this in measurements class for
finding the FWHM of multicomponent lines whose centroids are not identical.
"""
pars = numpy.reshape(self.mpp, (len(self.mpp) / 3, 3))
result = 0
for fit in pars:
result += self.onepeakgaussian(x, 0, fit[0], fit[1], fit[2]) * (-2. * (x - fit[1]) / 2. / fit[2]**2)
return result
def n_gaussian(self, pars=None,a=None,dx=None,sigma=None):
"""
Returns a function that sums over N gaussians, where N is the length of
a,dx,sigma *OR* N = len(pars) / 3
The background "height" is assumed to be zero (you must "baseline" your
spectrum before fitting)
pars - a list with len(pars) = 3n, assuming a,dx,sigma repeated
dx - offset (velocity center) values
sigma - line widths
a - amplitudes
"""
if len(pars) % 3 == 0:
a = [pars[ii] for ii in xrange(0,len(pars),3)]
dx = [pars[ii] for ii in xrange(1,len(pars),3)]
sigma = [pars[ii] for ii in xrange(2,len(pars),3)]
elif not(len(dx) == len(sigma) == len(a)):
raise ValueError("Wrong array lengths! dx: %i sigma: %i a: %i" % (len(dx),len(sigma),len(a)))
def g(x):
v = numpy.zeros(len(x))
for ii in range(len(pars)/3):
v += a[ii] * numpy.exp( - ( x - dx[ii] )**2 / (2.0*sigma[ii]**2) )
return v
return g
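    # Illustration (added comment): n_gaussian(pars=[1, 0, 0.5, 0.7, 2, 0.3])
    # returns a callable evaluating two Gaussians, amplitude 1 centred at 0
    # (sigma 0.5) plus amplitude 0.7 centred at 2 (sigma 0.3).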
def multigaussfit(self, xax, data, npeaks=1, err=None, params=[1,0,1],
fixed=[False,False,False], limitedmin=[False,False,True],
limitedmax=[False,False,False], minpars=[0,0,0], maxpars=[0,0,0],
quiet=True, shh=True, veryverbose=False, negamp=None,
tied = ['', '', ''], parinfo=None, debug=False, **kwargs):
"""
An improvement on onepeakgaussfit. Lets you fit multiple gaussians.
Inputs:
xax - x axis
data - y axis
npeaks - How many gaussians to fit? Default 1 (this could supersede onepeakgaussfit)
err - error corresponding to data
These parameters need to have length = 3*npeaks. If npeaks > 1 and length = 3, they will
be replicated npeaks times, otherwise they will be reset to defaults:
params - Fit parameters: [amplitude, offset, width] * npeaks
If len(params) % 3 == 0, npeaks will be set to len(params) / 3
fixed - Is parameter fixed?
limitedmin/minpars - set lower limits on each parameter (default: width>0)
limitedmax/maxpars - set upper limits on each parameter
tied - link parameters together
quiet - should MPFIT output each iteration?
shh - output final parameters?
kwargs are passed to mpfit
Returns:
Fit parameters
Model
Fit errors
chi2
"""
if len(params) != npeaks and (len(params) / 3) > npeaks:
self.npeaks = len(params) / 3
else:
self.npeaks = npeaks
if isinstance(params,numpy.ndarray): params=params.tolist()
# make sure all various things are the right length; if they're not, fix them using the defaults
        # multiformaldehydefit should process negamp directly
        #if kwargs.has_key('negamp') is False: kwargs['negamp'] = None
pardict = {"params":params,"fixed":fixed,"limitedmin":limitedmin,"limitedmax":limitedmax,"minpars":minpars,"maxpars":maxpars,"tied":tied}
for parlistname in pardict:
parlist = pardict[parlistname]
if len(parlist) != 3*self.npeaks:
# if you leave the defaults, or enter something that can be multiplied by 3 to get to the
# right number of formaldehydeians, it will just replicate
if veryverbose: print("Correcting length of parameter %s" % parlistname)
if len(parlist) == 3:
parlist *= self.npeaks
elif parlistname=="params":
parlist[:] = [1,0,1] * self.npeaks
elif parlistname=="fixed":
parlist[:] = [False,False,False] * self.npeaks
elif parlistname=="limitedmax":
if negamp is None: parlist[:] = [False,False,False] * self.npeaks
elif negamp is False: parlist[:] = [False,False,False] * self.npeaks
else: parlist[:] = [True,False,False] * self.npeaks
elif parlistname=="limitedmin":
if negamp is None: parlist[:] = [False,False,True] * self.npeaks # Lines can't have negative width!
elif negamp is False: parlist[:] = [True,False,True] * self.npeaks
else: parlist[:] = [False,False,True] * self.npeaks
elif parlistname=="minpars" or parlistname=="maxpars":
parlist[:] = [0,0,0] * self.npeaks
elif parlistname=="tied":
parlist[:] = ['','',''] * self.npeaks
# mpfit doesn't recognize negamp, so get rid of it now that we're done setting limitedmin/max and min/maxpars
#if kwargs.has_key('negamp'): kwargs.pop('negamp')
def mpfitfun(x,y,err):
if err is None:
def f(p,fjac=None): return [0,(y-self.n_gaussian(pars=p)(x))]
else:
def f(p,fjac=None): return [0,(y-self.n_gaussian(pars=p)(x))/err]
return f
if xax is None:
xax = numpy.arange(len(data))
parnames = {0:"AMPLITUDE",1:"SHIFT",2:"WIDTH"}
if parinfo is None:
parinfo = [ {'n':ii, 'value':params[ii],
'limits':[minpars[ii],maxpars[ii]],
'limited':[limitedmin[ii],limitedmax[ii]], 'fixed':fixed[ii],
'parname':parnames[ii%3]+str(ii/3), 'error':ii, 'tied':tied[ii]}
for ii in xrange(len(params)) ]
if veryverbose:
print("GUESSES: ")
print("\n".join(["%s: %s" % (p['parname'],p['value']) for p in parinfo]))
if debug:
for p in parinfo: print(p)
mp = mpfit(mpfitfun(xax,data,err),parinfo=parinfo,quiet=quiet,**kwargs)
mpp = mp.params
if mp.perror is not None: mpperr = mp.perror
else: mpperr = mpp*0
chi2 = mp.fnorm
if mp.status == 0:
raise Exception(mp.errmsg)
if not shh:
print("Fit status: ",mp.status)
print("Fit error message: ",mp.errmsg)
print("Fit message: ",mpfit_messages[mp.status])
print("Final fit values: ")
for i,p in enumerate(mpp):
parinfo[i]['value'] = p
print(parinfo[i]['parname'],p," +/- ",mpperr[i])
print("Chi2: ",mp.fnorm," Reduced Chi2: ",mp.fnorm/len(data)," DOF:",len(data)-len(mpp))
self.mp = mp
self.mpp = mpp
self.mpperr = mpperr
self.model = self.n_gaussian(pars=mpp)(xax)
return mpp,self.n_gaussian(pars=mpp)(xax),mpperr,chi2
def annotations(self):
label_list = [(
"$A(%i)$=%6.4g $\\pm$ %6.4g" % (jj,self.mpp[0+jj*self.npars],self.mpperr[0+jj*self.npars]),
"$x(%i)$=%6.4g $\\pm$ %6.4g" % (jj,self.mpp[1+jj*self.npars],self.mpperr[1+jj*self.npars]),
"$\\sigma(%i)$=%6.4g $\\pm$ %6.4g" % (jj,self.mpp[2+jj*self.npars],self.mpperr[2+jj*self.npars])
) for jj in range(self.npeaks)]
labels = tuple(mpcb.flatten(label_list))
return labels
def components(self,xarr,modelpars):
modelcomponents = [ self.onepeakgaussian(xarr,
0.0,modelpars[3*i],modelpars[3*i+1],modelpars[3*i+2]) for i in range(self.npeaks)]
return modelcomponents
def integral(self, modelpars):
"""
Return the integral of the individual components (ignoring height)
"""
return self.model.sum()
# this is the "proper" way to do it, but the above line was used for compatibility with other models
integ = 0
if len(modelpars) % 3 == 0:
for amp,cen,width in numpy.reshape(modelpars,[len(modelpars)/3,3]):
integ += amp*width*numpy.sqrt(2.0*numpy.pi)
return integ
n_modelfunc = n_gaussian
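# -----------------------------------------------------------------------------
# Hedged usage sketch (added for illustration; not part of the original module):
# fit two blended Gaussians to synthetic data with multigaussfit. The noise
# level and the initial guesses below are arbitrary example values.
if __name__ == '__main__':
    xarr = numpy.linspace(-5, 5, 200)
    truth = (1.0 * numpy.exp(-(xarr + 1.0)**2 / (2 * 0.6**2)) +
             0.7 * numpy.exp(-(xarr - 1.2)**2 / (2 * 0.4**2)))
    data = truth + numpy.random.normal(0, 0.02, xarr.size)
    fitter = gaussian_fitter()
    # params are [amplitude, center, width] repeated for each peak
    pars, fitcurve, parerrs, chi2 = fitter.multigaussfit(
        xarr, data, npeaks=2, params=[1, -1, 0.5, 1, 1, 0.5],
        quiet=True, shh=True)
    print("best-fit [amp, center, width] per peak:", pars)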
| mit |
jun-wan/scilifelab | scripts/plot_complexity_curves.py | 4 | 5925 | import sys
import os
import yaml
import glob
import subprocess
import argparse
import pandas as pd
from matplotlib import pyplot as plt
import numpy as np
def main(ccurves, output_name='complexity_curves', x_min=0, x_max=500000000):
"""
    This script plots the complexity curves generated for one or several libraries. The script is designed to work with
    the output produced by preseq (http://smithlabresearch.org/software/preseq/). preseq version 1.0.0 is currently
    supported by this script (the script is also compatible with version 0.1.0). Preseq is a tool used to estimate the
    library complexity and/or to predict the library complexity. In the first case "preseq c_curve" should be used. In
    the second case "preseq lc_extrap" should be used. Please refer to the preseq manual available at
    http://smithlabresearch.org/wp-content/uploads/manual.pdf for examples (pages 12 to 14 are the most informative ones)
"""
if x_min < 0 or x_max <= x_min:
sys.exit("problem with x-min or x-max ({}, {}). x-min must be equal or higher to 0 and less than x-max".format(x_min, x_max))
# Set up plot params
legend = [[],[]]
global_x_max_ccurve_limit = 0
global_y_max_ccurve_limit = 0
fig = plt.figure()
ax = fig.add_subplot(111)
max_label_length = 0
# Each ccurve will get a different color
colormap = plt.cm.gist_ncar
plt.gca().set_color_cycle([colormap(i) for i in np.linspace(0, 0.9, len(ccurves))])
# Go through inputs and plot line
for ccurve in ccurves:
print "processing {}".format(ccurve)
ccurve_table = pd.io.parsers.read_csv(ccurve, sep='\t', header=0)
ccurve_TOTAL_READS = []
ccurve_EXPECTED_DISTINCT = []
if "TOTAL_READS" in ccurve_table:
ccurve_TOTAL_READS = ccurve_table["TOTAL_READS"].tolist()
ccurve_EXPECTED_DISTINCT = ccurve_table["EXPECTED_DISTINCT"].tolist()
elif "total_reads" in ccurve_table:
ccurve_TOTAL_READS = ccurve_table["total_reads"].tolist()
ccurve_EXPECTED_DISTINCT = ccurve_table["distinct_reads"].tolist()
else:
sys.exit("Error, table {} is not in the expected format... has been generated with preseq?".format(ccurve))
# I need to find the interpolation point to print the plots
x_mim_ccurve_limit = computeLimit(x_min, ccurve_TOTAL_READS)
x_max_ccurve_limit = computeLimit(x_max, ccurve_TOTAL_READS)
if x_max_ccurve_limit > global_x_max_ccurve_limit:
global_x_max_ccurve_limit = x_max_ccurve_limit
if ccurve_EXPECTED_DISTINCT[x_max_ccurve_limit] > global_y_max_ccurve_limit:
global_y_max_ccurve_limit = ccurve_EXPECTED_DISTINCT[x_max_ccurve_limit]
p, = ax.plot(ccurve_TOTAL_READS[x_mim_ccurve_limit:x_max_ccurve_limit], ccurve_EXPECTED_DISTINCT[x_mim_ccurve_limit:x_max_ccurve_limit])
sample_name = os.path.splitext(ccurve)[0]
if(sample_name[:32] == 'accepted_hits_sorted_dupRemoved_'):
sample_name = sample_name[32:]
if(len(sample_name) > max_label_length):
max_label_length = len(sample_name)
legend[0].append(p)
legend[1].append(sample_name)
# plot perfect library as dashed line
plt.plot([0, x_max], [0, x_max], color='black', linestyle='--', linewidth=1)
plt.ylim(0, global_y_max_ccurve_limit + global_y_max_ccurve_limit*0.2)
# label the axis
plt.ylabel('Unique Molecules')
plt.xlabel('Total Molecules (including duplicates)')
plt.title("preseq Complexity Curves")
# Sort out some of the nastier plotting defaults
ax.tick_params(top=False, right=False, direction='out')
# Move the subplot around to fit in the legend
box = ax.get_position()
ax.set_position([0.08, box.y0, (box.width * 0.78)-0.02, box.height])
font = {'size': 5}
if len(legend[1]) <= 20 and max_label_length <= 45:
font = {'size': 6}
if len(legend[1]) <= 20 and max_label_length <= 30:
font = {'size': 8}
if len(legend[1]) <= 20 and max_label_length <= 10:
font = {'size': 12}
ax.legend(legend[0], legend[1],loc='center left', bbox_to_anchor=(1.01, 0.5), prop=font)
# now save the plot
plt.savefig(output_name)
plt.clf()
return 0
def computeLimit(value, ccurve_TOTAL_READS):
"""This function returns the index of ccurve_TOTAL_READS containing the closest value to x_max"""
if ccurve_TOTAL_READS[-1] < value:
sys.exit("Attention: value is set to a value higher than the highest extrapolated point by preseq (value={}, ccurve_TOTAL_READS[-1]={}). Please specify a lower m-max.".format(value, ccurve_TOTAL_READS[-1]))
first_point = 0
last_point = len(ccurve_TOTAL_READS)
while first_point != last_point:
middle_point = (first_point + last_point)/2
middle_value = ccurve_TOTAL_READS[middle_point]
if middle_value == value:
return middle_point
elif middle_value >= value:
last_point = middle_point -1
else:
first_point = middle_point +1
return first_point
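# Example (added for illustration): with ccurve_TOTAL_READS = [0, 1e6, 2e6, 3e6, 4e6],
# computeLimit(2e6, ...) returns 2 (exact match); for values that fall between two
# points the bisection settles on a neighbouring index, which is sufficient for
# clipping the plotted range.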
if __name__ == '__main__':
parser = argparse.ArgumentParser("plot_complexity_curves.py", description=main.__doc__)
parser.add_argument('ccurves', metavar='<preseq file>', nargs='+',
help="List of input files generated by preseq")
parser.add_argument('-o', '--output-name', dest='output_name', type=str, default='complexity_curves',
help="output name (.png will be automatically added)")
parser.add_argument('-m', '--x-min', dest='x_min', type=int, default=0,
help="lower x-limit (default 0)")
parser.add_argument('-x', '--x-max', dest='x_max', type=int, default=500000000,
help="upper x-limit (default 500 million)")
kwargs = vars(parser.parse_args())
main(**kwargs)
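# -----------------------------------------------------------------------------
# Hedged usage sketch (added for illustration; not part of the original script).
# Assuming preseq lc_extrap output files named P1_ccurve.txt and P2_ccurve.txt
# (hypothetical names), the script could be invoked as:
#
#   python plot_complexity_curves.py P1_ccurve.txt P2_ccurve.txt \
#       -o complexity_curves -m 0 -x 200000000
#
# which writes complexity_curves.png with one curve per library plus the
# dashed "perfect library" diagonal.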
| mit |
Bismarrck/tensorflow | tensorflow/contrib/metrics/python/ops/metric_ops_test.py | 17 | 282180 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for metric_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.contrib import metrics as metrics_lib
from tensorflow.contrib.metrics.python.ops import metric_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes as dtypes_lib
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
NAN = float('nan')
metrics = metrics_lib
def _enqueue_vector(sess, queue, values, shape=None):
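  """Enqueues `values` (shaped `shape`, default (1, len(values))) onto `queue` as a constant tensor, running the enqueue op in `sess`."""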
if not shape:
shape = (1, len(values))
dtype = queue.dtypes[0]
sess.run(
queue.enqueue(constant_op.constant(values, dtype=dtype, shape=shape)))
def _binary_2d_label_to_sparse_value(labels):
"""Convert dense 2D binary indicator tensor to sparse tensor.
Only 1 values in `labels` are included in result.
Args:
labels: Dense 2D binary indicator tensor.
Returns:
`SparseTensorValue` whose values are indices along the last dimension of
`labels`.
"""
indices = []
values = []
batch = 0
for row in labels:
label = 0
xi = 0
for x in row:
if x == 1:
indices.append([batch, xi])
values.append(label)
xi += 1
else:
assert x == 0
label += 1
batch += 1
shape = [len(labels), len(labels[0])]
return sparse_tensor.SparseTensorValue(
np.array(indices, np.int64), np.array(values, np.int64),
np.array(shape, np.int64))
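# Worked example (added for illustration):
#   _binary_2d_label_to_sparse_value([[0, 1, 0], [1, 0, 1]])
# yields indices=[[0, 0], [1, 0], [1, 1]], values=[1, 0, 2], dense_shape=[2, 3],
# i.e. the values are the class ids of the 1-entries of each row; the 3D
# variant below behaves the same way for every inner row.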
def _binary_2d_label_to_sparse(labels):
"""Convert dense 2D binary indicator tensor to sparse tensor.
Only 1 values in `labels` are included in result.
Args:
labels: Dense 2D binary indicator tensor.
Returns:
`SparseTensor` whose values are indices along the last dimension of
`labels`.
"""
return sparse_tensor.SparseTensor.from_value(
_binary_2d_label_to_sparse_value(labels))
def _binary_3d_label_to_sparse_value(labels):
"""Convert dense 3D binary indicator tensor to sparse tensor.
Only 1 values in `labels` are included in result.
Args:
labels: Dense 2D binary indicator tensor.
Returns:
`SparseTensorValue` whose values are indices along the last dimension of
`labels`.
"""
indices = []
values = []
for d0, labels_d0 in enumerate(labels):
for d1, labels_d1 in enumerate(labels_d0):
d2 = 0
for class_id, label in enumerate(labels_d1):
if label == 1:
values.append(class_id)
indices.append([d0, d1, d2])
d2 += 1
else:
assert label == 0
shape = [len(labels), len(labels[0]), len(labels[0][0])]
return sparse_tensor.SparseTensorValue(
np.array(indices, np.int64), np.array(values, np.int64),
np.array(shape, np.int64))
def _binary_3d_label_to_sparse(labels):
"""Convert dense 3D binary indicator tensor to sparse tensor.
Only 1 values in `labels` are included in result.
Args:
labels: Dense 2D binary indicator tensor.
Returns:
`SparseTensor` whose values are indices along the last dimension of
`labels`.
"""
return sparse_tensor.SparseTensor.from_value(
_binary_3d_label_to_sparse_value(labels))
def _assert_nan(test_case, actual):
test_case.assertTrue(math.isnan(actual), 'Expected NAN, got %s.' % actual)
def _assert_metric_variables(test_case, expected):
test_case.assertEquals(
set(expected), set(v.name for v in variables.local_variables()))
test_case.assertEquals(
set(expected),
set(v.name for v in ops.get_collection(ops.GraphKeys.METRIC_VARIABLES)))
class StreamingMeanTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def testVars(self):
metrics.streaming_mean(array_ops.ones([4, 3]))
_assert_metric_variables(self, ('mean/count:0', 'mean/total:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_mean(
array_ops.ones([4, 3]), metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_mean(
array_ops.ones([4, 3]), updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testBasic(self):
with self.cached_session() as sess:
values_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
mean, update_op = metrics.streaming_mean(values)
sess.run(variables.local_variables_initializer())
for _ in range(4):
sess.run(update_op)
self.assertAlmostEqual(1.65, sess.run(mean), 5)
def testUpdateOpsReturnsCurrentValue(self):
with self.cached_session() as sess:
values_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
mean, update_op = metrics.streaming_mean(values)
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.5, sess.run(update_op), 5)
self.assertAlmostEqual(1.475, sess.run(update_op), 5)
self.assertAlmostEqual(12.4 / 6.0, sess.run(update_op), 5)
self.assertAlmostEqual(1.65, sess.run(update_op), 5)
self.assertAlmostEqual(1.65, sess.run(mean), 5)
def test1dWeightedValues(self):
with self.cached_session() as sess:
# Create the queue that populates the values.
values_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
# Create the queue that populates the weighted labels.
weights_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 1))
_enqueue_vector(sess, weights_queue, [1])
_enqueue_vector(sess, weights_queue, [0])
_enqueue_vector(sess, weights_queue, [0])
_enqueue_vector(sess, weights_queue, [1])
weights = weights_queue.dequeue()
mean, update_op = metrics.streaming_mean(values, weights)
variables.local_variables_initializer().run()
for _ in range(4):
update_op.eval()
self.assertAlmostEqual((0 + 1 - 3.2 + 4.0) / 4.0, mean.eval(), 5)
def test1dWeightedValues_placeholders(self):
with self.cached_session() as sess:
# Create the queue that populates the values.
feed_values = ((0, 1), (-4.2, 9.1), (6.5, 0), (-3.2, 4.0))
values = array_ops.placeholder(dtype=dtypes_lib.float32)
# Create the queue that populates the weighted labels.
weights_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1,))
_enqueue_vector(sess, weights_queue, 1, shape=(1,))
_enqueue_vector(sess, weights_queue, 0, shape=(1,))
_enqueue_vector(sess, weights_queue, 0, shape=(1,))
_enqueue_vector(sess, weights_queue, 1, shape=(1,))
weights = weights_queue.dequeue()
mean, update_op = metrics.streaming_mean(values, weights)
variables.local_variables_initializer().run()
for i in range(4):
update_op.eval(feed_dict={values: feed_values[i]})
self.assertAlmostEqual((0 + 1 - 3.2 + 4.0) / 4.0, mean.eval(), 5)
def test2dWeightedValues(self):
with self.cached_session() as sess:
# Create the queue that populates the values.
values_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
# Create the queue that populates the weighted labels.
weights_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, weights_queue, [1, 1])
_enqueue_vector(sess, weights_queue, [1, 0])
_enqueue_vector(sess, weights_queue, [0, 1])
_enqueue_vector(sess, weights_queue, [0, 0])
weights = weights_queue.dequeue()
mean, update_op = metrics.streaming_mean(values, weights)
variables.local_variables_initializer().run()
for _ in range(4):
update_op.eval()
self.assertAlmostEqual((0 + 1 - 4.2 + 0) / 4.0, mean.eval(), 5)
def test2dWeightedValues_placeholders(self):
with self.cached_session() as sess:
# Create the queue that populates the values.
feed_values = ((0, 1), (-4.2, 9.1), (6.5, 0), (-3.2, 4.0))
values = array_ops.placeholder(dtype=dtypes_lib.float32)
# Create the queue that populates the weighted labels.
weights_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(2,))
_enqueue_vector(sess, weights_queue, [1, 1], shape=(2,))
_enqueue_vector(sess, weights_queue, [1, 0], shape=(2,))
_enqueue_vector(sess, weights_queue, [0, 1], shape=(2,))
_enqueue_vector(sess, weights_queue, [0, 0], shape=(2,))
weights = weights_queue.dequeue()
mean, update_op = metrics.streaming_mean(values, weights)
variables.local_variables_initializer().run()
for i in range(4):
update_op.eval(feed_dict={values: feed_values[i]})
self.assertAlmostEqual((0 + 1 - 4.2 + 0) / 4.0, mean.eval(), 5)
class StreamingMeanTensorTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def testVars(self):
metrics.streaming_mean_tensor(array_ops.ones([4, 3]))
_assert_metric_variables(self,
('mean/total_tensor:0', 'mean/count_tensor:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_mean_tensor(
array_ops.ones([4, 3]), metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_mean_tensor(
array_ops.ones([4, 3]), updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testBasic(self):
with self.cached_session() as sess:
values_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
mean, update_op = metrics.streaming_mean_tensor(values)
sess.run(variables.local_variables_initializer())
for _ in range(4):
sess.run(update_op)
self.assertAllClose([[-0.9 / 4., 3.525]], sess.run(mean))
def testMultiDimensional(self):
with self.cached_session() as sess:
values_queue = data_flow_ops.FIFOQueue(
2, dtypes=dtypes_lib.float32, shapes=(2, 2, 2))
_enqueue_vector(
sess,
values_queue, [[[1, 2], [1, 2]], [[1, 2], [1, 2]]],
shape=(2, 2, 2))
_enqueue_vector(
sess,
values_queue, [[[1, 2], [1, 2]], [[3, 4], [9, 10]]],
shape=(2, 2, 2))
values = values_queue.dequeue()
mean, update_op = metrics.streaming_mean_tensor(values)
sess.run(variables.local_variables_initializer())
for _ in range(2):
sess.run(update_op)
self.assertAllClose([[[1, 2], [1, 2]], [[2, 3], [5, 6]]], sess.run(mean))
def testUpdateOpsReturnsCurrentValue(self):
with self.cached_session() as sess:
values_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
mean, update_op = metrics.streaming_mean_tensor(values)
sess.run(variables.local_variables_initializer())
self.assertAllClose([[0, 1]], sess.run(update_op), 5)
self.assertAllClose([[-2.1, 5.05]], sess.run(update_op), 5)
self.assertAllClose([[2.3 / 3., 10.1 / 3.]], sess.run(update_op), 5)
self.assertAllClose([[-0.9 / 4., 3.525]], sess.run(update_op), 5)
self.assertAllClose([[-0.9 / 4., 3.525]], sess.run(mean), 5)
def testWeighted1d(self):
with self.cached_session() as sess:
# Create the queue that populates the values.
values_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
# Create the queue that populates the weights.
weights_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 1))
_enqueue_vector(sess, weights_queue, [[1]])
_enqueue_vector(sess, weights_queue, [[0]])
_enqueue_vector(sess, weights_queue, [[1]])
_enqueue_vector(sess, weights_queue, [[0]])
weights = weights_queue.dequeue()
mean, update_op = metrics.streaming_mean_tensor(values, weights)
sess.run(variables.local_variables_initializer())
for _ in range(4):
sess.run(update_op)
self.assertAllClose([[3.25, 0.5]], sess.run(mean), 5)
def testWeighted2d_1(self):
with self.cached_session() as sess:
# Create the queue that populates the values.
values_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
# Create the queue that populates the weights.
weights_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, weights_queue, [1, 1])
_enqueue_vector(sess, weights_queue, [1, 0])
_enqueue_vector(sess, weights_queue, [0, 1])
_enqueue_vector(sess, weights_queue, [0, 0])
weights = weights_queue.dequeue()
mean, update_op = metrics.streaming_mean_tensor(values, weights)
sess.run(variables.local_variables_initializer())
for _ in range(4):
sess.run(update_op)
self.assertAllClose([[-2.1, 0.5]], sess.run(mean), 5)
def testWeighted2d_2(self):
with self.cached_session() as sess:
# Create the queue that populates the values.
values_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
# Create the queue that populates the weights.
weights_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, weights_queue, [0, 1])
_enqueue_vector(sess, weights_queue, [0, 0])
_enqueue_vector(sess, weights_queue, [0, 1])
_enqueue_vector(sess, weights_queue, [0, 0])
weights = weights_queue.dequeue()
mean, update_op = metrics.streaming_mean_tensor(values, weights)
sess.run(variables.local_variables_initializer())
for _ in range(4):
sess.run(update_op)
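      # Hand check: column 0 has zero total weight, so the metric reports 0
      # there (presumably via a guarded division); column 1 keeps values
      # (1, 0) with unit weights, giving 0.5.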
self.assertAllClose([[0, 0.5]], sess.run(mean), 5)
class StreamingAccuracyTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def testVars(self):
metrics.streaming_accuracy(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
name='my_accuracy')
_assert_metric_variables(self,
('my_accuracy/count:0', 'my_accuracy/total:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_accuracy(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_accuracy(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testPredictionsAndLabelsOfDifferentSizeRaisesValueError(self):
predictions = array_ops.ones((10, 3))
labels = array_ops.ones((10, 4))
with self.assertRaises(ValueError):
metrics.streaming_accuracy(predictions, labels)
def testPredictionsAndWeightsOfDifferentSizeRaisesValueError(self):
predictions = array_ops.ones((10, 3))
labels = array_ops.ones((10, 3))
weights = array_ops.ones((9, 3))
with self.assertRaises(ValueError):
metrics.streaming_accuracy(predictions, labels, weights)
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_uniform(
(10, 3), maxval=3, dtype=dtypes_lib.int64, seed=1)
labels = random_ops.random_uniform(
(10, 3), maxval=3, dtype=dtypes_lib.int64, seed=2)
accuracy, update_op = metrics.streaming_accuracy(predictions, labels)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_accuracy = accuracy.eval()
for _ in range(10):
self.assertEqual(initial_accuracy, accuracy.eval())
def testMultipleUpdates(self):
with self.cached_session() as sess:
# Create the queue that populates the predictions.
preds_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 1))
_enqueue_vector(sess, preds_queue, [0])
_enqueue_vector(sess, preds_queue, [1])
_enqueue_vector(sess, preds_queue, [2])
_enqueue_vector(sess, preds_queue, [1])
predictions = preds_queue.dequeue()
# Create the queue that populates the labels.
labels_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 1))
_enqueue_vector(sess, labels_queue, [0])
_enqueue_vector(sess, labels_queue, [1])
_enqueue_vector(sess, labels_queue, [1])
_enqueue_vector(sess, labels_queue, [2])
labels = labels_queue.dequeue()
accuracy, update_op = metrics.streaming_accuracy(predictions, labels)
sess.run(variables.local_variables_initializer())
for _ in xrange(3):
sess.run(update_op)
self.assertEqual(0.5, sess.run(update_op))
self.assertEqual(0.5, accuracy.eval())
def testEffectivelyEquivalentSizes(self):
predictions = array_ops.ones((40, 1))
labels = array_ops.ones((40,))
with self.cached_session() as sess:
accuracy, update_op = metrics.streaming_accuracy(predictions, labels)
sess.run(variables.local_variables_initializer())
self.assertEqual(1.0, update_op.eval())
self.assertEqual(1.0, accuracy.eval())
  def testEffectivelyEquivalentSizesWithStaticShapedWeight(self):
predictions = ops.convert_to_tensor([1, 1, 1]) # shape 3,
labels = array_ops.expand_dims(ops.convert_to_tensor([1, 0, 0]),
1) # shape 3, 1
weights = array_ops.expand_dims(ops.convert_to_tensor([100, 1, 1]),
1) # shape 3, 1
with self.cached_session() as sess:
accuracy, update_op = metrics.streaming_accuracy(predictions, labels,
weights)
sess.run(variables.local_variables_initializer())
      # If streaming_accuracy did not flatten the weights, the accuracy would
      # be 0.33333334 because the (3, 1) weights would broadcast against the
      # correctness values. With flattening, it is higher than 0.95.
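      # Illustrative arithmetic (an informal sketch, not part of the API):
      # broadcasting the (3, 1) weights against the flattened (3,) correctness
      # vector [1, 0, 0] yields a 3x3 grid whose weighted mean is
      # (100 + 1 + 1) / (3 * (100 + 1 + 1)) = 0.33333334, whereas flattening
      # lines the weights up element-wise, giving 100 / (100 + 1 + 1) ~= 0.98.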
self.assertGreater(update_op.eval(), .95)
self.assertGreater(accuracy.eval(), .95)
def testEffectivelyEquivalentSizesWithDynamicallyShapedWeight(self):
predictions = ops.convert_to_tensor([1, 1, 1]) # shape 3,
labels = array_ops.expand_dims(ops.convert_to_tensor([1, 0, 0]),
1) # shape 3, 1
weights = [[100], [1], [1]] # shape 3, 1
weights_placeholder = array_ops.placeholder(
dtype=dtypes_lib.int32, name='weights')
feed_dict = {weights_placeholder: weights}
with self.cached_session() as sess:
accuracy, update_op = metrics.streaming_accuracy(predictions, labels,
weights_placeholder)
sess.run(variables.local_variables_initializer())
      # If streaming_accuracy did not flatten the weights, the accuracy would
      # be 0.33333334 because the (3, 1) weights would broadcast against the
      # correctness values. With flattening, it is higher than 0.95.
self.assertGreater(update_op.eval(feed_dict=feed_dict), .95)
self.assertGreater(accuracy.eval(feed_dict=feed_dict), .95)
def testMultipleUpdatesWithWeightedValues(self):
with self.cached_session() as sess:
# Create the queue that populates the predictions.
preds_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 1))
_enqueue_vector(sess, preds_queue, [0])
_enqueue_vector(sess, preds_queue, [1])
_enqueue_vector(sess, preds_queue, [2])
_enqueue_vector(sess, preds_queue, [1])
predictions = preds_queue.dequeue()
# Create the queue that populates the labels.
labels_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 1))
_enqueue_vector(sess, labels_queue, [0])
_enqueue_vector(sess, labels_queue, [1])
_enqueue_vector(sess, labels_queue, [1])
_enqueue_vector(sess, labels_queue, [2])
labels = labels_queue.dequeue()
# Create the queue that populates the weights.
weights_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.int64, shapes=(1, 1))
_enqueue_vector(sess, weights_queue, [1])
_enqueue_vector(sess, weights_queue, [1])
_enqueue_vector(sess, weights_queue, [0])
_enqueue_vector(sess, weights_queue, [0])
weights = weights_queue.dequeue()
accuracy, update_op = metrics.streaming_accuracy(predictions, labels,
weights)
sess.run(variables.local_variables_initializer())
for _ in xrange(3):
sess.run(update_op)
self.assertEqual(1.0, sess.run(update_op))
self.assertEqual(1.0, accuracy.eval())
class StreamingTruePositivesTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_true_positives((0, 1, 0), (0, 1, 1))
_assert_metric_variables(self, ('true_positives/count:0',))
def testUnweighted(self):
for expand_predictions in [True, False]:
for expand_labels in [True, False]:
for dtype in (dtypes_lib.bool, dtypes_lib.int32, dtypes_lib.float32):
predictions = math_ops.cast(
constant_op.constant(((1, 0, 1, 0), (0, 1, 1, 1), (0, 0, 0, 0))),
dtype=dtype)
if expand_predictions:
predictions = array_ops.expand_dims(predictions, 2)
labels = math_ops.cast(
constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0))),
dtype=dtype)
if expand_labels:
labels = array_ops.expand_dims(labels, 2)
tp, tp_update_op = metrics.streaming_true_positives(
predictions, labels)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(0, tp.eval())
self.assertEqual(1, tp_update_op.eval())
self.assertEqual(1, tp.eval())
def testWeighted(self):
for dtype in (dtypes_lib.bool, dtypes_lib.int32, dtypes_lib.float32):
predictions = math_ops.cast(
constant_op.constant(((1, 0, 1, 0), (0, 1, 1, 1), (0, 0, 0, 0))),
dtype=dtype)
labels = math_ops.cast(
constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0))),
dtype=dtype)
tp, tp_update_op = metrics.streaming_true_positives(
predictions, labels, weights=37.0)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(0, tp.eval())
self.assertEqual(37.0, tp_update_op.eval())
self.assertEqual(37.0, tp.eval())
class StreamingFalseNegativesTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_false_negatives((0, 1, 0), (0, 1, 1))
_assert_metric_variables(self, ('false_negatives/count:0',))
def testUnweighted(self):
for expand_predictions in [True, False]:
for expand_labels in [True, False]:
for dtype in (dtypes_lib.bool, dtypes_lib.int32, dtypes_lib.float32):
predictions = math_ops.cast(
constant_op.constant(((1, 0, 1, 0), (0, 1, 1, 1), (0, 0, 0, 0))),
dtype=dtype)
if expand_predictions:
predictions = array_ops.expand_dims(predictions, 2)
labels = math_ops.cast(
constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0))),
dtype=dtype)
if expand_labels:
labels = array_ops.expand_dims(labels, 2)
fn, fn_update_op = metrics.streaming_false_negatives(
predictions, labels)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(0, fn.eval())
self.assertEqual(2, fn_update_op.eval())
self.assertEqual(2, fn.eval())
def testWeighted(self):
for dtype in (dtypes_lib.bool, dtypes_lib.int32, dtypes_lib.float32):
predictions = math_ops.cast(
constant_op.constant(((1, 0, 1, 0), (0, 1, 1, 1), (0, 0, 0, 0))),
dtype=dtype)
labels = math_ops.cast(
constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0))),
dtype=dtype)
fn, fn_update_op = metrics.streaming_false_negatives(
predictions, labels, weights=((3.0,), (5.0,), (7.0,)))
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(0, fn.eval())
self.assertEqual(8.0, fn_update_op.eval())
self.assertEqual(8.0, fn.eval())
class StreamingFalsePositivesTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_false_positives((0, 1, 0), (0, 1, 1))
_assert_metric_variables(self, ('false_positives/count:0',))
def testUnweighted(self):
for expand_predictions in [True, False]:
for expand_labels in [True, False]:
for dtype in (dtypes_lib.bool, dtypes_lib.int32, dtypes_lib.float32):
predictions = math_ops.cast(
constant_op.constant(((1, 0, 1, 0), (0, 1, 1, 1), (0, 0, 0, 0))),
dtype=dtype)
if expand_predictions:
predictions = array_ops.expand_dims(predictions, 2)
labels = math_ops.cast(
constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0))),
dtype=dtype)
if expand_labels:
labels = array_ops.expand_dims(labels, 2)
fp, fp_update_op = metrics.streaming_false_positives(
predictions, labels)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(0, fp.eval())
self.assertEqual(4, fp_update_op.eval())
self.assertEqual(4, fp.eval())
def testWeighted(self):
for dtype in (dtypes_lib.bool, dtypes_lib.int32, dtypes_lib.float32):
predictions = math_ops.cast(
constant_op.constant(((1, 0, 1, 0), (0, 1, 1, 1), (0, 0, 0, 0))),
dtype=dtype)
labels = math_ops.cast(
constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0))),
dtype=dtype)
fp, fp_update_op = metrics.streaming_false_positives(
predictions,
labels,
weights=((1.0, 2.0, 3.0, 5.0), (7.0, 11.0, 13.0, 17.0), (19.0, 23.0,
29.0, 31.0)))
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(0, fp.eval())
self.assertEqual(42.0, fp_update_op.eval())
self.assertEqual(42.0, fp.eval())
class StreamingTrueNegativesTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_true_negatives((0, 1, 0), (0, 1, 1))
_assert_metric_variables(self, ('true_negatives/count:0',))
def testUnweighted(self):
for expand_predictions in [True, False]:
for expand_labels in [True, False]:
for dtype in (dtypes_lib.bool, dtypes_lib.int32, dtypes_lib.float32):
predictions = math_ops.cast(
constant_op.constant(((1, 0, 1, 0), (0, 1, 1, 1), (0, 0, 0, 0))),
dtype=dtype)
if expand_predictions:
predictions = array_ops.expand_dims(predictions, 2)
labels = math_ops.cast(
constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0))),
dtype=dtype)
if expand_labels:
labels = array_ops.expand_dims(labels, 2)
tn, tn_update_op = metrics.streaming_true_negatives(
predictions, labels)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(0, tn.eval())
self.assertEqual(5, tn_update_op.eval())
self.assertEqual(5, tn.eval())
def testWeighted(self):
for dtype in (dtypes_lib.bool, dtypes_lib.int32, dtypes_lib.float32):
predictions = math_ops.cast(
constant_op.constant(((1, 0, 1, 0), (0, 1, 1, 1), (0, 0, 0, 0))),
dtype=dtype)
labels = math_ops.cast(
constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0))),
dtype=dtype)
tn, tn_update_op = metrics.streaming_true_negatives(
predictions, labels, weights=((0.0, 2.0, 3.0, 5.0),))
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(0, tn.eval())
self.assertEqual(15.0, tn_update_op.eval())
self.assertEqual(15.0, tn.eval())
class StreamingTruePositivesAtThresholdsTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_true_positives_at_thresholds(
(0.0, 1.0, 0.0), (0, 1, 1), thresholds=(0.15, 0.5, 0.85))
_assert_metric_variables(self, ('true_positives:0',))
def testUnweighted(self):
predictions = constant_op.constant(
((0.9, 0.2, 0.8, 0.1), (0.2, 0.9, 0.7, 0.6), (0.1, 0.2, 0.4, 0.3)))
labels = constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0)))
tp, tp_update_op = metrics.streaming_true_positives_at_thresholds(
predictions, labels, thresholds=(0.15, 0.5, 0.85))
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAllEqual((0, 0, 0), tp.eval())
self.assertAllEqual((3, 1, 0), tp_update_op.eval())
self.assertAllEqual((3, 1, 0), tp.eval())
def testWeighted(self):
predictions = constant_op.constant(
((0.9, 0.2, 0.8, 0.1), (0.2, 0.9, 0.7, 0.6), (0.1, 0.2, 0.4, 0.3)))
labels = constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0)))
tp, tp_update_op = metrics.streaming_true_positives_at_thresholds(
predictions, labels, weights=37.0, thresholds=(0.15, 0.5, 0.85))
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAllEqual((0.0, 0.0, 0.0), tp.eval())
self.assertAllEqual((111.0, 37.0, 0.0), tp_update_op.eval())
self.assertAllEqual((111.0, 37.0, 0.0), tp.eval())
class StreamingFalseNegativesAtThresholdsTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_false_negatives_at_thresholds(
(0.0, 1.0, 0.0), (0, 1, 1), thresholds=(
0.15,
0.5,
0.85,
))
_assert_metric_variables(self, ('false_negatives:0',))
def testUnweighted(self):
predictions = constant_op.constant(
((0.9, 0.2, 0.8, 0.1), (0.2, 0.9, 0.7, 0.6), (0.1, 0.2, 0.4, 0.3)))
labels = constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0)))
fn, fn_update_op = metrics.streaming_false_negatives_at_thresholds(
predictions, labels, thresholds=(0.15, 0.5, 0.85))
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAllEqual((0, 0, 0), fn.eval())
self.assertAllEqual((0, 2, 3), fn_update_op.eval())
self.assertAllEqual((0, 2, 3), fn.eval())
def testWeighted(self):
predictions = constant_op.constant(
((0.9, 0.2, 0.8, 0.1), (0.2, 0.9, 0.7, 0.6), (0.1, 0.2, 0.4, 0.3)))
labels = constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0)))
fn, fn_update_op = metrics.streaming_false_negatives_at_thresholds(
predictions,
labels,
weights=((3.0,), (5.0,), (7.0,)),
thresholds=(0.15, 0.5, 0.85))
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAllEqual((0.0, 0.0, 0.0), fn.eval())
self.assertAllEqual((0.0, 8.0, 11.0), fn_update_op.eval())
self.assertAllEqual((0.0, 8.0, 11.0), fn.eval())
class StreamingFalsePositivesAtThresholdsTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_false_positives_at_thresholds(
(0.0, 1.0, 0.0), (0, 1, 1), thresholds=(0.15, 0.5, 0.85))
_assert_metric_variables(self, ('false_positives:0',))
def testUnweighted(self):
predictions = constant_op.constant(
((0.9, 0.2, 0.8, 0.1), (0.2, 0.9, 0.7, 0.6), (0.1, 0.2, 0.4, 0.3)))
labels = constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0)))
fp, fp_update_op = metrics.streaming_false_positives_at_thresholds(
predictions, labels, thresholds=(0.15, 0.5, 0.85))
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAllEqual((0, 0, 0), fp.eval())
self.assertAllEqual((7, 4, 2), fp_update_op.eval())
self.assertAllEqual((7, 4, 2), fp.eval())
def testWeighted(self):
predictions = constant_op.constant(
((0.9, 0.2, 0.8, 0.1), (0.2, 0.9, 0.7, 0.6), (0.1, 0.2, 0.4, 0.3)))
labels = constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0)))
fp, fp_update_op = metrics.streaming_false_positives_at_thresholds(
predictions,
labels,
weights=((1.0, 2.0, 3.0, 5.0), (7.0, 11.0, 13.0, 17.0), (19.0, 23.0,
29.0, 31.0)),
thresholds=(0.15, 0.5, 0.85))
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAllEqual((0.0, 0.0, 0.0), fp.eval())
self.assertAllEqual((125.0, 42.0, 12.0), fp_update_op.eval())
self.assertAllEqual((125.0, 42.0, 12.0), fp.eval())
class StreamingTrueNegativesAtThresholdsTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_true_negatives_at_thresholds(
(0.0, 1.0, 0.0), (0, 1, 1), thresholds=(0.15, 0.5, 0.85))
_assert_metric_variables(self, ('true_negatives:0',))
def testUnweighted(self):
predictions = constant_op.constant(
((0.9, 0.2, 0.8, 0.1), (0.2, 0.9, 0.7, 0.6), (0.1, 0.2, 0.4, 0.3)))
labels = constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0)))
tn, tn_update_op = metrics.streaming_true_negatives_at_thresholds(
predictions, labels, thresholds=(0.15, 0.5, 0.85))
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAllEqual((0, 0, 0), tn.eval())
self.assertAllEqual((2, 5, 7), tn_update_op.eval())
self.assertAllEqual((2, 5, 7), tn.eval())
def testWeighted(self):
predictions = constant_op.constant(
((0.9, 0.2, 0.8, 0.1), (0.2, 0.9, 0.7, 0.6), (0.1, 0.2, 0.4, 0.3)))
labels = constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0)))
tn, tn_update_op = metrics.streaming_true_negatives_at_thresholds(
predictions,
labels,
weights=((0.0, 2.0, 3.0, 5.0),),
thresholds=(0.15, 0.5, 0.85))
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAllEqual((0.0, 0.0, 0.0), tn.eval())
self.assertAllEqual((5.0, 15.0, 23.0), tn_update_op.eval())
self.assertAllEqual((5.0, 15.0, 23.0), tn.eval())
class StreamingPrecisionTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_precision(
predictions=array_ops.ones((10, 1)), labels=array_ops.ones((10, 1)))
_assert_metric_variables(self, ('precision/false_positives/count:0',
'precision/true_positives/count:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_precision(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_precision(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.int64, seed=1)
labels = random_ops.random_uniform(
(10, 3), maxval=2, dtype=dtypes_lib.int64, seed=2)
precision, update_op = metrics.streaming_precision(predictions, labels)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_precision = precision.eval()
for _ in range(10):
self.assertEqual(initial_precision, precision.eval())
def testAllCorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
predictions = constant_op.constant(inputs)
labels = constant_op.constant(inputs)
precision, update_op = metrics.streaming_precision(predictions, labels)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(1, sess.run(update_op))
self.assertAlmostEqual(1, precision.eval())
def testSomeCorrect(self):
predictions = constant_op.constant([1, 0, 1, 0], shape=(1, 4))
labels = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
precision, update_op = metrics.streaming_precision(predictions, labels)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.5, update_op.eval())
self.assertAlmostEqual(0.5, precision.eval())
def testWeighted1d(self):
predictions = constant_op.constant([[1, 0, 1, 0], [1, 0, 1, 0]])
labels = constant_op.constant([[0, 1, 1, 0], [1, 0, 0, 1]])
precision, update_op = metrics.streaming_precision(
predictions, labels, weights=constant_op.constant([[2], [5]]))
with self.cached_session():
variables.local_variables_initializer().run()
weighted_tp = 2.0 + 5.0
weighted_positives = (2.0 + 2.0) + (5.0 + 5.0)
expected_precision = weighted_tp / weighted_positives
self.assertAlmostEqual(expected_precision, update_op.eval())
self.assertAlmostEqual(expected_precision, precision.eval())
def testWeighted1d_placeholders(self):
predictions = array_ops.placeholder(dtype=dtypes_lib.float32)
labels = array_ops.placeholder(dtype=dtypes_lib.float32)
feed_dict = {
predictions: ((1, 0, 1, 0), (1, 0, 1, 0)),
labels: ((0, 1, 1, 0), (1, 0, 0, 1))
}
precision, update_op = metrics.streaming_precision(
predictions, labels, weights=constant_op.constant([[2], [5]]))
with self.cached_session():
variables.local_variables_initializer().run()
weighted_tp = 2.0 + 5.0
weighted_positives = (2.0 + 2.0) + (5.0 + 5.0)
expected_precision = weighted_tp / weighted_positives
self.assertAlmostEqual(
expected_precision, update_op.eval(feed_dict=feed_dict))
self.assertAlmostEqual(
expected_precision, precision.eval(feed_dict=feed_dict))
def testWeighted2d(self):
predictions = constant_op.constant([[1, 0, 1, 0], [1, 0, 1, 0]])
labels = constant_op.constant([[0, 1, 1, 0], [1, 0, 0, 1]])
precision, update_op = metrics.streaming_precision(
predictions,
labels,
weights=constant_op.constant([[1, 2, 3, 4], [4, 3, 2, 1]]))
with self.cached_session():
variables.local_variables_initializer().run()
weighted_tp = 3.0 + 4.0
weighted_positives = (1.0 + 3.0) + (4.0 + 2.0)
expected_precision = weighted_tp / weighted_positives
self.assertAlmostEqual(expected_precision, update_op.eval())
self.assertAlmostEqual(expected_precision, precision.eval())
def testWeighted2d_placeholders(self):
predictions = array_ops.placeholder(dtype=dtypes_lib.float32)
labels = array_ops.placeholder(dtype=dtypes_lib.float32)
feed_dict = {
predictions: ((1, 0, 1, 0), (1, 0, 1, 0)),
labels: ((0, 1, 1, 0), (1, 0, 0, 1))
}
precision, update_op = metrics.streaming_precision(
predictions,
labels,
weights=constant_op.constant([[1, 2, 3, 4], [4, 3, 2, 1]]))
with self.cached_session():
variables.local_variables_initializer().run()
weighted_tp = 3.0 + 4.0
weighted_positives = (1.0 + 3.0) + (4.0 + 2.0)
expected_precision = weighted_tp / weighted_positives
self.assertAlmostEqual(
expected_precision, update_op.eval(feed_dict=feed_dict))
self.assertAlmostEqual(
expected_precision, precision.eval(feed_dict=feed_dict))
def testAllIncorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
predictions = constant_op.constant(inputs)
labels = constant_op.constant(1 - inputs)
precision, update_op = metrics.streaming_precision(predictions, labels)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertAlmostEqual(0, precision.eval())
def testZeroTrueAndFalsePositivesGivesZeroPrecision(self):
predictions = constant_op.constant([0, 0, 0, 0])
labels = constant_op.constant([0, 0, 0, 0])
precision, update_op = metrics.streaming_precision(predictions, labels)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertEqual(0.0, precision.eval())
class StreamingRecallTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_recall(
predictions=array_ops.ones((10, 1)), labels=array_ops.ones((10, 1)))
_assert_metric_variables(
self,
('recall/false_negatives/count:0', 'recall/true_positives/count:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_recall(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_recall(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.int64, seed=1)
labels = random_ops.random_uniform(
(10, 3), maxval=2, dtype=dtypes_lib.int64, seed=2)
recall, update_op = metrics.streaming_recall(predictions, labels)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_recall = recall.eval()
for _ in range(10):
self.assertEqual(initial_recall, recall.eval())
def testAllCorrect(self):
np_inputs = np.random.randint(0, 2, size=(100, 1))
predictions = constant_op.constant(np_inputs)
labels = constant_op.constant(np_inputs)
recall, update_op = metrics.streaming_recall(predictions, labels)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertEqual(1, recall.eval())
def testSomeCorrect(self):
predictions = constant_op.constant([1, 0, 1, 0], shape=(1, 4))
labels = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
recall, update_op = metrics.streaming_recall(predictions, labels)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.5, update_op.eval())
self.assertAlmostEqual(0.5, recall.eval())
def testWeighted1d(self):
predictions = constant_op.constant([[1, 0, 1, 0], [0, 1, 0, 1]])
labels = constant_op.constant([[0, 1, 1, 0], [1, 0, 0, 1]])
weights = constant_op.constant([[2], [5]])
recall, update_op = metrics.streaming_recall(
predictions, labels, weights=weights)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
weighted_tp = 2.0 + 5.0
weighted_t = (2.0 + 2.0) + (5.0 + 5.0)
      expected_recall = weighted_tp / weighted_t
      self.assertAlmostEqual(expected_recall, update_op.eval())
      self.assertAlmostEqual(expected_recall, recall.eval())
def testWeighted2d(self):
predictions = constant_op.constant([[1, 0, 1, 0], [0, 1, 0, 1]])
labels = constant_op.constant([[0, 1, 1, 0], [1, 0, 0, 1]])
weights = constant_op.constant([[1, 2, 3, 4], [4, 3, 2, 1]])
recall, update_op = metrics.streaming_recall(
predictions, labels, weights=weights)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
weighted_tp = 3.0 + 1.0
weighted_t = (2.0 + 3.0) + (4.0 + 1.0)
      expected_recall = weighted_tp / weighted_t
      self.assertAlmostEqual(expected_recall, update_op.eval())
      self.assertAlmostEqual(expected_recall, recall.eval())
def testAllIncorrect(self):
np_inputs = np.random.randint(0, 2, size=(100, 1))
predictions = constant_op.constant(np_inputs)
labels = constant_op.constant(1 - np_inputs)
recall, update_op = metrics.streaming_recall(predictions, labels)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertEqual(0, recall.eval())
def testZeroTruePositivesAndFalseNegativesGivesZeroRecall(self):
predictions = array_ops.zeros((1, 4))
labels = array_ops.zeros((1, 4))
recall, update_op = metrics.streaming_recall(predictions, labels)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertEqual(0, recall.eval())
class StreamingFPRTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_false_positive_rate(
predictions=array_ops.ones((10, 1)), labels=array_ops.ones((10, 1)))
_assert_metric_variables(self,
('false_positive_rate/false_positives/count:0',
'false_positive_rate/true_negatives/count:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_false_positive_rate(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_false_positive_rate(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.int64, seed=1)
labels = random_ops.random_uniform(
(10, 3), maxval=2, dtype=dtypes_lib.int64, seed=2)
fpr, update_op = metrics.streaming_false_positive_rate(predictions, labels)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_fpr = fpr.eval()
for _ in range(10):
self.assertEqual(initial_fpr, fpr.eval())
def testAllCorrect(self):
np_inputs = np.random.randint(0, 2, size=(100, 1))
predictions = constant_op.constant(np_inputs)
labels = constant_op.constant(np_inputs)
fpr, update_op = metrics.streaming_false_positive_rate(predictions, labels)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertEqual(0, fpr.eval())
def testSomeCorrect(self):
predictions = constant_op.constant([1, 0, 1, 0], shape=(1, 4))
labels = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
fpr, update_op = metrics.streaming_false_positive_rate(predictions, labels)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.5, update_op.eval())
self.assertAlmostEqual(0.5, fpr.eval())
def testWeighted1d(self):
predictions = constant_op.constant([[1, 0, 1, 0], [0, 1, 0, 1]])
labels = constant_op.constant([[0, 1, 1, 0], [1, 0, 0, 1]])
weights = constant_op.constant([[2], [5]])
fpr, update_op = metrics.streaming_false_positive_rate(
predictions, labels, weights=weights)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
weighted_fp = 2.0 + 5.0
weighted_f = (2.0 + 2.0) + (5.0 + 5.0)
expected_fpr = weighted_fp / weighted_f
self.assertAlmostEqual(expected_fpr, update_op.eval())
self.assertAlmostEqual(expected_fpr, fpr.eval())
def testWeighted2d(self):
predictions = constant_op.constant([[1, 0, 1, 0], [0, 1, 0, 1]])
labels = constant_op.constant([[0, 1, 1, 0], [1, 0, 0, 1]])
weights = constant_op.constant([[1, 2, 3, 4], [4, 3, 2, 1]])
fpr, update_op = metrics.streaming_false_positive_rate(
predictions, labels, weights=weights)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
weighted_fp = 1.0 + 3.0
weighted_f = (1.0 + 4.0) + (2.0 + 3.0)
expected_fpr = weighted_fp / weighted_f
self.assertAlmostEqual(expected_fpr, update_op.eval())
self.assertAlmostEqual(expected_fpr, fpr.eval())
def testAllIncorrect(self):
np_inputs = np.random.randint(0, 2, size=(100, 1))
predictions = constant_op.constant(np_inputs)
labels = constant_op.constant(1 - np_inputs)
fpr, update_op = metrics.streaming_false_positive_rate(predictions, labels)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertEqual(1, fpr.eval())
def testZeroFalsePositivesAndTrueNegativesGivesZeroFPR(self):
predictions = array_ops.ones((1, 4))
labels = array_ops.ones((1, 4))
fpr, update_op = metrics.streaming_false_positive_rate(predictions, labels)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertEqual(0, fpr.eval())
class StreamingFNRTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_false_negative_rate(
predictions=array_ops.ones((10, 1)), labels=array_ops.ones((10, 1)))
_assert_metric_variables(self,
('false_negative_rate/false_negatives/count:0',
'false_negative_rate/true_positives/count:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_false_negative_rate(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_false_negative_rate(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.int64, seed=1)
labels = random_ops.random_uniform(
(10, 3), maxval=2, dtype=dtypes_lib.int64, seed=2)
fnr, update_op = metrics.streaming_false_negative_rate(predictions, labels)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_fnr = fnr.eval()
for _ in range(10):
self.assertEqual(initial_fnr, fnr.eval())
def testAllCorrect(self):
np_inputs = np.random.randint(0, 2, size=(100, 1))
predictions = constant_op.constant(np_inputs)
labels = constant_op.constant(np_inputs)
fnr, update_op = metrics.streaming_false_negative_rate(predictions, labels)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertEqual(0, fnr.eval())
def testSomeCorrect(self):
predictions = constant_op.constant([1, 0, 1, 0], shape=(1, 4))
labels = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
fnr, update_op = metrics.streaming_false_negative_rate(predictions, labels)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.5, update_op.eval())
self.assertAlmostEqual(0.5, fnr.eval())
def testWeighted1d(self):
predictions = constant_op.constant([[1, 0, 1, 0], [0, 1, 0, 1]])
labels = constant_op.constant([[0, 1, 1, 0], [1, 0, 0, 1]])
weights = constant_op.constant([[2], [5]])
fnr, update_op = metrics.streaming_false_negative_rate(
predictions, labels, weights=weights)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
weighted_fn = 2.0 + 5.0
weighted_t = (2.0 + 2.0) + (5.0 + 5.0)
expected_fnr = weighted_fn / weighted_t
self.assertAlmostEqual(expected_fnr, update_op.eval())
self.assertAlmostEqual(expected_fnr, fnr.eval())
def testWeighted2d(self):
predictions = constant_op.constant([[1, 0, 1, 0], [0, 1, 0, 1]])
labels = constant_op.constant([[0, 1, 1, 0], [1, 0, 0, 1]])
weights = constant_op.constant([[1, 2, 3, 4], [4, 3, 2, 1]])
fnr, update_op = metrics.streaming_false_negative_rate(
predictions, labels, weights=weights)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
weighted_fn = 2.0 + 4.0
weighted_t = (2.0 + 3.0) + (1.0 + 4.0)
expected_fnr = weighted_fn / weighted_t
self.assertAlmostEqual(expected_fnr, update_op.eval())
self.assertAlmostEqual(expected_fnr, fnr.eval())
def testAllIncorrect(self):
np_inputs = np.random.randint(0, 2, size=(100, 1))
predictions = constant_op.constant(np_inputs)
labels = constant_op.constant(1 - np_inputs)
fnr, update_op = metrics.streaming_false_negative_rate(predictions, labels)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertEqual(1, fnr.eval())
def testZeroFalseNegativesAndTruePositivesGivesZeroFNR(self):
predictions = array_ops.zeros((1, 4))
labels = array_ops.zeros((1, 4))
fnr, update_op = metrics.streaming_false_negative_rate(predictions, labels)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertEqual(0, fnr.eval())
class StreamingCurvePointsTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metric_ops.streaming_curve_points(
predictions=array_ops.ones((10, 1)), labels=array_ops.ones((10, 1)))
_assert_metric_variables(
self,
('curve_points/true_positives:0', 'curve_points/false_negatives:0',
'curve_points/false_positives:0', 'curve_points/true_negatives:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
points, _ = metric_ops.streaming_curve_points(
labels=array_ops.ones((10, 1)),
predictions=array_ops.ones((10, 1)),
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [points])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metric_ops.streaming_curve_points(
labels=array_ops.ones((10, 1)),
predictions=array_ops.ones((10, 1)),
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def _testValueTensorIsIdempotent(self, curve):
predictions = constant_op.constant(
np.random.uniform(size=(10, 3)), dtype=dtypes_lib.float32)
labels = constant_op.constant(
np.random.uniform(high=2, size=(10, 3)), dtype=dtypes_lib.float32)
points, update_op = metric_ops.streaming_curve_points(
labels, predictions=predictions, curve=curve)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
sess.run(update_op)
initial_points = points.eval()
sess.run(update_op)
self.assertAllClose(initial_points, points.eval())
def testValueTensorIsIdempotentROC(self):
self._testValueTensorIsIdempotent(curve='ROC')
def testValueTensorIsIdempotentPR(self):
self._testValueTensorIsIdempotent(curve='PR')
def _testCase(self, labels, predictions, curve, expected_points):
with self.cached_session() as sess:
predictions_tensor = constant_op.constant(
predictions, dtype=dtypes_lib.float32)
labels_tensor = constant_op.constant(labels, dtype=dtypes_lib.float32)
points, update_op = metric_ops.streaming_curve_points(
labels=labels_tensor,
predictions=predictions_tensor,
num_thresholds=3,
curve=curve)
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertAllClose(expected_points, points.eval())
def testEdgeCasesROC(self):
self._testCase([[1]], [[1]], 'ROC', [[0, 1], [0, 1], [0, 0]])
self._testCase([[0]], [[0]], 'ROC', [[1, 1], [0, 1], [0, 1]])
self._testCase([[0]], [[1]], 'ROC', [[1, 1], [1, 1], [0, 1]])
self._testCase([[1]], [[0]], 'ROC', [[0, 1], [0, 0], [0, 0]])
def testManyValuesROC(self):
self._testCase([[1.0, 0.0, 0.0, 1.0, 1.0, 1.0]],
[[0.2, 0.3, 0.4, 0.6, 0.7, 0.8]], 'ROC',
[[1.0, 1.0], [0.0, 0.75], [0.0, 0.0]])
def testEdgeCasesPR(self):
self._testCase([[1]], [[1]], 'PR', [[1, 1], [1, 1], [0, 1]])
self._testCase([[0]], [[0]], 'PR', [[1, 0], [1, 1], [1, 1]])
self._testCase([[0]], [[1]], 'PR', [[1, 0], [1, 0], [1, 1]])
self._testCase([[1]], [[0]], 'PR', [[1, 1], [0, 1], [0, 1]])
def testManyValuesPR(self):
self._testCase([[1.0, 0.0, 0.0, 1.0, 1.0, 1.0]],
[[0.2, 0.3, 0.4, 0.6, 0.7, 0.8]], 'PR',
[[1.0, 4.0 / 6.0], [0.75, 1.0], [0.0, 1.0]])
def _np_auc(predictions, labels, weights=None):
"""Computes the AUC explicitly using Numpy.
Args:
predictions: an ndarray with shape [N].
labels: an ndarray with shape [N].
weights: an ndarray with shape [N].
Returns:
the area under the ROC curve.
"""
if weights is None:
weights = np.ones(np.size(predictions))
is_positive = labels > 0
num_positives = np.sum(weights[is_positive])
num_negatives = np.sum(weights[~is_positive])
# Sort descending:
inds = np.argsort(-predictions)
sorted_labels = labels[inds]
sorted_weights = weights[inds]
is_positive = sorted_labels > 0
tp = np.cumsum(sorted_weights * is_positive) / num_positives
return np.sum((sorted_weights * tp)[~is_positive]) / num_negatives
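# An informal sanity check of the formula above (not executed by the tests):
# with predictions [0.9, 0.8, 0.3, 0.2], labels [1, 0, 1, 0] and unit weights,
# the descending sort keeps the order, the cumulative TPR is
# [0.5, 0.5, 1.0, 1.0], and summing the TPR at the two negatives gives
# (0.5 + 1.0) / 2 = 0.75 -- the same result as counting correctly ordered
# positive/negative pairs (3 of 4).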
class StreamingAUCTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_auc(
predictions=array_ops.ones((10, 1)), labels=array_ops.ones((10, 1)))
_assert_metric_variables(self,
('auc/true_positives:0', 'auc/false_negatives:0',
'auc/false_positives:0', 'auc/true_negatives:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_auc(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_auc(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.float32, seed=1)
labels = random_ops.random_uniform(
(10, 3), maxval=2, dtype=dtypes_lib.int64, seed=2)
auc, update_op = metrics.streaming_auc(predictions, labels)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_auc = auc.eval()
for _ in range(10):
self.assertAlmostEqual(initial_auc, auc.eval(), 5)
def testPredictionsOutOfRange(self):
with self.cached_session() as sess:
predictions = constant_op.constant(
[1, -1, 1, -1], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
_, update_op = metrics.streaming_auc(predictions, labels)
sess.run(variables.local_variables_initializer())
self.assertRaises(errors_impl.InvalidArgumentError, update_op.eval)
def testAllCorrect(self):
self.allCorrectAsExpected('ROC')
def allCorrectAsExpected(self, curve):
inputs = np.random.randint(0, 2, size=(100, 1))
with self.cached_session() as sess:
predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
labels = constant_op.constant(inputs)
auc, update_op = metrics.streaming_auc(predictions, labels, curve=curve)
sess.run(variables.local_variables_initializer())
self.assertEqual(1, sess.run(update_op))
self.assertEqual(1, auc.eval())
def testSomeCorrect(self):
with self.cached_session() as sess:
predictions = constant_op.constant(
[1, 0, 1, 0], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
auc, update_op = metrics.streaming_auc(predictions, labels)
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.5, sess.run(update_op))
self.assertAlmostEqual(0.5, auc.eval())
def testWeighted1d(self):
with self.cached_session() as sess:
predictions = constant_op.constant(
[1, 0, 1, 0], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
weights = constant_op.constant([2], shape=(1, 1))
auc, update_op = metrics.streaming_auc(
predictions, labels, weights=weights)
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.5, sess.run(update_op), 5)
self.assertAlmostEqual(0.5, auc.eval(), 5)
def testWeighted2d(self):
with self.cached_session() as sess:
predictions = constant_op.constant(
[1, 0, 1, 0], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
weights = constant_op.constant([1, 2, 3, 4], shape=(1, 4))
auc, update_op = metrics.streaming_auc(
predictions, labels, weights=weights)
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.7, sess.run(update_op), 5)
self.assertAlmostEqual(0.7, auc.eval(), 5)
def testAUCPRSpecialCase(self):
with self.cached_session() as sess:
predictions = constant_op.constant(
[0.1, 0.4, 0.35, 0.8], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 0, 1, 1], shape=(1, 4))
auc, update_op = metrics.streaming_auc(predictions, labels, curve='PR')
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.79166, sess.run(update_op), delta=1e-3)
self.assertAlmostEqual(0.79166, auc.eval(), delta=1e-3)
def testAnotherAUCPRSpecialCase(self):
with self.cached_session() as sess:
predictions = constant_op.constant(
[0.1, 0.4, 0.35, 0.8, 0.1, 0.135, 0.81],
shape=(1, 7),
dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 0, 1, 0, 1, 0, 1], shape=(1, 7))
auc, update_op = metrics.streaming_auc(predictions, labels, curve='PR')
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.610317, sess.run(update_op), delta=1e-3)
self.assertAlmostEqual(0.610317, auc.eval(), delta=1e-3)
def testThirdAUCPRSpecialCase(self):
with self.cached_session() as sess:
predictions = constant_op.constant(
[0.0, 0.1, 0.2, 0.33, 0.3, 0.4, 0.5],
shape=(1, 7),
dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 0, 0, 0, 1, 1, 1], shape=(1, 7))
auc, update_op = metrics.streaming_auc(predictions, labels, curve='PR')
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.90277, sess.run(update_op), delta=1e-3)
self.assertAlmostEqual(0.90277, auc.eval(), delta=1e-3)
def testAllIncorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
with self.cached_session() as sess:
predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
labels = constant_op.constant(1 - inputs, dtype=dtypes_lib.float32)
auc, update_op = metrics.streaming_auc(predictions, labels)
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0, sess.run(update_op))
self.assertAlmostEqual(0, auc.eval())
def testZeroTruePositivesAndFalseNegativesGivesOneAUC(self):
with self.cached_session() as sess:
predictions = array_ops.zeros([4], dtype=dtypes_lib.float32)
labels = array_ops.zeros([4])
auc, update_op = metrics.streaming_auc(predictions, labels)
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(1, sess.run(update_op), 6)
self.assertAlmostEqual(1, auc.eval(), 6)
def testRecallOneAndPrecisionOneGivesOnePRAUC(self):
with self.cached_session() as sess:
predictions = array_ops.ones([4], dtype=dtypes_lib.float32)
labels = array_ops.ones([4])
auc, update_op = metrics.streaming_auc(predictions, labels, curve='PR')
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(1, sess.run(update_op), 6)
self.assertAlmostEqual(1, auc.eval(), 6)
def testWithMultipleUpdates(self):
num_samples = 1000
batch_size = 10
num_batches = int(num_samples / batch_size)
# Create the labels and data.
labels = np.random.randint(0, 2, size=num_samples)
noise = np.random.normal(0.0, scale=0.2, size=num_samples)
predictions = 0.4 + 0.2 * labels + noise
predictions[predictions > 1] = 1
predictions[predictions < 0] = 0
def _enqueue_as_batches(x, enqueue_ops):
x_batches = x.astype(np.float32).reshape((num_batches, batch_size))
x_queue = data_flow_ops.FIFOQueue(
num_batches, dtypes=dtypes_lib.float32, shapes=(batch_size,))
for i in range(num_batches):
enqueue_ops[i].append(x_queue.enqueue(x_batches[i, :]))
return x_queue.dequeue()
for weights in (None, np.ones(num_samples),
np.random.exponential(scale=1.0, size=num_samples)):
expected_auc = _np_auc(predictions, labels, weights)
with self.cached_session() as sess:
enqueue_ops = [[] for i in range(num_batches)]
tf_predictions = _enqueue_as_batches(predictions, enqueue_ops)
tf_labels = _enqueue_as_batches(labels, enqueue_ops)
tf_weights = (
_enqueue_as_batches(weights, enqueue_ops)
if weights is not None else None)
for i in range(num_batches):
sess.run(enqueue_ops[i])
auc, update_op = metrics.streaming_auc(
tf_predictions,
tf_labels,
curve='ROC',
num_thresholds=500,
weights=tf_weights)
sess.run(variables.local_variables_initializer())
for i in range(num_batches):
sess.run(update_op)
        # Since this is only approximate, we can't expect a 6-digit match.
        # With a higher number of samples/thresholds the accuracy should
        # improve.
self.assertAlmostEqual(expected_auc, auc.eval(), 2)
class StreamingDynamicAUCTest(test.TestCase):
def setUp(self):
super(StreamingDynamicAUCTest, self).setUp()
np.random.seed(1)
ops.reset_default_graph()
def testUnknownCurve(self):
with self.assertRaisesRegexp(
ValueError, 'curve must be either ROC or PR, TEST_CURVE unknown'):
metrics.streaming_dynamic_auc(
labels=array_ops.ones((10, 1)),
predictions=array_ops.ones((10, 1)),
curve='TEST_CURVE')
def testVars(self):
metrics.streaming_dynamic_auc(
labels=array_ops.ones((10, 1)), predictions=array_ops.ones((10, 1)))
_assert_metric_variables(self, [
'dynamic_auc/concat_labels/array:0', 'dynamic_auc/concat_labels/size:0',
'dynamic_auc/concat_preds/array:0', 'dynamic_auc/concat_preds/size:0'
])
def testMetricsCollection(self):
my_collection_name = '__metrics__'
auc, _ = metrics.streaming_dynamic_auc(
labels=array_ops.ones((10, 1)),
predictions=array_ops.ones((10, 1)),
metrics_collections=[my_collection_name])
self.assertEqual(ops.get_collection(my_collection_name), [auc])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_dynamic_auc(
labels=array_ops.ones((10, 1)),
predictions=array_ops.ones((10, 1)),
updates_collections=[my_collection_name])
self.assertEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.float32, seed=1)
labels = random_ops.random_uniform(
(10, 3), maxval=2, dtype=dtypes_lib.int64, seed=2)
auc, update_op = metrics.streaming_dynamic_auc(labels, predictions)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in xrange(10):
sess.run(update_op)
# Then verify idempotency.
initial_auc = auc.eval()
for _ in xrange(10):
self.assertAlmostEqual(initial_auc, auc.eval(), 5)
def testAllLabelsOnes(self):
with self.cached_session() as sess:
predictions = constant_op.constant([1., 1., 1.])
labels = constant_op.constant([1, 1, 1])
auc, update_op = metrics.streaming_dynamic_auc(labels, predictions)
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertEqual(0, auc.eval())
def testAllLabelsZeros(self):
with self.cached_session() as sess:
predictions = constant_op.constant([1., 1., 1.])
labels = constant_op.constant([0, 0, 0])
auc, update_op = metrics.streaming_dynamic_auc(labels, predictions)
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertEqual(0, auc.eval())
def testNonZeroOnePredictions(self):
with self.cached_session() as sess:
predictions = constant_op.constant(
[2.5, -2.5, 2.5, -2.5], dtype=dtypes_lib.float32)
labels = constant_op.constant([1, 0, 1, 0])
auc, update_op = metrics.streaming_dynamic_auc(labels, predictions)
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertAlmostEqual(auc.eval(), 1.0)
def testAllCorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
with self.cached_session() as sess:
predictions = constant_op.constant(inputs)
labels = constant_op.constant(inputs)
auc, update_op = metrics.streaming_dynamic_auc(labels, predictions)
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertEqual(1, auc.eval())
def testSomeCorrect(self):
with self.cached_session() as sess:
predictions = constant_op.constant([1, 0, 1, 0])
labels = constant_op.constant([0, 1, 1, 0])
auc, update_op = metrics.streaming_dynamic_auc(labels, predictions)
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertAlmostEqual(0.5, auc.eval())
def testAllIncorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
with self.cached_session() as sess:
predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
labels = constant_op.constant(1 - inputs, dtype=dtypes_lib.float32)
auc, update_op = metrics.streaming_dynamic_auc(labels, predictions)
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertAlmostEqual(0, auc.eval())
def testExceptionOnIncompatibleShapes(self):
with self.cached_session() as sess:
predictions = array_ops.ones([5])
labels = array_ops.zeros([6])
with self.assertRaisesRegexp(ValueError, 'Shapes .* are incompatible'):
_, update_op = metrics.streaming_dynamic_auc(labels, predictions)
sess.run(variables.local_variables_initializer())
sess.run(update_op)
def testExceptionOnGreaterThanOneLabel(self):
with self.cached_session() as sess:
predictions = constant_op.constant([1, 0.5, 0], dtypes_lib.float32)
labels = constant_op.constant([2, 1, 0])
_, update_op = metrics.streaming_dynamic_auc(labels, predictions)
sess.run(variables.local_variables_initializer())
with self.assertRaisesRegexp(
errors_impl.InvalidArgumentError,
'.*labels must be 0 or 1, at least one is >1.*'):
sess.run(update_op)
def testExceptionOnNegativeLabel(self):
with self.cached_session() as sess:
predictions = constant_op.constant([1, 0.5, 0], dtypes_lib.float32)
labels = constant_op.constant([1, 0, -1])
_, update_op = metrics.streaming_dynamic_auc(labels, predictions)
sess.run(variables.local_variables_initializer())
with self.assertRaisesRegexp(
errors_impl.InvalidArgumentError,
'.*labels must be 0 or 1, at least one is <0.*'):
sess.run(update_op)
def testWithMultipleUpdates(self):
batch_size = 10
num_batches = 100
labels = np.array([])
predictions = np.array([])
tf_labels = variables.VariableV1(
array_ops.ones(batch_size, dtypes_lib.int32),
collections=[ops.GraphKeys.LOCAL_VARIABLES],
dtype=dtypes_lib.int32)
tf_predictions = variables.VariableV1(
array_ops.ones(batch_size),
collections=[ops.GraphKeys.LOCAL_VARIABLES],
dtype=dtypes_lib.float32)
auc, update_op = metrics.streaming_dynamic_auc(tf_labels, tf_predictions)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
for _ in xrange(num_batches):
new_labels = np.random.randint(0, 2, size=batch_size)
noise = np.random.normal(0.0, scale=0.2, size=batch_size)
new_predictions = 0.4 + 0.2 * new_labels + noise
labels = np.concatenate([labels, new_labels])
predictions = np.concatenate([predictions, new_predictions])
sess.run(tf_labels.assign(new_labels))
sess.run(tf_predictions.assign(new_predictions))
sess.run(update_op)
expected_auc = _np_auc(predictions, labels)
self.assertAlmostEqual(expected_auc, auc.eval())
def testAUCPRReverseIncreasingPredictions(self):
with self.cached_session() as sess:
predictions = constant_op.constant(
[0.1, 0.4, 0.35, 0.8], dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 0, 1, 1])
auc, update_op = metrics.streaming_dynamic_auc(
labels, predictions, curve='PR')
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertAlmostEqual(0.79166, auc.eval(), delta=1e-5)
def testAUCPRJumbledPredictions(self):
with self.cached_session() as sess:
predictions = constant_op.constant(
[0.1, 0.4, 0.35, 0.8, 0.1, 0.135, 0.81], dtypes_lib.float32)
labels = constant_op.constant([0, 0, 1, 0, 1, 0, 1])
auc, update_op = metrics.streaming_dynamic_auc(
labels, predictions, curve='PR')
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertAlmostEqual(0.610317, auc.eval(), delta=1e-6)
def testAUCPRPredictionsLessThanHalf(self):
with self.cached_session() as sess:
predictions = constant_op.constant(
[0.0, 0.1, 0.2, 0.33, 0.3, 0.4, 0.5],
shape=(1, 7),
dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 0, 0, 0, 1, 1, 1], shape=(1, 7))
auc, update_op = metrics.streaming_dynamic_auc(
labels, predictions, curve='PR')
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertAlmostEqual(0.90277, auc.eval(), delta=1e-5)
def testWithWeights(self):
batch_size = 10
num_batches = 100
labels = np.array([])
predictions = np.array([])
weights = np.array([])
tf_labels = variables.VariableV1(
array_ops.ones(batch_size, dtypes_lib.int32),
collections=[ops.GraphKeys.LOCAL_VARIABLES],
dtype=dtypes_lib.int32)
tf_predictions = variables.VariableV1(
array_ops.ones(batch_size),
collections=[ops.GraphKeys.LOCAL_VARIABLES],
dtype=dtypes_lib.float32)
tf_weights = variables.VariableV1(
array_ops.ones(batch_size),
collections=[ops.GraphKeys.LOCAL_VARIABLES],
dtype=dtypes_lib.float32)
auc, update_op = metrics.streaming_dynamic_auc(tf_labels,
tf_predictions,
weights=tf_weights)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
for _ in xrange(num_batches):
new_labels = np.random.randint(0, 2, size=batch_size)
noise = np.random.uniform(-0.2, 0.2, size=batch_size)
new_predictions = 0.4 + 0.2 * new_labels + noise
new_weights = np.random.uniform(0.0, 3.0, size=batch_size)
labels = np.concatenate([labels, new_labels])
predictions = np.concatenate([predictions, new_predictions])
weights = np.concatenate([weights, new_weights])
sess.run([tf_labels.assign(new_labels),
tf_predictions.assign(new_predictions),
tf_weights.assign(new_weights)])
sess.run(update_op)
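      # Compare against a weighted AUC computed directly with _np_auc over the
      # accumulated labels, predictions and weights.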
expected_auc = _np_auc(predictions, labels, weights)
self.assertAlmostEqual(expected_auc, auc.eval())
class AucWithConfidenceIntervalsTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def _testResultsEqual(self, expected_dict, gotten_result):
"""Tests that 2 results (dicts) represent the same data.
Args:
expected_dict: A dictionary with keys that are the names of properties
of PrecisionRecallData and whose values are lists of floats.
      gotten_result: An AucWithConfidenceIntervalData object.
"""
gotten_dict = {k: t.eval() for k, t in gotten_result._asdict().items()}
self.assertItemsEqual(
list(expected_dict.keys()), list(gotten_dict.keys()))
for key, expected_values in expected_dict.items():
self.assertAllClose(expected_values, gotten_dict[key])
def _testCase(self, predictions, labels, expected_result, weights=None):
"""Performs a test given a certain scenario of labels, predictions, weights.
Args:
predictions: The predictions tensor. Of type float32.
labels: The labels tensor. Of type bool.
expected_result: The expected result (dict) that maps to tensors.
weights: Optional weights tensor.
"""
with self.cached_session() as sess:
predictions_tensor = constant_op.constant(
predictions, dtype=dtypes_lib.float32)
labels_tensor = constant_op.constant(labels, dtype=dtypes_lib.int64)
weights_tensor = None
if weights:
weights_tensor = constant_op.constant(weights, dtype=dtypes_lib.float32)
gotten_result, update_op = (
metric_ops.auc_with_confidence_intervals(
labels=labels_tensor,
predictions=predictions_tensor,
weights=weights_tensor))
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self._testResultsEqual(expected_result, gotten_result)
def testAucAllCorrect(self):
self._testCase(
predictions=[0., 0.2, 0.3, 0.3, 0.4, 0.5, 0.6, 0.6, 0.8, 1.0],
labels=[0, 0, 1, 0, 0, 1, 0, 1, 1, 0],
expected_result={
'auc': 0.66666667,
'lower': 0.27826795,
'upper': 0.91208512,
})
def testAucUnorderedInput(self):
self._testCase(
predictions=[1.0, 0.6, 0., 0.3, 0.4, 0.2, 0.5, 0.3, 0.6, 0.8],
labels=[0, 1, 0, 1, 0, 0, 1, 0, 0, 1],
expected_result={
'auc': 0.66666667,
'lower': 0.27826795,
'upper': 0.91208512,
})
def testAucWithWeights(self):
self._testCase(
predictions=[0., 0.2, 0.3, 0.3, 0.4, 0.5, 0.6, 0.6, 0.8, 1.0],
labels=[0, 0, 1, 0, 0, 1, 0, 1, 1, 0],
weights=[0.5, 0.6, 1.2, 1.5, 2.0, 2.0, 1.5, 1.2, 0.6, 0.5],
expected_result={
'auc': 0.65151515,
'lower': 0.28918604,
'upper': 0.89573906,
})
def testAucEqualOne(self):
self._testCase(
predictions=[0, 0.2, 0.3, 0.3, 0.4, 0.5, 0.6, 0.6, 0.8, 1.0],
labels=[0, 0, 0, 0, 0, 1, 1, 1, 1, 1],
expected_result={
'auc': 1.0,
'lower': 1.0,
'upper': 1.0,
})
def testAucEqualZero(self):
self._testCase(
predictions=[0, 0.2, 0.3, 0.3, 0.4, 0.5, 0.6, 0.6, 0.8, 1.0],
labels=[1, 1, 1, 1, 1, 0, 0, 0, 0, 0],
expected_result={
'auc': 0.0,
'lower': 0.0,
'upper': 0.0,
})
def testNonZeroOnePredictions(self):
self._testCase(
predictions=[2.5, -2.5, .5, -.5, 1],
labels=[1, 0, 1, 0, 0],
expected_result={
'auc': 0.83333333,
'lower': 0.15229267,
'upper': 0.99286517,
})
def testAllLabelsOnes(self):
self._testCase(
predictions=[1., 1., 1., 1., 1.],
labels=[1, 1, 1, 1, 1],
expected_result={
'auc': 0.,
'lower': 0.,
'upper': 0.,
})
def testAllLabelsZeros(self):
self._testCase(
predictions=[0., 0., 0., 0., 0.],
labels=[0, 0, 0, 0, 0],
expected_result={
'auc': 0.,
'lower': 0.,
'upper': 0.,
})
def testWeightSumLessThanOneAll(self):
self._testCase(
predictions=[1., 1., 0., 1., 0., 0.],
labels=[1, 1, 1, 0, 0, 0],
weights=[0.1, 0.1, 0.1, 0.1, 0.1, 0.1],
expected_result={
'auc': 0.,
'lower': 0.,
'upper': 0.,
})
def testWithMultipleUpdates(self):
batch_size = 50
num_batches = 100
labels = np.array([])
predictions = np.array([])
tf_labels = variables.VariableV1(
array_ops.ones(batch_size, dtypes_lib.int32),
collections=[ops.GraphKeys.LOCAL_VARIABLES],
dtype=dtypes_lib.int32)
tf_predictions = variables.VariableV1(
array_ops.ones(batch_size),
collections=[ops.GraphKeys.LOCAL_VARIABLES],
dtype=dtypes_lib.float32)
auc, update_op = metrics.auc_with_confidence_intervals(tf_labels,
tf_predictions)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
for _ in xrange(num_batches):
new_labels = np.random.randint(0, 2, size=batch_size)
noise = np.random.normal(0.0, scale=0.2, size=batch_size)
new_predictions = 0.4 + 0.2 * new_labels + noise
labels = np.concatenate([labels, new_labels])
predictions = np.concatenate([predictions, new_predictions])
sess.run(tf_labels.assign(new_labels))
sess.run(tf_predictions.assign(new_predictions))
sess.run(update_op)
expected_auc = _np_auc(predictions, labels)
self.assertAllClose(expected_auc, auc.auc.eval())
def testExceptionOnFloatLabels(self):
with self.cached_session() as sess:
predictions = constant_op.constant([1, 0.5, 0, 1, 0], dtypes_lib.float32)
labels = constant_op.constant([0.7, 0, 1, 0, 1])
_, update_op = metrics.auc_with_confidence_intervals(labels, predictions)
sess.run(variables.local_variables_initializer())
      self.assertRaises(TypeError, sess.run, update_op)
def testExceptionOnGreaterThanOneLabel(self):
with self.cached_session() as sess:
predictions = constant_op.constant([1, 0.5, 0, 1, 0], dtypes_lib.float32)
labels = constant_op.constant([2, 1, 0, 1, 0])
_, update_op = metrics.auc_with_confidence_intervals(labels, predictions)
sess.run(variables.local_variables_initializer())
with self.assertRaisesRegexp(
errors_impl.InvalidArgumentError,
'.*labels must be 0 or 1, at least one is >1.*'):
sess.run(update_op)
def testExceptionOnNegativeLabel(self):
with self.cached_session() as sess:
predictions = constant_op.constant([1, 0.5, 0, 1, 0], dtypes_lib.float32)
labels = constant_op.constant([1, 0, -1, 1, 0])
_, update_op = metrics.auc_with_confidence_intervals(labels, predictions)
sess.run(variables.local_variables_initializer())
with self.assertRaisesRegexp(
errors_impl.InvalidArgumentError,
'.*labels must be 0 or 1, at least one is <0.*'):
sess.run(update_op)
class StreamingPrecisionRecallAtEqualThresholdsTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def _testResultsEqual(self, expected_dict, gotten_result, eps=None):
"""Tests that 2 results (dicts) represent the same data.
Args:
expected_dict: A dictionary with keys that are the names of properties
of PrecisionRecallData and whose values are lists of floats.
gotten_result: A PrecisionRecallData object.
eps: Epsilon value to use for testing output values. If unspecified, use
default from assertAllClose.
"""
gotten_dict = {k: t.eval() for k, t in gotten_result._asdict().items()}
self.assertItemsEqual(list(expected_dict.keys()), list(gotten_dict.keys()))
for key, expected_values in expected_dict.items():
if eps is not None:
self.assertAllClose(expected_values, gotten_dict[key], atol=eps)
else:
self.assertAllClose(expected_values, gotten_dict[key])
def testVars(self):
metric_ops.precision_recall_at_equal_thresholds(
labels=constant_op.constant([True], dtype=dtypes_lib.bool),
predictions=constant_op.constant([0.42], dtype=dtypes_lib.float32))
_assert_metric_variables(
self, ('precision_recall_at_equal_thresholds/variables/tp_buckets:0',
'precision_recall_at_equal_thresholds/variables/fp_buckets:0'))
def testVarsWithName(self):
metric_ops.precision_recall_at_equal_thresholds(
labels=constant_op.constant([True], dtype=dtypes_lib.bool),
predictions=constant_op.constant([0.42], dtype=dtypes_lib.float32),
name='foo')
_assert_metric_variables(
self, ('foo/variables/tp_buckets:0', 'foo/variables/fp_buckets:0'))
def testValuesAreIdempotent(self):
predictions = constant_op.constant(
np.random.uniform(size=(10, 3)), dtype=dtypes_lib.float32)
labels = constant_op.constant(
np.random.uniform(size=(10, 3)) > 0.5, dtype=dtypes_lib.bool)
result, update_op = metric_ops.precision_recall_at_equal_thresholds(
labels=labels, predictions=predictions)
with self.cached_session() as sess:
# Run several updates.
sess.run(variables.local_variables_initializer())
for _ in range(3):
sess.run(update_op)
# Then verify idempotency.
initial_result = {
k: value.eval().tolist()
for k, value in result._asdict().items()
}
for _ in range(3):
self._testResultsEqual(initial_result, result)
def _testCase(self,
predictions,
labels,
expected_result,
dtype=dtypes_lib.float32,
eps=None,
weights=None):
"""Performs a test given a certain scenario of labels, predictions, weights.
Args:
predictions: The predictions tensor. Of type dtype.
labels: The labels tensor. Of type bool.
expected_result: The expected result (dict) that maps to tensors.
dtype: Data type to use for predictions and weights tensor. Default
is float32.
eps: Epsilon value to use for testing output values. If unspecified, use
default from assertAllClose.
weights: Optional weights tensor.
"""
with self.cached_session() as sess:
predictions_tensor = constant_op.constant(predictions, dtype=dtype)
labels_tensor = constant_op.constant(labels, dtype=dtypes_lib.bool)
weights_tensor = None
if weights:
weights_tensor = constant_op.constant(weights, dtype=dtype)
gotten_result, update_op = (
metric_ops.precision_recall_at_equal_thresholds(
labels=labels_tensor,
predictions=predictions_tensor,
weights=weights_tensor,
num_thresholds=3))
self.assertEqual(gotten_result.tp.dtype, dtype)
self.assertEqual(gotten_result.fp.dtype, dtype)
self.assertEqual(gotten_result.tn.dtype, dtype)
self.assertEqual(gotten_result.fn.dtype, dtype)
self.assertEqual(gotten_result.precision.dtype, dtype)
self.assertEqual(gotten_result.recall.dtype, dtype)
self.assertEqual(gotten_result.thresholds.dtype, dtype)
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self._testResultsEqual(expected_result, gotten_result, eps=eps)
def testAllTruePositives(self):
self._testCase(
[[1]], [[True]], {
'tp': [1, 1, 1],
'fp': [0, 0, 0],
'tn': [0, 0, 0],
'fn': [0, 0, 0],
'precision': [1.0, 1.0, 1.0],
'recall': [1.0, 1.0, 1.0],
'thresholds': [0.0, 0.5, 1.0],
})
def testAllTrueNegatives(self):
self._testCase(
[[0]], [[False]], {
'tp': [0, 0, 0],
'fp': [1, 0, 0],
'tn': [0, 1, 1],
'fn': [0, 0, 0],
'precision': [0.0, 0.0, 0.0],
'recall': [0.0, 0.0, 0.0],
'thresholds': [0.0, 0.5, 1.0],
})
def testAllFalsePositives(self):
self._testCase(
[[1]], [[False]], {
'tp': [0, 0, 0],
'fp': [1, 1, 1],
'tn': [0, 0, 0],
'fn': [0, 0, 0],
'precision': [0.0, 0.0, 0.0],
'recall': [0.0, 0.0, 0.0],
'thresholds': [0.0, 0.5, 1.0],
})
def testAllFalseNegatives(self):
self._testCase(
[[0]], [[True]], {
'tp': [1, 0, 0],
'fp': [0, 0, 0],
'tn': [0, 0, 0],
'fn': [0, 1, 1],
'precision': [1.0, 0.0, 0.0],
'recall': [1.0, 0.0, 0.0],
'thresholds': [0.0, 0.5, 1.0],
})
def testManyValues(self):
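    # With thresholds [0.0, 0.5, 1.0]: every score clears 0.0 (tp=4, fp=2),
    # only the positives at 0.6/0.7/0.8 clear 0.5 (tp=3, tn=2, fn=1), and
    # nothing clears 1.0.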
self._testCase(
[[0.2, 0.3, 0.4, 0.6, 0.7, 0.8]],
[[True, False, False, True, True, True]], {
'tp': [4, 3, 0],
'fp': [2, 0, 0],
'tn': [0, 2, 2],
'fn': [0, 1, 4],
'precision': [2.0 / 3.0, 1.0, 0.0],
'recall': [1.0, 0.75, 0.0],
'thresholds': [0.0, 0.5, 1.0],
})
def testManyValuesWithWeights(self):
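    # Same predictions/labels as above, but weighted: the 0.2 and 0.6 positives
    # carry weight 0, so at threshold 0.5 only the 0.7/0.8 positives count
    # (tp=1.5) and no weighted false positives remain (precision=1.0).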
self._testCase(
[[0.2, 0.3, 0.4, 0.6, 0.7, 0.8]],
[[True, False, False, True, True, True]], {
'tp': [1.5, 1.5, 0.0],
'fp': [2.5, 0.0, 0.0],
'tn': [0.0, 2.5, 2.5],
'fn': [0.0, 0.0, 1.5],
'precision': [0.375, 1.0, 0.0],
'recall': [1.0, 1.0, 0.0],
'thresholds': [0.0, 0.5, 1.0],
},
weights=[[0.0, 0.5, 2.0, 0.0, 0.5, 1.0]])
def testFloat64(self):
self._testCase(
[[0.2, 0.3, 0.4, 0.6, 0.7, 0.8]],
[[True, False, False, True, True, True]], {
'tp': [4, 3, 0],
'fp': [2, 0, 0],
'tn': [0, 2, 2],
'fn': [0, 1, 4],
'precision': [2.0 / 3.0, 1.0, 0.0],
'recall': [1.0, 0.75, 0.0],
'thresholds': [0.0, 0.5, 1.0],
},
dtype=dtypes_lib.float64)
def testFloat16(self):
self._testCase(
[[0.2, 0.3, 0.4, 0.6, 0.7, 0.8]],
[[True, False, False, True, True, True]], {
'tp': [4, 3, 0],
'fp': [2, 0, 0],
'tn': [0, 2, 2],
'fn': [0, 1, 4],
'precision': [2.0 / 3.0, 1.0, 0.0],
'recall': [1.0, 0.75, 0.0],
'thresholds': [0.0, 0.5, 1.0],
},
dtype=dtypes_lib.float16,
eps=1e-3)
class StreamingSpecificityAtSensitivityTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_specificity_at_sensitivity(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
sensitivity=0.7)
_assert_metric_variables(self,
('specificity_at_sensitivity/true_positives:0',
'specificity_at_sensitivity/false_negatives:0',
'specificity_at_sensitivity/false_positives:0',
'specificity_at_sensitivity/true_negatives:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_specificity_at_sensitivity(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
sensitivity=0.7,
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_specificity_at_sensitivity(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
sensitivity=0.7,
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.float32, seed=1)
labels = random_ops.random_uniform(
(10, 3), maxval=2, dtype=dtypes_lib.int64, seed=2)
specificity, update_op = metrics.streaming_specificity_at_sensitivity(
predictions, labels, sensitivity=0.7)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_specificity = specificity.eval()
for _ in range(10):
self.assertAlmostEqual(initial_specificity, specificity.eval(), 5)
def testAllCorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
labels = constant_op.constant(inputs)
specificity, update_op = metrics.streaming_specificity_at_sensitivity(
predictions, labels, sensitivity=0.7)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(1, sess.run(update_op))
self.assertEqual(1, specificity.eval())
def testSomeCorrectHighSensitivity(self):
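    # Reaching sensitivity 0.8 requires admitting four of the five positives
    # (all but the 0.1 score); at that threshold every negative is rejected,
    # so specificity is 1.0.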
predictions_values = [0.1, 0.2, 0.4, 0.3, 0.0, 0.1, 0.45, 0.5, 0.8, 0.9]
labels_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
predictions = constant_op.constant(
predictions_values, dtype=dtypes_lib.float32)
labels = constant_op.constant(labels_values)
specificity, update_op = metrics.streaming_specificity_at_sensitivity(
predictions, labels, sensitivity=0.8)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(1.0, sess.run(update_op))
self.assertAlmostEqual(1.0, specificity.eval())
def testSomeCorrectLowSensitivity(self):
predictions_values = [0.1, 0.2, 0.4, 0.3, 0.0, 0.1, 0.2, 0.2, 0.26, 0.26]
labels_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
predictions = constant_op.constant(
predictions_values, dtype=dtypes_lib.float32)
labels = constant_op.constant(labels_values)
specificity, update_op = metrics.streaming_specificity_at_sensitivity(
predictions, labels, sensitivity=0.4)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.6, sess.run(update_op))
self.assertAlmostEqual(0.6, specificity.eval())
def testWeighted1d(self):
predictions_values = [0.1, 0.2, 0.4, 0.3, 0.0, 0.1, 0.2, 0.2, 0.26, 0.26]
labels_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
weights_values = [3]
predictions = constant_op.constant(
predictions_values, dtype=dtypes_lib.float32)
labels = constant_op.constant(labels_values)
weights = constant_op.constant(weights_values)
specificity, update_op = metrics.streaming_specificity_at_sensitivity(
predictions, labels, weights=weights, sensitivity=0.4)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.6, sess.run(update_op))
self.assertAlmostEqual(0.6, specificity.eval())
def testWeighted2d(self):
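    # With weights 1..10 the positives carry a total weight of 40. Keeping just
    # the two 0.26 positives (weight 19) already satisfies sensitivity 0.4, and
    # the negatives below that threshold weigh 1 + 2 + 5 = 8 out of 15, hence
    # the expected specificity of 8/15.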
predictions_values = [0.1, 0.2, 0.4, 0.3, 0.0, 0.1, 0.2, 0.2, 0.26, 0.26]
labels_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
weights_values = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
predictions = constant_op.constant(
predictions_values, dtype=dtypes_lib.float32)
labels = constant_op.constant(labels_values)
weights = constant_op.constant(weights_values)
specificity, update_op = metrics.streaming_specificity_at_sensitivity(
predictions, labels, weights=weights, sensitivity=0.4)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(8.0 / 15.0, sess.run(update_op))
self.assertAlmostEqual(8.0 / 15.0, specificity.eval())
class StreamingSensitivityAtSpecificityTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_sensitivity_at_specificity(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
specificity=0.7)
_assert_metric_variables(self,
('sensitivity_at_specificity/true_positives:0',
'sensitivity_at_specificity/false_negatives:0',
'sensitivity_at_specificity/false_positives:0',
'sensitivity_at_specificity/true_negatives:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_sensitivity_at_specificity(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
specificity=0.7,
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_sensitivity_at_specificity(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
specificity=0.7,
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.float32, seed=1)
labels = random_ops.random_uniform(
(10, 3), maxval=2, dtype=dtypes_lib.int64, seed=2)
sensitivity, update_op = metrics.streaming_sensitivity_at_specificity(
predictions, labels, specificity=0.7)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_sensitivity = sensitivity.eval()
for _ in range(10):
self.assertAlmostEqual(initial_sensitivity, sensitivity.eval(), 5)
def testAllCorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
labels = constant_op.constant(inputs)
specificity, update_op = metrics.streaming_sensitivity_at_specificity(
predictions, labels, specificity=0.7)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(1, sess.run(update_op))
self.assertEqual(1, specificity.eval())
def testSomeCorrectHighSpecificity(self):
predictions_values = [0.0, 0.1, 0.2, 0.3, 0.4, 0.1, 0.45, 0.5, 0.8, 0.9]
labels_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
predictions = constant_op.constant(
predictions_values, dtype=dtypes_lib.float32)
labels = constant_op.constant(labels_values)
specificity, update_op = metrics.streaming_sensitivity_at_specificity(
predictions, labels, specificity=0.8)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.8, sess.run(update_op))
self.assertAlmostEqual(0.8, specificity.eval())
def testSomeCorrectLowSpecificity(self):
predictions_values = [0.0, 0.1, 0.2, 0.3, 0.4, 0.01, 0.02, 0.25, 0.26, 0.26]
labels_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
predictions = constant_op.constant(
predictions_values, dtype=dtypes_lib.float32)
labels = constant_op.constant(labels_values)
specificity, update_op = metrics.streaming_sensitivity_at_specificity(
predictions, labels, specificity=0.4)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.6, sess.run(update_op))
self.assertAlmostEqual(0.6, specificity.eval())
def testWeighted(self):
predictions_values = [0.0, 0.1, 0.2, 0.3, 0.4, 0.01, 0.02, 0.25, 0.26, 0.26]
labels_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
weights_values = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
predictions = constant_op.constant(
predictions_values, dtype=dtypes_lib.float32)
labels = constant_op.constant(labels_values)
weights = constant_op.constant(weights_values)
specificity, update_op = metrics.streaming_sensitivity_at_specificity(
predictions, labels, weights=weights, specificity=0.4)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.675, sess.run(update_op))
self.assertAlmostEqual(0.675, specificity.eval())
# TODO(nsilberman): Break this up into two sets of tests.
class StreamingPrecisionRecallThresholdsTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_precision_at_thresholds(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
thresholds=[0, 0.5, 1.0])
_assert_metric_variables(self, (
'precision_at_thresholds/true_positives:0',
'precision_at_thresholds/false_positives:0',
))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
prec, _ = metrics.streaming_precision_at_thresholds(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
thresholds=[0, 0.5, 1.0],
metrics_collections=[my_collection_name])
rec, _ = metrics.streaming_recall_at_thresholds(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
thresholds=[0, 0.5, 1.0],
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [prec, rec])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, precision_op = metrics.streaming_precision_at_thresholds(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
thresholds=[0, 0.5, 1.0],
updates_collections=[my_collection_name])
_, recall_op = metrics.streaming_recall_at_thresholds(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
thresholds=[0, 0.5, 1.0],
updates_collections=[my_collection_name])
self.assertListEqual(
ops.get_collection(my_collection_name), [precision_op, recall_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.float32, seed=1)
labels = random_ops.random_uniform(
(10, 3), maxval=2, dtype=dtypes_lib.int64, seed=2)
thresholds = [0, 0.5, 1.0]
prec, prec_op = metrics.streaming_precision_at_thresholds(
predictions, labels, thresholds)
rec, rec_op = metrics.streaming_recall_at_thresholds(
predictions, labels, thresholds)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run([prec_op, rec_op])
# Then verify idempotency.
initial_prec = prec.eval()
initial_rec = rec.eval()
for _ in range(10):
self.assertAllClose(initial_prec, prec.eval())
self.assertAllClose(initial_rec, rec.eval())
# TODO(nsilberman): fix tests (passing but incorrect).
def testAllCorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
with self.cached_session() as sess:
predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
labels = constant_op.constant(inputs)
thresholds = [0.5]
prec, prec_op = metrics.streaming_precision_at_thresholds(
predictions, labels, thresholds)
rec, rec_op = metrics.streaming_recall_at_thresholds(
predictions, labels, thresholds)
sess.run(variables.local_variables_initializer())
sess.run([prec_op, rec_op])
self.assertEqual(1, prec.eval())
self.assertEqual(1, rec.eval())
def testSomeCorrect(self):
with self.cached_session() as sess:
predictions = constant_op.constant(
[1, 0, 1, 0], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
thresholds = [0.5]
prec, prec_op = metrics.streaming_precision_at_thresholds(
predictions, labels, thresholds)
rec, rec_op = metrics.streaming_recall_at_thresholds(
predictions, labels, thresholds)
sess.run(variables.local_variables_initializer())
sess.run([prec_op, rec_op])
self.assertAlmostEqual(0.5, prec.eval())
self.assertAlmostEqual(0.5, rec.eval())
def testAllIncorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
with self.cached_session() as sess:
predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
labels = constant_op.constant(1 - inputs, dtype=dtypes_lib.float32)
thresholds = [0.5]
prec, prec_op = metrics.streaming_precision_at_thresholds(
predictions, labels, thresholds)
rec, rec_op = metrics.streaming_recall_at_thresholds(
predictions, labels, thresholds)
sess.run(variables.local_variables_initializer())
sess.run([prec_op, rec_op])
self.assertAlmostEqual(0, prec.eval())
self.assertAlmostEqual(0, rec.eval())
def testWeights1d(self):
with self.cached_session() as sess:
predictions = constant_op.constant(
[[1, 0], [1, 0]], shape=(2, 2), dtype=dtypes_lib.float32)
labels = constant_op.constant([[0, 1], [1, 0]], shape=(2, 2))
weights = constant_op.constant(
[[0], [1]], shape=(2, 1), dtype=dtypes_lib.float32)
thresholds = [0.5, 1.1]
prec, prec_op = metrics.streaming_precision_at_thresholds(
predictions, labels, thresholds, weights=weights)
rec, rec_op = metrics.streaming_recall_at_thresholds(
predictions, labels, thresholds, weights=weights)
prec_low = prec[0]
prec_high = prec[1]
rec_low = rec[0]
rec_high = rec[1]
sess.run(variables.local_variables_initializer())
sess.run([prec_op, rec_op])
self.assertAlmostEqual(1.0, prec_low.eval(), places=5)
self.assertAlmostEqual(0.0, prec_high.eval(), places=5)
self.assertAlmostEqual(1.0, rec_low.eval(), places=5)
self.assertAlmostEqual(0.0, rec_high.eval(), places=5)
def testWeights2d(self):
with self.cached_session() as sess:
predictions = constant_op.constant(
[[1, 0], [1, 0]], shape=(2, 2), dtype=dtypes_lib.float32)
labels = constant_op.constant([[0, 1], [1, 0]], shape=(2, 2))
weights = constant_op.constant(
[[0, 0], [1, 1]], shape=(2, 2), dtype=dtypes_lib.float32)
thresholds = [0.5, 1.1]
prec, prec_op = metrics.streaming_precision_at_thresholds(
predictions, labels, thresholds, weights=weights)
rec, rec_op = metrics.streaming_recall_at_thresholds(
predictions, labels, thresholds, weights=weights)
prec_low = prec[0]
prec_high = prec[1]
rec_low = rec[0]
rec_high = rec[1]
sess.run(variables.local_variables_initializer())
sess.run([prec_op, rec_op])
self.assertAlmostEqual(1.0, prec_low.eval(), places=5)
self.assertAlmostEqual(0.0, prec_high.eval(), places=5)
self.assertAlmostEqual(1.0, rec_low.eval(), places=5)
self.assertAlmostEqual(0.0, rec_high.eval(), places=5)
def testExtremeThresholds(self):
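    # At threshold -1.0 all four examples are predicted positive (3 tp, 1 fp),
    # so precision is 0.75 and recall is 1.0; at 2.0 nothing is predicted
    # positive and both metrics fall back to 0.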
with self.cached_session() as sess:
predictions = constant_op.constant(
[1, 0, 1, 0], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 1, 1, 1], shape=(1, 4))
thresholds = [-1.0, 2.0] # lower/higher than any values
prec, prec_op = metrics.streaming_precision_at_thresholds(
predictions, labels, thresholds)
rec, rec_op = metrics.streaming_recall_at_thresholds(
predictions, labels, thresholds)
prec_low = prec[0]
prec_high = prec[1]
rec_low = rec[0]
rec_high = rec[1]
sess.run(variables.local_variables_initializer())
sess.run([prec_op, rec_op])
self.assertAlmostEqual(0.75, prec_low.eval())
self.assertAlmostEqual(0.0, prec_high.eval())
self.assertAlmostEqual(1.0, rec_low.eval())
self.assertAlmostEqual(0.0, rec_high.eval())
def testZeroLabelsPredictions(self):
with self.cached_session() as sess:
predictions = array_ops.zeros([4], dtype=dtypes_lib.float32)
labels = array_ops.zeros([4])
thresholds = [0.5]
prec, prec_op = metrics.streaming_precision_at_thresholds(
predictions, labels, thresholds)
rec, rec_op = metrics.streaming_recall_at_thresholds(
predictions, labels, thresholds)
sess.run(variables.local_variables_initializer())
sess.run([prec_op, rec_op])
self.assertAlmostEqual(0, prec.eval(), 6)
self.assertAlmostEqual(0, rec.eval(), 6)
def testWithMultipleUpdates(self):
num_samples = 1000
batch_size = 10
num_batches = int(num_samples / batch_size)
# Create the labels and data.
labels = np.random.randint(0, 2, size=(num_samples, 1))
noise = np.random.normal(0.0, scale=0.2, size=(num_samples, 1))
predictions = 0.4 + 0.2 * labels + noise
predictions[predictions > 1] = 1
predictions[predictions < 0] = 0
thresholds = [0.3]
tp = 0
fp = 0
fn = 0
tn = 0
for i in range(num_samples):
if predictions[i] > thresholds[0]:
if labels[i] == 1:
tp += 1
else:
fp += 1
else:
if labels[i] == 1:
fn += 1
else:
tn += 1
epsilon = 1e-7
expected_prec = tp / (epsilon + tp + fp)
expected_rec = tp / (epsilon + tp + fn)
labels = labels.astype(np.float32)
predictions = predictions.astype(np.float32)
with self.cached_session() as sess:
      # Reshape the data so it's easy to queue up:
predictions_batches = predictions.reshape((batch_size, num_batches))
labels_batches = labels.reshape((batch_size, num_batches))
# Enqueue the data:
predictions_queue = data_flow_ops.FIFOQueue(
num_batches, dtypes=dtypes_lib.float32, shapes=(batch_size,))
labels_queue = data_flow_ops.FIFOQueue(
num_batches, dtypes=dtypes_lib.float32, shapes=(batch_size,))
for i in range(int(num_batches)):
tf_prediction = constant_op.constant(predictions_batches[:, i])
tf_label = constant_op.constant(labels_batches[:, i])
sess.run([
predictions_queue.enqueue(tf_prediction),
labels_queue.enqueue(tf_label)
])
tf_predictions = predictions_queue.dequeue()
tf_labels = labels_queue.dequeue()
prec, prec_op = metrics.streaming_precision_at_thresholds(
tf_predictions, tf_labels, thresholds)
rec, rec_op = metrics.streaming_recall_at_thresholds(
tf_predictions, tf_labels, thresholds)
sess.run(variables.local_variables_initializer())
for _ in range(int(num_samples / batch_size)):
sess.run([prec_op, rec_op])
      # Since this is only approximate, we can't expect a 6-digit match,
      # although with a higher number of samples/thresholds the accuracy
      # should improve.
self.assertAlmostEqual(expected_prec, prec.eval(), 2)
self.assertAlmostEqual(expected_rec, rec.eval(), 2)
class StreamingFPRThresholdsTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_false_positive_rate_at_thresholds(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
thresholds=[0, 0.5, 1.0])
_assert_metric_variables(self, (
'false_positive_rate_at_thresholds/false_positives:0',
'false_positive_rate_at_thresholds/true_negatives:0',
))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
fpr, _ = metrics.streaming_false_positive_rate_at_thresholds(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
thresholds=[0, 0.5, 1.0],
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [fpr])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_false_positive_rate_at_thresholds(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
thresholds=[0, 0.5, 1.0],
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.float32, seed=1)
labels = random_ops.random_uniform(
(10, 3), maxval=2, dtype=dtypes_lib.int64, seed=2)
thresholds = [0, 0.5, 1.0]
fpr, fpr_op = metrics.streaming_false_positive_rate_at_thresholds(
predictions, labels, thresholds)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(fpr_op)
# Then verify idempotency.
initial_fpr = fpr.eval()
for _ in range(10):
self.assertAllClose(initial_fpr, fpr.eval())
def testAllCorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
with self.cached_session() as sess:
predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
labels = constant_op.constant(inputs)
thresholds = [0.5]
fpr, fpr_op = metrics.streaming_false_positive_rate_at_thresholds(
predictions, labels, thresholds)
sess.run(variables.local_variables_initializer())
sess.run(fpr_op)
self.assertEqual(0, fpr.eval())
def testSomeCorrect(self):
with self.cached_session() as sess:
predictions = constant_op.constant(
[1, 0, 1, 0], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
thresholds = [0.5]
fpr, fpr_op = metrics.streaming_false_positive_rate_at_thresholds(
predictions, labels, thresholds)
sess.run(variables.local_variables_initializer())
sess.run(fpr_op)
self.assertAlmostEqual(0.5, fpr.eval())
def testAllIncorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
with self.cached_session() as sess:
predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
labels = constant_op.constant(1 - inputs, dtype=dtypes_lib.float32)
thresholds = [0.5]
fpr, fpr_op = metrics.streaming_false_positive_rate_at_thresholds(
predictions, labels, thresholds)
sess.run(variables.local_variables_initializer())
sess.run(fpr_op)
self.assertAlmostEqual(1, fpr.eval())
def testWeights1d(self):
with self.cached_session() as sess:
predictions = constant_op.constant(
[[1, 0], [1, 0]], shape=(2, 2), dtype=dtypes_lib.float32)
labels = constant_op.constant([[0, 1], [1, 0]], shape=(2, 2))
weights = constant_op.constant(
[[0], [1]], shape=(2, 1), dtype=dtypes_lib.float32)
thresholds = [0.5, 1.1]
fpr, fpr_op = metrics.streaming_false_positive_rate_at_thresholds(
predictions, labels, thresholds, weights=weights)
fpr_low = fpr[0]
fpr_high = fpr[1]
sess.run(variables.local_variables_initializer())
sess.run(fpr_op)
self.assertAlmostEqual(0.0, fpr_low.eval(), places=5)
self.assertAlmostEqual(0.0, fpr_high.eval(), places=5)
def testWeights2d(self):
with self.cached_session() as sess:
predictions = constant_op.constant(
[[1, 0], [1, 0]], shape=(2, 2), dtype=dtypes_lib.float32)
labels = constant_op.constant([[0, 1], [1, 0]], shape=(2, 2))
weights = constant_op.constant(
[[0, 0], [1, 1]], shape=(2, 2), dtype=dtypes_lib.float32)
thresholds = [0.5, 1.1]
fpr, fpr_op = metrics.streaming_false_positive_rate_at_thresholds(
predictions, labels, thresholds, weights=weights)
fpr_low = fpr[0]
fpr_high = fpr[1]
sess.run(variables.local_variables_initializer())
sess.run(fpr_op)
self.assertAlmostEqual(0.0, fpr_low.eval(), places=5)
self.assertAlmostEqual(0.0, fpr_high.eval(), places=5)
def testExtremeThresholds(self):
with self.cached_session() as sess:
predictions = constant_op.constant(
[1, 0, 1, 0], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 1, 1, 1], shape=(1, 4))
thresholds = [-1.0, 2.0] # lower/higher than any values
fpr, fpr_op = metrics.streaming_false_positive_rate_at_thresholds(
predictions, labels, thresholds)
fpr_low = fpr[0]
fpr_high = fpr[1]
sess.run(variables.local_variables_initializer())
sess.run(fpr_op)
self.assertAlmostEqual(1.0, fpr_low.eval(), places=5)
self.assertAlmostEqual(0.0, fpr_high.eval(), places=5)
def testZeroLabelsPredictions(self):
with self.cached_session() as sess:
predictions = array_ops.zeros([4], dtype=dtypes_lib.float32)
labels = array_ops.zeros([4])
thresholds = [0.5]
fpr, fpr_op = metrics.streaming_false_positive_rate_at_thresholds(
predictions, labels, thresholds)
sess.run(variables.local_variables_initializer())
sess.run(fpr_op)
self.assertAlmostEqual(0, fpr.eval(), 6)
def testWithMultipleUpdates(self):
num_samples = 1000
batch_size = 10
num_batches = int(num_samples / batch_size)
# Create the labels and data.
labels = np.random.randint(0, 2, size=(num_samples, 1))
noise = np.random.normal(0.0, scale=0.2, size=(num_samples, 1))
predictions = 0.4 + 0.2 * labels + noise
predictions[predictions > 1] = 1
predictions[predictions < 0] = 0
thresholds = [0.3]
fp = 0
tn = 0
for i in range(num_samples):
if predictions[i] > thresholds[0]:
if labels[i] == 0:
fp += 1
else:
if labels[i] == 0:
tn += 1
epsilon = 1e-7
expected_fpr = fp / (epsilon + fp + tn)
labels = labels.astype(np.float32)
predictions = predictions.astype(np.float32)
with self.cached_session() as sess:
      # Reshape the data so it's easy to queue up:
predictions_batches = predictions.reshape((batch_size, num_batches))
labels_batches = labels.reshape((batch_size, num_batches))
# Enqueue the data:
predictions_queue = data_flow_ops.FIFOQueue(
num_batches, dtypes=dtypes_lib.float32, shapes=(batch_size,))
labels_queue = data_flow_ops.FIFOQueue(
num_batches, dtypes=dtypes_lib.float32, shapes=(batch_size,))
for i in range(int(num_batches)):
tf_prediction = constant_op.constant(predictions_batches[:, i])
tf_label = constant_op.constant(labels_batches[:, i])
sess.run([
predictions_queue.enqueue(tf_prediction),
labels_queue.enqueue(tf_label)
])
tf_predictions = predictions_queue.dequeue()
tf_labels = labels_queue.dequeue()
fpr, fpr_op = metrics.streaming_false_positive_rate_at_thresholds(
tf_predictions, tf_labels, thresholds)
sess.run(variables.local_variables_initializer())
for _ in range(int(num_samples / batch_size)):
sess.run(fpr_op)
      # Since this is only approximate, we can't expect a 6-digit match,
      # although with a higher number of samples/thresholds the accuracy
      # should improve.
self.assertAlmostEqual(expected_fpr, fpr.eval(), 2)
class RecallAtPrecisionTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.recall_at_precision(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
precision=0.7)
_assert_metric_variables(self, ('recall_at_precision/true_positives:0',
'recall_at_precision/false_negatives:0',
'recall_at_precision/false_positives:0',
'recall_at_precision/true_negatives:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.recall_at_precision(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
precision=0.7,
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.recall_at_precision(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
precision=0.7,
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.float32, seed=1)
labels = random_ops.random_uniform(
(10, 3), maxval=2, dtype=dtypes_lib.int64, seed=2)
recall, update_op = metrics.recall_at_precision(
labels, predictions, precision=0.7)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_recall = recall.eval()
for _ in range(10):
self.assertAlmostEqual(initial_recall, recall.eval(), 5)
def testAllCorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
labels = constant_op.constant(inputs)
recall, update_op = metrics.recall_at_precision(
labels, predictions, precision=1.0)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(1, sess.run(update_op))
self.assertEqual(1, recall.eval())
def testSomeCorrectHighPrecision(self):
predictions_values = [1, .9, .8, .7, .6, .5, .4, .3]
labels_values = [1, 1, 1, 1, 0, 0, 0, 1]
predictions = constant_op.constant(
predictions_values, dtype=dtypes_lib.float32)
labels = constant_op.constant(labels_values)
recall, update_op = metrics.recall_at_precision(
labels, predictions, precision=0.8)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.8, sess.run(update_op))
self.assertAlmostEqual(0.8, recall.eval())
def testSomeCorrectLowPrecision(self):
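    # The expected recall corresponds to an operating point that admits the top
    # five scores (2 true positives, 3 false positives, i.e. precision 0.4);
    # with 3 positives in total, recall is 2/3.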
predictions_values = [1, .9, .8, .7, .6, .5, .4, .3, .2, .1]
labels_values = [1, 1, 0, 0, 0, 0, 0, 0, 0, 1]
predictions = constant_op.constant(
predictions_values, dtype=dtypes_lib.float32)
labels = constant_op.constant(labels_values)
recall, update_op = metrics.recall_at_precision(
labels, predictions, precision=0.4)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
target_recall = 2.0 / 3.0
self.assertAlmostEqual(target_recall, sess.run(update_op))
self.assertAlmostEqual(target_recall, recall.eval())
def testWeighted(self):
predictions_values = [1, .9, .8, .7, .6]
labels_values = [1, 1, 0, 0, 1]
weights_values = [1, 1, 3, 4, 1]
predictions = constant_op.constant(
predictions_values, dtype=dtypes_lib.float32)
labels = constant_op.constant(labels_values)
weights = constant_op.constant(weights_values)
recall, update_op = metrics.recall_at_precision(
labels, predictions, weights=weights, precision=0.4)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
target_recall = 2.0 / 3.0
self.assertAlmostEqual(target_recall, sess.run(update_op))
self.assertAlmostEqual(target_recall, recall.eval())
def _test_strict_mode(self, strict_mode, target_precision, expected_recall):
num_thresholds = 11
predictions_values = [.2, .3, .5, .6, .7, .8, .9, .9, .9, .1]
labels_values = [1, 1, 0, 0, 0, 0, 0, 0, 0, 1]
# Resulting thresholds and the corresponding precision and recall values at
# each threshold:
# Thresholds [0.1 0.2 0.3 0.4 0.5 0.6 0.7 0.8 0.9]
# precisions: [0.3 0.2 0.1 0 0 0 0 0 0]
# recalls: [1.0 0.7 0.3 0 0 0 0 0 0]
predictions = constant_op.constant(
predictions_values, dtype=dtypes_lib.float32)
labels = constant_op.constant(labels_values)
recall, update_op = metrics.recall_at_precision(
labels,
predictions,
num_thresholds=num_thresholds,
precision=target_precision,
strict_mode=strict_mode)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(expected_recall, sess.run(update_op))
self.assertAlmostEqual(expected_recall, recall.eval())
def testStrictMode_Off(self):
    # strict_mode is turned off, so we return the recall at the threshold whose
    # precision (0.3) is closest to the target precision (0.9). The recall
    # corresponding to that threshold is 1.0.
self._test_strict_mode(
strict_mode=False, target_precision=0.9, expected_recall=1.0)
def testStrictMode_OnAndFail(self):
# strict_mode is turned on and we fail to reach the target precision at any
# threshold.
# Target precision: 0.9
# Diff: [-0.6 -0.7 -0.8 -0.9 -0.9 -0.9 -0.9 -0.9 -0.9]
# Reciprocal: [-1.6 -1.4 -1.3 -1.1 -1.1 -1.1 -1.1 -1.1 -1.1]
    # Max index: 3 and the corresponding precision is 0, which is smaller than
    # the target precision 0.9. As a result, the expected recall is 0.
self._test_strict_mode(
strict_mode=True, target_precision=0.9, expected_recall=.0)
def testStrictMode_OnAndSucceed(self):
# strict_mode is on and we can reach the target precision at certain
# threshold.
# Target precision: 0.2
# Diff: [0.1 0 -0.1 -0.2 -0.2 -0.2 -0.2 -0.2 -0.2]
# Reciprocal: [10 infty -10.0 -5.0 -5.0 -5.0 -5.0 -5.0 -5.0]
    # Max index: 1 and the corresponding precision is 0.2, which is no smaller
    # than the target precision 0.2. In this case, we return the recall at
    # index 1, which is 2.0/3 (~0.67).
self._test_strict_mode(
strict_mode=True, target_precision=0.2, expected_recall=2.0 / 3)
class PrecisionAtRecallTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.precision_at_recall(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
target_recall=0.7)
_assert_metric_variables(self,
('precision_at_recall/true_positives:0',
'precision_at_recall/false_negatives:0',
'precision_at_recall/false_positives:0',
'precision_at_recall/true_negatives:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.precision_at_recall(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
target_recall=0.7,
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.precision_at_recall(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
target_recall=0.7,
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.float32, seed=1)
labels = random_ops.random_uniform(
(10, 3), maxval=2, dtype=dtypes_lib.int64, seed=1)
precision, update_op = metrics.precision_at_recall(
labels, predictions, target_recall=0.7)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_precision = precision.eval()
for _ in range(10):
self.assertAlmostEqual(initial_precision, precision.eval(), places=5)
def testAllCorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
labels = constant_op.constant(inputs)
precision, update_op = metrics.precision_at_recall(
labels, predictions, target_recall=0.7)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(1, sess.run(update_op))
self.assertEqual(1, precision.eval())
def testAllIncorrect(self):
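    # With predictions perfectly anti-correlated with the labels, any non-zero
    # recall forces a threshold that admits every example, so precision
    # degrades to the base rate of positives (the label prior).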
inputs = np.random.randint(0, 2, size=(100, 1))
predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
labels = 1.0 - predictions
label_prior = math_ops.reduce_mean(labels)
precision, update_op = metrics.precision_at_recall(
labels, predictions, target_recall=0.2)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(sess.run(label_prior), sess.run(update_op))
self.assertEqual(sess.run(label_prior), precision.eval())
def testSomeCorrectHighRecall(self):
predictions_values = [0.1, 0.2, 0.5, 0.3, 0.0, 0.1, 0.45, 0.5, 0.8, 0.9]
labels_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
predictions = constant_op.constant(
predictions_values, dtype=dtypes_lib.float32)
labels = constant_op.constant(labels_values)
precision, update_op = metrics.precision_at_recall(
labels, predictions, target_recall=0.8)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.8, sess.run(update_op))
self.assertAlmostEqual(0.8, precision.eval())
def testSomeCorrectLowRecall(self):
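    # Hitting recall 0.4 requires the two highest-scoring positives (0.9 and
    # 0.6); at that threshold the 0.7 negative is admitted as well, so
    # precision is 2/3.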
predictions_values = [0.1, 0.2, 0.7, 0.3, 0.0, 0.1, 0.45, 0.5, 0.6, 0.9]
labels_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
predictions = constant_op.constant(
predictions_values, dtype=dtypes_lib.float32)
labels = constant_op.constant(labels_values)
precision, update_op = metrics.precision_at_recall(
labels, predictions, target_recall=0.4)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(2.0/3, sess.run(update_op))
self.assertAlmostEqual(2.0/3, precision.eval())
def testWeighted_multipleLabelDtypes(self):
for label_dtype in (dtypes_lib.bool, dtypes_lib.int32, dtypes_lib.float32):
predictions_values = [
0.0, 0.1, 0.2, 0.3, 0.4, 0.1, 0.22, 0.25, 0.31, 0.35]
labels_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
weights_values = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
predictions = constant_op.constant(
predictions_values, dtype=dtypes_lib.float32)
labels = math_ops.cast(labels_values, dtype=label_dtype)
weights = constant_op.constant(weights_values)
precision, update_op = metrics.precision_at_recall(
labels, predictions, target_recall=0.8, weights=weights)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(34.0/43, sess.run(update_op))
self.assertAlmostEqual(34.0/43, precision.eval())
class StreamingFNRThresholdsTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_false_negative_rate_at_thresholds(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
thresholds=[0, 0.5, 1.0])
_assert_metric_variables(self, (
'false_negative_rate_at_thresholds/false_negatives:0',
'false_negative_rate_at_thresholds/true_positives:0',
))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
fnr, _ = metrics.streaming_false_negative_rate_at_thresholds(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
thresholds=[0, 0.5, 1.0],
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [fnr])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_false_negative_rate_at_thresholds(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
thresholds=[0, 0.5, 1.0],
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.float32, seed=1)
labels = random_ops.random_uniform(
(10, 3), maxval=2, dtype=dtypes_lib.int64, seed=2)
thresholds = [0, 0.5, 1.0]
fnr, fnr_op = metrics.streaming_false_negative_rate_at_thresholds(
predictions, labels, thresholds)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(fnr_op)
# Then verify idempotency.
initial_fnr = fnr.eval()
for _ in range(10):
self.assertAllClose(initial_fnr, fnr.eval())
def testAllCorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
with self.cached_session() as sess:
predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
labels = constant_op.constant(inputs)
thresholds = [0.5]
fnr, fnr_op = metrics.streaming_false_negative_rate_at_thresholds(
predictions, labels, thresholds)
sess.run(variables.local_variables_initializer())
sess.run(fnr_op)
self.assertEqual(0, fnr.eval())
def testSomeCorrect(self):
with self.cached_session() as sess:
predictions = constant_op.constant(
[1, 0, 1, 0], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
thresholds = [0.5]
fnr, fnr_op = metrics.streaming_false_negative_rate_at_thresholds(
predictions, labels, thresholds)
sess.run(variables.local_variables_initializer())
sess.run(fnr_op)
self.assertAlmostEqual(0.5, fnr.eval())
def testAllIncorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
with self.cached_session() as sess:
predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
labels = constant_op.constant(1 - inputs, dtype=dtypes_lib.float32)
thresholds = [0.5]
fnr, fnr_op = metrics.streaming_false_negative_rate_at_thresholds(
predictions, labels, thresholds)
sess.run(variables.local_variables_initializer())
sess.run(fnr_op)
self.assertAlmostEqual(1, fnr.eval())
def testWeights1d(self):
with self.cached_session() as sess:
predictions = constant_op.constant(
[[1, 0], [1, 0]], shape=(2, 2), dtype=dtypes_lib.float32)
labels = constant_op.constant([[0, 1], [1, 0]], shape=(2, 2))
weights = constant_op.constant(
[[0], [1]], shape=(2, 1), dtype=dtypes_lib.float32)
thresholds = [0.5, 1.1]
fnr, fnr_op = metrics.streaming_false_negative_rate_at_thresholds(
predictions, labels, thresholds, weights=weights)
fnr_low = fnr[0]
fnr_high = fnr[1]
sess.run(variables.local_variables_initializer())
sess.run(fnr_op)
self.assertAlmostEqual(0.0, fnr_low.eval(), places=5)
self.assertAlmostEqual(1.0, fnr_high.eval(), places=5)
def testWeights2d(self):
with self.cached_session() as sess:
predictions = constant_op.constant(
[[1, 0], [1, 0]], shape=(2, 2), dtype=dtypes_lib.float32)
labels = constant_op.constant([[0, 1], [1, 0]], shape=(2, 2))
weights = constant_op.constant(
[[0, 0], [1, 1]], shape=(2, 2), dtype=dtypes_lib.float32)
thresholds = [0.5, 1.1]
fnr, fnr_op = metrics.streaming_false_negative_rate_at_thresholds(
predictions, labels, thresholds, weights=weights)
fnr_low = fnr[0]
fnr_high = fnr[1]
sess.run(variables.local_variables_initializer())
sess.run(fnr_op)
self.assertAlmostEqual(0.0, fnr_low.eval(), places=5)
self.assertAlmostEqual(1.0, fnr_high.eval(), places=5)
def testExtremeThresholds(self):
with self.cached_session() as sess:
predictions = constant_op.constant(
[1, 0, 1, 0], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 1, 1, 1], shape=(1, 4))
thresholds = [-1.0, 2.0] # lower/higher than any values
fnr, fnr_op = metrics.streaming_false_negative_rate_at_thresholds(
predictions, labels, thresholds)
fnr_low = fnr[0]
fnr_high = fnr[1]
sess.run(variables.local_variables_initializer())
sess.run(fnr_op)
self.assertAlmostEqual(0.0, fnr_low.eval())
self.assertAlmostEqual(1.0, fnr_high.eval())
def testZeroLabelsPredictions(self):
with self.cached_session() as sess:
predictions = array_ops.zeros([4], dtype=dtypes_lib.float32)
labels = array_ops.zeros([4])
thresholds = [0.5]
fnr, fnr_op = metrics.streaming_false_negative_rate_at_thresholds(
predictions, labels, thresholds)
sess.run(variables.local_variables_initializer())
sess.run(fnr_op)
self.assertAlmostEqual(0, fnr.eval(), 6)
def testWithMultipleUpdates(self):
num_samples = 1000
batch_size = 10
num_batches = int(num_samples / batch_size)
# Create the labels and data.
labels = np.random.randint(0, 2, size=(num_samples, 1))
noise = np.random.normal(0.0, scale=0.2, size=(num_samples, 1))
predictions = 0.4 + 0.2 * labels + noise
predictions[predictions > 1] = 1
predictions[predictions < 0] = 0
thresholds = [0.3]
fn = 0
tp = 0
for i in range(num_samples):
if predictions[i] > thresholds[0]:
if labels[i] == 1:
tp += 1
else:
if labels[i] == 1:
fn += 1
epsilon = 1e-7
expected_fnr = fn / (epsilon + fn + tp)
labels = labels.astype(np.float32)
predictions = predictions.astype(np.float32)
with self.cached_session() as sess:
      # Reshape the data so it's easy to queue up:
predictions_batches = predictions.reshape((batch_size, num_batches))
labels_batches = labels.reshape((batch_size, num_batches))
# Enqueue the data:
predictions_queue = data_flow_ops.FIFOQueue(
num_batches, dtypes=dtypes_lib.float32, shapes=(batch_size,))
labels_queue = data_flow_ops.FIFOQueue(
num_batches, dtypes=dtypes_lib.float32, shapes=(batch_size,))
for i in range(int(num_batches)):
tf_prediction = constant_op.constant(predictions_batches[:, i])
tf_label = constant_op.constant(labels_batches[:, i])
sess.run([
predictions_queue.enqueue(tf_prediction),
labels_queue.enqueue(tf_label)
])
tf_predictions = predictions_queue.dequeue()
tf_labels = labels_queue.dequeue()
fnr, fnr_op = metrics.streaming_false_negative_rate_at_thresholds(
tf_predictions, tf_labels, thresholds)
sess.run(variables.local_variables_initializer())
for _ in range(int(num_samples / batch_size)):
sess.run(fnr_op)
      # Since this is only approximate, we can't expect a 6-digit match,
      # although with a higher number of samples/thresholds the accuracy
      # should improve.
self.assertAlmostEqual(expected_fnr, fnr.eval(), 2)
# TODO(ptucker): Remove when we remove `streaming_recall_at_k`.
# This op will be deprecated soon in favor of `streaming_sparse_recall_at_k`.
# Until then, this test validates that both ops yield the same results.
class StreamingRecallAtKTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
self._batch_size = 4
self._num_classes = 3
self._np_predictions = np.matrix(('0.1 0.2 0.7;'
'0.6 0.2 0.2;'
'0.0 0.9 0.1;'
'0.2 0.0 0.8'))
self._np_labels = [0, 0, 0, 0]
def testVars(self):
metrics.streaming_recall_at_k(
predictions=array_ops.ones((self._batch_size, self._num_classes)),
labels=array_ops.ones((self._batch_size,), dtype=dtypes_lib.int32),
k=1)
_assert_metric_variables(self,
('recall_at_1/count:0', 'recall_at_1/total:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_recall_at_k(
predictions=array_ops.ones((self._batch_size, self._num_classes)),
labels=array_ops.ones((self._batch_size,), dtype=dtypes_lib.int32),
k=1,
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_recall_at_k(
predictions=array_ops.ones((self._batch_size, self._num_classes)),
labels=array_ops.ones((self._batch_size,), dtype=dtypes_lib.int32),
k=1,
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testSingleUpdateKIs1(self):
predictions = constant_op.constant(
self._np_predictions,
shape=(self._batch_size, self._num_classes),
dtype=dtypes_lib.float32)
labels = constant_op.constant(
self._np_labels, shape=(self._batch_size,), dtype=dtypes_lib.int64)
recall, update_op = metrics.streaming_recall_at_k(predictions, labels, k=1)
sp_recall, sp_update_op = metrics.streaming_sparse_recall_at_k(
predictions, array_ops.reshape(labels, (self._batch_size, 1)), k=1)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(0.25, sess.run(update_op))
self.assertEqual(0.25, recall.eval())
self.assertEqual(0.25, sess.run(sp_update_op))
self.assertEqual(0.25, sp_recall.eval())
def testSingleUpdateKIs2(self):
predictions = constant_op.constant(
self._np_predictions,
shape=(self._batch_size, self._num_classes),
dtype=dtypes_lib.float32)
labels = constant_op.constant(
self._np_labels, shape=(self._batch_size,), dtype=dtypes_lib.int64)
recall, update_op = metrics.streaming_recall_at_k(predictions, labels, k=2)
sp_recall, sp_update_op = metrics.streaming_sparse_recall_at_k(
predictions, array_ops.reshape(labels, (self._batch_size, 1)), k=2)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(0.5, sess.run(update_op))
self.assertEqual(0.5, recall.eval())
self.assertEqual(0.5, sess.run(sp_update_op))
self.assertEqual(0.5, sp_recall.eval())
def testSingleUpdateKIs3(self):
predictions = constant_op.constant(
self._np_predictions,
shape=(self._batch_size, self._num_classes),
dtype=dtypes_lib.float32)
labels = constant_op.constant(
self._np_labels, shape=(self._batch_size,), dtype=dtypes_lib.int64)
recall, update_op = metrics.streaming_recall_at_k(predictions, labels, k=3)
sp_recall, sp_update_op = metrics.streaming_sparse_recall_at_k(
predictions, array_ops.reshape(labels, (self._batch_size, 1)), k=3)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(1.0, sess.run(update_op))
self.assertEqual(1.0, recall.eval())
self.assertEqual(1.0, sess.run(sp_update_op))
self.assertEqual(1.0, sp_recall.eval())
def testSingleUpdateSomeMissingKIs2(self):
predictions = constant_op.constant(
self._np_predictions,
shape=(self._batch_size, self._num_classes),
dtype=dtypes_lib.float32)
labels = constant_op.constant(
self._np_labels, shape=(self._batch_size,), dtype=dtypes_lib.int64)
weights = constant_op.constant(
[0, 1, 0, 1], shape=(self._batch_size,), dtype=dtypes_lib.float32)
recall, update_op = metrics.streaming_recall_at_k(
predictions, labels, k=2, weights=weights)
sp_recall, sp_update_op = metrics.streaming_sparse_recall_at_k(
predictions,
array_ops.reshape(labels, (self._batch_size, 1)),
k=2,
weights=weights)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(1.0, sess.run(update_op))
self.assertEqual(1.0, recall.eval())
self.assertEqual(1.0, sess.run(sp_update_op))
self.assertEqual(1.0, sp_recall.eval())
class StreamingSparsePrecisionTest(test.TestCase):
def _test_streaming_sparse_precision_at_k(self,
predictions,
labels,
k,
expected,
class_id=None,
weights=None):
with ops.Graph().as_default() as g, self.session(g):
if weights is not None:
weights = constant_op.constant(weights, dtypes_lib.float32)
metric, update = metrics.streaming_sparse_precision_at_k(
predictions=constant_op.constant(predictions, dtypes_lib.float32),
labels=labels,
k=k,
class_id=class_id,
weights=weights)
# Fails without initialized vars.
self.assertRaises(errors_impl.OpError, metric.eval)
self.assertRaises(errors_impl.OpError, update.eval)
variables.variables_initializer(variables.local_variables()).run()
# Run per-step op and assert expected values.
if math.isnan(expected):
_assert_nan(self, update.eval())
_assert_nan(self, metric.eval())
else:
self.assertEqual(expected, update.eval())
self.assertEqual(expected, metric.eval())
def _test_streaming_sparse_precision_at_top_k(self,
top_k_predictions,
labels,
expected,
class_id=None,
weights=None):
with ops.Graph().as_default() as g, self.session(g):
if weights is not None:
weights = constant_op.constant(weights, dtypes_lib.float32)
metric, update = metrics.streaming_sparse_precision_at_top_k(
top_k_predictions=constant_op.constant(top_k_predictions,
dtypes_lib.int32),
labels=labels,
class_id=class_id,
weights=weights)
# Fails without initialized vars.
self.assertRaises(errors_impl.OpError, metric.eval)
self.assertRaises(errors_impl.OpError, update.eval)
variables.variables_initializer(variables.local_variables()).run()
# Run per-step op and assert expected values.
if math.isnan(expected):
self.assertTrue(math.isnan(update.eval()))
self.assertTrue(math.isnan(metric.eval()))
else:
self.assertEqual(expected, update.eval())
self.assertEqual(expected, metric.eval())
def _test_streaming_sparse_average_precision_at_k(self,
predictions,
labels,
k,
expected,
weights=None):
with ops.Graph().as_default() as g, self.session(g):
if weights is not None:
weights = constant_op.constant(weights, dtypes_lib.float32)
predictions = constant_op.constant(predictions, dtypes_lib.float32)
metric, update = metrics.streaming_sparse_average_precision_at_k(
predictions, labels, k, weights=weights)
# Fails without initialized vars.
self.assertRaises(errors_impl.OpError, metric.eval)
self.assertRaises(errors_impl.OpError, update.eval)
local_variables = variables.local_variables()
variables.variables_initializer(local_variables).run()
# Run per-step op and assert expected values.
if math.isnan(expected):
_assert_nan(self, update.eval())
_assert_nan(self, metric.eval())
else:
self.assertAlmostEqual(expected, update.eval())
self.assertAlmostEqual(expected, metric.eval())
def _test_streaming_sparse_average_precision_at_top_k(self,
top_k_predictions,
labels,
expected,
weights=None):
with ops.Graph().as_default() as g, self.session(g):
if weights is not None:
weights = constant_op.constant(weights, dtypes_lib.float32)
metric, update = metrics.streaming_sparse_average_precision_at_top_k(
top_k_predictions, labels, weights=weights)
# Fails without initialized vars.
self.assertRaises(errors_impl.OpError, metric.eval)
self.assertRaises(errors_impl.OpError, update.eval)
local_variables = variables.local_variables()
variables.variables_initializer(local_variables).run()
# Run per-step op and assert expected values.
if math.isnan(expected):
_assert_nan(self, update.eval())
_assert_nan(self, metric.eval())
else:
self.assertAlmostEqual(expected, update.eval())
self.assertAlmostEqual(expected, metric.eval())
def test_top_k_rank_invalid(self):
with self.cached_session():
# top_k_predictions has rank < 2.
top_k_predictions = [9, 4, 6, 2, 0]
sp_labels = sparse_tensor.SparseTensorValue(
indices=np.array([[
0,
], [
1,
], [
2,
]], np.int64),
values=np.array([2, 7, 8], np.int64),
dense_shape=np.array([
10,
], np.int64))
with self.assertRaises(ValueError):
precision, _ = metrics.streaming_sparse_precision_at_top_k(
top_k_predictions=constant_op.constant(top_k_predictions,
dtypes_lib.int64),
labels=sp_labels)
variables.variables_initializer(variables.local_variables()).run()
precision.eval()
def test_average_precision(self):
# Example 1.
# Matches example here:
# fastml.com/what-you-wanted-to-know-about-mean-average-precision
labels_ex1 = (0, 1, 2, 3, 4)
labels = np.array([labels_ex1], dtype=np.int64)
predictions_ex1 = (0.2, 0.1, 0.0, 0.4, 0.0, 0.5, 0.3)
predictions = (predictions_ex1,)
predictions_top_k_ex1 = (5, 3, 6, 0, 1, 2)
precision_ex1 = (0.0 / 1, 1.0 / 2, 1.0 / 3, 2.0 / 4)
avg_precision_ex1 = (0.0 / 1, precision_ex1[1] / 2, precision_ex1[1] / 3,
(precision_ex1[1] + precision_ex1[3]) / 4)
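    # Average precision at k sums precision@rank over the ranks (up to k) whose
    # prediction is a relevant label, then divides by k.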
for i in xrange(4):
k = i + 1
self._test_streaming_sparse_precision_at_k(
predictions, labels, k, expected=precision_ex1[i])
self._test_streaming_sparse_precision_at_top_k(
(predictions_top_k_ex1[:k],), labels, expected=precision_ex1[i])
self._test_streaming_sparse_average_precision_at_k(
predictions, labels, k, expected=avg_precision_ex1[i])
self._test_streaming_sparse_average_precision_at_top_k(
(predictions_top_k_ex1[:k],), labels, expected=avg_precision_ex1[i])
# Example 2.
labels_ex2 = (0, 2, 4, 5, 6)
labels = np.array([labels_ex2], dtype=np.int64)
predictions_ex2 = (0.3, 0.5, 0.0, 0.4, 0.0, 0.1, 0.2)
predictions = (predictions_ex2,)
predictions_top_k_ex2 = (1, 3, 0, 6, 5)
precision_ex2 = (0.0 / 1, 0.0 / 2, 1.0 / 3, 2.0 / 4)
avg_precision_ex2 = (0.0 / 1, 0.0 / 2, precision_ex2[2] / 3,
(precision_ex2[2] + precision_ex2[3]) / 4)
for i in xrange(4):
k = i + 1
self._test_streaming_sparse_precision_at_k(
predictions, labels, k, expected=precision_ex2[i])
self._test_streaming_sparse_precision_at_top_k(
(predictions_top_k_ex2[:k],), labels, expected=precision_ex2[i])
self._test_streaming_sparse_average_precision_at_k(
predictions, labels, k, expected=avg_precision_ex2[i])
self._test_streaming_sparse_average_precision_at_top_k(
(predictions_top_k_ex2[:k],), labels, expected=avg_precision_ex2[i])
    # With both examples combined, we expect both precision and average
    # precision to be the average of the 2 examples.
labels = np.array([labels_ex1, labels_ex2], dtype=np.int64)
predictions = (predictions_ex1, predictions_ex2)
streaming_precision = [
(ex1 + ex2) / 2 for ex1, ex2 in zip(precision_ex1, precision_ex2)
]
streaming_average_precision = [
(ex1 + ex2) / 2
for ex1, ex2 in zip(avg_precision_ex1, avg_precision_ex2)
]
for i in xrange(4):
k = i + 1
self._test_streaming_sparse_precision_at_k(
predictions, labels, k, expected=streaming_precision[i])
predictions_top_k = (predictions_top_k_ex1[:k], predictions_top_k_ex2[:k])
self._test_streaming_sparse_precision_at_top_k(
predictions_top_k, labels, expected=streaming_precision[i])
self._test_streaming_sparse_average_precision_at_k(
predictions, labels, k, expected=streaming_average_precision[i])
self._test_streaming_sparse_average_precision_at_top_k(
predictions_top_k, labels, expected=streaming_average_precision[i])
    # With weighted examples, we expect streaming average precision to be the
    # weighted average of the 2 examples.
weights = (0.3, 0.6)
streaming_average_precision = [
(weights[0] * ex1 + weights[1] * ex2) / (weights[0] + weights[1])
for ex1, ex2 in zip(avg_precision_ex1, avg_precision_ex2)
]
for i in xrange(4):
k = i + 1
self._test_streaming_sparse_average_precision_at_k(
predictions,
labels,
k,
expected=streaming_average_precision[i],
weights=weights)
self._test_streaming_sparse_average_precision_at_top_k(
(predictions_top_k_ex1[:k], predictions_top_k_ex2[:k]),
labels,
expected=streaming_average_precision[i],
weights=weights)
def test_average_precision_some_labels_out_of_range(self):
"""Tests that labels outside the [0, n_classes) range are ignored."""
labels_ex1 = (-1, 0, 1, 2, 3, 4, 7)
labels = np.array([labels_ex1], dtype=np.int64)
predictions_ex1 = (0.2, 0.1, 0.0, 0.4, 0.0, 0.5, 0.3)
predictions = (predictions_ex1,)
predictions_top_k_ex1 = (5, 3, 6, 0, 1, 2)
precision_ex1 = (0.0 / 1, 1.0 / 2, 1.0 / 3, 2.0 / 4)
avg_precision_ex1 = (0.0 / 1, precision_ex1[1] / 2, precision_ex1[1] / 3,
(precision_ex1[1] + precision_ex1[3]) / 4)
for i in xrange(4):
k = i + 1
self._test_streaming_sparse_precision_at_k(
predictions, labels, k, expected=precision_ex1[i])
self._test_streaming_sparse_precision_at_top_k(
(predictions_top_k_ex1[:k],), labels, expected=precision_ex1[i])
self._test_streaming_sparse_average_precision_at_k(
predictions, labels, k, expected=avg_precision_ex1[i])
self._test_streaming_sparse_average_precision_at_top_k(
(predictions_top_k_ex1[:k],), labels, expected=avg_precision_ex1[i])
def test_average_precision_at_top_k_static_shape_check(self):
predictions_top_k = array_ops.placeholder(
shape=(2, None), dtype=dtypes_lib.int64)
labels = np.array(((1,), (2,)), dtype=np.int64)
# Fails due to non-static predictions_idx shape.
with self.assertRaises(ValueError):
metric_ops.streaming_sparse_average_precision_at_top_k(
predictions_top_k, labels)
predictions_top_k = (2, 1)
# Fails since rank of predictions_idx is less than one.
with self.assertRaises(ValueError):
metric_ops.streaming_sparse_average_precision_at_top_k(
predictions_top_k, labels)
predictions_top_k = ((2,), (1,))
# Valid static shape.
metric_ops.streaming_sparse_average_precision_at_top_k(
predictions_top_k, labels)
def test_one_label_at_k1_nan(self):
predictions = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.2, 0.3, 0.4]]
top_k_predictions = [[3], [3]]
sparse_labels = _binary_2d_label_to_sparse_value([[0, 0, 0, 1],
[0, 0, 1, 0]])
dense_labels = np.array([[3], [2]], dtype=np.int64)
for labels in (sparse_labels, dense_labels):
# Classes 0,1,2 have 0 predictions, classes -1 and 4 are out of range.
for class_id in (-1, 0, 1, 2, 4):
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=1, expected=NAN, class_id=class_id)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=NAN, class_id=class_id)
def test_one_label_at_k1(self):
predictions = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.2, 0.3, 0.4]]
top_k_predictions = [[3], [3]]
sparse_labels = _binary_2d_label_to_sparse_value([[0, 0, 0, 1],
[0, 0, 1, 0]])
dense_labels = np.array([[3], [2]], dtype=np.int64)
for labels in (sparse_labels, dense_labels):
# Class 3: 1 label, 2 predictions, 1 correct.
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=1, expected=1.0 / 2, class_id=3)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=1.0 / 2, class_id=3)
# All classes: 2 labels, 2 predictions, 1 correct.
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=1, expected=1.0 / 2)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=1.0 / 2)
def test_three_labels_at_k5_no_predictions(self):
predictions = [[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]]
top_k_predictions = [
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
]
sparse_labels = _binary_2d_label_to_sparse_value(
[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]])
dense_labels = np.array([[2, 7, 8], [1, 2, 5]], dtype=np.int64)
for labels in (sparse_labels, dense_labels):
# Classes 1,3,8 have 0 predictions, classes -1 and 10 are out of range.
for class_id in (-1, 1, 3, 8, 10):
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=5, expected=NAN, class_id=class_id)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=NAN, class_id=class_id)
def test_three_labels_at_k5_no_labels(self):
predictions = [[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]]
top_k_predictions = [
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
]
sparse_labels = _binary_2d_label_to_sparse_value(
[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]])
dense_labels = np.array([[2, 7, 8], [1, 2, 5]], dtype=np.int64)
for labels in (sparse_labels, dense_labels):
# Classes 0,4,6,9: 0 labels, >=1 prediction.
for class_id in (0, 4, 6, 9):
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=5, expected=0.0, class_id=class_id)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=0.0, class_id=class_id)
def test_three_labels_at_k5(self):
predictions = [[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]]
top_k_predictions = [
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
]
sparse_labels = _binary_2d_label_to_sparse_value(
[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]])
dense_labels = np.array([[2, 7, 8], [1, 2, 5]], dtype=np.int64)
for labels in (sparse_labels, dense_labels):
# Class 2: 2 labels, 2 correct predictions.
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=5, expected=2.0 / 2, class_id=2)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=2.0 / 2, class_id=2)
# Class 5: 1 label, 1 correct prediction.
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=5, expected=1.0 / 1, class_id=5)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=1.0 / 1, class_id=5)
# Class 7: 1 label, 1 incorrect prediction.
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=5, expected=0.0 / 1, class_id=7)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=0.0 / 1, class_id=7)
# All classes: 10 predictions, 3 correct.
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=5, expected=3.0 / 10)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=3.0 / 10)
def test_three_labels_at_k5_some_out_of_range(self):
"""Tests that labels outside the [0, n_classes) range are ignored."""
predictions = [[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]]
top_k_predictions = [
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
]
sp_labels = sparse_tensor.SparseTensorValue(
indices=[[0, 0], [0, 1], [0, 2], [0, 3], [1, 0], [1, 1], [1, 2], [1,
3]],
# values -1 and 10 are outside the [0, n_classes) range and are ignored.
values=np.array([2, 7, -1, 8, 1, 2, 5, 10], np.int64),
dense_shape=[2, 4])
# Class 2: 2 labels, 2 correct predictions.
self._test_streaming_sparse_precision_at_k(
predictions, sp_labels, k=5, expected=2.0 / 2, class_id=2)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, sp_labels, expected=2.0 / 2, class_id=2)
# Class 5: 1 label, 1 correct prediction.
self._test_streaming_sparse_precision_at_k(
predictions, sp_labels, k=5, expected=1.0 / 1, class_id=5)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, sp_labels, expected=1.0 / 1, class_id=5)
# Class 7: 1 label, 1 incorrect prediction.
self._test_streaming_sparse_precision_at_k(
predictions, sp_labels, k=5, expected=0.0 / 1, class_id=7)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, sp_labels, expected=0.0 / 1, class_id=7)
# All classes: 10 predictions, 3 correct.
self._test_streaming_sparse_precision_at_k(
predictions, sp_labels, k=5, expected=3.0 / 10)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, sp_labels, expected=3.0 / 10)
def test_3d_nan(self):
predictions = [[[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]],
[[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],
[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]]]
top_k_predictions = [[
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
], [
[5, 7, 2, 9, 6],
[9, 4, 6, 2, 0],
]]
labels = _binary_3d_label_to_sparse_value(
[[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]],
[[0, 1, 1, 0, 0, 1, 0, 1, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 1, 0]]])
# Classes 1,3,8 have 0 predictions, classes -1 and 10 are out of range.
for class_id in (-1, 1, 3, 8, 10):
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=5, expected=NAN, class_id=class_id)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=NAN, class_id=class_id)
def test_3d_no_labels(self):
predictions = [[[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]],
[[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],
[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]]]
top_k_predictions = [[
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
], [
[5, 7, 2, 9, 6],
[9, 4, 6, 2, 0],
]]
labels = _binary_3d_label_to_sparse_value(
[[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]],
[[0, 1, 1, 0, 0, 1, 0, 1, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 1, 0]]])
# Classes 0,4,6,9: 0 labels, >=1 prediction.
for class_id in (0, 4, 6, 9):
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=5, expected=0.0, class_id=class_id)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=0.0, class_id=class_id)
def test_3d(self):
predictions = [[[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]],
[[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],
[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]]]
top_k_predictions = [[
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
], [
[5, 7, 2, 9, 6],
[9, 4, 6, 2, 0],
]]
labels = _binary_3d_label_to_sparse_value(
[[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]],
[[0, 1, 1, 0, 0, 1, 0, 1, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 1, 0]]])
# Class 2: 4 predictions, all correct.
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=5, expected=4.0 / 4, class_id=2)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=4.0 / 4, class_id=2)
# Class 5: 2 predictions, both correct.
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=5, expected=2.0 / 2, class_id=5)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=2.0 / 2, class_id=5)
# Class 7: 2 predictions, 1 correct.
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=5, expected=1.0 / 2, class_id=7)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=1.0 / 2, class_id=7)
# All classes: 20 predictions, 7 correct.
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=5, expected=7.0 / 20)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=7.0 / 20)
def test_3d_ignore_all(self):
predictions = [[[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]],
[[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],
[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]]]
top_k_predictions = [[
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
], [
[5, 7, 2, 9, 6],
[9, 4, 6, 2, 0],
]]
labels = _binary_3d_label_to_sparse_value(
[[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]],
[[0, 1, 1, 0, 0, 1, 0, 1, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 1, 0]]])
for class_id in xrange(10):
self._test_streaming_sparse_precision_at_k(
predictions,
labels,
k=5,
expected=NAN,
class_id=class_id,
weights=[[0], [0]])
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions,
labels,
expected=NAN,
class_id=class_id,
weights=[[0], [0]])
self._test_streaming_sparse_precision_at_k(
predictions,
labels,
k=5,
expected=NAN,
class_id=class_id,
weights=[[0, 0], [0, 0]])
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions,
labels,
expected=NAN,
class_id=class_id,
weights=[[0, 0], [0, 0]])
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=5, expected=NAN, weights=[[0], [0]])
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=NAN, weights=[[0], [0]])
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=5, expected=NAN, weights=[[0, 0], [0, 0]])
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=NAN, weights=[[0, 0], [0, 0]])
def test_3d_ignore_some(self):
predictions = [[[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]],
[[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],
[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]]]
top_k_predictions = [[
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
], [
[5, 7, 2, 9, 6],
[9, 4, 6, 2, 0],
]]
labels = _binary_3d_label_to_sparse_value(
[[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]],
[[0, 1, 1, 0, 0, 1, 0, 1, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 1, 0]]])
# Class 2: 2 predictions, both correct.
self._test_streaming_sparse_precision_at_k(
predictions,
labels,
k=5,
expected=2.0 / 2.0,
class_id=2,
weights=[[1], [0]])
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions,
labels,
expected=2.0 / 2.0,
class_id=2,
weights=[[1], [0]])
# Class 2: 2 predictions, both correct.
self._test_streaming_sparse_precision_at_k(
predictions,
labels,
k=5,
expected=2.0 / 2.0,
class_id=2,
weights=[[0], [1]])
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions,
labels,
expected=2.0 / 2.0,
class_id=2,
weights=[[0], [1]])
# Class 7: 1 incorrect prediction.
self._test_streaming_sparse_precision_at_k(
predictions,
labels,
k=5,
expected=0.0 / 1.0,
class_id=7,
weights=[[1], [0]])
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions,
labels,
expected=0.0 / 1.0,
class_id=7,
weights=[[1], [0]])
# Class 7: 1 correct prediction.
self._test_streaming_sparse_precision_at_k(
predictions,
labels,
k=5,
expected=1.0 / 1.0,
class_id=7,
weights=[[0], [1]])
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions,
labels,
expected=1.0 / 1.0,
class_id=7,
weights=[[0], [1]])
# Class 7: no predictions.
self._test_streaming_sparse_precision_at_k(
predictions,
labels,
k=5,
expected=NAN,
class_id=7,
weights=[[1, 0], [0, 1]])
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions,
labels,
expected=NAN,
class_id=7,
weights=[[1, 0], [0, 1]])
# Class 7: 2 predictions, 1 correct.
self._test_streaming_sparse_precision_at_k(
predictions,
labels,
k=5,
expected=1.0 / 2.0,
class_id=7,
weights=[[0, 1], [1, 0]])
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions,
labels,
expected=1.0 / 2.0,
class_id=7,
weights=[[0, 1], [1, 0]])
def test_sparse_tensor_value(self):
predictions = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.2, 0.3, 0.4]]
labels = [[0, 0, 0, 1], [0, 0, 1, 0]]
expected_precision = 0.5
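    # Both rows predict class 3 at k=1, but only the first row is labeled with
    # class 3, so precision@1 is 1/2.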
with self.cached_session():
_, precision = metrics.streaming_sparse_precision_at_k(
predictions=constant_op.constant(predictions, dtypes_lib.float32),
labels=_binary_2d_label_to_sparse_value(labels),
k=1)
variables.variables_initializer(variables.local_variables()).run()
self.assertEqual(expected_precision, precision.eval())
class StreamingSparseRecallTest(test.TestCase):
def _test_streaming_sparse_recall_at_k(self,
predictions,
labels,
k,
expected,
class_id=None,
weights=None):
with ops.Graph().as_default() as g, self.session(g):
if weights is not None:
weights = constant_op.constant(weights, dtypes_lib.float32)
metric, update = metrics.streaming_sparse_recall_at_k(
predictions=constant_op.constant(predictions, dtypes_lib.float32),
labels=labels,
k=k,
class_id=class_id,
weights=weights)
# Fails without initialized vars.
self.assertRaises(errors_impl.OpError, metric.eval)
self.assertRaises(errors_impl.OpError, update.eval)
variables.variables_initializer(variables.local_variables()).run()
# Run per-step op and assert expected values.
if math.isnan(expected):
_assert_nan(self, update.eval())
_assert_nan(self, metric.eval())
else:
self.assertEqual(expected, update.eval())
self.assertEqual(expected, metric.eval())
def _test_sparse_recall_at_top_k(self,
labels,
top_k_predictions,
expected,
class_id=None,
weights=None):
with ops.Graph().as_default() as g, self.session(g):
if weights is not None:
weights = constant_op.constant(weights, dtypes_lib.float32)
metric, update = metric_ops.sparse_recall_at_top_k(
labels=labels,
top_k_predictions=constant_op.constant(top_k_predictions,
dtypes_lib.int32),
class_id=class_id,
weights=weights)
# Fails without initialized vars.
self.assertRaises(errors_impl.OpError, metric.eval)
self.assertRaises(errors_impl.OpError, update.eval)
variables.variables_initializer(variables.local_variables()).run()
# Run per-step op and assert expected values.
if math.isnan(expected):
self.assertTrue(math.isnan(update.eval()))
self.assertTrue(math.isnan(metric.eval()))
else:
self.assertEqual(expected, update.eval())
self.assertEqual(expected, metric.eval())
def test_one_label_at_k1_nan(self):
predictions = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.2, 0.3, 0.4]]
top_k_predictions = [[3], [3]]
sparse_labels = _binary_2d_label_to_sparse_value([[0, 0, 0, 1],
[0, 0, 1, 0]])
dense_labels = np.array([[3], [2]], dtype=np.int64)
# Classes 0,1 have 0 labels, 0 predictions, classes -1 and 4 are out of
# range.
for labels in (sparse_labels, dense_labels):
for class_id in (-1, 0, 1, 4):
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=1, expected=NAN, class_id=class_id)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=NAN, class_id=class_id)
def test_one_label_at_k1_no_predictions(self):
predictions = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.2, 0.3, 0.4]]
top_k_predictions = [[3], [3]]
sparse_labels = _binary_2d_label_to_sparse_value([[0, 0, 0, 1],
[0, 0, 1, 0]])
dense_labels = np.array([[3], [2]], dtype=np.int64)
for labels in (sparse_labels, dense_labels):
# Class 2: 0 predictions.
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=1, expected=0.0, class_id=2)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=0.0, class_id=2)
def test_one_label_at_k1(self):
predictions = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.2, 0.3, 0.4]]
top_k_predictions = [[3], [3]]
sparse_labels = _binary_2d_label_to_sparse_value([[0, 0, 0, 1],
[0, 0, 1, 0]])
dense_labels = np.array([[3], [2]], dtype=np.int64)
for labels in (sparse_labels, dense_labels):
# Class 3: 1 label, 2 predictions, 1 correct.
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=1, expected=1.0 / 1, class_id=3)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=1.0 / 1, class_id=3)
# All classes: 2 labels, 2 predictions, 1 correct.
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=1, expected=1.0 / 2)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=1.0 / 2)
def _test_one_label_at_k1_weighted(self, labels):
predictions = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.2, 0.3, 0.4]]
top_k_predictions = [[3], [3]]
# Class 3: 1 label, 2 predictions, 1 correct.
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=1, expected=NAN, class_id=3, weights=(0.0,))
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=NAN, class_id=3, weights=(0.0,))
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=1,
expected=1.0 / 1,
class_id=3,
weights=(1.0,))
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=1.0 / 1,
class_id=3,
weights=(1.0,))
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=1,
expected=1.0 / 1,
class_id=3,
weights=(2.0,))
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=1.0 / 1,
class_id=3,
weights=(2.0,))
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=1,
expected=NAN,
class_id=3,
weights=(0.0, 0.0))
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=NAN,
class_id=3,
weights=(0.0, 0.0))
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=1,
expected=NAN,
class_id=3,
weights=(0.0, 1.0))
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=NAN,
class_id=3,
weights=(0.0, 1.0))
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=1,
expected=1.0 / 1,
class_id=3,
weights=(1.0, 0.0))
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=1.0 / 1,
class_id=3,
weights=(1.0, 0.0))
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=1,
expected=1.0 / 1,
class_id=3,
weights=(1.0, 1.0))
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=1.0 / 1,
class_id=3,
weights=(1.0, 1.0))
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=1,
expected=2.0 / 2,
class_id=3,
weights=(2.0, 3.0))
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=2.0 / 2,
class_id=3,
weights=(2.0, 3.0))
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=1,
expected=3.0 / 3,
class_id=3,
weights=(3.0, 2.0))
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=3.0 / 3,
class_id=3,
weights=(3.0, 2.0))
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=1,
expected=0.3 / 0.3,
class_id=3,
weights=(0.3, 0.6))
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=0.3 / 0.3,
class_id=3,
weights=(0.3, 0.6))
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=1,
expected=0.6 / 0.6,
class_id=3,
weights=(0.6, 0.3))
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=0.6 / 0.6,
class_id=3,
weights=(0.6, 0.3))
# All classes: 2 labels, 2 predictions, 1 correct.
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=1, expected=NAN, weights=(0.0,))
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=NAN, weights=(0.0,))
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=1, expected=1.0 / 2, weights=(1.0,))
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=1.0 / 2, weights=(1.0,))
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=1, expected=1.0 / 2, weights=(2.0,))
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=1.0 / 2, weights=(2.0,))
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=1, expected=1.0 / 1, weights=(1.0, 0.0))
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=1.0 / 1, weights=(1.0, 0.0))
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=1, expected=0.0 / 1, weights=(0.0, 1.0))
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=0.0 / 1, weights=(0.0, 1.0))
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=1, expected=1.0 / 2, weights=(1.0, 1.0))
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=1.0 / 2, weights=(1.0, 1.0))
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=1, expected=2.0 / 5, weights=(2.0, 3.0))
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=2.0 / 5, weights=(2.0, 3.0))
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=1, expected=3.0 / 5, weights=(3.0, 2.0))
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=3.0 / 5, weights=(3.0, 2.0))
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=1, expected=0.3 / 0.9, weights=(0.3, 0.6))
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=0.3 / 0.9, weights=(0.3, 0.6))
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=1, expected=0.6 / 0.9, weights=(0.6, 0.3))
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=0.6 / 0.9, weights=(0.6, 0.3))
def test_one_label_at_k1_weighted_sparse_labels(self):
sparse_labels = _binary_2d_label_to_sparse_value([[0, 0, 0, 1],
[0, 0, 1, 0]])
self._test_one_label_at_k1_weighted(sparse_labels)
def test_one_label_at_k1_weighted_dense_labels(self):
dense_labels = np.array([[3], [2]], dtype=np.int64)
self._test_one_label_at_k1_weighted(dense_labels)
def test_three_labels_at_k5_nan(self):
predictions = [[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]]
top_k_predictions = [
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
]
sparse_labels = _binary_2d_label_to_sparse_value(
[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]])
dense_labels = np.array([[2, 7, 8], [1, 2, 5]], dtype=np.int64)
for labels in (sparse_labels, dense_labels):
# Classes 0,3,4,6,9 have 0 labels, class 10 is out of range.
for class_id in (0, 3, 4, 6, 9, 10):
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=5, expected=NAN, class_id=class_id)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=NAN, class_id=class_id)
def test_three_labels_at_k5_no_predictions(self):
predictions = [[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]]
top_k_predictions = [
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
]
sparse_labels = _binary_2d_label_to_sparse_value(
[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]])
dense_labels = np.array([[2, 7, 8], [1, 2, 5]], dtype=np.int64)
for labels in (sparse_labels, dense_labels):
# Class 8: 1 label, no predictions.
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=5, expected=0.0 / 1, class_id=8)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=0.0 / 1, class_id=8)
def test_three_labels_at_k5(self):
predictions = [[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]]
top_k_predictions = [
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
]
sparse_labels = _binary_2d_label_to_sparse_value(
[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]])
dense_labels = np.array([[2, 7, 8], [1, 2, 5]], dtype=np.int64)
for labels in (sparse_labels, dense_labels):
# Class 2: 2 labels, both correct.
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=5, expected=2.0 / 2, class_id=2)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=2.0 / 2, class_id=2)
      # Class 5: 1 label, correct.
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=5, expected=1.0 / 1, class_id=5)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=1.0 / 1, class_id=5)
# Class 7: 1 label, incorrect.
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=5, expected=0.0 / 1, class_id=7)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=0.0 / 1, class_id=7)
# All classes: 6 labels, 3 correct.
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=5, expected=3.0 / 6)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=3.0 / 6)
def test_three_labels_at_k5_some_out_of_range(self):
"""Tests that labels outside the [0, n_classes) count in denominator."""
predictions = [[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]]
top_k_predictions = [
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
]
sp_labels = sparse_tensor.SparseTensorValue(
indices=[[0, 0], [0, 1], [0, 2], [0, 3], [1, 0], [1, 1], [1, 2], [1,
3]],
# values -1 and 10 are outside the [0, n_classes) range.
values=np.array([2, 7, -1, 8, 1, 2, 5, 10], np.int64),
dense_shape=[2, 4])
# Class 2: 2 labels, both correct.
self._test_streaming_sparse_recall_at_k(
predictions=predictions,
labels=sp_labels,
k=5,
expected=2.0 / 2,
class_id=2)
self._test_sparse_recall_at_top_k(
sp_labels, top_k_predictions, expected=2.0 / 2, class_id=2)
    # Class 5: 1 label, correct.
self._test_streaming_sparse_recall_at_k(
predictions=predictions,
labels=sp_labels,
k=5,
expected=1.0 / 1,
class_id=5)
self._test_sparse_recall_at_top_k(
sp_labels, top_k_predictions, expected=1.0 / 1, class_id=5)
# Class 7: 1 label, incorrect.
self._test_streaming_sparse_recall_at_k(
predictions=predictions,
labels=sp_labels,
k=5,
expected=0.0 / 1,
class_id=7)
self._test_sparse_recall_at_top_k(
sp_labels, top_k_predictions, expected=0.0 / 1, class_id=7)
# All classes: 8 labels, 3 correct.
self._test_streaming_sparse_recall_at_k(
predictions=predictions, labels=sp_labels, k=5, expected=3.0 / 8)
self._test_sparse_recall_at_top_k(
sp_labels, top_k_predictions, expected=3.0 / 8)
def test_3d_nan(self):
predictions = [[[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]],
[[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],
[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]]]
top_k_predictions = [[
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
], [
[5, 7, 2, 9, 6],
[9, 4, 6, 2, 0],
]]
sparse_labels = _binary_3d_label_to_sparse_value(
[[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]],
[[0, 1, 1, 0, 0, 1, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 1, 1, 0]]])
dense_labels = np.array(
[[[2, 7, 8], [1, 2, 5]], [
[1, 2, 5],
[2, 7, 8],
]], dtype=np.int64)
for labels in (sparse_labels, dense_labels):
# Classes 0,3,4,6,9 have 0 labels, class 10 is out of range.
for class_id in (0, 3, 4, 6, 9, 10):
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=5, expected=NAN, class_id=class_id)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=NAN, class_id=class_id)
def test_3d_no_predictions(self):
predictions = [[[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]],
[[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],
[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]]]
top_k_predictions = [[
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
], [
[5, 7, 2, 9, 6],
[9, 4, 6, 2, 0],
]]
sparse_labels = _binary_3d_label_to_sparse_value(
[[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]],
[[0, 1, 1, 0, 0, 1, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 1, 1, 0]]])
dense_labels = np.array(
[[[2, 7, 8], [1, 2, 5]], [
[1, 2, 5],
[2, 7, 8],
]], dtype=np.int64)
for labels in (sparse_labels, dense_labels):
# Classes 1,8 have 0 predictions, >=1 label.
for class_id in (1, 8):
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=5, expected=0.0, class_id=class_id)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=0.0, class_id=class_id)
def test_3d(self):
predictions = [[[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]],
[[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],
[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]]]
top_k_predictions = [[
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
], [
[5, 7, 2, 9, 6],
[9, 4, 6, 2, 0],
]]
labels = _binary_3d_label_to_sparse_value(
[[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]],
[[0, 1, 1, 0, 0, 1, 0, 1, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 1, 0]]])
# Class 2: 4 labels, all correct.
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=5, expected=4.0 / 4, class_id=2)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=4.0 / 4, class_id=2)
# Class 5: 2 labels, both correct.
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=5, expected=2.0 / 2, class_id=5)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=2.0 / 2, class_id=5)
# Class 7: 2 labels, 1 incorrect.
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=5, expected=1.0 / 2, class_id=7)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=1.0 / 2, class_id=7)
# All classes: 12 labels, 7 correct.
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=5, expected=7.0 / 12)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=7.0 / 12)
def test_3d_ignore_all(self):
predictions = [[[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]],
[[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],
[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]]]
top_k_predictions = [[
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
], [
[5, 7, 2, 9, 6],
[9, 4, 6, 2, 0],
]]
labels = _binary_3d_label_to_sparse_value(
[[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]],
[[0, 1, 1, 0, 0, 1, 0, 1, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 1, 0]]])
for class_id in xrange(10):
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=5,
expected=NAN,
class_id=class_id,
weights=[[0], [0]])
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=NAN,
class_id=class_id,
weights=[[0], [0]])
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=5,
expected=NAN,
class_id=class_id,
weights=[[0, 0], [0, 0]])
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=NAN,
class_id=class_id,
weights=[[0, 0], [0, 0]])
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=5, expected=NAN, weights=[[0], [0]])
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=NAN, weights=[[0], [0]])
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=5, expected=NAN, weights=[[0, 0], [0, 0]])
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=NAN, weights=[[0, 0], [0, 0]])
def test_3d_ignore_some(self):
predictions = [[[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]],
[[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],
[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]]]
top_k_predictions = [[
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
], [
[5, 7, 2, 9, 6],
[9, 4, 6, 2, 0],
]]
labels = _binary_3d_label_to_sparse_value(
[[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]],
[[0, 1, 1, 0, 0, 1, 0, 1, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 1, 0]]])
# Class 2: 2 labels, both correct.
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=5,
expected=2.0 / 2.0,
class_id=2,
weights=[[1], [0]])
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=2.0 / 2.0,
class_id=2,
weights=[[1], [0]])
# Class 2: 2 labels, both correct.
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=5,
expected=2.0 / 2.0,
class_id=2,
weights=[[0], [1]])
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=2.0 / 2.0,
class_id=2,
weights=[[0], [1]])
# Class 7: 1 label, correct.
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=5,
expected=1.0 / 1.0,
class_id=7,
weights=[[0], [1]])
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=1.0 / 1.0,
class_id=7,
weights=[[0], [1]])
# Class 7: 1 label, incorrect.
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=5,
expected=0.0 / 1.0,
class_id=7,
weights=[[1], [0]])
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=0.0 / 1.0,
class_id=7,
weights=[[1], [0]])
# Class 7: 2 labels, 1 correct.
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=5,
expected=1.0 / 2.0,
class_id=7,
weights=[[1, 0], [1, 0]])
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=1.0 / 2.0,
class_id=7,
weights=[[1, 0], [1, 0]])
    # Class 7: no labels.
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=5,
expected=NAN,
class_id=7,
weights=[[0, 1], [0, 1]])
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=NAN,
class_id=7,
weights=[[0, 1], [0, 1]])
def test_sparse_tensor_value(self):
predictions = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.2, 0.3, 0.4]]
labels = [[0, 0, 1, 0], [0, 0, 0, 1]]
expected_recall = 0.5
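    # Both rows predict class 3 at k=1; only the second row's label (class 3)
    # is retrieved, so recall@1 is 1/2.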
with self.cached_session():
_, recall = metrics.streaming_sparse_recall_at_k(
predictions=constant_op.constant(predictions, dtypes_lib.float32),
labels=_binary_2d_label_to_sparse_value(labels),
k=1)
variables.variables_initializer(variables.local_variables()).run()
self.assertEqual(expected_recall, recall.eval())
class StreamingMeanAbsoluteErrorTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def testVars(self):
metrics.streaming_mean_absolute_error(
predictions=array_ops.ones((10, 1)), labels=array_ops.ones((10, 1)))
_assert_metric_variables(
self, ('mean_absolute_error/count:0', 'mean_absolute_error/total:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_mean_absolute_error(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_mean_absolute_error(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_normal((10, 3), seed=1)
labels = random_ops.random_normal((10, 3), seed=2)
error, update_op = metrics.streaming_mean_absolute_error(
predictions, labels)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_error = error.eval()
for _ in range(10):
self.assertEqual(initial_error, error.eval())
def testSingleUpdateWithErrorAndWeights(self):
predictions = constant_op.constant(
[2, 4, 6, 8], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant(
[1, 3, 2, 3], shape=(1, 4), dtype=dtypes_lib.float32)
weights = constant_op.constant([0, 1, 0, 1], shape=(1, 4))
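    # Only the weighted elements contribute: |4 - 3| = 1 and |8 - 3| = 5, so
    # the weighted mean absolute error is (1 + 5) / 2 = 3.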
error, update_op = metrics.streaming_mean_absolute_error(
predictions, labels, weights)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(3, sess.run(update_op))
self.assertEqual(3, error.eval())
class StreamingMeanRelativeErrorTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def testVars(self):
metrics.streaming_mean_relative_error(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
normalizer=array_ops.ones((10, 1)))
_assert_metric_variables(
self, ('mean_relative_error/count:0', 'mean_relative_error/total:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_mean_relative_error(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
normalizer=array_ops.ones((10, 1)),
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_mean_relative_error(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
normalizer=array_ops.ones((10, 1)),
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_normal((10, 3), seed=1)
labels = random_ops.random_normal((10, 3), seed=2)
normalizer = random_ops.random_normal((10, 3), seed=3)
error, update_op = metrics.streaming_mean_relative_error(
predictions, labels, normalizer)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_error = error.eval()
for _ in range(10):
self.assertEqual(initial_error, error.eval())
def testSingleUpdateNormalizedByLabels(self):
np_predictions = np.asarray([2, 4, 6, 8], dtype=np.float32)
np_labels = np.asarray([1, 3, 2, 3], dtype=np.float32)
expected_error = np.mean(
np.divide(np.absolute(np_predictions - np_labels), np_labels))
predictions = constant_op.constant(
np_predictions, shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant(np_labels, shape=(1, 4))
error, update_op = metrics.streaming_mean_relative_error(
predictions, labels, normalizer=labels)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(expected_error, sess.run(update_op))
self.assertEqual(expected_error, error.eval())
def testSingleUpdateNormalizedByZeros(self):
np_predictions = np.asarray([2, 4, 6, 8], dtype=np.float32)
predictions = constant_op.constant(
np_predictions, shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant(
[1, 3, 2, 3], shape=(1, 4), dtype=dtypes_lib.float32)
error, update_op = metrics.streaming_mean_relative_error(
predictions, labels, normalizer=array_ops.zeros_like(labels))
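    # Elements with a zero normalizer contribute zero relative error, so the
    # expected error here is 0.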
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(0.0, sess.run(update_op))
self.assertEqual(0.0, error.eval())
class StreamingMeanSquaredErrorTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def testVars(self):
metrics.streaming_mean_squared_error(
predictions=array_ops.ones((10, 1)), labels=array_ops.ones((10, 1)))
_assert_metric_variables(
self, ('mean_squared_error/count:0', 'mean_squared_error/total:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_mean_squared_error(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_mean_squared_error(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_normal((10, 3), seed=1)
labels = random_ops.random_normal((10, 3), seed=2)
error, update_op = metrics.streaming_mean_squared_error(predictions, labels)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_error = error.eval()
for _ in range(10):
self.assertEqual(initial_error, error.eval())
def testSingleUpdateZeroError(self):
predictions = array_ops.zeros((1, 3), dtype=dtypes_lib.float32)
labels = array_ops.zeros((1, 3), dtype=dtypes_lib.float32)
error, update_op = metrics.streaming_mean_squared_error(predictions, labels)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(0, sess.run(update_op))
self.assertEqual(0, error.eval())
def testSingleUpdateWithError(self):
predictions = constant_op.constant(
[2, 4, 6], shape=(1, 3), dtype=dtypes_lib.float32)
labels = constant_op.constant(
[1, 3, 2], shape=(1, 3), dtype=dtypes_lib.float32)
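    # Squared errors are 1, 1 and 16, so the mean squared error is 18 / 3 = 6.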
error, update_op = metrics.streaming_mean_squared_error(predictions, labels)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(6, sess.run(update_op))
self.assertEqual(6, error.eval())
def testSingleUpdateWithErrorAndWeights(self):
predictions = constant_op.constant(
[2, 4, 6, 8], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant(
[1, 3, 2, 3], shape=(1, 4), dtype=dtypes_lib.float32)
weights = constant_op.constant([0, 1, 0, 1], shape=(1, 4))
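    # Only the weighted elements contribute: (4 - 3)^2 = 1 and (8 - 3)^2 = 25,
    # so the weighted mean squared error is (1 + 25) / 2 = 13.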
error, update_op = metrics.streaming_mean_squared_error(
predictions, labels, weights)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(13, sess.run(update_op))
self.assertEqual(13, error.eval())
def testMultipleBatchesOfSizeOne(self):
with self.cached_session() as sess:
# Create the queue that populates the predictions.
preds_queue = data_flow_ops.FIFOQueue(
2, dtypes=dtypes_lib.float32, shapes=(1, 3))
_enqueue_vector(sess, preds_queue, [10, 8, 6])
_enqueue_vector(sess, preds_queue, [-4, 3, -1])
predictions = preds_queue.dequeue()
# Create the queue that populates the labels.
labels_queue = data_flow_ops.FIFOQueue(
2, dtypes=dtypes_lib.float32, shapes=(1, 3))
_enqueue_vector(sess, labels_queue, [1, 3, 2])
_enqueue_vector(sess, labels_queue, [2, 4, 6])
labels = labels_queue.dequeue()
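      # Squared errors sum to 81 + 25 + 16 = 122 for the first batch and
      # 36 + 1 + 49 = 86 for the second, so the running MSE is 208 / 6.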
error, update_op = metrics.streaming_mean_squared_error(
predictions, labels)
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertAlmostEqual(208.0 / 6, sess.run(update_op), 5)
self.assertAlmostEqual(208.0 / 6, error.eval(), 5)
def testMetricsComputedConcurrently(self):
with self.cached_session() as sess:
# Create the queue that populates one set of predictions.
preds_queue0 = data_flow_ops.FIFOQueue(
2, dtypes=dtypes_lib.float32, shapes=(1, 3))
_enqueue_vector(sess, preds_queue0, [10, 8, 6])
_enqueue_vector(sess, preds_queue0, [-4, 3, -1])
predictions0 = preds_queue0.dequeue()
# Create the queue that populates one set of predictions.
preds_queue1 = data_flow_ops.FIFOQueue(
2, dtypes=dtypes_lib.float32, shapes=(1, 3))
_enqueue_vector(sess, preds_queue1, [0, 1, 1])
_enqueue_vector(sess, preds_queue1, [1, 1, 0])
predictions1 = preds_queue1.dequeue()
# Create the queue that populates one set of labels.
labels_queue0 = data_flow_ops.FIFOQueue(
2, dtypes=dtypes_lib.float32, shapes=(1, 3))
_enqueue_vector(sess, labels_queue0, [1, 3, 2])
_enqueue_vector(sess, labels_queue0, [2, 4, 6])
labels0 = labels_queue0.dequeue()
# Create the queue that populates another set of labels.
labels_queue1 = data_flow_ops.FIFOQueue(
2, dtypes=dtypes_lib.float32, shapes=(1, 3))
_enqueue_vector(sess, labels_queue1, [-5, -3, -1])
_enqueue_vector(sess, labels_queue1, [5, 4, 3])
labels1 = labels_queue1.dequeue()
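      # Expected values: mse0 = (122 + 86) / 6 = 208 / 6 and
      # mse1 = (45 + 34) / 6 = 79 / 6.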
mse0, update_op0 = metrics.streaming_mean_squared_error(
predictions0, labels0, name='msd0')
mse1, update_op1 = metrics.streaming_mean_squared_error(
predictions1, labels1, name='msd1')
sess.run(variables.local_variables_initializer())
sess.run([update_op0, update_op1])
sess.run([update_op0, update_op1])
mse0, mse1 = sess.run([mse0, mse1])
self.assertAlmostEqual(208.0 / 6, mse0, 5)
self.assertAlmostEqual(79.0 / 6, mse1, 5)
def testMultipleMetricsOnMultipleBatchesOfSizeOne(self):
with self.cached_session() as sess:
# Create the queue that populates the predictions.
preds_queue = data_flow_ops.FIFOQueue(
2, dtypes=dtypes_lib.float32, shapes=(1, 3))
_enqueue_vector(sess, preds_queue, [10, 8, 6])
_enqueue_vector(sess, preds_queue, [-4, 3, -1])
predictions = preds_queue.dequeue()
# Create the queue that populates the labels.
labels_queue = data_flow_ops.FIFOQueue(
2, dtypes=dtypes_lib.float32, shapes=(1, 3))
_enqueue_vector(sess, labels_queue, [1, 3, 2])
_enqueue_vector(sess, labels_queue, [2, 4, 6])
labels = labels_queue.dequeue()
mae, ma_update_op = metrics.streaming_mean_absolute_error(
predictions, labels)
mse, ms_update_op = metrics.streaming_mean_squared_error(
predictions, labels)
sess.run(variables.local_variables_initializer())
sess.run([ma_update_op, ms_update_op])
sess.run([ma_update_op, ms_update_op])
self.assertAlmostEqual(32.0 / 6, mae.eval(), 5)
self.assertAlmostEqual(208.0 / 6, mse.eval(), 5)
class StreamingRootMeanSquaredErrorTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def testVars(self):
metrics.streaming_root_mean_squared_error(
predictions=array_ops.ones((10, 1)), labels=array_ops.ones((10, 1)))
_assert_metric_variables(
self,
('root_mean_squared_error/count:0', 'root_mean_squared_error/total:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_root_mean_squared_error(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_root_mean_squared_error(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_normal((10, 3), seed=1)
labels = random_ops.random_normal((10, 3), seed=2)
error, update_op = metrics.streaming_root_mean_squared_error(
predictions, labels)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_error = error.eval()
for _ in range(10):
self.assertEqual(initial_error, error.eval())
def testSingleUpdateZeroError(self):
with self.cached_session() as sess:
predictions = constant_op.constant(
0.0, shape=(1, 3), dtype=dtypes_lib.float32)
labels = constant_op.constant(0.0, shape=(1, 3), dtype=dtypes_lib.float32)
rmse, update_op = metrics.streaming_root_mean_squared_error(
predictions, labels)
sess.run(variables.local_variables_initializer())
self.assertEqual(0, sess.run(update_op))
self.assertEqual(0, rmse.eval())
def testSingleUpdateWithError(self):
with self.cached_session() as sess:
predictions = constant_op.constant(
[2, 4, 6], shape=(1, 3), dtype=dtypes_lib.float32)
labels = constant_op.constant(
[1, 3, 2], shape=(1, 3), dtype=dtypes_lib.float32)
rmse, update_op = metrics.streaming_root_mean_squared_error(
predictions, labels)
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(math.sqrt(6), update_op.eval(), 5)
self.assertAlmostEqual(math.sqrt(6), rmse.eval(), 5)
def testSingleUpdateWithErrorAndWeights(self):
with self.cached_session() as sess:
predictions = constant_op.constant(
[2, 4, 6, 8], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant(
[1, 3, 2, 3], shape=(1, 4), dtype=dtypes_lib.float32)
weights = constant_op.constant([0, 1, 0, 1], shape=(1, 4))
rmse, update_op = metrics.streaming_root_mean_squared_error(
predictions, labels, weights)
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(math.sqrt(13), sess.run(update_op))
self.assertAlmostEqual(math.sqrt(13), rmse.eval(), 5)
class StreamingCovarianceTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def testVars(self):
metrics.streaming_covariance(
predictions=math_ops.to_float(math_ops.range(10)) +
array_ops.ones([10, 10]),
labels=math_ops.to_float(math_ops.range(10)) + array_ops.ones([10, 10]))
_assert_metric_variables(self, (
'covariance/comoment:0',
'covariance/count:0',
'covariance/mean_label:0',
'covariance/mean_prediction:0',
))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
cov, _ = metrics.streaming_covariance(
predictions=math_ops.to_float(math_ops.range(10)) +
array_ops.ones([10, 10]),
labels=math_ops.to_float(math_ops.range(10)) + array_ops.ones([10, 10]),
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [cov])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_covariance(
predictions=math_ops.to_float(math_ops.range(10)) +
array_ops.ones([10, 10]),
labels=math_ops.to_float(math_ops.range(10)) + array_ops.ones([10, 10]),
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
labels = random_ops.random_normal((10, 3), seed=2)
predictions = labels * 0.5 + random_ops.random_normal((10, 3), seed=1) * 0.5
cov, update_op = metrics.streaming_covariance(predictions, labels)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_cov = cov.eval()
for _ in range(10):
self.assertEqual(initial_cov, cov.eval())
def testSingleUpdateIdentical(self):
with self.cached_session() as sess:
predictions = math_ops.to_float(math_ops.range(10))
labels = math_ops.to_float(math_ops.range(10))
cov, update_op = metrics.streaming_covariance(predictions, labels)
expected_cov = np.cov(np.arange(10), np.arange(10))[0, 1]
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(expected_cov, sess.run(update_op), 5)
self.assertAlmostEqual(expected_cov, cov.eval(), 5)
def testSingleUpdateNonIdentical(self):
with self.cached_session() as sess:
predictions = constant_op.constant(
[2, 4, 6], shape=(1, 3), dtype=dtypes_lib.float32)
labels = constant_op.constant(
[1, 3, 2], shape=(1, 3), dtype=dtypes_lib.float32)
cov, update_op = metrics.streaming_covariance(predictions, labels)
expected_cov = np.cov([2, 4, 6], [1, 3, 2])[0, 1]
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(expected_cov, update_op.eval())
self.assertAlmostEqual(expected_cov, cov.eval())
def testSingleUpdateWithErrorAndWeights(self):
with self.cached_session() as sess:
predictions = constant_op.constant(
[2, 4, 6, 8], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant(
[1, 3, 2, 7], shape=(1, 4), dtype=dtypes_lib.float32)
weights = constant_op.constant(
[0, 1, 3, 1], shape=(1, 4), dtype=dtypes_lib.float32)
cov, update_op = metrics.streaming_covariance(
predictions, labels, weights=weights)
expected_cov = np.cov(
[2, 4, 6, 8], [1, 3, 2, 7], fweights=[0, 1, 3, 1])[0, 1]
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(expected_cov, sess.run(update_op))
self.assertAlmostEqual(expected_cov, cov.eval())
def testMultiUpdateWithErrorNoWeights(self):
with self.cached_session() as sess:
np.random.seed(123)
n = 100
predictions = np.random.randn(n)
labels = 0.5 * predictions + np.random.randn(n)
stride = 10
predictions_t = array_ops.placeholder(dtypes_lib.float32, [stride])
labels_t = array_ops.placeholder(dtypes_lib.float32, [stride])
cov, update_op = metrics.streaming_covariance(predictions_t, labels_t)
sess.run(variables.local_variables_initializer())
prev_expected_cov = NAN
for i in range(n // stride):
feed_dict = {
predictions_t: predictions[stride * i:stride * (i + 1)],
labels_t: labels[stride * i:stride * (i + 1)]
}
self.assertEqual(
np.isnan(prev_expected_cov),
np.isnan(sess.run(cov, feed_dict=feed_dict)))
if not np.isnan(prev_expected_cov):
self.assertAlmostEqual(prev_expected_cov,
sess.run(cov, feed_dict=feed_dict), 5)
expected_cov = np.cov(predictions[:stride * (i + 1)],
labels[:stride * (i + 1)])[0, 1]
self.assertAlmostEqual(expected_cov,
sess.run(update_op, feed_dict=feed_dict), 5)
self.assertAlmostEqual(expected_cov, sess.run(cov, feed_dict=feed_dict),
5)
prev_expected_cov = expected_cov
def testMultiUpdateWithErrorAndWeights(self):
with self.cached_session() as sess:
np.random.seed(123)
n = 100
predictions = np.random.randn(n)
labels = 0.5 * predictions + np.random.randn(n)
weights = np.tile(np.arange(n // 10), n // 10)
np.random.shuffle(weights)
stride = 10
predictions_t = array_ops.placeholder(dtypes_lib.float32, [stride])
labels_t = array_ops.placeholder(dtypes_lib.float32, [stride])
weights_t = array_ops.placeholder(dtypes_lib.float32, [stride])
cov, update_op = metrics.streaming_covariance(
predictions_t, labels_t, weights=weights_t)
sess.run(variables.local_variables_initializer())
prev_expected_cov = NAN
for i in range(n // stride):
feed_dict = {
predictions_t: predictions[stride * i:stride * (i + 1)],
labels_t: labels[stride * i:stride * (i + 1)],
weights_t: weights[stride * i:stride * (i + 1)]
}
self.assertEqual(
np.isnan(prev_expected_cov),
np.isnan(sess.run(cov, feed_dict=feed_dict)))
if not np.isnan(prev_expected_cov):
self.assertAlmostEqual(prev_expected_cov,
sess.run(cov, feed_dict=feed_dict), 5)
expected_cov = np.cov(
predictions[:stride * (i + 1)],
labels[:stride * (i + 1)],
fweights=weights[:stride * (i + 1)])[0, 1]
self.assertAlmostEqual(expected_cov,
sess.run(update_op, feed_dict=feed_dict), 5)
self.assertAlmostEqual(expected_cov, sess.run(cov, feed_dict=feed_dict),
5)
prev_expected_cov = expected_cov
class StreamingPearsonRTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def testVars(self):
metrics.streaming_pearson_correlation(
predictions=math_ops.to_float(math_ops.range(10)) +
array_ops.ones([10, 10]),
labels=math_ops.to_float(math_ops.range(10)) + array_ops.ones([10, 10]))
_assert_metric_variables(self, (
'pearson_r/covariance/comoment:0',
'pearson_r/covariance/count:0',
'pearson_r/covariance/mean_label:0',
'pearson_r/covariance/mean_prediction:0',
'pearson_r/variance_labels/count:0',
'pearson_r/variance_labels/comoment:0',
'pearson_r/variance_labels/mean_label:0',
'pearson_r/variance_labels/mean_prediction:0',
'pearson_r/variance_predictions/comoment:0',
'pearson_r/variance_predictions/count:0',
'pearson_r/variance_predictions/mean_label:0',
'pearson_r/variance_predictions/mean_prediction:0',
))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
pearson_r, _ = metrics.streaming_pearson_correlation(
predictions=math_ops.to_float(math_ops.range(10)) +
array_ops.ones([10, 10]),
labels=math_ops.to_float(math_ops.range(10)) + array_ops.ones([10, 10]),
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [pearson_r])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_pearson_correlation(
predictions=math_ops.to_float(math_ops.range(10)) +
array_ops.ones([10, 10]),
labels=math_ops.to_float(math_ops.range(10)) + array_ops.ones([10, 10]),
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
labels = random_ops.random_normal((10, 3), seed=2)
predictions = labels * 0.5 + random_ops.random_normal((10, 3), seed=1) * 0.5
pearson_r, update_op = metrics.streaming_pearson_correlation(
predictions, labels)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_r = pearson_r.eval()
for _ in range(10):
self.assertEqual(initial_r, pearson_r.eval())
def testSingleUpdateIdentical(self):
with self.cached_session() as sess:
predictions = math_ops.to_float(math_ops.range(10))
labels = math_ops.to_float(math_ops.range(10))
pearson_r, update_op = metrics.streaming_pearson_correlation(
predictions, labels)
expected_r = np.corrcoef(np.arange(10), np.arange(10))[0, 1]
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(expected_r, sess.run(update_op), 5)
self.assertAlmostEqual(expected_r, pearson_r.eval(), 5)
def testSingleUpdateNonIdentical(self):
with self.cached_session() as sess:
predictions = constant_op.constant(
[2, 4, 6], shape=(1, 3), dtype=dtypes_lib.float32)
labels = constant_op.constant(
[1, 3, 2], shape=(1, 3), dtype=dtypes_lib.float32)
pearson_r, update_op = metrics.streaming_pearson_correlation(
predictions, labels)
expected_r = np.corrcoef([2, 4, 6], [1, 3, 2])[0, 1]
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(expected_r, update_op.eval())
self.assertAlmostEqual(expected_r, pearson_r.eval())
def testSingleUpdateWithErrorAndWeights(self):
with self.cached_session() as sess:
predictions = np.array([2, 4, 6, 8])
labels = np.array([1, 3, 2, 7])
weights = np.array([0, 1, 3, 1])
predictions_t = constant_op.constant(
predictions, shape=(1, 4), dtype=dtypes_lib.float32)
labels_t = constant_op.constant(
labels, shape=(1, 4), dtype=dtypes_lib.float32)
weights_t = constant_op.constant(
weights, shape=(1, 4), dtype=dtypes_lib.float32)
pearson_r, update_op = metrics.streaming_pearson_correlation(
predictions_t, labels_t, weights=weights_t)
cmat = np.cov(predictions, labels, fweights=weights)
expected_r = cmat[0, 1] / np.sqrt(cmat[0, 0] * cmat[1, 1])
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(expected_r, sess.run(update_op))
self.assertAlmostEqual(expected_r, pearson_r.eval())
def testMultiUpdateWithErrorNoWeights(self):
with self.cached_session() as sess:
np.random.seed(123)
n = 100
predictions = np.random.randn(n)
labels = 0.5 * predictions + np.random.randn(n)
stride = 10
predictions_t = array_ops.placeholder(dtypes_lib.float32, [stride])
labels_t = array_ops.placeholder(dtypes_lib.float32, [stride])
pearson_r, update_op = metrics.streaming_pearson_correlation(
predictions_t, labels_t)
sess.run(variables.local_variables_initializer())
prev_expected_r = NAN
for i in range(n // stride):
feed_dict = {
predictions_t: predictions[stride * i:stride * (i + 1)],
labels_t: labels[stride * i:stride * (i + 1)]
}
self.assertEqual(
np.isnan(prev_expected_r),
np.isnan(sess.run(pearson_r, feed_dict=feed_dict)))
if not np.isnan(prev_expected_r):
self.assertAlmostEqual(prev_expected_r,
sess.run(pearson_r, feed_dict=feed_dict), 5)
expected_r = np.corrcoef(predictions[:stride * (i + 1)],
labels[:stride * (i + 1)])[0, 1]
self.assertAlmostEqual(expected_r,
sess.run(update_op, feed_dict=feed_dict), 5)
self.assertAlmostEqual(expected_r,
sess.run(pearson_r, feed_dict=feed_dict), 5)
prev_expected_r = expected_r
def testMultiUpdateWithErrorAndWeights(self):
with self.cached_session() as sess:
np.random.seed(123)
n = 100
predictions = np.random.randn(n)
labels = 0.5 * predictions + np.random.randn(n)
weights = np.tile(np.arange(n // 10), n // 10)
np.random.shuffle(weights)
stride = 10
predictions_t = array_ops.placeholder(dtypes_lib.float32, [stride])
labels_t = array_ops.placeholder(dtypes_lib.float32, [stride])
weights_t = array_ops.placeholder(dtypes_lib.float32, [stride])
pearson_r, update_op = metrics.streaming_pearson_correlation(
predictions_t, labels_t, weights=weights_t)
sess.run(variables.local_variables_initializer())
prev_expected_r = NAN
for i in range(n // stride):
feed_dict = {
predictions_t: predictions[stride * i:stride * (i + 1)],
labels_t: labels[stride * i:stride * (i + 1)],
weights_t: weights[stride * i:stride * (i + 1)]
}
self.assertEqual(
np.isnan(prev_expected_r),
np.isnan(sess.run(pearson_r, feed_dict=feed_dict)))
if not np.isnan(prev_expected_r):
self.assertAlmostEqual(prev_expected_r,
sess.run(pearson_r, feed_dict=feed_dict), 5)
cmat = np.cov(
predictions[:stride * (i + 1)],
labels[:stride * (i + 1)],
fweights=weights[:stride * (i + 1)])
expected_r = cmat[0, 1] / np.sqrt(cmat[0, 0] * cmat[1, 1])
self.assertAlmostEqual(expected_r,
sess.run(update_op, feed_dict=feed_dict), 5)
self.assertAlmostEqual(expected_r,
sess.run(pearson_r, feed_dict=feed_dict), 5)
prev_expected_r = expected_r
def testMultiUpdateWithErrorAndSingletonBatches(self):
with self.cached_session() as sess:
np.random.seed(123)
n = 100
predictions = np.random.randn(n)
labels = 0.5 * predictions + np.random.randn(n)
stride = 10
weights = (np.arange(n).reshape(n // stride, stride) % stride == 0)
for row in weights:
np.random.shuffle(row)
# Now, weights is one-hot by row - one item per batch has non-zero weight.
weights = weights.reshape((n,))
predictions_t = array_ops.placeholder(dtypes_lib.float32, [stride])
labels_t = array_ops.placeholder(dtypes_lib.float32, [stride])
weights_t = array_ops.placeholder(dtypes_lib.float32, [stride])
pearson_r, update_op = metrics.streaming_pearson_correlation(
predictions_t, labels_t, weights=weights_t)
sess.run(variables.local_variables_initializer())
for i in range(n // stride):
feed_dict = {
predictions_t: predictions[stride * i:stride * (i + 1)],
labels_t: labels[stride * i:stride * (i + 1)],
weights_t: weights[stride * i:stride * (i + 1)]
}
cmat = np.cov(
predictions[:stride * (i + 1)],
labels[:stride * (i + 1)],
fweights=weights[:stride * (i + 1)])
expected_r = cmat[0, 1] / np.sqrt(cmat[0, 0] * cmat[1, 1])
actual_r = sess.run(update_op, feed_dict=feed_dict)
self.assertEqual(np.isnan(expected_r), np.isnan(actual_r))
self.assertEqual(
np.isnan(expected_r),
np.isnan(sess.run(pearson_r, feed_dict=feed_dict)))
if not np.isnan(expected_r):
self.assertAlmostEqual(expected_r, actual_r, 5)
self.assertAlmostEqual(expected_r,
sess.run(pearson_r, feed_dict=feed_dict), 5)
class StreamingMeanCosineDistanceTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def testVars(self):
metrics.streaming_mean_cosine_distance(
predictions=array_ops.ones((10, 3)),
labels=array_ops.ones((10, 3)),
dim=1)
_assert_metric_variables(self, (
'mean_cosine_distance/count:0',
'mean_cosine_distance/total:0',
))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_mean_cosine_distance(
predictions=array_ops.ones((10, 3)),
labels=array_ops.ones((10, 3)),
dim=1,
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_mean_cosine_distance(
predictions=array_ops.ones((10, 3)),
labels=array_ops.ones((10, 3)),
dim=1,
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_normal((10, 3), seed=1)
labels = random_ops.random_normal((10, 3), seed=2)
error, update_op = metrics.streaming_mean_cosine_distance(
predictions, labels, dim=1)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_error = error.eval()
for _ in range(10):
self.assertEqual(initial_error, error.eval())
def testSingleUpdateZeroError(self):
np_labels = np.matrix(('1 0 0;' '0 0 1;' '0 1 0'))
predictions = constant_op.constant(
np_labels, shape=(1, 3, 3), dtype=dtypes_lib.float32)
labels = constant_op.constant(
np_labels, shape=(1, 3, 3), dtype=dtypes_lib.float32)
error, update_op = metrics.streaming_mean_cosine_distance(
predictions, labels, dim=2)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(0, sess.run(update_op))
self.assertEqual(0, error.eval())
def testSingleUpdateWithError1(self):
np_labels = np.matrix(('1 0 0;' '0 0 1;' '0 1 0'))
np_predictions = np.matrix(('1 0 0;' '0 0 -1;' '1 0 0'))
predictions = constant_op.constant(
np_predictions, shape=(3, 1, 3), dtype=dtypes_lib.float32)
labels = constant_op.constant(
np_labels, shape=(3, 1, 3), dtype=dtypes_lib.float32)
error, update_op = metrics.streaming_mean_cosine_distance(
predictions, labels, dim=2)
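# Row-wise cosine distances (1 - cosine similarity): identical vectors give 0,
# opposite vectors give 2, orthogonal vectors give 1, so the mean is 1.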
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(1, sess.run(update_op), 5)
self.assertAlmostEqual(1, error.eval(), 5)
def testSingleUpdateWithError2(self):
np_predictions = np.matrix(
('0.819031913261206 0.567041924552012 0.087465312324590;'
'-0.665139432070255 -0.739487441769973 -0.103671883216994;'
'0.707106781186548 -0.707106781186548 0'))
np_labels = np.matrix(
('0.819031913261206 0.567041924552012 0.087465312324590;'
'0.665139432070255 0.739487441769973 0.103671883216994;'
'0.707106781186548 0.707106781186548 0'))
predictions = constant_op.constant(
np_predictions, shape=(3, 1, 3), dtype=dtypes_lib.float32)
labels = constant_op.constant(
np_labels, shape=(3, 1, 3), dtype=dtypes_lib.float32)
error, update_op = metrics.streaming_mean_cosine_distance(
predictions, labels, dim=2)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(1.0, sess.run(update_op), 5)
self.assertAlmostEqual(1.0, error.eval(), 5)
def testSingleUpdateWithErrorAndWeights1(self):
np_predictions = np.matrix(('1 0 0;' '0 0 -1;' '1 0 0'))
np_labels = np.matrix(('1 0 0;' '0 0 1;' '0 1 0'))
predictions = constant_op.constant(
np_predictions, shape=(3, 1, 3), dtype=dtypes_lib.float32)
labels = constant_op.constant(
np_labels, shape=(3, 1, 3), dtype=dtypes_lib.float32)
weights = constant_op.constant(
[1, 0, 0], shape=(3, 1, 1), dtype=dtypes_lib.float32)
error, update_op = metrics.streaming_mean_cosine_distance(
predictions, labels, dim=2, weights=weights)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(0, sess.run(update_op))
self.assertEqual(0, error.eval())
def testSingleUpdateWithErrorAndWeights2(self):
np_predictions = np.matrix(('1 0 0;' '0 0 -1;' '1 0 0'))
np_labels = np.matrix(('1 0 0;' '0 0 1;' '0 1 0'))
predictions = constant_op.constant(
np_predictions, shape=(3, 1, 3), dtype=dtypes_lib.float32)
labels = constant_op.constant(
np_labels, shape=(3, 1, 3), dtype=dtypes_lib.float32)
weights = constant_op.constant(
[0, 1, 1], shape=(3, 1, 1), dtype=dtypes_lib.float32)
error, update_op = metrics.streaming_mean_cosine_distance(
predictions, labels, dim=2, weights=weights)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(1.5, update_op.eval())
self.assertEqual(1.5, error.eval())
class PcntBelowThreshTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def testVars(self):
metrics.streaming_percentage_less(values=array_ops.ones((10,)), threshold=2)
_assert_metric_variables(self, (
'percentage_below_threshold/count:0',
'percentage_below_threshold/total:0',
))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_percentage_less(
values=array_ops.ones((10,)),
threshold=2,
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_percentage_less(
values=array_ops.ones((10,)),
threshold=2,
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testOneUpdate(self):
with self.cached_session() as sess:
values = constant_op.constant(
[2, 4, 6, 8], shape=(1, 4), dtype=dtypes_lib.float32)
pcnt0, update_op0 = metrics.streaming_percentage_less(
values, 100, name='high')
pcnt1, update_op1 = metrics.streaming_percentage_less(
values, 7, name='medium')
pcnt2, update_op2 = metrics.streaming_percentage_less(
values, 1, name='low')
sess.run(variables.local_variables_initializer())
sess.run([update_op0, update_op1, update_op2])
pcnt0, pcnt1, pcnt2 = sess.run([pcnt0, pcnt1, pcnt2])
self.assertAlmostEqual(1.0, pcnt0, 5)
self.assertAlmostEqual(0.75, pcnt1, 5)
self.assertAlmostEqual(0.0, pcnt2, 5)
def testSomePresentOneUpdate(self):
with self.cached_session() as sess:
values = constant_op.constant(
[2, 4, 6, 8], shape=(1, 4), dtype=dtypes_lib.float32)
weights = constant_op.constant(
[1, 0, 0, 1], shape=(1, 4), dtype=dtypes_lib.float32)
pcnt0, update_op0 = metrics.streaming_percentage_less(
values, 100, weights=weights, name='high')
pcnt1, update_op1 = metrics.streaming_percentage_less(
values, 7, weights=weights, name='medium')
pcnt2, update_op2 = metrics.streaming_percentage_less(
values, 1, weights=weights, name='low')
sess.run(variables.local_variables_initializer())
self.assertListEqual([1.0, 0.5, 0.0],
sess.run([update_op0, update_op1, update_op2]))
pcnt0, pcnt1, pcnt2 = sess.run([pcnt0, pcnt1, pcnt2])
self.assertAlmostEqual(1.0, pcnt0, 5)
self.assertAlmostEqual(0.5, pcnt1, 5)
self.assertAlmostEqual(0.0, pcnt2, 5)
class StreamingMeanIOUTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_mean_iou(
predictions=array_ops.ones([10, 1]),
labels=array_ops.ones([10, 1]),
num_classes=2)
_assert_metric_variables(self, ('mean_iou/total_confusion_matrix:0',))
def testMetricsCollections(self):
my_collection_name = '__metrics__'
mean_iou, _ = metrics.streaming_mean_iou(
predictions=array_ops.ones([10, 1]),
labels=array_ops.ones([10, 1]),
num_classes=2,
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean_iou])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_mean_iou(
predictions=array_ops.ones([10, 1]),
labels=array_ops.ones([10, 1]),
num_classes=2,
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testPredictionsAndLabelsOfDifferentSizeRaisesValueError(self):
predictions = array_ops.ones([10, 3])
labels = array_ops.ones([10, 4])
with self.assertRaises(ValueError):
metrics.streaming_mean_iou(predictions, labels, num_classes=2)
def testLabelsAndWeightsOfDifferentSizeRaisesValueError(self):
predictions = array_ops.ones([10])
labels = array_ops.ones([10])
weights = array_ops.zeros([9])
with self.assertRaises(ValueError):
metrics.streaming_mean_iou(
predictions, labels, num_classes=2, weights=weights)
def testValueTensorIsIdempotent(self):
num_classes = 3
predictions = random_ops.random_uniform(
[10], maxval=num_classes, dtype=dtypes_lib.int64, seed=1)
labels = random_ops.random_uniform(
[10], maxval=num_classes, dtype=dtypes_lib.int64, seed=2)
miou, update_op = metrics.streaming_mean_iou(
predictions, labels, num_classes=num_classes)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_miou = miou.eval()
for _ in range(10):
self.assertEqual(initial_miou, miou.eval())
def testMultipleUpdates(self):
num_classes = 3
with self.cached_session() as sess:
# Create the queue that populates the predictions.
preds_queue = data_flow_ops.FIFOQueue(
5, dtypes=dtypes_lib.int32, shapes=(1, 1))
_enqueue_vector(sess, preds_queue, [0])
_enqueue_vector(sess, preds_queue, [1])
_enqueue_vector(sess, preds_queue, [2])
_enqueue_vector(sess, preds_queue, [1])
_enqueue_vector(sess, preds_queue, [0])
predictions = preds_queue.dequeue()
# Create the queue that populates the labels.
labels_queue = data_flow_ops.FIFOQueue(
5, dtypes=dtypes_lib.int32, shapes=(1, 1))
_enqueue_vector(sess, labels_queue, [0])
_enqueue_vector(sess, labels_queue, [1])
_enqueue_vector(sess, labels_queue, [1])
_enqueue_vector(sess, labels_queue, [2])
_enqueue_vector(sess, labels_queue, [1])
labels = labels_queue.dequeue()
miou, update_op = metrics.streaming_mean_iou(predictions, labels,
num_classes)
sess.run(variables.local_variables_initializer())
for _ in range(5):
sess.run(update_op)
desired_output = np.mean([1.0 / 2.0, 1.0 / 4.0, 0.])
self.assertEqual(desired_output, miou.eval())
def testMultipleUpdatesWithWeights(self):
num_classes = 2
with self.cached_session() as sess:
# Create the queue that populates the predictions.
preds_queue = data_flow_ops.FIFOQueue(
6, dtypes=dtypes_lib.int32, shapes=(1, 1))
_enqueue_vector(sess, preds_queue, [0])
_enqueue_vector(sess, preds_queue, [1])
_enqueue_vector(sess, preds_queue, [0])
_enqueue_vector(sess, preds_queue, [1])
_enqueue_vector(sess, preds_queue, [0])
_enqueue_vector(sess, preds_queue, [1])
predictions = preds_queue.dequeue()
# Create the queue that populates the labels.
labels_queue = data_flow_ops.FIFOQueue(
6, dtypes=dtypes_lib.int32, shapes=(1, 1))
_enqueue_vector(sess, labels_queue, [0])
_enqueue_vector(sess, labels_queue, [1])
_enqueue_vector(sess, labels_queue, [1])
_enqueue_vector(sess, labels_queue, [0])
_enqueue_vector(sess, labels_queue, [0])
_enqueue_vector(sess, labels_queue, [1])
labels = labels_queue.dequeue()
# Create the queue that populates the weights.
weights_queue = data_flow_ops.FIFOQueue(
6, dtypes=dtypes_lib.float32, shapes=(1, 1))
_enqueue_vector(sess, weights_queue, [1.0])
_enqueue_vector(sess, weights_queue, [1.0])
_enqueue_vector(sess, weights_queue, [1.0])
_enqueue_vector(sess, weights_queue, [0.0])
_enqueue_vector(sess, weights_queue, [1.0])
_enqueue_vector(sess, weights_queue, [0.0])
weights = weights_queue.dequeue()
miou, update_op = metrics.streaming_mean_iou(
predictions, labels, num_classes, weights=weights)
sess.run(variables.local_variables_initializer())
for _ in range(6):
sess.run(update_op)
desired_output = np.mean([2.0 / 3.0, 1.0 / 2.0])
self.assertAlmostEqual(desired_output, miou.eval())
def testMultipleUpdatesWithMissingClass(self):
# Test the case where there are no predictions and labels for
# one class, and thus there is one row and one column with
# zero entries in the confusion matrix.
num_classes = 3
with self.cached_session() as sess:
# Create the queue that populates the predictions.
# There is no prediction for class 2.
preds_queue = data_flow_ops.FIFOQueue(
5, dtypes=dtypes_lib.int32, shapes=(1, 1))
_enqueue_vector(sess, preds_queue, [0])
_enqueue_vector(sess, preds_queue, [1])
_enqueue_vector(sess, preds_queue, [1])
_enqueue_vector(sess, preds_queue, [1])
_enqueue_vector(sess, preds_queue, [0])
predictions = preds_queue.dequeue()
# Create the queue that populates the labels.
# There is no label for class 2.
labels_queue = data_flow_ops.FIFOQueue(
5, dtypes=dtypes_lib.int32, shapes=(1, 1))
_enqueue_vector(sess, labels_queue, [0])
_enqueue_vector(sess, labels_queue, [1])
_enqueue_vector(sess, labels_queue, [1])
_enqueue_vector(sess, labels_queue, [0])
_enqueue_vector(sess, labels_queue, [1])
labels = labels_queue.dequeue()
miou, update_op = metrics.streaming_mean_iou(predictions, labels,
num_classes)
sess.run(variables.local_variables_initializer())
for _ in range(5):
sess.run(update_op)
desired_output = np.mean([1.0 / 3.0, 2.0 / 4.0])
self.assertAlmostEqual(desired_output, miou.eval())
def testUpdateOpEvalIsAccumulatedConfusionMatrix(self):
predictions = array_ops.concat([
constant_op.constant(0, shape=[5]),
constant_op.constant(1, shape=[5])
], 0)
labels = array_ops.concat([
constant_op.constant(0, shape=[3]),
constant_op.constant(1, shape=[7])
], 0)
num_classes = 2
with self.cached_session() as sess:
miou, update_op = metrics.streaming_mean_iou(predictions, labels,
num_classes)
sess.run(variables.local_variables_initializer())
confusion_matrix = update_op.eval()
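# Rows of the accumulated confusion matrix are labels and columns are
# predictions, so class 0 IoU = 3 / (3 + 2 + 0) and class 1 IoU = 5 / (5 + 0 + 2).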
self.assertAllEqual([[3, 0], [2, 5]], confusion_matrix)
desired_miou = np.mean([3. / 5., 5. / 7.])
self.assertAlmostEqual(desired_miou, miou.eval())
def testAllCorrect(self):
predictions = array_ops.zeros([40])
labels = array_ops.zeros([40])
num_classes = 1
with self.cached_session() as sess:
miou, update_op = metrics.streaming_mean_iou(predictions, labels,
num_classes)
sess.run(variables.local_variables_initializer())
self.assertEqual(40, update_op.eval()[0])
self.assertEqual(1.0, miou.eval())
def testAllWrong(self):
predictions = array_ops.zeros([40])
labels = array_ops.ones([40])
num_classes = 2
with self.cached_session() as sess:
miou, update_op = metrics.streaming_mean_iou(predictions, labels,
num_classes)
sess.run(variables.local_variables_initializer())
self.assertAllEqual([[0, 0], [40, 0]], update_op.eval())
self.assertEqual(0., miou.eval())
def testResultsWithSomeMissing(self):
predictions = array_ops.concat([
constant_op.constant(0, shape=[5]),
constant_op.constant(1, shape=[5])
], 0)
labels = array_ops.concat([
constant_op.constant(0, shape=[3]),
constant_op.constant(1, shape=[7])
], 0)
num_classes = 2
weights = array_ops.concat([
constant_op.constant(0, shape=[1]),
constant_op.constant(1, shape=[8]),
constant_op.constant(0, shape=[1])
], 0)
with self.cached_session() as sess:
miou, update_op = metrics.streaming_mean_iou(
predictions, labels, num_classes, weights=weights)
sess.run(variables.local_variables_initializer())
self.assertAllEqual([[2, 0], [2, 4]], update_op.eval())
desired_miou = np.mean([2. / 4., 4. / 6.])
self.assertAlmostEqual(desired_miou, miou.eval())
def testMissingClassInLabels(self):
labels = constant_op.constant([[[0, 0, 1, 1, 0, 0], [1, 0, 0, 0, 0, 1]],
[[1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0]]])
predictions = constant_op.constant(
[[[0, 0, 2, 1, 1, 0], [0, 1, 2, 2, 0, 1]], [[0, 0, 2, 1, 1, 1],
[1, 1, 2, 0, 0, 0]]])
num_classes = 3
with self.cached_session() as sess:
miou, update_op = metrics.streaming_mean_iou(predictions, labels,
num_classes)
sess.run(variables.local_variables_initializer())
self.assertAllEqual([[7, 4, 3], [3, 5, 2], [0, 0, 0]], update_op.eval())
self.assertAlmostEqual(1 / 3 * (7 / (7 + 3 + 7) + 5 / (5 + 4 + 5) + 0 /
(0 + 5 + 0)), miou.eval())
def testMissingClassOverallSmall(self):
labels = constant_op.constant([0])
predictions = constant_op.constant([0])
num_classes = 2
with self.cached_session() as sess:
miou, update_op = metrics.streaming_mean_iou(predictions, labels,
num_classes)
sess.run(variables.local_variables_initializer())
self.assertAllEqual([[1, 0], [0, 0]], update_op.eval())
self.assertAlmostEqual(1, miou.eval())
def testMissingClassOverallLarge(self):
labels = constant_op.constant([[[0, 0, 1, 1, 0, 0], [1, 0, 0, 0, 0, 1]],
[[1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0]]])
predictions = constant_op.constant(
[[[0, 0, 1, 1, 0, 0], [1, 1, 0, 0, 1, 1]], [[0, 0, 0, 1, 1, 1],
[1, 1, 1, 0, 0, 0]]])
num_classes = 3
with self.cached_session() as sess:
miou, update_op = metrics.streaming_mean_iou(predictions, labels,
num_classes)
sess.run(variables.local_variables_initializer())
self.assertAllEqual([[9, 5, 0], [3, 7, 0], [0, 0, 0]], update_op.eval())
self.assertAlmostEqual(1 / 2 * (9 / (9 + 3 + 5) + 7 / (7 + 5 + 3)),
miou.eval())
class StreamingConcatTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def testVars(self):
metrics.streaming_concat(values=array_ops.ones((10,)))
_assert_metric_variables(self, (
'streaming_concat/array:0',
'streaming_concat/size:0',
))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
value, _ = metrics.streaming_concat(
values=array_ops.ones((10,)), metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [value])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_concat(
values=array_ops.ones((10,)), updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testNextArraySize(self):
next_array_size = metric_ops._next_array_size # pylint: disable=protected-access
with self.cached_session():
self.assertEqual(next_array_size(2, growth_factor=2).eval(), 2)
self.assertEqual(next_array_size(3, growth_factor=2).eval(), 4)
self.assertEqual(next_array_size(4, growth_factor=2).eval(), 4)
self.assertEqual(next_array_size(5, growth_factor=2).eval(), 8)
self.assertEqual(next_array_size(6, growth_factor=2).eval(), 8)
def testStreamingConcat(self):
with self.cached_session() as sess:
values = array_ops.placeholder(dtypes_lib.int32, [None])
concatenated, update_op = metrics.streaming_concat(values)
sess.run(variables.local_variables_initializer())
self.assertAllEqual([], concatenated.eval())
sess.run([update_op], feed_dict={values: [0, 1, 2]})
self.assertAllEqual([0, 1, 2], concatenated.eval())
sess.run([update_op], feed_dict={values: [3, 4]})
self.assertAllEqual([0, 1, 2, 3, 4], concatenated.eval())
sess.run([update_op], feed_dict={values: [5, 6, 7, 8, 9]})
self.assertAllEqual(np.arange(10), concatenated.eval())
def testStreamingConcatStringValues(self):
with self.cached_session() as sess:
values = array_ops.placeholder(dtypes_lib.string, [None])
concatenated, update_op = metrics.streaming_concat(values)
sess.run(variables.local_variables_initializer())
self.assertItemsEqual([], concatenated.eval())
sess.run([update_op], feed_dict={values: ['a', 'b', 'c']})
self.assertItemsEqual([b'a', b'b', b'c'], concatenated.eval())
sess.run([update_op], feed_dict={values: ['d', 'e']})
self.assertItemsEqual([b'a', b'b', b'c', b'd', b'e'], concatenated.eval())
sess.run([update_op], feed_dict={values: ['f', 'g', 'h', 'i', 'j']})
self.assertItemsEqual(
[b'a', b'b', b'c', b'd', b'e', b'f', b'g', b'h', b'i', b'j'],
concatenated.eval())
def testStreamingConcatMaxSize(self):
with self.cached_session() as sess:
values = math_ops.range(3)
concatenated, update_op = metrics.streaming_concat(values, max_size=5)
sess.run(variables.local_variables_initializer())
self.assertAllEqual([], concatenated.eval())
sess.run([update_op])
self.assertAllEqual([0, 1, 2], concatenated.eval())
sess.run([update_op])
self.assertAllEqual([0, 1, 2, 0, 1], concatenated.eval())
sess.run([update_op])
self.assertAllEqual([0, 1, 2, 0, 1], concatenated.eval())
def testStreamingConcat2D(self):
with self.cached_session() as sess:
values = array_ops.reshape(math_ops.range(3), (3, 1))
concatenated, update_op = metrics.streaming_concat(values, axis=-1)
sess.run(variables.local_variables_initializer())
for _ in range(10):
sess.run([update_op])
self.assertAllEqual([[0] * 10, [1] * 10, [2] * 10], concatenated.eval())
def testStreamingConcatErrors(self):
with self.assertRaises(ValueError):
metrics.streaming_concat(array_ops.placeholder(dtypes_lib.float32))
values = array_ops.zeros((2, 3))
with self.assertRaises(ValueError):
metrics.streaming_concat(values, axis=-3, max_size=3)
with self.assertRaises(ValueError):
metrics.streaming_concat(values, axis=2, max_size=3)
with self.assertRaises(ValueError):
metrics.streaming_concat(
array_ops.placeholder(dtypes_lib.float32, [None, None]))
def testStreamingConcatReset(self):
with self.cached_session() as sess:
values = array_ops.placeholder(dtypes_lib.int32, [None])
concatenated, update_op = metrics.streaming_concat(values)
sess.run(variables.local_variables_initializer())
self.assertAllEqual([], concatenated.eval())
sess.run([update_op], feed_dict={values: [0, 1, 2]})
self.assertAllEqual([0, 1, 2], concatenated.eval())
sess.run(variables.local_variables_initializer())
sess.run([update_op], feed_dict={values: [3, 4]})
self.assertAllEqual([3, 4], concatenated.eval())
class AggregateMetricsTest(test.TestCase):
def testAggregateNoMetricsRaisesValueError(self):
with self.assertRaises(ValueError):
metrics.aggregate_metrics()
def testAggregateSingleMetricReturnsOneItemLists(self):
values = array_ops.ones((10, 4))
value_tensors, update_ops = metrics.aggregate_metrics(
metrics.streaming_mean(values))
self.assertEqual(len(value_tensors), 1)
self.assertEqual(len(update_ops), 1)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(1, update_ops[0].eval())
self.assertEqual(1, value_tensors[0].eval())
def testAggregateMultipleMetricsReturnsListsInOrder(self):
predictions = array_ops.ones((10, 4))
labels = array_ops.ones((10, 4)) * 3
value_tensors, update_ops = metrics.aggregate_metrics(
metrics.streaming_mean_absolute_error(predictions, labels),
metrics.streaming_mean_squared_error(predictions, labels))
self.assertEqual(len(value_tensors), 2)
self.assertEqual(len(update_ops), 2)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(2, update_ops[0].eval())
self.assertEqual(4, update_ops[1].eval())
self.assertEqual(2, value_tensors[0].eval())
self.assertEqual(4, value_tensors[1].eval())
class AggregateMetricMapTest(test.TestCase):
def testAggregateMultipleMetricsReturnsListsInOrder(self):
predictions = array_ops.ones((10, 4))
labels = array_ops.ones((10, 4)) * 3
names_to_values, names_to_updates = metrics.aggregate_metric_map({
'm1': metrics.streaming_mean_absolute_error(predictions, labels),
'm2': metrics.streaming_mean_squared_error(predictions, labels),
})
self.assertEqual(2, len(names_to_values))
self.assertEqual(2, len(names_to_updates))
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(2, names_to_updates['m1'].eval())
self.assertEqual(4, names_to_updates['m2'].eval())
self.assertEqual(2, names_to_values['m1'].eval())
self.assertEqual(4, names_to_values['m2'].eval())
class CountTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def testVars(self):
metrics.count(array_ops.ones([4, 3]))
_assert_metric_variables(self, ['count/count:0'])
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.count(
array_ops.ones([4, 3]), metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.count(
array_ops.ones([4, 3]), updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testReturnType(self):
c, op = metrics.count(array_ops.ones([4, 3]))
self.assertTrue(isinstance(c, ops.Tensor))
self.assertTrue(isinstance(op, ops.Operation) or isinstance(op, ops.Tensor))
def testBasic(self):
with self.cached_session() as sess:
values_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
result, update_op = metrics.count(values)
sess.run(variables.local_variables_initializer())
for _ in range(4):
sess.run(update_op)
self.assertAlmostEqual(8.0, sess.run(result), 5)
def testUpdateOpsReturnsCurrentValue(self):
with self.cached_session() as sess:
values_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
result, update_op = metrics.count(values)
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(2.0, sess.run(update_op), 5)
self.assertAlmostEqual(4.0, sess.run(update_op), 5)
self.assertAlmostEqual(6.0, sess.run(update_op), 5)
self.assertAlmostEqual(8.0, sess.run(update_op), 5)
self.assertAlmostEqual(8.0, sess.run(result), 5)
def test1dWeightedValues(self):
with self.cached_session() as sess:
# Create the queue that populates the values.
values_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
# Create the queue that populates the weights.
weights_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 1))
_enqueue_vector(sess, weights_queue, [0.5])
_enqueue_vector(sess, weights_queue, [0])
_enqueue_vector(sess, weights_queue, [0])
_enqueue_vector(sess, weights_queue, [1.2])
weights = weights_queue.dequeue()
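# The scalar weight of each row is broadcast across its 2 values, so the
# weighted count works out to 2 * (0.5 + 0 + 0 + 1.2) = 3.4.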
result, update_op = metrics.count(values, weights)
variables.local_variables_initializer().run()
for _ in range(4):
update_op.eval()
self.assertAlmostEqual(3.4, result.eval(), 5)
def test1dWeightedValues_placeholders(self):
with self.cached_session() as sess:
# Create the queue that populates the values.
feed_values = ((0, 1), (-4.2, 9.1), (6.5, 0), (-3.2, 4.0))
values = array_ops.placeholder(dtype=dtypes_lib.float32)
# Create the queue that populates the weights.
weights_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1,))
_enqueue_vector(sess, weights_queue, 0.5, shape=(1,))
_enqueue_vector(sess, weights_queue, 0, shape=(1,))
_enqueue_vector(sess, weights_queue, 0, shape=(1,))
_enqueue_vector(sess, weights_queue, 1.2, shape=(1,))
weights = weights_queue.dequeue()
result, update_op = metrics.count(values, weights)
variables.local_variables_initializer().run()
for i in range(4):
update_op.eval(feed_dict={values: feed_values[i]})
self.assertAlmostEqual(3.4, result.eval(), 5)
def test2dWeightedValues(self):
with self.cached_session() as sess:
# Create the queue that populates the values.
values_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
# Create the queue that populates the weights.
weights_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, weights_queue, [1.1, 1])
_enqueue_vector(sess, weights_queue, [1, 0])
_enqueue_vector(sess, weights_queue, [0, 1])
_enqueue_vector(sess, weights_queue, [0, 0])
weights = weights_queue.dequeue()
result, update_op = metrics.count(values, weights)
variables.local_variables_initializer().run()
for _ in range(4):
update_op.eval()
self.assertAlmostEqual(4.1, result.eval(), 5)
def test2dWeightedValues_placeholders(self):
with self.cached_session() as sess:
# Create the queue that populates the values.
feed_values = ((0, 1), (-4.2, 9.1), (6.5, 0), (-3.2, 4.0))
values = array_ops.placeholder(dtype=dtypes_lib.float32)
# Create the queue that populates the weights.
weights_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(2,))
_enqueue_vector(sess, weights_queue, [1.1, 1], shape=(2,))
_enqueue_vector(sess, weights_queue, [1, 0], shape=(2,))
_enqueue_vector(sess, weights_queue, [0, 1], shape=(2,))
_enqueue_vector(sess, weights_queue, [0, 0], shape=(2,))
weights = weights_queue.dequeue()
result, update_op = metrics.count(values, weights)
variables.local_variables_initializer().run()
for i in range(4):
update_op.eval(feed_dict={values: feed_values[i]})
self.assertAlmostEqual(4.1, result.eval(), 5)
class CohenKappaTest(test.TestCase):
def _confusion_matrix_to_samples(self, confusion_matrix):
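# Expands each confusion-matrix cell (i, j) into that many (label=i, prediction=j)
# sample pairs, so the tests can feed raw samples that reproduce the matrix.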
x, y = confusion_matrix.shape
pairs = []
for label in range(x):
for feature in range(y):
pairs += [label, feature] * confusion_matrix[label, feature]
pairs = np.array(pairs).reshape((-1, 2))
return pairs[:, 0], pairs[:, 1]
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.cohen_kappa(
predictions_idx=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
num_classes=2)
_assert_metric_variables(self, (
'cohen_kappa/po:0',
'cohen_kappa/pe_row:0',
'cohen_kappa/pe_col:0',
))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
kappa, _ = metrics.cohen_kappa(
predictions_idx=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
num_classes=2,
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [kappa])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.cohen_kappa(
predictions_idx=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
num_classes=2,
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_uniform(
(10, 1), maxval=3, dtype=dtypes_lib.int64, seed=1)
labels = random_ops.random_uniform(
(10, 1), maxval=3, dtype=dtypes_lib.int64, seed=2)
kappa, update_op = metrics.cohen_kappa(labels, predictions, 3)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_kappa = kappa.eval()
for _ in range(10):
self.assertAlmostEqual(initial_kappa, kappa.eval(), 5)
def testBasic(self):
confusion_matrix = np.array([[9, 3, 1], [4, 8, 2], [2, 1, 6]])
# overall total = 36
# po = [9, 8, 6], sum(po) = 23
# pe_row = [15, 12, 9], pe_col = [13, 14, 9], so pe = [5.42, 4.67, 2.25]
# finally, kappa = (sum(po) - sum(pe)) / (N - sum(pe))
# = (23 - 12.34) / (36 - 12.34)
# = 0.45
# see: http://psych.unl.edu/psycrs/handcomp/hckappa.PDF
expect = 0.45
labels, predictions = self._confusion_matrix_to_samples(confusion_matrix)
dtypes = [dtypes_lib.int16, dtypes_lib.int32, dtypes_lib.int64]
shapes = [
(len(labels),), # 1-dim
(len(labels), 1) # 2-dim
]
weights = [None, np.ones_like(labels)]
for dtype in dtypes:
for shape in shapes:
for weight in weights:
with self.cached_session() as sess:
predictions_tensor = constant_op.constant(
np.reshape(predictions, shape), dtype=dtype)
labels_tensor = constant_op.constant(
np.reshape(labels, shape), dtype=dtype)
kappa, update_op = metrics.cohen_kappa(
labels_tensor, predictions_tensor, 3, weights=weight)
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(expect, sess.run(update_op), 2)
self.assertAlmostEqual(expect, kappa.eval(), 2)
def testAllCorrect(self):
inputs = np.arange(0, 100) % 4
# confusion matrix
# [[25, 0, 0],
# [0, 25, 0],
# [0, 0, 25]]
# Calculated by v0.19: sklearn.metrics.cohen_kappa_score(inputs, inputs)
expect = 1.0
with self.cached_session() as sess:
predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
labels = constant_op.constant(inputs)
kappa, update_op = metrics.cohen_kappa(labels, predictions, 4)
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(expect, sess.run(update_op), 5)
self.assertAlmostEqual(expect, kappa.eval(), 5)
def testAllIncorrect(self):
labels = np.arange(0, 100) % 4
predictions = (labels + 1) % 4
# confusion matrix
# [[0, 25, 0],
# [0, 0, 25],
# [25, 0, 0]]
# Calculated by v0.19: sklearn.metrics.cohen_kappa_score(labels, predictions)
expect = -0.333333333333
with self.cached_session() as sess:
predictions = constant_op.constant(predictions, dtype=dtypes_lib.float32)
labels = constant_op.constant(labels)
kappa, update_op = metrics.cohen_kappa(labels, predictions, 4)
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(expect, sess.run(update_op), 5)
self.assertAlmostEqual(expect, kappa.eval(), 5)
def testWeighted(self):
confusion_matrix = np.array([[9, 3, 1], [4, 8, 2], [2, 1, 6]])
labels, predictions = self._confusion_matrix_to_samples(confusion_matrix)
num_samples = np.sum(confusion_matrix, dtype=np.int32)
weights = (np.arange(0, num_samples) % 5) / 5.0
# Calculated by v0.19: sklearn.metrics.cohen_kappa_score(
# labels, predictions, sample_weight=weights)
expect = 0.453466583385
with self.cached_session() as sess:
predictions = constant_op.constant(predictions, dtype=dtypes_lib.float32)
labels = constant_op.constant(labels)
kappa, update_op = metrics.cohen_kappa(
labels, predictions, 4, weights=weights)
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(expect, sess.run(update_op), 5)
self.assertAlmostEqual(expect, kappa.eval(), 5)
def testWithMultipleUpdates(self):
confusion_matrix = np.array([[90, 30, 10, 20], [40, 80, 20, 30],
[20, 10, 60, 35], [15, 25, 30, 25]])
labels, predictions = self._confusion_matrix_to_samples(confusion_matrix)
num_samples = np.sum(confusion_matrix, dtype=np.int32)
weights = (np.arange(0, num_samples) % 5) / 5.0
num_classes = confusion_matrix.shape[0]
batch_size = num_samples // 10
predictions_t = array_ops.placeholder(
dtypes_lib.float32, shape=(batch_size,))
labels_t = array_ops.placeholder(dtypes_lib.int32, shape=(batch_size,))
weights_t = array_ops.placeholder(dtypes_lib.float32, shape=(batch_size,))
kappa, update_op = metrics.cohen_kappa(
labels_t, predictions_t, num_classes, weights=weights_t)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
for idx in range(0, num_samples, batch_size):
batch_start, batch_end = idx, idx + batch_size
sess.run(
update_op,
feed_dict={
labels_t: labels[batch_start:batch_end],
predictions_t: predictions[batch_start:batch_end],
weights_t: weights[batch_start:batch_end]
})
# Calculated by v0.19: sklearn.metrics.cohen_kappa_score(
# labels_np, predictions_np, sample_weight=weights_np)
expect = 0.289965397924
self.assertAlmostEqual(expect, kappa.eval(), 5)
def testInvalidNumClasses(self):
predictions = array_ops.placeholder(dtypes_lib.float32, shape=(4, 1))
labels = array_ops.placeholder(dtypes_lib.int32, shape=(4, 1))
with self.assertRaisesRegexp(ValueError, 'num_classes'):
metrics.cohen_kappa(labels, predictions, 1)
def testInvalidDimension(self):
predictions = array_ops.placeholder(dtypes_lib.float32, shape=(4, 1))
invalid_labels = array_ops.placeholder(dtypes_lib.int32, shape=(4, 2))
with self.assertRaises(ValueError):
metrics.cohen_kappa(invalid_labels, predictions, 3)
invalid_predictions = array_ops.placeholder(
dtypes_lib.float32, shape=(4, 2))
labels = array_ops.placeholder(dtypes_lib.int32, shape=(4, 1))
with self.assertRaises(ValueError):
metrics.cohen_kappa(labels, invalid_predictions, 3)
def testConditionalPackingOptimization(self):
placeholder = array_ops.placeholder(dtypes_lib.float32, [None])
values, update_op = metric_ops.streaming_concat(placeholder)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
for feed in range(10):
sess.run(update_op, feed_dict={placeholder: [feed]})
print(sess.run(values))
if __name__ == '__main__':
test.main()
| apache-2.0 |
daniel-e/pymltools | plot_scripts/plot_perceptron_xor.py | 1 | 1294 | #!/usr/bin/env python3
import random, math, sys
import matplotlib.pyplot as plt
sys.path.append("../")
import perceptron
import synthetic_data
plt.grid()
# initialize prng and weights to get deterministic results
random.seed(2)
w = [2.0, -0.1, -0.8, 0.2]
# create random examples of two classes
l0, l1 = synthetic_data.xor_binary_class(50, 0.5)
# TODO
l0 = list(l0)
l1 = list(l1)
x, y = zip(*l0)
plt.plot(x, y, ".", marker = "+")
x, y = zip(*l1)
plt.plot(x, y, ".", marker = "x")
# map the data into a space with one additional dimension so that
# it becomes linearly separable
l0 = [(x, y, x*y) for x, y in l0]
l1 = [(x, y, x*y) for x, y in l1]
w, its, err = perceptron.binary_learning(l0, l1, weights = w)
print ("iterations =", its, "error =", err)
def frange(start, end, step):
r = start
while r < end:
yield r
r += step
# plot the decision boundary
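# With weights [w0, w1, w2, w3] on the features (1, x, y, x*y) (the bias-first
# layout implied by the expression below), the boundary w0 + w1*x + w2*y + w3*x*y = 0
# gives y = -(w0 + w1*x) / (w2 + w3*x). The curve is drawn over two x ranges to
# step over the pole where w2 + w3*x = 0.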
x = list(frange(-2, 0.52, 0.001))
y = []
for i in x:
a = (-w[0] - w[1] * i) / (w[2] + w[3] * i)
y.append(a)
plt.plot(x, y, "r", label = "decision boundary")
x = list(frange(0.53, 1.5, 0.001))
y = []
for i in x:
a = (-w[0] - w[1] * i) / (w[2] + w[3] * i)
y.append(a)
plt.plot(x, y, "r")
plt.legend(loc = "lower right")
plt.xlim([-0.5, 1.5])
plt.ylim([-0.5, 1.5])
plt.savefig("perceptron_xor.png")
#plt.show()
| gpl-2.0 |
Sentient07/scikit-learn | examples/svm/plot_svm_margin.py | 88 | 2540 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
SVM Margins Example
=========================================================
The plots below illustrate the effect the parameter `C` has
on the separation line. A large value of `C` basically tells
our model that we do not have that much faith in our data's
distribution, and will only consider points close to the line
of separation.
A small value of `C` includes more/all the observations, allowing
the margins to be calculated using all the data in the area.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
# we create 40 separable points
np.random.seed(0)
X = np.r_[np.random.randn(20, 2) - [2, 2], np.random.randn(20, 2) + [2, 2]]
Y = [0] * 20 + [1] * 20
# figure number
fignum = 1
# fit the model
for name, penalty in (('unreg', 1), ('reg', 0.05)):
clf = svm.SVC(kernel='linear', C=penalty)
clf.fit(X, Y)
# get the separating hyperplane
w = clf.coef_[0]
a = -w[0] / w[1]
xx = np.linspace(-5, 5)
yy = a * xx - (clf.intercept_[0]) / w[1]
# plot the parallels to the separating hyperplane that pass through the
# support vectors (margin away from hyperplane in direction
# perpendicular to hyperplane). This is sqrt(1+a^2) away vertically in
# 2-d.
margin = 1 / np.sqrt(np.sum(clf.coef_ ** 2))
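# Support vectors satisfy |w . x + b| = 1, so the geometric margin is 1 / ||w||.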
yy_down = yy - np.sqrt(1 + a ** 2) * margin
yy_up = yy + np.sqrt(1 + a ** 2) * margin
# plot the line, the points, and the nearest vectors to the plane
plt.figure(fignum, figsize=(4, 3))
plt.clf()
plt.plot(xx, yy, 'k-')
plt.plot(xx, yy_down, 'k--')
plt.plot(xx, yy_up, 'k--')
plt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1], s=80,
facecolors='none', zorder=10, edgecolors='k')
plt.scatter(X[:, 0], X[:, 1], c=Y, zorder=10, cmap=plt.cm.Paired,
edgecolors='k')
plt.axis('tight')
x_min = -4.8
x_max = 4.2
y_min = -6
y_max = 6
XX, YY = np.mgrid[x_min:x_max:200j, y_min:y_max:200j]
Z = clf.predict(np.c_[XX.ravel(), YY.ravel()])
# Put the result into a color plot
Z = Z.reshape(XX.shape)
plt.figure(fignum, figsize=(4, 3))
plt.pcolormesh(XX, YY, Z, cmap=plt.cm.Paired)
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())
fignum = fignum + 1
plt.show()
| bsd-3-clause |
YihaoLu/statsmodels | statsmodels/graphics/tests/test_boxplots.py | 28 | 1257 | import numpy as np
from numpy.testing import dec
from statsmodels.graphics.boxplots import violinplot, beanplot
from statsmodels.datasets import anes96
try:
import matplotlib.pyplot as plt
have_matplotlib = True
except:
have_matplotlib = False
@dec.skipif(not have_matplotlib)
def test_violinplot_beanplot():
# Test violinplot and beanplot with the same dataset.
data = anes96.load_pandas()
party_ID = np.arange(7)
labels = ["Strong Democrat", "Weak Democrat", "Independent-Democrat",
"Independent-Independent", "Independent-Republican",
"Weak Republican", "Strong Republican"]
age = [data.exog['age'][data.endog == id] for id in party_ID]
fig = plt.figure()
ax = fig.add_subplot(111)
violinplot(age, ax=ax, labels=labels,
plot_opts={'cutoff_val':5, 'cutoff_type':'abs',
'label_fontsize':'small',
'label_rotation':30})
plt.close(fig)
fig = plt.figure()
ax = fig.add_subplot(111)
beanplot(age, ax=ax, labels=labels,
plot_opts={'cutoff_val':5, 'cutoff_type':'abs',
'label_fontsize':'small',
'label_rotation':30})
plt.close(fig)
| bsd-3-clause |
spbguru/repo1 | external/linux32/lib/python2.6/site-packages/matplotlib/bezier.py | 70 | 14387 | """
A module providing some utility functions regarding bezier path manipulation.
"""
import numpy as np
from math import sqrt
from matplotlib.path import Path
from operator import xor
# some functions
def get_intersection(cx1, cy1, cos_t1, sin_t1,
cx2, cy2, cos_t2, sin_t2):
""" return a intersecting point between a line through (cx1, cy1)
and having angle t1 and a line through (cx2, cy2) and angle t2.
"""
# line1 => sin_t1 * (x - cx1) - cos_t1 * (y - cy1) = 0.
# line1 => sin_t1 * x + cos_t1 * y = sin_t1*cx1 - cos_t1*cy1
line1_rhs = sin_t1 * cx1 - cos_t1 * cy1
line2_rhs = sin_t2 * cx2 - cos_t2 * cy2
# rhs matrix
a, b = sin_t1, -cos_t1
c, d = sin_t2, -cos_t2
ad_bc = a*d-b*c
if ad_bc == 0.:
raise ValueError("Given lines do not intersect")
#rhs_inverse
a_, b_ = d, -b
c_, d_ = -c, a
a_, b_, c_, d_ = [k / ad_bc for k in [a_, b_, c_, d_]]
x = a_* line1_rhs + b_ * line2_rhs
y = c_* line1_rhs + d_ * line2_rhs
return x, y
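# For example (illustrative check): the horizontal line through (0, 0)
# (cos_t=1, sin_t=0) and the vertical line through (1, 1) (cos_t=0, sin_t=1)
# meet at (1, 0):
#
#   get_intersection(0., 0., 1., 0., 1., 1., 0., 1.)   # -> (1.0, 0.0)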
def get_normal_points(cx, cy, cos_t, sin_t, length):
"""
    For a line passing through (*cx*, *cy*) and having an angle *t*,
    return the locations of the two points along its perpendicular line,
    each at a distance of *length*.
"""
if length == 0.:
return cx, cy, cx, cy
cos_t1, sin_t1 = sin_t, -cos_t
cos_t2, sin_t2 = -sin_t, cos_t
x1, y1 = length*cos_t1 + cx, length*sin_t1 + cy
x2, y2 = length*cos_t2 + cx, length*sin_t2 + cy
return x1, y1, x2, y2
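# For example (illustrative check): a point at the origin with tangent
# direction (cos_t, sin_t) = (0.8, 0.6) and length=5 gives the two points
# (3, -4) and (-3, 4), both perpendicular to the tangent and 5 units away:
#
#   get_normal_points(0., 0., 0.8, 0.6, 5.)   # -> (3.0, -4.0, -3.0, 4.0)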
## BEZIER routines
# subdividing bezier curve
# http://www.cs.mtu.edu/~shene/COURSES/cs3621/NOTES/spline/Bezier/bezier-sub.html
def _de_casteljau1(beta, t):
next_beta = beta[:-1] * (1-t) + beta[1:] * t
return next_beta
def split_de_casteljau(beta, t):
"""split a bezier segment defined by its controlpoints *beta*
into two separate segment divided at *t* and return their control points.
"""
beta = np.asarray(beta)
beta_list = [beta]
while True:
beta = _de_casteljau1(beta, t)
beta_list.append(beta)
if len(beta) == 1:
break
left_beta = [beta[0] for beta in beta_list]
right_beta = [beta[-1] for beta in reversed(beta_list)]
return left_beta, right_beta
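# For example (illustrative check): splitting the quadratic segment with
# control points (0, 0), (1, 2), (2, 0) at t=0.5 gives
#   left  : (0, 0), (0.5, 1.0), (1.0, 1.0)
#   right : (1.0, 1.0), (1.5, 1.0), (2, 0)
# i.e. both halves share the on-curve point B(0.5) = (1.0, 1.0).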
def find_bezier_t_intersecting_with_closedpath(bezier_point_at_t, inside_closedpath,
t0=0., t1=1., tolerence=0.01):
""" Find a parameter t0 and t1 of the given bezier path which
bounds the intersecting points with a provided closed
path(*inside_closedpath*). Search starts from *t0* and *t1* and it
uses a simple bisecting algorithm therefore one of the end point
must be inside the path while the orther doesn't. The search stop
when |t0-t1| gets smaller than the given tolerence.
value for
- bezier_point_at_t : a function which returns x, y coordinates at *t*
- inside_closedpath : return True if the point is insed the path
"""
# inside_closedpath : function
start = bezier_point_at_t(t0)
end = bezier_point_at_t(t1)
start_inside = inside_closedpath(start)
end_inside = inside_closedpath(end)
if not xor(start_inside, end_inside):
raise ValueError("the segment does not seemed to intersect with the path")
while 1:
# return if the distance is smaller than the tolerence
if (start[0]-end[0])**2 + (start[1]-end[1])**2 < tolerence**2:
return t0, t1
# calculate the middle point
middle_t = 0.5*(t0+t1)
middle = bezier_point_at_t(middle_t)
middle_inside = inside_closedpath(middle)
if xor(start_inside, middle_inside):
t1 = middle_t
end = middle
end_inside = middle_inside
else:
t0 = middle_t
start = middle
start_inside = middle_inside
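# For example (illustrative check): with the straight segment
#   point_at_t = lambda t: (t, 0.)
# and the predicate
#   inside = lambda xy: xy[0]**2 + xy[1]**2 < 0.25      # disk of radius 0.5
# the bisection brackets the crossing of the circle near t=0.5, returning
# t0 <= 0.5 <= t1 with the two bracketing points closer than *tolerence*.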
class BezierSegment:
"""
A simple class of a 2-dimensional bezier segment
"""
    # Higher order bezier curves can be supported by simply adding
    # corresponding values.
_binom_coeff = {1:np.array([1., 1.]),
2:np.array([1., 2., 1.]),
3:np.array([1., 3., 3., 1.])}
def __init__(self, control_points):
"""
        *control_points* : location of control points. It needs to have a
        shape of n * 2, where n is the order of the bezier line. 1 <=
        n <= 3 is supported.
"""
_o = len(control_points)
self._orders = np.arange(_o)
_coeff = BezierSegment._binom_coeff[_o - 1]
_control_points = np.asarray(control_points)
xx = _control_points[:,0]
yy = _control_points[:,1]
self._px = xx * _coeff
self._py = yy * _coeff
def point_at_t(self, t):
"evaluate a point at t"
one_minus_t_powers = np.power(1.-t, self._orders)[::-1]
t_powers = np.power(t, self._orders)
tt = one_minus_t_powers * t_powers
_x = sum(tt * self._px)
_y = sum(tt * self._py)
return _x, _y
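# For example (illustrative check): the quadratic segment with control points
# (0, 0), (1, 2), (2, 0) passes through (1, 1) at its midpoint:
#
#   BezierSegment([(0., 0.), (1., 2.), (2., 0.)]).point_at_t(0.5)   # -> (1.0, 1.0)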
def split_bezier_intersecting_with_closedpath(bezier,
inside_closedpath,
tolerence=0.01):
"""
bezier : control points of the bezier segment
inside_closedpath : a function which returns true if the point is inside the path
"""
bz = BezierSegment(bezier)
bezier_point_at_t = bz.point_at_t
t0, t1 = find_bezier_t_intersecting_with_closedpath(bezier_point_at_t,
inside_closedpath,
tolerence=tolerence)
_left, _right = split_de_casteljau(bezier, (t0+t1)/2.)
return _left, _right
def find_r_to_boundary_of_closedpath(inside_closedpath, xy,
cos_t, sin_t,
rmin=0., rmax=1., tolerence=0.01):
"""
Find a radius r (centered at *xy*) between *rmin* and *rmax* at
which it intersect with the path.
inside_closedpath : function
cx, cy : center
cos_t, sin_t : cosine and sine for the angle
rmin, rmax :
"""
cx, cy = xy
def _f(r):
return cos_t*r + cx, sin_t*r + cy
find_bezier_t_intersecting_with_closedpath(_f, inside_closedpath,
t0=rmin, t1=rmax, tolerence=tolerence)
## matplotlib specific
def split_path_inout(path, inside, tolerence=0.01, reorder_inout=False):
""" divide a path into two segment at the point where inside(x, y)
becomes False.
"""
path_iter = path.iter_segments()
ctl_points, command = path_iter.next()
begin_inside = inside(ctl_points[-2:]) # true if begin point is inside
bezier_path = None
ctl_points_old = ctl_points
concat = np.concatenate
iold=0
i = 1
for ctl_points, command in path_iter:
iold=i
i += len(ctl_points)/2
if inside(ctl_points[-2:]) != begin_inside:
bezier_path = concat([ctl_points_old[-2:], ctl_points])
break
ctl_points_old = ctl_points
if bezier_path is None:
raise ValueError("The path does not seem to intersect with the patch")
bp = zip(bezier_path[::2], bezier_path[1::2])
left, right = split_bezier_intersecting_with_closedpath(bp,
inside,
tolerence)
if len(left) == 2:
codes_left = [Path.LINETO]
codes_right = [Path.MOVETO, Path.LINETO]
elif len(left) == 3:
codes_left = [Path.CURVE3, Path.CURVE3]
codes_right = [Path.MOVETO, Path.CURVE3, Path.CURVE3]
elif len(left) == 4:
codes_left = [Path.CURVE4, Path.CURVE4, Path.CURVE4]
codes_right = [Path.MOVETO, Path.CURVE4, Path.CURVE4, Path.CURVE4]
else:
raise ValueError()
verts_left = left[1:]
verts_right = right[:]
#i += 1
if path.codes is None:
path_in = Path(concat([path.vertices[:i], verts_left]))
path_out = Path(concat([verts_right, path.vertices[i:]]))
else:
path_in = Path(concat([path.vertices[:iold], verts_left]),
concat([path.codes[:iold], codes_left]))
path_out = Path(concat([verts_right, path.vertices[i:]]),
concat([codes_right, path.codes[i:]]))
if reorder_inout and begin_inside == False:
path_in, path_out = path_out, path_in
return path_in, path_out
def inside_circle(cx, cy, r):
r2 = r**2
def _f(xy):
x, y = xy
return (x-cx)**2 + (y-cy)**2 < r2
return _f
# quadratic bezier lines
def get_cos_sin(x0, y0, x1, y1):
dx, dy = x1-x0, y1-y0
d = (dx*dx + dy*dy)**.5
return dx/d, dy/d
def get_parallels(bezier2, width):
"""
    Given the quadratic bezier control points *bezier2*, returns
    control points of quadratic bezier lines roughly parallel to the given
    one and separated by *width*.
"""
    # The parallel bezier lines are constructed in the following way.
    # c1 and c2 are control points representing the beginning and end of the bezier line.
# cm is the middle point
c1x, c1y = bezier2[0]
cmx, cmy = bezier2[1]
c2x, c2y = bezier2[2]
    # t1 and t2 are the angles between c1 and cm, and between cm and c2.
    # They are also the angles of the tangential lines of the path at c1 and c2.
cos_t1, sin_t1 = get_cos_sin(c1x, c1y, cmx, cmy)
cos_t2, sin_t2 = get_cos_sin(cmx, cmy, c2x, c2y)
# find c1_left, c1_right which are located along the lines
    # through c1 and perpendicular to the tangential lines of the
# bezier path at a distance of width. Same thing for c2_left and
# c2_right with respect to c2.
c1x_left, c1y_left, c1x_right, c1y_right = \
get_normal_points(c1x, c1y, cos_t1, sin_t1, width)
c2x_left, c2y_left, c2x_right, c2y_right = \
get_normal_points(c2x, c2y, cos_t2, sin_t2, width)
    # find cm_left which is the intersecting point of a line through
    # c1_left with angle t1 and a line through c2_left with angle
# t2. Same with cm_right.
cmx_left, cmy_left = get_intersection(c1x_left, c1y_left, cos_t1, sin_t1,
c2x_left, c2y_left, cos_t2, sin_t2)
cmx_right, cmy_right = get_intersection(c1x_right, c1y_right, cos_t1, sin_t1,
c2x_right, c2y_right, cos_t2, sin_t2)
    # the parallel bezier lines are created with control points of
# [c1_left, cm_left, c2_left] and [c1_right, cm_right, c2_right]
path_left = [(c1x_left, c1y_left), (cmx_left, cmy_left), (c2x_left, c2y_left)]
path_right = [(c1x_right, c1y_right), (cmx_right, cmy_right), (c2x_right, c2y_right)]
return path_left, path_right
def make_wedged_bezier2(bezier2, length, shrink_factor=0.5):
"""
    Similar to get_parallels, returns control points of two quadratic
    bezier lines that run roughly parallel to the given one and are
    separated by *width*.
"""
xx1, yy1 = bezier2[2]
xx2, yy2 = bezier2[1]
xx3, yy3 = bezier2[0]
cx, cy = xx3, yy3
x0, y0 = xx2, yy2
dist = sqrt((x0-cx)**2 + (y0-cy)**2)
cos_t, sin_t = (x0-cx)/dist, (y0-cy)/dist,
x1, y1, x2, y2 = get_normal_points(cx, cy, cos_t, sin_t, length)
xx12, yy12 = (xx1+xx2)/2., (yy1+yy2)/2.,
xx23, yy23 = (xx2+xx3)/2., (yy2+yy3)/2.,
dist = sqrt((xx12-xx23)**2 + (yy12-yy23)**2)
cos_t, sin_t = (xx12-xx23)/dist, (yy12-yy23)/dist,
xm1, ym1, xm2, ym2 = get_normal_points(xx2, yy2, cos_t, sin_t, length*shrink_factor)
l_plus = [(x1, y1), (xm1, ym1), (xx1, yy1)]
l_minus = [(x2, y2), (xm2, ym2), (xx1, yy1)]
return l_plus, l_minus
def find_control_points(c1x, c1y, mmx, mmy, c2x, c2y):
""" Find control points of the bezier line throught c1, mm, c2. We
simply assume that c1, mm, c2 which have parameteric value 0, 0.5, and 1.
"""
cmx = .5 * (4*mmx - (c1x + c2x))
cmy = .5 * (4*mmy - (c1y + c2y))
return [(c1x, c1y), (cmx, cmy), (c2x, c2y)]
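# Derivation: a quadratic bezier through c1, cm, c2 is
#   B(t) = (1-t)**2 * c1 + 2*t*(1-t) * cm + t**2 * c2,
# so B(0.5) = 0.25*c1 + 0.5*cm + 0.25*c2.  Requiring B(0.5) == mm gives
#   cm = 0.5 * (4*mm - (c1 + c2)),
# which is exactly the expression used above.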
def make_wedged_bezier2(bezier2, width, w1=1., wm=0.5, w2=0.):
"""
    Similar to get_parallels, returns control points of two quadratic
    bezier lines that run roughly parallel to the given one and are
    separated by *width*.
"""
# c1, cm, c2
c1x, c1y = bezier2[0]
cmx, cmy = bezier2[1]
c3x, c3y = bezier2[2]
    # t1 and t2 are the angles between c1 and cm, and between cm and c3.
    # They are also the angles of the tangential lines of the path at c1 and c3.
cos_t1, sin_t1 = get_cos_sin(c1x, c1y, cmx, cmy)
cos_t2, sin_t2 = get_cos_sin(cmx, cmy, c3x, c3y)
# find c1_left, c1_right which are located along the lines
    # through c1 and perpendicular to the tangential lines of the
# bezier path at a distance of width. Same thing for c3_left and
# c3_right with respect to c3.
c1x_left, c1y_left, c1x_right, c1y_right = \
get_normal_points(c1x, c1y, cos_t1, sin_t1, width*w1)
c3x_left, c3y_left, c3x_right, c3y_right = \
get_normal_points(c3x, c3y, cos_t2, sin_t2, width*w2)
# find c12, c23 and c123 which are middle points of c1-cm, cm-c3 and c12-c23
c12x, c12y = (c1x+cmx)*.5, (c1y+cmy)*.5
c23x, c23y = (cmx+c3x)*.5, (cmy+c3y)*.5
c123x, c123y = (c12x+c23x)*.5, (c12y+c23y)*.5
# tangential angle of c123 (angle between c12 and c23)
cos_t123, sin_t123 = get_cos_sin(c12x, c12y, c23x, c23y)
c123x_left, c123y_left, c123x_right, c123y_right = \
get_normal_points(c123x, c123y, cos_t123, sin_t123, width*wm)
path_left = find_control_points(c1x_left, c1y_left,
c123x_left, c123y_left,
c3x_left, c3y_left)
path_right = find_control_points(c1x_right, c1y_right,
c123x_right, c123y_right,
c3x_right, c3y_right)
return path_left, path_right
if 0:
path = Path([(0, 0), (1, 0), (2, 2)],
[Path.MOVETO, Path.CURVE3, Path.CURVE3])
left, right = divide_path_inout(path, inside)
clf()
ax = gca()
| gpl-3.0 |
ushiro/persistlab | persistlab/testsuite/test_measfile.py | 1 | 3022 | #!/usr/bin/env python
"""
persistlab.testsuite.measfile
~~~~~~~~~~~~~
This module tests the data file parsing.
    :copyright: (c) 2013 by Stephane Henry.
:license: BSD, see LICENSE for more details.
"""
import sys
import unittest
import pandas
import pandas as pd
import numpy as np
from persistlab import measfile
from persistlab import data
def dataframe():
index = ['a', 'b', 'c', 'd', 'e']
columns = ['one', 'two', 'three', 'four']
M = pd.DataFrame(np.random.randn(5,4), index=index, columns=columns)
return M
class Selection(unittest.TestCase):
def setUp(self, df=dataframe()):
self.df = df
self.all = range(len(df.columns))
def add(self, attr, res):
selection = measfile.Selection(self.df, [])
selection.add(attr)
self.assertEqual(selection, res)
def drop(self, attr, res):
selection = measfile.Selection(self.df)
selection.drop(attr)
self.assertEqual(selection, res)
def test_droplist(self):
self.drop([0, 5], [1, 2, 3])
def test_dropint(self):
self.drop(0, [1, 2, 3])
def test_add_int(self):
self.add(0, [0])
def test_add_list(self):
self.add([0, 1], [0, 1])
def test_add_non_existing(self):
self.add(range(-3, 5), range(4))
def test_selectall(self):
selection = measfile.Selection(self.df)
self.assertEqual(selection, self.all)
class FlatFile(unittest.TestCase):
def setUp(self):
self.mf = measfile.FlatFile(data.FLATFILE_1)
def test_parse(self):
# Check shape of the data
self.assertEqual(self.mf.df.shape, (3, 4))
class XSeg(unittest.TestCase):
def test_no_segment_selection(self):
mf = measfile.XSegFile(data.LI_FN_XSEG[0])
# Test shape
self.assertEqual(mf.df.shape, (320, 3))
# Test one value
self.assertEqual(mf.df.ix[0][1] , 3.388e-09)
def test_oneseg(self):
mf = measfile.XSegFile(data.LI_FN_XSEG[0], data_segment_range=[1])
self.assertEqual(mf.df.shape, (80, 3))
def test_twoseg(self):
mf = measfile.XSegFile(data.LI_FN_XSEG[0], data_segment_range=[0, 1])
self.assertEqual(mf.df.shape, (160, 3))
def test_threeseg(self):
mf = measfile.XSegFile(data.LI_FN_XSEG[0], data_segment_range=[1, 3])
self.assertEqual(mf.df.shape, (240, 3))
class DataFileParser(unittest.TestCase):
def setUp(self):
dfp = measfile.DataFileParser()
self.limf = dfp.parse_files(data.LI_FLATFILE_ALL)
def test_set_filetype(self):
self.assertIsNotNone(self.limf[0])
def test_parse_files(self):
self.assertEqual(len(self.limf),3)
class DataProcessor(DataFileParser):
def test_process(self):
dp = measfile.DataProcessor()
stats = dp.compute(self.limf, data_process='trapz')
self.assertIsInstance(stats,pandas.DataFrame)
| bsd-3-clause |
dr-leo/pandaSDMX | pandasdmx/util.py | 1 | 9243 | import collections
import logging
import typing
from enum import Enum
from typing import TYPE_CHECKING, Any, List, Type, TypeVar, Union, no_type_check
import pydantic
from pydantic import DictError, Extra, ValidationError, validator # noqa: F401
from pydantic.class_validators import make_generic_validator
KT = TypeVar("KT")
VT = TypeVar("VT")
try:
from typing import OrderedDict
except ImportError:
# Python < 3.7.2 compatibility; see
# https://github.com/python/cpython/commit/68b56d0
from typing import _alias # type: ignore
OrderedDict = _alias(collections.OrderedDict, (KT, VT))
log = logging.getLogger(__name__)
class Resource(str, Enum):
"""Enumeration of SDMX REST API endpoints.
====================== ================================================
:class:`Enum` member :mod:`pandasdmx.model` class
====================== ================================================
``categoryscheme`` :class:`.CategoryScheme`
``codelist`` :class:`.Codelist`
``conceptscheme`` :class:`.ConceptScheme`
``data`` :class:`.DataSet`
``dataflow`` :class:`.DataflowDefinition`
``datastructure`` :class:`.DataStructureDefinition`
``provisionagreement`` :class:`.ProvisionAgreement`
====================== ================================================
"""
# agencyscheme = 'agencyscheme'
# attachementconstraint = 'attachementconstraint'
# categorisation = 'categorisation'
categoryscheme = "categoryscheme"
codelist = "codelist"
conceptscheme = "conceptscheme"
# contentconstraint = 'contentconstraint'
data = "data"
# dataconsumerscheme = 'dataconsumerscheme'
dataflow = "dataflow"
# dataproviderscheme = 'dataproviderscheme'
datastructure = "datastructure"
# hierarchicalcodelist = 'hierarchicalcodelist'
# metadata = 'metadata'
# metadataflow = 'metadataflow'
# metadatastructure = 'metadatastructure'
# organisationscheme = 'organisationscheme'
# organisationunitscheme = 'organisationunitscheme'
# process = 'process'
provisionagreement = "provisionagreement"
# reportingtaxonomy = 'reportingtaxonomy'
# schema = 'schema'
# structure = 'structure'
# structureset = 'structureset'
@classmethod
def from_obj(cls, obj):
"""Return an enumeration value based on the class of *obj*."""
clsname = {"DataStructureDefinition": "datastructure"}.get(
obj.__class__.__name__, obj.__class__.__name__
)
return cls[clsname.lower()]
@classmethod
def describe(cls):
return "{" + " ".join(v.name for v in cls._member_map_.values()) + "}"
if TYPE_CHECKING:
Model = TypeVar("Model", bound="BaseModel")
class BaseModel(pydantic.BaseModel):
"""Shim for pydantic.BaseModel.
This class changes two behaviours in pydantic. The methods are direct
copies from pydantic's code, with marked changes.
1. https://github.com/samuelcolvin/pydantic/issues/524
- "Multiple RecursionErrors with self-referencing models"
- In e.g. :class:`.Item`, having both .parent and .child references
leads to infinite recursion during validation.
- Fix: override BaseModel.__setattr__.
- New value 'limited' for Config.validate_assignment: no sibling
field values are passed to Field.validate().
- New key Config.validate_assignment_exclude: list of field names that
are not validated per se *and* not passed to Field.validate() when
validating a sibling field.
2. https://github.com/samuelcolvin/pydantic/issues/521
- "Assignment to attribute changes id() but not referenced object,"
marked as wontfix by pydantic maintainer.
- When cls.attr is typed as BaseModel (or a subclass), then
a.attr is b.attr is always False, even when set to the same reference.
- Fix: override BaseModel.validate() without copy().
"""
class Config:
validate_assignment = "limited"
validate_assignment_exclude: List[str] = []
# Workaround for https://github.com/samuelcolvin/pydantic/issues/521
@classmethod
def validate(cls: Type["Model"], value: Any) -> "Model":
if isinstance(value, dict):
return cls(**value)
elif isinstance(value, cls):
return value # ***
elif cls.__config__.orm_mode:
return cls.from_orm(value)
else:
try:
value_as_dict = dict(value)
except (TypeError, ValueError) as e:
raise DictError() from e
return cls(**value_as_dict)
# Workaround for https://github.com/samuelcolvin/pydantic/issues/524
@no_type_check
def __setattr__(self, name, value):
if self.__config__.extra is not Extra.allow and name not in self.__fields__:
raise ValueError(
f'"{self.__class__.__name__}" object has no field' f' "{name}"'
)
elif not self.__config__.allow_mutation:
raise TypeError(
f'"{self.__class__.__name__}" is immutable and '
"does not support item assignment"
)
elif (
self.__config__.validate_assignment
and name not in self.__config__.validate_assignment_exclude
):
if self.__config__.validate_assignment == "limited":
kw = {"include": {}}
else:
kw = {"exclude": {name}}
known_field = self.__fields__.get(name, None)
if known_field:
value, error_ = known_field.validate(value, self.dict(**kw), loc=name)
if error_:
raise ValidationError([error_], type(self))
self.__dict__[name] = value
self.__fields_set__.add(name)
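# Sketch of the behaviour change described in point 2 above (the class names
# A and B are illustrative, not part of this module):
#
#   class A(BaseModel):
#       x: int = 0
#   class B(BaseModel):
#       a: A = None
#   a, b = A(), B()
#   b.a = a
#   assert b.a is a   # with stock pydantic, validate() copies and this fails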
class DictLike(collections.OrderedDict, typing.MutableMapping[KT, VT]):
"""Container with features of a dict & list, plus attribute access."""
def __getitem__(self, key: Union[KT, int]) -> VT:
try:
return super().__getitem__(key)
except KeyError:
if isinstance(key, int):
return list(self.values())[key]
elif isinstance(key, str) and key.startswith("__"):
raise AttributeError
else:
raise
def __setitem__(self, key: KT, value: VT) -> None:
key = self._apply_validators("key", key)
value = self._apply_validators("value", value)
super().__setitem__(key, value)
# Access items as attributes
def __getattr__(self, name) -> VT:
try:
return self.__getitem__(name)
except KeyError as e:
raise AttributeError(*e.args) from None
def validate(cls, value, field):
if not isinstance(value, (dict, DictLike)):
raise ValueError(value)
result = DictLike()
result.__fields = {"key": field.key_field, "value": field}
result.update(value)
return result
def _apply_validators(self, which, value):
try:
field = self.__fields[which]
except AttributeError:
return value
result, error = field._apply_validators(
value, validators=field.validators, values={}, loc=(), cls=None
)
if error:
raise ValidationError([error], self.__class__)
else:
return result
def compare(self, other, strict=True):
"""Return :obj:`True` if `self` is the same as `other`.
Two DictLike instances are identical if they contain the same set of keys, and
corresponding values compare equal.
Parameters
----------
strict : bool, optional
Passed to :func:`compare` for the values.
"""
if set(self.keys()) != set(other.keys()):
log.info(f"Not identical: {sorted(self.keys())} / {sorted(other.keys())}")
return False
for key, value in self.items():
if not value.compare(other[key], strict):
return False
return True
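# Illustrative usage (sketch): a DictLike can be read by key, by integer
# position, or as an attribute:
#
#   dl = DictLike()
#   dl["FREQ"] = "A"
#   dl["FREQ"], dl[0], dl.FREQ   # -> ('A', 'A', 'A')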
def summarize_dictlike(dl, maxwidth=72):
"""Return a string summary of the DictLike contents."""
value_cls = dl[0].__class__.__name__
count = len(dl)
keys = " ".join(dl.keys())
result = f"{value_cls} ({count}): {keys}"
if len(result) > maxwidth:
# Truncate the list of keys
result = result[: maxwidth - 3] + "..."
return result
def validate_dictlike(*fields):
def decorator(cls):
v = make_generic_validator(DictLike.validate)
for field in fields:
cls.__fields__[field].post_validators = [v]
return cls
return decorator
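# Typical use (sketch; ``ItemScheme`` and ``Item`` are illustrative names, not
# defined in this module): apply as a class decorator to a pydantic model whose
# field holds a DictLike, so that assigned values pass through DictLike.validate():
#
#   @validate_dictlike("items")
#   class ItemScheme(BaseModel):
#       items: DictLike[str, Item] = DictLike()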
def compare(attr, a, b, strict: bool) -> bool:
"""Return :obj:`True` if ``a.attr`` == ``b.attr``.
    If strict is :obj:`False`, :obj:`None` is permissible as `a` or `b`;
    otherwise, the two attribute values must compare equal.
"""
return getattr(a, attr) == getattr(b, attr) or (
not strict and None in (getattr(a, attr), getattr(b, attr))
)
# if not result:
# log.info(f"Not identical: {attr}={getattr(a, attr)} / {getattr(b, attr)}")
# return result
| apache-2.0 |
tmbdev/ocropy | OLD/lineproc.py | 15 | 6891 | ################################################################
### functions specific to text line processing
### (text line segmentation is in lineseg)
################################################################
from scipy import stats
from scipy.ndimage import interpolation,morphology,filters
from pylab import *
import morph
from toplevel import *
################################################################
### line segmentation geometry estimates based on
### segmentations
################################################################
seg_geometry_display = 0
geowin = None
geoax = None
@checks(SEGMENTATION,math=BOOL)
def seg_geometry(segmentation,math=1):
"""Given a line segmentation (either an rseg--preferably connected
component based--or a cseg, return (mh,a,b), where mh is the
    typical (40th-percentile) component height, and y=a*x+b is a line equation (in
Postscript coordinates) for the center of the text line. This
function is used as a simple, standard estimator of text line
geometry. The intended use is to encode the size and centers of
bounding boxes relative to these estimates and add these as
features to the input of a character classifier, allowing it to
distinguish otherwise ambiguous pairs like ,/' and o/O."""
boxes = seg_boxes(segmentation,math=math)
heights = [(y1-y0) for (y0,y1,x0,x1) in boxes]
mh = stats.scoreatpercentile(heights,per=40)
centers = [(avg(y0,y1),avg(x0,x1)) for (y0,y1,x0,x1) in boxes]
xs = array([x for y,x in centers])
ys = array([y for y,x in centers])
a,b = polyfit(xs,ys,1)
if seg_geometry_display:
print "seggeo",math
from matplotlib import patches
global geowin,geoax
old = gca()
if geowin is None:
geowin = figure()
geoax = geowin.add_subplot(111)
geoax.cla()
geoax.imshow(segmentation!=0,cmap=cm.gray)
for (y0,y1,x0,x1) in boxes:
p = patches.Rectangle((x0,y0),x1-x0,y1-y0,edgecolor="red",fill=0)
geoax.add_patch(p)
xm = max(xs)
geoax.plot([0,xm],[b,a*xm+b],'b')
geoax.plot([0,xm],[b-mh/2,a*xm+b-mh/2],'y')
geoax.plot([0,xm],[b+mh/2,a*xm+b+mh/2],'y')
geoax.plot(xs,[y for y in ys],"g.")
sca(old)
print "mh",mh,"a",a,"b",b
return mh,a,b
def avg(*args):
return mean(args)
@deprecated
def rel_char_geom(box,params):
"""Given a character bounding box and a set of line geometry parameters,
compute relative character position and size."""
y0,y1,x0,x1 = box
assert y1>y0 and x1>x0,"%s %s"%((x0,x1),(y0,y1))
mh,a,b = params
y = avg(y0,y1)
x = avg(x0,x1)
yl = a*x+b
rel_ypos = (y-yl)/mh
rel_width = (x1-x0)*1.0/mh
rel_height = (y1-y0)*1.0/mh
# ensure some reasonable bounds
assert rel_ypos>-100 and rel_ypos<100
assert rel_width>0 and rel_width<100
assert rel_height>0 and rel_height<100
return rel_ypos,rel_width,rel_height
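# Worked example: for a character box (y0,y1,x0,x1) = (10,30,5,15) on a line
# with (mh,a,b) = (20,0.0,15.0), the box centre is (y,x) = (20,10), the line
# centre at x=10 is yl = 15, and therefore
#   rel_ypos   = (20-15)/20 = 0.25
#   rel_width  = (15-5)/20  = 0.5
#   rel_height = (30-10)/20 = 1.0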
@deprecated
def rel_geo_normalize(rel):
"""Given a set of geometric parameters, normalize them into the
range -1...1 so that they can be used as input to a neural network."""
if rel is None: return None
if type(rel)==str:
rel = [float(x) for x in rel.split()]
ry,rw,rh = rel
if not (rw>0 and rh>0): return None
ry = clip(2*ry,-1.0,1.0)
rw = clip(log(rw),-1.0,1.0)
rh = clip(log(rh),-1.0,1.0)
geometry = array([ry,rw,rh],'f')
return geometry
@deprecated
def seg_boxes(seg,math=0):
"""Given a color segmentation, return a list of bounding boxes.
Bounding boxes are returned as tuples (y0,y1,x0,x1). With
math=0, raster coordinates are used, with math=1, Postscript
coordinates are used (however, the order of the values in the
tuple doesn't change)."""
seg = array(seg,'uint32')
slices = morph.find_objects(seg)
h = seg.shape[0]
result = []
for i in range(len(slices)):
if slices[i] is None: continue
(ys,xs) = slices[i]
if math:
result += [(h-ys.stop-1,h-ys.start-1,xs.start,xs.stop)]
else:
result += [(ys.start,ys.stop,xs.start,xs.stop)]
return result
################################################################
### image based estimation of line geometry, as well
### as dewarping
################################################################
@checks(DARKLINE)
def estimate_baseline(line,order=3):
"""Compute the baseline by fitting a polynomial to the gradient.
TODO: use robust fitting, special case very short line, limit parameter ranges"""
line = line*1.0/amax(line)
vgrad = morphology.grey_closing(line,(1,40))
vgrad = filters.gaussian_filter(vgrad,(2,60),(1,0))
if amin(vgrad)>0 or amax(vgrad)<0: raise BadImage()
h,w = vgrad.shape
ys = argmin(vgrad,axis=0)
xs = arange(w)
baseline = polyfit(xs,ys,order)
print baseline
return baseline
@checks(DARKLINE)
def dewarp_line(line,show=0,order=3):
"""Dewarp the baseline of a line based in estimate_baseline.
Returns the dewarped image."""
line = line*1.0/amax(line)
line = r_[zeros(line.shape),line]
h,w = line.shape
baseline = estimate_baseline(line,order=order)
ys = polyval(baseline,arange(w))
base = 2*h/3
temp = zeros(line.shape)
for x in range(w):
temp[:,x] = interpolation.shift(line[:,x],(base-ys[x]),order=1)
return temp
#line = line*1.0/amax(line)
@checks(DARKLINE)
def estimate_xheight(line,scale=1.0,debug=0):
"""Estimates the xheight of a line based on image processing and
filtering."""
vgrad = morphology.grey_closing(line,(1,int(scale*40)))
vgrad = filters.gaussian_filter(vgrad,(2,int(scale*60)),(1,0))
if amin(vgrad)>0 or amax(vgrad)<0: raise BadImage("bad line")
if debug: imshow(vgrad)
proj = sum(vgrad,1)
proj = filters.gaussian_filter(proj,0.5)
top = argmax(proj)
bottom = argmin(proj)
return bottom-top,bottom
@checks(DARKLINE)
def latin_mask(line,scale=1.0,r=1.2,debug=0):
"""Estimate a mask that covers letters and diacritics of a text
line for Latin alphabets."""
vgrad = morphology.grey_closing(1.0*line,(1,int(scale*40)))
vgrad = filters.gaussian_filter(vgrad,(2,int(scale*60)),(1,0))
tops = argmax(vgrad,0)
bottoms = argmin(vgrad,0)
mask = zeros(line.shape)
# xheight = mean(bottoms-tops)
for i in range(len(bottoms)):
d = bottoms[i]-tops[i]
y0 = int(maximum(0,bottoms[i]-r*d))
mask[y0:bottoms[i],i] = 1
return mask
@checks(DARKLINE)
def latin_filter(line,scale=1.0,r=1.5,debug=0):
"""Filter out noise from a text line in Latin alphabets."""
bin = (line>0.5*amax(line))
mask = latin_mask(bin,scale=scale,r=r,debug=debug)
mask = morph.keep_marked(bin,mask)
mask = filters.maximum_filter(mask,3)
return line*mask
| apache-2.0 |
LantaoJin/spark | python/pyspark/sql/tests/test_pandas_udf_scalar.py | 3 | 37309 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import datetime
import os
import random
import shutil
import sys
import tempfile
import time
import unittest
if sys.version >= '3':
unicode = str
from datetime import date, datetime
from decimal import Decimal
from pyspark.rdd import PythonEvalType
from pyspark.sql import Column
from pyspark.sql.functions import array, col, expr, lit, sum, struct, udf, pandas_udf
from pyspark.sql.types import Row
from pyspark.sql.types import *
from pyspark.sql.utils import AnalysisException
from pyspark.testing.sqlutils import ReusedSQLTestCase, test_compiled,\
test_not_compiled_message, have_pandas, have_pyarrow, pandas_requirement_message, \
pyarrow_requirement_message
from pyspark.testing.utils import QuietTest
if have_pandas:
import pandas as pd
if have_pyarrow:
import pyarrow as pa
@unittest.skipIf(
not have_pandas or not have_pyarrow,
pandas_requirement_message or pyarrow_requirement_message)
class ScalarPandasUDFTests(ReusedSQLTestCase):
@classmethod
def setUpClass(cls):
ReusedSQLTestCase.setUpClass()
# Synchronize default timezone between Python and Java
cls.tz_prev = os.environ.get("TZ", None) # save current tz if set
tz = "America/Los_Angeles"
os.environ["TZ"] = tz
time.tzset()
cls.sc.environment["TZ"] = tz
cls.spark.conf.set("spark.sql.session.timeZone", tz)
@classmethod
def tearDownClass(cls):
del os.environ["TZ"]
if cls.tz_prev is not None:
os.environ["TZ"] = cls.tz_prev
time.tzset()
ReusedSQLTestCase.tearDownClass()
@property
def nondeterministic_vectorized_udf(self):
import numpy as np
@pandas_udf('double')
def random_udf(v):
return pd.Series(np.random.random(len(v)))
random_udf = random_udf.asNondeterministic()
return random_udf
def test_pandas_udf_tokenize(self):
tokenize = pandas_udf(lambda s: s.apply(lambda str: str.split(' ')),
ArrayType(StringType()))
self.assertEqual(tokenize.returnType, ArrayType(StringType()))
df = self.spark.createDataFrame([("hi boo",), ("bye boo",)], ["vals"])
result = df.select(tokenize("vals").alias("hi"))
self.assertEqual([Row(hi=[u'hi', u'boo']), Row(hi=[u'bye', u'boo'])], result.collect())
def test_pandas_udf_nested_arrays(self):
tokenize = pandas_udf(lambda s: s.apply(lambda str: [str.split(' ')]),
ArrayType(ArrayType(StringType())))
self.assertEqual(tokenize.returnType, ArrayType(ArrayType(StringType())))
df = self.spark.createDataFrame([("hi boo",), ("bye boo",)], ["vals"])
result = df.select(tokenize("vals").alias("hi"))
self.assertEqual([Row(hi=[[u'hi', u'boo']]), Row(hi=[[u'bye', u'boo']])], result.collect())
def test_vectorized_udf_basic(self):
df = self.spark.range(10).select(
col('id').cast('string').alias('str'),
col('id').cast('int').alias('int'),
col('id').alias('long'),
col('id').cast('float').alias('float'),
col('id').cast('double').alias('double'),
col('id').cast('decimal').alias('decimal'),
col('id').cast('boolean').alias('bool'),
array(col('id')).alias('array_long'))
f = lambda x: x
str_f = pandas_udf(f, StringType())
int_f = pandas_udf(f, IntegerType())
long_f = pandas_udf(f, LongType())
float_f = pandas_udf(f, FloatType())
double_f = pandas_udf(f, DoubleType())
decimal_f = pandas_udf(f, DecimalType())
bool_f = pandas_udf(f, BooleanType())
array_long_f = pandas_udf(f, ArrayType(LongType()))
res = df.select(str_f(col('str')), int_f(col('int')),
long_f(col('long')), float_f(col('float')),
double_f(col('double')), decimal_f('decimal'),
bool_f(col('bool')), array_long_f('array_long'))
self.assertEquals(df.collect(), res.collect())
def test_register_nondeterministic_vectorized_udf_basic(self):
random_pandas_udf = pandas_udf(
lambda x: random.randint(6, 6) + x, IntegerType()).asNondeterministic()
self.assertEqual(random_pandas_udf.deterministic, False)
self.assertEqual(random_pandas_udf.evalType, PythonEvalType.SQL_SCALAR_PANDAS_UDF)
nondeterministic_pandas_udf = self.spark.catalog.registerFunction(
"randomPandasUDF", random_pandas_udf)
self.assertEqual(nondeterministic_pandas_udf.deterministic, False)
self.assertEqual(nondeterministic_pandas_udf.evalType, PythonEvalType.SQL_SCALAR_PANDAS_UDF)
[row] = self.spark.sql("SELECT randomPandasUDF(1)").collect()
self.assertEqual(row[0], 7)
def test_vectorized_udf_null_boolean(self):
data = [(True,), (True,), (None,), (False,)]
schema = StructType().add("bool", BooleanType())
df = self.spark.createDataFrame(data, schema)
bool_f = pandas_udf(lambda x: x, BooleanType())
res = df.select(bool_f(col('bool')))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_null_byte(self):
data = [(None,), (2,), (3,), (4,)]
schema = StructType().add("byte", ByteType())
df = self.spark.createDataFrame(data, schema)
byte_f = pandas_udf(lambda x: x, ByteType())
res = df.select(byte_f(col('byte')))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_null_short(self):
data = [(None,), (2,), (3,), (4,)]
schema = StructType().add("short", ShortType())
df = self.spark.createDataFrame(data, schema)
short_f = pandas_udf(lambda x: x, ShortType())
res = df.select(short_f(col('short')))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_null_int(self):
data = [(None,), (2,), (3,), (4,)]
schema = StructType().add("int", IntegerType())
df = self.spark.createDataFrame(data, schema)
int_f = pandas_udf(lambda x: x, IntegerType())
res = df.select(int_f(col('int')))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_null_long(self):
data = [(None,), (2,), (3,), (4,)]
schema = StructType().add("long", LongType())
df = self.spark.createDataFrame(data, schema)
long_f = pandas_udf(lambda x: x, LongType())
res = df.select(long_f(col('long')))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_null_float(self):
data = [(3.0,), (5.0,), (-1.0,), (None,)]
schema = StructType().add("float", FloatType())
df = self.spark.createDataFrame(data, schema)
float_f = pandas_udf(lambda x: x, FloatType())
res = df.select(float_f(col('float')))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_null_double(self):
data = [(3.0,), (5.0,), (-1.0,), (None,)]
schema = StructType().add("double", DoubleType())
df = self.spark.createDataFrame(data, schema)
double_f = pandas_udf(lambda x: x, DoubleType())
res = df.select(double_f(col('double')))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_null_decimal(self):
data = [(Decimal(3.0),), (Decimal(5.0),), (Decimal(-1.0),), (None,)]
schema = StructType().add("decimal", DecimalType(38, 18))
df = self.spark.createDataFrame(data, schema)
decimal_f = pandas_udf(lambda x: x, DecimalType(38, 18))
res = df.select(decimal_f(col('decimal')))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_null_string(self):
data = [("foo",), (None,), ("bar",), ("bar",)]
schema = StructType().add("str", StringType())
df = self.spark.createDataFrame(data, schema)
str_f = pandas_udf(lambda x: x, StringType())
res = df.select(str_f(col('str')))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_string_in_udf(self):
df = self.spark.range(10)
str_f = pandas_udf(lambda x: pd.Series(map(str, x)), StringType())
actual = df.select(str_f(col('id')))
expected = df.select(col('id').cast('string'))
self.assertEquals(expected.collect(), actual.collect())
def test_vectorized_udf_datatype_string(self):
df = self.spark.range(10).select(
col('id').cast('string').alias('str'),
col('id').cast('int').alias('int'),
col('id').alias('long'),
col('id').cast('float').alias('float'),
col('id').cast('double').alias('double'),
col('id').cast('decimal').alias('decimal'),
col('id').cast('boolean').alias('bool'))
f = lambda x: x
str_f = pandas_udf(f, 'string')
int_f = pandas_udf(f, 'integer')
long_f = pandas_udf(f, 'long')
float_f = pandas_udf(f, 'float')
double_f = pandas_udf(f, 'double')
decimal_f = pandas_udf(f, 'decimal(38, 18)')
bool_f = pandas_udf(f, 'boolean')
res = df.select(str_f(col('str')), int_f(col('int')),
long_f(col('long')), float_f(col('float')),
double_f(col('double')), decimal_f('decimal'),
bool_f(col('bool')))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_null_binary(self):
data = [(bytearray(b"a"),), (None,), (bytearray(b"bb"),), (bytearray(b"ccc"),)]
schema = StructType().add("binary", BinaryType())
df = self.spark.createDataFrame(data, schema)
str_f = pandas_udf(lambda x: x, BinaryType())
res = df.select(str_f(col('binary')))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_array_type(self):
data = [([1, 2],), ([3, 4],)]
array_schema = StructType([StructField("array", ArrayType(IntegerType()))])
df = self.spark.createDataFrame(data, schema=array_schema)
array_f = pandas_udf(lambda x: x, ArrayType(IntegerType()))
result = df.select(array_f(col('array')))
self.assertEquals(df.collect(), result.collect())
def test_vectorized_udf_null_array(self):
data = [([1, 2],), (None,), (None,), ([3, 4],), (None,)]
array_schema = StructType([StructField("array", ArrayType(IntegerType()))])
df = self.spark.createDataFrame(data, schema=array_schema)
array_f = pandas_udf(lambda x: x, ArrayType(IntegerType()))
result = df.select(array_f(col('array')))
self.assertEquals(df.collect(), result.collect())
def test_vectorized_udf_struct_type(self):
df = self.spark.range(10)
return_type = StructType([
StructField('id', LongType()),
StructField('str', StringType())])
def func(id):
return pd.DataFrame({'id': id, 'str': id.apply(unicode)})
f = pandas_udf(func, returnType=return_type)
expected = df.select(struct(col('id'), col('id').cast('string').alias('str'))
.alias('struct')).collect()
actual = df.select(f(col('id')).alias('struct')).collect()
self.assertEqual(expected, actual)
g = pandas_udf(func, 'id: long, str: string')
actual = df.select(g(col('id')).alias('struct')).collect()
self.assertEqual(expected, actual)
struct_f = pandas_udf(lambda x: x, return_type)
actual = df.select(struct_f(struct(col('id'), col('id').cast('string').alias('str'))))
self.assertEqual(expected, actual.collect())
def test_vectorized_udf_struct_complex(self):
df = self.spark.range(10)
return_type = StructType([
StructField('ts', TimestampType()),
StructField('arr', ArrayType(LongType()))])
@pandas_udf(returnType=return_type)
def f(id):
return pd.DataFrame({'ts': id.apply(lambda i: pd.Timestamp(i)),
'arr': id.apply(lambda i: [i, i + 1])})
actual = df.withColumn('f', f(col('id'))).collect()
for i, row in enumerate(actual):
id, f = row
self.assertEqual(i, id)
self.assertEqual(pd.Timestamp(i).to_pydatetime(), f[0])
self.assertListEqual([i, i + 1], f[1])
def test_vectorized_udf_nested_struct(self):
nested_type = StructType([
StructField('id', IntegerType()),
StructField('nested', StructType([
StructField('foo', StringType()),
StructField('bar', FloatType())
]))
])
with QuietTest(self.sc):
with self.assertRaisesRegexp(
Exception,
'Invalid returnType with scalar Pandas UDFs'):
pandas_udf(lambda x: x, returnType=nested_type)
def test_vectorized_udf_complex(self):
df = self.spark.range(10).select(
col('id').cast('int').alias('a'),
col('id').cast('int').alias('b'),
col('id').cast('double').alias('c'))
add = pandas_udf(lambda x, y: x + y, IntegerType())
power2 = pandas_udf(lambda x: 2 ** x, IntegerType())
mul = pandas_udf(lambda x, y: x * y, DoubleType())
res = df.select(add(col('a'), col('b')), power2(col('a')), mul(col('b'), col('c')))
expected = df.select(expr('a + b'), expr('power(2, a)'), expr('b * c'))
self.assertEquals(expected.collect(), res.collect())
def test_vectorized_udf_exception(self):
df = self.spark.range(10)
raise_exception = pandas_udf(lambda x: x * (1 / 0), LongType())
with QuietTest(self.sc):
with self.assertRaisesRegexp(Exception, 'division( or modulo)? by zero'):
df.select(raise_exception(col('id'))).collect()
def test_vectorized_udf_invalid_length(self):
df = self.spark.range(10)
raise_exception = pandas_udf(lambda _: pd.Series(1), LongType())
with QuietTest(self.sc):
with self.assertRaisesRegexp(
Exception,
'Result vector from pandas_udf was not the required length'):
df.select(raise_exception(col('id'))).collect()
def test_vectorized_udf_chained(self):
df = self.spark.range(10)
f = pandas_udf(lambda x: x + 1, LongType())
g = pandas_udf(lambda x: x - 1, LongType())
res = df.select(g(f(col('id'))))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_chained_struct_type(self):
df = self.spark.range(10)
return_type = StructType([
StructField('id', LongType()),
StructField('str', StringType())])
@pandas_udf(return_type)
def f(id):
return pd.DataFrame({'id': id, 'str': id.apply(unicode)})
g = pandas_udf(lambda x: x, return_type)
expected = df.select(struct(col('id'), col('id').cast('string').alias('str'))
.alias('struct')).collect()
actual = df.select(g(f(col('id'))).alias('struct')).collect()
self.assertEqual(expected, actual)
def test_vectorized_udf_wrong_return_type(self):
with QuietTest(self.sc):
with self.assertRaisesRegexp(
NotImplementedError,
'Invalid returnType.*scalar Pandas UDF.*MapType'):
pandas_udf(lambda x: x * 1.0, MapType(LongType(), LongType()))
def test_vectorized_udf_return_scalar(self):
df = self.spark.range(10)
f = pandas_udf(lambda x: 1.0, DoubleType())
with QuietTest(self.sc):
with self.assertRaisesRegexp(Exception, 'Return.*type.*Series'):
df.select(f(col('id'))).collect()
def test_vectorized_udf_decorator(self):
df = self.spark.range(10)
@pandas_udf(returnType=LongType())
def identity(x):
return x
res = df.select(identity(col('id')))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_empty_partition(self):
df = self.spark.createDataFrame(self.sc.parallelize([Row(id=1)], 2))
f = pandas_udf(lambda x: x, LongType())
res = df.select(f(col('id')))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_struct_with_empty_partition(self):
df = self.spark.createDataFrame(self.sc.parallelize([Row(id=1)], 2))\
.withColumn('name', lit('John Doe'))
@pandas_udf("first string, last string")
def split_expand(n):
return n.str.split(expand=True)
result = df.select(split_expand('name')).collect()
self.assertEqual(1, len(result))
row = result[0]
self.assertEqual('John', row[0]['first'])
self.assertEqual('Doe', row[0]['last'])
def test_vectorized_udf_varargs(self):
df = self.spark.createDataFrame(self.sc.parallelize([Row(id=1)], 2))
f = pandas_udf(lambda *v: v[0], LongType())
res = df.select(f(col('id')))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_unsupported_types(self):
with QuietTest(self.sc):
with self.assertRaisesRegexp(
NotImplementedError,
'Invalid returnType.*scalar Pandas UDF.*MapType'):
pandas_udf(lambda x: x, MapType(StringType(), IntegerType()))
with self.assertRaisesRegexp(
NotImplementedError,
'Invalid returnType.*scalar Pandas UDF.*ArrayType.StructType'):
pandas_udf(lambda x: x, ArrayType(StructType([StructField('a', IntegerType())])))
def test_vectorized_udf_dates(self):
schema = StructType().add("idx", LongType()).add("date", DateType())
data = [(0, date(1969, 1, 1),),
(1, date(2012, 2, 2),),
(2, None,),
(3, date(2100, 4, 4),),
(4, date(2262, 4, 12),)]
df = self.spark.createDataFrame(data, schema=schema)
date_copy = pandas_udf(lambda t: t, returnType=DateType())
df = df.withColumn("date_copy", date_copy(col("date")))
@pandas_udf(returnType=StringType())
def check_data(idx, date, date_copy):
msgs = []
is_equal = date.isnull()
for i in range(len(idx)):
if (is_equal[i] and data[idx[i]][1] is None) or \
date[i] == data[idx[i]][1]:
msgs.append(None)
else:
msgs.append(
"date values are not equal (date='%s': data[%d][1]='%s')"
% (date[i], idx[i], data[idx[i]][1]))
return pd.Series(msgs)
result = df.withColumn("check_data",
check_data(col("idx"), col("date"), col("date_copy"))).collect()
self.assertEquals(len(data), len(result))
for i in range(len(result)):
self.assertEquals(data[i][1], result[i][1]) # "date" col
self.assertEquals(data[i][1], result[i][2]) # "date_copy" col
self.assertIsNone(result[i][3]) # "check_data" col
def test_vectorized_udf_timestamps(self):
schema = StructType([
StructField("idx", LongType(), True),
StructField("timestamp", TimestampType(), True)])
data = [(0, datetime(1969, 1, 1, 1, 1, 1)),
(1, datetime(2012, 2, 2, 2, 2, 2)),
(2, None),
(3, datetime(2100, 3, 3, 3, 3, 3))]
df = self.spark.createDataFrame(data, schema=schema)
# Check that a timestamp passed through a pandas_udf will not be altered by timezone calc
f_timestamp_copy = pandas_udf(lambda t: t, returnType=TimestampType())
df = df.withColumn("timestamp_copy", f_timestamp_copy(col("timestamp")))
@pandas_udf(returnType=StringType())
def check_data(idx, timestamp, timestamp_copy):
msgs = []
is_equal = timestamp.isnull() # use this array to check values are equal
for i in range(len(idx)):
# Check that timestamps are as expected in the UDF
if (is_equal[i] and data[idx[i]][1] is None) or \
timestamp[i].to_pydatetime() == data[idx[i]][1]:
msgs.append(None)
else:
msgs.append(
"timestamp values are not equal (timestamp='%s': data[%d][1]='%s')"
% (timestamp[i], idx[i], data[idx[i]][1]))
return pd.Series(msgs)
result = df.withColumn("check_data", check_data(col("idx"), col("timestamp"),
col("timestamp_copy"))).collect()
# Check that collection values are correct
self.assertEquals(len(data), len(result))
for i in range(len(result)):
self.assertEquals(data[i][1], result[i][1]) # "timestamp" col
self.assertEquals(data[i][1], result[i][2]) # "timestamp_copy" col
self.assertIsNone(result[i][3]) # "check_data" col
def test_vectorized_udf_return_timestamp_tz(self):
df = self.spark.range(10)
@pandas_udf(returnType=TimestampType())
def gen_timestamps(id):
ts = [pd.Timestamp(i, unit='D', tz='America/Los_Angeles') for i in id]
return pd.Series(ts)
result = df.withColumn("ts", gen_timestamps(col("id"))).collect()
spark_ts_t = TimestampType()
for r in result:
i, ts = r
ts_tz = pd.Timestamp(i, unit='D', tz='America/Los_Angeles').to_pydatetime()
expected = spark_ts_t.fromInternal(spark_ts_t.toInternal(ts_tz))
self.assertEquals(expected, ts)
def test_vectorized_udf_check_config(self):
with self.sql_conf({"spark.sql.execution.arrow.maxRecordsPerBatch": 3}):
df = self.spark.range(10, numPartitions=1)
@pandas_udf(returnType=LongType())
def check_records_per_batch(x):
return pd.Series(x.size).repeat(x.size)
result = df.select(check_records_per_batch(col("id"))).collect()
for (r,) in result:
self.assertTrue(r <= 3)
def test_vectorized_udf_timestamps_respect_session_timezone(self):
schema = StructType([
StructField("idx", LongType(), True),
StructField("timestamp", TimestampType(), True)])
data = [(1, datetime(1969, 1, 1, 1, 1, 1)),
(2, datetime(2012, 2, 2, 2, 2, 2)),
(3, None),
(4, datetime(2100, 3, 3, 3, 3, 3))]
df = self.spark.createDataFrame(data, schema=schema)
f_timestamp_copy = pandas_udf(lambda ts: ts, TimestampType())
internal_value = pandas_udf(
lambda ts: ts.apply(lambda ts: ts.value if ts is not pd.NaT else None), LongType())
timezone = "America/New_York"
with self.sql_conf({
"spark.sql.execution.pandas.respectSessionTimeZone": False,
"spark.sql.session.timeZone": timezone}):
df_la = df.withColumn("tscopy", f_timestamp_copy(col("timestamp"))) \
.withColumn("internal_value", internal_value(col("timestamp")))
result_la = df_la.select(col("idx"), col("internal_value")).collect()
# Correct result_la by adjusting 3 hours difference between Los Angeles and New York
diff = 3 * 60 * 60 * 1000 * 1000 * 1000
result_la_corrected = \
df_la.select(col("idx"), col("tscopy"), col("internal_value") + diff).collect()
with self.sql_conf({
"spark.sql.execution.pandas.respectSessionTimeZone": True,
"spark.sql.session.timeZone": timezone}):
df_ny = df.withColumn("tscopy", f_timestamp_copy(col("timestamp"))) \
.withColumn("internal_value", internal_value(col("timestamp")))
result_ny = df_ny.select(col("idx"), col("tscopy"), col("internal_value")).collect()
self.assertNotEqual(result_ny, result_la)
self.assertEqual(result_ny, result_la_corrected)
def test_nondeterministic_vectorized_udf(self):
# Test that nondeterministic UDFs are evaluated only once in chained UDF evaluations
@pandas_udf('double')
def plus_ten(v):
return v + 10
random_udf = self.nondeterministic_vectorized_udf
df = self.spark.range(10).withColumn('rand', random_udf(col('id')))
result1 = df.withColumn('plus_ten(rand)', plus_ten(df['rand'])).toPandas()
self.assertEqual(random_udf.deterministic, False)
self.assertTrue(result1['plus_ten(rand)'].equals(result1['rand'] + 10))
def test_nondeterministic_vectorized_udf_in_aggregate(self):
df = self.spark.range(10)
random_udf = self.nondeterministic_vectorized_udf
with QuietTest(self.sc):
with self.assertRaisesRegexp(AnalysisException, 'nondeterministic'):
df.groupby(df.id).agg(sum(random_udf(df.id))).collect()
with self.assertRaisesRegexp(AnalysisException, 'nondeterministic'):
df.agg(sum(random_udf(df.id))).collect()
def test_register_vectorized_udf_basic(self):
df = self.spark.range(10).select(
col('id').cast('int').alias('a'),
col('id').cast('int').alias('b'))
original_add = pandas_udf(lambda x, y: x + y, IntegerType())
self.assertEqual(original_add.deterministic, True)
self.assertEqual(original_add.evalType, PythonEvalType.SQL_SCALAR_PANDAS_UDF)
new_add = self.spark.catalog.registerFunction("add1", original_add)
res1 = df.select(new_add(col('a'), col('b')))
res2 = self.spark.sql(
"SELECT add1(t.a, t.b) FROM (SELECT id as a, id as b FROM range(10)) t")
expected = df.select(expr('a + b'))
self.assertEquals(expected.collect(), res1.collect())
self.assertEquals(expected.collect(), res2.collect())
# Regression test for SPARK-23314
def test_timestamp_dst(self):
# Daylight saving time for Los Angeles for 2015 is Sun, Nov 1 at 2:00 am
dt = [datetime(2015, 11, 1, 0, 30),
datetime(2015, 11, 1, 1, 30),
datetime(2015, 11, 1, 2, 30)]
df = self.spark.createDataFrame(dt, 'timestamp').toDF('time')
foo_udf = pandas_udf(lambda x: x, 'timestamp')
result = df.withColumn('time', foo_udf(df.time))
self.assertEquals(df.collect(), result.collect())
@unittest.skipIf(sys.version_info[:2] < (3, 5), "Type hints are supported from Python 3.5.")
def test_type_annotation(self):
# Regression test to check if type hints can be used. See SPARK-23569.
# Note that it throws an error during compilation in lower Python versions if 'exec'
# is not used. Also, note that we explicitly use another dictionary to avoid modifications
# in the current 'locals()'.
#
# Hyukjin: I think it's an ugly way to test issues about syntax specific in
# higher versions of Python, which we shouldn't encourage. This was the last resort
# I could come up with at that time.
_locals = {}
exec(
"import pandas as pd\ndef noop(col: pd.Series) -> pd.Series: return col",
_locals)
df = self.spark.range(1).select(pandas_udf(f=_locals['noop'], returnType='bigint')('id'))
self.assertEqual(df.first()[0], 0)
def test_mixed_udf(self):
df = self.spark.range(0, 1).toDF('v')
# Test mixture of multiple UDFs and Pandas UDFs.
@udf('int')
def f1(x):
assert type(x) == int
return x + 1
@pandas_udf('int')
def f2(x):
assert type(x) == pd.Series
return x + 10
@udf('int')
def f3(x):
assert type(x) == int
return x + 100
@pandas_udf('int')
def f4(x):
assert type(x) == pd.Series
return x + 1000
# Test single expression with chained UDFs
df_chained_1 = df.withColumn('f2_f1', f2(f1(df['v'])))
df_chained_2 = df.withColumn('f3_f2_f1', f3(f2(f1(df['v']))))
df_chained_3 = df.withColumn('f4_f3_f2_f1', f4(f3(f2(f1(df['v'])))))
df_chained_4 = df.withColumn('f4_f2_f1', f4(f2(f1(df['v']))))
df_chained_5 = df.withColumn('f4_f3_f1', f4(f3(f1(df['v']))))
expected_chained_1 = df.withColumn('f2_f1', df['v'] + 11)
expected_chained_2 = df.withColumn('f3_f2_f1', df['v'] + 111)
expected_chained_3 = df.withColumn('f4_f3_f2_f1', df['v'] + 1111)
expected_chained_4 = df.withColumn('f4_f2_f1', df['v'] + 1011)
expected_chained_5 = df.withColumn('f4_f3_f1', df['v'] + 1101)
self.assertEquals(expected_chained_1.collect(), df_chained_1.collect())
self.assertEquals(expected_chained_2.collect(), df_chained_2.collect())
self.assertEquals(expected_chained_3.collect(), df_chained_3.collect())
self.assertEquals(expected_chained_4.collect(), df_chained_4.collect())
self.assertEquals(expected_chained_5.collect(), df_chained_5.collect())
# Test multiple mixed UDF expressions in a single projection
df_multi_1 = df \
.withColumn('f1', f1(col('v'))) \
.withColumn('f2', f2(col('v'))) \
.withColumn('f3', f3(col('v'))) \
.withColumn('f4', f4(col('v'))) \
.withColumn('f2_f1', f2(col('f1'))) \
.withColumn('f3_f1', f3(col('f1'))) \
.withColumn('f4_f1', f4(col('f1'))) \
.withColumn('f3_f2', f3(col('f2'))) \
.withColumn('f4_f2', f4(col('f2'))) \
.withColumn('f4_f3', f4(col('f3'))) \
.withColumn('f3_f2_f1', f3(col('f2_f1'))) \
.withColumn('f4_f2_f1', f4(col('f2_f1'))) \
.withColumn('f4_f3_f1', f4(col('f3_f1'))) \
.withColumn('f4_f3_f2', f4(col('f3_f2'))) \
.withColumn('f4_f3_f2_f1', f4(col('f3_f2_f1')))
# Test mixed udfs in a single expression
df_multi_2 = df \
.withColumn('f1', f1(col('v'))) \
.withColumn('f2', f2(col('v'))) \
.withColumn('f3', f3(col('v'))) \
.withColumn('f4', f4(col('v'))) \
.withColumn('f2_f1', f2(f1(col('v')))) \
.withColumn('f3_f1', f3(f1(col('v')))) \
.withColumn('f4_f1', f4(f1(col('v')))) \
.withColumn('f3_f2', f3(f2(col('v')))) \
.withColumn('f4_f2', f4(f2(col('v')))) \
.withColumn('f4_f3', f4(f3(col('v')))) \
.withColumn('f3_f2_f1', f3(f2(f1(col('v'))))) \
.withColumn('f4_f2_f1', f4(f2(f1(col('v'))))) \
.withColumn('f4_f3_f1', f4(f3(f1(col('v'))))) \
.withColumn('f4_f3_f2', f4(f3(f2(col('v'))))) \
.withColumn('f4_f3_f2_f1', f4(f3(f2(f1(col('v'))))))
expected = df \
.withColumn('f1', df['v'] + 1) \
.withColumn('f2', df['v'] + 10) \
.withColumn('f3', df['v'] + 100) \
.withColumn('f4', df['v'] + 1000) \
.withColumn('f2_f1', df['v'] + 11) \
.withColumn('f3_f1', df['v'] + 101) \
.withColumn('f4_f1', df['v'] + 1001) \
.withColumn('f3_f2', df['v'] + 110) \
.withColumn('f4_f2', df['v'] + 1010) \
.withColumn('f4_f3', df['v'] + 1100) \
.withColumn('f3_f2_f1', df['v'] + 111) \
.withColumn('f4_f2_f1', df['v'] + 1011) \
.withColumn('f4_f3_f1', df['v'] + 1101) \
.withColumn('f4_f3_f2', df['v'] + 1110) \
.withColumn('f4_f3_f2_f1', df['v'] + 1111)
self.assertEquals(expected.collect(), df_multi_1.collect())
self.assertEquals(expected.collect(), df_multi_2.collect())
def test_mixed_udf_and_sql(self):
df = self.spark.range(0, 1).toDF('v')
# Test mixture of UDFs, Pandas UDFs and SQL expression.
@udf('int')
def f1(x):
assert type(x) == int
return x + 1
def f2(x):
assert type(x) == Column
return x + 10
@pandas_udf('int')
def f3(x):
assert type(x) == pd.Series
return x + 100
df1 = df.withColumn('f1', f1(df['v'])) \
.withColumn('f2', f2(df['v'])) \
.withColumn('f3', f3(df['v'])) \
.withColumn('f1_f2', f1(f2(df['v']))) \
.withColumn('f1_f3', f1(f3(df['v']))) \
.withColumn('f2_f1', f2(f1(df['v']))) \
.withColumn('f2_f3', f2(f3(df['v']))) \
.withColumn('f3_f1', f3(f1(df['v']))) \
.withColumn('f3_f2', f3(f2(df['v']))) \
.withColumn('f1_f2_f3', f1(f2(f3(df['v'])))) \
.withColumn('f1_f3_f2', f1(f3(f2(df['v'])))) \
.withColumn('f2_f1_f3', f2(f1(f3(df['v'])))) \
.withColumn('f2_f3_f1', f2(f3(f1(df['v'])))) \
.withColumn('f3_f1_f2', f3(f1(f2(df['v'])))) \
.withColumn('f3_f2_f1', f3(f2(f1(df['v']))))
expected = df.withColumn('f1', df['v'] + 1) \
.withColumn('f2', df['v'] + 10) \
.withColumn('f3', df['v'] + 100) \
.withColumn('f1_f2', df['v'] + 11) \
.withColumn('f1_f3', df['v'] + 101) \
.withColumn('f2_f1', df['v'] + 11) \
.withColumn('f2_f3', df['v'] + 110) \
.withColumn('f3_f1', df['v'] + 101) \
.withColumn('f3_f2', df['v'] + 110) \
.withColumn('f1_f2_f3', df['v'] + 111) \
.withColumn('f1_f3_f2', df['v'] + 111) \
.withColumn('f2_f1_f3', df['v'] + 111) \
.withColumn('f2_f3_f1', df['v'] + 111) \
.withColumn('f3_f1_f2', df['v'] + 111) \
.withColumn('f3_f2_f1', df['v'] + 111)
self.assertEquals(expected.collect(), df1.collect())
# SPARK-24721
@unittest.skipIf(not test_compiled, test_not_compiled_message)
def test_datasource_with_udf(self):
# Same as SQLTests.test_datasource_with_udf, but with Pandas UDF
        # This needs to be a separate test because the Arrow dependency is optional
import numpy as np
path = tempfile.mkdtemp()
shutil.rmtree(path)
try:
self.spark.range(1).write.mode("overwrite").format('csv').save(path)
filesource_df = self.spark.read.option('inferSchema', True).csv(path).toDF('i')
datasource_df = self.spark.read \
.format("org.apache.spark.sql.sources.SimpleScanSource") \
.option('from', 0).option('to', 1).load().toDF('i')
datasource_v2_df = self.spark.read \
.format("org.apache.spark.sql.sources.v2.SimpleDataSourceV2") \
.load().toDF('i', 'j')
c1 = pandas_udf(lambda x: x + 1, 'int')(lit(1))
c2 = pandas_udf(lambda x: x + 1, 'int')(col('i'))
f1 = pandas_udf(lambda x: pd.Series(np.repeat(False, len(x))), 'boolean')(lit(1))
f2 = pandas_udf(lambda x: pd.Series(np.repeat(False, len(x))), 'boolean')(col('i'))
for df in [filesource_df, datasource_df, datasource_v2_df]:
result = df.withColumn('c', c1)
expected = df.withColumn('c', lit(2))
self.assertEquals(expected.collect(), result.collect())
for df in [filesource_df, datasource_df, datasource_v2_df]:
result = df.withColumn('c', c2)
expected = df.withColumn('c', col('i') + 1)
self.assertEquals(expected.collect(), result.collect())
for df in [filesource_df, datasource_df, datasource_v2_df]:
for f in [f1, f2]:
result = df.filter(f)
self.assertEquals(0, result.count())
finally:
shutil.rmtree(path)
if __name__ == "__main__":
from pyspark.sql.tests.test_pandas_udf_scalar import *
try:
import xmlrunner
testRunner = xmlrunner.XMLTestRunner(output='target/test-reports')
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
| apache-2.0 |
makkemal/NGIMASEM | usnc.py | 1 | 21558 | import numpy as np
import pandas as pd
from scipy.optimize import least_squares
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
from matplotlib.ticker import LinearLocator, FormatStrFormatter
from sympy import *
from numpy import array
from sympy import log
from sympy.utilities.codegen import (InputArgument, OutputArgument,InOutArgument)
from sympy.printing import fcode
To,Fo = symbols('To,Fo')
def creepstrain(P,T1,F1,stress1,Ecp1,Ecs1,T2,F2,stress2,GetEcs,GetEcp,NumSteps):
    # Integrates from F1 to F2 using NumSteps equally spaced dF steps for constant stress states, i.e. stress1 = stress2
F = np.linspace(F1,F2,NumSteps+1)
dF = (F2-F1)/NumSteps
CS = []
CP = []
for i in range(NumSteps):
Ecs2,dEcsdS = GetEcs(T1,0.,stress1,Ecs1,T2,dF,stress2,P)
Ecp2,dEcpdS = GetEcp(T1,0.,stress1,Ecp1,T2,dF,stress2,P)
if i == 0:
Ecs2 = Ecs2/2
dEcsdS = dEcsdS/2
Ecp2 = Ecp2/2
dEcpdS = dEcpdS/2
Ecp1 = Ecp2
Ecs1 = Ecs2
CS.append(Ecs2[0])
CP.append(Ecp2[0])
return (F[0:-1]+F[1:])/2.,np.array(CP),np.array(CS)
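# --- Editor's illustrative sketch (not part of the original module) ---------------
# creepstrain() expects GetEcs/GetEcp callables with the 8-argument signature used
# above; the toy linear-rate callables, parameters and load case below are
# assumptions for demonstration only, standing in for the generated material model.
def _creepstrain_example():
    def toy_GetEcs(T1, F1, s1, Ecs1, T2, dF, s2, P):
        Ecs2 = np.atleast_1d(Ecs1 + P[0] * s2 * dF)   # toy secondary creep update
        dEcsdS = np.atleast_1d(P[0] * dF)
        return Ecs2, dEcsdS
    def toy_GetEcp(T1, F1, s1, Ecp1, T2, dF, s2, P):
        Ecp2 = np.atleast_1d(Ecp1 + P[1] * s2 * dF)   # toy primary creep update
        dEcpdS = np.atleast_1d(P[1] * dF)
        return Ecp2, dEcpdS
    P = [1.0e-4, 5.0e-4]                              # hypothetical model parameters
    # integrate from fluence 0 to 2 dpa at 500 C under a constant stress of 10
    Fmid, Ecp, Ecs = creepstrain(P, 500., 0., 10., 0., 0., 500., 2., 10.,
                                 toy_GetEcs, toy_GetEcp, NumSteps=20)
    return Fmid, Ecp, Ecs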
def WRITE_FORTRAN_FUNCTION(NAMEFUNCTION,EXPRESSION,ARUGMENT_SEQUENCE):
from sympy.utilities.codegen import codegen
from sympy.parsing.sympy_parser import parse_expr
from sympy import symbols
exec(ARUGMENT_SEQUENCE+'=symbols("'+ARUGMENT_SEQUENCE+'")')
result = codegen([NAMEFUNCTION, EXPRESSION], 'F95',header=False,argument_sequence=eval('['+ARUGMENT_SEQUENCE+']'))
return result[0][1]
def WRITE_FORTRAN_SUBROUTINE(NAMESUBROUTINE,EXPRESSION,ARUGMENT_SEQUENCE):
from sympy.utilities.codegen import codegen
from sympy.parsing.sympy_parser import parse_expr
from sympy import symbols
exec(ARUGMENT_SEQUENCE+'=symbols("'+ARUGMENT_SEQUENCE+'")')
result = codegen([NAMESUBROUTINE, EXPRESSION], 'F95',header=False,argument_sequence=eval('['+ARUGMENT_SEQUENCE+']'))
return result[0][1]
def FORTRAN_FUNC(fid,name_subroutine,expression,P,Tmin,Tmax,Fmin,Fmax):
from sympy.utilities.codegen import codegen
from sympy.parsing.sympy_parser import parse_expr
OUTPUT, To, Fo, T, F = symbols('OUTPUT To Fo T F')
expression = str(eval(expression))
#normalizeT = str(eval('(To-Tmin)/(Tmax-Tmin)'))
#normalizeF = str(eval('(Fo-Fmin)/(Fmax-Fmin)'))
#expression = str(eval(expression).subs({T:normalizeT,F:normalizeF}))
return expression
def FORTRAN_SUB(fid,name_subroutine,expression,P,Tmin,Tmax,Fmin,Fmax):
from sympy.utilities.codegen import codegen
from sympy.parsing.sympy_parser import parse_expr
from sympy import Max
OUTPUT, To, Fo, T, F = symbols('OUTPUT To Fo T F')
normalizeT = str(eval('(To-Tmin)/(Tmax-Tmin)'))
normalizeF = str(eval('(Fo-Fmin)/(Fmax-Fmin)'))
expression = str(eval(expression).subs({T:normalizeT,F:normalizeF}))
# expr = Eq(OUTPUT, parse_expr(expression))
# result = codegen([name_subroutine, [expr]], 'F95',header=False,argument_sequence=[To,Fo,OUTPUT])
return expression
def symbols_from_dictionary(md):
# Construct Python Symbolic Symbols from Dictionary Keywords
string1 = ''
count = 1
for key in md.keys():
if count == 1:
string1 = string1+str(key)
count = 2
else:
string1 = string1+','+str(key)
fullstring = string1+'='+'symbols("'+string1+'")'
return fullstring
def loadColumn(filename,sheet,column):
# Load from FILENAME on an Excel Worksheet a specified Column of Data in addition to the Temperature and Fluence
# O1,O2,O3 = LOADCOLUMN(I1,I2,I3)
#
# O1 Temperature
# O2 Fluence
# O3 Column specified by I3
#
# I1 Filename of the Excel Workbook
# I2 Name of the Worksheet
# I3 Name of the Column to be Loaded
xls = pd.ExcelFile(filename)
DATA = xls.parse(sheet)
T = DATA['Temperature(C)']
F = DATA['Fluence(dpa)']
Y = DATA[column]
if not(column == 'Orientation(withregardtoextrudingdirection)'):
isData = ~np.isnan(Y)
return np.array(T[isData],dtype=np.float64),np.array(F[isData],dtype=np.float64),np.array(Y[isData],dtype=np.float64)
else:
return np.array(T),np.array(F),Y
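# Editor's illustrative sketch of how loadColumn() is typically called.  The workbook,
# sheet and column names below are hypothetical placeholders; any workbook with
# 'Temperature(C)' and 'Fluence(dpa)' columns plus the requested column would do.
def _loadColumn_example(filename='graphite_data.xlsx', sheet='Sheet1',
                        column='dL/L_par(%)'):
    T, F, Y = loadColumn(filename, sheet, column)
    print('loaded %d rows: T in [%g, %g] C, F in [%g, %g] dpa'
          % (len(Y), T.min(), T.max(), F.min(), F.max()))
    return T, F, Y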
def residual1D(FORM,PARAMETERS,XDATA,YDATA):
return FORM(XDATA,PARAMETERS) - YDATA
def residual(FORM,PARAMETERS,XDATA,YDATA):
X1 = XDATA[0]
X2 = XDATA[1]
return FORM(X1,X2,PARAMETERS) - YDATA
def normalize(X,Xmin,Xmax):
return (X-Xmin)/(Xmax-Xmin)
def original(X,Xmin,Xmax):
return X*(Xmax-Xmin)+Xmin
def normalize1(X):
Xmin = X.min()
Xmax = X.max()
return (X-Xmin)/(Xmax-Xmin)+1,Xmin,Xmax
def fit1D(t,Y,FORM,PARAMETER0):
def R(PARAMETERS,XDATA,YDATA):
return residual1D(FORM,PARAMETERS,XDATA,YDATA)
res_lsq = least_squares(R, PARAMETER0,loss='soft_l1', f_scale=0.1, args=(t,Y),method='trf', ftol=1e-08, xtol=1e-08, gtol=1e-08,max_nfev=10000)
return res_lsq
def fit(t,f,Y,FORM,PARAMETER0):
# t = normalize(t,t.min(),t.max())
# f = normalize(f,f.min(),f.max())
def R(PARAMETERS,XDATA,YDATA):
return residual(FORM,PARAMETERS,XDATA,YDATA)
res_lsq = least_squares(R, PARAMETER0,loss='soft_l1', f_scale=0.1, args=([t,f],Y),method='trf', ftol=1e-04, xtol=1e-04, gtol=1e-04,max_nfev=10000)
return res_lsq
def axis3d(number):
fig = plt.figure(number)
ax = fig.add_subplot(111, projection='3d')
return ax
def plot_fit(EVALF,T,F,ax):
NT = 11
NF = 11
TT,FF = np.meshgrid(np.linspace(T.min(),T.max(),NT),np.linspace(F.min(),F.max(),NF))
X1 = np.reshape(TT,(NT*NF))
X2 = np.reshape(FF,(NT*NF))
YFUNC = np.reshape(EVALF(X1,X2),(NT,NF))
ax.plot_surface(TT,FF,YFUNC)
return ax
def evalsurface(EVALF,T,F):
NT = 11
NF = 11
TT,FF = np.meshgrid(np.linspace(T.min(),T.max(),NT),np.linspace(F.min(),F.max(),NF))
X1 = np.reshape(TT,(NT*NF))
X2 = np.reshape(FF,(NT*NF))
YFUNC = np.reshape(EVALF(X1,X2),(NT,NF))
return TT,FF,YFUNC
def setconstantsub(Y,Name):
    # O1 = usnc.setconstantsub(I1,I2)
    # The response is set as a constant function of temperature and fluence
    #
    # O1. Generated Fortran code for a subroutine named I2 that returns the constant I1
    #
    # I1. Constant value of the response
    # I2. Name of the Fortran subroutine to be generated
CODE = (
' subroutine {}(Value)\n'.format(Name)+
'!*************************************************************************\n'+
    '! This subroutine returns a constant value\n'+
    '! Output : Value : the constant\n'+
' implicit none\n'+
' real*8 Value\n'+
' Value = {}\n'.format(fcode(Y).strip())+
' return\n'+
' end\n')
return CODE
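# Editor's usage sketch: setconstantsub() only interpolates a value and a subroutine
# name into a Fortran template.  The name 'GetE0par' and the value 9.8 below are
# illustrative assumptions, not values prescribed by this module.
def _setconstantsub_example():
    code = setconstantsub(9.8, 'GetE0par')
    print(code)   # Fortran subroutine whose single output argument is always 9.8
    return code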
def fitlogmodel(t,f,Y,Name,form,NumParams):
T,F = symbols('T,F')
Tmin = t.min()
Tmax = t.max()
Fmin = 0.0
Fmax = f.max()
To = (np.array(t)-Tmin)/(Tmax-Tmin)
Fo = (np.array(f)-Fmin)/(Fmax-Fmin)
Yo = np.log10(np.array(Y))
def EVAL(T,F,P):
Expression = eval(form)
return Expression
result = fit(To,Fo,Yo,EVAL,[0.1]*NumParams)
P = result['x']
def MAKE_PYTHONFUNC(form,P0,Tmin,Tmax,Fmin,Fmax):
def EVAL(To,Fo):
T = (To-Tmin)/(Tmax-Tmin)
F = (Fo-Fmin)/(Fmax-Fmin)
P = P0
Expression = eval(form)
return 10**(Expression)
return EVAL
powerform = '10**'+'('+form+')'
code = Get_Code(Name,fcode(eval(powerform),assign_to="Answer"),Tmin,Tmax,Fmin,Fmax)
return MAKE_PYTHONFUNC(form,np.copy(P),Tmin,Tmax,Fmin,Fmax),result['x'],code
def fit1Dmodel(t,Y,Name,form,NumParams):
T,F = symbols('T,F')
Tmin = t.min()
Tmax = t.max()
To = (np.array(t)-Tmin)/(Tmax-Tmin)
Yo = np.array(Y)
def EVAL(T,P):
Expression = eval(form)
return Expression
result = fit1D(To,Yo,EVAL,[0.1]*NumParams)
P = result['x']
def MAKE_PYTHONFUNC(form,P0,Tmin,Tmax):
def EVAL(To,Fo):
T = (To-Tmin)/(Tmax-Tmin)
P = P0
Expression = eval(form)
return Expression
return EVAL
code = Get_Code1D(Name,fcode(eval(form),assign_to="Answer"),Tmin,Tmax)
return MAKE_PYTHONFUNC(form,np.copy(P),Tmin,Tmax),result['x'],code
def fit1Dlogmodel(t,Y,Name,form,NumParams):
T,F = symbols('T,F')
Tmin = t.min()
Tmax = t.max()
To = (np.array(t)-Tmin)/(Tmax-Tmin)
Yo = np.log10(np.array(Y))
def EVAL(T,P):
Expression = eval(form)
return Expression
result = fit1D(To,Yo,EVAL,[0.1]*NumParams)
P = result['x']
def MAKE_PYTHONFUNC(form,P0,Tmin,Tmax):
def EVAL(To):
T = (To-Tmin)/(Tmax-Tmin)
P = P0
Expression = eval(form)
return 10**(Expression)
return EVAL
powerform = '10**'+'('+form+')'
code = Get_Code1D(Name,fcode(eval(powerform),assign_to="Answer"),Tmin,Tmax)
return MAKE_PYTHONFUNC(form,np.copy(P),Tmin,Tmax),result['x'],code
def fitmodel(t,f,Y,Name,form,NumParams):
T,F = symbols('T,F')
Tmin = t.min()
Tmax = t.max()
Fmin = 0.0
Fmax = f.max()
To = (np.array(t)-Tmin)/(Tmax-Tmin)
Fo = (np.array(f)-Fmin)/(Fmax-Fmin)
Yo = np.array(Y)
def EVAL(T,F,P):
Expression = eval(form)
return Expression
result = fit(To,Fo,Yo,EVAL,[0.1]*NumParams)
P = result['x']
def MAKE_PYTHONFUNC(form,P0,Tmin,Tmax,Fmin,Fmax):
def EVAL(To,Fo):
T = (To-Tmin)/(Tmax-Tmin)
F = (Fo-Fmin)/(Fmax-Fmin)
P = P0
Expression = eval(form)
return Expression
return EVAL
code = Get_Code(Name,fcode(eval(form),assign_to="Answer"),Tmin,Tmax,Fmin,Fmax)
return MAKE_PYTHONFUNC(form,np.copy(P),Tmin,Tmax,Fmin,Fmax),result['x'],code
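# Editor's sketch of driving fitmodel().  The functional form is a string in the
# normalized variables T and F with parameter list P; the synthetic data, the
# quadratic form and the subroutine name below are illustrative assumptions only.
def _fitmodel_example():
    t = np.repeat(np.linspace(300., 900., 10), 10)            # temperatures
    f = np.tile(np.linspace(0., 20., 10), 10)                 # fluences
    y = 1.0 + 0.5*(t - 300.)/600. + 0.2*(f/20.)**2            # synthetic response
    func, params, code = fitmodel(t, f, y, 'GetExample', 'P[0]+P[1]*T+P[2]*F**2', 3)
    print(params)              # fitted parameters in normalized coordinates
    print(func(600., 10.))     # evaluate the fitted surface at T=600, F=10
    return func, params, code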
def GetGet_invDelCode(Dir1,Dir2,Dir3,v1,v2,v3,G1,G2,G3):
CODE = (
' subroutine Get_invDel(T,F,iDel)\n'+
'!*************************************************************************\n'+
'! This subroutine computes the inverse elasticity tensor given the current\n'+
'! temperature and fluence\n'+
'! Input : T : Temperature\n'+
'! F : Fluence\n'+
'! Output : iDel : inverse elasticity tensor\n'+
' implicit none\n'+
' real*8 iDel(6,6)\n'+
' real*8 T,F,c0,c2,c3,T0,b1,b2,b3,b4,b5,b6,b7,b8,b9\n'+
' real*8 E10,E20,E30,nu12,nu23,nu31,G120,G230,G310\n'+
' real*8 a10,a12,a22,a23,a20,a21,k1,EoverE0per,EoverE0par\n'+
' real*8 Eper,Epar\n'+
'\n'+
' call GetE0par(Epar)\n'+
' call GetEoverE0par(T,F,EoverE0par)\n'+
' call GetE0per(Eper)\n'+
' call GetEoverE0per(T,F,EoverE0per)\n'+
' E10 = EoverE0{}*E{}\n'.format(Dir1,Dir1)+
' E20 = EoverE0{}*E{}\n'.format(Dir2,Dir2)+
' E30 = EoverE0{}*E{}\n'.format(Dir3,Dir3)+
' nu12 = {}\n'.format(fcode(v1).strip())+
' nu23 = {}\n'.format(fcode(v2).strip())+
' nu31 = {}\n'.format(fcode(v3).strip())+
' G120 = (EoverE0par+EoverE0per)/2.d0*{}\n'.format(fcode(G1).strip())+
' G230 = (EoverE0par+EoverE0per)/2.d0*{}\n'.format(fcode(G2).strip())+
' G310 = (EoverE0par+EoverE0per)/2.d0*{}\n'.format(fcode(G3).strip())+
'\n'+
' iDel = 0.d0\n'+
' iDel(1,1) = 1.d0/E10\n'+
' iDel(2,2) = 1.d0/E20\n'+
' iDel(3,3) = 1.d0/E30\n'+
' iDel(4,4) = 1.d0/G120\n'+
' iDel(5,5) = 1.d0/G310\n'+
' iDel(6,6) = 1.d0/G230\n'+
'\n'+
' iDel(1,2) = -nu12/E20\n'+
' iDel(1,3) = -nu31/E30\n'+
' iDel(2,1) = -nu12/E10\n'+
' iDel(2,3) = -nu31/E30\n'+
' iDel(3,1) = -nu12/E10\n'+
' iDel(3,2) = -nu31/E20\n'+
'\n'+
' return\n'+
' end\n')
return CODE
def GetWarningCode(Tlow,Thigh,Flow,Fhigh):
CODE = (
' subroutine WarningCode(T,F)\n'+
'!*************************************************************************\n'+
'! This subroutine warns the user when extrapolation in T or F occurs\n'+
'! Input : T : Temperature\n'+
'! F : Fluence\n'+
'! Output : Warning Message Printed to the Screen \n'+
' implicit none\n'+
' real*8 T,F\n'+
'! if (T.lt.{}) then \n'.format(fcode(Tlow).strip())+
"! write(*,*) '*WARNING EXTRAPOLOTION* T BELOW Calibration Data'\n"+
"! write(*,*) 'Temperature=', T\n"+
'! endif\n'+
'! if (T.gt.{}) then \n'.format(fcode(Thigh).strip())+
"! write(*,*) '*WARNING EXTRAPOLOTION* T ABOVE Calibration Data'\n"+
"! write(*,*) 'Temperature=', T\n"+
'! endif\n'+
'! if (F.lt.{}) then \n'.format(fcode(Flow).strip())+
"! write(*,*) '*WARNING EXTRAPOLOTION* F BELOW Calibration Data'\n"+
"! write(*,*) 'Fluence=', F\n"+
'! endif\n'+
'! if (F.gt.{}) then \n'.format(fcode(Fhigh).strip())+
"! write(*,*) '*WARNING EXTRAPOLOTION* F ABOVE Calibration Data'\n"+
"! write(*,*) 'Fluence=', F\n"+
'! endif\n'+
' return\n'+
' end\n')
return CODE
def GetGetTempPosTimeCode(Expression):
CODE = (
' subroutine GetTempPosTime(T,Coords,time)\n'+
'!************************************************************************\n'+
'! This subroutine computes the temperature as a function of time and position\n'+
'! Input : Coords : (x,y,z) coordinate in space\n'+
'! : time : time\n'+
'! Output : T : Temperature\n'+
'\n'+
' implicit none\n'+
' real*8 T,Coords(3),time,X,Y,Z\n'+
'\n'+
' X = Coords(1)\n'+
' Y = Coords(2)\n'+
' Z = Coords(3)\n'+
'{}\n'.format(Expression)+
' return\n'+
' end\n'+
'!*************************************************************************\n')
return CODE
def GetGetFluencePosTimeCode(Expression):
CODE = (
' subroutine GetFluencePosTime(F,Coords,time)\n'+
'!************************************************************************\n'+
    '! This subroutine computes the fluence as a function of time and position\n'+
'! Input : Coords : (x,y,z) coordinate in space\n'+
'! : time : time\n'+
'! Output : F : Fluence\n'+
'\n'+
' implicit none\n'+
' real*8 F,Coords(3),time,X,Y,Z\n'+
'\n'+
' X = Coords(1)\n'+
' Y = Coords(2)\n'+
' Z = Coords(3)\n'+
'{}\n'.format(Expression)+
' return\n'+
' end\n'+
'!*************************************************************************\n')
return CODE
def Get_EwCODE(Term1,Term2,Term3):
CODE = (
' subroutine Get_Ew(T,F,Ew)\n'+
'!*************************************************************************\n'+
'! This subroutine computes the Wigner strain at the end of the time step\n'+
'! Input : T : Temperature\n'+
'! F : Fluence\n'+
    '! Output : Ew   : Wigner strain\n'+
' implicit none\n'+
' real*8 T,F,Ew(6),dL_par,dL_per\n'+
'\n'+
' call Get_Wigner_par(T,F,dL_par)\n'+
' call Get_Wigner_per(T,F,dL_per)\n'+
#" write(*,*) 'DL PAR:',dL_par,'| DL PER:',dL_per\n"+
' Ew = 0.d0\n'+
' Ew(1) = dL_{}\n'.format(Term1.lower())+
' Ew(2) = dL_{}\n'.format(Term2.lower())+
' Ew(3) = dL_{}\n'.format(Term3.lower())+
'\n'+
' return\n'+
' end\n')
return CODE
def Get_EthCODE(Term1,Term2,Term3,Ti):
CODE = (
' subroutine Get_Eth(T,F,Eth)\n'+
'!*************************************************************************\n'+
'! This subroutine computes the thermal strain at the end of the time step\n'+
'! Input : T : Temperature\n'+
'! F : Fluence\n'+
'! Output : Eth : Thermal strain\n'+
' implicit none\n'+
' real*8 T,F,Eth(6),CTE0(3),Ti,b1,b2,b3,b4,b5,b6\n'+
' real*8 a0,a1,a2,CTE(3),Scale,CTE_par,CTE_per,CTE0_par,CTE0_per\n'+
' call Get_CTEoCTE0par(T,F,CTE_par)\n'+
' call Get_CTEoCTE0per(T,F,CTE_per)\n'+
' call Get_CTE0par(T,CTE0_par)\n'+
' call Get_CTE0per(T,CTE0_per)\n'+
'\n'+
#" write(*,*) 'CTE PAR:',CTE_par,'| CTE PER:',CTE_per\n"+
' Eth = 0.d0\n'+
' Eth(1) = CTE0_{}*CTE_{}*(T-{})\n'.format(Term1,Term1,fcode(Ti).strip())+
' Eth(2) = CTE0_{}*CTE_{}*(T-{})\n'.format(Term2,Term2,fcode(Ti).strip())+
' Eth(3) = CTE0_{}*CTE_{}*(T-{})\n'.format(Term3,Term3,fcode(Ti).strip())+
' return\n'+
' end\n')
return CODE
def GetPrimaryRateCode(P,PrimaryCreepRate_Form,PrimaryCreepRate_Form_dEcp,PrimaryCreepRate_Form_dS):
from sympy import symbols
iDc_S, Ecp, iDc, T, F = symbols('iDc_S Ecp iDc T F')
CODE = (
'!***********************************************************************\n'+
' subroutine GetEcp_rate(T,F,stress,Ecp,Ecp_rate,dEcp_rate_dS,\n'+
' . dEcp_rate_dE)\n'+
'!***********************************************************************\n'+
'! This subroutine computes the primary creep rate given temperature,\n'+
'! fluence and stress\n'+
'! Input : T : Temperature\n'+
'! F : Fluence\n'+
'! stress : Stress\n'+
'! Ecp : Primary creep strain\n'+
'! Output : Ecp_rate : Primary creep strain rate (wrt fluence)\n'+
'! dEcp_rate_dS : Derivative of primary creep strain rate wrt stress\n'+
'! dEcp_rate_dE : Scalar derivative of primary creep strain rate\n'+
'! wrt primary creep strain\n'+
' implicit none\n'+
' integer i,j\n'+
' real*8 T,F,stress(6),Ecp(6),Ecp_rate(6),alpha,G0,dEcp_rate_dE\n'+
' real*8 iDel(6,6),iDc(6,6),iDc_S(6),EoverE0_ZF,dEcp_rate_dS(6,6)\n'+
    '\n'+
' call Get_invDel(T,0.d0,iDel)\n'+
' iDc = iDel\n'+
' stress = stress\n'+
' iDc_S = 0.d0\n'+
' do i=1,6\n'+
' do j=1,6\n'+
' iDc_S(i) = iDc_S(i) + iDc(i,j)*stress(j)\n'+
' enddo\n'+
' enddo\n'+
' Ecp_rate = {}\n'.format(fcode(eval(PrimaryCreepRate_Form)).strip())+
' dEcp_rate_dE = {}\n'.format(fcode(eval(PrimaryCreepRate_Form_dEcp)).strip())+
' dEcp_rate_dS = {}\n'.format(fcode(eval(PrimaryCreepRate_Form_dS)).strip())+
' return\n'+
' end\n'+
'!************************************************************************\n')
return CODE
def GetSecondaryRateCode(P,SecondaryCreepRate_Form,SecondaryCreepRate_Form_dS):
from sympy import symbols
iDc_S, Ecp, iDc, T, F = symbols('iDc_S Ecp iDc T F')
CODE = (
'!************************************************************************\n'+
' subroutine GetEcs_rate(T,F,stress,Ecs_rate,dEcsrate_dS)\n'+
'!************************************************************************\n'+
'! This subroutine computes the secondary creep rate, given the current\n'+
'! temperature, fluence and stress\n'+
'! Input : T : Temperature\n'+
'! F : Fluence\n'+
'! stress : Stress\n'+
'! Output : Ecs_rate : Secondary creep rate\n'+
    '!            dEcsrate_dS : Derivative of the secondary creep rate wrt stress\n'+
' implicit none\n'+
    '\n'+
' integer i,j\n'+
' real*8 T,F,stress(6),Ecs_rate(6)\n'+
' real*8 K,SC_a,SC_e,SC_k,beta,iDel(6,6),iDc(6,6),EoverE0_ZF\n'+
' real*8 iDc_S(6),dEcsrate_dS(6,6)\n'+
'\n'+
' call Get_invDel(T,0.d0,iDel)\n'+
' iDc = iDel\n'+
' stress = stress\n'+
'\n'+
' iDc_S = 0.d0\n'+
' do i=1,6\n'+
' do j=1,6\n'+
' iDc_S(i) = iDc_S(i) + iDc(i,j)*stress(j)\n'+
' enddo\n'+
' enddo\n'+
'\n'+
' Ecs_rate = {}\n'.format(fcode(eval(SecondaryCreepRate_Form)).strip())+
' dEcsrate_dS = {}\n'.format(fcode(eval(SecondaryCreepRate_Form_dS)).strip())+
' return\n'+
' end\n')
return CODE
def Get_Code(SubName,Expression,Tmin,Tmax,Fmin,Fmax):
CODE = (
' subroutine {}(To,Fo,Answer)\n'.format(SubName)+
'!*************************************************************************\n'+
    '! This subroutine evaluates the fitted correlation at the given temperature and fluence\n'+
'! Input : To : Temperature\n'+
'! Fo : Fluence\n'+
    '! Output : Answer : value of the fitted correlation\n'+
' implicit none\n'+
' integer i,j\n'+
' real*8 To,Fo,Answer\n'+
' real*8 T,F,Tmin,Tmax,Fmin,Fmax\n'+
'\n'+
' Tmin = {}\n'.format(fcode(Tmin).strip())+
' Tmax = {}\n'.format(fcode(Tmax).strip())+
' Fmin = {}\n'.format(fcode(Fmin).strip())+
' Fmax = {}\n'.format(fcode(Fmax).strip())+
'\n'+
' T = (To-Tmin)/(Tmax-Tmin)\n'+
' F = (Fo-Fmin)/(Fmax-Fmin)\n'+
'\n'+
'{}\n'.format(Expression)+
' return\n'
' end\n')
return CODE
def Get_Code1D(SubName,Expression,Tmin,Tmax):
CODE = (
' subroutine {}(To,Answer)\n'.format(SubName)+
'!*************************************************************************\n'+
    '! This subroutine evaluates the fitted 1D correlation at the given temperature\n'+
    '! Input  : To  : Temperature\n'+
    '! Output : Answer : value of the fitted correlation\n'+
' implicit none\n'+
' integer i,j\n'+
' real*8 To,Answer\n'+
' real*8 T,F,Tmin,Tmax\n'+
'\n'+
' Tmin = {}\n'.format(fcode(Tmin).strip())+
' Tmax = {}\n'.format(fcode(Tmax).strip())+
'\n'+
' T = (To-Tmin)/(Tmax-Tmin)\n'+
'\n'+
'{}\n'.format(Expression)+
' return\n'
' end\n')
return CODE
| gpl-3.0 |
rahul-c1/scikit-learn | sklearn/decomposition/tests/test_nmf.py | 33 | 6189 | import numpy as np
from scipy import linalg
from sklearn.decomposition import nmf
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import raises
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
random_state = np.random.mtrand.RandomState(0)
@raises(ValueError)
def test_initialize_nn_input():
"""Test NNDSVD behaviour on negative input"""
nmf._initialize_nmf(-np.ones((2, 2)), 2)
def test_initialize_nn_output():
"""Test that NNDSVD does not return negative values"""
data = np.abs(random_state.randn(10, 10))
for var in (None, 'a', 'ar'):
        W, H = nmf._initialize_nmf(data, 10, variant=var, random_state=0)
assert_false((W < 0).any() or (H < 0).any())
def test_initialize_close():
"""Test NNDSVD error
Test that _initialize_nmf error is less than the standard deviation of the
entries in the matrix.
"""
A = np.abs(random_state.randn(10, 10))
W, H = nmf._initialize_nmf(A, 10)
error = linalg.norm(np.dot(W, H) - A)
sdev = linalg.norm(A - A.mean())
assert_true(error <= sdev)
def test_initialize_variants():
"""Test NNDSVD variants correctness
Test that the variants 'a' and 'ar' differ from basic NNDSVD only where
the basic version has zeros.
"""
data = np.abs(random_state.randn(10, 10))
W0, H0 = nmf._initialize_nmf(data, 10, variant=None)
Wa, Ha = nmf._initialize_nmf(data, 10, variant='a')
War, Har = nmf._initialize_nmf(data, 10, variant='ar', random_state=0)
for ref, evl in ((W0, Wa), (W0, War), (H0, Ha), (H0, Har)):
assert_true(np.allclose(evl[ref != 0], ref[ref != 0]))
@raises(ValueError)
def test_projgrad_nmf_fit_nn_input():
"""Test model fit behaviour on negative input"""
A = -np.ones((2, 2))
m = nmf.ProjectedGradientNMF(n_components=2, init=None, random_state=0)
m.fit(A)
def test_projgrad_nmf_fit_nn_output():
"""Test that the decomposition does not contain negative values"""
A = np.c_[5 * np.ones(5) - np.arange(1, 6),
5 * np.ones(5) + np.arange(1, 6)]
for init in (None, 'nndsvd', 'nndsvda', 'nndsvdar'):
model = nmf.ProjectedGradientNMF(n_components=2, init=init,
random_state=0)
transf = model.fit_transform(A)
assert_false((model.components_ < 0).any() or
(transf < 0).any())
def test_projgrad_nmf_fit_close():
"""Test that the fit is not too far away"""
pnmf = nmf.ProjectedGradientNMF(5, init='nndsvda', random_state=0)
X = np.abs(random_state.randn(6, 5))
assert_less(pnmf.fit(X).reconstruction_err_, 0.05)
def test_nls_nn_output():
"""Test that NLS solver doesn't return negative values"""
A = np.arange(1, 5).reshape(1, -1)
Ap, _, _ = nmf._nls_subproblem(np.dot(A.T, -A), A.T, A, 0.001, 100)
assert_false((Ap < 0).any())
def test_nls_close():
"""Test that the NLS results should be close"""
A = np.arange(1, 5).reshape(1, -1)
Ap, _, _ = nmf._nls_subproblem(np.dot(A.T, A), A.T, np.zeros_like(A),
0.001, 100)
assert_true((np.abs(Ap - A) < 0.01).all())
def test_projgrad_nmf_transform():
"""Test that NMF.transform returns close values
(transform uses scipy.optimize.nnls for now)
"""
A = np.abs(random_state.randn(6, 5))
m = nmf.ProjectedGradientNMF(n_components=5, init='nndsvd', random_state=0)
transf = m.fit_transform(A)
assert_true(np.allclose(transf, m.transform(A), atol=1e-2, rtol=0))
def test_n_components_greater_n_features():
"""Smoke test for the case of more components than features."""
A = np.abs(random_state.randn(30, 10))
nmf.ProjectedGradientNMF(n_components=15, sparseness='data',
random_state=0).fit(A)
def test_projgrad_nmf_sparseness():
"""Test sparseness
Test that sparsity constraints actually increase sparseness in the
part where they are applied.
"""
A = np.abs(random_state.randn(10, 10))
m = nmf.ProjectedGradientNMF(n_components=5, random_state=0).fit(A)
data_sp = nmf.ProjectedGradientNMF(n_components=5, sparseness='data',
random_state=0).fit(A).data_sparseness_
comp_sp = nmf.ProjectedGradientNMF(n_components=5, sparseness='components',
random_state=0).fit(A).comp_sparseness_
assert_greater(data_sp, m.data_sparseness_)
assert_greater(comp_sp, m.comp_sparseness_)
def test_sparse_input():
"""Test that sparse matrices are accepted as input"""
from scipy.sparse import csc_matrix
A = np.abs(random_state.randn(10, 10))
A[:, 2 * np.arange(5)] = 0
T1 = nmf.ProjectedGradientNMF(n_components=5, init='random',
random_state=999).fit_transform(A)
A_sparse = csc_matrix(A)
pg_nmf = nmf.ProjectedGradientNMF(n_components=5, init='random',
random_state=999)
T2 = pg_nmf.fit_transform(A_sparse)
assert_array_almost_equal(pg_nmf.reconstruction_err_,
linalg.norm(A - np.dot(T2, pg_nmf.components_),
'fro'))
assert_array_almost_equal(T1, T2)
# same with sparseness
T2 = nmf.ProjectedGradientNMF(
n_components=5, init='random', sparseness='data',
random_state=999).fit_transform(A_sparse)
T1 = nmf.ProjectedGradientNMF(
n_components=5, init='random', sparseness='data',
random_state=999).fit_transform(A)
def test_sparse_transform():
"""Test that transform works on sparse data. Issue #2124"""
from scipy.sparse import csc_matrix
A = np.abs(random_state.randn(5, 4))
A[A > 1.0] = 0
A = csc_matrix(A)
model = nmf.NMF()
A_fit_tr = model.fit_transform(A)
A_tr = model.transform(A)
# This solver seems pretty inconsistent
assert_array_almost_equal(A_fit_tr, A_tr, decimal=2)
if __name__ == '__main__':
import nose
nose.run(argv=['', __file__])
| bsd-3-clause |
Paul-St-Young/QMC | attic/fix.py | 1 | 2915 | #!/usr/bin/env python
import numpy as np
def rehist(r,gr,group=2):
# coarsen gr by a factor of group, but improve accuracy on each point
newr = np.array([ r[i-group:i] for i in range(group,len(r),group) ]).mean(axis=1)
newgr = np.array([ gr[i-group:i] for i in range(group,len(gr),group) ]).mean(axis=1)
return newr,newgr
# end def
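# Editor's illustrative sketch (synthetic data): coarsen a noisy g(r) by a factor of 4.
def rehist_example():
  r = np.linspace(0.,10.,200)
  gr = 1. - np.exp(-r)*np.cos(2.*r) + 0.05*np.random.randn(len(r))
  newr,newgr = rehist(r,gr,group=4) # 200 points -> 49 coarser, smoother points
  return newr,newgr
# end def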
def nonzero(r,vmc,dmc,thres):
# return r,vmc,dmc only on the interval where both vmc and dmc are nonzero
start = 0
end = len(r)-1
started = False
for i in range(len(r)):
if (not started) and (vmc[i]>thres or dmc[i]>thres):
started = True
start = i
# end if not started
if started and (vmc[i]<thres and dmc[i]<thres):
end = i
break
#end if started
# end for
return r[start:end],vmc[start:end],dmc[start:end]
# end def nonzero
from scipy.interpolate import interp1d
import matplotlib.pyplot as plt
import argparse
if __name__=="__main__":
parser = argparse.ArgumentParser(description='Use g(r) to fix wf')
parser.add_argument('VMC',type=str,help='text file containing VMC gofr')
parser.add_argument('DMC',type=str,help='text file containing VMC gofr')
parser.add_argument("-t", "--threshold", type=float, default=1e-2
, help="threshold for small number, default 1e-2" )
parser.add_argument("-cv", "--coarsenVMC", type=int, default=1
, help="by what factor to corasen VMC data" )
parser.add_argument("-cd", "--coarsenDMC", type=int, default=1
, help="by what factor to corasen DMC data" )
parser.add_argument("-f", "--fix", action='store_true'
, help="just plot fix factor" )
args = parser.parse_args()
r,vmc= np.loadtxt(args.VMC).T
r,dmc= np.loadtxt(args.DMC).T
r,vmc,dmc=nonzero(r,vmc,dmc,args.threshold)
    # coarsen if needed
newrv,newvmc=rehist(r,vmc,args.coarsenVMC)
newrd,newdmc=rehist(r,dmc,args.coarsenDMC)
# spline fit to refine
r=np.linspace(max(newrv[0],newrd[0]),min(newrv[-1],newrd[-1]),1000)
vmc=interp1d(newrv,newvmc,kind='cubic')(r)
dmc=interp1d(newrd,newdmc,kind='cubic')(r)
fixed=[ dmc[i]/vmc[i]*dmc[i] for i in range(len(r)) ]
fix_factor=[ dmc[i]/vmc[i] for i in range(len(r)) ]
vm = sum(vmc*r)/sum(vmc)
dm = sum(dmc*r)/sum(dmc)
print "VMC = ", vm
print "DMC = ", dm
print "Ro = ",sum(fixed*r)/sum(fixed)
print "Mean Fix = ", dm*dm/vm
plt.plot(r,vmc,'--',label="VMC")
plt.plot(newrv,newvmc,'^',label="VMC data")
plt.plot(r,dmc,'-.',label="DMC")
plt.plot(newrd,newdmc,'o',label="DMC data")
if (args.fix):
plt.plot(r,np.log(fix_factor),label="Fix Factor")
else:
plt.plot(r,fixed,label="Fix")
# end if fix_factor
plt.legend(loc=0)
plt.show()
# end __main__
| mit |
j-friedrich/OASIS | examples/fig2.py | 1 | 5964 | """Script illustrating OASIS, an active set method for sparse nonnegative deconvolution
@author: Johannes Friedrich
"""
import numpy as np
from matplotlib import pyplot as plt
from oasis.plotting import init_fig, simpleaxis
init_fig()
save_figs = False # subfolder fig and video must exist if set to True
def deconvolveAR1(y, g, lam=0, callback=None):
len_P = y.shape[0]
solution = np.empty(len_P)
# [value, weight, start time, length] of pool
P = [[y[i] - lam * (1 - g), 1, i, 1] for i in range(len_P)]
c = 0
counter = 0
while c < len_P - 1:
while c < len_P - 1 and \
(P[c][0] * P[c + 1][1] * g**P[c][3] <=
P[c][1] * P[c + 1][0]):
c += 1
if callback is not None:
callback(y, P, counter, range(
P[c][2], P[c][2] + P[c][3]))
counter += 1
if c == len_P - 1:
break
if callback is not None:
callback(y, P, counter, range(
P[c + 1][2], P[c + 1][2] + P[c + 1][3]))
counter += 1
# merge two pools
P[c][0] += P[c + 1][0] * g**P[c][3]
P[c][1] += P[c + 1][1] * g**(2 * P[c][3])
P[c][3] += P[c + 1][3]
P.pop(c + 1)
len_P -= 1
if callback is not None:
callback(y, P, counter, range(
P[c][2], P[c][2] + P[c][3]))
counter += 1
while (c > 0 and # backtrack until violations fixed
(P[c - 1][0] * P[c][1] * g**P[c - 1][3] >
P[c - 1][1] * P[c][0])):
c -= 1
# merge two pools
P[c][0] += P[c + 1][0] * g**P[c][3]
P[c][1] += P[c + 1][1] * g**(2 * P[c][3])
P[c][3] += P[c + 1][3]
P.pop(c + 1)
len_P -= 1
if callback is not None:
callback(y, P, counter, range(
P[c][2], P[c][2] + P[c][3]))
counter += 1
# construct solution
for v, w, f, l in P:
solution[f:f + l] = max(v, 0) / w * g**np.arange(l)
return solution
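# Editor's sketch: minimal use of deconvolveAR1 on a synthetic AR(1) trace, without
# the plotting callback.  The spike rate, noise level and sparsity penalty below are
# illustrative guesses, not the values used for the figures generated further down.
def deconvolve_example(seed=0):
    rng = np.random.RandomState(seed)
    g_ex, T_ex, sn = .95, 300, .3
    s = (rng.rand(T_ex) < .05).astype(float)      # sparse "spikes"
    c = s.copy()
    for t in range(1, T_ex):
        c[t] += g_ex * c[t - 1]                   # AR(1) calcium dynamics
    y_ex = c + sn * rng.randn(T_ex)               # noisy fluorescence
    return y_ex, deconvolveAR1(y_ex, g_ex, lam=1.)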
###############
#### Fig 2 ####
###############
def cb(y, P, counter, current):
solution = np.empty(len(y))
for v, w, f, l in P:
solution[f:f + l] = max(v, 0) / w * g**np.arange(l)
plt.figure(figsize=(3, 3))
color = y.copy()
plt.plot(solution, c='k', zorder=-11, lw=1.2)
plt.scatter(np.arange(len(y)), solution, s=60, cmap=plt.cm.Spectral,
c=color, clip_on=False, zorder=11)
plt.scatter([np.arange(len(y))[current]], [solution[current]],
s=200, lw=2.5, marker='+', color='b', clip_on=False, zorder=11)
for a in P[::2]:
plt.axvspan(a[2], a[2] + a[3], alpha=0.1, color='k', zorder=-11)
for x in np.where(trueSpikes)[0]:
plt.plot([x, x], [0, 1.65], lw=1.5, c='r', zorder=-12)
plt.xlim((0, len(y) - .5))
plt.ylim((0, 1.65))
simpleaxis(plt.gca())
plt.xticks([])
plt.yticks([])
if save_figs:
plt.savefig('fig/%d.pdf' % counter)
plt.show()
# generate data
g = .8
T = 30
noise = .2
np.random.seed(1)
y = np.zeros(T)
trueSpikes = np.random.rand(T) < .1
truth = trueSpikes.astype(float)
for i in range(2, T):
truth[i] += g * truth[i - 1]
y = truth + noise * np.random.randn(T)
y = y[:20]
# run OASIS
deconvolveAR1(y, g, .75, callback=cb)
###############
#### Video ####
###############
plt.ion()
def cb(y, P, counter, current):
solution = np.empty(len(y))
for i, (v, w, f, l) in enumerate(P):
solution[f:f + l] = (v if i else max(v, 0)) / w * g**np.arange(l)
color = y.copy()
ax1.plot(solution, c='k', zorder=-11, lw=1.3, clip_on=False)
ax1.scatter(np.arange(len(y)), solution, s=40, cmap=plt.cm.Spectral,
c=color, clip_on=False, zorder=11)
ax1.scatter([np.arange(len(y))[current]], [solution[current]],
s=120, lw=2.5, marker='+', color='b', clip_on=False, zorder=11)
for a in P[::2]:
ax1.axvspan(a[2], a[2] + a[3], alpha=0.1, color='k', zorder=-11)
for x in np.where(trueSpikes)[0]:
ax1.plot([x, x], [0, 2.3], lw=1.5, c='r', zorder=-12)
ax1.set_xlim((0, len(y) - .5))
ax1.set_ylim((0, 2.3))
simpleaxis(ax1)
ax1.set_xticks([])
ax1.set_yticks([])
ax1.set_ylabel('Fluorescence')
for i, s in enumerate(np.r_[[0], solution[1:] - g * solution[:-1]]):
ax2.plot([i, i], [0, s], c='k', zorder=-11, lw=1.4, clip_on=False)
ax2.scatter(np.arange(len(y)), np.r_[[0], solution[1:] - g * solution[:-1]],
s=40, cmap=plt.cm.Spectral, c=color, clip_on=False, zorder=11)
ax2.scatter([np.arange(len(y))[current]],
[np.r_[[0], solution[1:] - g * solution[:-1]][current]],
s=120, lw=2.5, marker='+', color='b', clip_on=False, zorder=11)
for a in P[::2]:
ax2.axvspan(a[2], a[2] + a[3], alpha=0.1, color='k', zorder=-11)
for x in np.where(trueSpikes)[0]:
ax2.plot([x, x], [0, 1.55], lw=1.5, c='r', zorder=-12)
ax2.set_xlim((0, len(y) - .5))
ax2.set_ylim((0, 1.55))
simpleaxis(ax2)
ax2.set_xticks([])
ax2.set_yticks([])
ax2.set_xlabel('Time', labelpad=35, x=.5)
ax2.set_ylabel('Spikes')
plt.subplots_adjust(left=0.032, right=.995, top=.995, bottom=0.19, hspace=0.22)
if save_figs:
plt.savefig('video/%03d.pdf' % counter)
plt.pause(1e-9)
ax1.clear()
ax2.clear()
# generate data
g = .8
T0 = 30
noise = .2
np.random.seed(1)
trueSpikes = np.random.rand(T0) < .1
noise0 = noise * np.random.randn(T0)
np.random.seed(14)
T = 150
trueSpikes = np.hstack([trueSpikes, np.random.rand(T - T0) < .1])
truth = trueSpikes.astype(float)
for i in range(2, T):
truth[i] += g * truth[i - 1]
y = truth + np.hstack([noise0, noise * np.random.randn(T - T0)])
# run OASIS
fig = plt.figure(figsize=(15, 6))
ax1 = fig.add_subplot(211)
ax2 = fig.add_subplot(212)
deconvolveAR1(y, g, .75, callback=cb)
| gpl-3.0 |
courtarro/gnuradio-wg-grc | gr-filter/examples/synth_to_chan.py | 40 | 3854 | #!/usr/bin/env python
#
# Copyright 2010,2012,2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr
from gnuradio import blocks
from gnuradio import filter
import sys
try:
from gnuradio import analog
except ImportError:
sys.stderr.write("Error: Program requires gr-analog.\n")
sys.exit(1)
try:
import scipy
except ImportError:
sys.stderr.write("Error: Program requires scipy (see: www.scipy.org).\n")
sys.exit(1)
try:
import pylab
except ImportError:
sys.stderr.write("Error: Program requires matplotlib (see: matplotlib.sourceforge.net).\n")
sys.exit(1)
def main():
N = 1000000
fs = 8000
freqs = [100, 200, 300, 400, 500]
nchans = 7
sigs = list()
fmtx = list()
for fi in freqs:
s = analog.sig_source_f(fs, analog.GR_SIN_WAVE, fi, 1)
fm = analog.nbfm_tx(fs, 4*fs, max_dev=10000, tau=75e-6)
sigs.append(s)
fmtx.append(fm)
syntaps = filter.firdes.low_pass_2(len(freqs), fs, fs/float(nchans)/2, 100, 100)
print "Synthesis Num. Taps = %d (taps per filter = %d)" % (len(syntaps),
len(syntaps)/nchans)
chtaps = filter.firdes.low_pass_2(len(freqs), fs, fs/float(nchans)/2, 100, 100)
print "Channelizer Num. Taps = %d (taps per filter = %d)" % (len(chtaps),
len(chtaps)/nchans)
filtbank = filter.pfb_synthesizer_ccf(nchans, syntaps)
channelizer = filter.pfb.channelizer_ccf(nchans, chtaps)
noise_level = 0.01
head = blocks.head(gr.sizeof_gr_complex, N)
noise = analog.noise_source_c(analog.GR_GAUSSIAN, noise_level)
addnoise = blocks.add_cc()
snk_synth = blocks.vector_sink_c()
tb = gr.top_block()
tb.connect(noise, (addnoise,0))
tb.connect(filtbank, head, (addnoise, 1))
tb.connect(addnoise, channelizer)
tb.connect(addnoise, snk_synth)
snk = list()
for i,si in enumerate(sigs):
tb.connect(si, fmtx[i], (filtbank, i))
for i in xrange(nchans):
snk.append(blocks.vector_sink_c())
tb.connect((channelizer, i), snk[i])
tb.run()
if 1:
channel = 1
data = snk[channel].data()[1000:]
f1 = pylab.figure(1)
s1 = f1.add_subplot(1,1,1)
s1.plot(data[10000:10200] )
s1.set_title(("Output Signal from Channel %d" % channel))
fftlen = 2048
winfunc = scipy.blackman
#winfunc = scipy.hamming
f2 = pylab.figure(2)
s2 = f2.add_subplot(1,1,1)
s2.psd(data, NFFT=fftlen,
Fs = nchans*fs,
noverlap=fftlen/4,
window = lambda d: d*winfunc(fftlen))
s2.set_title(("Output PSD from Channel %d" % channel))
f3 = pylab.figure(3)
s3 = f3.add_subplot(1,1,1)
s3.psd(snk_synth.data()[1000:], NFFT=fftlen,
Fs = nchans*fs,
noverlap=fftlen/4,
window = lambda d: d*winfunc(fftlen))
s3.set_title("Output of Synthesis Filter")
pylab.show()
if __name__ == "__main__":
main()
| gpl-3.0 |
nlalevee/spark | python/pyspark/sql/group.py | 4 | 10631 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from pyspark import since
from pyspark.rdd import ignore_unicode_prefix, PythonEvalType
from pyspark.sql.column import Column, _to_seq, _to_java_column, _create_column_from_literal
from pyspark.sql.dataframe import DataFrame
from pyspark.sql.udf import UserDefinedFunction
from pyspark.sql.types import *
__all__ = ["GroupedData"]
def dfapi(f):
def _api(self):
name = f.__name__
jdf = getattr(self._jgd, name)()
return DataFrame(jdf, self.sql_ctx)
_api.__name__ = f.__name__
_api.__doc__ = f.__doc__
return _api
def df_varargs_api(f):
def _api(self, *cols):
name = f.__name__
jdf = getattr(self._jgd, name)(_to_seq(self.sql_ctx._sc, cols))
return DataFrame(jdf, self.sql_ctx)
_api.__name__ = f.__name__
_api.__doc__ = f.__doc__
return _api
class GroupedData(object):
"""
A set of methods for aggregations on a :class:`DataFrame`,
created by :func:`DataFrame.groupBy`.
.. note:: Experimental
.. versionadded:: 1.3
"""
def __init__(self, jgd, df):
self._jgd = jgd
self._df = df
self.sql_ctx = df.sql_ctx
@ignore_unicode_prefix
@since(1.3)
def agg(self, *exprs):
"""Compute aggregates and returns the result as a :class:`DataFrame`.
The available aggregate functions are `avg`, `max`, `min`, `sum`, `count`.
If ``exprs`` is a single :class:`dict` mapping from string to string, then the key
is the column to perform aggregation on, and the value is the aggregate function.
Alternatively, ``exprs`` can also be a list of aggregate :class:`Column` expressions.
:param exprs: a dict mapping from column name (string) to aggregate functions (string),
or a list of :class:`Column`.
>>> gdf = df.groupBy(df.name)
>>> sorted(gdf.agg({"*": "count"}).collect())
[Row(name=u'Alice', count(1)=1), Row(name=u'Bob', count(1)=1)]
>>> from pyspark.sql import functions as F
>>> sorted(gdf.agg(F.min(df.age)).collect())
[Row(name=u'Alice', min(age)=2), Row(name=u'Bob', min(age)=5)]
"""
assert exprs, "exprs should not be empty"
if len(exprs) == 1 and isinstance(exprs[0], dict):
jdf = self._jgd.agg(exprs[0])
else:
# Columns
assert all(isinstance(c, Column) for c in exprs), "all exprs should be Column"
jdf = self._jgd.agg(exprs[0]._jc,
_to_seq(self.sql_ctx._sc, [c._jc for c in exprs[1:]]))
return DataFrame(jdf, self.sql_ctx)
@dfapi
@since(1.3)
def count(self):
"""Counts the number of records for each group.
>>> sorted(df.groupBy(df.age).count().collect())
[Row(age=2, count=1), Row(age=5, count=1)]
"""
@df_varargs_api
@since(1.3)
def mean(self, *cols):
"""Computes average values for each numeric columns for each group.
:func:`mean` is an alias for :func:`avg`.
:param cols: list of column names (string). Non-numeric columns are ignored.
>>> df.groupBy().mean('age').collect()
[Row(avg(age)=3.5)]
>>> df3.groupBy().mean('age', 'height').collect()
[Row(avg(age)=3.5, avg(height)=82.5)]
"""
@df_varargs_api
@since(1.3)
def avg(self, *cols):
"""Computes average values for each numeric columns for each group.
:func:`mean` is an alias for :func:`avg`.
:param cols: list of column names (string). Non-numeric columns are ignored.
>>> df.groupBy().avg('age').collect()
[Row(avg(age)=3.5)]
>>> df3.groupBy().avg('age', 'height').collect()
[Row(avg(age)=3.5, avg(height)=82.5)]
"""
@df_varargs_api
@since(1.3)
def max(self, *cols):
"""Computes the max value for each numeric columns for each group.
>>> df.groupBy().max('age').collect()
[Row(max(age)=5)]
>>> df3.groupBy().max('age', 'height').collect()
[Row(max(age)=5, max(height)=85)]
"""
@df_varargs_api
@since(1.3)
def min(self, *cols):
"""Computes the min value for each numeric column for each group.
:param cols: list of column names (string). Non-numeric columns are ignored.
>>> df.groupBy().min('age').collect()
[Row(min(age)=2)]
>>> df3.groupBy().min('age', 'height').collect()
[Row(min(age)=2, min(height)=80)]
"""
@df_varargs_api
@since(1.3)
def sum(self, *cols):
"""Compute the sum for each numeric columns for each group.
:param cols: list of column names (string). Non-numeric columns are ignored.
>>> df.groupBy().sum('age').collect()
[Row(sum(age)=7)]
>>> df3.groupBy().sum('age', 'height').collect()
[Row(sum(age)=7, sum(height)=165)]
"""
@since(1.6)
def pivot(self, pivot_col, values=None):
"""
Pivots a column of the current :class:`DataFrame` and perform the specified aggregation.
        There are two versions of the pivot function: one that requires the caller to specify the list
of distinct values to pivot on, and one that does not. The latter is more concise but less
efficient, because Spark needs to first compute the list of distinct values internally.
:param pivot_col: Name of the column to pivot.
:param values: List of values that will be translated to columns in the output DataFrame.
# Compute the sum of earnings for each year by course with each course as a separate column
>>> df4.groupBy("year").pivot("course", ["dotNET", "Java"]).sum("earnings").collect()
[Row(year=2012, dotNET=15000, Java=20000), Row(year=2013, dotNET=48000, Java=30000)]
# Or without specifying column values (less efficient)
>>> df4.groupBy("year").pivot("course").sum("earnings").collect()
[Row(year=2012, Java=20000, dotNET=15000), Row(year=2013, Java=30000, dotNET=48000)]
"""
if values is None:
jgd = self._jgd.pivot(pivot_col)
else:
jgd = self._jgd.pivot(pivot_col, values)
return GroupedData(jgd, self._df)
@since(2.3)
def apply(self, udf):
"""
Maps each group of the current :class:`DataFrame` using a pandas udf and returns the result
as a `DataFrame`.
The user-defined function should take a `pandas.DataFrame` and return another
`pandas.DataFrame`. For each group, all columns are passed together as a `pandas.DataFrame`
to the user-function and the returned `pandas.DataFrame`s are combined as a
:class:`DataFrame`.
The returned `pandas.DataFrame` can be of arbitrary length and its schema must match the
returnType of the pandas udf.
This function does not support partial aggregation, and requires shuffling all the data in
the :class:`DataFrame`.
:param udf: A function object returned by :meth:`pyspark.sql.functions.pandas_udf`
>>> from pyspark.sql.functions import pandas_udf, PandasUDFType
>>> df = spark.createDataFrame(
... [(1, 1.0), (1, 2.0), (2, 3.0), (2, 5.0), (2, 10.0)],
... ("id", "v"))
>>> @pandas_udf("id long, v double", PandasUDFType.GROUP_MAP)
... def normalize(pdf):
... v = pdf.v
... return pdf.assign(v=(v - v.mean()) / v.std())
>>> df.groupby("id").apply(normalize).show() # doctest: +SKIP
+---+-------------------+
| id| v|
+---+-------------------+
| 1|-0.7071067811865475|
| 1| 0.7071067811865475|
| 2|-0.8320502943378437|
| 2|-0.2773500981126146|
| 2| 1.1094003924504583|
+---+-------------------+
.. seealso:: :meth:`pyspark.sql.functions.pandas_udf`
"""
# Columns are special because hasattr always return True
if isinstance(udf, Column) or not hasattr(udf, 'func') \
or udf.evalType != PythonEvalType.SQL_PANDAS_GROUP_MAP_UDF:
raise ValueError("Invalid udf: the udf argument must be a pandas_udf of type "
"GROUP_MAP.")
df = self._df
udf_column = udf(*[df[col] for col in df.columns])
jdf = self._jgd.flatMapGroupsInPandas(udf_column._jc.expr())
return DataFrame(jdf, self.sql_ctx)
def _test():
import doctest
from pyspark.sql import Row, SparkSession
import pyspark.sql.group
globs = pyspark.sql.group.__dict__.copy()
spark = SparkSession.builder\
.master("local[4]")\
.appName("sql.group tests")\
.getOrCreate()
sc = spark.sparkContext
globs['sc'] = sc
globs['spark'] = spark
globs['df'] = sc.parallelize([(2, 'Alice'), (5, 'Bob')]) \
.toDF(StructType([StructField('age', IntegerType()),
StructField('name', StringType())]))
globs['df3'] = sc.parallelize([Row(name='Alice', age=2, height=80),
Row(name='Bob', age=5, height=85)]).toDF()
globs['df4'] = sc.parallelize([Row(course="dotNET", year=2012, earnings=10000),
Row(course="Java", year=2012, earnings=20000),
Row(course="dotNET", year=2012, earnings=5000),
Row(course="dotNET", year=2013, earnings=48000),
Row(course="Java", year=2013, earnings=30000)]).toDF()
(failure_count, test_count) = doctest.testmod(
pyspark.sql.group, globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE | doctest.REPORT_NDIFF)
spark.stop()
if failure_count:
exit(-1)
if __name__ == "__main__":
_test()
| apache-2.0 |
zihua/scikit-learn | examples/model_selection/plot_learning_curve.py | 76 | 4509 | """
========================
Plotting Learning Curves
========================
On the left side the learning curve of a naive Bayes classifier is shown for
the digits dataset. Note that the training score and the cross-validation score
are both not very good at the end. However, this shape of curve is found very
often in more complex datasets: the training score is very high at the
beginning and decreases, while the cross-validation score is very low at the
beginning and increases. On the right side we see the learning curve of an SVM
with an RBF kernel. We can see clearly that the training score is still around
the maximum, and the validation score could be increased with more training
samples.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
from sklearn.datasets import load_digits
from sklearn.model_selection import learning_curve
from sklearn.model_selection import ShuffleSplit
def plot_learning_curve(estimator, title, X, y, ylim=None, cv=None,
n_jobs=1, train_sizes=np.linspace(.1, 1.0, 5)):
"""
Generate a simple plot of the test and training learning curve.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
An object of that type which is cloned for each validation.
title : string
Title for the chart.
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples) or (n_samples, n_features), optional
Target relative to X for classification or regression;
None for unsupervised learning.
ylim : tuple, shape (ymin, ymax), optional
        Defines minimum and maximum y-values plotted.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, if ``y`` is binary or multiclass,
:class:`StratifiedKFold` used. If the estimator is not a classifier
or if ``y`` is neither binary nor multiclass, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validators that can be used here.
n_jobs : integer, optional
Number of jobs to run in parallel (default 1).
"""
plt.figure()
plt.title(title)
if ylim is not None:
plt.ylim(*ylim)
plt.xlabel("Training examples")
plt.ylabel("Score")
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=cv, n_jobs=n_jobs, train_sizes=train_sizes)
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
test_scores_mean = np.mean(test_scores, axis=1)
test_scores_std = np.std(test_scores, axis=1)
plt.grid()
plt.fill_between(train_sizes, train_scores_mean - train_scores_std,
train_scores_mean + train_scores_std, alpha=0.1,
color="r")
plt.fill_between(train_sizes, test_scores_mean - test_scores_std,
test_scores_mean + test_scores_std, alpha=0.1, color="g")
plt.plot(train_sizes, train_scores_mean, 'o-', color="r",
label="Training score")
plt.plot(train_sizes, test_scores_mean, 'o-', color="g",
label="Cross-validation score")
plt.legend(loc="best")
return plt
digits = load_digits()
X, y = digits.data, digits.target
title = "Learning Curves (Naive Bayes)"
# Cross validation with 100 iterations to get smoother mean test and train
# score curves, each time with 20% data randomly selected as a validation set.
cv = ShuffleSplit(n_splits=100, test_size=0.2, random_state=0)
estimator = GaussianNB()
plot_learning_curve(estimator, title, X, y, ylim=(0.7, 1.01), cv=cv, n_jobs=4)
title = "Learning Curves (SVM, RBF kernel, $\gamma=0.001$)"
# SVC is more expensive so we do a lower number of CV iterations:
cv = ShuffleSplit(n_splits=10, test_size=0.2, random_state=0)
estimator = SVC(gamma=0.001)
plot_learning_curve(estimator, title, X, y, (0.7, 1.01), cv=cv, n_jobs=4)
plt.show()
| bsd-3-clause |
amolkahat/pandas | pandas/tests/extension/base/reduce.py | 2 | 1908 | import warnings
import pytest
import pandas.util.testing as tm
import pandas as pd
from .base import BaseExtensionTests
class BaseReduceTests(BaseExtensionTests):
"""
Reduction specific tests. Generally these only
make sense for numeric/boolean operations.
"""
def check_reduce(self, s, op_name, skipna):
result = getattr(s, op_name)(skipna=skipna)
expected = getattr(s.astype('float64'), op_name)(skipna=skipna)
tm.assert_almost_equal(result, expected)
class BaseNoReduceTests(BaseReduceTests):
""" we don't define any reductions """
@pytest.mark.parametrize('skipna', [True, False])
def test_reduce_series_numeric(self, data, all_numeric_reductions, skipna):
op_name = all_numeric_reductions
s = pd.Series(data)
with pytest.raises(TypeError):
getattr(s, op_name)(skipna=skipna)
@pytest.mark.parametrize('skipna', [True, False])
def test_reduce_series_boolean(self, data, all_boolean_reductions, skipna):
op_name = all_boolean_reductions
s = pd.Series(data)
with pytest.raises(TypeError):
getattr(s, op_name)(skipna=skipna)
class BaseNumericReduceTests(BaseReduceTests):
@pytest.mark.parametrize('skipna', [True, False])
def test_reduce_series(self, data, all_numeric_reductions, skipna):
op_name = all_numeric_reductions
s = pd.Series(data)
# min/max with empty produce numpy warnings
with warnings.catch_warnings():
warnings.simplefilter("ignore", RuntimeWarning)
self.check_reduce(s, op_name, skipna)
class BaseBooleanReduceTests(BaseReduceTests):
@pytest.mark.parametrize('skipna', [True, False])
def test_reduce_series(self, data, all_boolean_reductions, skipna):
op_name = all_boolean_reductions
s = pd.Series(data)
self.check_reduce(s, op_name, skipna)
| bsd-3-clause |
bhargav/scikit-learn | sklearn/tests/test_cross_validation.py | 24 | 47465 | """Test the cross_validation module"""
from __future__ import division
import warnings
import numpy as np
from scipy.sparse import coo_matrix
from scipy.sparse import csr_matrix
from scipy import stats
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.mocking import CheckingClassifier, MockDataFrame
with warnings.catch_warnings():
warnings.simplefilter('ignore')
from sklearn import cross_validation as cval
from sklearn.datasets import make_regression
from sklearn.datasets import load_boston
from sklearn.datasets import load_digits
from sklearn.datasets import load_iris
from sklearn.datasets import make_multilabel_classification
from sklearn.metrics import explained_variance_score
from sklearn.metrics import make_scorer
from sklearn.metrics import precision_score
from sklearn.externals import six
from sklearn.externals.six.moves import zip
from sklearn.linear_model import Ridge
from sklearn.multiclass import OneVsRestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.cluster import KMeans
from sklearn.preprocessing import Imputer
from sklearn.pipeline import Pipeline
class MockClassifier(object):
"""Dummy classifier to test the cross-validation"""
def __init__(self, a=0, allow_nd=False):
self.a = a
self.allow_nd = allow_nd
def fit(self, X, Y=None, sample_weight=None, class_prior=None,
sparse_sample_weight=None, sparse_param=None, dummy_int=None,
dummy_str=None, dummy_obj=None, callback=None):
"""The dummy arguments are to test that this fit function can
accept non-array arguments through cross-validation, such as:
- int
- str (this is actually array-like)
- object
- function
"""
self.dummy_int = dummy_int
self.dummy_str = dummy_str
self.dummy_obj = dummy_obj
if callback is not None:
callback(self)
if self.allow_nd:
X = X.reshape(len(X), -1)
if X.ndim >= 3 and not self.allow_nd:
raise ValueError('X cannot be d')
if sample_weight is not None:
assert_true(sample_weight.shape[0] == X.shape[0],
'MockClassifier extra fit_param sample_weight.shape[0]'
' is {0}, should be {1}'.format(sample_weight.shape[0],
X.shape[0]))
if class_prior is not None:
assert_true(class_prior.shape[0] == len(np.unique(y)),
'MockClassifier extra fit_param class_prior.shape[0]'
' is {0}, should be {1}'.format(class_prior.shape[0],
len(np.unique(y))))
if sparse_sample_weight is not None:
fmt = ('MockClassifier extra fit_param sparse_sample_weight'
'.shape[0] is {0}, should be {1}')
assert_true(sparse_sample_weight.shape[0] == X.shape[0],
fmt.format(sparse_sample_weight.shape[0], X.shape[0]))
if sparse_param is not None:
fmt = ('MockClassifier extra fit_param sparse_param.shape '
'is ({0}, {1}), should be ({2}, {3})')
assert_true(sparse_param.shape == P_sparse.shape,
fmt.format(sparse_param.shape[0],
sparse_param.shape[1],
P_sparse.shape[0], P_sparse.shape[1]))
return self
def predict(self, T):
if self.allow_nd:
T = T.reshape(len(T), -1)
return T[:, 0]
def score(self, X=None, Y=None):
return 1. / (1 + np.abs(self.a))
def get_params(self, deep=False):
return {'a': self.a, 'allow_nd': self.allow_nd}
X = np.ones((10, 2))
X_sparse = coo_matrix(X)
W_sparse = coo_matrix((np.array([1]), (np.array([1]), np.array([0]))),
shape=(10, 1))
P_sparse = coo_matrix(np.eye(5))
# avoid StratifiedKFold's Warning about least populated class in y
y = np.arange(10) % 3
##############################################################################
# Tests
def check_valid_split(train, test, n_samples=None):
# Use python sets to get more informative assertion failure messages
train, test = set(train), set(test)
# Train and test split should not overlap
assert_equal(train.intersection(test), set())
if n_samples is not None:
        # Check that the union of train and test split covers all the indices
assert_equal(train.union(test), set(range(n_samples)))
def check_cv_coverage(cv, expected_n_iter=None, n_samples=None):
    # Check that all the samples appear at least once in a test fold
if expected_n_iter is not None:
assert_equal(len(cv), expected_n_iter)
else:
expected_n_iter = len(cv)
collected_test_samples = set()
iterations = 0
for train, test in cv:
check_valid_split(train, test, n_samples=n_samples)
iterations += 1
collected_test_samples.update(test)
# Check that the accumulated test samples cover the whole dataset
assert_equal(iterations, expected_n_iter)
if n_samples is not None:
assert_equal(collected_test_samples, set(range(n_samples)))
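# Note (not part of the original test module): the two helpers above are
# reused by several of the KFold/StratifiedKFold tests below to assert that
# train and test indices never overlap and that every sample ends up in
# exactly one test fold.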
def test_kfold_valueerrors():
# Check that errors are raised if there is not enough samples
assert_raises(ValueError, cval.KFold, 3, 4)
# Check that a warning is raised if the least populated class has too few
# members.
y = [3, 3, -1, -1, 3]
cv = assert_warns_message(Warning, "The least populated class",
cval.StratifiedKFold, y, 3)
# Check that despite the warning the folds are still computed even
    # though all the classes are not necessarily represented on each
# side of the split at each split
check_cv_coverage(cv, expected_n_iter=3, n_samples=len(y))
# Check that errors are raised if all n_labels for individual
# classes are less than n_folds.
y = [3, 3, -1, -1, 2]
assert_raises(ValueError, cval.StratifiedKFold, y, 3)
# Error when number of folds is <= 1
assert_raises(ValueError, cval.KFold, 2, 0)
assert_raises(ValueError, cval.KFold, 2, 1)
error_string = ("k-fold cross validation requires at least one"
" train / test split")
assert_raise_message(ValueError, error_string,
cval.StratifiedKFold, y, 0)
assert_raise_message(ValueError, error_string,
cval.StratifiedKFold, y, 1)
# When n is not integer:
assert_raises(ValueError, cval.KFold, 2.5, 2)
# When n_folds is not integer:
assert_raises(ValueError, cval.KFold, 5, 1.5)
assert_raises(ValueError, cval.StratifiedKFold, y, 1.5)
def test_kfold_indices():
# Check all indices are returned in the test folds
kf = cval.KFold(300, 3)
check_cv_coverage(kf, expected_n_iter=3, n_samples=300)
# Check all indices are returned in the test folds even when equal-sized
# folds are not possible
kf = cval.KFold(17, 3)
check_cv_coverage(kf, expected_n_iter=3, n_samples=17)
def test_kfold_no_shuffle():
# Manually check that KFold preserves the data ordering on toy datasets
splits = iter(cval.KFold(4, 2))
train, test = next(splits)
assert_array_equal(test, [0, 1])
assert_array_equal(train, [2, 3])
train, test = next(splits)
assert_array_equal(test, [2, 3])
assert_array_equal(train, [0, 1])
splits = iter(cval.KFold(5, 2))
train, test = next(splits)
assert_array_equal(test, [0, 1, 2])
assert_array_equal(train, [3, 4])
train, test = next(splits)
assert_array_equal(test, [3, 4])
assert_array_equal(train, [0, 1, 2])
def test_stratified_kfold_no_shuffle():
# Manually check that StratifiedKFold preserves the data ordering as much
# as possible on toy datasets in order to avoid hiding sample dependencies
# when possible
splits = iter(cval.StratifiedKFold([1, 1, 0, 0], 2))
train, test = next(splits)
assert_array_equal(test, [0, 2])
assert_array_equal(train, [1, 3])
train, test = next(splits)
assert_array_equal(test, [1, 3])
assert_array_equal(train, [0, 2])
splits = iter(cval.StratifiedKFold([1, 1, 1, 0, 0, 0, 0], 2))
train, test = next(splits)
assert_array_equal(test, [0, 1, 3, 4])
assert_array_equal(train, [2, 5, 6])
train, test = next(splits)
assert_array_equal(test, [2, 5, 6])
assert_array_equal(train, [0, 1, 3, 4])
def test_stratified_kfold_ratios():
# Check that stratified kfold preserves label ratios in individual splits
# Repeat with shuffling turned off and on
n_samples = 1000
labels = np.array([4] * int(0.10 * n_samples) +
[0] * int(0.89 * n_samples) +
[1] * int(0.01 * n_samples))
for shuffle in [False, True]:
for train, test in cval.StratifiedKFold(labels, 5, shuffle=shuffle):
assert_almost_equal(np.sum(labels[train] == 4) / len(train), 0.10,
2)
assert_almost_equal(np.sum(labels[train] == 0) / len(train), 0.89,
2)
assert_almost_equal(np.sum(labels[train] == 1) / len(train), 0.01,
2)
assert_almost_equal(np.sum(labels[test] == 4) / len(test), 0.10, 2)
assert_almost_equal(np.sum(labels[test] == 0) / len(test), 0.89, 2)
assert_almost_equal(np.sum(labels[test] == 1) / len(test), 0.01, 2)
def test_kfold_balance():
# Check that KFold returns folds with balanced sizes
for kf in [cval.KFold(i, 5) for i in range(11, 17)]:
sizes = []
for _, test in kf:
sizes.append(len(test))
assert_true((np.max(sizes) - np.min(sizes)) <= 1)
assert_equal(np.sum(sizes), kf.n)
def test_stratifiedkfold_balance():
    # Check that StratifiedKFold returns folds with balanced sizes (only when
# stratification is possible)
# Repeat with shuffling turned off and on
labels = [0] * 3 + [1] * 14
for shuffle in [False, True]:
for skf in [cval.StratifiedKFold(labels[:i], 3, shuffle=shuffle)
for i in range(11, 17)]:
sizes = []
for _, test in skf:
sizes.append(len(test))
assert_true((np.max(sizes) - np.min(sizes)) <= 1)
assert_equal(np.sum(sizes), skf.n)
def test_shuffle_kfold():
# Check the indices are shuffled properly, and that all indices are
# returned in the different test folds
kf = cval.KFold(300, 3, shuffle=True, random_state=0)
ind = np.arange(300)
all_folds = None
for train, test in kf:
assert_true(np.any(np.arange(100) != ind[test]))
assert_true(np.any(np.arange(100, 200) != ind[test]))
assert_true(np.any(np.arange(200, 300) != ind[test]))
if all_folds is None:
all_folds = ind[test].copy()
else:
all_folds = np.concatenate((all_folds, ind[test]))
all_folds.sort()
assert_array_equal(all_folds, ind)
def test_shuffle_stratifiedkfold():
# Check that shuffling is happening when requested, and for proper
# sample coverage
labels = [0] * 20 + [1] * 20
kf0 = list(cval.StratifiedKFold(labels, 5, shuffle=True, random_state=0))
kf1 = list(cval.StratifiedKFold(labels, 5, shuffle=True, random_state=1))
for (_, test0), (_, test1) in zip(kf0, kf1):
assert_true(set(test0) != set(test1))
check_cv_coverage(kf0, expected_n_iter=5, n_samples=40)
def test_kfold_can_detect_dependent_samples_on_digits(): # see #2372
# The digits samples are dependent: they are apparently grouped by authors
# although we don't have any information on the groups segment locations
    # for this data. We can highlight this fact by computing k-fold cross-
# validation with and without shuffling: we observe that the shuffling case
# wrongly makes the IID assumption and is therefore too optimistic: it
    # estimates a much higher accuracy (around 0.96) than the non-
# shuffling variant (around 0.86).
digits = load_digits()
X, y = digits.data[:800], digits.target[:800]
model = SVC(C=10, gamma=0.005)
n = len(y)
cv = cval.KFold(n, 5, shuffle=False)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(0.88, mean_score)
assert_greater(mean_score, 0.85)
# Shuffling the data artificially breaks the dependency and hides the
# overfitting of the model with regards to the writing style of the authors
# by yielding a seriously overestimated score:
cv = cval.KFold(n, 5, shuffle=True, random_state=0)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(mean_score, 0.95)
cv = cval.KFold(n, 5, shuffle=True, random_state=1)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(mean_score, 0.95)
# Similarly, StratifiedKFold should try to shuffle the data as little
# as possible (while respecting the balanced class constraints)
# and thus be able to detect the dependency by not overestimating
# the CV score either. As the digits dataset is approximately balanced
# the estimated mean score is close to the score measured with
# non-shuffled KFold
cv = cval.StratifiedKFold(y, 5)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(0.88, mean_score)
assert_greater(mean_score, 0.85)
def test_label_kfold():
rng = np.random.RandomState(0)
# Parameters of the test
n_labels = 15
n_samples = 1000
n_folds = 5
# Construct the test data
tolerance = 0.05 * n_samples # 5 percent error allowed
labels = rng.randint(0, n_labels, n_samples)
folds = cval.LabelKFold(labels, n_folds=n_folds).idxs
ideal_n_labels_per_fold = n_samples // n_folds
# Check that folds have approximately the same size
assert_equal(len(folds), len(labels))
for i in np.unique(folds):
assert_greater_equal(tolerance,
abs(sum(folds == i) - ideal_n_labels_per_fold))
# Check that each label appears only in 1 fold
for label in np.unique(labels):
assert_equal(len(np.unique(folds[labels == label])), 1)
# Check that no label is on both sides of the split
labels = np.asarray(labels, dtype=object)
for train, test in cval.LabelKFold(labels, n_folds=n_folds):
assert_equal(len(np.intersect1d(labels[train], labels[test])), 0)
# Construct the test data
labels = ['Albert', 'Jean', 'Bertrand', 'Michel', 'Jean',
'Francis', 'Robert', 'Michel', 'Rachel', 'Lois',
'Michelle', 'Bernard', 'Marion', 'Laura', 'Jean',
'Rachel', 'Franck', 'John', 'Gael', 'Anna', 'Alix',
'Robert', 'Marion', 'David', 'Tony', 'Abel', 'Becky',
'Madmood', 'Cary', 'Mary', 'Alexandre', 'David', 'Francis',
'Barack', 'Abdoul', 'Rasha', 'Xi', 'Silvia']
labels = np.asarray(labels, dtype=object)
n_labels = len(np.unique(labels))
n_samples = len(labels)
n_folds = 5
tolerance = 0.05 * n_samples # 5 percent error allowed
folds = cval.LabelKFold(labels, n_folds=n_folds).idxs
ideal_n_labels_per_fold = n_samples // n_folds
# Check that folds have approximately the same size
assert_equal(len(folds), len(labels))
for i in np.unique(folds):
assert_greater_equal(tolerance,
abs(sum(folds == i) - ideal_n_labels_per_fold))
# Check that each label appears only in 1 fold
for label in np.unique(labels):
assert_equal(len(np.unique(folds[labels == label])), 1)
# Check that no label is on both sides of the split
for train, test in cval.LabelKFold(labels, n_folds=n_folds):
assert_equal(len(np.intersect1d(labels[train], labels[test])), 0)
# Should fail if there are more folds than labels
labels = np.array([1, 1, 1, 2, 2])
assert_raises(ValueError, cval.LabelKFold, labels, n_folds=3)
def test_shuffle_split():
ss1 = cval.ShuffleSplit(10, test_size=0.2, random_state=0)
ss2 = cval.ShuffleSplit(10, test_size=2, random_state=0)
ss3 = cval.ShuffleSplit(10, test_size=np.int32(2), random_state=0)
for typ in six.integer_types:
ss4 = cval.ShuffleSplit(10, test_size=typ(2), random_state=0)
for t1, t2, t3, t4 in zip(ss1, ss2, ss3, ss4):
assert_array_equal(t1[0], t2[0])
assert_array_equal(t2[0], t3[0])
assert_array_equal(t3[0], t4[0])
assert_array_equal(t1[1], t2[1])
assert_array_equal(t2[1], t3[1])
assert_array_equal(t3[1], t4[1])
def test_stratified_shuffle_split_init():
y = np.asarray([0, 1, 1, 1, 2, 2, 2])
# Check that error is raised if there is a class with only one sample
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.2)
# Check that error is raised if the test set size is smaller than n_classes
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 2)
# Check that error is raised if the train set size is smaller than
# n_classes
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 3, 2)
y = np.asarray([0, 0, 0, 1, 1, 1, 2, 2, 2])
# Check that errors are raised if there is not enough samples
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.5, 0.6)
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 8, 0.6)
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.6, 8)
# Train size or test size too small
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, train_size=2)
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, test_size=2)
def test_stratified_shuffle_split_iter():
ys = [np.array([1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3]),
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]),
np.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2]),
np.array([1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4]),
np.array([-1] * 800 + [1] * 50)
]
for y in ys:
sss = cval.StratifiedShuffleSplit(y, 6, test_size=0.33,
random_state=0)
for train, test in sss:
assert_array_equal(np.unique(y[train]), np.unique(y[test]))
# Checks if folds keep classes proportions
p_train = (np.bincount(np.unique(y[train], return_inverse=True)[1])
/ float(len(y[train])))
p_test = (np.bincount(np.unique(y[test], return_inverse=True)[1])
/ float(len(y[test])))
assert_array_almost_equal(p_train, p_test, 1)
assert_equal(y[train].size + y[test].size, y.size)
assert_array_equal(np.intersect1d(train, test), [])
def test_stratified_shuffle_split_even():
    # Test the StratifiedShuffleSplit, indices are drawn with an
# equal chance
n_folds = 5
n_iter = 1000
def assert_counts_are_ok(idx_counts, p):
# Here we test that the distribution of the counts
# per index is close enough to a binomial
threshold = 0.05 / n_splits
bf = stats.binom(n_splits, p)
for count in idx_counts:
p = bf.pmf(count)
assert_true(p > threshold,
"An index is not drawn with chance corresponding "
"to even draws")
for n_samples in (6, 22):
labels = np.array((n_samples // 2) * [0, 1])
splits = cval.StratifiedShuffleSplit(labels, n_iter=n_iter,
test_size=1. / n_folds,
random_state=0)
train_counts = [0] * n_samples
test_counts = [0] * n_samples
n_splits = 0
for train, test in splits:
n_splits += 1
for counter, ids in [(train_counts, train), (test_counts, test)]:
for id in ids:
counter[id] += 1
assert_equal(n_splits, n_iter)
assert_equal(len(train), splits.n_train)
assert_equal(len(test), splits.n_test)
assert_equal(len(set(train).intersection(test)), 0)
label_counts = np.unique(labels)
assert_equal(splits.test_size, 1.0 / n_folds)
assert_equal(splits.n_train + splits.n_test, len(labels))
assert_equal(len(label_counts), 2)
ex_test_p = float(splits.n_test) / n_samples
ex_train_p = float(splits.n_train) / n_samples
assert_counts_are_ok(train_counts, ex_train_p)
assert_counts_are_ok(test_counts, ex_test_p)
def test_stratified_shuffle_split_overlap_train_test_bug():
# See https://github.com/scikit-learn/scikit-learn/issues/6121 for
# the original bug report
labels = [0, 1, 2, 3] * 3 + [4, 5] * 5
splits = cval.StratifiedShuffleSplit(labels, n_iter=1,
test_size=0.5, random_state=0)
train, test = next(iter(splits))
assert_array_equal(np.intersect1d(train, test), [])
def test_predefinedsplit_with_kfold_split():
# Check that PredefinedSplit can reproduce a split generated by Kfold.
folds = -1 * np.ones(10)
kf_train = []
kf_test = []
for i, (train_ind, test_ind) in enumerate(cval.KFold(10, 5, shuffle=True)):
kf_train.append(train_ind)
kf_test.append(test_ind)
folds[test_ind] = i
ps_train = []
ps_test = []
ps = cval.PredefinedSplit(folds)
for train_ind, test_ind in ps:
ps_train.append(train_ind)
ps_test.append(test_ind)
assert_array_equal(ps_train, kf_train)
assert_array_equal(ps_test, kf_test)
def test_label_shuffle_split():
ys = [np.array([1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3]),
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]),
np.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2]),
np.array([1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4]),
]
for y in ys:
n_iter = 6
test_size = 1. / 3
slo = cval.LabelShuffleSplit(y, n_iter, test_size=test_size,
random_state=0)
# Make sure the repr works
repr(slo)
# Test that the length is correct
assert_equal(len(slo), n_iter)
y_unique = np.unique(y)
for train, test in slo:
# First test: no train label is in the test set and vice versa
y_train_unique = np.unique(y[train])
y_test_unique = np.unique(y[test])
assert_false(np.any(np.in1d(y[train], y_test_unique)))
assert_false(np.any(np.in1d(y[test], y_train_unique)))
# Second test: train and test add up to all the data
assert_equal(y[train].size + y[test].size, y.size)
# Third test: train and test are disjoint
assert_array_equal(np.intersect1d(train, test), [])
# Fourth test: # unique train and test labels are correct,
# +- 1 for rounding error
assert_true(abs(len(y_test_unique) -
round(test_size * len(y_unique))) <= 1)
assert_true(abs(len(y_train_unique) -
round((1.0 - test_size) * len(y_unique))) <= 1)
def test_leave_label_out_changing_labels():
# Check that LeaveOneLabelOut and LeavePLabelOut work normally if
# the labels variable is changed before calling __iter__
labels = np.array([0, 1, 2, 1, 1, 2, 0, 0])
labels_changing = np.array(labels, copy=True)
lolo = cval.LeaveOneLabelOut(labels)
lolo_changing = cval.LeaveOneLabelOut(labels_changing)
lplo = cval.LeavePLabelOut(labels, p=2)
lplo_changing = cval.LeavePLabelOut(labels_changing, p=2)
labels_changing[:] = 0
for llo, llo_changing in [(lolo, lolo_changing), (lplo, lplo_changing)]:
for (train, test), (train_chan, test_chan) in zip(llo, llo_changing):
assert_array_equal(train, train_chan)
assert_array_equal(test, test_chan)
def test_cross_val_score():
clf = MockClassifier()
for a in range(-10, 10):
clf.a = a
# Smoke test
scores = cval.cross_val_score(clf, X, y)
assert_array_equal(scores, clf.score(X, y))
# test with multioutput y
scores = cval.cross_val_score(clf, X_sparse, X)
assert_array_equal(scores, clf.score(X_sparse, X))
scores = cval.cross_val_score(clf, X_sparse, y)
assert_array_equal(scores, clf.score(X_sparse, y))
# test with multioutput y
scores = cval.cross_val_score(clf, X_sparse, X)
assert_array_equal(scores, clf.score(X_sparse, X))
# test with X and y as list
list_check = lambda x: isinstance(x, list)
clf = CheckingClassifier(check_X=list_check)
scores = cval.cross_val_score(clf, X.tolist(), y.tolist())
clf = CheckingClassifier(check_y=list_check)
scores = cval.cross_val_score(clf, X, y.tolist())
assert_raises(ValueError, cval.cross_val_score, clf, X, y,
scoring="sklearn")
    # test with 3d X
X_3d = X[:, :, np.newaxis]
clf = MockClassifier(allow_nd=True)
scores = cval.cross_val_score(clf, X_3d, y)
clf = MockClassifier(allow_nd=False)
assert_raises(ValueError, cval.cross_val_score, clf, X_3d, y)
def test_cross_val_score_pandas():
# check cross_val_score doesn't destroy pandas dataframe
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((Series, DataFrame))
except ImportError:
pass
for TargetType, InputFeatureType in types:
# X dataframe, y series
X_df, y_ser = InputFeatureType(X), TargetType(y)
check_df = lambda x: isinstance(x, InputFeatureType)
check_series = lambda x: isinstance(x, TargetType)
clf = CheckingClassifier(check_X=check_df, check_y=check_series)
cval.cross_val_score(clf, X_df, y_ser)
def test_cross_val_score_mask():
# test that cross_val_score works with boolean masks
svm = SVC(kernel="linear")
iris = load_iris()
X, y = iris.data, iris.target
cv_indices = cval.KFold(len(y), 5)
scores_indices = cval.cross_val_score(svm, X, y, cv=cv_indices)
cv_indices = cval.KFold(len(y), 5)
cv_masks = []
for train, test in cv_indices:
mask_train = np.zeros(len(y), dtype=np.bool)
mask_test = np.zeros(len(y), dtype=np.bool)
mask_train[train] = 1
mask_test[test] = 1
cv_masks.append((train, test))
scores_masks = cval.cross_val_score(svm, X, y, cv=cv_masks)
assert_array_equal(scores_indices, scores_masks)
def test_cross_val_score_precomputed():
# test for svm with precomputed kernel
svm = SVC(kernel="precomputed")
iris = load_iris()
X, y = iris.data, iris.target
linear_kernel = np.dot(X, X.T)
score_precomputed = cval.cross_val_score(svm, linear_kernel, y)
svm = SVC(kernel="linear")
score_linear = cval.cross_val_score(svm, X, y)
assert_array_equal(score_precomputed, score_linear)
# Error raised for non-square X
svm = SVC(kernel="precomputed")
assert_raises(ValueError, cval.cross_val_score, svm, X, y)
# test error is raised when the precomputed kernel is not array-like
# or sparse
assert_raises(ValueError, cval.cross_val_score, svm,
linear_kernel.tolist(), y)
def test_cross_val_score_fit_params():
clf = MockClassifier()
n_samples = X.shape[0]
n_classes = len(np.unique(y))
DUMMY_INT = 42
DUMMY_STR = '42'
DUMMY_OBJ = object()
def assert_fit_params(clf):
# Function to test that the values are passed correctly to the
# classifier arguments for non-array type
assert_equal(clf.dummy_int, DUMMY_INT)
assert_equal(clf.dummy_str, DUMMY_STR)
assert_equal(clf.dummy_obj, DUMMY_OBJ)
fit_params = {'sample_weight': np.ones(n_samples),
'class_prior': np.ones(n_classes) / n_classes,
'sparse_sample_weight': W_sparse,
'sparse_param': P_sparse,
'dummy_int': DUMMY_INT,
'dummy_str': DUMMY_STR,
'dummy_obj': DUMMY_OBJ,
'callback': assert_fit_params}
cval.cross_val_score(clf, X, y, fit_params=fit_params)
def test_cross_val_score_score_func():
clf = MockClassifier()
_score_func_args = []
def score_func(y_test, y_predict):
_score_func_args.append((y_test, y_predict))
return 1.0
with warnings.catch_warnings(record=True):
scoring = make_scorer(score_func)
score = cval.cross_val_score(clf, X, y, scoring=scoring)
assert_array_equal(score, [1.0, 1.0, 1.0])
assert len(_score_func_args) == 3
def test_cross_val_score_errors():
class BrokenEstimator:
pass
assert_raises(TypeError, cval.cross_val_score, BrokenEstimator(), X)
def test_train_test_split_errors():
assert_raises(ValueError, cval.train_test_split)
assert_raises(ValueError, cval.train_test_split, range(3), train_size=1.1)
assert_raises(ValueError, cval.train_test_split, range(3), test_size=0.6,
train_size=0.6)
assert_raises(ValueError, cval.train_test_split, range(3),
test_size=np.float32(0.6), train_size=np.float32(0.6))
assert_raises(ValueError, cval.train_test_split, range(3),
test_size="wrong_type")
assert_raises(ValueError, cval.train_test_split, range(3), test_size=2,
train_size=4)
assert_raises(TypeError, cval.train_test_split, range(3),
some_argument=1.1)
assert_raises(ValueError, cval.train_test_split, range(3), range(42))
def test_train_test_split():
X = np.arange(100).reshape((10, 10))
X_s = coo_matrix(X)
y = np.arange(10)
# simple test
split = cval.train_test_split(X, y, test_size=None, train_size=.5)
X_train, X_test, y_train, y_test = split
assert_equal(len(y_test), len(y_train))
# test correspondence of X and y
assert_array_equal(X_train[:, 0], y_train * 10)
assert_array_equal(X_test[:, 0], y_test * 10)
# conversion of lists to arrays (deprecated?)
with warnings.catch_warnings(record=True):
split = cval.train_test_split(X, X_s, y.tolist())
X_train, X_test, X_s_train, X_s_test, y_train, y_test = split
assert_array_equal(X_train, X_s_train.toarray())
assert_array_equal(X_test, X_s_test.toarray())
# don't convert lists to anything else by default
split = cval.train_test_split(X, X_s, y.tolist())
X_train, X_test, X_s_train, X_s_test, y_train, y_test = split
assert_true(isinstance(y_train, list))
assert_true(isinstance(y_test, list))
# allow nd-arrays
X_4d = np.arange(10 * 5 * 3 * 2).reshape(10, 5, 3, 2)
y_3d = np.arange(10 * 7 * 11).reshape(10, 7, 11)
split = cval.train_test_split(X_4d, y_3d)
assert_equal(split[0].shape, (7, 5, 3, 2))
assert_equal(split[1].shape, (3, 5, 3, 2))
assert_equal(split[2].shape, (7, 7, 11))
assert_equal(split[3].shape, (3, 7, 11))
# test stratification option
y = np.array([1, 1, 1, 1, 2, 2, 2, 2])
for test_size, exp_test_size in zip([2, 4, 0.25, 0.5, 0.75],
[2, 4, 2, 4, 6]):
train, test = cval.train_test_split(y,
test_size=test_size,
stratify=y,
random_state=0)
assert_equal(len(test), exp_test_size)
assert_equal(len(test) + len(train), len(y))
# check the 1:1 ratio of ones and twos in the data is preserved
assert_equal(np.sum(train == 1), np.sum(train == 2))
def train_test_split_pandas():
    # check train_test_split doesn't destroy pandas dataframe
types = [MockDataFrame]
try:
from pandas import DataFrame
types.append(DataFrame)
except ImportError:
pass
for InputFeatureType in types:
# X dataframe
X_df = InputFeatureType(X)
X_train, X_test = cval.train_test_split(X_df)
assert_true(isinstance(X_train, InputFeatureType))
assert_true(isinstance(X_test, InputFeatureType))
def train_test_split_mock_pandas():
# X mock dataframe
X_df = MockDataFrame(X)
X_train, X_test = cval.train_test_split(X_df)
assert_true(isinstance(X_train, MockDataFrame))
assert_true(isinstance(X_test, MockDataFrame))
def test_cross_val_score_with_score_func_classification():
iris = load_iris()
clf = SVC(kernel='linear')
# Default score (should be the accuracy score)
scores = cval.cross_val_score(clf, iris.data, iris.target, cv=5)
assert_array_almost_equal(scores, [0.97, 1., 0.97, 0.97, 1.], 2)
# Correct classification score (aka. zero / one score) - should be the
# same as the default estimator score
zo_scores = cval.cross_val_score(clf, iris.data, iris.target,
scoring="accuracy", cv=5)
assert_array_almost_equal(zo_scores, [0.97, 1., 0.97, 0.97, 1.], 2)
    # F1 score (classes are balanced so f1_score should be equal to zero/one
    # score)
f1_scores = cval.cross_val_score(clf, iris.data, iris.target,
scoring="f1_weighted", cv=5)
assert_array_almost_equal(f1_scores, [0.97, 1., 0.97, 0.97, 1.], 2)
def test_cross_val_score_with_score_func_regression():
X, y = make_regression(n_samples=30, n_features=20, n_informative=5,
random_state=0)
reg = Ridge()
# Default score of the Ridge regression estimator
scores = cval.cross_val_score(reg, X, y, cv=5)
assert_array_almost_equal(scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
# R2 score (aka. determination coefficient) - should be the
# same as the default estimator score
r2_scores = cval.cross_val_score(reg, X, y, scoring="r2", cv=5)
assert_array_almost_equal(r2_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
# Mean squared error; this is a loss function, so "scores" are negative
mse_scores = cval.cross_val_score(reg, X, y, cv=5,
scoring="mean_squared_error")
expected_mse = np.array([-763.07, -553.16, -274.38, -273.26, -1681.99])
assert_array_almost_equal(mse_scores, expected_mse, 2)
# Explained variance
scoring = make_scorer(explained_variance_score)
ev_scores = cval.cross_val_score(reg, X, y, cv=5, scoring=scoring)
assert_array_almost_equal(ev_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
def test_permutation_score():
iris = load_iris()
X = iris.data
X_sparse = coo_matrix(X)
y = iris.target
svm = SVC(kernel='linear')
cv = cval.StratifiedKFold(y, 2)
score, scores, pvalue = cval.permutation_test_score(
svm, X, y, n_permutations=30, cv=cv, scoring="accuracy")
assert_greater(score, 0.9)
assert_almost_equal(pvalue, 0.0, 1)
score_label, _, pvalue_label = cval.permutation_test_score(
svm, X, y, n_permutations=30, cv=cv, scoring="accuracy",
labels=np.ones(y.size), random_state=0)
assert_true(score_label == score)
assert_true(pvalue_label == pvalue)
# check that we obtain the same results with a sparse representation
svm_sparse = SVC(kernel='linear')
cv_sparse = cval.StratifiedKFold(y, 2)
score_label, _, pvalue_label = cval.permutation_test_score(
svm_sparse, X_sparse, y, n_permutations=30, cv=cv_sparse,
scoring="accuracy", labels=np.ones(y.size), random_state=0)
assert_true(score_label == score)
assert_true(pvalue_label == pvalue)
# test with custom scoring object
def custom_score(y_true, y_pred):
return (((y_true == y_pred).sum() - (y_true != y_pred).sum())
/ y_true.shape[0])
scorer = make_scorer(custom_score)
score, _, pvalue = cval.permutation_test_score(
svm, X, y, n_permutations=100, scoring=scorer, cv=cv, random_state=0)
assert_almost_equal(score, .93, 2)
assert_almost_equal(pvalue, 0.01, 3)
# set random y
y = np.mod(np.arange(len(y)), 3)
score, scores, pvalue = cval.permutation_test_score(
svm, X, y, n_permutations=30, cv=cv, scoring="accuracy")
assert_less(score, 0.5)
assert_greater(pvalue, 0.2)
def test_cross_val_generator_with_indices():
X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
y = np.array([1, 1, 2, 2])
labels = np.array([1, 2, 3, 4])
# explicitly passing indices value is deprecated
loo = cval.LeaveOneOut(4)
lpo = cval.LeavePOut(4, 2)
kf = cval.KFold(4, 2)
skf = cval.StratifiedKFold(y, 2)
lolo = cval.LeaveOneLabelOut(labels)
lopo = cval.LeavePLabelOut(labels, 2)
ps = cval.PredefinedSplit([1, 1, 2, 2])
ss = cval.ShuffleSplit(2)
for cv in [loo, lpo, kf, skf, lolo, lopo, ss, ps]:
for train, test in cv:
assert_not_equal(np.asarray(train).dtype.kind, 'b')
assert_not_equal(np.asarray(train).dtype.kind, 'b')
X[train], X[test]
y[train], y[test]
@ignore_warnings
def test_cross_val_generator_with_default_indices():
X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
y = np.array([1, 1, 2, 2])
labels = np.array([1, 2, 3, 4])
loo = cval.LeaveOneOut(4)
lpo = cval.LeavePOut(4, 2)
kf = cval.KFold(4, 2)
skf = cval.StratifiedKFold(y, 2)
lolo = cval.LeaveOneLabelOut(labels)
lopo = cval.LeavePLabelOut(labels, 2)
ss = cval.ShuffleSplit(2)
ps = cval.PredefinedSplit([1, 1, 2, 2])
for cv in [loo, lpo, kf, skf, lolo, lopo, ss, ps]:
for train, test in cv:
assert_not_equal(np.asarray(train).dtype.kind, 'b')
assert_not_equal(np.asarray(train).dtype.kind, 'b')
X[train], X[test]
y[train], y[test]
def test_shufflesplit_errors():
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=2.0)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=1.0)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=0.1,
train_size=0.95)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=11)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=10)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=8, train_size=3)
assert_raises(ValueError, cval.ShuffleSplit, 10, train_size=1j)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=None,
train_size=None)
def test_shufflesplit_reproducible():
# Check that iterating twice on the ShuffleSplit gives the same
# sequence of train-test when the random_state is given
ss = cval.ShuffleSplit(10, random_state=21)
assert_array_equal(list(a for a, b in ss), list(a for a, b in ss))
def test_safe_split_with_precomputed_kernel():
clf = SVC()
clfp = SVC(kernel="precomputed")
iris = load_iris()
X, y = iris.data, iris.target
K = np.dot(X, X.T)
cv = cval.ShuffleSplit(X.shape[0], test_size=0.25, random_state=0)
tr, te = list(cv)[0]
X_tr, y_tr = cval._safe_split(clf, X, y, tr)
K_tr, y_tr2 = cval._safe_split(clfp, K, y, tr)
assert_array_almost_equal(K_tr, np.dot(X_tr, X_tr.T))
X_te, y_te = cval._safe_split(clf, X, y, te, tr)
K_te, y_te2 = cval._safe_split(clfp, K, y, te, tr)
assert_array_almost_equal(K_te, np.dot(X_te, X_tr.T))
def test_cross_val_score_allow_nans():
# Check that cross_val_score allows input data with NaNs
X = np.arange(200, dtype=np.float64).reshape(10, -1)
X[2, :] = np.nan
y = np.repeat([0, 1], X.shape[0] / 2)
p = Pipeline([
('imputer', Imputer(strategy='mean', missing_values='NaN')),
('classifier', MockClassifier()),
])
cval.cross_val_score(p, X, y, cv=5)
def test_train_test_split_allow_nans():
# Check that train_test_split allows input data with NaNs
X = np.arange(200, dtype=np.float64).reshape(10, -1)
X[2, :] = np.nan
y = np.repeat([0, 1], X.shape[0] / 2)
cval.train_test_split(X, y, test_size=0.2, random_state=42)
def test_permutation_test_score_allow_nans():
# Check that permutation_test_score allows input data with NaNs
X = np.arange(200, dtype=np.float64).reshape(10, -1)
X[2, :] = np.nan
y = np.repeat([0, 1], X.shape[0] / 2)
p = Pipeline([
('imputer', Imputer(strategy='mean', missing_values='NaN')),
('classifier', MockClassifier()),
])
cval.permutation_test_score(p, X, y, cv=5)
def test_check_cv_return_types():
X = np.ones((9, 2))
cv = cval.check_cv(3, X, classifier=False)
assert_true(isinstance(cv, cval.KFold))
y_binary = np.array([0, 1, 0, 1, 0, 0, 1, 1, 1])
cv = cval.check_cv(3, X, y_binary, classifier=True)
assert_true(isinstance(cv, cval.StratifiedKFold))
y_multiclass = np.array([0, 1, 0, 1, 2, 1, 2, 0, 2])
cv = cval.check_cv(3, X, y_multiclass, classifier=True)
assert_true(isinstance(cv, cval.StratifiedKFold))
X = np.ones((5, 2))
y_multilabel = [[1, 0, 1], [1, 1, 0], [0, 0, 0], [0, 1, 1], [1, 0, 0]]
cv = cval.check_cv(3, X, y_multilabel, classifier=True)
assert_true(isinstance(cv, cval.KFold))
y_multioutput = np.array([[1, 2], [0, 3], [0, 0], [3, 1], [2, 0]])
cv = cval.check_cv(3, X, y_multioutput, classifier=True)
assert_true(isinstance(cv, cval.KFold))
def test_cross_val_score_multilabel():
X = np.array([[-3, 4], [2, 4], [3, 3], [0, 2], [-3, 1],
[-2, 1], [0, 0], [-2, -1], [-1, -2], [1, -2]])
y = np.array([[1, 1], [0, 1], [0, 1], [0, 1], [1, 1],
[0, 1], [1, 0], [1, 1], [1, 0], [0, 0]])
clf = KNeighborsClassifier(n_neighbors=1)
scoring_micro = make_scorer(precision_score, average='micro')
scoring_macro = make_scorer(precision_score, average='macro')
scoring_samples = make_scorer(precision_score, average='samples')
score_micro = cval.cross_val_score(clf, X, y, scoring=scoring_micro, cv=5)
score_macro = cval.cross_val_score(clf, X, y, scoring=scoring_macro, cv=5)
score_samples = cval.cross_val_score(clf, X, y,
scoring=scoring_samples, cv=5)
assert_almost_equal(score_micro, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 3])
assert_almost_equal(score_macro, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 4])
assert_almost_equal(score_samples, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 4])
def test_cross_val_predict():
boston = load_boston()
X, y = boston.data, boston.target
cv = cval.KFold(len(boston.target))
est = Ridge()
# Naive loop (should be same as cross_val_predict):
preds2 = np.zeros_like(y)
for train, test in cv:
est.fit(X[train], y[train])
preds2[test] = est.predict(X[test])
preds = cval.cross_val_predict(est, X, y, cv=cv)
assert_array_almost_equal(preds, preds2)
preds = cval.cross_val_predict(est, X, y)
assert_equal(len(preds), len(y))
cv = cval.LeaveOneOut(len(y))
preds = cval.cross_val_predict(est, X, y, cv=cv)
assert_equal(len(preds), len(y))
Xsp = X.copy()
Xsp *= (Xsp > np.median(Xsp))
Xsp = coo_matrix(Xsp)
preds = cval.cross_val_predict(est, Xsp, y)
assert_array_almost_equal(len(preds), len(y))
preds = cval.cross_val_predict(KMeans(), X)
assert_equal(len(preds), len(y))
def bad_cv():
for i in range(4):
yield np.array([0, 1, 2, 3]), np.array([4, 5, 6, 7, 8])
assert_raises(ValueError, cval.cross_val_predict, est, X, y, cv=bad_cv())
def test_cross_val_predict_input_types():
clf = Ridge()
# Smoke test
predictions = cval.cross_val_predict(clf, X, y)
assert_equal(predictions.shape, (10,))
# test with multioutput y
predictions = cval.cross_val_predict(clf, X_sparse, X)
assert_equal(predictions.shape, (10, 2))
predictions = cval.cross_val_predict(clf, X_sparse, y)
assert_array_equal(predictions.shape, (10,))
# test with multioutput y
predictions = cval.cross_val_predict(clf, X_sparse, X)
assert_array_equal(predictions.shape, (10, 2))
# test with X and y as list
list_check = lambda x: isinstance(x, list)
clf = CheckingClassifier(check_X=list_check)
predictions = cval.cross_val_predict(clf, X.tolist(), y.tolist())
clf = CheckingClassifier(check_y=list_check)
predictions = cval.cross_val_predict(clf, X, y.tolist())
    # test with 3d X
X_3d = X[:, :, np.newaxis]
check_3d = lambda x: x.ndim == 3
clf = CheckingClassifier(check_X=check_3d)
predictions = cval.cross_val_predict(clf, X_3d, y)
assert_array_equal(predictions.shape, (10,))
def test_cross_val_predict_pandas():
    # check cross_val_predict doesn't destroy pandas dataframe
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((Series, DataFrame))
except ImportError:
pass
for TargetType, InputFeatureType in types:
# X dataframe, y series
X_df, y_ser = InputFeatureType(X), TargetType(y)
check_df = lambda x: isinstance(x, InputFeatureType)
check_series = lambda x: isinstance(x, TargetType)
clf = CheckingClassifier(check_X=check_df, check_y=check_series)
cval.cross_val_predict(clf, X_df, y_ser)
def test_sparse_fit_params():
iris = load_iris()
X, y = iris.data, iris.target
clf = MockClassifier()
fit_params = {'sparse_sample_weight': coo_matrix(np.eye(X.shape[0]))}
a = cval.cross_val_score(clf, X, y, fit_params=fit_params)
assert_array_equal(a, np.ones(3))
def test_check_is_partition():
p = np.arange(100)
assert_true(cval._check_is_partition(p, 100))
assert_false(cval._check_is_partition(np.delete(p, 23), 100))
p[0] = 23
assert_false(cval._check_is_partition(p, 100))
def test_cross_val_predict_sparse_prediction():
# check that cross_val_predict gives same result for sparse and dense input
X, y = make_multilabel_classification(n_classes=2, n_labels=1,
allow_unlabeled=False,
return_indicator=True,
random_state=1)
X_sparse = csr_matrix(X)
y_sparse = csr_matrix(y)
classif = OneVsRestClassifier(SVC(kernel='linear'))
preds = cval.cross_val_predict(classif, X, y, cv=10)
preds_sparse = cval.cross_val_predict(classif, X_sparse, y_sparse, cv=10)
preds_sparse = preds_sparse.toarray()
assert_array_almost_equal(preds_sparse, preds)
| bsd-3-clause |
poryfly/scikit-learn | sklearn/ensemble/tests/test_partial_dependence.py | 365 | 6996 | """
Testing for the partial dependence module.
"""
import numpy as np
from numpy.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import if_matplotlib
from sklearn.ensemble.partial_dependence import partial_dependence
from sklearn.ensemble.partial_dependence import plot_partial_dependence
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import GradientBoostingRegressor
from sklearn import datasets
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
# also load the boston dataset
boston = datasets.load_boston()
# also load the iris dataset
iris = datasets.load_iris()
def test_partial_dependence_classifier():
# Test partial dependence for classifier
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(X, y)
pdp, axes = partial_dependence(clf, [0], X=X, grid_resolution=5)
# only 4 grid points instead of 5 because only 4 unique X[:,0] vals
assert pdp.shape == (1, 4)
assert axes[0].shape[0] == 4
# now with our own grid
X_ = np.asarray(X)
grid = np.unique(X_[:, 0])
pdp_2, axes = partial_dependence(clf, [0], grid=grid)
assert axes is None
assert_array_equal(pdp, pdp_2)
def test_partial_dependence_multiclass():
# Test partial dependence for multi-class classifier
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(iris.data, iris.target)
grid_resolution = 25
n_classes = clf.n_classes_
pdp, axes = partial_dependence(
clf, [0], X=iris.data, grid_resolution=grid_resolution)
assert pdp.shape == (n_classes, grid_resolution)
assert len(axes) == 1
assert axes[0].shape[0] == grid_resolution
def test_partial_dependence_regressor():
# Test partial dependence for regressor
clf = GradientBoostingRegressor(n_estimators=10, random_state=1)
clf.fit(boston.data, boston.target)
grid_resolution = 25
pdp, axes = partial_dependence(
clf, [0], X=boston.data, grid_resolution=grid_resolution)
assert pdp.shape == (1, grid_resolution)
assert axes[0].shape[0] == grid_resolution
def test_partial_dependecy_input():
# Test input validation of partial dependence.
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(X, y)
assert_raises(ValueError, partial_dependence,
clf, [0], grid=None, X=None)
assert_raises(ValueError, partial_dependence,
clf, [0], grid=[0, 1], X=X)
# first argument must be an instance of BaseGradientBoosting
assert_raises(ValueError, partial_dependence,
{}, [0], X=X)
# Gradient boosting estimator must be fit
assert_raises(ValueError, partial_dependence,
GradientBoostingClassifier(), [0], X=X)
assert_raises(ValueError, partial_dependence, clf, [-1], X=X)
assert_raises(ValueError, partial_dependence, clf, [100], X=X)
# wrong ndim for grid
grid = np.random.rand(10, 2, 1)
assert_raises(ValueError, partial_dependence, clf, [0], grid=grid)
@if_matplotlib
def test_plot_partial_dependence():
# Test partial dependence plot function.
clf = GradientBoostingRegressor(n_estimators=10, random_state=1)
clf.fit(boston.data, boston.target)
grid_resolution = 25
fig, axs = plot_partial_dependence(clf, boston.data, [0, 1, (0, 1)],
grid_resolution=grid_resolution,
feature_names=boston.feature_names)
assert len(axs) == 3
assert all(ax.has_data for ax in axs)
# check with str features and array feature names
fig, axs = plot_partial_dependence(clf, boston.data, ['CRIM', 'ZN',
('CRIM', 'ZN')],
grid_resolution=grid_resolution,
feature_names=boston.feature_names)
assert len(axs) == 3
assert all(ax.has_data for ax in axs)
# check with list feature_names
feature_names = boston.feature_names.tolist()
fig, axs = plot_partial_dependence(clf, boston.data, ['CRIM', 'ZN',
('CRIM', 'ZN')],
grid_resolution=grid_resolution,
feature_names=feature_names)
assert len(axs) == 3
assert all(ax.has_data for ax in axs)
@if_matplotlib
def test_plot_partial_dependence_input():
# Test partial dependence plot function input checks.
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
# not fitted yet
assert_raises(ValueError, plot_partial_dependence,
clf, X, [0])
clf.fit(X, y)
assert_raises(ValueError, plot_partial_dependence,
clf, np.array(X)[:, :0], [0])
# first argument must be an instance of BaseGradientBoosting
assert_raises(ValueError, plot_partial_dependence,
{}, X, [0])
# must be larger than -1
assert_raises(ValueError, plot_partial_dependence,
clf, X, [-1])
# too large feature value
assert_raises(ValueError, plot_partial_dependence,
clf, X, [100])
# str feature but no feature_names
assert_raises(ValueError, plot_partial_dependence,
clf, X, ['foobar'])
# not valid features value
assert_raises(ValueError, plot_partial_dependence,
clf, X, [{'foo': 'bar'}])
@if_matplotlib
def test_plot_partial_dependence_multiclass():
# Test partial dependence plot function on multi-class input.
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(iris.data, iris.target)
grid_resolution = 25
fig, axs = plot_partial_dependence(clf, iris.data, [0, 1],
label=0,
grid_resolution=grid_resolution)
assert len(axs) == 2
assert all(ax.has_data for ax in axs)
# now with symbol labels
target = iris.target_names[iris.target]
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(iris.data, target)
grid_resolution = 25
fig, axs = plot_partial_dependence(clf, iris.data, [0, 1],
label='setosa',
grid_resolution=grid_resolution)
assert len(axs) == 2
assert all(ax.has_data for ax in axs)
# label not in gbrt.classes_
assert_raises(ValueError, plot_partial_dependence,
clf, iris.data, [0, 1], label='foobar',
grid_resolution=grid_resolution)
# label not provided
assert_raises(ValueError, plot_partial_dependence,
clf, iris.data, [0, 1],
grid_resolution=grid_resolution)
| bsd-3-clause |
psychoinformatics-de/studyforrest_annotations | code/importer/confounds.py | 1 | 3816 | #!/usr/bin/env python3
'''merges and copies (already segmented) annotations of confounds
'''
from glob import glob
import argparse
import numpy as np
import os
import pandas as pd
import re
# constants
# no. of segments
SEGMENTS = range(0, 8)
# currently relevant annotations
VIS_CONFS = ['brmean', 'brlr', 'brud', 'phash', 'normdiff']
AUD_CONFS = ['rms', 'lrdiff']
def parse_arguments():
    '''parse the command line arguments and return the input directory,
    the annotated stimulus type, and the output directory
    '''
parser = argparse.ArgumentParser(
description='merges and copies annotations of confounds'
)
parser.add_argument('-i',
help='input directory where files are located',
default='src/confounds/annotation/audio/'
)
parser.add_argument('-s',
                        help='the annotated stimulus ' +
'(\'aomovie\' or \'avmovie\')',
default='aomovie'
)
parser.add_argument('-o',
default='test',
help='output directory')
args = parser.parse_args()
in_dir = args.i
stimulus = args.s
out_dir = args.o
return in_dir, stimulus, out_dir
def find_files(pattern):
    '''return a sorted list of the files matching the glob pattern
    '''
found_files = glob(pattern)
return sorted(found_files)
def create_merged_datafr(inputs):
    '''merge the per-confound annotation files into a single dataframe
    '''
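    # Assumed input layout (inferred from the read_csv calls below, not
    # documented upstream): each per-confound TSV starts with an 'onset'
    # column that becomes the index; the first file also contributes the
    # 'duration' column, while the remaining files contribute only their
    # confound value column (columns 0 and 2).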
# initial dataframe providing 'duration', too
df = pd.read_csv(inputs[0],
header=0,
sep='\t',
index_col=0,
dtype='str' # read all input as string
)
# populate the dataframe with the data from remaining files
for input_f in inputs[1:]:
df_new = pd.read_csv(input_f,
usecols=[0, 2],
                             index_col=0,  # well, that's hard-coded
                             header=0,
sep='\t',
dtype='str' # read all input as string
)
df[df_new.columns[0]] = df_new
return df
if __name__ == '__main__':
# get command line arguments
in_dir, stimulus, out = parse_arguments()
out_path = os.path.join(out, stimulus)
os.makedirs(out_path, exist_ok=True)
if stimulus == 'aomovie':
file_pattern = 'fg_ad_seg?_*.tsv'
# in_fpathes = find_files(os.path.join(in_dir, file_pattern))
elif stimulus == 'avmovie':
file_pattern = 'fg_av_ger_seg?_*.tsv'
# in_fpathes = find_files(os.path.join(in_dir, file_pattern))
else:
raise ValueError('stimulus must be \'aomovie\' or \'avmovie\'')
# segments = [re.search(r'seg\d{1}', in_fpath) for in_fpath in in_fpathes]
# segments = sorted(list(set([segment.group() for segment in segments])))
for segment in SEGMENTS:
run = f'run-{segment + 1}'
        # substitute the current segment into the file pattern
inputs = os.path.join(in_dir, file_pattern)
inputs = inputs.replace('seg?', f'seg{str(segment)}')
if 'visual' in in_dir:
inputs = [inputs.replace('_*.tsv', f'_{conf}.tsv') for conf in VIS_CONFS]
out_file = 'conf_visual_' + run + '_events.tsv'
elif 'audio' in in_dir:
inputs = [inputs.replace('_*.tsv', f'_{conf}.tsv') for conf in AUD_CONFS]
out_file = 'conf_audio_' + run + '_events.tsv'
# read and merge the inputs
merged_df = create_merged_datafr(inputs)
# prepare saving
out_fpath = os.path.join(out_path, out_file)
        # reset the index so that 'onset' is written to the file
        # as a regular, nicely formatted column
merged_df.reset_index(level=0, inplace=True)
merged_df['onset'] = merged_df['onset'].map('{:.2f}'.format)
merged_df.to_csv(out_fpath, sep='\t', index=False)
| cc0-1.0 |
larsoner/mne-python | examples/visualization/plot_topo_compare_conditions.py | 20 | 1828 | """
=================================================
Compare evoked responses for different conditions
=================================================
In this example, an Epochs object for visual and auditory responses is created.
Both conditions are then accessed by their respective names to create a sensor
layout plot of the related evoked responses.
"""
# Authors: Denis Engemann <[email protected]>
# Alexandre Gramfort <[email protected]>
# License: BSD (3-clause)
import matplotlib.pyplot as plt
import mne
from mne.viz import plot_evoked_topo
from mne.datasets import sample
print(__doc__)
data_path = sample.data_path()
###############################################################################
# Set parameters
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
tmin = -0.2
tmax = 0.5
# Setup for reading the raw data
raw = mne.io.read_raw_fif(raw_fname)
events = mne.read_events(event_fname)
# Set up amplitude-peak rejection values for MEG channels
reject = dict(grad=4000e-13, mag=4e-12)
# Create epochs including different events
event_id = {'audio/left': 1, 'audio/right': 2,
'visual/left': 3, 'visual/right': 4}
epochs = mne.Epochs(raw, events, event_id, tmin, tmax,
picks='meg', baseline=(None, 0), reject=reject)
# Generate list of evoked objects from conditions names
evokeds = [epochs[name].average() for name in ('left', 'right')]
###############################################################################
# Show topography for two different conditions
colors = 'blue', 'red'
title = 'MNE sample data\nleft vs right (A/V combined)'
plot_evoked_topo(evokeds, color=colors, title=title, background_color='w')
plt.show()
| bsd-3-clause |
Tong-Chen/scikit-learn | sklearn/__check_build/__init__.py | 30 | 1669 | """ Module to give helpful messages to the user that did not
compile the scikit properly.
"""
import os
INPLACE_MSG = """
It appears that you are importing a local scikit-learn source tree. For
this, you need to have an inplace install. Maybe you are in the source
directory and you need to try from another location."""
STANDARD_MSG = """
If you have used an installer, please check that it is suited for your
Python version, your operating system and your platform."""
def raise_build_error(e):
# Raise a comprehensible error and list the contents of the
# directory to help debugging on the mailing list.
local_dir = os.path.split(__file__)[0]
msg = STANDARD_MSG
if local_dir == "sklearn/check_build":
# Picking up the local install: this will work only if the
# install is an 'inplace build'
msg = INPLACE_MSG
dir_content = list()
for i, filename in enumerate(os.listdir(local_dir)):
if ((i + 1) % 3):
dir_content.append(filename.ljust(26))
else:
dir_content.append(filename + '\n')
raise ImportError("""%s
___________________________________________________________________________
Contents of %s:
%s
___________________________________________________________________________
It seems that scikit-learn has not been built correctly.
If you have installed scikit-learn from source, please do not forget
to build the package before using it: run `python setup.py install` or
`make` in the source directory.
%s""" % (e, local_dir, ''.join(dir_content).strip(), msg))
try:
from ._check_build import check_build
except ImportError as e:
raise_build_error(e)
| bsd-3-clause |
kenshay/ImageScripter | ProgramData/SystemFiles/Python/Lib/site-packages/matplotlib/font_manager.py | 6 | 47238 | """
A module for finding, managing, and using fonts across platforms.
This module provides a single :class:`FontManager` instance that can
be shared across backends and platforms. The :func:`findfont`
function returns the best TrueType (TTF) font file in the local or
system font path that matches the specified :class:`FontProperties`
instance. The :class:`FontManager` also handles Adobe Font Metrics
(AFM) font files for use by the PostScript backend.
The design is based on the `W3C Cascading Style Sheet, Level 1 (CSS1)
font specification <http://www.w3.org/TR/1998/REC-CSS2-19980512/>`_.
Future versions may implement the Level 2 or 2.1 specifications.
Experimental support is included for using `fontconfig` on Unix
variant platforms (Linux, OS X, Solaris). To enable it, set the
constant ``USE_FONTCONFIG`` in this file to ``True``. Fontconfig has
the advantage that it is the standard way to look up fonts on X11
platforms, so if a font is installed, it is much more likely to be
found.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from six.moves import cPickle as pickle
"""
KNOWN ISSUES
- documentation
- font variant is untested
- font stretch is incomplete
- font size is incomplete
- font size_adjust is incomplete
- default font algorithm needs improvement and testing
- setWeights function needs improvement
- 'light' is an invalid weight value, remove it.
- update_fonts not implemented
Authors : John Hunter <[email protected]>
Paul Barrett <[email protected]>
Michael Droettboom <[email protected]>
Copyright : John Hunter (2004,2005), Paul Barrett (2004,2005)
License : matplotlib license (PSF compatible)
The font directory code is from ttfquery,
see license/LICENSE_TTFQUERY.
"""
from collections import Iterable
import json
import os
import sys
from threading import Timer
import warnings
import matplotlib
from matplotlib import afm, cbook, ft2font, rcParams, get_cachedir
from matplotlib.cbook import is_string_like
from matplotlib.compat import subprocess
from matplotlib.fontconfig_pattern import (
parse_fontconfig_pattern, generate_fontconfig_pattern)
try:
from functools import lru_cache
except ImportError:
from functools32 import lru_cache
USE_FONTCONFIG = False
verbose = matplotlib.verbose
font_scalings = {
'xx-small' : 0.579,
'x-small' : 0.694,
'small' : 0.833,
'medium' : 1.0,
'large' : 1.200,
'x-large' : 1.440,
'xx-large' : 1.728,
'larger' : 1.2,
'smaller' : 0.833,
None : 1.0}
stretch_dict = {
'ultra-condensed' : 100,
'extra-condensed' : 200,
'condensed' : 300,
'semi-condensed' : 400,
'normal' : 500,
'semi-expanded' : 600,
'expanded' : 700,
'extra-expanded' : 800,
'ultra-expanded' : 900}
weight_dict = {
'ultralight' : 100,
'light' : 200,
'normal' : 400,
'regular' : 400,
'book' : 400,
'medium' : 500,
'roman' : 500,
'semibold' : 600,
'demibold' : 600,
'demi' : 600,
'bold' : 700,
'heavy' : 800,
'extra bold' : 800,
'black' : 900}
font_family_aliases = set([
'serif',
'sans-serif',
'sans serif',
'cursive',
'fantasy',
'monospace',
'sans'])
# OS Font paths
MSFolders = \
r'Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders'
MSFontDirectories = [
r'SOFTWARE\Microsoft\Windows NT\CurrentVersion\Fonts',
r'SOFTWARE\Microsoft\Windows\CurrentVersion\Fonts']
X11FontDirectories = [
# an old standard installation point
"/usr/X11R6/lib/X11/fonts/TTF/",
"/usr/X11/lib/X11/fonts",
# here is the new standard location for fonts
"/usr/share/fonts/",
# documented as a good place to install new fonts
"/usr/local/share/fonts/",
# common application, not really useful
"/usr/lib/openoffice/share/fonts/truetype/",
]
OSXFontDirectories = [
"/Library/Fonts/",
"/Network/Library/Fonts/",
"/System/Library/Fonts/",
# fonts installed via MacPorts
"/opt/local/share/fonts"
""
]
if not USE_FONTCONFIG and sys.platform != 'win32':
home = os.environ.get('HOME')
if home is not None:
# user fonts on OSX
path = os.path.join(home, 'Library', 'Fonts')
OSXFontDirectories.append(path)
path = os.path.join(home, '.fonts')
X11FontDirectories.append(path)
def get_fontext_synonyms(fontext):
"""
    Return a list of file extensions that are synonyms for
    the given file extension *fontext*.
"""
return {'ttf': ('ttf', 'otf'),
'otf': ('ttf', 'otf'),
'afm': ('afm',)}[fontext]
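# For example, get_fontext_synonyms('ttf') returns ('ttf', 'otf'), so OpenType
# files are accepted wherever TrueType files are; 'afm' maps only to itself.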
def list_fonts(directory, extensions):
"""
Return a list of all fonts matching any of the extensions,
possibly upper-cased, found recursively under the directory.
"""
pattern = ';'.join(['*.%s;*.%s' % (ext, ext.upper())
for ext in extensions])
return cbook.listFiles(directory, pattern)
def win32FontDirectory():
"""
Return the user-specified font directory for Win32. This is
looked up from the registry key::
\\HKEY_CURRENT_USER\Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders\Fonts
If the key is not found, $WINDIR/Fonts will be returned.
"""
try:
from six.moves import winreg
except ImportError:
pass # Fall through to default
else:
try:
user = winreg.OpenKey(winreg.HKEY_CURRENT_USER, MSFolders)
try:
try:
return winreg.QueryValueEx(user, 'Fonts')[0]
except OSError:
pass # Fall through to default
finally:
winreg.CloseKey(user)
except OSError:
pass # Fall through to default
return os.path.join(os.environ['WINDIR'], 'Fonts')
def win32InstalledFonts(directory=None, fontext='ttf'):
"""
Search for fonts in the specified font directory, or use the
system directories if none given. A list of TrueType font
filenames are returned by default, or AFM fonts if *fontext* ==
'afm'.
"""
from six.moves import winreg
if directory is None:
directory = win32FontDirectory()
fontext = get_fontext_synonyms(fontext)
key, items = None, {}
for fontdir in MSFontDirectories:
try:
local = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, fontdir)
except OSError:
continue
if not local:
return list_fonts(directory, fontext)
try:
for j in range(winreg.QueryInfoKey(local)[1]):
try:
key, direc, any = winreg.EnumValue( local, j)
if not is_string_like(direc):
continue
if not os.path.dirname(direc):
direc = os.path.join(directory, direc)
direc = os.path.abspath(direc).lower()
if os.path.splitext(direc)[1][1:] in fontext:
items[direc] = 1
except EnvironmentError:
continue
except WindowsError:
continue
except MemoryError:
continue
return list(six.iterkeys(items))
finally:
winreg.CloseKey(local)
return None
def OSXInstalledFonts(directories=None, fontext='ttf'):
"""
Get list of font files on OS X - ignores font suffix by default.
"""
if directories is None:
directories = OSXFontDirectories
fontext = get_fontext_synonyms(fontext)
files = []
for path in directories:
if fontext is None:
files.extend(cbook.listFiles(path, '*'))
else:
files.extend(list_fonts(path, fontext))
return files
@lru_cache()
def _call_fc_list():
"""Cache and list the font filenames known to `fc-list`.
"""
# Delay the warning by 5s.
timer = Timer(5, lambda: warnings.warn(
'Matplotlib is building the font cache using fc-list. '
'This may take a moment.'))
timer.start()
try:
out = subprocess.check_output([str('fc-list'), '--format=%{file}'])
except (OSError, subprocess.CalledProcessError):
return []
finally:
timer.cancel()
fnames = []
for fname in out.split(b'\n'):
try:
fname = six.text_type(fname, sys.getfilesystemencoding())
except UnicodeDecodeError:
continue
fnames.append(fname)
return fnames
def get_fontconfig_fonts(fontext='ttf'):
"""List the font filenames known to `fc-list` having the given extension.
"""
fontext = get_fontext_synonyms(fontext)
return [fname for fname in _call_fc_list()
if os.path.splitext(fname)[1][1:] in fontext]
def findSystemFonts(fontpaths=None, fontext='ttf'):
"""
Search for fonts in the specified font paths. If no paths are
given, will use a standard set of system paths, as well as the
list of fonts tracked by fontconfig if fontconfig is installed and
    available.  A list of TrueType fonts is returned by default, with
AFM fonts as an option.
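    A minimal illustrative call (the result depends on which fonts are
    installed locally)::

        font_files = findSystemFonts(fontpaths=None, fontext='ttf')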
"""
fontfiles = set()
fontexts = get_fontext_synonyms(fontext)
if fontpaths is None:
if sys.platform == 'win32':
fontdir = win32FontDirectory()
fontpaths = [fontdir]
# now get all installed fonts directly...
for f in win32InstalledFonts(fontdir):
base, ext = os.path.splitext(f)
if len(ext)>1 and ext[1:].lower() in fontexts:
fontfiles.add(f)
else:
fontpaths = X11FontDirectories
# check for OS X & load its fonts if present
if sys.platform == 'darwin':
for f in OSXInstalledFonts(fontext=fontext):
fontfiles.add(f)
for f in get_fontconfig_fonts(fontext):
fontfiles.add(f)
elif isinstance(fontpaths, six.string_types):
fontpaths = [fontpaths]
for path in fontpaths:
files = list_fonts(path, fontexts)
for fname in files:
fontfiles.add(os.path.abspath(fname))
return [fname for fname in fontfiles if os.path.exists(fname)]
def weight_as_number(weight):
"""
Return the weight property as a numeric value. String values
are converted to their corresponding numeric value.
"""
if isinstance(weight, six.string_types):
try:
weight = weight_dict[weight.lower()]
except KeyError:
weight = 400
elif weight in range(100, 1000, 100):
pass
else:
raise ValueError('weight not a valid integer')
return weight
class FontEntry(object):
"""
A class for storing Font properties. It is used when populating
the font lookup dictionary.
"""
def __init__(self,
fname ='',
name ='',
style ='normal',
variant='normal',
weight ='normal',
stretch='normal',
size ='medium',
):
self.fname = fname
self.name = name
self.style = style
self.variant = variant
self.weight = weight
self.stretch = stretch
try:
self.size = str(float(size))
except ValueError:
self.size = size
def __repr__(self):
return "<Font '%s' (%s) %s %s %s %s>" % (
self.name, os.path.basename(self.fname), self.style, self.variant,
self.weight, self.stretch)
def ttfFontProperty(font):
"""
    A function for populating a :class:`FontEntry` by extracting
information from the TrueType font file.
*font* is a :class:`FT2Font` instance.
"""
name = font.family_name
# Styles are: italic, oblique, and normal (default)
sfnt = font.get_sfnt()
sfnt2 = sfnt.get((1,0,0,2))
sfnt4 = sfnt.get((1,0,0,4))
if sfnt2:
sfnt2 = sfnt2.decode('macroman').lower()
else:
sfnt2 = ''
if sfnt4:
sfnt4 = sfnt4.decode('macroman').lower()
else:
sfnt4 = ''
if sfnt4.find('oblique') >= 0:
style = 'oblique'
elif sfnt4.find('italic') >= 0:
style = 'italic'
elif sfnt2.find('regular') >= 0:
style = 'normal'
elif font.style_flags & ft2font.ITALIC:
style = 'italic'
else:
style = 'normal'
# Variants are: small-caps and normal (default)
# !!!! Untested
if name.lower() in ['capitals', 'small-caps']:
variant = 'small-caps'
else:
variant = 'normal'
# Weights are: 100, 200, 300, 400 (normal: default), 500 (medium),
# 600 (semibold, demibold), 700 (bold), 800 (heavy), 900 (black)
# lighter and bolder are also allowed.
weight = None
for w in six.iterkeys(weight_dict):
if sfnt4.find(w) >= 0:
weight = w
break
if not weight:
if font.style_flags & ft2font.BOLD:
weight = 700
else:
weight = 400
weight = weight_as_number(weight)
# Stretch can be absolute and relative
# Absolute stretches are: ultra-condensed, extra-condensed, condensed,
# semi-condensed, normal, semi-expanded, expanded, extra-expanded,
# and ultra-expanded.
# Relative stretches are: wider, narrower
# Child value is: inherit
if (sfnt4.find('narrow') >= 0 or sfnt4.find('condensed') >= 0 or
sfnt4.find('cond') >= 0):
stretch = 'condensed'
elif sfnt4.find('demi cond') >= 0:
stretch = 'semi-condensed'
elif sfnt4.find('wide') >= 0 or sfnt4.find('expanded') >= 0:
stretch = 'expanded'
else:
stretch = 'normal'
# Sizes can be absolute and relative.
# Absolute sizes are: xx-small, x-small, small, medium, large, x-large,
# and xx-large.
# Relative sizes are: larger, smaller
# Length value is an absolute font size, e.g., 12pt
# Percentage values are in 'em's. Most robust specification.
# !!!! Incomplete
if font.scalable:
size = 'scalable'
else:
size = str(float(font.get_fontsize()))
# !!!! Incomplete
size_adjust = None
return FontEntry(font.fname, name, style, variant, weight, stretch, size)
def afmFontProperty(fontpath, font):
"""
    A function for populating a :class:`FontEntry` instance by
extracting information from the AFM font file.
*font* is a class:`AFM` instance.
"""
name = font.get_familyname()
fontname = font.get_fontname().lower()
# Styles are: italic, oblique, and normal (default)
if font.get_angle() != 0 or name.lower().find('italic') >= 0:
style = 'italic'
elif name.lower().find('oblique') >= 0:
style = 'oblique'
else:
style = 'normal'
# Variants are: small-caps and normal (default)
# !!!! Untested
if name.lower() in ['capitals', 'small-caps']:
variant = 'small-caps'
else:
variant = 'normal'
# Weights are: 100, 200, 300, 400 (normal: default), 500 (medium),
# 600 (semibold, demibold), 700 (bold), 800 (heavy), 900 (black)
# lighter and bolder are also allowed.
weight = weight_as_number(font.get_weight().lower())
# Stretch can be absolute and relative
# Absolute stretches are: ultra-condensed, extra-condensed, condensed,
# semi-condensed, normal, semi-expanded, expanded, extra-expanded,
# and ultra-expanded.
# Relative stretches are: wider, narrower
# Child value is: inherit
if fontname.find('narrow') >= 0 or fontname.find('condensed') >= 0 or \
fontname.find('cond') >= 0:
stretch = 'condensed'
elif fontname.find('demi cond') >= 0:
stretch = 'semi-condensed'
elif fontname.find('wide') >= 0 or fontname.find('expanded') >= 0:
stretch = 'expanded'
else:
stretch = 'normal'
# Sizes can be absolute and relative.
# Absolute sizes are: xx-small, x-small, small, medium, large, x-large,
# and xx-large.
# Relative sizes are: larger, smaller
# Length value is an absolute font size, e.g., 12pt
# Percentage values are in 'em's. Most robust specification.
# All AFM fonts are apparently scalable.
size = 'scalable'
# !!!! Incomplete
size_adjust = None
return FontEntry(fontpath, name, style, variant, weight, stretch, size)
def createFontList(fontfiles, fontext='ttf'):
"""
A function to create a font lookup list. The default is to create
a list of TrueType fonts. An AFM font list can optionally be
created.
"""
fontlist = []
# Add fonts from list of known font files.
seen = {}
for fpath in fontfiles:
verbose.report('createFontDict: %s' % (fpath), 'debug')
fname = os.path.split(fpath)[1]
if fname in seen:
continue
else:
seen[fname] = 1
if fontext == 'afm':
try:
fh = open(fpath, 'rb')
except:
verbose.report("Could not open font file %s" % fpath)
continue
try:
try:
font = afm.AFM(fh)
finally:
fh.close()
except RuntimeError:
verbose.report("Could not parse font file %s" % fpath)
continue
try:
prop = afmFontProperty(fpath, font)
except KeyError:
continue
else:
try:
font = ft2font.FT2Font(fpath)
except RuntimeError:
verbose.report("Could not open font file %s" % fpath)
continue
except UnicodeError:
verbose.report("Cannot handle unicode filenames")
# print >> sys.stderr, 'Bad file is', fpath
continue
except IOError:
verbose.report("IO error - cannot open font file %s" % fpath)
continue
try:
prop = ttfFontProperty(font)
except (KeyError, RuntimeError, ValueError):
continue
fontlist.append(prop)
return fontlist
class FontProperties(object):
"""
A class for storing and manipulating font properties.
The font properties are those described in the `W3C Cascading
Style Sheet, Level 1
<http://www.w3.org/TR/1998/REC-CSS2-19980512/>`_ font
specification. The six properties are:
- family: A list of font names in decreasing order of priority.
The items may include a generic font family name, either
'serif', 'sans-serif', 'cursive', 'fantasy', or 'monospace'.
In that case, the actual font to be used will be looked up
from the associated rcParam in :file:`matplotlibrc`.
- style: Either 'normal', 'italic' or 'oblique'.
- variant: Either 'normal' or 'small-caps'.
- stretch: A numeric value in the range 0-1000 or one of
'ultra-condensed', 'extra-condensed', 'condensed',
'semi-condensed', 'normal', 'semi-expanded', 'expanded',
'extra-expanded' or 'ultra-expanded'
- weight: A numeric value in the range 0-1000 or one of
'ultralight', 'light', 'normal', 'regular', 'book', 'medium',
'roman', 'semibold', 'demibold', 'demi', 'bold', 'heavy',
'extra bold', 'black'
    - size: Either a relative value of 'xx-small', 'x-small',
'small', 'medium', 'large', 'x-large', 'xx-large' or an
absolute font size, e.g., 12
The default font property for TrueType fonts (as specified in the
default :file:`matplotlibrc` file) is::
sans-serif, normal, normal, normal, normal, scalable.
Alternatively, a font may be specified using an absolute path to a
.ttf file, by using the *fname* kwarg.
The preferred usage of font sizes is to use the relative values,
e.g., 'large', instead of absolute font sizes, e.g., 12. This
approach allows all text sizes to be made larger or smaller based
on the font manager's default font size.
This class will also accept a `fontconfig
<https://www.freedesktop.org/wiki/Software/fontconfig/>`_ pattern, if it is
the only argument provided. See the documentation on `fontconfig patterns
<https://www.freedesktop.org/software/fontconfig/fontconfig-user.html>`_.
This support does not require fontconfig to be installed. We are merely
borrowing its pattern syntax for use here.
Note that matplotlib's internal font manager and fontconfig use a
different algorithm to lookup fonts, so the results of the same pattern
may be different in matplotlib than in other applications that use
fontconfig.
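    An illustrative sketch of both construction styles (the property values
    shown are arbitrary examples)::

        prop = FontProperties(family='serif', style='italic', weight='bold',
                              size='large')
        prop_from_pattern = FontProperties('serif:italic:bold')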
"""
def __init__(self,
family = None,
style = None,
variant= None,
weight = None,
stretch= None,
size = None,
fname = None, # if this is set, it's a hardcoded filename to use
_init = None # used only by copy()
):
self._family = _normalize_font_family(rcParams['font.family'])
self._slant = rcParams['font.style']
self._variant = rcParams['font.variant']
self._weight = rcParams['font.weight']
self._stretch = rcParams['font.stretch']
self._size = rcParams['font.size']
self._file = None
# This is used only by copy()
if _init is not None:
self.__dict__.update(_init.__dict__)
return
if is_string_like(family):
# Treat family as a fontconfig pattern if it is the only
# parameter provided.
if (style is None and
variant is None and
weight is None and
stretch is None and
size is None and
fname is None):
self.set_fontconfig_pattern(family)
return
self.set_family(family)
self.set_style(style)
self.set_variant(variant)
self.set_weight(weight)
self.set_stretch(stretch)
self.set_file(fname)
self.set_size(size)
def _parse_fontconfig_pattern(self, pattern):
return parse_fontconfig_pattern(pattern)
def __hash__(self):
l = (tuple(self.get_family()),
self.get_slant(),
self.get_variant(),
self.get_weight(),
self.get_stretch(),
self.get_size_in_points(),
self.get_file())
return hash(l)
def __eq__(self, other):
return hash(self) == hash(other)
def __ne__(self, other):
return hash(self) != hash(other)
def __str__(self):
return self.get_fontconfig_pattern()
def get_family(self):
"""
Return a list of font names that comprise the font family.
"""
return self._family
def get_name(self):
"""
Return the name of the font that best matches the font
properties.
"""
return get_font(findfont(self)).family_name
def get_style(self):
"""
Return the font style. Values are: 'normal', 'italic' or
'oblique'.
"""
return self._slant
get_slant = get_style
def get_variant(self):
"""
Return the font variant. Values are: 'normal' or
'small-caps'.
"""
return self._variant
def get_weight(self):
"""
        Return the font weight.  Options are: a numeric value in the
range 0-1000 or one of 'light', 'normal', 'regular', 'book',
'medium', 'roman', 'semibold', 'demibold', 'demi', 'bold',
'heavy', 'extra bold', 'black'
"""
return self._weight
def get_stretch(self):
"""
Return the font stretch or width. Options are: 'ultra-condensed',
'extra-condensed', 'condensed', 'semi-condensed', 'normal',
'semi-expanded', 'expanded', 'extra-expanded', 'ultra-expanded'.
"""
return self._stretch
def get_size(self):
"""
Return the font size.
"""
return self._size
def get_size_in_points(self):
return self._size
def get_file(self):
"""
Return the filename of the associated font.
"""
return self._file
def get_fontconfig_pattern(self):
"""
Get a fontconfig pattern suitable for looking up the font as
specified with fontconfig's ``fc-match`` utility.
See the documentation on `fontconfig patterns
<https://www.freedesktop.org/software/fontconfig/fontconfig-user.html>`_.
This support does not require fontconfig to be installed or
support for it to be enabled. We are merely borrowing its
pattern syntax for use here.
"""
return generate_fontconfig_pattern(self)
def set_family(self, family):
"""
Change the font family. May be either an alias (generic name
        in CSS parlance), such as: 'serif', 'sans-serif', 'cursive',
'fantasy', or 'monospace', a real font name or a list of real
font names. Real font names are not supported when
`text.usetex` is `True`.
"""
if family is None:
family = rcParams['font.family']
self._family = _normalize_font_family(family)
set_name = set_family
def set_style(self, style):
"""
Set the font style. Values are: 'normal', 'italic' or
'oblique'.
"""
if style is None:
style = rcParams['font.style']
if style not in ('normal', 'italic', 'oblique'):
raise ValueError("style must be normal, italic or oblique")
self._slant = style
set_slant = set_style
def set_variant(self, variant):
"""
Set the font variant. Values are: 'normal' or 'small-caps'.
"""
if variant is None:
variant = rcParams['font.variant']
if variant not in ('normal', 'small-caps'):
raise ValueError("variant must be normal or small-caps")
self._variant = variant
def set_weight(self, weight):
"""
Set the font weight. May be either a numeric value in the
range 0-1000 or one of 'ultralight', 'light', 'normal',
'regular', 'book', 'medium', 'roman', 'semibold', 'demibold',
'demi', 'bold', 'heavy', 'extra bold', 'black'
"""
if weight is None:
weight = rcParams['font.weight']
try:
weight = int(weight)
if weight < 0 or weight > 1000:
raise ValueError()
except ValueError:
if weight not in weight_dict:
raise ValueError("weight is invalid")
weight = weight_dict[weight]
self._weight = weight
def set_stretch(self, stretch):
"""
Set the font stretch or width. Options are: 'ultra-condensed',
'extra-condensed', 'condensed', 'semi-condensed', 'normal',
'semi-expanded', 'expanded', 'extra-expanded' or
'ultra-expanded', or a numeric value in the range 0-1000.
"""
if stretch is None:
stretch = rcParams['font.stretch']
try:
stretch = int(stretch)
if stretch < 0 or stretch > 1000:
raise ValueError()
except ValueError:
if stretch not in stretch_dict:
raise ValueError("stretch is invalid")
self._stretch = stretch
def set_size(self, size):
"""
        Set the font size.  Either a relative value of 'xx-small',
'x-small', 'small', 'medium', 'large', 'x-large', 'xx-large'
or an absolute font size, e.g., 12.
"""
if size is None:
size = rcParams['font.size']
try:
size = float(size)
except ValueError:
try:
scale = font_scalings[size]
except KeyError:
raise ValueError(
"Size is invalid. Valid font size are " + ", ".join(
str(i) for i in font_scalings.keys()))
else:
size = scale * FontManager.get_default_size()
self._size = size
def set_file(self, file):
"""
Set the filename of the fontfile to use. In this case, all
other properties will be ignored.
"""
self._file = file
def set_fontconfig_pattern(self, pattern):
"""
Set the properties by parsing a fontconfig *pattern*.
See the documentation on `fontconfig patterns
<https://www.freedesktop.org/software/fontconfig/fontconfig-user.html>`_.
This support does not require fontconfig to be installed or
support for it to be enabled. We are merely borrowing its
pattern syntax for use here.
"""
for key, val in six.iteritems(self._parse_fontconfig_pattern(pattern)):
if type(val) == list:
getattr(self, "set_" + key)(val[0])
else:
getattr(self, "set_" + key)(val)
def copy(self):
"""Return a deep copy of self"""
return FontProperties(_init=self)
def ttfdict_to_fnames(d):
"""
flatten a ttfdict to all the filenames it contains
"""
fnames = []
for named in six.itervalues(d):
for styled in six.itervalues(named):
for variantd in six.itervalues(styled):
for weightd in six.itervalues(variantd):
for stretchd in six.itervalues(weightd):
for fname in six.itervalues(stretchd):
fnames.append(fname)
return fnames
def pickle_dump(data, filename):
"""
    Equivalent to pickle.dump(data, open(filename, 'wb'))
but closes the file to prevent filehandle leakage.
"""
with open(filename, 'wb') as fh:
pickle.dump(data, fh)
def pickle_load(filename):
"""
    Equivalent to pickle.load(open(filename, 'rb'))
but closes the file to prevent filehandle leakage.
"""
with open(filename, 'rb') as fh:
data = pickle.load(fh)
return data
def _normalize_font_family(family):
if is_string_like(family):
family = [six.text_type(family)]
elif isinstance(family, Iterable):
family = [six.text_type(f) for f in family]
return family
class TempCache(object):
"""
A class to store temporary caches that are (a) not saved to disk
and (b) invalidated whenever certain font-related
rcParams---namely the family lookup lists---are changed or the
font cache is reloaded. This avoids the expensive linear search
through all fonts every time a font is looked up.
"""
    # A list of rcparam names that, when changed, invalidate this
# cache.
invalidating_rcparams = (
'font.serif', 'font.sans-serif', 'font.cursive', 'font.fantasy',
'font.monospace')
def __init__(self):
self._lookup_cache = {}
self._last_rcParams = self.make_rcparams_key()
def make_rcparams_key(self):
return [id(fontManager)] + [
rcParams[param] for param in self.invalidating_rcparams]
def get(self, prop):
key = self.make_rcparams_key()
if key != self._last_rcParams:
self._lookup_cache = {}
self._last_rcParams = key
return self._lookup_cache.get(prop)
def set(self, prop, value):
key = self.make_rcparams_key()
if key != self._last_rcParams:
self._lookup_cache = {}
self._last_rcParams = key
self._lookup_cache[prop] = value
class FontManager(object):
"""
On import, the :class:`FontManager` singleton instance creates a
list of TrueType fonts based on the font properties: name, style,
variant, weight, stretch, and size. The :meth:`findfont` method
does a nearest neighbor search to find the font that most closely
matches the specification. If no good enough match is found, a
default font is returned.
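    An illustrative lookup through the module-level singleton (the returned
    path depends on the fonts available locally)::

        from matplotlib.font_manager import fontManager, FontProperties
        path = fontManager.findfont(FontProperties(family='sans-serif'))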
"""
# Increment this version number whenever the font cache data
    # format or behavior has changed and requires existing font
# cache files to be rebuilt.
__version__ = 200
def __init__(self, size=None, weight='normal'):
self._version = self.__version__
self.__default_weight = weight
self.default_size = size
paths = [os.path.join(rcParams['datapath'], 'fonts', 'ttf'),
os.path.join(rcParams['datapath'], 'fonts', 'afm'),
os.path.join(rcParams['datapath'], 'fonts', 'pdfcorefonts')]
# Create list of font paths
for pathname in ['TTFPATH', 'AFMPATH']:
if pathname in os.environ:
ttfpath = os.environ[pathname]
if ttfpath.find(';') >= 0: #win32 style
paths.extend(ttfpath.split(';'))
elif ttfpath.find(':') >= 0: # unix style
paths.extend(ttfpath.split(':'))
else:
paths.append(ttfpath)
verbose.report('font search path %s'%(str(paths)))
# Load TrueType fonts and create font dictionary.
self.ttffiles = findSystemFonts(paths) + findSystemFonts()
self.defaultFamily = {
'ttf': 'DejaVu Sans',
'afm': 'Helvetica'}
self.defaultFont = {}
for fname in self.ttffiles:
verbose.report('trying fontname %s' % fname, 'debug')
            if fname.lower().find('dejavusans.ttf') >= 0:
self.defaultFont['ttf'] = fname
break
else:
# use anything
self.defaultFont['ttf'] = self.ttffiles[0]
self.ttflist = createFontList(self.ttffiles)
self.afmfiles = findSystemFonts(paths, fontext='afm') + \
findSystemFonts(fontext='afm')
self.afmlist = createFontList(self.afmfiles, fontext='afm')
if len(self.afmfiles):
self.defaultFont['afm'] = self.afmfiles[0]
else:
self.defaultFont['afm'] = None
def get_default_weight(self):
"""
Return the default font weight.
"""
return self.__default_weight
@staticmethod
def get_default_size():
"""
Return the default font size.
"""
return rcParams['font.size']
def set_default_weight(self, weight):
"""
Set the default font weight. The initial value is 'normal'.
"""
self.__default_weight = weight
def update_fonts(self, filenames):
"""
Update the font dictionary with new font files.
Currently not implemented.
"""
# !!!! Needs implementing
raise NotImplementedError
# Each of the scoring functions below should return a value between
# 0.0 (perfect match) and 1.0 (terrible match)
def score_family(self, families, family2):
"""
Returns a match score between the list of font families in
*families* and the font family name *family2*.
An exact match at the head of the list returns 0.0.
A match further down the list will return between 0 and 1.
No match will return 1.0.
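        For example, with ``families=['Arial', 'Times New Roman']`` the step
        is ``1 / 2 = 0.5``; an exact match on ``'arial'`` scores
        ``0 * 0.5 = 0.0`` and a match on the second entry scores
        ``1 * 0.5 = 0.5``.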
"""
if not isinstance(families, (list, tuple)):
families = [families]
elif len(families) == 0:
return 1.0
family2 = family2.lower()
step = 1 / len(families)
for i, family1 in enumerate(families):
family1 = family1.lower()
if family1 in font_family_aliases:
if family1 in ('sans', 'sans serif'):
family1 = 'sans-serif'
options = rcParams['font.' + family1]
options = [x.lower() for x in options]
if family2 in options:
idx = options.index(family2)
return (i + (idx / len(options))) * step
elif family1 == family2:
# The score should be weighted by where in the
# list the font was found.
return i * step
return 1.0
def score_style(self, style1, style2):
"""
Returns a match score between *style1* and *style2*.
An exact match returns 0.0.
A match between 'italic' and 'oblique' returns 0.1.
No match returns 1.0.
"""
if style1 == style2:
return 0.0
elif style1 in ('italic', 'oblique') and \
style2 in ('italic', 'oblique'):
return 0.1
return 1.0
def score_variant(self, variant1, variant2):
"""
Returns a match score between *variant1* and *variant2*.
An exact match returns 0.0, otherwise 1.0.
"""
if variant1 == variant2:
return 0.0
else:
return 1.0
def score_stretch(self, stretch1, stretch2):
"""
Returns a match score between *stretch1* and *stretch2*.
The result is the absolute value of the difference between the
CSS numeric values of *stretch1* and *stretch2*, normalized
between 0.0 and 1.0.
"""
try:
stretchval1 = int(stretch1)
except ValueError:
stretchval1 = stretch_dict.get(stretch1, 500)
try:
stretchval2 = int(stretch2)
except ValueError:
stretchval2 = stretch_dict.get(stretch2, 500)
return abs(stretchval1 - stretchval2) / 1000.0
def score_weight(self, weight1, weight2):
"""
Returns a match score between *weight1* and *weight2*.
The result is the absolute value of the difference between the
CSS numeric values of *weight1* and *weight2*, normalized
between 0.0 and 1.0.
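        For example, 'bold' (700) against 'normal' (400) scores
        abs(700 - 400) / 1000.0 = 0.3.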
"""
try:
weightval1 = int(weight1)
except ValueError:
weightval1 = weight_dict.get(weight1, 500)
try:
weightval2 = int(weight2)
except ValueError:
weightval2 = weight_dict.get(weight2, 500)
return abs(weightval1 - weightval2) / 1000.0
def score_size(self, size1, size2):
"""
Returns a match score between *size1* and *size2*.
If *size2* (the size specified in the font file) is 'scalable', this
function always returns 0.0, since any font size can be generated.
Otherwise, the result is the absolute distance between *size1* and
*size2*, normalized so that the usual range of font sizes (6pt -
72pt) will lie between 0.0 and 1.0.
"""
if size2 == 'scalable':
return 0.0
        # The size value should already have been resolved to a numeric point size.
try:
sizeval1 = float(size1)
except ValueError:
            sizeval1 = self.default_size * font_scalings[size1]
try:
sizeval2 = float(size2)
except ValueError:
return 1.0
return abs(sizeval1 - sizeval2) / 72.0
def findfont(self, prop, fontext='ttf', directory=None,
fallback_to_default=True, rebuild_if_missing=True):
"""
Search the font list for the font that most closely matches
the :class:`FontProperties` *prop*.
:meth:`findfont` performs a nearest neighbor search. Each
font is given a similarity score to the target font
        properties.  The first font with the best (i.e. lowest) score is
returned. If no matches below a certain threshold are found,
the default font (usually DejaVu Sans) is returned.
        If `directory` is specified, the search will only return fonts from the
given directory (or subdirectory of that directory).
The result is cached, so subsequent lookups don't have to
perform the O(n) nearest neighbor search.
        If `fallback_to_default` is True, it will fall back to the default
font family (usually "DejaVu Sans" or "Helvetica") if
the first lookup hard-fails.
See the `W3C Cascading Style Sheet, Level 1
<http://www.w3.org/TR/1998/REC-CSS2-19980512/>`_ documentation
for a description of the font finding algorithm.
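        A minimal usage sketch (the family is an arbitrary example and the
        returned filename depends on the local font installation)::

            fname = fontManager.findfont(
                FontProperties(family='monospace'), fontext='ttf')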
"""
if not isinstance(prop, FontProperties):
prop = FontProperties(prop)
fname = prop.get_file()
if fname is not None:
verbose.report('findfont returning %s'%fname, 'debug')
return fname
if fontext == 'afm':
fontlist = self.afmlist
else:
fontlist = self.ttflist
if directory is None:
cached = _lookup_cache[fontext].get(prop)
if cached is not None:
return cached
best_score = 1e64
best_font = None
for font in fontlist:
if (directory is not None and
os.path.commonprefix([font.fname, directory]) != directory):
continue
# Matching family should have highest priority, so it is multiplied
# by 10.0
score = \
self.score_family(prop.get_family(), font.name) * 10.0 + \
self.score_style(prop.get_style(), font.style) + \
self.score_variant(prop.get_variant(), font.variant) + \
self.score_weight(prop.get_weight(), font.weight) + \
self.score_stretch(prop.get_stretch(), font.stretch) + \
self.score_size(prop.get_size(), font.size)
if score < best_score:
best_score = score
best_font = font
if score == 0:
break
if best_font is None or best_score >= 10.0:
if fallback_to_default:
warnings.warn(
'findfont: Font family %s not found. Falling back to %s' %
(prop.get_family(), self.defaultFamily[fontext]))
default_prop = prop.copy()
default_prop.set_family(self.defaultFamily[fontext])
return self.findfont(default_prop, fontext, directory, False)
else:
# This is a hard fail -- we can't find anything reasonable,
                # so just return the DejaVuSans.ttf
warnings.warn(
'findfont: Could not match %s. Returning %s' %
(prop, self.defaultFont[fontext]),
UserWarning)
result = self.defaultFont[fontext]
else:
verbose.report(
'findfont: Matching %s to %s (%s) with score of %f' %
(prop, best_font.name, repr(best_font.fname), best_score))
result = best_font.fname
if not os.path.isfile(result):
if rebuild_if_missing:
verbose.report(
'findfont: Found a missing font file. Rebuilding cache.')
_rebuild()
return fontManager.findfont(
prop, fontext, directory, True, False)
else:
raise ValueError("No valid font could be found")
if directory is None:
_lookup_cache[fontext].set(prop, result)
return result
_is_opentype_cff_font_cache = {}
def is_opentype_cff_font(filename):
"""
Returns True if the given font is a Postscript Compact Font Format
Font embedded in an OpenType wrapper. Used by the PostScript and
    PDF backends that cannot subset these fonts.
"""
if os.path.splitext(filename)[1].lower() == '.otf':
result = _is_opentype_cff_font_cache.get(filename)
if result is None:
with open(filename, 'rb') as fd:
tag = fd.read(4)
result = (tag == b'OTTO')
_is_opentype_cff_font_cache[filename] = result
return result
return False
fontManager = None
_fmcache = None
get_font = lru_cache(64)(ft2font.FT2Font)
# The experimental fontconfig-based backend.
if USE_FONTCONFIG and sys.platform != 'win32':
import re
def fc_match(pattern, fontext):
fontexts = get_fontext_synonyms(fontext)
ext = "." + fontext
try:
pipe = subprocess.Popen(
['fc-match', '-s', '--format=%{file}\\n', pattern],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
output = pipe.communicate()[0]
except (OSError, IOError):
return None
# The bulk of the output from fc-list is ascii, so we keep the
# result in bytes and parse it as bytes, until we extract the
# filename, which is in sys.filesystemencoding().
if pipe.returncode == 0:
for fname in output.split(b'\n'):
try:
fname = six.text_type(fname, sys.getfilesystemencoding())
except UnicodeDecodeError:
continue
if os.path.splitext(fname)[1][1:] in fontexts:
return fname
return None
_fc_match_cache = {}
def findfont(prop, fontext='ttf'):
if not is_string_like(prop):
prop = prop.get_fontconfig_pattern()
cached = _fc_match_cache.get(prop)
if cached is not None:
return cached
result = fc_match(prop, fontext)
if result is None:
result = fc_match(':', fontext)
_fc_match_cache[prop] = result
return result
else:
_fmcache = None
cachedir = get_cachedir()
if cachedir is not None:
if six.PY3:
_fmcache = os.path.join(cachedir, 'fontList.py3k.cache')
else:
_fmcache = os.path.join(cachedir, 'fontList.cache')
fontManager = None
_lookup_cache = {
'ttf': TempCache(),
'afm': TempCache()
}
def _rebuild():
global fontManager
fontManager = FontManager()
if _fmcache:
with cbook.Locked(cachedir):
pickle_dump(fontManager, _fmcache)
verbose.report("generated new fontManager")
if _fmcache:
try:
fontManager = pickle_load(_fmcache)
if (not hasattr(fontManager, '_version') or
fontManager._version != FontManager.__version__):
_rebuild()
else:
fontManager.default_size = None
verbose.report("Using fontManager instance from %s" % _fmcache)
except cbook.Locked.TimeoutError:
raise
except:
_rebuild()
else:
_rebuild()
def findfont(prop, **kw):
global fontManager
font = fontManager.findfont(prop, **kw)
return font
| gpl-3.0 |
toastedcornflakes/scikit-learn | examples/neural_networks/plot_mnist_filters.py | 57 | 2195 | """
=====================================
Visualization of MLP weights on MNIST
=====================================
Sometimes looking at the learned coefficients of a neural network can provide
insight into the learning behavior. For example if weights look unstructured,
maybe some were not used at all, or if very large coefficients exist, maybe
regularization was too low or the learning rate too high.
This example shows how to plot some of the first layer weights in an
MLPClassifier trained on the MNIST dataset.
The input data consists of 28x28 pixel handwritten digits, leading to 784
features in the dataset. Therefore the first layer weight matrix have the shape
(784, hidden_layer_sizes[0]). We can therefore visualize a single column of
the weight matrix as a 28x28 pixel image.
To make the example run faster, we use very few hidden units, and train only
for a very short time. Training longer would result in weights with a much
smoother spatial appearance.
"""
print(__doc__)
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_mldata
from sklearn.neural_network import MLPClassifier
mnist = fetch_mldata("MNIST original")
# rescale the data, use the traditional train/test split
X, y = mnist.data / 255., mnist.target
X_train, X_test = X[:60000], X[60000:]
y_train, y_test = y[:60000], y[60000:]
# mlp = MLPClassifier(hidden_layer_sizes=(100, 100), max_iter=400, alpha=1e-4,
# algorithm='sgd', verbose=10, tol=1e-4, random_state=1)
mlp = MLPClassifier(hidden_layer_sizes=(50,), max_iter=10, alpha=1e-4,
algorithm='sgd', verbose=10, tol=1e-4, random_state=1,
learning_rate_init=.1)
mlp.fit(X_train, y_train)
print("Training set score: %f" % mlp.score(X_train, y_train))
print("Test set score: %f" % mlp.score(X_test, y_test))
fig, axes = plt.subplots(4, 4)
# use global min / max to ensure all weights are shown on the same scale
vmin, vmax = mlp.coefs_[0].min(), mlp.coefs_[0].max()
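# Each column of the (784, 50) first-layer weight matrix holds one hidden
# unit's incoming weights; reshaping it to 28x28 shows it in pixel space.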
for coef, ax in zip(mlp.coefs_[0].T, axes.ravel()):
ax.matshow(coef.reshape(28, 28), cmap=plt.cm.gray, vmin=.5 * vmin,
vmax=.5 * vmax)
ax.set_xticks(())
ax.set_yticks(())
plt.show()
| bsd-3-clause |
luo66/scikit-learn | examples/neighbors/plot_approximate_nearest_neighbors_scalability.py | 225 | 5719 | """
============================================
Scalability of Approximate Nearest Neighbors
============================================
This example studies the scalability profile of approximate 10-neighbors
queries using the LSHForest with ``n_estimators=20`` and ``n_candidates=200``
when varying the number of samples in the dataset.
The first plot demonstrates the relationship between query time and index size
of LSHForest. Query time is compared with the brute force method in exact
nearest neighbor search for the same index sizes. The brute force queries have a
very predictable linear scalability with the index (full scan). The LSHForest index
has a sub-linear scalability profile but can be slower for small datasets.
The second plot shows the speedup when using approximate queries vs brute force
exact queries. The speedup tends to increase with the dataset size but should
reach a plateau typically when doing queries on datasets with millions of
samples and a few hundred dimensions. Higher dimensional datasets tend to
benefit more from LSHForest indexing.
The break even point (speedup = 1) depends on the dimensionality and structure
of the indexed data and the parameters of the LSHForest index.
The precision of approximate queries should decrease slowly with the dataset
size. The speed of the decrease depends mostly on the LSHForest parameters and
the dimensionality of the data.
"""
from __future__ import division
print(__doc__)
# Authors: Maheshakya Wijewardena <[email protected]>
# Olivier Grisel <[email protected]>
#
# License: BSD 3 clause
###############################################################################
import time
import numpy as np
from sklearn.datasets.samples_generator import make_blobs
from sklearn.neighbors import LSHForest
from sklearn.neighbors import NearestNeighbors
import matplotlib.pyplot as plt
# Parameters of the study
n_samples_min = int(1e3)
n_samples_max = int(1e5)
n_features = 100
n_centers = 100
n_queries = 100
n_steps = 6
n_iter = 5
# Initialize the range of `n_samples`
n_samples_values = np.logspace(np.log10(n_samples_min),
np.log10(n_samples_max),
n_steps).astype(np.int)
# Generate some structured data
rng = np.random.RandomState(42)
all_data, _ = make_blobs(n_samples=n_samples_max + n_queries,
n_features=n_features, centers=n_centers, shuffle=True,
random_state=0)
queries = all_data[:n_queries]
index_data = all_data[n_queries:]
# Metrics to collect for the plots
average_times_exact = []
average_times_approx = []
std_times_approx = []
accuracies = []
std_accuracies = []
average_speedups = []
std_speedups = []
# Calculate the average query time
for n_samples in n_samples_values:
X = index_data[:n_samples]
    # Initialize LSHForest for 10-neighbors queries
lshf = LSHForest(n_estimators=20, n_candidates=200,
n_neighbors=10).fit(X)
nbrs = NearestNeighbors(algorithm='brute', metric='cosine',
n_neighbors=10).fit(X)
time_approx = []
time_exact = []
accuracy = []
for i in range(n_iter):
# pick one query at random to study query time variability in LSHForest
query = queries[rng.randint(0, n_queries)]
t0 = time.time()
exact_neighbors = nbrs.kneighbors(query, return_distance=False)
time_exact.append(time.time() - t0)
t0 = time.time()
approx_neighbors = lshf.kneighbors(query, return_distance=False)
time_approx.append(time.time() - t0)
accuracy.append(np.in1d(approx_neighbors, exact_neighbors).mean())
average_time_exact = np.mean(time_exact)
average_time_approx = np.mean(time_approx)
speedup = np.array(time_exact) / np.array(time_approx)
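    # A speedup > 1 means the approximate LSHForest query was faster than the
    # brute force query for this index size.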
average_speedup = np.mean(speedup)
mean_accuracy = np.mean(accuracy)
std_accuracy = np.std(accuracy)
print("Index size: %d, exact: %0.3fs, LSHF: %0.3fs, speedup: %0.1f, "
"accuracy: %0.2f +/-%0.2f" %
(n_samples, average_time_exact, average_time_approx, average_speedup,
mean_accuracy, std_accuracy))
accuracies.append(mean_accuracy)
std_accuracies.append(std_accuracy)
average_times_exact.append(average_time_exact)
average_times_approx.append(average_time_approx)
std_times_approx.append(np.std(time_approx))
average_speedups.append(average_speedup)
std_speedups.append(np.std(speedup))
# Plot average query time against n_samples
plt.figure()
plt.errorbar(n_samples_values, average_times_approx, yerr=std_times_approx,
fmt='o-', c='r', label='LSHForest')
plt.plot(n_samples_values, average_times_exact, c='b',
label="NearestNeighbors(algorithm='brute', metric='cosine')")
plt.legend(loc='upper left', fontsize='small')
plt.ylim(0, None)
plt.ylabel("Average query time in seconds")
plt.xlabel("n_samples")
plt.grid(which='both')
plt.title("Impact of index size on response time for "
          "10-nearest-neighbors queries")
# Plot average query speedup versus index size
plt.figure()
plt.errorbar(n_samples_values, average_speedups, yerr=std_speedups,
fmt='o-', c='r')
plt.ylim(0, None)
plt.ylabel("Average speedup")
plt.xlabel("n_samples")
plt.grid(which='both')
plt.title("Speedup of the approximate NN queries vs brute force")
# Plot average precision versus index size
plt.figure()
plt.errorbar(n_samples_values, accuracies, std_accuracies, fmt='o-', c='c')
plt.ylim(0, 1.1)
plt.ylabel("precision@10")
plt.xlabel("n_samples")
plt.grid(which='both')
plt.title("precision of 10-nearest-neighbors queries with index size")
plt.show()
| bsd-3-clause |
bsipocz/scikit-image | doc/examples/plot_blob.py | 18 | 2796 | """
==============
Blob Detection
==============
Blobs are bright on dark or dark on bright regions in an image. In
this example, blobs are detected using 3 algorithms. The image used
in this case is the Hubble eXtreme Deep Field. Each bright dot in the
image is a star or a galaxy.
Laplacian of Gaussian (LoG)
-----------------------------
This is the most accurate and slowest approach. It computes the Laplacian
of Gaussian images with successively increasing standard deviation and
stacks them up in a cube. Blobs are local maxima in this cube. Detecting
larger blobs is especially slow because of larger kernel sizes during
convolution. Only bright blobs on dark backgrounds are detected. See
:py:meth:`skimage.feature.blob_log` for usage.
Difference of Gaussian (DoG)
----------------------------
This is a faster approximation of LoG approach. In this case the image is
blurred with increasing standard deviations and the difference between
two successively blurred images is stacked up in a cube. This method
suffers from the same disadvantage as the LoG approach for detecting larger
blobs. Blobs are again assumed to be bright on dark. See
:py:meth:`skimage.feature.blob_dog` for usage.
Determinant of Hessian (DoH)
----------------------------
This is the fastest approach. It detects blobs by finding maxima in the
matrix of the Determinant of Hessian of the image. The detection speed is
independent of the size of blobs as internally the implementation uses
box filters instead of convolutions. Bright on dark as well as dark on
bright blobs are detected. The downside is that small blobs (<3px) are not
detected accurately. See :py:meth:`skimage.feature.blob_doh` for usage.
"""
from matplotlib import pyplot as plt
from skimage import data
from skimage.feature import blob_dog, blob_log, blob_doh
from math import sqrt
from skimage.color import rgb2gray
image = data.hubble_deep_field()[0:500, 0:500]
image_gray = rgb2gray(image)
blobs_log = blob_log(image_gray, max_sigma=30, num_sigma=10, threshold=.1)
# Compute radii in the 3rd column.
blobs_log[:, 2] = blobs_log[:, 2] * sqrt(2)
blobs_dog = blob_dog(image_gray, max_sigma=30, threshold=.1)
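# As for LoG, the DoG blob radius is approximately sqrt(2) times the sigma
# stored in the 3rd column.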
blobs_dog[:, 2] = blobs_dog[:, 2] * sqrt(2)
blobs_doh = blob_doh(image_gray, max_sigma=30, threshold=.01)
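# For DoH blobs the 3rd column (sigma) already approximates the radius,
# so no conversion is applied.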
blobs_list = [blobs_log, blobs_dog, blobs_doh]
colors = ['yellow', 'lime', 'red']
titles = ['Laplacian of Gaussian', 'Difference of Gaussian',
'Determinant of Hessian']
sequence = zip(blobs_list, colors, titles)
for blobs, color, title in sequence:
fig, ax = plt.subplots(1, 1)
ax.set_title(title)
ax.imshow(image, interpolation='nearest')
for blob in blobs:
y, x, r = blob
c = plt.Circle((x, y), r, color=color, linewidth=2, fill=False)
ax.add_patch(c)
plt.show()
| bsd-3-clause |
lepmik/nest-simulator | examples/nest/plot_tsodyks_depr_fac.py | 17 | 1135 | # -*- coding: utf-8 -*-
#
# plot_tsodyks_depr_fac.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
from scipy import *
from matplotlib.pylab import *
from matplotlib.mlab import *
def plot_spikes():
dt = 0.1 # time resolution
nbins = 1000
N = 500 # number of neurons
vm = load('voltmeter-4-0.dat')
figure(1)
clf()
plot(vm[:, 0], vm[:, 1], 'r')
xlabel('time / ms')
ylabel('$V_m [mV]$')
savefig('test_tsodyks_depressing.png')
plot_spikes()
show()
| gpl-2.0 |
Adai0808/scikit-learn | sklearn/utils/fixes.py | 133 | 12882 | """Compatibility fixes for older version of python, numpy and scipy
If you add content to this file, please give the version of the package
at which the fix is no longer needed.
"""
# Authors: Emmanuelle Gouillart <[email protected]>
# Gael Varoquaux <[email protected]>
# Fabian Pedregosa <[email protected]>
# Lars Buitinck
#
# License: BSD 3 clause
import inspect
import warnings
import sys
import functools
import os
import errno
import numpy as np
import scipy.sparse as sp
import scipy
def _parse_version(version_string):
version = []
for x in version_string.split('.'):
try:
version.append(int(x))
except ValueError:
# x may be of the form dev-1ea1592
version.append(x)
return tuple(version)
np_version = _parse_version(np.__version__)
sp_version = _parse_version(scipy.__version__)
try:
from scipy.special import expit # SciPy >= 0.10
with np.errstate(invalid='ignore', over='ignore'):
if np.isnan(expit(1000)): # SciPy < 0.14
raise ImportError("no stable expit in scipy.special")
except ImportError:
def expit(x, out=None):
"""Logistic sigmoid function, ``1 / (1 + exp(-x))``.
See sklearn.utils.extmath.log_logistic for the log of this function.
"""
if out is None:
out = np.empty(np.atleast_1d(x).shape, dtype=np.float64)
out[:] = x
# 1 / (1 + exp(-x)) = (1 + tanh(x / 2)) / 2
# This way of computing the logistic is both fast and stable.
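        # The identity holds because tanh(x / 2) = (exp(x) - 1) / (exp(x) + 1),
        # so (1 + tanh(x / 2)) / 2 = exp(x) / (exp(x) + 1) = 1 / (1 + exp(-x)).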
out *= .5
np.tanh(out, out)
out += 1
out *= .5
return out.reshape(np.shape(x))
# little dance to see if np.copy has an 'order' keyword argument
if 'order' in inspect.getargspec(np.copy)[0]:
def safe_copy(X):
# Copy, but keep the order
return np.copy(X, order='K')
else:
# Before an 'order' argument was introduced, numpy wouldn't muck with
# the ordering
safe_copy = np.copy
try:
if (not np.allclose(np.divide(.4, 1, casting="unsafe"),
np.divide(.4, 1, casting="unsafe", dtype=np.float))
or not np.allclose(np.divide(.4, 1), .4)):
raise TypeError('Divide not working with dtype: '
'https://github.com/numpy/numpy/issues/3484')
divide = np.divide
except TypeError:
# Compat for old versions of np.divide that do not provide support for
# the dtype args
def divide(x1, x2, out=None, dtype=None):
out_orig = out
if out is None:
out = np.asarray(x1, dtype=dtype)
if out is x1:
out = x1.copy()
else:
if out is not x1:
out[:] = x1
if dtype is not None and out.dtype != dtype:
out = out.astype(dtype)
out /= x2
if out_orig is None and np.isscalar(x1):
out = np.asscalar(out)
return out
try:
np.array(5).astype(float, copy=False)
except TypeError:
# Compat where astype accepted no copy argument
def astype(array, dtype, copy=True):
if not copy and array.dtype == dtype:
return array
return array.astype(dtype)
else:
astype = np.ndarray.astype
try:
with warnings.catch_warnings(record=True):
# Don't raise the numpy deprecation warnings that appear in
# 1.9, but avoid Python bug due to simplefilter('ignore')
warnings.simplefilter('always')
sp.csr_matrix([1.0, 2.0, 3.0]).max(axis=0)
except (TypeError, AttributeError):
    # in scipy < 0.14.0, sparse matrix min/max doesn't accept an `axis` argument
# the following code is taken from the scipy 0.14 codebase
def _minor_reduce(X, ufunc):
major_index = np.flatnonzero(np.diff(X.indptr))
if X.data.size == 0 and major_index.size == 0:
# Numpy < 1.8.0 don't handle empty arrays in reduceat
value = np.zeros_like(X.data)
else:
value = ufunc.reduceat(X.data, X.indptr[major_index])
return major_index, value
def _min_or_max_axis(X, axis, min_or_max):
N = X.shape[axis]
if N == 0:
raise ValueError("zero-size array to reduction operation")
M = X.shape[1 - axis]
mat = X.tocsc() if axis == 0 else X.tocsr()
mat.sum_duplicates()
major_index, value = _minor_reduce(mat, min_or_max)
not_full = np.diff(mat.indptr)[major_index] < N
value[not_full] = min_or_max(value[not_full], 0)
mask = value != 0
major_index = np.compress(mask, major_index)
value = np.compress(mask, value)
from scipy.sparse import coo_matrix
if axis == 0:
res = coo_matrix((value, (np.zeros(len(value)), major_index)),
dtype=X.dtype, shape=(1, M))
else:
res = coo_matrix((value, (major_index, np.zeros(len(value)))),
dtype=X.dtype, shape=(M, 1))
return res.A.ravel()
def _sparse_min_or_max(X, axis, min_or_max):
if axis is None:
if 0 in X.shape:
raise ValueError("zero-size array to reduction operation")
zero = X.dtype.type(0)
if X.nnz == 0:
return zero
m = min_or_max.reduce(X.data.ravel())
if X.nnz != np.product(X.shape):
m = min_or_max(zero, m)
return m
if axis < 0:
axis += 2
if (axis == 0) or (axis == 1):
return _min_or_max_axis(X, axis, min_or_max)
else:
raise ValueError("invalid axis, use 0 for rows, or 1 for columns")
def sparse_min_max(X, axis):
return (_sparse_min_or_max(X, axis, np.minimum),
_sparse_min_or_max(X, axis, np.maximum))
else:
def sparse_min_max(X, axis):
return (X.min(axis=axis).toarray().ravel(),
X.max(axis=axis).toarray().ravel())
try:
from numpy import argpartition
except ImportError:
# numpy.argpartition was introduced in v 1.8.0
def argpartition(a, kth, axis=-1, kind='introselect', order=None):
return np.argsort(a, axis=axis, order=order)
try:
from itertools import combinations_with_replacement
except ImportError:
# Backport of itertools.combinations_with_replacement for Python 2.6,
# from Python 3.4 documentation (http://tinyurl.com/comb-w-r), copyright
# Python Software Foundation (https://docs.python.org/3/license.html)
def combinations_with_replacement(iterable, r):
# combinations_with_replacement('ABC', 2) --> AA AB AC BB BC CC
pool = tuple(iterable)
n = len(pool)
if not n and r:
return
indices = [0] * r
yield tuple(pool[i] for i in indices)
while True:
for i in reversed(range(r)):
if indices[i] != n - 1:
break
else:
return
indices[i:] = [indices[i] + 1] * (r - i)
yield tuple(pool[i] for i in indices)
try:
from numpy import isclose
except ImportError:
def isclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False):
"""
Returns a boolean array where two arrays are element-wise equal within
a tolerance.
This function was added to numpy v1.7.0, and the version you are
running has been backported from numpy v1.8.1. See its documentation
for more details.
"""
def within_tol(x, y, atol, rtol):
with np.errstate(invalid='ignore'):
result = np.less_equal(abs(x - y), atol + rtol * abs(y))
if np.isscalar(a) and np.isscalar(b):
result = bool(result)
return result
x = np.array(a, copy=False, subok=True, ndmin=1)
y = np.array(b, copy=False, subok=True, ndmin=1)
xfin = np.isfinite(x)
yfin = np.isfinite(y)
if all(xfin) and all(yfin):
return within_tol(x, y, atol, rtol)
else:
finite = xfin & yfin
cond = np.zeros_like(finite, subok=True)
# Since we're using boolean indexing, x & y must be the same shape.
# Ideally, we'd just do x, y = broadcast_arrays(x, y). It's in
# lib.stride_tricks, though, so we can't import it here.
x = x * np.ones_like(cond)
y = y * np.ones_like(cond)
# Avoid subtraction with infinite/nan values...
cond[finite] = within_tol(x[finite], y[finite], atol, rtol)
# Check for equality of infinite values...
cond[~finite] = (x[~finite] == y[~finite])
if equal_nan:
# Make NaN == NaN
cond[np.isnan(x) & np.isnan(y)] = True
return cond
if np_version < (1, 7):
# Prior to 1.7.0, np.frombuffer wouldn't work for empty first arg.
def frombuffer_empty(buf, dtype):
if len(buf) == 0:
return np.empty(0, dtype=dtype)
else:
return np.frombuffer(buf, dtype=dtype)
else:
frombuffer_empty = np.frombuffer
if np_version < (1, 8):
def in1d(ar1, ar2, assume_unique=False, invert=False):
# Backport of numpy function in1d 1.8.1 to support numpy 1.6.2
# Ravel both arrays, behavior for the first array could be different
ar1 = np.asarray(ar1).ravel()
ar2 = np.asarray(ar2).ravel()
# This code is significantly faster when the condition is satisfied.
if len(ar2) < 10 * len(ar1) ** 0.145:
if invert:
mask = np.ones(len(ar1), dtype=np.bool)
for a in ar2:
mask &= (ar1 != a)
else:
mask = np.zeros(len(ar1), dtype=np.bool)
for a in ar2:
mask |= (ar1 == a)
return mask
# Otherwise use sorting
if not assume_unique:
ar1, rev_idx = np.unique(ar1, return_inverse=True)
ar2 = np.unique(ar2)
ar = np.concatenate((ar1, ar2))
# We need this to be a stable sort, so always use 'mergesort'
# here. The values from the first array should always come before
# the values from the second array.
order = ar.argsort(kind='mergesort')
sar = ar[order]
if invert:
bool_ar = (sar[1:] != sar[:-1])
else:
bool_ar = (sar[1:] == sar[:-1])
flag = np.concatenate((bool_ar, [invert]))
indx = order.argsort(kind='mergesort')[:len(ar1)]
if assume_unique:
return flag[indx]
else:
return flag[indx][rev_idx]
else:
from numpy import in1d
if sp_version < (0, 15):
# Backport fix for scikit-learn/scikit-learn#2986 / scipy/scipy#4142
from ._scipy_sparse_lsqr_backport import lsqr as sparse_lsqr
else:
from scipy.sparse.linalg import lsqr as sparse_lsqr
if sys.version_info < (2, 7, 0):
# partial cannot be pickled in Python 2.6
# http://bugs.python.org/issue1398
class partial(object):
def __init__(self, func, *args, **keywords):
functools.update_wrapper(self, func)
self.func = func
self.args = args
self.keywords = keywords
def __call__(self, *args, **keywords):
args = self.args + args
kwargs = self.keywords.copy()
kwargs.update(keywords)
return self.func(*args, **kwargs)
else:
from functools import partial
if np_version < (1, 6, 2):
# Allow bincount to accept empty arrays
# https://github.com/numpy/numpy/commit/40f0844846a9d7665616b142407a3d74cb65a040
def bincount(x, weights=None, minlength=None):
if len(x) > 0:
return np.bincount(x, weights, minlength)
else:
if minlength is None:
minlength = 0
minlength = np.asscalar(np.asarray(minlength, dtype=np.intp))
return np.zeros(minlength, dtype=np.intp)
else:
from numpy import bincount
if 'exist_ok' in inspect.getargspec(os.makedirs).args:
makedirs = os.makedirs
else:
def makedirs(name, mode=0o777, exist_ok=False):
"""makedirs(name [, mode=0o777][, exist_ok=False])
Super-mkdir; create a leaf directory and all intermediate ones. Works
like mkdir, except that any intermediate path segment (not just the
rightmost) will be created if it does not exist. If the target
directory already exists, raise an OSError if exist_ok is False.
Otherwise no exception is raised. This is recursive.
"""
try:
os.makedirs(name, mode=mode)
except OSError as e:
if (not exist_ok or e.errno != errno.EEXIST
or not os.path.isdir(name)):
raise
| bsd-3-clause |
andrewnc/scikit-learn | sklearn/metrics/tests/test_pairwise.py | 71 | 25104 | import numpy as np
from numpy import linalg
from scipy.sparse import dok_matrix, csr_matrix, issparse
from scipy.spatial.distance import cosine, cityblock, minkowski, wminkowski
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import assert_true
from sklearn.externals.six import iteritems
from sklearn.metrics.pairwise import euclidean_distances
from sklearn.metrics.pairwise import manhattan_distances
from sklearn.metrics.pairwise import linear_kernel
from sklearn.metrics.pairwise import chi2_kernel, additive_chi2_kernel
from sklearn.metrics.pairwise import polynomial_kernel
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.metrics.pairwise import sigmoid_kernel
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.metrics.pairwise import cosine_distances
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.metrics.pairwise import pairwise_distances_argmin_min
from sklearn.metrics.pairwise import pairwise_distances_argmin
from sklearn.metrics.pairwise import pairwise_kernels
from sklearn.metrics.pairwise import PAIRWISE_KERNEL_FUNCTIONS
from sklearn.metrics.pairwise import PAIRWISE_DISTANCE_FUNCTIONS
from sklearn.metrics.pairwise import PAIRED_DISTANCES
from sklearn.metrics.pairwise import check_pairwise_arrays
from sklearn.metrics.pairwise import check_paired_arrays
from sklearn.metrics.pairwise import _parallel_pairwise
from sklearn.metrics.pairwise import paired_distances
from sklearn.metrics.pairwise import paired_euclidean_distances
from sklearn.metrics.pairwise import paired_manhattan_distances
from sklearn.preprocessing import normalize
def test_pairwise_distances():
# Test the pairwise_distance helper function.
rng = np.random.RandomState(0)
# Euclidean distance should be equivalent to calling the function.
X = rng.random_sample((5, 4))
S = pairwise_distances(X, metric="euclidean")
S2 = euclidean_distances(X)
assert_array_almost_equal(S, S2)
# Euclidean distance, with Y != X.
Y = rng.random_sample((2, 4))
S = pairwise_distances(X, Y, metric="euclidean")
S2 = euclidean_distances(X, Y)
assert_array_almost_equal(S, S2)
# Test with tuples as X and Y
X_tuples = tuple([tuple([v for v in row]) for row in X])
Y_tuples = tuple([tuple([v for v in row]) for row in Y])
S2 = pairwise_distances(X_tuples, Y_tuples, metric="euclidean")
assert_array_almost_equal(S, S2)
# "cityblock" uses sklearn metric, cityblock (function) is scipy.spatial.
S = pairwise_distances(X, metric="cityblock")
S2 = pairwise_distances(X, metric=cityblock)
assert_equal(S.shape[0], S.shape[1])
assert_equal(S.shape[0], X.shape[0])
assert_array_almost_equal(S, S2)
# The manhattan metric should be equivalent to cityblock.
S = pairwise_distances(X, Y, metric="manhattan")
S2 = pairwise_distances(X, Y, metric=cityblock)
assert_equal(S.shape[0], X.shape[0])
assert_equal(S.shape[1], Y.shape[0])
assert_array_almost_equal(S, S2)
# Low-level function for manhattan can divide in blocks to avoid
# using too much memory during the broadcasting
S3 = manhattan_distances(X, Y, size_threshold=10)
assert_array_almost_equal(S, S3)
# Test cosine as a string metric versus cosine callable
# "cosine" uses sklearn metric, cosine (function) is scipy.spatial
S = pairwise_distances(X, Y, metric="cosine")
S2 = pairwise_distances(X, Y, metric=cosine)
assert_equal(S.shape[0], X.shape[0])
assert_equal(S.shape[1], Y.shape[0])
assert_array_almost_equal(S, S2)
# Test with sparse X and Y,
# currently only supported for Euclidean, L1 and cosine.
X_sparse = csr_matrix(X)
Y_sparse = csr_matrix(Y)
S = pairwise_distances(X_sparse, Y_sparse, metric="euclidean")
S2 = euclidean_distances(X_sparse, Y_sparse)
assert_array_almost_equal(S, S2)
S = pairwise_distances(X_sparse, Y_sparse, metric="cosine")
S2 = cosine_distances(X_sparse, Y_sparse)
assert_array_almost_equal(S, S2)
S = pairwise_distances(X_sparse, Y_sparse.tocsc(), metric="manhattan")
S2 = manhattan_distances(X_sparse.tobsr(), Y_sparse.tocoo())
assert_array_almost_equal(S, S2)
S2 = manhattan_distances(X, Y)
assert_array_almost_equal(S, S2)
# Test with scipy.spatial.distance metric, with a kwd
kwds = {"p": 2.0}
S = pairwise_distances(X, Y, metric="minkowski", **kwds)
S2 = pairwise_distances(X, Y, metric=minkowski, **kwds)
assert_array_almost_equal(S, S2)
# same with Y = None
kwds = {"p": 2.0}
S = pairwise_distances(X, metric="minkowski", **kwds)
S2 = pairwise_distances(X, metric=minkowski, **kwds)
assert_array_almost_equal(S, S2)
# Test that scipy distance metrics throw an error if sparse matrix given
assert_raises(TypeError, pairwise_distances, X_sparse, metric="minkowski")
assert_raises(TypeError, pairwise_distances, X, Y_sparse,
metric="minkowski")
    # Test that a ValueError is raised if the metric is unknown
assert_raises(ValueError, pairwise_distances, X, Y, metric="blah")
def test_pairwise_precomputed():
for func in [pairwise_distances, pairwise_kernels]:
# Test correct shape
assert_raises_regexp(ValueError, '.* shape .*',
func, np.zeros((5, 3)), metric='precomputed')
# with two args
assert_raises_regexp(ValueError, '.* shape .*',
func, np.zeros((5, 3)), np.zeros((4, 4)),
metric='precomputed')
        # even if shape[1] agrees (although this second arg is spurious)
assert_raises_regexp(ValueError, '.* shape .*',
func, np.zeros((5, 3)), np.zeros((4, 3)),
metric='precomputed')
# Test not copied (if appropriate dtype)
S = np.zeros((5, 5))
S2 = func(S, metric="precomputed")
assert_true(S is S2)
# with two args
S = np.zeros((5, 3))
S2 = func(S, np.zeros((3, 3)), metric="precomputed")
assert_true(S is S2)
# Test always returns float dtype
S = func(np.array([[1]], dtype='int'), metric='precomputed')
assert_equal('f', S.dtype.kind)
# Test converts list to array-like
S = func([[1]], metric='precomputed')
assert_true(isinstance(S, np.ndarray))
def check_pairwise_parallel(func, metric, kwds):
rng = np.random.RandomState(0)
for make_data in (np.array, csr_matrix):
X = make_data(rng.random_sample((5, 4)))
Y = make_data(rng.random_sample((3, 4)))
try:
S = func(X, metric=metric, n_jobs=1, **kwds)
except (TypeError, ValueError) as exc:
# Not all metrics support sparse input
# ValueError may be triggered by bad callable
if make_data is csr_matrix:
assert_raises(type(exc), func, X, metric=metric,
n_jobs=2, **kwds)
continue
else:
raise
S2 = func(X, metric=metric, n_jobs=2, **kwds)
assert_array_almost_equal(S, S2)
S = func(X, Y, metric=metric, n_jobs=1, **kwds)
S2 = func(X, Y, metric=metric, n_jobs=2, **kwds)
assert_array_almost_equal(S, S2)
def test_pairwise_parallel():
wminkowski_kwds = {'w': np.arange(1, 5).astype('double'), 'p': 1}
metrics = [(pairwise_distances, 'euclidean', {}),
(pairwise_distances, wminkowski, wminkowski_kwds),
(pairwise_distances, 'wminkowski', wminkowski_kwds),
(pairwise_kernels, 'polynomial', {'degree': 1}),
(pairwise_kernels, callable_rbf_kernel, {'gamma': .1}),
]
for func, metric, kwds in metrics:
yield check_pairwise_parallel, func, metric, kwds
def test_pairwise_callable_nonstrict_metric():
    # pairwise_distances should allow callable metric where metric(x, x) != 0
# Knowing that the callable is a strict metric would allow the diagonal to
# be left uncalculated and set to 0.
assert_equal(pairwise_distances([[1]], metric=lambda x, y: 5)[0, 0], 5)
def callable_rbf_kernel(x, y, **kwds):
# Callable version of pairwise.rbf_kernel.
K = rbf_kernel(np.atleast_2d(x), np.atleast_2d(y), **kwds)
return K
def test_pairwise_kernels():
# Test the pairwise_kernels helper function.
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((2, 4))
# Test with all metrics that should be in PAIRWISE_KERNEL_FUNCTIONS.
test_metrics = ["rbf", "sigmoid", "polynomial", "linear", "chi2",
"additive_chi2"]
for metric in test_metrics:
function = PAIRWISE_KERNEL_FUNCTIONS[metric]
# Test with Y=None
K1 = pairwise_kernels(X, metric=metric)
K2 = function(X)
assert_array_almost_equal(K1, K2)
# Test with Y=Y
K1 = pairwise_kernels(X, Y=Y, metric=metric)
K2 = function(X, Y=Y)
assert_array_almost_equal(K1, K2)
# Test with tuples as X and Y
X_tuples = tuple([tuple([v for v in row]) for row in X])
Y_tuples = tuple([tuple([v for v in row]) for row in Y])
K2 = pairwise_kernels(X_tuples, Y_tuples, metric=metric)
assert_array_almost_equal(K1, K2)
# Test with sparse X and Y
X_sparse = csr_matrix(X)
Y_sparse = csr_matrix(Y)
if metric in ["chi2", "additive_chi2"]:
# these don't support sparse matrices yet
assert_raises(ValueError, pairwise_kernels,
X_sparse, Y=Y_sparse, metric=metric)
continue
K1 = pairwise_kernels(X_sparse, Y=Y_sparse, metric=metric)
assert_array_almost_equal(K1, K2)
# Test with a callable function, with given keywords.
metric = callable_rbf_kernel
kwds = {}
kwds['gamma'] = 0.1
K1 = pairwise_kernels(X, Y=Y, metric=metric, **kwds)
K2 = rbf_kernel(X, Y=Y, **kwds)
assert_array_almost_equal(K1, K2)
# callable function, X=Y
K1 = pairwise_kernels(X, Y=X, metric=metric, **kwds)
K2 = rbf_kernel(X, Y=X, **kwds)
assert_array_almost_equal(K1, K2)
def test_pairwise_kernels_filter_param():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((2, 4))
K = rbf_kernel(X, Y, gamma=0.1)
params = {"gamma": 0.1, "blabla": ":)"}
K2 = pairwise_kernels(X, Y, metric="rbf", filter_params=True, **params)
assert_array_almost_equal(K, K2)
assert_raises(TypeError, pairwise_kernels, X, Y, "rbf", **params)
def test_paired_distances():
    # Test the paired_distances helper function.
rng = np.random.RandomState(0)
# Euclidean distance should be equivalent to calling the function.
X = rng.random_sample((5, 4))
# Euclidean distance, with Y != X.
Y = rng.random_sample((5, 4))
for metric, func in iteritems(PAIRED_DISTANCES):
S = paired_distances(X, Y, metric=metric)
S2 = func(X, Y)
assert_array_almost_equal(S, S2)
S3 = func(csr_matrix(X), csr_matrix(Y))
assert_array_almost_equal(S, S3)
if metric in PAIRWISE_DISTANCE_FUNCTIONS:
            # Check that the pairwise_distances implementation
# gives the same value
distances = PAIRWISE_DISTANCE_FUNCTIONS[metric](X, Y)
distances = np.diag(distances)
assert_array_almost_equal(distances, S)
# Check the callable implementation
S = paired_distances(X, Y, metric='manhattan')
S2 = paired_distances(X, Y, metric=lambda x, y: np.abs(x - y).sum(axis=0))
assert_array_almost_equal(S, S2)
    # Test that a ValueError is raised when the lengths of X and Y differ
Y = rng.random_sample((3, 4))
assert_raises(ValueError, paired_distances, X, Y)
def test_pairwise_distances_argmin_min():
# Check pairwise minimum distances computation for any metric
X = [[0], [1]]
Y = [[-1], [2]]
Xsp = dok_matrix(X)
Ysp = csr_matrix(Y, dtype=np.float32)
# euclidean metric
D, E = pairwise_distances_argmin_min(X, Y, metric="euclidean")
D2 = pairwise_distances_argmin(X, Y, metric="euclidean")
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(D2, [0, 1])
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(E, [1., 1.])
# sparse matrix case
Dsp, Esp = pairwise_distances_argmin_min(Xsp, Ysp, metric="euclidean")
assert_array_equal(Dsp, D)
assert_array_equal(Esp, E)
# We don't want np.matrix here
assert_equal(type(Dsp), np.ndarray)
assert_equal(type(Esp), np.ndarray)
# Non-euclidean sklearn metric
D, E = pairwise_distances_argmin_min(X, Y, metric="manhattan")
D2 = pairwise_distances_argmin(X, Y, metric="manhattan")
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(D2, [0, 1])
assert_array_almost_equal(E, [1., 1.])
D, E = pairwise_distances_argmin_min(Xsp, Ysp, metric="manhattan")
D2 = pairwise_distances_argmin(Xsp, Ysp, metric="manhattan")
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(E, [1., 1.])
# Non-euclidean Scipy distance (callable)
D, E = pairwise_distances_argmin_min(X, Y, metric=minkowski,
metric_kwargs={"p": 2})
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(E, [1., 1.])
# Non-euclidean Scipy distance (string)
D, E = pairwise_distances_argmin_min(X, Y, metric="minkowski",
metric_kwargs={"p": 2})
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(E, [1., 1.])
# Compare with naive implementation
rng = np.random.RandomState(0)
X = rng.randn(97, 149)
Y = rng.randn(111, 149)
dist = pairwise_distances(X, Y, metric="manhattan")
dist_orig_ind = dist.argmin(axis=0)
dist_orig_val = dist[dist_orig_ind, range(len(dist_orig_ind))]
dist_chunked_ind, dist_chunked_val = pairwise_distances_argmin_min(
X, Y, axis=0, metric="manhattan", batch_size=50)
np.testing.assert_almost_equal(dist_orig_ind, dist_chunked_ind, decimal=7)
np.testing.assert_almost_equal(dist_orig_val, dist_chunked_val, decimal=7)
def test_euclidean_distances():
# Check the pairwise Euclidean distances computation
X = [[0]]
Y = [[1], [2]]
D = euclidean_distances(X, Y)
assert_array_almost_equal(D, [[1., 2.]])
X = csr_matrix(X)
Y = csr_matrix(Y)
D = euclidean_distances(X, Y)
assert_array_almost_equal(D, [[1., 2.]])
rng = np.random.RandomState(0)
X = rng.random_sample((10, 4))
Y = rng.random_sample((20, 4))
X_norm_sq = (X ** 2).sum(axis=1).reshape(1, -1)
Y_norm_sq = (Y ** 2).sum(axis=1).reshape(1, -1)
# check that we still get the right answers with {X,Y}_norm_squared
D1 = euclidean_distances(X, Y)
D2 = euclidean_distances(X, Y, X_norm_squared=X_norm_sq)
D3 = euclidean_distances(X, Y, Y_norm_squared=Y_norm_sq)
D4 = euclidean_distances(X, Y, X_norm_squared=X_norm_sq,
Y_norm_squared=Y_norm_sq)
assert_array_almost_equal(D2, D1)
assert_array_almost_equal(D3, D1)
assert_array_almost_equal(D4, D1)
# check we get the wrong answer with wrong {X,Y}_norm_squared
X_norm_sq *= 0.5
Y_norm_sq *= 0.5
wrong_D = euclidean_distances(X, Y,
X_norm_squared=np.zeros_like(X_norm_sq),
Y_norm_squared=np.zeros_like(Y_norm_sq))
assert_greater(np.max(np.abs(wrong_D - D1)), .01)
# Paired distances
def test_paired_euclidean_distances():
# Check the paired Euclidean distances computation
X = [[0], [0]]
Y = [[1], [2]]
D = paired_euclidean_distances(X, Y)
assert_array_almost_equal(D, [1., 2.])
def test_paired_manhattan_distances():
# Check the paired manhattan distances computation
X = [[0], [0]]
Y = [[1], [2]]
D = paired_manhattan_distances(X, Y)
assert_array_almost_equal(D, [1., 2.])
def test_chi_square_kernel():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((10, 4))
K_add = additive_chi2_kernel(X, Y)
gamma = 0.1
K = chi2_kernel(X, Y, gamma=gamma)
assert_equal(K.dtype, np.float)
for i, x in enumerate(X):
for j, y in enumerate(Y):
chi2 = -np.sum((x - y) ** 2 / (x + y))
chi2_exp = np.exp(gamma * chi2)
assert_almost_equal(K_add[i, j], chi2)
assert_almost_equal(K[i, j], chi2_exp)
# check diagonal is ones for data with itself
K = chi2_kernel(Y)
assert_array_equal(np.diag(K), 1)
# check off-diagonal is < 1 but > 0:
assert_true(np.all(K > 0))
assert_true(np.all(K - np.diag(np.diag(K)) < 1))
# check that float32 is preserved
X = rng.random_sample((5, 4)).astype(np.float32)
Y = rng.random_sample((10, 4)).astype(np.float32)
K = chi2_kernel(X, Y)
assert_equal(K.dtype, np.float32)
# check integer type gets converted,
# check that zeros are handled
X = rng.random_sample((10, 4)).astype(np.int32)
K = chi2_kernel(X, X)
assert_true(np.isfinite(K).all())
assert_equal(K.dtype, np.float)
# check that kernel of similar things is greater than dissimilar ones
X = [[.3, .7], [1., 0]]
Y = [[0, 1], [.9, .1]]
K = chi2_kernel(X, Y)
assert_greater(K[0, 0], K[0, 1])
assert_greater(K[1, 1], K[1, 0])
# test negative input
assert_raises(ValueError, chi2_kernel, [[0, -1]])
assert_raises(ValueError, chi2_kernel, [[0, -1]], [[-1, -1]])
assert_raises(ValueError, chi2_kernel, [[0, 1]], [[-1, -1]])
# different n_features in X and Y
assert_raises(ValueError, chi2_kernel, [[0, 1]], [[.2, .2, .6]])
# sparse matrices
assert_raises(ValueError, chi2_kernel, csr_matrix(X), csr_matrix(Y))
assert_raises(ValueError, additive_chi2_kernel,
csr_matrix(X), csr_matrix(Y))
def test_kernel_symmetry():
# Valid kernels should be symmetric
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
for kernel in (linear_kernel, polynomial_kernel, rbf_kernel,
sigmoid_kernel, cosine_similarity):
K = kernel(X, X)
assert_array_almost_equal(K, K.T, 15)
def test_kernel_sparse():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
X_sparse = csr_matrix(X)
for kernel in (linear_kernel, polynomial_kernel, rbf_kernel,
sigmoid_kernel, cosine_similarity):
K = kernel(X, X)
K2 = kernel(X_sparse, X_sparse)
assert_array_almost_equal(K, K2)
def test_linear_kernel():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
K = linear_kernel(X, X)
# the diagonal elements of a linear kernel are their squared norm
assert_array_almost_equal(K.flat[::6], [linalg.norm(x) ** 2 for x in X])
def test_rbf_kernel():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
K = rbf_kernel(X, X)
# the diagonal elements of a rbf kernel are 1
assert_array_almost_equal(K.flat[::6], np.ones(5))
def test_cosine_similarity_sparse_output():
# Test if cosine_similarity correctly produces sparse output.
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((3, 4))
Xcsr = csr_matrix(X)
Ycsr = csr_matrix(Y)
K1 = cosine_similarity(Xcsr, Ycsr, dense_output=False)
assert_true(issparse(K1))
K2 = pairwise_kernels(Xcsr, Y=Ycsr, metric="cosine")
assert_array_almost_equal(K1.todense(), K2)
def test_cosine_similarity():
# Test the cosine_similarity.
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((3, 4))
Xcsr = csr_matrix(X)
Ycsr = csr_matrix(Y)
for X_, Y_ in ((X, None), (X, Y),
(Xcsr, None), (Xcsr, Ycsr)):
        # Test that the cosine kernel is equal to a linear kernel when data
# has been previously normalized by L2-norm.
K1 = pairwise_kernels(X_, Y=Y_, metric="cosine")
X_ = normalize(X_)
if Y_ is not None:
Y_ = normalize(Y_)
K2 = pairwise_kernels(X_, Y=Y_, metric="linear")
assert_array_almost_equal(K1, K2)
def test_check_dense_matrices():
# Ensure that pairwise array check works for dense matrices.
# Check that if XB is None, XB is returned as reference to XA
XA = np.resize(np.arange(40), (5, 8))
XA_checked, XB_checked = check_pairwise_arrays(XA, None)
assert_true(XA_checked is XB_checked)
assert_array_equal(XA, XA_checked)
def test_check_XB_returned():
# Ensure that if XA and XB are given correctly, they return as equal.
# Check that if XB is not None, it is returned equal.
# Note that the second dimension of XB is the same as XA.
XA = np.resize(np.arange(40), (5, 8))
XB = np.resize(np.arange(32), (4, 8))
XA_checked, XB_checked = check_pairwise_arrays(XA, XB)
assert_array_equal(XA, XA_checked)
assert_array_equal(XB, XB_checked)
XB = np.resize(np.arange(40), (5, 8))
XA_checked, XB_checked = check_paired_arrays(XA, XB)
assert_array_equal(XA, XA_checked)
assert_array_equal(XB, XB_checked)
def test_check_different_dimensions():
# Ensure an error is raised if the dimensions are different.
XA = np.resize(np.arange(45), (5, 9))
XB = np.resize(np.arange(32), (4, 8))
assert_raises(ValueError, check_pairwise_arrays, XA, XB)
XB = np.resize(np.arange(4 * 9), (4, 9))
assert_raises(ValueError, check_paired_arrays, XA, XB)
def test_check_invalid_dimensions():
# Ensure an error is raised on 1D input arrays.
# The modified tests are not 1D. In the old test, the array was internally
    # converted to 2D anyway
XA = np.arange(45).reshape(9, 5)
XB = np.arange(32).reshape(4, 8)
assert_raises(ValueError, check_pairwise_arrays, XA, XB)
XA = np.arange(45).reshape(9, 5)
XB = np.arange(32).reshape(4, 8)
assert_raises(ValueError, check_pairwise_arrays, XA, XB)
def test_check_sparse_arrays():
# Ensures that checks return valid sparse matrices.
rng = np.random.RandomState(0)
XA = rng.random_sample((5, 4))
XA_sparse = csr_matrix(XA)
XB = rng.random_sample((5, 4))
XB_sparse = csr_matrix(XB)
XA_checked, XB_checked = check_pairwise_arrays(XA_sparse, XB_sparse)
# compare their difference because testing csr matrices for
# equality with '==' does not work as expected.
assert_true(issparse(XA_checked))
assert_equal(abs(XA_sparse - XA_checked).sum(), 0)
assert_true(issparse(XB_checked))
assert_equal(abs(XB_sparse - XB_checked).sum(), 0)
XA_checked, XA_2_checked = check_pairwise_arrays(XA_sparse, XA_sparse)
assert_true(issparse(XA_checked))
assert_equal(abs(XA_sparse - XA_checked).sum(), 0)
assert_true(issparse(XA_2_checked))
assert_equal(abs(XA_2_checked - XA_checked).sum(), 0)
def tuplify(X):
# Turns a numpy matrix (any n-dimensional array) into tuples.
s = X.shape
if len(s) > 1:
# Tuplify each sub-array in the input.
return tuple(tuplify(row) for row in X)
else:
# Single dimension input, just return tuple of contents.
return tuple(r for r in X)
def test_check_tuple_input():
# Ensures that checks return valid tuples.
rng = np.random.RandomState(0)
XA = rng.random_sample((5, 4))
XA_tuples = tuplify(XA)
XB = rng.random_sample((5, 4))
XB_tuples = tuplify(XB)
XA_checked, XB_checked = check_pairwise_arrays(XA_tuples, XB_tuples)
assert_array_equal(XA_tuples, XA_checked)
assert_array_equal(XB_tuples, XB_checked)
def test_check_preserve_type():
# Ensures that type float32 is preserved.
XA = np.resize(np.arange(40), (5, 8)).astype(np.float32)
XB = np.resize(np.arange(40), (5, 8)).astype(np.float32)
XA_checked, XB_checked = check_pairwise_arrays(XA, None)
assert_equal(XA_checked.dtype, np.float32)
# both float32
XA_checked, XB_checked = check_pairwise_arrays(XA, XB)
assert_equal(XA_checked.dtype, np.float32)
assert_equal(XB_checked.dtype, np.float32)
# mismatched A
XA_checked, XB_checked = check_pairwise_arrays(XA.astype(np.float),
XB)
assert_equal(XA_checked.dtype, np.float)
assert_equal(XB_checked.dtype, np.float)
# mismatched B
XA_checked, XB_checked = check_pairwise_arrays(XA,
XB.astype(np.float))
assert_equal(XA_checked.dtype, np.float)
assert_equal(XB_checked.dtype, np.float)
| bsd-3-clause |
sinkpoint/dipy | doc/examples/restore_dti.py | 10 | 7653 | """
=====================================================
Using the RESTORE algorithm for robust tensor fitting
=====================================================
The diffusion tensor model takes into account certain kinds of noise (thermal),
but not other kinds, such as "physiological" noise. For example, if a subject
moves during the acquisition of one of the diffusion-weighted samples, this
might have a substantial effect on the parameters of the tensor fit calculated
in all voxels in the brain for that subject. One of the pernicious consequences
of this is that it can lead to wrong interpretation of group differences. For
example, some groups of participants (e.g. young children, patient groups,
etc.) are particularly prone to motion, and differences in tensor parameters and
derived statistics (such as FA) due to motion would be confounded with actual
differences in the physical properties of the white matter. An example of this
was shown in a paper by Yendiki et al. [1]_.
One of the strategies to deal with this problem is to apply an automatic method
for detecting outliers in the data, excluding these outliers and refitting the
model without the presence of these outliers. This is often referred to as
"robust model fitting". One of the common algorithms for robust tensor fitting
is called RESTORE, and was first proposed by Chang et al. [2]_.
In the following example, we will demonstrate how to use RESTORE on a simulated
dataset, which we will corrupt by adding intermittent noise.
We start by importing a few of the libraries we will use.
"""
import numpy as np
import nibabel as nib
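"""
Before loading real data, the outlier-rejection strategy described above can be
sketched in a few lines. The function below is only an illustration added for
this example (the name and the simple thresholding rule are ours; it is *not*
dipy's RESTORE implementation, which iteratively reweights the tensor fit): fit
an ordinary linear model, flag measurements whose residuals exceed ``nsigma``
times the expected noise level, and refit using the remaining measurements.
"""

def robust_linear_fit_sketch(A, y, sigma, nsigma=3.0):
    # Initial ordinary least-squares fit of y ~ A * beta.
    beta = np.linalg.lstsq(A, y)[0]
    # Flag measurements whose residuals are implausibly large given the noise.
    residuals = y - A.dot(beta)
    keep = np.abs(residuals) <= nsigma * sigma
    # Refit using only the measurements judged to be inliers.
    return np.linalg.lstsq(A[keep], y[keep])[0]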
"""
The module ``dipy.reconst.dti`` contains the implementation of tensor fitting,
including an implementation of the RESTORE algorithm.
"""
import dipy.reconst.dti as dti
"""
``dipy.data`` is used for small datasets that we use in tests and examples.
"""
import dipy.data as dpd
"""
``dipy.viz.fvtk`` is used for 3D visualization and matplotlib for 2D
visualizations:
"""
import dipy.viz.fvtk as fvtk
import matplotlib.pyplot as plt
"""
If needed, the fetch_stanford_hardi function will download the raw dMRI dataset
of a single subject. The size of this dataset is 87 MBytes. You only need to
fetch it once.
"""
dpd.fetch_stanford_hardi()
img, gtab = dpd.read_stanford_hardi()
"""
We initialize a DTI model class instance using the gradient table used in the
measurement. By default, dti.TensorModel will use a weighted least-squares
algorithm (described in [2]_) to fit the parameters of the model. We initialize
this model as a baseline for comparison of noise-corrupted models:
"""
dti_wls = dti.TensorModel(gtab)
"""
For the purpose of this example, we will focus on the data from a region of
interest (ROI) surrounding the Corpus Callosum. We define that ROI as the
following indices:
"""
roi_idx = (slice(20,50), slice(55,85), slice(38,39))
"""
And use them to index into the data:
"""
data = img.get_data()[roi_idx]
"""
This dataset is not very noisy, so we will artificially corrupt it to simulate
the effects of "physiological" noise, such as subject motion. But first, let's
establish a baseline, using the data as it is:
"""
fit_wls = dti_wls.fit(data)
fa1 = fit_wls.fa
evals1 = fit_wls.evals
evecs1 = fit_wls.evecs
cfa1 = dti.color_fa(fa1, evecs1)
sphere = dpd.get_sphere('symmetric724')
"""
We visualize the ODFs in the ROI using fvtk:
"""
ren = fvtk.ren()
fvtk.add(ren, fvtk.tensor(evals1, evecs1, cfa1, sphere))
print('Saving illustration as tensor_ellipsoids_wls.png')
fvtk.record(ren, n_frames=1, out_path='tensor_ellipsoids_wls.png',
size=(600, 600))
"""
.. figure:: tensor_ellipsoids_wls.png
:align: center
**Tensor Ellipsoids**.
"""
fvtk.clear(ren)
"""
Next, we corrupt the data with some noise. To simulate a subject that moves
intermittently, we will replace a few of the images with a very low signal
"""
noisy_data = np.copy(data)
noisy_idx = slice(-10, None) # The last 10 volumes are corrupted
noisy_data[..., noisy_idx] = 1.0
"""
We use the same model to fit this noisy data
"""
fit_wls_noisy = dti_wls.fit(noisy_data)
fa2 = fit_wls_noisy.fa
evals2 = fit_wls_noisy.evals
evecs2 = fit_wls_noisy.evecs
cfa2 = dti.color_fa(fa2, evecs2)
ren = fvtk.ren()
fvtk.add(ren, fvtk.tensor(evals2, evecs2, cfa2, sphere))
print('Saving illustration as tensor_ellipsoids_wls_noisy.png')
fvtk.record(ren, n_frames=1, out_path='tensor_ellipsoids_wls_noisy.png',
size=(600, 600))
"""
In places where the tensor model is particularly sensitive to noise, the
resulting tensor field will be distorted
.. figure:: tensor_ellipsoids_wls_noisy.png
:align: center
**Tensor Ellipsoids from noisy data**.
To estimate the parameters from the noisy data using RESTORE, we need to
estimate what would be a reasonable amount of noise to expect in the
measurement. To do that, we use the `dipy.denoise.noise_estimate` module:
"""
import dipy.denoise.noise_estimate as ne
sigma = ne.estimate_sigma(data)
"""
This estimate of the standard deviation will be used by the RESTORE algorithm
to identify the outliers in each voxel and is given as an input when
initializing the TensorModel object:
"""
dti_restore = dti.TensorModel(gtab, fit_method='RESTORE', sigma=sigma)
fit_restore_noisy = dti_restore.fit(noisy_data)
fa3 = fit_restore_noisy.fa
evals3 = fit_restore_noisy.evals
evecs3 = fit_restore_noisy.evecs
cfa3 = dti.color_fa(fa3, evecs3)
ren = fvtk.ren()
fvtk.add(ren, fvtk.tensor(evals3, evecs3, cfa3, sphere))
print('Saving illustration as tensor_ellipsoids_restore_noisy.png')
fvtk.record(ren, n_frames=1, out_path='tensor_ellipsoids_restore_noisy.png',
size=(600, 600))
"""
.. figure:: tensor_ellipsoids_restore_noisy.png
:align: center
**Tensor Ellipsoids from noisy data recovered with RESTORE**.
The tensor field now looks much closer to its noiseless state in this image. To
convince ourselves further that RESTORE did the right thing, we will compare the
distribution of FA in this region to the baseline, using both the RESTORE
estimate and the WLS estimate.
"""
fig_hist, ax = plt.subplots(1)
ax.hist(np.ravel(fa2), color='b', histtype='step', label='WLS')
ax.hist(np.ravel(fa3), color='r', histtype='step', label='RESTORE')
ax.hist(np.ravel(fa1), color='g', histtype='step', label='Original')
ax.set_xlabel('Fractional Anisotropy')
ax.set_ylabel('Count')
plt.legend()
fig_hist.savefig('dti_fa_distributions.png')
"""
.. figure:: dti_fa_distributions.png
:align: center
This demonstrates that RESTORE can recover a distribution of FA that more
closely resembles the baseline distribution of the noiseless signal, and
demonstrates the utility of the method to data with intermittent
noise. Importantly, this method assumes that the tensor is a good
representation of the diffusion signal in the data. If you have reason to
believe this is not the case (for example, you have data with very high b
values and you are particularly interested in locations in the brain in which
fibers cross), you might want to use a different method to fit your data.
References
----------
.. [1] Yendiki, A, Koldewynb, K, Kakunooria, S, Kanwisher, N, and Fischl,
B. (2013). Spurious group differences due to head motion in a diffusion
MRI study. Neuroimage.
.. [2] Chang, L-C, Jones, DK and Pierpaoli, C (2005). RESTORE: robust estimation
of tensors by outlier rejection. MRM, 53: 1088-95.
.. [3] Chung, SW, Lu, Y, Henry, R-G, (2006). Comparison of bootstrap
approaches for estimation of uncertainties of DTI parameters.
NeuroImage 33, 531-541.
.. include:: ../links_names.inc
"""
| bsd-3-clause |
meduz/scikit-learn | examples/semi_supervised/plot_label_propagation_digits.py | 55 | 2723 | """
===================================================
Label Propagation digits: Demonstrating performance
===================================================
This example demonstrates the power of semisupervised learning by
training a Label Spreading model to classify handwritten digits
with sets of very few labels.
The handwritten digit dataset has 1797 total points; this example uses the
first 330 of them. The model will be trained using all of those points, but
only 30 will be labeled. Results in the form of a confusion matrix and a
series of metrics over each
class will be very good.
At the end, the top 10 most uncertain predictions will be shown.
"""
print(__doc__)
# Authors: Clay Woolam <[email protected]>
# License: BSD
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from sklearn import datasets
from sklearn.semi_supervised import label_propagation
from sklearn.metrics import confusion_matrix, classification_report
digits = datasets.load_digits()
rng = np.random.RandomState(0)
indices = np.arange(len(digits.data))
rng.shuffle(indices)
X = digits.data[indices[:330]]
y = digits.target[indices[:330]]
images = digits.images[indices[:330]]
n_total_samples = len(y)
n_labeled_points = 30
indices = np.arange(n_total_samples)
unlabeled_set = indices[n_labeled_points:]
# mask a subset of the labels to mark those points as unlabeled (-1)
y_train = np.copy(y)
y_train[unlabeled_set] = -1
###############################################################################
# Learn with LabelSpreading
lp_model = label_propagation.LabelSpreading(gamma=0.25, max_iter=5)
lp_model.fit(X, y_train)
predicted_labels = lp_model.transduction_[unlabeled_set]
true_labels = y[unlabeled_set]
cm = confusion_matrix(true_labels, predicted_labels, labels=lp_model.classes_)
print("Label Spreading model: %d labeled & %d unlabeled points (%d total)" %
(n_labeled_points, n_total_samples - n_labeled_points, n_total_samples))
print(classification_report(true_labels, predicted_labels))
print("Confusion matrix")
print(cm)
# calculate uncertainty values for each transduced distribution
pred_entropies = stats.distributions.entropy(lp_model.label_distributions_.T)
# pick the top 10 most uncertain labels
uncertainty_index = np.argsort(pred_entropies)[-10:]
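# A small, hedged aside (added; not part of the original example): entropy is
# used as the uncertainty score because a flat label distribution has a higher
# entropy than a peaked one, so np.argsort(pred_entropies) places confident
# predictions first and the most ambiguous ones last.  For instance:
#   stats.distributions.entropy([0.98, 0.01, 0.01])  # ~0.11 (confident)
#   stats.distributions.entropy([0.40, 0.30, 0.30])  # ~1.09 (uncertain)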
###############################################################################
# plot
f = plt.figure(figsize=(7, 5))
for index, image_index in enumerate(uncertainty_index):
image = images[image_index]
sub = f.add_subplot(2, 5, index + 1)
sub.imshow(image, cmap=plt.cm.gray_r)
plt.xticks([])
plt.yticks([])
sub.set_title('predict: %i\ntrue: %i' % (
lp_model.transduction_[image_index], y[image_index]))
f.suptitle('Learning with small amount of labeled data')
plt.show()
| bsd-3-clause |
azjps/bokeh | examples/models/external_resources.py | 1 | 2289 | """ The LaTex example was derived from: http://matplotlib.org/users/usetex.html
"""
from bokeh.core.properties import Override
from bokeh.models import Label
from bokeh.plotting import output_file, figure, show
import numpy as np
output_file('external_resources.html')
class LatexLabel(Label):
"""A subclass of `Label` with all of the same class attributes except
canvas mode isn't supported and DOM manipulation happens in the coffeescript
superclass implementation that requires setting `render_mode='css'`).
Only the render method of LabelView is overwritten to perform the
text -> latex (via katex) conversion
"""
__javascript__ = ["https://cdnjs.cloudflare.com/ajax/libs/KaTeX/0.6.0/katex.min.js"]
__css__ = ["https://cdnjs.cloudflare.com/ajax/libs/KaTeX/0.6.0/katex.min.css"]
__implementation__ = """
import {Label, LabelView} from "models/annotations/label"
export class LatexLabelView extends LabelView
render: () ->
ctx = @plot_view.canvas_view.ctx
# Here because AngleSpec does units tranform and label doesn't support specs
switch @model.angle_units
when "rad" then angle = -1 * @model.angle
when "deg" then angle = -1 * @model.angle * Math.PI/180.0
if @model.x_units == "data"
vx = @xmapper.map_to_target(@model.x)
else
vx = @model.x
sx = @canvas.vx_to_sx(vx)
if @model.y_units == "data"
vy = @ymapper.map_to_target(@model.y)
else
vy = @model.y
sy = @canvas.vy_to_sy(vy)
if @model.panel?
panel_offset = @_get_panel_offset()
sx += panel_offset.x
sy += panel_offset.y
latex = katex.renderToString(@model.text, {displayMode: true})
@_css_text(ctx, latex, sx + @model.x_offset, sy - @model.y_offset, angle)
export class LatexLabel extends Label
type: 'LatexLabel'
default_view: LatexLabelView
"""
x = np.arange(0.0, 1.0 + 0.01, 0.01)
y = np.cos(2*2*np.pi*x) + 2
p = figure(title="LaTex Demonstration", width=500, height=500)
p.line(x, y)
latex = LatexLabel(text="f = \sum_{n=1}^\infty\\frac{-e^{i\pi}}{2^n}!",
x=35, y=445, x_units='screen', y_units='screen',
render_mode='css', text_font_size='16pt',
background_fill_color='#ffffff')
p.add_layout(latex)
show(p)
| bsd-3-clause |
AndresYague/Snuppat | output/TDUTables/plotC13Profiles.py | 1 | 3637 | import sys
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
def main():
if len(sys.argv) < 2:
print("Usage: python {} profile1 <profile2 ...>".format(sys.argv[0]))
return 1
subplotNum = len(sys.argv[1:])
fig = plt.figure()
colors = ["b-", "g-", "y-"]
labels = ["$\omega = 0.10$", "$\omega = 0.12$", "$\omega = 0.14$"]
# Plot all the profiles
ii = 0; maxSaveMass = 0
for arch in sys.argv[1:]:
# Read event
fread = open(arch, "r")
# Add subplot
ii += 1
ax = fig.add_subplot(subplotNum, 1, ii)
if ii == 1:
ax.text(0.0020, 0.02, "4M$_\odot$", fontsize = 16)
lastMass = 0
maxProfileHeight = 0
mass = []; profile = []
maxProfile = []
c13Count = 0; c13TotMass = 0
while True:
line = fread.readline()
label = arch
if "#" not in line and len(line) > 0:
lnlst = line.split()
if len(lnlst) == 0:
continue
mass.append(float(lnlst[0]))
profile.append(float(lnlst[1]))
elif len(mass) > 0:
                # Count one more c13 pocket
                c13Count += 1
                # Add this pocket's mass extent to the total
c13TotMass += max(mass) - min(mass)
# Shift mass so profiles are consecutive
mass0 = mass[0]
mass = [x - mass0 + lastMass for x in mass]
# Plot
ax.plot(mass, profile, colors[ii - 1], lw = 2,
label = labels[ii - 1])
# Modify lastMass
lastMass += mass[-1] - mass[0]
# Restart variables
saveMass = mass; mass = []
maxProfileHeight = 0
maxProfile = []; profile = []
# Exit if last line
if len(line) == 0:
break
mass = []; profile = []
if max(saveMass) > maxSaveMass:
maxSaveMass = max(saveMass)
ax.yaxis.set_major_locator(ticker.MultipleLocator(1e-2))
ax.xaxis.set_major_locator(ticker.MaxNLocator(prune = "both"))
# Make labels invisible if ii < maximum:
if ii < len(sys.argv[1:]):
[label.set_visible(False) for label in ax.get_xticklabels()]
ax.set_ylim([0, ax.get_ylim()[1]])
ax.legend(prop = {"size": 12})
# Prune the yaxis except for last plot
yLim = ax.get_ylim()
yticks = list(ax.get_yticks())
yticks = [x for x in yticks if x >= yLim[0] and x <= yLim[1]*1.1]
if ii < len(sys.argv[1:]):
yticks.pop(0)
ax.set_yticks(yticks)
fread.close()
# Print the average size
if c13Count > 0:
avgMass = c13TotMass/c13Count
print("# Average pocket mass in {} = {}".format(arch, avgMass))
print("")
# Fix axes
ii = 0
for axi in fig.axes:
axi.set_xlim([0, maxSaveMass*1.1])
# Set central ylabel
if ii == 1:
axi.set_ylabel("Effective $^{{13}}$C mass fraction", size = 12)
ii += 1
fig.subplots_adjust(hspace = 0)
ax.set_xlabel("Mass (M$_\odot$)", size = 12)
plt.show()
if __name__ == "__main__":
main()
| mit |
louisLouL/pair_trading | capstone_env/lib/python3.6/site-packages/mpl_toolkits/axisartist/clip_path.py | 2 | 4750 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from six.moves import zip
import numpy as np
from math import degrees
import math
import warnings
def atan2(dy, dx):
if dx == 0 and dy == 0:
warnings.warn("dx and dy is 0")
return 0
else:
return math.atan2(dy, dx)
# FIXME : The current algorithm seems to return incorrect angle when the line
# ends at the boundary.
def clip(xlines, ylines, x0, clip="right", xdir=True, ydir=True):
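    """Clip the polylines given by (xlines, ylines) against the boundary where
    the first coordinate equals x0.

    (This docstring is an added summary of the observed behaviour.)  With
    ``clip`` set to "right" or "up", the portions with first coordinate < x0
    are kept; otherwise the portions with first coordinate > x0 are kept.
    ``xdir`` and ``ydir`` only affect the sign convention of the reported
    crossing angles.  Returns the clipped x-segments, the clipped y-segments,
    and a list of (x0, y0, angle_in_degrees) tuples, one per boundary
    crossing.  For example, clipping x = np.array([0, 1, 2, 3]),
    y = np.array([0, 1, 2, 3]) with clip([x], [y], 2) keeps the single
    segment ([0, 1, 2], [0, 1, 2]) and reports a crossing at (2, 2) with a
    45 degree angle.
    """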
clipped_xlines = []
clipped_ylines = []
_pos_angles = []
if xdir:
xsign = 1
else:
xsign = -1
if ydir:
ysign = 1
else:
ysign = -1
for x, y in zip(xlines, ylines):
if clip in ["up", "right"]:
b = (x < x0).astype("i")
db = b[1:] - b[:-1]
else:
b = (x > x0).astype("i")
db = b[1:] - b[:-1]
if b[0]:
ns = 0
else:
ns = -1
segx, segy = [], []
for (i,) in np.argwhere(db!=0):
c = db[i]
if c == -1:
dx = (x0 - x[i])
dy = (y[i+1] - y[i]) * (dx/ (x[i+1] - x[i]))
y0 = y[i] + dy
clipped_xlines.append(np.concatenate([segx, x[ns:i+1], [x0]]))
clipped_ylines.append(np.concatenate([segy, y[ns:i+1], [y0]]))
ns = -1
segx, segy = [], []
if dx == 0. and dy == 0:
dx = x[i+1] - x[i]
dy = y[i+1] - y[i]
a = degrees(atan2(ysign*dy, xsign*dx))
_pos_angles.append((x0, y0, a))
elif c == 1:
dx = (x0 - x[i])
dy = (y[i+1] - y[i]) * (dx / (x[i+1] - x[i]))
y0 = y[i] + dy
segx, segy = [x0], [y0]
ns = i+1
if dx == 0. and dy == 0:
dx = x[i+1] - x[i]
dy = y[i+1] - y[i]
a = degrees(atan2(ysign*dy, xsign*dx))
_pos_angles.append((x0, y0, a))
#print x[i], x[i+1]
if ns != -1:
clipped_xlines.append(np.concatenate([segx, x[ns:]]))
clipped_ylines.append(np.concatenate([segy, y[ns:]]))
#clipped_pos_angles.append(_pos_angles)
return clipped_xlines, clipped_ylines, _pos_angles
def clip_line_to_rect(xline, yline, bbox):
x0, y0, x1, y1 = bbox.extents
xdir = x1 > x0
ydir = y1 > y0
if x1 > x0:
lx1, ly1, c_right_ = clip([xline], [yline], x1, clip="right", xdir=xdir, ydir=ydir)
lx2, ly2, c_left_ = clip(lx1, ly1, x0, clip="left", xdir=xdir, ydir=ydir)
else:
lx1, ly1, c_right_ = clip([xline], [yline], x0, clip="right", xdir=xdir, ydir=ydir)
lx2, ly2, c_left_ = clip(lx1, ly1, x1, clip="left", xdir=xdir, ydir=ydir)
if y1 > y0:
ly3, lx3, c_top_ = clip(ly2, lx2, y1, clip="right", xdir=ydir, ydir=xdir)
ly4, lx4, c_bottom_ = clip(ly3, lx3, y0, clip="left", xdir=ydir, ydir=xdir)
else:
ly3, lx3, c_top_ = clip(ly2, lx2, y0, clip="right", xdir=ydir, ydir=xdir)
ly4, lx4, c_bottom_ = clip(ly3, lx3, y1, clip="left", xdir=ydir, ydir=xdir)
# lx1, ly1, c_right_ = clip([xline], [yline], x1, clip="right")
# lx2, ly2, c_left_ = clip(lx1, ly1, x0, clip="left")
# ly3, lx3, c_top_ = clip(ly2, lx2, y1, clip="right")
# ly4, lx4, c_bottom_ = clip(ly3, lx3, y0, clip="left")
#c_left = [((x, y), (a+90)%180-180) for (x, y, a) in c_left_ \
# if bbox.containsy(y)]
c_left = [((x, y), (a+90)%180-90) for (x, y, a) in c_left_ \
if bbox.containsy(y)]
c_bottom = [((x, y), (90 - a)%180) for (y, x, a) in c_bottom_ \
if bbox.containsx(x)]
c_right = [((x, y), (a+90)%180+90) for (x, y, a) in c_right_ \
if bbox.containsy(y)]
c_top = [((x, y), (90 - a)%180+180) for (y, x, a) in c_top_ \
if bbox.containsx(x)]
return list(zip(lx4, ly4)), [c_left, c_bottom, c_right, c_top]
if __name__ == "__main__":
import matplotlib.pyplot as plt
x = np.array([-3, -2, -1, 0., 1, 2, 3, 2, 1, 0, -1, -2, -3, 5])
#x = np.array([-3, -2, -1, 0., 1, 2, 3])
y = np.arange(len(x))
#x0 = 2
plt.plot(x, y, lw=1)
from matplotlib.transforms import Bbox
bb = Bbox.from_extents(-2, 3, 2, 12.5)
lxy, ticks = clip_line_to_rect(x, y, bb)
for xx, yy in lxy:
plt.plot(xx, yy, lw=1, color="g")
ccc = iter(["ro", "go", "rx", "bx"])
for ttt in ticks:
cc = six.next(ccc)
for (xx, yy), aa in ttt:
plt.plot([xx], [yy], cc)
#xlim(
| mit |
trondeau/gnuradio | gr-digital/examples/snr_estimators.py | 46 | 6348 | #!/usr/bin/env python
#
# Copyright 2011-2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
import sys
try:
import scipy
from scipy import stats
except ImportError:
print "Error: Program requires scipy (www.scipy.org)."
sys.exit(1)
try:
import pylab
except ImportError:
print "Error: Program requires Matplotlib (matplotlib.sourceforge.net)."
sys.exit(1)
from gnuradio import gr, digital, filter
from gnuradio import blocks
from gnuradio import channels
from optparse import OptionParser
from gnuradio.eng_option import eng_option
'''
This example program uses Python and GNU Radio to calculate the SNR of a
noisy BPSK signal with several estimators and compare the results.
For an explanation of the online algorithms, see:
http://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Higher-order_statistics
'''
def online_skewness(data):
n = 0
mean = 0
M2 = 0
M3 = 0
for n in xrange(len(data)):
delta = data[n] - mean
delta_n = delta / (n+1)
term1 = delta * delta_n * n
mean = mean + delta_n
M3 = M3 + term1 * delta_n * (n - 1) - 3 * delta_n * M2
M2 = M2 + term1
return scipy.sqrt(len(data))*M3 / scipy.power(M2, 3.0/2.0);
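# Added note (hedged): for a real-valued data vector the streaming moments above
# reduce to the ordinary sample skewness, i.e. online_skewness(x) should agree
# with scipy.stats.skew(x) up to floating-point error, which is why a
# stats.skew() call appears commented out as an alternative in snr_est_skew().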
def snr_est_simple(signal):
s = scipy.mean(abs(signal)**2)
n = 2*scipy.var(abs(signal))
snr_rat = s/n
return 10.0*scipy.log10(snr_rat), snr_rat
def snr_est_skew(signal):
y1 = scipy.mean(abs(signal))
y2 = scipy.mean(scipy.real(signal**2))
y3 = (y1*y1 - y2)
y4 = online_skewness(signal.real)
#y4 = stats.skew(abs(signal.real))
skw = y4*y4 / (y2*y2*y2);
s = y1*y1
n = 2*(y3 + skw*s)
snr_rat = s / n
return 10.0*scipy.log10(snr_rat), snr_rat
def snr_est_m2m4(signal):
M2 = scipy.mean(abs(signal)**2)
M4 = scipy.mean(abs(signal)**4)
snr_rat = scipy.sqrt(2*M2*M2 - M4) / (M2 - scipy.sqrt(2*M2*M2 - M4))
return 10.0*scipy.log10(snr_rat), snr_rat
def snr_est_svr(signal):
N = len(signal)
ssum = 0
msum = 0
for i in xrange(1, N):
ssum += (abs(signal[i])**2)*(abs(signal[i-1])**2)
msum += (abs(signal[i])**4)
savg = (1.0/(float(N)-1.0))*ssum
mavg = (1.0/(float(N)-1.0))*msum
beta = savg / (mavg - savg)
snr_rat = ((beta - 1) + scipy.sqrt(beta*(beta-1)))
return 10.0*scipy.log10(snr_rat), snr_rat
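# A minimal, hedged sanity check added for illustration (the helper name below
# is ours, not part of the original script): build noiseless BPSK symbols, add
# complex Gaussian noise at a known SNR exactly as main() does below, and check
# that the M2M4 estimator lands close to the known value for a long record.
def _quick_m2m4_check(snr_db=10.0, nsamples=100000):
    bits = 2*scipy.complex64(scipy.random.randint(0, 2, nsamples)) - 1
    noise = scipy.random.randn(nsamples) + 1j*scipy.random.randn(nsamples)
    scale = scipy.sqrt(2*10.0**(snr_db/10.0))
    est_db, _ = snr_est_m2m4(bits + noise/scale)
    return est_db   # expected to be within a fraction of a dB of snr_db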
def main():
gr_estimators = {"simple": digital.SNR_EST_SIMPLE,
"skew": digital.SNR_EST_SKEW,
"m2m4": digital.SNR_EST_M2M4,
"svr": digital.SNR_EST_SVR}
py_estimators = {"simple": snr_est_simple,
"skew": snr_est_skew,
"m2m4": snr_est_m2m4,
"svr": snr_est_svr}
parser = OptionParser(option_class=eng_option, conflict_handler="resolve")
parser.add_option("-N", "--nsamples", type="int", default=10000,
help="Set the number of samples to process [default=%default]")
parser.add_option("", "--snr-min", type="float", default=-5,
help="Minimum SNR [default=%default]")
parser.add_option("", "--snr-max", type="float", default=20,
help="Maximum SNR [default=%default]")
parser.add_option("", "--snr-step", type="float", default=0.5,
help="SNR step amount [default=%default]")
parser.add_option("-t", "--type", type="choice",
choices=gr_estimators.keys(), default="simple",
help="Estimator type {0} [default=%default]".format(
gr_estimators.keys()))
(options, args) = parser.parse_args ()
N = options.nsamples
xx = scipy.random.randn(N)
xy = scipy.random.randn(N)
bits =2*scipy.complex64(scipy.random.randint(0, 2, N)) - 1
#bits =(2*scipy.complex64(scipy.random.randint(0, 2, N)) - 1) + \
# 1j*(2*scipy.complex64(scipy.random.randint(0, 2, N)) - 1)
snr_known = list()
snr_python = list()
snr_gr = list()
# when to issue an SNR tag; can be ignored in this example.
ntag = 10000
n_cpx = xx + 1j*xy
py_est = py_estimators[options.type]
gr_est = gr_estimators[options.type]
SNR_min = options.snr_min
SNR_max = options.snr_max
SNR_step = options.snr_step
SNR_dB = scipy.arange(SNR_min, SNR_max+SNR_step, SNR_step)
for snr in SNR_dB:
SNR = 10.0**(snr/10.0)
scale = scipy.sqrt(2*SNR)
yy = bits + n_cpx/scale
print "SNR: ", snr
Sknown = scipy.mean(yy**2)
Nknown = scipy.var(n_cpx/scale)
snr0 = Sknown/Nknown
snr0dB = 10.0*scipy.log10(snr0)
snr_known.append(float(snr0dB))
snrdB, snr = py_est(yy)
snr_python.append(snrdB)
gr_src = blocks.vector_source_c(bits.tolist(), False)
gr_snr = digital.mpsk_snr_est_cc(gr_est, ntag, 0.001)
gr_chn = channels.channel_model(1.0/scale)
gr_snk = blocks.null_sink(gr.sizeof_gr_complex)
tb = gr.top_block()
tb.connect(gr_src, gr_chn, gr_snr, gr_snk)
tb.run()
snr_gr.append(gr_snr.snr())
f1 = pylab.figure(1)
s1 = f1.add_subplot(1,1,1)
s1.plot(SNR_dB, snr_known, "k-o", linewidth=2, label="Known")
s1.plot(SNR_dB, snr_python, "b-o", linewidth=2, label="Python")
s1.plot(SNR_dB, snr_gr, "g-o", linewidth=2, label="GNU Radio")
s1.grid(True)
s1.set_title('SNR Estimators')
s1.set_xlabel('SNR (dB)')
s1.set_ylabel('Estimated SNR')
s1.legend()
f2 = pylab.figure(2)
s2 = f2.add_subplot(1,1,1)
s2.plot(yy.real, yy.imag, 'o')
pylab.show()
if __name__ == "__main__":
main()
| gpl-3.0 |
yaukwankiu/armor | tests/modifiedMexicanHatTest17_3d.py | 1 | 5593 | thisScript = "modifiedMexicanHatTest17_3d.py"
import pickle, os, shutil, time
from armor import pattern
from armor import defaultParameters as dp
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import axes3d
root = dp.rootFolder
timeString = str(int(time.time()))
######################################################################
#
# 1
#dataSource = "Numerical_Spectrum_for_March2014_Rainband_WRF"
#inputFolder = root+ "labLogs/2014-5-14-modifiedMexicanHatTest13_2/"
# 2
#dataSource = "Numerical_Spectrum_for_March2014_Rainband"
#inputFolder = root+ "labLogs/2014-5-13-modifiedMexicanHatTest13/"
# 3
#dataSource = "Numerical_Spectrum_for_Typhoon_Kong-Rey"
#inputFolder = root+ "labLogs/2014-5-7-modifiedMexicanHatTest10/"
# 4
#dataSource = "Numerical_Spectrum_for_Typhoon_Kong-Rey_WRF"
#inputFolder = root+ "labLogs/2014-5-7-modifiedMexicanHatTest9/"
############
# 2014-05-26
# 1
dataSource = "Numerical_Spectrum_for_Typhoon_Kong-Rey_WRF"
inputFolder = root + "labLogs/2014-5-26-modifiedMexicanHatTest17_kongreywrf/"
# 2
#dataSource = "Numerical_Spectrum_for_Typhoon_Kong-Rey_RADAR"
#inputFolder = root + "labLogs/2014-5-26-modifiedMexicanHatTest17_kongreycompref/"
# 3
#dataSource = "Numerical_Spectrum_for_Kong-Rey_COMPREF-sigmaPower2"
#inputFolder = root + "labLogs/2014-5-26-modifiedMexicanHatTest10/"
#dataSource = "Numerical_Spectrum_for_Kong-Rey_COMPREF-sigmaPreprocessing10"
#inputFolder = root + "labLogs/2014-5-16-modifiedMexicanHatTest15_march2014/"
#inputFolder = root+ "labLogs/2014-5-19-modifiedMexicanHatTest15_march2014_sigmaPreprocessing2/"
#inputFolder = root+ "labLogs/2014-5-19-modifiedMexicanHatTest15_march2014_sigmaPreprocessing4/"
#dataSource = "Numerical_Spectrum_for_Kong-Rey_COMPREF-sigmaPreprocessing16"
#inputFolder = root+ "labLogs/2014-5-19-modifiedMexicanHatTest15_march2014_sigmaPreprocessing16/"
#
############################################################################
outputFolder = root+"labLogs/%d-%d-%d-modifiedMexicanHatTest17_%s/" % (dp.year, dp.month, dp.day, dataSource)
if not os.path.exists(outputFolder):
os.makedirs(outputFolder)
shutil.copyfile(root+"python/armor/tests/"+thisScript, outputFolder+thisScript)
open(outputFolder+thisScript,'a').write('\n# outputFolder:\n# ' + outputFolder)
L = os.listdir(inputFolder)
L = [v for v in L if ".pydump" in v and "responseImagesList" in v]
L = [inputFolder+v for v in L]
print len(L)
N = len(L)
## test/parameter setup
sigmas = []
for i in range(3):
responseImages = pickle.load(open(L[i],'r'))
M = responseImages[0]['matrix']
sigma = responseImages[0]['sigma']
height, width = M.shape
for j in range(len(responseImages)):
M = responseImages[j]['matrix']
M = M*(M>0)
sigma = responseImages[j]['sigma']
sigmas.append(sigma)
print j, sigma, '\t', M.min(), '\t', M.max()
print "sleeping 2 seconds"
time.sleep(2)
sigmas = sorted(list(set(sigmas)))
### end test/parameter setup
# making the 3d plot
X, Y = np.meshgrid(range(20), sigmas)
I, J = Y, X
Z = np.zeros(X.shape)
for i in range(len(L)):
responseImages = pickle.load(open(L[i],'r'))
M = responseImages[0]['matrix']
sigma = responseImages[0]['sigma']
z = np.zeros(X.shape)
for j in range(len(responseImages)):
M = responseImages[j]['matrix']
M = M*(M>0)
sigma = responseImages[j]['sigma']
print j, sigma, '\t', M.min(), '\t', M.max()
h = np.histogram(M, bins=20, range=(0,20))
z[j,:] = h[0]
Z += z
# making the chart
plt.close()
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.plot_surface(X, np.log2(Y), np.log2(Z), rstride=1, cstride=1) #key line
plt.title(dataSource+ " " + str(i) + "DBZ images\n"+\
"x-axis: response intensity(from 0 to 20)\n"+\
"y-axis: log_2(sigma)\n"+\
"z-axis: log_2(count)\n")
plt.xlabel('response intensity')
plt.ylabel('log2(sigma)')
# saving
fig.savefig(outputFolder+ "3d_numspec_plot_log2scale.png", dpi=200)
pickle.dump({"X": X, "Y":Y, "Z":Z}, open(outputFolder+'XYZ.pydump','w'))
#pickle.dump(fig,open(outputFolder+"fig.pydump","w")) #doesn't work
logFile = open(outputFolder+timeString+"logs.txt",'w')
logFile.write("i = " + str(i))
logFile.write("\noutputFolder = '" + str(outputFolder) + "'")
logFile.write("\ndataSource = '" + str(dataSource) + "'")
logFile.close()
print "time spent: ", time.time() - int(timeString)
fig.show()
open(outputFolder+thisScript,'a').write('\n\n outputFolder\n '+outputFolder)
"""
to see the final fig, go to the output folder, enter python interactive mode, and:
dataSource="" #<-- fix it yourself
inputFolder=outputFolder #<-- fix it yourself
import pickle
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import axes3d
xyz = pickle.load(open(inputFolder+'XYZ.pydump','r'))
X = xyz['X']
Y = xyz['Y']
Z = xyz['Z']
plt.close()
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.plot_surface(X, np.log2(Y), np.log2(Z), rstride=1, cstride=1) #key line
plt.title(dataSource+ " " + str(i) + "DBZ images\n"+\
"x-axis: response intensity(from 0 to 20)\n"+\
"y-axis: log_2(sigma)\n"+\
"z-axis: log_2(count)\n")
plt.xlabel('response intensity')
plt.ylabel('log2(sigma)')
fig.show()
"""
print "outputFolder:", outputFolder
| cc0-1.0 |
kenshay/ImageScript | ProgramData/SystemFiles/Python/Lib/site-packages/mpl_toolkits/mplot3d/axis3d.py | 10 | 19421 | # axis3d.py, original mplot3d version by John Porter
# Created: 23 Sep 2005
# Parts rewritten by Reinier Heeres <[email protected]>
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import math
import copy
from matplotlib import lines as mlines, axis as maxis, \
patches as mpatches
from matplotlib import rcParams
from . import art3d
from . import proj3d
import numpy as np
def get_flip_min_max(coord, index, mins, maxs):
    '''Return the axis bound opposite to the one that coord sits on along
    the given axis index (i.e. flip to the other face of the bounding box).'''
    if coord[index] == mins[index]:
        return maxs[index]
    else:
        return mins[index]
def move_from_center(coord, centers, deltas, axmask=(True, True, True)):
'''Return a coordinate that is moved by "deltas" away from the center.'''
coord = copy.copy(coord)
#print coord, centers, deltas, axmask
for i in range(3):
if not axmask[i]:
continue
if coord[i] < centers[i]:
coord[i] -= deltas[i]
else:
coord[i] += deltas[i]
return coord
def tick_update_position(tick, tickxs, tickys, labelpos):
'''Update tick line and label position and style.'''
for (label, on) in ((tick.label1, tick.label1On), \
(tick.label2, tick.label2On)):
if on:
label.set_position(labelpos)
tick.tick1On, tick.tick2On = True, False
tick.tick1line.set_linestyle('-')
tick.tick1line.set_marker('')
tick.tick1line.set_data(tickxs, tickys)
tick.gridline.set_data(0, 0)
class Axis(maxis.XAxis):
# These points from the unit cube make up the x, y and z-planes
_PLANES = (
(0, 3, 7, 4), (1, 2, 6, 5), # yz planes
(0, 1, 5, 4), (3, 2, 6, 7), # xz planes
(0, 1, 2, 3), (4, 5, 6, 7), # xy planes
)
# Some properties for the axes
_AXINFO = {
'x': {'i': 0, 'tickdir': 1, 'juggled': (1, 0, 2),
'color': (0.95, 0.95, 0.95, 0.5)},
'y': {'i': 1, 'tickdir': 0, 'juggled': (0, 1, 2),
'color': (0.90, 0.90, 0.90, 0.5)},
'z': {'i': 2, 'tickdir': 0, 'juggled': (0, 2, 1),
'color': (0.925, 0.925, 0.925, 0.5)},
}
def __init__(self, adir, v_intervalx, d_intervalx, axes, *args, **kwargs):
# adir identifies which axes this is
self.adir = adir
# data and viewing intervals for this direction
self.d_interval = d_intervalx
self.v_interval = v_intervalx
# This is a temporary member variable.
# Do not depend on this existing in future releases!
self._axinfo = self._AXINFO[adir].copy()
if rcParams['_internal.classic_mode']:
self._axinfo.update({'label':
{'va': 'center',
'ha': 'center'},
'tick':
{'inward_factor': 0.2,
'outward_factor': 0.1,
'linewidth': rcParams['lines.linewidth'],
'color': 'k'},
'axisline':
{'linewidth': 0.75,
'color': (0, 0, 0, 1)},
'grid' :
{'color': (0.9, 0.9, 0.9, 1),
'linewidth': 1.0,
'linestyle': '-'},
})
else:
self._axinfo.update({'label' :
{'va': 'center',
'ha': 'center'},
'tick' :
{'inward_factor': 0.2,
'outward_factor': 0.1,
'linewidth': rcParams.get(
adir + 'tick.major.width',
rcParams['xtick.major.width']),
'color': rcParams.get(
adir + 'tick.color',
rcParams['xtick.color'])},
'axisline':
{'linewidth': rcParams['axes.linewidth'],
'color': rcParams['axes.edgecolor']},
'grid' :
{'color': rcParams['grid.color'],
'linewidth': rcParams['grid.linewidth'],
'linestyle': rcParams['grid.linestyle']},
})
maxis.XAxis.__init__(self, axes, *args, **kwargs)
self.set_rotate_label(kwargs.get('rotate_label', None))
def init3d(self):
self.line = mlines.Line2D(xdata=(0, 0), ydata=(0, 0),
linewidth=self._axinfo['axisline']['linewidth'],
color=self._axinfo['axisline']['color'],
antialiased=True,
)
# Store dummy data in Polygon object
self.pane = mpatches.Polygon(np.array([[0,0], [0,1], [1,0], [0,0]]),
closed=False,
alpha=0.8,
facecolor=(1,1,1,0),
edgecolor=(1,1,1,0))
self.set_pane_color(self._axinfo['color'])
self.axes._set_artist_props(self.line)
self.axes._set_artist_props(self.pane)
self.gridlines = art3d.Line3DCollection([], )
self.axes._set_artist_props(self.gridlines)
self.axes._set_artist_props(self.label)
self.axes._set_artist_props(self.offsetText)
# Need to be able to place the label at the correct location
self.label._transform = self.axes.transData
self.offsetText._transform = self.axes.transData
def get_tick_positions(self):
majorLocs = self.major.locator()
self.major.formatter.set_locs(majorLocs)
majorLabels = [self.major.formatter(val, i) for i, val in enumerate(majorLocs)]
return majorLabels, majorLocs
def get_major_ticks(self, numticks=None):
ticks = maxis.XAxis.get_major_ticks(self, numticks)
for t in ticks:
t.tick1line.set_transform(self.axes.transData)
t.tick2line.set_transform(self.axes.transData)
t.gridline.set_transform(self.axes.transData)
t.label1.set_transform(self.axes.transData)
t.label2.set_transform(self.axes.transData)
return ticks
def set_pane_pos(self, xys):
xys = np.asarray(xys)
xys = xys[:,:2]
self.pane.xy = xys
self.stale = True
def set_pane_color(self, color):
        '''Set pane color to an RGBA tuple.'''
self._axinfo['color'] = color
self.pane.set_edgecolor(color)
self.pane.set_facecolor(color)
self.pane.set_alpha(color[-1])
self.stale = True
def set_rotate_label(self, val):
'''
Whether to rotate the axis label: True, False or None.
If set to None the label will be rotated if longer than 4 chars.
'''
self._rotate_label = val
self.stale = True
def get_rotate_label(self, text):
if self._rotate_label is not None:
return self._rotate_label
else:
return len(text) > 4
def _get_coord_info(self, renderer):
minx, maxx, miny, maxy, minz, maxz = self.axes.get_w_lims()
if minx > maxx:
minx, maxx = maxx, minx
if miny > maxy:
miny, maxy = maxy, miny
if minz > maxz:
minz, maxz = maxz, minz
mins = np.array((minx, miny, minz))
maxs = np.array((maxx, maxy, maxz))
centers = (maxs + mins) / 2.
deltas = (maxs - mins) / 12.
mins = mins - deltas / 4.
maxs = maxs + deltas / 4.
vals = mins[0], maxs[0], mins[1], maxs[1], mins[2], maxs[2]
tc = self.axes.tunit_cube(vals, renderer.M)
avgz = [tc[p1][2] + tc[p2][2] + tc[p3][2] + tc[p4][2] for \
p1, p2, p3, p4 in self._PLANES]
highs = np.array([avgz[2*i] < avgz[2*i+1] for i in range(3)])
return mins, maxs, centers, deltas, tc, highs
def draw_pane(self, renderer):
renderer.open_group('pane3d')
mins, maxs, centers, deltas, tc, highs = self._get_coord_info(renderer)
info = self._axinfo
index = info['i']
if not highs[index]:
plane = self._PLANES[2 * index]
else:
plane = self._PLANES[2 * index + 1]
xys = [tc[p] for p in plane]
self.set_pane_pos(xys)
self.pane.draw(renderer)
renderer.close_group('pane3d')
def draw(self, renderer):
self.label._transform = self.axes.transData
renderer.open_group('axis3d')
# code from XAxis
majorTicks = self.get_major_ticks()
majorLocs = self.major.locator()
info = self._axinfo
index = info['i']
# filter locations here so that no extra grid lines are drawn
locmin, locmax = self.get_view_interval()
if locmin > locmax:
locmin, locmax = locmax, locmin
# Rudimentary clipping
majorLocs = [loc for loc in majorLocs if
locmin <= loc <= locmax]
self.major.formatter.set_locs(majorLocs)
majorLabels = [self.major.formatter(val, i)
for i, val in enumerate(majorLocs)]
mins, maxs, centers, deltas, tc, highs = self._get_coord_info(renderer)
# Determine grid lines
minmax = np.where(highs, maxs, mins)
# Draw main axis line
juggled = info['juggled']
edgep1 = minmax.copy()
edgep1[juggled[0]] = get_flip_min_max(edgep1, juggled[0], mins, maxs)
edgep2 = edgep1.copy()
edgep2[juggled[1]] = get_flip_min_max(edgep2, juggled[1], mins, maxs)
pep = proj3d.proj_trans_points([edgep1, edgep2], renderer.M)
centpt = proj3d.proj_transform(centers[0], centers[1], centers[2], renderer.M)
self.line.set_data((pep[0][0], pep[0][1]), (pep[1][0], pep[1][1]))
self.line.draw(renderer)
# Grid points where the planes meet
xyz0 = []
for val in majorLocs:
coord = minmax.copy()
coord[index] = val
xyz0.append(coord)
# Draw labels
peparray = np.asanyarray(pep)
# The transAxes transform is used because the Text object
# rotates the text relative to the display coordinate system.
# Therefore, if we want the labels to remain parallel to the
# axis regardless of the aspect ratio, we need to convert the
# edge points of the plane to display coordinates and calculate
# an angle from that.
# TODO: Maybe Text objects should handle this themselves?
dx, dy = (self.axes.transAxes.transform([peparray[0:2, 1]]) -
self.axes.transAxes.transform([peparray[0:2, 0]]))[0]
lxyz = 0.5*(edgep1 + edgep2)
# A rough estimate; points are ambiguous since 3D plots rotate
ax_scale = self.axes.bbox.size / self.figure.bbox.size
ax_inches = np.multiply(ax_scale, self.figure.get_size_inches())
ax_points_estimate = sum(72. * ax_inches)
deltas_per_point = 48. / ax_points_estimate
default_offset = 21.
labeldeltas = (self.labelpad + default_offset) * deltas_per_point\
* deltas
axmask = [True, True, True]
axmask[index] = False
lxyz = move_from_center(lxyz, centers, labeldeltas, axmask)
tlx, tly, tlz = proj3d.proj_transform(lxyz[0], lxyz[1], lxyz[2], \
renderer.M)
self.label.set_position((tlx, tly))
if self.get_rotate_label(self.label.get_text()):
angle = art3d.norm_text_angle(math.degrees(math.atan2(dy, dx)))
self.label.set_rotation(angle)
self.label.set_va(info['label']['va'])
self.label.set_ha(info['label']['ha'])
self.label.draw(renderer)
# Draw Offset text
# Which of the two edge points do we want to
# use for locating the offset text?
if juggled[2] == 2 :
outeredgep = edgep1
outerindex = 0
else :
outeredgep = edgep2
outerindex = 1
pos = copy.copy(outeredgep)
pos = move_from_center(pos, centers, labeldeltas, axmask)
olx, oly, olz = proj3d.proj_transform(pos[0], pos[1], pos[2], renderer.M)
self.offsetText.set_text( self.major.formatter.get_offset() )
self.offsetText.set_position( (olx, oly) )
angle = art3d.norm_text_angle(math.degrees(math.atan2(dy, dx)))
self.offsetText.set_rotation(angle)
# Must set rotation mode to "anchor" so that
# the alignment point is used as the "fulcrum" for rotation.
self.offsetText.set_rotation_mode('anchor')
#-----------------------------------------------------------------------
        # Note: the following logic determines the proper alignment of
        # the offset text. It was determined entirely by trial-and-error
# and should not be in any way considered as "the way". There are
# still some edge cases where alignment is not quite right, but
# this seems to be more of a geometry issue (in other words, I
# might be using the wrong reference points).
#
# (TT, FF, TF, FT) are the shorthand for the tuple of
# (centpt[info['tickdir']] <= peparray[info['tickdir'], outerindex],
# centpt[index] <= peparray[index, outerindex])
#
# Three-letters (e.g., TFT, FTT) are short-hand for the array
# of bools from the variable 'highs'.
# ---------------------------------------------------------------------
if centpt[info['tickdir']] > peparray[info['tickdir'], outerindex] :
# if FT and if highs has an even number of Trues
if (centpt[index] <= peparray[index, outerindex]
and ((len(highs.nonzero()[0]) % 2) == 0)) :
# Usually, this means align right, except for the FTT case,
# in which offset for axis 1 and 2 are aligned left.
if highs.tolist() == [False, True, True] and index in (1, 2) :
align = 'left'
else :
align = 'right'
else :
# The FF case
align = 'left'
else :
# if TF and if highs has an even number of Trues
if (centpt[index] > peparray[index, outerindex]
and ((len(highs.nonzero()[0]) % 2) == 0)) :
# Usually mean align left, except if it is axis 2
if index == 2 :
align = 'right'
else :
align = 'left'
else :
# The TT case
align = 'right'
self.offsetText.set_va('center')
self.offsetText.set_ha(align)
self.offsetText.draw(renderer)
# Draw grid lines
if len(xyz0) > 0:
# Grid points at end of one plane
xyz1 = copy.deepcopy(xyz0)
newindex = (index + 1) % 3
newval = get_flip_min_max(xyz1[0], newindex, mins, maxs)
for i in range(len(majorLocs)):
xyz1[i][newindex] = newval
# Grid points at end of the other plane
xyz2 = copy.deepcopy(xyz0)
newindex = (index + 2) % 3
newval = get_flip_min_max(xyz2[0], newindex, mins, maxs)
for i in range(len(majorLocs)):
xyz2[i][newindex] = newval
lines = list(zip(xyz1, xyz0, xyz2))
if self.axes._draw_grid:
self.gridlines.set_segments(lines)
self.gridlines.set_color([info['grid']['color']] * len(lines))
self.gridlines.set_linewidth(
[info['grid']['linewidth']] * len(lines))
self.gridlines.set_linestyle(
[info['grid']['linestyle']] * len(lines))
self.gridlines.draw(renderer, project=True)
# Draw ticks
tickdir = info['tickdir']
tickdelta = deltas[tickdir]
if highs[tickdir]:
ticksign = 1
else:
ticksign = -1
for tick, loc, label in zip(majorTicks, majorLocs, majorLabels):
if tick is None:
continue
# Get tick line positions
pos = copy.copy(edgep1)
pos[index] = loc
pos[tickdir] = edgep1[tickdir] + info['tick']['outward_factor'] * \
ticksign * tickdelta
x1, y1, z1 = proj3d.proj_transform(pos[0], pos[1], pos[2], \
renderer.M)
pos[tickdir] = edgep1[tickdir] - info['tick']['inward_factor'] * \
ticksign * tickdelta
x2, y2, z2 = proj3d.proj_transform(pos[0], pos[1], pos[2], \
renderer.M)
# Get position of label
default_offset = 8. # A rough estimate
labeldeltas = (tick.get_pad() + default_offset) * deltas_per_point\
* deltas
axmask = [True, True, True]
axmask[index] = False
pos[tickdir] = edgep1[tickdir]
pos = move_from_center(pos, centers, labeldeltas, axmask)
lx, ly, lz = proj3d.proj_transform(pos[0], pos[1], pos[2], \
renderer.M)
tick_update_position(tick, (x1, x2), (y1, y2), (lx, ly))
tick.tick1line.set_linewidth(info['tick']['linewidth'])
tick.tick1line.set_color(info['tick']['color'])
tick.set_label1(label)
tick.set_label2(label)
tick.draw(renderer)
renderer.close_group('axis3d')
self.stale = False
def get_view_interval(self):
"""return the Interval instance for this 3d axis view limits"""
return self.v_interval
def set_view_interval(self, vmin, vmax, ignore=False):
if ignore:
self.v_interval = vmin, vmax
else:
Vmin, Vmax = self.get_view_interval()
self.v_interval = min(vmin, Vmin), max(vmax, Vmax)
# TODO: Get this to work properly when mplot3d supports
# the transforms framework.
def get_tightbbox(self, renderer) :
# Currently returns None so that Axis.get_tightbbox
# doesn't return junk info.
return None
# Use classes to look at different data limits
class XAxis(Axis):
def get_data_interval(self):
'return the Interval instance for this axis data limits'
return self.axes.xy_dataLim.intervalx
class YAxis(Axis):
def get_data_interval(self):
'return the Interval instance for this axis data limits'
return self.axes.xy_dataLim.intervaly
class ZAxis(Axis):
def get_data_interval(self):
'return the Interval instance for this axis data limits'
return self.axes.zz_dataLim.intervalx
| gpl-3.0 |
adamginsburg/APEX_CMZ_H2CO | reduction/fourier_baseline_analysis.py | 2 | 10122 | """
Perform fourier analysis (i.e., look for frequency peaks) on the PCA-extracted
Most Correlated Components
"""
from make_apex_cubes import june2013datapath, april2014path
import numpy as np
from astropy.io import fits
import os
import pylab as pl
import paths
import matplotlib
from astropy.convolution import convolve, convolve_fft, Gaussian1DKernel
import pyspeckit
matplotlib.rc_file(paths.pcpath('pubfiguresrc'))
def find_pca_files():
pass
def spline_removal(fn, linecen=1500, linewidth=20, lineamp=10):
sp = pyspeckit.Spectrum(fn)
sp.plotter()
sp.baseline(spline=True, subtract=False, spline_sampling=500, order=3)
bl1 = sp.baseline.basespec
synthline = lineamp*np.exp(-(sp.xarr-linecen)**2/(2*linewidth**2.))
sp2 = sp.copy()
sp2.data += synthline
sp2.plotter()
sp2.baseline.set_spectofit()
sp2.baseline(spline=True, subtract=False, spline_sampling=500, order=3)
return sp,sp2
def suppress_frange(data, frange, convinterp=True, width=1):
"""
Given a spectrum, interpolate across a specified frequency range.
Frequency is in 1/pixel units.
"""
ft = np.fft.fft(data)
freq = np.fft.fftfreq(data.size)
lmask = (abs(freq) < frange[1])
umask = (abs(freq) > frange[0])
mask = lmask & umask
# Select one side of the data
midpt = len(mask)/2
whmask, = np.where(mask)
startpt = whmask[whmask<midpt][0] - 1
endpt = whmask[whmask<midpt][-1] + 1
if endpt >= len(mask):
endpt = len(mask)-1
if convinterp:
mdata = ft.copy()
mdata[mask] = np.nan
kernel = Gaussian1DKernel(width, x_size=width*8+1)
amp = convolve(np.abs(mdata), kernel, boundary='extend')
phase = convolve(np.angle(mdata), kernel, boundary='extend')
interpdata = (np.cos(phase)*amp+1j*np.sin(phase)*amp)[mask]
ft[mask] = interpdata
else:
        for order,compare in zip((-1,1),(np.less,np.greater_equal)):
            mm = mask & compare(np.arange(len(mask), dtype='int'), midpt)
            realdata = np.interp(np.arange(startpt+1, endpt),
                                 [startpt,endpt],
                                 [ft.real[startpt],ft.real[endpt]])
            imagdata = np.interp(np.arange(startpt+1, endpt),
                                 [startpt,endpt],
                                 [ft.imag[startpt],ft.imag[endpt]])
            ft[mm] = (realdata+1j*imagdata)[::order]
return np.fft.ifft(ft).real
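# Minimal usage sketch (illustrative values only, not from the APEX reduction): suppress a
# narrow band of ripple around 0.07 cycles/pixel in a synthetic 1-D spectrum.
#
#   spec = np.sin(2 * np.pi * 0.07 * np.arange(1024)) + np.random.randn(1024) * 0.1
#   cleaned = suppress_frange(spec, frange=(0.05, 0.1), convinterp=True, width=2)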
def do_example_fsupp(interpolate=False, vrange=[100,300], **kwargs):
"""
Run a specific example on a specific extracted PCA component
"""
fn = os.path.join(june2013datapath,
'M-091.F-0019-2013-2013-06-12/AP-H201-X202_pca_component_0.fits')
ff = fits.open(fn)
e1, hdr = ff[0].data, ff[0].header
fourier_suppression(e1, hdr, vrange=vrange, save=True, linecen=-1500.,
interpolate=interpolate, **kwargs)
return fn
def fourier_suppression(e1, hdr, withline=False, vrange=[100,'max'], linewidth=20.,
linecen=250., save=False, suppression_factor=3,
amplitude=10,
interpolate=False, convinterp=True, smooth_width=2):
"""
Given a spectrum (fn), suppress a velocity range `vrange` by dividing it by
some factor. Then plot...
"""
vres = hdr['CDELT1']
ff = np.fft.fftfreq(e1.size)
fvr = e1.size*vres
fvelo = fvr/(ff/ff[1])/2.
fvelo[0] = fvr
velo = vres * (np.arange(e1.size)+1-hdr['CRPIX1'])+hdr['CRVAL1']
# Add a 20 km/s wide Gaussian line, see what happens to it
line = np.exp(-(velo+linecen)**2/(linewidth**2*2.)) * amplitude
e2 = line+e1
ft = np.fft.fft(e1)
ft2 = np.fft.fft(e2)
#ft[(abs(fvelo)>100) & (abs(fvelo<165))] /= abs(ft[(abs(fvelo)>100) & (abs(fvelo<165))])/1000
#ft[(abs(fvelo)>230) & (abs(fvelo<556))] /= abs(ft[(abs(fvelo)>230) & (abs(fvelo<556))])/1000
if 'max' in vrange:
vrange[1] = max(fvelo)+1
offset = 0
else:
offset = 10
lmask = (abs(fvelo) < vrange[1])
umask = (abs(fvelo) > vrange[0])
mask = lmask & umask
levelmask = (abs(fvelo) < vrange[0])
if interpolate:
midpt = len(mask)/2
whmask, = np.where(mask)
startpt = whmask[whmask<midpt][0] - 1
endpt = whmask[whmask<midpt][-1] + 1
if endpt >= len(mask):
endpt = len(mask)-1
for fff in (ft,ft2):
if convinterp:
mdata = fff.copy(); mdata[mask] = np.nan
#realdata = convolve(mdata.real, Gaussian1DKernel(1, x_size=51), boundary='extend')
#imagdata = convolve(mdata.imag, Gaussian1DKernel(1, x_size=51), boundary='extend')
#interpdata = (realdata[mask]+1j*imagdata[mask])
amp = convolve(np.abs(mdata), Gaussian1DKernel(smooth_width, x_size=51), boundary='extend')
phase = convolve(np.angle(mdata), Gaussian1DKernel(smooth_width, x_size=51), boundary='extend')
interpdata = (np.cos(phase)*amp+1j*np.sin(phase)*amp)[mask]
#pl.figure(5)
#pl.clf()
#ax1 = pl.subplot(3,1,1)
#ax1.plot(np.arange(1,30), fff.real[1:30])
#ax1.plot(np.arange(1,30), mdata.real[1:30])
#ax1.plot(whmask[:len(whmask)/2], interpdata.real[:len(whmask)/2],'--')
#ax2 = pl.subplot(3,1,2)
#ax2.plot(np.arange(1,30), fff.imag[1:30])
#ax2.plot(np.arange(1,30), mdata.imag[1:30])
#ax2.plot(whmask[:len(whmask)/2], interpdata.imag[:len(whmask)/2],'--')
#ax3 = pl.subplot(3,1,3)
#ax3.plot(np.arange(1,30), abs(fff)[1:30])
#ax3.plot(np.arange(1,30), abs(mdata)[1:30])
#ax3.plot(whmask[:len(whmask)/2], abs(interpdata[:len(whmask)/2]),'--')
fff[mask] = interpdata
#ax1.plot(np.arange(1,30), fff.real[1:30],':')
#ax2.plot(np.arange(1,30), fff.imag[1:30],':')
#ax3.plot(np.arange(1,30), abs(fff)[1:30],':')
else:
for order,compare in zip((-1,1),(np.less,np.greater_equal)):
mm = mask & compare(np.arange(len(mask), dtype='int'), midpt)
realdata = np.interp(np.arange(startpt+1, endpt),
[startpt,endpt],
[fff.real[startpt],fff.real[endpt]])
imagdata = np.interp(np.arange(startpt+1, endpt),
[startpt,endpt],
[fff.imag[startpt],fff.imag[endpt]])
fff[mm] = (realdata+1j*imagdata)[::order]
else:
level = max(abs(ft)[levelmask])/suppression_factor
level2 = max(abs(ft2)[levelmask])/suppression_factor
ft[(mask)] /= abs(ft[(mask)])/level
ft2[(mask)] /= abs(ft2[(mask)])/level2
ax1 = pl.figure(1).gca()
ax1.cla()
ax1.plot(velo, e1, linestyle='none', color='k', marker=',', label='Input')
ax1.plot(velo, np.fft.ifft(ft)+offset, alpha=0.8, linestyle='none',
marker=',', color='r', label='FT suppressed')
ax1.set_xlabel("Velocity")
ax1.set_ylabel("Brightness (K-ish)")
leg = ax1.legend(loc='best', markerscale=20)
for ll in leg.get_lines():
ll.set_marker('o')
ll.set_markersize(5)
if save:
pl.figure(1).savefig(paths.fpath('baselines/ft_suppression_spectra.png'))
ax4 = pl.figure(4).gca()
ax4.cla()
ax4.set_title("Synthetic {0} km/s line".format(linewidth))
ax4.plot(velo, e2-np.median(e2), linestyle='none', color='b', marker=',', zorder=-1,
alpha=0.5, label='Input')
ax4.plot(velo, np.fft.ifft(ft2)+offset-np.median(e2), alpha=0.5, linestyle='none',
marker=',', color='g', zorder=-1, label='FT Suppressed')
ax4.plot(velo, e2-np.fft.ifft(ft2)-offset, alpha=0.5, linestyle='none',
marker=',', color='r', zorder=-1, label='(a) Diff')
ax4.plot(velo, e1-np.fft.ifft(ft)-offset*2, alpha=0.5, linestyle='none',
marker=',', color='m', zorder=-1, label='(b) Diff with no synthetic line')
ax4.plot(velo, (e2-np.fft.ifft(ft2))-(e1-np.fft.ifft(ft))-offset*3, alpha=0.5, linestyle='none',
marker=',', color='k', zorder=-1, label='(a)-(b)')
ax4.plot(velo, line+offset, alpha=0.5,
linewidth=0.5, color='k', zorder=1, label='Synthetic Line')
ax4.set_xlabel("Velocity")
ax4.set_ylabel("Brightness (K-ish)")
leg = ax4.legend(loc='lower left', markerscale=20, fontsize=16)
for ll in leg.get_lines():
ll.set_marker('o')
ll.set_markersize(5)
if save:
pl.figure(4).savefig(paths.fpath('baselines/ft_suppression_spectra_synthline.png'))
ax4.axis([1300,1700,-35,21])
pl.figure(4).savefig(paths.fpath('baselines/ft_suppression_spectra_synthline_zoom.png'))
ax2 = pl.figure(2).gca()
ax2.cla()
ax2.loglog(fvelo, abs(np.fft.fft(e1)), linewidth=1, label='Input')
ax2.loglog(fvelo, abs(np.fft.fft(e2)), linewidth=1,
label='Synthetic {0} km/s line'.format(linewidth))
ax2.loglog(fvelo, abs(ft), linewidth=1, label='FT Suppressed (Input)')
ax2.loglog(fvelo, abs(ft2), linewidth=1, label='FT Suppressed (Synthetic)')
ax2.set_xlabel("Velocity Scale")
ax2.set_ylabel("$|FT(spectrum)|$")
ax2.legend(loc='best')
if save:
pl.figure(2).savefig(paths.fpath('baselines/ft_suppression_fourierpower.png'))
ax3 = pl.figure(3).gca()
ax3.cla()
ax3.plot(velo, e1-np.fft.ifft(ft), label='Original')
ax3.plot(velo, e2-np.fft.ifft(ft2), label='Synthetic {0} km/s line'.format(linewidth))
ax3.set_xlabel("Velocity")
ax3.set_ylabel("Difference (Input-FT_suppressed)")
leg = ax3.legend(loc='best', markerscale=20)
if save:
pl.figure(3).savefig(paths.fpath('baselines/ft_suppression_differences.png'))
pl.draw()
pl.show()
| bsd-3-clause |
datactive/bigbang | tests/unit/test_listserv.py | 1 | 7315 | import os
import tempfile
from pathlib import Path
from unittest import mock
import pytest
import yaml
import bigbang
from bigbang import listserv
from bigbang.listserv import (
ListservArchive,
ListservList,
ListservMessageParser,
)
from config.config import CONFIG
dir_temp = tempfile.gettempdir()
file_temp_mbox = dir_temp + "/listserv.mbox"
file_auth = CONFIG.config_path + "authentication.yaml"
auth_key_mock = {"username": "bla", "password": "bla"}
@pytest.fixture(name="mlist", scope="module")
def get_mailinglist():
mlist = ListservList.from_listserv_directories(
name="3GPP_TSG_SA_ITUT_AHG",
directorypaths=[
CONFIG.test_data_path + "3GPP/3GPP_TSG_SA_ITUT_AHG/"
],
)
return mlist
@pytest.fixture(name="msg_parser", scope="module")
def get_message_parser():
msg_parser = ListservMessageParser()
return msg_parser
@pytest.fixture(name="msg", scope="module")
def get_message(msg_parser):
file_path = CONFIG.test_data_path + \
"3GPP/3GPP_TSG_SA_ITUT_AHG/3GPP_TSG_SA_ITUT_AHG.LOG1705B"
msg = msg_parser.from_listserv_file(
list_name="3GPP_TSG_SA_ITUT_AHG",
file_path=file_path,
header_start_line_nr=1,
fields="total",
)
return msg
class TestListservMessageParser:
def test__first_message_header(self, msg):
assert msg["From"] == "Stephen Hayes <[email protected]>"
assert msg["Reply-To"] == "Stephen Hayes <[email protected]>"
assert (
msg["In-Reply-To"]
== "<[email protected]>"
)
assert msg["Date"] == "Mon, 08 May 2017 10:47:41 +0000"
def test__first_message_body(self, msg):
assert msg.get_payload().split("\n")[3] == "Hi,"
assert len(msg.get_payload()) == 24809
def test__to_pandas_dataframe(self, msg_parser, msg):
df = msg_parser.to_pandas_dataframe(msg)
assert len(df.columns.values) == 12
assert len(df.index.values) == 1
def test__to_mbox(self, msg_parser, msg):
file_temp_mbox = f"{dir_temp}/bigbang_test_listserv.mbox"
msg_parser.to_mbox(msg, filepath=file_temp_mbox)
f = open(file_temp_mbox, "r")
lines = f.readlines()
assert len(lines) == 638
assert "See my comments below.\n" in lines
f.close()
Path(file_temp_mbox).unlink()
class TestListservList:
def test__from_mbox(self):
mlist = ListservList.from_mbox(
name="3GPP_MENTORING",
filepath=CONFIG.test_data_path + "3GPP_mbox/3GPP_MENTORING.mbox",
)
assert len(mlist) == 27
assert mlist.messages[0]["From"] == "John M Meredith <[log in to unmask]>"
def test__from_listserv_files(self):
filepath = CONFIG.test_data_path + \
"3GPP/3GPP_TSG_SA_ITUT_AHG/3GPP_TSG_SA_ITUT_AHG.LOG1703B"
mlist = ListservList.from_listserv_files(
name="3GPP_TSG_SA_ITUT_AHG",
filepaths=[filepath],
)
assert len(mlist) == 1
assert mlist.messages[0]["From"] == "Kevin Holley <[email protected]>"
def test__number_of_messages(self, mlist):
assert len(mlist) == 25
def test__to_dict(self, mlist):
dic = mlist.to_dict()
assert len(list(dic.keys())) == 13
assert len(dic[list(dic.keys())[0]]) == 25
def test__to_mbox(self, mlist):
mlist.to_mbox(dir_temp, filename=mlist.name)
file_temp_mbox = f"{dir_temp}/{mlist.name}.mbox"
f = open(file_temp_mbox, "r")
lines = f.readlines()
assert len(lines) >= 49294
assert "What do you think of the approach?\n" in lines
f.close()
Path(file_temp_mbox).unlink()
def test__missing_date_in_message(self, mlist):
"""
Test that when a message has no date show, a default value
"""
msg = [
msg
for msg in mlist.messages
if msg["Subject"] == "R: How to proceed with ITUT-AH"
][0]
assert msg["Date"] is None
ListservMessageParser().to_mbox(
msg, filepath=f"{dir_temp}/msg_test.mbox"
)
file_temp_mbox = f"{dir_temp}/msg_test.mbox"
f = open(file_temp_mbox, "r")
lines = f.readlines()
assert len(lines) == 547
assert "Inviato: mercoled=3DEC 15 marzo 2017 16:06\n" in lines
f.close()
Path(file_temp_mbox).unlink()
class TestListservArchive:
def test__from_mbox(self):
march = ListservArchive.from_mbox(
name="3GPP_mbox_test",
directorypath=CONFIG.test_data_path + "3GPP_mbox/",
)
assert len(march.lists) == 1
assert len(march.lists[0].messages) == 27
assert march.lists[0].messages[0]["From"] == "John M Meredith <[log in to unmask]>"
@pytest.fixture(name="arch", scope="session")
def get_mailarchive(self):
arch = ListservArchive.from_listserv_directory(
name="3GPP",
directorypath=CONFIG.test_data_path + "3GPP/",
)
return arch
def test__mailinglist_in_archive(self, arch):
assert arch.name == "3GPP"
mlist_names = [mlist.name for mlist in arch.lists]
assert "3GPP_TSG_SA_ITUT_AHG" in mlist_names
assert "3GPP_TSG_SA_WG2_MTCE" in mlist_names
ahg_index = mlist_names.index("3GPP_TSG_SA_ITUT_AHG")
mtce_index = mlist_names.index("3GPP_TSG_SA_WG2_MTCE")
assert len(arch.lists[ahg_index]) == 25
assert len(arch.lists[mtce_index]) == 57
def test__message_in_mailinglist_in_archive(self, arch):
mlist_names = [mlist.name for mlist in arch.lists]
mtce_index = mlist_names.index("3GPP_TSG_SA_WG2_MTCE")
msg = [
msg
for msg in arch.lists[mtce_index].messages
if msg["Subject"] == "test email - please ignore"
][0]
assert msg["From"] == '"Jain, Puneet" <[email protected]>'
assert msg["Reply-To"] == '"Jain, Puneet" <[email protected]>'
assert msg["Date"] == "Thu, 28 Feb 2013 18:58:18 +0000"
def test__to_dict(self, arch):
dic = arch.to_dict()
assert len(list(dic.keys())) == 14
assert len(dic[list(dic.keys())[0]]) == 49
def test__to_mbox(self, arch):
arch.to_mbox(dir_temp)
file_dic = {
f"{dir_temp}/3GPP_TSG_SA_ITUT_AHG.mbox": 40000,
f"{dir_temp}/3GPP_TSG_SA_WG2_MTCE.mbox": 60000,
}
for filepath, line_nr in file_dic.items():
assert Path(filepath).is_file()
f = open(filepath, "r")
lines = f.readlines()
assert line_nr < len(lines)
f.close()
Path(filepath).unlink()
@mock.patch("bigbang.listserv.ask_for_input", return_value="check")
def test__get_login_from_terminal(input):
"""test if login keys will be documented"""
file_auth = dir_temp + "/authentication.yaml"
_, _ = listserv.get_login_from_terminal(
username=None, password=None, file_auth=file_auth
)
f = open(file_auth, "r")
lines = f.readlines()
assert lines[0].strip("\n") == "username: 'check'"
assert lines[1].strip("\n") == "password: 'check'"
os.remove(file_auth)
| agpl-3.0 |
agiovann/Constrained_NMF | use_cases/CaImAnpaper/edge-cutter/train_net_cifar_edge_cutter_FOV.py | 2 | 13503 | #!/usr/bin/env python
"""
Created on Thu Aug 24 12:30:19 2017
@author: agiovann
"""
'''From the Keras example of a convnet on the MNIST dataset.
TRAIN ON DATA EXTRACTED FROM RESIDUALS WITH THE generate_GT SCRIPT. THIS IS MORE OF AN OVERFEAT-TYPE NETWORK.
'''
#%%
import cv2
import glob
try:
cv2.setNumThreads(1)
except:
print('Open CV is naturally single threaded')
try:
if __IPYTHON__:
print(1)
# this is used for debugging purposes only. allows to reload classes
# when changed
get_ipython().magic('load_ext autoreload')
get_ipython().magic('autoreload 2')
except NameError:
print('Not launched under iPython')
import caiman as cm
import numpy as np
import os
import time
import pylab as pl
import psutil
import sys
from ipyparallel import Client
from skimage.external.tifffile import TiffFile
import scipy
import copy
from caiman.utils.utils import download_demo
from caiman.base.rois import extract_binary_masks_blob
from caiman.utils.visualization import plot_contours, view_patches_bar
from caiman.source_extraction.cnmf import cnmf as cnmf
from caiman.motion_correction import MotionCorrect
from caiman.components_evaluation import estimate_components_quality
from caiman.components_evaluation import evaluate_components
from caiman.tests.comparison import comparison
from caiman.motion_correction import tile_and_correct, motion_correction_piecewise
import keras
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras import backend as K
from sklearn.model_selection import train_test_split
from keras.layers import Dense, Dropout, Activation, Flatten
import json as simplejson
from keras.models import model_from_json
from sklearn.utils import class_weight as cw
from caiman.utils.image_preprocessing_keras import ImageDataGenerator
from keras.layers import merge
from keras.layers.core import Lambda
from keras.models import Model
import tensorflow as tf
#%%
def make_parallel(model, gpu_count):
def get_slice(data, idx, parts):
shape = tf.shape(data)
size = tf.concat([shape[:1] // parts, shape[1:]], axis=0)
stride = tf.concat([shape[:1] // parts, shape[1:] * 0], axis=0)
start = stride * idx
return tf.slice(data, start, size)
outputs_all = []
for i in range(len(model.outputs)):
outputs_all.append([])
# Place a copy of the model on each GPU, each getting a slice of the batch
for i in range(gpu_count):
with tf.device('/gpu:%d' % i):
with tf.name_scope('tower_%d' % i) as scope:
inputs = []
# Slice each input into a piece for processing on this GPU
for x in model.inputs:
input_shape = tuple(x.get_shape().as_list())[1:]
slice_n = Lambda(get_slice, output_shape=input_shape, arguments={
'idx': i, 'parts': gpu_count})(x)
inputs.append(slice_n)
outputs = model(inputs)
if not isinstance(outputs, list):
outputs = [outputs]
# Save all the outputs for merging back together later
for l in range(len(outputs)):
outputs_all[l].append(outputs[l])
# merge outputs on CPU
with tf.device('/cpu:0'):
merged = []
for outputs in outputs_all:
merged.append(merge(outputs, mode='concat', concat_axis=0))
return Model(input=model.inputs, output=merged)
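# Usage sketch (assumes a machine with 2 GPUs and a compiled Keras `model` defined
# elsewhere; this script keeps the single-tower model and leaves the call commented out
# further down). Note that each tower receives shape[0] // gpu_count samples, so the
# batch size should be divisible by gpu_count.
#
#   parallel_model = make_parallel(model, gpu_count=2)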
#%%
# the data, shuffled and split between train and test sets
with np.load('use_cases/edge-cutter/residual_crops_all_classes.npz') as ld:
all_masks_gt = ld['all_masks_gt'][:, 1:-1, 1:-1]
labels_gt = ld['labels_gt']
all_masks_gt = all_masks_gt[labels_gt < 2]
labels_gt = labels_gt[labels_gt < 2]
#%%
batch_size = 128
num_classes = 2
epochs = 5
test_fraction = 0.25
augmentation = True
# input image dimensions
img_rows, img_cols = 48, 48
x_train, x_test, y_train, y_test = train_test_split(
all_masks_gt, labels_gt, test_size=test_fraction)
class_weight = cw.compute_class_weight('balanced', np.unique(y_train), y_train)
if K.image_data_format() == 'channels_first':
x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)
x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)
input_shape = (1, img_rows, img_cols)
else:
x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
input_shape = (img_rows, img_cols, 1)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
#x_train /= 255
#x_test /= 255
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
# convert class vectors to binary class matrices
#y_train = keras.utils.to_categorical(y_train, num_classes)
#y_test = keras.utils.to_categorical(y_test, num_classes)
#%%
# def get_conv(input_shape=(48,48,1), filename=None):
# model = Sequential()
## model.add(Lambda(lambda x: (x-np.mean(x))/np.std(x),input_shape=input_shape, output_shape=input_shape))
# model.add(Conv2D(32, (3, 3), activation='relu', name='conv1', input_shape=input_shape, padding="same"))
# model.add(Conv2D(32, (3, 3), activation='relu', name='conv2', padding="same"))
# model.add(MaxPooling2D(pool_size=(2,2)))
# model.add(Dropout(0.25))
#
# model.add(Conv2D(48, (3, 3), name = 'conv3', padding='same'))
# model.add(Activation('relu'))
# model.add(Conv2D(48, (3, 3), name = 'conv4', padding='same'))
# model.add(Activation('relu'))
# model.add(MaxPooling2D(pool_size=(3, 3)))
# model.add(Dropout(0.25))
#
# model.add(Conv2D(256,(8,8), activation="relu", name="dense1")) # This was Dense(128)
# model.add(Dropout(0.5))
# model.add(Conv2D(1, (1,1), name="dense2", activation="tanh")) # This was Dense(1)
# if filename:
# model.load_weights(filename)
# return model
def get_conv(input_shape=(48, 48, 1), filename=None):
model = Sequential()
# model.add(Lambda(lambda x: (x-np.mean(x))/np.std(x),input_shape=input_shape, output_shape=input_shape))
model.add(Conv2D(32, (3, 3), activation='relu', name='conv1',
input_shape=input_shape, padding="same"))
model.add(Conv2D(32, (3, 3), activation='relu',
name='conv2', padding="same"))
model.add(MaxPooling2D(pool_size=(4, 4)))
model.add(Dropout(0.25))
model.add(Conv2D(64, (3, 3), name='conv3', padding='same'))
model.add(Activation('relu'))
model.add(Conv2D(64, (3, 3), name='conv4', padding='same'))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(4, 4)))
model.add(Dropout(0.25))
model.add(Conv2D(256, (3, 3), activation="relu",
name="dense1")) # This was Dense(128)
model.add(Dropout(0.5))
# This was Dense(1)
model.add(Conv2D(1, (1, 1), name="dense2", activation="tanh"))
if filename:
model.load_weights(filename)
return model
#model = Sequential()
#
# model.add(Conv2D(32, kernel_size=(3, 3),
# activation='relu',
# input_shape=input_shape))
# model.add(Activation('relu'))
#model.add(Conv2D(32, (3, 3)))
# model.add(Activation('relu'))
#model.add(MaxPooling2D(pool_size=(2, 2)))
# model.add(Dropout(0.25))
#
#model.add(Conv2D(64, (3, 3), padding='same'))
# model.add(Activation('relu'))
#model.add(Conv2D(64, (3, 3)))
# model.add(Activation('relu'))
#model.add(MaxPooling2D(pool_size=(2, 2)))
# model.add(Dropout(0.25))
#
#
# model.add(Conv2D(128,(8,8), activation="relu", name="dense1")) # This was Dense(128)
# model.add(Dropout(0.5))
# model.add(Conv2D(1, (1,1), name="dense2", activation="tanh")) # This was Dense(1)
model = get_conv()
model.add(Flatten())
#opt = keras.optimizers.rmsprop(lr=0.0001, decay=1e-6)
model.compile(loss='mse', optimizer='adadelta', metrics=['accuracy'])
# model.add(Flatten())
# model.add(Dense(512))
# model.add(Activation('relu'))
# model.add(Dropout(0.5))
# model.add(Dense(num_classes))
# model.add(Activation('softmax'))
#model = make_parallel(model, 2)
# initiate RMSprop optimizer
# model.compile(loss=keras.losses.categorical_crossentropy,
# optimizer=opt,
# metrics=['accuracy'])
if augmentation:
print('Using real-time data augmentation.')
# This will do preprocessing and realtime data augmentation:
datagen = ImageDataGenerator(
# featurewise_center=True,
# featurewise_std_normalization=True,
shear_range=0.3,
rotation_range=360,
width_shift_range=0.2,
height_shift_range=0.2,
zoom_range=[0.8, 1.2],
horizontal_flip=True,
vertical_flip=True,
random_mult_range=[.25, 2]
)
# Compute quantities required for feature-wise normalization
# (std, mean, and principal components if ZCA whitening is applied).
datagen.fit(x_train)
# Fit the model on the batches generated by datagen.flow().
history = model.fit_generator(datagen.flow(x_train, y_train,
batch_size=batch_size),
steps_per_epoch=x_train.shape[0] // batch_size,
epochs=epochs,
verbose=1,
class_weight=class_weight,
validation_data=(x_test, y_test))
else:
history = model.fit(x_train, y_train,
batch_size=batch_size,
epochs=epochs,
verbose=1,
validation_data=(x_test, y_test))
#%%
score = model.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
#%% Save model and weights
import datetime
save_dir = 'use_cases/edge-cutter/'
model_name = str(datetime.datetime.now()).replace(' ', '-').replace(':', '-')
model_json = model.to_json()
json_path = os.path.join(save_dir, model_name + '.json')
with open(json_path, "w") as json_file:
json_file.write(simplejson.dumps(simplejson.loads(model_json), indent=4))
print('Saved trained model at %s ' % json_path)
if not os.path.isdir(save_dir):
os.makedirs(save_dir)
model_path = os.path.join(save_dir, model_name + '.h5')
model.save(model_path)
print('Saved trained model at %s ' % model_path)
#%% Turn the classifier into a heat map¶
heatmodel = get_conv(input_shape=(None, None, 1), filename=model_path)
import matplotlib.pylab as plt
def locate(data):
# data = cv2.cvtColor(cv2.imread("test1.jpg"), cv2.COLOR_BGR2RGB)
heatmap = heatmodel.predict(data.reshape(
1, data.shape[0], data.shape[1], data.shape[2]))
plt.imshow(heatmap[0, :, :, 0])
plt.title("Heatmap")
plt.show()
plt.imshow(heatmap[0, :, :, 0] > 0.99, cmap="gray")
plt.title("Car Area")
plt.show()
xx, yy = np.meshgrid(
np.arange(heatmap.shape[2]), np.arange(heatmap.shape[1]))
x = (xx[heatmap[0, :, :, 0] > 0.99])
y = (yy[heatmap[0, :, :, 0] > 0.99])
for i, j in zip(x, y):
cv2.rectangle(data, (i * 8, j * 8),
(i * 8 + 64, j * 8 + 64), (0, 0, 255), 5)
return data
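# `data` is assumed to be an image array with a single trailing channel (H, W, 1), larger
# than the 48x48 training crops and prepared in an earlier interactive cell; it is not
# created anywhere in this script.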
annotated = locate(data)
plt.title("Augmented")
plt.imshow(annotated)
plt.show()
#%% visualize_results
num_sampl = 30000
predictions = model.predict(
all_masks_gt[:num_sampl, :, :, None], batch_size=32, verbose=1)
cm.movie(np.squeeze(all_masks_gt[np.where(predictions[:num_sampl, 0] >= 0.95)[
0]])).play(gain=3., magnification=5, fr=10)
#%%
cm.movie(np.squeeze(all_masks_gt[np.where(predictions[:num_sampl, 1] >= 0.95)[
0]])).play(gain=3., magnification=5, fr=10)
#%%
pl.imshow(montage2d(all_masks_gt[np.where((labels_gt[:num_sampl] == 0) & (
predictions[:num_sampl, 1] > 0.95))[0]].squeeze()))
#%%
pl.imshow(montage2d(all_masks_gt[np.where((labels_gt[:num_sampl] == 1) & (
predictions[:num_sampl, 0] > 0.95))[0]].squeeze()))
#%%
pl.imshow(montage2d(all_masks_gt[np.where(
(predictions[:num_sampl, 0] > 0.95))[0]].squeeze()))
#%% retrieve and test
json_file = open(json_path, 'r')
loaded_model_json = json_file.read()
json_file.close()
loaded_model = model_from_json(loaded_model_json)
loaded_model.load_weights(model_path)
opt = keras.optimizers.rmsprop(lr=0.0001, decay=1e-6)
loaded_model.compile(loss=keras.losses.categorical_crossentropy,
optimizer=opt,
metrics=['accuracy'])
print("Loaded model from disk")
score = loaded_model.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
score = loaded_model.evaluate(x_train, y_train, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
#%%
from skimage.util.montage import montage2d
predictions = loaded_model.predict(
all_masks_gt[:num_sampl], batch_size=32, verbose=1)
cm.movie(np.squeeze(all_masks_gt[np.where(predictions[:num_sampl, 1] < 0.1)[0]])).play(
gain=3., magnification=5, fr=10)
#%%
pl.imshow(montage2d(all_masks_gt[np.where((labels_gt[:num_sampl] == 0) & (
predictions[:num_sampl, 1] >= 0.5))[0]].squeeze()))
#%%
pl.imshow(montage2d(all_masks_gt[np.where((labels_gt == 1) & (
predictions[:num_sampl, 0] >= 0.5) & (predictions[:, 0] >= 0.5))[0]].squeeze()))
| gpl-2.0 |
kkozarev/mwacme | casa_commands_instructions/find_local_maxima_multi_integrated.py | 2 | 12206 | import numpy as np
import scipy
import scipy.optimize as opt
import scipy.ndimage as ndimage
import scipy.ndimage.filters as filters
import matplotlib.pyplot as plt
import glob, os, sys, fnmatch
from astropy.io import fits as pyfits
from astropy.io import ascii
#define model Gaussian function and pass independent variables x and y as a list
def twoD_Gaussian(xytuple, amplitude, xo, yo, sigma_x, sigma_y, theta, offset):
(x, y) = xytuple
xo = float(xo)
yo = float(yo)
a = (np.cos(theta)**2)/(2*sigma_x**2) + (np.sin(theta)**2)/(2*sigma_y**2)
b = -(np.sin(2*theta))/(4*sigma_x**2) + (np.sin(2*theta))/(4*sigma_y**2)
c = (np.sin(theta)**2)/(2*sigma_x**2) + (np.cos(theta)**2)/(2*sigma_y**2)
g = offset + amplitude*np.exp( - (a*((x-xo)**2) + 2*b*(x-xo)*(y-yo) + c*((y-yo)**2)))
return g.ravel()
def check_xy_range (x,y,peak_box):
result = False
if (peak_box['x'][0] <= x <= peak_box['x'][1]):
if (peak_box['y'][0] <= y <= peak_box['y'][1]):
result = True
return result
#TEST check_xy_range
#print check_xy_range(10,10,{'x':[0,20],'y':[0,20]})
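#Illustrative (commented) check of twoD_Gaussian with scipy curve_fit on synthetic data;
#the grid size, truth parameters and initial guess below are arbitrary.
#xg, yg = np.meshgrid(np.arange(48), np.arange(48))
#truth = (100., 24., 22., 5., 4., 0., 10.)  # amplitude, xo, yo, sigma_x, sigma_y, theta, offset
#synth = twoD_Gaussian((xg, yg), *truth)
#popt, pcov = opt.curve_fit(twoD_Gaussian, (xg, yg), synth, p0=(80., 20., 20., 4., 4., 0., 5.))
#perr = np.sqrt(np.diag(pcov))  # 1-sigma uncertainties on the fitted parameters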
#The old data location
#if sys.platform == 'darwin': BASEDIR='/Users/kamen/ubuntu_share/MWA_DATA/'
#if sys.platform == 'linux2': BASEDIR='/mnt/ubuntu_share/MWA_DATA/'
#The new data location
if sys.platform == 'darwin': BASEDIR='/Volumes/Transcend/MWA_DATA/'
if sys.platform == 'linux2': BASEDIR='/mnt/MWA_DATA/'
#'139-140'#'103-104','125-126'#'069-070','113-114'#'093-094' #'084-085' # '076-077' # '062-063'
CHANNELS = ['062-063','069-070','076-077','084-085','093-094','113-114','139-140','153-154','169-170','187-188','125-126']
CHANNELS = ['062-063']
#CHANNELS = ['069-070']
#CHANNELS = ['103-104']
polarization='XX'
vv=0 #Whether to look for VV-corrected data or not
date='2015/11/04 '
dpi=40
neighborhood_size = 10
threshold = 50
force=1
#DEFINE THE PEAK BOX AROUND THE SUN LOCATION WITHIN WHICH TO SEARCH
all_peak_box={'062-063':{'x':[280,629],'y':[300,649]},
'069-070':{'x':[280,629],'y':[300,649]},
'076-077':{'x':[240,589],'y':[350,699]},
'084-085':{'x':[280,629],'y':[300,649]},
'093-094':{'x':[230,579],'y':[340,689]},
'103-104':{'x':[320,669],'y':[290,639]},
'113-114':{'x':[230,579],'y':[340,689]},
'125-126':{'x':[230,579],'y':[340,689]},
'139-140':{'x':[230,579],'y':[340,689]},
'153-154':{'x':[290,639],'y':[300,649]},
'169-170':{'x':[240,589],'y':[330,679]},
'187-188':{'x':[315,664],'y':[285,634]}}
for CHANNEL in CHANNELS:
print 'Working on channel '+CHANNEL
datadir=BASEDIR+'synchrotron/'
OUTDIR=datadir
peak_box=all_peak_box[CHANNEL]
#GET the Peak and RMS for the Dynamic Range of the image.
drfile='DR_'+CHANNEL+'_'+polarization+'_synchrotron.txt'
drdata=ascii.read(datadir+drfile)
tmp=drdata['col1']
drfnames=[]
fitsfiles=[]
drtimestrings=[]
for fname in tmp:
drtimestrings.append(fname.split('_t')[1].split('_XX')[0])
fitsfiles.append(BASEDIR+fname.split('/mnt/MWA_DATA/')[1].split('.image')[0]+'.fits')
drfnames.append(os.path.basename(fname.split('.image')[0]))
peak=drdata['col2']
rms=drdata['col3']
#CHECK if previous Maxima files exist, record the data
maxindices=['1','2']
old_maxdata={}
for maxindex in maxindices:
maxfile='Max'+maxindex+'_info_'+CHANNEL+'_'+polarization+'_synchrotron.txt'
if os.path.exists(datadir+maxfile):
maxdata=ascii.read(datadir+maxfile)
old_timestring=[]
for time in maxdata['times']:
old_timestring.append(''.join(time.split(' ')[1].split(':')))
maxdata['timestring']=old_timestring
old_maxdata[maxindex]=maxdata
#CHECK if previous Integrated Maxima file exists, get the last time saved
update=0
maxfile='Max1_info_'+CHANNEL+'_'+polarization+'_synchrotron_integrated.txt'
if os.path.exists(datadir+maxfile):
if update == 0: update=1
maxdata=ascii.read(datadir+maxfile)
inttimestring=[]
for time in maxdata['times']:
inttimestring.append(''.join(time.split(' ')[1].split(':')))
lasttime=inttimestring[-1]
lasttimeind=drtimestrings.index(lasttime)
fitsfiles=fitsfiles[lasttimeind+1:]
#Load the real data, and try to fit a gaussian.
xwidth=5
ywidth=5
#new_maxdata={}
#all_maxintens1=[]
#all_maxintens2=[]
#all_maxlocx_px1=[]
#all_maxlocy_px1=[]
#all_maxlocx_px2=[]
#all_maxlocy_px2=[]
#all_max1err=[]
#all_max2err=[]
#all_integ1flux=[]
#all_integ2flux=[]
max1file='Max1_info_'+CHANNEL+'_'+polarization+'_synchrotron_integrated.txt'
max2file='Max2_info_'+CHANNEL+'_'+polarization+'_synchrotron_integrated.txt'
for ii,infile in enumerate(fitsfiles):
print "Working on file "+infile
#Load the data
hd=pyfits.open(infile)
indata=np.squeeze(hd[0].data)
dataflat=indata.ravel()
# Create x and y indices
xsize, ysize = indata.shape
x = np.linspace(0, xsize, ysize)
y = np.linspace(0, xsize, ysize)
x, y = np.meshgrid(x, y)
#Get the original RMS value
rmsval=rms[ii]
#Get the original Maximum locations and intensities
maxintens1=old_maxdata['1']['maxintens'][ii]
maxlocx_px1=old_maxdata['1']['maxlocx_px'][ii]
maxlocy_px1=old_maxdata['1']['maxlocy_px'][ii]
maxintens2=old_maxdata['2']['maxintens'][ii]
maxlocx_px2=old_maxdata['2']['maxlocx_px'][ii]
maxlocy_px2=old_maxdata['2']['maxlocy_px'][ii]
#MAXIMUM 1 FITTING AND RECORDING THE INFORMATION
initial_guess=(maxintens1-rmsval,maxlocx_px1,maxlocy_px1,xwidth,ywidth,0.,rmsval)
try:
popt1, pcov1 = opt.curve_fit(twoD_Gaussian, (x,y), dataflat, p0=initial_guess)
except RuntimeError as e:
print "Runtime error! Continuing..."
continue
#popt1=[0.,0.,0.,0.,0.,0.,0.]
#pcov1=np.zeros((7,7))
#all_maxintens1.append(popt1[0])
#all_maxlocx_px1.append(popt1[1])
#all_maxlocy_px1.append(popt1[2])
#all_max1err.append(np.sqrt(np.diag(pcov1)))
max1data_fitted = twoD_Gaussian((x, y), *popt1)
max1indices=zip(np.where(max1data_fitted.reshape(xsize, ysize) > rmsval))
#all_integ1flux.append(np.sum(indata[max1indices]))
#MAXIMUM 2 FITTING
initial_guess=(maxintens2-rmsval,maxlocx_px2,maxlocy_px2,xwidth,ywidth,0.,rmsval)
try:
popt2, pcov2 = opt.curve_fit(twoD_Gaussian, (x,y), dataflat, p0=initial_guess)
except RuntimeError:
print "Runtime error! Continuing..."
continue
#popt2=[0.,0.,0.,0.,0.,0.,0.]
#pcov2=np.zeros((7,7))
#all_maxintens2.append(popt2[0])
#all_maxlocx_px2.append(popt2[1])
#all_maxlocy_px2.append(popt2[2])
#all_max2err.append(np.sqrt(np.diag(pcov2)))
max2data_fitted = twoD_Gaussian((x, y), *popt2)
max2indices=zip(np.where(max2data_fitted.reshape(xsize, ysize) > rmsval))
#all_integ2flux.append(np.sum(indata[max2indices]))
#Write to files
if ii == 0 and update == 0:
max1f = open(datadir+max1file, 'w')
max1f.write("maxintens maxlocx_px maxlocy_px times integrated_flux\n")
max1f.close()
max2f = open(datadir+max2file, 'w')
max2f.write("maxintens maxlocx_px maxlocy_px times integrated_flux\n")
max2f.close()
#max1file='Max1_info_'+CHANNEL+'_'+polarization+'_synchrotron_integrated.txt'
max1f = open(datadir+max1file, 'a')
max1f.write('{0:.3f} {1:d} {2:d} "{3}" {4:.3f}\n'.format(popt1[0],int(popt1[1]),int(popt1[2]),old_maxdata['1']['times'][ii],np.sum(indata[max1indices])))
max1f.close()
#max2file='Max2_info_'+CHANNEL+'_'+polarization+'_synchrotron_integrated.txt'
max2f = open(datadir+max2file, 'a')
max2f.write('{0:.3f} {1:d} {2:d} "{3}" {4:.3f}\n'.format(popt2[0],int(popt2[1]),int(popt2[2]),old_maxdata['2']['times'][ii],np.sum(indata[max2indices])))
max2f.close()
#Save the Time history of Max #1
#tempmax1file='max1_temp.txt'
#maxintens1=old_maxdata['1']['maxintens']
#maxlocx_px1=old_maxdata['1']['maxlocx_px']
#maxlocy_px1=old_maxdata['1']['maxlocy_px']
times1=old_maxdata['1']['times']
#table1 = {'maxintens':maxintens1, 'maxlocx_px':maxlocx_px1,'maxlocy_px':maxlocy_px1,'integrated_source_flux':integrated_source_flux1, 'times':times1}
#ascii.write(table1,datadir+max1file,formats={'maxlocx_px':'%4u', 'maxlocy_px':'%4u', 'maxintens':'%.4f', 'integrated_source_flux':'%.4f'})
    #table1 = {'maxintens':all_maxintens1, 'maxlocx_px':all_maxlocx_px1,'maxlocy_px':all_maxlocy_px1, 'times':times1, 'integrated_source_flux':all_integ1flux}  # relies on the all_* accumulators commented out above
##ascii.write(table1,datadir+max1file,formats={'maxlocx_px':'%4u', 'maxlocy_px':'%4u', 'maxintens':'%.4f', 'integrated_source_flux':'%.4f'})
#Save the Time history of Max #2
#tempmax2file='max2_temp.txt'
#maxintens2=old_maxdata['2']['maxintens']
#maxlocx_px2=old_maxdata['2']['maxlocx_px']
#maxlocy_px2=old_maxdata['2']['maxlocy_px']
times2=old_maxdata['2']['times']
#table2 = {'maxintens':maxintens2, 'maxlocx_px':maxlocx_px2,'maxlocy_px':maxlocy_px2,'integrated_source_flux':integrated_source_flux2, 'times':times2}
#ascii.write(table2,datadir+max2file,formats={'maxlocx_px':'%4u', 'maxlocy_px':'%4u', 'maxintens':'%.4f', 'integrated_source_flux':'%.4f'})
    #table2 = {'maxintens':all_maxintens2, 'maxlocx_px':all_maxlocx_px2,'maxlocy_px':all_maxlocy_px2,'times':times2, 'integrated_source_flux':all_integ2flux}  # relies on the all_* accumulators commented out above
##ascii.write(table2,datadir+max2file,formats={'maxlocx_px':'%4u', 'maxlocy_px':'%4u', 'maxintens':'%.4f', 'integrated_source_flux':'%.4f'})
#MAKE SOME PLOTS - 1ST THE DATA, THEN THE MAXIMA LOCATION
updatemax=0
if updatemax > 0:
hd=pyfits.open(fname)
frame=np.squeeze(hd[0].data) #hd[0].data[0,0,:,:]
hd.close()
plots=0
if plots > 0:
plt.imshow(frame, origin='lower', interpolation='none')
x1,x2,y1,y2 = plt.axis()
#Plot the box location
#plt.plot([peak_box['x'][0],peak_box['x'][0]],peak_box['y'],'w-') #left vertical
#plt.plot(peak_box['x'],[peak_box['y'][0],peak_box['y'][0]],'w-') #lower horizontal
#plt.plot([peak_box['x'][1],peak_box['x'][1]],peak_box['y'],'w-') #right vertical
#plt.plot(peak_box['x'],[peak_box['y'][1],peak_box['y'][1]],'w-') #upper horizontal
#plt.axis((x1,x2,y1,y2))
sub=[peak_box['y'][0],peak_box['y'][1],peak_box['x'][0],peak_box['x'][1]]
subdata=frame[sub[0]:sub[1],sub[2]:sub[3]]
ypixels,xpixels=subdata.shape
fig=plt.figure(figsize=(xpixels/dpi,ypixels/dpi))
ax=plt.axes([0.08, 0.08, 0.87, 0.87])
ax.imshow(subdata,vmin=rms[0],vmax=peak[0], origin='lower', interpolation='none')
plt.autoscale(False)
for xy in maxlocs:
nmax=len(xy[:,0])
if nmax > 0:
xpos1 = xy[0,1] - peak_box['x'][0]
ypos1 = xy[0,0] - peak_box['y'][0]
ax.plot(xpos1,ypos1, 'go',markersize=5)
if nmax > 1:
xpos2 = xy[1,1] - peak_box['x'][0]
ypos2 = xy[1,0] - peak_box['y'][0]
ax.plot(xpos2,ypos2, 'ro',markersize=5)
plt.savefig(datadir+CHANNEL+'_result.png', bbox_inches = 'tight')
plt.close()
fig=plt.figure(figsize=(xpixels/dpi,ypixels/dpi))
ax=plt.axes([0.08, 0.08, 0.87, 0.87])
ax.imshow(subdata,vmin=rms[0],vmax=peak[0], origin='lower', interpolation='none')
plt.autoscale(False)
plt.plot(maxintens1,color='green',linestyle='-',label='Max1')
plt.plot(maxintens2,color='blue',linestyle='-',label='Max2')
plt.legend(loc='upper left')
plt.savefig(datadir+CHANNEL+'_maxima_series.png', bbox_inches = 'tight')
plt.close()
| gpl-2.0 |
sunmont/textclassifier | bow.py | 1 | 2357 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import pandas
from sklearn import metrics
import tensorflow as tf
from tensorflow.contrib.layers.python.layers import encoders
from tensorflow.python.ops import nn
import setting
learn = tf.contrib.learn
BOW_EMBEDING_DIM = 80 #50
#activation_fn = nn.relu #tf.nn.relu
#activation_fn = nn.sigmoid
#activation_fn = nn.tanh
ACTIVATIONS = {
"relu" : nn.relu,
"sigmod" : nn.sigmoid,
"tanh" : nn.tanh
}
activation_fn = ACTIVATIONS[setting.activation_fn]
def bag_of_words_model(features, target):
"""A bag-of-words model. Note it disregards the word order in the text."""
target = tf.one_hot(target, 15, 1, 0)
features = encoders.bow_encoder(
features, vocab_size=setting.n_words, embed_dim=BOW_EMBEDING_DIM)
logits = tf.contrib.layers.fully_connected(features, 15, activation_fn) #=None)
loss = tf.contrib.losses.softmax_cross_entropy(logits, target)
#loss = tf.losses.softmax_cross_entropy(logits, target)
train_op = tf.contrib.layers.optimize_loss(
loss,
tf.contrib.framework.get_global_step(),
optimizer='Adam',
learning_rate=0.01)
return ({
'class': tf.argmax(logits, 1),
'prob': tf.nn.softmax(logits)
}, loss, train_op)
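# Sketch of wiring this model_fn into the (since-deprecated) tf.contrib.learn Estimator
# API; x_train/y_train/x_test and the step count are placeholders assumed to be prepared
# elsewhere.
#
#   classifier = learn.Estimator(model_fn=bag_of_words_model)
#   classifier.fit(x_train, y_train, steps=1000)
#   y_pred = [p['class'] for p in classifier.predict(x_test, as_iterable=True)]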
def emb_bag_of_words_model(features, target):
"""A bag-of-words model. Note it disregards the word order in the text."""
target = tf.one_hot(target, 15, 1, 0)
## features = encoders.bow_encoder(
## features, vocab_size=setting.n_words, embed_dim=BOW_EMBEDING_DIM)
logits = tf.contrib.layers.fully_connected(features, 15, activation_fn) #=None)
loss = tf.contrib.losses.softmax_cross_entropy(logits, target)
#loss = tf.losses.softmax_cross_entropy(logits, target)
train_op = tf.contrib.layers.optimize_loss(
loss,
tf.contrib.framework.get_global_step(),
optimizer='Adam',
learning_rate=0.01)
return ({
'class': tf.argmax(logits, 1),
'prob': tf.nn.softmax(logits)
}, loss, train_op)
# test
if __name__ == '__main__':
with tf.Session() as sess:
docs = [[0, 1], [2, 3]]
enc = encoders.bow_encoder(docs, 4, 3)
sess.run(tf.global_variables_initializer())
#self.assertAllEqual([2, 3], enc.eval().shape)
print(enc.eval())
| apache-2.0 |
MohammedWasim/scikit-learn | sklearn/cluster/spectral.py | 233 | 18153 | # -*- coding: utf-8 -*-
"""Algorithms for spectral clustering"""
# Author: Gael Varoquaux [email protected]
# Brian Cheung
# Wei LI <[email protected]>
# License: BSD 3 clause
import warnings
import numpy as np
from ..base import BaseEstimator, ClusterMixin
from ..utils import check_random_state, as_float_array
from ..utils.validation import check_array
from ..utils.extmath import norm
from ..metrics.pairwise import pairwise_kernels
from ..neighbors import kneighbors_graph
from ..manifold import spectral_embedding
from .k_means_ import k_means
def discretize(vectors, copy=True, max_svd_restarts=30, n_iter_max=20,
random_state=None):
"""Search for a partition matrix (clustering) which is closest to the
eigenvector embedding.
Parameters
----------
vectors : array-like, shape: (n_samples, n_clusters)
The embedding space of the samples.
copy : boolean, optional, default: True
Whether to copy vectors, or perform in-place normalization.
max_svd_restarts : int, optional, default: 30
Maximum number of attempts to restart SVD if convergence fails
    n_iter_max : int, optional, default: 20
Maximum number of iterations to attempt in rotation and partition
matrix search if machine precision convergence is not reached
random_state: int seed, RandomState instance, or None (default)
        A pseudo random number generator used for the initialization of the
        rotation matrix
Returns
-------
labels : array of integers, shape: n_samples
The labels of the clusters.
References
----------
- Multiclass spectral clustering, 2003
Stella X. Yu, Jianbo Shi
http://www1.icsi.berkeley.edu/~stellayu/publication/doc/2003kwayICCV.pdf
Notes
-----
The eigenvector embedding is used to iteratively search for the
closest discrete partition. First, the eigenvector embedding is
normalized to the space of partition matrices. An optimal discrete
partition matrix closest to this normalized embedding multiplied by
an initial rotation is calculated. Fixing this discrete partition
matrix, an optimal rotation matrix is calculated. These two
calculations are performed until convergence. The discrete partition
matrix is returned as the clustering solution. Used in spectral
clustering, this method tends to be faster and more robust to random
initialization than k-means.
"""
from scipy.sparse import csc_matrix
from scipy.linalg import LinAlgError
random_state = check_random_state(random_state)
vectors = as_float_array(vectors, copy=copy)
eps = np.finfo(float).eps
n_samples, n_components = vectors.shape
# Normalize the eigenvectors to an equal length of a vector of ones.
# Reorient the eigenvectors to point in the negative direction with respect
# to the first element. This may have to do with constraining the
# eigenvectors to lie in a specific quadrant to make the discretization
# search easier.
norm_ones = np.sqrt(n_samples)
for i in range(vectors.shape[1]):
vectors[:, i] = (vectors[:, i] / norm(vectors[:, i])) \
* norm_ones
if vectors[0, i] != 0:
vectors[:, i] = -1 * vectors[:, i] * np.sign(vectors[0, i])
# Normalize the rows of the eigenvectors. Samples should lie on the unit
# hypersphere centered at the origin. This transforms the samples in the
# embedding space to the space of partition matrices.
vectors = vectors / np.sqrt((vectors ** 2).sum(axis=1))[:, np.newaxis]
svd_restarts = 0
has_converged = False
# If there is an exception we try to randomize and rerun SVD again
# do this max_svd_restarts times.
while (svd_restarts < max_svd_restarts) and not has_converged:
# Initialize first column of rotation matrix with a row of the
# eigenvectors
rotation = np.zeros((n_components, n_components))
rotation[:, 0] = vectors[random_state.randint(n_samples), :].T
# To initialize the rest of the rotation matrix, find the rows
# of the eigenvectors that are as orthogonal to each other as
# possible
c = np.zeros(n_samples)
for j in range(1, n_components):
# Accumulate c to ensure row is as orthogonal as possible to
# previous picks as well as current one
c += np.abs(np.dot(vectors, rotation[:, j - 1]))
rotation[:, j] = vectors[c.argmin(), :].T
last_objective_value = 0.0
n_iter = 0
while not has_converged:
n_iter += 1
t_discrete = np.dot(vectors, rotation)
labels = t_discrete.argmax(axis=1)
vectors_discrete = csc_matrix(
(np.ones(len(labels)), (np.arange(0, n_samples), labels)),
shape=(n_samples, n_components))
t_svd = vectors_discrete.T * vectors
try:
U, S, Vh = np.linalg.svd(t_svd)
svd_restarts += 1
except LinAlgError:
print("SVD did not converge, randomizing and trying again")
break
ncut_value = 2.0 * (n_samples - S.sum())
if ((abs(ncut_value - last_objective_value) < eps) or
(n_iter > n_iter_max)):
has_converged = True
else:
# otherwise calculate rotation and continue
last_objective_value = ncut_value
rotation = np.dot(Vh.T, U.T)
if not has_converged:
raise LinAlgError('SVD did not converge')
return labels
def spectral_clustering(affinity, n_clusters=8, n_components=None,
eigen_solver=None, random_state=None, n_init=10,
eigen_tol=0.0, assign_labels='kmeans'):
"""Apply clustering to a projection to the normalized laplacian.
In practice Spectral Clustering is very useful when the structure of
the individual clusters is highly non-convex or more generally when
a measure of the center and spread of the cluster is not a suitable
description of the complete cluster. For instance when clusters are
nested circles on the 2D plan.
If affinity is the adjacency matrix of a graph, this method can be
used to find normalized graph cuts.
Read more in the :ref:`User Guide <spectral_clustering>`.
Parameters
-----------
affinity : array-like or sparse matrix, shape: (n_samples, n_samples)
The affinity matrix describing the relationship of the samples to
embed. **Must be symmetric**.
Possible examples:
- adjacency matrix of a graph,
- heat kernel of the pairwise distance matrix of the samples,
- symmetric k-nearest neighbours connectivity matrix of the samples.
n_clusters : integer, optional
Number of clusters to extract.
n_components : integer, optional, default is n_clusters
Number of eigen vectors to use for the spectral embedding
eigen_solver : {None, 'arpack', 'lobpcg', or 'amg'}
The eigenvalue decomposition strategy to use. AMG requires pyamg
to be installed. It can be faster on very large, sparse problems,
but may also lead to instabilities
random_state : int seed, RandomState instance, or None (default)
A pseudo random number generator used for the initialization
of the lobpcg eigen vectors decomposition when eigen_solver == 'amg'
and by the K-Means initialization.
n_init : int, optional, default: 10
Number of time the k-means algorithm will be run with different
centroid seeds. The final results will be the best output of
n_init consecutive runs in terms of inertia.
eigen_tol : float, optional, default: 0.0
Stopping criterion for eigendecomposition of the Laplacian matrix
when using arpack eigen_solver.
assign_labels : {'kmeans', 'discretize'}, default: 'kmeans'
The strategy to use to assign labels in the embedding
space. There are two ways to assign labels after the laplacian
embedding. k-means can be applied and is a popular choice. But it can
also be sensitive to initialization. Discretization is another
approach which is less sensitive to random initialization. See
the 'Multiclass spectral clustering' paper referenced below for
more details on the discretization approach.
Returns
-------
labels : array of integers, shape: n_samples
The labels of the clusters.
References
----------
- Normalized cuts and image segmentation, 2000
Jianbo Shi, Jitendra Malik
http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.160.2324
- A Tutorial on Spectral Clustering, 2007
Ulrike von Luxburg
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.165.9323
- Multiclass spectral clustering, 2003
Stella X. Yu, Jianbo Shi
http://www1.icsi.berkeley.edu/~stellayu/publication/doc/2003kwayICCV.pdf
Notes
------
    The graph should contain only one connected component; otherwise
    the results make little sense.
This algorithm solves the normalized cut for k=2: it is a
normalized spectral clustering.
"""
if assign_labels not in ('kmeans', 'discretize'):
raise ValueError("The 'assign_labels' parameter should be "
"'kmeans' or 'discretize', but '%s' was given"
% assign_labels)
random_state = check_random_state(random_state)
n_components = n_clusters if n_components is None else n_components
maps = spectral_embedding(affinity, n_components=n_components,
eigen_solver=eigen_solver,
random_state=random_state,
eigen_tol=eigen_tol, drop_first=False)
if assign_labels == 'kmeans':
_, labels, _ = k_means(maps, n_clusters, random_state=random_state,
n_init=n_init)
else:
labels = discretize(maps, random_state=random_state)
return labels
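# Illustrative use of the functional interface (data and parameter values below are
# arbitrary): build a symmetric RBF affinity matrix and cluster it, here with the
# 'discretize' label-assignment strategy described above.
#
#   from sklearn.metrics.pairwise import rbf_kernel
#   X = np.random.RandomState(0).rand(30, 2)
#   labels = spectral_clustering(rbf_kernel(X, gamma=1.0), n_clusters=3,
#                                assign_labels='discretize', random_state=0)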
class SpectralClustering(BaseEstimator, ClusterMixin):
"""Apply clustering to a projection to the normalized laplacian.
In practice Spectral Clustering is very useful when the structure of
the individual clusters is highly non-convex or more generally when
a measure of the center and spread of the cluster is not a suitable
description of the complete cluster. For instance when clusters are
nested circles on the 2D plan.
If affinity is the adjacency matrix of a graph, this method can be
used to find normalized graph cuts.
When calling ``fit``, an affinity matrix is constructed using either
kernel function such the Gaussian (aka RBF) kernel of the euclidean
distanced ``d(X, X)``::
np.exp(-gamma * d(X,X) ** 2)
or a k-nearest neighbors connectivity matrix.
Alternatively, using ``precomputed``, a user-provided affinity
matrix can be used.
Read more in the :ref:`User Guide <spectral_clustering>`.
Parameters
-----------
n_clusters : integer, optional
The dimension of the projection subspace.
affinity : string, array-like or callable, default 'rbf'
If a string, this may be one of 'nearest_neighbors', 'precomputed',
'rbf' or one of the kernels supported by
`sklearn.metrics.pairwise_kernels`.
Only kernels that produce similarity scores (non-negative values that
increase with similarity) should be used. This property is not checked
by the clustering algorithm.
gamma : float
Scaling factor of RBF, polynomial, exponential chi^2 and
sigmoid affinity kernel. Ignored for
``affinity='nearest_neighbors'``.
degree : float, default=3
Degree of the polynomial kernel. Ignored by other kernels.
coef0 : float, default=1
Zero coefficient for polynomial and sigmoid kernels.
Ignored by other kernels.
n_neighbors : integer
Number of neighbors to use when constructing the affinity matrix using
the nearest neighbors method. Ignored for ``affinity='rbf'``.
eigen_solver : {None, 'arpack', 'lobpcg', or 'amg'}
The eigenvalue decomposition strategy to use. AMG requires pyamg
to be installed. It can be faster on very large, sparse problems,
but may also lead to instabilities
random_state : int seed, RandomState instance, or None (default)
A pseudo random number generator used for the initialization
of the lobpcg eigen vectors decomposition when eigen_solver == 'amg'
and by the K-Means initialization.
n_init : int, optional, default: 10
Number of time the k-means algorithm will be run with different
centroid seeds. The final results will be the best output of
n_init consecutive runs in terms of inertia.
eigen_tol : float, optional, default: 0.0
Stopping criterion for eigendecomposition of the Laplacian matrix
when using arpack eigen_solver.
assign_labels : {'kmeans', 'discretize'}, default: 'kmeans'
The strategy to use to assign labels in the embedding
space. There are two ways to assign labels after the laplacian
embedding. k-means can be applied and is a popular choice. But it can
also be sensitive to initialization. Discretization is another approach
which is less sensitive to random initialization.
kernel_params : dictionary of string to any, optional
Parameters (keyword arguments) and values for kernel passed as
callable object. Ignored by other kernels.
Attributes
----------
affinity_matrix_ : array-like, shape (n_samples, n_samples)
Affinity matrix used for clustering. Available only if after calling
``fit``.
labels_ :
Labels of each point
Notes
-----
If you have an affinity matrix, such as a distance matrix,
for which 0 means identical elements, and high values means
very dissimilar elements, it can be transformed in a
similarity matrix that is well suited for the algorithm by
applying the Gaussian (RBF, heat) kernel::
np.exp(- X ** 2 / (2. * delta ** 2))
Another alternative is to take a symmetric version of the k
nearest neighbors connectivity matrix of the points.
If the pyamg package is installed, it is used: this greatly
speeds up computation.
References
----------
- Normalized cuts and image segmentation, 2000
Jianbo Shi, Jitendra Malik
http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.160.2324
- A Tutorial on Spectral Clustering, 2007
Ulrike von Luxburg
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.165.9323
- Multiclass spectral clustering, 2003
Stella X. Yu, Jianbo Shi
http://www1.icsi.berkeley.edu/~stellayu/publication/doc/2003kwayICCV.pdf
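    Examples
    --------
    A minimal usage sketch (the data below is illustrative only and not part
    of the original documentation)::
        import numpy as np
        X = np.array([[1., 1.], [2., 1.], [1., 0.],
                      [4., 7.], [3., 5.], [3., 6.]])
        labels = SpectralClustering(n_clusters=2, affinity='rbf',
                                    random_state=0).fit(X).labels_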
"""
def __init__(self, n_clusters=8, eigen_solver=None, random_state=None,
n_init=10, gamma=1., affinity='rbf', n_neighbors=10,
eigen_tol=0.0, assign_labels='kmeans', degree=3, coef0=1,
kernel_params=None):
self.n_clusters = n_clusters
self.eigen_solver = eigen_solver
self.random_state = random_state
self.n_init = n_init
self.gamma = gamma
self.affinity = affinity
self.n_neighbors = n_neighbors
self.eigen_tol = eigen_tol
self.assign_labels = assign_labels
self.degree = degree
self.coef0 = coef0
self.kernel_params = kernel_params
def fit(self, X, y=None):
"""Creates an affinity matrix for X using the selected affinity,
then applies spectral clustering to this affinity matrix.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
OR, if affinity==`precomputed`, a precomputed affinity
matrix of shape (n_samples, n_samples)
"""
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'],
dtype=np.float64)
if X.shape[0] == X.shape[1] and self.affinity != "precomputed":
            warnings.warn("The spectral clustering API has changed. ``fit``"
                          " now constructs an affinity matrix from data. To use"
                          " a custom affinity matrix, "
                          "set ``affinity=precomputed``.")
if self.affinity == 'nearest_neighbors':
connectivity = kneighbors_graph(X, n_neighbors=self.n_neighbors, include_self=True)
self.affinity_matrix_ = 0.5 * (connectivity + connectivity.T)
elif self.affinity == 'precomputed':
self.affinity_matrix_ = X
else:
params = self.kernel_params
if params is None:
params = {}
if not callable(self.affinity):
params['gamma'] = self.gamma
params['degree'] = self.degree
params['coef0'] = self.coef0
self.affinity_matrix_ = pairwise_kernels(X, metric=self.affinity,
filter_params=True,
**params)
random_state = check_random_state(self.random_state)
self.labels_ = spectral_clustering(self.affinity_matrix_,
n_clusters=self.n_clusters,
eigen_solver=self.eigen_solver,
random_state=random_state,
n_init=self.n_init,
eigen_tol=self.eigen_tol,
assign_labels=self.assign_labels)
return self
@property
def _pairwise(self):
return self.affinity == "precomputed"
| bsd-3-clause |
lokik/sfepy | script/gen_lobatto1d_c.py | 5 | 7664 | #!/usr/bin/env python
"""
Generate lobatto1d.c and lobatto1d.h files.
"""
from __future__ import print_function
from __future__ import absolute_import
import sys
from six.moves import range
sys.path.append('.')
import os
from argparse import ArgumentParser
import sympy as sm
import numpy as nm
import matplotlib.pyplot as plt
from sfepy import top_dir
from sfepy.base.ioutils import InDir
hdef = 'float64 %s(float64 x);\n'
cdef = """
float64 %s(float64 x)
{
return(%s);
}
"""
fun_list = """
const fun %s[%d] = {%s};
"""
def gen_lobatto(max_order):
assert max_order > 2
x = sm.symbols('x')
lobs = [0, 1]
lobs[0] = (1 - x) / 2
lobs[1] = (1 + x) / 2
dlobs = [lob.diff('x') for lob in lobs]
legs = [sm.legendre(0, 'y')]
clegs = [sm.ccode(legs[0])]
dlegs = [sm.legendre(0, 'y').diff('y')]
cdlegs = [sm.ccode(dlegs[0])]
clobs = [sm.ccode(lob) for lob in lobs]
cdlobs = [sm.ccode(dlob) for dlob in dlobs]
denoms = [] # for lobs.
for ii in range(2, max_order + 1):
coef = sm.sympify('sqrt(2 * (2 * %s - 1)) / 2' % ii)
leg = sm.legendre(ii - 1, 'y')
pleg = leg.as_poly()
coefs = pleg.all_coeffs()
denom = max(sm.denom(val) for val in coefs)
cleg = sm.ccode(sm.horner(leg*denom)/denom)
dleg = leg.diff('y')
cdleg = sm.ccode(sm.horner(dleg*denom)/denom)
lob = sm.simplify(coef * sm.integrate(leg, ('y', -1, x)))
lobnc = sm.simplify(sm.integrate(leg, ('y', -1, x)))
plobnc = lobnc.as_poly()
coefs = plobnc.all_coeffs()
denom = sm.denom(coef) * max(sm.denom(val) for val in coefs)
clob = sm.ccode(sm.horner(lob*denom)/denom)
dlob = lob.diff('x')
cdlob = sm.ccode(sm.horner(dlob*denom)/denom)
legs.append(leg)
clegs.append(cleg)
dlegs.append(dleg)
cdlegs.append(cdleg)
lobs.append(lob)
clobs.append(clob)
dlobs.append(dlob)
cdlobs.append(cdlob)
denoms.append(denom)
coef = sm.sympify('sqrt(2 * (2 * %s - 1)) / 2' % (max_order + 1))
leg = sm.legendre(max_order, 'y')
pleg = leg.as_poly()
coefs = pleg.all_coeffs()
denom = max(sm.denom(val) for val in coefs)
cleg = sm.ccode(sm.horner(leg*denom)/denom)
dleg = leg.diff('y')
cdleg = sm.ccode(sm.horner(dleg*denom)/denom)
legs.append(leg)
clegs.append(cleg)
dlegs.append(dleg)
cdlegs.append(cdleg)
kerns = []
ckerns = []
dkerns = []
cdkerns = []
for ii, lob in enumerate(lobs[2:]):
kern = sm.simplify(lob / (lobs[0] * lobs[1]))
dkern = kern.diff('x')
denom = denoms[ii] / 4
ckern = sm.ccode(sm.horner(kern*denom)/denom)
cdkern = sm.ccode(sm.horner(dkern*denom)/denom)
kerns.append(kern)
ckerns.append(ckern)
dkerns.append(dkern)
cdkerns.append(cdkern)
return (legs, clegs, dlegs, cdlegs,
lobs, clobs, dlobs, cdlobs,
kerns, ckerns, dkerns, cdkerns,
denoms)
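# Illustrative call (not part of the original script). For the smallest
# allowed order, gen_lobatto(3) returns the Legendre/Lobatto polynomials,
# their derivatives, the bubble kernels and the matching C expressions:
#
#     (legs, clegs, dlegs, cdlegs, lobs, clobs, dlobs, cdlobs,
#      kerns, ckerns, dkerns, cdkerns, denoms) = gen_lobatto(3)
#     print(clobs[2])  # C code string for the order-2 Lobatto function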
def plot_polys(fig, polys, var_name='x'):
plt.figure(fig)
plt.clf()
x = sm.symbols(var_name)
vx = nm.linspace(-1, 1, 100)
for ii, poly in enumerate(polys):
print(ii)
print(poly)
print(poly.as_poly(x).all_coeffs())
vy = [float(poly.subs(x, xx)) for xx in vx]
plt.plot(vx, vy)
def append_declarations(out, cpolys, comment, cvar_name, shift=0):
names = []
out.append('\n// %s functions.\n' % comment)
for ii, cpoly in enumerate(cpolys):
name = '%s_%03d' % (cvar_name, ii + shift)
function = hdef % name
out.append(function)
names.append(name)
return names
def append_polys(out, cpolys, comment, cvar_name, var_name='x', shift=0):
names = []
out.append('\n// %s functions.\n' % comment)
for ii, cpoly in enumerate(cpolys):
name = '%s_%03d' % (cvar_name, ii + shift)
function = cdef % (name, cpoly.replace(var_name, 'x'))
out.append(function)
names.append(name)
return names
def append_lists(out, names, length):
args = ', '.join(['&%s' % name for name in names])
name = names[0][:-4]
_list = fun_list % (name, length, args)
out.append(_list)
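# For example (illustrative), append_lists(out, ['lobatto_000', 'lobatto_001'], 2)
# appends the C declaration
#     const fun lobatto[2] = {&lobatto_000, &lobatto_001};
# since the list name is obtained by stripping the '_000' suffix from names[0].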
helps = {
'max_order' :
'maximum order of polynomials [default: %(default)s]',
'plot' :
'plot polynomials',
}
def main():
parser = ArgumentParser(description=__doc__)
parser.add_argument('--version', action='version', version='%(prog)s')
parser.add_argument('-m', '--max-order', metavar='order', type=int,
action='store', dest='max_order',
default=10, help=helps['max_order'])
parser.add_argument('--plot',
action='store_true', dest='plot',
default=False, help=helps['plot'])
options = parser.parse_args()
max_order = options.max_order
(legs, clegs, dlegs, cdlegs,
lobs, clobs, dlobs, cdlobs,
kerns, ckerns, dkerns, cdkerns,
denoms) = gen_lobatto(max_order)
if options.plot:
plot_polys(1, lobs)
plot_polys(11, dlobs)
plot_polys(2, kerns)
plot_polys(21, dkerns)
plot_polys(3, legs, var_name='y')
plot_polys(31, dlegs, var_name='y')
plt.show()
indir = InDir(os.path.join(top_dir, 'sfepy/discrete/fem/extmods/'))
fd = open(indir('lobatto1d_template.h'), 'r')
template = fd.read()
fd.close
fd = open(indir('lobatto1d.h'), 'w')
out = []
append_declarations(out, clobs, 'Lobatto', 'lobatto')
append_declarations(out, cdlobs, 'Derivatives of Lobatto', 'd_lobatto')
append_declarations(out, ckerns, 'Kernel', 'kernel',
shift=2)
append_declarations(out, cdkerns, 'Derivatives of kernel', 'd_kernel',
shift=2)
append_declarations(out, clegs, 'Legendre', 'legendre')
append_declarations(out, cdlegs, 'Derivatives of Legendre', 'd_legendre')
fd.write(template.replace('// REPLACE_TEXT', ''.join(out)))
fd.close()
fd = open(indir('lobatto1d_template.c'), 'r')
template = fd.read()
fd.close()
fd = open(indir('lobatto1d.c'), 'w')
out = []
names_lobatto = append_polys(out, clobs,
'Lobatto', 'lobatto')
names_d_lobatto = append_polys(out, cdlobs,
'Derivatives of Lobatto', 'd_lobatto')
names_kernel = append_polys(out, ckerns,
'Kernel', 'kernel',
shift=2)
names_d_kernel = append_polys(out, cdkerns,
'Derivatives of kernel', 'd_kernel',
shift=2)
names_legendre = append_polys(out, clegs,
'Legendre', 'legendre',
var_name='y')
names_d_legendre = append_polys(out, cdlegs,
'Derivatives of Legendre', 'd_legendre',
var_name='y')
out.append('\n// Lists of functions.\n')
out.append('\nconst int32 max_order = %d;\n' % max_order)
append_lists(out, names_lobatto, max_order + 1)
append_lists(out, names_d_lobatto, max_order + 1)
append_lists(out, names_kernel, max_order - 1)
append_lists(out, names_d_kernel, max_order - 1)
append_lists(out, names_legendre, max_order + 1)
append_lists(out, names_d_legendre, max_order + 1)
fd.write(template.replace('// REPLACE_TEXT', ''.join(out)))
fd.close()
if __name__ == '__main__':
main()
| bsd-3-clause |
AlexanderFabisch/scikit-learn | sklearn/utils/tests/test_testing.py | 107 | 4210 | import warnings
import unittest
import sys
from nose.tools import assert_raises
from sklearn.utils.testing import (
_assert_less,
_assert_greater,
assert_less_equal,
assert_greater_equal,
assert_warns,
assert_no_warnings,
assert_equal,
set_random_state,
assert_raise_message)
from sklearn.tree import DecisionTreeClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
try:
from nose.tools import assert_less
def test_assert_less():
# Check that the nose implementation of assert_less gives the
# same thing as the scikit's
assert_less(0, 1)
_assert_less(0, 1)
assert_raises(AssertionError, assert_less, 1, 0)
assert_raises(AssertionError, _assert_less, 1, 0)
except ImportError:
pass
try:
from nose.tools import assert_greater
def test_assert_greater():
        # Check that the nose implementation of assert_greater gives the
        # same thing as the scikit's
assert_greater(1, 0)
_assert_greater(1, 0)
assert_raises(AssertionError, assert_greater, 0, 1)
assert_raises(AssertionError, _assert_greater, 0, 1)
except ImportError:
pass
def test_assert_less_equal():
assert_less_equal(0, 1)
assert_less_equal(1, 1)
assert_raises(AssertionError, assert_less_equal, 1, 0)
def test_assert_greater_equal():
assert_greater_equal(1, 0)
assert_greater_equal(1, 1)
assert_raises(AssertionError, assert_greater_equal, 0, 1)
def test_set_random_state():
lda = LinearDiscriminantAnalysis()
tree = DecisionTreeClassifier()
# Linear Discriminant Analysis doesn't have random state: smoke test
set_random_state(lda, 3)
set_random_state(tree, 3)
assert_equal(tree.random_state, 3)
def test_assert_raise_message():
def _raise_ValueError(message):
raise ValueError(message)
def _no_raise():
pass
assert_raise_message(ValueError, "test",
_raise_ValueError, "test")
assert_raises(AssertionError,
assert_raise_message, ValueError, "something else",
_raise_ValueError, "test")
assert_raises(ValueError,
assert_raise_message, TypeError, "something else",
_raise_ValueError, "test")
assert_raises(AssertionError,
assert_raise_message, ValueError, "test",
_no_raise)
# multiple exceptions in a tuple
assert_raises(AssertionError,
assert_raise_message, (ValueError, AttributeError),
"test", _no_raise)
# This class is inspired from numpy 1.7 with an alteration to check
# the reset warning filters after calls to assert_warns.
# This assert_warns behavior is specific to scikit-learn because
#`clean_warning_registry()` is called internally by assert_warns
# and clears all previous filters.
class TestWarns(unittest.TestCase):
def test_warn(self):
def f():
warnings.warn("yo")
return 3
# Test that assert_warns is not impacted by externally set
# filters and is reset internally.
# This is because `clean_warning_registry()` is called internally by
# assert_warns and clears all previous filters.
warnings.simplefilter("ignore", UserWarning)
assert_equal(assert_warns(UserWarning, f), 3)
# Test that the warning registry is empty after assert_warns
assert_equal(sys.modules['warnings'].filters, [])
assert_raises(AssertionError, assert_no_warnings, f)
assert_equal(assert_no_warnings(lambda x: x, 1), 1)
def test_warn_wrong_warning(self):
def f():
warnings.warn("yo", DeprecationWarning)
failed = False
filters = sys.modules['warnings'].filters[:]
try:
try:
# Should raise an AssertionError
assert_warns(UserWarning, f)
failed = True
except AssertionError:
pass
finally:
sys.modules['warnings'].filters = filters
if failed:
raise AssertionError("wrong warning caught by assert_warn")
| bsd-3-clause |
bgris/ODL_bgris | setup.py | 1 | 6365 | # Copyright 2014-2016 The ODL development group
#
# This file is part of ODL.
#
# ODL is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ODL is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ODL. If not, see <http://www.gnu.org/licenses/>.
"""Setup script for ODL.
Installation command::
pip install [--user] [-e] .
"""
from __future__ import print_function, absolute_import
from setuptools import setup, find_packages
from setuptools.command.test import test as TestCommand
import os
import sys
root_path = os.path.dirname(__file__)
requires = open(os.path.join(root_path, 'requirements.txt')).readlines()
test_requires = open(
os.path.join(root_path, 'test_requirements.txt')).readlines()
class PyTest(TestCommand):
user_options = [('pytest-args=', 'a', "Arguments to pass to py.test")]
def initialize_options(self):
TestCommand.initialize_options(self)
self.pytest_args = []
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
# import here, cause outside the eggs aren't loaded
import pytest
errno = pytest.main(self.pytest_args)
sys.exit(errno)
test_path = os.path.join(root_path, 'odl', 'test')
def find_tests():
"""Discover the test files for packaging."""
tests = []
for path, _, filenames in os.walk(os.path.join(root_path, test_path)):
for filename in filenames:
basename, suffix = os.path.splitext(filename)
if (suffix == '.py' and
(basename.startswith('test_') or
basename.endswith('_test'))):
tests.append(os.path.join(path, filename))
return tests
# Determine version from top-level package __init__.py file
with open(os.path.join(root_path, 'odl', '__init__.py')) as f:
for line in f:
if line.startswith('__version__'):
version = line.strip().split()[-1][1:-1]
break
long_description = """
Operator Discretization Library (ODL) is a Python library for fast prototyping focusing on (but not restricted to) inverse problems. ODL is being developed at `KTH Royal Institute of Technology <https://www.kth.se/en/sci/institutioner/math>`_.
The main intent of ODL is to enable mathematicians and applied scientists to use different numerical methods on real-world problems without having to implement all necessary parts from the bottom up.
This is reached by an `Operator` structure which encapsulates all application-specific parts, and a high-level formulation of solvers which usually expect an operator, data and additional parameters.
The main advantages of this approach are that
1. Different problems can be solved with the same method (e.g. TV regularization) by simply switching operator and data.
2. The same problem can be solved with different methods by simply calling into different solvers.
3. Solvers and application-specific code need to be written only once, in one place, and can be tested individually.
4. Adding new applications or solution methods becomes a much easier task.
Features
========
- Efficient and well-tested data containers based on Numpy (default) or CUDA (optional)
- Objects to represent mathematical notions like vector spaces and operators, including properties as expected from mathematics (inner product, norm, operator composition, ...)
- Convenience functionality for operators like arithmetic, composition, operator matrices etc., which satisfy the known mathematical rules.
- Out-of-the-box support for frequently used operators like scaling, partial derivative, gradient, Fourier transform etc.
- A versatile and pluggable library of optimization routines for smooth and non-smooth problems, such as CGLS, BFGS, Chambolle-Pock and Douglas-Rachford splitting.
- Support for tomographic imaging with a unified geometry representation and bindings to external libraries for efficient computation of projections and back-projections.
- Standardized tests to validate implementations against expected behavior of the corresponding mathematical object, e.g. if a user-defined norm satisfies `norm(x + y) <= norm(x) + norm(y)` for a number of input vectors `x` and `y`.
"""
setup(
name='odl',
version=version,
description='Operator Discretization Library',
long_description=long_description,
url='https://github.com/odlgroup/odl',
author='ODL development group',
author_email='[email protected]',
license='GPLv3+',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Software Development',
'License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Operating System :: OS Independent'
],
keywords='research development mathematics prototyping imaging tomography',
packages=find_packages(),
package_dir={'odl': 'odl'},
package_data={'odl': find_tests() + ['odl/pytest.ini']},
include_package_data=True,
entry_points={'pytest11': ['odl_plugins = odl.util.pytest_plugins']},
    install_requires=requires,
tests_require=['pytest'],
extras_require={
'testing': test_requires,
'show': 'matplotlib',
'fftw': 'pyfftw',
'pywavelets': 'Pywavelets>=0.4',
'scikit': 'scikit-image',
'proximal': 'proximal',
},
cmdclass={'test': PyTest},
)
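# Illustrative install commands (not part of the original file; the extras
# names come from extras_require above):
#   pip install .                  # core library only
#   pip install ".[show,fftw]"     # add matplotlib plotting and pyFFTW support
#   python setup.py test           # run the pytest suite via the PyTest command class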
| gpl-3.0 |
jigargandhi/UdemyMachineLearning | Machine Learning A-Z Template Folder/Part 3 - Classification/Section 20 - Random Forest Classification/random_forest_classification.py | 6 | 2748 | # Random Forest Classification
# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Importing the dataset
dataset = pd.read_csv('Social_Network_Ads.csv')
X = dataset.iloc[:, [2, 3]].values
y = dataset.iloc[:, 4].values
# Splitting the dataset into the Training set and Test set
from sklearn.cross_validation import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.25, random_state = 0)
# Feature Scaling
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
# Fitting Random Forest Classification to the Training set
from sklearn.ensemble import RandomForestClassifier
classifier = RandomForestClassifier(n_estimators = 10, criterion = 'entropy', random_state = 0)
classifier.fit(X_train, y_train)
# Predicting the Test set results
y_pred = classifier.predict(X_test)
# Making the Confusion Matrix
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, y_pred)
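# Illustrative follow-up (not in the original course script): the overall test
# accuracy can be read off the 2x2 confusion matrix, e.g.
#   accuracy = float(cm[0, 0] + cm[1, 1]) / cm.sum()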
# Visualising the Training set results
from matplotlib.colors import ListedColormap
X_set, y_set = X_train, y_train
X1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() - 1, stop = X_set[:, 0].max() + 1, step = 0.01),
np.arange(start = X_set[:, 1].min() - 1, stop = X_set[:, 1].max() + 1, step = 0.01))
plt.contourf(X1, X2, classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),
alpha = 0.75, cmap = ListedColormap(('red', 'green')))
plt.xlim(X1.min(), X1.max())
plt.ylim(X2.min(), X2.max())
for i, j in enumerate(np.unique(y_set)):
plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1],
c = ListedColormap(('red', 'green'))(i), label = j)
plt.title('Random Forest Classification (Training set)')
plt.xlabel('Age')
plt.ylabel('Estimated Salary')
plt.legend()
plt.show()
# Visualising the Test set results
from matplotlib.colors import ListedColormap
X_set, y_set = X_test, y_test
X1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() - 1, stop = X_set[:, 0].max() + 1, step = 0.01),
np.arange(start = X_set[:, 1].min() - 1, stop = X_set[:, 1].max() + 1, step = 0.01))
plt.contourf(X1, X2, classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),
alpha = 0.75, cmap = ListedColormap(('red', 'green')))
plt.xlim(X1.min(), X1.max())
plt.ylim(X2.min(), X2.max())
for i, j in enumerate(np.unique(y_set)):
plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1],
c = ListedColormap(('red', 'green'))(i), label = j)
plt.title('Random Forest Classification (Test set)')
plt.xlabel('Age')
plt.ylabel('Estimated Salary')
plt.legend()
plt.show() | mit |
gromitsun/sim-xrf-py | others/snr_90_180/snr_0_180_varyt.py | 1 | 2469 | import numpy as np
from scipy.interpolate import interp1d
import matplotlib.pyplot as plt
# # #fonts# # #
import matplotlib
from matplotlib import rc
matplotlib.rcParams['pdf.fonttype'] = 'truetype'
fontProperties = {'family':'serif','serif':['Arial'],
'weight' : 'normal', 'size' : '12'}
rc('font',**fontProperties)
# # #
openings = np.load('snr_wpw180_100nm.npz')['openings']
snr180 = np.load('snr_wpw180_100nm.npz')['snr']
snr0 = np.load('snr_wpw0_100nm.npz')['snr']
f180 = interp1d(snr180,openings)
r = np.where(snr0>=snr180.min())
x = openings[r]
y = f180(snr0[r])
plt.plot(y, x, 'r-', label='100 nm')
snr180 = np.load('snr_wpw180_1um.npz')['snr']
snr0 = np.load('snr_wpw0_1um.npz')['snr']
f180 = interp1d(snr180,openings)
r = np.where(snr0>=snr180.min())
x = openings[r]
y = f180(snr0[r])
plt.plot(y, x,'b-',label = '1 um')
snr180 = np.load('snr_wpw180_10um.npz')['snr']
snr0 = np.load('snr_wpw0_10um.npz')['snr']
f180 = interp1d(snr180,openings)
r = np.where(snr0>=snr180.min())
x = openings[r]
y = f180(snr0[r])
plt.plot(y, x,'g-',label = '10 um')
snr180 = np.load('snr_wpw180_100um.npz')['snr']
snr0 = np.load('snr_wpw0_100um.npz')['snr']
f180 = interp1d(snr180,openings)
r = np.where(snr0>=snr180.min())
x = openings[r]
y = f180(snr0[r])
plt.plot(y, x,'m-',label = '100 um')
#### as sample
openings = np.load('snr_as180_500nm.npz')['openings']
snr180 = np.load('snr_as180_500nm.npz')['snr']
snr0 = np.load('snr_as0_500nm.npz')['snr']
f180 = interp1d(snr180,openings)
r = np.where(snr0>=snr180.min())
x = openings[r]
y = f180(snr0[r])
plt.plot(y, x, 'r--', label='500 nm')
snr180 = np.load('snr_as180_5um.npz')['snr']
snr0 = np.load('snr_as0_5um.npz')['snr']
f180 = interp1d(snr180,openings)
r = np.where(snr0>=snr180.min())
x = openings[r]
y = f180(snr0[r])
plt.plot(y, x,'b--',label = '5 um')
snr180 = np.load('snr_as180_50um.npz')['snr']
snr0 = np.load('snr_as0_50um.npz')['snr']
f180 = interp1d(snr180,openings)
r = np.where(snr0>=snr180.min())
x = openings[r]
y = f180(snr0[r])
plt.plot(y, x,'g--',label = '50 um')
# snr180 = np.load('snr_as180_500um.npz')['snr']
# snr0 = np.load('snr_as0_500um.npz')['snr']
# f180 = interp1d(snr180,openings)
# r = np.where(snr0>=snr180.min())
# x = openings[r]
# y = f180(snr0[r])
# plt.plot(y, x,'m--',label = '500 um')
plt.ylabel(r'Collection semi-angle @ 0$^\circ$ (deg)')
plt.xlabel(r'Collection semi-angle @ 180$^\circ$ (deg)')
plt.legend(loc=0,ncol=2)
plt.show()
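# Possible refactor of the repeated load/interpolate/plot blocks above (sketch
# only; it assumes every .npz file stores both 'openings' and 'snr' arrays):
#
# def plot_pair(prefix, thickness, style, label):
#     d180 = np.load('snr_%s180_%s.npz' % (prefix, thickness))
#     d0 = np.load('snr_%s0_%s.npz' % (prefix, thickness))
#     f180 = interp1d(d180['snr'], d180['openings'])
#     r = np.where(d0['snr'] >= d180['snr'].min())
#     plt.plot(f180(d0['snr'][r]), d180['openings'][r], style, label=label)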
| mit |
waterponey/scikit-learn | benchmarks/bench_isolation_forest.py | 46 | 3782 | """
==========================================
IsolationForest benchmark
==========================================
A test of IsolationForest on classical anomaly detection datasets.
"""
print(__doc__)
from time import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import IsolationForest
from sklearn.metrics import roc_curve, auc
from sklearn.datasets import fetch_kddcup99, fetch_covtype, fetch_mldata
from sklearn.preprocessing import LabelBinarizer
from sklearn.utils import shuffle as sh
np.random.seed(1)
datasets = ['http', 'smtp', 'SA', 'SF', 'shuttle', 'forestcover']
fig_roc, ax_roc = plt.subplots(1, 1, figsize=(8, 5))
for dat in datasets:
# loading and vectorization
print('loading data')
if dat in ['http', 'smtp', 'SA', 'SF']:
dataset = fetch_kddcup99(subset=dat, shuffle=True, percent10=True)
X = dataset.data
y = dataset.target
if dat == 'shuttle':
dataset = fetch_mldata('shuttle')
X = dataset.data
y = dataset.target
X, y = sh(X, y)
# we remove data with label 4
# normal data are then those of class 1
s = (y != 4)
X = X[s, :]
y = y[s]
y = (y != 1).astype(int)
if dat == 'forestcover':
dataset = fetch_covtype(shuffle=True)
X = dataset.data
y = dataset.target
# normal data are those with attribute 2
# abnormal those with attribute 4
s = (y == 2) + (y == 4)
X = X[s, :]
y = y[s]
y = (y != 2).astype(int)
print('vectorizing data')
if dat == 'SF':
lb = LabelBinarizer()
lb.fit(X[:, 1])
x1 = lb.transform(X[:, 1])
X = np.c_[X[:, :1], x1, X[:, 2:]]
y = (y != 'normal.').astype(int)
if dat == 'SA':
lb = LabelBinarizer()
lb.fit(X[:, 1])
x1 = lb.transform(X[:, 1])
lb.fit(X[:, 2])
x2 = lb.transform(X[:, 2])
lb.fit(X[:, 3])
x3 = lb.transform(X[:, 3])
X = np.c_[X[:, :1], x1, x2, x3, X[:, 4:]]
y = (y != 'normal.').astype(int)
if dat == 'http' or dat == 'smtp':
y = (y != 'normal.').astype(int)
n_samples, n_features = X.shape
n_samples_train = n_samples // 2
X = X.astype(float)
X_train = X[:n_samples_train, :]
X_test = X[n_samples_train:, :]
y_train = y[:n_samples_train]
y_test = y[n_samples_train:]
print('IsolationForest processing...')
model = IsolationForest(n_jobs=-1)
tstart = time()
model.fit(X_train)
fit_time = time() - tstart
tstart = time()
scoring = - model.decision_function(X_test) # the lower, the more normal
# Show score histograms
fig, ax = plt.subplots(3, sharex=True, sharey=True)
bins = np.linspace(-0.5, 0.5, 200)
ax[0].hist(scoring, bins, color='black')
ax[0].set_title('decision function for %s dataset' % dat)
ax[0].legend(loc="lower right")
ax[1].hist(scoring[y_test == 0], bins, color='b',
label='normal data')
ax[1].legend(loc="lower right")
ax[2].hist(scoring[y_test == 1], bins, color='r',
label='outliers')
ax[2].legend(loc="lower right")
# Show ROC Curves
predict_time = time() - tstart
fpr, tpr, thresholds = roc_curve(y_test, scoring)
AUC = auc(fpr, tpr)
label = ('%s (area: %0.3f, train-time: %0.2fs, '
'test-time: %0.2fs)' % (dat, AUC, fit_time, predict_time))
ax_roc.plot(fpr, tpr, lw=1, label=label)
ax_roc.set_xlim([-0.05, 1.05])
ax_roc.set_ylim([-0.05, 1.05])
ax_roc.set_xlabel('False Positive Rate')
ax_roc.set_ylabel('True Positive Rate')
ax_roc.set_title('Receiver operating characteristic (ROC) curves')
ax_roc.legend(loc="lower right")
fig_roc.tight_layout()
plt.show()
| bsd-3-clause |
jls713/jfactors | flattened/triax_cusps.py | 1 | 3237 | ## Checking triaxial cusps SEG(2016)
## ============================================================================
import matplotlib.pyplot as plt
import numpy as np
from scipy.integrate import quad
from matplotlib.patches import Ellipse
import flattened as fJ
from scipy.optimize import curve_fit
import seaborn as sns
## ============================================================================
def geometric_factor(q,gamma):
''' Geometric factor for infinite axisymmetric cusp '''
## \int dt (cos^2(t)+sin^2(t)/q^2)^{1/2-gamma}
return quad(lambda t: np.power(np.power(np.cos(t),2.)+np.power(np.sin(t),2.)/q/q,0.5-gamma),0.,2.*np.pi)[0]
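# Quick sanity check (illustrative, not in the original script): for q = 1 the
# integrand above is identically 1, so geometric_factor(1., gamma) should be
# 2*pi for any gamma, e.g. abs(geometric_factor(1., 1.5) - 2.*np.pi) < 1e-8.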
def f1(p,q,gamma):
''' Virial ratio for infinite triaxial cusp '''
return quad(lambda phi:quad(lambda t: np.cos(phi)**2*np.sin(t)**3*(np.sin(t)**2*np.cos(phi)**2+np.sin(t)**2*np.sin(phi)**2/p/p+np.cos(t)**2./q/q)**(-gamma/2.),0.,np.pi)[0],0.,2.*np.pi)[0]/quad(lambda phi:quad(lambda t: np.sin(t)*np.cos(t)**2*(np.sin(t)**2*np.cos(phi)**2+np.sin(t)**2*np.sin(phi)**2/p/p+np.cos(t)**2./q/q)**(-gamma/2.),0.,np.pi)[0],0.,2.*np.pi)[0]
def f2(p,q,gamma):
''' Virial ratio for infinite triaxial cusp '''
return quad(lambda phi:quad(lambda t: np.sin(phi)**2*np.sin(t)**3*(np.sin(t)**2*np.cos(phi)**2+np.sin(t)**2*np.sin(phi)**2/p/p+np.cos(t)**2./q/q)**(-gamma/2.),0.,np.pi)[0],0.,2.*np.pi)[0]/quad(lambda phi:quad(lambda t: np.sin(t)*np.cos(t)**2*(np.sin(t)**2*np.cos(phi)**2+np.sin(t)**2*np.sin(phi)**2/p/p+np.cos(t)**2./q/q)**(-gamma/2.),0.,np.pi)[0],0.,2.*np.pi)[0]
def jkin(p,q,gamma,th,ph):
''' Kinematic factor for infinite triaxial cusp '''
P = p/fJ.qpot_from_q(p)
Q = q/fJ.qpot_from_q(q)
ff1 = f1(P,Q,gamma)
ff2 = f2(P,Q,gamma)
return ((1.+ff1+ff2)/(np.cos(th)**2+ff1*np.sin(th)**2*np.cos(ph)**2+ff2*np.sin(th)**2*np.sin(ph)**2)/3.)**2
## ============================================================================
def jgeo_x(p,q,gamma):
return p/q/q*geometric_factor(q/p,gamma)
def jgeo_y(p,q,gamma):
return 1./p/q/q*geometric_factor(q,gamma)
def jgeo_z(p,q,gamma):
return 1./p/p/q*geometric_factor(p,gamma)
def jkin_x(p,q,gamma):
return jkin(p,q,gamma,.5*np.pi,0.)
def jkin_y(p,q,gamma):
return jkin(p,q,gamma,.5*np.pi,.5*np.pi)
def jkin_z(p,q,gamma):
return jkin(p,q,gamma,0.,0.)
def jtot_x(p,q,gammaDM,gammaST):
return jgeo_x(p,q,gammaDM)*jkin_x(p,q,gammaST)
def jtot_y(p,q,gammaDM,gammaST):
return jgeo_y(p,q,gammaDM)*jkin_y(p,q,gammaST)
def jtot_z(p,q,gammaDM,gammaST):
return jgeo_z(p,q,gammaDM)*jkin_z(p,q,gammaST)
if __name__ == '__main__':
q = 0.7
p = 0.8
gg = np.linspace(0.,5.,10)
ggst = 3.
qq = np.linspace(0.1,p,10)
# plt.plot(gg,map(lambda g:jgeo_x(p,q,g),gg))
# plt.plot(gg,map(lambda g:jgeo_y(p,q,g),gg))
# plt.plot(gg,map(lambda g:jgeo_z(p,q,g),gg))
plt.plot(qq,map(lambda g:jgeo_x(p,g,1.),qq))
plt.plot(qq,map(lambda g:jgeo_y(p,g,1.),qq))
plt.plot(qq,map(lambda g:jgeo_z(p,g,1.),qq))
# plt.plot(gg,map(lambda g:jkin_x(p,g,ggst),qq))
# plt.plot(gg,map(lambda g:jkin_y(p,g,ggst),qq))
# plt.plot(gg,map(lambda g:jkin_z(p,g,ggst),qq))
# plt.plot(gg,map(lambda g:jtot_x(p,q,g,ggst),gg))
# plt.plot(gg,map(lambda g:jtot_y(p,q,g,ggst),gg))
# plt.plot(gg,map(lambda g:jtot_z(p,q,g,ggst),gg))
plt.savefig('tmp.pdf',bbox_inches='tight')
| mit |
wait4pumpkin/tmall | solution/analysis/single_buy.py | 1 | 13533 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import csv
import random
import glob
import os
import sys
import time
import math
import Queue
from collections import Counter
import numpy
import pylab as pl
import matplotlib.font_manager
import matplotlib.pyplot as plt
from scipy import sparse
from scipy.sparse import lil_matrix
from sklearn import svm
from sklearn import linear_model, cross_validation, datasets, metrics
from scipy.ndimage import convolve
from sklearn.cross_validation import train_test_split
from sklearn.neural_network import BernoulliRBM
from sklearn.pipeline import Pipeline
N_MONTH = 4
N_DAY_PER_MONTH = 31
BASE_MONTH = 4
TYPE_LENGTH = 4
CLICK_TAG = 0
BUY_TAG = 1
FAVOR_TAG = 2
CART_TAG = 3
class User(object):
def __init__(self, id, info):
self.id = id;
self.info = info;
self.brands = info.keys()
self.data = dict()
for brand_id in self.brands:
brand = info[brand_id]
for month, day, action in brand:
if month not in self.data:
self.data[month] = dict()
if brand_id not in self.data[month]:
self.data[month][brand_id] = [0, 0, 0, 0]
self.data[month][brand_id][action] += 1
def data_per_day(self, brand_id, n_day=1, n_month=3):
bin_per_month = int(math.ceil(N_DAY_PER_MONTH / float(n_day)))
# data = [0 for num in xrange(n_month * bin_per_month * TYPE_LENGTH)]
data = [0 for num in xrange(n_month * bin_per_month)]
if brand_id not in self.info.keys(): return data
for month, day, action in self.info[brand_id]:
if month not in range(BASE_MONTH, BASE_MONTH + n_month):
continue
# data[((month - BASE_MONTH) * bin_per_month + int(math.ceil(float(day) / n_day)) - 1) * TYPE_LENGTH + action] += 1
data[(month - BASE_MONTH) * bin_per_month + int(math.ceil(float(day) / n_day)) - 1] = 1
return data
def data_per_week(self, brand_id, n_month=3):
return self.data_per_day(brand_id, n_day=7, n_month=n_month)
def data_per_month(self, brand_id, n_month=3):
return self.data_per_day(brand_id, n_day=N_DAY_PER_MONTH, n_month=n_month)
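    # Feature lengths implied by the defaults above (note added for clarity,
    # not original code): data_per_day gives 3 * ceil(31/1) = 93 daily flags,
    # data_per_week 3 * ceil(31/7) = 15, and data_per_month 3 per brand.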
def __str__(self):
return str(self.id) + ' ' + str(len(self.bands))
if __name__ == '__main__':
userInfo = dict()
with open('../../dataset/t_alibaba_data.csv', 'rb') as csvfile:
# with open('../../dataset/dataset/demo.csv', 'rb') as csvfile:
user_table = dict()
brand_table = dict()
user_counter = 0
brand_counter = 0
reader = csv.reader(csvfile, delimiter=',')
for row in reader:
userID, brandID, actionType, month, day = [int(field) for field in row]
if userID not in user_table:
user_table[userID] = user_counter
user_counter += 1
if brandID not in brand_table:
brand_table[brandID] = brand_counter
brand_counter += 1
userID = user_table[userID]
brandID = brand_table[brandID]
if userID not in userInfo:
userInfo[userID] = dict()
user = userInfo[userID]
if brandID not in user:
user[brandID] = []
if month in (4, 5, 6):
day = day - 14
else:
day = day - 15
if day <= 0:
month -= 1
day += 31
band = user[brandID]
band.append((month, day, actionType))
users = dict()
for (userID, info) in userInfo.iteritems():
users[userID] = User(userID, info)
history = dict()
for user_id, user in users.items():
if user_id not in history:
history[user_id] = dict()
for brand_id in user.brands:
if brand_id not in history[user_id]:
history[user.id][brand_id] = \
{ 'view': 0, 'click': 0, 'cart': 0, 'favor': 0, 'buy': 0, 'label': 0 }
history[user_id][brand_id]['view'] = 1 if \
sum([sum(user.data[month][brand_id]) - user.data[month][brand_id][1] \
for month in \
xrange(BASE_MONTH, BASE_MONTH + N_MONTH - 1)
if month in user.data and \
brand_id in user.data[month]]) > 0 \
else 0
history[user_id][brand_id]['click'] = \
sum([user.data[month][brand_id][CLICK_TAG] \
for month in \
xrange(BASE_MONTH + N_MONTH - 2, BASE_MONTH + N_MONTH - 1)
if month in user.data and \
brand_id in user.data[month]])
history[user_id][brand_id]['cart'] = 1 if \
sum([user.data[month][brand_id][CART_TAG] \
for month in \
xrange(BASE_MONTH, BASE_MONTH + N_MONTH - 1)
if month in user.data and \
brand_id in user.data[month]]) > 0 \
else 0
history[user_id][brand_id]['favor'] = 1 if \
sum([user.data[month][brand_id][FAVOR_TAG] \
for month in \
xrange(BASE_MONTH, BASE_MONTH + N_MONTH - 1)
if month in user.data and \
brand_id in user.data[month]]) > 0 \
else 0
history[user.id][brand_id]['buy'] = 1 if \
sum([user.data[month][brand_id][1] for month in \
xrange(BASE_MONTH, BASE_MONTH + N_MONTH - 1)
if month in user.data and \
brand_id in user.data[month]]) > 0 \
else 0
history[user.id][brand_id]['label'] = \
user.data[BASE_MONTH + N_MONTH - 1][brand_id][1] \
if BASE_MONTH + N_MONTH - 1 in user.data and \
brand_id in user.data[BASE_MONTH + N_MONTH - 1] \
else 0
view_before_pos = [(user_id, brand_id) for user_id, brands in history.items() \
for brand_id, counter in brands.items() \
if counter['label'] > 0 and counter['view'] > 0 and counter['buy'] < 1]
view_before_neg = [(user_id, brand_id) for user_id, brands in history.items() \
for brand_id, counter in brands.items() \
if counter['label'] < 1 and counter['view'] > 0 and counter['buy'] < 1]
click_before_pos = [(user_id, brand_id, counter['click']) for user_id, brands in history.items() \
for brand_id, counter in brands.items() \
if counter['label'] > 0 and counter['click'] > 0 and counter['buy'] < 1]
click_before_neg = [(user_id, brand_id, counter['click']) for user_id, brands in history.items() \
for brand_id, counter in brands.items() \
if counter['label'] < 1 and counter['click'] > 0 and counter['buy'] < 1]
cart_before_pos = [(user_id, brand_id) for user_id, brands in history.items() \
for brand_id, counter in brands.items() \
if counter['label'] > 0 and counter['cart'] > 0 and counter['buy'] < 1]
cart_before_neg = [(user_id, brand_id) for user_id, brands in history.items() \
for brand_id, counter in brands.items() \
if counter['label'] < 1 and counter['cart'] > 0 and counter['buy'] < 1]
favor_before_pos = [(user_id, brand_id) for user_id, brands in history.items() \
for brand_id, counter in brands.items() \
if counter['label'] > 0 and counter['favor'] > 0 and counter['buy'] < 1]
favor_before_neg = [(user_id, brand_id) for user_id, brands in history.items() \
for brand_id, counter in brands.items() \
if counter['label'] < 1 and counter['favor'] > 0 and counter['buy'] < 1]
buy_before = [(user_id, brand_id) for user_id, brands in history.items() \
for brand_id, counter in brands.items() \
if counter['label'] > 0 and counter['buy'] > 0]
buy_total = [(user_id, brand_id) for user_id, brands in history.items() \
for brand_id, counter in brands.items() \
if counter['label'] > 0]
print Counter([times for _, _, times in click_before_pos])
print Counter([times for _, _, times in click_before_neg])
print 'View Not Buy before: ', len(view_before_pos), len(view_before_neg), len(buy_total), \
'{:.2f}%'.format(float(len(view_before_pos)) / len(buy_total) * 100)
print 'Click Not Buy before: ', len(click_before_pos), len(click_before_neg), len(buy_total), \
'{:.2f}%'.format(float(len(click_before_pos)) / len(buy_total) * 100)
print 'Cart Not Buy before: ', len(cart_before_pos), len(cart_before_neg), len(buy_total), \
'{:.2f}%'.format(float(len(cart_before_pos)) / len(buy_total) * 100)
print 'Favor Not Buy before: ', len(favor_before_pos), len(favor_before_neg), len(buy_total), \
'{:.2f}%'.format(float(len(favor_before_pos)) / len(buy_total) * 100)
print 'Buy before: ', len(buy_before), len(buy_total), \
'{:.2f}%'.format(float(len(buy_before)) / len(buy_total) * 100)
print ''
view_before = view_before_pos + view_before_neg
k_fold = cross_validation.KFold(len(view_before), n_folds=5, shuffle=True)
data = []
label = []
for user_id, brand_id in view_before:
user = users[user_id]
data.append(user.data_per_day(brand_id) +
user.data_per_week(brand_id) +
user.data_per_month(brand_id) +
user.data_per_day(brand_id, n_day=10) +
user.data_per_day(brand_id, n_day=15))
label.append(1 if (user_id, brand_id) in view_before_pos else 0)
data = numpy.asarray(data)
label = numpy.asarray(label)
"""
for train_index, test_index in k_fold:
logistic = linear_model.LogisticRegression(class_weight='auto')
logistic.fit(data[train_index], label[train_index])
print 'Training: ', sum(label[train_index]), '/', len(label)
print 'Validation: ', sum(label[test_index]), '/', len(label)
print '-------------------------------------------------------------'
pos_idx = [idx for idx, tag in enumerate(label[train_index]) if tag > 0]
neg_idx = [idx for idx, tag in enumerate(label[train_index]) if tag < 1]
predict = logistic.predict(data[train_index])
pos2neg = [(a, b) for a, b in zip(label[train_index][pos_idx], predict[pos_idx]) if a != b]
neg2pos = [(a, b) for a, b in zip(label[train_index][neg_idx], predict[neg_idx]) if a != b]
error = numpy.sum(numpy.absolute(predict - label[train_index]))
print 'Training:', error, len(train_index), \
'{:.2f}%'.format(float(error) / len(train_index) * 100)
print 'Pos2neg: ', len(pos2neg), ' ', 'Neg2pos: ', len(neg2pos)
print '-------------------------------------------------------------'
pos_idx = [idx for idx, tag in enumerate(label[test_index]) if tag > 0]
neg_idx = [idx for idx, tag in enumerate(label[test_index]) if tag < 1]
predict = logistic.predict(data[test_index])
pos2neg = [(a, b) for a, b in zip(label[test_index][pos_idx], predict[pos_idx]) if a != b]
neg2pos = [(a, b) for a, b in zip(label[test_index][neg_idx], predict[neg_idx]) if a != b]
error = numpy.sum(numpy.absolute(predict - label[test_index]))
print 'Validation:', error, len(test_index), \
'{:.2f}%'.format(float(error) / len(test_index) * 100)
print 'Pos2neg: ', len(pos2neg), ' ', 'Neg2pos: ', len(neg2pos)
print ''
"""
# Load Data
# Models we will use
logistic = linear_model.LogisticRegression(class_weight='auto')
rbm = BernoulliRBM(random_state=0, verbose=True)
classifier = Pipeline(steps=[('rbm', rbm), ('logistic', logistic)])
###############################################################################
# Training
# Hyper-parameters. These were set by cross-validation,
# using a GridSearchCV. Here we are not performing cross-validation to
# save time.
rbm.learning_rate = 0.06
rbm.n_iter = 20
# More components tend to give better prediction performance, but larger
# fitting time
rbm.n_components = 100
# logistic.C = 6000.0
# Training RBM-Logistic Pipeline
classifier.fit(data, label)
# Training Logistic regression
logistic_classifier = linear_model.LogisticRegression(class_weight='auto')
logistic_classifier.fit(data, label)
###############################################################################
# Evaluation
print()
print("Logistic regression using RBM features:\n%s\n" % (
metrics.classification_report(
label,
classifier.predict(data))))
print("Logistic regression using raw pixel features:\n%s\n" % (
metrics.classification_report(
label,
logistic_classifier.predict(data))))
###############################################################################
# Plotting
plt.figure(figsize=(4.2, 4))
for i, comp in enumerate(rbm.components_):
plt.subplot(10, 10, i + 1)
plt.imshow(comp.reshape((8, 8)), cmap=plt.cm.gray_r,
interpolation='nearest')
plt.xticks(())
plt.yticks(())
plt.suptitle('100 components extracted by RBM', fontsize=16)
plt.subplots_adjust(0.08, 0.02, 0.92, 0.85, 0.08, 0.23)
plt.show()
| mit |
MechCoder/scikit-learn | doc/tutorial/text_analytics/solutions/exercise_02_sentiment.py | 104 | 3139 | """Build a sentiment analysis / polarity model
Sentiment analysis can be cast as a binary text classification problem,
that is, fitting a linear classifier on features extracted from the text
of the user messages so as to guess whether the opinion of the author is
positive or negative.
In this examples we will use a movie review dataset.
"""
# Author: Olivier Grisel <[email protected]>
# License: Simplified BSD
import sys
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.svm import LinearSVC
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV
from sklearn.datasets import load_files
from sklearn.model_selection import train_test_split
from sklearn import metrics
if __name__ == "__main__":
# NOTE: we put the following in a 'if __name__ == "__main__"' protected
# block to be able to use a multi-core grid search that also works under
# Windows, see: http://docs.python.org/library/multiprocessing.html#windows
# The multiprocessing module is used as the backend of joblib.Parallel
# that is used when n_jobs != 1 in GridSearchCV
# the training data folder must be passed as first argument
movie_reviews_data_folder = sys.argv[1]
dataset = load_files(movie_reviews_data_folder, shuffle=False)
print("n_samples: %d" % len(dataset.data))
# split the dataset in training and test set:
docs_train, docs_test, y_train, y_test = train_test_split(
dataset.data, dataset.target, test_size=0.25, random_state=None)
# TASK: Build a vectorizer / classifier pipeline that filters out tokens
# that are too rare or too frequent
pipeline = Pipeline([
('vect', TfidfVectorizer(min_df=3, max_df=0.95)),
('clf', LinearSVC(C=1000)),
])
# TASK: Build a grid search to find out whether unigrams or bigrams are
# more useful.
# Fit the pipeline on the training set using grid search for the parameters
parameters = {
'vect__ngram_range': [(1, 1), (1, 2)],
}
grid_search = GridSearchCV(pipeline, parameters, n_jobs=-1)
grid_search.fit(docs_train, y_train)
# TASK: print the mean and std for each candidate along with the parameter
# settings for all the candidates explored by grid search.
n_candidates = len(grid_search.cv_results_['params'])
for i in range(n_candidates):
print(i, 'params - %s; mean - %0.2f; std - %0.2f'
% (grid_search.cv_results_['params'][i],
grid_search.cv_results_['mean_test_score'][i],
grid_search.cv_results_['std_test_score'][i]))
# TASK: Predict the outcome on the testing set and store it in a variable
# named y_predicted
y_predicted = grid_search.predict(docs_test)
# Print the classification report
print(metrics.classification_report(y_test, y_predicted,
target_names=dataset.target_names))
# Print and plot the confusion matrix
cm = metrics.confusion_matrix(y_test, y_predicted)
print(cm)
# import matplotlib.pyplot as plt
# plt.matshow(cm)
# plt.show()
| bsd-3-clause |
aggle/ccd-exposure-time-calculator | guiccdnoise_grid.py | 1 | 11140 | #!/usr/bin/env python
"""
Observational SNR and exposure time calculator
Jonathan Aguilar
Nov. 15, 2013
license
----------
Copyright 2014 Jonathan A. Aguilar (JHU)
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
usage
----------
- Run from ipython with %run guiccdnoise.py or from command line with ./guiccdnoise.py
- Click on the sliders to set the parameter values.
to-do
----------
- Make the display as useful as the matplotlib version
"""
import warnings
import sys
import matplotlib as mpl
try:
mpl.use('TkAgg')
except UserWarning:
pass
if sys.version_info < (3,0):
import Tkinter as Tk
import Tkconstants
else:
import tkinter as Tk
import tkinter.constants as Tkconstants
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from matplotlib.widgets import Slider, Button#, RadioButtons
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2TkAgg
import numpy as np
# some text formatting
mpl.rcParams["font.size"]=16
mpl.rcParams["text.color"]='white'
mpl.rcParams["axes.labelcolor"]='white'
mpl.rcParams["xtick.color"]='white'
mpl.rcParams["ytick.color"]='white'
dt = np.dtype([("flux",'f8'),("label","S20")])
# values for the sliders
xlims = (np.log10(1e-3), np.log10(1e10))
middle = np.mean(xlims)
targetcolor='blue'
bgndcolor='red'
telescopecolor='green'
sliderlength=400
sliderres=0.01
### Signal-to-Noise
def calcSNR(sig=1,bgnd=0,readnoise=0,Idc=0,npix=1,time=1):
"""
The number of pixels is already taken into account
if you pull values from the bar chart, so leave it at 1
"""
snr = []
time = np.asarray(time)
warnings.filterwarnings("error")
try:
snr = [sig*t/np.sqrt( (sig+bgnd*npix+Idc*npix)*t+(readnoise**2)*npix ) for t in time]
except RuntimeWarning:
        print("Error: cannot handle time = 0. Try again.")
return np.array(snr)
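# Worked example with made-up numbers: sig=100 e-/s, bgnd=5 e-/s/pix,
# readnoise=10 e-, Idc=2 e-/s/pix, npix=1, time=[60.] gives
#   100*60 / sqrt((100 + 5 + 2)*60 + 10**2 * 1) ~ 74,
# i.e. calcSNR(100, 5, 10, 2, 1, [60.]) returns roughly [74.3].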
### Cosmics
def calcCosmicHits(time, pixel_size, ccd_size, cosmics,
psfs_on_target, pixels_per_psf):
"""
The probability of losing a pixel to cosmics
"""
time = np.asarray(time)
ccdflux = (pixel_size.get()*ccd_size.get()*1e-4)**2 * 10**cosmics.get() # flux through ccd
npix = psfs_on_target.get()*pixels_per_psf.get()
probPerPix = (1-1./(ccd_size.get()**2))**(ccdflux*time)
probPerTargetPix = probPerPix**npix
return (probPerPix,probPerTargetPix)
### GUI
class ccd_gui(Tk.Tk):
def __init__(self, parent):
Tk.Tk.__init__(self, parent)
self.parent = parent
self.initialize()
self.initialize_target_options()
self.initialize_background_options()
self.initialize_telescope_options()
self.initialize_signals_dict()
self.initialize_snr_figure()
self.initialize_signals_figure()
self.initialize_layout()
def initialize(self):
pass
### Sliders ###
def initialize_target_options(self):
self.starget=Tk.Scale(self,
from_=xlims[0],to=xlims[1],
label='Targets [V-band mag]',
orient='horizontal',
troughcolor=targetcolor,
length=sliderlength,
resolution=sliderres,
showvalue=True,
tickinterval=(xlims[1]-xlims[0])/10,
command=self.OnSliderChange)
def initialize_background_options(self):
self.ssky = Tk.Scale(self, from_=xlims[0],to=xlims[1],
label='Sky',
orient='horizontal',
troughcolor=bgndcolor,
length=sliderlength,
resolution=sliderres)
self.sdarkcurrent = Tk.Scale(self, from_=xlims[0],to=xlims[1],
label='Dark current',
orient='horizontal',
troughcolor=bgndcolor,
length=sliderlength,
resolution=sliderres)
self.sreadnoise = Tk.Scale(self, from_=0, to=15,
label='Read noise',
orient='horizontal',
troughcolor=bgndcolor,
length=sliderlength,
resolution=sliderres)
self.scosmics = Tk.Scale(self, from_=-5, to=1,
label='Cosmics',
orient='horizontal',
troughcolor=bgndcolor,
length=sliderlength,
resolution=sliderres)
def initialize_telescope_options(self):
self.saperture=Tk.Scale(self, from_=0,to=10,
label= "Aperture diameter [m]",
orient='horizontal',
troughcolor=telescopecolor,
length=sliderlength,
resolution=sliderres)
self.sbandwidth=Tk.Scale(self, from_=0,to=10,
label= "Bandwidth [microns]",
orient='horizontal',
troughcolor=telescopecolor,
length=sliderlength,
resolution=sliderres)
self.sccdsize=Tk.Scale(self, from_=500, to=5000,
label = "CCD dimension [pixels]",
orient='horizontal',
troughcolor=telescopecolor,
length=sliderlength,
resolution=sliderres)
self.sqe=Tk.Scale(self, from_=0,to=1,
label="Quantum efficiency",
orient='horizontal',
troughcolor=telescopecolor,
length=sliderlength,
resolution=sliderres)
self.spixelsize=Tk.Scale(self, from_=1,to=100,
label="Pixel size [microns]",
orient='horizontal',
troughcolor=telescopecolor,
length=sliderlength,
resolution=sliderres)
self.spixelsperpsf=Tk.Scale(self, from_=1,to=40,
label="Pixels under PSF [#]",
orient='horizontal',
troughcolor=telescopecolor,
length=sliderlength,
resolution=sliderres)
self.spsfsontarget=Tk.Scale(self, from_=1,to=200,
label="Target size [PSFs]",
orient='horizontal',
troughcolor=telescopecolor,
length=sliderlength,
resolution=sliderres)
def initialize_signals_dict(self):
telescope_factor = np.pi*(self.saperture.get()**2)*\
self.sbandwidth.get()*self.sqe.get()
self.signals_dict = {'Target': (10**self.starget.get()) * telescope_factor ,
'Sky': (10**self.ssky.get()) * telescope_factor,
'Cosmics': (10**self.scosmics.get()) *
(self.spixelsize.get())**2 * (1e-4)**2 *
self.spsfsontarget.get()*self.spixelsperpsf.get() *
4.66e6*1/1.12,
'DC': 10**self.sdarkcurrent.get() *
self.spsfsontarget.get()*self.spixelsperpsf.get(),
'RN': self.sreadnoise.get() *
self.spsfsontarget.get() *
self.spixelsperpsf.get()}
def initialize_snr_figure(self):
pass
def initialize_signals_figure(self):
self.f_signals = mpl.figure.Figure()
self.ax_signals = self.f_signals.add_subplot(111)
self.ax_signals.hist(np.random.normal(size=100))
self.canvas_signals = FigureCanvasTkAgg(self.f_signals, master=self)
def initialize_layout(self):
self.grid()
self.grid_columnconfigure(0,weight=1) # resize column 0 when window resized
self.grid_columnconfigure(1,weight=1) # resize column 0 when window resized
# sky
self.starget.grid(column=0,row=0,sticky='EW')
# noise
self.ssky.grid(column=0,row=1,sticky='EW')
self.sdarkcurrent.grid(column=0,row=2,sticky='EW')
self.sreadnoise.grid(column=0,row=3,sticky='EW')
self.scosmics.grid(column=0,row=4,sticky='EW')
# telescope
self.saperture.grid(column=0,row=5,sticky='EW')
self.sbandwidth.grid(column=0,row=6,sticky='EW')
self.sccdsize.grid(column=0,row=7,sticky='EW')
self.sqe.grid(column=0,row=8,sticky='EW')
self.spixelsize.grid(column=0,row=9,sticky='EW')
self.spixelsperpsf.grid(column=0,row=10,sticky='EW')
self.spsfsontarget.grid(column=0,row=11,sticky='EW')
# figures
self.canvas_signals.get_tk_widget().grid(column=1,row=0,
rowspan=8,
sticky='')
self.signalsvals = np.array([
((10**self.starget.get())*(np.pi*self.saperture.get()**2)*self.sbandwidth.get()*self.sqe.get(),"Target"),
((10**self.ssky.get())*(np.pi*self.saperture.get()**2)*self.sbandwidth.get()*self.sqe.get(),"Sky"),
((10**self.scosmics.get()) * (self.spixelsize.get())**2 * (1e-4)**2 * self.spsfsontarget.get()*self.spixelsperpsf.get() * 4.66e6*1/1.12, "Cosmics"),
((10**self.sdarkcurrent.get())*self.spsfsontarget.get()*self.spixelsperpsf.get(),"DC"),
(self.sreadnoise.get()*self.spsfsontarget.get()*self.spixelsperpsf.get(),"RN")
],dt)
### Actions
def OnSliderChange(self, value):
"""
Update the figures using the slider values
"""
pass
if __name__ == "__main__":
gui = ccd_gui(None)
gui.title('CCD Noise!')
gui.mainloop() # start looping, looking for user interaction
| gpl-3.0 |
vermouthmjl/scikit-learn | sklearn/svm/classes.py | 34 | 40599 | import warnings
import numpy as np
from .base import _fit_liblinear, BaseSVC, BaseLibSVM
from ..base import BaseEstimator, RegressorMixin
from ..linear_model.base import LinearClassifierMixin, SparseCoefMixin, \
LinearModel
from ..feature_selection.from_model import _LearntSelectorMixin
from ..utils import check_X_y
from ..utils.validation import _num_samples
from ..utils.multiclass import check_classification_targets
class LinearSVC(BaseEstimator, LinearClassifierMixin,
_LearntSelectorMixin, SparseCoefMixin):
"""Linear Support Vector Classification.
Similar to SVC with parameter kernel='linear', but implemented in terms of
liblinear rather than libsvm, so it has more flexibility in the choice of
penalties and loss functions and should scale better to large numbers of
samples.
This class supports both dense and sparse input and the multiclass support
is handled according to a one-vs-the-rest scheme.
Read more in the :ref:`User Guide <svm_classification>`.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term.
loss : string, 'hinge' or 'squared_hinge' (default='squared_hinge')
Specifies the loss function. 'hinge' is the standard SVM loss
(used e.g. by the SVC class) while 'squared_hinge' is the
square of the hinge loss.
penalty : string, 'l1' or 'l2' (default='l2')
Specifies the norm used in the penalization. The 'l2'
penalty is the standard used in SVC. The 'l1' leads to ``coef_``
vectors that are sparse.
dual : bool, (default=True)
Select the algorithm to either solve the dual or primal
optimization problem. Prefer dual=False when n_samples > n_features.
tol : float, optional (default=1e-4)
Tolerance for stopping criteria.
    multi_class : string, 'ovr' or 'crammer_singer' (default='ovr')
Determines the multi-class strategy if `y` contains more than
two classes.
``"ovr"`` trains n_classes one-vs-rest classifiers, while ``"crammer_singer"``
optimizes a joint objective over all classes.
While `crammer_singer` is interesting from a theoretical perspective
as it is consistent, it is seldom used in practice as it rarely leads
to better accuracy and is more expensive to compute.
If ``"crammer_singer"`` is chosen, the options loss, penalty and dual will
be ignored.
fit_intercept : boolean, optional (default=True)
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(i.e. data is expected to be already centered).
intercept_scaling : float, optional (default=1)
When self.fit_intercept is True, instance vector x becomes
``[x, self.intercept_scaling]``,
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
class_weight : {dict, 'balanced'}, optional
Set the parameter C of class i to ``class_weight[i]*C`` for
SVC. If not given, all classes are supposed to have
weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
verbose : int, (default=0)
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in liblinear that, if enabled, may not work
properly in a multithreaded context.
random_state : int seed, RandomState instance, or None (default=None)
The seed of the pseudo random number generator to use when
shuffling the data.
max_iter : int, (default=1000)
The maximum number of iterations to be run.
Attributes
----------
coef_ : array, shape = [n_features] if n_classes == 2 else [n_classes, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
``coef_`` is a readonly property derived from ``raw_coef_`` that
follows the internal memory layout of liblinear.
intercept_ : array, shape = [1] if n_classes == 2 else [n_classes]
Constants in decision function.
Notes
-----
The underlying C implementation uses a random number generator to
select features when fitting the model. It is thus not uncommon
to have slightly different results for the same input data. If
that happens, try with a smaller ``tol`` parameter.
The underlying implementation, liblinear, uses a sparse internal
representation for the data that will incur a memory copy.
Predict output may not match that of standalone liblinear in certain
cases. See :ref:`differences from liblinear <liblinear_differences>`
in the narrative documentation.
References
----------
`LIBLINEAR: A Library for Large Linear Classification
<http://www.csie.ntu.edu.tw/~cjlin/liblinear/>`__
See also
--------
SVC
Implementation of Support Vector Machine classifier using libsvm:
the kernel can be non-linear but its SMO algorithm does not
        scale to large numbers of samples as LinearSVC does.
Furthermore SVC multi-class mode is implemented using one
vs one scheme while LinearSVC uses one vs the rest. It is
possible to implement one vs the rest with SVC by using the
:class:`sklearn.multiclass.OneVsRestClassifier` wrapper.
Finally SVC can fit dense data without memory copy if the input
is C-contiguous. Sparse data will still incur memory copy though.
sklearn.linear_model.SGDClassifier
SGDClassifier can optimize the same cost function as LinearSVC
by adjusting the penalty and loss parameters. In addition it requires
less memory, allows incremental (online) learning, and implements
various loss functions and regularization regimes.
"""
def __init__(self, penalty='l2', loss='squared_hinge', dual=True, tol=1e-4,
C=1.0, multi_class='ovr', fit_intercept=True,
intercept_scaling=1, class_weight=None, verbose=0,
random_state=None, max_iter=1000):
self.dual = dual
self.tol = tol
self.C = C
self.multi_class = multi_class
self.fit_intercept = fit_intercept
self.intercept_scaling = intercept_scaling
self.class_weight = class_weight
self.verbose = verbose
self.random_state = random_state
self.max_iter = max_iter
self.penalty = penalty
self.loss = loss
def fit(self, X, y):
"""Fit the model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target vector relative to X
Returns
-------
self : object
Returns self.
"""
# FIXME Remove l1/l2 support in 1.0 -----------------------------------
loss_l = self.loss.lower()
msg = ("loss='%s' has been deprecated in favor of "
"loss='%s' as of 0.16. Backward compatibility"
" for the loss='%s' will be removed in %s")
# FIXME change loss_l --> self.loss after 0.18
if loss_l in ('l1', 'l2'):
old_loss = self.loss
self.loss = {'l1': 'hinge', 'l2': 'squared_hinge'}.get(loss_l)
warnings.warn(msg % (old_loss, self.loss, old_loss, '1.0'),
DeprecationWarning)
# ---------------------------------------------------------------------
if self.C < 0:
raise ValueError("Penalty term must be positive; got (C=%r)"
% self.C)
X, y = check_X_y(X, y, accept_sparse='csr',
dtype=np.float64, order="C")
check_classification_targets(y)
self.classes_ = np.unique(y)
self.coef_, self.intercept_, self.n_iter_ = _fit_liblinear(
X, y, self.C, self.fit_intercept, self.intercept_scaling,
self.class_weight, self.penalty, self.dual, self.verbose,
self.max_iter, self.tol, self.random_state, self.multi_class,
self.loss)
if self.multi_class == "crammer_singer" and len(self.classes_) == 2:
self.coef_ = (self.coef_[1] - self.coef_[0]).reshape(1, -1)
if self.fit_intercept:
intercept = self.intercept_[1] - self.intercept_[0]
self.intercept_ = np.array([intercept])
return self
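# A minimal, illustrative usage sketch for LinearSVC (not part of the library
# documentation above). The four-point toy dataset is an assumption chosen only
# to make the call sequence concrete.
def _example_linear_svc():
    import numpy as np
    X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
    y = np.array([1, 1, 2, 2])
    clf = LinearSVC(C=1.0, loss='squared_hinge', dual=True)
    clf.fit(X, y)
    # For this toy data the point below should be assigned to class 1.
    print(clf.predict([[-0.8, -1]]))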
class LinearSVR(LinearModel, RegressorMixin):
"""Linear Support Vector Regression.
Similar to SVR with parameter kernel='linear', but implemented in terms of
liblinear rather than libsvm, so it has more flexibility in the choice of
penalties and loss functions and should scale better to large numbers of
samples.
This class supports both dense and sparse input.
Read more in the :ref:`User Guide <svm_regression>`.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term. The penalty is a squared
l2 penalty. The bigger this parameter, the less regularization is used.
loss : string, 'epsilon_insensitive' or 'squared_epsilon_insensitive' (default='epsilon_insensitive')
        Specifies the loss function. 'epsilon_insensitive' is the standard
        SVR loss, while 'squared_epsilon_insensitive' is the square of the
        epsilon-insensitive loss.
epsilon : float, optional (default=0.1)
Epsilon parameter in the epsilon-insensitive loss function. Note
that the value of this parameter depends on the scale of the target
variable y. If unsure, set ``epsilon=0``.
dual : bool, (default=True)
Select the algorithm to either solve the dual or primal
optimization problem. Prefer dual=False when n_samples > n_features.
tol : float, optional (default=1e-4)
Tolerance for stopping criteria.
fit_intercept : boolean, optional (default=True)
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(i.e. data is expected to be already centered).
intercept_scaling : float, optional (default=1)
When self.fit_intercept is True, instance vector x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
verbose : int, (default=0)
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in liblinear that, if enabled, may not work
properly in a multithreaded context.
random_state : int seed, RandomState instance, or None (default=None)
The seed of the pseudo random number generator to use when
shuffling the data.
max_iter : int, (default=1000)
The maximum number of iterations to be run.
Attributes
----------
    coef_ : array, shape = [n_features]
        Weights assigned to the features (coefficients in the primal
        problem).
`coef_` is a readonly property derived from `raw_coef_` that
follows the internal memory layout of liblinear.
    intercept_ : array, shape = [1]
Constants in decision function.
See also
--------
LinearSVC
Implementation of Support Vector Machine classifier using the
same library as this class (liblinear).
SVR
Implementation of Support Vector Machine regression using libsvm:
the kernel can be non-linear but its SMO algorithm does not
        scale to large numbers of samples as LinearSVR does.
sklearn.linear_model.SGDRegressor
SGDRegressor can optimize the same cost function as LinearSVR
by adjusting the penalty and loss parameters. In addition it requires
less memory, allows incremental (online) learning, and implements
various loss functions and regularization regimes.
"""
def __init__(self, epsilon=0.0, tol=1e-4, C=1.0,
loss='epsilon_insensitive', fit_intercept=True,
intercept_scaling=1., dual=True, verbose=0,
random_state=None, max_iter=1000):
self.tol = tol
self.C = C
self.epsilon = epsilon
self.fit_intercept = fit_intercept
self.intercept_scaling = intercept_scaling
self.verbose = verbose
self.random_state = random_state
self.max_iter = max_iter
self.dual = dual
self.loss = loss
def fit(self, X, y):
"""Fit the model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target vector relative to X
Returns
-------
self : object
Returns self.
"""
# FIXME Remove l1/l2 support in 1.0 -----------------------------------
loss_l = self.loss.lower()
msg = ("loss='%s' has been deprecated in favor of "
"loss='%s' as of 0.16. Backward compatibility"
" for the loss='%s' will be removed in %s")
# FIXME change loss_l --> self.loss after 0.18
if loss_l in ('l1', 'l2'):
old_loss = self.loss
self.loss = {'l1': 'epsilon_insensitive',
'l2': 'squared_epsilon_insensitive'
}.get(loss_l)
warnings.warn(msg % (old_loss, self.loss, old_loss, '1.0'),
DeprecationWarning)
# ---------------------------------------------------------------------
if self.C < 0:
raise ValueError("Penalty term must be positive; got (C=%r)"
% self.C)
X, y = check_X_y(X, y, accept_sparse='csr',
dtype=np.float64, order="C")
penalty = 'l2' # SVR only accepts l2 penalty
self.coef_, self.intercept_, self.n_iter_ = _fit_liblinear(
X, y, self.C, self.fit_intercept, self.intercept_scaling,
None, penalty, self.dual, self.verbose,
self.max_iter, self.tol, self.random_state, loss=self.loss,
epsilon=self.epsilon)
self.coef_ = self.coef_.ravel()
return self
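# A minimal, illustrative usage sketch for LinearSVR (not part of the library
# documentation above). The random toy regression data is an assumption.
def _example_linear_svr():
    import numpy as np
    rng = np.random.RandomState(0)
    X = rng.randn(20, 3)
    y = X.dot(np.array([1.0, 2.0, -1.0])) + 0.1 * rng.randn(20)
    reg = LinearSVR(C=1.0, epsilon=0.1)
    reg.fit(X, y)
    # The learned coefficients should be close to [1, 2, -1] for this toy data.
    print(reg.coef_, reg.intercept_)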
class SVC(BaseSVC):
"""C-Support Vector Classification.
The implementation is based on libsvm. The fit time complexity
is more than quadratic with the number of samples which makes it hard
to scale to dataset with more than a couple of 10000 samples.
The multiclass support is handled according to a one-vs-one scheme.
For details on the precise mathematical formulation of the provided
kernel functions and how `gamma`, `coef0` and `degree` affect each
other, see the corresponding section in the narrative documentation:
:ref:`svm_kernels`.
Read more in the :ref:`User Guide <svm_classification>`.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term.
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to pre-compute the kernel matrix from data matrices; that matrix
should be an array of shape ``(n_samples, n_samples)``.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default='auto')
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 'auto' then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
probability : boolean, optional (default=False)
Whether to enable probability estimates. This must be enabled prior
to calling `fit`, and will slow down that method.
shrinking : boolean, optional (default=True)
Whether to use the shrinking heuristic.
tol : float, optional (default=1e-3)
Tolerance for stopping criterion.
cache_size : float, optional
Specify the size of the kernel cache (in MB).
class_weight : {dict, 'balanced'}, optional
Set the parameter C of class i to class_weight[i]*C for
SVC. If not given, all classes are supposed to have
weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
decision_function_shape : 'ovo', 'ovr' or None, default=None
Whether to return a one-vs-rest ('ovr') decision function of shape
(n_samples, n_classes) as all other classifiers, or the original
one-vs-one ('ovo') decision function of libsvm which has shape
(n_samples, n_classes * (n_classes - 1) / 2).
The default of None will currently behave as 'ovo' for backward
        compatibility and raise a deprecation warning, but will change to 'ovr'
in 0.18.
.. versionadded:: 0.17
*decision_function_shape='ovr'* is recommended.
.. versionchanged:: 0.17
Deprecated *decision_function_shape='ovo' and None*.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data for probability estimation.
Attributes
----------
support_ : array-like, shape = [n_SV]
Indices of support vectors.
support_vectors_ : array-like, shape = [n_SV, n_features]
Support vectors.
n_support_ : array-like, dtype=int32, shape = [n_class]
Number of support vectors for each class.
dual_coef_ : array, shape = [n_class-1, n_SV]
Coefficients of the support vector in the decision function.
For multiclass, coefficient for all 1-vs-1 classifiers.
The layout of the coefficients in the multiclass case is somewhat
non-trivial. See the section about multi-class classification in the
SVM section of the User Guide for details.
coef_ : array, shape = [n_class-1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is a readonly property derived from `dual_coef_` and
`support_vectors_`.
intercept_ : array, shape = [n_class * (n_class-1) / 2]
Constants in decision function.
Examples
--------
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> y = np.array([1, 1, 2, 2])
>>> from sklearn.svm import SVC
>>> clf = SVC()
>>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
SVC(C=1.0, cache_size=200, class_weight=None, coef0=0.0,
decision_function_shape=None, degree=3, gamma='auto', kernel='rbf',
max_iter=-1, probability=False, random_state=None, shrinking=True,
tol=0.001, verbose=False)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
SVR
Support Vector Machine for Regression implemented using libsvm.
LinearSVC
Scalable Linear Support Vector Machine for classification
implemented using liblinear. Check the See also section of
        LinearSVC for a more detailed comparison.
"""
def __init__(self, C=1.0, kernel='rbf', degree=3, gamma='auto',
coef0=0.0, shrinking=True, probability=False,
tol=1e-3, cache_size=200, class_weight=None,
verbose=False, max_iter=-1, decision_function_shape=None,
random_state=None):
super(SVC, self).__init__(
impl='c_svc', kernel=kernel, degree=degree, gamma=gamma,
coef0=coef0, tol=tol, C=C, nu=0., shrinking=shrinking,
probability=probability, cache_size=cache_size,
class_weight=class_weight, verbose=verbose, max_iter=max_iter,
decision_function_shape=decision_function_shape,
random_state=random_state)
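# An illustrative sketch of how decision_function_shape affects the shape of
# SVC.decision_function (the four-class toy data below is an assumption).
# With 4 classes, 'ovo' returns n_classes * (n_classes - 1) / 2 = 6 columns,
# while 'ovr' returns one column per class, i.e. 4.
def _example_svc_decision_function_shape():
    import numpy as np
    X = np.array([[-2, -2], [-1, -2], [2, 2], [1, 2],
                  [4, -2], [5, -2], [-2, 4], [-1, 4]])
    y = np.array([0, 0, 1, 1, 2, 2, 3, 3])
    ovo = SVC(decision_function_shape='ovo').fit(X, y)
    ovr = SVC(decision_function_shape='ovr').fit(X, y)
    print(ovo.decision_function(X).shape)  # (8, 6)
    print(ovr.decision_function(X).shape)  # (8, 4)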
class NuSVC(BaseSVC):
"""Nu-Support Vector Classification.
Similar to SVC but uses a parameter to control the number of support
vectors.
The implementation is based on libsvm.
Read more in the :ref:`User Guide <svm_classification>`.
Parameters
----------
nu : float, optional (default=0.5)
An upper bound on the fraction of training errors and a lower
bound of the fraction of support vectors. Should be in the
interval (0, 1].
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default='auto')
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 'auto' then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
probability : boolean, optional (default=False)
Whether to enable probability estimates. This must be enabled prior
to calling `fit`, and will slow down that method.
shrinking : boolean, optional (default=True)
Whether to use the shrinking heuristic.
tol : float, optional (default=1e-3)
Tolerance for stopping criterion.
cache_size : float, optional
Specify the size of the kernel cache (in MB).
class_weight : {dict, 'auto'}, optional
Set the parameter C of class i to class_weight[i]*C for
SVC. If not given, all classes are supposed to have
weight one. The 'auto' mode uses the values of y to
automatically adjust weights inversely proportional to
class frequencies.
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
decision_function_shape : 'ovo', 'ovr' or None, default=None
Whether to return a one-vs-rest ('ovr') decision function of shape
(n_samples, n_classes) as all other classifiers, or the original
one-vs-one ('ovo') decision function of libsvm which has shape
(n_samples, n_classes * (n_classes - 1) / 2).
The default of None will currently behave as 'ovo' for backward
        compatibility and raise a deprecation warning, but will change to 'ovr'
in 0.18.
.. versionadded:: 0.17
*decision_function_shape='ovr'* is recommended.
.. versionchanged:: 0.17
Deprecated *decision_function_shape='ovo' and None*.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data for probability estimation.
Attributes
----------
support_ : array-like, shape = [n_SV]
Indices of support vectors.
support_vectors_ : array-like, shape = [n_SV, n_features]
Support vectors.
n_support_ : array-like, dtype=int32, shape = [n_class]
Number of support vectors for each class.
dual_coef_ : array, shape = [n_class-1, n_SV]
Coefficients of the support vector in the decision function.
For multiclass, coefficient for all 1-vs-1 classifiers.
The layout of the coefficients in the multiclass case is somewhat
non-trivial. See the section about multi-class classification in
the SVM section of the User Guide for details.
coef_ : array, shape = [n_class-1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
        `coef_` is a readonly property derived from `dual_coef_` and
`support_vectors_`.
intercept_ : array, shape = [n_class * (n_class-1) / 2]
Constants in decision function.
Examples
--------
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> y = np.array([1, 1, 2, 2])
>>> from sklearn.svm import NuSVC
>>> clf = NuSVC()
>>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
NuSVC(cache_size=200, class_weight=None, coef0=0.0,
decision_function_shape=None, degree=3, gamma='auto', kernel='rbf',
max_iter=-1, nu=0.5, probability=False, random_state=None,
shrinking=True, tol=0.001, verbose=False)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
SVC
Support Vector Machine for classification using libsvm.
LinearSVC
Scalable linear Support Vector Machine for classification using
liblinear.
"""
def __init__(self, nu=0.5, kernel='rbf', degree=3, gamma='auto',
coef0=0.0, shrinking=True, probability=False,
tol=1e-3, cache_size=200, class_weight=None, verbose=False,
max_iter=-1, decision_function_shape=None, random_state=None):
super(NuSVC, self).__init__(
impl='nu_svc', kernel=kernel, degree=degree, gamma=gamma,
coef0=coef0, tol=tol, C=0., nu=nu, shrinking=shrinking,
probability=probability, cache_size=cache_size,
class_weight=class_weight, verbose=verbose, max_iter=max_iter,
decision_function_shape=decision_function_shape,
random_state=random_state)
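# An illustrative sketch of NuSVC's nu parameter (the toy data is an
# assumption): nu upper-bounds the fraction of training errors and
# lower-bounds the fraction of support vectors.
def _example_nu_svc():
    import numpy as np
    X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
    y = np.array([1, 1, 2, 2])
    clf = NuSVC(nu=0.5).fit(X, y)
    print(clf.n_support_)  # number of support vectors retained per class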
class SVR(BaseLibSVM, RegressorMixin):
"""Epsilon-Support Vector Regression.
The free parameters in the model are C and epsilon.
The implementation is based on libsvm.
Read more in the :ref:`User Guide <svm_regression>`.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term.
epsilon : float, optional (default=0.1)
Epsilon in the epsilon-SVR model. It specifies the epsilon-tube
within which no penalty is associated in the training loss function
with points predicted within a distance epsilon from the actual
value.
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default='auto')
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 'auto' then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
shrinking : boolean, optional (default=True)
Whether to use the shrinking heuristic.
tol : float, optional (default=1e-3)
Tolerance for stopping criterion.
cache_size : float, optional
Specify the size of the kernel cache (in MB).
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
Attributes
----------
support_ : array-like, shape = [n_SV]
Indices of support vectors.
support_vectors_ : array-like, shape = [nSV, n_features]
Support vectors.
dual_coef_ : array, shape = [1, n_SV]
Coefficients of the support vector in the decision function.
coef_ : array, shape = [1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
        `coef_` is a readonly property derived from `dual_coef_` and
`support_vectors_`.
intercept_ : array, shape = [1]
Constants in decision function.
Examples
--------
>>> from sklearn.svm import SVR
>>> import numpy as np
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> y = np.random.randn(n_samples)
>>> X = np.random.randn(n_samples, n_features)
>>> clf = SVR(C=1.0, epsilon=0.2)
>>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
SVR(C=1.0, cache_size=200, coef0=0.0, degree=3, epsilon=0.2, gamma='auto',
kernel='rbf', max_iter=-1, shrinking=True, tol=0.001, verbose=False)
See also
--------
NuSVR
Support Vector Machine for regression implemented using libsvm
using a parameter to control the number of support vectors.
LinearSVR
Scalable Linear Support Vector Machine for regression
implemented using liblinear.
"""
def __init__(self, kernel='rbf', degree=3, gamma='auto', coef0=0.0,
tol=1e-3, C=1.0, epsilon=0.1, shrinking=True,
cache_size=200, verbose=False, max_iter=-1):
super(SVR, self).__init__(
'epsilon_svr', kernel=kernel, degree=degree, gamma=gamma,
coef0=coef0, tol=tol, C=C, nu=0., epsilon=epsilon, verbose=verbose,
shrinking=shrinking, probability=False, cache_size=cache_size,
class_weight=None, max_iter=max_iter, random_state=None)
class NuSVR(BaseLibSVM, RegressorMixin):
"""Nu Support Vector Regression.
Similar to NuSVC, for regression, uses a parameter nu to control
the number of support vectors. However, unlike NuSVC, where nu
replaces C, here nu replaces the parameter epsilon of epsilon-SVR.
The implementation is based on libsvm.
Read more in the :ref:`User Guide <svm_regression>`.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term.
nu : float, optional
An upper bound on the fraction of training errors and a lower bound of
the fraction of support vectors. Should be in the interval (0, 1]. By
default 0.5 will be taken.
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default='auto')
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 'auto' then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
shrinking : boolean, optional (default=True)
Whether to use the shrinking heuristic.
tol : float, optional (default=1e-3)
Tolerance for stopping criterion.
cache_size : float, optional
Specify the size of the kernel cache (in MB).
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
Attributes
----------
support_ : array-like, shape = [n_SV]
Indices of support vectors.
support_vectors_ : array-like, shape = [nSV, n_features]
Support vectors.
dual_coef_ : array, shape = [1, n_SV]
Coefficients of the support vector in the decision function.
coef_ : array, shape = [1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
        `coef_` is a readonly property derived from `dual_coef_` and
`support_vectors_`.
intercept_ : array, shape = [1]
Constants in decision function.
Examples
--------
>>> from sklearn.svm import NuSVR
>>> import numpy as np
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> y = np.random.randn(n_samples)
>>> X = np.random.randn(n_samples, n_features)
>>> clf = NuSVR(C=1.0, nu=0.1)
>>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
NuSVR(C=1.0, cache_size=200, coef0=0.0, degree=3, gamma='auto',
kernel='rbf', max_iter=-1, nu=0.1, shrinking=True, tol=0.001,
verbose=False)
See also
--------
NuSVC
Support Vector Machine for classification implemented with libsvm
with a parameter to control the number of support vectors.
SVR
epsilon Support Vector Machine for regression implemented with libsvm.
"""
def __init__(self, nu=0.5, C=1.0, kernel='rbf', degree=3,
gamma='auto', coef0=0.0, shrinking=True, tol=1e-3,
cache_size=200, verbose=False, max_iter=-1):
super(NuSVR, self).__init__(
'nu_svr', kernel=kernel, degree=degree, gamma=gamma, coef0=coef0,
tol=tol, C=C, nu=nu, epsilon=0., shrinking=shrinking,
probability=False, cache_size=cache_size, class_weight=None,
verbose=verbose, max_iter=max_iter, random_state=None)
class OneClassSVM(BaseLibSVM):
"""Unsupervised Outlier Detection.
Estimate the support of a high-dimensional distribution.
The implementation is based on libsvm.
Read more in the :ref:`User Guide <svm_outlier_detection>`.
Parameters
----------
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
nu : float, optional
An upper bound on the fraction of training
errors and a lower bound of the fraction of support
vectors. Should be in the interval (0, 1]. By default 0.5
will be taken.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default='auto')
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 'auto' then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
tol : float, optional
Tolerance for stopping criterion.
shrinking : boolean, optional
Whether to use the shrinking heuristic.
cache_size : float, optional
Specify the size of the kernel cache (in MB).
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data for probability estimation.
Attributes
----------
support_ : array-like, shape = [n_SV]
Indices of support vectors.
support_vectors_ : array-like, shape = [nSV, n_features]
Support vectors.
dual_coef_ : array, shape = [n_classes-1, n_SV]
Coefficients of the support vectors in the decision function.
coef_ : array, shape = [n_classes-1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
        `coef_` is a readonly property derived from `dual_coef_` and
        `support_vectors_`.
intercept_ : array, shape = [n_classes-1]
Constants in decision function.
"""
def __init__(self, kernel='rbf', degree=3, gamma='auto', coef0=0.0,
tol=1e-3, nu=0.5, shrinking=True, cache_size=200,
verbose=False, max_iter=-1, random_state=None):
super(OneClassSVM, self).__init__(
'one_class', kernel, degree, gamma, coef0, tol, 0., nu, 0.,
shrinking, False, cache_size, None, verbose, max_iter,
random_state)
def fit(self, X, y=None, sample_weight=None, **params):
"""
Detects the soft boundary of the set of samples X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Set of samples, where n_samples is the number of samples and
n_features is the number of features.
sample_weight : array-like, shape (n_samples,)
Per-sample weights. Rescale C per sample. Higher weights
force the classifier to put more emphasis on these points.
Returns
-------
self : object
Returns self.
Notes
-----
If X is not a C-ordered contiguous array it is copied.
"""
super(OneClassSVM, self).fit(X, np.ones(_num_samples(X)), sample_weight=sample_weight,
**params)
return self
def decision_function(self, X):
"""Distance of the samples X to the separating hyperplane.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Returns
-------
X : array-like, shape (n_samples,)
Returns the decision function of the samples.
"""
dec = self._decision_function(X)
return dec
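# A minimal, illustrative usage sketch for OneClassSVM (the Gaussian toy data
# and the nu/gamma values are assumptions). predict() returns +1 for points
# considered inliers and -1 for outliers.
def _example_one_class_svm():
    import numpy as np
    rng = np.random.RandomState(42)
    X_train = 0.3 * rng.randn(100, 2)
    X_test = np.array([[0.1, 0.0], [4.0, 4.0]])
    ocsvm = OneClassSVM(nu=0.1, kernel='rbf', gamma=0.1)
    ocsvm.fit(X_train)
    # The second test point lies far from the training cloud and should be
    # labelled -1 (outlier).
    print(ocsvm.predict(X_test))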
| bsd-3-clause |
QuantumTechDevStudio/RUDNEVGAUSS | source/visualiser_old.py | 1 | 6109 | import numpy as np
import tensorflow as tf
from matplotlib import pylab as plt
import d1_osc
import ann_constructor
# This class simplifies visualization of the ANN outputs and their images.
class Visualiser(object):
def __init__(self, net, net_output, net_sum, M):
self.net = net
self.net_output = net_output
self.net_sum = net_sum
first_deriative = tf.gradients(self.net_output, net.x)[0]
self.net_images = (-(tf.gradients(first_deriative, net.x)[0]) + tf.multiply(tf.square(net.x), net.output))
self.net_images_sum = tf.reduce_sum(input_tensor = self.net_images, axis = 0)
self.M = M
def show_outputs_sum(self, x):
y = self.__net_sum_value(x)
plt.title('Output:')
plt.grid(True)
plt.plot(x[0], y)
def show_outputs(self, x):
y = self.net.run(x)
plt.title('Outputs: ')
plt.grid(True)
for i in range(self.M):
net_i = y[i,:]
plt.plot(x[0], net_i)
def show_images(self, x):
y = self.__net_images_value(x)
plt.title('Images:')
plt.grid(True)
        for i in range(self.M):
net_image_i = y[i,:]
plt.plot(x[0], net_image_i)
def show_images_sum(self, x):
y = self.__net_images_sum_value(x)
plt.title('Output:')
plt.grid(True)
plt.plot(x[0], y)
def plot_four(self, x):
y1 = self.net.run(x)
y2 = self.__net_images_value(x)
y3 = self.__net_sum_value(x)
y4 = self.__net_images_sum_value(x)
        fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2)
ax1 = plt.subplot(221)
ax1.set_title('Sum of original net outputs:')
ax1.plot(x[0], y3, 'x')
ax2 = plt.subplot(222)
ax2.set_title('Sum of net images:')
ax2.plot(x[0], y4, 'x')
ax3 = plt.subplot(223)
ax3.set_title('Original net outputs:')
for i in range(self.M):
net_i = y1[i,:]
ax3.plot(x[0], net_i)
ax4 = plt.subplot(224)
ax4.set_title('Image of net outputs:')
for i in range(self.M):
net_image_i = y2[i,:]
ax4.plot(x[0], net_image_i)
fig.subplots_adjust(left = 0, bottom = 0, right = 2, top = 2, hspace = 0.2, wspace = 0.2)
def __net_outs_value(self, x):
return self.net.calc(self.net_output, {self.net.x : x})
def __net_sum_value(self, x):
return self.net.calc(self.net_sum, {self.net.x : x})
def __net_images_value(self, x):
return self.net.calc(self.net_images, {self.net.x: x})
def __net_images_sum_value(self, x):
return self.net.calc(self.net_images_sum, {self.net.x: x})
@staticmethod
def show_wf(n, x):
plt.title('Wave function:')
plt.grid(True)
plt.plot(x[0], d1_osc.wf(n,x), "r--")
@staticmethod
def show_wf_system(n_max, x):
plt.title('Wavefunctions system:')
plt.grid(True)
for i in range(n_max+1):
plt.plot(x[0], d1_osc.wf(i,x))
# This class simplifies visualization of the ANN outputs and their images.
class Visualiser2(object):
def __init__(self, net, trial_func, M):
self.net = net
self.trial_func = trial_func
self.func_sum = tf.reduce_sum(input_tensor = self.trial_func, axis=0)
first_deriative = tf.gradients(self.trial_func, net.x)[0]
self.func_images = (-(tf.gradients(first_deriative, net.x)[0]) + tf.multiply(tf.square(net.x), self.trial_func))
self.func_images_sum = tf.reduce_sum(input_tensor = self.func_images, axis = 0)
self.M = M
def show_func_sum(self, x):
y = self.__func_sum_value(x)
plt.title('Output:')
plt.grid(True)
plt.plot(x[0], y)
def show_funcs(self, x):
y = self.__funcs_value(x)
plt.title('Outputs: ')
plt.grid(True)
for i in range(self.M):
func_i = y[i,:]
plt.plot(x[0], func_i)
def show_images(self, x):
y = self.__images_value(x)
plt.title('Images:')
plt.grid(True)
        for i in range(self.M):
func_image_i = y[i,:]
plt.plot(x[0], func_image_i)
def show_images_sum(self, x):
y = self.__images_sum_value(x)
plt.title('Output:')
plt.grid(True)
plt.plot(x[0], y)
def plot_four(self, x):
y1 = self.__funcs_value(x)
y2 = self.__images_value(x)
y3 = self.__func_sum_value(x)
y4 = self.__images_sum_value(x)
        fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2)
ax1 = plt.subplot(221)
ax1.set_title('Sum of original net outputs:')
ax1.plot(x[0], y3, 'x')
ax2 = plt.subplot(222)
ax2.set_title('Sum of net images:')
ax2.plot(x[0], y4, 'x')
ax3 = plt.subplot(223)
ax3.set_title('Original net outputs:')
for i in range(self.M):
func_i = y1[i,:]
ax3.plot(x[0], func_i)
ax4 = plt.subplot(224)
ax4.set_title('Image of net outputs:')
for i in range(self.M):
image_i = y2[i,:]
ax4.plot(x[0], image_i)
fig.subplots_adjust(left = 0, bottom = 0, right = 2, top = 2, hspace = 0.2, wspace = 0.2)
def __funcs_value(self, x):
return self.net.calc(self.trial_func, {self.net.x : x})
def __func_sum_value(self, x):
return self.net.calc(self.func_sum, {self.net.x : x})
def __images_value(self, x):
return self.net.calc(self.func_images, {self.net.x: x})
def __images_sum_value(self, x):
return self.net.calc(self.func_images_sum, {self.net.x: x})
@staticmethod
def show_wf(n, x):
plt.title('Wave function:')
plt.grid(True)
plt.plot(x[0], d1_osc.wf(n,x), "r--")
@staticmethod
def show_wf_system(n_max, x):
plt.title('Wavefunctions system:')
plt.grid(True)
for i in range(n_max+1):
plt.plot(x[0], d1_osc.wf(i,x))
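# A hedged usage sketch for Visualiser2. The network object is assumed to be
# built with ann_constructor (its exact interface is not shown in this file)
# and must expose the members used above: the placeholder `x` and the
# `run`/`calc` methods.
# net, trial_func = ...                       # hypothetical ann_constructor call
# vis = Visualiser2(net, trial_func, M=4)
# x_grid = np.linspace(-5.0, 5.0, 200).reshape(1, -1)
# vis.plot_four(x_grid)
# Visualiser2.show_wf_system(3, x_grid)
# plt.show()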
| gpl-3.0 |
ajamesl/VectorTarget | ISStest.py | 1 | 6223 | import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.patches import FancyArrowPatch
from mpl_toolkits.mplot3d import proj3d
from Basis import *
import numpy as np
import time
import csv
import datetime
import math
import geocoder
import serial
import sys
# Class defining x, y, z vectors and the vector arrow-head appearance/size
class Arrow3D(FancyArrowPatch):
def __init__(self, xs, ys, zs, *args, **kwargs):
FancyArrowPatch.__init__(self, (0, 0), (0, 0), *args, **kwargs)
self._verts3d = xs, ys, zs
def draw(self, renderer):
xs3d, ys3d, zs3d = self._verts3d
xs, ys, zs = proj3d.proj_transform(xs3d, ys3d, zs3d, renderer.M)
self.set_positions((xs[0], ys[0]), (xs[1], ys[1]))
FancyArrowPatch.draw(self, renderer)
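# A minimal sketch of drawing an Arrow3D on a 3D axis (illustrative only; the
# script below builds its own figure, so these names are assumptions):
# fig = plt.figure()
# ax = fig.add_subplot(111, projection='3d')
# arrow = Arrow3D([0, 1000], [0, 1000], [0, 1000],
#                 mutation_scale=20, lw=1, arrowstyle="->", color="b")
# ax.add_artist(arrow)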
def ECI_Conversion(lat, lon, el):
now = datetime.datetime.utcnow()
STt0 = SidTime(JulianDay(now.year, now.month, now.day), lon, now.hour,
now.minute, now.second)
return Geocentric(lat, STt0, el)
#####################################################
# Get GPS location of radar, convert to ECI
# Read data from radar, vector to target and distance
# Convert data to find ECI of target and elevation
#####################################################
# Reads the number of lines(data points) in the file
with open('ISSGPS.txt') as f:
data_points = sum(1 for line in f)
# Reading longitude and latitude from file
Lat = []
Long = []
with open('ISSGPS.txt', 'r') as csvfile:
coords = csv.reader(csvfile, delimiter=',')
for row in coords:
Lat.append(float(row[0]))
Long.append(float(row[1]))
# In km
Elev = 402.450
GS0 = ECI_Conversion(Lat[0], Long[0], Elev)
xd = GS0[0]
yd = GS0[1]
zd = GS0[2]
Drone = np.array([xd, yd, zd])
######################################################
# Read latitude and longitude of ground station from GPS
ser = serial.Serial('/dev/ttyUSB1', 4800, timeout = 5)
while True:
line = ser.readline()
splitline = line.split(",")
if splitline[0] == '$GPGGA':
latitude = splitline[2]
latDirec = splitline[3]
longitude = splitline[4]
longDirec = splitline[5]
print line
break
# Exits programme if GPS device is not reading/connecting to satellites
if latitude == '' or longitude == '':
print "GPS device is not reading the ground station's location."
sys.exit()
else:
    # NMEA reports latitude/longitude as ddmm.mmmm; convert to decimal degrees
    lat_raw = float(latitude)
    Ground_Lat = int(lat_raw / 100) + (lat_raw % 100) / 60.0
    if latDirec != 'N':
        Ground_Lat = -Ground_Lat
    lon_raw = float(longitude)
    Ground_Long = int(lon_raw / 100) + (lon_raw % 100) / 60.0
    if longDirec != 'E':
        Ground_Long = -Ground_Long
# Get elevation from geocoder
g = geocoder.elevation([Ground_Lat, Ground_Long])
Ground_Elev = float(g.meters/1000.0)
print 'Elevation: ' + str(Ground_Elev)
##################################################
# Convert from ECI to Az-El coordinates
GS1 = ECI_Conversion(Ground_Lat, Ground_Long, Ground_Elev)
xo = GS1[0]
yo = GS1[1]
zo = GS1[2]
Ground = np.array([xo, yo, zo])
now = datetime.datetime.utcnow()
STt0 = SidTime(JulianDay(now.year, now.month, now.day), Ground_Long, now.hour,
now.minute, now.second)
Top = Topocentric(Ground, Drone, Ground_Lat, STt0)
AzEl = Angles(Top)
#####################################################
# Defines figure as 3D and interactive
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
plt.ion()
# Defines a sphere mesh to model the Earth with radius Re
u = np.linspace(0, 2 * np.pi, 30)
v = np.linspace(0, np.pi, 18)
xs = 6378.135 * np.outer(np.cos(u), np.sin(v))
ys = 6378.135 * np.outer(np.sin(u), np.sin(v))
zs = 6378.135 * np.outer(np.ones(np.size(u)), np.cos(v))
c = ax.plot_surface(xs, ys, zs, rstride=1, cstride=1, color='w', shade=0)
ax.add_artist(c)
# Axes ranges & labels
ax.set_xlim([-6500, 6500])
ax.set_ylim([-6500, 6500])
ax.set_zlim([-6500, 6500])
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('z')
# Definition of the vector from the station to the target
x = [xo, xd]
y = [yo, yd]
z = [zo, zd]
# Define and draw vector on figure
a = Arrow3D(x, y, z, mutation_scale=20, lw=1, arrowstyle="->",
color="r")
ax.add_artist(a)
if AzEl[1] < -5.0:
print "Target is currently below the horizon."
i = 1
# Loop updating vector from station to target
while True:
a.remove()
GS0 = ECI_Conversion(Lat[0 + i], Long[0 + i], Elev)
xd = GS0[0]
yd = GS0[1]
zd = GS0[2]
Drone = np.array([xd, yd, zd])
Top = Topocentric(Ground, Drone, Ground_Lat, STt0)
AzEl = Angles(Top)
x = [xo, xd]
y = [yo, yd]
z = [zo, zd]
a = Arrow3D(x, y, z, mutation_scale=20, lw=1, arrowstyle="->",
color="r")
ax.add_artist(a)
plt.pause(0.05)
if i < data_points - 1:
i += 1
if AzEl[1] < -5.0:
print "Target is below the horizon."
a.remove()
# Loop updating position of target to check if it is above the
# ground station's horizon
while True:
if AzEl[1] < -5.0:
GS0 = ECI_Conversion(Lat[0 + i], Long[0 + i], Elev)
xd = GS0[0]
yd = GS0[1]
zd = GS0[2]
Drone = np.array([xd, yd, zd])
Top = Topocentric(Ground, Drone, Ground_Lat, STt0)
AzEl = Angles(Top)
plt.pause(0.05)
if i < data_points - 1:
i += 1
else:
GS0 = ECI_Conversion(Lat[0 + i], Long[0 + i], Elev)
xd = GS0[0]
yd = GS0[1]
zd = GS0[2]
Drone = np.array([xd, yd, zd])
Top = Topocentric(Ground, Drone, Ground_Lat, STt0)
AzEl = Angles(Top)
x = [xo, xd]
y = [yo, yd]
z = [zo, zd]
a = Arrow3D(x, y, z, mutation_scale=20, lw=1, arrowstyle="->",
color="r")
ax.add_artist(a)
plt.pause(0.05)
if i < data_points - 1:
i += 1
a.remove()
# Keeps figure open
while True:
plt.pause(0.05)
| mit |
alvarofierroclavero/scikit-learn | sklearn/mixture/tests/test_dpgmm.py | 261 | 4490 | import unittest
import sys
import numpy as np
from sklearn.mixture import DPGMM, VBGMM
from sklearn.mixture.dpgmm import log_normalize
from sklearn.datasets import make_blobs
from sklearn.utils.testing import assert_array_less, assert_equal
from sklearn.mixture.tests.test_gmm import GMMTester
from sklearn.externals.six.moves import cStringIO as StringIO
np.seterr(all='warn')
def test_class_weights():
# check that the class weights are updated
# simple 3 cluster dataset
X, y = make_blobs(random_state=1)
for Model in [DPGMM, VBGMM]:
dpgmm = Model(n_components=10, random_state=1, alpha=20, n_iter=50)
dpgmm.fit(X)
# get indices of components that are used:
indices = np.unique(dpgmm.predict(X))
active = np.zeros(10, dtype=np.bool)
active[indices] = True
# used components are important
assert_array_less(.1, dpgmm.weights_[active])
# others are not
assert_array_less(dpgmm.weights_[~active], .05)
def test_verbose_boolean():
# checks that the output for the verbose output is the same
# for the flag values '1' and 'True'
# simple 3 cluster dataset
X, y = make_blobs(random_state=1)
for Model in [DPGMM, VBGMM]:
dpgmm_bool = Model(n_components=10, random_state=1, alpha=20,
n_iter=50, verbose=True)
dpgmm_int = Model(n_components=10, random_state=1, alpha=20,
n_iter=50, verbose=1)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
# generate output with the boolean flag
dpgmm_bool.fit(X)
verbose_output = sys.stdout
verbose_output.seek(0)
bool_output = verbose_output.readline()
# generate output with the int flag
dpgmm_int.fit(X)
verbose_output = sys.stdout
verbose_output.seek(0)
int_output = verbose_output.readline()
assert_equal(bool_output, int_output)
finally:
sys.stdout = old_stdout
def test_verbose_first_level():
# simple 3 cluster dataset
X, y = make_blobs(random_state=1)
for Model in [DPGMM, VBGMM]:
dpgmm = Model(n_components=10, random_state=1, alpha=20, n_iter=50,
verbose=1)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
dpgmm.fit(X)
finally:
sys.stdout = old_stdout
def test_verbose_second_level():
# simple 3 cluster dataset
X, y = make_blobs(random_state=1)
for Model in [DPGMM, VBGMM]:
dpgmm = Model(n_components=10, random_state=1, alpha=20, n_iter=50,
verbose=2)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
dpgmm.fit(X)
finally:
sys.stdout = old_stdout
def test_log_normalize():
v = np.array([0.1, 0.8, 0.01, 0.09])
a = np.log(2 * v)
assert np.allclose(v, log_normalize(a), rtol=0.01)
def do_model(self, **kwds):
return VBGMM(verbose=False, **kwds)
class DPGMMTester(GMMTester):
model = DPGMM
do_test_eval = False
def score(self, g, train_obs):
_, z = g.score_samples(train_obs)
return g.lower_bound(train_obs, z)
class TestDPGMMWithSphericalCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'spherical'
setUp = GMMTester._setUp
class TestDPGMMWithDiagCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'diag'
setUp = GMMTester._setUp
class TestDPGMMWithTiedCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'tied'
setUp = GMMTester._setUp
class TestDPGMMWithFullCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'full'
setUp = GMMTester._setUp
class VBGMMTester(GMMTester):
model = do_model
do_test_eval = False
def score(self, g, train_obs):
_, z = g.score_samples(train_obs)
return g.lower_bound(train_obs, z)
class TestVBGMMWithSphericalCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'spherical'
setUp = GMMTester._setUp
class TestVBGMMWithDiagCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'diag'
setUp = GMMTester._setUp
class TestVBGMMWithTiedCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'tied'
setUp = GMMTester._setUp
class TestVBGMMWithFullCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'full'
setUp = GMMTester._setUp
| bsd-3-clause |
muthu1993/InteliEQ | audioAnalysis.py | 1 | 25640 | #!/usr/bin/env python2.7
import argparse
import os
import audioop
import numpy
import glob
import scipy
import subprocess
import wave
import cPickle
import threading
import shutil
import ntpath
import matplotlib.pyplot as plt
import audioFeatureExtraction as aF
import audioTrainTest as aT
import audioSegmentation as aS
import audioVisualization as aV
import audioBasicIO
import utilities as uT
import scipy.io.wavfile as wavfile
import matplotlib.patches
def dirMp3toWavWrapper(directory, samplerate, channels):
if not os.path.isdir(directory):
raise Exception("Input path not found!")
useMp3TagsAsNames = True
audioBasicIO.convertDirMP3ToWav(directory, samplerate, channels, useMp3TagsAsNames)
def dirWAVChangeFs(directory, samplerate, channels):
if not os.path.isdir(directory):
raise Exception("Input path not found!")
audioBasicIO.convertFsDirWavToWav(directory, samplerate, channels)
def featureExtractionFileWrapper(wavFileName, outFile, mtWin, mtStep, stWin, stStep):
if not os.path.isfile(wavFileName):
raise Exception("Input audio file not found!")
aF.mtFeatureExtractionToFile(wavFileName, mtWin, mtStep, stWin, stStep, outFile, True, True, True)
def beatExtractionWrapper(wavFileName, plot):
if not os.path.isfile(wavFileName):
raise Exception("Input audio file not found!")
[Fs, x] = audioBasicIO.readAudioFile(wavFileName)
F = aF.stFeatureExtraction(x, Fs, 0.050 * Fs, 0.050 * Fs)
BPM, ratio = aF.beatExtraction(F, 0.050, plot)
print "Beat: {0:d} bpm ".format(int(BPM))
print "Ratio: {0:.2f} ".format(ratio)
def featureExtractionDirWrapper(directory, mtWin, mtStep, stWin, stStep):
if not os.path.isdir(directory):
raise Exception("Input path not found!")
aF.mtFeatureExtractionToFileDir(directory, mtWin, mtStep, stWin, stStep, True, True, True)
def featureVisualizationDirWrapper(directory):
if not os.path.isdir(directory):
raise Exception("Input folder not found!")
aV.visualizeFeaturesFolder(directory, "pca", "")
#aV.visualizeFeaturesFolder(directory, "lda", "artist")
def fileSpectrogramWrapper(wavFileName):
if not os.path.isfile(wavFileName):
raise Exception("Input audio file not found!")
[Fs, x] = audioBasicIO.readAudioFile(wavFileName)
x = audioBasicIO.stereo2mono(x)
specgram, TimeAxis, FreqAxis = aF.stSpectogram(x, Fs, round(Fs * 0.040), round(Fs * 0.040), True)
def fileChromagramWrapper(wavFileName):
if not os.path.isfile(wavFileName):
raise Exception("Input audio file not found!")
[Fs, x] = audioBasicIO.readAudioFile(wavFileName)
x = audioBasicIO.stereo2mono(x)
specgram, TimeAxis, FreqAxis = aF.stChromagram(x, Fs, round(Fs * 0.040), round(Fs * 0.040), True)
def trainClassifierWrapper(method, beatFeatures, directories, modelName):
if len(directories) < 2:
raise Exception("At least 2 directories are needed")
aT.featureAndTrain(directories, 1, 1, aT.shortTermWindow, aT.shortTermStep,
method.lower(), modelName, computeBEAT=beatFeatures)
def trainRegressionWrapper(method, beatFeatures, dirName, modelName):
aT.featureAndTrainRegression(dirName, 1, 1, aT.shortTermWindow, aT.shortTermStep,
method.lower(), modelName, computeBEAT=beatFeatures)
def classifyFileWrapper(inputFile, modelType, modelName):
if not os.path.isfile(modelName):
raise Exception("Input modelName not found!")
if not os.path.isfile(inputFile):
raise Exception("Input audio file not found!")
[Result, P, classNames] = aT.fileClassification(inputFile, modelName, modelType)
print "{0:s}\t{1:s}".format("Class", "Probability")
for i, c in enumerate(classNames):
print "{0:s}\t{1:.2f}".format(c, P[i])
print "Winner class: " + classNames[int(Result)]
def regressionFileWrapper(inputFile, modelType, modelName):
if not os.path.isfile(inputFile):
raise Exception("Input audio file not found!")
R, regressionNames = aT.fileRegression(inputFile, modelName, modelType)
for i in range(len(R)):
print "{0:s}\t{1:.3f}".format(regressionNames[i], R[i])
def classifyFolderWrapper(inputFolder, modelType, modelName, outputMode=False):
if not os.path.isfile(modelName):
raise Exception("Input modelName not found!")
files = "*.wav"
if os.path.isdir(inputFolder):
strFilePattern = os.path.join(inputFolder, files)
else:
strFilePattern = inputFolder + files
wavFilesList = []
wavFilesList.extend(glob.glob(strFilePattern))
wavFilesList = sorted(wavFilesList)
if len(wavFilesList) == 0:
print "No WAV files found!"
return
Results = []
for wavFile in wavFilesList:
[Result, P, classNames] = aT.fileClassification(wavFile, modelName, modelType)
Result = int(Result)
Results.append(Result)
if outputMode:
print "{0:s}\t{1:s}".format(wavFile, classNames[Result])
Results = numpy.array(Results)
# print distribution of classes:
[Histogram, _] = numpy.histogram(Results, bins=numpy.arange(len(classNames) + 1))
for i, h in enumerate(Histogram):
print "{0:20s}\t\t{1:d}".format(classNames[i], h)
def regressionFolderWrapper(inputFolder, modelType, modelName):
files = "*.wav"
if os.path.isdir(inputFolder):
strFilePattern = os.path.join(inputFolder, files)
else:
strFilePattern = inputFolder + files
wavFilesList = []
wavFilesList.extend(glob.glob(strFilePattern))
wavFilesList = sorted(wavFilesList)
if len(wavFilesList) == 0:
print "No WAV files found!"
return
Results = []
for wavFile in wavFilesList:
R, regressionNames = aT.fileRegression(wavFile, modelName, modelType)
Results.append(R)
Results = numpy.array(Results)
for i, r in enumerate(regressionNames):
[Histogram, bins] = numpy.histogram(Results[:, i])
centers = (bins[0:-1] + bins[1::]) / 2.0
plt.subplot(len(regressionNames), 1, i + 1)
plt.plot(centers, Histogram)
plt.title(r)
plt.show()
def trainHMMsegmenter_fromfile(wavFile, gtFile, hmmModelName, mtWin, mtStep):
if not os.path.isfile(wavFile):
print "Error: wavfile does not exist!"
return
if not os.path.isfile(gtFile):
print "Error: groundtruth does not exist!"
return
aS.trainHMM_fromFile(wavFile, gtFile, hmmModelName, mtWin, mtStep)
def trainHMMsegmenter_fromdir(directory, hmmModelName, mtWin, mtStep):
if not os.path.isdir(directory):
raise Exception("Input folder not found!")
aS.trainHMM_fromDir(directory, hmmModelName, mtWin, mtStep)
def segmentclassifyFileWrapper(inputWavFile, modelName, modelType):
if not os.path.isfile(modelName):
raise Exception("Input modelName not found!")
if not os.path.isfile(inputWavFile):
raise Exception("Input audio file not found!")
gtFile = inputWavFile.replace(".wav", ".segments")
aS.mtFileClassification(inputWavFile, modelName, modelType, True, gtFile)
def segmentclassifyFileWrapperHMM(wavFile, hmmModelName):
gtFile = wavFile.replace(".wav", ".segments")
aS.hmmSegmentation(wavFile, hmmModelName, PLOT=True, gtFileName=gtFile)
def segmentationEvaluation(dirName, modelName, methodName):
aS.evaluateSegmentationClassificationDir(dirName, modelName, methodName)
def silenceRemovalWrapper(inputFile, smoothingWindow, weight):
if not os.path.isfile(inputFile):
raise Exception("Input audio file not found!")
[Fs, x] = audioBasicIO.readAudioFile(inputFile) # read audio signal
segmentLimits = aS.silenceRemoval(x, Fs, 0.05, 0.05, smoothingWindow, weight, True) # get onsets
for i, s in enumerate(segmentLimits):
strOut = "{0:s}_{1:.3f}-{2:.3f}.wav".format(inputFile[0:-4], s[0], s[1])
wavfile.write(strOut, Fs, x[int(Fs * s[0]):int(Fs * s[1])])
def speakerDiarizationWrapper(inputFile, numSpeakers, useLDA):
if useLDA:
aS.speakerDiarization(inputFile, numSpeakers, PLOT=True)
else:
aS.speakerDiarization(inputFile, numSpeakers, LDAdim=0, PLOT=True)
def thumbnailWrapper(inputFile, thumbnailWrapperSize):
stWindow = 1.0
stStep = 1.0
if not os.path.isfile(inputFile):
raise Exception("Input audio file not found!")
[Fs, x] = audioBasicIO.readAudioFile(inputFile) # read file
if Fs == -1: # could not read file
return
[A1, A2, B1, B2, Smatrix] = aS.musicThumbnailing(x, Fs, stWindow, stStep, thumbnailWrapperSize) # find thumbnailWrapper endpoints
# write thumbnailWrappers to WAV files:
thumbnailWrapperFileName1 = inputFile.replace(".wav", "_thumb1.wav")
thumbnailWrapperFileName2 = inputFile.replace(".wav", "_thumb2.wav")
wavfile.write(thumbnailWrapperFileName1, Fs, x[int(Fs * A1):int(Fs * A2)])
wavfile.write(thumbnailWrapperFileName2, Fs, x[int(Fs * B1):int(Fs * B2)])
print "1st thumbnailWrapper (stored in file {0:s}): {1:4.1f}sec -- {2:4.1f}sec".format(thumbnailWrapperFileName1, A1, A2)
print "2nd thumbnailWrapper (stored in file {0:s}): {1:4.1f}sec -- {2:4.1f}sec".format(thumbnailWrapperFileName2, B1, B2)
# Plot self-similarity matrix:
fig = plt.figure()
ax = fig.add_subplot(111, aspect="auto")
plt.imshow(Smatrix)
# Plot best-similarity diagonal:
Xcenter = (A1 / stStep + A2 / stStep) / 2.0
Ycenter = (B1 / stStep + B2 / stStep) / 2.0
e1 = matplotlib.patches.Ellipse((Ycenter, Xcenter), thumbnailWrapperSize * 1.4, 3, angle=45, linewidth=3, fill=False)
ax.add_patch(e1)
plt.plot([B1, Smatrix.shape[0]], [A1, A1], color="k", linestyle="--", linewidth=2)
plt.plot([B2, Smatrix.shape[0]], [A2, A2], color="k", linestyle="--", linewidth=2)
plt.plot([B1, B1], [A1, Smatrix.shape[0]], color="k", linestyle="--", linewidth=2)
plt.plot([B2, B2], [A2, Smatrix.shape[0]], color="k", linestyle="--", linewidth=2)
plt.xlim([0, Smatrix.shape[0]])
plt.ylim([Smatrix.shape[1], 0])
ax.yaxis.set_label_position("right")
ax.yaxis.tick_right()
plt.xlabel("frame no")
plt.ylabel("frame no")
plt.title("Self-similarity matrix")
plt.show()
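# Illustrative command-line sketches for the sub-commands defined below
# (file and model names are assumptions):
#   python audioAnalysis.py fileSpectrogram -i sample.wav
#   python audioAnalysis.py trainClassifier -i speech/ music/ --method svm -o svmModel
#   python audioAnalysis.py classifyFile -i sample.wav --model svm --classifier svmModel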
def parse_arguments():
parser = argparse.ArgumentParser(description="A demonstration script for pyAudioAnalysis library")
tasks = parser.add_subparsers(
title="subcommands", description="available tasks", dest="task", metavar="")
dirMp3Wav = tasks.add_parser("dirMp3toWav", help="Convert .mp3 files in a directory to .wav format")
dirMp3Wav.add_argument("-i", "--input", required=True, help="Input folder")
dirMp3Wav.add_argument("-r", "--rate", type=int, choices=[8000, 16000, 32000, 44100],
required=True, help="Samplerate of generated WAV files")
dirMp3Wav.add_argument("-c", "--channels", type=int, choices=[1, 2],
required=True, help="Audio channels of generated WAV files")
dirWavRes = tasks.add_parser("dirWavResample", help="Change samplerate of .wav files in a directory")
dirWavRes.add_argument("-i", "--input", required=True, help="Input folder")
dirWavRes.add_argument("-r", "--rate", type=int, choices=[8000, 16000, 32000, 44100],
required=True, help="Samplerate of generated WAV files")
dirWavRes.add_argument("-c", "--channels", type=int, choices=[1, 2],
required=True, help="Audio channels of generated WAV files")
featExt = tasks.add_parser("featureExtractionFile", help="Extract audio features from file")
featExt.add_argument("-i", "--input", required=True, help="Input audio file")
featExt.add_argument("-o", "--output", required=True, help="Output file")
featExt.add_argument("-mw", "--mtwin", type=float, required=True, help="Mid-term window size")
featExt.add_argument("-ms", "--mtstep", type=float, required=True, help="Mid-term window step")
featExt.add_argument("-sw", "--stwin", type=float, default=0.050, help="Short-term window size")
featExt.add_argument("-ss", "--ststep", type=float, default=0.050, help="Short-term window step")
beat = tasks.add_parser("beatExtraction", help="Compute beat features of an audio file")
beat.add_argument("-i", "--input", required=True, help="Input audio file")
beat.add_argument("--plot", action="store_true", help="Generate plot")
featExtDir = tasks.add_parser("featureExtractionDir", help="Extract audio features from files in a folder")
featExtDir.add_argument("-i", "--input", required=True, help="Input directory")
featExtDir.add_argument("-mw", "--mtwin", type=float, required=True, help="Mid-term window size")
featExtDir.add_argument("-ms", "--mtstep", type=float, required=True, help="Mid-term window step")
featExtDir.add_argument("-sw", "--stwin", type=float, default=0.050, help="Short-term window size")
featExtDir.add_argument("-ss", "--ststep", type=float, default=0.050, help="Short-term window step")
featVis = tasks.add_parser("featureVisualization")
featVis.add_argument("-i", "--input", required=True, help="Input directory")
spectro = tasks.add_parser("fileSpectrogram")
spectro.add_argument("-i", "--input", required=True, help="Input audio file")
chroma = tasks.add_parser("fileChromagram")
chroma.add_argument("-i", "--input", required=True, help="Input audio file")
trainClass = tasks.add_parser("trainClassifier", help="Train an SVM or KNN classifier")
trainClass.add_argument("-i", "--input", nargs="+", required=True, help="Input directories")
trainClass.add_argument("--method", choices=["svm", "knn", "randomforest","gradientboosting","extratrees"], required=True, help="Classifier type")
trainClass.add_argument("--beat", action="store_true", help="Compute beat features")
trainClass.add_argument("-o", "--output", required=True, help="Generated classifier filename")
trainReg = tasks.add_parser("trainRegression")
trainReg.add_argument("-i", "--input", required=True, help="Input directory")
trainReg.add_argument("--method", choices=["svm", "knn"], required=True, help="Classifier type")
trainReg.add_argument("--beat", action="store_true", help="Compute beat features")
trainReg.add_argument("-o", "--output", required=True, help="Generated classifier filename")
classFile = tasks.add_parser("classifyFile", help="Classify a file using an existing classifier")
classFile.add_argument("-i", "--input", required=True, help="Input audio file")
classFile.add_argument("--model", choices=["svm", "knn", "randomforest","gradientboosting", "extratrees"], required=True, help="Classifier type (svm or knn or randomforest or gradientboosting or extratrees)")
classFile.add_argument("--classifier", required=True, help="Classifier to use (path)")
trainHMM = tasks.add_parser("trainHMMsegmenter_fromfile", help="Train an HMM from file + annotation data")
trainHMM.add_argument("-i", "--input", required=True, help="Input audio file")
trainHMM.add_argument("--ground", required=True, help="Ground truth path (segments CSV file)")
trainHMM.add_argument("-o", "--output", required=True, help="HMM model name (path)")
trainHMM.add_argument("-mw", "--mtwin", type=float, required=True, help="Mid-term window size")
trainHMM.add_argument("-ms", "--mtstep", type=float, required=True, help="Mid-term window step")
trainHMMDir = tasks.add_parser("trainHMMsegmenter_fromdir", help="Train an HMM from file + annotation data stored in a directory (batch)")
trainHMMDir.add_argument("-i", "--input", required=True, help="Input audio folder")
trainHMMDir.add_argument("-o", "--output", required=True, help="HMM model name (path)")
trainHMMDir.add_argument("-mw", "--mtwin", type=float, required=True, help="Mid-term window size")
trainHMMDir.add_argument("-ms", "--mtstep", type=float, required=True, help="Mid-term window step")
segmentClassifyFile = tasks.add_parser("segmentClassifyFile", help="Segmentation - classification of a WAV file given a trained SVM or kNN")
segmentClassifyFile.add_argument("-i", "--input", required=True, help="Input audio file")
segmentClassifyFile.add_argument("--model", choices=["svm", "knn", "randomforest","gradientboosting","extratrees"], required=True, help="Model type")
segmentClassifyFile.add_argument("--modelName", required=True, help="Model path")
segmentClassifyFileHMM = tasks.add_parser("segmentClassifyFileHMM", help="Segmentation - classification of a WAV file given a trained HMM")
segmentClassifyFileHMM.add_argument("-i", "--input", required=True, help="Input audio file")
segmentClassifyFileHMM.add_argument("--hmm", required=True, help="HMM Model to use (path)")
segmentationEvaluation = tasks.add_parser("segmentationEvaluation", help=
"Segmentation - classification evaluation for a list of WAV files and CSV ground-truth stored in a folder")
segmentationEvaluation.add_argument("-i", "--input", required=True, help="Input audio folder")
segmentationEvaluation.add_argument("--model", choices=["svm", "knn", "hmm"], required=True, help="Model type")
segmentationEvaluation.add_argument("--modelName", required=True, help="Model path")
regFile = tasks.add_parser("regressionFile")
regFile.add_argument("-i", "--input", required=True, help="Input audio file")
regFile.add_argument("--model", choices=["svm", "knn"], required=True, help="Regression type")
regFile.add_argument("--regression", required=True, help="Regression model to use")
classFolder = tasks.add_parser("classifyFolder")
classFolder.add_argument("-i", "--input", required=True, help="Input folder")
classFolder.add_argument("--model", choices=["svm", "knn", "randomforest","gradientboosting","extratrees"], required=True, help="Classifier type")
classFolder.add_argument("--classifier", required=True, help="Classifier to use (filename)")
classFolder.add_argument("--details", action="store_true", help="Plot details (otherwise only counts per class are shown)")
regFolder = tasks.add_parser("regressionFolder")
regFolder.add_argument("-i", "--input", required=True, help="Input folder")
regFolder.add_argument("--model", choices=["svm", "knn"], required=True, help="Classifier type")
regFolder.add_argument("--regression", required=True, help="Regression model to use")
silrem = tasks.add_parser("silenceRemoval", help="Remove silence segments from a recording")
silrem.add_argument("-i", "--input", required=True, help="input audio file")
silrem.add_argument("-s", "--smoothing", type=float, default=1.0, help="smoothing window size in seconds.")
silrem.add_argument("-w", "--weight", type=float, default=0.5, help="weight factor in (0, 1)")
spkrDir = tasks.add_parser("speakerDiarization")
spkrDir.add_argument("-i", "--input", required=True, help="Input audio file")
spkrDir.add_argument("-n", "--num", type=int, required=True, help="Number of speakers")
spkrDir.add_argument("--flsd", action="store_true", help="Enable FLsD method")
    speakerDiarizationScriptEval = tasks.add_parser("speakerDiarizationScriptEval", help="Evaluate speaker diarization for a folder of WAV files with ground-truth .segment files")
speakerDiarizationScriptEval.add_argument("-i", "--input", required=True, help="Input directory")
speakerDiarizationScriptEval.add_argument("--LDAs", type=int, nargs="+", required=True, help="List FLsD params")
    thumb = tasks.add_parser("thumbnail", help="Generate an audio thumbnail for an audio file")
    thumb.add_argument("-i", "--input", required=True, help="input audio file")
    thumb.add_argument("-s", "--size", default=10.0, type=float, help="thumbnail size in seconds.")
return parser.parse_args()
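# Illustrative command-line usage (a sketch only: the script name and the audio
# file/folder paths below are hypothetical; the flags correspond to the
# sub-commands defined above):
#   python audioAnalysis.py featureExtractionFile -i song.wav -o feats -mw 1.0 -ms 1.0
#   python audioAnalysis.py trainClassifier -i speech/ music/ --method svm -o svm_speech_music
#   python audioAnalysis.py silenceRemoval -i recording.wav -s 1.0 -w 0.5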
def trainHMMsegmenter_fromdir(directory, hmmModelName, mtWin, mtStep):
if not os.path.isdir(directory):
raise Exception("Input folder not found!")
aS.trainHMM_fromDir(directory, hmmModelName, mtWin, mtStep)
def segmentclassifyFileWrapperHMM(wavFile, hmmModelName):
gtFile = wavFile.replace(".wav", ".segments")
aS.hmmSegmentation(wavFile, hmmModelName, PLOT=True, gtFileName=gtFile)
if __name__ == "__main__":
args = parse_arguments()
if args.task == "dirMp3toWav": # Convert mp3 to wav (batch - folder)
dirMp3toWavWrapper(args.input, args.rate, args.channels)
elif args.task == "dirWavResample": # Convert Fs for a list of wavs stored in a folder
dirWAVChangeFs(args.input, args.rate, args.channels)
elif args.task == "featureExtractionFile": # Feature extraction for WAV file
featureExtractionFileWrapper(args.input, args.output, args.mtwin, args.mtstep, args.stwin, args.ststep)
elif args.task == "featureExtractionDir": # Feature extraction for all WAV files stored in a folder
featureExtractionDirWrapper(args.input, args.mtwin, args.mtstep, args.stwin, args.ststep)
elif args.task == "fileSpectrogram": # Extract spectrogram from a WAV file
fileSpectrogramWrapper(args.input)
elif args.task == "fileChromagram": # Extract chromagram from a WAV file
fileChromagramWrapper(args.input)
elif args.task == "featureVisualization": # Visualize the content of a list of WAV files stored in a folder
featureVisualizationDirWrapper(args.input)
elif args.task == "beatExtraction": # Extract bpm from file
beatExtractionWrapper(args.input, args.plot)
elif args.task == "trainClassifier": # Train classifier from data (organized in folders)
trainClassifierWrapper(args.method, args.beat, args.input, args.output)
elif args.task == "trainRegression": # Train a regression model from data (organized in a single folder, while ground-truth is provided in a CSV)
trainRegressionWrapper(args.method, args.beat, args.input, args.output)
elif args.task == "classifyFile": # Apply audio classifier on audio file
classifyFileWrapper(args.input, args.model, args.classifier)
elif args.task == "trainHMMsegmenter_fromfile": # Train an hmm segmenter-classifier from WAV file + annotation
trainHMMsegmenter_fromfile(args.input, args.ground, args.output, args.mtwin, args.mtstep)
elif args.task == "trainHMMsegmenter_fromdir": # Train an hmm segmenter-classifier from a list of WAVs and annotations stored in a folder
trainHMMsegmenter_fromdir(args.input, args.output, args.mtwin, args.mtstep)
elif args.task == "segmentClassifyFile": # Apply a classifier (svm or knn or randomforest or gradientboosting or extratrees) for segmentation-classificaiton to a WAV file
segmentclassifyFileWrapper(args.input, args.modelName, args.model)
elif args.task == "segmentClassifyFileHMM": # Apply an hmm for segmentation-classificaiton to a WAV file
segmentclassifyFileWrapperHMM(args.input, args.hmm)
elif args.task == "segmentationEvaluation": # Evaluate segmentation-classification for a list of WAV files (and ground truth CSVs) stored in a folder
segmentationEvaluation(args.input, args.modelName, args.model)
elif args.task == "regressionFile": # Apply a regression model to an audio signal stored in a WAV file
regressionFileWrapper(args.input, args.model, args.regression)
elif args.task == "classifyFolder": # Classify every WAV file in a given path
classifyFolderWrapper(args.input, args.model, args.classifier, args.details)
elif args.task == "regressionFolder": # Apply a regression model on every WAV file in a given path
regressionFolderWrapper(args.input, args.model, args.regression)
elif args.task == "silenceRemoval": # Detect non-silent segments in a WAV file and output to seperate WAV files
silenceRemovalWrapper(args.input, args.smoothing, args.weight)
elif args.task == "speakerDiarization": # Perform speaker diarization on a WAV file
speakerDiarizationWrapper(args.input, args.num, args.flsd)
elif args.task == "speakerDiarizationScriptEval": # Evaluate speaker diarization given a folder that contains WAV files and .segment (Groundtruth files)
aS.speakerDiarizationEvaluateScript(args.input, args.LDAs)
elif args.task == "thumbnail": # Audio thumbnailing
thumbnailWrapper(args.input, args.size)
| apache-2.0 |
xyguo/scikit-learn | sklearn/feature_extraction/tests/test_feature_hasher.py | 28 | 3652 | from __future__ import unicode_literals
import numpy as np
from sklearn.feature_extraction import FeatureHasher
from nose.tools import assert_raises, assert_true
from numpy.testing import assert_array_equal, assert_equal
def test_feature_hasher_dicts():
h = FeatureHasher(n_features=16)
assert_equal("dict", h.input_type)
raw_X = [{"foo": "bar", "dada": 42, "tzara": 37},
{"foo": "baz", "gaga": u"string1"}]
X1 = FeatureHasher(n_features=16).transform(raw_X)
gen = (iter(d.items()) for d in raw_X)
X2 = FeatureHasher(n_features=16, input_type="pair").transform(gen)
assert_array_equal(X1.toarray(), X2.toarray())
def test_feature_hasher_strings():
# mix byte and Unicode strings; note that "foo" is a duplicate in row 0
raw_X = [["foo", "bar", "baz", "foo".encode("ascii")],
["bar".encode("ascii"), "baz", "quux"]]
for lg_n_features in (7, 9, 11, 16, 22):
n_features = 2 ** lg_n_features
it = (x for x in raw_X) # iterable
h = FeatureHasher(n_features, non_negative=True, input_type="string")
X = h.transform(it)
assert_equal(X.shape[0], len(raw_X))
assert_equal(X.shape[1], n_features)
assert_true(np.all(X.data > 0))
assert_equal(X[0].sum(), 4)
assert_equal(X[1].sum(), 3)
assert_equal(X.nnz, 6)
def test_feature_hasher_pairs():
raw_X = (iter(d.items()) for d in [{"foo": 1, "bar": 2},
{"baz": 3, "quux": 4, "foo": -1}])
h = FeatureHasher(n_features=16, input_type="pair")
x1, x2 = h.transform(raw_X).toarray()
x1_nz = sorted(np.abs(x1[x1 != 0]))
x2_nz = sorted(np.abs(x2[x2 != 0]))
assert_equal([1, 2], x1_nz)
assert_equal([1, 3, 4], x2_nz)
def test_feature_hasher_pairs_with_string_values():
raw_X = (iter(d.items()) for d in [{"foo": 1, "bar": "a"},
{"baz": u"abc", "quux": 4, "foo": -1}])
h = FeatureHasher(n_features=16, input_type="pair")
x1, x2 = h.transform(raw_X).toarray()
x1_nz = sorted(np.abs(x1[x1 != 0]))
x2_nz = sorted(np.abs(x2[x2 != 0]))
assert_equal([1, 1], x1_nz)
assert_equal([1, 1, 4], x2_nz)
raw_X = (iter(d.items()) for d in [{"bax": "abc"},
{"bax": "abc"}])
x1, x2 = h.transform(raw_X).toarray()
x1_nz = np.abs(x1[x1 != 0])
x2_nz = np.abs(x2[x2 != 0])
assert_equal([1], x1_nz)
assert_equal([1], x2_nz)
assert_equal(x1, x2)
def test_hash_empty_input():
n_features = 16
raw_X = [[], (), iter(range(0))]
h = FeatureHasher(n_features=n_features, input_type="string")
X = h.transform(raw_X)
assert_array_equal(X.A, np.zeros((len(raw_X), n_features)))
def test_hasher_invalid_input():
assert_raises(ValueError, FeatureHasher, input_type="gobbledygook")
assert_raises(ValueError, FeatureHasher, n_features=-1)
assert_raises(ValueError, FeatureHasher, n_features=0)
assert_raises(TypeError, FeatureHasher, n_features='ham')
h = FeatureHasher(n_features=np.uint16(2 ** 6))
assert_raises(ValueError, h.transform, [])
assert_raises(Exception, h.transform, [[5.5]])
assert_raises(Exception, h.transform, [[None]])
def test_hasher_set_params():
# Test delayed input validation in fit (useful for grid search).
hasher = FeatureHasher()
hasher.set_params(n_features=np.inf)
assert_raises(TypeError, hasher.fit)
def test_hasher_zeros():
# Assert that no zeros are materialized in the output.
X = FeatureHasher().transform([{'foo': 0}])
assert_equal(X.data.shape, (0,))
| bsd-3-clause |
thientu/scikit-learn | examples/semi_supervised/plot_label_propagation_digits_active_learning.py | 294 | 3417 | """
========================================
Label Propagation digits active learning
========================================
Demonstrates an active learning technique to learn handwritten digits
using label propagation.
We start by training a label propagation model with only 10 labeled points,
then we select the top five most uncertain points to label. Next, we train
with 15 labeled points (original 10 + 5 new ones). We repeat this process
four times to have a model trained with 30 labeled examples.
A plot will appear showing the top 5 most uncertain digits for each iteration
of training. These may or may not contain mistakes, but we will train the next
model with their true labels.
"""
print(__doc__)
# Authors: Clay Woolam <[email protected]>
# Licence: BSD
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from sklearn import datasets
from sklearn.semi_supervised import label_propagation
from sklearn.metrics import classification_report, confusion_matrix
digits = datasets.load_digits()
rng = np.random.RandomState(0)
indices = np.arange(len(digits.data))
rng.shuffle(indices)
X = digits.data[indices[:330]]
y = digits.target[indices[:330]]
images = digits.images[indices[:330]]
n_total_samples = len(y)
n_labeled_points = 10
unlabeled_indices = np.arange(n_total_samples)[n_labeled_points:]
f = plt.figure()
for i in range(5):
y_train = np.copy(y)
y_train[unlabeled_indices] = -1
lp_model = label_propagation.LabelSpreading(gamma=0.25, max_iter=5)
lp_model.fit(X, y_train)
predicted_labels = lp_model.transduction_[unlabeled_indices]
true_labels = y[unlabeled_indices]
cm = confusion_matrix(true_labels, predicted_labels,
labels=lp_model.classes_)
print('Iteration %i %s' % (i, 70 * '_'))
print("Label Spreading model: %d labeled & %d unlabeled (%d total)"
% (n_labeled_points, n_total_samples - n_labeled_points, n_total_samples))
print(classification_report(true_labels, predicted_labels))
print("Confusion matrix")
print(cm)
# compute the entropies of transduced label distributions
pred_entropies = stats.distributions.entropy(
lp_model.label_distributions_.T)
# select five digit examples that the classifier is most uncertain about
    uncertainty_index = np.argsort(pred_entropies)[-5:]
# keep track of indices that we get labels for
delete_indices = np.array([])
f.text(.05, (1 - (i + 1) * .183),
"model %d\n\nfit with\n%d labels" % ((i + 1), i * 5 + 10), size=10)
for index, image_index in enumerate(uncertainty_index):
image = images[image_index]
sub = f.add_subplot(5, 5, index + 1 + (5 * i))
sub.imshow(image, cmap=plt.cm.gray_r)
sub.set_title('predict: %i\ntrue: %i' % (
lp_model.transduction_[image_index], y[image_index]), size=10)
sub.axis('off')
        # labeling 5 points: record their indices so they can be removed from the unlabeled set
delete_index, = np.where(unlabeled_indices == image_index)
delete_indices = np.concatenate((delete_indices, delete_index))
unlabeled_indices = np.delete(unlabeled_indices, delete_indices)
n_labeled_points += 5
f.suptitle("Active learning with Label Propagation.\nRows show 5 most "
"uncertain labels to learn with the next model.")
plt.subplots_adjust(0.12, 0.03, 0.9, 0.8, 0.2, 0.45)
plt.show()
| bsd-3-clause |
stoewer/nix-demo | plotting.py | 1 | 4645 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function, division
import nix
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
class Plotter(object):
def __init__(self, try_ggplot_style=True):
self._post_plot_hook = None
if try_ggplot_style:
self._setup_ggplot()
self.fig = plt.figure(facecolor='white')
plt.hold('on')
self._n_plots = 0
self._x_range = None
def _setup_ggplot(self):
try:
from ggplot import theme_bw
t = theme_bw()
for k, v in t.get_rcParams().iteritems():
mpl.rcParams[k] = v
def plot_post_hook():
for ax in self.figure.axes:
t.post_plot_callback(ax)
self._post_plot_hook = plot_post_hook
except ImportError:
pass
except AttributeError:
pass
@property
def figure(self):
return self.fig
@property
def xrange(self):
return self._x_range
@xrange.setter
def xrange(self, value):
self._x_range = value
def add_plot(self, array):
shape = array.data.shape
nd = len(shape)
if nd == 1:
self._plot_array_1d(array, shape)
elif nd == 2:
self._plot_array_2d(array, shape)
self._n_plots += 1
if self._post_plot_hook is not None:
self._post_plot_hook()
def _plot_array_1d(self, array, shape):
dim = array.dimensions[0]
if dim.dimension_type == nix.DimensionType.Set:
x = array.data[self.xrange or Ellipsis]
y = np.ones_like(x)
            #TODO: improve the color selection logic below (currently hard-coded)
plt.scatter(x, y, 10, 'k' if self._n_plots == 0 else 'r', linewidths=0)
plt.xlabel('%s [%s]' % (array.label, array.unit))
plt.ylabel(array.name)
plt.title('%s [%s]' % (array.name, array.type))
plt.yticks([])
elif dim.dimension_type == nix.DimensionType.Sample:
y = array.data[:]
x_start = dim.offset or 0
x = np.arange(0, shape[0]) * dim.sampling_interval + x_start
plt.plot(x, y, 'k')
plt.xlabel('%s [%s]' % (dim.label, dim.unit))
plt.ylabel('%s [%s]' % (array.label, array.unit))
plt.title('%s [%s]' % (array.name, array.type))
plt.xlim([np.min(x), np.max(x)])
else:
raise Exception('Unsupported data')
def _plot_array_2d(self, array, shape):
d1 = array.dimensions[0]
d2 = array.dimensions[1]
d1_type = d1.dimension_type
d2_type = d2.dimension_type
if d1_type == nix.DimensionType.Sample and d2_type == nix.DimensionType.Sample:
z = array.data[:]
x_start = d1.offset or 0
y_start = d2.offset or 0
x_end = x_start + shape[0] * d1.sampling_interval
y_end = y_start + shape[1] * d2.sampling_interval
plt.imshow(z, origin='lower', extent=[x_start, x_end, y_start, y_end])
plt.xlabel('%s [%s]' % (d1.label, d1.unit))
plt.ylabel('%s [%s]' % (d2.label, d2.unit))
plt.title('%s [%s]' % (array.name, array.type))
bar = plt.colorbar()
bar.set_label('%s [%s]' % (array.label, array.unit))
elif d1_type == nix.DimensionType.Set and d2_type == nix.DimensionType.Sample:
x_start = d2.offset or 0
x_one = x_start + np.arange(0, shape[1]) * d2.sampling_interval
x = np.tile(x_one.reshape(shape[1], 1), shape[0])
y = array.data[:]
plt.plot(x, y.T)
plt.title('%s [%s]' % (array.name, array.type))
plt.xlabel('%s [%s]' % (d2.label, d2.unit))
plt.ylabel('%s [%s]' % (array.label, array.unit))
if d1.labels is not None:
plt.legend(d1.labels)
else:
raise Exception('Unsupported data')
def save(self, filename, width=None, height=None, units='cm', **kwargs):
# units conversion taken from ggsave
if units not in ["in", "cm", "mm"]:
raise Exception("units not 'in', 'cm', or 'mm'")
to_inch = {"in": lambda x: x, "cm": lambda x: x / 2.54, "mm": lambda x: x * 2.54 * 10}
w_old, h_old = self.figure.get_size_inches()
w = to_inch[units](width) if width is not None else w_old
h = to_inch[units](height) if height is not None else h_old
self.figure.set_size_inches(w, h)
self.figure.savefig(filename, **kwargs)
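# Minimal usage sketch (illustrative only): the NIX file name 'example.h5' and
# its layout (at least one block holding one data array) are assumptions made
# for this example, not guarantees of this module.
if __name__ == '__main__':
    nix_file = nix.File.open('example.h5', nix.FileMode.ReadOnly)
    data_array = nix_file.blocks[0].data_arrays[0]
    plotter = Plotter()
    plotter.add_plot(data_array)
    plotter.save('example_plot.png', width=20, height=10, units='cm')
    nix_file.close()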
| bsd-3-clause |
BiaDarkia/scikit-learn | sklearn/ensemble/tests/test_gradient_boosting_loss_functions.py | 78 | 6016 | """
Testing for the gradient boosting loss functions and initial estimators.
"""
import numpy as np
from numpy.testing import assert_array_equal
from numpy.testing import assert_almost_equal
from numpy.testing import assert_equal
from sklearn.utils import check_random_state
from sklearn.utils.testing import assert_raises
from sklearn.ensemble.gradient_boosting import BinomialDeviance
from sklearn.ensemble.gradient_boosting import LogOddsEstimator
from sklearn.ensemble.gradient_boosting import LeastSquaresError
from sklearn.ensemble.gradient_boosting import RegressionLossFunction
from sklearn.ensemble.gradient_boosting import LOSS_FUNCTIONS
from sklearn.ensemble.gradient_boosting import _weighted_percentile
from sklearn.ensemble.gradient_boosting import QuantileLossFunction
def test_binomial_deviance():
# Check binomial deviance loss.
# Check against alternative definitions in ESLII.
bd = BinomialDeviance(2)
# pred has the same BD for y in {0, 1}
assert_equal(bd(np.array([0.0]), np.array([0.0])),
bd(np.array([1.0]), np.array([0.0])))
assert_almost_equal(bd(np.array([1.0, 1.0, 1.0]),
np.array([100.0, 100.0, 100.0])),
0.0)
assert_almost_equal(bd(np.array([1.0, 0.0, 0.0]),
np.array([100.0, -100.0, -100.0])), 0)
# check if same results as alternative definition of deviance (from ESLII)
alt_dev = lambda y, pred: np.mean(np.logaddexp(0.0, -2.0 *
(2.0 * y - 1) * pred))
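    # i.e. the ESLII form of the deviance: mean(log(1 + exp(-2 * (2*y - 1) * pred)))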
test_data = [(np.array([1.0, 1.0, 1.0]), np.array([100.0, 100.0, 100.0])),
(np.array([0.0, 0.0, 0.0]), np.array([100.0, 100.0, 100.0])),
(np.array([0.0, 0.0, 0.0]),
np.array([-100.0, -100.0, -100.0])),
(np.array([1.0, 1.0, 1.0]),
np.array([-100.0, -100.0, -100.0]))]
for datum in test_data:
assert_almost_equal(bd(*datum), alt_dev(*datum))
    # check the negative gradient against the alternative formulation
alt_ng = lambda y, pred: (2 * y - 1) / (1 + np.exp(2 * (2 * y - 1) * pred))
for datum in test_data:
assert_almost_equal(bd.negative_gradient(*datum), alt_ng(*datum))
def test_log_odds_estimator():
# Check log odds estimator.
est = LogOddsEstimator()
assert_raises(ValueError, est.fit, None, np.array([1]))
est.fit(None, np.array([1.0, 0.0]))
assert_equal(est.prior, 0.0)
assert_array_equal(est.predict(np.array([[1.0], [1.0]])),
np.array([[0.0], [0.0]]))
def test_sample_weight_smoke():
rng = check_random_state(13)
y = rng.rand(100)
pred = rng.rand(100)
# least squares
loss = LeastSquaresError(1)
loss_wo_sw = loss(y, pred)
loss_w_sw = loss(y, pred, np.ones(pred.shape[0], dtype=np.float32))
assert_almost_equal(loss_wo_sw, loss_w_sw)
def test_sample_weight_init_estimators():
# Smoke test for init estimators with sample weights.
rng = check_random_state(13)
X = rng.rand(100, 2)
sample_weight = np.ones(100)
reg_y = rng.rand(100)
clf_y = rng.randint(0, 2, size=100)
for Loss in LOSS_FUNCTIONS.values():
if Loss is None:
continue
if issubclass(Loss, RegressionLossFunction):
k = 1
y = reg_y
else:
k = 2
y = clf_y
if Loss.is_multi_class:
# skip multiclass
continue
loss = Loss(k)
init_est = loss.init_estimator()
init_est.fit(X, y)
out = init_est.predict(X)
assert_equal(out.shape, (y.shape[0], 1))
sw_init_est = loss.init_estimator()
sw_init_est.fit(X, y, sample_weight=sample_weight)
sw_out = init_est.predict(X)
assert_equal(sw_out.shape, (y.shape[0], 1))
# check if predictions match
assert_array_equal(out, sw_out)
def test_weighted_percentile():
y = np.empty(102, dtype=np.float64)
y[:50] = 0
y[-51:] = 2
y[-1] = 100000
y[50] = 1
sw = np.ones(102, dtype=np.float64)
sw[-1] = 0.0
score = _weighted_percentile(y, sw, 50)
assert score == 1
def test_weighted_percentile_equal():
y = np.empty(102, dtype=np.float64)
y.fill(0.0)
sw = np.ones(102, dtype=np.float64)
sw[-1] = 0.0
score = _weighted_percentile(y, sw, 50)
assert score == 0
def test_weighted_percentile_zero_weight():
y = np.empty(102, dtype=np.float64)
y.fill(1.0)
sw = np.ones(102, dtype=np.float64)
sw.fill(0.0)
score = _weighted_percentile(y, sw, 50)
assert score == 1.0
def test_quantile_loss_function():
# Non regression test for the QuantileLossFunction object
# There was a sign problem when evaluating the function
# for negative values of 'ytrue - ypred'
x = np.asarray([-1.0, 0.0, 1.0])
y_found = QuantileLossFunction(1, 0.9)(x, np.zeros_like(x))
y_expected = np.asarray([0.1, 0.0, 0.9]).mean()
np.testing.assert_allclose(y_found, y_expected)
def test_sample_weight_deviance():
# Test if deviance supports sample weights.
rng = check_random_state(13)
X = rng.rand(100, 2)
sample_weight = np.ones(100)
reg_y = rng.rand(100)
clf_y = rng.randint(0, 2, size=100)
mclf_y = rng.randint(0, 3, size=100)
for Loss in LOSS_FUNCTIONS.values():
if Loss is None:
continue
if issubclass(Loss, RegressionLossFunction):
k = 1
y = reg_y
p = reg_y
else:
k = 2
y = clf_y
p = clf_y
if Loss.is_multi_class:
k = 3
y = mclf_y
# one-hot encoding
p = np.zeros((y.shape[0], k), dtype=np.float64)
for i in range(k):
p[:, i] = y == i
loss = Loss(k)
deviance_w_w = loss(y, p, sample_weight)
deviance_wo_w = loss(y, p)
assert deviance_wo_w == deviance_w_w
| bsd-3-clause |
alephu5/Soundbyte | environment/lib/python3.3/site-packages/matplotlib/backends/backend_gtk3cairo.py | 1 | 1888 | from . import backend_gtk3
from . import backend_cairo
from matplotlib.figure import Figure
class RendererGTK3Cairo(backend_cairo.RendererCairo):
def set_context(self, ctx):
self.gc.ctx = ctx
class FigureCanvasGTK3Cairo(backend_gtk3.FigureCanvasGTK3,
backend_cairo.FigureCanvasCairo):
def __init__(self, figure):
backend_gtk3.FigureCanvasGTK3.__init__(self, figure)
def _renderer_init(self):
"""use cairo renderer"""
self._renderer = RendererGTK3Cairo(self.figure.dpi)
def _render_figure(self, width, height):
self._renderer.set_width_height (width, height)
self.figure.draw (self._renderer)
def on_draw_event(self, widget, ctx):
""" GtkDrawable draw event, like expose_event in GTK 2.X
"""
        # the _need_redraw flag doesn't work: it sometimes prevents
        # the rendering, leaving the canvas blank
#if self._need_redraw:
self._renderer.set_context(ctx)
allocation = self.get_allocation()
x, y, w, h = allocation.x, allocation.y, allocation.width, allocation.height
self._render_figure(w, h)
#self._need_redraw = False
return False # finish event propagation?
class FigureManagerGTK3Cairo(backend_gtk3.FigureManagerGTK3):
pass
def new_figure_manager(num, *args, **kwargs):
"""
Create a new figure manager instance
"""
FigureClass = kwargs.pop('FigureClass', Figure)
thisFig = FigureClass(*args, **kwargs)
return new_figure_manager_given_figure(num, thisFig)
def new_figure_manager_given_figure(num, figure):
"""
Create a new figure manager instance for the given figure.
"""
canvas = FigureCanvasGTK3Cairo(figure)
manager = FigureManagerGTK3Cairo(canvas, num)
return manager
FigureManager = FigureManagerGTK3Cairo
show = backend_gtk3.show
| gpl-3.0 |
Mogeng/IO-HMM | tests/test_SemiSupervisedIOHMM.py | 2 | 9401 | from __future__ import print_function
from __future__ import division
from builtins import range
from past.utils import old_div
import json
import unittest
import numpy as np
import pandas as pd
from IOHMM import SemiSupervisedIOHMM
from IOHMM import OLS, CrossEntropyMNL
class SemiSupervisedIOHMMTests(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.data_speed = pd.read_csv('examples/data/speed.csv')
cls.states = cls._mock_states()
@classmethod
def _mock_states(cls):
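        # Pin the first half of the sequence to known states: responses marked
        # 'cor' are assigned state 1 (one-hot [0, 1, 0, 0]) with 'rt' set to 1,
        # all others state 0 with 'rt' set to 0; the second half stays unlabeled.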
states = {}
corr = np.array(cls.data_speed['corr'])
for i in range(int(old_div(len(corr), 2))):
if corr[i] == 'cor':
states[i] = np.array([0, 1, 0, 0])
cls.data_speed.at[i, 'rt'] = 1
else:
states[i] = np.array([1, 0, 0, 0])
cls.data_speed.at[i, 'rt'] = 0
return states
def setUp(self):
np.random.seed(0)
def test_train_no_covariates(self):
np.random.seed(0)
self.model = SemiSupervisedIOHMM(num_states=4, max_EM_iter=100, EM_tol=1e-10)
self.model.set_models(
model_initial=CrossEntropyMNL(solver='newton-cg', reg_method='l2'),
model_transition=CrossEntropyMNL(solver='newton-cg', reg_method='l2'),
model_emissions=[OLS()])
self.model.set_inputs(covariates_initial=[], covariates_transition=[],
covariates_emissions=[[]])
self.model.set_outputs([['rt']])
self.model.set_data([[self.data_speed, self.states]])
self.model.train()
# emission coefficients
np.testing.assert_array_almost_equal(
self.model.model_emissions[0][0].coef,
np.array([[0]]), decimal=1)
np.testing.assert_array_almost_equal(
self.model.model_emissions[1][0].coef,
np.array([[1]]), decimal=1)
np.testing.assert_array_almost_equal(
self.model.model_emissions[2][0].coef,
np.array([[6.4]]), decimal=1)
np.testing.assert_array_almost_equal(
self.model.model_emissions[3][0].coef,
np.array([[5.5]]), decimal=1)
# emission dispersion
np.testing.assert_array_almost_equal(
self.model.model_emissions[0][0].dispersion, np.array([[0]]), decimal=2)
np.testing.assert_array_almost_equal(
self.model.model_emissions[1][0].dispersion, np.array([[0]]), decimal=2)
np.testing.assert_array_almost_equal(
self.model.model_emissions[2][0].dispersion, np.array([[0.051]]), decimal=2)
np.testing.assert_array_almost_equal(
self.model.model_emissions[3][0].dispersion, np.array([[0.032]]), decimal=2)
# transition
np.testing.assert_array_almost_equal(
np.exp(self.model.model_transition[0].predict_log_proba(np.array([[]]))),
np.array([[0.4, 0.6, 0, 0]]), decimal=1)
np.testing.assert_array_almost_equal(
np.exp(self.model.model_transition[1].predict_log_proba(np.array([[]]))),
np.array([[0.19, 0.81, 0, 0]]), decimal=2)
np.testing.assert_array_almost_equal(
np.exp(self.model.model_transition[2].predict_log_proba(np.array([[]]))),
np.array([[0, 0, 0.93, 0.07]]), decimal=2)
np.testing.assert_array_almost_equal(
np.exp(self.model.model_transition[3].predict_log_proba(np.array([[]]))),
np.array([[0, 0, 0.11, 0.89]]), decimal=2)
# to_json
json_dict = self.model.to_json('tests/IOHMM_models/SemiSupervisedIOHMM/')
self.assertEqual(json_dict['data_type'], 'SemiSupervisedIOHMM')
self.assertSetEqual(
set(json_dict['properties'].keys()),
set(['num_states', 'EM_tol', 'max_EM_iter',
'covariates_initial', 'covariates_transition',
'covariates_emissions', 'responses_emissions',
'model_initial', 'model_transition', 'model_emissions']))
with open('tests/IOHMM_models/SemiSupervisedIOHMM/model.json', 'w') as outfile:
json.dump(json_dict, outfile, indent=4, sort_keys=True)
def test_from_json(self):
with open('tests/IOHMM_models/SemiSupervisedIOHMM/model.json') as json_data:
json_dict = json.load(json_data)
self.model = SemiSupervisedIOHMM.from_json(json_dict)
self.assertEqual(type(self.model), SemiSupervisedIOHMM)
# emission coefficients
np.testing.assert_array_almost_equal(
self.model.model_emissions[0][0].coef,
np.array([[0]]), decimal=1)
np.testing.assert_array_almost_equal(
self.model.model_emissions[1][0].coef,
np.array([[1]]), decimal=1)
np.testing.assert_array_almost_equal(
self.model.model_emissions[2][0].coef,
np.array([[6.4]]), decimal=1)
np.testing.assert_array_almost_equal(
self.model.model_emissions[3][0].coef,
np.array([[5.5]]), decimal=1)
# emission dispersion
np.testing.assert_array_almost_equal(
self.model.model_emissions[0][0].dispersion, np.array([[0]]), decimal=2)
np.testing.assert_array_almost_equal(
self.model.model_emissions[1][0].dispersion, np.array([[0]]), decimal=2)
np.testing.assert_array_almost_equal(
self.model.model_emissions[2][0].dispersion, np.array([[0.051]]), decimal=2)
np.testing.assert_array_almost_equal(
self.model.model_emissions[3][0].dispersion, np.array([[0.032]]), decimal=2)
# transition
np.testing.assert_array_almost_equal(
np.exp(self.model.model_transition[0].predict_log_proba(np.array([[]]))),
np.array([[0.4, 0.6, 0, 0]]), decimal=1)
np.testing.assert_array_almost_equal(
np.exp(self.model.model_transition[1].predict_log_proba(np.array([[]]))),
np.array([[0.19, 0.81, 0, 0]]), decimal=2)
np.testing.assert_array_almost_equal(
np.exp(self.model.model_transition[2].predict_log_proba(np.array([[]]))),
np.array([[0, 0, 0.93, 0.07]]), decimal=2)
np.testing.assert_array_almost_equal(
np.exp(self.model.model_transition[3].predict_log_proba(np.array([[]]))),
np.array([[0, 0, 0.11, 0.89]]), decimal=2)
def test_from_config(self):
with open('tests/IOHMM_models/SemiSupervisedIOHMM/model.json') as json_data:
json_dict = json.load(json_data)
json_dict['properties'].update({
'model_initial': {
'data_type': 'CrossEntropyMNL',
'properties': {
'reg_method': 'l2',
'solver': 'newton-cg'
}
},
'model_transition': {
'data_type': 'CrossEntropyMNL',
'properties': {
'reg_method': 'l2',
'solver': 'newton-cg'
}
},
'model_emissions': [
{
'data_type': 'OLS',
'properties': {}
},
]})
print(json_dict['properties']['model_initial'])
self.model = SemiSupervisedIOHMM.from_config(json_dict)
self.assertEqual(type(self.model), SemiSupervisedIOHMM)
self.model.set_data([[self.data_speed, self.states]])
self.model.train()
# emission coefficients
np.testing.assert_array_almost_equal(
self.model.model_emissions[0][0].coef,
np.array([[0]]), decimal=1)
np.testing.assert_array_almost_equal(
self.model.model_emissions[1][0].coef,
np.array([[1]]), decimal=1)
np.testing.assert_array_almost_equal(
self.model.model_emissions[2][0].coef,
np.array([[6.4]]), decimal=1)
np.testing.assert_array_almost_equal(
self.model.model_emissions[3][0].coef,
np.array([[5.5]]), decimal=1)
# emission dispersion
np.testing.assert_array_almost_equal(
self.model.model_emissions[0][0].dispersion, np.array([[0]]), decimal=2)
np.testing.assert_array_almost_equal(
self.model.model_emissions[1][0].dispersion, np.array([[0]]), decimal=2)
np.testing.assert_array_almost_equal(
self.model.model_emissions[2][0].dispersion, np.array([[0.051]]), decimal=2)
np.testing.assert_array_almost_equal(
self.model.model_emissions[3][0].dispersion, np.array([[0.032]]), decimal=2)
# transition
np.testing.assert_array_almost_equal(
np.exp(self.model.model_transition[0].predict_log_proba(np.array([[]]))),
np.array([[0.4, 0.6, 0, 0]]), decimal=1)
np.testing.assert_array_almost_equal(
np.exp(self.model.model_transition[1].predict_log_proba(np.array([[]]))),
np.array([[0.19, 0.81, 0, 0]]), decimal=2)
np.testing.assert_array_almost_equal(
np.exp(self.model.model_transition[2].predict_log_proba(np.array([[]]))),
np.array([[0, 0, 0.93, 0.07]]), decimal=2)
np.testing.assert_array_almost_equal(
np.exp(self.model.model_transition[3].predict_log_proba(np.array([[]]))),
np.array([[0, 0, 0.11, 0.89]]), decimal=2)
| mit |
sanketloke/scikit-learn | sklearn/datasets/__init__.py | 72 | 3807 | """
The :mod:`sklearn.datasets` module includes utilities to load datasets,
including methods to load and fetch popular reference datasets. It also
features some artificial data generators.
"""
from .base import load_diabetes
from .base import load_digits
from .base import load_files
from .base import load_iris
from .base import load_breast_cancer
from .base import load_linnerud
from .base import load_boston
from .base import get_data_home
from .base import clear_data_home
from .base import load_sample_images
from .base import load_sample_image
from .covtype import fetch_covtype
from .kddcup99 import fetch_kddcup99
from .mlcomp import load_mlcomp
from .lfw import load_lfw_pairs
from .lfw import load_lfw_people
from .lfw import fetch_lfw_pairs
from .lfw import fetch_lfw_people
from .twenty_newsgroups import fetch_20newsgroups
from .twenty_newsgroups import fetch_20newsgroups_vectorized
from .mldata import fetch_mldata, mldata_filename
from .samples_generator import make_classification
from .samples_generator import make_multilabel_classification
from .samples_generator import make_hastie_10_2
from .samples_generator import make_regression
from .samples_generator import make_blobs
from .samples_generator import make_moons
from .samples_generator import make_circles
from .samples_generator import make_friedman1
from .samples_generator import make_friedman2
from .samples_generator import make_friedman3
from .samples_generator import make_low_rank_matrix
from .samples_generator import make_sparse_coded_signal
from .samples_generator import make_sparse_uncorrelated
from .samples_generator import make_spd_matrix
from .samples_generator import make_swiss_roll
from .samples_generator import make_s_curve
from .samples_generator import make_sparse_spd_matrix
from .samples_generator import make_gaussian_quantiles
from .samples_generator import make_biclusters
from .samples_generator import make_checkerboard
from .svmlight_format import load_svmlight_file
from .svmlight_format import load_svmlight_files
from .svmlight_format import dump_svmlight_file
from .olivetti_faces import fetch_olivetti_faces
from .species_distributions import fetch_species_distributions
from .california_housing import fetch_california_housing
from .rcv1 import fetch_rcv1
__all__ = ['clear_data_home',
'dump_svmlight_file',
'fetch_20newsgroups',
'fetch_20newsgroups_vectorized',
'fetch_lfw_pairs',
'fetch_lfw_people',
'fetch_mldata',
'fetch_olivetti_faces',
'fetch_species_distributions',
'fetch_california_housing',
'fetch_covtype',
'fetch_rcv1',
'fetch_kddcup99',
'get_data_home',
'load_boston',
'load_diabetes',
'load_digits',
'load_files',
'load_iris',
'load_breast_cancer',
'load_lfw_pairs',
'load_lfw_people',
'load_linnerud',
'load_mlcomp',
'load_sample_image',
'load_sample_images',
'load_svmlight_file',
'load_svmlight_files',
'make_biclusters',
'make_blobs',
'make_circles',
'make_classification',
'make_checkerboard',
'make_friedman1',
'make_friedman2',
'make_friedman3',
'make_gaussian_quantiles',
'make_hastie_10_2',
'make_low_rank_matrix',
'make_moons',
'make_multilabel_classification',
'make_regression',
'make_s_curve',
'make_sparse_coded_signal',
'make_sparse_spd_matrix',
'make_sparse_uncorrelated',
'make_spd_matrix',
'make_swiss_roll',
'mldata_filename']
| bsd-3-clause |
jseabold/statsmodels | statsmodels/tsa/descriptivestats.py | 5 | 2303 | # -*- coding: utf-8 -*-
"""Descriptive Statistics for Time Series
Created on Sat Oct 30 14:24:08 2010
Author: josef-pktd
License: BSD(3clause)
"""
import numpy as np
from . import stattools as stt
#todo: check subclassing for descriptive stats classes
class TsaDescriptive(object):
'''collection of descriptive statistical methods for time series
'''
def __init__(self, data, label=None, name=''):
self.data = data
self.label = label
self.name = name
def filter(self, num, den):
from scipy.signal import lfilter
xfiltered = lfilter(num, den, self.data)
return self.__class__(xfiltered, self.label, self.name + '_filtered')
def detrend(self, order=1):
from . import tsatools
xdetrended = tsatools.detrend(self.data, order=order)
return self.__class__(xdetrended, self.label, self.name + '_detrended')
def fit(self, order=(1,0,1), **kwds):
from .arima_model import ARMA
self.mod = ARMA(self.data)
self.res = self.mod.fit(order=order, **kwds)
#self.estimated_process =
return self.res
def acf(self, nlags=40):
return stt.acf(self.data, nlags=nlags)
def pacf(self, nlags=40):
return stt.pacf(self.data, nlags=nlags)
def periodogram(self):
        #does not return frequencies
return stt.periodogram(self.data)
# copied from fftarma.py
def plot4(self, fig=None, nobs=100, nacf=20, nfreq=100):
data = self.data
acf = self.acf(nacf)
pacf = self.pacf(nacf)
w = np.linspace(0, np.pi, nfreq, endpoint=False)
spdr = self.periodogram()[:nfreq] #(w)
if fig is None:
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(2,2,1)
namestr = ' for %s' % self.name if self.name else ''
ax.plot(data)
ax.set_title('Time series' + namestr)
ax = fig.add_subplot(2,2,2)
ax.plot(acf)
ax.set_title('Autocorrelation' + namestr)
ax = fig.add_subplot(2,2,3)
ax.plot(spdr) # (wr, spdr)
ax.set_title('Power Spectrum' + namestr)
ax = fig.add_subplot(2,2,4)
ax.plot(pacf)
ax.set_title('Partial Autocorrelation' + namestr)
return fig
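if __name__ == '__main__':
    # Minimal usage sketch (illustrative only): the simulated random-walk series
    # below is an assumption used to exercise the class, not real data.
    import matplotlib.pyplot as plt
    x = np.cumsum(np.random.randn(250))
    tsd = TsaDescriptive(x, name='random walk')
    fig = tsd.plot4(nacf=20, nfreq=100)
    plt.show()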
| bsd-3-clause |
glennhickey/teHmm | tests/cfgTest.py | 1 | 10281 | #!/usr/bin/env python
#Copyright (C) 2013 by Glenn Hickey
#
# Contains some fragments of code from sklearn/tests/test_hmm.py
# (2010 - 2013, scikit-learn developers (BSD License))
#
#Released under the MIT license, see LICENSE.txt
import unittest
import sys
import os
import math
from numpy.testing import assert_array_equal, assert_array_almost_equal
from teHmm.track import *
from teHmm.trackIO import readBedIntervals
from teHmm.hmm import MultitrackHmm
from teHmm.cfg import MultitrackCfg
from teHmm.emission import IndependentMultinomialEmissionModel, PairEmissionModel
from teHmm.tests.common import getTestDirPath
from teHmm.tests.common import TestBase
from teHmm.tests.bedTrackTest import getTracksInfoPath
from teHmm.tests.emissionTest import getBedStates
class TestCase(TestBase):
def setUp(self):
super(TestCase, self).setUp()
def tearDown(self):
super(TestCase, self).tearDown()
def testInit(self):
emissionModel = IndependentMultinomialEmissionModel(
10, [3], zeroAsMissingData=False)
pairModel = PairEmissionModel(emissionModel, [1.0] *
emissionModel.getNumStates())
cfg = MultitrackCfg(emissionModel, pairModel)
cfg = MultitrackCfg(emissionModel, [3,8])
cfg.validate()
def testDefaultVsHmm(self):
emissionModel = IndependentMultinomialEmissionModel(
10, [3], zeroAsMissingData=False)
hmm = MultitrackHmm(emissionModel)
pairModel = PairEmissionModel(emissionModel, [1.0] *
emissionModel.getNumStates())
cfg = MultitrackCfg(emissionModel, pairModel)
def testDefaultHmmViterbi(self):
emissionModel = IndependentMultinomialEmissionModel(
5, [3], zeroAsMissingData=False)
hmm = MultitrackHmm(emissionModel)
pairModel = PairEmissionModel(emissionModel, [1.0] *
emissionModel.getNumStates())
cfg = MultitrackCfg(emissionModel, pairModel)
obs = np.array([[0],[0],[1],[2]], dtype=np.uint8)
hmmProb, hmmStates = hmm.decode(obs)
cfgProb, cfgStates = cfg.decode(obs)
assert_array_almost_equal(hmmProb, cfgProb)
def testTraceBack(self):
# a model with 2 states. state 0 has a .75 chance of emitting 0
# state 1 has a 0.95 chance of emitting 1
emissionModel = IndependentMultinomialEmissionModel(
2, [2], zeroAsMissingData=False)
emProbs = np.zeros((1, 2, 2), dtype=np.float)
emProbs[0,0] = [0.75, 0.25]
emProbs[0,1] = [0.05, 0.95]
emissionModel.logProbs = np.log(emProbs)
hmm = MultitrackHmm(emissionModel)
pairModel = PairEmissionModel(emissionModel, [1.0] *
emissionModel.getNumStates())
cfg = MultitrackCfg(emissionModel, pairModel)
obs = np.array([[0],[0],[1],[0]], dtype=np.uint8)
hmmProb, hmmStates = hmm.decode(obs)
cfgProb, cfgStates = cfg.decode(obs)
assert_array_almost_equal(hmmProb, cfgProb)
assert_array_almost_equal(hmmStates, [0, 0, 1, 0])
assert_array_almost_equal(hmmStates, cfgStates)
def testBasicNesting(self):
# a model with 3 states. state 0 has a .75 chance of emitting 0
# state 1 has a 0.95 chance of emitting 1
# state 2 has a 0.90 chance of emitting 1
emissionModel = IndependentMultinomialEmissionModel(
3, [2], zeroAsMissingData=False)
emProbs = np.zeros((1, 3, 2), dtype=np.float)
emProbs[0,0] = [0.75, 0.25]
emProbs[0,1] = [0.05, 0.95]
emProbs[0,2] = [0.01, 0.90]
emissionModel.logProbs = np.log(emProbs)
# state 1 is a nested pair state!
pairModel = PairEmissionModel(emissionModel, [1.0] *
emissionModel.getNumStates())
cfg = MultitrackCfg(emissionModel, pairModel, nestStates = [1])
obs = np.array([[0],[0],[1],[0]], dtype=np.uint8)
cfgProb, cfgStates = cfg.decode(obs)
        # state 1 is a pair-only state, so it should never appear here
assert 1 not in cfgStates
assert_array_equal(cfgStates, [0,0,2,0])
obs = np.array([[1],[0],[0],[1]], dtype=np.uint8)
cfgProb, cfgStates = cfg.decode(obs)
assert_array_equal(cfgStates, [2,0,0,2])
alignment = np.array([[1],[0],[0],[1]], dtype=np.uint16)
cfgProb, cfgStates = cfg.decode(obs, alignmentTrack = alignment,
defAlignmentSymbol=0)
assert_array_equal(cfgStates, [1,0,0,1])
alignment = np.array([[1],[0],[0],[2]], dtype=np.uint16)
cfgProb, cfgStates = cfg.decode(obs, alignmentTrack = alignment,
defAlignmentSymbol=0)
assert_array_equal(cfgStates, [2,0,0,2])
def testHmmSupervisedLearn(self):
""" Pretty much copied from the HMM unit test. We try to recapitualte
all results with a CFG with no nest states, which should be same as
HMM"""
intervals = readBedIntervals(getTestDirPath("truth.bed"), ncol=4)
truthIntervals = []
for i in intervals:
truthIntervals.append((i[0], i[1], i[2], int(i[3])))
allIntervals = [(truthIntervals[0][0],
truthIntervals[0][1],
truthIntervals[-1][2])]
trackData = TrackData()
trackData.loadTrackData(getTracksInfoPath(3), allIntervals)
assert len(trackData.getTrackTableList()) == 1
# set the fudge to 1 since when the test was written this was
# hardcoded default
em = IndependentMultinomialEmissionModel(
4, trackData.getNumSymbolsPerTrack(),
fudge = 1.0)
hmm = MultitrackHmm(em)
hmm.supervisedTrain(trackData, truthIntervals)
hmm.validate()
pairModel = PairEmissionModel(em, [1.0] *
em.getNumStates())
# Test validates with neststate just for fun
cfg = MultitrackCfg(em, pairModel, nestStates = [1])
cfg.supervisedTrain(trackData, truthIntervals)
cfg.validate()
# Then reload as an hmm-equivalent
cfg = MultitrackCfg(em, pairModel, nestStates = [])
cfg.supervisedTrain(trackData, truthIntervals)
cfg.validate()
# check emissions, they should basically be binary.
trackList = cfg.getTrackList()
emp = np.exp(em.getLogProbs())
ltrTrack = trackList.getTrackByName("ltr")
track = ltrTrack.getNumber()
cmap = ltrTrack.getValueMap()
s0 = cmap.getMap(None)
s1 = cmap.getMap(0)
# we add 1 to all frequencies like emission trainer
assert_array_almost_equal(emp[track][0][s0], 36. / 37.)
assert_array_almost_equal(emp[track][0][s1], 1 - 36. / 37.)
assert_array_almost_equal(emp[track][1][s0], 1 - 6. / 7.)
assert_array_almost_equal(emp[track][1][s1], 6. / 7.)
assert_array_almost_equal(emp[track][2][s0], 26. / 27.)
assert_array_almost_equal(emp[track][2][s1], 1. - 26. / 27.)
assert_array_almost_equal(emp[track][3][s0], 1. - 6. / 7.)
assert_array_almost_equal(emp[track][3][s1], 6. / 7.)
insideTrack = trackList.getTrackByName("inside")
track = insideTrack.getNumber()
cmap = insideTrack.getValueMap()
s0 = cmap.getMap(None)
s1 = cmap.getMap("Inside")
assert_array_almost_equal(emp[track][0][s0], 36. / 37.)
assert_array_almost_equal(emp[track][0][s1], 1 - 36. / 37.)
assert_array_almost_equal(emp[track][1][s0], 6. / 7.)
assert_array_almost_equal(emp[track][1][s1], 1 - 6. / 7.)
assert_array_almost_equal(emp[track][2][s0], 1. - 26. / 27.)
assert_array_almost_equal(emp[track][2][s1], 26. / 27.)
assert_array_almost_equal(emp[track][3][s0], 6. / 7.)
assert_array_almost_equal(emp[track][3][s1], 1. - 6. / 7.)
# crappy check for start probs. need to test transition too!
freq = [0.0] * em.getNumStates()
total = 0.0
for interval in truthIntervals:
state = interval[3]
freq[state] += float(interval[2]) - float(interval[1])
total += float(interval[2]) - float(interval[1])
sprobs = cfg.getStartProbs()
assert len(sprobs) == em.getNumStates()
for state in xrange(em.getNumStates()):
assert_array_almost_equal(freq[state] / total, sprobs[state])
# transition probabilites
# from eyeball:
#c 0 5 0 0->0 +4 0->1 +1 0-> +5
#c 5 10 1 1->1 +4 1->2 +1 1-> +5
#c 10 35 2 2->2 +24 2->3 +1 2-> +25
#c 35 40 3 3->3 +4 3->0 +1 3-> +5
#c 40 70 0 0->0 +29 0-> +19
realTransProbs = np.array([
[33. / 34., 1. / 34., 0., 0.],
[0., 4. / 5., 1. / 5., 0.],
[0., 0., 24. / 25., 1. / 25.],
[1. / 5., 0., 0., 4. / 5.]
])
tprobs = np.exp(cfg.getLogProbTables()[0])
assert tprobs.shape == (em.getNumStates(), em.getNumStates(),
em.getNumStates())
for i in xrange(em.getNumStates()):
for j in xrange(em.getNumStates()):
fbTot = tprobs[i, i, j]
if i != j:
fbTot += tprobs[i, j, i]
assert_array_almost_equal(fbTot, realTransProbs[i,j])
prob, states = cfg.decode(trackData.getTrackTableList()[0])
for truthInt in truthIntervals:
for i in xrange(truthInt[1], truthInt[2]):
# gah, just realized that ltr track is binary, which means
# ltr states can be either 1 or 3. need to fix test properly
# but just relax comparison for now.
if truthInt[3] == 1 or truthInt[3] == 3:
assert states[i] == 1 or states[i] == 3
else:
assert states[i] == truthInt[3]
def main():
sys.argv = sys.argv[:1]
unittest.main()
if __name__ == '__main__':
main()
| mit |
bthirion/nipy | examples/labs/bayesian_structural_analysis.py | 2 | 3984 | #!/usr/bin/env python
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
from __future__ import print_function # Python 2/3 compatibility
__doc__ = """
This script generates a noisy multi-subject activation image dataset
and applies the Bayesian structural analysis on it
Requires matplotlib
Author : Bertrand Thirion, 2009-2013
"""
print(__doc__)
import numpy as np
import scipy.stats as st
try:
import matplotlib.pyplot as plt
except ImportError:
raise RuntimeError("This script needs the matplotlib library")
import nipy.labs.utils.simul_multisubject_fmri_dataset as simul
from nipy.labs.spatial_models.bayesian_structural_analysis import\
compute_landmarks
from nipy.labs.spatial_models.discrete_domain import grid_domain_from_shape
def display_landmarks_2d(landmarks, hrois, stats):
""" Plots the landmarks and associated rois as images"""
shape = stats[0].shape
n_subjects = len(stats)
lmax = 0
grp_map, density = np.zeros(shape), np.zeros(shape)
    if landmarks is not None:
domain = landmarks.domain
grp_map = landmarks.map_label(domain.coord, .8, sigma).reshape(shape)
density = landmarks.kernel_density(k=None, coord=domain.coord,
sigma=sigma).reshape(shape)
lmax = landmarks.k + 2
# Figure 1: input data
fig_input = plt.figure(figsize=(8, 3.5))
fig_input.text(.5,.9, "Input activation maps", ha='center')
vmin, vmax = stats.min(), stats.max()
for subject in range(n_subjects):
plt.subplot(n_subjects / 5, 5, subject + 1)
plt.imshow(stats[subject], interpolation='nearest',
vmin=vmin, vmax=vmax)
plt.axis('off')
# Figure 2: individual hrois
fig_output = plt.figure(figsize=(8, 3.5))
fig_output.text(.5, .9, "Individual landmark regions", ha="center")
for subject in range(n_subjects):
plt.subplot(n_subjects / 5, 5, subject + 1)
lw = - np.ones(shape)
if hrois[subject].k > 0:
nls = hrois[subject].get_roi_feature('label')
nls[nls == - 1] = np.size(landmarks) + 2
for k in range(hrois[subject].k):
np.ravel(lw)[hrois[subject].label == k] = nls[k]
plt.imshow(lw, interpolation='nearest', vmin=-1, vmax=lmax)
plt.axis('off')
# Figure 3: Group-level results
plt.figure(figsize=(6, 3))
plt.subplot(1, 2, 1)
plt.imshow(grp_map, interpolation='nearest', vmin=-1, vmax=lmax)
plt.title('group-level position 80% \n confidence regions', fontsize=10)
plt.axis('off')
plt.colorbar(shrink=.8)
plt.subplot(1, 2, 2)
plt.imshow(density, interpolation='nearest')
plt.title('Spatial density under h1', fontsize=10)
plt.axis('off')
plt.colorbar(shrink=.8)
###############################################################################
# Main script
###############################################################################
# generate the data
n_subjects = 10
shape = (60, 60)
pos = np.array([[12, 14],
[20, 20],
[30, 20]])
ampli = np.array([5, 7, 6])
sjitter = 1.0
stats = simul.surrogate_2d_dataset(n_subj=n_subjects, shape=shape, pos=pos,
ampli=ampli, width=5.0)
# set various parameters
threshold = float(st.t.isf(0.01, 100))
sigma = 4. / 1.5
prevalence_threshold = n_subjects * .25
prevalence_pval = 0.9
smin = 5
algorithm = 'co-occurrence' # 'density'
domain = grid_domain_from_shape(shape)
# get the functional information
stats_ = np.array([np.ravel(stats[k]) for k in range(n_subjects)]).T
# run the algo
landmarks, hrois = compute_landmarks(
domain, stats_, sigma, prevalence_pval, prevalence_threshold,
threshold, smin, method='prior', algorithm=algorithm)
display_landmarks_2d(landmarks, hrois, stats)
if landmarks is not None:
landmarks.show()
plt.show()
| bsd-3-clause |
d00d/quantNotebooks | Notebooks/quantopian_research_public/template_algorithms/pairs_trading_template.py | 5 | 6010 | import numpy as np
import statsmodels.api as sm
import pandas as pd
import quantopian.experimental.optimize as opt
import quantopian.algorithm as algo
def initialize(context):
# Quantopian backtester specific variables
set_slippage(slippage.FixedSlippage(spread=0))
set_commission(commission.PerTrade(cost=1))
set_symbol_lookup_date('2014-01-01')
context.stock_pairs = [(symbol('ABGB'), symbol('FSLR')),
(symbol('CSUN'), symbol('ASTI'))]
context.stocks = symbols('ABGB', 'FSLR', 'CSUN', 'ASTI')
context.num_pairs = len(context.stock_pairs)
# strategy specific variables
context.lookback = 20 # used for regression
context.z_window = 20 # used for zscore calculation, must be <= lookback
context.target_weights = pd.Series(index=context.stocks, data=0.25)
context.spread = np.ndarray((context.num_pairs, 0))
context.inLong = [False] * context.num_pairs
context.inShort = [False] * context.num_pairs
# Only do work 30 minutes before close
schedule_function(func=check_pair_status, date_rule=date_rules.every_day(), time_rule=time_rules.market_close(minutes=30))
# Will be called on every trade event for the securities you specify.
def handle_data(context, data):
# Our work is now scheduled in check_pair_status
pass
def check_pair_status(context, data):
prices = data.history(context.stocks, 'price', 35, '1d').iloc[-context.lookback::]
new_spreads = np.ndarray((context.num_pairs, 1))
for i in range(context.num_pairs):
(stock_y, stock_x) = context.stock_pairs[i]
Y = prices[stock_y]
X = prices[stock_x]
# Comment explaining try block
try:
hedge = hedge_ratio(Y, X, add_const=True)
except ValueError as e:
log.debug(e)
return
context.target_weights = get_current_portfolio_weights(context, data)
new_spreads[i, :] = Y[-1] - hedge * X[-1]
if context.spread.shape[1] > context.z_window:
# Keep only the z-score lookback period
spreads = context.spread[i, -context.z_window:]
zscore = (spreads[-1] - spreads.mean()) / spreads.std()
if context.inShort[i] and zscore < 0.0:
context.target_weights[stock_y] = 0
context.target_weights[stock_x] = 0
context.inShort[i] = False
context.inLong[i] = False
record(X_pct=0, Y_pct=0)
allocate(context, data)
return
if context.inLong[i] and zscore > 0.0:
context.target_weights[stock_y] = 0
context.target_weights[stock_x] = 0
context.inShort[i] = False
context.inLong[i] = False
record(X_pct=0, Y_pct=0)
allocate(context, data)
return
if zscore < -1.0 and (not context.inLong[i]):
# Only trade if NOT already in a trade
y_target_shares = 1
X_target_shares = -hedge
context.inLong[i] = True
context.inShort[i] = False
(y_target_pct, x_target_pct) = computeHoldingsPct(y_target_shares,X_target_shares, Y[-1], X[-1])
context.target_weights[stock_y] = y_target_pct * (1.0/context.num_pairs)
context.target_weights[stock_x] = x_target_pct * (1.0/context.num_pairs)
record(Y_pct=y_target_pct, X_pct=x_target_pct)
allocate(context, data)
return
if zscore > 1.0 and (not context.inShort[i]):
# Only trade if NOT already in a trade
y_target_shares = -1
X_target_shares = hedge
context.inShort[i] = True
context.inLong[i] = False
(y_target_pct, x_target_pct) = computeHoldingsPct( y_target_shares, X_target_shares, Y[-1], X[-1] )
context.target_weights[stock_y] = y_target_pct * (1.0/context.num_pairs)
context.target_weights[stock_x] = x_target_pct * (1.0/context.num_pairs)
record(Y_pct=y_target_pct, X_pct=x_target_pct)
allocate(context, data)
return
context.spread = np.hstack([context.spread, new_spreads])
def hedge_ratio(Y, X, add_const=True):
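    # Regress Y on X (optionally adding an intercept) and return the fitted
    # slope coefficient(s); the slope is used as the hedge ratio in the
    # spread Y - hedge * X computed by check_pair_status.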
if add_const:
X = sm.add_constant(X)
model = sm.OLS(Y, X).fit()
return model.params[1]
model = sm.OLS(Y, X).fit()
return model.params.values
def computeHoldingsPct(yShares, xShares, yPrice, xPrice):
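    # Worked example (illustrative numbers): yShares=1, xShares=-0.5, yPrice=20,
    # xPrice=40 gives yDol=20 and xDol=-20, so notionalDol=40 and the returned
    # target percentages are (+0.5, -0.5).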
yDol = yShares * yPrice
xDol = xShares * xPrice
notionalDol = abs(yDol) + abs(xDol)
y_target_pct = yDol / notionalDol
x_target_pct = xDol / notionalDol
return (y_target_pct, x_target_pct)
def get_current_portfolio_weights(context, data):
positions = context.portfolio.positions
positions_index = pd.Index(positions)
share_counts = pd.Series(
index=positions_index,
data=[positions[asset].amount for asset in positions]
)
current_prices = data.current(positions_index, 'price')
current_weights = share_counts * current_prices / context.portfolio.portfolio_value
return current_weights.reindex(positions_index.union(context.stocks), fill_value=0.0)
def allocate(context, data):
# Set objective to match target weights as closely as possible, given constraints
objective = opt.TargetPortfolioWeights(context.target_weights)
# Define constraints
constraints = []
constraints.append(opt.MaxGrossLeverage(1.0))
algo.order_optimal_portfolio(objective=objective, constraints=constraints, universe=context.stocks)
| unlicense |
qe-team/marmot | marmot/evaluation/evaluate.py | 1 | 8572 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function, division
import sys, codecs
import numpy as np
from sklearn import metrics
from marmot.evaluation.evaluation_metrics import weighted_fmeasure
import logging
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
logger = logging.getLogger('testlogger')
issues_accuracy = ['Terminology', 'Mistranslation', 'Omission', 'Addition', 'Untranslated', 'Accuracy']
issues_fluency = ['Agreement', 'Capitalization', 'Fluency', 'Function_words',
'Grammar', 'Morphology_(word_form)', 'Style/register',
'Typography', 'Unintelligible', 'Word_order',
'Tense/aspect/mood', 'Punctuation', 'Spelling',
'Part_of_speech']
def flatten(lofl):
return [item for sublist in lofl for item in sublist]
def read_wmt_annotation(f):
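    # Parse WMT-style annotations: each line holds six whitespace-separated fields,
    # <sentence id "X.Y"> <word index> <token> <fine tag> <coarse tag> <ok/bad tag>.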
anno = {}
for line_number, line in enumerate(f):
line = line.decode('utf-8').strip().split()
assert len(line) == 6, "line %d: expected 6 elements per line but found %d\n" %(line_number, len(line))
sid, wid, w, a1, a2, a3 = line
sid = sid.split('.')
assert len(sid) == 2 and sid[0].isdigit() and sid[1].isdigit(), \
"line %d: first entry (sentence id) must be if format X.Y\n" %line_number
assert wid.isdigit(), "line %d: second entry (word index) must be integer\n" %line_number
sid = (int(sid[0]), int(sid[1]))
wid = int(wid)
assert a1.lower() == "ok" or \
a1 in issues_accuracy or \
a1.lower() in map(str.lower, issues_accuracy) or \
a1 in issues_fluency or \
a1.lower() in map(str.lower, issues_fluency), \
"line %d: unexpected error category %s\n" %(line_number, a1)
assert a2.lower() in ['ok', 'fluency', 'accuracy'], "line %d: unexpected error category %s\n" %(line_number, a2)
assert a3.lower() in ['ok', 'bad'], "line %d: unexpected error category %s\n" %(line_number, a3)
if not sid in anno:
anno[sid] = {}
        assert wid not in anno[sid], "line %d: duplicate entry for s%d.%d:w%d" % (line_number, sid[0], sid[1], wid)
anno[sid][wid] = [a1.lower(), a2.lower(), a3.lower(), w]
return anno
def generate_random_with_prior(ref_list, options):
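    # Random baseline: draw labels with the same class priors as the reference labels.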
prior_probs = [float(ref_list.count(opt))/len(ref_list) for opt in options]
rand_list = [options[np.random.multinomial(1, prior_probs).argmax()] for i in range(len(ref_list))]
return rand_list
#print confusion matrix
def print_cf(cf, name, options, f1_scores, weighted_f1):
print("----- Results for %s: -----" %name)
print("-------------------------------------")
print("\t\tPREDICT")
print("REFERENCE\t", "\t".join(options))
for linenr, line in enumerate(cf):
print("%s\t\t" %options[linenr])
print("\t".join(map(str,line)))
print("-------------------------------------")
for i in range(len(options)):
print("F1 %24s: %f" %(options[i], f1_scores[i]))
print(" %24s: %f" %("WEIGHTED AVG", weighted_f1))
print("-------------------------------------")
#get scores and confusion matrix
#ref, hyp - lists of labels
def get_scores(ref, hyp, labels, name='default name', mute=0):
assert(all([r in labels for r in ref]))
assert(all([h in labels for h in hyp]))
assert(len(ref) == len(hyp))
label_list = list(labels)
weighted_f1 = metrics.f1_score(ref, hyp, labels=label_list, average='weighted', pos_label=None)
if not mute:
cf_matrix = metrics.confusion_matrix(ref, hyp, labels=label_list)
f1_scores = metrics.f1_score(ref, hyp, labels=label_list, average=None, pos_label=None)
print_cf(cf_matrix, name, label_list, f1_scores, weighted_f1)
return weighted_f1
#return list of labels for every example
# TODO: change the output format of the wmt parser above, this is messing everything up! - we should have dicts containing the annotation data
def choose_wmt_token_subset(anno, tok_list=None):
#use all words
if tok_list is None:
return [anno[sid][wid][-2] for sid in anno for wid in anno[sid]]
#use only words from tok_list
else:
# currently the index of the token in the annotation is -1, the coarse-grained annotation is at i = -2
return [anno[sid][wid][-2] for sid in anno for wid in anno[sid] if anno[sid][wid][-1] in tok_list]
def significance_test(ref, hyp_res, options, granularity=20):
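    # Monte-Carlo significance test: score prior-matched random labelings and report
    # the fraction that perform at least as well as the hypothesis.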
options = list(options)
assert type(hyp_res) != list, 'the performance on the hypothesis should be a float in the range: [0.0,1.0]'
res_random = []
for i in range(granularity):
rand = generate_random_with_prior(ref, options)
res_random.append(get_scores(ref, rand, options, str(i), mute=1))
numerator = len([res for res in res_random if hyp_res <= res])
if numerator == 0:
numerator = 1
p_value = numerator / granularity
if p_value <= 0.05:
print("The result is statistically significant with p = {}".format(p_value))
else:
print("The result is not statistically significant: {}".format(p_value))
return p_value
# evaluate predicted and actual hashed token instances
def evaluate_hashed_predictions(ref, hyp, labels):
ref_keys = ref.keys()
for tok in hyp.keys():
assert tok in ref_keys, 'The reference dict must contain the token'
assert len(ref[tok]) == len(hyp[tok]), 'the dicts must contain the same number of instances for each token'
label_list = set(labels)
result_map = {}
for tok, predicted in hyp.iteritems():
actual = ref[tok]
logger.info("\ttotal instances: " + str(len(predicted)))
logger.info("Evaluating results for token = " + tok)
hyp_res = get_scores(actual, predicted, label_list, '\''+tok+'\'')
token_p_value = significance_test(actual, hyp_res, label_list)
token_result = {'token': tok, 'weighted_f1': hyp_res, 'p_value': token_p_value}
result_map[tok] = token_result
return result_map
# assert that the keys are the same (see experiment_utils.sync)
# evaluate wmt formatted parallel files
def evaluate_wmt(anno_ref, anno_hyp, interesting_words=[]):
option_list = ['ok', 'bad']
# {'token': <token>, 'weighted_f1': <weighted_f1>, 'p_value': <p_value>}
evaluation_results = {'token_level': [], 'all_data': {}}
#scores and confusion matrices for individual words
for tok in interesting_words:
# choose_token_subset maps into [<tag>]
ref_list = choose_wmt_token_subset(anno_ref, tok_list=[tok])
hyp_list = choose_wmt_token_subset(anno_hyp, tok_list=[tok])
hyp_res = get_scores(ref_list, hyp_list, option_list, '\''+tok+'\'')
token_p_value = significance_test(ref_list, hyp_res, option_list)
# {'token': <token>, 'weighted_f1': <weighted_f1>, 'p_value': <p_value>}
token_result = {'token': tok, 'weighted_f1': hyp_res, 'p_value': token_p_value}
        evaluation_results['token_level'].append(token_result)
#scores for all interesting words or for all words if interesting_words not specified
ref_list = choose_wmt_token_subset(anno_ref, tok_list=None)
hyp_list = choose_wmt_token_subset(anno_hyp, tok_list=None)
overall_result = get_scores(ref_list, hyp_list, option_list, 'all_words')
p_value = significance_test(ref_list, overall_result, option_list)
result_obj = {'weighted_f1': overall_result, 'p_value': p_value}
evaluation_results['all_data'] = result_obj
return evaluation_results
#evaluate
def main(file_ref, file_hyp, words_file):
ref = read_wmt_annotation(open(file_ref))
hyp = read_wmt_annotation(open(file_hyp))
interesting_words = [] if words_file == "" else [line[:-1].decode('utf-8') for line in open(words_file)]
evaluate_wmt(ref, hyp, interesting_words)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('ref', help="reference annotations")
parser.add_argument('sub', help="submission annotations")
parser.add_argument('--token_subset', help="subset of tokens to evaluate")
args = parser.parse_args(sys.argv[1:])
main(args.ref, args.sub, args.token_subset if args.token_subset else "")
| isc |
Huitzilo/FUImaging | regnmf/nnmfgui_main.py | 1 | 16209 | #!/usr/bin/env python
# encoding: utf-8
"""
Created by Stephan Gabler ([email protected])
and Jan Sölter ([email protected]) at FU-Berlin.
Copyright (c) 2012. All rights reserved.
"""
import os, glob, json, sys
import ImageAnalysisComponents as bf
import runlib_new
# import QtStuff
from PyQt4 import QtCore
from PyQt4 import QtGui
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.figure import Figure
# import GUILayout
from main_window import Ui_MainGuiWin
from conversion_dialog import Ui_conversion_dialog
import logging as l
l.basicConfig(level=l.DEBUG,
format='%(asctime)s %(levelname)s: %(message)s',
datefmt='%Y-%m-%d %H:%M:%S');
class ConversionDialog(QtGui.QDialog, Ui_conversion_dialog):
"""docstring for ConversionDialog"""
def __init__(self, n_files):
super(ConversionDialog, self).__init__()
self.setupUi(self)
        message_text = ('%d data folder(s) have to be converted to our numpy format\n' +
'this is only done once..\n\n' +
'please enter the following values:') % n_files
self.label.setText(message_text)
self.stimulus_on_box.valueChanged.connect(self.validate_on_box)
self.stimulus_end_box.valueChanged.connect(self.validate_end_box)
def validate_on_box(self):
if self.stimulus_on_box.value() >= self.stimulus_end_box.value():
self.stimulus_on_box.setValue(self.stimulus_end_box.value() - 1)
def validate_end_box(self):
if self.stimulus_end_box.value() <= self.stimulus_on_box.value():
self.stimulus_end_box.setValue(self.stimulus_on_box.value() + 1)
class MainGui(QtGui.QMainWindow, Ui_MainGuiWin):
'''gui main class'''
def __init__(self, parent=None):
"""initialize the gui, connect signals, add axes objects, etc.."""
super(MainGui, self).__init__(parent)
self.factorized = False
self.setupUi(self)
self.results = {}
self.export_methods = {}
self.config_file = 'gui_config.json'
self.method_controls = {'nnma': [self.sparseness_label, self.sparseness_spinner,
self.smoothness_label, self.smoothness_spinner,
self.maxcount_label, self.maxcount_spinner],
'sica': []}
self.plot_methods = {'overview': runlib_new.raw_response_overview,
'mf_bases': runlib_new.mfbase_plot}
# reintegrate later
#'mf_overview': runlib_new.mf_overview_plot_single,
#'reconstruction': runlib_new.reconstruction_error_plot
# init gui
basic_plot_methods = ['overview']
self.plot_selection_box.insertItems(0, basic_plot_methods)
self.plot_threshold_box.insertItems(0, [str(x / 10.) for x in range(11)])
self.plot_threshold_box.setCurrentIndex(3)
self.format_box.insertItems(0, ['png', 'jpg', 'svg', 'pdf'])
# connect signals to slots
self.format_box.currentIndexChanged.connect(self.save_controls)
self.plot_export_button.clicked.connect(self.export_results)
self.session_box.currentIndexChanged.connect(self.update_plot)
self.plot_selection_box.currentIndexChanged.connect(self.update_plot)
self.plot_selection_box.currentIndexChanged.connect(self.change_plot_parameters)
self.preprocess_button.clicked.connect(self.preprocess)
self.factorize_button.clicked.connect(self.factorize)
for spinner in self.findChildren((QtGui.QSpinBox, QtGui.QDoubleSpinBox)):
spinner.valueChanged.connect(self.save_controls)
for check_box in self.findChildren(QtGui.QCheckBox):
check_box.stateChanged.connect(self.save_controls)
self.plot_threshold_box.currentIndexChanged.connect(self.update_plot)
self.methods_box.currentIndexChanged.connect(self.mf_method_changed)
self.load_controls()
# add plot widget
self.plot_widget = PlotWidget(self.centralwidget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)
sizePolicy.setHeightForWidth(self.plot_widget.sizePolicy().hasHeightForWidth())
self.plot_widget.setSizePolicy(sizePolicy)
self.scrollArea.setWidget(self.plot_widget)
def change_plot_parameters(self):
"""enable or disable the correct plot parameters for a certain plot"""
method = str(self.plot_selection_box.currentText())
if method == 'overview':
self.plot_threshold_box.setEnabled(True)
else:
self.plot_threshold_box.setEnabled(False)
def select_data_folder(self, path=''):
"""select data folder, either from given path or dialog"""
if not path:
caption = 'select your data folder'
self.fname = str(QtGui.QFileDialog.getExistingDirectory(caption=caption))
else:
self.fname = path
subfolders = [f for f in os.listdir(self.fname)
if os.path.isdir(os.path.join(self.fname, f))]
to_convert = []
for subfolder in subfolders:
if not glob.glob(os.path.join(self.fname, subfolder, 'timeseries*.npy')):
to_convert.append(subfolder)
if to_convert:
diag = ConversionDialog(len(to_convert))
if diag.exec_() == QtGui.QDialog.Accepted:
l.debug('read values from dialog')
framerate = diag.framerate_box.value()
stim_window = (diag.stimulus_on_box.value(), diag.stimulus_end_box.value())
else:
l.info('conversion cancelled')
sys.exit()
progdialog = QtGui.QProgressDialog('converting image files..',
'cancel',
0, len(to_convert), self)
progdialog.setMinimumDuration(0)
progdialog.setWindowModality(QtCore.Qt.WindowModal)
for i, folder in enumerate(to_convert):
progdialog.setValue(i)
folder_path = os.path.join(self.fname, folder)
QtCore.QCoreApplication.processEvents()
try:
ts = runlib_new.create_timeseries_from_pngs(folder_path, folder)
except OSError:
l.warning('No pngs available for animal %s' % folder)
continue
ts.framerate = framerate
ts.stim_window = stim_window
ts.save(os.path.join(folder_path, 'timeseries'))
if progdialog.wasCanceled():
                print 'image conversion cancelled by user'
break
progdialog.setValue(len(to_convert))
message = '%d files found in %s' % (len(subfolders), self.fname)
self.statusbar.showMessage(message, msecs=5000)
self.filelist = glob.glob(os.path.join(self.fname, '*', 'timeseries*.json'))
self.filelist = [os.path.splitext(i)[0].split(self.fname)[1][1:] for i in self.filelist]
self.session_box.insertItems(0, self.filelist)
def load_controls(self):
"""initialize the control elements (widgets) from config file"""
config = json.load(open(self.config_file))
self.lowpass_spinner.setValue(config['lowpass'])
self.highpass_spinner.setValue(config['highpass'])
self.spatial_spinner.setValue(config['spatial_down'])
self.methods_box.clear()
self.methods_box.insertItems(0, config['methods'].keys())
self.methods_box.setCurrentIndex(self.methods_box.findText(config['selected_method']))
self.format_box.setCurrentIndex(self.format_box.findText(config['format']))
self.sparseness_spinner.setValue(config['methods']['nnma']['sparse_param'])
self.smoothness_spinner.setValue(config['methods']['nnma']['smooth_param'])
self.maxcount_spinner.setValue(config['methods']['nnma']['maxcount'])
self.n_modes_spinner.setValue(config['n_modes'])
self.config = config
def save_controls(self, export_file=''):
'''after each click, save settings to config file'''
print 'save_controls called, export file is: %s' % export_file
config = {}
config['lowpass'] = self.lowpass_spinner.value()
config['highpass'] = self.highpass_spinner.value()
config['spatial_down'] = self.spatial_spinner.value()
config['selected_method'] = str(self.methods_box.currentText())
config['format'] = str(self.format_box.currentText())
config['methods'] = {'nnma': {}, 'sica': {}}
config['methods']['nnma']['sparse_param'] = self.sparseness_spinner.value()
config['methods']['nnma']['smooth_param'] = self.smoothness_spinner.value()
config['methods']['nnma']['maxcount'] = self.maxcount_spinner.value()
config['n_modes'] = self.n_modes_spinner.value()
self.config = config
json.dump(config, open(self.config_file, 'w'))
if isinstance(export_file, str) and os.path.exists(os.path.dirname(export_file)):
json.dump(config, open(export_file, 'w'))
# TODO: add load and save settings to the menu
def mf_method_changed(self):
"""display the suitable options for the selected method"""
current_method = str(self.methods_box.currentText())
for method in self.config['methods']:
for ui_elem in self.method_controls[method]:
ui_elem.setVisible(method == current_method)
self.save_controls()
def export_results(self):
"""save all selected plots"""
caption = 'select output folder'
out_folder = str(QtGui.QFileDialog.getExistingDirectory(caption=caption))
params = {'threshold': float(self.plot_threshold_box.currentText())}
json.dump(self.config, open(os.path.join(out_folder, 'config.json'), 'w'))
if not os.path.exists(os.path.join(out_folder, 'timeseries')):
os.mkdir(os.path.join(out_folder, 'timeseries'))
progdialog = QtGui.QProgressDialog('export results..',
'cancel',
0, len(self.filelist), self)
progdialog.setMinimumDuration(0)
progdialog.setWindowModality(QtCore.Qt.WindowModal)
fig = Figure()
for i, session in enumerate(self.filelist):
#ToDo: OS independent solution
sessionname = ''.join(session.split('/timeseries'))
progdialog.setValue(i)
for plot_method in self.plot_methods:
fig.clear()
if not os.path.exists(os.path.join(out_folder, plot_method)):
os.mkdir(os.path.join(out_folder, plot_method))
self.plot_methods[plot_method](self.results[session],
fig,
params)
plot_name = sessionname + '_' + plot_method.replace(' ', '_')
plot_name += '.' + self.config['format']
fig.savefig(os.path.join(out_folder, plot_method, plot_name))
self.results[session]['mf'].save(os.path.join(out_folder, 'timeseries', sessionname))
progdialog.setValue(len(self.filelist))
def preprocess(self):
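        # Load each session's raw time series, preprocess it and cache the result.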
self.results = {}
self.statusbar.showMessage('preprocessing going on..')
progdialog = QtGui.QProgressDialog('', 'cancel', 0, len(self.filelist), self)
progdialog.setMinimumDuration(0)
progdialog.setWindowModality(QtCore.Qt.WindowModal)
for file_ind, filename in enumerate(self.filelist):
print self.fname, filename
progdialog.setValue(file_ind)
if progdialog.wasCanceled():
break
# create timeseries, change shape and preprocess
ts = bf.TimeSeries()
progdialog.setLabelText('%s: loading' % filename)
QtCore.QCoreApplication.processEvents()
ts.load(os.path.join(self.fname, filename))
progdialog.setLabelText('%s: preprocessing' % filename)
QtCore.QCoreApplication.processEvents()
self.results[filename] = runlib_new.preprocess(ts, self.config)
self.results[filename]['mask'] = []
progdialog.setValue(len(self.filelist))
self.statusbar.showMessage('finished preprocessing', msecs=3000)
self.activate_controls()
if self.factorized:
            self.factorize_label.setText('preprocessing changed, factorize again!!!')
for plot_method in ['mf_overview', 'reconstruction']:
ind = self.plot_selection_box.findText(plot_method)
if ind >= 0:
self.plot_selection_box.removeItem(ind)
self.update_plot()
def activate_controls(self):
"""activate the widgets after preprocessing"""
self.factorize_box.setEnabled(True)
self.export_box.setEnabled(True)
self.session_box.setEnabled(True)
self.plot_selection_box.setEnabled(True)
self.plot_export_button.setEnabled(True)
def update_plot(self):
"""this is called when a new session or new kind of plot is selected"""
l.debug('update plot called')
if self.results:
self.plot_widget.fig.clear()
session = str(self.session_box.currentText())
plot_method = str(self.plot_selection_box.currentText())
params = {'threshold': float(self.plot_threshold_box.currentText())}
self.plot_methods[plot_method](self.results[session], self.plot_widget.fig, params)
self.plot_widget.canvas.draw()
# TODO: maybe start a new thread for this?
def factorize(self):
self.factorize_label.setText('')
mf_params = {'method': self.config['selected_method'],
'param': self.config['methods'][self.config['selected_method']]}
mf_params['param']['num_components'] = self.config['n_modes']
l.info(mf_params)
self.statusbar.showMessage('factorization going on ..')
progdialog = QtGui.QProgressDialog('factorization going on ..',
'cancel',
0, len(self.filelist), self)
progdialog.setMinimumDuration(0)
progdialog.setWindowModality(QtCore.Qt.WindowModal)
for file_ind, filename in enumerate(self.filelist):
pp = self.results[filename]['pp']
progdialog.setValue(file_ind)
if progdialog.wasCanceled():
break
# do matrix factorization
progdialog.setLabelText('%s: factorization' % filename)
QtCore.QCoreApplication.processEvents()
mf_func = runlib_new.create_mf(mf_params)
mf = mf_func(pp)
self.results[filename]['mf'] = mf
progdialog.setValue(len(self.filelist))
self.statusbar.showMessage('finished', msecs=2000)
self.plot_selection_box.insertItems(0, ['mf_bases', 'mf_overview', 'reconstruction'])
self.plot_selection_box.setCurrentIndex(0)
self.factorized = True
class PlotCanvas(FigureCanvas):
'''a class only containing the figure to manage the qt layout'''
def __init__(self):
self.fig = Figure()
FigureCanvas.__init__(self, self.fig)
FigureCanvas.setSizePolicy(self, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)
FigureCanvas.updateGeometry(self)
class PlotWidget(QtGui.QWidget):
'''all plotting related stuff and also the context menu'''
def __init__(self, parent=None):
QtGui.QWidget.__init__(self, parent)
self.canvas = PlotCanvas()
self.fig = self.canvas.fig
self.vbl = QtGui.QVBoxLayout()
self.vbl.addWidget(self.canvas)
self.setLayout(self.vbl)
if __name__ == '__main__':
app = QtGui.QApplication(sys.argv)
my_gui = MainGui()
my_gui.show()
app.setActiveWindow(my_gui)
my_gui.select_data_folder()
sys.exit(app.exec_())
| mit |
ronalcc/zipline | tests/risk/answer_key.py | 39 | 11989 | #
# Copyright 2014 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import hashlib
import os
import numpy as np
import pandas as pd
import pytz
import xlrd
import requests
from six.moves import map
def col_letter_to_index(col_letter):
# Only supports single letter,
# but answer key doesn't need multi-letter, yet.
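    # Treat the letters as base-26 digits (A=1 ... Z=26), e.g. 'AB' -> 28.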
index = 0
for i, char in enumerate(reversed(col_letter)):
index += ((ord(char) - 65) + 1) * pow(26, i)
return index
DIR = os.path.dirname(os.path.realpath(__file__))
ANSWER_KEY_CHECKSUMS_PATH = os.path.join(DIR, 'risk-answer-key-checksums')
ANSWER_KEY_CHECKSUMS = open(ANSWER_KEY_CHECKSUMS_PATH, 'r').read().splitlines()
ANSWER_KEY_FILENAME = 'risk-answer-key.xlsx'
ANSWER_KEY_PATH = os.path.join(DIR, ANSWER_KEY_FILENAME)
ANSWER_KEY_BUCKET_NAME = 'zipline-test_data'
ANSWER_KEY_DL_TEMPLATE = """
https://s3.amazonaws.com/zipline-test-data/risk/{md5}/risk-answer-key.xlsx
""".strip()
LATEST_ANSWER_KEY_URL = ANSWER_KEY_DL_TEMPLATE.format(
md5=ANSWER_KEY_CHECKSUMS[-1])
def answer_key_signature():
with open(ANSWER_KEY_PATH, 'rb') as f:
md5 = hashlib.md5()
buf = f.read(1024)
md5.update(buf)
while buf != b"":
buf = f.read(1024)
md5.update(buf)
return md5.hexdigest()
def ensure_latest_answer_key():
"""
    Get the latest answer key from a publicly available location.
Logic for determining what and when to download is as such:
- If there is no local spreadsheet file, then get the lastest answer key,
as defined by the last row in the checksum file.
- If there is a local spreadsheet file:
-- If the spreadsheet's checksum is in the checksum file:
--- If the spreadsheet's checksum does not match the latest, then grab the
the latest checksum and replace the local checksum file.
--- If the spreadsheet's checksum matches the latest, then skip download,
and use the local spreadsheet as a cached copy.
-- If the spreadsheet's checksum is not in the checksum file, then leave
the local file alone, assuming that the local xls's md5 is not in the list
due to local modifications during development.
    It is possible that md5's could collide; if that is ever the case, we should
then find an alternative naming scheme.
The spreadsheet answer sheet is not kept in SCM, as every edit would
increase the repo size by the file size, since it is treated as a binary.
"""
answer_key_dl_checksum = None
local_answer_key_exists = os.path.exists(ANSWER_KEY_PATH)
if local_answer_key_exists:
local_hash = answer_key_signature()
if local_hash in ANSWER_KEY_CHECKSUMS:
# Assume previously downloaded version.
# Check for latest.
if local_hash != ANSWER_KEY_CHECKSUMS[-1]:
# More recent checksum, download
answer_key_dl_checksum = ANSWER_KEY_CHECKSUMS[-1]
else:
# Assume local copy that is being developed on
answer_key_dl_checksum = None
else:
answer_key_dl_checksum = ANSWER_KEY_CHECKSUMS[-1]
if answer_key_dl_checksum:
res = requests.get(
ANSWER_KEY_DL_TEMPLATE.format(md5=answer_key_dl_checksum))
with open(ANSWER_KEY_PATH, 'wb') as f:
f.write(res.content)
# Get latest answer key on load.
ensure_latest_answer_key()
class DataIndex(object):
"""
Coordinates for the spreadsheet, using the values as seen in the notebook.
The python-excel libraries use 0 index, while the spreadsheet in a GUI
uses a 1 index.
"""
def __init__(self, sheet_name, col, row_start, row_end,
value_type='float'):
self.sheet_name = sheet_name
self.col = col
self.row_start = row_start
self.row_end = row_end
self.value_type = value_type
@property
def col_index(self):
return col_letter_to_index(self.col) - 1
@property
def row_start_index(self):
return self.row_start - 1
@property
def row_end_index(self):
return self.row_end - 1
def __str__(self):
return "'{sheet_name}'!{col}{row_start}:{col}{row_end}".format(
sheet_name=self.sheet_name,
col=self.col,
row_start=self.row_start,
row_end=self.row_end
)
class AnswerKey(object):
INDEXES = {
'RETURNS': DataIndex('Sim Period', 'D', 4, 255),
'BENCHMARK': {
'Dates': DataIndex('s_p', 'A', 4, 254, value_type='date'),
'Returns': DataIndex('s_p', 'H', 4, 254)
},
# Below matches the inconsistent capitalization in spreadsheet
'BENCHMARK_PERIOD_RETURNS': {
'Monthly': DataIndex('s_p', 'R', 8, 19),
'3-Month': DataIndex('s_p', 'S', 10, 19),
'6-month': DataIndex('s_p', 'T', 13, 19),
'year': DataIndex('s_p', 'U', 19, 19),
},
'BENCHMARK_PERIOD_VOLATILITY': {
'Monthly': DataIndex('s_p', 'V', 8, 19),
'3-Month': DataIndex('s_p', 'W', 10, 19),
'6-month': DataIndex('s_p', 'X', 13, 19),
'year': DataIndex('s_p', 'Y', 19, 19),
},
'ALGORITHM_PERIOD_RETURNS': {
'Monthly': DataIndex('Sim Period', 'Z', 23, 34),
'3-Month': DataIndex('Sim Period', 'AA', 25, 34),
'6-month': DataIndex('Sim Period', 'AB', 28, 34),
'year': DataIndex('Sim Period', 'AC', 34, 34),
},
'ALGORITHM_PERIOD_VOLATILITY': {
'Monthly': DataIndex('Sim Period', 'AH', 23, 34),
'3-Month': DataIndex('Sim Period', 'AI', 25, 34),
'6-month': DataIndex('Sim Period', 'AJ', 28, 34),
'year': DataIndex('Sim Period', 'AK', 34, 34),
},
'ALGORITHM_PERIOD_SHARPE': {
'Monthly': DataIndex('Sim Period', 'AL', 23, 34),
'3-Month': DataIndex('Sim Period', 'AM', 25, 34),
'6-month': DataIndex('Sim Period', 'AN', 28, 34),
'year': DataIndex('Sim Period', 'AO', 34, 34),
},
'ALGORITHM_PERIOD_BETA': {
'Monthly': DataIndex('Sim Period', 'AP', 23, 34),
'3-Month': DataIndex('Sim Period', 'AQ', 25, 34),
'6-month': DataIndex('Sim Period', 'AR', 28, 34),
'year': DataIndex('Sim Period', 'AS', 34, 34),
},
'ALGORITHM_PERIOD_ALPHA': {
'Monthly': DataIndex('Sim Period', 'AT', 23, 34),
'3-Month': DataIndex('Sim Period', 'AU', 25, 34),
'6-month': DataIndex('Sim Period', 'AV', 28, 34),
'year': DataIndex('Sim Period', 'AW', 34, 34),
},
'ALGORITHM_PERIOD_BENCHMARK_VARIANCE': {
'Monthly': DataIndex('Sim Period', 'BJ', 23, 34),
'3-Month': DataIndex('Sim Period', 'BK', 25, 34),
'6-month': DataIndex('Sim Period', 'BL', 28, 34),
'year': DataIndex('Sim Period', 'BM', 34, 34),
},
'ALGORITHM_PERIOD_COVARIANCE': {
'Monthly': DataIndex('Sim Period', 'BF', 23, 34),
'3-Month': DataIndex('Sim Period', 'BG', 25, 34),
'6-month': DataIndex('Sim Period', 'BH', 28, 34),
'year': DataIndex('Sim Period', 'BI', 34, 34),
},
'ALGORITHM_PERIOD_DOWNSIDE_RISK': {
'Monthly': DataIndex('Sim Period', 'BN', 23, 34),
'3-Month': DataIndex('Sim Period', 'BO', 25, 34),
'6-month': DataIndex('Sim Period', 'BP', 28, 34),
'year': DataIndex('Sim Period', 'BQ', 34, 34),
},
'ALGORITHM_PERIOD_SORTINO': {
'Monthly': DataIndex('Sim Period', 'BR', 23, 34),
'3-Month': DataIndex('Sim Period', 'BS', 25, 34),
'6-month': DataIndex('Sim Period', 'BT', 28, 34),
'year': DataIndex('Sim Period', 'BU', 34, 34),
},
'ALGORITHM_RETURN_VALUES': DataIndex(
'Sim Cumulative', 'D', 4, 254),
'ALGORITHM_CUMULATIVE_VOLATILITY': DataIndex(
'Sim Cumulative', 'P', 4, 254),
'ALGORITHM_CUMULATIVE_SHARPE': DataIndex(
'Sim Cumulative', 'R', 4, 254),
'CUMULATIVE_DOWNSIDE_RISK': DataIndex(
'Sim Cumulative', 'U', 4, 254),
'CUMULATIVE_SORTINO': DataIndex(
'Sim Cumulative', 'V', 4, 254),
'CUMULATIVE_INFORMATION': DataIndex(
'Sim Cumulative', 'AA', 4, 254),
'CUMULATIVE_BETA': DataIndex(
'Sim Cumulative', 'AD', 4, 254),
'CUMULATIVE_ALPHA': DataIndex(
'Sim Cumulative', 'AE', 4, 254),
'CUMULATIVE_MAX_DRAWDOWN': DataIndex(
'Sim Cumulative', 'AH', 4, 254),
}
def __init__(self):
self.workbook = xlrd.open_workbook(ANSWER_KEY_PATH)
self.sheets = {}
self.sheets['Sim Period'] = self.workbook.sheet_by_name('Sim Period')
self.sheets['Sim Cumulative'] = self.workbook.sheet_by_name(
'Sim Cumulative')
self.sheets['s_p'] = self.workbook.sheet_by_name('s_p')
for name, index in self.INDEXES.items():
if isinstance(index, dict):
subvalues = {}
for subkey, subindex in index.items():
subvalues[subkey] = self.get_values(subindex)
setattr(self, name, subvalues)
else:
setattr(self, name, self.get_values(index))
def parse_date_value(self, value):
return xlrd.xldate_as_tuple(value, 0)
def parse_float_value(self, value):
return value if value != '' else np.nan
def get_raw_values(self, data_index):
return self.sheets[data_index.sheet_name].col_values(
data_index.col_index,
data_index.row_start_index,
data_index.row_end_index + 1)
@property
def value_type_to_value_func(self):
return {
'float': self.parse_float_value,
'date': self.parse_date_value,
}
def get_values(self, data_index):
value_parser = self.value_type_to_value_func[data_index.value_type]
return [value for value in
map(value_parser, self.get_raw_values(data_index))]
ANSWER_KEY = AnswerKey()
BENCHMARK_DATES = ANSWER_KEY.BENCHMARK['Dates']
BENCHMARK_RETURNS = ANSWER_KEY.BENCHMARK['Returns']
DATES = [datetime.datetime(*x, tzinfo=pytz.UTC) for x in BENCHMARK_DATES]
BENCHMARK = pd.Series(dict(zip(DATES, BENCHMARK_RETURNS)))
ALGORITHM_RETURNS = pd.Series(
dict(zip(DATES, ANSWER_KEY.ALGORITHM_RETURN_VALUES)))
RETURNS_DATA = pd.DataFrame({'Benchmark Returns': BENCHMARK,
'Algorithm Returns': ALGORITHM_RETURNS})
RISK_CUMULATIVE = pd.DataFrame({
'volatility': pd.Series(dict(zip(
DATES, ANSWER_KEY.ALGORITHM_CUMULATIVE_VOLATILITY))),
'sharpe': pd.Series(dict(zip(
DATES, ANSWER_KEY.ALGORITHM_CUMULATIVE_SHARPE))),
'downside_risk': pd.Series(dict(zip(
DATES, ANSWER_KEY.CUMULATIVE_DOWNSIDE_RISK))),
'sortino': pd.Series(dict(zip(
DATES, ANSWER_KEY.CUMULATIVE_SORTINO))),
'information': pd.Series(dict(zip(
DATES, ANSWER_KEY.CUMULATIVE_INFORMATION))),
'alpha': pd.Series(dict(zip(
DATES, ANSWER_KEY.CUMULATIVE_ALPHA))),
'beta': pd.Series(dict(zip(
DATES, ANSWER_KEY.CUMULATIVE_BETA))),
'max_drawdown': pd.Series(dict(zip(
DATES, ANSWER_KEY.CUMULATIVE_MAX_DRAWDOWN))),
})
| apache-2.0 |
heli522/scikit-learn | sklearn/metrics/cluster/tests/test_supervised.py | 206 | 7643 | import numpy as np
from sklearn.metrics.cluster import adjusted_rand_score
from sklearn.metrics.cluster import homogeneity_score
from sklearn.metrics.cluster import completeness_score
from sklearn.metrics.cluster import v_measure_score
from sklearn.metrics.cluster import homogeneity_completeness_v_measure
from sklearn.metrics.cluster import adjusted_mutual_info_score
from sklearn.metrics.cluster import normalized_mutual_info_score
from sklearn.metrics.cluster import mutual_info_score
from sklearn.metrics.cluster import expected_mutual_information
from sklearn.metrics.cluster import contingency_matrix
from sklearn.metrics.cluster import entropy
from sklearn.utils.testing import assert_raise_message
from nose.tools import assert_almost_equal
from nose.tools import assert_equal
from numpy.testing import assert_array_almost_equal
score_funcs = [
adjusted_rand_score,
homogeneity_score,
completeness_score,
v_measure_score,
adjusted_mutual_info_score,
normalized_mutual_info_score,
]
def test_error_messages_on_wrong_input():
for score_func in score_funcs:
expected = ('labels_true and labels_pred must have same size,'
' got 2 and 3')
assert_raise_message(ValueError, expected, score_func,
[0, 1], [1, 1, 1])
expected = "labels_true must be 1D: shape is (2"
assert_raise_message(ValueError, expected, score_func,
[[0, 1], [1, 0]], [1, 1, 1])
expected = "labels_pred must be 1D: shape is (2"
assert_raise_message(ValueError, expected, score_func,
[0, 1, 0], [[1, 1], [0, 0]])
def test_perfect_matches():
for score_func in score_funcs:
assert_equal(score_func([], []), 1.0)
assert_equal(score_func([0], [1]), 1.0)
assert_equal(score_func([0, 0, 0], [0, 0, 0]), 1.0)
assert_equal(score_func([0, 1, 0], [42, 7, 42]), 1.0)
assert_equal(score_func([0., 1., 0.], [42., 7., 42.]), 1.0)
assert_equal(score_func([0., 1., 2.], [42., 7., 2.]), 1.0)
assert_equal(score_func([0, 1, 2], [42, 7, 2]), 1.0)
def test_homogeneous_but_not_complete_labeling():
# homogeneous but not complete clustering
h, c, v = homogeneity_completeness_v_measure(
[0, 0, 0, 1, 1, 1],
[0, 0, 0, 1, 2, 2])
assert_almost_equal(h, 1.00, 2)
assert_almost_equal(c, 0.69, 2)
assert_almost_equal(v, 0.81, 2)
def test_complete_but_not_homogeneous_labeling():
# complete but not homogeneous clustering
h, c, v = homogeneity_completeness_v_measure(
[0, 0, 1, 1, 2, 2],
[0, 0, 1, 1, 1, 1])
assert_almost_equal(h, 0.58, 2)
assert_almost_equal(c, 1.00, 2)
assert_almost_equal(v, 0.73, 2)
def test_not_complete_and_not_homogeneous_labeling():
# neither complete nor homogeneous but not so bad either
h, c, v = homogeneity_completeness_v_measure(
[0, 0, 0, 1, 1, 1],
[0, 1, 0, 1, 2, 2])
assert_almost_equal(h, 0.67, 2)
assert_almost_equal(c, 0.42, 2)
assert_almost_equal(v, 0.52, 2)
def test_non_consecutive_labels():
# regression tests for labels with gaps
h, c, v = homogeneity_completeness_v_measure(
[0, 0, 0, 2, 2, 2],
[0, 1, 0, 1, 2, 2])
assert_almost_equal(h, 0.67, 2)
assert_almost_equal(c, 0.42, 2)
assert_almost_equal(v, 0.52, 2)
h, c, v = homogeneity_completeness_v_measure(
[0, 0, 0, 1, 1, 1],
[0, 4, 0, 4, 2, 2])
assert_almost_equal(h, 0.67, 2)
assert_almost_equal(c, 0.42, 2)
assert_almost_equal(v, 0.52, 2)
ari_1 = adjusted_rand_score([0, 0, 0, 1, 1, 1], [0, 1, 0, 1, 2, 2])
ari_2 = adjusted_rand_score([0, 0, 0, 1, 1, 1], [0, 4, 0, 4, 2, 2])
assert_almost_equal(ari_1, 0.24, 2)
assert_almost_equal(ari_2, 0.24, 2)
def uniform_labelings_scores(score_func, n_samples, k_range, n_runs=10,
seed=42):
# Compute score for random uniform cluster labelings
random_labels = np.random.RandomState(seed).random_integers
scores = np.zeros((len(k_range), n_runs))
for i, k in enumerate(k_range):
for j in range(n_runs):
labels_a = random_labels(low=0, high=k - 1, size=n_samples)
labels_b = random_labels(low=0, high=k - 1, size=n_samples)
scores[i, j] = score_func(labels_a, labels_b)
return scores
def test_adjustment_for_chance():
# Check that adjusted scores are almost zero on random labels
n_clusters_range = [2, 10, 50, 90]
n_samples = 100
n_runs = 10
scores = uniform_labelings_scores(
adjusted_rand_score, n_samples, n_clusters_range, n_runs)
max_abs_scores = np.abs(scores).max(axis=1)
assert_array_almost_equal(max_abs_scores, [0.02, 0.03, 0.03, 0.02], 2)
def test_adjusted_mutual_info_score():
# Compute the Adjusted Mutual Information and test against known values
labels_a = np.array([1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3])
labels_b = np.array([1, 1, 1, 1, 2, 1, 2, 2, 2, 2, 3, 1, 3, 3, 3, 2, 2])
# Mutual information
mi = mutual_info_score(labels_a, labels_b)
assert_almost_equal(mi, 0.41022, 5)
# Expected mutual information
C = contingency_matrix(labels_a, labels_b)
n_samples = np.sum(C)
emi = expected_mutual_information(C, n_samples)
assert_almost_equal(emi, 0.15042, 5)
# Adjusted mutual information
ami = adjusted_mutual_info_score(labels_a, labels_b)
assert_almost_equal(ami, 0.27502, 5)
ami = adjusted_mutual_info_score([1, 1, 2, 2], [2, 2, 3, 3])
assert_equal(ami, 1.0)
# Test with a very large array
a110 = np.array([list(labels_a) * 110]).flatten()
b110 = np.array([list(labels_b) * 110]).flatten()
ami = adjusted_mutual_info_score(a110, b110)
# This is not accurate to more than 2 places
assert_almost_equal(ami, 0.37, 2)
def test_entropy():
ent = entropy([0, 0, 42.])
assert_almost_equal(ent, 0.6365141, 5)
assert_almost_equal(entropy([]), 1)
def test_contingency_matrix():
labels_a = np.array([1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3])
labels_b = np.array([1, 1, 1, 1, 2, 1, 2, 2, 2, 2, 3, 1, 3, 3, 3, 2, 2])
C = contingency_matrix(labels_a, labels_b)
C2 = np.histogram2d(labels_a, labels_b,
bins=(np.arange(1, 5),
np.arange(1, 5)))[0]
assert_array_almost_equal(C, C2)
C = contingency_matrix(labels_a, labels_b, eps=.1)
assert_array_almost_equal(C, C2 + .1)
def test_exactly_zero_info_score():
# Check numerical stability when information is exactly zero
for i in np.logspace(1, 4, 4).astype(np.int):
labels_a, labels_b = np.ones(i, dtype=np.int),\
np.arange(i, dtype=np.int)
assert_equal(normalized_mutual_info_score(labels_a, labels_b), 0.0)
assert_equal(v_measure_score(labels_a, labels_b), 0.0)
assert_equal(adjusted_mutual_info_score(labels_a, labels_b), 0.0)
assert_equal(normalized_mutual_info_score(labels_a, labels_b), 0.0)
def test_v_measure_and_mutual_information(seed=36):
# Check relation between v_measure, entropy and mutual information
for i in np.logspace(1, 4, 4).astype(np.int):
random_state = np.random.RandomState(seed)
labels_a, labels_b = random_state.random_integers(0, 10, i),\
random_state.random_integers(0, 10, i)
assert_almost_equal(v_measure_score(labels_a, labels_b),
2.0 * mutual_info_score(labels_a, labels_b) /
(entropy(labels_a) + entropy(labels_b)), 0)
| bsd-3-clause |
YuKitAs/tech-note | computer-vision/facial-keypoint-detection/data_load.py | 1 | 4719 | import os
import cv2
import matplotlib.image as mpimg
import numpy as np
import pandas as pd
import torch
from torch.utils.data import Dataset
class FacialKeypointsDataset(Dataset):
"""Face Landmarks dataset."""
def __init__(self, csv_file, root_dir, transform=None):
"""
Args:
csv_file (string): Path to the csv file with annotations.
root_dir (string): Directory with all the images.
transform (callable, optional): Optional transform to be applied
on a sample.
"""
self.key_pts_frame = pd.read_csv(csv_file)
self.root_dir = root_dir
self.transform = transform
def __len__(self):
return len(self.key_pts_frame)
def __getitem__(self, idx):
image_name = os.path.join(self.root_dir,
self.key_pts_frame.iloc[idx, 0])
image = mpimg.imread(image_name)
# if image has an alpha color channel, get rid of it
if (image.shape[2] == 4):
image = image[:, :, 0:3]
key_pts = pd.DataFrame(self.key_pts_frame.iloc[idx, 1:]).to_numpy()
key_pts = key_pts.astype('float').reshape(-1, 2)
sample = {'image': image, 'keypoints': key_pts}
if self.transform:
sample = self.transform(sample)
return sample
# tranforms
class Normalize(object):
"""Convert a color image to grayscale and normalize the color range to [0,1]."""
def __call__(self, sample):
image, key_pts = sample['image'], sample['keypoints']
image_copy = np.copy(image)
key_pts_copy = np.copy(key_pts)
# convert image to grayscale
image_copy = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
# scale color range from [0, 255] to [0, 1]
image_copy = image_copy / 255.0
# scale keypoints to be centered around 0 with a range of [-1, 1]
        # mean = 100, std = 50, so pts should be (pts - 100)/50
key_pts_copy = (key_pts_copy - 100) / 50.0
return {'image': image_copy, 'keypoints': key_pts_copy}
class Rescale(object):
"""Rescale the image in a sample to a given size.
Args:
output_size (tuple or int): Desired output size. If tuple, output is
matched to output_size. If int, smaller of image edges is matched
to output_size keeping aspect ratio the same.
"""
def __init__(self, output_size):
assert isinstance(output_size, (int, tuple))
self.output_size = output_size
def __call__(self, sample):
image, key_pts = sample['image'], sample['keypoints']
h, w = image.shape[:2]
if isinstance(self.output_size, int):
if h > w:
new_h, new_w = self.output_size * h / w, self.output_size
else:
new_h, new_w = self.output_size, self.output_size * w / h
else:
new_h, new_w = self.output_size
new_h, new_w = int(new_h), int(new_w)
img = cv2.resize(image, (new_w, new_h))
# scale the pts, too
key_pts = key_pts * [new_w / w, new_h / h]
return {'image': img, 'keypoints': key_pts}
class RandomCrop(object):
"""Crop randomly the image in a sample.
Args:
output_size (tuple or int): Desired output size. If int, square crop
is made.
"""
def __init__(self, output_size):
assert isinstance(output_size, (int, tuple))
if isinstance(output_size, int):
self.output_size = (output_size, output_size)
else:
assert len(output_size) == 2
self.output_size = output_size
def __call__(self, sample):
image, key_pts = sample['image'], sample['keypoints']
h, w = image.shape[:2]
new_h, new_w = self.output_size
top = np.random.randint(0, h - new_h)
left = np.random.randint(0, w - new_w)
image = image[top: top + new_h,
left: left + new_w]
key_pts = key_pts - [left, top]
return {'image': image, 'keypoints': key_pts}
class ToTensor(object):
"""Convert ndarrays in sample to Tensors."""
def __call__(self, sample):
image, key_pts = sample['image'], sample['keypoints']
# if image has no grayscale color channel, add one
if (len(image.shape) == 2):
# add that third color dim
image = image.reshape(image.shape[0], image.shape[1], 1)
# swap color axis because
# numpy image: H x W x C
# torch image: C X H X W
image = image.transpose((2, 0, 1))
return {'image': torch.from_numpy(image),
'keypoints': torch.from_numpy(key_pts)}
| gpl-3.0 |
drix00/pymcxray | pymcxray/examples/simulation_test_map.py | 1 | 10408 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
.. py:currentmodule:: pymcxray.examples.simulation_test_maps
:synopsis: Script to simulate mcxray maps for MM2017 with Nadi.
.. moduleauthor:: Hendrix Demers <[email protected]>
Script to simulate mcxray maps for MM2017 with Nadi.
"""
###############################################################################
# Copyright 2017 Hendrix Demers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
# Standard library modules.
import logging
import os.path
# Third party modules.
import matplotlib.pyplot as plt
import h5py
import numpy as np
# Local modules.
import pymcxray.mcxray as mcxray
import pymcxray.FileFormat.Results.XrayIntensities as XrayIntensities
import pymcxray.FileFormat.Results.XraySpectraSpecimenEmittedDetected as XraySpectraSpecimenEmittedDetected
import pymcxray.FileFormat.Results.ElectronResults as ElectronResults
import pymcxray.FileFormat.Results.XraySpectraRegionsEmitted as XraySpectraRegionsEmitted
from pymcxray.SimulationsParameters import SimulationsParameters, PARAMETER_INCIDENT_ENERGY_keV, PARAMETER_NUMBER_ELECTRONS, \
PARAMETER_BEAM_POSITION_nm, PARAMETER_NUMBER_XRAYS
import pymcxray.FileFormat.Specimen as Specimen
import pymcxray.FileFormat.Region as Region
import pymcxray.FileFormat.RegionType as RegionType
import pymcxray.FileFormat.RegionDimensions as RegionDimensions
import pymcxray.FileFormat.Element as Element
# Project modules.
from pymcxray import get_current_module_path, get_mcxray_program_name
# Globals and constants variables.
class SimulationTestMapsMM2017(mcxray._Simulations):
def _initData(self):
self.use_hdf5 = True
self.delete_result_files = False
self.createBackup = True
# Local variables for value and list if values.
energy_keV = 30.0
number_electrons = 10000
# number_xrays_list = [10, 20, 30, 50, 60, 100, 200, 500, 1000]
number_xrays_list = [10]
xs_nm = np.linspace(-5.0e3, 5.0e3, 3)
probePositions_nm = [tuple(position_nm) for position_nm in
np.transpose([np.tile(xs_nm, len(xs_nm)), np.repeat(xs_nm, len(xs_nm))]).tolist()]
# Simulation parameters
self._simulationsParameters = SimulationsParameters()
self._simulationsParameters.addVaried(PARAMETER_NUMBER_XRAYS, number_xrays_list)
self._simulationsParameters.addVaried(PARAMETER_BEAM_POSITION_nm, probePositions_nm)
self._simulationsParameters.addFixed(PARAMETER_INCIDENT_ENERGY_keV, energy_keV)
self._simulationsParameters.addFixed(PARAMETER_NUMBER_ELECTRONS, number_electrons)
def getAnalysisName(self):
return "SimulationTestMapsMM2017"
def createSpecimen(self, parameters):
specimen = Specimen.Specimen()
specimen.name = "Maps01"
specimen.numberRegions = 10
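        # Region 0 is the empty background box covering the whole specimen; regions 1-9
        # form a 3 x 3 grid of thin surface boxes, each a matrix doped with 1%, 2% or 5%
        # (mass fraction) of a trace element.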
# Region 0
region = Region.Region()
region.numberElements = 0
region.regionType = RegionType.REGION_TYPE_BOX
parameters = [-10000000000.0, 10000000000.0, -10000000000.0, 10000000000.0, 0.0, 20000000000.0]
region.regionDimensions = RegionDimensions.RegionDimensionsBox(parameters)
specimen.regions.append(region)
# Region 1
region = Region.Region()
region.numberElements = 2
region.elements = [Element.Element(27, massFraction=0.01), Element.Element(26, massFraction=0.99)]
region.regionType = RegionType.REGION_TYPE_BOX
parameters = [-7.5e4, -2.5e4, -7.5e4, -2.5e4, 0.0, 0.2e4]
region.regionDimensions = RegionDimensions.RegionDimensionsBox(parameters)
specimen.regions.append(region)
# Region 2
region = Region.Region()
region.numberElements = 2
region.elements = [Element.Element(27, massFraction=0.02), Element.Element(26, massFraction=0.98)]
region.regionType = RegionType.REGION_TYPE_BOX
parameters = [-2.5e4, 2.5e4, -7.5e4, -2.5e4, 0.0, 0.2e4]
region.regionDimensions = RegionDimensions.RegionDimensionsBox(parameters)
specimen.regions.append(region)
# Region 3
region = Region.Region()
region.numberElements = 2
region.elements = [Element.Element(27, massFraction=0.05), Element.Element(26, massFraction=0.95)]
region.regionType = RegionType.REGION_TYPE_BOX
parameters = [2.5e4, 7.5e4, -7.5e4, -2.5e4, 0.0, 0.2e4]
region.regionDimensions = RegionDimensions.RegionDimensionsBox(parameters)
specimen.regions.append(region)
# Region 4
region = Region.Region()
region.numberElements = 2
region.elements = [Element.Element(28, massFraction=0.01), Element.Element(27, massFraction=0.99)]
region.regionType = RegionType.REGION_TYPE_BOX
parameters = [-7.5e4, -2.5e4, -2.5e4, 2.5e4, 0.0, 0.2e4]
region.regionDimensions = RegionDimensions.RegionDimensionsBox(parameters)
specimen.regions.append(region)
# Region 5
region = Region.Region()
region.numberElements = 2
region.elements = [Element.Element(28, massFraction=0.02), Element.Element(27, massFraction=0.98)]
region.regionType = RegionType.REGION_TYPE_BOX
parameters = [-2.5e4, 2.5e4, -2.5e4, 2.5e4, 0.0, 0.2e4]
region.regionDimensions = RegionDimensions.RegionDimensionsBox(parameters)
specimen.regions.append(region)
# Region 6
region = Region.Region()
region.numberElements = 2
region.elements = [Element.Element(28, massFraction=0.05), Element.Element(27, massFraction=0.95)]
region.regionType = RegionType.REGION_TYPE_BOX
parameters = [2.5e4, 7.5e4, -2.5e4, 2.5e4, 0.0, 0.2e4]
region.regionDimensions = RegionDimensions.RegionDimensionsBox(parameters)
specimen.regions.append(region)
# Region 7
region = Region.Region()
region.numberElements = 3
region.elements = [Element.Element(27, massFraction=0.01), Element.Element(26, massFraction=0.495), Element.Element(28, massFraction=0.495)]
region.regionType = RegionType.REGION_TYPE_BOX
parameters = [-7.5e4, -2.5e4, 2.5e4, 7.5e4, 0.0, 0.2e4]
region.regionDimensions = RegionDimensions.RegionDimensionsBox(parameters)
specimen.regions.append(region)
# Region 8
region = Region.Region()
region.numberElements = 3
region.elements = [Element.Element(27, massFraction=0.02), Element.Element(26, massFraction=0.49), Element.Element(28, massFraction=0.49)]
region.regionType = RegionType.REGION_TYPE_BOX
parameters = [-2.5e4, 2.5e4, 2.5e4, 7.5e4, 0.0, 0.2e4]
region.regionDimensions = RegionDimensions.RegionDimensionsBox(parameters)
specimen.regions.append(region)
# Region 9
region = Region.Region()
region.numberElements = 3
region.elements = [Element.Element(27, massFraction=0.05), Element.Element(26, massFraction=0.475), Element.Element(28, massFraction=0.475)]
region.regionType = RegionType.REGION_TYPE_BOX
parameters = [2.5e4, 7.5e4, 2.5e4, 7.5e4, 0.0, 0.2e4]
region.regionDimensions = RegionDimensions.RegionDimensionsBox(parameters)
specimen.regions.append(region)
return specimen
def read_one_results_hdf5(self, simulation, hdf5_group):
electronResults = ElectronResults.ElectronResults()
electronResults.path = self.getSimulationsPath()
electronResults.basename = simulation.resultsBasename
electronResults.read()
electronResults.write_hdf5(hdf5_group)
xrayIntensities = XrayIntensities.XrayIntensities()
xrayIntensities.path = self.getSimulationsPath()
xrayIntensities.basename = simulation.resultsBasename
xrayIntensities.read()
xrayIntensities.write_hdf5(hdf5_group)
spectrum = XraySpectraRegionsEmitted.XraySpectraRegionsEmitted()
spectrum.path = self.getSimulationsPath()
spectrum.basename = simulation.resultsBasename
spectrum.read()
spectrum.write_hdf5(hdf5_group)
spectrum = XraySpectraSpecimenEmittedDetected.XraySpectraSpecimenEmittedDetected()
spectrum.path = self.getSimulationsPath()
spectrum.basename = simulation.resultsBasename
spectrum.read()
spectrum.write_hdf5(hdf5_group)
def analyze_results_hdf5(self): #pragma: no cover
self.readResults()
file_path = self.get_hdf5_file_path()
with h5py.File(file_path, 'r', driver='core') as hdf5_file:
hdf5_group = self.get_hdf5_group(hdf5_file)
logging.info(hdf5_group.name)
def run():
# import the batch file class.
from pymcxray.BatchFileConsole import BatchFileConsole
# Find the configuration file path
configuration_file_path = get_current_module_path(__file__, "MCXRay_latest.cfg")
program_name = get_mcxray_program_name(configuration_file_path)
# Create the batch file object.
batch_file = BatchFileConsole("BatchSimulationTestMapsMM2017", program_name, numberFiles=6)
# Create the simulation object and add the batch file object to it.
analyze = SimulationTestMapsMM2017(relativePath=r"mcxray/SimulationTestMapsMM2017",
configurationFilepath=configuration_file_path)
analyze.run(batch_file)
if __name__ == '__main__': #pragma: no cover
import sys
logging.getLogger().setLevel(logging.INFO)
logging.info(sys.argv)
if len(sys.argv) == 1:
sys.argv.append(mcxray.ANALYZE_TYPE_GENERATE_INPUT_FILE)
#sys.argv.append(mcxray.ANALYZE_TYPE_CHECK_PROGRESS)
#sys.argv.append(mcxray.ANALYZE_TYPE_ANALYZE_RESULTS)
#sys.argv.append(mcxray.ANALYZE_TYPE_ANALYZE_SCHEDULED_READ)
run()
| apache-2.0 |
boland1992/seissuite_iran | build/lib/seissuite/azimuth/heatinterpolate.py | 8 | 3647 | #!/usr/bin/env python
# combining density estimation and delaunay interpolation for confidence-weighted value mapping
# Dan Stowell, April 2013
import numpy as np
from numpy import random
from math import exp, log
from scipy import stats, mgrid, c_, reshape, rot90
import matplotlib.delaunay
import matplotlib.tri as tri
import matplotlib.delaunay.interpolate
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from colorsys import hls_to_rgb
#############################
# user settings
n = 100
gridsize = 100
fontsize = 'xx-small'
#############################
# first generate some random [x,y,z] data -- random locations but closest to the middle, and random z-values
data = random.randn(3, n) * 100.
# we will add some correlation to the z-values
data[2,:] += data[1,:]
data[2,:] += data[0,:]
# scale the z-values to 0--1 for convenience
zmin = np.min(data[2,:])
zmax = np.max(data[2,:])
data[2,:] = (data[2,:] - zmin) / (zmax - zmin)
xmin = np.min(data[0,:])
xmax = np.max(data[0,:])
ymin = np.min(data[1,:])
ymax = np.max(data[1,:])
zmin = np.min(data[2,:])
zmax = np.max(data[2,:])
##################################################
# plot it simply
plt.figure()
fig = plt.subplot(2,2,1)
for datum in data.T:
plt.plot(datum[0], datum[1], 'x', color=str(1.0 - datum[2]))
plt.title("scatter", fontsize=fontsize)
plt.xticks(fontsize=fontsize)
plt.yticks(fontsize=fontsize)
##################################################
# now make a KDE of it and plot that
fig = plt.subplot(2,2,2)
kdeX, kdeY = mgrid[xmin:xmax:gridsize*1j, ymin:ymax:gridsize*1j]
positions = c_[kdeX.ravel(), kdeY.ravel()]
values = c_[data[0,:], data[1,:]]
kernel = stats.kde.gaussian_kde(values.T)
kdeZ = reshape(kernel(positions.T).T, kdeX.T.shape)
plt.imshow(rot90(kdeZ), cmap=cm.binary, aspect='auto')
plt.title("density of points", fontsize=fontsize)
plt.xticks(fontsize=fontsize)
plt.yticks(fontsize=fontsize)
##################################################
# now make a delaunay triangulation of it and plot that
fig = plt.subplot(2,2,3)
tt = matplotlib.delaunay.triangulate.Triangulation(data[0,:], data[1,:])
#triang = tri.Triangulation(data[0,:], data[1,:])
#plt.triplot(triang, 'bo-') # this plots the actual triangles of the triangulation. I'm more interested in their interpolated values
#extrap = tt.linear_extrapolator(data[2,:])
extrap = tt.nn_extrapolator(data[2,:])
interped = extrap[xmin:xmax:gridsize*1j, ymin:ymax:gridsize*1j]
plt.imshow(rot90(interped), cmap=cm.gist_earth_r, aspect='auto')
plt.title("interpolated values", fontsize=fontsize)
plt.xticks(fontsize=fontsize)
plt.yticks(fontsize=fontsize)
##################################################
# now combine delaunay with KDE
fig = plt.subplot(2,2,4)
colours = np.zeros((gridsize, gridsize, 4))
kdeZmin = np.min(kdeZ)
kdeZmax = np.max(kdeZ)
confdepth = 0.45
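# Fade each interpolated colour toward white where the local point density (confidence)
# is low, so sparsely sampled areas appear washed out.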
for x in range(gridsize):
for y in range(gridsize):
conf = (kdeZ[x,y] - kdeZmin) / (kdeZmax - kdeZmin)
val = min(1., max(0., interped[x,y]))
colour = list(cm.gist_earth_r(val))
# now fade it out to white according to conf
for index in [0,1,2]:
colour[index] = (colour[index] * conf) + (1.0 * (1. -conf))
colours[x,y,:] = colour
#colours[x,y,:] = np.hstack((hls_to_rgb(val, 0.5 + confdepth - (confdepth * conf), 1.0), 1.0))
#colours[x,y,:] = [conf, conf, 1.0-conf, val]
plt.imshow(rot90(colours), cmap=cm.gist_earth_r, aspect='auto')
plt.title("interpolated & confidence-shaded", fontsize=fontsize)
plt.xticks(fontsize=fontsize)
plt.yticks(fontsize=fontsize)
############################################
plt.savefig("output/plot_heati_simple.pdf", papertype='A4', format='pdf')
| gpl-3.0 |
timqian/sms-tools | lectures/3-Fourier-properties/plots-code/shift.py | 26 | 1223 | import matplotlib.pyplot as plt
import numpy as np
import sys
from scipy.signal import sawtooth
sys.path.append('../../../software/models/')
import dftModel as DF
N = 128
x1 = sawtooth(2*np.pi*np.arange(-N/2,N/2)/float(N))
x2 = sawtooth(2*np.pi*np.arange(-N/2-2,N/2-2)/float(N))
mX1, pX1 = DF.dftAnal(x1, np.ones(N), N)
mX2, pX2 = DF.dftAnal(x2, np.ones(N), N)
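# DFT shift property: a circular time shift leaves the magnitude spectrum unchanged
# and only adds a linear term to the phase spectrum.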
plt.figure(1, figsize=(9.5, 7))
plt.subplot(321)
plt.title('x1=x[n]')
plt.plot(np.arange(-N/2, N/2, 1.0), x1, lw=1.5)
plt.axis([-N/2, N/2, -1, 1])
plt.subplot(322)
plt.title('x2=x[n-2]')
plt.plot(np.arange(-N/2, N/2, 1.0), x2, lw=1.5)
plt.axis([-N/2, N/2, -1, 1])
plt.subplot(323)
plt.title('mX1')
plt.plot(np.arange(0, mX1.size, 1.0), mX1, 'r', lw=1.5)
plt.axis([0,mX1.size,min(mX1),max(mX1)])
plt.subplot(324)
plt.title('mX2')
plt.plot(np.arange(0, mX2.size, 1.0), mX2, 'r', lw=1.5)
plt.axis([0,mX2.size,min(mX2),max(mX2)])
plt.subplot(325)
plt.title('pX1')
plt.plot(np.arange(0, pX1.size, 1.0), pX1, 'c', lw=1.5)
plt.axis([0,pX1.size,min(pX1),max(pX2)])
plt.subplot(326)
plt.title('pX2')
plt.plot(np.arange(0, pX2.size, 1.0), pX2, 'c', lw=1.5)
plt.axis([0,pX2.size,min(pX2),max(pX2)])
plt.tight_layout()
plt.savefig('shift.png')
plt.show()
| agpl-3.0 |
NelisVerhoef/scikit-learn | sklearn/linear_model/tests/test_perceptron.py | 378 | 1815 | import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_raises
from sklearn.utils import check_random_state
from sklearn.datasets import load_iris
from sklearn.linear_model import Perceptron
iris = load_iris()
random_state = check_random_state(12)
indices = np.arange(iris.data.shape[0])
random_state.shuffle(indices)
X = iris.data[indices]
y = iris.target[indices]
X_csr = sp.csr_matrix(X)
X_csr.sort_indices()
class MyPerceptron(object):
def __init__(self, n_iter=1):
self.n_iter = n_iter
def fit(self, X, y):
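        # Classic perceptron rule: for every misclassified sample, move the weights
        # and bias toward the true label.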
n_samples, n_features = X.shape
self.w = np.zeros(n_features, dtype=np.float64)
self.b = 0.0
for t in range(self.n_iter):
for i in range(n_samples):
if self.predict(X[i])[0] != y[i]:
self.w += y[i] * X[i]
self.b += y[i]
def project(self, X):
return np.dot(X, self.w) + self.b
def predict(self, X):
X = np.atleast_2d(X)
return np.sign(self.project(X))
def test_perceptron_accuracy():
for data in (X, X_csr):
clf = Perceptron(n_iter=30, shuffle=False)
clf.fit(data, y)
score = clf.score(data, y)
assert_true(score >= 0.7)
def test_perceptron_correctness():
y_bin = y.copy()
y_bin[y != 1] = -1
clf1 = MyPerceptron(n_iter=2)
clf1.fit(X, y_bin)
clf2 = Perceptron(n_iter=2, shuffle=False)
clf2.fit(X, y_bin)
assert_array_almost_equal(clf1.w, clf2.coef_.ravel())
def test_undefined_methods():
clf = Perceptron()
for meth in ("predict_proba", "predict_log_proba"):
assert_raises(AttributeError, lambda x: getattr(clf, x), meth)
| bsd-3-clause |
danlwo/Hadoop-Spark-Python-Log-Parser | def_400_visualizations/def_400_pickle2CSV_geoCount/def_400_pickle2CSV_geoCount.py | 1 | 4737 | # Christina CJ Chen & Dan Lwo from Logitech
# python 2.7.10 $ spark-2.1.1-bin-hadoop2.7
# --- CAUTION! SAFETY RISK! ---
import credencialInfo
# --- Make sure env. variables set! ---
import findspark
findspark.init()
print '--- [INFO] FINDSPARK session successfully FINISHED. ---'
# --- Improt necessary modules ---
import boto3
import csv
import geoip2.database
import gzip
import itertools
import logging
import numpy as np
import os
import pandas as pd
import pickle
import smtplib
import sys
import traceback
from operator import add
from os.path import expanduser
from pyspark import SparkContext, SparkConf
from pyspark import SparkFiles
# --- Functions for save / load pickle files ---
def loadPickle(obj):
with open(obj) as f:
pickleContent = pickle.load(f)
return pickleContent
def savePickle(filename, save_obj):
with open(filename+'.pickle','w') as f:
pickle.dump(save_obj,f)
# --- Walk through S3 bucket for needed source data file ---
def awsS3FileWalker(srcDataBucket,year,month):
s3 = boto3.resource('s3')
bucket = s3.Bucket(srcDataBucket)
executionTimeRange = year+'_'+month
for obj in bucket.objects.filter(Prefix = executionTimeRange):
# print '--- [INFO] Find: ',obj
srcFileCollection.append(obj.key)
return srcFileCollection
# --- Group source data files by hour and sort ---
def fileGrouper(srcFileCollection):
lines = sc.parallelize(srcFileCollection)
groupedFileList = lines.map(lambda x:(x[:10],x)).groupByKey().map(lambda x:(x[0], list(x[1]))).sortBy(lambda x:x[0]).collect()
return groupedFileList
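# Illustrative example of the grouping (key names are hypothetical): keys such as
# '2016_06_01_00.pickle' and '2016_06_01_01.pickle' share their first 10 characters,
# so the result looks like [('2016_06_01', ['2016_06_01_00.pickle', ...]), ...] sorted by date.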
def list_concat(param_list):
return sc.parallelize(list(itertools.chain.from_iterable(param_list)))
def canaryMail(errMsg, sender, senderpwd, receiver):
server = smtplib.SMTP('smtp.gmail.com', 587)
server.starttls()
server.login(sender,senderpwd)
msg = errMsg
server.sendmail(sender,receiver,msg)
server.quit()
# --- Functions for save / load files to / from AWS S3---
def s3Uploader(outputbucket,filename):
s3 = boto3.resource('s3')
    print '--- [INFO] Now uploading:',filename,'to',outputbucket,'! ---'
    s3.meta.client.upload_file(filename,outputbucket,filename)
    print '--- [INFO] Now deleting:',filename
os.remove(filename)
def s3Downloader(srcDataBucket,s3_obj_key,download_obj_filename):
s3 = boto3.resource('s3')
bucket = s3.Bucket(srcDataBucket)
bucket.download_file(s3_obj_key,download_obj_filename)
if __name__ == "__main__":
# --- Start the main program here ---
# --- Set global variables ---
year = str(sys.argv[1])
month = str(sys.argv[2])
keyID = credencialInfo.keyID()
key = credencialInfo.key()
srcDataBucket = credencialInfo.srcDataBucket()
outputbucket = credencialInfo.outputbucket()
sender = credencialInfo.sender()
senderpwd = credencialInfo.senderpwd()
receiver = credencialInfo.receiver()
sc = SparkContext.getOrCreate()
    print '--- [INFO] Execution parameters successfully set! ---'
# --- Locate & group & sort source data files ---
srcFileCollection = []
groupedFileList = []
    print '--- [INFO] Now retrieving source data files from s3://',srcDataBucket,' ---'
    srcFileCollection = awsS3FileWalker(srcDataBucket, year, month)
    print '--- [INFO] Successfully retrieved file list of',month,'in',year,'from',srcDataBucket,' ---'
    print '--- [INFO] Now regrouping and sorting......'
    groupedFileList = fileGrouper(srcFileCollection)
    print '--- [INFO] Successfully regrouped and sorted file list of',month,'in',year,' ---'
# --- Core loop here ---
date_country_count_collection = []
for date, hour in groupedFileList:
everymonth_day = date[8:10]
list_container = []
for h in hour:
            print '--- [INFO] Now processing:',h
s3Downloader(srcDataBucket,h,h)
preRenderPickle = loadPickle(h)
f = sc.parallelize(preRenderPickle).collect()
list_container.append(f)
os.remove(h)
rdd_list = list_concat(list_container)
output = rdd_list.reduceByKey(add).sortBy(lambda x: -x[1]).collect()
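        # Each hourly pickle is assumed to hold (country, count) pairs; reduceByKey(add)
        # sums the counts per country across the day and sortBy orders them from most
        # to least frequent before collecting the result back to the driver.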
date_country_count_collection.append([year,month,everymonth_day,output])
# --- Write & save & upload & wipe result .csv file ---
df = pd.DataFrame(date_country_count_collection)
filename = year+'_'+month+'_geoTransList.csv'
df.to_csv(filename,index=False,header=['year','month','day','list'])
s3Uploader(outputbucket,filename)
# --- Send ending msg ---
completeMsg = '--- [INFO] Session successfully executed: '+str(year)+' '+str(month)+' def_400_pickle2CSV_geoCount ---'
canaryMail(completeMsg,sender,senderpwd,receiver)
print completeMsg
| mit |
pprett/scikit-learn | benchmarks/bench_plot_fastkmeans.py | 72 | 4667 | from __future__ import print_function
from collections import defaultdict
from time import time
import six
import numpy as np
from numpy import random as nr
from sklearn.cluster.k_means_ import KMeans, MiniBatchKMeans
def compute_bench(samples_range, features_range):
it = 0
results = defaultdict(lambda: [])
chunk = 100
max_it = len(samples_range) * len(features_range)
for n_samples in samples_range:
for n_features in features_range:
it += 1
print('==============================')
print('Iteration %03d of %03d' % (it, max_it))
print('==============================')
print()
data = nr.randint(-50, 51, (n_samples, n_features))
print('K-Means')
tstart = time()
kmeans = KMeans(init='k-means++', n_clusters=10).fit(data)
delta = time() - tstart
print("Speed: %0.3fs" % delta)
print("Inertia: %0.5f" % kmeans.inertia_)
print()
results['kmeans_speed'].append(delta)
results['kmeans_quality'].append(kmeans.inertia_)
print('Fast K-Means')
# let's prepare the data in small chunks
mbkmeans = MiniBatchKMeans(init='k-means++',
n_clusters=10,
batch_size=chunk)
tstart = time()
mbkmeans.fit(data)
delta = time() - tstart
print("Speed: %0.3fs" % delta)
print("Inertia: %f" % mbkmeans.inertia_)
print()
print()
results['MiniBatchKMeans Speed'].append(delta)
results['MiniBatchKMeans Quality'].append(mbkmeans.inertia_)
return results
def compute_bench_2(chunks):
results = defaultdict(lambda: [])
n_features = 50000
means = np.array([[1, 1], [-1, -1], [1, -1], [-1, 1],
[0.5, 0.5], [0.75, -0.5], [-1, 0.75], [1, 0]])
X = np.empty((0, 2))
for i in range(8):
X = np.r_[X, means[i] + 0.8 * np.random.randn(n_features, 2)]
max_it = len(chunks)
it = 0
for chunk in chunks:
it += 1
print('==============================')
print('Iteration %03d of %03d' % (it, max_it))
print('==============================')
print()
print('Fast K-Means')
tstart = time()
mbkmeans = MiniBatchKMeans(init='k-means++',
n_clusters=8,
batch_size=chunk)
mbkmeans.fit(X)
delta = time() - tstart
print("Speed: %0.3fs" % delta)
print("Inertia: %0.3fs" % mbkmeans.inertia_)
print()
results['MiniBatchKMeans Speed'].append(delta)
results['MiniBatchKMeans Quality'].append(mbkmeans.inertia_)
return results
if __name__ == '__main__':
from mpl_toolkits.mplot3d import axes3d # register the 3d projection
import matplotlib.pyplot as plt
samples_range = np.linspace(50, 150, 5).astype(np.int)
features_range = np.linspace(150, 50000, 5).astype(np.int)
chunks = np.linspace(500, 10000, 15).astype(np.int)
results = compute_bench(samples_range, features_range)
results_2 = compute_bench_2(chunks)
max_time = max([max(i) for i in [t for (label, t) in six.iteritems(results)
if "speed" in label]])
max_inertia = max([max(i) for i in [
t for (label, t) in six.iteritems(results)
if "speed" not in label]])
fig = plt.figure('scikit-learn K-Means benchmark results')
for c, (label, timings) in zip('brcy',
sorted(six.iteritems(results))):
if 'speed' in label:
ax = fig.add_subplot(2, 2, 1, projection='3d')
ax.set_zlim3d(0.0, max_time * 1.1)
else:
ax = fig.add_subplot(2, 2, 2, projection='3d')
ax.set_zlim3d(0.0, max_inertia * 1.1)
X, Y = np.meshgrid(samples_range, features_range)
Z = np.asarray(timings).reshape(samples_range.shape[0],
features_range.shape[0])
ax.plot_surface(X, Y, Z.T, cstride=1, rstride=1, color=c, alpha=0.5)
ax.set_xlabel('n_samples')
ax.set_ylabel('n_features')
i = 0
for c, (label, timings) in zip('br',
sorted(six.iteritems(results_2))):
i += 1
ax = fig.add_subplot(2, 2, i + 2)
y = np.asarray(timings)
ax.plot(chunks, y, color=c, alpha=0.8)
ax.set_xlabel('Chunks')
ax.set_ylabel(label)
plt.show()
| bsd-3-clause |
vdrhtc/Measurement-automation | lib2/DispersiveRamseyFringes.py | 1 | 1912 | from matplotlib import pyplot as plt, colorbar
from lib2.VNATimeResolvedDispersiveMeasurement2D import *
class DispersiveRamseyFringes(VNATimeResolvedDispersiveMeasurement2D):
def __init__(self, name, sample_name, **devs_aliases_map):
devs_aliases_map["q_z_awg"] = None
super().__init__(name, sample_name, devs_aliases_map)
self._measurement_result = \
DispersiveRamseyFringesResult(name, sample_name)
self._sequence_generator = IQPulseBuilder.build_dispersive_ramsey_sequences
def set_fixed_parameters(self, pulse_sequence_parameters, **dev_params):
super().set_fixed_parameters(pulse_sequence_parameters, **dev_params)
def set_swept_parameters(self, ramsey_delays, excitation_freqs):
q_if_frequency = self._q_awg.get_calibration() \
.get_radiation_parameters()["if_frequency"]
swept_pars = {"ramsey_delay": \
(self._output_pulse_sequence,
ramsey_delays),
"excitation_frequency":
(lambda x: self._exc_iqvg.set_frequency(x + q_if_frequency),
excitation_freqs)}
super().set_swept_parameters(**swept_pars)
def _output_pulse_sequence(self, ramsey_delay):
self._pulse_sequence_parameters["ramsey_delay"] = ramsey_delay
super()._output_pulse_sequence()
class DispersiveRamseyFringesResult(VNATimeResolvedDispersiveMeasurement2DResult):
def _prepare_data_for_plot(self, data):
return data["excitation_frequency"] / 1e9, \
data["ramsey_delay"] / 1e3, \
data["data"]
def _annotate_axes(self, axes):
axes[0].set_ylabel("Ramsey delay [$\mu$s]")
axes[-2].set_ylabel("Ramsey delay [$\mu$s]")
axes[-1].set_xlabel("Excitation if_freq [GHz]")
axes[-2].set_xlabel("Excitation if_freq [GHz]")
| gpl-3.0 |
sauloal/cnidaria | scripts/venv/lib/python2.7/site-packages/cogent/draw/arrow_rates.py | 1 | 9507 | #!/usr/bin/env python
from matplotlib import use, rc
use('Agg') #suppress graphical rendering
from pylab import rc, gcf, xlim, ylim, xticks, yticks, sqrt, text, clip, gca, \
array, dot, ravel, draw, show, savefig
from fancy_arrow import arrow
"""Draws arrow plots representing rate matrices.
Note: currently requires dict of dinuc freqs, but should modify to work
with Rates objects from seqsim.
Based on graphical displays by Noboru Sueoka.
"""
__author__ = "Rob Knight"
__copyright__ = "Copyright 2007-2012, The Cogent Project"
__credits__ = ["Rob Knight"]
__license__ = "GPL"
__version__ = "1.5.3"
__maintainer__ = "Rob Knight"
__email__ = "[email protected]"
__status__ = "Production"
rc('text', usetex=True)
rates_to_bases={'r1':'AT', 'r2':'TA', 'r3':'GA','r4':'AG','r5':'CA','r6':'AC', \
'r7':'GT', 'r8':'TG', 'r9':'CT','r10':'TC','r11':'GC','r12':'CG'}
numbered_bases_to_rates = dict([(v,k) for k, v in rates_to_bases.items()])
lettered_bases_to_rates = dict([(v, 'r'+v) for k, v in rates_to_bases.items()])
def add_dicts(d1, d2):
"""Adds two dicts and returns the result."""
result = d1.copy()
result.update(d2)
return result
def make_arrow_plot(data, size=4, display='length', shape='right', \
max_arrow_width=0.03, arrow_sep = 0.02, alpha=0.5, \
normalize_data=False, ec=None, labelcolor=None, \
head_starts_at_zero=True, rate_labels=lettered_bases_to_rates,\
graph_name=None, \
**kwargs):
"""Makes an arrow plot.
Parameters:
data: dict with probabilities for the bases and pair transitions.
size: size of the graph in inches.
display: 'length', 'width', or 'alpha' for arrow property to change.
shape: 'full', 'left', or 'right' for full or half arrows.
max_arrow_width: maximum width of an arrow, data coordinates.
arrow_sep: separation between arrows in a pair, data coordinates.
    alpha: maximum opacity of arrows, default 0.5.
**kwargs can be anything allowed by a Arrow object, e.g.
linewidth and edgecolor.
"""
xlim(-0.5,1.5)
ylim(-0.5,1.5)
gcf().set_size_inches(size,size)
xticks([])
yticks([])
max_text_size = size*12
min_text_size = size
label_text_size = size*2.5
text_params={'ha':'center', 'va':'center', 'family':'sans-serif',\
'fontweight':'bold'}
r2 = sqrt(2)
deltas = {\
'AT':(1,0),
'TA':(-1,0),
'GA':(0,1),
'AG':(0,-1),
'CA':(-1/r2, 1/r2),
'AC':(1/r2, -1/r2),
'GT':(1/r2, 1/r2),
'TG':(-1/r2,-1/r2),
'CT':(0,1),
'TC':(0,-1),
'GC':(1,0),
'CG':(-1,0)
}
colors = {\
'AT':'r',
'TA':'k',
'GA':'g',
'AG':'r',
'CA':'b',
'AC':'r',
'GT':'g',
'TG':'k',
'CT':'b',
'TC':'k',
'GC':'g',
'CG':'b'
}
label_positions = {\
'AT':'center',
'TA':'center',
'GA':'center',
'AG':'center',
'CA':'left',
'AC':'left',
'GT':'left',
'TG':'left',
'CT':'center',
'TC':'center',
'GC':'center',
'CG':'center'
}
def do_fontsize(k):
return float(clip(max_text_size*sqrt(data[k]),\
min_text_size,max_text_size))
A = text(0,1, '$A_3$', color='r', size=do_fontsize('A'), **text_params)
T = text(1,1, '$T_3$', color='k', size=do_fontsize('T'), **text_params)
G = text(0,0, '$G_3$', color='g', size=do_fontsize('G'), **text_params)
C = text(1,0, '$C_3$', color='b', size=do_fontsize('C'), **text_params)
arrow_h_offset = 0.25 #data coordinates, empirically determined
max_arrow_length = 1 - 2*arrow_h_offset
max_arrow_width = max_arrow_width
max_head_width = 2.5*max_arrow_width
max_head_length = 2*max_arrow_width
arrow_params={'length_includes_head':True, 'shape':shape, \
'head_starts_at_zero':head_starts_at_zero}
ax = gca()
sf = 0.6 #max arrow size represents this in data coords
d = (r2/2 + arrow_h_offset - 0.5)/r2 #distance for diags
r2v = arrow_sep/r2 #offset for diags
#tuple of x, y for start position
positions = {\
'AT': (arrow_h_offset, 1+arrow_sep),
'TA': (1-arrow_h_offset, 1-arrow_sep),
'GA': (-arrow_sep, arrow_h_offset),
'AG': (arrow_sep, 1-arrow_h_offset),
'CA': (1-d-r2v, d-r2v),
'AC': (d+r2v, 1-d+r2v),
'GT': (d-r2v, d+r2v),
'TG': (1-d+r2v, 1-d-r2v),
'CT': (1-arrow_sep, arrow_h_offset),
'TC': (1+arrow_sep, 1-arrow_h_offset),
'GC': (arrow_h_offset, arrow_sep),
'CG': (1-arrow_h_offset, -arrow_sep),
}
if normalize_data:
#find maximum value for rates, i.e. where keys are 2 chars long
max_val = 0
for k, v in data.items():
if len(k) == 2:
max_val = max(max_val, v)
#divide rates by max val, multiply by arrow scale factor
for k, v in data.items():
data[k] = v/max_val*sf
def draw_arrow(pair, alpha=alpha, ec=ec, labelcolor=labelcolor):
#set the length of the arrow
if display == 'length':
length = max_head_length+(max_arrow_length-max_head_length)*\
data[pair]/sf
else:
length = max_arrow_length
#set the transparency of the arrow
        if display == 'alpha':
alpha = min(data[pair]/sf, alpha)
else:
alpha=alpha
#set the width of the arrow
if display == 'width':
scale = data[pair]/sf
width = max_arrow_width*scale
head_width = max_head_width*scale
head_length = max_head_length*scale
else:
width = max_arrow_width
head_width = max_head_width
head_length = max_head_length
fc = colors[pair]
ec = ec or fc
x_scale, y_scale = deltas[pair]
x_pos, y_pos = positions[pair]
arrow(ax, x_pos, y_pos, x_scale*length, y_scale*length, \
fc=fc, ec=ec, alpha=alpha, width=width, head_width=head_width, \
head_length=head_length, **arrow_params)
#figure out coordinates for text
#if drawing relative to base: x and y are same as for arrow
#dx and dy are one arrow width left and up
#need to rotate based on direction of arrow, use x_scale and y_scale
#as sin x and cos x?
sx, cx = y_scale, x_scale
alo = arrow_label_offset = 3.5*max_arrow_width
where = label_positions[pair]
if where == 'left':
orig_position = array([[alo, alo]])
elif where == 'absolute':
orig_position = array([[max_arrow_length/2.0, alo]])
elif where == 'right':
orig_position = array([[length-alo, alo]])
elif where == 'center':
orig_position = array([[length/2.0, alo]])
else:
raise ValueError, "Got unknown position parameter %s" % where
M = array([[cx, sx],[-sx,cx]])
coords = dot(orig_position, M) + [[x_pos, y_pos]]
x, y = ravel(coords)
orig_label = rate_labels[pair]
label = '$%s_{_{\mathrm{%s}}}$' % (orig_label[0], orig_label[1:])
text(x, y, label, size=label_text_size, ha='center', va='center', \
color=labelcolor or fc)
for p in positions.keys():
draw_arrow(p)
if graph_name is not None:
savefig(graph_name)
#test data
all_on_max = dict([(i, 1) for i in 'TCAG'] + \
[(i+j, 0.6) for i in 'TCAG' for j in 'TCAG'])
realistic_data = {
'A':0.4,
'T':0.3,
'G':0.5,
'C':0.2,
'AT':0.4,
'AC':0.3,
'AG':0.2,
'TA':0.2,
'TC':0.3,
'TG':0.4,
'CT':0.2,
'CG':0.3,
'CA':0.2,
'GA':0.1,
'GT':0.4,
'GC':0.1,
}
extreme_data = {
'A':0.75,
'T':0.10,
'G':0.10,
'C':0.05,
'AT':0.6,
'AC':0.3,
'AG':0.1,
'TA':0.02,
'TC':0.3,
'TG':0.01,
'CT':0.2,
'CG':0.5,
'CA':0.2,
'GA':0.1,
'GT':0.4,
'GC':0.2,
}
sample_data = {
'A':0.2137,
'T':0.3541,
'G':0.1946,
'C':0.2376,
'AT':0.0228,
'AC':0.0684,
'AG':0.2056,
'TA':0.0315,
'TC':0.0629,
'TG':0.0315,
'CT':0.1355,
'CG':0.0401,
'CA':0.0703,
'GA':0.1824,
'GT':0.0387,
'GC':0.1106,
}
if __name__ == '__main__':
from sys import argv
if len(argv) > 1:
if argv[1] == 'full':
d = all_on_max
scaled = False
elif argv[1] == 'extreme':
d = extreme_data
scaled = False
elif argv[1] == 'realistic':
d = realistic_data
scaled = False
elif argv[1] == 'sample':
d = sample_data
scaled = True
else:
d = all_on_max
scaled=False
if len(argv) > 2:
display = argv[2]
else:
display = 'length'
size = 4
gcf().set_size_inches(size,size)
make_arrow_plot(d, display=display, linewidth=0.001, edgecolor=None,
normalize_data=scaled, head_starts_at_zero=True, size=size,
graph_name='arrows.png')
| mit |
pybrain2/pybrain2 | examples/rl/environments/linear_fa/bicycle.py | 26 | 14462 | from __future__ import print_function
"""An attempt to implement Randlov and Alstrom (1998). They successfully
use reinforcement learning to balance a bicycle, and to control it to drive
to a specified goal location. Their work has been used since then by a few
researchers as a benchmark problem.
We only implement the balance task. This implementation differs at least
slightly, since Randlov and Alstrom did not mention anything about how they
annealed/decayed their learning rate, etc. As a result of differences, the
results do not match those obtained by Randlov and Alstrom.
"""
__author__ = 'Chris Dembia, Bruce Cam, Johnny Israeli'
from scipy import asarray
from numpy import sin, cos, tan, sqrt, arcsin, arctan, sign, clip, argwhere
import numpy as np
from matplotlib import pyplot as plt
import pybrain.rl.environments
from pybrain.rl.environments.environment import Environment
from pybrain.rl.learners.valuebased.linearfa import SARSALambda_LinFA
from pybrain.rl.agents.linearfa import LinearFA_Agent
from pybrain.rl.experiments import EpisodicExperiment
from pybrain.utilities import one_to_n
class BicycleEnvironment(Environment):
"""Randlov and Alstrom's bicycle model. This code matches nearly exactly
some c code we found online for simulating Randlov and Alstrom's
bicycle. The bicycle travels at a fixed speed.
"""
# For superclass.
indim = 2
outdim = 10
# Environment parameters.
time_step = 0.01
# Goal position and radius
# Lagouakis (2002) uses angle to goal, not heading, as a state
max_distance = 1000.
# Acceleration on Earth's surface due to gravity (m/s^2):
g = 9.82
# See the paper for a description of these quantities:
# Distances (in meters):
c = 0.66
dCM = 0.30
h = 0.94
L = 1.11
r = 0.34
# Masses (in kilograms):
Mc = 15.0
Md = 1.7
Mp = 60.0
# Velocity of a bicycle (in meters per second), equal to 10 km/h:
v = 10.0 * 1000.0 / 3600.0
# Derived constants.
M = Mc + Mp # See Randlov's code.
Idc = Md * r**2
Idv = 1.5 * Md * r**2
Idl = 0.5 * Md * r**2
Itot = 13.0 / 3.0 * Mc * h**2 + Mp * (h + dCM)**2
sigmad = v / r
def __init__(self):
Environment.__init__(self)
self.reset()
self.actions = [0.0, 0.0]
self._save_wheel_contact_trajectories = False
def performAction(self, actions):
self.actions = actions
self.step()
def saveWheelContactTrajectories(self, opt):
self._save_wheel_contact_trajectories = opt
def step(self):
# Unpack the state and actions.
# -----------------------------
# Want to ignore the previous value of omegadd; it could only cause a
# bug if we assign to it.
(theta, thetad, omega, omegad, _,
xf, yf, xb, yb, psi) = self.sensors
(T, d) = self.actions
# For recordkeeping.
# ------------------
if self._save_wheel_contact_trajectories:
self.xfhist.append(xf)
self.yfhist.append(yf)
self.xbhist.append(xb)
self.ybhist.append(yb)
# Intermediate time-dependent quantities.
# ---------------------------------------
# Avoid divide-by-zero, just as Randlov did.
if theta == 0:
rf = 1e8
rb = 1e8
rCM = 1e8
else:
rf = self.L / np.abs(sin(theta))
rb = self.L / np.abs(tan(theta))
rCM = sqrt((self.L - self.c)**2 + self.L**2 / tan(theta)**2)
phi = omega + np.arctan(d / self.h)
# Equations of motion.
# --------------------
# Second derivative of angular acceleration:
omegadd = 1 / self.Itot * (self.M * self.h * self.g * sin(phi)
- cos(phi) * (self.Idc * self.sigmad * thetad
+ sign(theta) * self.v**2 * (
self.Md * self.r * (1.0 / rf + 1.0 / rb)
+ self.M * self.h / rCM)))
thetadd = (T - self.Idv * self.sigmad * omegad) / self.Idl
# Integrate equations of motion using Euler's method.
# ---------------------------------------------------
# yt+1 = yt + yd * dt.
# Must update omega based on PREVIOUS value of omegad.
omegad += omegadd * self.time_step
omega += omegad * self.time_step
thetad += thetadd * self.time_step
theta += thetad * self.time_step
# Handlebars can't be turned more than 80 degrees.
theta = np.clip(theta, -1.3963, 1.3963)
# Wheel ('tyre') contact positions.
# ---------------------------------
# Front wheel contact position.
front_temp = self.v * self.time_step / (2 * rf)
# See Randlov's code.
if front_temp > 1:
front_temp = sign(psi + theta) * 0.5 * np.pi
else:
front_temp = sign(psi + theta) * arcsin(front_temp)
xf += self.v * self.time_step * -sin(psi + theta + front_temp)
yf += self.v * self.time_step * cos(psi + theta + front_temp)
# Rear wheel.
back_temp = self.v * self.time_step / (2 * rb)
# See Randlov's code.
if back_temp > 1:
back_temp = np.sign(psi) * 0.5 * np.pi
else:
back_temp = np.sign(psi) * np.arcsin(back_temp)
xb += self.v * self.time_step * -sin(psi + back_temp)
yb += self.v * self.time_step * cos(psi + back_temp)
# Preventing numerical drift.
# ---------------------------
# Copying what Randlov did.
current_wheelbase = sqrt((xf - xb)**2 + (yf - yb)**2)
if np.abs(current_wheelbase - self.L) > 0.01:
relative_error = self.L / current_wheelbase - 1.0
xb += (xb - xf) * relative_error
yb += (yb - yf) * relative_error
# Update heading, psi.
# --------------------
delta_y = yf - yb
if (xf == xb) and delta_y < 0.0:
psi = np.pi
else:
if delta_y > 0.0:
psi = arctan((xb - xf) / delta_y)
else:
psi = sign(xb - xf) * 0.5 * np.pi - arctan(delta_y / (xb - xf))
self.sensors = np.array([theta, thetad, omega, omegad, omegadd,
xf, yf, xb, yb, psi])
def reset(self):
theta = 0
thetad = 0
omega = 0
omegad = 0
omegadd = 0
xf = 0
yf = self.L
xb = 0
yb = 0
psi = np.arctan((xb - xf) / (yf - yb))
self.sensors = np.array([theta, thetad, omega, omegad, omegadd,
xf, yf, xb, yb, psi])
self.xfhist = []
self.yfhist = []
self.xbhist = []
self.ybhist = []
def getSteer(self):
return self.sensors[0]
def getTilt(self):
return self.sensors[2]
def get_xfhist(self):
return self.xfhist
def get_yfhist(self):
return self.yfhist
def get_xbhist(self):
return self.xbhist
def get_ybhist(self):
return self.ybhist
def getSensors(self):
return self.sensors
class BalanceTask(pybrain.rl.environments.EpisodicTask):
"""The rider is to simply balance the bicycle while moving with the
speed perscribed in the environment. This class uses a continuous 5
dimensional state space, and a discrete state space.
This class is heavily guided by
pybrain.rl.environments.cartpole.balancetask.BalanceTask.
"""
max_tilt = np.pi / 6.
nactions = 9
def __init__(self, max_time=1000.0):
super(BalanceTask, self).__init__(BicycleEnvironment())
self.max_time = max_time
# Keep track of time in case we want to end episodes based on number of
# time steps.
self.t = 0
@property
def indim(self):
return 1
@property
def outdim(self):
return 5
def reset(self):
super(BalanceTask, self).reset()
self.t = 0
def performAction(self, action):
"""Incoming action is an int between 0 and 8. The action we provide to
the environment consists of a torque T in {-2 N, 0, 2 N}, and a
displacement d in {-.02 m, 0, 0.02 m}.
"""
self.t += 1
assert round(action[0]) == action[0]
# -1 for action in {0, 1, 2}, 0 for action in {3, 4, 5}, 1 for
# action in {6, 7, 8}
torque_selector = np.floor(action[0] / 3.0) - 1.0
T = 2 * torque_selector
# Random number in [-1, 1]:
p = 2.0 * np.random.rand() - 1.0
# -1 for action in {0, 3, 6}, 0 for action in {1, 4, 7}, 1 for
# action in {2, 5, 8}
disp_selector = action[0] % 3 - 1.0
d = 0.02 * disp_selector + 0.02 * p
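        # Worked example: action 7 gives torque_selector = floor(7/3) - 1 = 1, so T = 2,
        # and disp_selector = 7 % 3 - 1 = 0, so d reduces to the 0.02*p noise term alone.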
super(BalanceTask, self).performAction([T, d])
def getObservation(self):
(theta, thetad, omega, omegad, omegadd,
xf, yf, xb, yb, psi) = self.env.getSensors()
return self.env.getSensors()[0:5]
def isFinished(self):
# Criterion for ending an episode. From Randlov's paper:
# "When the agent can balance for 1000 seconds, the task is considered
# learned."
if np.abs(self.env.getTilt()) > self.max_tilt:
return True
elapsed_time = self.env.time_step * self.t
if elapsed_time > self.max_time:
return True
return False
def getReward(self):
# -1 reward for falling over; no reward otherwise.
if np.abs(self.env.getTilt()) > self.max_tilt:
return -1.0
return 0.0
class LinearFATileCoding3456BalanceTask(BalanceTask):
"""An attempt to exactly implement Randlov's function approximation. He
discretized (tiled) the state space into 3456 bins. We use the same action
space as in the superclass.
"""
# From Randlov, 1998:
theta_bounds = np.array(
[-0.5 * np.pi, -1.0, -0.2, 0, 0.2, 1.0, 0.5 * np.pi])
thetad_bounds = np.array(
[-np.inf, -2.0, 0, 2.0, np.inf])
omega_bounds = np.array(
[-BalanceTask.max_tilt, -0.15, -0.06, 0, 0.06, 0.15,
BalanceTask.max_tilt])
omegad_bounds = np.array(
[-np.inf, -0.5, -0.25, 0, 0.25, 0.5, np.inf])
omegadd_bounds = np.array(
[-np.inf, -2.0, 0, 2.0, np.inf])
# http://stackoverflow.com/questions/3257619/numpy-interconversion-between-multidimensional-and-linear-indexing
nbins_across_dims = [
len(theta_bounds) - 1,
len(thetad_bounds) - 1,
len(omega_bounds) - 1,
len(omegad_bounds) - 1,
len(omegadd_bounds) - 1]
# This array, when dotted with the 5-dim state vector, gives a 'linear'
# index between 0 and 3455.
magic_array = np.cumprod([1] + nbins_across_dims)[:-1]
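    # With nbins_across_dims = [6, 4, 6, 6, 4], magic_array works out to
    # [1, 6, 24, 144, 864] and 6*4*6*6*4 = 3456 bins in total (hence outdim below);
    # e.g. bin_indices [2, 1, 3, 0, 2] maps to 2*1 + 1*6 + 3*24 + 0*144 + 2*864 = 1808.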
@property
def outdim(self):
# Used when constructing LinearFALearner's.
return 3456
def getBin(self, theta, thetad, omega, omegad, omegadd):
bin_indices = [
np.digitize([theta], self.theta_bounds)[0] - 1,
np.digitize([thetad], self.thetad_bounds)[0] - 1,
np.digitize([omega], self.omega_bounds)[0] - 1,
np.digitize([omegad], self.omegad_bounds)[0] - 1,
np.digitize([omegadd], self.omegadd_bounds)[0] - 1,
]
return np.dot(self.magic_array, bin_indices)
def getBinIndices(self, linear_index):
"""Given a linear index (integer between 0 and outdim), returns the bin
indices for each of the state dimensions.
"""
return linear_index / self.magic_array % self.nbins_across_dims
def getObservation(self):
(theta, thetad, omega, omegad, omegadd,
xf, yf, xb, yb, psi) = self.env.getSensors()
state = one_to_n(self.getBin(theta, thetad, omega, omegad, omegadd),
self.outdim)
return state
class SARSALambda_LinFA_ReplacingTraces(SARSALambda_LinFA):
"""Randlov used replacing traces, but this doesn't exist in PyBrain's
SARSALambda.
"""
def _updateEtraces(self, state, action, responsibility=1.):
self._etraces *= self.rewardDiscount * self._lambda * responsibility
# This assumes that state is an identity vector (like, from one_to_n).
self._etraces[action] = clip(self._etraces[action] + state, -np.inf, 1.)
# Set the trace for all other actions in this state to 0:
action_bit = one_to_n(action, self.num_actions)
for argstate in argwhere(state == 1) :
self._etraces[argwhere(action_bit != 1), argstate] = 0.
task = LinearFATileCoding3456BalanceTask()
env = task.env
# The learning is very sensitive to the learning rate decay.
learner = SARSALambda_LinFA_ReplacingTraces(task.nactions, task.outdim,
learningRateDecay=2000)
learner._lambda = 0.95
task.discount = learner.rewardDiscount
agent = LinearFA_Agent(learner)
agent.logging = False
exp = EpisodicExperiment(task, agent)
performance_agent = LinearFA_Agent(learner)
performance_agent.logging = False
performance_agent.greedy = True
performance_agent.learning = False
env.saveWheelContactTrajectories(True)
plt.ion()
plt.figure(figsize=(8, 4))
ax1 = plt.subplot(1, 2, 1)
ax2 = plt.subplot(1, 2, 2)
def update_wheel_trajectories():
front_lines = ax2.plot(env.get_xfhist(), env.get_yfhist(), 'r')
back_lines = ax2.plot(env.get_xbhist(), env.get_ybhist(), 'b')
plt.axis('equal')
perform_cumrewards = []
for irehearsal in range(7000):
# Learn.
# ------
r = exp.doEpisodes(1)
# Discounted reward.
cumreward = exp.task.getTotalReward()
#print 'cumreward: %.4f; nsteps: %i; learningRate: %.4f' % (
# cumreward, len(r[0]), exp.agent.learner.learningRate)
if irehearsal % 50 == 0:
# Perform (no learning).
# ----------------------
# Swap out the agent.
exp.agent = performance_agent
# Perform.
r = exp.doEpisodes(1)
perform_cumreward = task.getTotalReward()
perform_cumrewards.append(perform_cumreward)
print('PERFORMANCE: cumreward:', perform_cumreward, 'nsteps:', len(r[0]))
# Swap back the learning agent.
performance_agent.reset()
exp.agent = agent
ax1.cla()
ax1.plot(perform_cumrewards, '.--')
# Wheel trajectories.
update_wheel_trajectories()
plt.pause(0.001)
| bsd-3-clause |
pprett/statsmodels | statsmodels/graphics/regressionplots.py | 1 | 18187 | '''Partial Regression plot and residual plots to find misspecification
Author: Josef Perktold
License: BSD-3
Created: 2011-01-23
update
2011-06-05 : start to convert example to usable functions
2011-10-27 : docstrings
'''
import numpy as np
from statsmodels.regression.linear_model import OLS
from statsmodels.sandbox.regression.predstd import wls_prediction_std
from . import utils
__all__ = ['plot_fit', 'plot_regress_exog', 'plot_partregress', 'plot_ccpr']
def plot_fit(res, exog_idx, exog_name='', y_true=None, ax=None, fontsize='small'):
"""Plot fit against one regressor.
This creates one graph with the scatterplot of observed values compared to
fitted values.
Parameters
----------
res : result instance
result instance with resid, model.endog and model.exog as attributes
exog_idx : int
index of regressor in exog matrix
y_true : array_like
(optional) If this is not None, then the array is added to the plot
ax : Matplotlib AxesSubplot instance, optional
If given, this subplot is used to plot in instead of a new figure being
created.
Returns
-------
fig : Matplotlib figure instance
If `ax` is None, the created figure. Otherwise the figure to which
`ax` is connected.
Notes
-----
This is currently very simple, no options or varnames yet.
"""
fig, ax = utils.create_mpl_ax(ax)
if exog_name == '':
exog_name = 'variable %d' % exog_idx
#maybe add option for wendog, wexog
y = res.model.endog
x1 = res.model.exog[:, exog_idx]
x1_argsort = np.argsort(x1)
y = y[x1_argsort]
x1 = x1[x1_argsort]
ax.plot(x1, y, 'bo', label='observed')
if not y_true is None:
ax.plot(x1, y_true[x1_argsort], 'b-', label='true')
title = 'fitted versus regressor %s' % exog_name
else:
title = 'fitted versus regressor %s' % exog_name
prstd, iv_l, iv_u = wls_prediction_std(res)
ax.plot(x1, res.fittedvalues[x1_argsort], 'k-', label='fitted') #'k-o')
#ax.plot(x1, iv_u, 'r--')
#ax.plot(x1, iv_l, 'r--')
ax.fill_between(x1, iv_l[x1_argsort], iv_u[x1_argsort], alpha=0.1, color='k')
ax.set_title(title, fontsize=fontsize)
return fig
def plot_regress_exog(res, exog_idx, exog_name='', fig=None):
"""Plot regression results against one regressor.
This plots four graphs in a 2 by 2 figure: 'endog versus exog',
'residuals versus exog', 'fitted versus exog' and
'fitted plus residual versus exog'
Parameters
----------
res : result instance
result instance with resid, model.endog and model.exog as attributes
exog_idx : int
index of regressor in exog matrix
fig : Matplotlib figure instance, optional
If given, this figure is simply returned. Otherwise a new figure is
created.
Returns
-------
fig : matplotlib figure instance
Notes
-----
This is currently very simple, no options or varnames yet.
"""
fig = utils.create_mpl_fig(fig)
if exog_name == '':
exog_name = 'variable %d' % exog_idx
#maybe add option for wendog, wexog
#y = res.endog
x1 = res.model.exog[:,exog_idx]
ax = fig.add_subplot(2,2,1)
#namestr = ' for %s' % self.name if self.name else ''
ax.plot(x1, res.model.endog, 'o')
ax.set_title('endog versus exog', fontsize='small')# + namestr)
ax = fig.add_subplot(2,2,2)
#namestr = ' for %s' % self.name if self.name else ''
ax.plot(x1, res.resid, 'o')
ax.axhline(y=0)
ax.set_title('residuals versus exog', fontsize='small')# + namestr)
ax = fig.add_subplot(2,2,3)
#namestr = ' for %s' % self.name if self.name else ''
ax.plot(x1, res.fittedvalues, 'o')
ax.set_title('Fitted versus exog', fontsize='small')# + namestr)
ax = fig.add_subplot(2,2,4)
#namestr = ' for %s' % self.name if self.name else ''
ax.plot(x1, res.fittedvalues + res.resid, 'o')
ax.set_title('Fitted plus residuals versus exog', fontsize='small')# + namestr)
fig.suptitle('Regression Plots for %s' % exog_name)
return fig
def _partial_regression(endog, exog_i, exog_others):
"""Partial regression.
regress endog on exog_i conditional on exog_others
uses OLS
Parameters
----------
endog : array_like
exog : array_like
exog_others : array_like
Returns
-------
res1c : OLS results instance
(res1a, res1b) : tuple of OLS results instances
results from regression of endog on exog_others and of exog_i on
exog_others
"""
#FIXME: This function doesn't appear to be used.
res1a = OLS(endog, exog_others).fit()
res1b = OLS(exog_i, exog_others).fit()
res1c = OLS(res1a.resid, res1b.resid).fit()
return res1c, (res1a, res1b)
def plot_partregress_ax(endog, exog_i, exog_others, varname='',
title_fontsize=None, ax=None):
"""Plot partial regression for a single regressor.
Parameters
----------
endog : ndarray
endogenous or response variable
exog_i : ndarray
exogenous, explanatory variable
exog_others : ndarray
other exogenous, explanatory variables, the effect of these variables
will be removed by OLS regression
varname : str
name of the variable used in the title
ax : Matplotlib AxesSubplot instance, optional
If given, this subplot is used to plot in instead of a new figure being
created.
Returns
-------
fig : Matplotlib figure instance
If `ax` is None, the created figure. Otherwise the figure to which
`ax` is connected.
See Also
--------
plot_partregress : Plot partial regression for a set of regressors.
"""
fig, ax = utils.create_mpl_ax(ax)
res1a = OLS(endog, exog_others).fit()
res1b = OLS(exog_i, exog_others).fit()
ax.plot(res1b.resid, res1a.resid, 'o')
res1c = OLS(res1a.resid, res1b.resid).fit()
ax.plot(res1b.resid, res1c.fittedvalues, '-', color='k')
ax.set_title('Partial Regression plot %s' % varname,
fontsize=title_fontsize)# + namestr)
return fig
def plot_partregress(endog, exog, exog_idx=None, grid=None, fig=None):
"""Plot partial regression for a set of regressors.
Parameters
----------
endog : ndarray
endogenous or response variable
exog : ndarray
exogenous, regressor variables
exog_idx : None or list of int
(column) indices of the exog used in the plot
grid : None or tuple of int (nrows, ncols)
If grid is given, then it is used for the arrangement of the subplots.
If grid is None, then ncol is one, if there are only 2 subplots, and
the number of columns is two otherwise.
fig : Matplotlib figure instance, optional
If given, this figure is simply returned. Otherwise a new figure is
created.
Returns
-------
fig : Matplotlib figure instance
If `fig` is None, the created figure. Otherwise `fig` itself.
Notes
-----
A subplot is created for each explanatory variable given by exog_idx.
The partial regression plot shows the relationship between the response
and the given explanatory variable after removing the effect of all other
explanatory variables in exog.
See Also
--------
plot_partregress_ax : Plot partial regression for a single regressor.
plot_ccpr
References
----------
See http://www.itl.nist.gov/div898/software/dataplot/refman1/auxillar/partregr.htm
"""
fig = utils.create_mpl_fig(fig)
#maybe add option for using wendog, wexog instead
y = endog
if not grid is None:
nrows, ncols = grid
else:
if len(exog_idx) > 2:
nrows = int(np.ceil(len(exog_idx)/2.))
ncols = 2
title_fontsize = 'small'
else:
nrows = len(exog_idx)
ncols = 1
title_fontsize = None
k_vars = exog.shape[1]
#this function doesn't make sense if k_vars=1
for i,idx in enumerate(exog_idx):
others = range(k_vars)
others.pop(idx)
exog_others = exog[:, others]
ax = fig.add_subplot(nrows, ncols, i+1)
plot_partregress_ax(y, exog[:, idx], exog_others, ax=ax)
return fig
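# Usage sketch (illustrative): with response vector y and design matrix exog of at
# least three columns, something like plot_partregress(y, exog, exog_idx=[0, 1])
# draws one partial-regression panel per selected column.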
def plot_ccpr_ax(res, exog_idx=None, ax=None):
"""Plot CCPR against one regressor.
Generates a CCPR (component and component-plus-residual) plot.
Parameters
----------
res : result instance
uses exog and params of the result instance
exog_idx : int
(column) index of the exog used in the plot
ax : Matplotlib AxesSubplot instance, optional
If given, this subplot is used to plot in instead of a new figure being
created.
Returns
-------
fig : Matplotlib figure instance
If `ax` is None, the created figure. Otherwise the figure to which
`ax` is connected.
See Also
--------
plot_ccpr : Creates CCPR plot for multiple regressors in a plot grid.
References
----------
See http://www.itl.nist.gov/div898/software/dataplot/refman1/auxillar/ccpr.htm
"""
fig, ax = utils.create_mpl_ax(ax)
x1 = res.model.exog[:,exog_idx]
#namestr = ' for %s' % self.name if self.name else ''
x1beta = x1*res.params[1]
ax.plot(x1, x1beta + res.resid, 'o')
ax.plot(x1, x1beta, '-')
ax.set_title('X_%d beta_%d plus residuals versus exog (CCPR)' % \
(exog_idx, exog_idx))
return fig
def plot_ccpr(res, exog_idx=None, grid=None, fig=None):
"""Generate CCPR plots against a set of regressors, plot in a grid.
Generates a grid of CCPR (component and component-plus-residual) plots.
Parameters
----------
res : result instance
uses exog and params of the result instance
exog_idx : None or list of int
(column) indices of the exog used in the plot
grid : None or tuple of int (nrows, ncols)
If grid is given, then it is used for the arrangement of the subplots.
If grid is None, then ncol is one, if there are only 2 subplots, and
the number of columns is two otherwise.
fig : Matplotlib figure instance, optional
If given, this figure is simply returned. Otherwise a new figure is
created.
Returns
-------
fig : Matplotlib figure instance
If `ax` is None, the created figure. Otherwise the figure to which
`ax` is connected.
Notes
-----
Partial residual plots are formed as::
Res + Betahat(i)*Xi versus Xi
and CCPR adds::
Betahat(i)*Xi versus Xi
See Also
--------
plot_ccpr_ax : Creates CCPR plot for a single regressor.
References
----------
See http://www.itl.nist.gov/div898/software/dataplot/refman1/auxillar/ccpr.htm
"""
fig = utils.create_mpl_fig(fig)
if grid is not None:
nrows, ncols = grid
else:
if len(exog_idx) > 2:
nrows = int(np.ceil(len(exog_idx)/2.))
ncols = 2
else:
nrows = len(exog_idx)
ncols = 1
for i, idx in enumerate(exog_idx):
ax = fig.add_subplot(nrows, ncols, i+1)
plot_ccpr_ax(res, exog_idx=idx, ax=ax)
return fig
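# Usage sketch (illustrative): for a fitted OLS results instance `res` with at least
# two regressors, plot_ccpr(res, exog_idx=[0, 1], grid=(2, 1)) draws one CCPR panel
# per selected column.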
def abline_plot(intercept=None, slope=None, horiz=None, vert=None,
model_results=None, ax=None, **kwargs):
"""
Plots a line given an intercept and slope.
intercept : float
The intercept of the line
slope : float
The slope of the line
horiz : float or array-like
Data for horizontal lines on the y-axis
vert : array-like
Data for verterical lines on the x-axis
model_results : statsmodels results instance
Any object that has a two-value `params` attribute. Assumed that it
is (intercept, slope)
ax : axes, optional
Matplotlib axes instance
kwargs
Options passed to matplotlib.pyplot.plt
Returns
-------
fig : Figure
The figure given by `ax.figure` or a new instance.
Examples
--------
>>> import numpy as np
>>> import statsmodels.api as sm
>>> np.random.seed(12345)
>>> X = sm.add_constant(np.random.normal(0, 20, size=30), prepend=True)
>>> y = np.dot(X, [25, 3.5]) + np.random.normal(0, 30, size=30)
>>> mod = sm.OLS(y,X).fit()
>>> fig = abline_plot(model_results=mod)
    >>> ax = fig.axes[0]
>>> ax.scatter(X[:,1], y)
>>> ax.margins(.1)
>>> import matplotlib.pyplot as plt
>>> plt.show()
"""
fig,ax = utils.create_mpl_ax(ax)
if model_results:
intercept, slope = model_results.params
x = [model_results.model.exog[:,1].min(),
model_results.model.exog[:,1].max()]
else:
x = None
if not (intercept is not None and slope is not None):
raise ValueError("specify slope and intercepty or model_results")
if not x: # can't infer x limits
x = ax.get_xlim()
y = [x[0]*slope+intercept, x[1]*slope+intercept]
ax.set_xlim(x)
ax.set_ylim(y)
from matplotlib.lines import Line2D
class ABLine2D(Line2D):
def update_datalim(self, ax):
ax.set_autoscale_on(False)
children = ax.get_children()
abline = [children[i] for i in range(len(children))
if isinstance(children[i], ABLine2D)][0]
x = ax.get_xlim()
y = [x[0]*slope+intercept, x[1]*slope+intercept]
abline.set_data(x,y)
ax.figure.canvas.draw()
line = ABLine2D(x, y, **kwargs)
ax.add_line(line)
ax.callbacks.connect('xlim_changed', line.update_datalim)
ax.callbacks.connect('ylim_changed', line.update_datalim)
if horiz:
ax.hline(horiz)
if vert:
ax.vline(vert)
return fig
if __name__ == '__main__':
import numpy as np
import statsmodels.api as sm
import matplotlib.pyplot as plt
from statsmodels.sandbox.regression.predstd import wls_prediction_std
#example from tut.ols with changes
#fix a seed for these examples
np.random.seed(9876789)
# OLS non-linear curve but linear in parameters
# ---------------------------------------------
nsample = 100
sig = 0.5
x1 = np.linspace(0, 20, nsample)
x2 = 5 + 3* np.random.randn(nsample)
X = np.c_[x1, x2, np.sin(0.5*x1), (x2-5)**2, np.ones(nsample)]
beta = [0.5, 0.5, 1, -0.04, 5.]
y_true = np.dot(X, beta)
y = y_true + sig * np.random.normal(size=nsample)
#estimate only linear function, misspecified because of non-linear terms
exog0 = sm.add_constant(np.c_[x1, x2], prepend=False)
# plt.figure()
# plt.plot(x1, y, 'o', x1, y_true, 'b-')
res = sm.OLS(y, exog0).fit()
#print res.params
#print res.bse
plot_old = 0 #True
if plot_old:
#current bug predict requires call to model.results
#print res.model.predict
prstd, iv_l, iv_u = wls_prediction_std(res)
plt.plot(x1, res.fittedvalues, 'r-o')
plt.plot(x1, iv_u, 'r--')
plt.plot(x1, iv_l, 'r--')
plt.title('blue: true, red: OLS')
plt.figure()
plt.plot(res.resid, 'o')
plt.title('Residuals')
fig2 = plt.figure()
ax = fig2.add_subplot(2,1,1)
#namestr = ' for %s' % self.name if self.name else ''
plt.plot(x1, res.resid, 'o')
ax.set_title('residuals versus exog')# + namestr)
ax = fig2.add_subplot(2,1,2)
plt.plot(x2, res.resid, 'o')
fig3 = plt.figure()
ax = fig3.add_subplot(2,1,1)
#namestr = ' for %s' % self.name if self.name else ''
plt.plot(x1, res.fittedvalues, 'o')
ax.set_title('Fitted values versus exog')# + namestr)
ax = fig3.add_subplot(2,1,2)
plt.plot(x2, res.fittedvalues, 'o')
fig4 = plt.figure()
ax = fig4.add_subplot(2,1,1)
#namestr = ' for %s' % self.name if self.name else ''
plt.plot(x1, res.fittedvalues + res.resid, 'o')
ax.set_title('Fitted values plus residuals versus exog')# + namestr)
ax = fig4.add_subplot(2,1,2)
plt.plot(x2, res.fittedvalues + res.resid, 'o')
# see http://www.itl.nist.gov/div898/software/dataplot/refman1/auxillar/partregr.htm
fig5 = plt.figure()
ax = fig5.add_subplot(2,1,1)
#namestr = ' for %s' % self.name if self.name else ''
res1a = sm.OLS(y, exog0[:,[0,2]]).fit()
res1b = sm.OLS(x1, exog0[:,[0,2]]).fit()
plt.plot(res1b.resid, res1a.resid, 'o')
res1c = sm.OLS(res1a.resid, res1b.resid).fit()
plt.plot(res1b.resid, res1c.fittedvalues, '-')
ax.set_title('Partial Regression plot')# + namestr)
ax = fig5.add_subplot(2,1,2)
#plt.plot(x2, res.fittedvalues + res.resid, 'o')
res2a = sm.OLS(y, exog0[:,[0,1]]).fit()
res2b = sm.OLS(x2, exog0[:,[0,1]]).fit()
plt.plot(res2b.resid, res2a.resid, 'o')
res2c = sm.OLS(res2a.resid, res2b.resid).fit()
plt.plot(res2b.resid, res2c.fittedvalues, '-')
# see http://www.itl.nist.gov/div898/software/dataplot/refman1/auxillar/ccpr.htm
fig6 = plt.figure()
ax = fig6.add_subplot(2,1,1)
#namestr = ' for %s' % self.name if self.name else ''
x1beta = x1*res.params[1]
x2beta = x2*res.params[2]
plt.plot(x1, x1beta + res.resid, 'o')
plt.plot(x1, x1beta, '-')
ax.set_title('X_i beta_i plus residuals versus exog (CCPR)')# + namestr)
ax = fig6.add_subplot(2,1,2)
plt.plot(x2, x2beta + res.resid, 'o')
plt.plot(x2, x2beta, '-')
#print res.summary()
doplots = 1
if doplots:
plot_fit(res, 0, y_true=None)
plot_fit(res, 1, y_true=None)
plot_partregress(y, exog0, exog_idx=[0,1])
plot_regress_exog(res, exog_idx=[0])
plot_ccpr(res, exog_idx=[0])
plot_ccpr(res, exog_idx=[0,1])
    # TestPlot is not defined in this module (it lives in the test suite), so the
    # original smoke-test calls are skipped here:
    # tp = TestPlot()
    # tp.test_plot_fit()
#plt.show()
| bsd-3-clause |
robbymeals/scikit-learn | examples/applications/topics_extraction_with_nmf.py | 106 | 2313 | """
========================================================
Topics extraction with Non-Negative Matrix Factorization
========================================================
This is a proof of concept application of Non Negative Matrix
Factorization of the term frequency matrix of a corpus of documents so
as to extract an additive model of the topic structure of the corpus.
The output is a list of topics, each represented as a list of terms
(weights are not shown).
The default parameters (n_samples / n_features / n_topics) should make
the example runnable in a couple of tens of seconds. You can try to
increase the dimensions of the problem, but be aware that the time complexity
is polynomial.
"""
# Author: Olivier Grisel <[email protected]>
# Lars Buitinck <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from time import time
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.decomposition import NMF
from sklearn.datasets import fetch_20newsgroups
n_samples = 2000
n_features = 1000
n_topics = 10
n_top_words = 20
# Load the 20 newsgroups dataset and vectorize it. We use a few heuristics
# to filter out useless terms early on: the posts are stripped of headers,
# footers and quoted replies, and common English words, words occurring in
# only one document or in at least 95% of the documents are removed.
t0 = time()
print("Loading dataset and extracting TF-IDF features...")
dataset = fetch_20newsgroups(shuffle=True, random_state=1,
remove=('headers', 'footers', 'quotes'))
vectorizer = TfidfVectorizer(max_df=0.95, min_df=2, max_features=n_features,
stop_words='english')
tfidf = vectorizer.fit_transform(dataset.data[:n_samples])
print("done in %0.3fs." % (time() - t0))
# Fit the NMF model
print("Fitting the NMF model with n_samples=%d and n_features=%d..."
% (n_samples, n_features))
nmf = NMF(n_components=n_topics, random_state=1).fit(tfidf)
print("done in %0.3fs." % (time() - t0))
feature_names = vectorizer.get_feature_names()
for topic_idx, topic in enumerate(nmf.components_):
print("Topic #%d:" % topic_idx)
print(" ".join([feature_names[i]
for i in topic.argsort()[:-n_top_words - 1:-1]]))
print()
| bsd-3-clause |
andrewmchen/incubator-airflow | airflow/hooks/presto_hook.py | 22 | 3617 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from builtins import str
import logging
from pyhive import presto
from pyhive.exc import DatabaseError
from airflow.hooks.dbapi_hook import DbApiHook
logging.getLogger("pyhive").setLevel(logging.INFO)
class PrestoException(Exception):
pass
class PrestoHook(DbApiHook):
"""
Interact with Presto through PyHive!
>>> ph = PrestoHook()
>>> sql = "SELECT count(1) AS num FROM airflow.static_babynames"
>>> ph.get_records(sql)
[[340698]]
"""
conn_name_attr = 'presto_conn_id'
default_conn_name = 'presto_default'
def get_conn(self):
"""Returns a connection object"""
db = self.get_connection(self.presto_conn_id)
return presto.connect(
host=db.host,
port=db.port,
username=db.login,
catalog=db.extra_dejson.get('catalog', 'hive'),
schema=db.schema)
@staticmethod
def _strip_sql(sql):
return sql.strip().rstrip(';')
def _get_pretty_exception_message(self, e):
"""
Parses some DatabaseError to provide a better error message
"""
if (hasattr(e, 'message')
and 'errorName' in e.message
and 'message' in e.message):
return ('{name}: {message}'.format(
name=e.message['errorName'],
message=e.message['message']))
else:
return str(e)
def get_records(self, hql, parameters=None):
"""
Get a set of records from Presto
"""
try:
return super(PrestoHook, self).get_records(
self._strip_sql(hql), parameters)
except DatabaseError as e:
            raise PrestoException(self._get_pretty_exception_message(e))
def get_first(self, hql, parameters=None):
"""
Returns only the first row, regardless of how many rows the query
returns.
"""
try:
return super(PrestoHook, self).get_first(
self._strip_sql(hql), parameters)
except DatabaseError as e:
raise PrestoException(self._parse_exception_message(e))
def get_pandas_df(self, hql, parameters=None):
"""
Get a pandas dataframe from a sql query.
"""
import pandas
cursor = self.get_cursor()
try:
cursor.execute(self._strip_sql(hql), parameters)
data = cursor.fetchall()
except DatabaseError as e:
            raise PrestoException(self._get_pretty_exception_message(e))
column_descriptions = cursor.description
if data:
df = pandas.DataFrame(data)
df.columns = [c[0] for c in column_descriptions]
else:
df = pandas.DataFrame()
return df
def run(self, hql, parameters=None):
"""
Execute the statement against Presto. Can be used to create views.
"""
return super(PrestoHook, self).run(self._strip_sql(hql), parameters)
def insert_rows(self):
raise NotImplementedError()
| apache-2.0 |
exowanderer/SpitzerDeepLearningNetwork | Python Scripts/spitzer_cal_NALU_train.py | 1 | 15910 | from multiprocessing import set_start_method, cpu_count
#set_start_method('forkserver')
import os
os.environ["OMP_NUM_THREADS"] = str(cpu_count()) # or to whatever you want
from argparse import ArgumentParser, ArgumentTypeError
from datetime import datetime
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score
from sklearn.utils import shuffle
from tqdm import tqdm
import pandas as pd
import numpy as np
import tensorflow as tf
# import nalu  # not needed: a local nalu() implementation is defined below
time_now = datetime.utcnow().strftime("%Y%m%d%H%M%S")
def str2bool(v):
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
        raise ArgumentTypeError('Boolean value expected.')
def chisq(y_true, y_pred, y_error): return np.sum(((y_true-y_pred)/y_error)**2.)
ap = ArgumentParser()
ap.add_argument('-d', '--directory', type=str, required=False, default='nalu_tf_save_dir/saves_{}'.format(time_now), help='The tensorflow ckpt save file.')
ap.add_argument('-nnl', '--n_nalu_layers', type=int, required=False, default=1, help='Whether to use 1 (default), 2, or ... N NALU layers.')
ap.add_argument('-nnn', '--n_nalu_neurons', type=int, required=False, default=0, help='How many features on the second NALU layer.')
ap.add_argument('-ne', '--n_epochs', type=int, required=False, default=200, help='Number of N_EPOCHS to train the network with.')
ap.add_argument('-nc', '--n_classes', type=int, required=False, default=1, help='n_classes == 1 for Regression (default); > 1 for Classification.')
ap.add_argument('-bs', '--batch_size', type=int, required=False, default=32, help='Batch size: number of samples per batch.')
ap.add_argument('-lr', '--learning_rate', type=float, required=False, default=1e-3, help='Learning rate: how fast the optimizer moves up/down the gradient.')
ap.add_argument('-ts', '--test_size', type=float, required=False, default=0.75, help='How much to split the train / test ratio.')
ap.add_argument('-rs', '--random_state', type=int, required=False, default=42, help='Integer value to initialize train/test splitting randomization.')
ap.add_argument('-pp', '--pre_process', type=str2bool, nargs='?', required=False, default=True, help='Toggle whether to MinMax-preprocess the features.')
ap.add_argument('-pca', '--pca_transform', type=str2bool, nargs='?', required=False, default=True, help='Toggle whether to PCA-pretransform the features.')
ap.add_argument('-v', '--verbose', type=str2bool, nargs='?', required=False, default=False, help='Whether to set verbosity = True or False (default).')
ap.add_argument('-ds', '--data_set', type=str, required=False, default='', help='The csv file containing the data with which to train.')
try:
args = vars(ap.parse_args())
except Exception as e:
print('Error: {}'.format(e))
args = {}
args['directory'] = ap.get_default('directory')
args['n_nalu_layers'] = ap.get_default('n_nalu_layers')
args['n_nalu_neurons'] = ap.get_default('n_nalu_neurons')
args['n_epochs'] = ap.get_default('n_epochs')
args['n_classes'] = ap.get_default('n_classes')
args['batch_size'] = ap.get_default('batch_size')
args['learning_rate'] = ap.get_default('learning_rate')
args['test_size'] = ap.get_default('test_size')
args['random_state'] = ap.get_default('random_state')
args['pre_process'] = ap.get_default('pre_process')
args['pca_transform'] = ap.get_default('pca_transform')
args['verbose'] = ap.get_default('verbose')
args['data_set'] = ap.get_default('data_set')
DO_PP = args['pre_process']
DO_PCA = args['pca_transform']
verbose = args['verbose']
data_set_fname = args['data_set']
'''
print("loading pipelines on disk vis joblib.")
full_pipe = joblib.load('pmap_full_pipe_transformer_16features.joblib.save')
std_scaler_from_raw = joblib.load('pmap_standard_scaler_transformer_16features.joblib.save')
pca_transformer_from_std_scaled = joblib.load('pmap_pca_transformer_from_stdscaler_16features.joblib.save')
minmax_scaler_transformer_raw = joblib.load('pmap_minmax_scaler_transformer_from_raw_16features.joblib.save')
minmax_scaler_transformer_pca = joblib.load('pmap_minmax_scaler_transformer_from_pca_16features.joblib.save')
'''
label_n_error_filename = 'pmap_raw_labels_and_errors.csv'
print("Loading in raw labels and errors from {}".format(label_n_error_filename))
labels_df = pd.read_csv(label_n_error_filename)
labels = labels_df['Flux'].values[:,None]
labels_err = labels_df['Flux_err'].values
# Feature File Switch
if DO_PP and DO_PCA:
features_input_filename = 'pmap_full_pipe_transformed_16features.csv'
elif DO_PP:
features_input_filename = 'pmap_minmax_transformed_from_raw_16features.csv'
elif DO_PCA:
features_input_filename = 'pmap_pca_transformed_from_stdscaler_16features.csv'
else:
features_input_filename = 'pmap_raw_16features.csv'
print("Loading in pre-processed features from {}".format(features_input_filename))
features_input = pd.read_csv(features_input_filename).drop(['idx'], axis=1).values
def nalu(input_layer, num_outputs):
""" Neural Arithmetic Logic Unit tesnorflow layer
Arguments:
input_layer - A Tensor representing previous layer
    num_outputs - number of output units
Returns:
A tensor representing the output of NALU
"""
shape = (int(input_layer.shape[-1]), num_outputs)
# define variables
W_hat = tf.Variable(tf.truncated_normal(shape, stddev=0.02))
M_hat = tf.Variable(tf.truncated_normal(shape, stddev=0.02))
G = tf.Variable(tf.truncated_normal(shape, stddev=0.02))
# operations according to paper
W = tf.tanh(W_hat) * tf.sigmoid(M_hat)
m = tf.exp(tf.matmul(tf.log(tf.abs(input_layer) + 1e-7), W))
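    # m multiplies inputs in log-space (exp of a weighted sum of logs realises
    # products and powers), while a (below) is the plain additive path and the
    # learned gate g blends the two per output unit.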
g = tf.sigmoid(tf.matmul(input_layer, G))
a = tf.matmul(input_layer, W)
out = g * a + (1 - g) * m
return out
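# Minimal usage sketch of the layer above (TF1-style graph; names are illustrative):
#     x_in  = tf.placeholder(tf.float32, shape=[None, 16])
#     h     = nalu(x_in, 16)   # hidden NALU layer
#     y_hat = nalu(h, 1)       # single-output regression head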
if __name__ == "__main__":
N_FEATURES = features_input.shape[-1]
EXPORT_DIR = args['directory']
N_NALU_LAYERS = args['n_nalu_layers']
N_NALU_NEURONS = args['n_nalu_neurons'] if args['n_nalu_neurons'] > 0 else N_FEATURES
N_CLASSES = args['n_classes'] # = 1 for regression
TEST_SIZE = args['test_size']
RANDOM_STATE = args['random_state']
N_EPOCHS = args['n_epochs']
LEARNING_RATE = args['learning_rate']
BATCH_SIZE = args['batch_size']
EXPORT_DIR = EXPORT_DIR + '_nnl{}_nnn{}_nc{}_bs{}_lr{}_ne{}_ts{}_rs{}_PP{}_PCA{}/'.format(N_NALU_LAYERS, N_NALU_NEURONS, N_CLASSES,
BATCH_SIZE, LEARNING_RATE, N_EPOCHS,
TEST_SIZE, RANDOM_STATE,
{True:1, False:0}[DO_PP], {True:1, False:0}[DO_PCA])
print("Saving models to path: {}".format(EXPORT_DIR))
idx_train, idx_test = train_test_split(np.arange(labels.size), test_size=TEST_SIZE, random_state=RANDOM_STATE)
X_data, Y_data = features_input[idx_train], labels[idx_train]#[:,None]
LAST_BIT = X_data.shape[0]-BATCH_SIZE*(X_data.shape[0]//BATCH_SIZE)
# Force integer number of batches total by dropping last "<BATCH_SIEZ" number of samples
    # Guard against LAST_BIT == 0, where X_data[:-0] would be an empty slice
    X_data_use = X_data[:-LAST_BIT].copy() if LAST_BIT else X_data.copy()
    Y_data_use = Y_data[:-LAST_BIT].copy() if LAST_BIT else Y_data.copy()
output_dict = {}
output_dict['loss'] = np.zeros(N_EPOCHS)
output_dict['accuracy'] = np.zeros(N_EPOCHS)
output_dict['R2_train'] = np.zeros(N_EPOCHS)
output_dict['R2_test'] = np.zeros(N_EPOCHS)
output_dict['chisq_train'] = np.zeros(N_EPOCHS)
output_dict['chisq_test'] = np.zeros(N_EPOCHS)
with tf.device("/cpu:0"):
# tf.reset_default_graph()
# define placeholders and network
X = tf.placeholder(tf.float32, shape=[None, N_FEATURES])
Y_true = tf.placeholder(tf.float32, shape=[None, 1])
# Setup NALU Layers
nalu_layers = {'nalu0':nalu(X,N_NALU_NEURONS)}
for kn in range(1, N_NALU_LAYERS):
#with tf.name_scope('nalu{}'.format(kn)):
nalu_layers['nalu{}'.format(kn)] = nalu(nalu_layers['nalu{}'.format(kn-1)], N_NALU_NEURONS)
# with tf.name_scope("output"):
Y_pred = nalu(nalu_layers['nalu{}'.format(N_NALU_LAYERS-1)], N_CLASSES) # N_CLASSES = 1 for regression
#with tf.name_scope('loss'):
# loss and train operations
loss = tf.nn.l2_loss(Y_pred - Y_true) # NALU uses mse
optimizer = tf.train.AdamOptimizer(LEARNING_RATE)
train_op = optimizer.minimize(loss)
# Add an op to initialize the variables.
init_op = tf.global_variables_initializer()
# Add ops to save and restore all the variables.
saver = tf.train.Saver()#max_to_keep=N_EPOCHS)
sess_config = tf.ConfigProto(
device_count={"CPU": cpu_count()},
inter_op_parallelism_threads=cpu_count(),
intra_op_parallelism_threads=cpu_count())
"""
with tf.name_scope("eval"):
''' Tensorboard Redouts'''
''' Training R-Squared Score'''
total_error = tf.reduce_sum(tf.square(tf.subtract(Y_true, tf.reduce_mean(Y_true))))
unexplained_error = tf.reduce_sum(tf.square(tf.subtract(Y_true, Y_pred)))
R_squared = tf.subtract(1.0, tf.div(unexplained_error, total_error))
# ''' Testing R-Squared Score'''
# Y_pred_test = Y_pred.eval(feed_dict={X: features[idx_test]})
# total_error_test = tf.reduce_sum(tf.square(tf.subtract(Y_data_use, tf.reduce_mean(Y_data_use))))
# unexplained_error_test = tf.reduce_sum(tf.square(tf.subtract(Y_data_use, Y_pred_test)))
# R_squared_test = tf.subtract(1, tf.div(unexplained_error, total_error))
''' Loss and RMSE '''
squared_error = tf.square(tf.subtract(Y_true, Y_pred))
loss = tf.reduce_sum(tf.sqrt(tf.cast(squared_error, tf.float32)))
rmse = tf.sqrt(tf.reduce_mean(tf.cast(squared_error, tf.float32)))
with tf.name_scope("summary"):
''' Declare Scalar Tensorboard Terms'''
tf.summary.scalar('loss', loss)
tf.summary.scalar('RMSE', rmse)
tf.summary.scalar('R_sqrd', R_squared)
''' Declare Histogram Tensorboard Terms'''
# Squared Error Histogram
tf.summary.histogram('SqErr Hist', squared_error)
# NALU Layers Histogram
for kn in range(N_NALU_LAYERS):
tf.summary.histogram('NALU{}'.format(kn), nalu_layers['nalu{}'.format(kn)])
''' Merge all the summaries and write them out to `export_dir` + `/logs_train_`time_now`` '''
merged = tf.summary.merge_all()
"""
with tf.Session(config=sess_config) as sess:
''' Output all summaries to `export_dir` + `/logs_train_`time_now`` '''
train_writer = tf.summary.FileWriter(EXPORT_DIR + '/logs_train_{}'.format(time_now),sess.graph)
''' END Tensorboard Readout Step'''
sess.run(init_op)
best_test_r2 = 0
for ep in tqdm(range(N_EPOCHS)):
i = 0
gts = 0
# Reshuffle features and labels together
X_data_use, Y_data_use = shuffle(X_data_use, Y_data_use)
for k in tqdm(range(len(X_data_use)//BATCH_SIZE)):
batch_now = range(k*BATCH_SIZE, (k+1)*BATCH_SIZE)
# xs, ys = X_data_use[i:i+BATCH_SIZE], Y_data_use[i:i+BATCH_SIZE]
xs, ys = X_data_use[batch_now], Y_data_use[batch_now]
_, ys_pred, l = sess.run([train_op, Y_pred, loss],
feed_dict={X: xs, Y_true: ys})
# calculate number of correct predictions from batch
gts += np.sum(np.isclose(ys, ys_pred, atol=1e-4, rtol=1e-4))
ytest_pred = Y_pred.eval(feed_dict={X: features_input[idx_test]})
if np.isnan(ytest_pred).any(): ytest_pred = median_sub_nan(ytest_pred)
test_r2 = r2_score(labels[idx_test], ytest_pred)#[:,None]
# print("Test R2 Score: {}".format(test_r2_score))
acc = gts/len(Y_data_use)
train_r2 = r2_score(ys, ys_pred)
print('\nepoch {}, loss: {:.5}, accuracy: {:.5}, Batch R2: {:.5}, Test R2: {:.5}'.format(ep, l, acc, train_r2, test_r2))
"""
output_dict['loss'][ep] = l
output_dict['accuracy'][ep] = acc
output_dict['R2_train'][ep] = train_r2
output_dict['R2_test'][ep] = test_r2
output_dict['chisq_train'][ep] = chisq(ys.flatten(), ys_pred.flatten(), labels_err[i:i+BATCH_SIZE])
output_dict['chisq_test'][ep] = chisq(labels[idx_test], ytest_pred.flatten(), labels_err[idx_test])
"""
if verbose: print('Saving Default to Disk')
now_save_name = EXPORT_DIR + "model_epoch{}_l{:.5}_a{:.5}_BatchR2-{:.5}_TestR2-{:.5}.ckpt".format( ep, l, acc, train_r2, test_r2)
save_path = saver.save(sess, now_save_name)
if test_r2 >= best_test_r2:
if verbose: print('Saving Best to Disk')
best_test_r2 = test_r2
''' Store the Best Scored Test-R2 '''
best_save_name = EXPORT_DIR + "best_test_r2/model_epoch{}_l{:.5}_a{:.5}_BatchR2-{:.5}_TestR2-{:.5}.ckpt".format( ep, l, acc, train_r2, test_r2)
save_path = saver.save(sess, best_save_name)
ep = '_FINAL'
final_save_name = EXPORT_DIR+ "model_epoch{}_l{:.5}_a{:.5}_BatchR2-{:.5}_TestR2-{:.5}.ckpt".format( ep, l, acc, train_r2, test_r2)
save_path = saver.save(sess, final_save_name)
print("Model saved in path: {}".format(save_path))
"""
try:
pd.DataFrame(output_dict, index=range(N_EPOCHS)).to_csv(EXPORT_DIR+ "model_loss_acc_BatchR2_TestR2_DataFrame.csv")
except Exception as e:
print('DataFrame to CSV broke because', str(e))
"""
'''
with tf.name_scope("loss"):
def tf_nll(labels, output, uncs, coeff=1):
error = output - labels
return tf.reduce_sum(tf.divide(tf.squared_difference(output, labels) , tf.square(uncs)))# + tf.log(tf.square(uncs))
#return tf.reduce_sum(1 * (coeff * np.log(2*np.pi) + coeff * tf.log(uncs) + (0.5/uncs) * tf.pow(error, 2)))
negloglike = tf_nll(labels=y, output=output, uncs=unc)
reg_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
loss = tf.add_n([negloglike] + reg_losses, name="chisq")
with tf.name_scope("eval"):
accuracy = tf.reduce_mean(tf.squared_difference(output, y, name="accuracy"))
SqErrRatio= tf.divide(accuracy, tf.reduce_mean(tf.squared_difference(y, tf.reduce_mean(y))))
r2_acc = 1.0 - SqErrRatio
    chisqMean = tf_nll(labels=y, output=tf.reduce_mean(y), uncs=unc)
    chisqModel = tf_nll(labels=y, output=output, uncs=unc)
    rho2_acc = 1.0 - chisqModel / chisqMean
mse_summary = tf.summary.scalar('train_acc' , accuracy )
loss_summary = tf.summary.scalar('loss' , loss )
nll_summary = tf.summary.scalar('negloglike', negloglike)
r2s_summary = tf.summary.scalar('r2_acc' , r2_acc )
p2s_summary = tf.summary.scalar('rho2_acc' , rho2_acc )
val_summary = tf.summary.scalar('val_acc' , accuracy )
# hid1_hist = tf.summary.histogram('hidden1', hidden1)
# hid2_hist = tf.summary.histogram('hidden1', hidden1)
# hid3_hist = tf.summary.histogram('hidden1', hidden1)
file_writer = tf.summary.FileWriter(logdir, tf.get_default_graph())
'''
| mit |
selinerguncu/Yelp-Spatial-Analysis | maps/organizeData.py | 1 | 14340 | import folium
from folium import plugins
import numpy as np
import sqlite3 as sqlite
import os
import sys
import pandas as pd
#extract data from yelp DB and clean it:
DB_PATH = "/Users/selinerguncu/Desktop/PythonProjects/Fun Projects/Yelp/data/yelpCleanDB.sqlite"
conn = sqlite.connect(DB_PATH)
def dataForFoliumMaps(mapParameters):
business = str(mapParameters['business'])
region = str(mapParameters['region'])
price = str(mapParameters['price'])
rating = float(mapParameters['rating'])
# print('mapParameters', mapParameters)
# if 'zipcode' in mapParameters.keys():
# zipcode = str(mapParameters['zipcode'])
# sql = "SELECT longitude, latitude, query_latitude, query_latitude, query_category, query_price, city, zip_code, price, rating, review_count FROM Business WHERE query_category = '%s' AND city = '%s' AND zip_code = '%s' AND price = '%s' AND rating = '%r'" % (business, city, zipcode, price, rating)
# coordinates = pd.read_sql_query(sql, conn)
# else:
# sql = "SELECT longitude, latitude, query_latitude, query_latitude, query_category, query_price, city, zip_code, price, rating, review_count FROM Business WHERE query_category = '%s' AND city = '%s' AND price = '%s' AND rating = '%r'" % (business, city, price, rating)
# coordinates = pd.read_sql_query(sql, conn)
# print('here')
    sql = '''SELECT longitude, latitude, query_longitude, query_latitude, query_category, query_price, city, zipcode, price, rating, review_count, region
             FROM CleanBusinessData
             WHERE query_category = '%s' AND price = '%s' AND rating = '%r' AND region = '%s' ''' % (business, price, rating, region)
# if region == 'Bay Area':
# sql = '''SELECT longitude, latitude, query_latitude, query_latitude, query_category, query_price, city, zipcode, price, rating, review_count, region
# FROM CleanBusinessData
# WHERE query_category = '%s' AND price = '%s' AND rating = '%r' AND city != '%s' ''' % (business, price, rating, 'San Francisco')
# elif region == 'Peninsula':
# sql = '''SELECT longitude, latitude, query_latitude, query_latitude, query_category, query_price, city, zipcode, price, rating, review_count, region
# FROM CleanBusinessData
# WHERE query_category = '%s' AND price = '%s' AND rating = '%r' AND city != '%s' AND city != '%s' AND city != '%s' ''' % (business, price, rating, 'San Francisco', 'San Francisco - Downtown', 'San Francisco - Outer')
# elif region == 'San Francisco':
# sql = '''SELECT longitude, latitude, query_latitude, query_latitude, query_category, query_price, city, zipcode, price, rating, review_count, region
# FROM CleanBusinessData
# WHERE query_category = '%s' AND price = '%s' AND rating = '%r' AND city = ?''' % (business, price, rating, 'San Francisco')
# elif region == 'Downtown SF':
# sql = '''SELECT longitude, latitude, query_latitude, query_latitude, query_category, query_price, city, zipcode, price, rating, review_count, region
# FROM CleanBusinessData
# WHERE query_category = '%s' AND price = '%s' AND rating = '%r' AND city = '%s' ''' % (business, price, rating, 'San Francisco - Downtown')
# elif region == 'Outer SF':
# sql = '''SELECT longitude, latitude, query_latitude, query_latitude, query_category, query_price, city, zipcode, price, rating, review_count, region
# FROM CleanBusinessData
# WHERE query_category = '%s' AND price = '%s' AND rating = '%r' AND city = '%s' ''' % (business, price, rating, 'San Francisco - Outer')
# elif region == 'East Bay':
# sql = '''SELECT longitude, latitude, query_latitude, query_latitude, query_category, query_price, city, zipcode, price, rating, review_count, region
# FROM CleanBusinessData
# WHERE query_category = '%s' AND price = '%s' AND rating = '%r' AND region = '%s' ''' % (business, price, rating, 'eastBay')
# elif region == 'North Bay':
# sql = '''SELECT longitude, latitude, query_latitude, query_latitude, query_category, query_price, city, zipcode, price, rating, review_count, region
# FROM CleanBusinessData
# WHERE query_category = '%s' AND price = '%s' AND rating = '%r' AND region = '%s' ''' % (business, price, rating, 'northBay')
if region == 'Bay Area':
        sql = '''SELECT longitude, latitude, query_longitude, query_latitude, query_category, query_price, city, zipcode, price, rating, review_count, region
FROM CleanBusinessData
WHERE query_category = '%s' AND city != '%s' ''' % (business, 'San Francisco')
elif region == 'Peninsula':
        sql = '''SELECT longitude, latitude, query_longitude, query_latitude, query_category, query_price, city, zipcode, price, rating, review_count, region
FROM CleanBusinessData
WHERE query_category = '%s' AND city != '%s' AND city != '%s' AND city != '%s' ''' % (business, 'San Francisco', 'San Francisco - Downtown', 'San Francisco - Outer')
elif region == 'San Francisco':
        sql = '''SELECT longitude, latitude, query_longitude, query_latitude, query_category, query_price, city, zipcode, price, rating, review_count, region
FROM CleanBusinessData
                 WHERE query_category = '%s' AND city = '%s' ''' % (business, 'San Francisco')
elif region == 'Downtown SF':
        sql = '''SELECT longitude, latitude, query_longitude, query_latitude, query_category, query_price, city, zipcode, price, rating, review_count, region
FROM CleanBusinessData
WHERE query_category = '%s' AND city = '%s' ''' % (business, 'San Francisco - Downtown')
elif region == 'Outer SF':
        sql = '''SELECT longitude, latitude, query_longitude, query_latitude, query_category, query_price, city, zipcode, price, rating, review_count, region
FROM CleanBusinessData
WHERE query_category = '%s' AND city = '%s' ''' % (business, 'San Francisco - Outer')
elif region == 'East Bay':
        sql = '''SELECT longitude, latitude, query_longitude, query_latitude, query_category, query_price, city, zipcode, price, rating, review_count, region
FROM CleanBusinessData
WHERE query_category = '%s' AND region = '%s' ''' % (business, 'eastBay')
elif region == 'North Bay':
        sql = '''SELECT longitude, latitude, query_longitude, query_latitude, query_category, query_price, city, zipcode, price, rating, review_count, region
FROM CleanBusinessData
WHERE query_category = '%s' AND region = '%s' ''' % (business, 'northBay')
coordinatesForFoliumMaps = pd.read_sql_query(sql, conn)
print('coordinatesForFoliumMaps', len(coordinatesForFoliumMaps))
# if len(coordinatesForFoliumMaps) <= 1860:
for i in range(len(coordinatesForFoliumMaps)):
if coordinatesForFoliumMaps["longitude"][i] == None:
coordinatesForFoliumMaps["longitude"][i] = coordinatesForFoliumMaps["query_longitude"][i]
if coordinatesForFoliumMaps["latitude"][i] == None:
coordinatesForFoliumMaps["latitude"][i] = coordinatesForFoliumMaps["query_latitude"][i]
return coordinatesForFoliumMaps
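# Illustrative sketch (added for exposition; the parameter values below are
# hypothetical and this helper is never called in this module).
def _exampleFoliumQuery():
    params = {'business': 'restaurants', 'region': 'East Bay',
              'price': '$$', 'rating': 4.0}
    return dataForFoliumMaps(params)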
def dataForCircleMapsRating(mapParameters):
business = str(mapParameters['business'])
region = str(mapParameters['region'])
price = str(mapParameters['price'])
rating = float(mapParameters['rating'])
print('mapParameters', mapParameters)
# sql = '''SELECT longitude, latitude, query_latitude, query_latitude, query_category, query_price, city, zipcode, price, rating, review_count, region
# FROM CleanBusinessData
# WHERE query_category = '%s' AND price = '%s' AND rating = '%r' AND region = '%r''' % (business, price, rating, region)
if region == 'Bay Area':
        sql = '''SELECT longitude, latitude, query_longitude, query_latitude, query_category, query_price, city, zipcode, price, rating, review_count, region
FROM CleanBusinessData
WHERE query_category = '%s' AND price = '%s' AND city != '%s' ''' % (business, price, 'San Francisco')
elif region == 'Peninsula':
        sql = '''SELECT longitude, latitude, query_longitude, query_latitude, query_category, query_price, city, zipcode, price, rating, review_count, region
FROM CleanBusinessData
WHERE query_category = '%s' AND price = '%s' AND city != '%s' AND city != '%s' AND city != '%s' ''' % (business, price, 'San Francisco', 'San Francisco - Downtown', 'San Francisco - Outer')
elif region == 'San Francisco':
        sql = '''SELECT longitude, latitude, query_longitude, query_latitude, query_category, query_price, city, zipcode, price, rating, review_count, region
FROM CleanBusinessData
                 WHERE query_category = '%s' AND price = '%s' AND city = '%s' ''' % (business, price, 'San Francisco')
elif region == 'Downtown SF':
        sql = '''SELECT longitude, latitude, query_longitude, query_latitude, query_category, query_price, city, zipcode, price, rating, review_count, region
FROM CleanBusinessData
WHERE query_category = '%s' AND price = '%s' AND city = '%s' ''' % (business, price, 'San Francisco - Downtown')
elif region == 'Outer SF':
        sql = '''SELECT longitude, latitude, query_longitude, query_latitude, query_category, query_price, city, zipcode, price, rating, review_count, region
FROM CleanBusinessData
WHERE query_category = '%s' AND price = '%s' AND city = '%s' ''' % (business, price, 'San Francisco - Outer')
elif region == 'East Bay':
        sql = '''SELECT longitude, latitude, query_longitude, query_latitude, query_category, query_price, city, zipcode, price, rating, review_count, region
FROM CleanBusinessData
WHERE query_category = '%s' AND price = '%s' AND region = '%s' ''' % (business, price, 'eastBay')
elif region == 'North Bay':
        sql = '''SELECT longitude, latitude, query_longitude, query_latitude, query_category, query_price, city, zipcode, price, rating, review_count, region
FROM CleanBusinessData
WHERE query_category = '%s' AND price = '%s' AND region = '%s' ''' % (business, price, 'northBay')
coordinatesForCircleMapsRating = pd.read_sql_query(sql, conn)
# if len(coordinatesForCircleMapsRating) <= 1860:
for i in range(len(coordinatesForCircleMapsRating)):
if coordinatesForCircleMapsRating["longitude"][i] == None:
coordinatesForCircleMapsRating["longitude"][i] = coordinatesForCircleMapsRating["query_longitude"][i]
if coordinatesForCircleMapsRating["latitude"][i] == None:
coordinatesForCircleMapsRating["latitude"][i] = coordinatesForCircleMapsRating["query_latitude"][i]
return coordinatesForCircleMapsRating
def dataForCircleMapsPrice(mapParameters):
business = str(mapParameters['business'])
region = str(mapParameters['region'])
price = str(mapParameters['price'])
rating = float(mapParameters['rating'])
print('mapParameters', mapParameters)
# sql = '''SELECT longitude, latitude, query_latitude, query_latitude, query_category, query_price, city, zipcode, price, rating, review_count, region
# FROM CleanBusinessData
# WHERE query_category = '%s' AND price = '%s' AND rating = '%r' AND region = '%r''' % (business, price, rating, region)
if region == 'Bay Area':
        sql = '''SELECT longitude, latitude, query_longitude, query_latitude, query_category, query_price, city, zipcode, price, rating, review_count, region
FROM CleanBusinessData
WHERE query_category = '%s' AND rating = '%r' AND city != '%s' ''' % (business, rating, 'San Francisco')
elif region == 'Peninsula':
        sql = '''SELECT longitude, latitude, query_longitude, query_latitude, query_category, query_price, city, zipcode, price, rating, review_count, region
FROM CleanBusinessData
WHERE query_category = '%s' AND rating = '%r' AND city != '%s' AND city != '%s' AND city != '%s' ''' % (business, rating, 'San Francisco', 'San Francisco - Downtown', 'San Francisco - Outer')
elif region == 'San Francisco':
        sql = '''SELECT longitude, latitude, query_longitude, query_latitude, query_category, query_price, city, zipcode, price, rating, review_count, region
FROM CleanBusinessData
                 WHERE query_category = '%s' AND rating = '%r' AND city = '%s' ''' % (business, rating, 'San Francisco')
elif region == 'Downtown SF':
        sql = '''SELECT longitude, latitude, query_longitude, query_latitude, query_category, query_price, city, zipcode, price, rating, review_count, region
FROM CleanBusinessData
WHERE query_category = '%s' AND rating = '%r' AND city = '%s' ''' % (business, rating, 'San Francisco - Downtown')
elif region == 'Outer SF':
        sql = '''SELECT longitude, latitude, query_longitude, query_latitude, query_category, query_price, city, zipcode, price, rating, review_count, region
FROM CleanBusinessData
WHERE query_category = '%s' AND rating = '%r' AND city = '%s' ''' % (business, rating, 'San Francisco - Outer')
elif region == 'East Bay':
        sql = '''SELECT longitude, latitude, query_longitude, query_latitude, query_category, query_price, city, zipcode, price, rating, review_count, region
FROM CleanBusinessData
WHERE query_category = '%s' AND rating = '%r' AND region = '%s' ''' % (business, rating, 'eastBay')
elif region == 'North Bay':
        sql = '''SELECT longitude, latitude, query_longitude, query_latitude, query_category, query_price, city, zipcode, price, rating, review_count, region
FROM CleanBusinessData
WHERE query_category = '%s' AND rating = '%r' AND region = '%s' ''' % (business, rating, 'northBay')
coordinatesForCircleMapsPrice = pd.read_sql_query(sql, conn)
# if len(coordinatesForCircleMapsPrice) <= 1860:
for i in range(len(coordinatesForCircleMapsPrice)):
if coordinatesForCircleMapsPrice["longitude"][i] == None:
coordinatesForCircleMapsPrice["longitude"][i] = coordinatesForCircleMapsPrice["query_longitude"][i]
if coordinatesForCircleMapsPrice["latitude"][i] == None:
coordinatesForCircleMapsPrice["latitude"][i] = coordinatesForCircleMapsPrice["query_latitude"][i]
return coordinatesForCircleMapsPrice
# coordinates = []
# for i in range(len(coords)): #max ~1860 coordinates
# coordinate = []
# coordinate.append(coords["latitude"][i])
# coordinate.append(coords["longitude"][i])
# coordinates.append(coordinate)
# # convert list of lists to list of tuples
# coordinates = [tuple([i[0],i[1]]) for i in coordinates]
# # print(coordinates[0:10])
# else:
# print("Too many data points; cannot be mapped!")
| mit |
lancezlin/ml_template_py | lib/python2.7/site-packages/pandas/core/algorithms.py | 7 | 42600 | """
Generic data algorithms. This module is experimental at the moment and not
intended for public consumption
"""
from __future__ import division
from warnings import warn
import numpy as np
from pandas import compat, lib, tslib, _np_version_under1p8
from pandas.types.cast import _maybe_promote
from pandas.types.generic import ABCSeries, ABCIndex
from pandas.types.common import (is_integer_dtype,
is_int64_dtype,
is_categorical_dtype,
is_extension_type,
is_datetimetz,
is_period_dtype,
is_period_arraylike,
is_float_dtype,
needs_i8_conversion,
is_categorical,
is_datetime64_dtype,
is_timedelta64_dtype,
is_scalar,
_ensure_platform_int,
_ensure_object,
_ensure_float64,
_ensure_int64,
is_list_like)
from pandas.types.missing import isnull
import pandas.core.common as com
import pandas.algos as algos
import pandas.hashtable as htable
from pandas.compat import string_types
from pandas.tslib import iNaT
# --------------- #
# top-level algos #
# --------------- #
def match(to_match, values, na_sentinel=-1):
"""
Compute locations of to_match into values
Parameters
----------
to_match : array-like
values to find positions of
values : array-like
Unique set of values
na_sentinel : int, default -1
Value to mark "not found"
Examples
--------
Returns
-------
match : ndarray of integers
"""
values = com._asarray_tuplesafe(values)
if issubclass(values.dtype.type, string_types):
values = np.array(values, dtype='O')
f = lambda htype, caster: _match_generic(to_match, values, htype, caster)
result = _hashtable_algo(f, values.dtype, np.int64)
if na_sentinel != -1:
# replace but return a numpy array
# use a Series because it handles dtype conversions properly
from pandas.core.series import Series
result = Series(result.ravel()).replace(-1, na_sentinel).values.\
reshape(result.shape)
return result
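# Illustrative sketch (added for exposition, not part of the original pandas
# source); the helper is never called here. With the default na_sentinel=-1,
# entries of to_match that are absent from values map to -1.
def _example_match():
    # expected: array([ 0,  2,  1,  1, -1])
    return match([0, 2, 1, 1, 3], [0, 1, 2])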
def _match_generic(values, index, table_type, type_caster):
values = type_caster(values)
index = type_caster(index)
table = table_type(min(len(index), 1000000))
table.map_locations(index)
return table.lookup(values)
def unique(values):
"""
Compute unique values (not necessarily sorted) efficiently from input array
of values
Parameters
----------
values : array-like
Returns
-------
uniques
"""
values = com._asarray_tuplesafe(values)
f = lambda htype, caster: _unique_generic(values, htype, caster)
return _hashtable_algo(f, values.dtype)
def _unique_generic(values, table_type, type_caster):
values = type_caster(values)
table = table_type(min(len(values), 1000000))
uniques = table.unique(values)
return type_caster(uniques)
def isin(comps, values):
"""
Compute the isin boolean array
Parameters
----------
comps: array-like
values: array-like
Returns
-------
boolean array same length as comps
"""
if not is_list_like(comps):
raise TypeError("only list-like objects are allowed to be passed"
" to isin(), you passed a "
"[{0}]".format(type(comps).__name__))
comps = np.asarray(comps)
if not is_list_like(values):
raise TypeError("only list-like objects are allowed to be passed"
" to isin(), you passed a "
"[{0}]".format(type(values).__name__))
if not isinstance(values, np.ndarray):
values = list(values)
# GH11232
    # work-around for numpy < 1.8 and comparisons on py3
# faster for larger cases to use np.in1d
if (_np_version_under1p8 and compat.PY3) or len(comps) > 1000000:
f = lambda x, y: np.in1d(x, np.asarray(list(y)))
else:
f = lambda x, y: lib.ismember_int64(x, set(y))
# may need i8 conversion for proper membership testing
if is_datetime64_dtype(comps):
from pandas.tseries.tools import to_datetime
values = to_datetime(values)._values.view('i8')
comps = comps.view('i8')
elif is_timedelta64_dtype(comps):
from pandas.tseries.timedeltas import to_timedelta
values = to_timedelta(values)._values.view('i8')
comps = comps.view('i8')
elif is_int64_dtype(comps):
pass
else:
f = lambda x, y: lib.ismember(x, set(values))
return f(comps, values)
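# Illustrative sketch (added for exposition, not part of the original pandas
# source); never called here.
def _example_isin():
    # expected: array([False,  True, False])
    return isin(np.array([1, 2, 3], dtype=np.int64), [2, 4])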
def safe_sort(values, labels=None, na_sentinel=-1, assume_unique=False):
"""
Sort ``values`` and reorder corresponding ``labels``.
``values`` should be unique if ``labels`` is not None.
Safe for use with mixed types (int, str), orders ints before strs.
.. versionadded:: 0.19.0
Parameters
----------
values : list-like
Sequence; must be unique if ``labels`` is not None.
labels : list_like
Indices to ``values``. All out of bound indices are treated as
"not found" and will be masked with ``na_sentinel``.
na_sentinel : int, default -1
Value in ``labels`` to mark "not found".
Ignored when ``labels`` is None.
assume_unique : bool, default False
When True, ``values`` are assumed to be unique, which can speed up
the calculation. Ignored when ``labels`` is None.
Returns
-------
ordered : ndarray
Sorted ``values``
new_labels : ndarray
Reordered ``labels``; returned when ``labels`` is not None.
Raises
------
TypeError
* If ``values`` is not list-like or if ``labels`` is neither None
nor list-like
* If ``values`` cannot be sorted
ValueError
* If ``labels`` is not None and ``values`` contain duplicates.
"""
if not is_list_like(values):
raise TypeError("Only list-like objects are allowed to be passed to"
"safe_sort as values")
values = np.array(values, copy=False)
def sort_mixed(values):
# order ints before strings, safe in py3
str_pos = np.array([isinstance(x, string_types) for x in values],
dtype=bool)
nums = np.sort(values[~str_pos])
strs = np.sort(values[str_pos])
return _ensure_object(np.concatenate([nums, strs]))
sorter = None
if compat.PY3 and lib.infer_dtype(values) == 'mixed-integer':
# unorderable in py3 if mixed str/int
ordered = sort_mixed(values)
else:
try:
sorter = values.argsort()
ordered = values.take(sorter)
except TypeError:
# try this anyway
ordered = sort_mixed(values)
# labels:
if labels is None:
return ordered
if not is_list_like(labels):
raise TypeError("Only list-like objects or None are allowed to be"
"passed to safe_sort as labels")
labels = _ensure_platform_int(np.asarray(labels))
from pandas import Index
if not assume_unique and not Index(values).is_unique:
raise ValueError("values should be unique if labels is not None")
if sorter is None:
# mixed types
(hash_klass, _), values = _get_data_algo(values, _hashtables)
t = hash_klass(len(values))
t.map_locations(values)
sorter = _ensure_platform_int(t.lookup(ordered))
reverse_indexer = np.empty(len(sorter), dtype=np.int_)
reverse_indexer.put(sorter, np.arange(len(sorter)))
mask = (labels < -len(values)) | (labels >= len(values)) | \
(labels == na_sentinel)
# (Out of bound indices will be masked with `na_sentinel` next, so we may
# deal with them here without performance loss using `mode='wrap'`.)
new_labels = reverse_indexer.take(labels, mode='wrap')
np.putmask(new_labels, mask, na_sentinel)
return ordered, _ensure_platform_int(new_labels)
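# Illustrative sketch (added for exposition, not part of the original pandas
# source); never called here. labels are remapped to index into the sorted values.
def _example_safe_sort():
    # expected: (array([1, 2, 3]), array([2, 0, 1, 0]))
    return safe_sort(np.array([3, 1, 2]), labels=[0, 1, 2, 1])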
def factorize(values, sort=False, order=None, na_sentinel=-1, size_hint=None):
"""
Encode input values as an enumerated type or categorical variable
Parameters
----------
values : ndarray (1-d)
Sequence
sort : boolean, default False
Sort by values
na_sentinel : int, default -1
Value to mark "not found"
size_hint : hint to the hashtable sizer
Returns
-------
labels : the indexer to the original array
uniques : ndarray (1-d) or Index
the unique values. Index is returned when passed values is Index or
Series
note: an array of Periods will ignore sort as it returns an always sorted
PeriodIndex
"""
from pandas import Index, Series, DatetimeIndex, PeriodIndex
# handling two possibilities here
# - for a numpy datetimelike simply view as i8 then cast back
# - for an extension datetimelike view as i8 then
# reconstruct from boxed values to transfer metadata
dtype = None
if needs_i8_conversion(values):
if is_period_dtype(values):
values = PeriodIndex(values)
vals = values.asi8
elif is_datetimetz(values):
values = DatetimeIndex(values)
vals = values.asi8
else:
# numpy dtype
dtype = values.dtype
vals = values.view(np.int64)
else:
vals = np.asarray(values)
(hash_klass, vec_klass), vals = _get_data_algo(vals, _hashtables)
table = hash_klass(size_hint or len(vals))
uniques = vec_klass()
labels = table.get_labels(vals, uniques, 0, na_sentinel, True)
labels = _ensure_platform_int(labels)
uniques = uniques.to_array()
if sort and len(uniques) > 0:
uniques, labels = safe_sort(uniques, labels, na_sentinel=na_sentinel,
assume_unique=True)
if dtype is not None:
uniques = uniques.astype(dtype)
if isinstance(values, Index):
uniques = values._shallow_copy(uniques, name=None)
elif isinstance(values, Series):
uniques = Index(uniques)
return labels, uniques
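# Illustrative sketch (added for exposition, not part of the original pandas
# source); never called here. With sort=False the uniques keep first-seen order.
def _example_factorize():
    # expected: labels -> array([0, 0, 1, 2, 0]),
    #           uniques -> array(['b', 'a', 'c'], dtype=object)
    return factorize(np.array(['b', 'b', 'a', 'c', 'b'], dtype=object))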
def value_counts(values, sort=True, ascending=False, normalize=False,
bins=None, dropna=True):
"""
Compute a histogram of the counts of non-null values.
Parameters
----------
values : ndarray (1-d)
sort : boolean, default True
Sort by values
ascending : boolean, default False
Sort in ascending order
normalize: boolean, default False
If True then compute a relative histogram
bins : integer, optional
Rather than count values, group them into half-open bins,
convenience for pd.cut, only works with numeric data
dropna : boolean, default True
Don't include counts of NaN
Returns
-------
value_counts : Series
"""
from pandas.core.series import Series
name = getattr(values, 'name', None)
if bins is not None:
try:
from pandas.tools.tile import cut
values = Series(values).values
cat, bins = cut(values, bins, retbins=True)
except TypeError:
raise TypeError("bins argument only works with numeric data.")
values = cat.codes
if is_extension_type(values) and not is_datetimetz(values):
# handle Categorical and sparse,
        # datetime tz can be handled in ndarray path
result = Series(values).values.value_counts(dropna=dropna)
result.name = name
counts = result.values
else:
# ndarray path. pass original to handle DatetimeTzBlock
keys, counts = _value_counts_arraylike(values, dropna=dropna)
from pandas import Index, Series
if not isinstance(keys, Index):
keys = Index(keys)
result = Series(counts, index=keys, name=name)
if bins is not None:
# TODO: This next line should be more efficient
result = result.reindex(np.arange(len(cat.categories)),
fill_value=0)
result.index = bins[:-1]
if sort:
result = result.sort_values(ascending=ascending)
if normalize:
result = result / float(counts.sum())
return result
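# Illustrative sketch (added for exposition, not part of the original pandas
# source); never called here.
def _example_value_counts():
    # expected: a Series with 'a' -> 3 first (sorted by count, descending),
    # followed by the singleton counts for 'b' and 'c'
    return value_counts(np.array(['a', 'a', 'b', 'c', 'a'], dtype=object))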
def _value_counts_arraylike(values, dropna=True):
is_datetimetz_type = is_datetimetz(values)
is_period_type = (is_period_dtype(values) or
is_period_arraylike(values))
orig = values
from pandas.core.series import Series
values = Series(values).values
dtype = values.dtype
if needs_i8_conversion(dtype) or is_period_type:
from pandas.tseries.index import DatetimeIndex
from pandas.tseries.period import PeriodIndex
if is_period_type:
# values may be an object
values = PeriodIndex(values)
freq = values.freq
values = values.view(np.int64)
keys, counts = htable.value_count_int64(values, dropna)
if dropna:
msk = keys != iNaT
keys, counts = keys[msk], counts[msk]
# convert the keys back to the dtype we came in
keys = keys.astype(dtype)
# dtype handling
if is_datetimetz_type:
keys = DatetimeIndex._simple_new(keys, tz=orig.dtype.tz)
if is_period_type:
keys = PeriodIndex._simple_new(keys, freq=freq)
elif is_integer_dtype(dtype):
values = _ensure_int64(values)
keys, counts = htable.value_count_int64(values, dropna)
elif is_float_dtype(dtype):
values = _ensure_float64(values)
keys, counts = htable.value_count_float64(values, dropna)
else:
values = _ensure_object(values)
mask = isnull(values)
keys, counts = htable.value_count_object(values, mask)
if not dropna and mask.any():
keys = np.insert(keys, 0, np.NaN)
counts = np.insert(counts, 0, mask.sum())
return keys, counts
def duplicated(values, keep='first'):
"""
Return boolean ndarray denoting duplicate values
.. versionadded:: 0.19.0
Parameters
----------
keep : {'first', 'last', False}, default 'first'
- ``first`` : Mark duplicates as ``True`` except for the first
occurrence.
- ``last`` : Mark duplicates as ``True`` except for the last
occurrence.
- False : Mark all duplicates as ``True``.
Returns
-------
duplicated : ndarray
"""
dtype = values.dtype
# no need to revert to original type
if needs_i8_conversion(dtype):
values = values.view(np.int64)
elif is_period_arraylike(values):
from pandas.tseries.period import PeriodIndex
values = PeriodIndex(values).asi8
elif is_categorical_dtype(dtype):
values = values.values.codes
elif isinstance(values, (ABCSeries, ABCIndex)):
values = values.values
if is_integer_dtype(dtype):
values = _ensure_int64(values)
duplicated = htable.duplicated_int64(values, keep=keep)
elif is_float_dtype(dtype):
values = _ensure_float64(values)
duplicated = htable.duplicated_float64(values, keep=keep)
else:
values = _ensure_object(values)
duplicated = htable.duplicated_object(values, keep=keep)
return duplicated
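# Illustrative sketch (added for exposition, not part of the original pandas
# source); never called here. keep='first' marks every repeat after the first
# occurrence.
def _example_duplicated():
    # expected: array([False, False,  True, False,  True])
    return duplicated(np.array([1, 2, 1, 3, 1]))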
def mode(values):
"""Returns the mode or mode(s) of the passed Series or ndarray (sorted)"""
# must sort because hash order isn't necessarily defined.
from pandas.core.series import Series
if isinstance(values, Series):
constructor = values._constructor
values = values.values
else:
values = np.asanyarray(values)
constructor = Series
dtype = values.dtype
if is_integer_dtype(values):
values = _ensure_int64(values)
result = constructor(sorted(htable.mode_int64(values)), dtype=dtype)
elif issubclass(values.dtype.type, (np.datetime64, np.timedelta64)):
dtype = values.dtype
values = values.view(np.int64)
result = constructor(sorted(htable.mode_int64(values)), dtype=dtype)
elif is_categorical_dtype(values):
result = constructor(values.mode())
else:
mask = isnull(values)
values = _ensure_object(values)
res = htable.mode_object(values, mask)
try:
res = sorted(res)
except TypeError as e:
warn("Unable to sort modes: %s" % e)
result = constructor(res, dtype=dtype)
return result
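# Illustrative sketch (added for exposition, not part of the original pandas
# source); never called here.
def _example_mode():
    # expected: a Series containing the single mode, 2
    return mode(np.array([1, 2, 2, 3]))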
def rank(values, axis=0, method='average', na_option='keep',
ascending=True, pct=False):
"""
"""
if values.ndim == 1:
f, values = _get_data_algo(values, _rank1d_functions)
ranks = f(values, ties_method=method, ascending=ascending,
na_option=na_option, pct=pct)
elif values.ndim == 2:
f, values = _get_data_algo(values, _rank2d_functions)
ranks = f(values, axis=axis, ties_method=method,
ascending=ascending, na_option=na_option, pct=pct)
return ranks
_rank1d_functions = {
'float64': algos.rank_1d_float64,
'int64': algos.rank_1d_int64,
'generic': algos.rank_1d_generic
}
_rank2d_functions = {
'float64': algos.rank_2d_float64,
'int64': algos.rank_2d_int64,
'generic': algos.rank_2d_generic
}
def quantile(x, q, interpolation_method='fraction'):
"""
Compute sample quantile or quantiles of the input array. For example, q=0.5
computes the median.
The `interpolation_method` parameter supports three values, namely
`fraction` (default), `lower` and `higher`. Interpolation is done only,
if the desired quantile lies between two data points `i` and `j`. For
`fraction`, the result is an interpolated value between `i` and `j`;
for `lower`, the result is `i`, for `higher` the result is `j`.
Parameters
----------
x : ndarray
Values from which to extract score.
q : scalar or array
Percentile at which to extract score.
interpolation_method : {'fraction', 'lower', 'higher'}, optional
This optional parameter specifies the interpolation method to use,
when the desired quantile lies between two data points `i` and `j`:
- fraction: `i + (j - i)*fraction`, where `fraction` is the
fractional part of the index surrounded by `i` and `j`.
        - lower: `i`.
- higher: `j`.
Returns
-------
score : float
Score at percentile.
Examples
--------
>>> from scipy import stats
>>> a = np.arange(100)
>>> stats.scoreatpercentile(a, 50)
49.5
"""
x = np.asarray(x)
mask = isnull(x)
x = x[~mask]
values = np.sort(x)
def _get_score(at):
if len(values) == 0:
return np.nan
idx = at * (len(values) - 1)
if idx % 1 == 0:
score = values[int(idx)]
else:
if interpolation_method == 'fraction':
score = _interpolate(values[int(idx)], values[int(idx) + 1],
idx % 1)
elif interpolation_method == 'lower':
score = values[np.floor(idx)]
elif interpolation_method == 'higher':
score = values[np.ceil(idx)]
else:
raise ValueError("interpolation_method can only be 'fraction' "
", 'lower' or 'higher'")
return score
if is_scalar(q):
return _get_score(q)
else:
q = np.asarray(q, np.float64)
return algos.arrmap_float64(q, _get_score)
def _interpolate(a, b, fraction):
"""Returns the point at the given fraction between a and b, where
'fraction' must be between 0 and 1.
"""
return a + (b - a) * fraction
def nsmallest(arr, n, keep='first'):
"""
Find the indices of the n smallest values of a numpy array.
Note: Fails silently with NaN.
"""
if keep == 'last':
arr = arr[::-1]
narr = len(arr)
n = min(n, narr)
sdtype = str(arr.dtype)
arr = arr.view(_dtype_map.get(sdtype, sdtype))
kth_val = algos.kth_smallest(arr.copy(), n - 1)
return _finalize_nsmallest(arr, kth_val, n, keep, narr)
def nlargest(arr, n, keep='first'):
"""
Find the indices of the n largest values of a numpy array.
Note: Fails silently with NaN.
"""
sdtype = str(arr.dtype)
arr = arr.view(_dtype_map.get(sdtype, sdtype))
return nsmallest(-arr, n, keep=keep)
def select_n_slow(dropped, n, keep, method):
reverse_it = (keep == 'last' or method == 'nlargest')
ascending = method == 'nsmallest'
slc = np.s_[::-1] if reverse_it else np.s_[:]
return dropped[slc].sort_values(ascending=ascending).head(n)
_select_methods = {'nsmallest': nsmallest, 'nlargest': nlargest}
def select_n_series(series, n, keep, method):
"""Implement n largest/smallest for pandas Series
Parameters
----------
series : pandas.Series object
n : int
keep : {'first', 'last'}, default 'first'
method : str, {'nlargest', 'nsmallest'}
Returns
-------
nordered : Series
"""
dtype = series.dtype
if not issubclass(dtype.type, (np.integer, np.floating, np.datetime64,
np.timedelta64)):
raise TypeError("Cannot use method %r with dtype %s" % (method, dtype))
if keep not in ('first', 'last'):
raise ValueError('keep must be either "first", "last"')
if n <= 0:
return series[[]]
dropped = series.dropna()
if n >= len(series):
return select_n_slow(dropped, n, keep, method)
inds = _select_methods[method](dropped.values, n, keep)
return dropped.iloc[inds]
def select_n_frame(frame, columns, n, method, keep):
"""Implement n largest/smallest for pandas DataFrame
Parameters
----------
frame : pandas.DataFrame object
columns : list or str
n : int
keep : {'first', 'last'}, default 'first'
method : str, {'nlargest', 'nsmallest'}
Returns
-------
nordered : DataFrame
"""
from pandas.core.series import Series
if not is_list_like(columns):
columns = [columns]
columns = list(columns)
ser = getattr(frame[columns[0]], method)(n, keep=keep)
if isinstance(ser, Series):
ser = ser.to_frame()
return ser.merge(frame, on=columns[0], left_index=True)[frame.columns]
def _finalize_nsmallest(arr, kth_val, n, keep, narr):
ns, = np.nonzero(arr <= kth_val)
inds = ns[arr[ns].argsort(kind='mergesort')][:n]
if keep == 'last':
# reverse indices
return narr - 1 - inds
else:
return inds
_dtype_map = {'datetime64[ns]': 'int64', 'timedelta64[ns]': 'int64'}
# ------- #
# helpers #
# ------- #
def _hashtable_algo(f, dtype, return_dtype=None):
"""
f(HashTable, type_caster) -> result
"""
if is_float_dtype(dtype):
return f(htable.Float64HashTable, _ensure_float64)
elif is_integer_dtype(dtype):
return f(htable.Int64HashTable, _ensure_int64)
elif is_datetime64_dtype(dtype):
return_dtype = return_dtype or 'M8[ns]'
return f(htable.Int64HashTable, _ensure_int64).view(return_dtype)
elif is_timedelta64_dtype(dtype):
return_dtype = return_dtype or 'm8[ns]'
return f(htable.Int64HashTable, _ensure_int64).view(return_dtype)
else:
return f(htable.PyObjectHashTable, _ensure_object)
_hashtables = {
'float64': (htable.Float64HashTable, htable.Float64Vector),
'int64': (htable.Int64HashTable, htable.Int64Vector),
'generic': (htable.PyObjectHashTable, htable.ObjectVector)
}
def _get_data_algo(values, func_map):
if is_float_dtype(values):
f = func_map['float64']
values = _ensure_float64(values)
elif needs_i8_conversion(values):
f = func_map['int64']
values = values.view('i8')
elif is_integer_dtype(values):
f = func_map['int64']
values = _ensure_int64(values)
else:
f = func_map['generic']
values = _ensure_object(values)
return f, values
# ---- #
# take #
# ---- #
def _view_wrapper(f, arr_dtype=None, out_dtype=None, fill_wrap=None):
def wrapper(arr, indexer, out, fill_value=np.nan):
if arr_dtype is not None:
arr = arr.view(arr_dtype)
if out_dtype is not None:
out = out.view(out_dtype)
if fill_wrap is not None:
fill_value = fill_wrap(fill_value)
f(arr, indexer, out, fill_value=fill_value)
return wrapper
def _convert_wrapper(f, conv_dtype):
def wrapper(arr, indexer, out, fill_value=np.nan):
arr = arr.astype(conv_dtype)
f(arr, indexer, out, fill_value=fill_value)
return wrapper
def _take_2d_multi_generic(arr, indexer, out, fill_value, mask_info):
# this is not ideal, performance-wise, but it's better than raising
# an exception (best to optimize in Cython to avoid getting here)
row_idx, col_idx = indexer
if mask_info is not None:
(row_mask, col_mask), (row_needs, col_needs) = mask_info
else:
row_mask = row_idx == -1
col_mask = col_idx == -1
row_needs = row_mask.any()
col_needs = col_mask.any()
if fill_value is not None:
if row_needs:
out[row_mask, :] = fill_value
if col_needs:
out[:, col_mask] = fill_value
for i in range(len(row_idx)):
u_ = row_idx[i]
for j in range(len(col_idx)):
v = col_idx[j]
out[i, j] = arr[u_, v]
def _take_nd_generic(arr, indexer, out, axis, fill_value, mask_info):
if mask_info is not None:
mask, needs_masking = mask_info
else:
mask = indexer == -1
needs_masking = mask.any()
if arr.dtype != out.dtype:
arr = arr.astype(out.dtype)
if arr.shape[axis] > 0:
arr.take(_ensure_platform_int(indexer), axis=axis, out=out)
if needs_masking:
outindexer = [slice(None)] * arr.ndim
outindexer[axis] = mask
out[tuple(outindexer)] = fill_value
_take_1d_dict = {
('int8', 'int8'): algos.take_1d_int8_int8,
('int8', 'int32'): algos.take_1d_int8_int32,
('int8', 'int64'): algos.take_1d_int8_int64,
('int8', 'float64'): algos.take_1d_int8_float64,
('int16', 'int16'): algos.take_1d_int16_int16,
('int16', 'int32'): algos.take_1d_int16_int32,
('int16', 'int64'): algos.take_1d_int16_int64,
('int16', 'float64'): algos.take_1d_int16_float64,
('int32', 'int32'): algos.take_1d_int32_int32,
('int32', 'int64'): algos.take_1d_int32_int64,
('int32', 'float64'): algos.take_1d_int32_float64,
('int64', 'int64'): algos.take_1d_int64_int64,
('int64', 'float64'): algos.take_1d_int64_float64,
('float32', 'float32'): algos.take_1d_float32_float32,
('float32', 'float64'): algos.take_1d_float32_float64,
('float64', 'float64'): algos.take_1d_float64_float64,
('object', 'object'): algos.take_1d_object_object,
('bool', 'bool'): _view_wrapper(algos.take_1d_bool_bool, np.uint8,
np.uint8),
('bool', 'object'): _view_wrapper(algos.take_1d_bool_object, np.uint8,
None),
('datetime64[ns]', 'datetime64[ns]'): _view_wrapper(
algos.take_1d_int64_int64, np.int64, np.int64, np.int64)
}
_take_2d_axis0_dict = {
('int8', 'int8'): algos.take_2d_axis0_int8_int8,
('int8', 'int32'): algos.take_2d_axis0_int8_int32,
('int8', 'int64'): algos.take_2d_axis0_int8_int64,
('int8', 'float64'): algos.take_2d_axis0_int8_float64,
('int16', 'int16'): algos.take_2d_axis0_int16_int16,
('int16', 'int32'): algos.take_2d_axis0_int16_int32,
('int16', 'int64'): algos.take_2d_axis0_int16_int64,
('int16', 'float64'): algos.take_2d_axis0_int16_float64,
('int32', 'int32'): algos.take_2d_axis0_int32_int32,
('int32', 'int64'): algos.take_2d_axis0_int32_int64,
('int32', 'float64'): algos.take_2d_axis0_int32_float64,
('int64', 'int64'): algos.take_2d_axis0_int64_int64,
('int64', 'float64'): algos.take_2d_axis0_int64_float64,
('float32', 'float32'): algos.take_2d_axis0_float32_float32,
('float32', 'float64'): algos.take_2d_axis0_float32_float64,
('float64', 'float64'): algos.take_2d_axis0_float64_float64,
('object', 'object'): algos.take_2d_axis0_object_object,
('bool', 'bool'): _view_wrapper(algos.take_2d_axis0_bool_bool, np.uint8,
np.uint8),
('bool', 'object'): _view_wrapper(algos.take_2d_axis0_bool_object,
np.uint8, None),
('datetime64[ns]', 'datetime64[ns]'):
_view_wrapper(algos.take_2d_axis0_int64_int64, np.int64, np.int64,
fill_wrap=np.int64)
}
_take_2d_axis1_dict = {
('int8', 'int8'): algos.take_2d_axis1_int8_int8,
('int8', 'int32'): algos.take_2d_axis1_int8_int32,
('int8', 'int64'): algos.take_2d_axis1_int8_int64,
('int8', 'float64'): algos.take_2d_axis1_int8_float64,
('int16', 'int16'): algos.take_2d_axis1_int16_int16,
('int16', 'int32'): algos.take_2d_axis1_int16_int32,
('int16', 'int64'): algos.take_2d_axis1_int16_int64,
('int16', 'float64'): algos.take_2d_axis1_int16_float64,
('int32', 'int32'): algos.take_2d_axis1_int32_int32,
('int32', 'int64'): algos.take_2d_axis1_int32_int64,
('int32', 'float64'): algos.take_2d_axis1_int32_float64,
('int64', 'int64'): algos.take_2d_axis1_int64_int64,
('int64', 'float64'): algos.take_2d_axis1_int64_float64,
('float32', 'float32'): algos.take_2d_axis1_float32_float32,
('float32', 'float64'): algos.take_2d_axis1_float32_float64,
('float64', 'float64'): algos.take_2d_axis1_float64_float64,
('object', 'object'): algos.take_2d_axis1_object_object,
('bool', 'bool'): _view_wrapper(algos.take_2d_axis1_bool_bool, np.uint8,
np.uint8),
('bool', 'object'): _view_wrapper(algos.take_2d_axis1_bool_object,
np.uint8, None),
('datetime64[ns]', 'datetime64[ns]'):
_view_wrapper(algos.take_2d_axis1_int64_int64, np.int64, np.int64,
fill_wrap=np.int64)
}
_take_2d_multi_dict = {
('int8', 'int8'): algos.take_2d_multi_int8_int8,
('int8', 'int32'): algos.take_2d_multi_int8_int32,
('int8', 'int64'): algos.take_2d_multi_int8_int64,
('int8', 'float64'): algos.take_2d_multi_int8_float64,
('int16', 'int16'): algos.take_2d_multi_int16_int16,
('int16', 'int32'): algos.take_2d_multi_int16_int32,
('int16', 'int64'): algos.take_2d_multi_int16_int64,
('int16', 'float64'): algos.take_2d_multi_int16_float64,
('int32', 'int32'): algos.take_2d_multi_int32_int32,
('int32', 'int64'): algos.take_2d_multi_int32_int64,
('int32', 'float64'): algos.take_2d_multi_int32_float64,
('int64', 'int64'): algos.take_2d_multi_int64_int64,
('int64', 'float64'): algos.take_2d_multi_int64_float64,
('float32', 'float32'): algos.take_2d_multi_float32_float32,
('float32', 'float64'): algos.take_2d_multi_float32_float64,
('float64', 'float64'): algos.take_2d_multi_float64_float64,
('object', 'object'): algos.take_2d_multi_object_object,
('bool', 'bool'): _view_wrapper(algos.take_2d_multi_bool_bool, np.uint8,
np.uint8),
('bool', 'object'): _view_wrapper(algos.take_2d_multi_bool_object,
np.uint8, None),
('datetime64[ns]', 'datetime64[ns]'):
_view_wrapper(algos.take_2d_multi_int64_int64, np.int64, np.int64,
fill_wrap=np.int64)
}
def _get_take_nd_function(ndim, arr_dtype, out_dtype, axis=0, mask_info=None):
if ndim <= 2:
tup = (arr_dtype.name, out_dtype.name)
if ndim == 1:
func = _take_1d_dict.get(tup, None)
elif ndim == 2:
if axis == 0:
func = _take_2d_axis0_dict.get(tup, None)
else:
func = _take_2d_axis1_dict.get(tup, None)
if func is not None:
return func
tup = (out_dtype.name, out_dtype.name)
if ndim == 1:
func = _take_1d_dict.get(tup, None)
elif ndim == 2:
if axis == 0:
func = _take_2d_axis0_dict.get(tup, None)
else:
func = _take_2d_axis1_dict.get(tup, None)
if func is not None:
func = _convert_wrapper(func, out_dtype)
return func
def func(arr, indexer, out, fill_value=np.nan):
indexer = _ensure_int64(indexer)
_take_nd_generic(arr, indexer, out, axis=axis, fill_value=fill_value,
mask_info=mask_info)
return func
def take_nd(arr, indexer, axis=0, out=None, fill_value=np.nan, mask_info=None,
allow_fill=True):
"""
Specialized Cython take which sets NaN values in one pass
Parameters
----------
arr : ndarray
Input array
indexer : ndarray
1-D array of indices to take, subarrays corresponding to -1 value
        indices are filled with fill_value
axis : int, default 0
Axis to take from
out : ndarray or None, default None
Optional output array, must be appropriate type to hold input and
fill_value together, if indexer has any -1 value entries; call
_maybe_promote to determine this type for any fill_value
fill_value : any, default np.nan
Fill value to replace -1 values with
mask_info : tuple of (ndarray, boolean)
If provided, value should correspond to:
(indexer != -1, (indexer != -1).any())
If not provided, it will be computed internally if necessary
allow_fill : boolean, default True
If False, indexer is assumed to contain no -1 values so no filling
will be done. This short-circuits computation of a mask. Result is
undefined if allow_fill == False and -1 is present in indexer.
"""
# dispatch to internal type takes
if is_categorical(arr):
return arr.take_nd(indexer, fill_value=fill_value,
allow_fill=allow_fill)
elif is_datetimetz(arr):
return arr.take(indexer, fill_value=fill_value, allow_fill=allow_fill)
if indexer is None:
indexer = np.arange(arr.shape[axis], dtype=np.int64)
dtype, fill_value = arr.dtype, arr.dtype.type()
else:
indexer = _ensure_int64(indexer)
if not allow_fill:
dtype, fill_value = arr.dtype, arr.dtype.type()
mask_info = None, False
else:
# check for promotion based on types only (do this first because
# it's faster than computing a mask)
dtype, fill_value = _maybe_promote(arr.dtype, fill_value)
if dtype != arr.dtype and (out is None or out.dtype != dtype):
# check if promotion is actually required based on indexer
if mask_info is not None:
mask, needs_masking = mask_info
else:
mask = indexer == -1
needs_masking = mask.any()
mask_info = mask, needs_masking
if needs_masking:
if out is not None and out.dtype != dtype:
raise TypeError('Incompatible type for fill_value')
else:
# if not, then depromote, set fill_value to dummy
# (it won't be used but we don't want the cython code
# to crash when trying to cast it to dtype)
dtype, fill_value = arr.dtype, arr.dtype.type()
flip_order = False
if arr.ndim == 2:
if arr.flags.f_contiguous:
flip_order = True
if flip_order:
arr = arr.T
axis = arr.ndim - axis - 1
if out is not None:
out = out.T
# at this point, it's guaranteed that dtype can hold both the arr values
# and the fill_value
if out is None:
out_shape = list(arr.shape)
out_shape[axis] = len(indexer)
out_shape = tuple(out_shape)
if arr.flags.f_contiguous and axis == arr.ndim - 1:
# minor tweak that can make an order-of-magnitude difference
# for dataframes initialized directly from 2-d ndarrays
# (s.t. df.values is c-contiguous and df._data.blocks[0] is its
# f-contiguous transpose)
out = np.empty(out_shape, dtype=dtype, order='F')
else:
out = np.empty(out_shape, dtype=dtype)
func = _get_take_nd_function(arr.ndim, arr.dtype, out.dtype, axis=axis,
mask_info=mask_info)
indexer = _ensure_int64(indexer)
func(arr, indexer, out, fill_value)
if flip_order:
out = out.T
return out
take_1d = take_nd
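# Illustrative sketch (added for exposition, not part of the original pandas
# source); never called here. A -1 entry in the indexer is replaced by
# fill_value, which promotes the integer input to float64.
def _example_take_nd():
    # expected: array([10., 30., nan])
    return take_nd(np.array([10, 20, 30]), np.array([0, 2, -1]))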
def take_2d_multi(arr, indexer, out=None, fill_value=np.nan, mask_info=None,
allow_fill=True):
"""
Specialized Cython take which sets NaN values in one pass
"""
if indexer is None or (indexer[0] is None and indexer[1] is None):
row_idx = np.arange(arr.shape[0], dtype=np.int64)
col_idx = np.arange(arr.shape[1], dtype=np.int64)
indexer = row_idx, col_idx
dtype, fill_value = arr.dtype, arr.dtype.type()
else:
row_idx, col_idx = indexer
if row_idx is None:
row_idx = np.arange(arr.shape[0], dtype=np.int64)
else:
row_idx = _ensure_int64(row_idx)
if col_idx is None:
col_idx = np.arange(arr.shape[1], dtype=np.int64)
else:
col_idx = _ensure_int64(col_idx)
indexer = row_idx, col_idx
if not allow_fill:
dtype, fill_value = arr.dtype, arr.dtype.type()
mask_info = None, False
else:
# check for promotion based on types only (do this first because
# it's faster than computing a mask)
dtype, fill_value = _maybe_promote(arr.dtype, fill_value)
if dtype != arr.dtype and (out is None or out.dtype != dtype):
# check if promotion is actually required based on indexer
if mask_info is not None:
(row_mask, col_mask), (row_needs, col_needs) = mask_info
else:
row_mask = row_idx == -1
col_mask = col_idx == -1
row_needs = row_mask.any()
col_needs = col_mask.any()
mask_info = (row_mask, col_mask), (row_needs, col_needs)
if row_needs or col_needs:
if out is not None and out.dtype != dtype:
raise TypeError('Incompatible type for fill_value')
else:
# if not, then depromote, set fill_value to dummy
# (it won't be used but we don't want the cython code
# to crash when trying to cast it to dtype)
dtype, fill_value = arr.dtype, arr.dtype.type()
# at this point, it's guaranteed that dtype can hold both the arr values
# and the fill_value
if out is None:
out_shape = len(row_idx), len(col_idx)
out = np.empty(out_shape, dtype=dtype)
func = _take_2d_multi_dict.get((arr.dtype.name, out.dtype.name), None)
if func is None and arr.dtype != out.dtype:
func = _take_2d_multi_dict.get((out.dtype.name, out.dtype.name), None)
if func is not None:
func = _convert_wrapper(func, out.dtype)
if func is None:
def func(arr, indexer, out, fill_value=np.nan):
_take_2d_multi_generic(arr, indexer, out, fill_value=fill_value,
mask_info=mask_info)
func(arr, indexer, out=out, fill_value=fill_value)
return out
# ---- #
# diff #
# ---- #
_diff_special = {
'float64': algos.diff_2d_float64,
'float32': algos.diff_2d_float32,
'int64': algos.diff_2d_int64,
'int32': algos.diff_2d_int32,
'int16': algos.diff_2d_int16,
'int8': algos.diff_2d_int8,
}
def diff(arr, n, axis=0):
""" difference of n between self,
    analogous to s - s.shift(n) """
n = int(n)
na = np.nan
dtype = arr.dtype
is_timedelta = False
if needs_i8_conversion(arr):
dtype = np.float64
arr = arr.view('i8')
na = tslib.iNaT
is_timedelta = True
elif issubclass(dtype.type, np.integer):
dtype = np.float64
elif issubclass(dtype.type, np.bool_):
dtype = np.object_
dtype = np.dtype(dtype)
out_arr = np.empty(arr.shape, dtype=dtype)
na_indexer = [slice(None)] * arr.ndim
na_indexer[axis] = slice(None, n) if n >= 0 else slice(n, None)
out_arr[tuple(na_indexer)] = na
if arr.ndim == 2 and arr.dtype.name in _diff_special:
f = _diff_special[arr.dtype.name]
f(arr, out_arr, n, axis)
else:
res_indexer = [slice(None)] * arr.ndim
res_indexer[axis] = slice(n, None) if n >= 0 else slice(None, n)
res_indexer = tuple(res_indexer)
lag_indexer = [slice(None)] * arr.ndim
lag_indexer[axis] = slice(None, -n) if n > 0 else slice(-n, None)
lag_indexer = tuple(lag_indexer)
# need to make sure that we account for na for datelike/timedelta
# we don't actually want to subtract these i8 numbers
if is_timedelta:
res = arr[res_indexer]
lag = arr[lag_indexer]
mask = (arr[res_indexer] == na) | (arr[lag_indexer] == na)
if mask.any():
res = res.copy()
res[mask] = 0
lag = lag.copy()
lag[mask] = 0
result = res - lag
result[mask] = na
out_arr[res_indexer] = result
else:
out_arr[res_indexer] = arr[res_indexer] - arr[lag_indexer]
if is_timedelta:
from pandas import TimedeltaIndex
out_arr = TimedeltaIndex(out_arr.ravel().astype('int64')).asi8.reshape(
out_arr.shape).astype('timedelta64[ns]')
return out_arr
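# Illustrative sketch (added for exposition, not part of the original pandas
# source); never called here.
def _example_diff():
    # expected: array([nan,  1.,  2.,  3.])
    return diff(np.array([1., 2., 4., 7.]), 1)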
| mit |