repo_name (stringlengths 6-112) | path (stringlengths 4-204) | copies (stringlengths 1-3) | size (stringlengths 4-6) | content (stringlengths 714-810k) | license (stringclasses, 15 values)
---|---|---|---|---|---|
botswana-harvard/bcpp-export | bcpp_export/old_export/working/ltc_qa.py | 1 | 1448 | import pandas as pd
from tabulate import tabulate
from bcpp_export.dataframes.ltc import Ltc
"""
df.query('final_hiv_status == 1 and final_arv_status == 1 and prev_result_known != 1')
"""
ltc = Ltc()
df = ltc.df_subjects
df_kadima = pd.read_csv('/Users/erikvw/Documents/bcpp/kadima_LTC_table_2-ew.csv')
df_kadima.rename(
columns={'pims_reg_date': 'ek_pims_reg_date', 'pims_initiation_date': 'ek_pims_initiation_date'},
inplace=True)
df_kadima['ek_in_pims'] = df_kadima.apply(
lambda row: 0 if row['ek_pims_reg_date'] == 'Not in PIMS at the site' else 1, axis=1)
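# merge the intervention-arm subjects with Kadima's PIMS columns on subject_identifier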
df1 = pd.merge(
df.query('intervention == 1'), df_kadima[
[u'ek_pims_reg_date', u'ek_pims_initiation_date', u'pims_identifier',
u'ek_in_pims', 'subject_identifier']], how='left', on='subject_identifier')
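# per-community counts: all HIV-positive subjects ('all'), those with a PIMS
# registration date ('rdb'), and those Kadima flagged as in PIMS ('ek')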
df2 = pd.DataFrame(
{'all': df1.query('final_hiv_status == 1').groupby(df1['community']).size()}).reset_index()
df2 = pd.merge(
df2, pd.DataFrame(
{'rdb': df1.query('final_hiv_status == 1')[pd.notnull(df1['pims_reg_date'])].groupby(
df1['community']).size()}).reset_index(), how='left', on='community')
df2 = pd.merge(df2, pd.DataFrame(
{'ek': df1.query('final_hiv_status == 1 and ek_in_pims == 1')[pd.notnull(df1['ek_pims_reg_date'])].groupby(
df1['community']).size()}).reset_index(), how='left', on='community')
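# tabulate relabels the columns: 'all' prints as 'bhs' and 'ek' as 'ettiene'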
print(tabulate(df2, headers=['community', 'bhs', 'rdb', 'ettiene'], tablefmt='psql'))
| gpl-2.0 |
dyoung418/tensorflow | tensorflow/contrib/factorization/python/ops/gmm_test.py | 41 | 9763 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ops.gmm."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.contrib.factorization.python.ops import gmm as gmm_lib
from tensorflow.contrib.learn.python.learn.estimators import kmeans
from tensorflow.contrib.learn.python.learn.estimators import run_config
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import random_seed as random_seed_lib
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.platform import flags
from tensorflow.python.platform import test
from tensorflow.python.training import queue_runner
FLAGS = flags.FLAGS
class GMMTest(test.TestCase):
def input_fn(self, batch_size=None, points=None):
batch_size = batch_size or self.batch_size
points = points if points is not None else self.points
num_points = points.shape[0]
def _fn():
x = constant_op.constant(points)
if batch_size == num_points:
return x, None
indices = random_ops.random_uniform(constant_op.constant([batch_size]),
minval=0, maxval=num_points-1,
dtype=dtypes.int32,
seed=10)
return array_ops.gather(x, indices), None
return _fn
def setUp(self):
np.random.seed(3)
random_seed_lib.set_random_seed(2)
self.num_centers = 2
self.num_dims = 2
self.num_points = 4000
self.batch_size = self.num_points
self.true_centers = self.make_random_centers(self.num_centers,
self.num_dims)
self.points, self.assignments, self.scores = self.make_random_points(
self.true_centers, self.num_points)
self.true_score = np.add.reduce(self.scores)
# Use initial means from kmeans (just like scikit-learn does).
clusterer = kmeans.KMeansClustering(num_clusters=self.num_centers)
clusterer.fit(input_fn=lambda: (constant_op.constant(self.points), None),
steps=30)
self.initial_means = clusterer.clusters()
@staticmethod
def make_random_centers(num_centers, num_dims):
return np.round(
np.random.rand(num_centers, num_dims).astype(np.float32) * 500)
@staticmethod
def make_random_points(centers, num_points):
num_centers, num_dims = centers.shape
assignments = np.random.choice(num_centers, num_points)
offsets = np.round(
np.random.randn(num_points, num_dims).astype(np.float32) * 20)
points = centers[assignments] + offsets
means = [
np.mean(
points[assignments == center], axis=0)
for center in xrange(num_centers)
]
covs = [
np.cov(points[assignments == center].T)
for center in xrange(num_centers)
]
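# score for each point: its Mahalanobis distance from the assigned cluster,
# using that cluster's empirical mean and covariance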
scores = []
for r in xrange(num_points):
scores.append(
np.sqrt(
np.dot(
np.dot(points[r, :] - means[assignments[r]],
np.linalg.inv(covs[assignments[r]])), points[r, :] -
means[assignments[r]])))
return (points, assignments, scores)
def test_weights(self):
"""Tests the shape of the weights."""
gmm = gmm_lib.GMM(self.num_centers,
initial_clusters=self.initial_means,
random_seed=4,
config=run_config.RunConfig(tf_random_seed=2))
gmm.fit(input_fn=self.input_fn(), steps=0)
weights = gmm.weights()
self.assertAllEqual(list(weights.shape), [self.num_centers])
def test_clusters(self):
"""Tests the shape of the clusters."""
gmm = gmm_lib.GMM(self.num_centers,
initial_clusters=self.initial_means,
random_seed=4,
config=run_config.RunConfig(tf_random_seed=2))
gmm.fit(input_fn=self.input_fn(), steps=0)
clusters = gmm.clusters()
self.assertAllEqual(list(clusters.shape), [self.num_centers, self.num_dims])
def test_fit(self):
gmm = gmm_lib.GMM(self.num_centers,
initial_clusters='random',
random_seed=4,
config=run_config.RunConfig(tf_random_seed=2))
gmm.fit(input_fn=self.input_fn(), steps=1)
score1 = gmm.score(input_fn=self.input_fn(batch_size=self.num_points),
steps=1)
gmm.fit(input_fn=self.input_fn(), steps=10)
score2 = gmm.score(input_fn=self.input_fn(batch_size=self.num_points),
steps=1)
self.assertGreater(score1, score2)
self.assertNear(self.true_score, score2, self.true_score * 0.15)
def test_infer(self):
gmm = gmm_lib.GMM(self.num_centers,
initial_clusters=self.initial_means,
random_seed=4,
config=run_config.RunConfig(tf_random_seed=2))
gmm.fit(input_fn=self.input_fn(), steps=60)
clusters = gmm.clusters()
# Make a small test set
num_points = 40
points, true_assignments, true_offsets = (
self.make_random_points(clusters, num_points))
assignments = []
for item in gmm.predict_assignments(
input_fn=self.input_fn(points=points, batch_size=num_points)):
assignments.append(item)
assignments = np.ravel(assignments)
self.assertAllEqual(true_assignments, assignments)
# Test score
score = gmm.score(input_fn=self.input_fn(points=points,
batch_size=num_points), steps=1)
self.assertNear(score, np.sum(true_offsets), 4.05)
def _compare_with_sklearn(self, cov_type):
# sklearn version.
iterations = 40
np.random.seed(5)
sklearn_assignments = np.asarray([0, 0, 1, 0, 0, 0, 1, 0, 0, 1])
sklearn_means = np.asarray([[144.83417719, 254.20130341],
[274.38754816, 353.16074346]])
sklearn_covs = np.asarray([[[395.0081194, -4.50389512],
[-4.50389512, 408.27543989]],
[[385.17484203, -31.27834935],
[-31.27834935, 391.74249925]]])
# skflow version.
gmm = gmm_lib.GMM(self.num_centers,
initial_clusters=self.initial_means,
covariance_type=cov_type,
config=run_config.RunConfig(tf_random_seed=2))
gmm.fit(input_fn=self.input_fn(), steps=iterations)
points = self.points[:10, :]
skflow_assignments = []
for item in gmm.predict_assignments(
input_fn=self.input_fn(points=points, batch_size=10)):
skflow_assignments.append(item)
self.assertAllClose(sklearn_assignments,
np.ravel(skflow_assignments).astype(int))
self.assertAllClose(sklearn_means, gmm.clusters())
if cov_type == 'full':
self.assertAllClose(sklearn_covs, gmm.covariances(), rtol=0.01)
else:
for d in [0, 1]:
self.assertAllClose(
np.diag(sklearn_covs[d]), gmm.covariances()[d, :], rtol=0.01)
def test_compare_full(self):
self._compare_with_sklearn('full')
def test_compare_diag(self):
self._compare_with_sklearn('diag')
def test_random_input_large(self):
# sklearn version.
iterations = 5 # that should be enough to know whether this diverges
np.random.seed(5)
num_classes = 20
x = np.array([[np.random.random() for _ in range(100)]
for _ in range(num_classes)], dtype=np.float32)
# skflow version.
gmm = gmm_lib.GMM(num_classes,
covariance_type='full',
config=run_config.RunConfig(tf_random_seed=2))
def get_input_fn(x):
def input_fn():
return constant_op.constant(x.astype(np.float32)), None
return input_fn
gmm.fit(input_fn=get_input_fn(x), steps=iterations)
self.assertFalse(np.isnan(gmm.clusters()).any())
class GMMTestQueues(test.TestCase):
def input_fn(self):
def _fn():
queue = data_flow_ops.FIFOQueue(capacity=10,
dtypes=dtypes.float32,
shapes=[10, 3])
enqueue_op = queue.enqueue(array_ops.zeros([10, 3], dtype=dtypes.float32))
queue_runner.add_queue_runner(queue_runner.QueueRunner(queue,
[enqueue_op]))
return queue.dequeue(), None
return _fn
# This test makes sure that there are no deadlocks when using a QueueRunner.
# Note that since cluster initialization is dependent on inputs, if input
# is generated using a QueueRunner, one has to make sure that these runners
# are started before the initialization.
def test_queues(self):
gmm = gmm_lib.GMM(2, covariance_type='diag')
gmm.fit(input_fn=self.input_fn(), steps=1)
if __name__ == '__main__':
test.main()
| apache-2.0 |
crichardson17/starburst_atlas | Low_resolution_sims/DustFree_LowRes/Geneva_Rot_cont/Geneva_Rot_cont_age2/Optical2.py | 33 | 7437 | import csv
import matplotlib.pyplot as plt
from numpy import *
import scipy.interpolate
import math
from pylab import *
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
import matplotlib.patches as patches
from matplotlib.path import Path
import os
# ------------------------------------------------------------------------------------------------------
#inputs
for file in os.listdir('.'):
if file.endswith(".grd"):
inputfile = file
for file in os.listdir('.'):
if file.endswith(".txt"):
inputfile2 = file
# ------------------------------------------------------------------------------------------------------
#Patches data
#for the Kewley and Levesque data
verts = [
(1., 7.97712125471966000000), # left, bottom
(1., 9.57712125471966000000), # left, top
(2., 10.57712125471970000000), # right, top
(2., 8.97712125471966000000), # right, bottom
(0., 0.), # ignored
]
codes = [Path.MOVETO,
Path.LINETO,
Path.LINETO,
Path.LINETO,
Path.CLOSEPOLY,
]
path = Path(verts, codes)
# ------------------------
#for the Kewley 01 data
verts2 = [
(2.4, 9.243038049), # left, bottom
(2.4, 11.0211893), # left, top
(2.6, 11.0211893), # right, top
(2.6, 9.243038049), # right, bottom
(0, 0.), # ignored
]
path = Path(verts, codes)
path2 = Path(verts2, codes)
# -------------------------
#for the Moy et al data
verts3 = [
(1., 6.86712125471966000000), # left, bottom
(1., 10.18712125471970000000), # left, top
(3., 12.18712125471970000000), # right, top
(3., 8.86712125471966000000), # right, bottom
(0., 0.), # ignored
]
path = Path(verts, codes)
path3 = Path(verts3, codes)
# ------------------------------------------------------------------------------------------------------
#the routine to add patches for other people's data onto our plots.
def add_patches(ax):
patch3 = patches.PathPatch(path3, facecolor='yellow', lw=0)
patch2 = patches.PathPatch(path2, facecolor='green', lw=0)
patch = patches.PathPatch(path, facecolor='red', lw=0)
ax.add_patch(patch3)
ax.add_patch(patch2)
ax.add_patch(patch)
# ------------------------------------------------------------------------------------------------------
#the subplot routine
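#each panel RBF-interpolates one emission-line ratio onto the (hdens, phi) grid
#and overlays a fine (dashed) and a coarse (solid) set of contour levels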
def add_sub_plot(sub_num):
numplots = 16
plt.subplot(numplots/4.,4,sub_num)
rbf = scipy.interpolate.Rbf(x, y, z[:,sub_num-1], function='linear')
zi = rbf(xi, yi)
contour = plt.contour(xi,yi,zi, levels, colors='c', linestyles = 'dashed')
contour2 = plt.contour(xi,yi,zi, levels2, colors='k', linewidths=1.5)
plt.scatter(max_values[line[sub_num-1],2], max_values[line[sub_num-1],3], c ='k',marker = '*')
plt.annotate(headers[line[sub_num-1]], xy=(8,11), xytext=(6,8.5), fontsize = 10)
plt.annotate(max_values[line[sub_num-1],0], xy= (max_values[line[sub_num-1],2], max_values[line[sub_num-1],3]), xytext = (0, -10), textcoords = 'offset points', ha = 'right', va = 'bottom', fontsize=10)
if sub_num == numplots / 2.:
print "half the plots are complete"
#axis limits
yt_min = 8
yt_max = 23
xt_min = 0
xt_max = 12
plt.ylim(yt_min,yt_max)
plt.xlim(xt_min,xt_max)
plt.yticks(arange(yt_min+1,yt_max,1),fontsize=10)
plt.xticks(arange(xt_min+1,xt_max,1), fontsize = 10)
if sub_num in [2,3,4,6,7,8,10,11,12,14,15,16]:
plt.tick_params(labelleft = 'off')
else:
plt.tick_params(labelleft = 'on')
plt.ylabel('Log ($ \phi _{\mathrm{H}} $)')
if sub_num in [1,2,3,4,5,6,7,8,9,10,11,12]:
plt.tick_params(labelbottom = 'off')
else:
plt.tick_params(labelbottom = 'on')
plt.xlabel('Log($n _{\mathrm{H}} $)')
if sub_num == 1:
plt.yticks(arange(yt_min+1,yt_max+1,1),fontsize=10)
if sub_num == 13:
plt.yticks(arange(yt_min,yt_max,1),fontsize=10)
plt.xticks(arange(xt_min,xt_max,1), fontsize = 10)
if sub_num == 16 :
plt.xticks(arange(xt_min+1,xt_max+1,1), fontsize = 10)
# ---------------------------------------------------
#this is where the grid information (phi and hdens) is read in and saved to grid.
grid = [];
with open(inputfile, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
for row in csvReader:
grid.append(row);
grid = asarray(grid)
#here is where the data for each line is read in and saved to dataEmissionlines
dataEmissionlines = [];
with open(inputfile2, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
headers = csvReader.next()
for row in csvReader:
dataEmissionlines.append(row);
dataEmissionlines = asarray(dataEmissionlines)
print "import files complete"
# ---------------------------------------------------
#for grid
phi_values = grid[1:len(dataEmissionlines)+1,6]
hdens_values = grid[1:len(dataEmissionlines)+1,7]
#for lines
headers = headers[1:]
Emissionlines = dataEmissionlines[:, 1:]
concatenated_data = zeros((len(Emissionlines),len(Emissionlines[0])))
max_values = zeros((len(Emissionlines[0]),4))
#select the scaling factor
#for 1215
#incident = Emissionlines[1:,4]
#for 4860
incident = Emissionlines[:,57]
#take the ratio of incident and all the lines and put it all in an array concatenated_data
for i in range(len(Emissionlines)):
for j in range(len(Emissionlines[0])):
if math.log(4860.*(float(Emissionlines[i,j])/float(Emissionlines[i,57])), 10) > 0:
concatenated_data[i,j] = math.log(4860.*(float(Emissionlines[i,j])/float(Emissionlines[i,57])), 10)
else:
concatenated_data[i,j] = 0
# for 1215
#for i in range(len(Emissionlines)):
# for j in range(len(Emissionlines[0])):
# if math.log(1215.*(float(Emissionlines[i,j])/float(Emissionlines[i,4])), 10) > 0:
# concatenated_data[i,j] = math.log(1215.*(float(Emissionlines[i,j])/float(Emissionlines[i,4])), 10)
# else:
# concatenated_data[i,j] == 0
#find the maxima to plot onto the contour plots
for j in range(len(concatenated_data[0])):
max_values[j,0] = max(concatenated_data[:,j])
max_values[j,1] = argmax(concatenated_data[:,j], axis = 0)
max_values[j,2] = hdens_values[max_values[j,1]]
max_values[j,3] = phi_values[max_values[j,1]]
#to round off the maxima
max_values[:,0] = [ '%.1f' % elem for elem in max_values[:,0] ]
print "data arranged"
# ---------------------------------------------------
#Creating the grid to interpolate with for contours.
gridarray = zeros((len(Emissionlines),2))
gridarray[:,0] = hdens_values
gridarray[:,1] = phi_values
x = gridarray[:,0]
y = gridarray[:,1]
line = [56, #AR 4 4740
58, #4861
59, #O III 4959
60, #O 3 5007
61, #N 1 5200
63, #O 1 5577
64, #N 2 5755
65, #HE 1 5876
66, #O 1 6300
67, #S 3 6312
68, #O 1 6363
69, #H 1 6563
70, #N 2 6584
71, #S II 6716
72, #S 2 6720
73] #S II 6731
#create z array for this plot
z = concatenated_data[:,line[:]]
# ---------------------------------------------------
# Interpolate
print "starting interpolation"
xi, yi = linspace(x.min(), x.max(), 10), linspace(y.min(), y.max(), 10)
xi, yi = meshgrid(xi, yi)
# ---------------------------------------------------
print "interpolatation complete; now plotting"
#plot
plt.subplots_adjust(wspace=0, hspace=0) #remove space between plots
levels = arange(10**-1,10, .2)
levels2 = arange(10**-2,10**2, 1)
plt.suptitle("Optical Lines Continued", fontsize=14)
# ---------------------------------------------------
for i in range(1, 17):
add_sub_plot(i)
ax1 = plt.subplot(4,4,1)
add_patches(ax1)
print "complete"
plt.savefig('Optical_lines_cntd.pdf')
plt.clf()
| gpl-2.0 |
Eric89GXL/numpy | numpy/linalg/linalg.py | 1 | 82422 | """Lite version of scipy.linalg.
Notes
-----
This module is a lite version of the linalg.py module in SciPy which
contains high-level Python interface to the LAPACK library. The lite
version only accesses the following LAPACK functions: dgesv, zgesv,
dgeev, zgeev, dgesdd, zgesdd, dgelsd, zgelsd, dsyevd, zheevd, dgetrf,
zgetrf, dpotrf, zpotrf, dgeqrf, zgeqrf, zungqr, dorgqr.
"""
from __future__ import division, absolute_import, print_function
__all__ = ['matrix_power', 'solve', 'tensorsolve', 'tensorinv', 'inv',
'cholesky', 'eigvals', 'eigvalsh', 'pinv', 'slogdet', 'det',
'svd', 'eig', 'eigh', 'lstsq', 'norm', 'qr', 'cond', 'matrix_rank',
'LinAlgError', 'multi_dot']
import operator
import warnings
from numpy.core import (
array, asarray, zeros, empty, empty_like, intc, single, double,
csingle, cdouble, inexact, complexfloating, newaxis, all, Inf, dot,
add, multiply, sqrt, fastCopyAndTranspose, sum, isfinite,
finfo, errstate, geterrobj, moveaxis, amin, amax, product, abs,
atleast_2d, intp, asanyarray, object_, matmul,
swapaxes, divide, count_nonzero, isnan
)
from numpy.core.multiarray import normalize_axis_index
from numpy.lib.twodim_base import triu, eye
from numpy.linalg import lapack_lite, _umath_linalg
# For Python2/3 compatibility
_N = b'N'
_V = b'V'
_A = b'A'
_S = b'S'
_L = b'L'
fortran_int = intc
# Error object
class LinAlgError(Exception):
"""
Generic Python-exception-derived object raised by linalg functions.
General purpose exception class, derived from Python's exception.Exception
class, programmatically raised in linalg functions when a Linear
Algebra-related condition would prevent further correct execution of the
function.
Parameters
----------
None
Examples
--------
>>> from numpy import linalg as LA
>>> LA.inv(np.zeros((2,2)))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "...linalg.py", line 350,
in inv return wrap(solve(a, identity(a.shape[0], dtype=a.dtype)))
File "...linalg.py", line 249,
in solve
raise LinAlgError('Singular matrix')
numpy.linalg.LinAlgError: Singular matrix
"""
pass
def _determine_error_states():
errobj = geterrobj()
bufsize = errobj[0]
with errstate(invalid='call', over='ignore',
divide='ignore', under='ignore'):
invalid_call_errmask = geterrobj()[1]
return [bufsize, invalid_call_errmask, None]
# Dealing with errors in _umath_linalg
_linalg_error_extobj = _determine_error_states()
del _determine_error_states
def _raise_linalgerror_singular(err, flag):
raise LinAlgError("Singular matrix")
def _raise_linalgerror_nonposdef(err, flag):
raise LinAlgError("Matrix is not positive definite")
def _raise_linalgerror_eigenvalues_nonconvergence(err, flag):
raise LinAlgError("Eigenvalues did not converge")
def _raise_linalgerror_svd_nonconvergence(err, flag):
raise LinAlgError("SVD did not converge")
def _raise_linalgerror_lstsq(err, flag):
raise LinAlgError("SVD did not converge in Linear Least Squares")
def get_linalg_error_extobj(callback):
extobj = list(_linalg_error_extobj) # make a copy
extobj[2] = callback
return extobj
def _makearray(a):
new = asarray(a)
wrap = getattr(a, "__array_prepare__", new.__array_wrap__)
return new, wrap
def isComplexType(t):
return issubclass(t, complexfloating)
_real_types_map = {single : single,
double : double,
csingle : single,
cdouble : double}
_complex_types_map = {single : csingle,
double : cdouble,
csingle : csingle,
cdouble : cdouble}
def _realType(t, default=double):
return _real_types_map.get(t, default)
def _complexType(t, default=cdouble):
return _complex_types_map.get(t, default)
def _linalgRealType(t):
"""Cast the type t to either double or cdouble."""
return double
_complex_types_map = {single : csingle,
double : cdouble,
csingle : csingle,
cdouble : cdouble}
def _commonType(*arrays):
# in lite version, use higher precision (always double or cdouble)
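# e.g. mixing float32 and complex64 inputs gives t == cdouble for the LAPACK
# call and result_type == csingle for the returned array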
result_type = single
is_complex = False
for a in arrays:
if issubclass(a.dtype.type, inexact):
if isComplexType(a.dtype.type):
is_complex = True
rt = _realType(a.dtype.type, default=None)
if rt is None:
# unsupported inexact scalar
raise TypeError("array type %s is unsupported in linalg" %
(a.dtype.name,))
else:
rt = double
if rt is double:
result_type = double
if is_complex:
t = cdouble
result_type = _complex_types_map[result_type]
else:
t = double
return t, result_type
# _fastCopyAndTranspose assumes the input is 2D (as all the calls in here are).
_fastCT = fastCopyAndTranspose
def _to_native_byte_order(*arrays):
ret = []
for arr in arrays:
if arr.dtype.byteorder not in ('=', '|'):
ret.append(asarray(arr, dtype=arr.dtype.newbyteorder('=')))
else:
ret.append(arr)
if len(ret) == 1:
return ret[0]
else:
return ret
def _fastCopyAndTranspose(type, *arrays):
cast_arrays = ()
for a in arrays:
if a.dtype.type is type:
cast_arrays = cast_arrays + (_fastCT(a),)
else:
cast_arrays = cast_arrays + (_fastCT(a.astype(type)),)
if len(cast_arrays) == 1:
return cast_arrays[0]
else:
return cast_arrays
def _assertRank2(*arrays):
for a in arrays:
if a.ndim != 2:
raise LinAlgError('%d-dimensional array given. Array must be '
'two-dimensional' % a.ndim)
def _assertRankAtLeast2(*arrays):
for a in arrays:
if a.ndim < 2:
raise LinAlgError('%d-dimensional array given. Array must be '
'at least two-dimensional' % a.ndim)
def _assertSquareness(*arrays):
for a in arrays:
if max(a.shape) != min(a.shape):
raise LinAlgError('Array must be square')
def _assertNdSquareness(*arrays):
for a in arrays:
m, n = a.shape[-2:]
if m != n:
raise LinAlgError('Last 2 dimensions of the array must be square')
def _assertFinite(*arrays):
for a in arrays:
if not (isfinite(a).all()):
raise LinAlgError("Array must not contain infs or NaNs")
def _isEmpty2d(arr):
# check size first for efficiency
return arr.size == 0 and product(arr.shape[-2:]) == 0
def _assertNoEmpty2d(*arrays):
for a in arrays:
if _isEmpty2d(a):
raise LinAlgError("Arrays cannot be empty")
def transpose(a):
"""
Transpose each matrix in a stack of matrices.
Unlike np.transpose, this only swaps the last two axes, rather than all of
them
Parameters
----------
a : (...,M,N) array_like
Returns
-------
aT : (...,N,M) ndarray
"""
return swapaxes(a, -1, -2)
# Linear equations
def tensorsolve(a, b, axes=None):
"""
Solve the tensor equation ``a x = b`` for x.
It is assumed that all indices of `x` are summed over in the product,
together with the rightmost indices of `a`, as is done in, for example,
``tensordot(a, x, axes=b.ndim)``.
Parameters
----------
a : array_like
Coefficient tensor, of shape ``b.shape + Q``. `Q`, a tuple, equals
the shape of that sub-tensor of `a` consisting of the appropriate
number of its rightmost indices, and must be such that
``prod(Q) == prod(b.shape)`` (in which sense `a` is said to be
'square').
b : array_like
Right-hand tensor, which can be of any shape.
axes : tuple of ints, optional
Axes in `a` to reorder to the right, before inversion.
If None (default), no reordering is done.
Returns
-------
x : ndarray, shape Q
Raises
------
LinAlgError
If `a` is singular or not 'square' (in the above sense).
See Also
--------
numpy.tensordot, tensorinv, numpy.einsum
Examples
--------
>>> a = np.eye(2*3*4)
>>> a.shape = (2*3, 4, 2, 3, 4)
>>> b = np.random.randn(2*3, 4)
>>> x = np.linalg.tensorsolve(a, b)
>>> x.shape
(2, 3, 4)
>>> np.allclose(np.tensordot(a, x, axes=3), b)
True
"""
a, wrap = _makearray(a)
b = asarray(b)
an = a.ndim
if axes is not None:
allaxes = list(range(0, an))
for k in axes:
allaxes.remove(k)
allaxes.insert(an, k)
a = a.transpose(allaxes)
oldshape = a.shape[-(an-b.ndim):]
prod = 1
for k in oldshape:
prod *= k
a = a.reshape(-1, prod)
b = b.ravel()
res = wrap(solve(a, b))
res.shape = oldshape
return res
def solve(a, b):
"""
Solve a linear matrix equation, or system of linear scalar equations.
Computes the "exact" solution, `x`, of the well-determined, i.e., full
rank, linear matrix equation `ax = b`.
Parameters
----------
a : (..., M, M) array_like
Coefficient matrix.
b : {(..., M,), (..., M, K)}, array_like
Ordinate or "dependent variable" values.
Returns
-------
x : {(..., M,), (..., M, K)} ndarray
Solution to the system a x = b. Returned shape is identical to `b`.
Raises
------
LinAlgError
If `a` is singular or not square.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The solutions are computed using LAPACK routine _gesv
`a` must be square and of full-rank, i.e., all rows (or, equivalently,
columns) must be linearly independent; if either is not true, use
`lstsq` for the least-squares best "solution" of the
system/equation.
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,
FL, Academic Press, Inc., 1980, pg. 22.
Examples
--------
Solve the system of equations ``3 * x0 + x1 = 9`` and ``x0 + 2 * x1 = 8``:
>>> a = np.array([[3,1], [1,2]])
>>> b = np.array([9,8])
>>> x = np.linalg.solve(a, b)
>>> x
array([ 2., 3.])
Check that the solution is correct:
>>> np.allclose(np.dot(a, x), b)
True
"""
a, _ = _makearray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
b, wrap = _makearray(b)
t, result_t = _commonType(a, b)
# We use the b = (..., M,) logic, only if the number of extra dimensions
# match exactly
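# e.g. with a.shape == (2, 3, 3): b.shape == (2, 3) is treated as a stack of
# vectors (solve1), while b.shape == (2, 3, 4) is a stack of matrices (solve)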
if b.ndim == a.ndim - 1:
gufunc = _umath_linalg.solve1
else:
gufunc = _umath_linalg.solve
signature = 'DD->D' if isComplexType(t) else 'dd->d'
extobj = get_linalg_error_extobj(_raise_linalgerror_singular)
r = gufunc(a, b, signature=signature, extobj=extobj)
return wrap(r.astype(result_t, copy=False))
def tensorinv(a, ind=2):
"""
Compute the 'inverse' of an N-dimensional array.
The result is an inverse for `a` relative to the tensordot operation
``tensordot(a, b, ind)``, i. e., up to floating-point accuracy,
``tensordot(tensorinv(a), a, ind)`` is the "identity" tensor for the
tensordot operation.
Parameters
----------
a : array_like
Tensor to 'invert'. Its shape must be 'square', i. e.,
``prod(a.shape[:ind]) == prod(a.shape[ind:])``.
ind : int, optional
Number of first indices that are involved in the inverse sum.
Must be a positive integer, default is 2.
Returns
-------
b : ndarray
`a`'s tensordot inverse, shape ``a.shape[ind:] + a.shape[:ind]``.
Raises
------
LinAlgError
If `a` is singular or not 'square' (in the above sense).
See Also
--------
numpy.tensordot, tensorsolve
Examples
--------
>>> a = np.eye(4*6)
>>> a.shape = (4, 6, 8, 3)
>>> ainv = np.linalg.tensorinv(a, ind=2)
>>> ainv.shape
(8, 3, 4, 6)
>>> b = np.random.randn(4, 6)
>>> np.allclose(np.tensordot(ainv, b), np.linalg.tensorsolve(a, b))
True
>>> a = np.eye(4*6)
>>> a.shape = (24, 8, 3)
>>> ainv = np.linalg.tensorinv(a, ind=1)
>>> ainv.shape
(8, 3, 24)
>>> b = np.random.randn(24)
>>> np.allclose(np.tensordot(ainv, b, 1), np.linalg.tensorsolve(a, b))
True
"""
a = asarray(a)
oldshape = a.shape
prod = 1
if ind > 0:
invshape = oldshape[ind:] + oldshape[:ind]
for k in oldshape[ind:]:
prod *= k
else:
raise ValueError("Invalid ind argument.")
a = a.reshape(prod, -1)
ia = inv(a)
return ia.reshape(*invshape)
# Matrix inversion
def inv(a):
"""
Compute the (multiplicative) inverse of a matrix.
Given a square matrix `a`, return the matrix `ainv` satisfying
``dot(a, ainv) = dot(ainv, a) = eye(a.shape[0])``.
Parameters
----------
a : (..., M, M) array_like
Matrix to be inverted.
Returns
-------
ainv : (..., M, M) ndarray or matrix
(Multiplicative) inverse of the matrix `a`.
Raises
------
LinAlgError
If `a` is not square or inversion fails.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
Examples
--------
>>> from numpy.linalg import inv
>>> a = np.array([[1., 2.], [3., 4.]])
>>> ainv = inv(a)
>>> np.allclose(np.dot(a, ainv), np.eye(2))
True
>>> np.allclose(np.dot(ainv, a), np.eye(2))
True
If a is a matrix object, then the return value is a matrix as well:
>>> ainv = inv(np.matrix(a))
>>> ainv
matrix([[-2. , 1. ],
[ 1.5, -0.5]])
Inverses of several matrices can be computed at once:
>>> a = np.array([[[1., 2.], [3., 4.]], [[1, 3], [3, 5]]])
>>> inv(a)
array([[[-2. , 1. ],
[ 1.5, -0.5]],
[[-5. , 2. ],
[ 3. , -1. ]]])
"""
a, wrap = _makearray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
signature = 'D->D' if isComplexType(t) else 'd->d'
extobj = get_linalg_error_extobj(_raise_linalgerror_singular)
ainv = _umath_linalg.inv(a, signature=signature, extobj=extobj)
return wrap(ainv.astype(result_t, copy=False))
def matrix_power(a, n):
"""
Raise a square matrix to the (integer) power `n`.
For positive integers `n`, the power is computed by repeated matrix
squarings and matrix multiplications. If ``n == 0``, the identity matrix
of the same shape as M is returned. If ``n < 0``, the inverse
is computed and then raised to the ``abs(n)``.
Parameters
----------
a : (..., M, M) array_like
Matrix to be "powered."
n : int
The exponent can be any integer or long integer, positive,
negative, or zero.
Returns
-------
a**n : (..., M, M) ndarray or matrix object
The return value is the same shape and type as `M`;
if the exponent is positive or zero then the type of the
elements is the same as those of `M`. If the exponent is
negative the elements are floating-point.
Raises
------
LinAlgError
For matrices that are not square or that (for negative powers) cannot
be inverted numerically.
Examples
--------
>>> from numpy.linalg import matrix_power
>>> i = np.array([[0, 1], [-1, 0]]) # matrix equiv. of the imaginary unit
>>> matrix_power(i, 3) # should = -i
array([[ 0, -1],
[ 1, 0]])
>>> matrix_power(i, 0)
array([[1, 0],
[0, 1]])
>>> matrix_power(i, -3) # should = 1/(-i) = i, but w/ f.p. elements
array([[ 0., 1.],
[-1., 0.]])
Somewhat more sophisticated example
>>> q = np.zeros((4, 4))
>>> q[0:2, 0:2] = -i
>>> q[2:4, 2:4] = i
>>> q # one of the three quaternion units not equal to 1
array([[ 0., -1., 0., 0.],
[ 1., 0., 0., 0.],
[ 0., 0., 0., 1.],
[ 0., 0., -1., 0.]])
>>> matrix_power(q, 2) # = -np.eye(4)
array([[-1., 0., 0., 0.],
[ 0., -1., 0., 0.],
[ 0., 0., -1., 0.],
[ 0., 0., 0., -1.]])
"""
a = asanyarray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
try:
n = operator.index(n)
except TypeError:
raise TypeError("exponent must be an integer")
if n == 0:
a = empty_like(a)
a[...] = eye(a.shape[-2], dtype=a.dtype)
return a
elif n < 0:
a = inv(a)
n = abs(n)
# short-cuts.
if n == 1:
return a
elif n == 2:
return matmul(a, a)
elif n == 3:
return matmul(matmul(a, a), a)
# Use binary decomposition to reduce the number of matrix multiplications.
# Here, we iterate over the bits of n, from LSB to MSB, raise `a` to
# increasing powers of 2, and multiply into the result as needed.
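# For example, n == 5 (binary 101) combines a (bit 0) with a**4 (bit 2),
# using three matmul calls instead of the four needed by naive repetition.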
z = result = None
while n > 0:
z = a if z is None else matmul(z, z)
n, bit = divmod(n, 2)
if bit:
result = z if result is None else matmul(result, z)
return result
# Cholesky decomposition
def cholesky(a):
"""
Cholesky decomposition.
Return the Cholesky decomposition, `L * L.H`, of the square matrix `a`,
where `L` is lower-triangular and .H is the conjugate transpose operator
(which is the ordinary transpose if `a` is real-valued). `a` must be
Hermitian (symmetric if real-valued) and positive-definite. Only `L` is
actually returned.
Parameters
----------
a : (..., M, M) array_like
Hermitian (symmetric if all elements are real), positive-definite
input matrix.
Returns
-------
L : (..., M, M) array_like
Upper or lower-triangular Cholesky factor of `a`. Returns a
matrix object if `a` is a matrix object.
Raises
------
LinAlgError
If the decomposition fails, for example, if `a` is not
positive-definite.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The Cholesky decomposition is often used as a fast way of solving
.. math:: A \\mathbf{x} = \\mathbf{b}
(when `A` is both Hermitian/symmetric and positive-definite).
First, we solve for :math:`\\mathbf{y}` in
.. math:: L \\mathbf{y} = \\mathbf{b},
and then for :math:`\\mathbf{x}` in
.. math:: L.H \\mathbf{x} = \\mathbf{y}.
Examples
--------
>>> A = np.array([[1,-2j],[2j,5]])
>>> A
array([[ 1.+0.j, 0.-2.j],
[ 0.+2.j, 5.+0.j]])
>>> L = np.linalg.cholesky(A)
>>> L
array([[ 1.+0.j, 0.+0.j],
[ 0.+2.j, 1.+0.j]])
>>> np.dot(L, L.T.conj()) # verify that L * L.H = A
array([[ 1.+0.j, 0.-2.j],
[ 0.+2.j, 5.+0.j]])
>>> A = [[1,-2j],[2j,5]] # what happens if A is only array_like?
>>> np.linalg.cholesky(A) # an ndarray object is returned
array([[ 1.+0.j, 0.+0.j],
[ 0.+2.j, 1.+0.j]])
>>> # But a matrix object is returned if A is a matrix object
>>> LA.cholesky(np.matrix(A))
matrix([[ 1.+0.j, 0.+0.j],
[ 0.+2.j, 1.+0.j]])
"""
extobj = get_linalg_error_extobj(_raise_linalgerror_nonposdef)
gufunc = _umath_linalg.cholesky_lo
a, wrap = _makearray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
signature = 'D->D' if isComplexType(t) else 'd->d'
r = gufunc(a, signature=signature, extobj=extobj)
return wrap(r.astype(result_t, copy=False))
# QR decomposition
def qr(a, mode='reduced'):
"""
Compute the qr factorization of a matrix.
Factor the matrix `a` as *qr*, where `q` is orthonormal and `r` is
upper-triangular.
Parameters
----------
a : array_like, shape (M, N)
Matrix to be factored.
mode : {'reduced', 'complete', 'r', 'raw', 'full', 'economic'}, optional
If K = min(M, N), then
* 'reduced' : returns q, r with dimensions (M, K), (K, N) (default)
* 'complete' : returns q, r with dimensions (M, M), (M, N)
* 'r' : returns r only with dimensions (K, N)
* 'raw' : returns h, tau with dimensions (N, M), (K,)
* 'full' : alias of 'reduced', deprecated
* 'economic' : returns h from 'raw', deprecated.
The options 'reduced', 'complete', and 'raw' are new in numpy 1.8,
see the notes for more information. The default is 'reduced', and to
maintain backward compatibility with earlier versions of numpy both
it and the old default 'full' can be omitted. Note that array h
returned in 'raw' mode is transposed for calling Fortran. The
'economic' mode is deprecated. The modes 'full' and 'economic' may
be passed using only the first letter for backwards compatibility,
but all others must be spelled out. See the Notes for more
explanation.
Returns
-------
q : ndarray of float or complex, optional
A matrix with orthonormal columns. When mode = 'complete' the
result is an orthogonal/unitary matrix depending on whether or not
a is real/complex. The determinant may be either +/- 1 in that
case.
r : ndarray of float or complex, optional
The upper-triangular matrix.
(h, tau) : ndarrays of np.double or np.cdouble, optional
The array h contains the Householder reflectors that generate q
along with r. The tau array contains scaling factors for the
reflectors. In the deprecated 'economic' mode only h is returned.
Raises
------
LinAlgError
If factoring fails.
Notes
-----
This is an interface to the LAPACK routines dgeqrf, zgeqrf,
dorgqr, and zungqr.
For more information on the qr factorization, see for example:
https://en.wikipedia.org/wiki/QR_factorization
Subclasses of `ndarray` are preserved except for the 'raw' mode. So if
`a` is of type `matrix`, all the return values will be matrices too.
New 'reduced', 'complete', and 'raw' options for mode were added in
NumPy 1.8.0 and the old option 'full' was made an alias of 'reduced'. In
addition the options 'full' and 'economic' were deprecated. Because
'full' was the previous default and 'reduced' is the new default,
backward compatibility can be maintained by letting `mode` default.
The 'raw' option was added so that LAPACK routines that can multiply
arrays by q using the Householder reflectors can be used. Note that in
this case the returned arrays are of type np.double or np.cdouble and
the h array is transposed to be FORTRAN compatible. No routines using
the 'raw' return are currently exposed by numpy, but some are available
in lapack_lite and just await the necessary work.
Examples
--------
>>> a = np.random.randn(9, 6)
>>> q, r = np.linalg.qr(a)
>>> np.allclose(a, np.dot(q, r)) # a does equal qr
True
>>> r2 = np.linalg.qr(a, mode='r')
>>> r3 = np.linalg.qr(a, mode='economic')
>>> np.allclose(r, r2) # mode='r' returns the same r as mode='full'
True
>>> # But only triu parts are guaranteed equal when mode='economic'
>>> np.allclose(r, np.triu(r3[:6,:6], k=0))
True
Example illustrating a common use of `qr`: solving of least squares
problems
What are the least-squares-best `m` and `y0` in ``y = y0 + mx`` for
the following data: {(0,1), (1,0), (1,2), (2,1)}. (Graph the points
and you'll see that it should be y0 = 0, m = 1.) The answer is provided
by solving the over-determined matrix equation ``Ax = b``, where::
A = array([[0, 1], [1, 1], [1, 1], [2, 1]])
x = array([[y0], [m]])
b = array([[1], [0], [2], [1]])
If A = qr such that q is orthonormal (which is always possible via
Gram-Schmidt), then ``x = inv(r) * (q.T) * b``. (In numpy practice,
however, we simply use `lstsq`.)
>>> A = np.array([[0, 1], [1, 1], [1, 1], [2, 1]])
>>> A
array([[0, 1],
[1, 1],
[1, 1],
[2, 1]])
>>> b = np.array([1, 0, 2, 1])
>>> q, r = LA.qr(A)
>>> p = np.dot(q.T, b)
>>> np.dot(LA.inv(r), p)
array([ 1.1e-16, 1.0e+00])
"""
if mode not in ('reduced', 'complete', 'r', 'raw'):
if mode in ('f', 'full'):
# 2013-04-01, 1.8
msg = "".join((
"The 'full' option is deprecated in favor of 'reduced'.\n",
"For backward compatibility let mode default."))
warnings.warn(msg, DeprecationWarning, stacklevel=2)
mode = 'reduced'
elif mode in ('e', 'economic'):
# 2013-04-01, 1.8
msg = "The 'economic' option is deprecated."
warnings.warn(msg, DeprecationWarning, stacklevel=2)
mode = 'economic'
else:
raise ValueError("Unrecognized mode '%s'" % mode)
a, wrap = _makearray(a)
_assertRank2(a)
_assertNoEmpty2d(a)
m, n = a.shape
t, result_t = _commonType(a)
a = _fastCopyAndTranspose(t, a)
a = _to_native_byte_order(a)
mn = min(m, n)
tau = zeros((mn,), t)
if isComplexType(t):
lapack_routine = lapack_lite.zgeqrf
routine_name = 'zgeqrf'
else:
lapack_routine = lapack_lite.dgeqrf
routine_name = 'dgeqrf'
# calculate optimal size of work data 'work'
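# (lwork == -1 is the standard LAPACK workspace query; the optimal size is
# returned in work[0])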
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(m, n, a, m, tau, work, -1, 0)
if results['info'] != 0:
raise LinAlgError('%s returns %d' % (routine_name, results['info']))
# do qr decomposition
lwork = int(abs(work[0]))
work = zeros((lwork,), t)
results = lapack_routine(m, n, a, m, tau, work, lwork, 0)
if results['info'] != 0:
raise LinAlgError('%s returns %d' % (routine_name, results['info']))
# handle modes that don't return q
if mode == 'r':
r = _fastCopyAndTranspose(result_t, a[:, :mn])
return wrap(triu(r))
if mode == 'raw':
return a, tau
if mode == 'economic':
if t != result_t :
a = a.astype(result_t, copy=False)
return wrap(a.T)
# generate q from a
if mode == 'complete' and m > n:
mc = m
q = empty((m, m), t)
else:
mc = mn
q = empty((n, m), t)
q[:n] = a
if isComplexType(t):
lapack_routine = lapack_lite.zungqr
routine_name = 'zungqr'
else:
lapack_routine = lapack_lite.dorgqr
routine_name = 'dorgqr'
# determine optimal lwork
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(m, mc, mn, q, m, tau, work, -1, 0)
if results['info'] != 0:
raise LinAlgError('%s returns %d' % (routine_name, results['info']))
# compute q
lwork = int(abs(work[0]))
work = zeros((lwork,), t)
results = lapack_routine(m, mc, mn, q, m, tau, work, lwork, 0)
if results['info'] != 0:
raise LinAlgError('%s returns %d' % (routine_name, results['info']))
q = _fastCopyAndTranspose(result_t, q[:mc])
r = _fastCopyAndTranspose(result_t, a[:, :mc])
return wrap(q), wrap(triu(r))
# Eigenvalues
def eigvals(a):
"""
Compute the eigenvalues of a general matrix.
Main difference between `eigvals` and `eig`: the eigenvectors aren't
returned.
Parameters
----------
a : (..., M, M) array_like
A complex- or real-valued matrix whose eigenvalues will be computed.
Returns
-------
w : (..., M,) ndarray
The eigenvalues, each repeated according to its multiplicity.
They are not necessarily ordered, nor are they necessarily
real for real matrices.
Raises
------
LinAlgError
If the eigenvalue computation does not converge.
See Also
--------
eig : eigenvalues and right eigenvectors of general arrays
eigvalsh : eigenvalues of symmetric or Hermitian arrays.
eigh : eigenvalues and eigenvectors of symmetric/Hermitian arrays.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
This is implemented using the _geev LAPACK routines which compute
the eigenvalues and eigenvectors of general square arrays.
Examples
--------
Illustration, using the fact that the eigenvalues of a diagonal matrix
are its diagonal elements, that multiplying a matrix on the left
by an orthogonal matrix, `Q`, and on the right by `Q.T` (the transpose
of `Q`), preserves the eigenvalues of the "middle" matrix. In other words,
if `Q` is orthogonal, then ``Q * A * Q.T`` has the same eigenvalues as
``A``:
>>> from numpy import linalg as LA
>>> x = np.random.random()
>>> Q = np.array([[np.cos(x), -np.sin(x)], [np.sin(x), np.cos(x)]])
>>> LA.norm(Q[0, :]), LA.norm(Q[1, :]), np.dot(Q[0, :],Q[1, :])
(1.0, 1.0, 0.0)
Now multiply a diagonal matrix by Q on one side and by Q.T on the other:
>>> D = np.diag((-1,1))
>>> LA.eigvals(D)
array([-1., 1.])
>>> A = np.dot(Q, D)
>>> A = np.dot(A, Q.T)
>>> LA.eigvals(A)
array([ 1., -1.])
"""
a, wrap = _makearray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
_assertFinite(a)
t, result_t = _commonType(a)
extobj = get_linalg_error_extobj(
_raise_linalgerror_eigenvalues_nonconvergence)
signature = 'D->D' if isComplexType(t) else 'd->D'
w = _umath_linalg.eigvals(a, signature=signature, extobj=extobj)
if not isComplexType(t):
if all(w.imag == 0):
w = w.real
result_t = _realType(result_t)
else:
result_t = _complexType(result_t)
return w.astype(result_t, copy=False)
def eigvalsh(a, UPLO='L'):
"""
Compute the eigenvalues of a Hermitian or real symmetric matrix.
Main difference from eigh: the eigenvectors are not computed.
Parameters
----------
a : (..., M, M) array_like
A complex- or real-valued matrix whose eigenvalues are to be
computed.
UPLO : {'L', 'U'}, optional
Specifies whether the calculation is done with the lower triangular
part of `a` ('L', default) or the upper triangular part ('U').
Irrespective of this value only the real parts of the diagonal will
be considered in the computation to preserve the notion of a Hermitian
matrix. It therefore follows that the imaginary part of the diagonal
will always be treated as zero.
Returns
-------
w : (..., M,) ndarray
The eigenvalues in ascending order, each repeated according to
its multiplicity.
Raises
------
LinAlgError
If the eigenvalue computation does not converge.
See Also
--------
eigh : eigenvalues and eigenvectors of symmetric/Hermitian arrays.
eigvals : eigenvalues of general real or complex arrays.
eig : eigenvalues and right eigenvectors of general real or complex
arrays.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The eigenvalues are computed using LAPACK routines _syevd, _heevd
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.array([[1, -2j], [2j, 5]])
>>> LA.eigvalsh(a)
array([ 0.17157288, 5.82842712])
>>> # demonstrate the treatment of the imaginary part of the diagonal
>>> a = np.array([[5+2j, 9-2j], [0+2j, 2-1j]])
>>> a
array([[ 5.+2.j, 9.-2.j],
[ 0.+2.j, 2.-1.j]])
>>> # with UPLO='L' this is numerically equivalent to using LA.eigvals()
>>> # with:
>>> b = np.array([[5.+0.j, 0.-2.j], [0.+2.j, 2.-0.j]])
>>> b
array([[ 5.+0.j, 0.-2.j],
[ 0.+2.j, 2.+0.j]])
>>> wa = LA.eigvalsh(a)
>>> wb = LA.eigvals(b)
>>> wa; wb
array([ 1., 6.])
array([ 6.+0.j, 1.+0.j])
"""
UPLO = UPLO.upper()
if UPLO not in ('L', 'U'):
raise ValueError("UPLO argument must be 'L' or 'U'")
extobj = get_linalg_error_extobj(
_raise_linalgerror_eigenvalues_nonconvergence)
if UPLO == 'L':
gufunc = _umath_linalg.eigvalsh_lo
else:
gufunc = _umath_linalg.eigvalsh_up
a, wrap = _makearray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
signature = 'D->d' if isComplexType(t) else 'd->d'
w = gufunc(a, signature=signature, extobj=extobj)
return w.astype(_realType(result_t), copy=False)
def _convertarray(a):
t, result_t = _commonType(a)
a = _fastCT(a.astype(t))
return a, t, result_t
# Eigenvectors
def eig(a):
"""
Compute the eigenvalues and right eigenvectors of a square array.
Parameters
----------
a : (..., M, M) array
Matrices for which the eigenvalues and right eigenvectors will
be computed
Returns
-------
w : (..., M) array
The eigenvalues, each repeated according to its multiplicity.
The eigenvalues are not necessarily ordered. The resulting
array will be of complex type, unless the imaginary part is
zero in which case it will be cast to a real type. When `a`
is real the resulting eigenvalues will be real (0 imaginary
part) or occur in conjugate pairs
v : (..., M, M) array
The normalized (unit "length") eigenvectors, such that the
column ``v[:,i]`` is the eigenvector corresponding to the
eigenvalue ``w[i]``.
Raises
------
LinAlgError
If the eigenvalue computation does not converge.
See Also
--------
eigvals : eigenvalues of a non-symmetric array.
eigh : eigenvalues and eigenvectors of a symmetric or Hermitian
(conjugate symmetric) array.
eigvalsh : eigenvalues of a symmetric or Hermitian (conjugate symmetric)
array.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
This is implemented using the _geev LAPACK routines which compute
the eigenvalues and eigenvectors of general square arrays.
The number `w` is an eigenvalue of `a` if there exists a vector
`v` such that ``dot(a,v) = w * v``. Thus, the arrays `a`, `w`, and
`v` satisfy the equations ``dot(a[:,:], v[:,i]) = w[i] * v[:,i]``
for :math:`i \\in \\{0,...,M-1\\}`.
The array `v` of eigenvectors may not be of maximum rank, that is, some
of the columns may be linearly dependent, although round-off error may
obscure that fact. If the eigenvalues are all different, then theoretically
the eigenvectors are linearly independent. Likewise, the (complex-valued)
matrix of eigenvectors `v` is unitary if the matrix `a` is normal, i.e.,
if ``dot(a, a.H) = dot(a.H, a)``, where `a.H` denotes the conjugate
transpose of `a`.
Finally, it is emphasized that `v` consists of the *right* (as in
right-hand side) eigenvectors of `a`. A vector `y` satisfying
``dot(y.T, a) = z * y.T`` for some number `z` is called a *left*
eigenvector of `a`, and, in general, the left and right eigenvectors
of a matrix are not necessarily the (perhaps conjugate) transposes
of each other.
References
----------
G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando, FL,
Academic Press, Inc., 1980, Various pp.
Examples
--------
>>> from numpy import linalg as LA
(Almost) trivial example with real e-values and e-vectors.
>>> w, v = LA.eig(np.diag((1, 2, 3)))
>>> w; v
array([ 1., 2., 3.])
array([[ 1., 0., 0.],
[ 0., 1., 0.],
[ 0., 0., 1.]])
Real matrix possessing complex e-values and e-vectors; note that the
e-values are complex conjugates of each other.
>>> w, v = LA.eig(np.array([[1, -1], [1, 1]]))
>>> w; v
array([ 1. + 1.j, 1. - 1.j])
array([[ 0.70710678+0.j , 0.70710678+0.j ],
[ 0.00000000-0.70710678j, 0.00000000+0.70710678j]])
Complex-valued matrix with real e-values (but complex-valued e-vectors);
note that a.conj().T = a, i.e., a is Hermitian.
>>> a = np.array([[1, 1j], [-1j, 1]])
>>> w, v = LA.eig(a)
>>> w; v
array([ 2.00000000e+00+0.j, 5.98651912e-36+0.j]) # i.e., {2, 0}
array([[ 0.00000000+0.70710678j, 0.70710678+0.j ],
[ 0.70710678+0.j , 0.00000000+0.70710678j]])
Be careful about round-off error!
>>> a = np.array([[1 + 1e-9, 0], [0, 1 - 1e-9]])
>>> # Theor. e-values are 1 +/- 1e-9
>>> w, v = LA.eig(a)
>>> w; v
array([ 1., 1.])
array([[ 1., 0.],
[ 0., 1.]])
"""
a, wrap = _makearray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
_assertFinite(a)
t, result_t = _commonType(a)
extobj = get_linalg_error_extobj(
_raise_linalgerror_eigenvalues_nonconvergence)
signature = 'D->DD' if isComplexType(t) else 'd->DD'
w, vt = _umath_linalg.eig(a, signature=signature, extobj=extobj)
if not isComplexType(t) and all(w.imag == 0.0):
w = w.real
vt = vt.real
result_t = _realType(result_t)
else:
result_t = _complexType(result_t)
vt = vt.astype(result_t, copy=False)
return w.astype(result_t, copy=False), wrap(vt)
def eigh(a, UPLO='L'):
"""
Return the eigenvalues and eigenvectors of a Hermitian or symmetric matrix.
Returns two objects, a 1-D array containing the eigenvalues of `a`, and
a 2-D square array or matrix (depending on the input type) of the
corresponding eigenvectors (in columns).
Parameters
----------
a : (..., M, M) array
Hermitian/Symmetric matrices whose eigenvalues and
eigenvectors are to be computed.
UPLO : {'L', 'U'}, optional
Specifies whether the calculation is done with the lower triangular
part of `a` ('L', default) or the upper triangular part ('U').
Irrespective of this value only the real parts of the diagonal will
be considered in the computation to preserve the notion of a Hermitian
matrix. It therefore follows that the imaginary part of the diagonal
will always be treated as zero.
Returns
-------
w : (..., M) ndarray
The eigenvalues in ascending order, each repeated according to
its multiplicity.
v : {(..., M, M) ndarray, (..., M, M) matrix}
The column ``v[:, i]`` is the normalized eigenvector corresponding
to the eigenvalue ``w[i]``. Will return a matrix object if `a` is
a matrix object.
Raises
------
LinAlgError
If the eigenvalue computation does not converge.
See Also
--------
eigvalsh : eigenvalues of symmetric or Hermitian arrays.
eig : eigenvalues and right eigenvectors for non-symmetric arrays.
eigvals : eigenvalues of non-symmetric arrays.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The eigenvalues/eigenvectors are computed using LAPACK routines _syevd,
_heevd
The eigenvalues of real symmetric or complex Hermitian matrices are
always real. [1]_ The array `v` of (column) eigenvectors is unitary
and `a`, `w`, and `v` satisfy the equations
``dot(a, v[:, i]) = w[i] * v[:, i]``.
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,
FL, Academic Press, Inc., 1980, pg. 222.
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.array([[1, -2j], [2j, 5]])
>>> a
array([[ 1.+0.j, 0.-2.j],
[ 0.+2.j, 5.+0.j]])
>>> w, v = LA.eigh(a)
>>> w; v
array([ 0.17157288, 5.82842712])
array([[-0.92387953+0.j , -0.38268343+0.j ],
[ 0.00000000+0.38268343j, 0.00000000-0.92387953j]])
>>> np.dot(a, v[:, 0]) - w[0] * v[:, 0] # verify 1st e-val/vec pair
array([2.77555756e-17 + 0.j, 0. + 1.38777878e-16j])
>>> np.dot(a, v[:, 1]) - w[1] * v[:, 1] # verify 2nd e-val/vec pair
array([ 0.+0.j, 0.+0.j])
>>> A = np.matrix(a) # what happens if input is a matrix object
>>> A
matrix([[ 1.+0.j, 0.-2.j],
[ 0.+2.j, 5.+0.j]])
>>> w, v = LA.eigh(A)
>>> w; v
array([ 0.17157288, 5.82842712])
matrix([[-0.92387953+0.j , -0.38268343+0.j ],
[ 0.00000000+0.38268343j, 0.00000000-0.92387953j]])
>>> # demonstrate the treatment of the imaginary part of the diagonal
>>> a = np.array([[5+2j, 9-2j], [0+2j, 2-1j]])
>>> a
array([[ 5.+2.j, 9.-2.j],
[ 0.+2.j, 2.-1.j]])
>>> # with UPLO='L' this is numerically equivalent to using LA.eig() with:
>>> b = np.array([[5.+0.j, 0.-2.j], [0.+2.j, 2.-0.j]])
>>> b
array([[ 5.+0.j, 0.-2.j],
[ 0.+2.j, 2.+0.j]])
>>> wa, va = LA.eigh(a)
>>> wb, vb = LA.eig(b)
>>> wa; wb
array([ 1., 6.])
array([ 6.+0.j, 1.+0.j])
>>> va; vb
array([[-0.44721360-0.j , -0.89442719+0.j ],
[ 0.00000000+0.89442719j, 0.00000000-0.4472136j ]])
array([[ 0.89442719+0.j , 0.00000000-0.4472136j],
[ 0.00000000-0.4472136j, 0.89442719+0.j ]])
"""
UPLO = UPLO.upper()
if UPLO not in ('L', 'U'):
raise ValueError("UPLO argument must be 'L' or 'U'")
a, wrap = _makearray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
extobj = get_linalg_error_extobj(
_raise_linalgerror_eigenvalues_nonconvergence)
if UPLO == 'L':
gufunc = _umath_linalg.eigh_lo
else:
gufunc = _umath_linalg.eigh_up
signature = 'D->dD' if isComplexType(t) else 'd->dd'
w, vt = gufunc(a, signature=signature, extobj=extobj)
w = w.astype(_realType(result_t), copy=False)
vt = vt.astype(result_t, copy=False)
return w, wrap(vt)
# Singular value decomposition
def svd(a, full_matrices=True, compute_uv=True):
"""
Singular Value Decomposition.
When `a` is a 2D array, it is factorized as ``u @ np.diag(s) @ vh
= (u * s) @ vh``, where `u` and `vh` are 2D unitary arrays and `s` is a 1D
array of `a`'s singular values. When `a` is higher-dimensional, SVD is
applied in stacked mode as explained below.
Parameters
----------
a : (..., M, N) array_like
A real or complex array with ``a.ndim >= 2``.
full_matrices : bool, optional
If True (default), `u` and `vh` have the shapes ``(..., M, M)`` and
``(..., N, N)``, respectively. Otherwise, the shapes are
``(..., M, K)`` and ``(..., K, N)``, respectively, where
``K = min(M, N)``.
compute_uv : bool, optional
Whether or not to compute `u` and `vh` in addition to `s`. True
by default.
Returns
-------
u : { (..., M, M), (..., M, K) } array
Unitary array(s). The first ``a.ndim - 2`` dimensions have the same
size as those of the input `a`. The size of the last two dimensions
depends on the value of `full_matrices`. Only returned when
`compute_uv` is True.
s : (..., K) array
Vector(s) with the singular values, within each vector sorted in
descending order. The first ``a.ndim - 2`` dimensions have the same
size as those of the input `a`.
vh : { (..., N, N), (..., K, N) } array
Unitary array(s). The first ``a.ndim - 2`` dimensions have the same
size as those of the input `a`. The size of the last two dimensions
depends on the value of `full_matrices`. Only returned when
`compute_uv` is True.
Raises
------
LinAlgError
If SVD computation does not converge.
Notes
-----
.. versionchanged:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The decomposition is performed using LAPACK routine ``_gesdd``.
SVD is usually described for the factorization of a 2D matrix :math:`A`.
The higher-dimensional case will be discussed below. In the 2D case, SVD is
written as :math:`A = U S V^H`, where :math:`A = a`, :math:`U= u`,
:math:`S= \\mathtt{np.diag}(s)` and :math:`V^H = vh`. The 1D array `s`
contains the singular values of `a` and `u` and `vh` are unitary. The rows
of `vh` are the eigenvectors of :math:`A^H A` and the columns of `u` are
the eigenvectors of :math:`A A^H`. In both cases the corresponding
(possibly non-zero) eigenvalues are given by ``s**2``.
If `a` has more than two dimensions, then broadcasting rules apply, as
explained in :ref:`routines.linalg-broadcasting`. This means that SVD is
working in "stacked" mode: it iterates over all indices of the first
``a.ndim - 2`` dimensions and for each combination SVD is applied to the
last two indices. The matrix `a` can be reconstructed from the
decomposition with either ``(u * s[..., None, :]) @ vh`` or
``u @ (s[..., None] * vh)``. (The ``@`` operator can be replaced by the
function ``np.matmul`` for python versions below 3.5.)
If `a` is a ``matrix`` object (as opposed to an ``ndarray``), then so are
all the return values.
Examples
--------
>>> a = np.random.randn(9, 6) + 1j*np.random.randn(9, 6)
>>> b = np.random.randn(2, 7, 8, 3) + 1j*np.random.randn(2, 7, 8, 3)
Reconstruction based on full SVD, 2D case:
>>> u, s, vh = np.linalg.svd(a, full_matrices=True)
>>> u.shape, s.shape, vh.shape
((9, 9), (6,), (6, 6))
>>> np.allclose(a, np.dot(u[:, :6] * s, vh))
True
>>> smat = np.zeros((9, 6), dtype=complex)
>>> smat[:6, :6] = np.diag(s)
>>> np.allclose(a, np.dot(u, np.dot(smat, vh)))
True
Reconstruction based on reduced SVD, 2D case:
>>> u, s, vh = np.linalg.svd(a, full_matrices=False)
>>> u.shape, s.shape, vh.shape
((9, 6), (6,), (6, 6))
>>> np.allclose(a, np.dot(u * s, vh))
True
>>> smat = np.diag(s)
>>> np.allclose(a, np.dot(u, np.dot(smat, vh)))
True
Reconstruction based on full SVD, 4D case:
>>> u, s, vh = np.linalg.svd(b, full_matrices=True)
>>> u.shape, s.shape, vh.shape
((2, 7, 8, 8), (2, 7, 3), (2, 7, 3, 3))
>>> np.allclose(b, np.matmul(u[..., :3] * s[..., None, :], vh))
True
>>> np.allclose(b, np.matmul(u[..., :3], s[..., None] * vh))
True
Reconstruction based on reduced SVD, 4D case:
>>> u, s, vh = np.linalg.svd(b, full_matrices=False)
>>> u.shape, s.shape, vh.shape
((2, 7, 8, 3), (2, 7, 3), (2, 7, 3, 3))
>>> np.allclose(b, np.matmul(u * s[..., None, :], vh))
True
>>> np.allclose(b, np.matmul(u, s[..., None] * vh))
True
"""
a, wrap = _makearray(a)
_assertNoEmpty2d(a)
_assertRankAtLeast2(a)
t, result_t = _commonType(a)
extobj = get_linalg_error_extobj(_raise_linalgerror_svd_nonconvergence)
m, n = a.shape[-2:]
if compute_uv:
if full_matrices:
if m < n:
gufunc = _umath_linalg.svd_m_f
else:
gufunc = _umath_linalg.svd_n_f
else:
if m < n:
gufunc = _umath_linalg.svd_m_s
else:
gufunc = _umath_linalg.svd_n_s
signature = 'D->DdD' if isComplexType(t) else 'd->ddd'
u, s, vh = gufunc(a, signature=signature, extobj=extobj)
u = u.astype(result_t, copy=False)
s = s.astype(_realType(result_t), copy=False)
vh = vh.astype(result_t, copy=False)
return wrap(u), s, wrap(vh)
else:
if m < n:
gufunc = _umath_linalg.svd_m
else:
gufunc = _umath_linalg.svd_n
signature = 'D->d' if isComplexType(t) else 'd->d'
s = gufunc(a, signature=signature, extobj=extobj)
s = s.astype(_realType(result_t), copy=False)
return s
def cond(x, p=None):
"""
Compute the condition number of a matrix.
This function is capable of returning the condition number using
one of seven different norms, depending on the value of `p` (see
Parameters below).
Parameters
----------
x : (..., M, N) array_like
The matrix whose condition number is sought.
p : {None, 1, -1, 2, -2, inf, -inf, 'fro'}, optional
Order of the norm:
===== ============================
p norm for matrices
===== ============================
None 2-norm, computed directly using the ``SVD``
'fro' Frobenius norm
inf max(sum(abs(x), axis=1))
-inf min(sum(abs(x), axis=1))
1 max(sum(abs(x), axis=0))
-1 min(sum(abs(x), axis=0))
2 2-norm (largest sing. value)
-2 smallest singular value
===== ============================
inf means the numpy.inf object, and the Frobenius norm is
the root-of-sum-of-squares norm.
Returns
-------
c : {float, inf}
The condition number of the matrix. May be infinite.
See Also
--------
numpy.linalg.norm
Notes
-----
The condition number of `x` is defined as the norm of `x` times the
norm of the inverse of `x` [1]_; the norm can be the usual L2-norm
(root-of-sum-of-squares) or one of a number of other matrix norms.
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, Orlando, FL,
Academic Press, Inc., 1980, pg. 285.
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.array([[1, 0, -1], [0, 1, 0], [1, 0, 1]])
>>> a
array([[ 1, 0, -1],
[ 0, 1, 0],
[ 1, 0, 1]])
>>> LA.cond(a)
1.4142135623730951
>>> LA.cond(a, 'fro')
3.1622776601683795
>>> LA.cond(a, np.inf)
2.0
>>> LA.cond(a, -np.inf)
1.0
>>> LA.cond(a, 1)
2.0
>>> LA.cond(a, -1)
1.0
>>> LA.cond(a, 2)
1.4142135623730951
>>> LA.cond(a, -2)
0.70710678118654746
>>> min(LA.svd(a, compute_uv=0))*min(LA.svd(LA.inv(a), compute_uv=0))
0.70710678118654746
"""
x = asarray(x) # in case we have a matrix
if p is None or p == 2 or p == -2:
s = svd(x, compute_uv=False)
with errstate(all='ignore'):
if p == -2:
r = s[..., -1] / s[..., 0]
else:
r = s[..., 0] / s[..., -1]
else:
# Call inv(x) ignoring errors. The result array will
# contain nans in the entries where inversion failed.
_assertRankAtLeast2(x)
_assertNdSquareness(x)
t, result_t = _commonType(x)
signature = 'D->D' if isComplexType(t) else 'd->d'
with errstate(all='ignore'):
invx = _umath_linalg.inv(x, signature=signature)
r = norm(x, p, axis=(-2, -1)) * norm(invx, p, axis=(-2, -1))
r = r.astype(result_t, copy=False)
# Convert nans to infs unless the original array had nan entries
r = asarray(r)
nan_mask = isnan(r)
if nan_mask.any():
nan_mask &= ~isnan(x).any(axis=(-2, -1))
if r.ndim > 0:
r[nan_mask] = Inf
elif nan_mask:
r[()] = Inf
# Convention is to return scalars instead of 0d arrays
if r.ndim == 0:
r = r[()]
return r
def matrix_rank(M, tol=None, hermitian=False):
"""
Return matrix rank of array using SVD method
Rank of the array is the number of singular values of the array that are
greater than `tol`.
.. versionchanged:: 1.14
Can now operate on stacks of matrices
Parameters
----------
M : {(M,), (..., M, N)} array_like
input vector or stack of matrices
tol : (...) array_like, float, optional
threshold below which SVD values are considered zero. If `tol` is
None, and ``S`` is an array with singular values for `M`, and
``eps`` is the epsilon value for datatype of ``S``, then `tol` is
set to ``S.max() * max(M.shape) * eps``.
.. versionchanged:: 1.14
Broadcasted against the stack of matrices
hermitian : bool, optional
If True, `M` is assumed to be Hermitian (symmetric if real-valued),
enabling a more efficient method for finding singular values.
Defaults to False.
.. versionadded:: 1.14
Notes
-----
The default threshold to detect rank deficiency is a test on the magnitude
of the singular values of `M`. By default, we identify singular values less
than ``S.max() * max(M.shape) * eps`` as indicating rank deficiency (with
the symbols defined above). This is the algorithm MATLAB uses [1]. It also
appears in *Numerical recipes* in the discussion of SVD solutions for linear
least squares [2].
This default threshold is designed to detect rank deficiency accounting for
the numerical errors of the SVD computation. Imagine that there is a column
in `M` that is an exact (in floating point) linear combination of other
columns in `M`. Computing the SVD on `M` will not produce a singular value
exactly equal to 0 in general: any difference of the smallest SVD value from
0 will be caused by numerical imprecision in the calculation of the SVD.
Our threshold for small SVD values takes this numerical imprecision into
account, and the default threshold will detect such numerical rank
deficiency. The threshold may declare a matrix `M` rank deficient even if
the linear combination of some columns of `M` is not exactly equal to
another column of `M` but only numerically very close to another column of
`M`.
We chose our default threshold because it is in wide use. Other thresholds
are possible. For example, elsewhere in the 2007 edition of *Numerical
recipes* there is an alternative threshold of ``S.max() *
np.finfo(M.dtype).eps / 2. * np.sqrt(m + n + 1.)``. The authors describe
this threshold as being based on "expected roundoff error" (p 71).
The thresholds above deal with floating point roundoff error in the
calculation of the SVD. However, you may have more information about the
sources of error in `M` that would make you consider other tolerance values
to detect *effective* rank deficiency. The most useful measure of the
tolerance depends on the operations you intend to use on your matrix. For
example, if your data come from uncertain measurements with uncertainties
greater than floating point epsilon, choosing a tolerance near that
uncertainty may be preferable. The tolerance may be absolute if the
uncertainties are absolute rather than relative.
References
----------
    .. [1] MATLAB reference documentation, "Rank"
https://www.mathworks.com/help/techdoc/ref/rank.html
.. [2] W. H. Press, S. A. Teukolsky, W. T. Vetterling and B. P. Flannery,
"Numerical Recipes (3rd edition)", Cambridge University Press, 2007,
page 795.
Examples
--------
>>> from numpy.linalg import matrix_rank
>>> matrix_rank(np.eye(4)) # Full rank matrix
4
>>> I=np.eye(4); I[-1,-1] = 0. # rank deficient matrix
>>> matrix_rank(I)
3
>>> matrix_rank(np.ones((4,))) # 1 dimension - rank 1 unless all 0
1
>>> matrix_rank(np.zeros((4,)))
0
"""
M = asarray(M)
if M.ndim < 2:
return int(not all(M==0))
if hermitian:
S = abs(eigvalsh(M))
else:
S = svd(M, compute_uv=False)
if tol is None:
tol = S.max(axis=-1, keepdims=True) * max(M.shape[-2:]) * finfo(S.dtype).eps
else:
tol = asarray(tol)[..., newaxis]
return count_nonzero(S > tol, axis=-1)
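# [Editor's note on matrix_rank above -- illustrative sketch, not part of the original module.]
# The Notes explain that `tol` decides which singular values count toward the rank; a minimal
# comparison of the default (relative) threshold versus an explicit absolute cutoff:
#
#     >>> import numpy as np
#     >>> A = np.eye(4); A[-1, -1] = 1e-10          # nearly rank-deficient
#     >>> np.linalg.matrix_rank(A)                  # default tol ~ 4*eps, so 1e-10 still counts
#     4
#     >>> np.linalg.matrix_rank(A, tol=1e-8)        # absolute cutoff: the tiny value is dropped
#     3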
# Generalized inverse
def pinv(a, rcond=1e-15 ):
"""
Compute the (Moore-Penrose) pseudo-inverse of a matrix.
Calculate the generalized inverse of a matrix using its
singular-value decomposition (SVD) and including all
*large* singular values.
.. versionchanged:: 1.14
Can now operate on stacks of matrices
Parameters
----------
a : (..., M, N) array_like
Matrix or stack of matrices to be pseudo-inverted.
rcond : (...) array_like of float
Cutoff for small singular values.
Singular values smaller (in modulus) than
`rcond` * largest_singular_value (again, in modulus)
are set to zero. Broadcasts against the stack of matrices
Returns
-------
B : (..., N, M) ndarray
The pseudo-inverse of `a`. If `a` is a `matrix` instance, then so
is `B`.
Raises
------
LinAlgError
If the SVD computation does not converge.
Notes
-----
The pseudo-inverse of a matrix A, denoted :math:`A^+`, is
defined as: "the matrix that 'solves' [the least-squares problem]
:math:`Ax = b`," i.e., if :math:`\\bar{x}` is said solution, then
:math:`A^+` is that matrix such that :math:`\\bar{x} = A^+b`.
It can be shown that if :math:`Q_1 \\Sigma Q_2^T = A` is the singular
value decomposition of A, then
:math:`A^+ = Q_2 \\Sigma^+ Q_1^T`, where :math:`Q_{1,2}` are
orthogonal matrices, :math:`\\Sigma` is a diagonal matrix consisting
of A's so-called singular values, (followed, typically, by
zeros), and then :math:`\\Sigma^+` is simply the diagonal matrix
consisting of the reciprocals of A's singular values
(again, followed by zeros). [1]_
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,
FL, Academic Press, Inc., 1980, pp. 139-142.
Examples
--------
The following example checks that ``a * a+ * a == a`` and
``a+ * a * a+ == a+``:
>>> a = np.random.randn(9, 6)
>>> B = np.linalg.pinv(a)
>>> np.allclose(a, np.dot(a, np.dot(B, a)))
True
>>> np.allclose(B, np.dot(B, np.dot(a, B)))
True
"""
a, wrap = _makearray(a)
rcond = asarray(rcond)
if _isEmpty2d(a):
m, n = a.shape[-2:]
res = empty(a.shape[:-2] + (n, m), dtype=a.dtype)
return wrap(res)
a = a.conjugate()
u, s, vt = svd(a, full_matrices=False)
# discard small singular values
cutoff = rcond[..., newaxis] * amax(s, axis=-1, keepdims=True)
large = s > cutoff
s = divide(1, s, where=large, out=s)
s[~large] = 0
res = matmul(transpose(vt), multiply(s[..., newaxis], transpose(u)))
return wrap(res)
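# [Editor's note on pinv above -- hedged illustration, not part of the original module.]
# The Notes describe the pseudo-inverse in terms of the SVD; for a real, full-rank matrix the
# reduced SVD used above reproduces the same result by hand:
#
#     >>> import numpy as np
#     >>> a = np.random.randn(9, 6)
#     >>> u, s, vt = np.linalg.svd(a, full_matrices=False)
#     >>> manual = vt.T @ np.diag(1.0 / s) @ u.T    # assumes every singular value exceeds the cutoff
#     >>> np.allclose(manual, np.linalg.pinv(a))
#     True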
# Determinant
def slogdet(a):
"""
Compute the sign and (natural) logarithm of the determinant of an array.
If an array has a very small or very large determinant, then a call to
`det` may overflow or underflow. This routine is more robust against such
issues, because it computes the logarithm of the determinant rather than
the determinant itself.
Parameters
----------
a : (..., M, M) array_like
Input array, has to be a square 2-D array.
Returns
-------
sign : (...) array_like
A number representing the sign of the determinant. For a real matrix,
this is 1, 0, or -1. For a complex matrix, this is a complex number
with absolute value 1 (i.e., it is on the unit circle), or else 0.
logdet : (...) array_like
The natural log of the absolute value of the determinant.
If the determinant is zero, then `sign` will be 0 and `logdet` will be
-Inf. In all cases, the determinant is equal to ``sign * np.exp(logdet)``.
See Also
--------
det
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
.. versionadded:: 1.6.0
The determinant is computed via LU factorization using the LAPACK
routine z/dgetrf.
Examples
--------
The determinant of a 2-D array ``[[a, b], [c, d]]`` is ``ad - bc``:
>>> a = np.array([[1, 2], [3, 4]])
>>> (sign, logdet) = np.linalg.slogdet(a)
>>> (sign, logdet)
(-1, 0.69314718055994529)
>>> sign * np.exp(logdet)
-2.0
Computing log-determinants for a stack of matrices:
>>> a = np.array([ [[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]] ])
>>> a.shape
(3, 2, 2)
>>> sign, logdet = np.linalg.slogdet(a)
>>> (sign, logdet)
(array([-1., -1., -1.]), array([ 0.69314718, 1.09861229, 2.07944154]))
>>> sign * np.exp(logdet)
array([-2., -3., -8.])
This routine succeeds where ordinary `det` does not:
>>> np.linalg.det(np.eye(500) * 0.1)
0.0
>>> np.linalg.slogdet(np.eye(500) * 0.1)
(1, -1151.2925464970228)
"""
a = asarray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
real_t = _realType(result_t)
signature = 'D->Dd' if isComplexType(t) else 'd->dd'
sign, logdet = _umath_linalg.slogdet(a, signature=signature)
sign = sign.astype(result_t, copy=False)
logdet = logdet.astype(real_t, copy=False)
return sign, logdet
def det(a):
"""
Compute the determinant of an array.
Parameters
----------
a : (..., M, M) array_like
Input array to compute determinants for.
Returns
-------
det : (...) array_like
Determinant of `a`.
See Also
--------
slogdet : Another way to represent the determinant, more suitable
for large matrices where underflow/overflow may occur.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The determinant is computed via LU factorization using the LAPACK
routine z/dgetrf.
Examples
--------
The determinant of a 2-D array [[a, b], [c, d]] is ad - bc:
>>> a = np.array([[1, 2], [3, 4]])
>>> np.linalg.det(a)
-2.0
Computing determinants for a stack of matrices:
>>> a = np.array([ [[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]] ])
>>> a.shape
(3, 2, 2)
>>> np.linalg.det(a)
array([-2., -3., -8.])
"""
a = asarray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
signature = 'D->D' if isComplexType(t) else 'd->d'
r = _umath_linalg.det(a, signature=signature)
r = r.astype(result_t, copy=False)
return r
# Linear Least Squares
def lstsq(a, b, rcond="warn"):
"""
Return the least-squares solution to a linear matrix equation.
Solves the equation `a x = b` by computing a vector `x` that
minimizes the Euclidean 2-norm `|| b - a x ||^2`. The equation may
be under-, well-, or over- determined (i.e., the number of
linearly independent rows of `a` can be less than, equal to, or
greater than its number of linearly independent columns). If `a`
is square and of full rank, then `x` (but for round-off error) is
the "exact" solution of the equation.
Parameters
----------
a : (M, N) array_like
"Coefficient" matrix.
b : {(M,), (M, K)} array_like
Ordinate or "dependent variable" values. If `b` is two-dimensional,
the least-squares solution is calculated for each of the `K` columns
of `b`.
rcond : float, optional
Cut-off ratio for small singular values of `a`.
For the purposes of rank determination, singular values are treated
as zero if they are smaller than `rcond` times the largest singular
value of `a`.
.. versionchanged:: 1.14.0
If not set, a FutureWarning is given. The previous default
of ``-1`` will use the machine precision as `rcond` parameter,
the new default will use the machine precision times `max(M, N)`.
To silence the warning and use the new default, use ``rcond=None``,
to keep using the old behavior, use ``rcond=-1``.
Returns
-------
x : {(N,), (N, K)} ndarray
Least-squares solution. If `b` is two-dimensional,
the solutions are in the `K` columns of `x`.
residuals : {(1,), (K,), (0,)} ndarray
Sums of residuals; squared Euclidean 2-norm for each column in
``b - a*x``.
If the rank of `a` is < N or M <= N, this is an empty array.
If `b` is 1-dimensional, this is a (1,) shape array.
Otherwise the shape is (K,).
rank : int
Rank of matrix `a`.
s : (min(M, N),) ndarray
Singular values of `a`.
Raises
------
LinAlgError
If computation does not converge.
Notes
-----
If `b` is a matrix, then all array results are returned as matrices.
Examples
--------
Fit a line, ``y = mx + c``, through some noisy data-points:
>>> x = np.array([0, 1, 2, 3])
>>> y = np.array([-1, 0.2, 0.9, 2.1])
By examining the coefficients, we see that the line should have a
gradient of roughly 1 and cut the y-axis at, more or less, -1.
We can rewrite the line equation as ``y = Ap``, where ``A = [[x 1]]``
and ``p = [[m], [c]]``. Now use `lstsq` to solve for `p`:
>>> A = np.vstack([x, np.ones(len(x))]).T
>>> A
array([[ 0., 1.],
[ 1., 1.],
[ 2., 1.],
[ 3., 1.]])
>>> m, c = np.linalg.lstsq(A, y, rcond=None)[0]
>>> print(m, c)
1.0 -0.95
Plot the data along with the fitted line:
>>> import matplotlib.pyplot as plt
>>> plt.plot(x, y, 'o', label='Original data', markersize=10)
>>> plt.plot(x, m*x + c, 'r', label='Fitted line')
>>> plt.legend()
>>> plt.show()
"""
a, _ = _makearray(a)
b, wrap = _makearray(b)
is_1d = b.ndim == 1
if is_1d:
b = b[:, newaxis]
_assertRank2(a, b)
_assertNoEmpty2d(a, b) # TODO: relax this constraint
m, n = a.shape[-2:]
m2, n_rhs = b.shape[-2:]
if m != m2:
raise LinAlgError('Incompatible dimensions')
t, result_t = _commonType(a, b)
real_t = _linalgRealType(t)
result_real_t = _realType(result_t)
# Determine default rcond value
if rcond == "warn":
# 2017-08-19, 1.14.0
warnings.warn("`rcond` parameter will change to the default of "
"machine precision times ``max(M, N)`` where M and N "
"are the input matrix dimensions.\n"
"To use the future default and silence this warning "
"we advise to pass `rcond=None`, to keep using the old, "
"explicitly pass `rcond=-1`.",
FutureWarning, stacklevel=2)
rcond = -1
if rcond is None:
rcond = finfo(t).eps * max(n, m)
if m <= n:
gufunc = _umath_linalg.lstsq_m
else:
gufunc = _umath_linalg.lstsq_n
signature = 'DDd->Ddid' if isComplexType(t) else 'ddd->ddid'
extobj = get_linalg_error_extobj(_raise_linalgerror_lstsq)
x, resids, rank, s = gufunc(a, b, rcond, signature=signature, extobj=extobj)
# remove the axis we added
if is_1d:
x = x.squeeze(axis=-1)
# we probably should squeeze resids too, but we can't
# without breaking compatibility.
# as documented
if rank != n or m <= n:
resids = array([], result_real_t)
# coerce output arrays
s = s.astype(result_real_t, copy=False)
resids = resids.astype(result_real_t, copy=False)
x = x.astype(result_t, copy=True) # Copying lets the memory in r_parts be freed
return wrap(x), wrap(resids), rank, s
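# [Editor's note on lstsq above -- hedged sketch, not part of the original module.] The `rcond`
# handling distinguishes three cases; reusing the fit from the docstring example:
#
#     >>> import numpy as np
#     >>> A = np.vstack([np.arange(4.), np.ones(4)]).T
#     >>> y = np.array([-1, 0.2, 0.9, 2.1])
#     >>> m, c = np.linalg.lstsq(A, y, rcond=None)[0]   # new default: eps * max(M, N)
#     >>> m, c = np.linalg.lstsq(A, y, rcond=-1)[0]     # legacy default: bare machine precision
#
# Calling lstsq without `rcond` keeps the legacy cutoff but emits the FutureWarning built above.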
def _multi_svd_norm(x, row_axis, col_axis, op):
"""Compute a function of the singular values of the 2-D matrices in `x`.
This is a private utility function used by numpy.linalg.norm().
Parameters
----------
x : ndarray
row_axis, col_axis : int
The axes of `x` that hold the 2-D matrices.
op : callable
This should be either numpy.amin or numpy.amax or numpy.sum.
Returns
-------
result : float or ndarray
If `x` is 2-D, the return values is a float.
Otherwise, it is an array with ``x.ndim - 2`` dimensions.
The return values are either the minimum or maximum or sum of the
singular values of the matrices, depending on whether `op`
is `numpy.amin` or `numpy.amax` or `numpy.sum`.
"""
y = moveaxis(x, (row_axis, col_axis), (-2, -1))
result = op(svd(y, compute_uv=0), axis=-1)
return result
def norm(x, ord=None, axis=None, keepdims=False):
"""
Matrix or vector norm.
This function is able to return one of eight different matrix norms,
or one of an infinite number of vector norms (described below), depending
on the value of the ``ord`` parameter.
Parameters
----------
x : array_like
Input array. If `axis` is None, `x` must be 1-D or 2-D.
ord : {non-zero int, inf, -inf, 'fro', 'nuc'}, optional
Order of the norm (see table under ``Notes``). inf means numpy's
`inf` object.
axis : {int, 2-tuple of ints, None}, optional
If `axis` is an integer, it specifies the axis of `x` along which to
compute the vector norms. If `axis` is a 2-tuple, it specifies the
axes that hold 2-D matrices, and the matrix norms of these matrices
are computed. If `axis` is None then either a vector norm (when `x`
is 1-D) or a matrix norm (when `x` is 2-D) is returned.
.. versionadded:: 1.8.0
keepdims : bool, optional
If this is set to True, the axes which are normed over are left in the
result as dimensions with size one. With this option the result will
broadcast correctly against the original `x`.
.. versionadded:: 1.10.0
Returns
-------
n : float or ndarray
Norm of the matrix or vector(s).
Notes
-----
For values of ``ord <= 0``, the result is, strictly speaking, not a
mathematical 'norm', but it may still be useful for various numerical
purposes.
The following norms can be calculated:
===== ============================ ==========================
ord norm for matrices norm for vectors
===== ============================ ==========================
None Frobenius norm 2-norm
'fro' Frobenius norm --
'nuc' nuclear norm --
inf max(sum(abs(x), axis=1)) max(abs(x))
-inf min(sum(abs(x), axis=1)) min(abs(x))
0 -- sum(x != 0)
1 max(sum(abs(x), axis=0)) as below
-1 min(sum(abs(x), axis=0)) as below
2 2-norm (largest sing. value) as below
-2 smallest singular value as below
other -- sum(abs(x)**ord)**(1./ord)
===== ============================ ==========================
The Frobenius norm is given by [1]_:
:math:`||A||_F = [\\sum_{i,j} abs(a_{i,j})^2]^{1/2}`
The nuclear norm is the sum of the singular values.
References
----------
.. [1] G. H. Golub and C. F. Van Loan, *Matrix Computations*,
Baltimore, MD, Johns Hopkins University Press, 1985, pg. 15
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.arange(9) - 4
>>> a
array([-4, -3, -2, -1, 0, 1, 2, 3, 4])
>>> b = a.reshape((3, 3))
>>> b
array([[-4, -3, -2],
[-1, 0, 1],
[ 2, 3, 4]])
>>> LA.norm(a)
7.745966692414834
>>> LA.norm(b)
7.745966692414834
>>> LA.norm(b, 'fro')
7.745966692414834
>>> LA.norm(a, np.inf)
4.0
>>> LA.norm(b, np.inf)
9.0
>>> LA.norm(a, -np.inf)
0.0
>>> LA.norm(b, -np.inf)
2.0
>>> LA.norm(a, 1)
20.0
>>> LA.norm(b, 1)
7.0
>>> LA.norm(a, -1)
-4.6566128774142013e-010
>>> LA.norm(b, -1)
6.0
>>> LA.norm(a, 2)
7.745966692414834
>>> LA.norm(b, 2)
7.3484692283495345
>>> LA.norm(a, -2)
nan
>>> LA.norm(b, -2)
1.8570331885190563e-016
>>> LA.norm(a, 3)
5.8480354764257312
>>> LA.norm(a, -3)
nan
Using the `axis` argument to compute vector norms:
>>> c = np.array([[ 1, 2, 3],
... [-1, 1, 4]])
>>> LA.norm(c, axis=0)
array([ 1.41421356, 2.23606798, 5. ])
>>> LA.norm(c, axis=1)
array([ 3.74165739, 4.24264069])
>>> LA.norm(c, ord=1, axis=1)
array([ 6., 6.])
Using the `axis` argument to compute matrix norms:
>>> m = np.arange(8).reshape(2,2,2)
>>> LA.norm(m, axis=(1,2))
array([ 3.74165739, 11.22497216])
>>> LA.norm(m[0, :, :]), LA.norm(m[1, :, :])
(3.7416573867739413, 11.224972160321824)
"""
x = asarray(x)
if not issubclass(x.dtype.type, (inexact, object_)):
x = x.astype(float)
# Immediately handle some default, simple, fast, and common cases.
if axis is None:
ndim = x.ndim
if ((ord is None) or
(ord in ('f', 'fro') and ndim == 2) or
(ord == 2 and ndim == 1)):
x = x.ravel(order='K')
if isComplexType(x.dtype.type):
sqnorm = dot(x.real, x.real) + dot(x.imag, x.imag)
else:
sqnorm = dot(x, x)
ret = sqrt(sqnorm)
if keepdims:
ret = ret.reshape(ndim*[1])
return ret
# Normalize the `axis` argument to a tuple.
nd = x.ndim
if axis is None:
axis = tuple(range(nd))
elif not isinstance(axis, tuple):
try:
axis = int(axis)
except Exception:
raise TypeError("'axis' must be None, an integer or a tuple of integers")
axis = (axis,)
if len(axis) == 1:
if ord == Inf:
return abs(x).max(axis=axis, keepdims=keepdims)
elif ord == -Inf:
return abs(x).min(axis=axis, keepdims=keepdims)
elif ord == 0:
# Zero norm
return (x != 0).astype(x.real.dtype).sum(axis=axis, keepdims=keepdims)
elif ord == 1:
# special case for speedup
return add.reduce(abs(x), axis=axis, keepdims=keepdims)
elif ord is None or ord == 2:
# special case for speedup
s = (x.conj() * x).real
return sqrt(add.reduce(s, axis=axis, keepdims=keepdims))
else:
try:
ord + 1
except TypeError:
raise ValueError("Invalid norm order for vectors.")
absx = abs(x)
absx **= ord
ret = add.reduce(absx, axis=axis, keepdims=keepdims)
ret **= (1 / ord)
return ret
elif len(axis) == 2:
row_axis, col_axis = axis
row_axis = normalize_axis_index(row_axis, nd)
col_axis = normalize_axis_index(col_axis, nd)
if row_axis == col_axis:
raise ValueError('Duplicate axes given.')
if ord == 2:
ret = _multi_svd_norm(x, row_axis, col_axis, amax)
elif ord == -2:
ret = _multi_svd_norm(x, row_axis, col_axis, amin)
elif ord == 1:
if col_axis > row_axis:
col_axis -= 1
ret = add.reduce(abs(x), axis=row_axis).max(axis=col_axis)
elif ord == Inf:
if row_axis > col_axis:
row_axis -= 1
ret = add.reduce(abs(x), axis=col_axis).max(axis=row_axis)
elif ord == -1:
if col_axis > row_axis:
col_axis -= 1
ret = add.reduce(abs(x), axis=row_axis).min(axis=col_axis)
elif ord == -Inf:
if row_axis > col_axis:
row_axis -= 1
ret = add.reduce(abs(x), axis=col_axis).min(axis=row_axis)
elif ord in [None, 'fro', 'f']:
ret = sqrt(add.reduce((x.conj() * x).real, axis=axis))
elif ord == 'nuc':
ret = _multi_svd_norm(x, row_axis, col_axis, sum)
else:
raise ValueError("Invalid norm order for matrices.")
if keepdims:
ret_shape = list(x.shape)
ret_shape[axis[0]] = 1
ret_shape[axis[1]] = 1
ret = ret.reshape(ret_shape)
return ret
else:
raise ValueError("Improper number of dimensions to norm.")
# multi_dot
def multi_dot(arrays):
"""
Compute the dot product of two or more arrays in a single function call,
while automatically selecting the fastest evaluation order.
`multi_dot` chains `numpy.dot` and uses optimal parenthesization
of the matrices [1]_ [2]_. Depending on the shapes of the matrices,
this can speed up the multiplication a lot.
If the first argument is 1-D it is treated as a row vector.
If the last argument is 1-D it is treated as a column vector.
The other arguments must be 2-D.
Think of `multi_dot` as::
def multi_dot(arrays): return functools.reduce(np.dot, arrays)
Parameters
----------
arrays : sequence of array_like
If the first argument is 1-D it is treated as row vector.
If the last argument is 1-D it is treated as column vector.
The other arguments must be 2-D.
Returns
-------
output : ndarray
Returns the dot product of the supplied arrays.
See Also
--------
dot : dot multiplication with two arguments.
References
----------
.. [1] Cormen, "Introduction to Algorithms", Chapter 15.2, p. 370-378
.. [2] https://en.wikipedia.org/wiki/Matrix_chain_multiplication
Examples
--------
`multi_dot` allows you to write::
>>> from numpy.linalg import multi_dot
>>> # Prepare some data
    >>> A = np.random.random((10000, 100))
    >>> B = np.random.random((100, 1000))
    >>> C = np.random.random((1000, 5))
    >>> D = np.random.random((5, 333))
>>> # the actual dot multiplication
>>> multi_dot([A, B, C, D])
instead of::
>>> np.dot(np.dot(np.dot(A, B), C), D)
>>> # or
>>> A.dot(B).dot(C).dot(D)
Notes
-----
The cost for a matrix multiplication can be calculated with the
following function::
def cost(A, B):
return A.shape[0] * A.shape[1] * B.shape[1]
Let's assume we have three matrices
:math:`A_{10x100}, B_{100x5}, C_{5x50}`.
The costs for the two different parenthesizations are as follows::
cost((AB)C) = 10*100*5 + 10*5*50 = 5000 + 2500 = 7500
cost(A(BC)) = 10*100*50 + 100*5*50 = 50000 + 25000 = 75000
"""
n = len(arrays)
# optimization only makes sense for len(arrays) > 2
if n < 2:
raise ValueError("Expecting at least two arrays.")
elif n == 2:
return dot(arrays[0], arrays[1])
arrays = [asanyarray(a) for a in arrays]
# save original ndim to reshape the result array into the proper form later
ndim_first, ndim_last = arrays[0].ndim, arrays[-1].ndim
# Explicitly convert vectors to 2D arrays to keep the logic of the internal
# _multi_dot_* functions as simple as possible.
if arrays[0].ndim == 1:
arrays[0] = atleast_2d(arrays[0])
if arrays[-1].ndim == 1:
arrays[-1] = atleast_2d(arrays[-1]).T
_assertRank2(*arrays)
# _multi_dot_three is much faster than _multi_dot_matrix_chain_order
if n == 3:
result = _multi_dot_three(arrays[0], arrays[1], arrays[2])
else:
order = _multi_dot_matrix_chain_order(arrays)
result = _multi_dot(arrays, order, 0, n - 1)
# return proper shape
if ndim_first == 1 and ndim_last == 1:
return result[0, 0] # scalar
elif ndim_first == 1 or ndim_last == 1:
return result.ravel() # 1-D
else:
return result
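# [Editor's note on multi_dot above -- illustrative sketch, not part of the original module.]
# The Notes compare the two parenthesizations of A B C with a small cost() helper; spelling out
# the same arithmetic:
#
#     >>> import numpy as np
#     >>> def cost(X, Y):
#     ...     return X.shape[0] * X.shape[1] * Y.shape[1]
#     >>> A, B, C = np.ones((10, 100)), np.ones((100, 5)), np.ones((5, 50))
#     >>> cost(A, B) + cost(np.dot(A, B), C)        # (AB)C
#     7500
#     >>> cost(B, C) + cost(A, np.dot(B, C))        # A(BC)
#     75000
#
# which is why multi_dot (via _multi_dot_three below) picks the (AB)C ordering here.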
def _multi_dot_three(A, B, C):
"""
Find the best order for three arrays and do the multiplication.
For three arguments `_multi_dot_three` is approximately 15 times faster
than `_multi_dot_matrix_chain_order`
"""
a0, a1b0 = A.shape
b1c0, c1 = C.shape
# cost1 = cost((AB)C) = a0*a1b0*b1c0 + a0*b1c0*c1
cost1 = a0 * b1c0 * (a1b0 + c1)
# cost2 = cost(A(BC)) = a1b0*b1c0*c1 + a0*a1b0*c1
cost2 = a1b0 * c1 * (a0 + b1c0)
if cost1 < cost2:
return dot(dot(A, B), C)
else:
return dot(A, dot(B, C))
def _multi_dot_matrix_chain_order(arrays, return_costs=False):
"""
    Return a np.array that encodes the optimal order of multiplications.
The optimal order array is then used by `_multi_dot()` to do the
multiplication.
Also return the cost matrix if `return_costs` is `True`
The implementation CLOSELY follows Cormen, "Introduction to Algorithms",
Chapter 15.2, p. 370-378. Note that Cormen uses 1-based indices.
cost[i, j] = min([
cost[prefix] + cost[suffix] + cost_mult(prefix, suffix)
for k in range(i, j)])
"""
n = len(arrays)
# p stores the dimensions of the matrices
# Example for p: A_{10x100}, B_{100x5}, C_{5x50} --> p = [10, 100, 5, 50]
p = [a.shape[0] for a in arrays] + [arrays[-1].shape[1]]
# m is a matrix of costs of the subproblems
# m[i,j]: min number of scalar multiplications needed to compute A_{i..j}
m = zeros((n, n), dtype=double)
# s is the actual ordering
# s[i, j] is the value of k at which we split the product A_i..A_j
s = empty((n, n), dtype=intp)
for l in range(1, n):
for i in range(n - l):
j = i + l
m[i, j] = Inf
for k in range(i, j):
q = m[i, k] + m[k+1, j] + p[i]*p[k+1]*p[j+1]
if q < m[i, j]:
m[i, j] = q
s[i, j] = k # Note that Cormen uses 1-based index
return (s, m) if return_costs else s
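# [Editor's note -- worked example, editor-added.] For the three-matrix chain used above,
# A(10x100), B(100x5), C(5x50), the dimension list is p = [10, 100, 5, 50]. The recurrence
# fills m[0, 1] = 10*100*5 = 5000 and m[1, 2] = 100*5*50 = 25000, and then
# m[0, 2] = min(0 + 25000 + 50000, 5000 + 0 + 2500) = 7500 with s[0, 2] = 1, i.e. split after
# the second matrix: (A B) C -- the same answer _multi_dot_three reaches by comparing the two
# costs directly.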
def _multi_dot(arrays, order, i, j):
"""Actually do the multiplication with the given order."""
if i == j:
return arrays[i]
else:
return dot(_multi_dot(arrays, order, i, order[i, j]),
_multi_dot(arrays, order, order[i, j] + 1, j))
| bsd-3-clause |
nelango/ViralityAnalysis | model/lib/pandas/io/clipboard.py | 14 | 2947 | """ io on the clipboard """
from pandas import compat, get_option, option_context, DataFrame
from pandas.compat import StringIO
def read_clipboard(**kwargs): # pragma: no cover
"""
Read text from clipboard and pass to read_table. See read_table for the
full argument list
If unspecified, `sep` defaults to '\s+'
Returns
-------
parsed : DataFrame
"""
from pandas.util.clipboard import clipboard_get
from pandas.io.parsers import read_table
text = clipboard_get()
# try to decode (if needed on PY3)
# Strange. linux py33 doesn't complain, win py33 does
if compat.PY3:
try:
text = compat.bytes_to_str(
text, encoding=(kwargs.get('encoding') or
get_option('display.encoding'))
)
except:
pass
    # Excel copies into the clipboard with \t separation.
    # Inspect no more than the first 10 lines; if they
# all contain an equal number (>0) of tabs, infer
# that this came from excel and set 'sep' accordingly
lines = text[:10000].split('\n')[:-1][:10]
# Need to remove leading white space, since read_table
# accepts:
# a b
# 0 1 2
# 1 3 4
counts = set([x.lstrip().count('\t') for x in lines])
if len(lines)>1 and len(counts) == 1 and counts.pop() != 0:
kwargs['sep'] = '\t'
if kwargs.get('sep') is None and kwargs.get('delim_whitespace') is None:
kwargs['sep'] = '\s+'
return read_table(StringIO(text), **kwargs)
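# [Editor's note on read_clipboard above -- hedged illustration, not part of pandas.] The block
# above infers the separator from the clipboard text itself: something copied out of Excel,
# e.g. "a\tb\n0\t1\n2\t3\n", has the same non-zero tab count on every inspected line, so `sep`
# becomes '\t'; anything else falls through to the whitespace default '\s+' (unless the caller
# already passed `sep` or `delim_whitespace`).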
def to_clipboard(obj, excel=None, sep=None, **kwargs): # pragma: no cover
"""
Attempt to write text representation of object to the system clipboard
The clipboard can be then pasted into Excel for example.
Parameters
----------
obj : the object to write to the clipboard
excel : boolean, defaults to True
if True, use the provided separator, writing in a csv
format for allowing easy pasting into excel.
if False, write a string representation of the object
to the clipboard
sep : optional, defaults to tab
other keywords are passed to to_csv
Notes
-----
Requirements for your platform
- Linux: xclip, or xsel (with gtk or PyQt4 modules)
- Windows:
- OS X:
"""
from pandas.util.clipboard import clipboard_set
if excel is None:
excel = True
if excel:
try:
if sep is None:
sep = '\t'
buf = StringIO()
obj.to_csv(buf, sep=sep, **kwargs)
clipboard_set(buf.getvalue())
return
except:
pass
if isinstance(obj, DataFrame):
# str(df) has various unhelpful defaults, like truncation
with option_context('display.max_colwidth', 999999):
objstr = obj.to_string(**kwargs)
else:
objstr = str(obj)
clipboard_set(objstr)
| mit |
ypochien/TaiwanStockBSR | GetStockCode.py | 1 | 1465 | # -*- coding: utf-8 -*-
#http://www.twse.com.tw/ch/products/stock_code2.php
#http://isin.twse.com.tw/isin/C_public.jsp?strMode=2
#http://isin.twse.com.tw/isin/C_public.jsp?strMode=4
from types import *
from lxml.html import parse
import csv
import pandas as pd
from pandas import Series, DataFrame
tse_url = "http://isin.twse.com.tw/isin/C_public.jsp?strMode=2"
otc_url = "http://isin.twse.com.tw/isin/C_public.jsp?strMode=4"
def getCode(_url):
page = parse(_url)
rows = page.xpath("body/table")[1].findall("tr")
data = list()
content = []
for row in rows:
v = [c.text for c in row.getchildren()]
		if len(v) > 4 and type(v[4]) is not NoneType:  # entries without a classification are not listed stocks
code = v[0].encode('latin1').decode('cp950')
content.append('%s,%s'%(code[:11].strip(),code[11:]))
return content
def ToCsv(CSVData, filename):
with open(filename, 'wb') as csvfile:
content = '\r\n'.join(row for row in CSVData)
csvfile.write(content.encode('cp950'))
print("write %s ok."%(filename))
def format_for_mac(filename):
#additional process for csv to avoid displaying 'garbled text' on Mac
df = pd.read_csv(filename, encoding='cp950')
	print(df.shape)
df.columns = ['id', 'name']
df.to_csv(filename, encoding='utf-8', index=False)
if __name__ == '__main__':
TSE_Code = getCode(tse_url)
ToCsv(TSE_Code,"TSECode.csv")
OTC_Code = getCode(otc_url)
ToCsv(OTC_Code, "OTCCode.csv")
format_for_mac("TSECode.csv")
format_for_mac("OTCCode.csv")
| mit |
bundgus/python-playground | matplotlib-playground/examples/axes_grid/demo_edge_colorbar.py | 1 | 2605 | import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import AxesGrid
def get_demo_image():
import numpy as np
from matplotlib.cbook import get_sample_data
f = get_sample_data("axes_grid/bivariate_normal.npy", asfileobj=False)
z = np.load(f)
# z is a numpy array of 15x15
return z, (-3, 4, -4, 3)
def demo_bottom_cbar(fig):
"""
A grid of 2x2 images with a colorbar for each column.
"""
grid = AxesGrid(fig, 121, # similar to subplot(132)
nrows_ncols=(2, 2),
axes_pad=0.10,
share_all=True,
label_mode="1",
cbar_location="bottom",
cbar_mode="edge",
cbar_pad=0.25,
cbar_size="15%",
direction="column"
)
Z, extent = get_demo_image()
cmaps = [plt.get_cmap("autumn"), plt.get_cmap("summer")]
for i in range(4):
im = grid[i].imshow(Z, extent=extent, interpolation="nearest",
cmap=cmaps[i//2])
if i % 2:
cbar = grid.cbar_axes[i//2].colorbar(im)
for cax in grid.cbar_axes:
cax.toggle_label(True)
cax.axis[cax.orientation].set_label("Bar")
# This affects all axes as share_all = True.
grid.axes_llc.set_xticks([-2, 0, 2])
grid.axes_llc.set_yticks([-2, 0, 2])
def demo_right_cbar(fig):
"""
A grid of 2x2 images. Each row has its own colorbar.
"""
    grid = AxesGrid(fig, 122,  # similar to subplot(122)
nrows_ncols=(2, 2),
axes_pad=0.10,
label_mode="1",
share_all=True,
cbar_location="right",
cbar_mode="edge",
cbar_size="7%",
cbar_pad="2%",
)
Z, extent = get_demo_image()
cmaps = [plt.get_cmap("spring"), plt.get_cmap("winter")]
for i in range(4):
im = grid[i].imshow(Z, extent=extent, interpolation="nearest",
cmap=cmaps[i//2])
if i % 2:
grid.cbar_axes[i//2].colorbar(im)
for cax in grid.cbar_axes:
cax.toggle_label(True)
cax.axis[cax.orientation].set_label('Foo')
# This affects all axes because we set share_all = True.
grid.axes_llc.set_xticks([-2, 0, 2])
grid.axes_llc.set_yticks([-2, 0, 2])
if 1:
F = plt.figure(1, (5.5, 2.5))
F.subplots_adjust(left=0.05, right=0.93)
demo_bottom_cbar(F)
demo_right_cbar(F)
plt.draw()
plt.show()
| mit |
broadinstitute/cms | cms/cms_modeller.py | 1 | 21590 | #!/usr/bin/env python
## top-level script for demographic modeling as part of CMS 2.0.
## last updated: 02.26.18 [email protected]
import matplotlib as mp
mp.use('agg')
from model.bootstrap_func import flattenList, checkFileExists, readFreqsFile, readLDFile, readFstFile, estimateFstByBootstrap, estimateFstByBootstrap_bysnp, estimateFreqSpectrum, estimatePi, estimater2decay, estimatedprimedecay
from model.params_func import get_ranges, generate_params
from model.error_func import calc_error, read_error_dimensionsfile
from model.search_func import read_dimensionsfile, sample_point, get_real_value, get_scaled_value
from model.plot_func import plot_comparison
from scipy import optimize
import numpy as np
import subprocess
from itertools import product
import argparse
import random
import sys
#############################
## DEFINE ARGUMENT PARSER ###
#############################
def full_parser_cms_modeller():
parser=argparse.ArgumentParser(description="This script contains command-line utilities for exploratory fitting of demographic models to population genetic data.")
subparsers = parser.add_subparsers(help="sub-commands")
######################
## CALCULATE TARGET ##
######################
target_stats_parser = subparsers.add_parser('target_stats', help='Perform per-site(/per-site-pair) calculations of population summary statistics for model target values.')
target_stats_parser.add_argument('inputTpeds', action='store', type=list, help='comma-delimited list of unzipped input tped files (only one file per pop being modelled; must run chroms separately or concatenate)')
target_stats_parser.add_argument('recomFile', action='store', type=str, help='file defining recombination map for input')
target_stats_parser.add_argument('regions', action='store', type=str, help='tab-separated file with putative neutral regions') #OPTIONAL?
target_stats_parser.add_argument('--freqs', action='store_true', help='calculate summary statistics from within-population allele frequencies')
target_stats_parser.add_argument('--ld', action='store_true', help='calculate summary statistics from within-population linkage disequilibrium')
target_stats_parser.add_argument('--fst', action='store_true', help='calculate summary statistics from population comparison using allele frequencies')
target_stats_parser.add_argument('out', action='store', type=str, help='outfile prefix')
target_stats_parser.add_argument('--modelpath', action='store', type=str, default='cms/model/', help="path to model directory containing executables")
bootstrap_parser = subparsers.add_parser('bootstrap', help='Perform bootstrap estimates of population summary statistics from per-site(/per-site-pair) calculations in order to finalize model target values.')
bootstrap_parser.add_argument('nBootstrapReps', action='store', type=int, help='number of bootstraps to perform in order to estimate standard error of the dataset (should converge for reasonably small n)')
bootstrap_parser.add_argument('--in_freqs', action='store', help='comma-delimited list of infiles with per-site calculations for population. One file per population -- for bootstrap estimates of genome-wide values, should first concatenate per-chrom files')
bootstrap_parser.add_argument('--nFreqHistBins', action='store',type=int, default=6, help="number of bins for site frequency spectrum and p(der|freq)")
bootstrap_parser.add_argument('--in_ld', action='store', help='comma-delimited list of infiles with per-site-pair calculations for population. One file per population -- for bootstrap estimates of genome-wide values, should first concatenate per-chrom files')
bootstrap_parser.add_argument('--mafcutoffdprime', action='store', type=float, default=.2, help="for D' calculations, only use sites with MAF > mafcutoffdprime")
bootstrap_parser.add_argument('--nphysdisthist', action='store', type=int, default=14, help="nbins for r2 LD calculations")
bootstrap_parser.add_argument('--in_fst', action='store', help='comma-delimited list of infiles with per-site calculations for population pair. One file per population-pair -- for bootstrap estimates of genome-wide values, should first concatenate per-chrom files')
bootstrap_parser.add_argument('--ngendisthist', action='store', type=int, default=17, help="nbins for D' LD calculations")
bootstrap_parser.add_argument('out', action='store', type=str, help='outfile prefix')
##########################
### COSI - SHARED ARGS ##
##########################
point_parser = subparsers.add_parser('point', help='Run simulates of a point in parameter-space.')
grid_parser = subparsers.add_parser('grid', help='Perform grid search: for specified parameters and intervals, define points in parameter-space to sample and compare.')
optimize_parser = subparsers.add_parser('optimize', help='Perform optimization algorithm (scipy.optimize) to fit model parameters robustly.')
for cosi_parser in [point_parser, grid_parser, optimize_parser]:
cosi_parser.add_argument('inputParamFile', type=str, action='store', help='file with model specifications for input')
cosi_parser.add_argument('nCoalescentReps', type=int, help='number of coalescent replicates to run per point in parameter-space')
cosi_parser.add_argument('outputDir', type=str, action='store', help='location in which to write cosi output')
cosi_parser.add_argument('--cosiBuild', action='store', default="coalescent", help='which version of cosi to run?')
cosi_parser.add_argument('--dropSings', action='store', type=float, help='randomly thin global singletons from output dataset (i.e., to model ascertainment bias)')
cosi_parser.add_argument('--genmapRandomRegions', action='store_true', help='cosi option to sub-sample genetic map randomly from input')
cosi_parser.add_argument('--stopAfterMinutes', action='store', help='cosi option to terminate simulations')
cosi_parser.add_argument('--calcError', type=str, action='store', help='file specifying dimensions of error function to use. if unspecified, defaults to all. first line = stats, second line = pops')
######################
## VISUALIZE MODEL ##
######################
point_parser.add_argument('--targetvalsFile', action='store', type=str, help='file containing target values for model')
point_parser.add_argument('--plotStats', action='store_true', default=False, help='visualize goodness-of-fit to model targets')
#########################
## FIT MODEL TO TARGET ##
#########################
grid_parser.add_argument('grid_inputdimensionsfile', type=str, action='store', help='file with specifications of grid search. each parameter to vary is indicated: KEY\tINDEX\t[VALUES]') #must be defined for each search
#grid_parser.add_argument('--parallel', type=str, action='store', default="uger", help='if specified, launch points of grid search as tasks on a scheduler')
optimize_parser.add_argument('optimize_inputdimensionsfile', type=str, action='store', help='file with specifications of optimization. each parameter to vary is indicated: KEY\tINDEX')
optimize_parser.add_argument('--stepSize', action='store', type=float, help='scaled step size (i.e. whole range = 1)')
optimize_parser.add_argument('--method', action='store', type=str, default='SLSQP', help='algorithm to pass to scipy.optimize')
for common_parser in [target_stats_parser, point_parser]:#[bootstrap_parser, grid_parser, optimize_parser]:
common_parser.add_argument('--printOnly', action='store_true', help='print rather than execute pipeline commands')
return parser
#############################
## WRAPPER FOR OPTIMIZE ###
#############################
def sample_point_wrapper(values):
	'''function passed to scipy.optimize; relies on module-level globals (nreps, gradientname, keys, indices) being set before the optimizer runs.'''
return sample_point(nreps, gradientname, keys, indices, values)
############################
## DEFINE EXEC FUNCTIONS ###
############################
def execute_target_stats(args):
'''calls bootstrap_*_popstats_regions to get per-snp/per-snp-pair values'''
#pathcmd = "export PATH=" + args.modelpath + ":$PATH"
#subprocess.check_call(pathcmd.split())
modelpath = args.modelpath
if modelpath[-1] != "/":
modelpath += "/"
inputtpedstring = ''.join(args.inputTpeds)
inputtpeds = inputtpedstring.split(',')
npops = len(inputtpeds)
print("calculating summary statistics for " + str(npops) + " populations...")
allCmds = []
for ipop in range(npops):
inputtped = inputtpeds[ipop]
if args.freqs:
freqCmd = [modelpath + 'bootstrap_freq_popstats_regions', inputtped, args.recomFile, args.regions, args.out + "_freqs_" + str(ipop)]
allCmds.append(freqCmd)
if args.ld:
ldCmd = [modelpath + 'bootstrap_ld_popstats_regions', inputtped, args.recomFile, args.regions, args.out + "_ld_" + str(ipop)]
allCmds.append(ldCmd)
if args.fst:
for jpop in range(ipop+1, npops):
inputtped2 = inputtpeds[jpop]
fstCmd = [modelpath + 'bootstrap_fst_popstats_regions', inputtped, inputtped2, args.recomFile, args.regions, args.out + "_fst_" + str(ipop) + "_" + str(jpop)]
allCmds.append(fstCmd)
for command in allCmds:
command = [str(x) for x in command]
if args.printOnly:
commandstring = ""
for item in command:
commandstring += item + " "
print(commandstring)
else:
subprocess.check_call( command )
return
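# [Editor's note on execute_target_stats above -- hedged example; file names are placeholders,
# not pipeline defaults.] With --freqs and two populations this builds and runs commands of the
# form
#     <modelpath>bootstrap_freq_popstats_regions pop0.tped recom.map neutral.bed out_freqs_0
# and, with --fst, one pairwise command per population pair (out_fst_0_1, ...).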
def execute_bootstrap(args):
'''pulls all per-snp/per-snp-pair values to get genome-wide bootstrap estimates.'''
nbootstraprep = args.nBootstrapReps
print("running " + str(nbootstraprep) + " bootstrap estimates of summary statistics...")
targetstats_filename = args.out + "_bootstrap_n" + str(nbootstraprep) + ".txt"
writefile = open(targetstats_filename, 'w')
#################
### FREQ STATS ##
#################
if args.in_freqs is not None:
nhist = args.nFreqHistBins
inputestimatefilenames = ''.join(args.in_freqs)
inputfilenames = inputestimatefilenames.split(',')
npops = len(inputfilenames)
for ipop in range(npops):
allRegionDER, allRegionANC, allRegionPI, allseqlens = [], [], [], []
nsnps, totalregions, totallen = 0, 0, 0
inputfilename = inputfilenames[ipop]
print("reading allele frequency statistics from: " + inputfilename)
writefile.write(str(ipop) + '\n')
if checkFileExists(inputfilename):
allpi, allnderiv, allnanc, nregions, seqlens = readFreqsFile(inputfilename)
allRegionPI.extend(allpi)
allRegionDER.extend(allnderiv)
allRegionANC.extend(allnanc)
allseqlens.extend(seqlens)
totalregions += nregions
totallen += sum(seqlens)
for i in range(len(allpi)):
nsnps += len(allpi[i])
print("TOTAL: logged frequency values for " + str(nsnps) + " SNPS across " + str(totalregions) + ".")
####################################
#### PI: MEAN & BOOTSTRAP STDERR ###
####################################
pi_mean = estimatePi(allRegionPI, allseqlens)
writefile.write(str(pi_mean)+'\t')
estimates = []
for j in range(nbootstraprep):
rep_pis, rep_seqlens = [], []
for k in range(totalregions):
index = random.randint(0, totalregions-1)
rep_pis.append(allRegionPI[index])
rep_seqlens.append(allseqlens[index])
rep_pi_mean = estimatePi(rep_pis, rep_seqlens)
estimates.append(rep_pi_mean)
pi_se = np.std(estimates)
writefile.write(str(pi_se) + '\n')
#########################################
### SFS, ANC: MEAN ACROSS ALL REGIONS ###
#########################################
mafhist, anchist = estimateFreqSpectrum(allRegionDER, allRegionANC, nhist)
npoly = sum(mafhist)
sfs_mean = [float(x)/npoly for x in mafhist]
anc_mean = [anchist[i]/float(mafhist[i]) for i in range(len(mafhist))]
###################################################
### SFS, ANC: STDERR ACROSS BOOTSTRAP ESTIMATES ###
###################################################
estimates_sfs, estimates_anc = [[] for i in range(nhist)], [[] for i in range(nhist)]
for j in range(nbootstraprep):
rep_all_nderiv, rep_all_nanc = [], []
flatanc = flattenList(allRegionANC)
flatder = flattenList(allRegionDER)
for w in range(nsnps):
index = random.randint(0, nsnps-1)
rep_all_nderiv.append(flatder[index])
rep_all_nanc.append(flatanc[index])
repmafhist, repanchist = estimateFreqSpectrum(rep_all_nderiv, rep_all_nanc, nhist)
npoly = sum(repmafhist)
repsfs = [float(x)/npoly for x in repmafhist]
for ibin in range(nhist):
estimates_sfs[ibin].append(repsfs[ibin])
repanc = [repanchist[i]/float(repmafhist[i]) for i in range(nhist)]
for ibin in range(nhist):
estimates_anc[ibin].append(repanc[ibin])
sfs_se = [np.std(x) for x in estimates_sfs]
anc_se = [np.std(x) for x in estimates_anc]
writefile.write(str(sfs_mean) + '\n')
writefile.write(str(sfs_se) + '\n')
writefile.write(str(anc_mean) + '\n')
writefile.write(str(anc_se) + '\n')
#########
### LD ##
#########
if args.in_ld is not None:
nphysdisthist = args.nphysdisthist
ngendisthist = args.ngendisthist
inputestimatefilenames = ''.join(args.in_ld)
inputfilenames = inputestimatefilenames.split(',')
npops = len(inputfilenames)
#print('npops ' + str(npops)) #debug
for ipop in range(npops):
inputfilename = inputfilenames[ipop]
print("reading linkage disequilibrium statistics from: " + inputfilename)
writefile.write(str(ipop) + '\n')
N_r2regs, N_dprimeregs = 0, 0
N_r2snps, N_dprimesnps = 0, 0
allRegionDists, allRegionr2, allRegionGendists, allRegionDprime, nr2regions, ndprimeregions = readLDFile(inputfilename, dprimecutoff = args.mafcutoffdprime)
N_r2regs += nr2regions
N_r2snps += sum([len(x) for x in allRegionr2])
N_dprimeregs += ndprimeregions
N_dprimesnps += sum([len(x) for x in allRegionDprime])
print("\tlogged r2 values for " + str(N_r2snps) + " SNP pairs across " + str(N_r2regs) + " regions.")
print("\tlogged D' values for " + str(N_dprimesnps) + " SNP pairs across " + str(N_dprimeregs) + " regions.")
###################################
### r2: MEAN ACROSS ALL REGIONS ###
###################################
r2sums, physDistHist = estimater2decay(allRegionr2, allRegionDists, nphysdisthist)
r2dist = [r2sums[u]/physDistHist[u] for u in range(len(r2sums))]
writefile.write(str(r2dist) + "\n")
############################################
### r2: STDERR ACROSS BOOTSTRAP ESTIMATES ##
############################################
estimates_r2 = [[] for i in range(nphysdisthist)]
while len(estimates_r2[0]) < nbootstraprep:
rep_all_r2, rep_all_physdist = [], []
flatr2 = flattenList(allRegionr2)
flatregions = flattenList(allRegionDists)
nsnppairs = len(flatr2)
for w in range(nsnppairs):
index_r2 = random.randint(0, nsnppairs-1)
rep_all_r2.append(flatr2[index_r2])
rep_all_physdist.append(flatregions[index_r2])
#add pseudocount for empty bins
repr2sum, repphysdisthist = estimater2decay(rep_all_r2, rep_all_physdist, nphysdisthist)
for ibin in range(len(repphysdisthist)):
if repphysdisthist[ibin] == 0:
repphysdisthist[ibin] = 1
r2estimate =[repr2sum[u]/repphysdisthist[u] for u in range(len(repr2sum))]
for ibin in range(nphysdisthist):
estimates_r2[ibin].append(r2estimate[ibin])
r2_se = [np.std(x) for x in estimates_r2]
writefile.write(str(r2_se) + "\n")
####################################
### D': MEAN ACROSS ALL REGIONS ###
####################################
compLDhist, genDistHist = estimatedprimedecay(allRegionDprime, allRegionGendists, ngendisthist)
#add pseudocounts
for ibin in range(len(genDistHist)):
if genDistHist[ibin] == 0:
genDistHist[ibin]+=1
dprimedist = [float(compLDhist[x])/float(genDistHist[x]) for x in range(len(compLDhist))]
writefile.write(str(dprimedist) + "\n")
############################################
### D': STDERR ACROSS BOOTSTRAP ESTIMATES ##
############################################
estimates_dprime = [[] for i in range(ngendisthist)]
while len(estimates_dprime[0]) < nbootstraprep:
rep_all_dprime, rep_all_gendist = [], []
flatdprime = flattenList(allRegionDprime)
flatgendist = flattenList(allRegionGendists)
nsnppairs = len(flatdprime)
for w in range(nsnppairs):
index_dprime = random.randint(0, nsnppairs-1)
rep_all_dprime.append(flatdprime[index_dprime])
rep_all_gendist.append(flatgendist[index_dprime])
repcompLDhist, repgenDistHist = estimatedprimedecay(rep_all_dprime, rep_all_gendist, ngendisthist)
for ibin in range(len(repgenDistHist)):
if repgenDistHist[ibin] == 0:
repgenDistHist[ibin] = 1
dprimeestimate = [float(repcompLDhist[x])/float(repgenDistHist[x]) for x in range(ngendisthist)]
for ibin in range(ngendisthist):
estimates_dprime[ibin].append(dprimeestimate[ibin])
dprime_se = [np.std(x) for x in estimates_dprime]
writefile.write(str(dprime_se) + "\n")
##########
### FST ##
##########
if args.in_fst is not None:
inputestimatefilenames = ''.join(args.in_fst)
inputfilenames = inputestimatefilenames.split(',')
npopcomp = len(inputfilenames)
for icomp in range(npopcomp):
fstfilename = inputfilenames[icomp]
print("reading Fst values from: " + fstfilename)
if checkFileExists(fstfilename):
allfst, nregions = readFstFile(fstfilename)
		else:
			print('missing ' + fstfilename)
			continue  # skip comparisons whose per-SNP Fst file is absent
target_mean, target_se = estimateFstByBootstrap_bysnp(allfst, nrep = nbootstraprep)
writeline = str(icomp) + "\t" + str(target_mean) + "\t" + str(target_se) + '\n'
writefile.write(writeline)
print("TOTAL: logged Fst values for " + str(len(allfst)) + " SNPs.\n")
writefile.close()
print("wrote to file: " + targetstats_filename)
return
def execute_point(args):
'''runs simulates of a point in parameter-space, comparing to specified target'''
################
## FILE PREP ###
################
print("generating " + str(args.nCoalescentReps) + " simulations from model: " + args.inputParamFile)
statfilename = args.outputDir
if args.outputDir[-1] != "/":
statfilename += "/"
statfilename += "n" + str(args.nCoalescentReps) + "stats.txt"
###############
## RUN SIMS ###
###############
runStatsCommand = args.cosiBuild + " -p " + args.inputParamFile + " -n " + str(args.nCoalescentReps)
if args.dropSings is not None:
runStatsCommand += " --drop-singletons " + str(args.dropSings)
if args.genmapRandomRegions:
runStatsCommand += " --genmapRandomRegions"
if args.stopAfterMinutes is not None:
runStatsCommand += " --stop-after-minutes " + str(args.stopAfterMinutes)
runStatsCommand += " --custom-stats > " + statfilename
	if args.printOnly:
		print(runStatsCommand)
	else:
		# run through a shell so the '>' redirection into statfilename actually takes effect
		subprocess.check_call(runStatsCommand, shell=True)
#################
## CALC ERROR ###
#################
if args.calcError is not None:
if args.calcError == '': #no error dimension file given
error = calc_error(statfilename)
else:
stats, pops = read_error_dimensionsfile(args.calcError)
error = calc_error(statfilename, stats, pops)
print(" error: " + str(error)) #record?
################
## VISUALIZE ###
################
if args.plotStats:
plot_comparison(statfilename, args.nCoalescentReps)
return
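# [Editor's note on execute_point above -- hedged example; file names are placeholders.] For a
# typical invocation the command assembled above looks like
#     coalescent -p model.par -n 1000 --drop-singletons 0.25 --genmapRandomRegions --custom-stats > outdir/n1000stats.txt
# i.e. cosi writes its summary statistics to stdout and the shell redirect captures them in
# statfilename for calc_error / plot_comparison.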
def execute_grid(args):
'''run points in parameter-space according to specified grid'''
print("loading dimensions of grid to search from: " + args.grid_inputdimensionsfile)
gridname, keys, indices, values = read_dimensionsfile(args.grid_inputdimensionsfile, 'grid')
assert len(keys) == len(indices)
	combos = list(product(*values))
	errors = []
	for combo in combos:
		theseValues = list(combo)  # parameter values for this grid point
		error = sample_point(args.nCoalescentReps, keys, indices, theseValues)
		errors.append(error)
	for icombo in range(len(combos)):
		print(str(combos[icombo]) + "\t" + str(errors[icombo]))
return
def execute_optimize(args):
'''run scipy.optimize module according to specified parameters'''
print("loading dimensions to search from: " + args.optimize_inputdimensionsfile)
runname, keys, indices = read_dimensionsfile(args.optimize_inputdimensionsfile, runType='optimize')
rangeDict = get_ranges()
paramDict = generate_params()
x0 = []
bounds = []
for i in range(len(keys)):
key = keys[i]
index = indices[i]
value = paramDict[key][index]
interval = rangeDict[key][index]
low, high = float(interval[0]), float(interval[1])
scaled = get_scaled_value(value, low, high)
x0.append(scaled)
bounds.append([0,1])
x0 = np.array(x0)
stepdict = {'eps':float(args.stepSize)}
result = optimize.minimize(sample_point_wrapper, x0, method=args.method, bounds=bounds, options=stepdict)
print(result)
print( "******************")
#translate back to changes to model
bestparams = []
assert len(keys) == len(result.x)
for i in range(len(keys)):
key = keys[i]
index = indices[i]
interval = rangeDict[key][index]
low, high = float(interval[0]), float(interval[1])
realVal = get_real_value(result.x[i], low, high)
bestparams.append(result.x[i])
print("best " + str(key) + "|" + str(index) + "|" + str(realVal))
return
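# [Editor's note on execute_optimize above -- hedged sketch; the real helpers live in
# model.search_func.] The optimizer works on parameters rescaled to the unit interval, which is
# what the [0, 1] bounds above encode. The mapping presumably has the form
#
#     scaled = (value - low) / (high - low)        # get_scaled_value
#     value  = low + scaled * (high - low)         # get_real_value
#
# so --stepSize is interpreted as a fraction of each parameter's allowed range, independent of
# its units.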
##########
## MAIN ##
##########
if __name__ == '__main__':
runparser = full_parser_cms_modeller()
args = runparser.parse_args()
# if called with no arguments, print help
if len(sys.argv)==1:
runparser.parse_args(['--help'])
elif len(sys.argv)==2:
runparser.parse_args([sys.argv[1], '--help'])
subcommand = sys.argv[1]
function_name = 'execute_' + subcommand + "(args)"
eval(function_name) #points to functions defined above, which wrap other programs in the pipeline
| bsd-2-clause |
antiface/mne-python | mne/viz/decoding.py | 13 | 8804 | """Functions to plot decoding results
"""
from __future__ import print_function
# Authors: Denis Engemann <[email protected]>
# Clement Moutard <[email protected]>
# Jean-Remi King <[email protected]>
#
# License: Simplified BSD
import numpy as np
import warnings
def plot_gat_matrix(gat, title=None, vmin=None, vmax=None, tlim=None,
ax=None, cmap='RdBu_r', show=True, colorbar=True,
xlabel=True, ylabel=True):
"""Plotting function of GeneralizationAcrossTime object
    Plot the scores of a fitted and scored GeneralizationAcrossTime object
    as a matrix of training time by testing time.
Parameters
----------
gat : instance of mne.decoding.GeneralizationAcrossTime
The gat object.
title : str | None
Figure title. Defaults to None.
vmin : float | None
Min color value for scores. If None, sets to min(gat.scores_).
Defaults to None.
vmax : float | None
Max color value for scores. If None, sets to max(gat.scores_).
Defaults to None.
tlim : array-like, (4,) | None
The temporal boundaries. If None, expands to
[tmin_train, tmax_train, tmin_test, tmax_test]. Defaults to None.
ax : object | None
Plot pointer. If None, generate new figure. Defaults to None.
cmap : str | cmap object
The color map to be used. Defaults to 'RdBu_r'.
show : bool
If True, the figure will be shown. Defaults to True.
colorbar : bool
If True, the colorbar of the figure is displayed. Defaults to True.
xlabel : bool
If True, the xlabel is displayed. Defaults to True.
ylabel : bool
If True, the ylabel is displayed. Defaults to True.
Returns
-------
fig : instance of matplotlib.figure.Figure
The figure.
"""
if not hasattr(gat, 'scores_'):
raise RuntimeError('Please score your data before trying to plot '
'scores')
import matplotlib.pyplot as plt
if ax is None:
fig, ax = plt.subplots(1, 1)
# Define time limits
if tlim is None:
tt_times = gat.train_times_['times']
tn_times = gat.test_times_['times']
tlim = [tn_times[0][0], tn_times[-1][-1], tt_times[0], tt_times[-1]]
# Plot scores
im = ax.imshow(gat.scores_, interpolation='nearest', origin='lower',
extent=tlim, vmin=vmin, vmax=vmax, cmap=cmap)
if xlabel is True:
ax.set_xlabel('Testing Time (s)')
if ylabel is True:
ax.set_ylabel('Training Time (s)')
if title is not None:
ax.set_title(title)
ax.axvline(0, color='k')
ax.axhline(0, color='k')
ax.set_xlim(tlim[:2])
ax.set_ylim(tlim[2:])
if colorbar is True:
plt.colorbar(im, ax=ax)
if show is True:
plt.show()
return fig if ax is None else ax.get_figure()
def plot_gat_times(gat, train_time='diagonal', title=None, xmin=None,
xmax=None, ymin=None, ymax=None, ax=None, show=True,
color=None, xlabel=True, ylabel=True, legend=True,
chance=True, label='Classif. score'):
"""Plotting function of GeneralizationAcrossTime object
Plot the scores of the classifier trained at 'train_time'.
Parameters
----------
gat : instance of mne.decoding.GeneralizationAcrossTime
The gat object.
train_time : 'diagonal' | float | list or array of float
Plot a 1d array of a portion of gat.scores_.
If set to 'diagonal', plots the gat.scores_ of classifiers
trained and tested at identical times
if set to float | list or array of float, plots scores of the
classifier(s) trained at (a) specific training time(s).
Default to 'diagonal'.
title : str | None
Figure title. Defaults to None.
xmin : float | None, optional
Min time value. Defaults to None.
xmax : float | None, optional
Max time value. Defaults to None.
ymin : float | None, optional
Min score value. If None, sets to min(scores). Defaults to None.
ymax : float | None, optional
Max score value. If None, sets to max(scores). Defaults to None.
ax : object | None
Plot pointer. If None, generate new figure. Defaults to None.
show : bool, optional
If True, the figure will be shown. Defaults to True.
color : str
        Score line color. If None, matplotlib's default color is used.
        Defaults to None.
xlabel : bool
If True, the xlabel is displayed. Defaults to True.
ylabel : bool
If True, the ylabel is displayed. Defaults to True.
legend : bool
If True, a legend is displayed. Defaults to True.
chance : bool | float.
Plot chance level. If True, chance level is estimated from the type
        of scorer. Defaults to True.
label : str
Score label used in the legend. Defaults to 'Classif. score'.
Returns
-------
fig : instance of matplotlib.figure.Figure
The figure.
"""
if not hasattr(gat, 'scores_'):
raise RuntimeError('Please score your data before trying to plot '
'scores')
import matplotlib.pyplot as plt
if ax is None:
fig, ax = plt.subplots(1, 1)
# Find and plot chance level
if chance is not False:
if chance is True:
chance = _get_chance_level(gat.scorer_, gat.y_train_)
ax.axhline(float(chance), color='k', linestyle='--',
label="Chance level")
ax.axvline(0, color='k', label='')
if isinstance(train_time, (str, float)):
train_time = [train_time]
label = [label]
elif isinstance(train_time, (list, np.ndarray)):
label = train_time
else:
raise ValueError("train_time must be 'diagonal' | float | list or "
"array of float.")
if color is None or isinstance(color, str):
color = np.tile(color, len(train_time))
for _train_time, _color, _label in zip(train_time, color, label):
_plot_gat_time(gat, _train_time, ax, _color, _label)
if title is not None:
ax.set_title(title)
if ymin is not None and ymax is not None:
ax.set_ylim(ymin, ymax)
if xmin is not None and xmax is not None:
ax.set_xlim(xmin, xmax)
if xlabel is True:
ax.set_xlabel('Time (s)')
if ylabel is True:
ax.set_ylabel('Classif. score ({0})'.format(
'AUC' if 'roc' in repr(gat.scorer_) else r'%'))
if legend is True:
ax.legend(loc='best')
if show is True:
plt.show()
return fig if ax is None else ax.get_figure()
def _plot_gat_time(gat, train_time, ax, color, label):
"""Aux function of plot_gat_time
Plots a unique score 1d array"""
# Detect whether gat is a full matrix or just its diagonal
if np.all(np.unique([len(t) for t in gat.test_times_['times']]) == 1):
scores = gat.scores_
elif train_time == 'diagonal':
# Get scores from identical training and testing times even if GAT
# is not square.
scores = np.zeros(len(gat.scores_))
for train_idx, train_time in enumerate(gat.train_times_['times']):
for test_times in gat.test_times_['times']:
# find closest testing time from train_time
lag = test_times - train_time
test_idx = np.abs(lag).argmin()
# check that not more than 1 classifier away
if np.abs(lag[test_idx]) > gat.train_times_['step']:
score = np.nan
else:
score = gat.scores_[train_idx][test_idx]
scores[train_idx] = score
elif isinstance(train_time, float):
train_times = gat.train_times_['times']
idx = np.abs(train_times - train_time).argmin()
if train_times[idx] - train_time > gat.train_times_['step']:
raise ValueError("No classifier trained at %s " % train_time)
scores = gat.scores_[idx]
else:
raise ValueError("train_time must be 'diagonal' or a float.")
kwargs = dict()
if color is not None:
kwargs['color'] = color
ax.plot(gat.train_times_['times'], scores, label=str(label), **kwargs)
def _get_chance_level(scorer, y_train):
# XXX JRK This should probably be solved within sklearn?
if scorer.__name__ == 'accuracy_score':
chance = np.max([np.mean(y_train == c) for c in np.unique(y_train)])
elif scorer.__name__ == 'roc_auc_score':
chance = 0.5
else:
chance = np.nan
warnings.warn('Cannot find chance level from %s, specify chance'
                      ' level' % scorer.__name__)
return chance
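# Hedged usage sketch (not part of the original module): assuming `gat` is a
# GeneralizationAcrossTime instance that has already been fitted and scored,
# the two public helpers above can be combined on one figure. The helper name
# below is illustrative only and is never called at import time.
def _example_plot_gat(gat):
    import matplotlib.pyplot as plt
    fig, axes = plt.subplots(1, 2)
    plot_gat_matrix(gat, ax=axes[0], show=False, colorbar=True)
    plot_gat_times(gat, train_time='diagonal', ax=axes[1], show=False)
    plt.show()
    return fig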
| bsd-3-clause |
shenzebang/scikit-learn | sklearn/metrics/cluster/bicluster.py | 359 | 2797 | from __future__ import division
import numpy as np
from sklearn.utils.linear_assignment_ import linear_assignment
from sklearn.utils.validation import check_consistent_length, check_array
__all__ = ["consensus_score"]
def _check_rows_and_columns(a, b):
"""Unpacks the row and column arrays and checks their shape."""
check_consistent_length(*a)
check_consistent_length(*b)
checks = lambda x: check_array(x, ensure_2d=False)
a_rows, a_cols = map(checks, a)
b_rows, b_cols = map(checks, b)
return a_rows, a_cols, b_rows, b_cols
def _jaccard(a_rows, a_cols, b_rows, b_cols):
"""Jaccard coefficient on the elements of the two biclusters."""
intersection = ((a_rows * b_rows).sum() *
(a_cols * b_cols).sum())
a_size = a_rows.sum() * a_cols.sum()
b_size = b_rows.sum() * b_cols.sum()
return intersection / (a_size + b_size - intersection)
def _pairwise_similarity(a, b, similarity):
"""Computes pairwise similarity matrix.
result[i, j] is the Jaccard coefficient of a's bicluster i and b's
bicluster j.
"""
a_rows, a_cols, b_rows, b_cols = _check_rows_and_columns(a, b)
n_a = a_rows.shape[0]
n_b = b_rows.shape[0]
result = np.array(list(list(similarity(a_rows[i], a_cols[i],
b_rows[j], b_cols[j])
for j in range(n_b))
for i in range(n_a)))
return result
def consensus_score(a, b, similarity="jaccard"):
"""The similarity of two sets of biclusters.
Similarity between individual biclusters is computed. Then the
best matching between sets is found using the Hungarian algorithm.
The final score is the sum of similarities divided by the size of
the larger set.
Read more in the :ref:`User Guide <biclustering>`.
Parameters
----------
a : (rows, columns)
Tuple of row and column indicators for a set of biclusters.
b : (rows, columns)
Another set of biclusters like ``a``.
similarity : string or function, optional, default: "jaccard"
May be the string "jaccard" to use the Jaccard coefficient, or
any function that takes four arguments, each of which is a 1d
indicator vector: (a_rows, a_columns, b_rows, b_columns).
References
----------
* Hochreiter, Bodenhofer, et. al., 2010. `FABIA: factor analysis
for bicluster acquisition
<https://www.ncbi.nlm.nih.gov/pmc/articles/PMC2881408/>`__.
"""
if similarity == "jaccard":
similarity = _jaccard
matrix = _pairwise_similarity(a, b, similarity)
indices = linear_assignment(1. - matrix)
n_a = len(a[0])
n_b = len(b[0])
return matrix[indices[:, 0], indices[:, 1]].sum() / max(n_a, n_b)
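# Hedged usage sketch (illustrative, not part of the module): two identical
# sets, each holding a single bicluster that covers rows {0, 1} and columns
# {0, 2} of a hypothetical 3x3 matrix, match perfectly and score 1.0.
def _example_consensus_score():
    rows = np.array([[True, True, False]])
    cols = np.array([[True, False, True]])
    a = (rows, cols)
    b = (rows.copy(), cols.copy())
    return consensus_score(a, b)  # expected value: 1.0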
| bsd-3-clause |
terkkila/scikit-learn | sklearn/linear_model/tests/test_sgd.py | 129 | 43401 | import pickle
import unittest
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import raises
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_false, assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises_regexp
from sklearn import linear_model, datasets, metrics
from sklearn.base import clone
from sklearn.linear_model import SGDClassifier, SGDRegressor
from sklearn.preprocessing import LabelEncoder, scale, MinMaxScaler
class SparseSGDClassifier(SGDClassifier):
def fit(self, X, y, *args, **kw):
X = sp.csr_matrix(X)
return super(SparseSGDClassifier, self).fit(X, y, *args, **kw)
def partial_fit(self, X, y, *args, **kw):
X = sp.csr_matrix(X)
return super(SparseSGDClassifier, self).partial_fit(X, y, *args, **kw)
def decision_function(self, X):
X = sp.csr_matrix(X)
return super(SparseSGDClassifier, self).decision_function(X)
def predict_proba(self, X):
X = sp.csr_matrix(X)
return super(SparseSGDClassifier, self).predict_proba(X)
class SparseSGDRegressor(SGDRegressor):
def fit(self, X, y, *args, **kw):
X = sp.csr_matrix(X)
return SGDRegressor.fit(self, X, y, *args, **kw)
def partial_fit(self, X, y, *args, **kw):
X = sp.csr_matrix(X)
return SGDRegressor.partial_fit(self, X, y, *args, **kw)
def decision_function(self, X, *args, **kw):
X = sp.csr_matrix(X)
return SGDRegressor.decision_function(self, X, *args, **kw)
# Test Data
# test sample 1
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
Y = [1, 1, 1, 2, 2, 2]
T = np.array([[-1, -1], [2, 2], [3, 2]])
true_result = [1, 2, 2]
# test sample 2; string class labels
X2 = np.array([[-1, 1], [-0.75, 0.5], [-1.5, 1.5],
[1, 1], [0.75, 0.5], [1.5, 1.5],
[-1, -1], [0, -0.5], [1, -1]])
Y2 = ["one"] * 3 + ["two"] * 3 + ["three"] * 3
T2 = np.array([[-1.5, 0.5], [1, 2], [0, -2]])
true_result2 = ["one", "two", "three"]
# test sample 3
X3 = np.array([[1, 1, 0, 0, 0, 0], [1, 1, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0], [0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 1, 1], [0, 0, 0, 0, 1, 1],
[0, 0, 0, 1, 0, 0], [0, 0, 0, 1, 0, 0]])
Y3 = np.array([1, 1, 1, 1, 2, 2, 2, 2])
# test sample 4 - two more or less redundant feature groups
X4 = np.array([[1, 0.9, 0.8, 0, 0, 0], [1, .84, .98, 0, 0, 0],
[1, .96, .88, 0, 0, 0], [1, .91, .99, 0, 0, 0],
[0, 0, 0, .89, .91, 1], [0, 0, 0, .79, .84, 1],
[0, 0, 0, .91, .95, 1], [0, 0, 0, .93, 1, 1]])
Y4 = np.array([1, 1, 1, 1, 2, 2, 2, 2])
iris = datasets.load_iris()
# test sample 5 - test sample 1 as binary classification problem
X5 = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
Y5 = [1, 1, 1, 2, 2, 2]
true_result5 = [0, 1, 1]
# Classification Test Case
class CommonTest(object):
def factory(self, **kwargs):
if "random_state" not in kwargs:
kwargs["random_state"] = 42
return self.factory_class(**kwargs)
# a simple implementation of ASGD to use for testing
# uses squared loss to find the gradient
def asgd(self, X, y, eta, alpha, weight_init=None, intercept_init=0.0):
if weight_init is None:
weights = np.zeros(X.shape[1])
else:
weights = weight_init
average_weights = np.zeros(X.shape[1])
intercept = intercept_init
average_intercept = 0.0
decay = 1.0
# sparse data has a fixed decay of .01
if (isinstance(self, SparseSGDClassifierTestCase) or
isinstance(self, SparseSGDRegressorTestCase)):
decay = .01
for i, entry in enumerate(X):
p = np.dot(entry, weights)
p += intercept
gradient = p - y[i]
weights *= 1.0 - (eta * alpha)
weights += -(eta * gradient * entry)
intercept += -(eta * gradient) * decay
average_weights *= i
average_weights += weights
average_weights /= i + 1.0
average_intercept *= i
average_intercept += intercept
average_intercept /= i + 1.0
return average_weights, average_intercept
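    # Note on the helper above: after processing sample i it holds the running
    # averages
    #   average_weights   = mean(weights_0, ..., weights_i)
    #   average_intercept = mean(intercept_0, ..., intercept_i)
    # which is what estimators fitted with average=True are expected to expose
    # through coef_ and intercept_ in the tests below.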
def _test_warm_start(self, X, Y, lr):
# Test that explicit warm restart...
clf = self.factory(alpha=0.01, eta0=0.01, n_iter=5, shuffle=False,
learning_rate=lr)
clf.fit(X, Y)
clf2 = self.factory(alpha=0.001, eta0=0.01, n_iter=5, shuffle=False,
learning_rate=lr)
clf2.fit(X, Y,
coef_init=clf.coef_.copy(),
intercept_init=clf.intercept_.copy())
# ... and implicit warm restart are equivalent.
clf3 = self.factory(alpha=0.01, eta0=0.01, n_iter=5, shuffle=False,
warm_start=True, learning_rate=lr)
clf3.fit(X, Y)
assert_equal(clf3.t_, clf.t_)
assert_array_almost_equal(clf3.coef_, clf.coef_)
clf3.set_params(alpha=0.001)
clf3.fit(X, Y)
assert_equal(clf3.t_, clf2.t_)
assert_array_almost_equal(clf3.coef_, clf2.coef_)
def test_warm_start_constant(self):
self._test_warm_start(X, Y, "constant")
def test_warm_start_invscaling(self):
self._test_warm_start(X, Y, "invscaling")
def test_warm_start_optimal(self):
self._test_warm_start(X, Y, "optimal")
def test_input_format(self):
# Input format tests.
clf = self.factory(alpha=0.01, n_iter=5,
shuffle=False)
clf.fit(X, Y)
Y_ = np.array(Y)[:, np.newaxis]
Y_ = np.c_[Y_, Y_]
assert_raises(ValueError, clf.fit, X, Y_)
def test_clone(self):
# Test whether clone works ok.
clf = self.factory(alpha=0.01, n_iter=5, penalty='l1')
clf = clone(clf)
clf.set_params(penalty='l2')
clf.fit(X, Y)
clf2 = self.factory(alpha=0.01, n_iter=5, penalty='l2')
clf2.fit(X, Y)
assert_array_equal(clf.coef_, clf2.coef_)
def test_plain_has_no_average_attr(self):
clf = self.factory(average=True, eta0=.01)
clf.fit(X, Y)
assert_true(hasattr(clf, 'average_coef_'))
assert_true(hasattr(clf, 'average_intercept_'))
assert_true(hasattr(clf, 'standard_intercept_'))
assert_true(hasattr(clf, 'standard_coef_'))
clf = self.factory()
clf.fit(X, Y)
assert_false(hasattr(clf, 'average_coef_'))
assert_false(hasattr(clf, 'average_intercept_'))
assert_false(hasattr(clf, 'standard_intercept_'))
assert_false(hasattr(clf, 'standard_coef_'))
def test_late_onset_averaging_not_reached(self):
clf1 = self.factory(average=600)
clf2 = self.factory()
for _ in range(100):
if isinstance(clf1, SGDClassifier):
clf1.partial_fit(X, Y, classes=np.unique(Y))
clf2.partial_fit(X, Y, classes=np.unique(Y))
else:
clf1.partial_fit(X, Y)
clf2.partial_fit(X, Y)
assert_array_almost_equal(clf1.coef_, clf2.coef_, decimal=16)
assert_almost_equal(clf1.intercept_, clf2.intercept_, decimal=16)
def test_late_onset_averaging_reached(self):
eta0 = .001
alpha = .0001
Y_encode = np.array(Y)
Y_encode[Y_encode == 1] = -1.0
Y_encode[Y_encode == 2] = 1.0
clf1 = self.factory(average=7, learning_rate="constant",
loss='squared_loss', eta0=eta0,
alpha=alpha, n_iter=2, shuffle=False)
clf2 = self.factory(average=0, learning_rate="constant",
loss='squared_loss', eta0=eta0,
alpha=alpha, n_iter=1, shuffle=False)
clf1.fit(X, Y_encode)
clf2.fit(X, Y_encode)
average_weights, average_intercept = \
self.asgd(X, Y_encode, eta0, alpha,
weight_init=clf2.coef_.ravel(),
intercept_init=clf2.intercept_)
assert_array_almost_equal(clf1.coef_.ravel(),
average_weights.ravel(),
decimal=16)
assert_almost_equal(clf1.intercept_, average_intercept, decimal=16)
class DenseSGDClassifierTestCase(unittest.TestCase, CommonTest):
"""Test suite for the dense representation variant of SGD"""
factory_class = SGDClassifier
def test_sgd(self):
# Check that SGD gives any results :-)
for loss in ("hinge", "squared_hinge", "log", "modified_huber"):
clf = self.factory(penalty='l2', alpha=0.01, fit_intercept=True,
loss=loss, n_iter=10, shuffle=True)
clf.fit(X, Y)
# assert_almost_equal(clf.coef_[0], clf.coef_[1], decimal=7)
assert_array_equal(clf.predict(T), true_result)
@raises(ValueError)
def test_sgd_bad_l1_ratio(self):
# Check whether expected ValueError on bad l1_ratio
self.factory(l1_ratio=1.1)
@raises(ValueError)
def test_sgd_bad_learning_rate_schedule(self):
# Check whether expected ValueError on bad learning_rate
self.factory(learning_rate="<unknown>")
@raises(ValueError)
def test_sgd_bad_eta0(self):
# Check whether expected ValueError on bad eta0
self.factory(eta0=0, learning_rate="constant")
@raises(ValueError)
def test_sgd_bad_alpha(self):
# Check whether expected ValueError on bad alpha
self.factory(alpha=-.1)
@raises(ValueError)
def test_sgd_bad_penalty(self):
# Check whether expected ValueError on bad penalty
self.factory(penalty='foobar', l1_ratio=0.85)
@raises(ValueError)
def test_sgd_bad_loss(self):
# Check whether expected ValueError on bad loss
self.factory(loss="foobar")
@raises(ValueError)
def test_sgd_n_iter_param(self):
# Test parameter validity check
self.factory(n_iter=-10000)
@raises(ValueError)
def test_sgd_shuffle_param(self):
# Test parameter validity check
self.factory(shuffle="false")
@raises(TypeError)
def test_argument_coef(self):
# Checks coef_init not allowed as model argument (only fit)
# Provided coef_ does not match dataset.
self.factory(coef_init=np.zeros((3,))).fit(X, Y)
@raises(ValueError)
def test_provide_coef(self):
# Checks coef_init shape for the warm starts
# Provided coef_ does not match dataset.
self.factory().fit(X, Y, coef_init=np.zeros((3,)))
@raises(ValueError)
def test_set_intercept(self):
# Checks intercept_ shape for the warm starts
# Provided intercept_ does not match dataset.
self.factory().fit(X, Y, intercept_init=np.zeros((3,)))
def test_set_intercept_binary(self):
# Checks intercept_ shape for the warm starts in binary case
self.factory().fit(X5, Y5, intercept_init=0)
def test_average_binary_computed_correctly(self):
# Checks the SGDClassifier correctly computes the average weights
eta = .1
alpha = 2.
n_samples = 20
n_features = 10
rng = np.random.RandomState(0)
X = rng.normal(size=(n_samples, n_features))
w = rng.normal(size=n_features)
clf = self.factory(loss='squared_loss',
learning_rate='constant',
eta0=eta, alpha=alpha,
fit_intercept=True,
n_iter=1, average=True, shuffle=False)
# simple linear function without noise
y = np.dot(X, w)
y = np.sign(y)
clf.fit(X, y)
average_weights, average_intercept = self.asgd(X, y, eta, alpha)
average_weights = average_weights.reshape(1, -1)
assert_array_almost_equal(clf.coef_,
average_weights,
decimal=14)
assert_almost_equal(clf.intercept_, average_intercept, decimal=14)
def test_set_intercept_to_intercept(self):
# Checks intercept_ shape consistency for the warm starts
# Inconsistent intercept_ shape.
clf = self.factory().fit(X5, Y5)
self.factory().fit(X5, Y5, intercept_init=clf.intercept_)
clf = self.factory().fit(X, Y)
self.factory().fit(X, Y, intercept_init=clf.intercept_)
@raises(ValueError)
def test_sgd_at_least_two_labels(self):
# Target must have at least two labels
self.factory(alpha=0.01, n_iter=20).fit(X2, np.ones(9))
def test_partial_fit_weight_class_balanced(self):
        # partial_fit with class_weight='balanced' is not supported
assert_raises_regexp(ValueError,
"class_weight 'balanced' is not supported for "
"partial_fit. In order to use 'balanced' weights, "
"use compute_class_weight\('balanced', classes, y\). "
"In place of y you can us a large enough sample "
"of the full training set target to properly "
"estimate the class frequency distributions. "
"Pass the resulting weights as the class_weight "
"parameter.",
self.factory(class_weight='balanced').partial_fit,
X, Y, classes=np.unique(Y))
def test_sgd_multiclass(self):
# Multi-class test case
clf = self.factory(alpha=0.01, n_iter=20).fit(X2, Y2)
assert_equal(clf.coef_.shape, (3, 2))
assert_equal(clf.intercept_.shape, (3,))
assert_equal(clf.decision_function([0, 0]).shape, (1, 3))
pred = clf.predict(T2)
assert_array_equal(pred, true_result2)
def test_sgd_multiclass_average(self):
eta = .001
alpha = .01
# Multi-class average test case
clf = self.factory(loss='squared_loss',
learning_rate='constant',
eta0=eta, alpha=alpha,
fit_intercept=True,
n_iter=1, average=True, shuffle=False)
np_Y2 = np.array(Y2)
clf.fit(X2, np_Y2)
classes = np.unique(np_Y2)
for i, cl in enumerate(classes):
y_i = np.ones(np_Y2.shape[0])
y_i[np_Y2 != cl] = -1
average_coef, average_intercept = self.asgd(X2, y_i, eta, alpha)
assert_array_almost_equal(average_coef, clf.coef_[i], decimal=16)
assert_almost_equal(average_intercept,
clf.intercept_[i],
decimal=16)
def test_sgd_multiclass_with_init_coef(self):
# Multi-class test case
clf = self.factory(alpha=0.01, n_iter=20)
clf.fit(X2, Y2, coef_init=np.zeros((3, 2)),
intercept_init=np.zeros(3))
assert_equal(clf.coef_.shape, (3, 2))
        assert_equal(clf.intercept_.shape, (3,))
pred = clf.predict(T2)
assert_array_equal(pred, true_result2)
def test_sgd_multiclass_njobs(self):
# Multi-class test case with multi-core support
clf = self.factory(alpha=0.01, n_iter=20, n_jobs=2).fit(X2, Y2)
assert_equal(clf.coef_.shape, (3, 2))
assert_equal(clf.intercept_.shape, (3,))
assert_equal(clf.decision_function([0, 0]).shape, (1, 3))
pred = clf.predict(T2)
assert_array_equal(pred, true_result2)
def test_set_coef_multiclass(self):
        # Checks coef_init and intercept_init shape for multi-class
# problems
# Provided coef_ does not match dataset
clf = self.factory()
assert_raises(ValueError, clf.fit, X2, Y2, coef_init=np.zeros((2, 2)))
# Provided coef_ does match dataset
clf = self.factory().fit(X2, Y2, coef_init=np.zeros((3, 2)))
# Provided intercept_ does not match dataset
clf = self.factory()
assert_raises(ValueError, clf.fit, X2, Y2,
intercept_init=np.zeros((1,)))
# Provided intercept_ does match dataset.
clf = self.factory().fit(X2, Y2, intercept_init=np.zeros((3,)))
def test_sgd_proba(self):
# Check SGD.predict_proba
# Hinge loss does not allow for conditional prob estimate.
# We cannot use the factory here, because it defines predict_proba
# anyway.
clf = SGDClassifier(loss="hinge", alpha=0.01, n_iter=10).fit(X, Y)
assert_false(hasattr(clf, "predict_proba"))
assert_false(hasattr(clf, "predict_log_proba"))
# log and modified_huber losses can output probability estimates
# binary case
for loss in ["log", "modified_huber"]:
clf = self.factory(loss="modified_huber", alpha=0.01, n_iter=10)
clf.fit(X, Y)
p = clf.predict_proba([3, 2])
assert_true(p[0, 1] > 0.5)
p = clf.predict_proba([-1, -1])
assert_true(p[0, 1] < 0.5)
p = clf.predict_log_proba([3, 2])
assert_true(p[0, 1] > p[0, 0])
p = clf.predict_log_proba([-1, -1])
assert_true(p[0, 1] < p[0, 0])
# log loss multiclass probability estimates
clf = self.factory(loss="log", alpha=0.01, n_iter=10).fit(X2, Y2)
d = clf.decision_function([[.1, -.1], [.3, .2]])
p = clf.predict_proba([[.1, -.1], [.3, .2]])
assert_array_equal(np.argmax(p, axis=1), np.argmax(d, axis=1))
assert_almost_equal(p[0].sum(), 1)
assert_true(np.all(p[0] >= 0))
p = clf.predict_proba([-1, -1])
d = clf.decision_function([-1, -1])
assert_array_equal(np.argsort(p[0]), np.argsort(d[0]))
l = clf.predict_log_proba([3, 2])
p = clf.predict_proba([3, 2])
assert_array_almost_equal(np.log(p), l)
l = clf.predict_log_proba([-1, -1])
p = clf.predict_proba([-1, -1])
assert_array_almost_equal(np.log(p), l)
# Modified Huber multiclass probability estimates; requires a separate
# test because the hard zero/one probabilities may destroy the
# ordering present in decision_function output.
clf = self.factory(loss="modified_huber", alpha=0.01, n_iter=10)
clf.fit(X2, Y2)
d = clf.decision_function([3, 2])
p = clf.predict_proba([3, 2])
if not isinstance(self, SparseSGDClassifierTestCase):
assert_equal(np.argmax(d, axis=1), np.argmax(p, axis=1))
else: # XXX the sparse test gets a different X2 (?)
assert_equal(np.argmin(d, axis=1), np.argmin(p, axis=1))
# the following sample produces decision_function values < -1,
# which would cause naive normalization to fail (see comment
# in SGDClassifier.predict_proba)
x = X.mean(axis=0)
d = clf.decision_function(x)
if np.all(d < -1): # XXX not true in sparse test case (why?)
p = clf.predict_proba(x)
assert_array_almost_equal(p[0], [1 / 3.] * 3)
def test_sgd_l1(self):
# Test L1 regularization
n = len(X4)
rng = np.random.RandomState(13)
idx = np.arange(n)
rng.shuffle(idx)
X = X4[idx, :]
Y = Y4[idx]
clf = self.factory(penalty='l1', alpha=.2, fit_intercept=False,
n_iter=2000, shuffle=False)
clf.fit(X, Y)
assert_array_equal(clf.coef_[0, 1:-1], np.zeros((4,)))
pred = clf.predict(X)
assert_array_equal(pred, Y)
# test sparsify with dense inputs
clf.sparsify()
assert_true(sp.issparse(clf.coef_))
pred = clf.predict(X)
assert_array_equal(pred, Y)
# pickle and unpickle with sparse coef_
clf = pickle.loads(pickle.dumps(clf))
assert_true(sp.issparse(clf.coef_))
pred = clf.predict(X)
assert_array_equal(pred, Y)
def test_class_weights(self):
# Test class weights.
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
clf = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False,
class_weight=None)
clf.fit(X, y)
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))
        # we give a small weight to class 1
clf = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False,
class_weight={1: 0.001})
clf.fit(X, y)
# now the hyperplane should rotate clock-wise and
# the prediction on this point should shift
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([-1]))
def test_equal_class_weight(self):
# Test if equal class weights approx. equals no class weights.
X = [[1, 0], [1, 0], [0, 1], [0, 1]]
y = [0, 0, 1, 1]
clf = self.factory(alpha=0.1, n_iter=1000, class_weight=None)
clf.fit(X, y)
X = [[1, 0], [0, 1]]
y = [0, 1]
clf_weighted = self.factory(alpha=0.1, n_iter=1000,
class_weight={0: 0.5, 1: 0.5})
clf_weighted.fit(X, y)
# should be similar up to some epsilon due to learning rate schedule
assert_almost_equal(clf.coef_, clf_weighted.coef_, decimal=2)
@raises(ValueError)
def test_wrong_class_weight_label(self):
# ValueError due to not existing class label.
clf = self.factory(alpha=0.1, n_iter=1000, class_weight={0: 0.5})
clf.fit(X, Y)
@raises(ValueError)
def test_wrong_class_weight_format(self):
# ValueError due to wrong class_weight argument type.
clf = self.factory(alpha=0.1, n_iter=1000, class_weight=[0.5])
clf.fit(X, Y)
def test_weights_multiplied(self):
# Tests that class_weight and sample_weight are multiplicative
class_weights = {1: .6, 2: .3}
sample_weights = np.random.random(Y4.shape[0])
multiplied_together = np.copy(sample_weights)
multiplied_together[Y4 == 1] *= class_weights[1]
multiplied_together[Y4 == 2] *= class_weights[2]
clf1 = self.factory(alpha=0.1, n_iter=20, class_weight=class_weights)
clf2 = self.factory(alpha=0.1, n_iter=20)
clf1.fit(X4, Y4, sample_weight=sample_weights)
clf2.fit(X4, Y4, sample_weight=multiplied_together)
assert_almost_equal(clf1.coef_, clf2.coef_)
def test_balanced_weight(self):
        # Test class weights for imbalanced data
# compute reference metrics on iris dataset that is quite balanced by
# default
X, y = iris.data, iris.target
X = scale(X)
idx = np.arange(X.shape[0])
rng = np.random.RandomState(6)
rng.shuffle(idx)
X = X[idx]
y = y[idx]
clf = self.factory(alpha=0.0001, n_iter=1000,
class_weight=None, shuffle=False).fit(X, y)
assert_almost_equal(metrics.f1_score(y, clf.predict(X), average='weighted'), 0.96,
decimal=1)
# make the same prediction using balanced class_weight
clf_balanced = self.factory(alpha=0.0001, n_iter=1000,
class_weight="balanced",
shuffle=False).fit(X, y)
assert_almost_equal(metrics.f1_score(y, clf_balanced.predict(X), average='weighted'), 0.96,
decimal=1)
# Make sure that in the balanced case it does not change anything
# to use "balanced"
assert_array_almost_equal(clf.coef_, clf_balanced.coef_, 6)
        # build a very, very imbalanced dataset out of iris data
X_0 = X[y == 0, :]
y_0 = y[y == 0]
X_imbalanced = np.vstack([X] + [X_0] * 10)
y_imbalanced = np.concatenate([y] + [y_0] * 10)
# fit a model on the imbalanced data without class weight info
clf = self.factory(n_iter=1000, class_weight=None, shuffle=False)
clf.fit(X_imbalanced, y_imbalanced)
y_pred = clf.predict(X)
assert_less(metrics.f1_score(y, y_pred, average='weighted'), 0.96)
# fit a model with balanced class_weight enabled
clf = self.factory(n_iter=1000, class_weight="balanced", shuffle=False)
clf.fit(X_imbalanced, y_imbalanced)
y_pred = clf.predict(X)
assert_greater(metrics.f1_score(y, y_pred, average='weighted'), 0.96)
# fit another using a fit parameter override
clf = self.factory(n_iter=1000, class_weight="balanced", shuffle=False)
clf.fit(X_imbalanced, y_imbalanced)
y_pred = clf.predict(X)
assert_greater(metrics.f1_score(y, y_pred, average='weighted'), 0.96)
def test_sample_weights(self):
# Test weights on individual samples
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
clf = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False)
clf.fit(X, y)
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))
        # we give a small weight to class 1
clf.fit(X, y, sample_weight=[0.001] * 3 + [1] * 2)
# now the hyperplane should rotate clock-wise and
# the prediction on this point should shift
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([-1]))
@raises(ValueError)
def test_wrong_sample_weights(self):
# Test if ValueError is raised if sample_weight has wrong shape
clf = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False)
# provided sample_weight too long
clf.fit(X, Y, sample_weight=np.arange(7))
@raises(ValueError)
def test_partial_fit_exception(self):
clf = self.factory(alpha=0.01)
# classes was not specified
clf.partial_fit(X3, Y3)
def test_partial_fit_binary(self):
third = X.shape[0] // 3
clf = self.factory(alpha=0.01)
classes = np.unique(Y)
clf.partial_fit(X[:third], Y[:third], classes=classes)
assert_equal(clf.coef_.shape, (1, X.shape[1]))
assert_equal(clf.intercept_.shape, (1,))
assert_equal(clf.decision_function([0, 0]).shape, (1, ))
id1 = id(clf.coef_.data)
clf.partial_fit(X[third:], Y[third:])
id2 = id(clf.coef_.data)
        # check that coef_ hasn't been re-allocated
        assert_equal(id1, id2)
y_pred = clf.predict(T)
assert_array_equal(y_pred, true_result)
def test_partial_fit_multiclass(self):
third = X2.shape[0] // 3
clf = self.factory(alpha=0.01)
classes = np.unique(Y2)
clf.partial_fit(X2[:third], Y2[:third], classes=classes)
assert_equal(clf.coef_.shape, (3, X2.shape[1]))
assert_equal(clf.intercept_.shape, (3,))
assert_equal(clf.decision_function([0, 0]).shape, (1, 3))
id1 = id(clf.coef_.data)
clf.partial_fit(X2[third:], Y2[third:])
id2 = id(clf.coef_.data)
        # check that coef_ hasn't been re-allocated
        assert_equal(id1, id2)
def test_fit_then_partial_fit(self):
# Partial_fit should work after initial fit in the multiclass case.
# Non-regression test for #2496; fit would previously produce a
# Fortran-ordered coef_ that subsequent partial_fit couldn't handle.
clf = self.factory()
clf.fit(X2, Y2)
clf.partial_fit(X2, Y2) # no exception here
def _test_partial_fit_equal_fit(self, lr):
for X_, Y_, T_ in ((X, Y, T), (X2, Y2, T2)):
clf = self.factory(alpha=0.01, eta0=0.01, n_iter=2,
learning_rate=lr, shuffle=False)
clf.fit(X_, Y_)
y_pred = clf.decision_function(T_)
t = clf.t_
classes = np.unique(Y_)
clf = self.factory(alpha=0.01, eta0=0.01, learning_rate=lr,
shuffle=False)
for i in range(2):
clf.partial_fit(X_, Y_, classes=classes)
y_pred2 = clf.decision_function(T_)
assert_equal(clf.t_, t)
assert_array_almost_equal(y_pred, y_pred2, decimal=2)
def test_partial_fit_equal_fit_constant(self):
self._test_partial_fit_equal_fit("constant")
def test_partial_fit_equal_fit_optimal(self):
self._test_partial_fit_equal_fit("optimal")
def test_partial_fit_equal_fit_invscaling(self):
self._test_partial_fit_equal_fit("invscaling")
def test_regression_losses(self):
clf = self.factory(alpha=0.01, learning_rate="constant",
eta0=0.1, loss="epsilon_insensitive")
clf.fit(X, Y)
assert_equal(1.0, np.mean(clf.predict(X) == Y))
clf = self.factory(alpha=0.01, learning_rate="constant",
eta0=0.1, loss="squared_epsilon_insensitive")
clf.fit(X, Y)
assert_equal(1.0, np.mean(clf.predict(X) == Y))
clf = self.factory(alpha=0.01, loss="huber")
clf.fit(X, Y)
assert_equal(1.0, np.mean(clf.predict(X) == Y))
clf = self.factory(alpha=0.01, learning_rate="constant", eta0=0.01,
loss="squared_loss")
clf.fit(X, Y)
assert_equal(1.0, np.mean(clf.predict(X) == Y))
def test_warm_start_multiclass(self):
self._test_warm_start(X2, Y2, "optimal")
def test_multiple_fit(self):
# Test multiple calls of fit w/ different shaped inputs.
clf = self.factory(alpha=0.01, n_iter=5,
shuffle=False)
clf.fit(X, Y)
assert_true(hasattr(clf, "coef_"))
# Non-regression test: try fitting with a different label set.
y = [["ham", "spam"][i] for i in LabelEncoder().fit_transform(Y)]
clf.fit(X[:, :-1], y)
class SparseSGDClassifierTestCase(DenseSGDClassifierTestCase):
"""Run exactly the same tests using the sparse representation variant"""
factory_class = SparseSGDClassifier
###############################################################################
# Regression Test Case
class DenseSGDRegressorTestCase(unittest.TestCase, CommonTest):
"""Test suite for the dense representation variant of SGD"""
factory_class = SGDRegressor
def test_sgd(self):
# Check that SGD gives any results.
clf = self.factory(alpha=0.1, n_iter=2,
fit_intercept=False)
clf.fit([[0, 0], [1, 1], [2, 2]], [0, 1, 2])
assert_equal(clf.coef_[0], clf.coef_[1])
@raises(ValueError)
def test_sgd_bad_penalty(self):
# Check whether expected ValueError on bad penalty
self.factory(penalty='foobar', l1_ratio=0.85)
@raises(ValueError)
def test_sgd_bad_loss(self):
# Check whether expected ValueError on bad loss
self.factory(loss="foobar")
def test_sgd_averaged_computed_correctly(self):
# Tests the average regressor matches the naive implementation
eta = .001
alpha = .01
n_samples = 20
n_features = 10
rng = np.random.RandomState(0)
X = rng.normal(size=(n_samples, n_features))
w = rng.normal(size=n_features)
# simple linear function without noise
y = np.dot(X, w)
clf = self.factory(loss='squared_loss',
learning_rate='constant',
eta0=eta, alpha=alpha,
fit_intercept=True,
n_iter=1, average=True, shuffle=False)
clf.fit(X, y)
average_weights, average_intercept = self.asgd(X, y, eta, alpha)
assert_array_almost_equal(clf.coef_,
average_weights,
decimal=16)
assert_almost_equal(clf.intercept_, average_intercept, decimal=16)
def test_sgd_averaged_partial_fit(self):
# Tests whether the partial fit yields the same average as the fit
eta = .001
alpha = .01
n_samples = 20
n_features = 10
rng = np.random.RandomState(0)
X = rng.normal(size=(n_samples, n_features))
w = rng.normal(size=n_features)
# simple linear function without noise
y = np.dot(X, w)
clf = self.factory(loss='squared_loss',
learning_rate='constant',
eta0=eta, alpha=alpha,
fit_intercept=True,
n_iter=1, average=True, shuffle=False)
clf.partial_fit(X[:int(n_samples / 2)][:], y[:int(n_samples / 2)])
clf.partial_fit(X[int(n_samples / 2):][:], y[int(n_samples / 2):])
average_weights, average_intercept = self.asgd(X, y, eta, alpha)
assert_array_almost_equal(clf.coef_,
average_weights,
decimal=16)
assert_almost_equal(clf.intercept_[0], average_intercept, decimal=16)
def test_average_sparse(self):
# Checks the average weights on data with 0s
eta = .001
alpha = .01
clf = self.factory(loss='squared_loss',
learning_rate='constant',
eta0=eta, alpha=alpha,
fit_intercept=True,
n_iter=1, average=True, shuffle=False)
n_samples = Y3.shape[0]
clf.partial_fit(X3[:int(n_samples / 2)][:], Y3[:int(n_samples / 2)])
clf.partial_fit(X3[int(n_samples / 2):][:], Y3[int(n_samples / 2):])
average_weights, average_intercept = self.asgd(X3, Y3, eta, alpha)
assert_array_almost_equal(clf.coef_,
average_weights,
decimal=16)
assert_almost_equal(clf.intercept_, average_intercept, decimal=16)
def test_sgd_least_squares_fit(self):
xmin, xmax = -5, 5
n_samples = 100
rng = np.random.RandomState(0)
X = np.linspace(xmin, xmax, n_samples).reshape(n_samples, 1)
# simple linear function without noise
y = 0.5 * X.ravel()
clf = self.factory(loss='squared_loss', alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_greater(score, 0.99)
# simple linear function with noise
y = 0.5 * X.ravel() + rng.randn(n_samples, 1).ravel()
clf = self.factory(loss='squared_loss', alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_greater(score, 0.5)
def test_sgd_epsilon_insensitive(self):
xmin, xmax = -5, 5
n_samples = 100
X = np.linspace(xmin, xmax, n_samples).reshape(n_samples, 1)
# simple linear function without noise
y = 0.5 * X.ravel()
clf = self.factory(loss='epsilon_insensitive', epsilon=0.01,
alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_true(score > 0.99)
# simple linear function with noise
y = 0.5 * X.ravel() \
+ np.random.randn(n_samples, 1).ravel()
clf = self.factory(loss='epsilon_insensitive', epsilon=0.01,
alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_true(score > 0.5)
def test_sgd_huber_fit(self):
xmin, xmax = -5, 5
n_samples = 100
rng = np.random.RandomState(0)
X = np.linspace(xmin, xmax, n_samples).reshape(n_samples, 1)
# simple linear function without noise
y = 0.5 * X.ravel()
clf = self.factory(loss="huber", epsilon=0.1, alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_greater(score, 0.99)
# simple linear function with noise
y = 0.5 * X.ravel() + rng.randn(n_samples, 1).ravel()
clf = self.factory(loss="huber", epsilon=0.1, alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_greater(score, 0.5)
def test_elasticnet_convergence(self):
# Check that the SGD output is consistent with coordinate descent
n_samples, n_features = 1000, 5
rng = np.random.RandomState(0)
X = np.random.randn(n_samples, n_features)
# ground_truth linear model that generate y from X and to which the
# models should converge if the regularizer would be set to 0.0
ground_truth_coef = rng.randn(n_features)
y = np.dot(X, ground_truth_coef)
# XXX: alpha = 0.1 seems to cause convergence problems
for alpha in [0.01, 0.001]:
for l1_ratio in [0.5, 0.8, 1.0]:
cd = linear_model.ElasticNet(alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=False)
cd.fit(X, y)
sgd = self.factory(penalty='elasticnet', n_iter=50,
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=False)
sgd.fit(X, y)
err_msg = ("cd and sgd did not converge to comparable "
"results for alpha=%f and l1_ratio=%f"
% (alpha, l1_ratio))
assert_almost_equal(cd.coef_, sgd.coef_, decimal=2,
err_msg=err_msg)
def test_partial_fit(self):
third = X.shape[0] // 3
clf = self.factory(alpha=0.01)
clf.partial_fit(X[:third], Y[:third])
assert_equal(clf.coef_.shape, (X.shape[1], ))
assert_equal(clf.intercept_.shape, (1,))
assert_equal(clf.decision_function([0, 0]).shape, (1, ))
id1 = id(clf.coef_.data)
clf.partial_fit(X[third:], Y[third:])
id2 = id(clf.coef_.data)
        # check that coef_ hasn't been re-allocated
        assert_equal(id1, id2)
def _test_partial_fit_equal_fit(self, lr):
clf = self.factory(alpha=0.01, n_iter=2, eta0=0.01,
learning_rate=lr, shuffle=False)
clf.fit(X, Y)
y_pred = clf.predict(T)
t = clf.t_
clf = self.factory(alpha=0.01, eta0=0.01,
learning_rate=lr, shuffle=False)
for i in range(2):
clf.partial_fit(X, Y)
y_pred2 = clf.predict(T)
assert_equal(clf.t_, t)
assert_array_almost_equal(y_pred, y_pred2, decimal=2)
def test_partial_fit_equal_fit_constant(self):
self._test_partial_fit_equal_fit("constant")
def test_partial_fit_equal_fit_optimal(self):
self._test_partial_fit_equal_fit("optimal")
def test_partial_fit_equal_fit_invscaling(self):
self._test_partial_fit_equal_fit("invscaling")
def test_loss_function_epsilon(self):
clf = self.factory(epsilon=0.9)
clf.set_params(epsilon=0.1)
assert clf.loss_functions['huber'][1] == 0.1
class SparseSGDRegressorTestCase(DenseSGDRegressorTestCase):
# Run exactly the same tests using the sparse representation variant
factory_class = SparseSGDRegressor
def test_l1_ratio():
# Test if l1 ratio extremes match L1 and L2 penalty settings.
X, y = datasets.make_classification(n_samples=1000,
n_features=100, n_informative=20,
random_state=1234)
# test if elasticnet with l1_ratio near 1 gives same result as pure l1
est_en = SGDClassifier(alpha=0.001, penalty='elasticnet',
l1_ratio=0.9999999999, random_state=42).fit(X, y)
est_l1 = SGDClassifier(alpha=0.001, penalty='l1', random_state=42).fit(X, y)
assert_array_almost_equal(est_en.coef_, est_l1.coef_)
# test if elasticnet with l1_ratio near 0 gives same result as pure l2
est_en = SGDClassifier(alpha=0.001, penalty='elasticnet',
l1_ratio=0.0000000001, random_state=42).fit(X, y)
est_l2 = SGDClassifier(alpha=0.001, penalty='l2', random_state=42).fit(X, y)
assert_array_almost_equal(est_en.coef_, est_l2.coef_)
def test_underflow_or_overlow():
with np.errstate(all='raise'):
# Generate some weird data with hugely unscaled features
rng = np.random.RandomState(0)
n_samples = 100
n_features = 10
X = rng.normal(size=(n_samples, n_features))
X[:, :2] *= 1e300
assert_true(np.isfinite(X).all())
# Use MinMaxScaler to scale the data without introducing a numerical
# instability (computing the standard deviation naively is not possible
# on this data)
X_scaled = MinMaxScaler().fit_transform(X)
assert_true(np.isfinite(X_scaled).all())
# Define a ground truth on the scaled data
ground_truth = rng.normal(size=n_features)
y = (np.dot(X_scaled, ground_truth) > 0.).astype(np.int32)
assert_array_equal(np.unique(y), [0, 1])
model = SGDClassifier(alpha=0.1, loss='squared_hinge', n_iter=500)
# smoke test: model is stable on scaled data
model.fit(X_scaled, y)
assert_true(np.isfinite(model.coef_).all())
# model is numerically unstable on unscaled data
msg_regxp = (r"Floating-point under-/overflow occurred at epoch #.*"
" Scaling input data with StandardScaler or MinMaxScaler"
" might help.")
assert_raises_regexp(ValueError, msg_regxp, model.fit, X, y)
def test_numerical_stability_large_gradient():
# Non regression test case for numerical stability on scaled problems
# where the gradient can still explode with some losses
model = SGDClassifier(loss='squared_hinge', n_iter=10, shuffle=True,
penalty='elasticnet', l1_ratio=0.3, alpha=0.01,
eta0=0.001, random_state=0)
with np.errstate(all='raise'):
model.fit(iris.data, iris.target)
assert_true(np.isfinite(model.coef_).all())
def test_large_regularization():
# Non regression tests for numerical stability issues caused by large
# regularization parameters
for penalty in ['l2', 'l1', 'elasticnet']:
model = SGDClassifier(alpha=1e5, learning_rate='constant', eta0=0.1,
n_iter=5, penalty=penalty, shuffle=False)
with np.errstate(all='raise'):
model.fit(iris.data, iris.target)
assert_array_almost_equal(model.coef_, np.zeros_like(model.coef_))
| bsd-3-clause |
BrainTech/openbci | obci/logic/experiment_builder/analysis.py | 1 | 8412 | import numpy as np
import os
import time
import sys
import random
import variables_pb2
import matplotlib.pyplot as plt
import math
from multiplexer.multiplexer_constants import peers, types
from multiplexer.clients import BaseMultiplexerServer, connect_client
from operator import setitem
#def distinct(l):
# d = {}
# map(setitem, (d,)*len(l), l, [])
# return d.keys()
def distinct(l):
return list(set(l))
"""
d - array of signal values (one channel)
freqs - array of frequencies which we want to see on spectrum
window - length of signal in seconds
sampling_rate
returns array of power of given frequencies
"""
def simple_analyse(d, freqs, window, sampling_rate):
d2 = abs(np.fft.fft(d))
d2 = [x*x for x in d2]
d3 = []
# if (len(freqs) > 0):
freqs = distinct(freqs)
for i in freqs:
d3.append(d2[int(i) * window])
print "d3 ",d3
return d3
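# Hedged usage sketch (illustrative only): a 2-second window sampled at 128 Hz
# containing a pure 10 Hz sine concentrates its power in the 10 Hz bin, so the
# value returned for 10 Hz is far larger than the one for 11 Hz.
def _example_simple_analyse():
    sampling_rate = 128
    window = 2  # seconds
    t = np.arange(window * sampling_rate) / float(sampling_rate)
    d = np.sin(2 * np.pi * 10 * t)
    return simple_analyse(d, [10, 11], window, sampling_rate)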
def draw_spectrum(d, freqs, window, sampling_rate):
amplitudes = simple_analyse(d, freqs, window, sampling_rate)
t = np.arange(len(freqs))
plt.plot(t,amplitudes)
"""
d - signal values array, single channel
window - d contains window * sampling_rate values
sampling_rate -
numOfFreq - number of stimulation frequencies
returns array of differences between standard deviations of given frequencies and average standard deviation of "other" frequencies
"""
def stat_analyse(d, numOfFreq, window, freqs, sampling_rate):
mul = window
d = d[-int(sampling_rate * mul):]
#d = d[-int(sampling_rate * (now - tt)):]
#d.extend((window - now + tt) * [0])
d2 = abs(np.fft.fft(d))
d2 = [x**2 for x in d2]
d2[0] = 0
d2[1] = 0
j = len(d2)
amplitudes = [0] * (numOfFreq * 3)
otherAmplitudes = numOfFreq * [0]
freqs_new = (numOfFreq * 3) * [0]
for i in range(numOfFreq):
freqs_new[i] = freqs[i]
freqs_new[i + numOfFreq] = 2 * freqs[i]
freqs_new[i + (2 * numOfFreq)] = 3 * freqs[i]
freqs = list(freqs_new)
otherFreqs = (numOfFreq * 3) * [0]
print "numOfFreq ", numOfFreq, "len(d2) ", len(d2), " int(freqs[i + (numOfFreq * 2)] * mul) ", int(freqs[i + (numOfFreq * 2)] * mul), "freqs ", freqs
for i in range(numOfFreq):
otherFreqs[i] = float(freqs[i]) + 0.5
print "len freqs: ", len(freqs), " numOfFreq ", numOfFreq, "len \
otherFreqs: ", len(otherFreqs)
for i in range(numOfFreq):
amplitudes[i] = d2[int(freqs[i] * mul)] - 0.5*(d2[int(freqs[i] * mul - 1)] + d2[int(freqs[i]* mul + 1)]) \
+ d2[int(freqs[i + numOfFreq] * mul)] - 0.5*(d2[int(freqs[i + numOfFreq] * mul - 1)] + d2[int(freqs[i + numOfFreq]* mul + 1)]) \
+ d2[int(freqs[i + (numOfFreq * 2)] * mul)] - 0.5*(d2[int(freqs[i + (numOfFreq * 2)] * mul) - 1] + d2[int(freqs[i + (numOfFreq * 2)]* mul + 1)])
otherAmplitudes[i] = d2[int(otherFreqs[i] * mul)] - 0.5*(d2[int(otherFreqs[i] * mul - 1)] + d2[int(otherFreqs[i] * mul + 1)])\
+ d2[int(otherFreqs[i + numOfFreq] * mul)] - 0.5*(d2[int(otherFreqs[i + numOfFreq] * mul - 1)] + d2[int(otherFreqs[i + numOfFreq]* mul + 1)])\
+ d2[int(otherFreqs[i + (numOfFreq * 2)] * mul)] - 0.5*(d2[int(otherFreqs[i + (numOfFreq * 2)] * mul - 1)] + d2[int(otherFreqs[i + (numOfFreq * 2)]* mul + 1)])
avgAmplitude = 0.0
avgOtherAmplitude = 0.0
for i in range(numOfFreq):
avgAmplitude += amplitudes[i]
avgOtherAmplitude += otherAmplitudes[i]
avgAmplitude /= float(numOfFreq)
avgOtherAmplitude /= float(numOfFreq)
stdAmplitude = 0.0
stdOtherAmplitude = 0.0
diffs = numOfFreq * [0.0]
otherDiffs = numOfFreq * [0.0]
avgs = numOfFreq * [0.0]
otherAvgs = numOfFreq * [0.0]
for i in range(numOfFreq):
avgs[i] = ((avgAmplitude * numOfFreq) - amplitudes[i]) / float(numOfFreq - 1)
otherAvgs[i] = ((avgOtherAmplitude * numOfFreq) - otherAmplitudes[i]) / float(numOfFreq - 1)
for i in range(numOfFreq):
diffs[i] = amplitudes[i] - avgs[i]
otherDiffs[i] = otherAmplitudes[i] - otherAvgs[i]
sumOtherDiffs = 0.0
for i in range(numOfFreq):
sumOtherDiffs += otherDiffs[i]
avgOtherDiffs = float(sumOtherDiffs/numOfFreq)
bestCandidate = 0
for i in range(numOfFreq):
if (diffs[bestCandidate] < diffs[i]):
bestCandidate = i
for i in range(numOfFreq):
stdAmplitude += (amplitudes[i]-avgs[i])**2
stdOtherAmplitude += (otherAmplitudes[i]-otherAvgs[i])**2
stdAmplitude = (stdAmplitude/float(numOfFreq-1))**0.5
stdOtherAmplitude = (stdOtherAmplitude/float(numOfFreq-1))**0.5
return [x - avgOtherDiffs for x in diffs]
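# Hedged usage sketch (illustrative only): with three candidate stimulation
# frequencies and a signal dominated by 8 Hz, the first entry of the returned
# vector should clearly stand out from the other two.
def _example_stat_analyse():
    sampling_rate = 128
    window = 4  # seconds of signal consumed by stat_analyse
    freqs = [8, 14, 28]
    t = np.arange(window * sampling_rate) / float(sampling_rate)
    d = list(np.sin(2 * np.pi * 8 * t))
    return stat_analyse(d, len(freqs), window, freqs, sampling_rate)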
"""
returns an array of deviations for each frequency; when plotted, the deviations of each frequency are to be drawn in a different colour
"""
#def diffs(d, freqs, ):
"""
d -- array of samples
f -- array of frequencies
t -- array of timestamps of samples
tags -- array of timestamps of signal changes
n - number of seconds each frequency was presented
sr - sampling rate
min_freq
"""
def prepeare_signal_round_i(d, n, sr, min_freq, f):
avg = sr * n * [0]
amplitudes = len(f) * [0]
i = 0
f2 = list(f)
f2.sort()
poz = {}
for i in range(len(f2)):
poz[f2[i]] = i
while (len(d) > 0) and (i/2 < len(f)):
if (i%2 == 1):
avg = [avg[i] + d[i] for i in range(sr * n)]
#print "f[i] ", f[i], " min_freq ",min_freq, "f ", f
#f
#amplitudes[f[i] - min_freq] = simple_analyse(d, [f[i]], n, sr)[0]
else:
amplitudes[poz[f[i/2]]] = simple_analyse(d, [f[i/2]], n, sr)[0]
d = d[(sr * n) :]
i += 1
avg = [x/float(len(f)) for x in avg]
avg = simple_analyse(avg, f2, n, sr)
return avg, amplitudes
def draw_plots_round_i(d, n, sr, min_freq, f):
d1, d2 = prepeare_signal_round_i(d, n, sr, min_freq, f)
t1 = np.arange(len(d1))
t2 = np.arange(len(d2))
plt.plot(t1, d1, 'g', t2, d2, 'b')
plt.show()
def draw_plots_round_ii(d, numOfFreqs, window, freqs, sampling_rate):
diffs_per_freq = numOfFreqs * [numOfFreqs * [0]]
t = np.arange((sampling_rate * window))
f2 = list(freqs)
f2.sort()
poz = {}
for i in range(len(f2)):
poz[f2[i]] = i
i = 0
t = np.arange(len(freqs))
while (len(d) > 0)and(i<len(freqs)):
res = stat_analyse(d, numOfFreqs, window, freqs, sampling_rate)
diffs_per_freq[poz[freqs[i]]] = res
i += 1
d = d[(sampling_rate * window):]
plt.plot(t, res)
plt.show()
# return diffs_per_freq
"""
extracts, from the vector of data and the vector of tags, exactly the data we need and the vector of frequencies used
"""
def get_data(d, tags, numOfFreqs, n, sr):
#data = variables_pb2.SampleVector()
i = 0
start = 0
f = []
for x in tags.tags:
if (x.name == "experiment_update"):
if (start == 0):
start = x.start_timestamp
for y in x.desc.variables:
if y.key == "Freqs":
f.append((int(y.value.split(",")[1])))
data = []
for s in d.samples:
if (s.timestamp >= start and s.timestamp <= start + numOfFreqs * n * sr):
data.append(s.value)
return data, f
def get_data_ii(d, tags, numOfFreqs, n, sr):
#data = variables_pb2.SampleVector()
i = 0
f = []
for x in tags.tags:
if (x.name == "experiment_update"):
start = x.start_timestamp
for y in x.desc.variables:
if y.key == "Freqs":
f = [int(z) for z in (y.value.split(']'))[0].split('[')[1].split(',')]
f = f[:int(numOfFreqs)]
print "GET_DATA_II Freqs", f
data = []
for s in d.samples:
if (s.timestamp >= start and s.timestamp <= start + numOfFreqs * n * sr):
data.append(s.value)
return data, f
def round1(d, tags, numOfFreqs,n,sr):
d,f = get_data(d, tags, numOfFreqs,n,sr)
print " f ", f
min_freq = min(f)
draw_plots_round_i(d, n, sr, min_freq, f)
def round2(d, tags, numOfFreqs,n,sr):
d,f = get_data_ii(d, tags, numOfFreqs,n,sr)
print " f ", f
min_freq = min(f)
draw_plots_round_ii(d, numOfFreqs, n, f, sr)
#def draw_plots_round_ii()
#if __name__ == "__main__":
| gpl-3.0 |
humanoid-path-planner/hpp-corbaserver | src/hpp/corbaserver/benchmark.py | 1 | 17430 | #!/usr/bin/env python
# Copyright (c) 2016 CNRS
# Author: Joseph Mirabel
#
# This file is part of hpp-corbaserver.
# hpp-corbaserver is free software: you can redistribute it
# and/or modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation, either version
# 3 of the License, or (at your option) any later version.
#
# hpp-corbaserver is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty
# of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Lesser Public License for more details. You should have
# received a copy of the GNU Lesser General Public License along with
# hpp-corbaserver. If not, see
# <http://www.gnu.org/licenses/>.
from __future__ import print_function
import numpy as np
import sys
## \cond
class _BenchmarkIter (object):
def __init__ (self, seedI, caseI, iterPerCaseI, case = None):
self.seedI = seedI
self.caseI = caseI
self.case = case
self.newCase = True;
self.iterPerCaseI = iterPerCaseI
def __repr__ (self):
if self.case is None:
return "iteration " + str(self.iterPerCaseI) + " for case " + str(self.caseI) + " at seed " + str(self.seedI)
else:
return "iteration " + str(self.iterPerCaseI) + " for case " + str(case) + " at seed " + str(self.seedI)
class _BenchmarkIterator (object):
def __init__(self, seedRange, cases, iterPerCase, initCase, startAt = None):
self.seedRangeL = len(seedRange)
self.casesL = len(cases)
self.iterPerCase = iterPerCase
if startAt is None:
self.current = _BenchmarkIter (0, 0, 0)
else:
self.current = startAt
self.start = True
def __iter__(self):
return self
def next(self): # Python 2 iteration
return self.__next__ ()
def __next__(self): # Python 3 iteration
if self.start:
self.newCase = True
self.start = False
return self.current
self.current.newCase = False
self.current.iterPerCaseI += 1
if self.current.iterPerCaseI >= self.iterPerCase:
self.current.seedI += 1
if self.current.seedI >= self.seedRangeL:
self.current.newCase = True
self.current.caseI += 1
if self.current.caseI >= self.casesL:
raise StopIteration
self.current.seedI = 0
self.current.iterPerCaseI = 0
return self.current
## \endcond
## class to do benchmarking
#
# ## Basic usage ##
#
# \code{.py}
# # Here goes your script to load the problem.
# robot = hpp.corbaserver.robot.Robot (...)
# ps = hpp.corbaserver.problem_solver.ProblemSolver (...)
# ...
#
# from hpp.corbaserver import Benchmark
# benchmark = Benchmark (robot.client, robot, ps)
# benchmark.iterPerCase = 10
# results = benchmark.do()
# \endcode
#
# \sa hpp.corbaserver.benchmark.Benchmark.do
# hpp.corbaserver.benchmark.Benchmark.seedRange
#
# If you wish to replot datas stored in a file:
# \code{.py}
# from hpp.corbaserver import Benchmark
# import matplotlib.pyplot as plt
# b = Benchmark (None, None, None)
# b.resumeFrom ("datafile")
#
# fig, axes = plt.subplots(nrows=1, ncols=2)
# b.plotTime (axes[0])
# b.plotPathLength (axes[1])
# plt.show ()
#
# \endcode
#
# \sa
# hpp.corbaserver.benchmark.Benchmark.plotTime
# hpp.corbaserver.benchmark.Benchmark.plotPathLength
#
# ## Advanced usage ##
#
# \code{.py}
# from hpp.corbaserver import Benchmark
# b = Benchmark (robot.client, robot, ps)
# b.seedRange = range (10)
# b.iterPerCase = 10
#
# b.cases = list()
# for type in ["Progressive", "Global"]:
# for param in [0.1, 0.2]:
# b.cases.append((type,param))
# b.cases.append(("None",0.1))
#
# b.tryResumeAndDelete ()
#
# def initialize (bench, case, iter):
# bench.ps.selectPathProjector (case[0], case[1])
#
# results = b.do(initCase = initialize)
# \endcode
#
# \sa
# hpp.corbaserver.benchmark.Benchmark.cases
# hpp.corbaserver.benchmark.Benchmark.iterPerCase
# hpp.corbaserver.benchmark.Benchmark.tryResumeAndDelete
#
# ## What if HPP crashes ##
#
# \note This section assumes you have installed
# https://github.com/humanoid-path-planner/hpp-tools
#
# You can do the following
# \code{.py}
# try:
# b.do ()
# except:
# import sys
# sys.exit(1)
#
# b.writeResume (filename = "yourresults")
# \endcode
#
# Then, launch your server with this:
# \code{bash}
# hppautorestart hppcorbaserver
# \endcode
#
# Finally, launch your script with this:
# \code{bash}
# hpp_run_benchmark path_to_python_script_file.py
# \endcode
# This will restart the server whenever it crashes and will resume
# the benchmarks where it stopped.
class Benchmark (object):
## Used to transform HPP output into seconds
toSeconds = np.array ([60*60,60,1,1e-3])
## The filename of the crash file.
crashFile = "/tmp/resume.hpp.corbaserver.benchmark.pickle"
def __init__ (self, client, robot, problemSolver):
## A list of seed to initialize the random generator.
self.seedRange = list(range(1))
## A list of cases for which benchmarking will be done.
self.cases = [None]
## Number of times one case is repeated (for one seed).
self.iterPerCase = 1
self.client = client
self.robot = robot
self.ps = problemSolver
self.current = None
        # internal data
self.results = dict ()
self.results['user'] = []
self.results['time'] = []
self.results['pathLength'] = []
self.results['states']=[]
## Solve the same problem for the specified cases, for various random seed.
# \param initCase a function of 3 arguments:
# - the calling Benchmark instance
# - the current element of the list of Benchmark.cases
# - the current _BenchmarkIterator (Normally not useful).
def do(self, initCase = lambda this, case, iter: None):
for iter in _BenchmarkIterator (self.seedRange, self.cases, self.iterPerCase, initCase, startAt = self.current):
self.client.problem.clearRoadmap ()
self.client.problem.setRandomSeed (self.seedRange[iter.seedI])
try:
if iter.newCase:
print("=======================================================")
print("Case ", self.getCase (iter))
print("=======================================================")
self.results['user'].append (initCase (self, self.getCase(iter), iter))
self.results['time'].append (self.client.problem.solve ())
self.results['pathLength'].append (self.client.problem.pathLength (self.client.problem.numberPaths()-1))
self.results['states'].append(self.ps.numberNodes())
except Exception as err:
# write current data to restart at the same point
self.current = iter
self.writeResume ()
print(err)
print("\nOops, something went wrong.\nTo resume at the benchmark just before the crash, use method thisobject.tryResumeAndDelete () before calling method do()\n")
raise
print("Solved", iter, "in", self.results['time'][-1])
i = 0
nb = self.iterPerCase * len(self.seedRange)
for c in self.cases:
t = np.array (self.results['time'][i:i+nb]).dot (self.toSeconds)
pl = np.array (self.results['pathLength'][i:i+nb])
nodes = np.array (self.results['states'])
print("=====================================================")
print("Case", c)
print("Mean time (s):", np.mean(t))
print("Std dev time (s):", np.std(t))
print("Mean number of nodes:", np.mean(nodes))
print("Std dev nb nodes:", np.std(nodes))
print("Average length:", np.mean(pl))
print("std dev length:", np.std(pl))
print("=====================================================")
i += nb
t = np.array (self.results['time']).dot (self.toSeconds)
nodes = np.array (self.results['states'])
pl = np.array (self.results['pathLength'])
print("=====================================================")
print("All cases together")
print("Mean time (s):", np.mean(t))
print("Std dev time (s):", np.std(t))
print("Mean number of nodes:", np.mean(nodes))
print("Std dev nb nodes:", np.std(nodes))
print("Average length:", np.mean(pl))
print("std dev length:", np.std(pl))
print("=====================================================")
return t, pl
def getCase (self, iter):
return self.cases[iter.caseI]
## Write data to file.
# \param filename if None, it uses member Benchmark.crashFile
def writeResume (self, filename = None):
if filename is None: fname = self.crashFile
else: fname = filename
import pickle as pk
        with open (fname, 'wb') as f:
pk.dump(self.cases, f)
pk.dump(self.seedRange, f)
pk.dump(self.iterPerCase, f)
pk.dump(self.current, f)
pk.dump(self.results, f)
    ## In case HPP crashes, the benchmark class writes temporary data to a file.
    # The method will check if the crash file exists and:
    # - if it exists, the benchmarking will be initialized at the state before the crash. No data is lost.
# The crash file is deleted after having been loaded.
# - if it does not exist, the method does nothing.
# \param filename if None, it uses member Benchmark.crashFile
def tryResumeAndDelete (self, filename = None):
if filename is None: fname = self.crashFile
else: fname = filename
import os
if os.path.isfile (fname):
            print("Retrieving data from file", fname)
self.resumeFrom (fname)
os.remove (fname)
def resumeFrom (self, fname):
import pickle as pk
        with open (fname, 'rb') as f:
cases = pk.load(f)
if not cases == self.cases:
print("Cases are different.\nValue in file is :", cases, "\nValue in this instance was:\n", self.cases)
self.cases = cases
seedRange = pk.load(f)
if not seedRange == self.seedRange:
                print("Seed range is different.\nValue in file is :", seedRange, "\nValue in this instance was:\n", self.seedRange)
                self.seedRange = seedRange
iterPerCase = pk.load(f)
if not iterPerCase == self.iterPerCase:
print("Number of iteration per case is different.\nValue in file is :", iterPerCase, "\nValue in this instance was:\n", self.iterPerCase)
self.iterPerCase = iterPerCase
self.current = pk.load(f)
self.results = pk.load(f)
def plotTime(self, axes):
        # Generate data
times = list()
i = 0
nb = self.iterPerCase * len(self.seedRange)
for c in self.cases:
times.append(np.array (self.results['time'][i:i+nb]).dot (self.toSeconds))
i += nb
self._boxplot (axes, times, "Time (s)")
return times
def plotPathLength(self, axes):
        # Generate data
pls = list()
i = 0
nb = self.iterPerCase * len(self.seedRange)
for c in self.cases:
pls.append(np.array (self.results['pathLength'][i:i+nb]))
i += nb
self._boxplot (axes, pls, "Path length")
return pls
def _boxplot(self, axes, datas, ylabel):
import matplotlib.pyplot as plt
# rectangular box plot
bplot = axes.boxplot(datas,
                             vert=True,   # vertical box alignment
patch_artist=True) # fill with color
# adding horizontal grid lines
axes.yaxis.grid(True)
axes.set_xticks([x+1 for x in range(len(self.cases))], )
axes.set_ylabel(ylabel)
# add x-tick labels
plt.setp(axes, xticks=[x+1 for x in range(len(self.cases))],
xticklabels=[str(c) for c in self.cases])
    ## This method creates a database which stores the benchmark results.
    # You can then use it on the platform http://plannerarena.org/ to plot your results.
    # \param nameDatabase the name of the created file (extension must be .db)
    # \param experimentName the name of the current scenario/problem (used when you append several scenarios to the same database)
    # \param nameLogFile the name of the text file written
    # \param append if True, the current results will be appended to the given database.
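    # ## Example ##
    # A minimal usage sketch (assuming `b` is a Benchmark instance on which
    # method do() has already been run; the file and experiment names below are
    # placeholders):
    # \code{.py}
    # b.writeDatabase ("results.db", experimentName = "my_scenario")
    # # later on, append another scenario to the same database:
    # b.writeDatabase ("results.db", experimentName = "other_scenario", append = True)
    # \endcode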
def writeDatabase(self,nameDatabase,experimentName = 'default',nameLogFile = 'temp.log',append = False):
import os
if os.path.isfile(nameLogFile) :
os.remove(nameLogFile)
if not append and os.path.isfile(nameDatabase):
os.remove(nameDatabase)
# write log file :
# write experiment header :
log = open(nameLogFile,'w')
log.write('Experiment '+experimentName+'\n')
log.write('0 experiment properties\n')
p = os.popen('hostname')
log.write('Running on '+p.read())
p = os.popen('date --rfc-3339=seconds')
log.write('Starting at '+p.read())
log.write('<<<| \n')
log.write('Configuration of the experiment : \n')
log.write('Robot name : '+self.robot.displayName+'\n')
log.write('Initial config : '+str(self.ps.getInitialConfig())+'\n')
for qgoal in self.ps.getGoalConfigs():
log.write('Goal config : '+str(qgoal)+'\n')
log.write('Joints bounds : \n')
for jointName in self.robot.allJointNames :
if len(self.robot.client.robot.getJointBounds(jointName)) > 0 :
log.write(jointName+' : '+str(self.robot.client.robot.getJointBounds(jointName))+'\n')
log.write('|>>> \n')
log.write('<<<| \n')
p = os.popen('cat /proc/cpuinfo')
log.write(p.read())
log.write('|>>> \n')
log.write('0 is the random seed\n')
# hardcoded value : time and memory limit for all the benchmark (required by the parser)
log.write('0 seconds per run\n')
log.write('8192 MB per run\n')
nbRuns = len(self.seedRange) * self.iterPerCase
log.write(str(nbRuns)+' runs per planner\n')
t = np.array (self.results['time']).dot (self.toSeconds)
log.write(str(t.sum()) +' seconds spent to collect the data\n')
log.write('1 enum type\n')
log.write('status|Timeout|solved|Crash|Unknown status\n')
log.write(str(len(self.cases))+" planners\n")
i=0
# for each algorithm (case ) :
for c in self.cases:
if c == 'None' :
log.write('Default\n')
else :
log.write(str(c)+'\n') # need a better way to display this
log.write('0 common properties\n')
log.write('6 properties for each run\n')
# solved, status, seed and time are mandatory for the parser
log.write('solved BOOLEAN\n')
log.write('status ENUM\n') # for now it's always 1 (= solved)
log.write('seed INTEGER\n')
            log.write('pathLength INTEGER\n')
log.write('graph_states INTEGER\n')
log.write('time REAL\n')
log.write(str(nbRuns)+' runs\n')
nbSeed = len(self.seedRange)
for s in range(nbSeed):
for j in range(self.iterPerCase):
# write a line for each run
log.write('1; 1; '+str(self.seedRange[s])+'; '+str(self.results['pathLength'][i+j])+'; '+str(self.results['states'][ i+j])+"; "+str(t[i+j])+'; \n')
i += self.iterPerCase
log.write('. \n')
log.close()
# compute the database :
from hpp.corbaserver import ompl_benchmark_statistics as omplBench
omplBench.readBenchmarkLog(nameDatabase,[nameLogFile],False)
omplBench.computeViews(nameDatabase,False)
if nameLogFile=='temp.log' :
os.remove(nameLogFile)
def _printStats(self, times, nodes, pathLengths):
return "Mean time (s): " + str(np.mean(times)) + '\n' \
+ "Std dev time (s): " + str(np.std(times)) + '\n' \
+ "Mean number of nodes: " + str(np.mean(nodes)) + '\n' \
+ "Std dev nb nodes: " + str(np.std(nodes)) + '\n' \
+ "Average length: " + str(np.mean(pathLengths)) + '\n' \
+ "std dev length: " + str(np.std(pathLengths))
def __str__(self):
res = ""
for i in range(len(self.results['time'])):
res += "Time (s): " + str(np.array (self.results['time'][i]).dot (self.toSeconds)) \
+ "\nNumber of nodes: " + str(self.results['states'][i]) \
+ "\nLength: " + str(self.results['pathLength'][i]) + '\n'
times = np.array (self.results['time']).dot (self.toSeconds)
nodes = np.array (self.results['states'])
pathLengths = np.array (self.results['pathLength'])
res += self._printStats(times, nodes, pathLengths)
return res
| lgpl-3.0 |
shusenl/scikit-learn | sklearn/feature_selection/tests/test_rfe.py | 209 | 11733 | """
Testing Recursive feature elimination
"""
import warnings
import numpy as np
from numpy.testing import assert_array_almost_equal, assert_array_equal
from nose.tools import assert_equal, assert_true
from scipy import sparse
from sklearn.feature_selection.rfe import RFE, RFECV
from sklearn.datasets import load_iris, make_friedman1
from sklearn.metrics import zero_one_loss
from sklearn.svm import SVC, SVR
from sklearn.ensemble import RandomForestClassifier
from sklearn.cross_validation import cross_val_score
from sklearn.utils import check_random_state
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_greater
from sklearn.metrics import make_scorer
from sklearn.metrics import get_scorer
class MockClassifier(object):
"""
    Dummy classifier to test recursive feature elimination
"""
def __init__(self, foo_param=0):
self.foo_param = foo_param
def fit(self, X, Y):
assert_true(len(X) == len(Y))
self.coef_ = np.ones(X.shape[1], dtype=np.float64)
return self
def predict(self, T):
return T.shape[0]
predict_proba = predict
decision_function = predict
transform = predict
def score(self, X=None, Y=None):
if self.foo_param > 1:
score = 1.
else:
score = 0.
return score
def get_params(self, deep=True):
return {'foo_param': self.foo_param}
def set_params(self, **params):
return self
def test_rfe_set_params():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = iris.target
clf = SVC(kernel="linear")
rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1)
y_pred = rfe.fit(X, y).predict(X)
clf = SVC()
with warnings.catch_warnings(record=True):
# estimator_params is deprecated
rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1,
estimator_params={'kernel': 'linear'})
y_pred2 = rfe.fit(X, y).predict(X)
assert_array_equal(y_pred, y_pred2)
def test_rfe_features_importance():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = iris.target
clf = RandomForestClassifier(n_estimators=20,
random_state=generator, max_depth=2)
rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1)
rfe.fit(X, y)
assert_equal(len(rfe.ranking_), X.shape[1])
clf_svc = SVC(kernel="linear")
rfe_svc = RFE(estimator=clf_svc, n_features_to_select=4, step=0.1)
rfe_svc.fit(X, y)
# Check if the supports are equal
assert_array_equal(rfe.get_support(), rfe_svc.get_support())
def test_rfe_deprecation_estimator_params():
deprecation_message = ("The parameter 'estimator_params' is deprecated as "
"of version 0.16 and will be removed in 0.18. The "
"parameter is no longer necessary because the "
"value is set via the estimator initialisation or "
"set_params method.")
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = iris.target
assert_warns_message(DeprecationWarning, deprecation_message,
RFE(estimator=SVC(), n_features_to_select=4, step=0.1,
estimator_params={'kernel': 'linear'}).fit,
X=X,
y=y)
assert_warns_message(DeprecationWarning, deprecation_message,
RFECV(estimator=SVC(), step=1, cv=5,
estimator_params={'kernel': 'linear'}).fit,
X=X,
y=y)
def test_rfe():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
X_sparse = sparse.csr_matrix(X)
y = iris.target
# dense model
clf = SVC(kernel="linear")
rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1)
rfe.fit(X, y)
X_r = rfe.transform(X)
clf.fit(X_r, y)
assert_equal(len(rfe.ranking_), X.shape[1])
# sparse model
clf_sparse = SVC(kernel="linear")
rfe_sparse = RFE(estimator=clf_sparse, n_features_to_select=4, step=0.1)
rfe_sparse.fit(X_sparse, y)
X_r_sparse = rfe_sparse.transform(X_sparse)
assert_equal(X_r.shape, iris.data.shape)
assert_array_almost_equal(X_r[:10], iris.data[:10])
assert_array_almost_equal(rfe.predict(X), clf.predict(iris.data))
assert_equal(rfe.score(X, y), clf.score(iris.data, iris.target))
assert_array_almost_equal(X_r, X_r_sparse.toarray())
def test_rfe_mockclassifier():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = iris.target
# dense model
clf = MockClassifier()
rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1)
rfe.fit(X, y)
X_r = rfe.transform(X)
clf.fit(X_r, y)
assert_equal(len(rfe.ranking_), X.shape[1])
assert_equal(X_r.shape, iris.data.shape)
def test_rfecv():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = list(iris.target) # regression test: list should be supported
# Test using the score function
rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5)
rfecv.fit(X, y)
# non-regression test for missing worst feature:
assert_equal(len(rfecv.grid_scores_), X.shape[1])
assert_equal(len(rfecv.ranking_), X.shape[1])
X_r = rfecv.transform(X)
# All the noisy variable were filtered out
assert_array_equal(X_r, iris.data)
# same in sparse
rfecv_sparse = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5)
X_sparse = sparse.csr_matrix(X)
rfecv_sparse.fit(X_sparse, y)
X_r_sparse = rfecv_sparse.transform(X_sparse)
assert_array_equal(X_r_sparse.toarray(), iris.data)
# Test using a customized loss function
scoring = make_scorer(zero_one_loss, greater_is_better=False)
rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5,
scoring=scoring)
ignore_warnings(rfecv.fit)(X, y)
X_r = rfecv.transform(X)
assert_array_equal(X_r, iris.data)
# Test using a scorer
scorer = get_scorer('accuracy')
rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5,
scoring=scorer)
rfecv.fit(X, y)
X_r = rfecv.transform(X)
assert_array_equal(X_r, iris.data)
# Test fix on grid_scores
def test_scorer(estimator, X, y):
return 1.0
rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5,
scoring=test_scorer)
rfecv.fit(X, y)
assert_array_equal(rfecv.grid_scores_, np.ones(len(rfecv.grid_scores_)))
# Same as the first two tests, but with step=2
rfecv = RFECV(estimator=SVC(kernel="linear"), step=2, cv=5)
rfecv.fit(X, y)
assert_equal(len(rfecv.grid_scores_), 6)
assert_equal(len(rfecv.ranking_), X.shape[1])
X_r = rfecv.transform(X)
assert_array_equal(X_r, iris.data)
rfecv_sparse = RFECV(estimator=SVC(kernel="linear"), step=2, cv=5)
X_sparse = sparse.csr_matrix(X)
rfecv_sparse.fit(X_sparse, y)
X_r_sparse = rfecv_sparse.transform(X_sparse)
assert_array_equal(X_r_sparse.toarray(), iris.data)
def test_rfecv_mockclassifier():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = list(iris.target) # regression test: list should be supported
# Test using the score function
rfecv = RFECV(estimator=MockClassifier(), step=1, cv=5)
rfecv.fit(X, y)
# non-regression test for missing worst feature:
assert_equal(len(rfecv.grid_scores_), X.shape[1])
assert_equal(len(rfecv.ranking_), X.shape[1])
def test_rfe_estimator_tags():
rfe = RFE(SVC(kernel='linear'))
assert_equal(rfe._estimator_type, "classifier")
# make sure that cross-validation is stratified
iris = load_iris()
score = cross_val_score(rfe, iris.data, iris.target)
assert_greater(score.min(), .7)
def test_rfe_min_step():
n_features = 10
X, y = make_friedman1(n_samples=50, n_features=n_features, random_state=0)
n_samples, n_features = X.shape
estimator = SVR(kernel="linear")
# Test when floor(step * n_features) <= 0
selector = RFE(estimator, step=0.01)
sel = selector.fit(X, y)
assert_equal(sel.support_.sum(), n_features // 2)
# Test when step is between (0,1) and floor(step * n_features) > 0
selector = RFE(estimator, step=0.20)
sel = selector.fit(X, y)
assert_equal(sel.support_.sum(), n_features // 2)
# Test when step is an integer
selector = RFE(estimator, step=5)
sel = selector.fit(X, y)
assert_equal(sel.support_.sum(), n_features // 2)
def test_number_of_subsets_of_features():
# In RFE, 'number_of_subsets_of_features'
# = the number of iterations in '_fit'
# = max(ranking_)
# = 1 + (n_features + step - n_features_to_select - 1) // step
# After optimization #4534, this number
# = 1 + np.ceil((n_features - n_features_to_select) / float(step))
# This test case is to test their equivalence, refer to #4534 and #3824
def formula1(n_features, n_features_to_select, step):
return 1 + ((n_features + step - n_features_to_select - 1) // step)
def formula2(n_features, n_features_to_select, step):
return 1 + np.ceil((n_features - n_features_to_select) / float(step))
# RFE
# Case 1, n_features - n_features_to_select is divisible by step
# Case 2, n_features - n_features_to_select is not divisible by step
n_features_list = [11, 11]
n_features_to_select_list = [3, 3]
step_list = [2, 3]
for n_features, n_features_to_select, step in zip(
n_features_list, n_features_to_select_list, step_list):
generator = check_random_state(43)
X = generator.normal(size=(100, n_features))
y = generator.rand(100).round()
rfe = RFE(estimator=SVC(kernel="linear"),
n_features_to_select=n_features_to_select, step=step)
rfe.fit(X, y)
# this number also equals to the maximum of ranking_
assert_equal(np.max(rfe.ranking_),
formula1(n_features, n_features_to_select, step))
assert_equal(np.max(rfe.ranking_),
formula2(n_features, n_features_to_select, step))
# In RFECV, 'fit' calls 'RFE._fit'
# 'number_of_subsets_of_features' of RFE
# = the size of 'grid_scores' of RFECV
# = the number of iterations of the for loop before optimization #4534
# RFECV, n_features_to_select = 1
# Case 1, n_features - 1 is divisible by step
# Case 2, n_features - 1 is not divisible by step
n_features_to_select = 1
n_features_list = [11, 10]
step_list = [2, 2]
for n_features, step in zip(n_features_list, step_list):
generator = check_random_state(43)
X = generator.normal(size=(100, n_features))
y = generator.rand(100).round()
rfecv = RFECV(estimator=SVC(kernel="linear"), step=step, cv=5)
rfecv.fit(X, y)
assert_equal(rfecv.grid_scores_.shape[0],
formula1(n_features, n_features_to_select, step))
assert_equal(rfecv.grid_scores_.shape[0],
formula2(n_features, n_features_to_select, step))
| bsd-3-clause |
rjw57/vagrant-ipython | ipython/profile_default/ipython_console_config.py | 1 | 21268 | # Configuration file for ipython-console.
c = get_config()
#------------------------------------------------------------------------------
# ZMQTerminalIPythonApp configuration
#------------------------------------------------------------------------------
# ZMQTerminalIPythonApp will inherit config from: TerminalIPythonApp,
# BaseIPythonApplication, Application, InteractiveShellApp, IPythonConsoleApp,
# ConnectionFileMixin
# The Logging format template
# c.ZMQTerminalIPythonApp.log_format = '[%(name)s]%(highlevel)s %(message)s'
# List of files to run at IPython startup.
# c.ZMQTerminalIPythonApp.exec_files = []
# set the stdin (ROUTER) port [default: random]
# c.ZMQTerminalIPythonApp.stdin_port = 0
# set the iopub (PUB) port [default: random]
# c.ZMQTerminalIPythonApp.iopub_port = 0
# Execute the given command string.
# c.ZMQTerminalIPythonApp.code_to_run = ''
# The name of the IPython directory. This directory is used for logging
# configuration (through profiles), history storage, etc. The default is usually
# $HOME/.ipython. This option can also be specified through the environment
# variable IPYTHONDIR.
# c.ZMQTerminalIPythonApp.ipython_dir = ''
# A file to be run
# c.ZMQTerminalIPythonApp.file_to_run = ''
# Run the module as a script.
# c.ZMQTerminalIPythonApp.module_to_run = ''
# Whether to display a banner upon starting IPython.
# c.ZMQTerminalIPythonApp.display_banner = True
# Enable GUI event loop integration with any of ('glut', 'gtk', 'gtk3', 'osx',
# 'pyglet', 'qt', 'qt5', 'tk', 'wx').
# c.ZMQTerminalIPythonApp.gui = None
# Start IPython quickly by skipping the loading of config files.
# c.ZMQTerminalIPythonApp.quick = False
# Set the log level by value or name.
# c.ZMQTerminalIPythonApp.log_level = 30
# Path to the ssh key to use for logging in to the ssh server.
# c.ZMQTerminalIPythonApp.sshkey = ''
# The SSH server to use to connect to the kernel.
# c.ZMQTerminalIPythonApp.sshserver = ''
# lines of code to run at IPython startup.
# c.ZMQTerminalIPythonApp.exec_lines = []
# Pre-load matplotlib and numpy for interactive use, selecting a particular
# matplotlib backend and loop integration.
# c.ZMQTerminalIPythonApp.pylab = None
# Path to an extra config file to load.
#
# If specified, load this config file in addition to any other IPython config.
# c.ZMQTerminalIPythonApp.extra_config_file = ''
# set the shell (ROUTER) port [default: random]
# c.ZMQTerminalIPythonApp.shell_port = 0
# The IPython profile to use.
# c.ZMQTerminalIPythonApp.profile = 'default'
# JSON file in which to store connection info [default: kernel-<pid>.json]
#
# This file will contain the IP, ports, and authentication key needed to connect
# clients to this kernel. By default, this file will be created in the security
# dir of the current profile, but can be specified by absolute path.
# c.ZMQTerminalIPythonApp.connection_file = ''
# Run the file referenced by the PYTHONSTARTUP environment variable at IPython
# startup.
# c.ZMQTerminalIPythonApp.exec_PYTHONSTARTUP = True
# Should variables loaded at startup (by startup files, exec_lines, etc.) be
# hidden from tools like %who?
# c.ZMQTerminalIPythonApp.hide_initial_ns = True
# Whether to install the default config files into the profile dir. If a new
# profile is being created, and IPython contains config files for that profile,
# then they will be staged into the new directory. Otherwise, default config
# files will be automatically generated.
# c.ZMQTerminalIPythonApp.copy_config_files = False
# Whether to overwrite existing config files when copying
# c.ZMQTerminalIPythonApp.overwrite = False
# Configure matplotlib for interactive use with the default matplotlib backend.
# c.ZMQTerminalIPythonApp.matplotlib = None
# Set to display confirmation dialog on exit. You can always use 'exit' or
# 'quit', to force a direct exit without any confirmation.
# c.ZMQTerminalIPythonApp.confirm_exit = True
# If a command or file is given via the command-line, e.g. 'ipython foo.py',
# start an interactive shell after executing the file or command.
# c.ZMQTerminalIPythonApp.force_interact = False
#
# c.ZMQTerminalIPythonApp.transport = 'tcp'
# set the control (ROUTER) port [default: random]
# c.ZMQTerminalIPythonApp.control_port = 0
# dotted module name of an IPython extension to load.
# c.ZMQTerminalIPythonApp.extra_extension = ''
# Set the kernel's IP address [default localhost]. If the IP address is
# something other than localhost, then Consoles on other machines will be able
# to connect to the Kernel, so be careful!
# c.ZMQTerminalIPythonApp.ip = ''
# If true, IPython will populate the user namespace with numpy, pylab, etc. and
# an ``import *`` is done from numpy and pylab, when using pylab mode.
#
# When False, pylab mode should not import any names into the user namespace.
# c.ZMQTerminalIPythonApp.pylab_import_all = True
# set the heartbeat port [default: random]
# c.ZMQTerminalIPythonApp.hb_port = 0
# The date format used by logging formatters for %(asctime)s
# c.ZMQTerminalIPythonApp.log_datefmt = '%Y-%m-%d %H:%M:%S'
# The name of the default kernel to start.
# c.ZMQTerminalIPythonApp.kernel_name = 'python'
# Create a massive crash report when IPython encounters what may be an internal
# error. The default is to append a short message to the usual traceback
# c.ZMQTerminalIPythonApp.verbose_crash = False
# A list of dotted module names of IPython extensions to load.
# c.ZMQTerminalIPythonApp.extensions = []
# Connect to an already running kernel
# c.ZMQTerminalIPythonApp.existing = ''
# Suppress warning messages about legacy config files
# c.ZMQTerminalIPythonApp.ignore_old_config = False
#------------------------------------------------------------------------------
# ZMQTerminalInteractiveShell configuration
#------------------------------------------------------------------------------
# A subclass of TerminalInteractiveShell that uses the 0MQ kernel
# ZMQTerminalInteractiveShell will inherit config from:
# TerminalInteractiveShell, InteractiveShell
# Set the editor used by IPython (default to $EDITOR/vi/notepad).
# c.ZMQTerminalInteractiveShell.editor = 'vi'
# 'all', 'last', 'last_expr' or 'none', specifying which nodes should be run
# interactively (displaying output from expressions).
# c.ZMQTerminalInteractiveShell.ast_node_interactivity = 'last_expr'
# Set the color scheme (NoColor, Linux, or LightBG).
# c.ZMQTerminalInteractiveShell.colors = 'Linux'
# Autoindent IPython code entered interactively.
# c.ZMQTerminalInteractiveShell.autoindent = True
# Whether to include output from clients other than this one sharing the same
# kernel.
#
# Outputs are not displayed until enter is pressed.
# c.ZMQTerminalInteractiveShell.include_other_output = False
# A list of ast.NodeTransformer subclass instances, which will be applied to
# user input before code is run.
# c.ZMQTerminalInteractiveShell.ast_transformers = []
#
# c.ZMQTerminalInteractiveShell.readline_parse_and_bind = ['tab: complete', '"\\C-l": clear-screen', 'set show-all-if-ambiguous on', '"\\C-o": tab-insert', '"\\C-r": reverse-search-history', '"\\C-s": forward-search-history', '"\\C-p": history-search-backward', '"\\C-n": history-search-forward', '"\\e[A": history-search-backward', '"\\e[B": history-search-forward', '"\\C-k": kill-line', '"\\C-u": unix-line-discard']
# Enable deep (recursive) reloading by default. IPython can use the deep_reload
# module which reloads changes in modules recursively (it replaces the reload()
# function, so you don't need to change anything to use it). deep_reload()
# forces a full reload of modules whose code may have changed, which the default
# reload() function does not. When deep_reload is off, IPython will use the
# normal reload(), but deep_reload will still be available as dreload().
# c.ZMQTerminalInteractiveShell.deep_reload = False
# Save multi-line entries as one entry in readline history
# c.ZMQTerminalInteractiveShell.multiline_history = True
# Callable object called via 'callable' image handler with one argument, `data`,
# which is `msg["content"]["data"]` where `msg` is the message from iopub
# channel. For example, you can find base64 encoded PNG data as
# `data['image/png']`.
# c.ZMQTerminalInteractiveShell.callable_image_handler = None
#
# c.ZMQTerminalInteractiveShell.separate_out2 = ''
# Don't call post-execute functions that have failed in the past.
# c.ZMQTerminalInteractiveShell.disable_failing_post_execute = False
# Start logging to the given file in append mode.
# c.ZMQTerminalInteractiveShell.logappend = ''
#
# c.ZMQTerminalInteractiveShell.separate_in = '\n'
#
# c.ZMQTerminalInteractiveShell.readline_use = True
# Make IPython automatically call any callable object even if you didn't type
# explicit parentheses. For example, 'str 43' becomes 'str(43)' automatically.
# The value can be '0' to disable the feature, '1' for 'smart' autocall, where
# it is not applied if there are no more arguments on the line, and '2' for
# 'full' autocall, where all callable objects are automatically called (even if
# no arguments are present).
# c.ZMQTerminalInteractiveShell.autocall = 0
# Enable auto setting the terminal title.
# c.ZMQTerminalInteractiveShell.term_title = False
# Automatically call the pdb debugger after every exception.
# c.ZMQTerminalInteractiveShell.pdb = False
# Deprecated, use PromptManager.in_template
# c.ZMQTerminalInteractiveShell.prompt_in1 = 'In [\\#]: '
# The part of the banner to be printed after the profile
# c.ZMQTerminalInteractiveShell.banner2 = ''
#
# c.ZMQTerminalInteractiveShell.wildcards_case_sensitive = True
#
# c.ZMQTerminalInteractiveShell.readline_remove_delims = '-/~'
# Deprecated, use PromptManager.out_template
# c.ZMQTerminalInteractiveShell.prompt_out = 'Out[\\#]: '
#
# c.ZMQTerminalInteractiveShell.xmode = 'Context'
# Use colors for displaying information about objects. Because this information
# is passed through a pager (like 'less'), and some pagers get confused with
# color codes, this capability can be turned off.
# c.ZMQTerminalInteractiveShell.color_info = True
# Command to invoke an image viewer program when you are using 'stream' image
# handler. This option is a list of strings where the first element is the
# command itself and the remaining elements are the options for the command. Raw image data
# is given as STDIN to the program.
# c.ZMQTerminalInteractiveShell.stream_image_handler = []
# Set the size of the output cache. The default is 1000, you can change it
# permanently in your config file. Setting it to 0 completely disables the
# caching system, and the minimum value accepted is 20 (if you provide a value
# less than 20, it is reset to 0 and a warning is issued). This limit is
# defined because otherwise you'll spend more time re-flushing a too small cache
# than working
# c.ZMQTerminalInteractiveShell.cache_size = 1000
#
# c.ZMQTerminalInteractiveShell.ipython_dir = ''
# Command to invoke an image viewer program when you are using 'tempfile' image
# handler. This option is a list of strings where the first element is the
# command itself and the remaining elements are the options for the command. You can use
# {file} and {format} in the string to represent the location of the generated
# image file and image format.
# c.ZMQTerminalInteractiveShell.tempfile_image_handler = []
# Deprecated, use PromptManager.in2_template
# c.ZMQTerminalInteractiveShell.prompt_in2 = ' .\\D.: '
# Enable magic commands to be called without the leading %.
# c.ZMQTerminalInteractiveShell.automagic = True
#
# c.ZMQTerminalInteractiveShell.separate_out = ''
# Timeout for giving up on a kernel (in seconds).
#
# On first connect and restart, the console tests whether the kernel is running
# and responsive by sending kernel_info_requests. This sets the timeout in
# seconds for how long the kernel can take before being presumed dead.
# c.ZMQTerminalInteractiveShell.kernel_timeout = 60
# Deprecated, use PromptManager.justify
# c.ZMQTerminalInteractiveShell.prompts_pad_left = True
# The shell program to be used for paging.
# c.ZMQTerminalInteractiveShell.pager = 'less'
# The name of the logfile to use.
# c.ZMQTerminalInteractiveShell.logfile = ''
# If True, anything that would be passed to the pager will be displayed as
# regular output instead.
# c.ZMQTerminalInteractiveShell.display_page = False
# auto editing of files with syntax errors.
# c.ZMQTerminalInteractiveShell.autoedit_syntax = False
# Handler for image type output. This is useful, for example, when connecting
# to the kernel in which pylab inline backend is activated. There are four
# handlers defined. 'PIL': Use Python Imaging Library to popup image; 'stream':
# Use an external program to show the image. Image will be fed into the STDIN
# of the program. You will need to configure `stream_image_handler`;
# 'tempfile': Use an external program to show the image. Image will be saved in
# a temporary file and the program is called with that temporary file. You
# will need to configure `tempfile_image_handler`; 'callable': You can set any
# Python callable which is called with the image data. You will need to
# configure `callable_image_handler`.
# c.ZMQTerminalInteractiveShell.image_handler = None
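# Illustrative example (not a shipped default): to pipe images to an external
# viewer that can read PNG data from standard input, such as ImageMagick's
# `display`, one could set something like:
# c.ZMQTerminalInteractiveShell.image_handler = 'stream'
# c.ZMQTerminalInteractiveShell.stream_image_handler = ['display', '-']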
# Show rewritten input, e.g. for autocall.
# c.ZMQTerminalInteractiveShell.show_rewritten_input = True
# The part of the banner to be printed before the profile
# c.ZMQTerminalInteractiveShell.banner1 = 'Python 3.4.0 (default, Apr 11 2014, 13:05:18) \nType "copyright", "credits" or "license" for more information.\n\nIPython 3.0.0-rc1 -- An enhanced Interactive Python.\n? -> Introduction and overview of IPython\'s features.\n%quickref -> Quick reference.\nhelp -> Python\'s own help system.\nobject? -> Details about \'object\', use \'object??\' for extra details.\n'
# Set to confirm when you try to exit IPython with an EOF (Control-D in Unix,
# Control-Z/Enter in Windows). By typing 'exit' or 'quit', you can force a
# direct exit without any confirmation.
# c.ZMQTerminalInteractiveShell.confirm_exit = True
# Preferred object representation MIME type in order. First matched MIME type
# will be used.
# c.ZMQTerminalInteractiveShell.mime_preference = ['image/png', 'image/jpeg', 'image/svg+xml']
#
# c.ZMQTerminalInteractiveShell.history_length = 10000
#
# c.ZMQTerminalInteractiveShell.object_info_string_level = 0
#
# c.ZMQTerminalInteractiveShell.debug = False
# Start logging to the default log file.
# c.ZMQTerminalInteractiveShell.logstart = False
# Number of lines of your screen, used to control printing of very long strings.
# Strings longer than this number of lines will be sent through a pager instead
# of directly printed. The default value for this is 0, which means IPython
# will auto-detect your screen size every time it needs to print certain
# potentially long strings (this doesn't change the behavior of the 'print'
# keyword, it's only triggered internally). If for some reason this isn't
# working well (it needs curses support), specify it yourself. Otherwise don't
# change the default.
# c.ZMQTerminalInteractiveShell.screen_length = 0
# Prefix to add to outputs coming from clients other than this one.
#
# Only relevant if include_other_output is True.
# c.ZMQTerminalInteractiveShell.other_output_prefix = '[remote] '
#
# c.ZMQTerminalInteractiveShell.quiet = False
#------------------------------------------------------------------------------
# KernelManager configuration
#------------------------------------------------------------------------------
# Manages a single kernel in a subprocess on this host.
#
# This version starts kernels with Popen.
# KernelManager will inherit config from: ConnectionFileMixin
#
# c.KernelManager.transport = 'tcp'
# set the shell (ROUTER) port [default: random]
# c.KernelManager.shell_port = 0
# Set the kernel's IP address [default localhost]. If the IP address is
# something other than localhost, then Consoles on other machines will be able
# to connect to the Kernel, so be careful!
# c.KernelManager.ip = ''
# set the stdin (ROUTER) port [default: random]
# c.KernelManager.stdin_port = 0
# JSON file in which to store connection info [default: kernel-<pid>.json]
#
# This file will contain the IP, ports, and authentication key needed to connect
# clients to this kernel. By default, this file will be created in the security
# dir of the current profile, but can be specified by absolute path.
# c.KernelManager.connection_file = ''
# set the heartbeat port [default: random]
# c.KernelManager.hb_port = 0
# set the iopub (PUB) port [default: random]
# c.KernelManager.iopub_port = 0
# set the control (ROUTER) port [default: random]
# c.KernelManager.control_port = 0
# Should we autorestart the kernel if it dies.
# c.KernelManager.autorestart = False
# DEPRECATED: Use kernel_name instead.
#
# The Popen Command to launch the kernel. Override this if you have a custom
# kernel. If kernel_cmd is specified in a configuration file, IPython does not
# pass any arguments to the kernel, because it cannot make any assumptions about
# the arguments that the kernel understands. In particular, this means that the
# kernel does not receive the option --debug if it given on the IPython command
# line.
# c.KernelManager.kernel_cmd = []
#------------------------------------------------------------------------------
# ProfileDir configuration
#------------------------------------------------------------------------------
# An object to manage the profile directory and its resources.
#
# The profile directory is used by all IPython applications, to manage
# configuration, logging and security.
#
# This object knows how to find, create and manage these directories. This
# should be used by any code that wants to handle profiles.
# Set the profile location directly. This overrides the logic used by the
# `profile` option.
# c.ProfileDir.location = ''
#------------------------------------------------------------------------------
# Session configuration
#------------------------------------------------------------------------------
# Object for handling serialization and sending of messages.
#
# The Session object handles building messages and sending them with ZMQ sockets
# or ZMQStream objects. Objects can communicate with each other over the
# network via Session objects, and only need to work with the dict-based IPython
# message spec. The Session will handle serialization/deserialization, security,
# and metadata.
#
# Sessions support configurable serialization via packer/unpacker traits, and
# signing with HMAC digests via the key/keyfile traits.
#
# Parameters
# ----------
#
# debug : bool
# whether to trigger extra debugging statements
# packer/unpacker : str : 'json', 'pickle' or import_string
# importstrings for methods to serialize message parts. If just
# 'json' or 'pickle', predefined JSON and pickle packers will be used.
# Otherwise, the entire importstring must be used.
#
# The functions must accept at least valid JSON input, and output *bytes*.
#
# For example, to use msgpack:
# packer = 'msgpack.packb', unpacker='msgpack.unpackb'
# pack/unpack : callables
# You can also set the pack/unpack callables for serialization directly.
# session : bytes
# the ID of this Session object. The default is to generate a new UUID.
# username : unicode
# username added to message headers. The default is to ask the OS.
# key : bytes
# The key used to initialize an HMAC signature. If unset, messages
# will not be signed or checked.
# keyfile : filepath
# The file containing a key. If this is set, `key` will be initialized
# to the contents of the file.
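# For example (illustrative, mirroring the msgpack note above), one could set:
# c.Session.packer = 'msgpack.packb'
# c.Session.unpacker = 'msgpack.unpackb'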
# The UUID identifying this session.
# c.Session.session = ''
# Threshold (in bytes) beyond which an object's buffer should be extracted to
# avoid pickling.
# c.Session.buffer_threshold = 1024
# The maximum number of items for a container to be introspected for custom
# serialization. Containers larger than this are pickled outright.
# c.Session.item_threshold = 64
# The name of the unpacker for unserializing messages. Only used with custom
# functions for `packer`.
# c.Session.unpacker = 'json'
# path to file containing execution key.
# c.Session.keyfile = ''
# Metadata dictionary, which serves as the default top-level metadata dict for
# each message.
# c.Session.metadata = {}
# Debug output in the Session
# c.Session.debug = False
# The digest scheme used to construct the message signatures. Must have the form
# 'hmac-HASH'.
# c.Session.signature_scheme = 'hmac-sha256'
# The name of the packer for serializing messages. Should be one of 'json',
# 'pickle', or an import name for a custom callable serializer.
# c.Session.packer = 'json'
# Username for the Session. Default is your system username.
# c.Session.username = 'vagrant'
# The maximum number of digests to remember.
#
# The digest history will be culled when it exceeds this value.
# c.Session.digest_history_size = 65536
# execution key, for extra authentication.
# c.Session.key = b''
# Threshold (in bytes) beyond which a buffer should be sent without copying.
# c.Session.copy_threshold = 65536
| mit |
mrcslws/htmresearch | projects/l2_pooling/capacity_test.py | 3 | 60277 | # Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2016, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
This file tests the capacity of L4-L2 columns
In this test, we consider a set of objects without any shared (feature,
location) pairs and without any noise. One or more L4-L2 columns are trained
on all objects.
In the test phase, we randomly pick a (feature, location) SDR, feed it to
the network, and ask whether the correct object can be retrieved.
"""
import argparse
import multiprocessing
import os
import os.path
from pprint import pprint
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import numpy as np
import pandas as pd
from htmresearch.frameworks.layers.object_machine_factory import (
createObjectMachine
)
from htmresearch.frameworks.layers.l2_l4_inference import L4L2Experiment
import matplotlib as mpl
mpl.rcParams['pdf.fonttype'] = 42
DEFAULT_NUM_LOCATIONS = 5000
DEFAULT_NUM_FEATURES = 5000
DEFAULT_RESULT_DIR_NAME = "results"
DEFAULT_PLOT_DIR_NAME = "plots"
DEFAULT_NUM_CORTICAL_COLUMNS = 1
DEFAULT_COLORS = ("b", "r", "c", "g", 'm', 'y', 'w', 'k')
def _prepareResultsDir(resultBaseName, resultDirName=DEFAULT_RESULT_DIR_NAME):
"""
Ensures that the requested resultDirName exists. Attempt to create it if not.
Returns the combined absolute path to result.
"""
resultDirName = os.path.abspath(resultDirName)
resultFileName = os.path.join(resultDirName, resultBaseName)
try:
if not os.path.isdir(resultDirName):
# Directory does not exist, attempt to create recursively
os.makedirs(resultDirName)
except os.error:
# Unlikely, but directory may have been created already. Double check to
# make sure it's safe to ignore error in creation
if not os.path.isdir(resultDirName):
raise Exception("Unable to create results directory at {}"
.format(resultDirName))
return resultFileName
def getL4Params():
"""
Returns a good default set of parameters to use in the L4 region.
"""
return {
"columnCount": 150,
"cellsPerColumn": 16,
"learn": True,
"learnOnOneCell": False,
"initialPermanence": 0.51,
"connectedPermanence": 0.6,
"permanenceIncrement": 0.1,
"permanenceDecrement": 0.02,
"minThreshold": 10,
"basalPredictedSegmentDecrement": 0.0,
"activationThreshold": 13,
"sampleSize": 25,
"implementation": "ApicalTiebreakCPP",
}
def getL2Params():
"""
  Returns a good default set of parameters to use in the L2 region.
"""
return {
"inputWidth": 2048 * 8,
"cellCount": 4096,
"sdrSize": 40,
"synPermProximalInc": 0.1,
"synPermProximalDec": 0.001,
"initialProximalPermanence": 0.6,
"minThresholdProximal": 6,
"sampleSizeProximal": 10,
"connectedPermanenceProximal": 0.5,
"synPermDistalInc": 0.1,
"synPermDistalDec": 0.001,
"initialDistalPermanence": 0.41,
"activationThresholdDistal": 18,
"sampleSizeDistal": 20,
"connectedPermanenceDistal": 0.5,
"distalSegmentInhibitionFactor": 0.6667,
"learningMode": True,
}
def createRandomObjects(numObjects,
numPointsPerObject,
numLocations,
numFeatures):
"""
Create numObjects with non-shared (feature, location) pairs
:param numObjects: number of objects
:param numPointsPerObject: number of (feature, location) pairs per object
:param numLocations: number of unique locations
:param numFeatures: number of unique features
  :return: list(list(tuple)). List of lists of feature / location pairs.
"""
requiredFeatureLocPairs = numObjects * numPointsPerObject
uniqueFeatureLocPairs = numLocations * numFeatures
if requiredFeatureLocPairs > uniqueFeatureLocPairs:
raise RuntimeError("Not Enough Feature Location Pairs")
randomPairIdx = np.random.choice(
np.arange(uniqueFeatureLocPairs),
numObjects * numPointsPerObject,
replace=False
)
randomFeatureLocPairs = (divmod(idx, numFeatures) for idx in randomPairIdx)
# Return sequences of random feature-location pairs. Each sequence will
# contain a number of pairs defined by 'numPointsPerObject'
return zip(*[iter(randomFeatureLocPairs)] * numPointsPerObject)
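# Note on the grouping idiom above: zip(*[iter(seq)] * n) slices a flat
# iterable into consecutive n-tuples, e.g. in Python 2:
#   zip(*[iter([1, 2, 3, 4, 5, 6])] * 3)  # -> [(1, 2, 3), (4, 5, 6)]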
def createRandomObjectsSharedPairs(numObjects,
numPointsPerObject,
numLocations,
numFeatures):
"""
Create numObjects. (feature, location) pairs may be shared.
:param numObjects: number of objects
:param numPointsPerObject: number of (feature, location) pairs per object
:param numLocations: number of unique locations
:param numFeatures: number of unique features
  :return: list(list(tuple)). List of lists of feature / location pairs.
"""
locations = np.arange(numLocations)
features = np.arange(numFeatures)
objects = []
objectsSets = set()
for _ in xrange(numObjects):
objectLocations = np.random.choice(locations, numPointsPerObject,
replace=False)
objectFeatures = np.random.choice(features, numPointsPerObject,
replace=True)
o = zip(objectLocations, objectFeatures)
# Make sure this is a unique object.
objectAsSet = frozenset(o)
assert objectAsSet not in objectsSets
objectsSets.add(objectAsSet)
objects.append(o)
return objects
def testNetworkWithOneObject(objects, exp, testObject, numTestPoints):
"""
Check whether a trained L4-L2 network can successfully retrieve an object
based on a sequence of (feature, location) pairs on this object
:param objects: list of lists of (feature, location) pairs for all objects
:param exp: L4L2Experiment instance with a trained network
:param testObject: the index for the object being tested
:param numTestPoints: number of test points on the test object
  :return: (overlap, numL2ActiveCells, numL4ActiveCells) -- per-step overlaps with every learned object and the average L2 / L4 activation sizes
"""
innerObjs = objects.getObjects()
numObjects = len(innerObjs)
numPointsPerObject = len(innerObjs[0])
testPts = np.random.choice(np.arange(numPointsPerObject),
(numTestPoints,),
replace=False)
testPairs = [objects[testObject][i] for i in testPts]
exp._unsetLearningMode()
exp.sendReset()
overlap = np.zeros((numTestPoints, numObjects))
numL2ActiveCells = np.zeros((numTestPoints))
numL4ActiveCells = np.zeros((numTestPoints))
for step, pair in enumerate(testPairs):
(locationIdx, featureIdx) = pair
for colIdx in xrange(exp.numColumns):
feature = objects.features[colIdx][featureIdx]
location = objects.locations[colIdx][locationIdx]
exp.sensorInputs[colIdx].addDataToQueue(list(feature), 0, 0)
exp.externalInputs[colIdx].addDataToQueue(list(location), 0, 0)
exp.network.run(1)
for colIdx in xrange(exp.numColumns):
numL2ActiveCells[step] += float(len(exp.getL2Representations()[colIdx]))
numL4ActiveCells[step] += float(len(exp.getL4Representations()[colIdx]))
numL2ActiveCells[step] /= exp.numColumns
numL4ActiveCells[step] /= exp.numColumns
overlapByColumn = exp.getCurrentObjectOverlaps()
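    # Average the per-column overlaps with every learned object for this step.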
overlap[step] = np.mean(overlapByColumn, axis=0)
# columnPooler = exp.L2Columns[0]._pooler
# tm = exp.L4Columns[0]._tm
# print "step : {}".format(step)
# print "{} L4 cells predicted : ".format(
# len(exp.getL4PredictedCells()[0])), exp.getL4PredictedeCells()
# print "{} L4 cells active : ".format(len(exp.getL4Representations()[0])), exp.getL4Representations()
# print "L2 activation: ", columnPooler.getActiveCells()
# print "overlap : ", (overlap[step, :])
return overlap, numL2ActiveCells, numL4ActiveCells
def testOnSingleRandomSDR(objects, exp, numRepeats=100, repeatID=0):
"""
Test a trained L4-L2 network on (feature, location) pairs multiple times
Compute object retrieval accuracy, overlap with the correct and incorrect
objects
:param objects: list of lists of (feature, location) pairs for all objects
:param exp: L4L2Experiment instance with a trained network
:param numRepeats: number of repeats
:return: a set of metrics for retrieval accuracy
"""
innerObjs = objects.getObjects()
numObjects = len(innerObjs)
numPointsPerObject = len(innerObjs[0])
overlapTrueObj = np.zeros((numRepeats,))
l2ActivationSize = np.zeros((numRepeats,))
l4ActivationSize = np.zeros((numRepeats,))
confusion = overlapTrueObj.copy()
outcome = overlapTrueObj.copy()
columnPooler = exp.L2Columns[0]._pooler
numConnectedProximal = columnPooler.numberOfConnectedProximalSynapses()
numConnectedDistal = columnPooler.numberOfConnectedDistalSynapses()
result = None
for i in xrange(numRepeats):
targetObject = np.random.choice(np.arange(numObjects))
nonTargetObjs = np.array(
[obj for obj in xrange(numObjects) if obj != targetObject]
)
overlap, numActiveL2Cells, numL4ActiveCells = testNetworkWithOneObject(
objects,
exp,
targetObject,
10
)
lastOverlap = overlap[-1, :]
maxOverlapIndices = (
np.where(lastOverlap == lastOverlap[np.argmax(lastOverlap)])[0].tolist()
)
# Only set to 1 iff target object is the lone max overlap index. Otherwise
# the network failed to conclusively identify the target object.
outcome[i] = 1 if maxOverlapIndices == [targetObject] else 0
confusion[i] = np.max(lastOverlap[nonTargetObjs])
overlapTrueObj[i] = lastOverlap[targetObject]
l2ActivationSize[i] = numActiveL2Cells[-1]
l4ActivationSize[i] = numL4ActiveCells[-1]
# print "repeat {} target obj {} overlap {}".format(i, targetObject, overlapTrueObj[i])
testResult = {
"repeatID": repeatID,
"repeatI": i,
"numberOfConnectedProximalSynapses": numConnectedProximal,
"numberOfConnectedDistalSynapses": numConnectedDistal,
"numObjects": numObjects,
"numPointsPerObject": numPointsPerObject,
"l2ActivationSize": l2ActivationSize[i],
"l4ActivationSize": l4ActivationSize[i],
"confusion": confusion[i],
"accuracy": outcome[i],
"overlapTrueObj": overlapTrueObj[i]}
result = (
pd.concat([result, pd.DataFrame.from_dict([testResult])])
if result is not None else
pd.DataFrame.from_dict([testResult])
)
return result
def plotResults(result, ax=None, xaxis="numObjects",
filename=None, marker='-bo', confuseThresh=30, showErrBar=1):
if ax is None:
fig, ax = plt.subplots(2, 2)
numRpts = max(result['repeatID']) + 1
resultsRpts = result.groupby(['repeatID'])
if xaxis == "numPointsPerObject":
x = np.array(resultsRpts.get_group(0).numPointsPerObject)
xlabel = "# Pts / Obj"
x = np.unique(x)
d = resultsRpts.get_group(0)
d = d.groupby(['numPointsPerObject'])
elif xaxis == "numObjects":
x = np.array(resultsRpts.get_group(0).numObjects)
xlabel = "Object #"
x = np.unique(x)
d = resultsRpts.get_group(0)
d = d.groupby(['numObjects'])
accuracy = np.zeros((1, len(x),))
numberOfConnectedProximalSynapses = np.zeros((1, len(x),))
l2ActivationSize = np.zeros((1, len(x),))
confusion = np.zeros((1, len(x),))
for j in range(len(x)):
accuracy[0,j] = np.sum(np.logical_and(d.get_group(x[j]).accuracy == 1,
d.get_group(x[j]).confusion < confuseThresh)) / \
float(len(d.get_group(x[j]).accuracy))
l2ActivationSize[0,j] = np.mean(d.get_group(x[j]).l2ActivationSize)
confusion[0,j] = np.mean(d.get_group(x[j]).confusion)
numberOfConnectedProximalSynapses[0,j] = np.mean(
d.get_group(x[j]).numberOfConnectedProximalSynapses)
if ax is None:
fig, ax = plt.subplots(2, 2)
for rpt in range(1, numRpts):
d = resultsRpts.get_group(rpt)
d = d.groupby(['numObjects'])
accuracyRpt = np.zeros((1, len(x)))
numberOfConnectedProximalSynapsesRpt = np.zeros((1, len(x)))
l2ActivationSizeRpt = np.zeros((1, len(x)))
confusionRpt = np.zeros((1, len(x)))
for j in range(len(x)):
accuracyRpt[0,j] = np.sum(np.logical_and(
d.get_group(x[j]).accuracy == 1,
d.get_group(x[j]).confusion < confuseThresh)) / \
float(len(d.get_group(x[j]).accuracy))
l2ActivationSizeRpt[0,j] = np.mean(d.get_group(x[j]).l2ActivationSize)
confusionRpt[0,j] = np.mean(d.get_group(x[j]).confusion)
numberOfConnectedProximalSynapsesRpt[0,j] = np.mean(
d.get_group(x[j]).numberOfConnectedProximalSynapses)
accuracy = np.vstack((accuracy, accuracyRpt))
confusion = np.vstack((confusion, confusionRpt))
l2ActivationSize = np.vstack((l2ActivationSize, l2ActivationSizeRpt))
numberOfConnectedProximalSynapses = np.vstack((
numberOfConnectedProximalSynapses, numberOfConnectedProximalSynapsesRpt))
if showErrBar==0:
s = 0
else:
s = 1
ax[0, 0].errorbar(x, np.mean(accuracy, 0), yerr=np.std(accuracy, 0)*s,
color=marker)
ax[0, 0].set_ylabel("Accuracy")
ax[0, 0].set_xlabel(xlabel)
ax[0, 0].set_ylim([0.1, 1.05])
ax[0, 0].set_xlim([0, 820])
ax[0, 1].errorbar(x, np.mean(numberOfConnectedProximalSynapses, 0),
yerr=np.std(numberOfConnectedProximalSynapses, 0)*s, color=marker)
ax[0, 1].set_ylabel("# connected proximal synapses")
ax[0, 1].set_xlabel("# Pts / Obj")
ax[0, 1].set_xlabel(xlabel)
ax[0, 1].set_xlim([0, 820])
ax[1, 0].errorbar(x, np.mean(l2ActivationSize, 0), yerr=np.std(l2ActivationSize, 0)*s, color=marker)
ax[1, 0].set_ylabel("l2ActivationSize")
# ax[1, 0].set_ylim([0, 41])
ax[1, 0].set_xlabel(xlabel)
ax[1, 0].set_xlim([0, 820])
ax[1, 1].errorbar(x, np.mean(confusion, 0), yerr=np.std(confusion, 0)*s, color=marker)
ax[1, 1].set_ylabel("OverlapFalseObject")
ax[1, 1].set_ylim([0, 41])
ax[1, 1].set_xlabel(xlabel)
ax[1, 1].set_xlim([0, 820])
plt.tight_layout()
if filename is not None:
plt.savefig(filename)
def runCapacityTest(numObjects,
numPointsPerObject,
numCorticalColumns,
l2Params,
l4Params,
objectParams,
networkType = "MultipleL4L2Columns",
repeat=0):
"""
Generate [numObjects] objects with [numPointsPerObject] points per object
  Train the L4-L2 network on all the objects with single-pass learning
  Test on (feature, location) pairs and compute retrieval accuracy
  :param numObjects: number of objects to learn
  :param numPointsPerObject: number of (feature, location) pairs per object
  :param numCorticalColumns: number of cortical columns in the network
  :param l2Params: parameter overrides for the L2 regions
  :param l4Params: parameter overrides for the L4 regions
  :param objectParams: parameters controlling object / SDR generation
  :param networkType: network type passed to L4L2Experiment
  :param repeat: repeat index recorded in the results
  :return: pandas DataFrame with one row per test trial
"""
l4ColumnCount = l4Params["columnCount"]
numInputBits = objectParams['numInputBits']
externalInputSize = objectParams['externalInputSize']
if numInputBits is None:
numInputBits = int(l4ColumnCount * 0.02)
numLocations = objectParams["numLocations"]
numFeatures = objectParams["numFeatures"]
objects = createObjectMachine(
machineType="simple",
numInputBits=numInputBits,
sensorInputSize=l4ColumnCount,
externalInputSize=externalInputSize,
numCorticalColumns=numCorticalColumns,
numLocations=numLocations,
numFeatures=numFeatures
)
exp = L4L2Experiment("capacity_two_objects",
numInputBits=numInputBits,
L2Overrides=l2Params,
L4Overrides=l4Params,
inputSize=l4ColumnCount,
networkType = networkType,
externalInputSize=externalInputSize,
numLearningPoints=3,
numCorticalColumns=numCorticalColumns,
objectNamesAreIndices=True)
if objectParams["uniquePairs"]:
pairs = createRandomObjects(
numObjects,
numPointsPerObject,
numLocations,
numFeatures
)
else:
pairs = createRandomObjectsSharedPairs(
numObjects,
numPointsPerObject,
numLocations,
numFeatures
)
for object in pairs:
objects.addObject(object)
exp.learnObjects(objects.provideObjectsToLearn())
testResult = testOnSingleRandomSDR(objects, exp, 100, repeat)
return testResult
def runCapacityTestVaryingObjectSize(
numObjects=2,
numCorticalColumns=DEFAULT_NUM_CORTICAL_COLUMNS,
resultDirName=DEFAULT_RESULT_DIR_NAME,
expName=None,
cpuCount=None,
l2Params=None,
l4Params=None,
objectParams=None):
"""
Runs experiment with two objects, varying number of points per object
"""
result = None
cpuCount = cpuCount or multiprocessing.cpu_count()
pool = multiprocessing.Pool(cpuCount, maxtasksperchild=1)
l4Params = l4Params or getL4Params()
l2Params = l2Params or getL2Params()
params = [(numObjects,
numPointsPerObject,
numCorticalColumns,
l2Params,
l4Params,
objectParams,
0)
for numPointsPerObject in np.arange(10, 160, 20)]
for testResult in pool.map(invokeRunCapacityTest, params):
result = (
pd.concat([result, testResult])
if result is not None else testResult
)
resultFileName = _prepareResultsDir(
"{}.csv".format(expName),
resultDirName=resultDirName
)
pd.DataFrame.to_csv(result, resultFileName)
def invokeRunCapacityTest(params):
""" Splits out params so that runCapacityTest may be invoked with
multiprocessing.Pool.map() to support parallelism
"""
return runCapacityTest(*params)
def runCapacityTestVaryingObjectNum(numPointsPerObject=10,
numCorticalColumns=DEFAULT_NUM_CORTICAL_COLUMNS,
resultDirName=DEFAULT_RESULT_DIR_NAME,
expName=None,
cpuCount=None,
l2Params=None,
l4Params=None,
objectParams=None,
networkType="MultipleL4L2Columns",
numRpts=1):
"""
Run experiment with fixed number of pts per object, varying number of objects
"""
l4Params = l4Params or getL4Params()
l2Params = l2Params or getL2Params()
cpuCount = cpuCount or multiprocessing.cpu_count()
pool = multiprocessing.Pool(cpuCount, maxtasksperchild=1)
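  # Sweep the number of learned objects from 50 to 1250 in steps of 100; each setting is repeated numRpts times.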
numObjectsList = np.arange(50, 1300, 100)
params = []
for rpt in range(numRpts):
for numObjects in numObjectsList:
params.append((numObjects,
numPointsPerObject,
numCorticalColumns,
l2Params,
l4Params,
objectParams,
networkType,
rpt))
result = None
for testResult in pool.map(invokeRunCapacityTest, params):
result = (
pd.concat([result, testResult])
if result is not None else testResult
)
resultFileName = _prepareResultsDir("{}.csv".format(expName),
resultDirName=resultDirName)
pd.DataFrame.to_csv(result, resultFileName)
def runCapacityTestWrapperNonParallel(numPointsPerObject=10,
numCorticalColumns=DEFAULT_NUM_CORTICAL_COLUMNS,
resultDirName=DEFAULT_RESULT_DIR_NAME,
numObjects = 100,
expName=None,
l2Params=None,
l4Params=None,
objectParams=None,
networkType="MultipleL4L2Columns",
rpt=0):
"""
Run experiment with fixed number of pts per object, varying number of objects
"""
l4Params = l4Params or getL4Params()
l2Params = l2Params or getL2Params()
testResult = runCapacityTest(numObjects,
numPointsPerObject,
numCorticalColumns,
l2Params,
l4Params,
objectParams,
networkType,
rpt)
resultFileName = _prepareResultsDir("{}.csv".format(expName),
resultDirName=resultDirName)
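  # Append to an existing CSV (header only on the first write) so repeated non-parallel runs accumulate in one file.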
if os.path.isfile(resultFileName):
pd.DataFrame.to_csv(testResult, resultFileName, mode = "a", header = False)
else:
pd.DataFrame.to_csv(testResult, resultFileName, mode = "a", header = True)
def invokeRunCapacityTestWrapper(params):
""" Splits out params so that runCapacityTest may be invoked with
multiprocessing.Pool.map() to support parallelism
"""
return runCapacityTestWrapperNonParallel(*params)
def runExperiment1(numObjects=2,
sampleSizeRange=(10,),
numCorticalColumns=DEFAULT_NUM_CORTICAL_COLUMNS,
resultDirName=DEFAULT_RESULT_DIR_NAME,
plotDirName=DEFAULT_PLOT_DIR_NAME,
cpuCount=None):
"""
Varying number of pts per objects, two objects
Try different sample sizes
"""
objectParams = {'numInputBits': 20,
'externalInputSize': 2400,
'numFeatures': DEFAULT_NUM_FEATURES,
'numLocations': DEFAULT_NUM_LOCATIONS,
'uniquePairs': True,}
l4Params = getL4Params()
l2Params = getL2Params()
numInputBits = objectParams['numInputBits']
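  # Scale L4 thresholds with input sparsity: activation/min thresholds at 60% of the active bits, sample size at twice the activation threshold.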
l4Params["activationThreshold"] = int(numInputBits * .6)
l4Params["minThreshold"] = int(numInputBits * .6)
l4Params["sampleSize"] = int(2 * l4Params["activationThreshold"])
for sampleSize in sampleSizeRange:
print "sampleSize: {}".format(sampleSize)
l2Params['sampleSizeProximal'] = sampleSize
expName = "capacity_varying_object_size_synapses_{}".format(sampleSize)
runCapacityTestVaryingObjectSize(numObjects,
numCorticalColumns,
resultDirName,
expName,
cpuCount,
l2Params,
l4Params,
objectParams=objectParams)
ploti = 0
fig, ax = plt.subplots(2, 2)
st = fig.suptitle(
"Varying points per object x 2 objects ({} cortical column{})"
.format(numCorticalColumns, "s" if numCorticalColumns > 1 else ""
), fontsize="x-large")
legendEntries = []
for sampleSize in sampleSizeRange:
expName = "capacity_varying_object_size_synapses_{}".format(sampleSize)
resultFileName = _prepareResultsDir("{}.csv".format(expName),
resultDirName=resultDirName
)
result = pd.read_csv(resultFileName)
plotResults(result, ax, "numPointsPerObject", None, DEFAULT_COLORS[ploti])
ploti += 1
legendEntries.append("# sample size {}".format(sampleSize))
plt.legend(legendEntries, loc=2)
fig.tight_layout()
# shift subplots down:
st.set_y(0.95)
fig.subplots_adjust(top=0.85)
plt.savefig(
os.path.join(
plotDirName,
"capacity_varying_object_size_summary.pdf"
)
)
def runExperiment2(numCorticalColumns=DEFAULT_NUM_CORTICAL_COLUMNS,
resultDirName=DEFAULT_RESULT_DIR_NAME,
plotDirName=DEFAULT_PLOT_DIR_NAME,
cpuCount=None):
"""
runCapacityTestVaryingObjectNum()
Try different sample sizes
"""
sampleSizeRange = (10,)
numPointsPerObject = 10
l4Params = getL4Params()
l2Params = getL2Params()
objectParams = {'numInputBits': 20,
'externalInputSize': 2400,
'numFeatures': DEFAULT_NUM_FEATURES,
'numLocations': DEFAULT_NUM_LOCATIONS,
'uniquePairs': True,}
for sampleSize in sampleSizeRange:
print "sampleSize: {}".format(sampleSize)
l2Params['sampleSizeProximal'] = sampleSize
expName = "capacity_varying_object_num_synapses_{}".format(sampleSize)
print "l4Params: "
pprint(l4Params)
print "l2Params: "
pprint(l2Params)
runCapacityTestVaryingObjectNum(numPointsPerObject,
numCorticalColumns,
resultDirName,
expName,
cpuCount,
l2Params,
l4Params,
objectParams)
# plot result
ploti = 0
fig, ax = plt.subplots(2, 2)
st = fig.suptitle(
"Varying number of objects ({} cortical column{})"
.format(numCorticalColumns, "s" if numCorticalColumns > 1 else ""
), fontsize="x-large"
)
for axi in (0, 1):
for axj in (0, 1):
ax[axi][axj].xaxis.set_major_locator(ticker.MultipleLocator(50))
legendEntries = []
for sampleSize in sampleSizeRange:
print "sampleSize: {}".format(sampleSize)
l2Params['sampleSizeProximal'] = sampleSize
expName = "capacity_varying_object_num_synapses_{}".format(sampleSize)
resultFileName = _prepareResultsDir(
"{}.csv".format(expName),
resultDirName=resultDirName
)
result = pd.read_csv(resultFileName)
plotResults(result, ax, "numObjects", None, DEFAULT_COLORS[ploti])
ploti += 1
legendEntries.append("# sample size {}".format(sampleSize))
ax[0, 0].legend(legendEntries, loc=4, fontsize=8)
fig.tight_layout()
# shift subplots down:
st.set_y(0.95)
fig.subplots_adjust(top=0.85)
plt.savefig(
os.path.join(
plotDirName,
"capacity_varying_object_num_summary.pdf"
)
)
def runExperiment3(numCorticalColumns=DEFAULT_NUM_CORTICAL_COLUMNS,
resultDirName=DEFAULT_RESULT_DIR_NAME,
plotDirName=DEFAULT_PLOT_DIR_NAME,
cpuCount=None):
"""
runCapacityTestVaryingObjectNum()
Try different L4 network size
"""
numPointsPerObject = 10
numRpts = 1
l4Params = getL4Params()
l2Params = getL2Params()
l2Params['cellCount'] = 4096
l2Params['sdrSize'] = 40
expParams = []
expParams.append(
{'l4Column': 150, 'externalInputSize': 2400, 'w': 20, 'sample': 6,
'thresh': 3})
expParams.append(
{'l4Column': 200, 'externalInputSize': 2400, 'w': 20, 'sample': 6,
'thresh': 3})
expParams.append(
{'l4Column': 250, 'externalInputSize': 2400, 'w': 20, 'sample': 6,
'thresh': 3})
for expParam in expParams:
l4Params["columnCount"] = expParam['l4Column']
numInputBits = expParam['w']
l2Params['sampleSizeProximal'] = expParam['sample']
l2Params['minThresholdProximal'] = expParam['thresh']
l4Params["activationThreshold"] = int(numInputBits * .6)
l4Params["minThreshold"] = int(numInputBits * .6)
l4Params["sampleSize"] = int(2 * l4Params["activationThreshold"])
objectParams = {'numInputBits': numInputBits,
'externalInputSize': expParam['externalInputSize'],
'numFeatures': DEFAULT_NUM_FEATURES,
'numLocations': DEFAULT_NUM_LOCATIONS,
'uniquePairs': True,}
print "l4Params: "
pprint(l4Params)
print "l2Params: "
pprint(l2Params)
expname = "multiple_column_capacity_varying_object_num_synapses_{}_thresh_{}_l4column_{}".format(
expParam['sample'], expParam['thresh'], expParam["l4Column"])
runCapacityTestVaryingObjectNum(numPointsPerObject,
numCorticalColumns,
resultDirName,
expname,
cpuCount,
l2Params,
l4Params,
objectParams,
numRpts)
# plot result
ploti = 0
fig, ax = plt.subplots(2, 2)
st = fig.suptitle(
"Varying number of objects ({} cortical column{})"
.format(numCorticalColumns, "s" if numCorticalColumns > 1 else ""
), fontsize="x-large"
)
for axi in (0, 1):
for axj in (0, 1):
ax[axi][axj].xaxis.set_major_locator(ticker.MultipleLocator(100))
legendEntries = []
for expParam in expParams:
expname = "multiple_column_capacity_varying_object_num_synapses_{}_thresh_{}_l4column_{}".format(
expParam['sample'], expParam['thresh'], expParam["l4Column"])
resultFileName = _prepareResultsDir("{}.csv".format(expname),
resultDirName=resultDirName
)
result = pd.read_csv(resultFileName)
plotResults(result, ax, "numObjects", None, DEFAULT_COLORS[ploti])
ploti += 1
legendEntries.append("L4 mcs {} w {} s {} thresh {}".format(
expParam["l4Column"], expParam['w'], expParam['sample'],
expParam['thresh']))
ax[0, 0].legend(legendEntries, loc=4, fontsize=8)
fig.tight_layout()
# shift subplots down:
st.set_y(0.95)
fig.subplots_adjust(top=0.85)
plt.savefig(
os.path.join(
plotDirName,
"capacity_varying_object_num_l4size_summary.pdf"
)
)
def runExperiment4(resultDirName=DEFAULT_RESULT_DIR_NAME,
plotDirName=DEFAULT_PLOT_DIR_NAME,
cpuCount=None):
"""
runCapacityTestVaryingObjectNum()
varying number of cortical columns
"""
numPointsPerObject = 10
numRpts = 1
l4Params = getL4Params()
l2Params = getL2Params()
expParams = []
expParams.append(
{'l4Column': 150, 'externalInputSize': 2400, 'w': 20, 'sample': 6,
'thresh': 3, 'l2Column': 1})
expParams.append(
{'l4Column': 150, 'externalInputSize': 2400, 'w': 20, 'sample': 6,
'thresh': 3, 'l2Column': 2})
expParams.append(
{'l4Column': 150, 'externalInputSize': 2400, 'w': 20, 'sample': 6,
'thresh': 3, 'l2Column': 3})
for expParam in expParams:
l2Params['sampleSizeProximal'] = expParam['sample']
l2Params['minThresholdProximal'] = expParam['thresh']
l4Params["columnCount"] = expParam['l4Column']
numInputBits = expParam['w']
numCorticalColumns = expParam['l2Column']
l4Params["activationThreshold"] = int(numInputBits * .6)
l4Params["minThreshold"] = int(numInputBits * .6)
l4Params["sampleSize"] = int(2 * l4Params["activationThreshold"])
objectParams = {'numInputBits': numInputBits,
'externalInputSize': expParam['externalInputSize'],
'numFeatures': DEFAULT_NUM_FEATURES,
'numLocations': DEFAULT_NUM_LOCATIONS,
'uniquePairs': True,}
print "l4Params: "
pprint(l4Params)
print "l2Params: "
pprint(l2Params)
expName = "multiple_column_capacity_varying_object_num_synapses_{}_thresh_{}_l4column_{}_l2column_{}".format(
expParam['sample'], expParam['thresh'], expParam["l4Column"],
expParam['l2Column'])
runCapacityTestVaryingObjectNum(numPointsPerObject,
numCorticalColumns,
resultDirName,
expName,
cpuCount,
l2Params,
l4Params,
objectParams,
numRpts)
# plot result
ploti = 0
fig, ax = plt.subplots(2, 2)
st = fig.suptitle("Varying number of objects", fontsize="x-large")
for axi in (0, 1):
for axj in (0, 1):
ax[axi][axj].xaxis.set_major_locator(ticker.MultipleLocator(100))
legendEntries = []
for expParam in expParams:
expName = "multiple_column_capacity_varying_object_num_synapses_{}_thresh_{}_l4column_{}_l2column_{}".format(
expParam['sample'], expParam['thresh'], expParam["l4Column"],
expParam['l2Column'])
resultFileName = _prepareResultsDir("{}.csv".format(expName),
resultDirName=resultDirName
)
result = pd.read_csv(resultFileName)
plotResults(result, ax, "numObjects", None, DEFAULT_COLORS[ploti])
ploti += 1
legendEntries.append("L4 mcs {} #cc {} ".format(
expParam['l4Column'], expParam['l2Column']))
ax[0, 0].legend(legendEntries, loc=3, fontsize=8)
fig.tight_layout()
# shift subplots down:
st.set_y(0.95)
fig.subplots_adjust(top=0.85)
plt.savefig(
os.path.join(
plotDirName,
"multiple_column_capacity_varying_object_num_and_column_num_summary.pdf"
)
)
def runExperiment5(resultDirName=DEFAULT_RESULT_DIR_NAME,
plotDirName=DEFAULT_PLOT_DIR_NAME,
cpuCount=None):
"""
varying size of L2
calculate capacity by varying number of objects with fixed size
"""
numPointsPerObject = 10
numRpts = 1
numInputBits = 20
externalInputSize = 2400
numL4MiniColumns = 150
l4Params = getL4Params()
l2Params = getL2Params()
expParams = []
expParams.append(
{'L2cellCount': 2048, 'L2activeBits': 40, 'w': 20, 'sample': 6, 'thresh': 3,
'l2Column': 1})
expParams.append(
{'L2cellCount': 4096, 'L2activeBits': 40, 'w': 20, 'sample': 6, 'thresh': 3,
'l2Column': 1})
expParams.append(
{'L2cellCount': 6144, 'L2activeBits': 40, 'w': 20, 'sample': 6, 'thresh': 3,
'l2Column': 1})
for expParam in expParams:
l2Params['sampleSizeProximal'] = expParam['sample']
l2Params['minThresholdProximal'] = expParam['thresh']
l2Params['cellCount'] = expParam['L2cellCount']
l2Params['sdrSize'] = expParam['L2activeBits']
numCorticalColumns = expParam['l2Column']
l4Params["columnCount"] = numL4MiniColumns
l4Params["activationThreshold"] = int(numInputBits * .6)
l4Params["minThreshold"] = int(numInputBits * .6)
l4Params["sampleSize"] = int(2 * l4Params["activationThreshold"])
objectParams = {'numInputBits': numInputBits,
'externalInputSize': externalInputSize,
'numFeatures': DEFAULT_NUM_FEATURES,
'numLocations': DEFAULT_NUM_LOCATIONS,
'uniquePairs': True,}
print "l4Params: "
pprint(l4Params)
print "l2Params: "
pprint(l2Params)
expName = "multiple_column_capacity_varying_object_num_synapses_{}_thresh_{}_l2Cells_{}_l2column_{}".format(
expParam['sample'], expParam['thresh'], expParam["L2cellCount"],
expParam['l2Column'])
runCapacityTestVaryingObjectNum(numPointsPerObject,
numCorticalColumns,
resultDirName,
expName,
cpuCount,
l2Params,
l4Params,
objectParams,
numRpts)
# plot result
ploti = 0
fig, ax = plt.subplots(2, 2)
st = fig.suptitle("Varying number of objects", fontsize="x-large")
for axi in (0, 1):
for axj in (0, 1):
ax[axi][axj].xaxis.set_major_locator(ticker.MultipleLocator(100))
legendEntries = []
for expParam in expParams:
expName = "multiple_column_capacity_varying_object_num_synapses_{}_thresh_{}_l2Cells_{}_l2column_{}".format(
expParam['sample'], expParam['thresh'], expParam["L2cellCount"],
expParam['l2Column'])
resultFileName = _prepareResultsDir("{}.csv".format(expName),
resultDirName=resultDirName
)
result = pd.read_csv(resultFileName)
plotResults(result, ax, "numObjects", None, DEFAULT_COLORS[ploti])
ploti += 1
legendEntries.append("L2 cells {}/{} #cc {} ".format(
expParam['L2activeBits'], expParam['L2cellCount'], expParam['l2Column']))
ax[0, 0].legend(legendEntries, loc=3, fontsize=8)
fig.tight_layout()
# shift subplots down:
st.set_y(0.95)
fig.subplots_adjust(top=0.85)
plt.savefig(
os.path.join(
plotDirName,
"capacity_vs_L2size.pdf"
)
)
def runExperiment6(resultDirName=DEFAULT_RESULT_DIR_NAME,
plotDirName=DEFAULT_PLOT_DIR_NAME,
cpuCount=None):
"""
  Varying sparsity of L2 (number of active cells per SDR);
  calculate capacity by varying the number of objects of fixed size
"""
numPointsPerObject = 10
numRpts = 5
numInputBits = 10
externalInputSize = 2400
numL4MiniColumns = 150
l4Params = getL4Params()
l2Params = getL2Params()
expParams = []
expParams.append(
{'L2cellCount': 4096, 'L2activeBits': 10, 'w': 10, 'sample': 6, 'thresh': 3,
'l2Column': 1})
expParams.append(
{'L2cellCount': 4096, 'L2activeBits': 20, 'w': 10, 'sample': 6, 'thresh': 3,
'l2Column': 1})
expParams.append(
{'L2cellCount': 4096, 'L2activeBits': 40, 'w': 10, 'sample': 6, 'thresh': 3,
'l2Column': 1})
expParams.append(
{'L2cellCount': 4096, 'L2activeBits': 80, 'w': 10, 'sample': 6, 'thresh': 3,
'l2Column': 1})
for expParam in expParams:
l2Params['sampleSizeProximal'] = expParam['sample']
l2Params['minThresholdProximal'] = expParam['thresh']
l2Params['cellCount'] = expParam['L2cellCount']
l2Params['sdrSize'] = expParam['L2activeBits']
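    # Tie the distal (lateral) sample size and activation threshold to the L2 SDR size: half the active cells, threshold one less.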
l2Params['sampleSizeDistal'] = int(l2Params['sdrSize'] / 2)
l2Params['activationThresholdDistal'] = int(l2Params['sdrSize'] / 2) - 1
numCorticalColumns = expParam['l2Column']
l4Params["columnCount"] = numL4MiniColumns
l4Params["activationThreshold"] = int(numInputBits * .6)
l4Params["minThreshold"] = int(numInputBits * .6)
l4Params["sampleSize"] = int(2 * l4Params["activationThreshold"])
objectParams = {'numInputBits': numInputBits,
'externalInputSize': externalInputSize,
'numFeatures': DEFAULT_NUM_FEATURES,
'numLocations': DEFAULT_NUM_LOCATIONS,
'uniquePairs': True,}
print "l4Params: "
pprint(l4Params)
print "l2Params: "
pprint(l2Params)
expName = "multiple_column_capacity_varying_object_sdrSize_{}_l2Cells_{}_l2column_{}".format(
expParam['L2activeBits'], expParam["L2cellCount"], expParam['l2Column'])
runCapacityTestVaryingObjectNum(numPointsPerObject,
numCorticalColumns,
resultDirName,
expName,
cpuCount,
l2Params,
l4Params,
objectParams,
numRpts)
# plot result
ploti = 0
fig, ax = plt.subplots(2, 2)
st = fig.suptitle("Varying number of objects", fontsize="x-large")
for axi in (0, 1):
for axj in (0, 1):
ax[axi][axj].xaxis.set_major_locator(ticker.MultipleLocator(100))
legendEntries = []
for expParam in expParams:
expName = "multiple_column_capacity_varying_object_sdrSize_{}_l2Cells_{}_l2column_{}".format(
expParam['L2activeBits'], expParam["L2cellCount"], expParam['l2Column'])
resultFileName = _prepareResultsDir("{}.csv".format(expName),
resultDirName=resultDirName
)
result = pd.read_csv(resultFileName)
plotResults(result, ax, "numObjects", None, DEFAULT_COLORS[ploti])
ploti += 1
legendEntries.append("L2 cells {}/{} #cc {} ".format(
expParam['L2activeBits'], expParam['L2cellCount'], expParam['l2Column']))
ax[0, 0].legend(legendEntries, loc=3, fontsize=8)
fig.tight_layout()
# shift subplots down:
st.set_y(0.95)
fig.subplots_adjust(top=0.85)
plt.savefig(
os.path.join(
plotDirName,
"capacity_vs_L2_sparsity.pdf"
)
)
def runExperiment7(numCorticalColumns=DEFAULT_NUM_CORTICAL_COLUMNS,
resultDirName=DEFAULT_RESULT_DIR_NAME,
plotDirName=DEFAULT_PLOT_DIR_NAME,
cpuCount=None):
"""
runCapacityTestVaryingObjectNum()
Try different numLocations
"""
numPointsPerObject = 10
numRpts = 1
l4Params = getL4Params()
l2Params = getL2Params()
l2Params['cellCount'] = 4096
l2Params['sdrSize'] = 40
expParams = [
{'l4Column': 150, 'externalInputSize': 2400, 'w': 20, 'sample': 6,
'thresh': 3, 'numFeatures': 500, 'numLocations': 16},
{'l4Column': 150, 'externalInputSize': 2400, 'w': 20, 'sample': 6,
'thresh': 3, 'numFeatures': 500, 'numLocations': 128},
{'l4Column': 150, 'externalInputSize': 2400, 'w': 20, 'sample': 6,
'thresh': 3, 'numFeatures': 500, 'numLocations': 1000},
{'l4Column': 150, 'externalInputSize': 2400, 'w': 20, 'sample': 6,
'thresh': 3, 'numFeatures': 500, 'numLocations': 5000},
]
for expParam in expParams:
l4Params["columnCount"] = expParam['l4Column']
numInputBits = expParam['w']
l2Params['sampleSizeProximal'] = expParam['sample']
l2Params['minThresholdProximal'] = expParam['thresh']
l4Params["activationThreshold"] = int(numInputBits * .6)
l4Params["minThreshold"] = int(numInputBits * .6)
l4Params["sampleSize"] = int(2 * l4Params["activationThreshold"])
objectParams = {
'numInputBits': numInputBits,
'externalInputSize': expParam['externalInputSize'],
'numFeatures': expParam['numFeatures'],
'numLocations': expParam['numLocations'],
'uniquePairs': False
}
print "l4Params: "
pprint(l4Params)
print "l2Params: "
pprint(l2Params)
expname = "multiple_column_capacity_varying_object_num_locations_{}_num_features_{}_l4column_{}".format(
expParam['numLocations'], expParam['numFeatures'], expParam["l4Column"])
runCapacityTestVaryingObjectNum(numPointsPerObject,
numCorticalColumns,
resultDirName,
expname,
cpuCount,
l2Params,
l4Params,
objectParams,
numRpts)
# plot result
ploti = 0
fig, ax = plt.subplots(2, 2)
st = fig.suptitle(
"Varying number of objects ({} cortical column{})"
.format(numCorticalColumns, "s" if numCorticalColumns > 1 else ""
), fontsize="x-large"
)
for axi in (0, 1):
for axj in (0, 1):
ax[axi][axj].xaxis.set_major_locator(ticker.MultipleLocator(100))
legendEntries = []
for expParam in expParams:
expname = "multiple_column_capacity_varying_object_num_locations_{}_num_features_{}_l4column_{}".format(
expParam['numLocations'], expParam['numFeatures'], expParam["l4Column"])
resultFileName = _prepareResultsDir("{}.csv".format(expname),
resultDirName=resultDirName
)
result = pd.read_csv(resultFileName)
plotResults(result, ax, "numObjects", None, DEFAULT_COLORS[ploti])
ploti += 1
legendEntries.append("L4 mcs {} locs {} feats {}".format(
expParam["l4Column"], expParam['numLocations'], expParam['numFeatures']))
ax[0, 0].legend(legendEntries, loc=3, fontsize=8)
fig.tight_layout()
# shift subplots down:
st.set_y(0.95)
fig.subplots_adjust(top=0.85)
plt.savefig(
os.path.join(
plotDirName,
"capacity_varying_object_num_locations_num_summary.pdf"
)
)
def runExperiment8(numCorticalColumns=DEFAULT_NUM_CORTICAL_COLUMNS,
resultDirName=DEFAULT_RESULT_DIR_NAME,
plotDirName=DEFAULT_PLOT_DIR_NAME,
cpuCount=None):
"""
runCapacityTestVaryingObjectNum()
Try different numFeatures
"""
numPointsPerObject = 10
numRpts = 1
l4Params = getL4Params()
l2Params = getL2Params()
l2Params['cellCount'] = 4096
l2Params['sdrSize'] = 40
expParams = [
{'l4Column': 150, 'externalInputSize': 2400, 'w': 20, 'sample': 6,
'thresh': 3, 'numFeatures': 15, 'numLocations': 128},
{'l4Column': 150, 'externalInputSize': 2400, 'w': 20, 'sample': 6,
'thresh': 3, 'numFeatures': 150, 'numLocations': 128},
{'l4Column': 150, 'externalInputSize': 2400, 'w': 20, 'sample': 6,
'thresh': 3, 'numFeatures': 500, 'numLocations': 128},
{'l4Column': 150, 'externalInputSize': 2400, 'w': 20, 'sample': 6,
'thresh': 3, 'numFeatures': 5000, 'numLocations': 128},
]
for expParam in expParams:
l4Params["columnCount"] = expParam['l4Column']
numInputBits = expParam['w']
l2Params['sampleSizeProximal'] = expParam['sample']
l2Params['minThresholdProximal'] = expParam['thresh']
l4Params["activationThreshold"] = int(numInputBits * .6)
l4Params["minThreshold"] = int(numInputBits * .6)
l4Params["sampleSize"] = int(2 * l4Params["activationThreshold"])
objectParams = {
'numInputBits': numInputBits,
'externalInputSize': expParam['externalInputSize'],
'numFeatures': expParam['numFeatures'],
'numLocations': expParam['numLocations'],
'uniquePairs': False
}
print "l4Params: "
pprint(l4Params)
print "l2Params: "
pprint(l2Params)
expname = "multiple_column_capacity_varying_object_num_locations_{}_num_features_{}_l4column_{}".format(
expParam['numLocations'], expParam['numFeatures'], expParam["l4Column"])
runCapacityTestVaryingObjectNum(numPointsPerObject,
numCorticalColumns,
resultDirName,
expname,
cpuCount,
l2Params,
l4Params,
objectParams,
numRpts)
# plot result
ploti = 0
fig, ax = plt.subplots(2, 2)
st = fig.suptitle(
"Varying number of objects ({} cortical column{})"
.format(numCorticalColumns, "s" if numCorticalColumns > 1 else ""
), fontsize="x-large"
)
for axi in (0, 1):
for axj in (0, 1):
ax[axi][axj].xaxis.set_major_locator(ticker.MultipleLocator(100))
legendEntries = []
for expParam in expParams:
expname = "multiple_column_capacity_varying_object_num_locations_{}_num_features_{}_l4column_{}".format(
expParam['numLocations'], expParam['numFeatures'], expParam["l4Column"])
resultFileName = _prepareResultsDir("{}.csv".format(expname),
resultDirName=resultDirName
)
result = pd.read_csv(resultFileName)
plotResults(result, ax, "numObjects", None, DEFAULT_COLORS[ploti])
ploti += 1
legendEntries.append("L4 mcs {} locs {} feats {}".format(
expParam["l4Column"], expParam['numLocations'], expParam['numFeatures']))
ax[0, 0].legend(legendEntries, loc=3, fontsize=8)
fig.tight_layout()
# shift subplots down:
st.set_y(0.95)
fig.subplots_adjust(top=0.85)
plt.savefig(
os.path.join(
plotDirName,
"capacity_varying_object_num_features_num_summary.pdf"
)
)
def runExperiment9(resultDirName=DEFAULT_RESULT_DIR_NAME,
plotDirName=DEFAULT_PLOT_DIR_NAME,
cpuCount=None):
"""
runCapacityTestVaryingObjectNum()
varying number of cortical columns, 2d topology.
"""
numPointsPerObject = 10
numRpts = 3
objectNumRange = range(10, 1000, 50)
l4Params = getL4Params()
l2Params = getL2Params()
expParams = []
expParams.append(
{'l4Column': 150, 'externalInputSize': 2400, 'w': 20, 'sample': 6,
'thresh': 3, 'l2Column': 4, 'networkType': "MultipleL4L2Columns"})
expParams.append(
{'l4Column': 150, 'externalInputSize': 2400, 'w': 20, 'sample': 6,
'thresh': 3, 'l2Column': 9, 'networkType': "MultipleL4L2Columns"})
expParams.append(
{'l4Column': 150, 'externalInputSize': 2400, 'w': 20, 'sample': 6,
'thresh': 3, 'l2Column': 16, 'networkType': "MultipleL4L2Columns"})
expParams.append(
{'l4Column': 150, 'externalInputSize': 2400, 'w': 20, 'sample': 6,
'thresh': 3, 'l2Column': 4, 'networkType': "MultipleL4L2ColumnsWithTopology"})
expParams.append(
{'l4Column': 150, 'externalInputSize': 2400, 'w': 20, 'sample': 6,
'thresh': 3, 'l2Column': 9, 'networkType': "MultipleL4L2ColumnsWithTopology"})
expParams.append(
{'l4Column': 150, 'externalInputSize': 2400, 'w': 20, 'sample': 6,
'thresh': 3, 'l2Column': 16, 'networkType': "MultipleL4L2ColumnsWithTopology"})
run_params = []
for object_num in reversed(objectNumRange):
for expParam in expParams:
for rpt in range(numRpts):
l2Params['sampleSizeProximal'] = expParam['sample']
l2Params['minThresholdProximal'] = expParam['thresh']
l4Params["columnCount"] = expParam['l4Column']
numInputBits = expParam['w']
numCorticalColumns = expParam['l2Column']
networkType = expParam['networkType']
l4Params["activationThreshold"] = int(numInputBits * .6)
l4Params["minThreshold"] = int(numInputBits * .6)
l4Params["sampleSize"] = int(2 * l4Params["activationThreshold"])
objectParams = {'numInputBits': numInputBits,
'externalInputSize': expParam['externalInputSize'],
'numFeatures': DEFAULT_NUM_FEATURES,
'numLocations': DEFAULT_NUM_LOCATIONS,
'uniquePairs': True,}
print "Experiment Params: "
pprint(expParam)
expName = "multiple_column_capacity_varying_object_num_synapses_{}_thresh_{}_l4column_{}_l2column_{}_{}".format(
expParam['sample'], expParam['thresh'], expParam["l4Column"],
expParam['l2Column'], expParam["networkType"])
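        # Remove any stale result file for this configuration, since the non-parallel wrapper appends to the CSV.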
try:
os.remove(_prepareResultsDir("{}.csv".format(expName),
resultDirName=resultDirName))
except OSError:
pass
run_params.append((numPointsPerObject,
numCorticalColumns,
resultDirName,
object_num,
expName,
l2Params,
l4Params,
objectParams,
networkType,
rpt))
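  # Dispatch each configuration to a worker process; chunksize=1 hands out tasks one at a time so long runs don't queue behind each other.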
pool = multiprocessing.Pool(cpuCount or multiprocessing.cpu_count(), maxtasksperchild=1)
pool.map(invokeRunCapacityTestWrapper, run_params, chunksize = 1)
# plot result
ploti = 0
fig, ax = plt.subplots(2, 2)
st = fig.suptitle("Varying number of objects", fontsize="x-large")
for axi in (0, 1):
for axj in (0, 1):
ax[axi][axj].xaxis.set_major_locator(ticker.MultipleLocator(100))
colormap = plt.get_cmap("jet")
  colors = [colormap(x) for x in np.linspace(0., 1., len(expParams))]
legendEntries = []
for expParam in expParams:
expName = "multiple_column_capacity_varying_object_num_synapses_{}_thresh_{}_l4column_{}_l2column_{}_{}".format(
expParam['sample'], expParam['thresh'], expParam["l4Column"],
expParam['l2Column'], expParam["networkType"])
resultFileName = _prepareResultsDir("{}.csv".format(expName),
resultDirName=resultDirName
)
result = pd.read_csv(resultFileName)
plotResults(result, ax, "numObjects", None, colors[ploti])
ploti += 1
if "Topology" in expParam["networkType"]:
legendEntries.append("L4 mcs {} #cc {} w/ topology".format(
expParam['l4Column'], expParam['l2Column']))
else:
legendEntries.append("L4 mcs {} #cc {}".format(
expParam['l4Column'], expParam['l2Column']))
ax[0, 0].legend(legendEntries, loc=3, fontsize=8)
fig.tight_layout()
# shift subplots down:
st.set_y(0.95)
fig.subplots_adjust(top=0.85)
plt.savefig(
os.path.join(
plotDirName,
"multiple_column_capacity_varying_object_num_column_num_connection_type_summary.pdf"
)
)
def runExperiment10(numCorticalColumns=DEFAULT_NUM_CORTICAL_COLUMNS,
resultDirName=DEFAULT_RESULT_DIR_NAME,
plotDirName=DEFAULT_PLOT_DIR_NAME,
cpuCount=1):
"""
  Try different L4 and L2 network sizes (scaled together)
"""
numPointsPerObject = 10
numRpts = 1
l4Params = getL4Params()
l2Params = getL2Params()
l2Params['cellCount'] = 4096
l2Params['sdrSize'] = 40
expParams = []
expParams.append(
{'l4Column': 100, 'externalInputSize': 2400, 'w': 10, 'sample': 5,
'L2cellCount': 2000, 'L2activeBits': 20, 'thresh': 4})
expParams.append(
{'l4Column': 150, 'externalInputSize': 2400, 'w': 15, 'sample': 8,
'L2cellCount': 3000, 'L2activeBits': 30, 'thresh': 6})
expParams.append(
{'l4Column': 200, 'externalInputSize': 2400, 'w': 20, 'sample': 10,
'L2cellCount': 4000, 'L2activeBits': 40, 'thresh': 8})
expParams.append(
{'l4Column': 250, 'externalInputSize': 2400, 'w': 25, 'sample': 13,
'L2cellCount': 5000, 'L2activeBits': 50, 'thresh': 10})
for expParam in expParams:
l4Params["columnCount"] = expParam['l4Column']
numInputBits = expParam['w']
l4Params["activationThreshold"] = int(numInputBits * .6)
l4Params["minThreshold"] = int(numInputBits * .6)
l4Params["sampleSize"] = int(2 * l4Params["activationThreshold"])
l2Params['sampleSizeProximal'] = expParam['sample']
l2Params['minThresholdProximal'] = expParam['thresh']
l2Params['cellCount'] = expParam['L2cellCount']
l2Params['sdrSize'] = expParam['L2activeBits']
l2Params['sampleSizeDistal'] = int(expParam['L2cellCount']*.5)
l2Params['activationThresholdDistal'] = int(expParam['L2cellCount'] * .5)-1
objectParams = {'numInputBits': numInputBits,
'externalInputSize': expParam['externalInputSize'],
'numFeatures': DEFAULT_NUM_FEATURES,
'numLocations': DEFAULT_NUM_LOCATIONS,
'uniquePairs': True,}
print "l4Params: "
pprint(l4Params)
print "l2Params: "
pprint(l2Params)
expname = "multiple_column_capacity_varying_object_num_synapses_{}_thresh_{}_l4column_{}_l2cell_{}".format(
expParam['sample'], expParam['thresh'], expParam["l4Column"], expParam['L2cellCount'])
runCapacityTestVaryingObjectNum(numPointsPerObject,
numCorticalColumns,
resultDirName,
expname,
cpuCount,
l2Params,
l4Params,
objectParams,
numRpts)
# plot result
ploti = 0
fig, ax = plt.subplots(2, 2)
st = fig.suptitle(
"Varying number of objects ({} cortical column{})"
.format(numCorticalColumns, "s" if numCorticalColumns > 1 else ""
), fontsize="x-large"
)
for axi in (0, 1):
for axj in (0, 1):
ax[axi][axj].xaxis.set_major_locator(ticker.MultipleLocator(100))
legendEntries = []
for expParam in expParams:
expname = "multiple_column_capacity_varying_object_num_synapses_{}_thresh_{}_l4column_{}_l2cell_{}".format(
expParam['sample'], expParam['thresh'], expParam["l4Column"], expParam['L2cellCount'])
resultFileName = _prepareResultsDir("{}.csv".format(expname),
resultDirName=resultDirName
)
result = pd.read_csv(resultFileName)
plotResults(result, ax, "numObjects", None, DEFAULT_COLORS[ploti])
ploti += 1
legendEntries.append("L4 mcs {} w {} s {} thresh {}".format(
expParam["l4Column"], expParam['w'], expParam['sample'],
expParam['thresh']))
ax[0, 0].legend(legendEntries, loc=4, fontsize=8)
fig.tight_layout()
# shift subplots down:
st.set_y(0.95)
fig.subplots_adjust(top=0.85)
plt.savefig(
os.path.join(
plotDirName,
"capacity_varying_object_num_l4l2size_summary.pdf"
)
)
def runExperiments(resultDirName, plotDirName, cpuCount):
# # Varying number of pts per objects, two objects
# runExperiment1(numCorticalColumns=1,
# resultDirName=resultDirName,
# plotDirName=plotDirName,
# cpuCount=cpuCount)
#
# # 10 pts per object, varying number of objects
# runExperiment2(numCorticalColumns=1,
# resultDirName=resultDirName,
# plotDirName=plotDirName,
# cpuCount=cpuCount)
#
# # 10 pts per object, varying number of objects, varying L4 size
# runExperiment3(numCorticalColumns=1,
# resultDirName=resultDirName,
# plotDirName=plotDirName,
# cpuCount=cpuCount)
#
# # 10 pts per object, varying number of objects and number of columns
# runExperiment4(resultDirName=resultDirName,
# plotDirName=plotDirName,
# cpuCount=cpuCount)
#
# # 10 pts per object, varying number of L2 cells
# runExperiment5(resultDirName=resultDirName,
# plotDirName=plotDirName,
# cpuCount=cpuCount)
#
# # 10 pts per object, varying sparsity of L2
# runExperiment6(resultDirName=resultDirName,
# plotDirName=plotDirName,
# cpuCount=cpuCount)
#
# # 10 pts per object, varying number of location SDRs
# runExperiment7(numCorticalColumns=1,
# resultDirName=resultDirName,
# plotDirName=plotDirName,
# cpuCount=cpuCount)
#
# # 10 pts per object, varying number of feature SDRs
# runExperiment8(numCorticalColumns=1,
# resultDirName=resultDirName,
# plotDirName=plotDirName,
# cpuCount=cpuCount)
# #10 pts per object, varying number of objects and number of columns
runExperiment9(resultDirName=resultDirName,
plotDirName=plotDirName,
cpuCount=cpuCount)
# 10 pts per object, varying number of objects, varying L4/L2 size
# runExperiment10(numCorticalColumns=1,
# resultDirName=resultDirName,
# plotDirName=plotDirName,
# cpuCount=cpuCount)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--resultDirName",
default=DEFAULT_RESULT_DIR_NAME,
type=str,
metavar="DIRECTORY"
)
parser.add_argument(
"--plotDirName",
default=DEFAULT_PLOT_DIR_NAME,
type=str,
metavar="DIRECTORY"
)
parser.add_argument(
"--cpuCount",
default=None,
type=int,
metavar="NUM",
help="Limit number of cpu cores. Defaults to `multiprocessing.cpu_count()`"
)
opts = parser.parse_args()
runExperiments(resultDirName=opts.resultDirName,
plotDirName=opts.plotDirName,
cpuCount=opts.cpuCount)
| agpl-3.0 |
RachitKansal/scikit-learn | sklearn/ensemble/tests/test_weight_boosting.py | 83 | 17276 | """Testing for the boost module (sklearn.ensemble.boost)."""
import numpy as np
from sklearn.utils.testing import assert_array_equal, assert_array_less
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal, assert_true
from sklearn.utils.testing import assert_raises, assert_raises_regexp
from sklearn.base import BaseEstimator
from sklearn.cross_validation import train_test_split
from sklearn.grid_search import GridSearchCV
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import AdaBoostRegressor
from sklearn.ensemble import weight_boosting
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import coo_matrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
from sklearn.svm import SVC, SVR
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.utils import shuffle
from sklearn import datasets
# Common random state
rng = np.random.RandomState(0)
# Toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y_class = ["foo", "foo", "foo", 1, 1, 1] # test string class labels
y_regr = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
y_t_class = ["foo", 1, 1]
y_t_regr = [-1, 1, 1]
# Load the iris dataset and randomly permute it
iris = datasets.load_iris()
perm = rng.permutation(iris.target.size)
iris.data, iris.target = shuffle(iris.data, iris.target, random_state=rng)
# Load the boston dataset and randomly permute it
boston = datasets.load_boston()
boston.data, boston.target = shuffle(boston.data, boston.target,
random_state=rng)
def test_samme_proba():
# Test the `_samme_proba` helper function.
# Define some example (bad) `predict_proba` output.
probs = np.array([[1, 1e-6, 0],
[0.19, 0.6, 0.2],
[-999, 0.51, 0.5],
[1e-6, 1, 1e-9]])
probs /= np.abs(probs.sum(axis=1))[:, np.newaxis]
# _samme_proba calls estimator.predict_proba.
# Make a mock object so I can control what gets returned.
class MockEstimator(object):
def predict_proba(self, X):
assert_array_equal(X.shape, probs.shape)
return probs
mock = MockEstimator()
samme_proba = weight_boosting._samme_proba(mock, 3, np.ones_like(probs))
assert_array_equal(samme_proba.shape, probs.shape)
assert_true(np.isfinite(samme_proba).all())
# Make sure that the correct elements come out as smallest --
# `_samme_proba` should preserve the ordering in each example.
assert_array_equal(np.argmin(samme_proba, axis=1), [2, 0, 0, 2])
assert_array_equal(np.argmax(samme_proba, axis=1), [0, 1, 1, 1])
def test_classification_toy():
# Check classification on a toy dataset.
for alg in ['SAMME', 'SAMME.R']:
clf = AdaBoostClassifier(algorithm=alg, random_state=0)
clf.fit(X, y_class)
assert_array_equal(clf.predict(T), y_t_class)
assert_array_equal(np.unique(np.asarray(y_t_class)), clf.classes_)
assert_equal(clf.predict_proba(T).shape, (len(T), 2))
assert_equal(clf.decision_function(T).shape, (len(T),))
def test_regression_toy():
    # Check regression on a toy dataset.
clf = AdaBoostRegressor(random_state=0)
clf.fit(X, y_regr)
assert_array_equal(clf.predict(T), y_t_regr)
def test_iris():
# Check consistency on dataset iris.
classes = np.unique(iris.target)
clf_samme = prob_samme = None
for alg in ['SAMME', 'SAMME.R']:
clf = AdaBoostClassifier(algorithm=alg)
clf.fit(iris.data, iris.target)
assert_array_equal(classes, clf.classes_)
proba = clf.predict_proba(iris.data)
if alg == "SAMME":
clf_samme = clf
prob_samme = proba
assert_equal(proba.shape[1], len(classes))
assert_equal(clf.decision_function(iris.data).shape[1], len(classes))
score = clf.score(iris.data, iris.target)
assert score > 0.9, "Failed with algorithm %s and score = %f" % \
(alg, score)
# Somewhat hacky regression test: prior to
# ae7adc880d624615a34bafdb1d75ef67051b8200,
# predict_proba returned SAMME.R values for SAMME.
clf_samme.algorithm = "SAMME.R"
assert_array_less(0,
np.abs(clf_samme.predict_proba(iris.data) - prob_samme))
def test_boston():
# Check consistency on dataset boston house prices.
clf = AdaBoostRegressor(random_state=0)
clf.fit(boston.data, boston.target)
score = clf.score(boston.data, boston.target)
assert score > 0.85
def test_staged_predict():
# Check staged predictions.
rng = np.random.RandomState(0)
iris_weights = rng.randint(10, size=iris.target.shape)
boston_weights = rng.randint(10, size=boston.target.shape)
# AdaBoost classification
for alg in ['SAMME', 'SAMME.R']:
clf = AdaBoostClassifier(algorithm=alg, n_estimators=10)
clf.fit(iris.data, iris.target, sample_weight=iris_weights)
predictions = clf.predict(iris.data)
staged_predictions = [p for p in clf.staged_predict(iris.data)]
proba = clf.predict_proba(iris.data)
staged_probas = [p for p in clf.staged_predict_proba(iris.data)]
score = clf.score(iris.data, iris.target, sample_weight=iris_weights)
staged_scores = [
s for s in clf.staged_score(
iris.data, iris.target, sample_weight=iris_weights)]
assert_equal(len(staged_predictions), 10)
assert_array_almost_equal(predictions, staged_predictions[-1])
assert_equal(len(staged_probas), 10)
assert_array_almost_equal(proba, staged_probas[-1])
assert_equal(len(staged_scores), 10)
assert_array_almost_equal(score, staged_scores[-1])
# AdaBoost regression
clf = AdaBoostRegressor(n_estimators=10, random_state=0)
clf.fit(boston.data, boston.target, sample_weight=boston_weights)
predictions = clf.predict(boston.data)
staged_predictions = [p for p in clf.staged_predict(boston.data)]
score = clf.score(boston.data, boston.target, sample_weight=boston_weights)
staged_scores = [
s for s in clf.staged_score(
boston.data, boston.target, sample_weight=boston_weights)]
assert_equal(len(staged_predictions), 10)
assert_array_almost_equal(predictions, staged_predictions[-1])
assert_equal(len(staged_scores), 10)
assert_array_almost_equal(score, staged_scores[-1])
def test_gridsearch():
# Check that base trees can be grid-searched.
# AdaBoost classification
boost = AdaBoostClassifier(base_estimator=DecisionTreeClassifier())
parameters = {'n_estimators': (1, 2),
'base_estimator__max_depth': (1, 2),
'algorithm': ('SAMME', 'SAMME.R')}
clf = GridSearchCV(boost, parameters)
clf.fit(iris.data, iris.target)
# AdaBoost regression
boost = AdaBoostRegressor(base_estimator=DecisionTreeRegressor(),
random_state=0)
parameters = {'n_estimators': (1, 2),
'base_estimator__max_depth': (1, 2)}
clf = GridSearchCV(boost, parameters)
clf.fit(boston.data, boston.target)
def test_pickle():
# Check pickability.
import pickle
# Adaboost classifier
for alg in ['SAMME', 'SAMME.R']:
obj = AdaBoostClassifier(algorithm=alg)
obj.fit(iris.data, iris.target)
score = obj.score(iris.data, iris.target)
s = pickle.dumps(obj)
obj2 = pickle.loads(s)
assert_equal(type(obj2), obj.__class__)
score2 = obj2.score(iris.data, iris.target)
assert_equal(score, score2)
# Adaboost regressor
obj = AdaBoostRegressor(random_state=0)
obj.fit(boston.data, boston.target)
score = obj.score(boston.data, boston.target)
s = pickle.dumps(obj)
obj2 = pickle.loads(s)
assert_equal(type(obj2), obj.__class__)
score2 = obj2.score(boston.data, boston.target)
assert_equal(score, score2)
def test_importances():
# Check variable importances.
X, y = datasets.make_classification(n_samples=2000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
shuffle=False,
random_state=1)
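    # With shuffle=False the 3 informative features come first, so their importances should dominate the noise features.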
for alg in ['SAMME', 'SAMME.R']:
clf = AdaBoostClassifier(algorithm=alg)
clf.fit(X, y)
importances = clf.feature_importances_
assert_equal(importances.shape[0], 10)
assert_equal((importances[:3, np.newaxis] >= importances[3:]).all(),
True)
def test_error():
# Test that it gives proper exception on deficient input.
assert_raises(ValueError,
AdaBoostClassifier(learning_rate=-1).fit,
X, y_class)
assert_raises(ValueError,
AdaBoostClassifier(algorithm="foo").fit,
X, y_class)
assert_raises(ValueError,
AdaBoostClassifier().fit,
X, y_class, sample_weight=np.asarray([-1]))
def test_base_estimator():
# Test different base estimators.
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
# XXX doesn't work with y_class because RF doesn't support classes_
# Shouldn't AdaBoost run a LabelBinarizer?
clf = AdaBoostClassifier(RandomForestClassifier())
clf.fit(X, y_regr)
clf = AdaBoostClassifier(SVC(), algorithm="SAMME")
clf.fit(X, y_class)
from sklearn.ensemble import RandomForestRegressor
from sklearn.svm import SVR
clf = AdaBoostRegressor(RandomForestRegressor(), random_state=0)
clf.fit(X, y_regr)
clf = AdaBoostRegressor(SVR(), random_state=0)
clf.fit(X, y_regr)
# Check that an empty discrete ensemble fails in fit, not predict.
X_fail = [[1, 1], [1, 1], [1, 1], [1, 1]]
y_fail = ["foo", "bar", 1, 2]
clf = AdaBoostClassifier(SVC(), algorithm="SAMME")
assert_raises_regexp(ValueError, "worse than random",
clf.fit, X_fail, y_fail)
def test_sample_weight_missing():
from sklearn.linear_model import LogisticRegression
from sklearn.cluster import KMeans
clf = AdaBoostClassifier(LogisticRegression(), algorithm="SAMME")
assert_raises(ValueError, clf.fit, X, y_regr)
clf = AdaBoostClassifier(KMeans(), algorithm="SAMME")
assert_raises(ValueError, clf.fit, X, y_regr)
clf = AdaBoostRegressor(KMeans())
assert_raises(ValueError, clf.fit, X, y_regr)
def test_sparse_classification():
# Check classification with sparse input.
class CustomSVC(SVC):
"""SVC variant that records the nature of the training set."""
def fit(self, X, y, sample_weight=None):
"""Modification on fit caries data type for later verification."""
super(CustomSVC, self).fit(X, y, sample_weight=sample_weight)
self.data_type_ = type(X)
return self
X, y = datasets.make_multilabel_classification(n_classes=1, n_samples=15,
n_features=5,
random_state=42)
# Flatten y to a 1d array
y = np.ravel(y)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
for sparse_format in [csc_matrix, csr_matrix, lil_matrix, coo_matrix,
dok_matrix]:
X_train_sparse = sparse_format(X_train)
X_test_sparse = sparse_format(X_test)
# Trained on sparse format
sparse_classifier = AdaBoostClassifier(
base_estimator=CustomSVC(probability=True),
random_state=1,
algorithm="SAMME"
).fit(X_train_sparse, y_train)
# Trained on dense format
dense_classifier = AdaBoostClassifier(
base_estimator=CustomSVC(probability=True),
random_state=1,
algorithm="SAMME"
).fit(X_train, y_train)
# predict
sparse_results = sparse_classifier.predict(X_test_sparse)
dense_results = dense_classifier.predict(X_test)
assert_array_equal(sparse_results, dense_results)
# decision_function
sparse_results = sparse_classifier.decision_function(X_test_sparse)
dense_results = dense_classifier.decision_function(X_test)
assert_array_equal(sparse_results, dense_results)
# predict_log_proba
sparse_results = sparse_classifier.predict_log_proba(X_test_sparse)
dense_results = dense_classifier.predict_log_proba(X_test)
assert_array_equal(sparse_results, dense_results)
# predict_proba
sparse_results = sparse_classifier.predict_proba(X_test_sparse)
dense_results = dense_classifier.predict_proba(X_test)
assert_array_equal(sparse_results, dense_results)
# score
sparse_results = sparse_classifier.score(X_test_sparse, y_test)
dense_results = dense_classifier.score(X_test, y_test)
assert_array_equal(sparse_results, dense_results)
# staged_decision_function
sparse_results = sparse_classifier.staged_decision_function(
X_test_sparse)
dense_results = dense_classifier.staged_decision_function(X_test)
        for sparse_res, dense_res in zip(sparse_results, dense_results):
            assert_array_equal(sparse_res, dense_res)
# staged_predict
sparse_results = sparse_classifier.staged_predict(X_test_sparse)
dense_results = dense_classifier.staged_predict(X_test)
        for sparse_res, dense_res in zip(sparse_results, dense_results):
            assert_array_equal(sparse_res, dense_res)
# staged_predict_proba
sparse_results = sparse_classifier.staged_predict_proba(X_test_sparse)
dense_results = dense_classifier.staged_predict_proba(X_test)
        for sparse_res, dense_res in zip(sparse_results, dense_results):
            assert_array_equal(sparse_res, dense_res)
# staged_score
sparse_results = sparse_classifier.staged_score(X_test_sparse,
y_test)
dense_results = dense_classifier.staged_score(X_test, y_test)
        for sparse_res, dense_res in zip(sparse_results, dense_results):
            assert_array_equal(sparse_res, dense_res)
# Verify sparsity of data is maintained during training
types = [i.data_type_ for i in sparse_classifier.estimators_]
assert all([(t == csc_matrix or t == csr_matrix)
for t in types])
def test_sparse_regression():
# Check regression with sparse input.
class CustomSVR(SVR):
"""SVR variant that records the nature of the training set."""
def fit(self, X, y, sample_weight=None):
"""Modification on fit caries data type for later verification."""
super(CustomSVR, self).fit(X, y, sample_weight=sample_weight)
self.data_type_ = type(X)
return self
X, y = datasets.make_regression(n_samples=15, n_features=50, n_targets=1,
random_state=42)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
for sparse_format in [csc_matrix, csr_matrix, lil_matrix, coo_matrix,
dok_matrix]:
X_train_sparse = sparse_format(X_train)
X_test_sparse = sparse_format(X_test)
# Trained on sparse format
sparse_classifier = AdaBoostRegressor(
base_estimator=CustomSVR(),
random_state=1
).fit(X_train_sparse, y_train)
# Trained on dense format
        dense_classifier = AdaBoostRegressor(
base_estimator=CustomSVR(),
random_state=1
).fit(X_train, y_train)
# predict
sparse_results = sparse_classifier.predict(X_test_sparse)
dense_results = dense_classifier.predict(X_test)
assert_array_equal(sparse_results, dense_results)
# staged_predict
sparse_results = sparse_classifier.staged_predict(X_test_sparse)
dense_results = dense_classifier.staged_predict(X_test)
        for sparse_res, dense_res in zip(sparse_results, dense_results):
            assert_array_equal(sparse_res, dense_res)
types = [i.data_type_ for i in sparse_classifier.estimators_]
assert all([(t == csc_matrix or t == csr_matrix)
for t in types])
def test_sample_weight_adaboost_regressor():
"""
    AdaBoostRegressor should work without sample_weights in the base estimator.
The random weighted sampling is done internally in the _boost method in
AdaBoostRegressor.
"""
class DummyEstimator(BaseEstimator):
def fit(self, X, y):
pass
def predict(self, X):
return np.zeros(X.shape[0])
boost = AdaBoostRegressor(DummyEstimator(), n_estimators=3)
boost.fit(X, y_regr)
assert_equal(len(boost.estimator_weights_), len(boost.estimator_errors_))
| bsd-3-clause |
rahuldhote/scikit-learn | sklearn/preprocessing/data.py | 113 | 56747 | # Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Olivier Grisel <[email protected]>
# Andreas Mueller <[email protected]>
# Eric Martin <[email protected]>
# License: BSD 3 clause
from itertools import chain, combinations
import numbers
import warnings
import numpy as np
from scipy import sparse
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..utils import check_array
from ..utils.extmath import row_norms
from ..utils.fixes import combinations_with_replacement as combinations_w_r
from ..utils.sparsefuncs_fast import (inplace_csr_row_normalize_l1,
inplace_csr_row_normalize_l2)
from ..utils.sparsefuncs import (inplace_column_scale, mean_variance_axis,
min_max_axis, inplace_row_scale)
from ..utils.validation import check_is_fitted, FLOAT_DTYPES
zip = six.moves.zip
map = six.moves.map
range = six.moves.range
__all__ = [
'Binarizer',
'KernelCenterer',
'MinMaxScaler',
'MaxAbsScaler',
'Normalizer',
'OneHotEncoder',
'RobustScaler',
'StandardScaler',
'add_dummy_feature',
'binarize',
'normalize',
'scale',
'robust_scale',
'maxabs_scale',
'minmax_scale',
]
def _mean_and_std(X, axis=0, with_mean=True, with_std=True):
"""Compute mean and std deviation for centering, scaling.
Zero valued std components are reset to 1.0 to avoid NaNs when scaling.
"""
X = np.asarray(X)
Xr = np.rollaxis(X, axis)
if with_mean:
mean_ = Xr.mean(axis=0)
else:
mean_ = None
if with_std:
std_ = Xr.std(axis=0)
std_ = _handle_zeros_in_scale(std_)
else:
std_ = None
return mean_, std_
def _handle_zeros_in_scale(scale):
''' Makes sure that whenever scale is zero, we handle it correctly.
This happens in most scalers when we have constant features.'''
# if we are fitting on 1D arrays, scale might be a scalar
if np.isscalar(scale):
if scale == 0:
scale = 1.
elif isinstance(scale, np.ndarray):
scale[scale == 0.0] = 1.0
scale[~np.isfinite(scale)] = 1.0
return scale
def scale(X, axis=0, with_mean=True, with_std=True, copy=True):
"""Standardize a dataset along any axis
Center to the mean and component wise scale to unit variance.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
X : array-like or CSR matrix.
The data to center and scale.
axis : int (0 by default)
axis used to compute the means and standard deviations along. If 0,
independently standardize each feature, otherwise (if 1) standardize
each sample.
with_mean : boolean, True by default
If True, center the data before scaling.
with_std : boolean, True by default
If True, scale the data to unit variance (or equivalently,
unit standard deviation).
copy : boolean, optional, default True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix and if axis is 1).
Notes
-----
This implementation will refuse to center scipy.sparse matrices
since it would make them non-sparse and would potentially crash the
program with memory exhaustion problems.
Instead the caller is expected to either set explicitly
`with_mean=False` (in that case, only variance scaling will be
performed on the features of the CSR matrix) or to call `X.toarray()`
if he/she expects the materialized dense array to fit in memory.
To avoid memory copy the caller should pass a CSR matrix.
See also
--------
:class:`sklearn.preprocessing.StandardScaler` to perform centering and
scaling using the ``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`)
"""
X = check_array(X, accept_sparse='csr', copy=copy, ensure_2d=False,
warn_on_dtype=True, estimator='the scale function',
dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` instead"
" See docstring for motivation and alternatives.")
if axis != 0:
raise ValueError("Can only scale sparse matrix on axis=0, "
" got axis=%d" % axis)
if not sparse.isspmatrix_csr(X):
X = X.tocsr()
copy = False
if copy:
X = X.copy()
_, var = mean_variance_axis(X, axis=0)
var = _handle_zeros_in_scale(var)
inplace_column_scale(X, 1 / np.sqrt(var))
else:
X = np.asarray(X)
mean_, std_ = _mean_and_std(
X, axis, with_mean=with_mean, with_std=with_std)
if copy:
X = X.copy()
# Xr is a view on the original array that enables easy use of
# broadcasting on the axis in which we are interested in
Xr = np.rollaxis(X, axis)
if with_mean:
Xr -= mean_
mean_1 = Xr.mean(axis=0)
# Verify that mean_1 is 'close to zero'. If X contains very
# large values, mean_1 can also be very large, due to a lack of
# precision of mean_. In this case, a pre-scaling of the
# concerned feature is efficient, for instance by its mean or
# maximum.
if not np.allclose(mean_1, 0):
warnings.warn("Numerical issues were encountered "
"when centering the data "
"and might not be solved. Dataset may "
"contain too large values. You may need "
"to prescale your features.")
Xr -= mean_1
if with_std:
Xr /= std_
if with_mean:
mean_2 = Xr.mean(axis=0)
# If mean_2 is not 'close to zero', it comes from the fact that
# std_ is very small so that mean_2 = mean_1/std_ > 0, even if
# mean_1 was close to zero. The problem is thus essentially due
# to the lack of precision of mean_. A solution is then to
                # subtract the mean again:
if not np.allclose(mean_2, 0):
warnings.warn("Numerical issues were encountered "
"when scaling the data "
"and might not be solved. The standard "
"deviation of the data is probably "
"very close to 0. ")
Xr -= mean_2
return X
class MinMaxScaler(BaseEstimator, TransformerMixin):
"""Transforms features by scaling each feature to a given range.
This estimator scales and translates each feature individually such
that it is in the given range on the training set, i.e. between
zero and one.
The transformation is given by::
X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
X_scaled = X_std * (max - min) + min
where min, max = feature_range.
This transformation is often used as an alternative to zero mean,
unit variance scaling.
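    For example, with the default ``feature_range=(0, 1)`` a feature column ``[1, 5, 9]`` is mapped to ``[0., 0.5, 1.]``.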
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
feature_range: tuple (min, max), default=(0, 1)
Desired range of transformed data.
copy : boolean, optional, default True
Set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array).
Attributes
----------
min_ : ndarray, shape (n_features,)
Per feature adjustment for minimum.
scale_ : ndarray, shape (n_features,)
Per feature relative scaling of the data.
"""
def __init__(self, feature_range=(0, 1), copy=True):
self.feature_range = feature_range
self.copy = copy
def fit(self, X, y=None):
"""Compute the minimum and maximum to be used for later scaling.
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to compute the per-feature minimum and maximum
used for later scaling along the features axis.
"""
X = check_array(X, copy=self.copy, ensure_2d=False, warn_on_dtype=True,
estimator=self, dtype=FLOAT_DTYPES)
feature_range = self.feature_range
if feature_range[0] >= feature_range[1]:
raise ValueError("Minimum of desired feature range must be smaller"
" than maximum. Got %s." % str(feature_range))
data_min = np.min(X, axis=0)
data_range = np.max(X, axis=0) - data_min
data_range = _handle_zeros_in_scale(data_range)
self.scale_ = (feature_range[1] - feature_range[0]) / data_range
self.min_ = feature_range[0] - data_min * self.scale_
self.data_range = data_range
self.data_min = data_min
return self
def transform(self, X):
"""Scaling features of X according to feature_range.
Parameters
----------
X : array-like with shape [n_samples, n_features]
Input data that will be transformed.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, copy=self.copy, ensure_2d=False)
X *= self.scale_
X += self.min_
return X
def inverse_transform(self, X):
"""Undo the scaling of X according to feature_range.
Parameters
----------
X : array-like with shape [n_samples, n_features]
Input data that will be transformed.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, copy=self.copy, ensure_2d=False)
X -= self.min_
X /= self.scale_
return X
def minmax_scale(X, feature_range=(0, 1), axis=0, copy=True):
"""Transforms features by scaling each feature to a given range.
This estimator scales and translates each feature individually such
that it is in the given range on the training set, i.e. between
zero and one.
The transformation is given by::
X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
X_scaled = X_std * (max - min) + min
where min, max = feature_range.
This transformation is often used as an alternative to zero mean,
unit variance scaling.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
feature_range: tuple (min, max), default=(0, 1)
Desired range of transformed data.
axis : int (0 by default)
axis used to scale along. If 0, independently scale each feature,
otherwise (if 1) scale each sample.
copy : boolean, optional, default is True
Set to False to perform inplace scaling and avoid a copy (if the input
is already a numpy array).
"""
s = MinMaxScaler(feature_range=feature_range, copy=copy)
if axis == 0:
return s.fit_transform(X)
else:
return s.fit_transform(X.T).T
class StandardScaler(BaseEstimator, TransformerMixin):
"""Standardize features by removing the mean and scaling to unit variance
Centering and scaling happen independently on each feature by computing
the relevant statistics on the samples in the training set. Mean and
standard deviation are then stored to be used on later data using the
`transform` method.
Standardization of a dataset is a common requirement for many
machine learning estimators: they might behave badly if the
    individual features do not more or less look like standard normally
distributed data (e.g. Gaussian with 0 mean and unit variance).
For instance many elements used in the objective function of
a learning algorithm (such as the RBF kernel of Support Vector
Machines or the L1 and L2 regularizers of linear models) assume that
all features are centered around 0 and have variance in the same
order. If a feature has a variance that is orders of magnitude larger
    than others, it might dominate the objective function and make the
estimator unable to learn from other features correctly as expected.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
with_mean : boolean, True by default
If True, center the data before scaling.
This does not work (and will raise an exception) when attempted on
sparse matrices, because centering them entails building a dense
matrix which in common use cases is likely to be too large to fit in
memory.
with_std : boolean, True by default
If True, scale the data to unit variance (or equivalently,
unit standard deviation).
copy : boolean, optional, default True
If False, try to avoid a copy and do inplace scaling instead.
This is not guaranteed to always work inplace; e.g. if the data is
not a NumPy array or scipy.sparse CSR matrix, a copy may still be
returned.
Attributes
----------
mean_ : array of floats with shape [n_features]
The mean value for each feature in the training set.
std_ : array of floats with shape [n_features]
The standard deviation for each feature in the training set.
Set to one if the standard deviation is zero for a given feature.
See also
--------
:func:`sklearn.preprocessing.scale` to perform centering and
scaling without using the ``Transformer`` object oriented API
:class:`sklearn.decomposition.RandomizedPCA` with `whiten=True`
to further remove the linear correlation across features.
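    Examples
    --------
    A minimal illustrative sketch on hypothetical data (in this version the
    per-feature scale is the population standard deviation stored in ``std_``):
    >>> import numpy as np
    >>> from sklearn.preprocessing import StandardScaler
    >>> scaler = StandardScaler().fit(np.array([[ 1.], [ 2.], [ 3.]]))
    >>> scaler.mean_
    array([ 2.])
    >>> scaler.transform(np.array([[ 1.], [ 2.], [ 3.]]))
    array([[-1.22474487],
           [ 0.        ],
           [ 1.22474487]])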
"""
def __init__(self, copy=True, with_mean=True, with_std=True):
self.with_mean = with_mean
self.with_std = with_std
self.copy = copy
def fit(self, X, y=None):
"""Compute the mean and std to be used for later scaling.
Parameters
----------
X : array-like or CSR matrix with shape [n_samples, n_features]
The data used to compute the mean and standard deviation
used for later scaling along the features axis.
"""
X = check_array(X, accept_sparse='csr', copy=self.copy,
ensure_2d=False, warn_on_dtype=True,
estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` "
"instead. See docstring for motivation and alternatives.")
self.mean_ = None
if self.with_std:
var = mean_variance_axis(X, axis=0)[1]
self.std_ = np.sqrt(var)
self.std_ = _handle_zeros_in_scale(self.std_)
else:
self.std_ = None
return self
else:
self.mean_, self.std_ = _mean_and_std(
X, axis=0, with_mean=self.with_mean, with_std=self.with_std)
return self
def transform(self, X, y=None, copy=None):
"""Perform standardization by centering and scaling
Parameters
----------
X : array-like with shape [n_samples, n_features]
The data used to scale along the features axis.
"""
check_is_fitted(self, 'std_')
copy = copy if copy is not None else self.copy
X = check_array(X, accept_sparse='csr', copy=copy,
ensure_2d=False, warn_on_dtype=True,
estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` "
"instead. See docstring for motivation and alternatives.")
if self.std_ is not None:
inplace_column_scale(X, 1 / self.std_)
else:
if self.with_mean:
X -= self.mean_
if self.with_std:
X /= self.std_
return X
def inverse_transform(self, X, copy=None):
"""Scale back the data to the original representation
Parameters
----------
X : array-like with shape [n_samples, n_features]
The data used to scale along the features axis.
"""
check_is_fitted(self, 'std_')
copy = copy if copy is not None else self.copy
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot uncenter sparse matrices: pass `with_mean=False` "
"instead See docstring for motivation and alternatives.")
if not sparse.isspmatrix_csr(X):
X = X.tocsr()
copy = False
if copy:
X = X.copy()
if self.std_ is not None:
inplace_column_scale(X, self.std_)
else:
X = np.asarray(X)
if copy:
X = X.copy()
if self.with_std:
X *= self.std_
if self.with_mean:
X += self.mean_
return X
class MaxAbsScaler(BaseEstimator, TransformerMixin):
"""Scale each feature by its maximum absolute value.
This estimator scales and translates each feature individually such
that the maximal absolute value of each feature in the
training set will be 1.0. It does not shift/center the data, and
thus does not destroy any sparsity.
This scaler can also be applied to sparse CSR or CSC matrices.
Parameters
----------
copy : boolean, optional, default is True
Set to False to perform inplace scaling and avoid a copy (if the input
is already a numpy array).
Attributes
----------
scale_ : ndarray, shape (n_features,)
Per feature relative scaling of the data.
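    Examples
    --------
    A minimal illustrative sketch on hypothetical data (each column is divided
    by its maximum absolute value):
    >>> import numpy as np
    >>> from sklearn.preprocessing import MaxAbsScaler
    >>> MaxAbsScaler().fit_transform(np.array([[ 1., -2.], [ 2.,  4.], [-4.,  2.]]))
    array([[ 0.25, -0.5 ],
           [ 0.5 ,  1.  ],
           [-1.  ,  0.5 ]])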
"""
def __init__(self, copy=True):
self.copy = copy
def fit(self, X, y=None):
"""Compute the minimum and maximum to be used for later scaling.
Parameters
----------
X : array-like, shape [n_samples, n_features]
            The data used to compute the per-feature maximum absolute value
used for later scaling along the features axis.
"""
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
mins, maxs = min_max_axis(X, axis=0)
scales = np.maximum(np.abs(mins), np.abs(maxs))
else:
scales = np.abs(X).max(axis=0)
scales = np.array(scales)
scales = scales.reshape(-1)
self.scale_ = _handle_zeros_in_scale(scales)
return self
def transform(self, X, y=None):
"""Scale the data
Parameters
----------
X : array-like or CSR matrix.
The data that should be scaled.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if X.shape[0] == 1:
inplace_row_scale(X, 1.0 / self.scale_)
else:
inplace_column_scale(X, 1.0 / self.scale_)
else:
X /= self.scale_
return X
def inverse_transform(self, X):
"""Scale back the data to the original representation
Parameters
----------
X : array-like or CSR matrix.
The data that should be transformed back.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if X.shape[0] == 1:
inplace_row_scale(X, self.scale_)
else:
inplace_column_scale(X, self.scale_)
else:
X *= self.scale_
return X
def maxabs_scale(X, axis=0, copy=True):
"""Scale each feature to the [-1, 1] range without breaking the sparsity.
This estimator scales each feature individually such
that the maximal absolute value of each feature in the
training set will be 1.0.
This scaler can also be applied to sparse CSR or CSC matrices.
Parameters
----------
axis : int (0 by default)
axis used to scale along. If 0, independently scale each feature,
otherwise (if 1) scale each sample.
copy : boolean, optional, default is True
Set to False to perform inplace scaling and avoid a copy (if the input
is already a numpy array).
"""
s = MaxAbsScaler(copy=copy)
if axis == 0:
return s.fit_transform(X)
else:
return s.fit_transform(X.T).T
class RobustScaler(BaseEstimator, TransformerMixin):
"""Scale features using statistics that are robust to outliers.
This Scaler removes the median and scales the data according to
the Interquartile Range (IQR). The IQR is the range between the 1st
quartile (25th quantile) and the 3rd quartile (75th quantile).
Centering and scaling happen independently on each feature (or each
sample, depending on the `axis` argument) by computing the relevant
statistics on the samples in the training set. Median and interquartile
range are then stored to be used on later data using the `transform`
method.
Standardization of a dataset is a common requirement for many
machine learning estimators. Typically this is done by removing the mean
and scaling to unit variance. However, outliers can often influence the
sample mean / variance in a negative way. In such cases, the median and
the interquartile range often give better results.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
with_centering : boolean, True by default
If True, center the data before scaling.
This does not work (and will raise an exception) when attempted on
sparse matrices, because centering them entails building a dense
matrix which in common use cases is likely to be too large to fit in
memory.
with_scaling : boolean, True by default
If True, scale the data to interquartile range.
copy : boolean, optional, default is True
If False, try to avoid a copy and do inplace scaling instead.
This is not guaranteed to always work inplace; e.g. if the data is
not a NumPy array or scipy.sparse CSR matrix, a copy may still be
returned.
Attributes
----------
center_ : array of floats
The median value for each feature in the training set.
scale_ : array of floats
The (scaled) interquartile range for each feature in the training set.
See also
--------
:class:`sklearn.preprocessing.StandardScaler` to perform centering
and scaling using mean and variance.
:class:`sklearn.decomposition.RandomizedPCA` with `whiten=True`
to further remove the linear correlation across features.
Notes
-----
See examples/preprocessing/plot_robust_scaling.py for an example.
http://en.wikipedia.org/wiki/Median_(statistics)
http://en.wikipedia.org/wiki/Interquartile_range
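    Examples
    --------
    A minimal illustrative sketch on hypothetical data with one outlier; the
    center is the median and the scale is the interquartile range:
    >>> import numpy as np
    >>> from sklearn.preprocessing import RobustScaler
    >>> scaler = RobustScaler().fit(np.array([[ 1.], [ 2.], [ 3.], [ 4.], [ 100.]]))
    >>> scaler.center_
    array([ 3.])
    >>> scaler.scale_
    array([ 2.])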
"""
def __init__(self, with_centering=True, with_scaling=True, copy=True):
self.with_centering = with_centering
self.with_scaling = with_scaling
self.copy = copy
def _check_array(self, X, copy):
"""Makes sure centering is not enabled for sparse matrices."""
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if self.with_centering:
raise ValueError(
"Cannot center sparse matrices: use `with_centering=False`"
" instead. See docstring for motivation and alternatives.")
return X
def fit(self, X, y=None):
"""Compute the median and quantiles to be used for scaling.
Parameters
----------
X : array-like with shape [n_samples, n_features]
The data used to compute the median and quantiles
used for later scaling along the features axis.
"""
if sparse.issparse(X):
raise TypeError("RobustScaler cannot be fitted on sparse inputs")
X = self._check_array(X, self.copy)
if self.with_centering:
self.center_ = np.median(X, axis=0)
if self.with_scaling:
q = np.percentile(X, (25, 75), axis=0)
self.scale_ = (q[1] - q[0])
self.scale_ = _handle_zeros_in_scale(self.scale_)
return self
def transform(self, X, y=None):
"""Center and scale the data
Parameters
----------
X : array-like or CSR matrix.
The data used to scale along the specified axis.
"""
if self.with_centering:
check_is_fitted(self, 'center_')
if self.with_scaling:
check_is_fitted(self, 'scale_')
X = self._check_array(X, self.copy)
if sparse.issparse(X):
if self.with_scaling:
if X.shape[0] == 1:
inplace_row_scale(X, 1.0 / self.scale_)
                else:
                    inplace_column_scale(X, 1.0 / self.scale_)
else:
if self.with_centering:
X -= self.center_
if self.with_scaling:
X /= self.scale_
return X
def inverse_transform(self, X):
"""Scale back the data to the original representation
Parameters
----------
X : array-like or CSR matrix.
The data used to scale along the specified axis.
"""
if self.with_centering:
check_is_fitted(self, 'center_')
if self.with_scaling:
check_is_fitted(self, 'scale_')
X = self._check_array(X, self.copy)
if sparse.issparse(X):
if self.with_scaling:
if X.shape[0] == 1:
inplace_row_scale(X, self.scale_)
else:
inplace_column_scale(X, self.scale_)
else:
if self.with_scaling:
X *= self.scale_
if self.with_centering:
X += self.center_
return X
def robust_scale(X, axis=0, with_centering=True, with_scaling=True, copy=True):
"""Standardize a dataset along any axis
Center to the median and component wise scale
according to the interquartile range.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
X : array-like.
The data to center and scale.
axis : int (0 by default)
axis used to compute the medians and IQR along. If 0,
independently scale each feature, otherwise (if 1) scale
each sample.
with_centering : boolean, True by default
If True, center the data before scaling.
with_scaling : boolean, True by default
        If True, scale the data to the interquartile range.
copy : boolean, optional, default is True
        set to False to perform inplace scaling and avoid a copy (if the
        input is already a numpy array or a scipy.sparse CSR matrix and if
        axis is 1).
Notes
-----
This implementation will refuse to center scipy.sparse matrices
since it would make them non-sparse and would potentially crash the
program with memory exhaustion problems.
Instead the caller is expected to either set explicitly
    `with_centering=False` (in that case, only scaling by the interquartile
    range will be performed on the features of the CSR matrix) or to call
    `X.toarray()` if the materialized dense array is expected to fit in memory.
To avoid memory copy the caller should pass a CSR matrix.
See also
--------
:class:`sklearn.preprocessing.RobustScaler` to perform centering and
scaling using the ``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`)
"""
s = RobustScaler(with_centering=with_centering, with_scaling=with_scaling,
copy=copy)
if axis == 0:
return s.fit_transform(X)
else:
return s.fit_transform(X.T).T
class PolynomialFeatures(BaseEstimator, TransformerMixin):
"""Generate polynomial and interaction features.
Generate a new feature matrix consisting of all polynomial combinations
of the features with degree less than or equal to the specified degree.
For example, if an input sample is two dimensional and of the form
[a, b], the degree-2 polynomial features are [1, a, b, a^2, ab, b^2].
Parameters
----------
degree : integer
The degree of the polynomial features. Default = 2.
interaction_only : boolean, default = False
If true, only interaction features are produced: features that are
products of at most ``degree`` *distinct* input features (so not
``x[1] ** 2``, ``x[0] * x[2] ** 3``, etc.).
include_bias : boolean
If True (default), then include a bias column, the feature in which
all polynomial powers are zero (i.e. a column of ones - acts as an
intercept term in a linear model).
Examples
--------
>>> X = np.arange(6).reshape(3, 2)
>>> X
array([[0, 1],
[2, 3],
[4, 5]])
>>> poly = PolynomialFeatures(2)
>>> poly.fit_transform(X)
array([[ 1, 0, 1, 0, 0, 1],
[ 1, 2, 3, 4, 6, 9],
[ 1, 4, 5, 16, 20, 25]])
>>> poly = PolynomialFeatures(interaction_only=True)
>>> poly.fit_transform(X)
array([[ 1, 0, 1, 0],
[ 1, 2, 3, 6],
[ 1, 4, 5, 20]])
Attributes
----------
powers_ : array, shape (n_input_features, n_output_features)
powers_[i, j] is the exponent of the jth input in the ith output.
n_input_features_ : int
The total number of input features.
n_output_features_ : int
The total number of polynomial output features. The number of output
features is computed by iterating over all suitably sized combinations
of input features.
Notes
-----
Be aware that the number of features in the output array scales
polynomially in the number of features of the input array, and
exponentially in the degree. High degrees can cause overfitting.
See :ref:`examples/linear_model/plot_polynomial_interpolation.py
<example_linear_model_plot_polynomial_interpolation.py>`
"""
def __init__(self, degree=2, interaction_only=False, include_bias=True):
self.degree = degree
self.interaction_only = interaction_only
self.include_bias = include_bias
@staticmethod
def _combinations(n_features, degree, interaction_only, include_bias):
comb = (combinations if interaction_only else combinations_w_r)
start = int(not include_bias)
return chain.from_iterable(comb(range(n_features), i)
for i in range(start, degree + 1))
@property
def powers_(self):
check_is_fitted(self, 'n_input_features_')
combinations = self._combinations(self.n_input_features_, self.degree,
self.interaction_only,
self.include_bias)
return np.vstack(np.bincount(c, minlength=self.n_input_features_)
for c in combinations)
def fit(self, X, y=None):
"""
Compute number of output features.
"""
n_samples, n_features = check_array(X).shape
combinations = self._combinations(n_features, self.degree,
self.interaction_only,
self.include_bias)
self.n_input_features_ = n_features
self.n_output_features_ = sum(1 for _ in combinations)
return self
def transform(self, X, y=None):
"""Transform data to polynomial features
Parameters
----------
X : array with shape [n_samples, n_features]
The data to transform, row by row.
Returns
-------
XP : np.ndarray shape [n_samples, NP]
The matrix of features, where NP is the number of polynomial
features generated from the combination of inputs.
"""
check_is_fitted(self, ['n_input_features_', 'n_output_features_'])
X = check_array(X)
n_samples, n_features = X.shape
if n_features != self.n_input_features_:
raise ValueError("X shape does not match training shape")
# allocate output data
XP = np.empty((n_samples, self.n_output_features_), dtype=X.dtype)
combinations = self._combinations(n_features, self.degree,
self.interaction_only,
self.include_bias)
for i, c in enumerate(combinations):
XP[:, i] = X[:, c].prod(1)
return XP
def normalize(X, norm='l2', axis=1, copy=True):
"""Scale input vectors individually to unit norm (vector length).
Read more in the :ref:`User Guide <preprocessing_normalization>`.
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
The data to normalize, element by element.
scipy.sparse matrices should be in CSR format to avoid an
un-necessary copy.
norm : 'l1', 'l2', or 'max', optional ('l2' by default)
The norm to use to normalize each non zero sample (or each non-zero
feature if axis is 0).
axis : 0 or 1, optional (1 by default)
axis used to normalize the data along. If 1, independently normalize
each sample, otherwise (if 0) normalize each feature.
copy : boolean, optional, default True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix and if axis is 1).
See also
--------
:class:`sklearn.preprocessing.Normalizer` to perform normalization
using the ``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`)
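    Examples
    --------
    A minimal illustrative sketch on hypothetical rows, using the default
    ``'l2'`` norm (each row is divided by its Euclidean length):
    >>> import numpy as np
    >>> from sklearn.preprocessing import normalize
    >>> normalize(np.array([[ 3., 4.], [ 0., 2.]]))
    array([[ 0.6,  0.8],
           [ 0. ,  1. ]])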
"""
if norm not in ('l1', 'l2', 'max'):
raise ValueError("'%s' is not a supported norm" % norm)
if axis == 0:
sparse_format = 'csc'
elif axis == 1:
sparse_format = 'csr'
else:
raise ValueError("'%d' is not a supported axis" % axis)
X = check_array(X, sparse_format, copy=copy, warn_on_dtype=True,
estimator='the normalize function', dtype=FLOAT_DTYPES)
if axis == 0:
X = X.T
if sparse.issparse(X):
if norm == 'l1':
inplace_csr_row_normalize_l1(X)
elif norm == 'l2':
inplace_csr_row_normalize_l2(X)
elif norm == 'max':
_, norms = min_max_axis(X, 1)
norms = norms.repeat(np.diff(X.indptr))
mask = norms != 0
X.data[mask] /= norms[mask]
else:
if norm == 'l1':
norms = np.abs(X).sum(axis=1)
elif norm == 'l2':
norms = row_norms(X)
elif norm == 'max':
norms = np.max(X, axis=1)
norms = _handle_zeros_in_scale(norms)
X /= norms[:, np.newaxis]
if axis == 0:
X = X.T
return X
class Normalizer(BaseEstimator, TransformerMixin):
"""Normalize samples individually to unit norm.
Each sample (i.e. each row of the data matrix) with at least one
non zero component is rescaled independently of other samples so
that its norm (l1 or l2) equals one.
This transformer is able to work both with dense numpy arrays and
scipy.sparse matrix (use CSR format if you want to avoid the burden of
a copy / conversion).
Scaling inputs to unit norms is a common operation for text
classification or clustering for instance. For instance the dot
product of two l2-normalized TF-IDF vectors is the cosine similarity
of the vectors and is the base similarity metric for the Vector
Space Model commonly used by the Information Retrieval community.
Read more in the :ref:`User Guide <preprocessing_normalization>`.
Parameters
----------
norm : 'l1', 'l2', or 'max', optional ('l2' by default)
The norm to use to normalize each non zero sample.
copy : boolean, optional, default True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix).
Notes
-----
This estimator is stateless (besides constructor parameters), the
fit method does nothing but is useful when used in a pipeline.
See also
--------
:func:`sklearn.preprocessing.normalize` equivalent function
without the object oriented API
"""
def __init__(self, norm='l2', copy=True):
self.norm = norm
self.copy = copy
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
"""
X = check_array(X, accept_sparse='csr')
return self
def transform(self, X, y=None, copy=None):
"""Scale each non zero row of X to unit norm
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
The data to normalize, row by row. scipy.sparse matrices should be
in CSR format to avoid an un-necessary copy.
"""
copy = copy if copy is not None else self.copy
X = check_array(X, accept_sparse='csr')
return normalize(X, norm=self.norm, axis=1, copy=copy)
def binarize(X, threshold=0.0, copy=True):
"""Boolean thresholding of array-like or scipy.sparse matrix
Read more in the :ref:`User Guide <preprocessing_binarization>`.
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
The data to binarize, element by element.
scipy.sparse matrices should be in CSR or CSC format to avoid an
un-necessary copy.
threshold : float, optional (0.0 by default)
Feature values below or equal to this are replaced by 0, above it by 1.
Threshold may not be less than 0 for operations on sparse matrices.
copy : boolean, optional, default True
        set to False to perform inplace binarization and avoid a copy
        (if the input is already a numpy array or a scipy.sparse CSR / CSC
        matrix).
See also
--------
:class:`sklearn.preprocessing.Binarizer` to perform binarization
using the ``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`)
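    Examples
    --------
    A minimal illustrative sketch with the default threshold of 0.0 (strictly
    positive entries map to 1, everything else to 0):
    >>> import numpy as np
    >>> from sklearn.preprocessing import binarize
    >>> binarize(np.array([[ 1.5, -0.5,  0. ], [ 0.2,  0. ,  3. ]]))
    array([[ 1.,  0.,  0.],
           [ 1.,  0.,  1.]])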
"""
X = check_array(X, accept_sparse=['csr', 'csc'], copy=copy)
if sparse.issparse(X):
if threshold < 0:
raise ValueError('Cannot binarize a sparse matrix with threshold '
'< 0')
cond = X.data > threshold
not_cond = np.logical_not(cond)
X.data[cond] = 1
X.data[not_cond] = 0
X.eliminate_zeros()
else:
cond = X > threshold
not_cond = np.logical_not(cond)
X[cond] = 1
X[not_cond] = 0
return X
class Binarizer(BaseEstimator, TransformerMixin):
"""Binarize data (set feature values to 0 or 1) according to a threshold
Values greater than the threshold map to 1, while values less than
or equal to the threshold map to 0. With the default threshold of 0,
only positive values map to 1.
Binarization is a common operation on text count data where the
analyst can decide to only consider the presence or absence of a
feature rather than a quantified number of occurrences for instance.
It can also be used as a pre-processing step for estimators that
consider boolean random variables (e.g. modelled using the Bernoulli
distribution in a Bayesian setting).
Read more in the :ref:`User Guide <preprocessing_binarization>`.
Parameters
----------
threshold : float, optional (0.0 by default)
Feature values below or equal to this are replaced by 0, above it by 1.
Threshold may not be less than 0 for operations on sparse matrices.
copy : boolean, optional, default True
set to False to perform inplace binarization and avoid a copy (if
the input is already a numpy array or a scipy.sparse CSR matrix).
Notes
-----
If the input is a sparse matrix, only the non-zero values are subject
to update by the Binarizer class.
This estimator is stateless (besides constructor parameters), the
fit method does nothing but is useful when used in a pipeline.
"""
def __init__(self, threshold=0.0, copy=True):
self.threshold = threshold
self.copy = copy
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
"""
check_array(X, accept_sparse='csr')
return self
def transform(self, X, y=None, copy=None):
"""Binarize each element of X
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
The data to binarize, element by element.
scipy.sparse matrices should be in CSR format to avoid an
un-necessary copy.
"""
copy = copy if copy is not None else self.copy
return binarize(X, threshold=self.threshold, copy=copy)
class KernelCenterer(BaseEstimator, TransformerMixin):
"""Center a kernel matrix
Let K(x, z) be a kernel defined by phi(x)^T phi(z), where phi is a
    function mapping x to a Hilbert space. KernelCenterer centers (i.e.,
    normalizes to have zero mean) the data without explicitly computing phi(x).
It is equivalent to centering phi(x) with
sklearn.preprocessing.StandardScaler(with_std=False).
Read more in the :ref:`User Guide <kernel_centering>`.
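    Examples
    --------
    A minimal illustrative sketch: for a linear kernel of hypothetical data,
    centering the kernel matrix is equivalent to centering the features first.
    >>> import numpy as np
    >>> from sklearn.preprocessing import KernelCenterer
    >>> X = np.array([[ 1., 2.], [ 3., 4.]])
    >>> K = np.dot(X, X.T)  # uncentered linear kernel
    >>> KernelCenterer().fit_transform(K)
    array([[ 2., -2.],
           [-2.,  2.]])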
"""
def fit(self, K, y=None):
"""Fit KernelCenterer
Parameters
----------
K : numpy array of shape [n_samples, n_samples]
Kernel matrix.
Returns
-------
self : returns an instance of self.
"""
K = check_array(K)
n_samples = K.shape[0]
self.K_fit_rows_ = np.sum(K, axis=0) / n_samples
self.K_fit_all_ = self.K_fit_rows_.sum() / n_samples
return self
def transform(self, K, y=None, copy=True):
"""Center kernel matrix.
Parameters
----------
K : numpy array of shape [n_samples1, n_samples2]
Kernel matrix.
copy : boolean, optional, default True
Set to False to perform inplace computation.
Returns
-------
K_new : numpy array of shape [n_samples1, n_samples2]
"""
check_is_fitted(self, 'K_fit_all_')
K = check_array(K)
if copy:
K = K.copy()
K_pred_cols = (np.sum(K, axis=1) /
self.K_fit_rows_.shape[0])[:, np.newaxis]
K -= self.K_fit_rows_
K -= K_pred_cols
K += self.K_fit_all_
return K
def add_dummy_feature(X, value=1.0):
"""Augment dataset with an additional dummy feature.
This is useful for fitting an intercept term with implementations which
cannot otherwise fit it directly.
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
Data.
value : float
Value to use for the dummy feature.
Returns
-------
X : array or scipy.sparse matrix with shape [n_samples, n_features + 1]
Same data with dummy feature added as first column.
Examples
--------
>>> from sklearn.preprocessing import add_dummy_feature
>>> add_dummy_feature([[0, 1], [1, 0]])
array([[ 1., 0., 1.],
[ 1., 1., 0.]])
"""
X = check_array(X, accept_sparse=['csc', 'csr', 'coo'])
n_samples, n_features = X.shape
shape = (n_samples, n_features + 1)
if sparse.issparse(X):
if sparse.isspmatrix_coo(X):
# Shift columns to the right.
col = X.col + 1
# Column indices of dummy feature are 0 everywhere.
col = np.concatenate((np.zeros(n_samples), col))
# Row indices of dummy feature are 0, ..., n_samples-1.
row = np.concatenate((np.arange(n_samples), X.row))
# Prepend the dummy feature n_samples times.
data = np.concatenate((np.ones(n_samples) * value, X.data))
return sparse.coo_matrix((data, (row, col)), shape)
elif sparse.isspmatrix_csc(X):
# Shift index pointers since we need to add n_samples elements.
indptr = X.indptr + n_samples
# indptr[0] must be 0.
indptr = np.concatenate((np.array([0]), indptr))
# Row indices of dummy feature are 0, ..., n_samples-1.
indices = np.concatenate((np.arange(n_samples), X.indices))
# Prepend the dummy feature n_samples times.
data = np.concatenate((np.ones(n_samples) * value, X.data))
return sparse.csc_matrix((data, indices, indptr), shape)
else:
klass = X.__class__
return klass(add_dummy_feature(X.tocoo(), value))
else:
return np.hstack((np.ones((n_samples, 1)) * value, X))
def _transform_selected(X, transform, selected="all", copy=True):
"""Apply a transform function to portion of selected features
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Dense array or sparse matrix.
transform : callable
A callable transform(X) -> X_transformed
copy : boolean, optional
Copy X even if it could be avoided.
selected: "all" or array of indices or mask
Specify which features to apply the transform to.
Returns
-------
X : array or sparse matrix, shape=(n_samples, n_features_new)
"""
if selected == "all":
return transform(X)
X = check_array(X, accept_sparse='csc', copy=copy)
if len(selected) == 0:
return X
n_features = X.shape[1]
ind = np.arange(n_features)
sel = np.zeros(n_features, dtype=bool)
sel[np.asarray(selected)] = True
not_sel = np.logical_not(sel)
n_selected = np.sum(sel)
if n_selected == 0:
# No features selected.
return X
elif n_selected == n_features:
# All features selected.
return transform(X)
else:
X_sel = transform(X[:, ind[sel]])
X_not_sel = X[:, ind[not_sel]]
if sparse.issparse(X_sel) or sparse.issparse(X_not_sel):
return sparse.hstack((X_sel, X_not_sel))
else:
return np.hstack((X_sel, X_not_sel))
class OneHotEncoder(BaseEstimator, TransformerMixin):
"""Encode categorical integer features using a one-hot aka one-of-K scheme.
The input to this transformer should be a matrix of integers, denoting
the values taken on by categorical (discrete) features. The output will be
a sparse matrix where each column corresponds to one possible value of one
feature. It is assumed that input features take on values in the range
[0, n_values).
This encoding is needed for feeding categorical data to many scikit-learn
estimators, notably linear models and SVMs with the standard kernels.
Read more in the :ref:`User Guide <preprocessing_categorical_features>`.
Parameters
----------
n_values : 'auto', int or array of ints
Number of values per feature.
- 'auto' : determine value range from training data.
- int : maximum value for all features.
- array : maximum value per feature.
categorical_features: "all" or array of indices or mask
Specify what features are treated as categorical.
- 'all' (default): All features are treated as categorical.
- array of indices: Array of categorical feature indices.
- mask: Array of length n_features and with dtype=bool.
Non-categorical features are always stacked to the right of the matrix.
dtype : number type, default=np.float
Desired dtype of output.
sparse : boolean, default=True
Will return sparse matrix if set True else will return an array.
handle_unknown : str, 'error' or 'ignore'
        Whether to raise an error or ignore if an unknown categorical feature is
present during transform.
Attributes
----------
active_features_ : array
Indices for active features, meaning values that actually occur
in the training set. Only available when n_values is ``'auto'``.
feature_indices_ : array of shape (n_features,)
Indices to feature ranges.
Feature ``i`` in the original data is mapped to features
from ``feature_indices_[i]`` to ``feature_indices_[i+1]``
(and then potentially masked by `active_features_` afterwards)
n_values_ : array of shape (n_features,)
Maximum number of values per feature.
Examples
--------
Given a dataset with three features and two samples, we let the encoder
find the maximum value per feature and transform the data to a binary
one-hot encoding.
>>> from sklearn.preprocessing import OneHotEncoder
>>> enc = OneHotEncoder()
>>> enc.fit([[0, 0, 3], [1, 1, 0], [0, 2, 1], \
[1, 0, 2]]) # doctest: +ELLIPSIS
OneHotEncoder(categorical_features='all', dtype=<... 'float'>,
handle_unknown='error', n_values='auto', sparse=True)
>>> enc.n_values_
array([2, 3, 4])
>>> enc.feature_indices_
array([0, 2, 5, 9])
>>> enc.transform([[0, 1, 1]]).toarray()
array([[ 1., 0., 0., 1., 0., 0., 1., 0., 0.]])
See also
--------
sklearn.feature_extraction.DictVectorizer : performs a one-hot encoding of
dictionary items (also handles string-valued features).
sklearn.feature_extraction.FeatureHasher : performs an approximate one-hot
encoding of dictionary items or strings.
"""
def __init__(self, n_values="auto", categorical_features="all",
dtype=np.float, sparse=True, handle_unknown='error'):
self.n_values = n_values
self.categorical_features = categorical_features
self.dtype = dtype
self.sparse = sparse
self.handle_unknown = handle_unknown
def fit(self, X, y=None):
"""Fit OneHotEncoder to X.
Parameters
----------
X : array-like, shape=(n_samples, n_feature)
Input array of type int.
Returns
-------
self
"""
self.fit_transform(X)
return self
def _fit_transform(self, X):
"""Assumes X contains only categorical features."""
X = check_array(X, dtype=np.int)
if np.any(X < 0):
raise ValueError("X needs to contain only non-negative integers.")
n_samples, n_features = X.shape
if self.n_values == 'auto':
n_values = np.max(X, axis=0) + 1
elif isinstance(self.n_values, numbers.Integral):
if (np.max(X, axis=0) >= self.n_values).any():
raise ValueError("Feature out of bounds for n_values=%d"
% self.n_values)
n_values = np.empty(n_features, dtype=np.int)
n_values.fill(self.n_values)
else:
try:
n_values = np.asarray(self.n_values, dtype=int)
except (ValueError, TypeError):
raise TypeError("Wrong type for parameter `n_values`. Expected"
" 'auto', int or array of ints, got %r"
                                % type(self.n_values))
if n_values.ndim < 1 or n_values.shape[0] != X.shape[1]:
raise ValueError("Shape mismatch: if n_values is an array,"
" it has to be of shape (n_features,).")
self.n_values_ = n_values
n_values = np.hstack([[0], n_values])
indices = np.cumsum(n_values)
self.feature_indices_ = indices
column_indices = (X + indices[:-1]).ravel()
row_indices = np.repeat(np.arange(n_samples, dtype=np.int32),
n_features)
data = np.ones(n_samples * n_features)
out = sparse.coo_matrix((data, (row_indices, column_indices)),
shape=(n_samples, indices[-1]),
dtype=self.dtype).tocsr()
if self.n_values == 'auto':
mask = np.array(out.sum(axis=0)).ravel() != 0
active_features = np.where(mask)[0]
out = out[:, active_features]
self.active_features_ = active_features
return out if self.sparse else out.toarray()
def fit_transform(self, X, y=None):
"""Fit OneHotEncoder to X, then transform X.
Equivalent to self.fit(X).transform(X), but more convenient and more
efficient. See fit for the parameters, transform for the return value.
"""
return _transform_selected(X, self._fit_transform,
self.categorical_features, copy=True)
def _transform(self, X):
"""Assumes X contains only categorical features."""
X = check_array(X, dtype=np.int)
if np.any(X < 0):
raise ValueError("X needs to contain only non-negative integers.")
n_samples, n_features = X.shape
indices = self.feature_indices_
if n_features != indices.shape[0] - 1:
raise ValueError("X has different shape than during fitting."
" Expected %d, got %d."
% (indices.shape[0] - 1, n_features))
        # We use only those categorical features of X that are known from fit,
        # i.e. those less than n_values_, selected via the mask.
# This means, if self.handle_unknown is "ignore", the row_indices and
# col_indices corresponding to the unknown categorical feature are
# ignored.
mask = (X < self.n_values_).ravel()
if np.any(~mask):
if self.handle_unknown not in ['error', 'ignore']:
raise ValueError("handle_unknown should be either error or "
"unknown got %s" % self.handle_unknown)
if self.handle_unknown == 'error':
raise ValueError("unknown categorical feature present %s "
"during transform." % X[~mask])
column_indices = (X + indices[:-1]).ravel()[mask]
row_indices = np.repeat(np.arange(n_samples, dtype=np.int32),
n_features)[mask]
data = np.ones(np.sum(mask))
out = sparse.coo_matrix((data, (row_indices, column_indices)),
shape=(n_samples, indices[-1]),
dtype=self.dtype).tocsr()
if self.n_values == 'auto':
out = out[:, self.active_features_]
return out if self.sparse else out.toarray()
def transform(self, X):
"""Transform X using one-hot encoding.
Parameters
----------
X : array-like, shape=(n_samples, n_features)
Input array of type int.
Returns
-------
X_out : sparse matrix if sparse=True else a 2-d array, dtype=int
Transformed input.
"""
return _transform_selected(X, self._transform,
self.categorical_features, copy=True)
| bsd-3-clause |
mikebenfield/scikit-learn | examples/linear_model/plot_robust_fit.py | 147 | 3050 | """
Robust linear estimator fitting
===============================
Here a sine function is fit with a polynomial of order 3, for values
close to zero.
Robust fitting is demoed in different situations:
- No measurement errors, only modelling errors (fitting a sine with a
polynomial)
- Measurement errors in X
- Measurement errors in y
The mean squared error on non-corrupt new data is used to judge
the quality of the prediction.
What we can see is that:
- RANSAC is good for strong outliers in the y direction
- TheilSen is good for small outliers, both in the X and y directions, but has
  a break point above which it performs worse than OLS.
- The scores of HuberRegressor may not be compared directly to those of
  TheilSen and RANSAC because it does not attempt to completely filter out
  the outliers but only lessens their effect.
"""
from matplotlib import pyplot as plt
import numpy as np
from sklearn.linear_model import (
LinearRegression, TheilSenRegressor, RANSACRegressor, HuberRegressor)
from sklearn.metrics import mean_squared_error
from sklearn.preprocessing import PolynomialFeatures
from sklearn.pipeline import make_pipeline
np.random.seed(42)
X = np.random.normal(size=400)
y = np.sin(X)
# Make sure that X is 2D
X = X[:, np.newaxis]
X_test = np.random.normal(size=200)
y_test = np.sin(X_test)
X_test = X_test[:, np.newaxis]
y_errors = y.copy()
y_errors[::3] = 3
X_errors = X.copy()
X_errors[::3] = 3
y_errors_large = y.copy()
y_errors_large[::3] = 10
X_errors_large = X.copy()
X_errors_large[::3] = 10
estimators = [('OLS', LinearRegression()),
('Theil-Sen', TheilSenRegressor(random_state=42)),
('RANSAC', RANSACRegressor(random_state=42)),
('HuberRegressor', HuberRegressor())]
colors = {'OLS': 'turquoise', 'Theil-Sen': 'gold', 'RANSAC': 'lightgreen', 'HuberRegressor': 'black'}
linestyle = {'OLS': '-', 'Theil-Sen': '-.', 'RANSAC': '--', 'HuberRegressor': '--'}
lw = 3
x_plot = np.linspace(X.min(), X.max())
for title, this_X, this_y in [
('Modeling Errors Only', X, y),
('Corrupt X, Small Deviants', X_errors, y),
('Corrupt y, Small Deviants', X, y_errors),
('Corrupt X, Large Deviants', X_errors_large, y),
('Corrupt y, Large Deviants', X, y_errors_large)]:
plt.figure(figsize=(5, 4))
plt.plot(this_X[:, 0], this_y, 'b+')
for name, estimator in estimators:
model = make_pipeline(PolynomialFeatures(3), estimator)
model.fit(this_X, this_y)
mse = mean_squared_error(model.predict(X_test), y_test)
y_plot = model.predict(x_plot[:, np.newaxis])
plt.plot(x_plot, y_plot, color=colors[name], linestyle=linestyle[name],
linewidth=lw, label='%s: error = %.3f' % (name, mse))
    legend_title = 'Mean Squared Error\non Non-corrupt Data'
legend = plt.legend(loc='upper right', frameon=False, title=legend_title,
prop=dict(size='x-small'))
plt.xlim(-4, 10.2)
plt.ylim(-2, 10.2)
plt.title(title)
plt.show()
| bsd-3-clause |
tdegeus/GooseEYE | docs/examples/S2_mask.py | 1 | 5061 | r'''
Plot and/or check.
Usage:
script [options]
Options:
-s, --save Save output for later check.
-c, --check Check against earlier results.
-p, --plot Plot.
-h, --help Show this help.
'''
# <snippet>
import numpy as np
import GooseEYE
# generate image, extract 'volume-fraction' for plotting
I = GooseEYE.dummy_circles((500, 500))
phi = np.mean(I)
# 2-point probability
S2 = GooseEYE.S2((101, 101), I, I)
# define image with artefact and the corresponding mask
mask = np.zeros(I.shape, dtype='bool')
Ierr = np.array(I, copy=True)
mask[:150, :150] = 1
Ierr[:150, :150] = 1
# 2-point correlation on image with artefact (no mask)
S2err = GooseEYE.S2((101, 101), Ierr, Ierr)
# 2-point correlation on image with artefact, with artefact masked
S2mask = GooseEYE.S2((101, 101), Ierr, Ierr, fmask=mask, gmask=mask)
# </snippet>
if __name__ == '__main__':
import docopt
args = docopt.docopt(__doc__)
if args['--save']:
import h5py
with h5py.File('S2_mask.h5', 'w') as data:
data['I'] = I
data['Ierr'] = Ierr
data['mask'] = mask
data['S2'] = S2
data['S2err'] = S2err
data['S2mask'] = S2mask
if args['--check']:
import h5py
with h5py.File('S2_mask.h5', 'r') as data:
assert np.all(np.equal(data['I'][...], I))
assert np.all(np.equal(data['Ierr'][...], Ierr))
assert np.all(np.equal(data['mask'][...], mask))
assert np.allclose(data['S2'][...], S2)
assert np.allclose(data['S2err'][...], S2err)
assert np.allclose(data['S2mask'][...], S2mask)
if args['--plot']:
import matplotlib.pyplot as plt
import matplotlib as mpl
import matplotlib.cm as cm
from mpl_toolkits.axes_grid1 import make_axes_locatable
try:
plt.style.use(['goose', 'goose-latex'])
except:
pass
fig, axes = plt.subplots(figsize=(18, 12), nrows=2, ncols=3)
ax = axes[0, 0]
im = ax.imshow(I, clim=(0, 1), cmap=mpl.colors.ListedColormap(cm.gray([0, 255])))
ax.xaxis.set_ticks([0, 500])
ax.yaxis.set_ticks([0, 500])
ax.set_xlabel(r'$x$')
ax.set_ylabel(r'$y$')
ax.set_title(r'$\mathcal{I}$')
div = make_axes_locatable(ax)
cax = div.append_axes("right", size="5%", pad=0.1)
cbar = plt.colorbar(im, cax=cax)
cbar.set_ticks([0, 1])
ax = axes[0, 1]
im = ax.imshow(Ierr, clim=(0, 1), cmap=mpl.colors.ListedColormap(cm.gray([0, 255])))
ax.xaxis.set_ticks([0, 500])
ax.yaxis.set_ticks([0, 500])
ax.set_xlabel(r'$x$')
ax.set_ylabel(r'$y$')
ax.set_title(r'$\mathcal{I}$ (with artifact)')
div = make_axes_locatable(ax)
cax = div.append_axes("right", size="5%", pad=0.1)
cbar = plt.colorbar(im, cax=cax)
cbar.set_ticks([0, 1])
ax = axes[0, 2]
im = ax.imshow(mask, clim=(0, 1), cmap=mpl.colors.ListedColormap(cm.gray([0, 255])))
ax.xaxis.set_ticks([0, 500])
ax.yaxis.set_ticks([0, 500])
ax.set_xlabel(r'$x$')
ax.set_ylabel(r'$y$')
ax.set_title(r'mask')
div = make_axes_locatable(ax)
cax = div.append_axes("right", size="5%", pad=0.1)
cbar = plt.colorbar(im, cax=cax)
cbar.set_ticks([0, 1])
ax = axes[1, 0]
im = ax.imshow(S2, clim=(0, phi), cmap='jet', extent=(-50, 50, -50, 50))
ax.xaxis.set_ticks([-50, 0, +50])
ax.yaxis.set_ticks([-50, 0, +50])
ax.set_xlabel(r'$\Delta x$')
ax.set_ylabel(r'$\Delta y$')
ax.set_title(r'$S_2$')
div = make_axes_locatable(ax)
cax = div.append_axes("right", size="5%", pad=0.1)
cbar = plt.colorbar(im, cax=cax)
cbar.set_ticks ([0 , phi])
cbar.set_ticklabels(['0', r'$\varphi$'])
ax = axes[1, 1]
im = ax.imshow(S2err, clim=(0, phi), cmap='jet', extent=(-50, 50, -50, 50))
ax.xaxis.set_ticks([-50, 0, +50])
ax.yaxis.set_ticks([-50, 0, +50])
ax.set_xlabel(r'$\Delta x$')
ax.set_ylabel(r'$\Delta y$')
ax.set_title(r'$S_2$ (with artifact)')
div = make_axes_locatable(ax)
cax = div.append_axes("right", size="5%", pad=0.1)
cbar = plt.colorbar(im, cax=cax)
cbar.set_ticks ([0 , phi])
cbar.set_ticklabels(['0', r'$\varphi$'])
ax = axes[1, 2]
im = ax.imshow(S2mask, clim=(0, phi), cmap='jet', extent=(-50, 50, -50, 50))
ax.xaxis.set_ticks([-50, 0, +50])
ax.yaxis.set_ticks([-50, 0, +50])
ax.set_xlabel(r'$\Delta x$')
ax.set_ylabel(r'$\Delta y$')
ax.set_title(r'$S_2$ (masked artifact)')
div = make_axes_locatable(ax)
cax = div.append_axes("right", size="5%", pad=0.1)
cbar = plt.colorbar(im, cax=cax)
cbar.set_ticks ([0 , phi])
cbar.set_ticklabels(['0', r'$\varphi$'])
plt.savefig('S2_mask.svg')
| gpl-3.0 |
ronghanghu/vqa-mcb | train/multi_att_2_glove_vgg/visualize_tools.py | 1 | 10530 | import numpy as np
import matplotlib.pyplot as plt
import os
import sys
import json
import re
import shutil
from PIL import Image
from PIL import ImageFont, ImageDraw
import caffe
from caffe import layers as L
from caffe import params as P
from vqa_data_provider_layer import VQADataProvider
from vqa_data_provider_layer import VQADataProviderLayer
import config
sys.path.append(config.VQA_TOOLS_PATH)
sys.path.append(config.VQA_EVAL_TOOLS_PATH)
from vqaTools.vqa import VQA
from vqaEvaluation.vqaEval import VQAEval
def visualize_failures(stat_list,mode):
def save_qtype(qtype_list, save_filename, mode):
if mode == 'val':
savepath = os.path.join('./eval', save_filename)
# TODO
img_pre = '/home/dhpseth/vqa/02_tools/VQA/Images/val2014'
elif mode == 'test-dev':
savepath = os.path.join('./test-dev', save_filename)
# TODO
img_pre = '/home/dhpseth/vqa/02_tools/VQA/Images/test2015'
elif mode == 'test':
savepath = os.path.join('./test', save_filename)
# TODO
img_pre = '/home/dhpseth/vqa/02_tools/VQA/Images/test2015'
else:
raise Exception('Unsupported mode')
if os.path.exists(savepath): shutil.rmtree(savepath)
if not os.path.exists(savepath): os.makedirs(savepath)
for qt in qtype_list:
count = 0
for t_question in stat_list:
#print count, t_question
if count < 40/len(qtype_list):
t_question_list = t_question['q_list']
saveflag = False
#print 'debug****************************'
#print qt
#print t_question_list
#print t_question_list[0] == qt[0]
#print t_question_list[1] == qt[1]
if t_question_list[0] == qt[0] and t_question_list[1] == qt[1]:
saveflag = True
else:
saveflag = False
if saveflag == True:
t_iid = t_question['iid']
if mode == 'val':
t_img = Image.open(os.path.join(img_pre, \
'COCO_val2014_' + str(t_iid).zfill(12) + '.jpg'))
                        elif mode == 'test-dev' or mode == 'test':
t_img = Image.open(os.path.join(img_pre, \
'COCO_test2015_' + str(t_iid).zfill(12) + '.jpg'))
# for caption
#print t_iid
#annIds = caps.getAnnIds(t_iid)
#anns = caps.loadAnns(annIds)
#cap_list = [ann['caption'] for ann in anns]
ans_list = t_question['ans_list']
draw = ImageDraw.Draw(t_img)
for i in range(len(ans_list)):
try:
draw.text((10,10*i), str(ans_list[i]))
except:
pass
ans = t_question['answer']
pred = t_question['pred']
if ans == -1:
pre = ''
elif ans == pred:
pre = 'correct '
else:
pre = 'failure '
#print ' aaa ', ans, pred
ans = re.sub( '/', ' ', str(ans))
pred = re.sub( '/', ' ', str(pred))
img_title = pre + str(' '.join(t_question_list)) + '. a_' + \
str(ans) + ' p_' + str(pred) + '.png'
count += 1
print((os.path.join(savepath,img_title)))
t_img.save(os.path.join(savepath,img_title))
    print('saving colors')
qt_color_list = [['what','color']]
save_qtype(qt_color_list, 'colors', mode)
print('saving whatis')
qt_whatis_list = [['what','is'],['what','kind'],['what','are']]
save_qtype(qt_whatis_list, 'whatis', mode)
print('saving is')
qt_is_list = [['is','the'], ['is','this'],['is','there']]
save_qtype(qt_is_list, 'is', mode)
print('saving how many')
qt_howmany_list =[['how','many']]
save_qtype(qt_howmany_list, 'howmany', mode)
def exec_validation(device_id, mode, it='', visualize=False):
caffe.set_device(device_id)
caffe.set_mode_gpu()
net = caffe.Net('./result/proto_test.prototxt',\
'./result/tmp.caffemodel',\
caffe.TEST)
dp = VQADataProvider(mode=mode,batchsize=config.VAL_BATCH_SIZE)
total_questions = len(dp.getQuesIds())
epoch = 0
pred_list = []
testloss_list = []
stat_list = []
while epoch == 0:
t_word, t_cont, t_img_feature, t_answer, t_glove_matrix, t_qid_list, t_iid_list, epoch = dp.get_batch_vec()
net.blobs['data'].data[...] = np.transpose(t_word,(1,0))
net.blobs['cont'].data[...] = np.transpose(t_cont,(1,0))
net.blobs['img_feature_raw'].data[...] = t_img_feature
net.blobs['label'].data[...] = t_answer
net.blobs['glove'].data[...] = np.transpose(t_glove_matrix, (1,0,2))
net.forward()
t_pred_list = net.blobs['prediction'].data.argmax(axis=1)
t_pred_str = [dp.vec_to_answer(pred_symbol) for pred_symbol in t_pred_list]
testloss_list.append(net.blobs['loss'].data)
for qid, iid, ans, pred in zip(t_qid_list, t_iid_list, t_answer.tolist(), t_pred_str):
pred_list.append({'answer':pred, 'question_id': int(dp.getStrippedQuesId(qid))})
if visualize:
q_list = dp.seq_to_list(dp.getQuesStr(qid))
                if mode == 'test-dev' or mode == 'test':
ans_str = ''
ans_list = ['']*10
else:
ans_str = dp.vec_to_answer(ans)
ans_list = [ dp.getAnsObj(qid)[i]['answer'] for i in range(10)]
stat_list.append({\
'qid' : qid,
'q_list' : q_list,
'iid' : iid,
'answer': ans_str,
'ans_list': ans_list,
'pred' : pred })
percent = 100 * float(len(pred_list)) / total_questions
sys.stdout.write('\r' + ('%.2f' % percent) + '%')
sys.stdout.flush()
mean_testloss = np.array(testloss_list).mean()
if mode == 'val':
valFile = './result/val2015_resfile'
with open(valFile, 'w') as f:
json.dump(pred_list, f)
if visualize:
visualize_failures(stat_list,mode)
annFile = config.DATA_PATHS['val']['ans_file']
quesFile = config.DATA_PATHS['val']['ques_file']
vqa = VQA(annFile, quesFile)
vqaRes = vqa.loadRes(valFile, quesFile)
vqaEval = VQAEval(vqa, vqaRes, n=2)
vqaEval.evaluate()
acc_overall = vqaEval.accuracy['overall']
acc_perQuestionType = vqaEval.accuracy['perQuestionType']
acc_perAnswerType = vqaEval.accuracy['perAnswerType']
return mean_testloss, acc_overall, acc_perQuestionType, acc_perAnswerType
elif mode == 'test-dev':
filename = './result/vqa_OpenEnded_mscoco_test-dev2015_v3t'+str(it).zfill(8)+'_results'
with open(filename+'.json', 'w') as f:
json.dump(pred_list, f)
if visualize:
visualize_failures(stat_list,mode)
elif mode == 'test':
filename = './result/vqa_OpenEnded_mscoco_test2015_v3c'+str(it).zfill(8)+'_results'
with open(filename+'.json', 'w') as f:
json.dump(pred_list, f)
if visualize:
visualize_failures(stat_list,mode)
def drawgraph(results, save_question_type_graphs=False):
# 0:it
# 1:trainloss
# 2:testloss
# 3:oa_acc
# 4:qt_acc
# 5:at_acc
# training curve
it = np.array([l[0] for l in results])
loss = np.array([l[1] for l in results])
valloss = np.array([l[2] for l in results])
valacc = np.array([l[3] for l in results])
fig = plt.figure()
ax1 = fig.add_subplot(111)
ax2 = ax1.twinx()
ax1.plot(it,loss, color='blue', label='train loss')
ax1.plot(it,valloss, '--', color='blue', label='test loss')
ax2.plot(it,valacc, color='red', label='acc on val')
plt.legend(loc='lower left')
ax1.set_xlabel('Iterations')
ax1.set_ylabel('Loss Value')
ax2.set_ylabel('Accuracy on Val [%]')
plt.savefig('./learning_curve max_%2.2f.png'%valacc.max())
plt.clf()
plt.close("all")
# question type
it = np.array([l[0] for l in results])
oa_acc = np.array([l[3] for l in results])
qt_dic_list = [l[4] for l in results]
def draw_qt_acc(target_key_list, figname):
fig = plt.figure()
for k in target_key_list:
print((k,type(k)))
t_val = np.array([ qt_dic[k] for qt_dic in qt_dic_list])
plt.plot(it,t_val,label=str(k))
plt.legend(fontsize='small')
plt.ylim(0,100.)
#plt.legend(prop={'size':6})
plt.xlabel('Iterations')
plt.ylabel('Accuracy on Val [%]')
plt.savefig(figname,dpi=200)
plt.clf()
plt.close("all")
if save_question_type_graphs:
s_keys = sorted(qt_dic_list[0].keys())
draw_qt_acc(s_keys[ 0:13]+[s_keys[31],], './ind_qt_are.png')
draw_qt_acc(s_keys[13:17]+s_keys[49:], './ind_qt_how_where_who_why.png')
draw_qt_acc(s_keys[17:31]+[s_keys[32],], './ind_qt_is.png')
draw_qt_acc(s_keys[33:49], './ind_qt_what.png')
draw_qt_acc(['what color is the','what color are the','what color is',\
'what color','what is the color of the'],'./qt_color.png')
draw_qt_acc(['how many','how','how many people are',\
'how many people are in'],'./qt_number.png')
draw_qt_acc(['who is','why','why is the','where is the','where are the',\
'which'],'./qt_who_why_where_which.png')
draw_qt_acc(['what is the man','is the man','are they','is he',\
'is the woman','is this person','what is the woman','is the person',\
'what is the person'],'./qt_human.png')
| bsd-2-clause |
unicefuganda/uSurvey | survey/utils/query_helper.py | 1 | 2270 | import re
from cacheops import cached_as
import pandas as pd
from django.db import connection
from django.db.models import Q
def normalize_query(query_string,
findterms=re.compile(r'"([^"]+)"|(\S+)').findall,
normspace=re.compile(r'\s{2,}').sub):
    ''' Splits the query string into individual keywords, getting rid of unnecessary spaces
    and grouping quoted words together.
Example:
>>> normalize_query(' some random words "with quotes " and spaces')
['some', 'random', 'words', 'with quotes', 'and', 'spaces']
see http://julienphalip.com/post/2825034077/adding-search-to-a-django-site-in-a-snap for more info
'''
return [normspace(' ', (t[0] or t[1]).strip())
for t in findterms(query_string)]
def _get_query(query_string, search_fields):
''' Returns a query, that is a combination of Q objects. That combination
aims to search keywords within a model by testing the given search fields.
see http://julienphalip.com/post/2825034077/adding-search-to-a-django-site-in-a-snap for more info
'''
query = None # Query to search for every search term
terms = normalize_query(query_string)
for term in terms:
or_query = None # Query to search for a given term in each field
for field_name in search_fields:
q = Q(**{"%s__icontains" % field_name: term})
if or_query is None:
or_query = q
else:
or_query = or_query | q
if query is None:
query = or_query
else:
query = query & or_query
return query
def get_filterset(objectset, query_string, search_fields):
    '''Returns a filtered queryset by applying the query string to each search field.
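    Example (hypothetical ``Survey`` model and field names):
        get_filterset(Survey.objects.all(), 'baseline "south west"',
                      ['name', 'description'])
    keeps only the surveys whose name or description contains both the
    keyword "baseline" and the exact phrase "south west".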
'''
if query_string:
query = _get_query(query_string, search_fields)
return objectset.filter(query).distinct()
return objectset.distinct()
def to_df(queryset, date_cols=[]):
@cached_as(queryset)
def _to_df(queryset, date_cols):
query, params = queryset.query.sql_with_params()
return pd.io.sql.read_sql_query(
query, connection, params=params, parse_dates=date_cols)
return _to_df(queryset, date_cols)
| bsd-3-clause |
songjs1993/DeepLearning | 3CNN/CNN_LeNet_5_cifar.py | 1 | 12165 | # Auther: Alan
"""
Apply LeNet-5 to the CIFAR dataset.
"""
import tensorflow as tf
import random
import os
import scipy.io as sio
import matplotlib.pyplot as plt  # plt is used to display images
import matplotlib.image as mpimg  # mpimg is used to read images
import numpy as np
# import Image
from PIL import Image
global max_row, max_col
def weight_variable(shape):
initial = tf.truncated_normal(shape, stddev=0.1)
return tf.Variable(initial)
def bias_variable(shape):
    # This could equally well be replaced by a plain array, e.g. tf.zeros(units[1])
initial = tf.constant(0.1, shape=shape)
return tf.Variable(initial)
def conv2d(x, W):
    # strides is the step size along each dimension
return tf.nn.conv2d(x, W, strides=[1,1,1,1], padding="SAME")
def max_pool_2x2(x):
    # ksize is the pooling window size; the leading 1 and trailing 1 are the batch and channel
    # dimensions (pooling is not applied across different batches here, so both are set to 1)
    # Side note: for another task -- deciding whether two images show the same person -- the two
    # images could be treated as different channels and pooled together
return tf.nn.max_pool(x, ksize=[1,2,2,1], strides=[1,2,2,1], padding="SAME")
def max_pool_3x3(x):
return tf.nn.max_pool(x, ksize=[1,3,3,1], strides=[1,3,3,1], padding="SAME")
def max_pool_5x5(x):
return tf.nn.max_pool(x, ksize=[1,5,5,1], strides=[1,5,5,1], padding="SAME")
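# A shape sketch for the helpers above (illustrative only; the concrete sizes
# assume the CIFAR-style 32x32x3 input used in CNN_LeNet_5 further down):
#   h = conv2d(x_image, weight_variable([5, 5, 3, 32]))           # SAME padding keeps 32x32 -> [batch, 32, 32, 32]
#   h = max_pool_2x2(h)                                           # 2x2/stride-2 pooling halves it -> [batch, 16, 16, 32]
#   h = max_pool_2x2(conv2d(h, weight_variable([5, 5, 32, 64])))  # second conv+pool -> [batch, 8, 8, 64]
# which is why the fully connected layer in CNN_LeNet_5 reshapes to 8*8*64 features.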
def CNN_LeNet_5_Mnist(logs_path):
"""
    Test LeNet-5 on the MNIST dataset.
:return:
"""
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
# print(mnist)
x = tf.placeholder(tf.float32, [None, 784])
y_ = tf.placeholder(tf.float32, [None, 10])
    x_image = tf.reshape(x, [-1,28,28,1])  # reshape the flat vector back into an image; the last dimension is the channel count
    # The first two arguments give the kernel (patch) size, the third is the number of input channels,
    # and the fourth is the number of kernels, i.e. how many convolutional feature maps are produced
W_conv1 = weight_variable([5, 5, 1, 32])
b_conv1 = bias_variable([32])
h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
h_pool1 = max_pool_2x2(h_conv1)
    W_conv2 = weight_variable([5, 5, 32, 64])  # multi-channel convolution producing 64 feature maps
b_conv2 = bias_variable([64])
h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
h_pool2 = max_pool_2x2(h_conv2)
W_fc1 = weight_variable([7*7*64, 1024])
b_fc1 = bias_variable([1024])
h_pool2_flat = tf.reshape(h_pool2, [-1, 7*7*64])
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
keep_prob = tf.placeholder(tf.float32)
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
W_fc2 = weight_variable([1024, 10])
b_fc2 = bias_variable([10])
y_conv = tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)
cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y_conv), reduction_indices=[1]))
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
tf.summary.scalar("cross_entropy", cross_entropy)
correct_prediction = tf.equal(tf.arg_max(y_conv, 1), tf.arg_max(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
merged_summary_op = tf.summary.merge_all()
    # Initialize variables
init_op = tf.global_variables_initializer()
    # Start training
sess = tf.Session()
sess.run(init_op)
# iterate
    # Xtrain, ytrain = get_batch(self.args, self.simrank, self.walks, minibatch * 100, self.tem_simrank)  # use a larger dataset to check the effect
summary_writer = tf.summary.FileWriter(logs_path, graph=tf.get_default_graph())
# for i in range((int)(20000)):
    num_examples = 12800*2  # set manually for now
minibatch = 128
for epoch in range(20):
print("iter:", epoch)
avg_cost = 0.
total_batch = int(num_examples / minibatch)
# Loop over all batches
for i in range(total_batch):
batchs = mnist.train.next_batch(minibatch)
batch_xs, batch_ys = batchs[0], batchs[1]
# batch_xs, batch_ys = next_batch(self.args, self.simrank, self.walks, minibatch, self.tem_simrank,
# num_examples)
# and summary nodes
_, c, summary = sess.run([train_step, cross_entropy, merged_summary_op], feed_dict={x: batch_xs, y_: batch_ys, keep_prob: 0.5})
# Write logs at every iteration
summary_writer.add_summary(summary, epoch * total_batch + i)
# Compute average loss
avg_cost += c / total_batch
if (i % 10 == 0):
print("i:", i, " current c:", c, " ave_cost:", avg_cost)
# Display logs per epoch step
# if (epoch + 1) % display_step == 0:
print("Epoch:", '%04d' % (epoch + 1), "cost=", "{:.9f}".format(avg_cost))
        # periodically evaluate on the test set and print accuracy
if epoch%1==0:
batchs = mnist.train.next_batch(minibatch)
print("test accuracy %g" % sess.run(accuracy, feed_dict={
x: mnist.test.images, y_: mnist.test.labels, keep_prob: 1.0}))
# x: batchs[0], y_: batchs[1], keep_prob: 1.0}))
def get_one_hot(label, num):
y = []
    for i in range(num): # build a one-hot vector of length num
if i == label:
y.append(1.0)
else:
y.append(0.0)
return y
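# Illustrative usage added for exposition (not part of the original script):
# get_one_hot(label, num) returns a one-hot list of length num, e.g.
# get_one_hot(2, 5) -> [0.0, 0.0, 1.0, 0.0, 0.0].
def _demo_get_one_hot():
    assert get_one_hot(2, 5) == [0.0, 0.0, 1.0, 0.0, 0.0]
    assert get_one_hot(0, 3) == [1.0, 0.0, 0.0]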
from scipy.io import loadmat
def read_data_cifar(train_file, test_file):
"""
    Load the CIFAR train/test datasets from .mat files.
    :param train_file:
    :param test_file:
:return:
"""
f1 = loadmat(train_file)
f2 = loadmat(test_file)
train_x = f1["data"]
train_y_ = f1["fine_labels"]
test_x = f2["data"]
test_y_ = f2["fine_labels"]
    # convert the integer labels to one-hot vectors
train_y = []
for train in train_y_:
y = []
for i in range(100):
if i == int(train)-1:
y.append(1.0)
else:
y.append(0.0)
train_y.append(y)
test_y = []
for test in test_y_:
y = []
for i in range(100):
if i == int(test) - 1:
y.append(1.0)
else:
y.append(0.0)
test_y.append(y)
train_y = np.array(train_y)
test_y = np.array(test_y)
print(train_x.shape, train_y.shape, test_x.shape, test_y.shape)
return train_x/255.0, train_y, test_x/255.0, test_y
def CNN_LeNet_5(train_file, test_file, log_path):
trainX, trainY, testX, testY = read_data_cifar(train_file, test_file)
print("trainX.shape: ", trainX.shape, trainY.shape, testX.shape, testY.shape)
    # Build the network
x = tf.placeholder(tf.float32, [None, 1024*3])
y_ = tf.placeholder(tf.float32, [None, 100])
    x_image = tf.reshape(x, [-1,32,32,3]) # reshape the flat vector into an image; the last dimension is the channel count
    # The first two arguments give the kernel (patch) size, the third is the number of input
    # channels, and the fourth is the number of kernels, i.e. how many feature maps are produced.
W_conv1 = weight_variable([5, 5, 3, 32])
b_conv1 = bias_variable([32])
h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
h_pool1 = max_pool_2x2(h_conv1)
    W_conv2 = weight_variable([5, 5, 32, 64]) # multi-channel convolution producing 64 feature maps
b_conv2 = bias_variable([64])
h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
h_pool2 = max_pool_2x2(h_conv2)
W_fc1 = weight_variable([8*8*64, 1024])
b_fc1 = bias_variable([1024])
h_pool2_flat = tf.reshape(h_pool2, [-1, 8*8*64])
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
keep_prob = tf.placeholder(tf.float32)
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
W_fc2 = weight_variable([1024, 100])
b_fc2 = bias_variable([100])
y_conv = tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)
# cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=y_conv, labels=y_))
cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y_conv+1e-10), reduction_indices=[1]))
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
# tf.summary.scalar("cross_entropy", cross_entropy)
correct_prediction = tf.equal(tf.arg_max(y_conv, 1), tf.arg_max(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
merged_summary_op = tf.summary.merge_all()
    # Initialize variables
init_op = tf.global_variables_initializer()
# summary_writer = tf.summary.FileWriter(log_path, graph=tf.get_default_graph())
    # Start training
drops = [1.0, 0.8, 0.6, 0.4, 0.2]
for i in range(len(drops)):
drop = drops[i]
log_path = log_path + str(i)
print("log_path: ", log_path, " drop:", drop)
sess = tf.Session()
sess.run(init_op)
# iterate
        # Xtrain, ytrain = get_batch(self.args, self.simrank, self.walks, minibatch * 100, self.tem_simrank) # use a larger dataset to test the effect
# for i in range((int)(20000)):
num_examples = trainX.shape[0]
minibatch = 128
maxc = -1.0
for epoch in range(100):
print("iter:", epoch)
avg_cost = 0.
total_batch = int(num_examples / minibatch)
# Loop over all batches
for i in range(total_batch):
batch_xs, batch_ys = next_batch(trainX, trainY, minibatch, num_examples)
# print(type(batch_xs),type(batch_ys))
# print(batch_xs.shape, batch_ys.shape)
# print(batch_xs[0])
# and summary nodes
# print(sess.run(h_pool4, feed_dict={x: batch_xs, y_: batch_ys, keep_prob: 0.8}))
# print(sess.run(y_conv, feed_dict={x: batch_xs, y_: batch_ys, keep_prob: 0.8}))
# print(sess.run(cross_entropy, feed_dict={x: batch_xs, y_: batch_ys, keep_prob: 0.8}))
# return
# _, c, summary = sess.run([train_step, cross_entropy, merged_summary_op],feed_dict={x: batch_xs, y_: batch_ys, keep_prob: drop})
_, c = sess.run([train_step, cross_entropy],
feed_dict={x: batch_xs, y_: batch_ys, keep_prob: drop})
# Write logs at every iteration
# summary_writer.add_summary(summary, epoch * total_batch + i)
# Compute average loss
avg_cost += c / total_batch
if (i % 50 == 0):
print("i:", i, " current c:", c, " ave_cost:", avg_cost)
if i % 500 == 0:
# batchs = mnist.train.next_batch(minibatch)
print("test accuracy %g" % sess.run(accuracy, feed_dict={
x: testX, y_: testY, keep_prob: 1.0}))
# Display logs per epoch step
print("Epoch:", '%04d' % (epoch + 1), "cost=", "{:.9f}".format(avg_cost))
            # periodically evaluate on the test set and print accuracy
if epoch % 1 == 0:
# batchs = mnist.train.next_batch(minibatch)
acc = sess.run(accuracy, feed_dict={x: testX, y_: testY, keep_prob: 1.0})
if acc > maxc:
maxc = acc
print("test accuracy %g" % sess.run(accuracy, feed_dict={
x: testX, y_: testY, keep_prob: 1.0}))
# x: batchs[0], y_: batchs[1], keep_prob: 1.0}))
print("====================================================================")
sess.close()
print("max acc: ", maxc)
print("finish!")
print("finish all!")
def next_batch(trainX, trainY, minibatch, num_examples):
locations = random.sample([i for i in range(num_examples)], minibatch)
batch_xs = trainX[locations]
batch_ys = trainY[locations]
return batch_xs, batch_ys
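# Illustrative usage added for exposition (not part of the original script): next_batch
# samples `minibatch` rows without replacement; the toy arrays below are made up purely
# to show the expected shapes.
def _demo_next_batch():
    toy_x = np.arange(20).reshape(10, 2)
    toy_y = np.arange(10).reshape(10, 1)
    batch_x, batch_y = next_batch(toy_x, toy_y, 4, toy_x.shape[0])
    assert batch_x.shape == (4, 2) and batch_y.shape == (4, 1)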
if __name__ =="__main__":
    # Evaluate LeNet-5 on the MNIST dataset
    # CNN_LeNet_5_Mnist("./CNN/minist")
    # Apply LeNet-5 to the CIFAR dataset
    CNN_LeNet_5("./cifar_data/train.mat","./cifar_data/test.mat","./CNN/cifar")
    # TODO: try deepening the LeNet architecture to 5 convolutional layers and compare
    # results, using the default dropout ratio of 0.4
| apache-2.0 |
MatthieuBizien/scikit-learn | sklearn/utils/validation.py | 15 | 25983 | """Utilities for input validation"""
# Authors: Olivier Grisel
# Gael Varoquaux
# Andreas Mueller
# Lars Buitinck
# Alexandre Gramfort
# Nicolas Tresegnie
# License: BSD 3 clause
import warnings
import numbers
import numpy as np
import scipy.sparse as sp
from ..externals import six
from ..utils.fixes import signature
from .deprecation import deprecated
from ..exceptions import DataConversionWarning as _DataConversionWarning
from ..exceptions import NonBLASDotWarning as _NonBLASDotWarning
from ..exceptions import NotFittedError as _NotFittedError
@deprecated("DataConversionWarning has been moved into the sklearn.exceptions"
" module. It will not be available here from version 0.19")
class DataConversionWarning(_DataConversionWarning):
pass
@deprecated("NonBLASDotWarning has been moved into the sklearn.exceptions"
" module. It will not be available here from version 0.19")
class NonBLASDotWarning(_NonBLASDotWarning):
pass
@deprecated("NotFittedError has been moved into the sklearn.exceptions module."
" It will not be available here from version 0.19")
class NotFittedError(_NotFittedError):
pass
FLOAT_DTYPES = (np.float64, np.float32, np.float16)
# Silenced by default to reduce verbosity. Turn on at runtime for
# performance profiling.
warnings.simplefilter('ignore', _NonBLASDotWarning)
def _assert_all_finite(X):
"""Like assert_all_finite, but only for ndarray."""
X = np.asanyarray(X)
# First try an O(n) time, O(1) space solution for the common case that
# everything is finite; fall back to O(n) space np.isfinite to prevent
# false positives from overflow in sum method.
if (X.dtype.char in np.typecodes['AllFloat'] and not np.isfinite(X.sum())
and not np.isfinite(X).all()):
raise ValueError("Input contains NaN, infinity"
" or a value too large for %r." % X.dtype)
def assert_all_finite(X):
"""Throw a ValueError if X contains NaN or infinity.
Input MUST be an np.ndarray instance or a scipy.sparse matrix."""
_assert_all_finite(X.data if sp.issparse(X) else X)
def as_float_array(X, copy=True, force_all_finite=True):
"""Converts an array-like to an array of floats
The new dtype will be np.float32 or np.float64, depending on the original
type. The function can create a copy or modify the argument depending
on the argument copy.
Parameters
----------
X : {array-like, sparse matrix}
copy : bool, optional
If True, a copy of X will be created. If False, a copy may still be
returned if X's dtype is not a floating point type.
force_all_finite : boolean (default=True)
Whether to raise an error on np.inf and np.nan in X.
Returns
-------
XT : {array, sparse matrix}
An array of type np.float
"""
if isinstance(X, np.matrix) or (not isinstance(X, np.ndarray)
and not sp.issparse(X)):
return check_array(X, ['csr', 'csc', 'coo'], dtype=np.float64,
copy=copy, force_all_finite=force_all_finite,
ensure_2d=False)
elif sp.issparse(X) and X.dtype in [np.float32, np.float64]:
return X.copy() if copy else X
elif X.dtype in [np.float32, np.float64]: # is numpy array
return X.copy('F' if X.flags['F_CONTIGUOUS'] else 'C') if copy else X
else:
return X.astype(np.float32 if X.dtype == np.int32 else np.float64)
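# Illustrative sketch (not part of the upstream module): integer input is upcast to a
# float dtype (int32 -> float32, other ints -> float64), while float arrays are passed
# through unchanged when copy=False.
def _example_as_float_array():
    ints = np.array([[1, 2], [3, 4]], dtype=np.int32)
    assert as_float_array(ints).dtype == np.float32
    floats = np.array([[1.0, 2.0]], dtype=np.float32)
    assert as_float_array(floats, copy=False) is floats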
def _is_arraylike(x):
"""Returns whether the input is array-like"""
return (hasattr(x, '__len__') or
hasattr(x, 'shape') or
hasattr(x, '__array__'))
def _num_samples(x):
"""Return number of samples in array-like x."""
if hasattr(x, 'fit'):
# Don't get num_samples from an ensembles length!
raise TypeError('Expected sequence or array-like, got '
'estimator %s' % x)
if not hasattr(x, '__len__') and not hasattr(x, 'shape'):
if hasattr(x, '__array__'):
x = np.asarray(x)
else:
raise TypeError("Expected sequence or array-like, got %s" %
type(x))
if hasattr(x, 'shape'):
if len(x.shape) == 0:
raise TypeError("Singleton array %r cannot be considered"
" a valid collection." % x)
return x.shape[0]
else:
return len(x)
def _shape_repr(shape):
"""Return a platform independent representation of an array shape
Under Python 2, the `long` type introduces an 'L' suffix when using the
default %r format for tuples of integers (typically used to store the shape
of an array).
Under Windows 64 bit (and Python 2), the `long` type is used by default
in numpy shapes even when the integer dimensions are well below 32 bit.
The platform specific type causes string messages or doctests to change
from one platform to another which is not desirable.
Under Python 3, there is no more `long` type so the `L` suffix is never
introduced in string representation.
>>> _shape_repr((1, 2))
'(1, 2)'
>>> one = 2 ** 64 / 2 ** 64 # force an upcast to `long` under Python 2
>>> _shape_repr((one, 2 * one))
'(1, 2)'
>>> _shape_repr((1,))
'(1,)'
>>> _shape_repr(())
'()'
"""
if len(shape) == 0:
return "()"
joined = ", ".join("%d" % e for e in shape)
if len(shape) == 1:
# special notation for singleton tuples
joined += ','
return "(%s)" % joined
def check_consistent_length(*arrays):
"""Check that all arrays have consistent first dimensions.
Checks whether all objects in arrays have the same shape or length.
Parameters
----------
*arrays : list or tuple of input objects.
Objects that will be checked for consistent length.
"""
uniques = np.unique([_num_samples(X) for X in arrays if X is not None])
if len(uniques) > 1:
raise ValueError("Found arrays with inconsistent numbers of samples: "
"%s" % str(uniques))
def indexable(*iterables):
"""Make arrays indexable for cross-validation.
Checks consistent length, passes through None, and ensures that everything
can be indexed by converting sparse matrices to csr and converting
    non-iterable objects to arrays.
Parameters
----------
*iterables : lists, dataframes, arrays, sparse matrices
List of objects to ensure sliceability.
"""
result = []
for X in iterables:
if sp.issparse(X):
result.append(X.tocsr())
elif hasattr(X, "__getitem__") or hasattr(X, "iloc"):
result.append(X)
elif X is None:
result.append(X)
else:
result.append(np.array(X))
check_consistent_length(*result)
return result
def _ensure_sparse_format(spmatrix, accept_sparse, dtype, copy,
force_all_finite):
"""Convert a sparse matrix to a given format.
Checks the sparse format of spmatrix and converts if necessary.
Parameters
----------
spmatrix : scipy sparse matrix
Input to validate and convert.
accept_sparse : string, list of string or None (default=None)
String[s] representing allowed sparse matrix formats ('csc',
'csr', 'coo', 'dok', 'bsr', 'lil', 'dia'). None means that sparse
matrix input will raise an error. If the input is sparse but not in
the allowed format, it will be converted to the first listed format.
    dtype : string, type or None (default=None)
Data type of result. If None, the dtype of the input is preserved.
copy : boolean (default=False)
Whether a forced copy will be triggered. If copy=False, a copy might
be triggered by a conversion.
force_all_finite : boolean (default=True)
Whether to raise an error on np.inf and np.nan in X.
Returns
-------
spmatrix_converted : scipy sparse matrix.
Matrix that is ensured to have an allowed type.
"""
if accept_sparse in [None, False]:
raise TypeError('A sparse matrix was passed, but dense '
'data is required. Use X.toarray() to '
'convert to a dense numpy array.')
if dtype is None:
dtype = spmatrix.dtype
changed_format = False
if (isinstance(accept_sparse, (list, tuple))
and spmatrix.format not in accept_sparse):
# create new with correct sparse
spmatrix = spmatrix.asformat(accept_sparse[0])
changed_format = True
if dtype != spmatrix.dtype:
# convert dtype
spmatrix = spmatrix.astype(dtype)
elif copy and not changed_format:
# force copy
spmatrix = spmatrix.copy()
if force_all_finite:
if not hasattr(spmatrix, "data"):
warnings.warn("Can't check %s sparse matrix for nan or inf."
% spmatrix.format)
else:
_assert_all_finite(spmatrix.data)
return spmatrix
def check_array(array, accept_sparse=None, dtype="numeric", order=None,
copy=False, force_all_finite=True, ensure_2d=True,
allow_nd=False, ensure_min_samples=1, ensure_min_features=1,
warn_on_dtype=False, estimator=None):
"""Input validation on an array, list, sparse matrix or similar.
By default, the input is converted to an at least 2D numpy array.
If the dtype of the array is object, attempt converting to float,
raising on failure.
Parameters
----------
array : object
Input object to check / convert.
accept_sparse : string, list of string or None (default=None)
String[s] representing allowed sparse matrix formats, such as 'csc',
'csr', etc. None means that sparse matrix input will raise an error.
If the input is sparse but not in the allowed format, it will be
converted to the first listed format.
dtype : string, type, list of types or None (default="numeric")
Data type of result. If None, the dtype of the input is preserved.
If "numeric", dtype is preserved unless array.dtype is object.
If dtype is a list of types, conversion on the first type is only
performed if the dtype of the input is not in the list.
order : 'F', 'C' or None (default=None)
Whether an array will be forced to be fortran or c-style.
When order is None (default), then if copy=False, nothing is ensured
about the memory layout of the output array; otherwise (copy=True)
the memory layout of the returned array is kept as close as possible
to the original array.
copy : boolean (default=False)
Whether a forced copy will be triggered. If copy=False, a copy might
be triggered by a conversion.
force_all_finite : boolean (default=True)
Whether to raise an error on np.inf and np.nan in X.
ensure_2d : boolean (default=True)
Whether to make X at least 2d.
allow_nd : boolean (default=False)
Whether to allow X.ndim > 2.
ensure_min_samples : int (default=1)
Make sure that the array has a minimum number of samples in its first
axis (rows for a 2D array). Setting to 0 disables this check.
ensure_min_features : int (default=1)
Make sure that the 2D array has some minimum number of features
(columns). The default value of 1 rejects empty datasets.
This check is only enforced when the input data has effectively 2
dimensions or is originally 1D and ``ensure_2d`` is True. Setting to 0
disables this check.
warn_on_dtype : boolean (default=False)
Raise DataConversionWarning if the dtype of the input data structure
does not match the requested dtype, causing a memory copy.
estimator : str or estimator instance (default=None)
If passed, include the name of the estimator in warning messages.
Returns
-------
X_converted : object
The converted and validated X.
"""
if isinstance(accept_sparse, str):
accept_sparse = [accept_sparse]
# store whether originally we wanted numeric dtype
dtype_numeric = dtype == "numeric"
dtype_orig = getattr(array, "dtype", None)
if not hasattr(dtype_orig, 'kind'):
# not a data type (e.g. a column named dtype in a pandas DataFrame)
dtype_orig = None
if dtype_numeric:
if dtype_orig is not None and dtype_orig.kind == "O":
# if input is object, convert to float.
dtype = np.float64
else:
dtype = None
if isinstance(dtype, (list, tuple)):
if dtype_orig is not None and dtype_orig in dtype:
# no dtype conversion required
dtype = None
else:
# dtype conversion required. Let's select the first element of the
# list of accepted types.
dtype = dtype[0]
if estimator is not None:
if isinstance(estimator, six.string_types):
estimator_name = estimator
else:
estimator_name = estimator.__class__.__name__
else:
estimator_name = "Estimator"
context = " by %s" % estimator_name if estimator is not None else ""
if sp.issparse(array):
array = _ensure_sparse_format(array, accept_sparse, dtype, copy,
force_all_finite)
else:
array = np.array(array, dtype=dtype, order=order, copy=copy)
if ensure_2d:
if array.ndim == 1:
if ensure_min_samples >= 2:
raise ValueError("%s expects at least 2 samples provided "
"in a 2 dimensional array-like input"
% estimator_name)
warnings.warn(
"Passing 1d arrays as data is deprecated in 0.17 and will "
"raise ValueError in 0.19. Reshape your data either using "
"X.reshape(-1, 1) if your data has a single feature or "
"X.reshape(1, -1) if it contains a single sample.",
DeprecationWarning)
array = np.atleast_2d(array)
# To ensure that array flags are maintained
array = np.array(array, dtype=dtype, order=order, copy=copy)
# make sure we actually converted to numeric:
if dtype_numeric and array.dtype.kind == "O":
array = array.astype(np.float64)
if not allow_nd and array.ndim >= 3:
raise ValueError("Found array with dim %d. %s expected <= 2."
% (array.ndim, estimator_name))
if force_all_finite:
_assert_all_finite(array)
shape_repr = _shape_repr(array.shape)
if ensure_min_samples > 0:
n_samples = _num_samples(array)
if n_samples < ensure_min_samples:
raise ValueError("Found array with %d sample(s) (shape=%s) while a"
" minimum of %d is required%s."
% (n_samples, shape_repr, ensure_min_samples,
context))
if ensure_min_features > 0 and array.ndim == 2:
n_features = array.shape[1]
if n_features < ensure_min_features:
raise ValueError("Found array with %d feature(s) (shape=%s) while"
" a minimum of %d is required%s."
% (n_features, shape_repr, ensure_min_features,
context))
if warn_on_dtype and dtype_orig is not None and array.dtype != dtype_orig:
msg = ("Data with input dtype %s was converted to %s%s."
% (dtype_orig, array.dtype, context))
warnings.warn(msg, _DataConversionWarning)
return array
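# Illustrative sketch (not part of the upstream module): check_array converts nested
# lists into a validated 2-d ndarray and enforces the requested dtype.
def _example_check_array():
    X = check_array([[1, 2], [3, 4]])
    assert X.shape == (2, 2)
    X_float = check_array([[1, 2], [3, 4]], dtype=np.float64)
    assert X_float.dtype == np.float64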
def check_X_y(X, y, accept_sparse=None, dtype="numeric", order=None,
copy=False, force_all_finite=True, ensure_2d=True,
allow_nd=False, multi_output=False, ensure_min_samples=1,
ensure_min_features=1, y_numeric=False,
warn_on_dtype=False, estimator=None):
"""Input validation for standard estimators.
Checks X and y for consistent length, enforces X 2d and y 1d.
Standard input checks are only applied to y, such as checking that y
does not have np.nan or np.inf targets. For multi-label y, set
multi_output=True to allow 2d and sparse y. If the dtype of X is
object, attempt converting to float, raising on failure.
Parameters
----------
X : nd-array, list or sparse matrix
Input data.
y : nd-array, list or sparse matrix
Labels.
accept_sparse : string, list of string or None (default=None)
String[s] representing allowed sparse matrix formats, such as 'csc',
'csr', etc. None means that sparse matrix input will raise an error.
If the input is sparse but not in the allowed format, it will be
converted to the first listed format.
dtype : string, type, list of types or None (default="numeric")
Data type of result. If None, the dtype of the input is preserved.
If "numeric", dtype is preserved unless array.dtype is object.
If dtype is a list of types, conversion on the first type is only
performed if the dtype of the input is not in the list.
order : 'F', 'C' or None (default=None)
Whether an array will be forced to be fortran or c-style.
copy : boolean (default=False)
Whether a forced copy will be triggered. If copy=False, a copy might
be triggered by a conversion.
force_all_finite : boolean (default=True)
Whether to raise an error on np.inf and np.nan in X. This parameter
does not influence whether y can have np.inf or np.nan values.
ensure_2d : boolean (default=True)
Whether to make X at least 2d.
allow_nd : boolean (default=False)
Whether to allow X.ndim > 2.
multi_output : boolean (default=False)
Whether to allow 2-d y (array or sparse matrix). If false, y will be
validated as a vector. y cannot have np.nan or np.inf values if
multi_output=True.
ensure_min_samples : int (default=1)
Make sure that X has a minimum number of samples in its first
axis (rows for a 2D array).
ensure_min_features : int (default=1)
Make sure that the 2D array has some minimum number of features
(columns). The default value of 1 rejects empty datasets.
This check is only enforced when X has effectively 2 dimensions or
is originally 1D and ``ensure_2d`` is True. Setting to 0 disables
this check.
y_numeric : boolean (default=False)
Whether to ensure that y has a numeric type. If dtype of y is object,
it is converted to float64. Should only be used for regression
algorithms.
warn_on_dtype : boolean (default=False)
Raise DataConversionWarning if the dtype of the input data structure
does not match the requested dtype, causing a memory copy.
estimator : str or estimator instance (default=None)
If passed, include the name of the estimator in warning messages.
Returns
-------
X_converted : object
The converted and validated X.
y_converted : object
The converted and validated y.
"""
X = check_array(X, accept_sparse, dtype, order, copy, force_all_finite,
ensure_2d, allow_nd, ensure_min_samples,
ensure_min_features, warn_on_dtype, estimator)
if multi_output:
y = check_array(y, 'csr', force_all_finite=True, ensure_2d=False,
dtype=None)
else:
y = column_or_1d(y, warn=True)
_assert_all_finite(y)
if y_numeric and y.dtype.kind == 'O':
y = y.astype(np.float64)
check_consistent_length(X, y)
return X, y
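# Illustrative sketch (not part of the upstream module): check_X_y validates the
# data/target pair together, returning a 2-d X and a 1-d y of matching length.
def _example_check_X_y():
    X, y = check_X_y([[1.0], [2.0], [3.0]], [0, 1, 0])
    assert X.shape == (3, 1) and y.shape == (3,)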
def column_or_1d(y, warn=False):
""" Ravel column or 1d numpy array, else raises an error
Parameters
----------
y : array-like
warn : boolean, default False
To control display of warnings.
Returns
-------
y : array
"""
shape = np.shape(y)
if len(shape) == 1:
return np.ravel(y)
if len(shape) == 2 and shape[1] == 1:
if warn:
warnings.warn("A column-vector y was passed when a 1d array was"
" expected. Please change the shape of y to "
"(n_samples, ), for example using ravel().",
_DataConversionWarning, stacklevel=2)
return np.ravel(y)
raise ValueError("bad input shape {0}".format(shape))
def check_random_state(seed):
"""Turn seed into a np.random.RandomState instance
If seed is None, return the RandomState singleton used by np.random.
If seed is an int, return a new RandomState instance seeded with seed.
If seed is already a RandomState instance, return it.
Otherwise raise ValueError.
"""
if seed is None or seed is np.random:
return np.random.mtrand._rand
if isinstance(seed, (numbers.Integral, np.integer)):
return np.random.RandomState(seed)
if isinstance(seed, np.random.RandomState):
return seed
raise ValueError('%r cannot be used to seed a numpy.random.RandomState'
' instance' % seed)
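# Illustrative sketch (not part of the upstream module): the three accepted inputs.
def _example_check_random_state():
    assert check_random_state(None) is np.random.mtrand._rand
    assert isinstance(check_random_state(42), np.random.RandomState)
    rng = np.random.RandomState(0)
    assert check_random_state(rng) is rng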
def has_fit_parameter(estimator, parameter):
"""Checks whether the estimator's fit method supports the given parameter.
Examples
--------
>>> from sklearn.svm import SVC
>>> has_fit_parameter(SVC(), "sample_weight")
True
"""
return parameter in signature(estimator.fit).parameters
def check_symmetric(array, tol=1E-10, raise_warning=True,
raise_exception=False):
"""Make sure that array is 2D, square and symmetric.
If the array is not symmetric, then a symmetrized version is returned.
Optionally, a warning or exception is raised if the matrix is not
symmetric.
Parameters
----------
array : nd-array or sparse matrix
Input object to check / convert. Must be two-dimensional and square,
otherwise a ValueError will be raised.
tol : float
Absolute tolerance for equivalence of arrays. Default = 1E-10.
raise_warning : boolean (default=True)
If True then raise a warning if conversion is required.
raise_exception : boolean (default=False)
If True then raise an exception if array is not symmetric.
Returns
-------
array_sym : ndarray or sparse matrix
Symmetrized version of the input array, i.e. the average of array
and array.transpose(). If sparse, then duplicate entries are first
summed and zeros are eliminated.
"""
if (array.ndim != 2) or (array.shape[0] != array.shape[1]):
raise ValueError("array must be 2-dimensional and square. "
"shape = {0}".format(array.shape))
if sp.issparse(array):
diff = array - array.T
# only csr, csc, and coo have `data` attribute
if diff.format not in ['csr', 'csc', 'coo']:
diff = diff.tocsr()
symmetric = np.all(abs(diff.data) < tol)
else:
symmetric = np.allclose(array, array.T, atol=tol)
if not symmetric:
if raise_exception:
raise ValueError("Array must be symmetric")
if raise_warning:
warnings.warn("Array is not symmetric, and will be converted "
"to symmetric by average with its transpose.")
if sp.issparse(array):
conversion = 'to' + array.format
array = getattr(0.5 * (array + array.T), conversion)()
else:
array = 0.5 * (array + array.T)
return array
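# Illustrative sketch (not part of the upstream module): an asymmetric matrix is
# replaced by the average of itself and its transpose.
def _example_check_symmetric():
    a = np.array([[0.0, 2.0], [0.0, 0.0]])
    sym = check_symmetric(a, raise_warning=False)
    assert np.allclose(sym, [[0.0, 1.0], [1.0, 0.0]])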
def check_is_fitted(estimator, attributes, msg=None, all_or_any=all):
"""Perform is_fitted validation for estimator.
Checks if the estimator is fitted by verifying the presence of
"all_or_any" of the passed attributes and raises a NotFittedError with the
given message.
Parameters
----------
estimator : estimator instance.
estimator instance for which the check is performed.
attributes : attribute name(s) given as string or a list/tuple of strings
Eg. : ["coef_", "estimator_", ...], "coef_"
msg : string
The default error message is, "This %(name)s instance is not fitted
yet. Call 'fit' with appropriate arguments before using this method."
For custom messages if "%(name)s" is present in the message string,
it is substituted for the estimator name.
Eg. : "Estimator, %(name)s, must be fitted before sparsifying".
all_or_any : callable, {all, any}, default all
Specify whether all or any of the given attributes must exist.
"""
if msg is None:
msg = ("This %(name)s instance is not fitted yet. Call 'fit' with "
"appropriate arguments before using this method.")
if not hasattr(estimator, 'fit'):
raise TypeError("%s is not an estimator instance." % (estimator))
if not isinstance(attributes, (list, tuple)):
attributes = [attributes]
if not all_or_any([hasattr(estimator, attr) for attr in attributes]):
# FIXME NotFittedError_ --> NotFittedError in 0.19
raise _NotFittedError(msg % {'name': type(estimator).__name__})
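# Illustrative sketch (not part of the upstream module); _Unfitted below is a made-up
# stand-in with a fit method, not a real scikit-learn estimator.
def _example_check_is_fitted():
    class _Unfitted(object):
        def fit(self):
            self.coef_ = 1.0
            return self
    try:
        check_is_fitted(_Unfitted(), "coef_")
    except _NotFittedError:
        pass  # expected: "coef_" is only set by fit
    check_is_fitted(_Unfitted().fit(), "coef_")  # no error once fitted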
def check_non_negative(X, whom):
"""
Check if there is any negative value in an array.
Parameters
----------
X : array-like or sparse matrix
Input data.
whom : string
Who passed X to this function.
"""
X = X.data if sp.issparse(X) else X
if (X < 0).any():
raise ValueError("Negative values in data passed to %s" % whom)
| bsd-3-clause |
klocey/ScalingMicroBiodiversity | fig-scripts/AppFigs/Fig1_Variants/Fig1.py | 2 | 12357 | from __future__ import division
import matplotlib.pyplot as plt
import numpy as np
import random
import scipy as sc
from scipy import stats
import os
import sys
from scipy.stats.distributions import t
import statsmodels.stats.api as sms
import statsmodels.api as sm
import statsmodels.formula.api as smf
from statsmodels.sandbox.regression.predstd import wls_prediction_std
from statsmodels.stats.outliers_influence import summary_table
import itertools as it
import pandas as pd
from math import log10
import linecache
mydir = os.path.expanduser("~/GitHub/MicrobialScaling/")
mydir2 = os.path.expanduser("~/")
sys.path.append(mydir2 + "GitHub/DiversityTools/metrics")
import metrics as mets
def Fig1():
#tail = '-SADMetricData.txt'
tail = '-SADMetricData_NoMicrobe1s.txt'
datasets = []
#GoodNames = ['MGRAST', 'HMP', 'EMPopen', 'BBS', 'CBC', 'MCDB', 'GENTRY', 'FIA']
#GoodNames = ['MGRAST', 'HMP', 'EMPclosed', 'BBS', 'CBC', 'MCDB', 'GENTRY', 'FIA']
GoodNames = ['BIGN', 'SED', 'BOVINE','CHU', 'LAUB', 'CHINA', 'CATLIN', 'FUNGI', 'HUMAN', 'HYDRO', 'HMP', 'EMPopen', 'BBS', 'CBC', 'MCDB', 'GENTRY', 'FIA']
#GoodNames = ['BCLS', 'CHINA', 'CATLIN', 'HUMAN', 'FUNGI', 'HYDRO', 'EMPopen', 'HMP', 'BBS', 'CBC', 'MCDB', 'GENTRY', 'FIA']
#GoodNames = ['EMPclosed', 'HMP', 'BBS', 'CBC', 'GENTRY', 'FIA']
for name in os.listdir(mydir +'data/micro'):
if name in GoodNames: pass
else: continue
path = mydir+'data/micro/'+name+'/'+name+tail
num_lines = sum(1 for line in open(path))
datasets.append([name, 'micro', num_lines])
print name, num_lines
for name in os.listdir(mydir +'data/macro'):
if name in GoodNames: pass
else: continue
path = mydir+'data/macro/'+name+'/'+name+tail
num_lines = sum(1 for line in open(path))
datasets.append([name, 'macro', num_lines])
print name, num_lines
metrics = ['Rarity, '+r'$log_{10}$',
'Dominance, '+r'$log_{10}$',
'Evenness, ' +r'$log_{10}$',
'Richness, ' +r'$log_{10}$']
fig = plt.figure()
for index, i in enumerate(metrics):
metric = i
fig.add_subplot(2, 2, index+1)
fs = 10 # font size used across figures
MicIntList, MicCoefList, MacIntList, MacCoefList, R2List, metlist = [[], [], [], [], [], []]
Nlist, Slist, Evarlist, ESimplist, klist, radDATA, BPlist, NmaxList, rareSkews, KindList, StdList = [[], [], [], [], [], [], [], [], [], [], []]
#name, kind, N, S, Evar, ESimp, EQ, O, ENee, EPielou, EHeip, BP, SimpDom, Nmax, McN, skew, logskew, chao1, ace, jknife1, jknife2, margalef, menhinick, preston_a, preston_S = [[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[]]
its = 1
for n in range(its):
#name, kind, N, S, Evar, ESimp, EQ, O, ENee, EPielou, EHeip, BP, SimpDom, Nmax, McN, skew, logskew, chao1, ace, jknife1, jknife2, margalef, menhinick, preston_a, preston_S = [[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[]]
Nlist, Slist, Evarlist, ESimplist, klist, radDATA, BPlist, NmaxList, rareSkews, KindList, StdList = [[], [], [], [], [], [], [], [], [], [], []]
numMac = 0
numMic = 0
radDATA = []
for dataset in datasets:
name, kind, numlines = dataset
lines = []
lines = np.random.choice(range(1, numlines+1), numlines, replace=False)
"""
if name == 'EMPclosed' or name == 'EMPopen' or name == 'HMP':
lines = np.random.choice(range(1, numlines+1), 100, replace=True)
elif name == 'HUMAN':
lines = np.random.choice(range(1, numlines+1), 10, replace=True)
elif kind == 'micro':
lines = np.random.choice(range(1, numlines+1), 70, replace=True)
elif name == 'GENTRY' or name == 'MCDB':
lines = np.random.choice(range(1, numlines+1), 40, replace=True)
elif kind == 'macro':
lines = np.random.choice(range(1, numlines+1), 140, replace=True)
"""
path = mydir+'data/'+kind+'/'+name+'/'+name+tail
for line in lines:
data = linecache.getline(path, line)
radDATA.append(data)
for data in radDATA:
data = data.split()
#print data
#sys.exit()
name, kind, N, S, Var, Evar, ESimp, EQ, O, ENee, EPielou, EHeip, BP, SimpDom, Nmax, McN, skew, logskew, chao1, ace, jknife1, jknife2, margalef, menhinick, preston_a, preston_S = data
N = float(N)
S = float(S)
#S = float(chao1)
if S < 10 or N < 11: continue
Nlist.append(float(np.log10(N)))
Slist.append(float(np.log10(S)))
ESimplist.append(float(np.log10(float(ESimp))))
KindList.append(kind)
BPlist.append(float(BP))
NmaxList.append(float(np.log10(float(Nmax))))
# log-modulo transformation of skewnness
lms = np.log10(np.abs(float(skew)) + 1)
if skew < 0: lms = lms * -1
rareSkews.append(float(lms))
if kind == 'micro':
numMic += 1
klist.append('b')
if kind == 'macro':
klist.append('r')
numMac += 1
if index == 0: metlist = list(rareSkews)
elif index == 1: metlist = list(NmaxList)
elif index == 2: metlist = list(ESimplist)
elif index == 3: metlist = list(Slist)
# Multiple regression
d = pd.DataFrame({'N': list(Nlist)})
d['y'] = list(metlist)
d['Kind'] = list(KindList)
f = smf.ols('y ~ N * Kind', d).fit()
MacIntList.append(f.params[0])
MacCoefList.append(f.params[2])
if f.pvalues[1] < 0.05:
MicIntList.append(f.params[1] + f.params[0])
else:
MicIntList.append(f.params[0])
if f.pvalues[3] < 0.05:
MicCoefList.append(f.params[3] + f.params[2])
else:
MicCoefList.append(f.params[2])
R2List.append(f.rsquared)
MacPIx, MacFitted, MicPIx, MicFitted = [[],[],[],[]]
macCiH, macCiL, micCiH, micCiL = [[],[],[],[]]
MacListX = []
MacListY = []
MicListX = []
MicListY = []
for j, k in enumerate(KindList):
if k == 'micro':
MicListX.append(Nlist[j])
MicListY.append(metlist[j])
elif k == 'macro':
MacListX.append(Nlist[j])
MacListY.append(metlist[j])
print metric
lm = smf.ols('y ~ N * Kind', d).fit()
#print lm.summary()
#print '\n'
st, data, ss2 = summary_table(lm, alpha=0.05)
# ss2: Obs, Dep Var Population, Predicted Value, Std Error Mean Predict,
# Mean ci 95% low, Mean ci 95% upp, Predict ci 95% low, Predict ci 95% upp,
# Residual, Std Error Residual, Student Residual, Cook's D
fittedvalues = data[:,2]
predict_mean_se = data[:,3]
predict_mean_ci_low, predict_mean_ci_upp = data[:,4:6].T
predict_ci_low, predict_ci_upp = data[:,6:8].T
for j, kval in enumerate(KindList):
if kval == 'macro':
macCiH.append(predict_mean_ci_upp[j])
macCiL.append(predict_mean_ci_low[j])
MacPIx.append(Nlist[j])
MacFitted.append(f.fittedvalues[j])
elif kval == 'micro':
micCiH.append(predict_mean_ci_upp[j])
micCiL.append(predict_mean_ci_low[j])
MicPIx.append(Nlist[j])
MicFitted.append(f.fittedvalues[j])
MicPIx, MicFitted, micCiH, micCiL = zip(*sorted(zip(MicPIx, MicFitted, micCiH, micCiL)))
MacPIx, MacFitted, macCiH, macCiL = zip(*sorted(zip(MacPIx, MacFitted, macCiH, macCiL)))
num = min(len(MacListX), len(MicListX))
for i in range(num):
plt.scatter(MacListX[i], MacListY[i], color = 'LightCoral', alpha= 1 , s = 4, linewidths=0.5, edgecolor='Crimson')
plt.scatter(MicListX[i], MicListY[i], color = 'SkyBlue', alpha= 1 , s = 4, linewidths=0.5, edgecolor='Steelblue')
plt.fill_between(MacPIx, macCiL, macCiH, color='r', lw=0.0, alpha=0.3)
plt.plot(MacPIx, MacFitted, color='r', ls='--', lw=0.5, alpha=0.8)
plt.fill_between(MicPIx, micCiL, micCiH, color='b', lw=0.0, alpha=0.3)
plt.plot(MicPIx, MicFitted, color='b', ls='--', lw=0.5, alpha=0.8)
MicInt = round(np.mean(MicIntList), 2)
MicCoef = round(np.mean(MicCoefList), 2)
MacInt = round(np.mean(MacIntList), 2)
MacCoef = round(np.mean(MacCoefList), 2)
R2 = round(np.mean(R2List), 2)
if index == 0:
plt.ylim(-0.1, 2.0)
plt.xlim(1, 7)
plt.text(1.35, 1.7, r'$micro$'+ ' = '+str(round(MicInt,2))+'*'+r'$N$'+'$^{'+str(round(MicCoef,2))+'}$', fontsize=fs, color='Steelblue')
plt.text(1.35, 1.5, r'$macro$'+ ' = '+str(round(MacInt,2))+'*'+r'$N$'+'$^{'+str(round(MacCoef,2))+'}$', fontsize=fs, color='Crimson')
plt.text(1.35, 1.2, r'$R^2$' + '=' +str(R2), fontsize=fs-1, color='k')
plt.scatter([0],[-1], color = 'SkyBlue', alpha = 1, s=10, linewidths=0.9, edgecolor='Steelblue', label= 'microbes (n='+str(len(MicListY))+')')
plt.scatter([0],[-1], color = 'LightCoral',alpha= 1, s=10, linewidths=0.9, edgecolor='Crimson', label= 'macrobes (n='+str(len(MacListY))+')')
plt.legend(bbox_to_anchor=(-0.04, 1.1, 2.48, .2), loc=10, ncol=2, mode="expand",prop={'size':fs+2})
elif index == 1:
plt.plot([0,7],[0,7], ls = '--', lw=1, c='0.7')
#ax.text(18, 21, '1:1 line', fontsize=fs*1.0, rotation=40, color='0.7')
plt.ylim(0, 6)
plt.xlim(1, 7)
plt.text(1.35, 5.1, r'$micro$'+ ' = '+str(round(MicInt,2))+'*'+r'$N$'+'$^{'+str(round(MicCoef,2))+'}$', fontsize=fs, color='Steelblue')
plt.text(1.35, 4.5, r'$macro$'+ ' = '+str(round(MacInt,2))+'*'+r'$N$'+'$^{'+str(round(MacCoef,2))+'}$', fontsize=fs, color='Crimson')
plt.text(1.35, 3.75, r'$R^2$' + '=' +str(R2), fontsize=fs-1, color='k')
elif index == 2:
plt.ylim(-3.0, 0.0)
plt.xlim(0, 7)
plt.text(0.35, -2.8, r'$micro$'+ ' = '+str(round(MicInt,2))+'*'+r'$N$'+'$^{'+str(round(MicCoef,2))+'}$', fontsize=fs, color='Steelblue')
plt.text(0.35, -2.5, r'$macro$'+ ' = '+str(round(MacInt,2))+'*'+r'$N$'+'$^{'+str(round(MacCoef,2))+'}$', fontsize=fs, color='Crimson')
plt.text(0.35, -2.2, r'$R^2$' + '=' +str(R2), fontsize=fs-1, color='k')
elif index == 3:
plt.ylim(0.9, 5.0)
plt.xlim(1, 7)
plt.text(1.35, 3.9, r'$micro$'+ ' = '+str(round(MicInt,2))+'*'+r'$N$'+'$^{'+str(round(MicCoef,2))+'}$', fontsize=fs, color='Steelblue')
plt.text(1.35, 3.5, r'$macro$'+ ' = '+str(round(MacInt,2))+'*'+r'$N$'+'$^{'+str(round(MacCoef,2))+'}$', fontsize=fs, color='Crimson')
plt.text(1.35, 3.0, r'$R^2$' + '=' +str(R2), fontsize=fs-1, color='k')
plt.xlabel('Number of reads or individuals, '+ '$log$'+r'$_{10}$', fontsize=fs)
plt.ylabel(metric, fontsize=fs)
plt.tick_params(axis='both', which='major', labelsize=fs-3)
plt.subplots_adjust(wspace=0.4, hspace=0.4)
plt.savefig(mydir+'/figs/Fig1/Locey_Lennon_2015_Fig1-OpenReference_NoSingletons.png', dpi=600, bbox_inches = "tight")
#plt.savefig(mydir+'/figs/Fig1/Locey_Lennon_2015_Fig1-ClosedReference_NoSingletons.png', dpi=600, bbox_inches = "tight")
#plt.savefig(mydir+'/figs/Fig1/Locey_Lennon_2015_Fig1-OpenReference.png', dpi=600, bbox_inches = "tight")
#plt.savefig(mydir+'/figs/Fig1/Locey_Lennon_2015_Fig1-ClosedReference.png', dpi=600, bbox_inches = "tight")
#plt.show()
#plt.close()
return
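# --- Illustrative sketch added for exposition; not part of the original script. ---
# The "log-modulo" transform used above compresses skewness while preserving its sign:
# log10(|skew| + 1), negated for negative skew, so e.g. 9 -> 1, -9 -> -1, 0 -> 0.
def _log_modulo(skew):
    lms = np.log10(np.abs(float(skew)) + 1)
    return -lms if skew < 0 else lms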
Fig1()
| gpl-3.0 |
quheng/scikit-learn | sklearn/datasets/svmlight_format.py | 79 | 15976 | """This module implements a loader and dumper for the svmlight format
This format is a text-based format, with one sample per line. It does
not store zero valued features hence is suitable for sparse dataset.
The first element of each line can be used to store a target variable to
predict.
This format is used as the default format for both svmlight and the
libsvm command line programs.
"""
# Authors: Mathieu Blondel <[email protected]>
# Lars Buitinck <[email protected]>
# Olivier Grisel <[email protected]>
# License: BSD 3 clause
from contextlib import closing
import io
import os.path
import numpy as np
import scipy.sparse as sp
from ._svmlight_format import _load_svmlight_file
from .. import __version__
from ..externals import six
from ..externals.six import u, b
from ..externals.six.moves import range, zip
from ..utils import check_array
from ..utils.fixes import frombuffer_empty
def load_svmlight_file(f, n_features=None, dtype=np.float64,
multilabel=False, zero_based="auto", query_id=False):
"""Load datasets in the svmlight / libsvm format into sparse CSR matrix
This format is a text-based format, with one sample per line. It does
not store zero valued features hence is suitable for sparse dataset.
The first element of each line can be used to store a target variable
to predict.
This format is used as the default format for both svmlight and the
libsvm command line programs.
    Parsing a text based source can be expensive. When working
repeatedly on the same dataset, it is recommended to wrap this
loader with joblib.Memory.cache to store a memmapped backup of the
CSR results of the first call and benefit from the near instantaneous
loading of memmapped structures for the subsequent calls.
In case the file contains a pairwise preference constraint (known
as "qid" in the svmlight format) these are ignored unless the
query_id parameter is set to True. These pairwise preference
constraints can be used to constraint the combination of samples
when using pairwise loss functions (as is the case in some
learning to rank problems) so that only pairs with the same
query_id value are considered.
This implementation is written in Cython and is reasonably fast.
However, a faster API-compatible loader is also available at:
https://github.com/mblondel/svmlight-loader
Parameters
----------
f : {str, file-like, int}
(Path to) a file to load. If a path ends in ".gz" or ".bz2", it will
be uncompressed on the fly. If an integer is passed, it is assumed to
be a file descriptor. A file-like or file descriptor will not be closed
by this function. A file-like object must be opened in binary mode.
n_features : int or None
The number of features to use. If None, it will be inferred. This
argument is useful to load several files that are subsets of a
bigger sliced dataset: each subset might not have examples of
every feature, hence the inferred shape might vary from one
slice to another.
multilabel : boolean, optional, default False
Samples may have several labels each (see
http://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multilabel.html)
zero_based : boolean or "auto", optional, default "auto"
Whether column indices in f are zero-based (True) or one-based
(False). If column indices are one-based, they are transformed to
zero-based to match Python/NumPy conventions.
If set to "auto", a heuristic check is applied to determine this from
the file contents. Both kinds of files occur "in the wild", but they
are unfortunately not self-identifying. Using "auto" or True should
always be safe.
query_id : boolean, default False
If True, will return the query_id array for each file.
dtype : numpy data type, default np.float64
Data type of dataset to be loaded. This will be the data type of the
output numpy arrays ``X`` and ``y``.
Returns
-------
X: scipy.sparse matrix of shape (n_samples, n_features)
y: ndarray of shape (n_samples,), or, in the multilabel a list of
tuples of length n_samples.
query_id: array of shape (n_samples,)
query_id for each sample. Only returned when query_id is set to
True.
See also
--------
load_svmlight_files: similar function for loading multiple files in this
format, enforcing the same number of features/columns on all of them.
Examples
--------
To use joblib.Memory to cache the svmlight file::
from sklearn.externals.joblib import Memory
from sklearn.datasets import load_svmlight_file
mem = Memory("./mycache")
@mem.cache
def get_data():
data = load_svmlight_file("mysvmlightfile")
return data[0], data[1]
X, y = get_data()
"""
return tuple(load_svmlight_files([f], n_features, dtype, multilabel,
zero_based, query_id))
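# Illustrative sketch (not part of the upstream module): the svmlight/libsvm text format
# is "<label> <index>:<value> ...", one sample per line; the tiny in-memory buffer below
# is made up purely to show the parsing behaviour.
def _example_load_svmlight_file():
    buf = io.BytesIO(b"1 1:2.5 3:1.0\n-1 2:3.0\n")
    X, y = load_svmlight_file(buf)
    assert X.shape == (2, 3) and list(y) == [1.0, -1.0]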
def _gen_open(f):
if isinstance(f, int): # file descriptor
return io.open(f, "rb", closefd=False)
elif not isinstance(f, six.string_types):
raise TypeError("expected {str, int, file-like}, got %s" % type(f))
_, ext = os.path.splitext(f)
if ext == ".gz":
import gzip
return gzip.open(f, "rb")
elif ext == ".bz2":
from bz2 import BZ2File
return BZ2File(f, "rb")
else:
return open(f, "rb")
def _open_and_load(f, dtype, multilabel, zero_based, query_id):
if hasattr(f, "read"):
actual_dtype, data, ind, indptr, labels, query = \
_load_svmlight_file(f, dtype, multilabel, zero_based, query_id)
# XXX remove closing when Python 2.7+/3.1+ required
else:
with closing(_gen_open(f)) as f:
actual_dtype, data, ind, indptr, labels, query = \
_load_svmlight_file(f, dtype, multilabel, zero_based, query_id)
# convert from array.array, give data the right dtype
if not multilabel:
labels = frombuffer_empty(labels, np.float64)
data = frombuffer_empty(data, actual_dtype)
indices = frombuffer_empty(ind, np.intc)
indptr = np.frombuffer(indptr, dtype=np.intc) # never empty
query = frombuffer_empty(query, np.intc)
data = np.asarray(data, dtype=dtype) # no-op for float{32,64}
return data, indices, indptr, labels, query
def load_svmlight_files(files, n_features=None, dtype=np.float64,
multilabel=False, zero_based="auto", query_id=False):
"""Load dataset from multiple files in SVMlight format
This function is equivalent to mapping load_svmlight_file over a list of
files, except that the results are concatenated into a single, flat list
and the samples vectors are constrained to all have the same number of
features.
In case the file contains a pairwise preference constraint (known
as "qid" in the svmlight format) these are ignored unless the
query_id parameter is set to True. These pairwise preference
constraints can be used to constraint the combination of samples
when using pairwise loss functions (as is the case in some
learning to rank problems) so that only pairs with the same
query_id value are considered.
Parameters
----------
files : iterable over {str, file-like, int}
(Paths of) files to load. If a path ends in ".gz" or ".bz2", it will
be uncompressed on the fly. If an integer is passed, it is assumed to
be a file descriptor. File-likes and file descriptors will not be
closed by this function. File-like objects must be opened in binary
mode.
n_features: int or None
The number of features to use. If None, it will be inferred from the
maximum column index occurring in any of the files.
This can be set to a higher value than the actual number of features
in any of the input files, but setting it to a lower value will cause
an exception to be raised.
multilabel: boolean, optional
Samples may have several labels each (see
http://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multilabel.html)
zero_based: boolean or "auto", optional
Whether column indices in f are zero-based (True) or one-based
(False). If column indices are one-based, they are transformed to
zero-based to match Python/NumPy conventions.
If set to "auto", a heuristic check is applied to determine this from
the file contents. Both kinds of files occur "in the wild", but they
are unfortunately not self-identifying. Using "auto" or True should
always be safe.
query_id: boolean, defaults to False
If True, will return the query_id array for each file.
dtype : numpy data type, default np.float64
Data type of dataset to be loaded. This will be the data type of the
output numpy arrays ``X`` and ``y``.
Returns
-------
[X1, y1, ..., Xn, yn]
where each (Xi, yi) pair is the result from load_svmlight_file(files[i]).
If query_id is set to True, this will return instead [X1, y1, q1,
..., Xn, yn, qn] where (Xi, yi, qi) is the result from
load_svmlight_file(files[i])
Notes
-----
When fitting a model to a matrix X_train and evaluating it against a
matrix X_test, it is essential that X_train and X_test have the same
number of features (X_train.shape[1] == X_test.shape[1]). This may not
be the case if you load the files individually with load_svmlight_file.
See also
--------
load_svmlight_file
"""
r = [_open_and_load(f, dtype, multilabel, bool(zero_based), bool(query_id))
for f in files]
if (zero_based is False
or zero_based == "auto" and all(np.min(tmp[1]) > 0 for tmp in r)):
for ind in r:
indices = ind[1]
indices -= 1
n_f = max(ind[1].max() for ind in r) + 1
if n_features is None:
n_features = n_f
elif n_features < n_f:
raise ValueError("n_features was set to {},"
" but input file contains {} features"
.format(n_features, n_f))
result = []
for data, indices, indptr, y, query_values in r:
shape = (indptr.shape[0] - 1, n_features)
X = sp.csr_matrix((data, indices, indptr), shape)
X.sort_indices()
result += X, y
if query_id:
result.append(query_values)
return result
def _dump_svmlight(X, y, f, multilabel, one_based, comment, query_id):
is_sp = int(hasattr(X, "tocsr"))
if X.dtype.kind == 'i':
value_pattern = u("%d:%d")
else:
value_pattern = u("%d:%.16g")
if y.dtype.kind == 'i':
label_pattern = u("%d")
else:
label_pattern = u("%.16g")
line_pattern = u("%s")
if query_id is not None:
line_pattern += u(" qid:%d")
line_pattern += u(" %s\n")
if comment:
f.write(b("# Generated by dump_svmlight_file from scikit-learn %s\n"
% __version__))
f.write(b("# Column indices are %s-based\n"
% ["zero", "one"][one_based]))
f.write(b("#\n"))
f.writelines(b("# %s\n" % line) for line in comment.splitlines())
for i in range(X.shape[0]):
if is_sp:
span = slice(X.indptr[i], X.indptr[i + 1])
row = zip(X.indices[span], X.data[span])
else:
nz = X[i] != 0
row = zip(np.where(nz)[0], X[i, nz])
s = " ".join(value_pattern % (j + one_based, x) for j, x in row)
if multilabel:
nz_labels = np.where(y[i] != 0)[0]
labels_str = ",".join(label_pattern % j for j in nz_labels)
else:
labels_str = label_pattern % y[i]
if query_id is not None:
feat = (labels_str, query_id[i], s)
else:
feat = (labels_str, s)
f.write((line_pattern % feat).encode('ascii'))
def dump_svmlight_file(X, y, f, zero_based=True, comment=None, query_id=None,
multilabel=False):
"""Dump the dataset in svmlight / libsvm file format.
This format is a text-based format, with one sample per line. It does
not store zero valued features hence is suitable for sparse dataset.
The first element of each line can be used to store a target variable
to predict.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_labels]
Target values. Class labels must be an integer or float, or array-like
objects of integer or float for multilabel classifications.
f : string or file-like in binary mode
If string, specifies the path that will contain the data.
If file-like, data will be written to f. f should be opened in binary
mode.
zero_based : boolean, optional
Whether column indices should be written zero-based (True) or one-based
(False).
comment : string, optional
Comment to insert at the top of the file. This should be either a
Unicode string, which will be encoded as UTF-8, or an ASCII byte
string.
If a comment is given, then it will be preceded by one that identifies
the file as having been dumped by scikit-learn. Note that not all
tools grok comments in SVMlight files.
query_id : array-like, shape = [n_samples]
Array containing pairwise preference constraints (qid in svmlight
format).
multilabel: boolean, optional
Samples may have several labels each (see
http://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multilabel.html)
"""
if comment is not None:
# Convert comment string to list of lines in UTF-8.
# If a byte string is passed, then check whether it's ASCII;
# if a user wants to get fancy, they'll have to decode themselves.
# Avoid mention of str and unicode types for Python 3.x compat.
if isinstance(comment, bytes):
comment.decode("ascii") # just for the exception
else:
comment = comment.encode("utf-8")
if six.b("\0") in comment:
raise ValueError("comment string contains NUL byte")
y = np.asarray(y)
if y.ndim != 1 and not multilabel:
raise ValueError("expected y of shape (n_samples,), got %r"
% (y.shape,))
Xval = check_array(X, accept_sparse='csr')
if Xval.shape[0] != y.shape[0]:
raise ValueError("X.shape[0] and y.shape[0] should be the same, got"
" %r and %r instead." % (Xval.shape[0], y.shape[0]))
# We had some issues with CSR matrices with unsorted indices (e.g. #1501),
# so sort them here, but first make sure we don't modify the user's X.
# TODO We can do this cheaper; sorted_indices copies the whole matrix.
if Xval is X and hasattr(Xval, "sorted_indices"):
X = Xval.sorted_indices()
else:
X = Xval
if hasattr(X, "sort_indices"):
X.sort_indices()
if query_id is not None:
query_id = np.asarray(query_id)
if query_id.shape[0] != y.shape[0]:
raise ValueError("expected query_id of shape (n_samples,), got %r"
% (query_id.shape,))
one_based = not zero_based
if hasattr(f, "write"):
_dump_svmlight(X, y, f, multilabel, one_based, comment, query_id)
else:
with open(f, "wb") as f:
_dump_svmlight(X, y, f, multilabel, one_based, comment, query_id)
| bsd-3-clause |
strint/tensorflow | tensorflow/examples/learn/iris_custom_model.py | 50 | 2613 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of Estimator for Iris plant dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from sklearn import cross_validation
from sklearn import datasets
from sklearn import metrics
import tensorflow as tf
layers = tf.contrib.layers
learn = tf.contrib.learn
def my_model(features, target):
"""DNN with three hidden layers, and dropout of 0.1 probability."""
# Convert the target to a one-hot tensor of shape (length of features, 3) and
  # with an on-value of 1 for each one-hot vector of length 3.
target = tf.one_hot(target, 3, 1, 0)
# Create three fully connected layers respectively of size 10, 20, and 10 with
# each layer having a dropout probability of 0.1.
normalizer_fn = layers.dropout
normalizer_params = {'keep_prob': 0.9}
features = layers.stack(
features,
layers.fully_connected, [10, 20, 10],
normalizer_fn=normalizer_fn,
normalizer_params=normalizer_params)
# Compute logits (1 per class) and compute loss.
logits = layers.fully_connected(features, 3, activation_fn=None)
loss = tf.losses.softmax_cross_entropy(target, logits)
# Create a tensor for training op.
train_op = tf.contrib.layers.optimize_loss(
loss,
tf.contrib.framework.get_global_step(),
optimizer='Adagrad',
learning_rate=0.1)
return ({
'class': tf.argmax(logits, 1),
'prob': tf.nn.softmax(logits)
}, loss, train_op)
def main(unused_argv):
iris = datasets.load_iris()
x_train, x_test, y_train, y_test = cross_validation.train_test_split(
iris.data, iris.target, test_size=0.2, random_state=42)
classifier = learn.Estimator(model_fn=my_model)
classifier.fit(x_train, y_train, steps=1000)
y_predicted = [
p['class'] for p in classifier.predict(
x_test, as_iterable=True)
]
score = metrics.accuracy_score(y_test, y_predicted)
print('Accuracy: {0:f}'.format(score))
if __name__ == '__main__':
tf.app.run()
| apache-2.0 |
bthirion/nipy | examples/fiac/fiac_util.py | 5 | 13413 | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""Support utilities for FIAC example, mostly path management.
The purpose of separating these is to keep the main example code as readable as
possible and focused on the experimental modeling and analysis, rather than on
local file management issues.
Requires matplotlib
"""
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import print_function # Python 2/3 compatibility
# Stdlib
import os
from os import makedirs, listdir
from os.path import exists, abspath, isdir, join as pjoin, splitext
import csv
try:
from StringIO import StringIO # Python 2
except ImportError:
from io import StringIO # Python 3
# Third party
import numpy as np
from matplotlib.mlab import csv2rec, rec2csv
# From NIPY
from nipy.io.api import load_image
#-----------------------------------------------------------------------------
# Globals
#-----------------------------------------------------------------------------
# We assume that there is a directory holding the data and it's local to this
# code. Users can either keep a copy here or a symlink to the real location on
# disk of the data.
DATADIR = 'fiac_data'
# Sanity check
if not os.path.isdir(DATADIR):
e="The data directory %s must exist and contain the FIAC data." % DATADIR
raise IOError(e)
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
# Path management utilities
def load_image_fiac(*path):
"""Return a NIPY image from a set of path components.
"""
return load_image(pjoin(DATADIR, *path))
def subj_des_con_dirs(design, contrast, nsub=16):
"""Return a list of subject directories with this `design` and `contrast`
Parameters
----------
design : {'event', 'block'}
contrast : str
nsub : int, optional
total number of subjects
Returns
-------
con_dirs : list
list of directories matching `design` and `contrast`
"""
rootdir = DATADIR
con_dirs = []
for s in range(nsub):
f = pjoin(rootdir, "fiac_%02d" % s, design, "fixed", contrast)
if isdir(f):
con_dirs.append(f)
return con_dirs
def path_info_run(subj, run):
"""Construct path information dict for current subject/run.
Parameters
----------
subj : int
subject number (0..15 inclusive)
run : int
run number (1..4 inclusive).
Returns
-------
path_dict : dict
a dict with all the necessary path-related keys, including 'rootdir',
and 'design', where 'design' can have values 'event' or 'block'
depending on which type of run this was for subject no `subj` and run no
`run`
"""
path_dict = {'subj': subj, 'run': run}
if exists(pjoin(DATADIR, "fiac_%(subj)02d",
"block", "initial_%(run)02d.csv") % path_dict):
path_dict['design'] = 'block'
else:
path_dict['design'] = 'event'
rootdir = pjoin(DATADIR, "fiac_%(subj)02d", "%(design)s") % path_dict
path_dict['rootdir'] = rootdir
return path_dict
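# Hedged illustration (added; the concrete values are made up): for subject 3,
# run 1 this returns a dict along the lines of
#     {'subj': 3, 'run': 1, 'design': 'event',
#      'rootdir': 'fiac_data/fiac_03/event'}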
def path_info_design(subj, design):
"""Construct path information dict for subject and design.
Parameters
----------
subj : int
subject number (0..15 inclusive)
design : {'event', 'block'}
type of design
Returns
-------
path_dict : dict
having keys 'rootdir', 'subj', 'design'
"""
path_dict = {'subj': subj, 'design': design}
rootdir = pjoin(DATADIR, "fiac_%(subj)02d", "%(design)s") % path_dict
path_dict['rootdir'] = rootdir
return path_dict
def results_table(path_dict):
""" Return precalculated results images for subject info in `path_dict`
Parameters
----------
path_dict : dict
containing key 'rootdir'
Returns
-------
rtab : dict
dict with keys given by run directories for this subject, values being a
list with filenames of effect and sd images.
"""
# Which runs correspond to this design type?
rootdir = path_dict['rootdir']
runs = filter(lambda f: isdir(pjoin(rootdir, f)),
['results_%02d' % i for i in range(1,5)] )
# Find out which contrasts have t-statistics,
# storing the filenames for reading below
results = {}
for rundir in runs:
rundir = pjoin(rootdir, rundir)
for condir in listdir(rundir):
            fname_effect = abspath(pjoin(rundir, condir, 'effect.nii'))
            fname_sd = abspath(pjoin(rundir, condir, 'sd.nii'))
            if exists(fname_effect) and exists(fname_sd):
                results.setdefault(condir, []).append([fname_effect,
                                                       fname_sd])
return results
def get_experiment_initial(path_dict):
"""Get the record arrays for the experimental/initial designs.
Parameters
----------
path_dict : dict
containing key 'rootdir', 'run', 'subj'
Returns
-------
experiment, initial : Two record arrays.
"""
# The following two lines read in the .csv files
# and return recarrays, with fields
# experiment: ['time', 'sentence', 'speaker']
# initial: ['time', 'initial']
rootdir = path_dict['rootdir']
if not exists(pjoin(rootdir, "experiment_%(run)02d.csv") % path_dict):
e = "can't find design for subject=%(subj)d,run=%(subj)d" % path_dict
raise IOError(e)
experiment = csv2rec(pjoin(rootdir, "experiment_%(run)02d.csv") % path_dict)
initial = csv2rec(pjoin(rootdir, "initial_%(run)02d.csv") % path_dict)
return experiment, initial
def get_fmri(path_dict):
"""Get the images for a given subject/run.
Parameters
----------
path_dict : dict
containing key 'rootdir', 'run'
Returns
-------
fmri : ndarray
anat : NIPY image
"""
fmri_im = load_image(
pjoin("%(rootdir)s/swafunctional_%(run)02d.nii") % path_dict)
return fmri_im
def ensure_dir(*path):
"""Ensure a directory exists, making it if necessary.
Returns the full path."""
dirpath = pjoin(*path)
if not isdir(dirpath):
makedirs(dirpath)
return dirpath
def output_dir(path_dict, tcons, fcons):
"""Get (and make if necessary) directory to write output into.
Parameters
----------
path_dict : dict
containing key 'rootdir', 'run'
tcons : sequence of str
t contrasts
fcons : sequence of str
F contrasts
"""
rootdir = path_dict['rootdir']
odir = pjoin(rootdir, "results_%(run)02d" % path_dict)
ensure_dir(odir)
for n in tcons:
ensure_dir(odir,n)
for n in fcons:
ensure_dir(odir,n)
return odir
def test_sanity():
from nipy.modalities.fmri import design, hrf
import nipy.modalities.fmri.fmristat.hrf as fshrf
from nipy.modalities.fmri.fmristat.tests import FIACdesigns
from nipy.modalities.fmri.fmristat.tests.test_FIAC import matchcol
from nipy.algorithms.statistics import formula
from nose.tools import assert_true
"""
Single subject fitting of FIAC model
"""
# Based on file
# subj3_evt_fonc1.txt
# subj3_bloc_fonc3.txt
for subj, run, design_type in [(3, 1, 'event'), (3, 3, 'block')]:
nvol = 191
TR = 2.5
Tstart = 1.25
volume_times = np.arange(nvol)*TR + Tstart
volume_times_rec = formula.make_recarray(volume_times, 't')
path_dict = {'subj':subj, 'run':run}
if exists(pjoin(DATADIR, "fiac_%(subj)02d",
"block", "initial_%(run)02d.csv") % path_dict):
path_dict['design'] = 'block'
else:
path_dict['design'] = 'event'
experiment = csv2rec(pjoin(DATADIR, "fiac_%(subj)02d", "%(design)s", "experiment_%(run)02d.csv")
% path_dict)
initial = csv2rec(pjoin(DATADIR, "fiac_%(subj)02d", "%(design)s", "initial_%(run)02d.csv")
% path_dict)
X_exper, cons_exper = design.event_design(experiment,
volume_times_rec,
hrfs=fshrf.spectral)
X_initial, _ = design.event_design(initial,
volume_times_rec,
hrfs=[hrf.glover])
X, cons = design.stack_designs((X_exper, cons_exper), (X_initial, {}))
# Get original fmristat design
Xf = FIACdesigns.fmristat[design_type]
# Check our new design can be closely matched to the original
for i in range(X.shape[1]):
# Columns can be very well correlated negatively or positively
assert_true(abs(matchcol(X[:,i], Xf)[1]) > 0.999)
def rewrite_spec(subj, run, root = "/home/jtaylo/FIAC-HBM2009"):
"""
Take a FIAC specification file and get two specifications
(experiment, begin).
This creates two new .csv files, one for the experimental
conditions, the other for the "initial" confounding trials that
are to be modelled out.
For the block design, the "initial" trials are the first
trials of each block. For the event designs, the
"initial" trials are made up of just the first trial.
"""
if exists(pjoin("%(root)s", "fiac%(subj)d", "subj%(subj)d_evt_fonc%(run)d.txt") % {'root':root, 'subj':subj, 'run':run}):
designtype = 'evt'
else:
designtype = 'bloc'
# Fix the format of the specification so it is
# more in the form of a 2-way ANOVA
eventdict = {1:'SSt_SSp', 2:'SSt_DSp', 3:'DSt_SSp', 4:'DSt_DSp'}
s = StringIO()
w = csv.writer(s)
w.writerow(['time', 'sentence', 'speaker'])
specfile = pjoin("%(root)s", "fiac%(subj)d", "subj%(subj)d_%(design)s_fonc%(run)d.txt") % {'root':root, 'subj':subj, 'run':run, 'design':designtype}
d = np.loadtxt(specfile)
for row in d:
w.writerow([row[0]] + eventdict[row[1]].split('_'))
s.seek(0)
d = csv2rec(s)
# Now, take care of the 'begin' event
# This is due to the FIAC design
if designtype == 'evt':
b = np.array([(d[0]['time'], 1)], np.dtype([('time', np.float),
('initial', np.int)]))
d = d[1:]
else:
k = np.equal(np.arange(d.shape[0]) % 6, 0)
b = np.array([(tt, 1) for tt in d[k]['time']], np.dtype([('time', np.float),
('initial', np.int)]))
d = d[~k]
designtype = {'bloc':'block', 'evt':'event'}[designtype]
fname = pjoin(DATADIR, "fiac_%(subj)02d", "%(design)s", "experiment_%(run)02d.csv") % {'root':root, 'subj':subj, 'run':run, 'design':designtype}
rec2csv(d, fname)
experiment = csv2rec(fname)
fname = pjoin(DATADIR, "fiac_%(subj)02d", "%(design)s", "initial_%(run)02d.csv") % {'root':root, 'subj':subj, 'run':run, 'design':designtype}
rec2csv(b, fname)
initial = csv2rec(fname)
return d, b
def compare_results(subj, run, other_root, mask_fname):
""" Find and compare calculated results images from a previous run
    This script checks that another directory containing results of this same
    analysis is similar in the sense of numpy ``allclose`` within a brain mask.
Parameters
----------
subj : int
subject number (0..4, 6..15)
run : int
run number (1..4)
other_root : str
path to previous run estimation
mask_fname:
path to a mask image defining area in which to compare differences
"""
# Get information for this subject and run
path_dict = path_info_run(subj, run)
# Get mask
msk = load_image(mask_fname).get_data().copy().astype(bool)
# Get results directories for this run
rootdir = path_dict['rootdir']
res_dir = pjoin(rootdir, 'results_%02d' % run)
if not isdir(res_dir):
return
for dirpath, dirnames, filenames in os.walk(res_dir):
for fname in filenames:
froot, ext = splitext(fname)
if froot in ('effect', 'sd', 'F', 't'):
this_fname = pjoin(dirpath, fname)
other_fname = this_fname.replace(DATADIR, other_root)
if not exists(other_fname):
print(this_fname, 'present but ', other_fname, 'missing')
continue
this_arr = load_image(this_fname).get_data()
other_arr = load_image(other_fname).get_data()
ok = np.allclose(this_arr[msk], other_arr[msk])
if not ok and froot in ('effect', 'sd', 't'): # Maybe a sign flip
ok = np.allclose(this_arr[msk], -other_arr[msk])
if not ok:
print('Difference between', this_fname, other_fname)
def compare_all(other_root, mask_fname):
""" Run results comparison for all subjects and runs """
for subj in range(5) + range(6, 16):
for run in range(1, 5):
compare_results(subj, run, other_root, mask_fname)
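# Hedged usage sketch added for illustration (not part of the original module):
# chain path_info_run with results_table for one subject/run.
def _demo_results_table(subj=0, run=1):
    path_dict = path_info_run(subj, run)
    return results_table(path_dict)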
| bsd-3-clause |
SMTG-UCL/galore | docs/source/figures/pe_schematic.py | 1 | 2106 | #! /usr/bin/env python3
import matplotlib.pyplot as plt
import galore
import galore.formats
import galore.plot
fig = plt.figure(figsize=(6.2, 2))
xlim = (-6, 2)
new_ylim = (0, 6e-6)
final_ylim = (0, 1.5e-4)
weighting = 'haxpes'
# Total DOS
ax1 = fig.add_subplot(1, 5, 1)
tdos_data = galore.formats.read_vasprun_totaldos('test/MgO/vasprun.xml.gz')
ax1.plot(tdos_data[:, 0], tdos_data[:, 1], 'k-')
ax1.set_title("Total DOS")
ax1.set_xlim(xlim)
# PDOS
ax2 = fig.add_subplot(1, 5, 2, sharey=ax1)
pdos_data = galore.formats.read_vasprun_pdos('test/MgO/vasprun.xml.gz')
galore.plot.plot_pdos(pdos_data, ax=ax2, total=False)
ax2.set_title("Orbitals")
ax2.set_xlim(xlim)
ax2.legend().remove()
## Build a nice key of line colors
line_colors = {line.get_label(): line.get_color() for line in ax2.lines}
## Set solid lines
for line in ax2.lines:
line.set_linestyle('-')
# Weighted PDOS
ax3 = fig.add_subplot(1, 5, 3)
weighted_data = galore.apply_orbital_weights(pdos_data,
galore.get_cross_sections(
weighting))
galore.plot.plot_pdos(weighted_data, ax=ax3, total=False)
ax3.set_title("Weight by \n cross-section")
ax3.set_xlim(xlim)
ax3.legend().remove()
## Fix consistent colors
for line in ax3.lines:
line.set_color(line_colors[line.get_label()])
line.set_linestyle('-')
# Summed PDOS
ax4 = fig.add_subplot(1, 5, 4, sharey=ax3)
galore.plot.plot_pdos(weighted_data, ax=ax4, total=True, show_orbitals=False)
ax4.set_title("Sum")
ax4.set_xlim(xlim)
ax4.set_ylim(new_ylim)
ax4.legend().remove()
# Broadened output
ax5 = fig.add_subplot(1, 5, 5)
broadened_data = galore.process_pdos(input='test/MgO/vasprun.xml.gz',
gaussian=0.5,
weighting=weighting)
galore.plot.plot_pdos(broadened_data, ax=ax5, total=True, show_orbitals=False)
ax5.set_title("Broaden")
ax5.set_xlim(xlim)
ax5.set_ylim(final_ylim)
ax5.legend().remove()
fig.subplots_adjust(left=0.03, right=0.98, top=0.78)
fig.savefig('docs/source/figures/pe_schematic.pdf')
| gpl-3.0 |
bert9bert/statsmodels | statsmodels/tsa/filters/filtertools.py | 25 | 12438 | # -*- coding: utf-8 -*-
"""Linear Filters for time series analysis and testing
TODO:
* check common sequence in signature of filter functions (ar,ma,x) or (x,ar,ma)
Created on Sat Oct 23 17:18:03 2010
Author: Josef-pktd
"""
#not original copied from various experimental scripts
#version control history is there
from statsmodels.compat.python import range
import numpy as np
import scipy.fftpack as fft
from scipy import signal
from scipy.signal.signaltools import _centered as trim_centered
from ._utils import _maybe_get_pandas_wrapper
def _pad_nans(x, head=None, tail=None):
if np.ndim(x) == 1:
if head is None and tail is None:
return x
elif head and tail:
return np.r_[[np.nan] * head, x, [np.nan] * tail]
elif tail is None:
return np.r_[[np.nan] * head, x]
elif head is None:
return np.r_[x, [np.nan] * tail]
elif np.ndim(x) == 2:
if head is None and tail is None:
return x
elif head and tail:
return np.r_[[[np.nan] * x.shape[1]] * head, x,
[[np.nan] * x.shape[1]] * tail]
elif tail is None:
return np.r_[[[np.nan] * x.shape[1]] * head, x]
elif head is None:
return np.r_[x, [[np.nan] * x.shape[1]] * tail]
else:
raise ValueError("Nan-padding for ndim > 2 not implemented")
#original changes and examples in sandbox.tsa.try_var_convolve
# don't do these imports, here just for copied fftconvolve
#get rid of these imports
#from scipy.fftpack import fft, ifft, ifftshift, fft2, ifft2, fftn, \
# ifftn, fftfreq
#from numpy import product,array
def fftconvolveinv(in1, in2, mode="full"):
"""Convolve two N-dimensional arrays using FFT. See convolve.
    copied from scipy.signal.signaltools, but used here to try out an inverse
    filter; it doesn't work, or I can't get it to work
2010-10-23:
looks ok to me for 1d,
from results below with padded data array (fftp)
but it doesn't work for multidimensional inverse filter (fftn)
original signal.fftconvolve also uses fftn
"""
s1 = np.array(in1.shape)
s2 = np.array(in2.shape)
complex_result = (np.issubdtype(in1.dtype, np.complex) or
np.issubdtype(in2.dtype, np.complex))
size = s1+s2-1
# Always use 2**n-sized FFT
fsize = 2**np.ceil(np.log2(size))
IN1 = fft.fftn(in1,fsize)
#IN1 *= fftn(in2,fsize) #JP: this looks like the only change I made
IN1 /= fft.fftn(in2,fsize) # use inverse filter
# note the inverse is elementwise not matrix inverse
# is this correct, NO doesn't seem to work for VARMA
fslice = tuple([slice(0, int(sz)) for sz in size])
ret = fft.ifftn(IN1)[fslice].copy()
del IN1
if not complex_result:
ret = ret.real
if mode == "full":
return ret
elif mode == "same":
if np.product(s1,axis=0) > np.product(s2,axis=0):
osize = s1
else:
osize = s2
return trim_centered(ret,osize)
elif mode == "valid":
return trim_centered(ret,abs(s2-s1)+1)
#code duplication with fftconvolveinv
def fftconvolve3(in1, in2=None, in3=None, mode="full"):
"""Convolve two N-dimensional arrays using FFT. See convolve.
for use with arma (old version: in1=num in2=den in3=data
* better for consistency with other functions in1=data in2=num in3=den
* note in2 and in3 need to have consistent dimension/shape
since I'm using max of in2, in3 shapes and not the sum
    copied from scipy.signal.signaltools, but used here to try out an inverse
    filter; it doesn't work, or I can't get it to work
2010-10-23
looks ok to me for 1d,
from results below with padded data array (fftp)
but it doesn't work for multidimensional inverse filter (fftn)
original signal.fftconvolve also uses fftn
"""
if (in2 is None) and (in3 is None):
raise ValueError('at least one of in2 and in3 needs to be given')
s1 = np.array(in1.shape)
if not in2 is None:
s2 = np.array(in2.shape)
else:
s2 = 0
if not in3 is None:
s3 = np.array(in3.shape)
s2 = max(s2, s3) # try this looks reasonable for ARMA
#s2 = s3
complex_result = (np.issubdtype(in1.dtype, np.complex) or
np.issubdtype(in2.dtype, np.complex))
size = s1+s2-1
# Always use 2**n-sized FFT
fsize = 2**np.ceil(np.log2(size))
#convolve shorter ones first, not sure if it matters
if not in2 is None:
IN1 = fft.fftn(in2, fsize)
if not in3 is None:
IN1 /= fft.fftn(in3, fsize) # use inverse filter
# note the inverse is elementwise not matrix inverse
# is this correct, NO doesn't seem to work for VARMA
IN1 *= fft.fftn(in1, fsize)
fslice = tuple([slice(0, int(sz)) for sz in size])
ret = fft.ifftn(IN1)[fslice].copy()
del IN1
if not complex_result:
ret = ret.real
if mode == "full":
return ret
elif mode == "same":
if np.product(s1,axis=0) > np.product(s2,axis=0):
osize = s1
else:
osize = s2
return trim_centered(ret,osize)
elif mode == "valid":
return trim_centered(ret,abs(s2-s1)+1)
#original changes and examples in sandbox.tsa.try_var_convolve
#examples and tests are there
def recursive_filter(x, ar_coeff, init=None):
'''
Autoregressive, or recursive, filtering.
Parameters
----------
x : array-like
Time-series data. Should be 1d or n x 1.
ar_coeff : array-like
AR coefficients in reverse time order. See Notes
init : array-like
Initial values of the time-series prior to the first value of y.
The default is zero.
Returns
-------
y : array
Filtered array, number of columns determined by x and ar_coeff. If a
pandas object is given, a pandas object is returned.
Notes
-----
Computes the recursive filter ::
y[n] = ar_coeff[0] * y[n-1] + ...
+ ar_coeff[n_coeff - 1] * y[n - n_coeff] + x[n]
where n_coeff = len(n_coeff).
'''
_pandas_wrapper = _maybe_get_pandas_wrapper(x)
x = np.asarray(x).squeeze()
ar_coeff = np.asarray(ar_coeff).squeeze()
if x.ndim > 1 or ar_coeff.ndim > 1:
raise ValueError('x and ar_coeff have to be 1d')
if init is not None: # integer init are treated differently in lfiltic
if len(init) != len(ar_coeff):
raise ValueError("ar_coeff must be the same length as init")
init = np.asarray(init, dtype=float)
if init is not None:
zi = signal.lfiltic([1], np.r_[1, -ar_coeff], init, x)
else:
zi = None
y = signal.lfilter([1.], np.r_[1, -ar_coeff], x, zi=zi)
if init is not None:
result = y[0]
else:
result = y
if _pandas_wrapper:
return _pandas_wrapper(result)
return result
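# Hedged example added for illustration (not part of the original module): an
# AR(1) recursion y[n] = 0.8*y[n-1] + x[n] applied to a unit-step input.
def _demo_recursive_filter():
    x = np.ones(20)
    y = recursive_filter(x, np.array([0.8]))
    # y climbs toward the steady-state value 1 / (1 - 0.8) = 5
    return y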
def convolution_filter(x, filt, nsides=2):
'''
Linear filtering via convolution. Centered and backward displaced moving
weighted average.
Parameters
----------
x : array_like
data array, 1d or 2d, if 2d then observations in rows
filt : array_like
Linear filter coefficients in reverse time-order. Should have the
same number of dimensions as x though if 1d and ``x`` is 2d will be
coerced to 2d.
nsides : int, optional
If 2, a centered moving average is computed using the filter
coefficients. If 1, the filter coefficients are for past values only.
Both methods use scipy.signal.convolve.
Returns
-------
y : ndarray, 2d
Filtered array, number of columns determined by x and filt. If a
pandas object is given, a pandas object is returned. The index of
the return is the exact same as the time period in ``x``
Notes
-----
In nsides == 1, x is filtered ::
y[n] = filt[0]*x[n-1] + ... + filt[n_filt-1]*x[n-n_filt]
where n_filt is len(filt).
If nsides == 2, x is filtered around lag 0 ::
y[n] = filt[0]*x[n - n_filt/2] + ... + filt[n_filt / 2] * x[n]
+ ... + x[n + n_filt/2]
where n_filt is len(filt). If n_filt is even, then more of the filter
is forward in time than backward.
If filt is 1d or (nlags,1) one lag polynomial is applied to all
variables (columns of x). If filt is 2d, (nlags, nvars) each series is
independently filtered with its own lag polynomial, uses loop over nvar.
This is different than the usual 2d vs 2d convolution.
Filtering is done with scipy.signal.convolve, so it will be reasonably
fast for medium sized data. For large data fft convolution would be
faster.
'''
# for nsides shift the index instead of using 0 for 0 lag this
# allows correct handling of NaNs
if nsides == 1:
trim_head = len(filt) - 1
trim_tail = None
elif nsides == 2:
trim_head = int(np.ceil(len(filt)/2.) - 1) or None
trim_tail = int(np.ceil(len(filt)/2.) - len(filt) % 2) or None
else: # pragma : no cover
raise ValueError("nsides must be 1 or 2")
_pandas_wrapper = _maybe_get_pandas_wrapper(x)
x = np.asarray(x)
filt = np.asarray(filt)
if x.ndim > 1 and filt.ndim == 1:
filt = filt[:, None]
if x.ndim > 2:
raise ValueError('x array has to be 1d or 2d')
if filt.ndim == 1 or min(filt.shape) == 1:
result = signal.convolve(x, filt, mode='valid')
elif filt.ndim == 2:
nlags = filt.shape[0]
nvar = x.shape[1]
result = np.zeros((x.shape[0] - nlags + 1, nvar))
if nsides == 2:
for i in range(nvar):
                # could also use np.convolve, but easier for switching to fft
result[:, i] = signal.convolve(x[:, i], filt[:, i],
mode='valid')
elif nsides == 1:
for i in range(nvar):
result[:, i] = signal.convolve(x[:, i], np.r_[0, filt[:, i]],
mode='valid')
result = _pad_nans(result, trim_head, trim_tail)
if _pandas_wrapper:
return _pandas_wrapper(result)
return result
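# Hedged example added for illustration (not part of the original module): a
# centered 3-point moving average; the first and last entries are NaN padding
# because the full window does not fit there.
def _demo_convolution_filter():
    x = np.arange(10.)
    filt = np.ones(3) / 3.
    return convolution_filter(x, filt, nsides=2)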
#copied from sandbox.tsa.garch
def miso_lfilter(ar, ma, x, useic=False): #[0.1,0.1]):
'''
use nd convolution to merge inputs,
then use lfilter to produce output
arguments for column variables
return currently 1d
Parameters
----------
ar : array_like, 1d, float
autoregressive lag polynomial including lag zero, ar(L)y_t
ma : array_like, same ndim as x, currently 2d
moving average lag polynomial ma(L)x_t
x : array_like, 2d
input data series, time in rows, variables in columns
Returns
-------
y : array, 1d
filtered output series
inp : array, 1d
combined input series
Notes
-----
currently for 2d inputs only, no choice of axis
Use of signal.lfilter requires that ar lag polynomial contains
floating point numbers
does not cut off invalid starting and final values
    miso_lfilter finds an array y such that::
ar(L)y_t = ma(L)x_t
with shapes y (nobs,), x (nobs,nvars), ar (narlags,), ma (narlags,nvars)
'''
ma = np.asarray(ma)
ar = np.asarray(ar)
#inp = signal.convolve(x, ma, mode='valid')
#inp = signal.convolve(x, ma)[:, (x.shape[1]+1)//2]
#Note: convolve mixes up the variable left-right flip
#I only want the flip in time direction
#this might also be a mistake or problem in other code where I
#switched from correlate to convolve
# correct convolve version, for use with fftconvolve in other cases
#inp2 = signal.convolve(x, ma[:,::-1])[:, (x.shape[1]+1)//2]
inp = signal.correlate(x, ma[::-1,:])[:, (x.shape[1]+1)//2]
#for testing 2d equivalence between convolve and correlate
#np.testing.assert_almost_equal(inp2, inp)
nobs = x.shape[0]
    # cut off extra values at end
#todo initialize also x for correlate
if useic:
return signal.lfilter([1], ar, inp,
#zi=signal.lfilter_ic(np.array([1.,0.]),ar, ic))[0][:nobs], inp[:nobs]
zi=signal.lfiltic(np.array([1.,0.]),ar, useic))[0][:nobs], inp[:nobs]
else:
return signal.lfilter([1], ar, inp)[:nobs], inp[:nobs]
#return signal.lfilter([1], ar, inp), inp
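# Hedged example added for illustration (not part of the original module): two
# input series combined with unit weights at lag zero, then passed through the
# AR filter with lag polynomial (1 - 0.5 L) applied to y.
def _demo_miso_lfilter():
    np.random.seed(0)
    x = np.random.randn(50, 2)
    ar = np.array([1., -0.5])
    ma = np.array([[1., 1.],
                   [0., 0.]])
    y, inp = miso_lfilter(ar, ma, x)
    return y, inp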
| bsd-3-clause |
tmhm/scikit-learn | examples/mixture/plot_gmm_selection.py | 248 | 3223 | """
=================================
Gaussian Mixture Model Selection
=================================
This example shows that model selection can be performed with
Gaussian Mixture Models using information-theoretic criteria (BIC).
Model selection concerns both the covariance type
and the number of components in the model.
In that case, AIC also provides the right result (not shown to save time),
but BIC is better suited if the problem is to identify the right model.
Unlike Bayesian procedures, such inferences are prior-free.
In that case, the model with 2 components and full covariance
(which corresponds to the true generative model) is selected.
"""
print(__doc__)
import itertools
import numpy as np
from scipy import linalg
import matplotlib.pyplot as plt
import matplotlib as mpl
from sklearn import mixture
# Number of samples per component
n_samples = 500
# Generate random sample, two components
np.random.seed(0)
C = np.array([[0., -0.1], [1.7, .4]])
X = np.r_[np.dot(np.random.randn(n_samples, 2), C),
.7 * np.random.randn(n_samples, 2) + np.array([-6, 3])]
lowest_bic = np.infty
bic = []
n_components_range = range(1, 7)
cv_types = ['spherical', 'tied', 'diag', 'full']
for cv_type in cv_types:
for n_components in n_components_range:
# Fit a mixture of Gaussians with EM
gmm = mixture.GMM(n_components=n_components, covariance_type=cv_type)
gmm.fit(X)
bic.append(gmm.bic(X))
if bic[-1] < lowest_bic:
lowest_bic = bic[-1]
best_gmm = gmm
bic = np.array(bic)
color_iter = itertools.cycle(['k', 'r', 'g', 'b', 'c', 'm', 'y'])
clf = best_gmm
bars = []
# Plot the BIC scores
spl = plt.subplot(2, 1, 1)
for i, (cv_type, color) in enumerate(zip(cv_types, color_iter)):
xpos = np.array(n_components_range) + .2 * (i - 2)
bars.append(plt.bar(xpos, bic[i * len(n_components_range):
(i + 1) * len(n_components_range)],
width=.2, color=color))
plt.xticks(n_components_range)
plt.ylim([bic.min() * 1.01 - .01 * bic.max(), bic.max()])
plt.title('BIC score per model')
xpos = np.mod(bic.argmin(), len(n_components_range)) + .65 +\
.2 * np.floor(bic.argmin() / len(n_components_range))
plt.text(xpos, bic.min() * 0.97 + .03 * bic.max(), '*', fontsize=14)
spl.set_xlabel('Number of components')
spl.legend([b[0] for b in bars], cv_types)
# Plot the winner
splot = plt.subplot(2, 1, 2)
Y_ = clf.predict(X)
for i, (mean, covar, color) in enumerate(zip(clf.means_, clf.covars_,
color_iter)):
v, w = linalg.eigh(covar)
if not np.any(Y_ == i):
continue
plt.scatter(X[Y_ == i, 0], X[Y_ == i, 1], .8, color=color)
# Plot an ellipse to show the Gaussian component
angle = np.arctan2(w[0][1], w[0][0])
angle = 180 * angle / np.pi # convert to degrees
v *= 4
ell = mpl.patches.Ellipse(mean, v[0], v[1], 180 + angle, color=color)
ell.set_clip_box(splot.bbox)
ell.set_alpha(.5)
splot.add_artist(ell)
plt.xlim(-10, 10)
plt.ylim(-3, 6)
plt.xticks(())
plt.yticks(())
plt.title('Selected GMM: full model, 2 components')
plt.subplots_adjust(hspace=.35, bottom=.02)
plt.show()
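# Hedged sketch added for illustration (not part of the original example): the
# AIC-based selection mentioned in the docstring can be computed the same way,
# swapping gmm.bic(X) for gmm.aic(X) in the loop above, e.g.
#     aic = [mixture.GMM(n_components=n, covariance_type=cv).fit(X).aic(X)
#            for cv in cv_types for n in n_components_range]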
| bsd-3-clause |
kastnerkyle/python-rl | pyrl/agents/planners/fitted_qiteration.py | 2 | 8884 |
# Author: Will Dabney
from random import Random
import numpy
import pyrl.basis.fourier as fourier
import pyrl.basis.rbf as rbf
import pyrl.basis.tilecode as tilecode
import pyrl.basis.trivial as trivial
from planner import Planner
from sklearn import linear_model
from sklearn.svm import SVR
from sklearn import tree
class FittedQIteration(Planner):
"""FittedQIteration is an implementation of the Fitted Q-Iteration algorithm of Ernst, Geurts, Wehenkel (2005).
This class allows the use of a variety of regression algorithms, provided by scikits-learn, to be used for
representing the Q-value function. Additionally, different basis functions can be applied to the features before
being passed to the regressors, including trivial, fourier, tile coding, and radial basis functions.
"""
def __init__(self, model, **kwargs):
"""Inits the Fitted Q-Iteration planner with discount factor, instantiated model learner, and additional parameters.
Args:
model: The model learner object
gamma=1.0: The discount factor for the domain
**kwargs: Additional parameters for use in the class.
"""
Planner.__init__(self, model, **kwargs)
self.fa_name = self.params.setdefault('basis', 'trivial')
self.params.setdefault('iterations', 200)
self.params.setdefault('support_size', 200)
self.basis = None
# Set up regressor
learn_name = self.params.setdefault('regressor', 'linreg')
if learn_name == 'linreg':
self.learner = linear_model.LinearRegression()
elif learn_name == 'ridge':
self.learner = linear_model.Ridge(alpha = self.params.setdefault('l2', 0.5))
elif learn_name == 'tree':
self.learner = tree.DecisionTreeRegressor()
elif learn_name == 'svm':
self.learner = SVR()
else:
self.learner = None
def randomize_parameters(self, **args):
"""Generate parameters randomly, constrained by given named parameters.
Parameters that fundamentally change the algorithm are not randomized over. For
example, basis and softmax fundamentally change the domain and have very few values
to be considered. They are not randomized over.
Basis parameters, on the other hand, have many possible values and ARE randomized.
Args:
**args: Named parameters to fix, which will not be randomly generated
Returns:
List of resulting parameters of the class. Will always be in the same order.
Empty list if parameter free.
"""
self.randParameter('iterations', args, sample=numpy.random.randint(500))
self.randParameter('support_size', args, sample=numpy.random.randint(500))
# Randomize basis parameters
if self.fa_name == 'fourier':
self.randParameter('fourier_order', args, sample=numpy.random.randint(1,5)*2 + 1)
elif self.fa_name == 'rbf':
self.randParameter('rbf_number', args, sample=numpy.random.randint(100))
self.randParameter('rbf_beta', args)
elif self.fa_name == 'tile':
self.randParameter('tile_number', args, sample=numpy.random.randint(200))
self.randParameter('tile_weights', args, sample=2**numpy.random.randint(15))
return super(FittedQIteration,self).randomize_parameters(**args)
def planner_init(self, numDiscStates, contFeatureRanges, numActions, rewardRange):
self.has_plan = False
self.ranges, self.actions = self.model.getStateSpace()
# Set up basis
if self.fa_name == 'fourier':
self.basis = fourier.FourierBasis(len(self.ranges), self.ranges,
order=self.params.setdefault('fourier_order', 3))
elif self.fa_name == 'rbf':
self.basis = rbf.RBFBasis(len(self.ranges), self.ranges,
num_functions=self.params.setdefault('rbf_number', len(self.ranges)),
beta=self.params.setdefault('rbf_beta', 1.0))
elif self.fa_name == 'tile':
self.basis = tilecode.TileCodingBasis(len(self.ranges), self.ranges,
num_tiles=self.params.setdefault('tile_number', 100),
num_weights=self.params.setdefault('tile_weights', 2048))
else:
self.basis = trivial.TrivialBasis(len(self.ranges), self.ranges)
def getStateAction(self, state, action):
"""Returns the basified state feature array for the given state action pair.
Args:
state: The array of state features
action: The action taken from the given state
Returns:
The array containing the result of applying the basis functions to the state-action.
"""
state = self.basis.computeFeatures(state)
stateaction = numpy.zeros((self.actions, len(state)))
stateaction[action,:] = state
return stateaction.flatten()
def predict(self, state, action):
"""Predict the next state, reward, and termination probability for the current state-action.
Args:
state: The array of state features
action: The action taken from the given state
Returns:
Tuple (next_state, reward, termination), where next_state gives the predicted next state,
reward gives the predicted reward for transitioning to that state, and termination
            gives the expected probability of terminating the episode upon transitioning.
All three are None if no model has been learned for the given action.
"""
if self.model.has_fit[action]:
return self.model.predict(state, action)
else:
return None, None, None
def getValue(self, state):
"""Get the Q-value function value for the greedy action choice at the given state (ie V(state)).
Args:
state: The array of state features
Returns:
The double value for the value function at the given state
"""
if self.has_plan:
return self.learner.predict([self.getStateAction(state, a) for a in range(self.actions)]).max()
else:
return None
def getAction(self, state):
"""Get the action under the current plan policy for the given state.
Args:
state: The array of state features
Returns:
The current greedy action under the planned policy for the given state. If no plan has been formed,
return a random action.
"""
if self.has_plan:
return self.learner.predict([self.getStateAction(state, a) for a in range(self.actions)]).argmax()
else:
return self.randGenerator.randint(0, self.actions-1)
def updatePlan(self):
"""Run Fitted Q-Iteration on samples from the model, and update the plan accordingly."""
for sample_iter in range(self.params.setdefault('resample', 1)):
self.has_plan = False
prev_coef = None
samples = self.model.sampleStateActions(self.params['support_size'])
outcomes = self.model.predictSet(samples)
Xp = []
X = []
R = []
gammas = []
for a in range(self.actions):
Xp += map(lambda k: [self.getStateAction(k, b) for b in range(self.actions)], outcomes[a][0])
X += map(lambda k: self.getStateAction(k, a), samples[a])
R += list(outcomes[a][1])
gammas += list((1.0 - outcomes[a][2]) * self.gamma)
Xp = numpy.array(Xp)
Xp = Xp.reshape(Xp.shape[0]*Xp.shape[1], Xp.shape[2])
X = numpy.array(X)
R = numpy.array(R)
gammas = numpy.array(gammas)
targets = []
Qp = None
error = 1.0
iter2 = 0
threshold = 1.0e-4
while error > threshold and iter2 < self.params['iterations']:
if self.has_plan:
Qprimes = self.learner.predict(Xp).reshape((X.shape[0], self.actions))
targets = R + gammas*Qprimes.max(1)
Qp = Qprimes
else:
targets = R
self.has_plan = True
self.learner.fit(X, targets)
try:
if prev_coef is not None:
error = numpy.linalg.norm(prev_coef - self.learner.coef_)
prev_coef = self.learner.coef_.copy()
except:
pass
iter2 += 1
#print "#?", sample_iter, iter2, error, self.model.exp_index
if error <= threshold:
return
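# Hedged usage sketch added for illustration (not part of the original module);
# ``model`` below stands for whatever instantiated model learner the Planner
# base class expects and is an assumed placeholder.
#     planner = FittedQIteration(model, basis='fourier', regressor='ridge')
#     planner.planner_init(numDiscStates, contFeatureRanges, numActions,
#                          rewardRange)
#     planner.updatePlan()
#     action = planner.getAction(state)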
| gpl-3.0 |
probml/pyprobml | scripts/gauss_height_weight_plot.py | 1 | 1696 |
# Gaussian in 2d fit to height/weight data
# Author: Duane Rich
# Based on matlab code by Kevin Murphy
# https://github.com/probml/pmtk3/blob/master/demos/gaussHeightWeight.m
import numpy as np
import matplotlib.pyplot as plt
import os
#figdir = os.path.join(os.environ["PYPROBML"], "figures")
#datadir = os.path.join(os.environ["PYPROBML"], "data", "heightWeight")
figdir = '../figures'
datadir = '../data/heightWeight'
def save_fig(fname): plt.savefig(os.path.join(figdir, fname))
from matplotlib.patches import Ellipse
import scipy.io
dataAll = scipy.io.loadmat(os.path.join(datadir, "heightWeight.mat"))
data = dataAll['heightWeightData']
sex = data[:, 0]
x = data[:, 1]
y = data[:, 2]
male_arg = (sex == 1)
female_arg = (sex == 2)
x_male = x[male_arg]
y_male = y[male_arg]
x_female = x[female_arg]
y_female = y[female_arg]
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(x_male, y_male, 'bx')
ax.plot(x_female, y_female, 'ro')
save_fig('heightWeightScatter.pdf')
plt.show()
def draw_ell(cov, xy, color):
u, v = np.linalg.eigh(cov)
angle = np.arctan2(v[0][1], v[0][0])
angle = (180 * angle / np.pi)
    # Scale so the full axis length is 5 * sqrt(eigenvalue); this covers
    # roughly 95% of the probability mass for a 2-D Gaussian
u2 = 5 * (u ** 0.5)
e = Ellipse(xy, u2[0], u2[1], angle)
ax.add_artist(e)
e.set_clip_box(ax.bbox)
e.set_facecolor('none')
e.set_edgecolor(color)
cov_matrix1 = np.cov(np.vstack([x_female.ravel(), y_female.ravel()]))
xy1 = (np.mean(x_female), np.mean(y_female))
cov_matrix2 = np.cov(np.vstack([x_male.ravel(), y_male.ravel()]))
xy2 = (np.mean(x_male), np.mean(y_male))
draw_ell(cov_matrix1, xy1, 'r')
draw_ell(cov_matrix2, xy2, 'b')
save_fig('heightWeightScatterCov.pdf')
plt.show()
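# Hedged sanity check added for illustration (not part of the original script):
# fraction of points inside the plotted ellipse, via squared Mahalanobis
# distance. The full-axis scaling of 5*sqrt(eigenvalue) used in draw_ell
# corresponds to a squared-distance threshold of (5/2)**2 = 6.25, close to the
# chi-square (2 dof) 95% quantile of about 5.99.
def fraction_inside_ellipse(x_pts, y_pts, cov, center, threshold=(5.0 / 2) ** 2):
    pts = np.vstack([x_pts - center[0], y_pts - center[1]])
    mahal_sq = np.sum(pts * np.linalg.solve(cov, pts), axis=0)
    return np.mean(mahal_sq <= threshold)
print('Fraction of female points inside ellipse: {0:.2f}'.format(
    fraction_inside_ellipse(x_female, y_female, cov_matrix1, xy1)))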
| mit |
rubikloud/scikit-learn | sklearn/datasets/tests/test_lfw.py | 230 | 7880 | """This test for the LFW requires medium-size data downloading and processing
If the data has not been already downloaded by running the examples,
the tests won't run (skipped).
If the tests are run, the first execution will be long (typically a bit
more than a couple of minutes) but as the dataset loader is leveraging
joblib, successive runs will be fast (less than 200ms).
"""
import random
import os
import shutil
import tempfile
import numpy as np
from sklearn.externals import six
try:
try:
from scipy.misc import imsave
except ImportError:
from scipy.misc.pilutil import imsave
except ImportError:
imsave = None
from sklearn.datasets import load_lfw_pairs
from sklearn.datasets import load_lfw_people
from sklearn.datasets import fetch_lfw_pairs
from sklearn.datasets import fetch_lfw_people
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import raises
SCIKIT_LEARN_DATA = tempfile.mkdtemp(prefix="scikit_learn_lfw_test_")
SCIKIT_LEARN_EMPTY_DATA = tempfile.mkdtemp(prefix="scikit_learn_empty_test_")
LFW_HOME = os.path.join(SCIKIT_LEARN_DATA, 'lfw_home')
FAKE_NAMES = [
'Abdelatif_Smith',
'Abhati_Kepler',
'Camara_Alvaro',
'Chen_Dupont',
'John_Lee',
'Lin_Bauman',
'Onur_Lopez',
]
def setup_module():
"""Test fixture run once and common to all tests of this module"""
if imsave is None:
raise SkipTest("PIL not installed.")
if not os.path.exists(LFW_HOME):
os.makedirs(LFW_HOME)
random_state = random.Random(42)
np_rng = np.random.RandomState(42)
# generate some random jpeg files for each person
counts = {}
for name in FAKE_NAMES:
folder_name = os.path.join(LFW_HOME, 'lfw_funneled', name)
if not os.path.exists(folder_name):
os.makedirs(folder_name)
n_faces = np_rng.randint(1, 5)
counts[name] = n_faces
for i in range(n_faces):
file_path = os.path.join(folder_name, name + '_%04d.jpg' % i)
uniface = np_rng.randint(0, 255, size=(250, 250, 3))
try:
imsave(file_path, uniface)
except ImportError:
raise SkipTest("PIL not installed")
# add some random file pollution to test robustness
with open(os.path.join(LFW_HOME, 'lfw_funneled', '.test.swp'), 'wb') as f:
f.write(six.b('Text file to be ignored by the dataset loader.'))
# generate some pairing metadata files using the same format as LFW
with open(os.path.join(LFW_HOME, 'pairsDevTrain.txt'), 'wb') as f:
f.write(six.b("10\n"))
more_than_two = [name for name, count in six.iteritems(counts)
if count >= 2]
for i in range(5):
name = random_state.choice(more_than_two)
first, second = random_state.sample(range(counts[name]), 2)
f.write(six.b('%s\t%d\t%d\n' % (name, first, second)))
for i in range(5):
first_name, second_name = random_state.sample(FAKE_NAMES, 2)
first_index = random_state.choice(np.arange(counts[first_name]))
second_index = random_state.choice(np.arange(counts[second_name]))
f.write(six.b('%s\t%d\t%s\t%d\n' % (first_name, first_index,
second_name, second_index)))
with open(os.path.join(LFW_HOME, 'pairsDevTest.txt'), 'wb') as f:
f.write(six.b("Fake place holder that won't be tested"))
with open(os.path.join(LFW_HOME, 'pairs.txt'), 'wb') as f:
f.write(six.b("Fake place holder that won't be tested"))
def teardown_module():
"""Test fixture (clean up) run once after all tests of this module"""
if os.path.isdir(SCIKIT_LEARN_DATA):
shutil.rmtree(SCIKIT_LEARN_DATA)
if os.path.isdir(SCIKIT_LEARN_EMPTY_DATA):
shutil.rmtree(SCIKIT_LEARN_EMPTY_DATA)
@raises(IOError)
def test_load_empty_lfw_people():
fetch_lfw_people(data_home=SCIKIT_LEARN_EMPTY_DATA, download_if_missing=False)
def test_load_lfw_people_deprecation():
msg = ("Function 'load_lfw_people' has been deprecated in 0.17 and will be "
"removed in 0.19."
"Use fetch_lfw_people(download_if_missing=False) instead.")
assert_warns_message(DeprecationWarning, msg, load_lfw_people,
data_home=SCIKIT_LEARN_DATA)
def test_load_fake_lfw_people():
lfw_people = fetch_lfw_people(data_home=SCIKIT_LEARN_DATA,
min_faces_per_person=3, download_if_missing=False)
    # The data is cropped around the center as a rectangular bounding box
    # around the face. Colors are converted to gray levels:
assert_equal(lfw_people.images.shape, (10, 62, 47))
assert_equal(lfw_people.data.shape, (10, 2914))
# the target is array of person integer ids
assert_array_equal(lfw_people.target, [2, 0, 1, 0, 2, 0, 2, 1, 1, 2])
# names of the persons can be found using the target_names array
expected_classes = ['Abdelatif Smith', 'Abhati Kepler', 'Onur Lopez']
assert_array_equal(lfw_people.target_names, expected_classes)
    # It is possible to ask for the original data without any cropping or color
    # conversion, and with no limit on the number of pictures per person
lfw_people = fetch_lfw_people(data_home=SCIKIT_LEARN_DATA,
resize=None, slice_=None, color=True, download_if_missing=False)
assert_equal(lfw_people.images.shape, (17, 250, 250, 3))
# the ids and class names are the same as previously
assert_array_equal(lfw_people.target,
[0, 0, 1, 6, 5, 6, 3, 6, 0, 3, 6, 1, 2, 4, 5, 1, 2])
assert_array_equal(lfw_people.target_names,
['Abdelatif Smith', 'Abhati Kepler', 'Camara Alvaro',
'Chen Dupont', 'John Lee', 'Lin Bauman', 'Onur Lopez'])
@raises(ValueError)
def test_load_fake_lfw_people_too_restrictive():
fetch_lfw_people(data_home=SCIKIT_LEARN_DATA, min_faces_per_person=100, download_if_missing=False)
@raises(IOError)
def test_load_empty_lfw_pairs():
fetch_lfw_pairs(data_home=SCIKIT_LEARN_EMPTY_DATA, download_if_missing=False)
def test_load_lfw_pairs_deprecation():
msg = ("Function 'load_lfw_pairs' has been deprecated in 0.17 and will be "
"removed in 0.19."
"Use fetch_lfw_pairs(download_if_missing=False) instead.")
assert_warns_message(DeprecationWarning, msg, load_lfw_pairs,
data_home=SCIKIT_LEARN_DATA)
def test_load_fake_lfw_pairs():
lfw_pairs_train = fetch_lfw_pairs(data_home=SCIKIT_LEARN_DATA, download_if_missing=False)
    # The data is cropped around the center as a rectangular bounding box
    # around the face. Colors are converted to gray levels:
assert_equal(lfw_pairs_train.pairs.shape, (10, 2, 62, 47))
# the target is whether the person is the same or not
assert_array_equal(lfw_pairs_train.target, [1, 1, 1, 1, 1, 0, 0, 0, 0, 0])
# names of the persons can be found using the target_names array
expected_classes = ['Different persons', 'Same person']
assert_array_equal(lfw_pairs_train.target_names, expected_classes)
    # It is possible to ask for the original data without any cropping or color
# conversion
lfw_pairs_train = fetch_lfw_pairs(data_home=SCIKIT_LEARN_DATA,
resize=None, slice_=None, color=True, download_if_missing=False)
assert_equal(lfw_pairs_train.pairs.shape, (10, 2, 250, 250, 3))
# the ids and class names are the same as previously
assert_array_equal(lfw_pairs_train.target, [1, 1, 1, 1, 1, 0, 0, 0, 0, 0])
assert_array_equal(lfw_pairs_train.target_names, expected_classes)
| bsd-3-clause |
starikan/trading-with-python | cookbook/reconstructVXX/downloadVixFutures.py | 77 | 3012 | #-------------------------------------------------------------------------------
# Name: download CBOE futures
# Purpose: get VIX futures data from CBOE, process data to a single file
#
#
# Created: 15-10-2011
# Copyright: (c) Jev Kuznetsov 2011
# Licence: BSD
#-------------------------------------------------------------------------------
#!/usr/bin/env python
from urllib import urlretrieve
import os
from pandas import *
import datetime
import numpy as np
m_codes = ['F','G','H','J','K','M','N','Q','U','V','X','Z'] #month codes of the futures
codes = dict(zip(m_codes,range(1,len(m_codes)+1)))
#dataDir = os.path.dirname(__file__)+'/data'
dataDir = os.path.expanduser('~')+'/twpData/vixFutures'
print 'Data directory: ', dataDir
def saveVixFutureData(year,month, path, forceDownload=False):
''' Get future from CBOE and save to file '''
fName = "CFE_{0}{1}_VX.csv".format(m_codes[month],str(year)[-2:])
    if os.path.exists(path+'\\'+fName) and not forceDownload:
print 'File already downloaded, skipping'
return
urlStr = "http://cfe.cboe.com/Publish/ScheduledTask/MktData/datahouse/{0}".format(fName)
print 'Getting: %s' % urlStr
try:
urlretrieve(urlStr,path+'\\'+fName)
except Exception as e:
print e
def buildDataTable(dataDir):
""" create single data sheet """
files = os.listdir(dataDir)
data = {}
for fName in files:
print 'Processing: ', fName
try:
df = DataFrame.from_csv(dataDir+'/'+fName)
code = fName.split('.')[0].split('_')[1]
month = '%02d' % codes[code[0]]
year = '20'+code[1:]
newCode = year+'_'+month
data[newCode] = df
except Exception as e:
print 'Could not process:', e
full = DataFrame()
for k,df in data.iteritems():
s = df['Settle']
s.name = k
s[s<5] = np.nan
if len(s.dropna())>0:
full = full.join(s,how='outer')
else:
print s.name, ': Empty dataset.'
full[full<5]=np.nan
full = full[sorted(full.columns)]
# use only data after this date
startDate = datetime.datetime(2008,1,1)
idx = full.index >= startDate
full = full.ix[idx,:]
#full.plot(ax=gca())
fName = os.path.expanduser('~')+'/twpData/vix_futures.csv'
print 'Saving to ', fName
full.to_csv(fName)
if __name__ == '__main__':
if not os.path.exists(dataDir):
print 'creating data directory %s' % dataDir
os.makedirs(dataDir)
for year in range(2008,2013):
for month in range(12):
print 'Getting data for {0}/{1}'.format(year,month+1)
saveVixFutureData(year,month,dataDir)
    print 'Raw data was saved to {0}'.format(dataDir)
buildDataTable(dataDir) | bsd-3-clause |
thunderhoser/GewitterGefahr | gewittergefahr/gg_utils/storm_tracking_utils.py | 1 | 19223 | """Processing methods for storm-tracking data (both polygons and tracks)."""
import numpy
import pandas
from gewittergefahr.gg_utils import polygons
from gewittergefahr.gg_utils import geodetic_utils
from gewittergefahr.gg_utils import projections
from gewittergefahr.gg_utils import error_checking
SEGMOTION_NAME = 'segmotion'
PROBSEVERE_NAME = 'probSevere'
DATA_SOURCE_NAMES = [SEGMOTION_NAME, PROBSEVERE_NAME]
FULL_ID_COLUMN = 'full_id_string'
PRIMARY_ID_COLUMN = 'primary_id_string'
SECONDARY_ID_COLUMN = 'secondary_id_string'
FIRST_PREV_SECONDARY_ID_COLUMN = 'first_prev_secondary_id_string'
SECOND_PREV_SECONDARY_ID_COLUMN = 'second_prev_secondary_id_string'
FIRST_NEXT_SECONDARY_ID_COLUMN = 'first_next_secondary_id_string'
SECOND_NEXT_SECONDARY_ID_COLUMN = 'second_next_secondary_id_string'
VALID_TIME_COLUMN = 'valid_time_unix_sec'
SPC_DATE_COLUMN = 'spc_date_string'
TRACKING_START_TIME_COLUMN = 'tracking_start_time_unix_sec'
TRACKING_END_TIME_COLUMN = 'tracking_end_time_unix_sec'
CELL_START_TIME_COLUMN = 'cell_start_time_unix_sec'
CELL_END_TIME_COLUMN = 'cell_end_time_unix_sec'
AGE_COLUMN = 'age_seconds'
CENTROID_LATITUDE_COLUMN = 'centroid_latitude_deg'
CENTROID_LONGITUDE_COLUMN = 'centroid_longitude_deg'
EAST_VELOCITY_COLUMN = 'east_velocity_m_s01'
NORTH_VELOCITY_COLUMN = 'north_velocity_m_s01'
LATITUDES_IN_STORM_COLUMN = 'grid_point_latitudes_deg'
LONGITUDES_IN_STORM_COLUMN = 'grid_point_longitudes_deg'
ROWS_IN_STORM_COLUMN = 'grid_point_rows'
COLUMNS_IN_STORM_COLUMN = 'grid_point_columns'
LATLNG_POLYGON_COLUMN = 'polygon_object_latlng_deg'
ROWCOL_POLYGON_COLUMN = 'polygon_object_rowcol'
BUFFER_COLUMN_PREFIX = 'polygon_object_latlng_deg_buffer'
LINEAR_INDEX_COLUMN = 'linear_index'
CENTROID_X_COLUMN = 'centroid_x_metres'
CENTROID_Y_COLUMN = 'centroid_y_metres'
TRACK_TIMES_COLUMN = 'valid_times_unix_sec'
OBJECT_INDICES_COLUMN = 'object_indices'
TRACK_LATITUDES_COLUMN = 'centroid_latitudes_deg'
TRACK_LONGITUDES_COLUMN = 'centroid_longitudes_deg'
TRACK_X_COORDS_COLUMN = 'centroid_x_coords_metres'
TRACK_Y_COORDS_COLUMN = 'centroid_y_coords_metres'
def check_data_source(source_name):
"""Error-checks data source.
:param source_name: Data source.
:raises: ValueError: if `data_source not in DATA_SOURCE_NAMES`.
"""
error_checking.assert_is_string(source_name)
if source_name not in DATA_SOURCE_NAMES:
error_string = (
'\n{0:s}\nValid data sources (listed above) do not include "{1:s}".'
).format(str(DATA_SOURCE_NAMES), source_name)
raise ValueError(error_string)
def buffer_to_column_name(min_distance_metres, max_distance_metres):
"""Generates column name for distance buffer around storm object.
:param min_distance_metres: Minimum distance around storm object. If the
storm object is included in the buffer, there is no minimum distance and
this value should be NaN.
:param max_distance_metres: Max distance around storm object.
:return: column_name: Column name.
"""
max_distance_metres = int(numpy.round(max_distance_metres))
error_checking.assert_is_geq(max_distance_metres, 0)
if numpy.isnan(min_distance_metres):
return '{0:s}_{1:d}m'.format(BUFFER_COLUMN_PREFIX, max_distance_metres)
min_distance_metres = int(numpy.round(min_distance_metres))
error_checking.assert_is_geq(min_distance_metres, 0)
error_checking.assert_is_greater(max_distance_metres, min_distance_metres)
return '{0:s}_{1:d}m_{2:d}m'.format(
BUFFER_COLUMN_PREFIX, min_distance_metres, max_distance_metres)
def column_name_to_buffer(column_name):
"""Parses distance buffer (min and max distances) from column name.
If distance buffer cannot be parsed from column name, this method will just
return None for all output variables.
:param column_name: Column name.
:return: min_distance_metres: See doc for `buffer_to_column_name`.
:return: max_distance_metres: Same.
"""
if not column_name.startswith(BUFFER_COLUMN_PREFIX):
return None, None
buffer_strings = column_name.replace(
BUFFER_COLUMN_PREFIX + '_', ''
).split('_')
if len(buffer_strings) == 1:
min_distance_metres = numpy.nan
elif len(buffer_strings) == 2:
min_distance_metres = -1
else:
return None, None
max_distance_string = buffer_strings[-1]
if not max_distance_string.endswith('m'):
return None, None
max_distance_string = max_distance_string.replace('m', '')
try:
max_distance_metres = float(int(max_distance_string))
except ValueError:
return None, None
if numpy.isnan(min_distance_metres):
return min_distance_metres, max_distance_metres
min_distance_part = buffer_strings[-2]
if not min_distance_part.endswith('m'):
return None, None
min_distance_part = min_distance_part.replace('m', '')
try:
min_distance_metres = float(int(min_distance_part))
except ValueError:
return None, None
return min_distance_metres, max_distance_metres
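# Hedged example added for illustration (not part of the original module),
# showing the round trip between distance buffers and column names.
def _demo_buffer_column_names():
    inner_name = buffer_to_column_name(
        min_distance_metres=0., max_distance_metres=5000.)
    # inner_name == 'polygon_object_latlng_deg_buffer_0m_5000m'
    outer_name = buffer_to_column_name(
        min_distance_metres=numpy.nan, max_distance_metres=5000.)
    # outer_name == 'polygon_object_latlng_deg_buffer_5000m'
    return column_name_to_buffer(inner_name), column_name_to_buffer(outer_name)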
def get_buffer_columns(storm_object_table):
"""Finds column names corresponding to distance buffers.
:param storm_object_table: pandas DataFrame with storm objects. Each row is
one storm object. Columns are listed in `write_file`.
:return: buffer_column_names: 1-D list of column names corresponding to
distance buffers. This may be None (empty list).
"""
all_column_names = list(storm_object_table)
buffer_column_names = None
for this_column_name in all_column_names:
_, this_max_distance_metres = column_name_to_buffer(this_column_name)
if this_max_distance_metres is None:
continue
if buffer_column_names is None:
buffer_column_names = [this_column_name]
else:
buffer_column_names.append(this_column_name)
return buffer_column_names
def find_storm_objects(
all_id_strings, all_times_unix_sec, id_strings_to_keep,
times_to_keep_unix_sec, allow_missing=False):
"""Finds storm objects.
N = total number of storm objects
K = number of storm objects to keep
:param all_id_strings: length-N list with all storm IDs.
:param all_times_unix_sec: length-N list with all valid times.
:param id_strings_to_keep: length-K list of IDs to keep.
:param times_to_keep_unix_sec: length-K list of valid times to keep.
:param allow_missing: Boolean flag. If True, will allow missing storm
objects (i.e., some objects specified by `id_strings_to_keep` and
`times_to_keep_unix_sec` may be missing from `all_id_strings` and
`all_times_unix_sec`). If False, this method will error out if any
missing storm objects.
:return: relevant_indices: length-K numpy array of indices, such that:
[find_storm_objects[k] for k in relevant_indices] = id_strings_to_keep
all_times_unix_sec[relevant_indices] = times_to_keep_unix_sec
:raises: ValueError: if `all_id_strings` and `all_times_unix_sec` contain
any duplicate pairs.
:raises: ValueError: if a desired storm object is not found and
`allow_missing = False`.
"""
error_checking.assert_is_boolean(allow_missing)
error_checking.assert_is_numpy_array(
numpy.array(all_id_strings), num_dimensions=1)
num_objects_total = len(all_id_strings)
these_expected_dim = numpy.array([num_objects_total], dtype=int)
error_checking.assert_is_numpy_array(
all_times_unix_sec, exact_dimensions=these_expected_dim)
error_checking.assert_is_numpy_array(
numpy.array(id_strings_to_keep), num_dimensions=1)
num_objects_to_keep = len(id_strings_to_keep)
these_expected_dim = numpy.array([num_objects_to_keep], dtype=int)
error_checking.assert_is_numpy_array(
times_to_keep_unix_sec, exact_dimensions=these_expected_dim)
all_object_id_strings = [
'{0:s}_{1:d}'.format(all_id_strings[i], all_times_unix_sec[i])
for i in range(num_objects_total)
]
object_id_strings_to_keep = [
'{0:s}_{1:d}'.format(id_strings_to_keep[i], times_to_keep_unix_sec[i])
for i in range(num_objects_to_keep)
]
this_num_unique = len(set(all_object_id_strings))
if this_num_unique != len(all_object_id_strings):
error_string = (
'Only {0:d} of {1:d} original storm objects are unique.'
).format(this_num_unique, len(all_object_id_strings))
raise ValueError(error_string)
all_object_id_strings = numpy.array(all_object_id_strings, dtype='object')
object_id_strings_to_keep = numpy.array(
object_id_strings_to_keep, dtype='object')
sort_indices = numpy.argsort(all_object_id_strings)
relevant_indices = numpy.searchsorted(
all_object_id_strings[sort_indices], object_id_strings_to_keep,
side='left'
).astype(int)
relevant_indices[relevant_indices < 0] = 0
relevant_indices[
relevant_indices >= len(all_object_id_strings)
] = len(all_object_id_strings) - 1
relevant_indices = sort_indices[relevant_indices]
if allow_missing:
bad_indices = numpy.where(
all_object_id_strings[relevant_indices] != object_id_strings_to_keep
)[0]
relevant_indices[bad_indices] = -1
return relevant_indices
if not numpy.array_equal(all_object_id_strings[relevant_indices],
object_id_strings_to_keep):
missing_object_flags = (
all_object_id_strings[relevant_indices] != object_id_strings_to_keep
)
error_string = (
'{0:d} of {1:d} desired storm objects are missing. Their ID-time '
'pairs are listed below.\n{2:s}'
).format(
numpy.sum(missing_object_flags), num_objects_to_keep,
str(object_id_strings_to_keep[missing_object_flags])
)
raise ValueError(error_string)
return relevant_indices
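# Hedged example added for illustration (not part of the original module): a
# four-object table from which two ID/time pairs are looked up.
def _demo_find_storm_objects():
    all_id_strings = ['a', 'b', 'a', 'c']
    all_times_unix_sec = numpy.array([0, 0, 300, 300], dtype=int)
    indices = find_storm_objects(
        all_id_strings=all_id_strings,
        all_times_unix_sec=all_times_unix_sec,
        id_strings_to_keep=['a', 'c'],
        times_to_keep_unix_sec=numpy.array([300, 300], dtype=int))
    # indices should equal numpy.array([2, 3])
    return indices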
def create_distance_buffers(storm_object_table, min_distances_metres,
max_distances_metres):
"""Creates one or more distance buffers around each storm object.
K = number of buffers
:param storm_object_table: pandas DataFrame with the following columns.
Each row is one storm object.
storm_object_table.centroid_latitude_deg: Latitude (deg N) of storm-object
centroid.
storm_object_table.centroid_longitude_deg: Longitude (deg E) of storm-object
centroid.
storm_object_table.polygon_object_latlng_deg: Instance of
`shapely.geometry.Polygon`, with x-coords in longitude (deg E) and
y-coords in latitude (deg N).
:param min_distances_metres: length-K numpy array of minimum distances. If
the storm object is inside the [k]th buffer -- i.e., the [k]th buffer
has no minimum distance -- then min_distances_metres[k] should be NaN.
:param max_distances_metres: length-K numpy array of max distances.
:return: storm_object_table: Same as input but with K additional columns
(one per distance buffer). Column names are generated by
`buffer_to_column_name`, and each value in these columns is a
`shapely.geometry.Polygon` object, with x-coords in longitude (deg E) and
y-coords in latitude (deg N).
"""
num_buffers = len(min_distances_metres)
these_expected_dim = numpy.array([num_buffers], dtype=int)
error_checking.assert_is_numpy_array(
max_distances_metres, exact_dimensions=these_expected_dim)
global_centroid_lat_deg, global_centroid_lng_deg = (
geodetic_utils.get_latlng_centroid(
latitudes_deg=storm_object_table[CENTROID_LATITUDE_COLUMN].values,
longitudes_deg=storm_object_table[CENTROID_LONGITUDE_COLUMN].values)
)
projection_object = projections.init_azimuthal_equidistant_projection(
central_latitude_deg=global_centroid_lat_deg,
central_longitude_deg=global_centroid_lng_deg)
num_storm_objects = len(storm_object_table.index)
object_array = numpy.full(num_storm_objects, numpy.nan, dtype=object)
buffer_column_names = [''] * num_buffers
for j in range(num_buffers):
buffer_column_names[j] = buffer_to_column_name(
min_distance_metres=min_distances_metres[j],
max_distance_metres=max_distances_metres[j])
storm_object_table = storm_object_table.assign(
**{buffer_column_names[j]: object_array}
)
for i in range(num_storm_objects):
this_orig_vertex_dict_latlng_deg = (
polygons.polygon_object_to_vertex_arrays(
storm_object_table[LATLNG_POLYGON_COLUMN].values[i]
)
)
these_orig_x_metres, these_orig_y_metres = (
projections.project_latlng_to_xy(
latitudes_deg=this_orig_vertex_dict_latlng_deg[
polygons.EXTERIOR_Y_COLUMN],
longitudes_deg=this_orig_vertex_dict_latlng_deg[
polygons.EXTERIOR_X_COLUMN],
projection_object=projection_object)
)
for j in range(num_buffers):
this_buffer_poly_object_xy_metres = polygons.buffer_simple_polygon(
vertex_x_metres=these_orig_x_metres,
vertex_y_metres=these_orig_y_metres,
min_buffer_dist_metres=min_distances_metres[j],
max_buffer_dist_metres=max_distances_metres[j])
this_buffer_vertex_dict = polygons.polygon_object_to_vertex_arrays(
this_buffer_poly_object_xy_metres)
(this_buffer_vertex_dict[polygons.EXTERIOR_Y_COLUMN],
this_buffer_vertex_dict[polygons.EXTERIOR_X_COLUMN]
) = projections.project_xy_to_latlng(
x_coords_metres=this_buffer_vertex_dict[
polygons.EXTERIOR_X_COLUMN],
y_coords_metres=this_buffer_vertex_dict[
polygons.EXTERIOR_Y_COLUMN],
projection_object=projection_object)
this_num_holes = len(
this_buffer_vertex_dict[polygons.HOLE_X_COLUMN]
)
for k in range(this_num_holes):
(this_buffer_vertex_dict[polygons.HOLE_Y_COLUMN][k],
this_buffer_vertex_dict[polygons.HOLE_X_COLUMN][k]
) = projections.project_xy_to_latlng(
x_coords_metres=this_buffer_vertex_dict[
polygons.HOLE_X_COLUMN][k],
y_coords_metres=this_buffer_vertex_dict[
polygons.HOLE_Y_COLUMN][k],
projection_object=projection_object)
this_buffer_poly_object_latlng_deg = (
polygons.vertex_arrays_to_polygon_object(
exterior_x_coords=this_buffer_vertex_dict[
polygons.EXTERIOR_X_COLUMN],
exterior_y_coords=this_buffer_vertex_dict[
polygons.EXTERIOR_Y_COLUMN],
hole_x_coords_list=this_buffer_vertex_dict[
polygons.HOLE_X_COLUMN],
hole_y_coords_list=this_buffer_vertex_dict[
polygons.HOLE_Y_COLUMN]
)
)
storm_object_table[buffer_column_names[j]].values[i] = (
this_buffer_poly_object_latlng_deg
)
return storm_object_table
def storm_objects_to_tracks(storm_object_table):
"""Converts table of storm objects to table of storm tracks.
T = number of time steps (objects) in a given track
:param storm_object_table: pandas DataFrame with at least the following
columns. Each row is one storm object.
storm_object_table.primary_id_string: ID for corresponding storm cell.
storm_object_table.valid_time_unix_sec: Valid time of storm object.
storm_object_table.centroid_latitude_deg: Latitude (deg N) of storm-object
centroid.
storm_object_table.centroid_longitude_deg: Longitude (deg E) of storm-object
centroid.
storm_object_table.centroid_x_metres: x-coordinate of storm-object centroid.
storm_object_table.centroid_y_metres: y-coordinate of storm-object centroid.
:return: storm_track_table: pandas DataFrame with the following columns.
Each row is one storm track (cell).
storm_track_table.primary_id_string: ID for storm cell.
storm_track_table.valid_times_unix_sec: length-T numpy array of valid times.
storm_track_table.object_indices: length-T numpy array with indices of storm
objects in track. These are indices into the rows of
`storm_object_table`.
storm_track_table.centroid_latitudes_deg: length-T numpy array of centroid
latitudes (deg N).
storm_track_table.centroid_longitudes_deg: length-T numpy array of centroid
longitudes (deg E).
storm_track_table.centroid_x_coords_metres: length-T numpy array of centroid
x-coords.
storm_track_table.centroid_y_coords_metres: length-T numpy array of centroid
y-coords.
"""
object_id_strings = numpy.array(
storm_object_table[PRIMARY_ID_COLUMN].values)
track_id_strings, object_to_track_indices = numpy.unique(
object_id_strings, return_inverse=True)
storm_track_dict = {PRIMARY_ID_COLUMN: track_id_strings}
storm_track_table = pandas.DataFrame.from_dict(storm_track_dict)
nested_array = storm_track_table[[
PRIMARY_ID_COLUMN, PRIMARY_ID_COLUMN
]].values.tolist()
storm_track_table = storm_track_table.assign(**{
TRACK_TIMES_COLUMN: nested_array,
OBJECT_INDICES_COLUMN: nested_array,
TRACK_LATITUDES_COLUMN: nested_array,
TRACK_LONGITUDES_COLUMN: nested_array,
TRACK_X_COORDS_COLUMN: nested_array,
TRACK_Y_COORDS_COLUMN: nested_array
})
num_storm_tracks = len(storm_track_table.index)
for i in range(num_storm_tracks):
these_object_indices = numpy.where(object_to_track_indices == i)[0]
sort_indices = numpy.argsort(
storm_object_table[VALID_TIME_COLUMN].values[these_object_indices]
)
these_object_indices = these_object_indices[sort_indices]
storm_track_table[TRACK_TIMES_COLUMN].values[i] = (
storm_object_table[VALID_TIME_COLUMN].values[these_object_indices]
)
storm_track_table[OBJECT_INDICES_COLUMN].values[i] = (
these_object_indices
)
storm_track_table[TRACK_LATITUDES_COLUMN].values[i] = (
storm_object_table[CENTROID_LATITUDE_COLUMN].values[
these_object_indices]
)
storm_track_table[TRACK_LONGITUDES_COLUMN].values[i] = (
storm_object_table[CENTROID_LONGITUDE_COLUMN].values[
these_object_indices]
)
storm_track_table[TRACK_X_COORDS_COLUMN].values[i] = (
storm_object_table[CENTROID_X_COLUMN].values[these_object_indices]
)
storm_track_table[TRACK_Y_COORDS_COLUMN].values[i] = (
storm_object_table[CENTROID_Y_COLUMN].values[these_object_indices]
)
return storm_track_table
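# ----------------------------------------------------------------------------
# Illustrative usage sketch (added for clarity; not part of the original module
# and not called anywhere). It assumes the module-level column-name constants
# used above (PRIMARY_ID_COLUMN, VALID_TIME_COLUMN, ...) are defined earlier in
# this file; the toy values below are invented.
def _demo_storm_objects_to_tracks():
    toy_object_table = pandas.DataFrame({
        PRIMARY_ID_COLUMN: ['A', 'A', 'B'],
        VALID_TIME_COLUMN: numpy.array([0, 300, 0], dtype=int),
        CENTROID_LATITUDE_COLUMN: numpy.array([35.0, 35.1, 36.0]),
        CENTROID_LONGITUDE_COLUMN: numpy.array([262.0, 262.2, 263.0]),
        CENTROID_X_COLUMN: numpy.array([0., 1000., 50000.]),
        CENTROID_Y_COLUMN: numpy.array([0., 1200., 70000.])
    })
    toy_track_table = storm_objects_to_tracks(toy_object_table)
    # Expect one row per primary ID: track 'A' groups two objects, 'B' one.
    return toy_track_table[[PRIMARY_ID_COLUMN, OBJECT_INDICES_COLUMN]]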
| mit |
vignesh88/machine_learning | testing/plot.py | 1 | 1365 | import pandas
import pandas as pd
# Import matplotlib and Basemap
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap
# Set iPython to display visualization inline
#%matplotlib inline
raw_data = {'latitude': [27.173968, 27.164328, 20.930555, 31.784217, 29.935895],
'longitude': [78.037519, 78.015289, 49.948611, 35.134277, 29.935895]}
df = pd.DataFrame(raw_data, columns = ['latitude', 'longitude'])
print(df)  # display the DataFrame (a bare notebook-cell expression has no effect in a script)
# Create a figure of size 20x10 inches (i.e. pretty big)
fig = plt.figure(figsize=(20,10))
# Create a map, using the Gall–Peters projection
map = Basemap(projection='gall',
# with low resolution,
resolution = 'l',
# and don't draw coastline features with an area below 100,000 km^2 (area_thresh)
area_thresh = 100000.0,
# Centered at 0,0 (i.e. null island)
lat_0=0, lon_0=0)
# Draw the coastlines on the map
map.drawcoastlines()
# Draw country borders on the map
map.drawcountries()
# Fill the land with grey
map.fillcontinents(color = '#888888')
# Draw the map boundaries
map.drawmapboundary(fill_color='#f4f4f4')
# Define our longitude and latitude points
# We have to use .values because of a weird bug when passing pandas data
# to basemap.
x,y = map(df['longitude'].values, df['latitude'].values)
# Plot them using round markers of size 6
map.plot(x, y, 'ro', markersize=6)
# Show the map
plt.show()
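# When running without a display (e.g. over SSH), saving the figure may be
# preferable to plt.show(); an illustrative alternative:
# plt.savefig('stations_map.png', dpi=150, bbox_inches='tight')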
| gpl-3.0 |
jhelie/dx_plot | dx_plot.py | 1 | 22904 | #generic python modules
import argparse
import operator
from operator import itemgetter
import sys, os, shutil
import os.path
##########################################################################################
# RETRIEVE USER INPUTS
##########################################################################################
#=========================================================================================
# create parser
#=========================================================================================
version_nb = "0.0.1"
parser = argparse.ArgumentParser(prog = 'dx_plot', usage='', add_help = False, formatter_class = argparse.RawDescriptionHelpFormatter, description =\
'''
**********************************************
v''' + version_nb + '''
author: Jean Helie ([email protected])
git: https://github.com/jhelie/dx_plot
**********************************************
[ DESCRIPTION ]
This loads a data file containing electrostatic potential values stored in the OpenDX
format and plots it.
By default the content of the dx file is assumed to contain data in Volts (V) (or a
potential expressed in PMEPot units, based on kT.e-1, which can be converted to Volts
via the --pmepot flag).
[ REQUIREMENTS ]
The following python modules are needed :
- MDAnalysis
- matplotlib
- numpy
- scipy
[ USAGE ]
Option Default Description
-----------------------------------------------------
-f : dx file
-o : name of output files
--ax [z] : axis along which to produce the 1D graph (x,y or z)
--sl [xz] : slice plane for 2D graphs (xz,yz or xy)
--vmax : upper limit of scale
--vmin : lower limit of scale
--xticks [10] : nb of ticks along the plot horizontal axis
--yticks [7] : nb of ticks along the plot vertical axis
--cticks [10] : nb of ticks on the colour bar
--pmepot : use this flag to convert units from PMEPot to V
--reverse : reverse the z axis
--cmap ['jet_r'] : color map to use
--inter ['none'] : interpolation to use for 2D plot
Volume to process
-----------------------------------------------------
--xmin [0] : position of lower delimiter on the x axis (as a %)
--ymin [0] : position of lower delimiter on the y axis (as a %)
--zmin [0] : position of lower delimiter on the z axis (as a %)
--xmax [100] : position of upper delimiter on the x axis (as a %)
--ymax [100] : position of upper delimiter on the y axis (as a %)
--zmax [100] : position of upper delimiter on the z axis (as a %)
Potential reference
-----------------------------------------------------
-r [z] : the potential will be set to 0 at the lower extremity of this axis (x,y or z)
--pad [5] : nb of slices used to calculate potential offset (set to 0 to not offset)
Other options
-----------------------------------------------------
--version : show version number and exit
-h, --help : show this menu and exit
''')
#options
parser.add_argument('-f', nargs=1, dest='dxfilename', help=argparse.SUPPRESS, required=True)
parser.add_argument('-o', nargs=1, dest='outfilename', default=['auto'], help=argparse.SUPPRESS)
parser.add_argument('--ax', dest='axis1D', choices=['x','y','z'], default='z', help=argparse.SUPPRESS)
parser.add_argument('--sl', dest='axis2D', choices=['xz','yz','xy'], default='xz', help=argparse.SUPPRESS)
parser.add_argument('--vmax', nargs=1, dest='vmax', default=['auto'], help=argparse.SUPPRESS)
parser.add_argument('--vmin', nargs=1, dest='vmin', default=['auto'], help=argparse.SUPPRESS)
parser.add_argument('--xticks', nargs=1, dest='xticks', default=[10], type=int, help=argparse.SUPPRESS)
parser.add_argument('--yticks', nargs=1, dest='yticks', default=[7], type=int, help=argparse.SUPPRESS)
parser.add_argument('--cticks', nargs=1, dest='cticks', default=[10], type=int, help=argparse.SUPPRESS)
parser.add_argument('--pmepot', dest='pmepot', action='store_true', help=argparse.SUPPRESS)
parser.add_argument('--reverse', dest='reverse', action='store_true', help=argparse.SUPPRESS)
parser.add_argument('--cmap', nargs=1, dest='cmap', default=['jet_r'], help=argparse.SUPPRESS)
parser.add_argument('--inter', nargs=1, dest='inter', default=['none'], help=argparse.SUPPRESS)
#volume to process
parser.add_argument('--xmin', nargs=1, dest='xmin', default=[0], type=float, help=argparse.SUPPRESS)
parser.add_argument('--ymin', nargs=1, dest='ymin', default=[0], type=float, help=argparse.SUPPRESS)
parser.add_argument('--zmin', nargs=1, dest='zmin', default=[0], type=float, help=argparse.SUPPRESS)
parser.add_argument('--xmax', nargs=1, dest='xmax', default=[100], type=float, help=argparse.SUPPRESS)
parser.add_argument('--ymax', nargs=1, dest='ymax', default=[100], type=float, help=argparse.SUPPRESS)
parser.add_argument('--zmax', nargs=1, dest='zmax', default=[100], type=float, help=argparse.SUPPRESS)
#potential reference
parser.add_argument('-r', dest='axisref', choices=['x','y','z'], default='z', help=argparse.SUPPRESS)
parser.add_argument('--pad', nargs=1, dest='pad', default=[5], type=int, help=argparse.SUPPRESS)
#other options
parser.add_argument('--version', action='version', version='%(prog)s v' + version_nb, help=argparse.SUPPRESS)
parser.add_argument('-h','--help', action='help', help=argparse.SUPPRESS)
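#example invocation (hypothetical file name, added for illustration):
# python dx_plot.py -f potential.dx --ax z --sl xz --pmepot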
#=========================================================================================
# store inputs
#=========================================================================================
args = parser.parse_args()
args.dxfilename = args.dxfilename[0]
args.outfilename = args.outfilename[0]
args.vmax = args.vmax[0]
args.vmin = args.vmin[0]
args.xmin = args.xmin[0]
args.ymin = args.ymin[0]
args.zmin = args.zmin[0]
args.xmax = args.xmax[0]
args.ymax = args.ymax[0]
args.zmax = args.zmax[0]
args.pad = args.pad[0]
args.xticks = args.xticks[0]
args.yticks = args.yticks[0]
args.cticks = args.cticks[0]
args.cmap = args.cmap[0]
args.inter = args.inter[0]
if args.outfilename == "auto":
args.outfilename = args.dxfilename[:-3]
#=========================================================================================
# import modules (doing it now otherwise might crash before we can display the help menu!)
#=========================================================================================
try:
import numpy as np
except:
print "Error: you need to install the numpy module."
sys.exit(1)
try:
import scipy
import scipy.stats
except:
print "Error: you need to install the scipy module."
sys.exit(1)
try:
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.colors as mcolors
mcolorconv = mcolors.ColorConverter()
import matplotlib.cm as cm #colours library
import matplotlib.ticker
from matplotlib.ticker import MaxNLocator
from matplotlib.font_manager import FontProperties
fontP=FontProperties()
except:
print "Error: you need to install the matplotlib module."
sys.exit(1)
try:
import pylab as plt
except:
print "Error: you need to install the pylab module."
sys.exit(1)
try:
import MDAnalysis
from MDAnalysis import *
import MDAnalysis.analysis
import MDAnalysis.analysis.density
except:
print "Error: you need to install the MDAnalysis module first. See http://mdanalysis.googlecode.com"
sys.exit(1)
#=======================================================================
# sanity check
#=======================================================================
if not os.path.isfile(args.dxfilename):
print "Error: file " + str(args.dxfilename) + " not found."
sys.exit(1)
if args.vmin > args.vmax:
print "Error: --vmin must be smaller than --vmax."
sys.exit(1)
if args.pad < 0:
print "Error: --pad cannot be negative. Set it to 0 to avoid offsetting the potential."
sys.exit(1)
if args.xmin < 0:
print "Error: --xmin must be > 0."
sys.exit(1)
if args.ymin < 0:
print "Error: --ymin must be > 0."
sys.exit(1)
if args.zmin < 0:
print "Error: --zmin must be > 0."
sys.exit(1)
if args.xmax > 100:
print "Error: --xmax must be < 100."
sys.exit(1)
if args.ymax > 100:
print "Error: --ymax must be < 100."
sys.exit(1)
if args.zmax > 100:
print "Error: --zmax must be < 100."
sys.exit(1)
if args.xmin > args.xmax:
print "Error: --xmin must be smaller than --xmax."
sys.exit(1)
if args.ymin > args.ymax:
print "Error: --ymin must be smaller than --ymax."
sys.exit(1)
if args.zmin > args.zmax:
print "Error: --zmin must be smaller than --zmax."
sys.exit(1)
colormaps_possible = ['Spectral', 'summer', 'coolwarm', 'pink_r', 'Set1', 'Set2', 'Set3', 'brg_r', 'Dark2', 'hot', 'PuOr_r', 'afmhot_r', 'terrain_r', 'PuBuGn_r', 'RdPu', 'gist_ncar_r', 'gist_yarg_r', 'Dark2_r', 'YlGnBu', 'RdYlBu', 'hot_r', 'gist_rainbow_r', 'gist_stern', 'gnuplot_r', 'cool_r', 'cool', 'gray', 'copper_r', 'Greens_r', 'GnBu', 'gist_ncar', 'spring_r', 'gist_rainbow', 'RdYlBu_r', 'gist_heat_r', 'OrRd_r', 'CMRmap', 'bone', 'gist_stern_r', 'RdYlGn', 'Pastel2_r', 'spring', 'terrain', 'YlOrRd_r', 'Set2_r', 'winter_r', 'PuBu', 'RdGy_r', 'spectral', 'flag_r', 'jet_r', 'RdPu_r', 'Purples_r', 'gist_yarg', 'BuGn', 'Paired_r', 'hsv_r', 'bwr', 'cubehelix', 'YlOrRd', 'Greens', 'PRGn', 'gist_heat', 'spectral_r', 'Paired', 'hsv', 'Oranges_r', 'prism_r', 'Pastel2', 'Pastel1_r', 'Pastel1', 'gray_r', 'PuRd_r', 'Spectral_r', 'gnuplot2_r', 'BuPu', 'YlGnBu_r', 'copper', 'gist_earth_r', 'Set3_r', 'OrRd', 'PuBu_r', 'ocean_r', 'brg', 'gnuplot2', 'jet', 'bone_r', 'gist_earth', 'Oranges', 'RdYlGn_r', 'PiYG', 'CMRmap_r', 'YlGn', 'binary_r', 'gist_gray_r', 'Accent', 'BuPu_r', 'gist_gray', 'flag', 'seismic_r', 'RdBu_r', 'BrBG', 'Reds', 'BuGn_r', 'summer_r', 'GnBu_r', 'BrBG_r', 'Reds_r', 'RdGy', 'PuRd', 'Accent_r', 'Blues', 'Greys', 'autumn', 'cubehelix_r', 'nipy_spectral_r', 'PRGn_r', 'Greys_r', 'pink', 'binary', 'winter', 'gnuplot', 'RdBu', 'prism', 'YlOrBr', 'coolwarm_r', 'rainbow_r', 'rainbow', 'PiYG_r', 'YlGn_r', 'Blues_r', 'YlOrBr_r', 'seismic', 'Purples', 'bwr_r', 'autumn_r', 'ocean', 'Set1_r', 'PuOr', 'PuBuGn', 'nipy_spectral', 'afmhot']
if args.cmap not in colormaps_possible:
print "Error: unknown color maps, check matplotlib website or use default."
sys.exit(1)
interpolations_possible = ['none', 'nearest', 'bilinear', 'bicubic', 'spline16', 'spline36', 'hanning', 'hamming', 'hermite', 'kaiser', 'quadric', 'catrom', 'gaussian', 'bessel', 'mitchell', 'sinc', 'lanczos']
if args.inter not in interpolations_possible:
print "Error: unknown interpolation method, check matplotlib website or use default."
sys.exit(1)
#create log
#----------
filename_log=os.getcwd() + '/' + str(args.outfilename) + '.log'
output_log=open(filename_log, 'w')
output_log.write("[dx_plot v" + str(version_nb) + "]\n")
output_log.write("\nThe dx file was rendered using the following command:\n\n")
tmp_log="python dx_plot.py"
for c in sys.argv[1:]:
tmp_log += " " + c
output_log.write(tmp_log + "\n")
output_log.close()
##########################################################################################
# FUNCTIONS DEFINITIONS
##########################################################################################
global dims
global data
global data_1D
global data_2D
global coords_x
global coords_y
global coords_z
global nx_min
global ny_min
global nz_min
global nx_max
global ny_max
global nz_max
global cmap
cmap = cm.get_cmap(args.cmap)
#=========================================================================================
# data loading
#=========================================================================================
def load_dx():
global dims
global data
global coords_x
global coords_y
global coords_z
global nx_min
global ny_min
global nz_min
global nx_max
global ny_max
global nz_max
#potential value in each voxel
g = MDAnalysis.analysis.density.Grid()
g.load(str(args.dxfilename))
data = g.grid
#coords of bins along each dimensions
dims = np.shape(g.grid)
coords_x = np.zeros(dims[0])
coords_y = np.zeros(dims[1])
coords_z = np.zeros(dims[2])
tmp_x = g.edges[0]
tmp_y = g.edges[1]
tmp_z = g.edges[2]
for nx in range(0,dims[0]):
coords_x[nx] = (tmp_x[nx+1] + tmp_x[nx])/float(2)
for ny in range(0,dims[1]):
coords_y[ny] = (tmp_y[ny+1] + tmp_y[ny])/float(2)
for nz in range(0,dims[2]):
coords_z[nz] = (tmp_z[nz+1] + tmp_z[nz])/float(2)
#calculate delimiting indexes
nx_min = int(args.xmin * dims[0] / float(100))
ny_min = int(args.ymin * dims[1] / float(100))
nz_min = int(args.zmin * dims[2] / float(100))
nx_max = int(args.xmax * dims[0] / float(100))
ny_max = int(args.ymax * dims[1] / float(100))
nz_max = int(args.zmax * dims[2] / float(100))
#center coordinates
coords_x -= np.average(coords_x)
coords_y -= np.average(coords_y)
coords_z -= np.average(coords_z)
coords_x = coords_x[nx_min:nx_max]
coords_y = coords_y[ny_min:ny_max]
coords_z = coords_z[nz_min:nz_max]
if args.reverse:
coords_z *= -1
return
#=========================================================================================
# averages
#=========================================================================================
def calc_profiles():
global data_1D
global data_2D
# 1D average along chosen axis
#-----------------------------
if args.axis1D == "x":
data_1D = np.zeros(nx_max-nx_min)
for nx in range(nx_min,nx_max):
data_1D[nx] = np.average(data[nx,ny_min:ny_max,nz_min:nz_max])
elif args.axis1D == "y":
data_1D = np.zeros(ny_max-ny_min)
for ny in range(ny_min,ny_max):
data_1D[ny] = np.average(data[nx_min:nx_max,ny,nz_min:nz_max])
else:
data_1D = np.zeros(nz_max-nz_min)
for nz in range(nz_min,nz_max):
data_1D[nz] = np.average(data[nx_min:nx_max,ny_min:ny_max,nz])
# 2D average
#-----------
#case: xz
if args.axis2D == "xz":
data_2D = np.zeros((nx_max-nx_min,nz_max-nz_min))
for nz in range(nz_min,nz_max):
for nx in range(nx_min,nx_max):
data_2D[nx,nz] = np.average(data[nx,ny_min:ny_max,nz])
#case: yz
elif args.axis2D == "yz":
data_2D = np.zeros((ny_max-ny_min,nz_max-nz_min))
for nz in range(nz_min,nz_max):
for ny in range(ny_min,ny_max):
data_2D[ny,nz] = np.average(data[nx_min:nx_max,ny,nz])
#case: xy
else:
data_2D = np.zeros((nx_max-nx_min,ny_max-ny_min))
for ny in range(ny_min,ny_max):
for nx in range(nx_min,nx_max):
data_2D[nx,ny] = np.average(data[nx,ny,nz_min:nz_max])
#convert units to V
#------------------
if args.pmepot:
#convert from PMEPot units (kT.e-1 at 300 K) to Volts: kT -> kJ/mol, then kJ/(mol.e) -> V (in the PMEPot C++ code a temperature of 300 K is used to obtain kT)
factor = 8.3144621 * 300 / float(1000) * 0.010364272
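#(worked check of the factor above, added for clarity: kT at 300 K is
# 8.3144621 J/(mol.K) * 300 K = 2.4943 kJ/mol, and 1 kJ/(mol.e) corresponds to
# 0.010364272 V, so factor = kT/e ~ 0.0259 V, the usual ~26 mV thermal voltage)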
data_1D *= factor
data_2D *= factor
#sets potential to 0 V at the lower extremity of the reference axis using specified padding
#------------------------------------------------------------------------------------------
if args.pad > 0:
offset = np.average(data_1D[0:args.pad])
data_1D -= offset
data_2D -= offset
#set upper and lower boundaries if need be
#----------------------------------------
if args.vmin == "auto":
args.vmin = min(data_1D)
else:
args.vmin = float(args.vmin)
if args.vmax == "auto":
args.vmax = max(data_1D)
else:
args.vmax = float(args.vmax)
return
#=========================================================================================
# outputs
#=========================================================================================
def write_xvg():
#open files
filename_xvg = os.getcwd() + '/' + str(args.outfilename) + '_1D_' + str(args.axis1D) + '.xvg'
output_xvg = open(filename_xvg, 'w')
#general header
output_xvg.write("# [1D average content of " + str(args.dxfilename) + " - written by dx_plot v" + str(version_nb) + "]\n")
output_xvg.write("# -> 1D axis: " + str(args.axis1D) + "\n")
output_xvg.write("# -> 2D axis: " + str(args.axis2D) + "\n")
output_xvg.write("# -> x axis: " + str(args.xmin) + "-" + str(args.xmax) + "\n")
output_xvg.write("# -> y axis: " + str(args.ymin) + "-" + str(args.ymax) + "\n")
output_xvg.write("# -> z axis: " + str(args.zmin) + "-" + str(args.zmax) + "\n")
output_xvg.write("# -> pad: " + str(args.pad) + " slices on " + str(args.axisref) + " axis lower extremity\n")
#xvg metadata
output_xvg.write("@ title \"Average xvg\"\n")
output_xvg.write("@ xaxis label \"distance from box center along " + str(args.axis1D) + " (A)\"\n")
output_xvg.write("@ yaxis label \"potential (V)\"\n")
output_xvg.write("@ autoscale ONREAD xaxes\n")
output_xvg.write("@ TYPE XY\n")
output_xvg.write("@ view 0.15, 0.15, 0.95, 0.85\n")
output_xvg.write("@ legend on\n")
output_xvg.write("@ legend box on\n")
output_xvg.write("@ legend loctype view\n")
output_xvg.write("@ legend 0.98, 0.8\n")
output_xvg.write("@ legend length 0\n")
output_xvg.write("@ s0 legend \"potential\"\n")
#data
if args.axis1D == "x":
for r in range(0, len(data_1D)):
results = str(round(coords_x[r],2)) + " " + "{:.6e}".format(data_1D[r])
output_xvg.write(results + "\n")
elif args.axis1D == "y":
for r in range(0, len(data_1D)):
results = str(round(coords_y[r],2)) + " " + "{:.6e}".format(data_1D[r])
output_xvg.write(results + "\n")
else:
for r in range(0, len(data_1D)):
results = str(round(coords_z[r],2)) + " " + "{:.6e}".format(data_1D[r])
output_xvg.write(results + "\n")
output_xvg.close()
return
def graph_profile_1D():
#filenames
filename_svg = os.getcwd() + '/' + str(args.outfilename) + '_1D_' + str(args.axis1D) + '.svg'
#create figure
fig = plt.figure(figsize=(8, 6.2))
fig.suptitle("Electrostatic profile along " + str(args.axis1D))
#plot data
ax = fig.add_subplot(111)
if args.axis1D == "x":
plt.plot(coords_x, data_1D, color = 'k', linewidth = 2)
plt.hlines(0, min(coords_x), max(coords_x))
elif args.axis1D == "y":
plt.plot(coords_y, data_1D, color = 'k', linewidth = 2)
plt.hlines(0, min(coords_y), max(coords_y))
else:
plt.plot(coords_z, data_1D, color = 'k', linewidth = 2)
plt.hlines(0, min(coords_z), max(coords_z))
plt.vlines(-21, args.vmin, args.vmax, linestyles = 'dashed')
plt.vlines(21, args.vmin, args.vmax, linestyles = 'dashed')
plt.vlines(0, args.vmin, args.vmax, linestyles = 'dashdot')
plt.xlabel(str(args.axis1D) + ' distance to box center ($\AA$)')
plt.ylabel('electrostatic potential (V)')
#save figure
ax.set_ylim(args.vmin, args.vmax)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
ax.xaxis.set_major_locator(MaxNLocator(nbins=args.xticks))
ax.yaxis.set_major_locator(MaxNLocator(nbins=args.yticks))
ax.xaxis.labelpad = 20
ax.yaxis.labelpad = 20
ax.get_xaxis().set_tick_params(direction='out')
ax.get_yaxis().set_tick_params(direction='out')
plt.setp(ax.xaxis.get_majorticklabels(), fontsize = "small")
plt.setp(ax.yaxis.get_majorticklabels(), fontsize = "small")
plt.subplots_adjust(top = 0.9, bottom = 0.15, left = 0.15, right = 0.85)
fig.savefig(filename_svg)
plt.close()
return
def graph_profile_2D():
#filenames
filename_svg = os.getcwd() + '/' + str(args.outfilename) + '_2D_' + str(args.axis2D) + '.svg'
#create figure
fig = plt.figure(figsize=(8, 6.2))
fig.suptitle("Electrostatic profile slice")
#plot data
ax = fig.add_subplot(111)
if args.axis2D == "xz":
data_2D_oriented = np.zeros((np.shape(data_2D)[1],np.shape(data_2D)[0]))
if args.reverse:
for nx in range(nx_min, nx_max):
for nz in range(nz_min, nz_max):
data_2D_oriented[nz,nx] = data_2D[nx,nz]
else:
for nx in range(nx_min, nx_max):
for nz in range(nz_min, nz_max):
data_2D_oriented[nz,nx] = data_2D[nx,nz_max-1-nz]
im = plt.imshow(data_2D_oriented, extent = [min(coords_x),max(coords_x),min(coords_z),max(coords_z)], vmin = args.vmin, vmax = args.vmax, cmap = cmap, interpolation=args.inter)
ax.set_xlim(min(coords_x), max(coords_x))
ax.set_ylim(min(coords_z), max(coords_z))
plt.xlabel('x axis ($\AA$)')
plt.ylabel('z axis ($\AA$)')
elif args.axis2D == "yz":
data_2D_oriented = np.zeros((np.shape(data_2D)[1],np.shape(data_2D)[0]))
if args.reverse:
for ny in range(ny_min, ny_max):
for nz in range(nz_min, nz_max):
data_2D_oriented[nz,ny] = data_2D[ny,nz]
else:
for ny in range(ny_min, ny_max):
for nz in range(nz_min, nz_max):
data_2D_oriented[nz,ny] = data_2D[ny,nz_max-1-nz]
im = plt.imshow(data_2D_oriented, extent = [min(coords_y),max(coords_y),min(coords_z),max(coords_z)], vmin = args.vmin, vmax = args.vmax, cmap = cmap, interpolation=args.inter)
ax.set_xlim(min(coords_y), max(coords_y))
ax.set_ylim(min(coords_z), max(coords_z))
plt.xlabel('y axis ($\AA$)')
plt.ylabel('z axis ($\AA$)')
else:
data_2D_oriented = np.zeros((np.shape(data_2D)[1],np.shape(data_2D)[0]))
for nx in range(nx_min, nx_max):
for ny in range(ny_min, ny_max):
data_2D_oriented[ny,nx] = data_2D[nx,ny_max-1-ny]
im = plt.imshow(data_2D_oriented, extent = [min(coords_x),max(coords_x),min(coords_y),max(coords_y)], vmin = args.vmin, vmax = args.vmax, cmap = cmap, interpolation=args.inter)
ax.set_xlim(min(coords_x), max(coords_x))
ax.set_ylim(min(coords_y), max(coords_y))
plt.xlabel('x axis ($\AA$)')
plt.ylabel('y axis ($\AA$)')
if args.axis2D != "xy":
plt.vlines(-21, args.vmin, args.vmax, linestyles = 'dashed')
plt.vlines(21, args.vmin, args.vmax, linestyles = 'dashed')
plt.vlines(0, args.vmin, args.vmax, linestyles = 'dashdot')
#color bar
cax = fig.add_axes([0.83, 0.2, 0.025, 0.65])
cbar = fig.colorbar(im, orientation='vertical', cax=cax)
cbar.ax.tick_params(axis='y', direction='out')
cbar.set_label(r'potential (V)')
plt.setp(cbar.ax.yaxis.get_majorticklabels(), fontsize = "small")
cbar.locator = MaxNLocator(nbins=args.cticks)
cbar.update_ticks()
cbar.ax.yaxis.labelpad = 10
#save figure
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
ax.xaxis.set_major_locator(MaxNLocator(nbins=args.xticks))
ax.yaxis.set_major_locator(MaxNLocator(nbins=args.yticks))
ax.xaxis.labelpad = 10
ax.yaxis.labelpad = 10
ax.get_xaxis().set_tick_params(direction='out')
ax.get_yaxis().set_tick_params(direction='out')
plt.setp(ax.xaxis.get_majorticklabels(), fontsize = "small")
plt.setp(ax.yaxis.get_majorticklabels(), fontsize = "small")
plt.subplots_adjust(top = 0.9, bottom = 0.15, left = 0.1, right = 0.8)
fig.savefig(filename_svg)
plt.close()
return
##########################################################################################
# MAIN
##########################################################################################
print "\nReading file..."
load_dx()
print "\nCalculating profiles..."
calc_profiles()
write_xvg()
graph_profile_1D()
graph_profile_2D()
#=========================================================================================
# exit
#=========================================================================================
print "\nFinished successfully!"
print ""
sys.exit(0)
| gpl-2.0 |
mindgarage/Ovation | models/siamese_cnn_lstm_network.py | 1 | 9361 | import os
import pickle
import datetime
import tensorflow as tf
from utils import ops
from utils import distances
from utils import losses
from scipy.stats import pearsonr
from sklearn.metrics import mean_squared_error
from tensorflow.contrib.tensorboard.plugins import projector
from models.model import Model
class SiameseCNNLSTM(Model):
"""
An LSTM-based deep Siamese network for text similarity.
Uses a word embedding layer, followed by a multi-filter CNN block, a
(bidirectional) LSTM encoder and an exponential-distance similarity layer
trained with a mean-squared-error loss.
"""
def create_placeholders(self):
# A tensorflow Placeholder for the 1st input sentence. This
# placeholder would expect data in the shape [BATCH_SIZE X
# SEQ_MAX_LENGTH], where each row of this Tensor will contain a
# sequence of token ids representing the sentence
self.input_s1 = tf.placeholder(tf.int32, [None,
self.args.get("sequence_length")],
name="input_s1")
# This is similar to self.input_s1, but it is used to feed the second
# sentence
self.input_s2 = tf.placeholder(tf.int32, [None,
self.args.get("sequence_length")],
name="input_s2")
# This is a placeholder to feed in the ground truth similarity
# between the two sentences. It expects a Matrix of shape [BATCH_SIZE]
self.input_sim = tf.placeholder(tf.float32, [None], name="input_sim")
def build_model(self, metadata_path=None, embedding_weights=None):
"""
This method builds the computation graph by adding layers of
computations. It takes the metadata_path (of the dataset vocabulary)
and a preloaded word2vec matrix and input and uses them (if not None)
to initialize the Tensorflow variables. The metadata is used to
visualize the word embeddings that are being trained using Tensorflow
Projector. Additionally you can use any other tool to visualize them.
https://www.tensorflow.org/versions/r0.12/how_tos/embedding_viz/
:param metadata_path: Path to the metadata of the vocabulary. Refer
to the datasets API
https://github.com/mindgarage/Ovation/wiki/The-Datasets-API
:param embedding_weights: the preloaded w2v matrix that corresponds
to the vocabulary. Refer to https://github.com/mindgarage/Ovation/wiki/The-Datasets-API#what-does-a-dataset-object-have
:return:
"""
# Build the Embedding layer as the first layer of the model
self.embedding_weights, self.config = ops.embedding_layer(
metadata_path, embedding_weights)
self.embedded_s1 = tf.nn.embedding_lookup(self.embedding_weights,
self.input_s1)
self.embedded_s2 = tf.nn.embedding_lookup(self.embedding_weights,
self.input_s2)
self.s1_cnn_out = ops.multi_filter_conv_block(self.embedded_s1,
self.args["n_filters"],
dropout_keep_prob=self.args["dropout"])
self.s1_lstm_out = ops.lstm_block(self.s1_cnn_out,
self.args["hidden_units"],
dropout=self.args["dropout"],
layers=self.args["rnn_layers"],
dynamic=False,
bidirectional=self.args["bidirectional"])
self.s2_cnn_out = ops.multi_filter_conv_block(self.embedded_s2,
self.args["n_filters"], reuse=True,
dropout_keep_prob=self.args["dropout"])
self.s2_lstm_out = ops.lstm_block(self.s2_cnn_out,
self.args["hidden_units"],
dropout=self.args["dropout"],
layers=self.args["rnn_layers"],
dynamic=False, reuse=True,
bidirectional=self.args["bidirectional"])
self.distance = distances.exponential(self.s1_lstm_out,
self.s2_lstm_out)
with tf.name_scope("loss"):
self.loss = losses.mean_squared_error(self.input_sim, self.distance)
if self.args["l2_reg_beta"] > 0.0:
self.regularizer = ops.get_regularizer(self.args["l2_reg_beta"])
self.loss = tf.reduce_mean(self.loss + self.regularizer)
# Compute some Evaluation Measures to keep track of the training process
with tf.name_scope("Pearson_correlation"):
self.pco, self.pco_update = tf.contrib.metrics.streaming_pearson_correlation(
self.distance, self.input_sim, name="pearson")
# Compute some Evaluation Measures to keep track of the training process
with tf.name_scope("MSE"):
self.mse, self.mse_update = tf.metrics.mean_squared_error(
self.input_sim, self.distance, name="mse")
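# Summary of the graph built above (added for clarity): both sentences go
# through the same embedding matrix, the same multi-filter convolution block
# and the same (optionally bidirectional) LSTM (weights shared via reuse=True);
# their encodings are compared with an exponential distance, which is regressed
# against input_sim with a mean-squared-error loss (plus optional L2
# regularisation). Streaming Pearson correlation and MSE are tracked as
# evaluation metrics.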
def create_scalar_summary(self, sess):
"""
This method creates Tensorboard summaries for some scalar values
like loss and pearson correlation
:param sess:
:return:
"""
# Summaries for loss and accuracy
self.loss_summary = tf.summary.scalar("loss", self.loss)
self.pearson_summary = tf.summary.scalar("pco", self.pco)
self.mse_summary = tf.summary.scalar("mse", self.mse)
# Train Summaries
self.train_summary_op = tf.summary.merge([self.loss_summary,
self.pearson_summary,
self.mse_summary])
self.train_summary_writer = tf.summary.FileWriter(self.checkpoint_dir,
sess.graph)
projector.visualize_embeddings(self.train_summary_writer,
self.config)
# Dev summaries
self.dev_summary_op = tf.summary.merge([self.loss_summary,
self.pearson_summary,
self.mse_summary])
self.dev_summary_writer = tf.summary.FileWriter(self.dev_summary_dir,
sess.graph)
def train_step(self, sess, s1_batch, s2_batch, sim_batch,
epochs_completed, verbose=True):
"""
A single train step
"""
# Prepare data to feed to the computation graph
feed_dict = {
self.input_s1: s1_batch,
self.input_s2: s2_batch,
self.input_sim: sim_batch,
}
# create a list of operations that you want to run and observe
ops = [self.tr_op_set, self.global_step, self.loss, self.distance]
# Add summaries if they exist
if hasattr(self, 'train_summary_op'):
ops.append(self.train_summary_op)
_, step, loss, sim, summaries = sess.run(ops,
feed_dict)
self.train_summary_writer.add_summary(summaries, step)
else:
_, step, loss, sim = sess.run(ops, feed_dict)
# Calculate the pearson correlation and mean squared error
pco = pearsonr(sim, sim_batch)
mse = mean_squared_error(sim_batch, sim)
if verbose:
time_str = datetime.datetime.now().isoformat()
print("Epoch: {}\tTRAIN {}: Current Step{}\tLoss{:g}\t"
"PCO:{}\tMSE={}".format(epochs_completed,
time_str, step, loss, pco, mse))
return pco, mse, loss, step
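# A minimal, hypothetical driver loop for train_step/evaluate_step (a sketch in
# comments only; in Ovation the batches actually come from the datasets API,
# which is not part of this file):
#
#   with tf.Session() as sess:
#       sess.run(tf.global_variables_initializer())
#       sess.run(tf.local_variables_initializer())
#       for epoch in range(num_epochs):
#           for s1, s2, sim in batches:  # padded token-id matrices + scores
#               model.train_step(sess, s1, s2, sim, epoch)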
def evaluate_step(self, sess, s1_batch, s2_batch, sim_batch, verbose=True):
"""
A single evaluation step
"""
# Prepare the data to be fed to the computation graph
feed_dict = {
self.input_s1: s1_batch,
self.input_s2: s2_batch,
self.input_sim: sim_batch
}
# create a list of operations that you want to run and observe
ops = [self.global_step, self.loss, self.distance, self.pco,
self.pco_update, self.mse, self.mse_update]
# Add summaries if they exist
if hasattr(self, 'dev_summary_op'):
ops.append(self.dev_summary_op)
step, loss, sim, pco, _, mse, _, summaries = sess.run(ops,
feed_dict)
self.dev_summary_writer.add_summary(summaries, step)
else:
step, loss, sim, pco, _, mse, _ = sess.run(ops, feed_dict)
time_str = datetime.datetime.now().isoformat()
# Calculate the pearson correlation and mean squared error
pco = pearsonr(sim, sim_batch)
mse = mean_squared_error(sim_batch, sim)
if verbose:
print("EVAL: {}\tStep: {}\tloss: {:g}\t pco:{}\tmse:{}".format(
time_str, step, loss, pco, mse))
return loss, pco, mse, sim
| apache-2.0 |
stefanseibert/DataMining | experiment01/01_finance/b103_stockMarketClustering.py | 2 | 4290 | # -*- coding:utf8 -*-
# author: Prof. Maucher // applied changes: Stefan Seibert
# File for task 3.2
import datetime
from matplotlib import finance
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from sklearn import cluster
# Choose a time period reasonable calm (not too long ago so that we get
# high-tech firms, and before the 2008 crash)
d1 = datetime.datetime(2003, 01, 01)
d2 = datetime.datetime(2008, 01, 01)
symbol_dict = {
'TOT' : 'Total',
'XOM' : 'Exxon',
'CVX' : 'Chevron',
'COP' : 'ConocoPhillips',
'VLO' : 'Valero Energy',
'MSFT' : 'Microsoft',
'IBM' : 'IBM',
'TWX' : 'Time Warner',
'CMCSA': 'Comcast',
'CVC' : 'Cablevision',
'YHOO' : 'Yahoo',
'DELL' : 'Dell',
'HPQ' : 'Hewlett-Packard',
'AMZN' : 'Amazon',
'TM' : 'Toyota',
'CAJ' : 'Canon',
'MTU' : 'Mitsubishi',
'SNE' : 'Sony',
'F' : 'Ford',
'HMC' : 'Honda',
'NAV' : 'Navistar',
'NOC' : 'Northrop Grumman',
'BA' : 'Boeing',
'KO' : 'Coca Cola',
'MMM' : '3M',
'MCD' : 'Mc Donalds',
'PEP' : 'Pepsi',
#'KFT' : 'Kraft Foods',
'K' : 'Kellogg',
'UN' : 'Unilever',
'MAR' : 'Marriott',
'PG' : 'Procter Gamble',
'CL' : 'Colgate-Palmolive',
#'NWS' : 'News Corporation',
'GE' : 'General Electrics',
'WFC' : 'Wells Fargo',
'JPM' : 'JPMorgan Chase',
'AIG' : 'AIG',
'AXP' : 'American express',
'BAC' : 'Bank of America',
'GS' : 'Goldman Sachs',
'AAPL' : 'Apple',
'SAP' : 'SAP',
'CSCO' : 'Cisco',
'TXN' : 'Texas instruments',
'XRX' : 'Xerox',
'LMT' : 'Lookheed Martin',
'WMT' : 'Wal-Mart',
'WAG' : 'Walgreen',
'HD' : 'Home Depot',
'GSK' : 'GlaxoSmithKline',
'PFE' : 'Pfizer',
'SNY' : 'Sanofi-Aventis',
'NVS' : 'Novartis',
'KMB' : 'Kimberly-Clark',
'R' : 'Ryder',
'GD' : 'General Dynamics',
'RTN' : 'Raytheon',
'CVS' : 'CVS',
'CAT' : 'Caterpillar',
'DD' : 'DuPont de Nemours',
}
symbols, names = np.array(symbol_dict.items()).T
print "----------------------------Symbols---------------------------------------"
print symbols
print "----------------------------Names---------------------------------------"
print names
quotes = [finance.quotes_historical_yahoo(symbol, d1, d2, asobject=True)
for symbol in symbols]
print "----------------------------Quotes---------------------------------------"
print "Number of quotes: ",len(quotes)
#print "--------------------------open and close-----------------------------------"
open = np.array([q.open for q in quotes]).astype(np.float)
close = np.array([q.close for q in quotes]).astype(np.float)
print "--------------------------clustering tasks---------------------------------"
#difference vectors calculation for task 3.2.1
differenceVectors = pd.DataFrame(columns=symbols)
for business in range(0, len(symbols)):
businessSymbol = symbols[business]
differenceVectors[businessSymbol] = pd.Series(open[business]-close[business], index=quotes[business].date)
# relation matrix for task 3.2.2
# transposed so that the correlation is computed between companies, not between days
relationMatrix = np.corrcoef(differenceVectors.T)
#affinity propagation calculation for task 3.2.3
af = cluster.AffinityPropagation(affinity="precomputed").fit(relationMatrix)
cluster_centers_indices = af.cluster_centers_indices_
labels = af.labels_
n_clusters_ = len(cluster_centers_indices)
print "In " + str(len(labels)) + " Companies were " + str(n_clusters_) + " Clusters found."
#plotting the clusters for task 3.2.4
for i in range(0, n_clusters_):
plt.figure("Companies related in Cluster %d" % i)
for y in range(0, len(labels)):
if i == labels[y]:
plt.plot(quotes[y].date, close[y], "b", color=plt.cm.RdYlBu(y*5), ms=2, label=symbols[y])
plt.legend(bbox_to_anchor=(1.05, 1), loc=2)
plt.ylabel("prices")
plt.xlabel("time")
plt.grid()
plt.show() | mit |
loli/sklearn-ensembletrees | sklearn/linear_model/tests/test_sgd.py | 3 | 31580 | import pickle
import unittest
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import raises
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_false, assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises_regexp
from sklearn import linear_model, datasets, metrics
from sklearn.base import clone
from sklearn.linear_model import SGDClassifier, SGDRegressor
from sklearn.preprocessing import LabelEncoder, scale
class SparseSGDClassifier(SGDClassifier):
def fit(self, X, y, *args, **kw):
X = sp.csr_matrix(X)
return super(SparseSGDClassifier, self).fit(X, y, *args, **kw)
def partial_fit(self, X, y, *args, **kw):
X = sp.csr_matrix(X)
return super(SparseSGDClassifier, self).partial_fit(X, y, *args, **kw)
def decision_function(self, X):
X = sp.csr_matrix(X)
return super(SparseSGDClassifier, self).decision_function(X)
def predict_proba(self, X):
X = sp.csr_matrix(X)
return super(SparseSGDClassifier, self).predict_proba(X)
class SparseSGDRegressor(SGDRegressor):
def fit(self, X, y, *args, **kw):
X = sp.csr_matrix(X)
return SGDRegressor.fit(self, X, y, *args, **kw)
def partial_fit(self, X, y, *args, **kw):
X = sp.csr_matrix(X)
return SGDRegressor.partial_fit(self, X, y, *args, **kw)
def decision_function(self, X, *args, **kw):
X = sp.csr_matrix(X)
return SGDRegressor.decision_function(self, X, *args, **kw)
##
## Test Data
##
# test sample 1
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
Y = [1, 1, 1, 2, 2, 2]
T = np.array([[-1, -1], [2, 2], [3, 2]])
true_result = [1, 2, 2]
# test sample 2; string class labels
X2 = np.array([[-1, 1], [-0.75, 0.5], [-1.5, 1.5],
[1, 1], [0.75, 0.5], [1.5, 1.5],
[-1, -1], [0, -0.5], [1, -1]])
Y2 = ["one"] * 3 + ["two"] * 3 + ["three"] * 3
T2 = np.array([[-1.5, 0.5], [1, 2], [0, -2]])
true_result2 = ["one", "two", "three"]
# test sample 3
X3 = np.array([[1, 1, 0, 0, 0, 0], [1, 1, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0], [0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 1, 1], [0, 0, 0, 0, 1, 1],
[0, 0, 0, 1, 0, 0], [0, 0, 0, 1, 0, 0]])
Y3 = np.array([1, 1, 1, 1, 2, 2, 2, 2])
# test sample 4 - two more or less redundant feature groups
X4 = np.array([[1, 0.9, 0.8, 0, 0, 0], [1, .84, .98, 0, 0, 0],
[1, .96, .88, 0, 0, 0], [1, .91, .99, 0, 0, 0],
[0, 0, 0, .89, .91, 1], [0, 0, 0, .79, .84, 1],
[0, 0, 0, .91, .95, 1], [0, 0, 0, .93, 1, 1]])
Y4 = np.array([1, 1, 1, 1, 2, 2, 2, 2])
iris = datasets.load_iris()
# test sample 5 - test sample 1 as binary classification problem
X5 = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
Y5 = [1, 1, 1, 2, 2, 2]
true_result5 = [0, 1, 1]
##
## Classification Test Case
##
class CommonTest(object):
def _test_warm_start(self, X, Y, lr):
# Test that explicit warm restart...
clf = self.factory(alpha=0.01, eta0=0.01, n_iter=5, shuffle=False,
learning_rate=lr)
clf.fit(X, Y)
clf2 = self.factory(alpha=0.001, eta0=0.01, n_iter=5, shuffle=False,
learning_rate=lr)
clf2.fit(X, Y,
coef_init=clf.coef_.copy(),
intercept_init=clf.intercept_.copy())
#... and implicit warm restart are equivalent.
clf3 = self.factory(alpha=0.01, eta0=0.01, n_iter=5, shuffle=False,
warm_start=True, learning_rate=lr)
clf3.fit(X, Y)
assert_equal(clf3.t_, clf.t_)
assert_array_almost_equal(clf3.coef_, clf.coef_)
clf3.set_params(alpha=0.001)
clf3.fit(X, Y)
assert_equal(clf3.t_, clf2.t_)
assert_array_almost_equal(clf3.coef_, clf2.coef_)
def test_warm_start_constant(self):
self._test_warm_start(X, Y, "constant")
def test_warm_start_invscaling(self):
self._test_warm_start(X, Y, "invscaling")
def test_warm_start_optimal(self):
self._test_warm_start(X, Y, "optimal")
def test_input_format(self):
"""Input format tests. """
clf = self.factory(alpha=0.01, n_iter=5,
shuffle=False)
clf.fit(X, Y)
Y_ = np.array(Y)[:, np.newaxis]
Y_ = np.c_[Y_, Y_]
assert_raises(ValueError, clf.fit, X, Y_)
def test_clone(self):
"""Test whether clone works ok. """
clf = self.factory(alpha=0.01, n_iter=5, penalty='l1')
clf = clone(clf)
clf.set_params(penalty='l2')
clf.fit(X, Y)
clf2 = self.factory(alpha=0.01, n_iter=5, penalty='l2')
clf2.fit(X, Y)
assert_array_equal(clf.coef_, clf2.coef_)
class DenseSGDClassifierTestCase(unittest.TestCase, CommonTest):
"""Test suite for the dense representation variant of SGD"""
factory = SGDClassifier
def test_sgd(self):
"""Check that SGD gives any results :-)"""
for loss in ("hinge", "squared_hinge", "log", "modified_huber"):
clf = self.factory(penalty='l2', alpha=0.01, fit_intercept=True,
loss=loss, n_iter=10, shuffle=True)
clf.fit(X, Y)
#assert_almost_equal(clf.coef_[0], clf.coef_[1], decimal=7)
assert_array_equal(clf.predict(T), true_result)
@raises(ValueError)
def test_sgd_bad_l1_ratio(self):
"""Check whether expected ValueError on bad l1_ratio"""
self.factory(l1_ratio=1.1)
@raises(ValueError)
def test_sgd_bad_learning_rate_schedule(self):
"""Check whether expected ValueError on bad learning_rate"""
self.factory(learning_rate="<unknown>")
@raises(ValueError)
def test_sgd_bad_eta0(self):
"""Check whether expected ValueError on bad eta0"""
self.factory(eta0=0, learning_rate="constant")
@raises(ValueError)
def test_sgd_bad_alpha(self):
"""Check whether expected ValueError on bad alpha"""
self.factory(alpha=-.1)
@raises(ValueError)
def test_sgd_bad_penalty(self):
"""Check whether expected ValueError on bad penalty"""
self.factory(penalty='foobar', l1_ratio=0.85)
@raises(ValueError)
def test_sgd_bad_loss(self):
"""Check whether expected ValueError on bad loss"""
self.factory(loss="foobar")
@raises(ValueError)
def test_sgd_n_iter_param(self):
"""Test parameter validity check"""
self.factory(n_iter=-10000)
@raises(ValueError)
def test_sgd_shuffle_param(self):
"""Test parameter validity check"""
self.factory(shuffle="false")
@raises(TypeError)
def test_argument_coef(self):
"""Checks coef_init not allowed as model argument (only fit)"""
# Provided coef_ does not match dataset.
self.factory(coef_init=np.zeros((3,))).fit(X, Y)
@raises(ValueError)
def test_provide_coef(self):
"""Checks coef_init shape for the warm starts"""
# Provided coef_ does not match dataset.
self.factory().fit(X, Y, coef_init=np.zeros((3,)))
@raises(ValueError)
def test_set_intercept(self):
"""Checks intercept_ shape for the warm starts"""
# Provided intercept_ does not match dataset.
self.factory().fit(X, Y, intercept_init=np.zeros((3,)))
def test_set_intercept_binary(self):
"""Checks intercept_ shape for the warm starts in binary case"""
self.factory().fit(X5, Y5, intercept_init=0)
def test_set_intercept_to_intercept(self):
"""Checks intercept_ shape consistency for the warm starts"""
# Inconsistent intercept_ shape.
clf = self.factory().fit(X5, Y5)
self.factory().fit(X5, Y5, intercept_init=clf.intercept_)
clf = self.factory().fit(X, Y)
self.factory().fit(X, Y, intercept_init=clf.intercept_)
@raises(ValueError)
def test_sgd_at_least_two_labels(self):
"""Target must have at least two labels"""
self.factory(alpha=0.01, n_iter=20).fit(X2, np.ones(9))
def test_sgd_multiclass(self):
"""Multi-class test case"""
clf = self.factory(alpha=0.01, n_iter=20).fit(X2, Y2)
assert_equal(clf.coef_.shape, (3, 2))
assert_equal(clf.intercept_.shape, (3,))
assert_equal(clf.decision_function([0, 0]).shape, (1, 3))
pred = clf.predict(T2)
assert_array_equal(pred, true_result2)
def test_sgd_multiclass_with_init_coef(self):
"""Multi-class test case"""
clf = self.factory(alpha=0.01, n_iter=20)
clf.fit(X2, Y2, coef_init=np.zeros((3, 2)),
intercept_init=np.zeros(3))
assert_equal(clf.coef_.shape, (3, 2))
assert_true(clf.intercept_.shape, (3,))
pred = clf.predict(T2)
assert_array_equal(pred, true_result2)
def test_sgd_multiclass_njobs(self):
"""Multi-class test case with multi-core support"""
clf = self.factory(alpha=0.01, n_iter=20, n_jobs=2).fit(X2, Y2)
assert_equal(clf.coef_.shape, (3, 2))
assert_equal(clf.intercept_.shape, (3,))
assert_equal(clf.decision_function([0, 0]).shape, (1, 3))
pred = clf.predict(T2)
assert_array_equal(pred, true_result2)
def test_set_coef_multiclass(self):
"""Checks coef_init and intercept_init shape for for multi-class
problems"""
# Provided coef_ does not match dataset
clf = self.factory()
assert_raises(ValueError, clf.fit, X2, Y2, coef_init=np.zeros((2, 2)))
# Provided coef_ does match dataset
clf = self.factory().fit(X2, Y2, coef_init=np.zeros((3, 2)))
# Provided intercept_ does not match dataset
clf = self.factory()
assert_raises(ValueError, clf.fit, X2, Y2,
intercept_init=np.zeros((1,)))
# Provided intercept_ does match dataset.
clf = self.factory().fit(X2, Y2, intercept_init=np.zeros((3,)))
def test_sgd_proba(self):
"""Check SGD.predict_proba"""
# Hinge loss does not allow for conditional prob estimate.
# We cannot use the factory here, because it defines predict_proba
# anyway.
clf = SGDClassifier(loss="hinge", alpha=0.01, n_iter=10).fit(X, Y)
assert_false(hasattr(clf, "predict_proba"))
assert_false(hasattr(clf, "predict_log_proba"))
# log and modified_huber losses can output probability estimates
# binary case
for loss in ["log", "modified_huber"]:
clf = self.factory(loss="modified_huber", alpha=0.01, n_iter=10)
clf.fit(X, Y)
p = clf.predict_proba([3, 2])
assert_true(p[0, 1] > 0.5)
p = clf.predict_proba([-1, -1])
assert_true(p[0, 1] < 0.5)
p = clf.predict_log_proba([3, 2])
assert_true(p[0, 1] > p[0, 0])
p = clf.predict_log_proba([-1, -1])
assert_true(p[0, 1] < p[0, 0])
# log loss multiclass probability estimates
clf = self.factory(loss="log", alpha=0.01, n_iter=10).fit(X2, Y2)
d = clf.decision_function([[.1, -.1], [.3, .2]])
p = clf.predict_proba([[.1, -.1], [.3, .2]])
assert_array_equal(np.argmax(p, axis=1), np.argmax(d, axis=1))
assert_almost_equal(p[0].sum(), 1)
assert_true(np.all(p[0] >= 0))
p = clf.predict_proba([-1, -1])
d = clf.decision_function([-1, -1])
assert_array_equal(np.argsort(p[0]), np.argsort(d[0]))
l = clf.predict_log_proba([3, 2])
p = clf.predict_proba([3, 2])
assert_array_almost_equal(np.log(p), l)
l = clf.predict_log_proba([-1, -1])
p = clf.predict_proba([-1, -1])
assert_array_almost_equal(np.log(p), l)
# Modified Huber multiclass probability estimates; requires a separate
# test because the hard zero/one probabilities may destroy the
# ordering present in decision_function output.
clf = self.factory(loss="modified_huber", alpha=0.01, n_iter=10)
clf.fit(X2, Y2)
d = clf.decision_function([3, 2])
p = clf.predict_proba([3, 2])
if not isinstance(self, SparseSGDClassifierTestCase):
assert_equal(np.argmax(d, axis=1), np.argmax(p, axis=1))
else: # XXX the sparse test gets a different X2 (?)
assert_equal(np.argmin(d, axis=1), np.argmin(p, axis=1))
# the following sample produces decision_function values < -1,
# which would cause naive normalization to fail (see comment
# in SGDClassifier.predict_proba)
x = X.mean(axis=0)
d = clf.decision_function(x)
if np.all(d < -1): # XXX not true in sparse test case (why?)
p = clf.predict_proba(x)
assert_array_almost_equal(p[0], [1 / 3.] * 3)
def test_sgd_l1(self):
"""Test L1 regularization"""
n = len(X4)
rng = np.random.RandomState(13)
idx = np.arange(n)
rng.shuffle(idx)
X = X4[idx, :]
Y = Y4[idx]
clf = self.factory(penalty='l1', alpha=.2, fit_intercept=False,
n_iter=2000)
clf.fit(X, Y)
assert_array_equal(clf.coef_[0, 1:-1], np.zeros((4,)))
pred = clf.predict(X)
assert_array_equal(pred, Y)
# test sparsify with dense inputs
clf.sparsify()
assert_true(sp.issparse(clf.coef_))
pred = clf.predict(X)
assert_array_equal(pred, Y)
# pickle and unpickle with sparse coef_
clf = pickle.loads(pickle.dumps(clf))
assert_true(sp.issparse(clf.coef_))
pred = clf.predict(X)
assert_array_equal(pred, Y)
def test_class_weights(self):
"""
Test class weights.
"""
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
clf = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False,
class_weight=None)
clf.fit(X, y)
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))
# we give a small weight to class 1
clf = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False,
class_weight={1: 0.001})
clf.fit(X, y)
# now the hyperplane should rotate clock-wise and
# the prediction on this point should shift
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([-1]))
def test_equal_class_weight(self):
"""Test if equal class weights approx. equals no class weights. """
X = [[1, 0], [1, 0], [0, 1], [0, 1]]
y = [0, 0, 1, 1]
clf = self.factory(alpha=0.1, n_iter=1000, class_weight=None)
clf.fit(X, y)
X = [[1, 0], [0, 1]]
y = [0, 1]
clf_weighted = self.factory(alpha=0.1, n_iter=1000,
class_weight={0: 0.5, 1: 0.5})
clf_weighted.fit(X, y)
# should be similar up to some epsilon due to learning rate schedule
assert_almost_equal(clf.coef_, clf_weighted.coef_, decimal=2)
@raises(ValueError)
def test_wrong_class_weight_label(self):
"""ValueError due to not existing class label."""
clf = self.factory(alpha=0.1, n_iter=1000, class_weight={0: 0.5})
clf.fit(X, Y)
@raises(ValueError)
def test_wrong_class_weight_format(self):
"""ValueError due to wrong class_weight argument type."""
clf = self.factory(alpha=0.1, n_iter=1000, class_weight=[0.5])
clf.fit(X, Y)
def test_auto_weight(self):
"""Test class weights for imbalanced data"""
# compute reference metrics on iris dataset that is quite balanced by
# default
X, y = iris.data, iris.target
X = scale(X)
idx = np.arange(X.shape[0])
rng = np.random.RandomState(0)
rng.shuffle(idx)
X = X[idx]
y = y[idx]
clf = self.factory(alpha=0.0001, n_iter=1000,
class_weight=None).fit(X, y)
assert_almost_equal(metrics.f1_score(y, clf.predict(X)), 0.96,
decimal=1)
# make the same prediction using automated class_weight
clf_auto = self.factory(alpha=0.0001, n_iter=1000,
class_weight="auto").fit(X, y)
assert_almost_equal(metrics.f1_score(y, clf_auto.predict(X)), 0.96,
decimal=1)
# Make sure that in the balanced case it does not change anything
# to use "auto"
assert_array_almost_equal(clf.coef_, clf_auto.coef_, 6)
# build a very imbalanced dataset out of iris data
X_0 = X[y == 0, :]
y_0 = y[y == 0]
X_imbalanced = np.vstack([X] + [X_0] * 10)
y_imbalanced = np.concatenate([y] + [y_0] * 10)
# fit a model on the imbalanced data without class weight info
clf = self.factory(n_iter=1000, class_weight=None)
clf.fit(X_imbalanced, y_imbalanced)
y_pred = clf.predict(X)
assert_less(metrics.f1_score(y, y_pred), 0.96)
# fit a model with auto class_weight enabled
clf = self.factory(n_iter=1000, class_weight="auto")
clf.fit(X_imbalanced, y_imbalanced)
y_pred = clf.predict(X)
assert_greater(metrics.f1_score(y, y_pred), 0.96)
# fit another using a fit parameter override
clf = self.factory(n_iter=1000, class_weight="auto")
clf.fit(X_imbalanced, y_imbalanced)
y_pred = clf.predict(X)
assert_greater(metrics.f1_score(y, y_pred), 0.96)
def test_sample_weights(self):
"""Test weights on individual samples"""
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
clf = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False)
clf.fit(X, y)
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))
# we give a small weight to class 1
clf.fit(X, y, sample_weight=[0.001] * 3 + [1] * 2)
# now the hyperplane should rotate clock-wise and
# the prediction on this point should shift
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([-1]))
@raises(ValueError)
def test_wrong_sample_weights(self):
"""Test if ValueError is raised if sample_weight has wrong shape"""
clf = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False)
# provided sample_weight too long
clf.fit(X, Y, sample_weight=np.arange(7))
@raises(ValueError)
def test_partial_fit_exception(self):
clf = self.factory(alpha=0.01)
# classes was not specified
clf.partial_fit(X3, Y3)
def test_partial_fit_binary(self):
third = X.shape[0] // 3
clf = self.factory(alpha=0.01)
classes = np.unique(Y)
clf.partial_fit(X[:third], Y[:third], classes=classes)
assert_equal(clf.coef_.shape, (1, X.shape[1]))
assert_equal(clf.intercept_.shape, (1,))
assert_equal(clf.decision_function([0, 0]).shape, (1, ))
id1 = id(clf.coef_.data)
clf.partial_fit(X[third:], Y[third:])
id2 = id(clf.coef_.data)
# check that coef_ hasn't been re-allocated
assert_true(id1, id2)
y_pred = clf.predict(T)
assert_array_equal(y_pred, true_result)
def test_partial_fit_multiclass(self):
third = X2.shape[0] // 3
clf = self.factory(alpha=0.01)
classes = np.unique(Y2)
clf.partial_fit(X2[:third], Y2[:third], classes=classes)
assert_equal(clf.coef_.shape, (3, X2.shape[1]))
assert_equal(clf.intercept_.shape, (3,))
assert_equal(clf.decision_function([0, 0]).shape, (1, 3))
id1 = id(clf.coef_.data)
clf.partial_fit(X2[third:], Y2[third:])
id2 = id(clf.coef_.data)
# check that coef_ hasn't been re-allocated
assert_true(id1, id2)
def test_fit_then_partial_fit(self):
"""Partial_fit should work after initial fit in the multiclass case.
Non-regression test for #2496; fit would previously produce a
Fortran-ordered coef_ that subsequent partial_fit couldn't handle.
"""
clf = self.factory()
clf.fit(X2, Y2)
clf.partial_fit(X2, Y2) # no exception here
def _test_partial_fit_equal_fit(self, lr):
for X_, Y_, T_ in ((X, Y, T), (X2, Y2, T2)):
clf = self.factory(alpha=0.01, eta0=0.01, n_iter=2,
learning_rate=lr, shuffle=False)
clf.fit(X_, Y_)
y_pred = clf.decision_function(T_)
t = clf.t_
classes = np.unique(Y_)
clf = self.factory(alpha=0.01, eta0=0.01, learning_rate=lr,
shuffle=False)
for i in range(2):
clf.partial_fit(X_, Y_, classes=classes)
y_pred2 = clf.decision_function(T_)
assert_equal(clf.t_, t)
assert_array_almost_equal(y_pred, y_pred2, decimal=2)
def test_partial_fit_equal_fit_constant(self):
self._test_partial_fit_equal_fit("constant")
def test_partial_fit_equal_fit_optimal(self):
self._test_partial_fit_equal_fit("optimal")
def test_partial_fit_equal_fit_invscaling(self):
self._test_partial_fit_equal_fit("invscaling")
def test_regression_losses(self):
clf = self.factory(alpha=0.01, learning_rate="constant",
eta0=0.1, loss="epsilon_insensitive")
clf.fit(X, Y)
assert_equal(1.0, np.mean(clf.predict(X) == Y))
clf = self.factory(alpha=0.01, learning_rate="constant",
eta0=0.1, loss="squared_epsilon_insensitive")
clf.fit(X, Y)
assert_equal(1.0, np.mean(clf.predict(X) == Y))
clf = self.factory(alpha=0.01, loss="huber")
clf.fit(X, Y)
assert_equal(1.0, np.mean(clf.predict(X) == Y))
clf = self.factory(alpha=0.01, learning_rate="constant", eta0=0.01,
loss="squared_loss")
clf.fit(X, Y)
assert_equal(1.0, np.mean(clf.predict(X) == Y))
def test_warm_start_multiclass(self):
self._test_warm_start(X2, Y2, "optimal")
def test_multiple_fit(self):
"""Test multiple calls of fit w/ different shaped inputs."""
clf = self.factory(alpha=0.01, n_iter=5,
shuffle=False)
clf.fit(X, Y)
assert_true(hasattr(clf, "coef_"))
# Non-regression test: try fitting with a different label set.
y = [["ham", "spam"][i] for i in LabelEncoder().fit_transform(Y)]
clf.fit(X[:, :-1], y)
class SparseSGDClassifierTestCase(DenseSGDClassifierTestCase):
"""Run exactly the same tests using the sparse representation variant"""
factory = SparseSGDClassifier
###############################################################################
# Regression Test Case
class DenseSGDRegressorTestCase(unittest.TestCase, CommonTest):
"""Test suite for the dense representation variant of SGD"""
factory = SGDRegressor
def test_sgd(self):
"""Check that SGD gives any results."""
clf = self.factory(alpha=0.1, n_iter=2,
fit_intercept=False)
clf.fit([[0, 0], [1, 1], [2, 2]], [0, 1, 2])
assert_equal(clf.coef_[0], clf.coef_[1])
@raises(ValueError)
def test_sgd_bad_penalty(self):
"""Check whether expected ValueError on bad penalty"""
self.factory(penalty='foobar', l1_ratio=0.85)
@raises(ValueError)
def test_sgd_bad_loss(self):
"""Check whether expected ValueError on bad loss"""
self.factory(loss="foobar")
def test_sgd_least_squares_fit(self):
xmin, xmax = -5, 5
n_samples = 100
rng = np.random.RandomState(0)
X = np.linspace(xmin, xmax, n_samples).reshape(n_samples, 1)
# simple linear function without noise
y = 0.5 * X.ravel()
clf = self.factory(loss='squared_loss', alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_greater(score, 0.99)
# simple linear function with noise
y = 0.5 * X.ravel() + rng.randn(n_samples, 1).ravel()
clf = self.factory(loss='squared_loss', alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_greater(score, 0.5)
def test_sgd_epsilon_insensitive(self):
xmin, xmax = -5, 5
n_samples = 100
X = np.linspace(xmin, xmax, n_samples).reshape(n_samples, 1)
# simple linear function without noise
y = 0.5 * X.ravel()
clf = self.factory(loss='epsilon_insensitive', epsilon=0.01,
alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_true(score > 0.99)
# simple linear function with noise
y = 0.5 * X.ravel() \
+ np.random.randn(n_samples, 1).ravel()
clf = self.factory(loss='epsilon_insensitive', epsilon=0.01,
alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_true(score > 0.5)
def test_sgd_huber_fit(self):
xmin, xmax = -5, 5
n_samples = 100
rng = np.random.RandomState(0)
X = np.linspace(xmin, xmax, n_samples).reshape(n_samples, 1)
# simple linear function without noise
y = 0.5 * X.ravel()
clf = self.factory(loss="huber", epsilon=0.1, alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_greater(score, 0.99)
# simple linear function with noise
y = 0.5 * X.ravel() + rng.randn(n_samples, 1).ravel()
clf = self.factory(loss="huber", epsilon=0.1, alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_greater(score, 0.5)
def test_elasticnet_convergence(self):
"""Check that the SGD output is consistent with coordinate descent"""
n_samples, n_features = 1000, 5
rng = np.random.RandomState(0)
X = np.random.randn(n_samples, n_features)
# ground-truth linear model that generates y from X and to which the
# models should converge if the regularizer were set to 0.0
ground_truth_coef = rng.randn(n_features)
y = np.dot(X, ground_truth_coef)
# XXX: alpha = 0.1 seems to cause convergence problems
for alpha in [0.01, 0.001]:
for l1_ratio in [0.5, 0.8, 1.0]:
cd = linear_model.ElasticNet(alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=False)
cd.fit(X, y)
sgd = self.factory(penalty='elasticnet', n_iter=50,
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=False)
sgd.fit(X, y)
err_msg = ("cd and sgd did not converge to comparable "
"results for alpha=%f and l1_ratio=%f"
% (alpha, l1_ratio))
assert_almost_equal(cd.coef_, sgd.coef_, decimal=2,
err_msg=err_msg)
def test_partial_fit(self):
third = X.shape[0] // 3
clf = self.factory(alpha=0.01)
clf.partial_fit(X[:third], Y[:third])
assert_equal(clf.coef_.shape, (X.shape[1], ))
assert_equal(clf.intercept_.shape, (1,))
assert_equal(clf.decision_function([0, 0]).shape, (1, ))
id1 = id(clf.coef_.data)
clf.partial_fit(X[third:], Y[third:])
id2 = id(clf.coef_.data)
# check that coef_ hasn't been re-allocated
assert_true(id1 == id2)
def _test_partial_fit_equal_fit(self, lr):
clf = self.factory(alpha=0.01, n_iter=2, eta0=0.01,
learning_rate=lr, shuffle=False)
clf.fit(X, Y)
y_pred = clf.predict(T)
t = clf.t_
clf = self.factory(alpha=0.01, eta0=0.01,
learning_rate=lr, shuffle=False)
for i in range(2):
clf.partial_fit(X, Y)
y_pred2 = clf.predict(T)
assert_equal(clf.t_, t)
assert_array_almost_equal(y_pred, y_pred2, decimal=2)
def test_partial_fit_equal_fit_constant(self):
self._test_partial_fit_equal_fit("constant")
def test_partial_fit_equal_fit_optimal(self):
self._test_partial_fit_equal_fit("optimal")
def test_partial_fit_equal_fit_invscaling(self):
self._test_partial_fit_equal_fit("invscaling")
def test_loss_function_epsilon(self):
clf = self.factory(epsilon=0.9)
clf.set_params(epsilon=0.1)
assert clf.loss_functions['huber'][1] == 0.1
class SparseSGDRegressorTestCase(DenseSGDRegressorTestCase):
"""Run exactly the same tests using the sparse representation variant"""
factory = SparseSGDRegressor
def test_l1_ratio():
"""Test if l1 ratio extremes match L1 and L2 penalty settings. """
X, y = datasets.make_classification(n_samples=1000,
n_features=100, n_informative=20,
random_state=1234)
# test if elasticnet with l1_ratio near 1 gives same result as pure l1
est_en = SGDClassifier(alpha=0.001, penalty='elasticnet',
l1_ratio=0.9999999999).fit(X, y)
est_l1 = SGDClassifier(alpha=0.001, penalty='l1').fit(X, y)
assert_array_almost_equal(est_en.coef_, est_l1.coef_)
# test if elasticnet with l1_ratio near 0 gives same result as pure l2
est_en = SGDClassifier(alpha=0.001, penalty='elasticnet',
l1_ratio=0.0000000001).fit(X, y)
est_l2 = SGDClassifier(alpha=0.001, penalty='l2').fit(X, y)
assert_array_almost_equal(est_en.coef_, est_l2.coef_)
def test_underflow_or_overlow():
# Generate some weird data with unscaled features
rng = np.random.RandomState(42)
n_samples = 100
n_features = 10
X = rng.normal(size=(n_samples, n_features))
X[:, 0] *= 100
# Define a ground truth on the scaled data
ground_truth = rng.normal(size=n_features)
y = (np.dot(scale(X), ground_truth) > 0.).astype(np.int32)
assert_array_equal(np.unique(y), [0, 1])
model = SGDClassifier(alpha=0.1, loss='squared_hinge', n_iter=500)
# smoke test: model is stable on scaled data
model.fit(scale(X), y)
# model is numerically unstable on unscaled data
msg_regxp = (r"Floating-point under-/overflow occurred at epoch #.*"
" Scaling input data with StandardScaler or MinMaxScaler"
" might help.")
assert_raises_regexp(ValueError, msg_regxp, model.fit, X, y)
| bsd-3-clause |
mattphysics/PNRESD | FIGURES/Figure_7/Figure_7.py | 1 | 13395 | # Generate Figure 7a, 7b of Van Zalinge et al. (2017), On determining the point of no return in climate change, Earth System Dynamics.
# ============================================================
# PACKAGE IMPORT
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import os
from datetime import datetime,timedelta
from scipy.optimize import leastsq
from scipy.stats import norm
# Graphics functions =========================================
def figsize(scale):
'''Give fraction of width, get figsize in inches with height scaled according to golden_mean'''
fig_width_pt = 426.79135 # Get this from LaTeX using \the\textwidth
inches_per_pt = 1.0/72.27 # Convert pt to inch
golden_mean = (np.sqrt(5.0)-1.0)/2.0 # Aesthetic ratio (you could change this)
fig_width = fig_width_pt*inches_per_pt*scale # width in inches
fig_height = fig_width*golden_mean # height in inches
fig_size = [fig_width,fig_height]
return fig_size
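# Quick sanity check (added note, not part of the original script): with the
# LaTeX \textwidth of 426.79135 pt used above, figsize(1.0) evaluates to
# roughly [5.91, 3.65] inches, i.e. a full-width figure whose height is the
# width scaled by the golden ratio ~0.618.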
def figsize2(width,height):
'''Give fraction of width and heigt, get figsize in inches'''
textwidth_pt = 426.79135
textheight_pt = 426.79135
inches_per_pt = 1.0/72.27
return textwidth_pt * inches_per_pt * width,textheight_pt * inches_per_pt * height
# I make my own newfig and savefig functions
def newfig(width,nr=1,nc=1,**kwargs):
'''use function figsize'''
fig,ax = plt.subplots(nrows=nr,ncols=nc,figsize=figsize(width),**kwargs)
return fig, ax
def newfig2(width,height,nr=1,nc=1,**kwargs):
'''use function figsize2'''
fig,ax = plt.subplots(nrows=nr,ncols=nc,figsize=figsize2(width,height),**kwargs)
return fig, ax
def savefig(fig,filename):
fig.savefig('{}.pgf'.format(filename),bbox_inches='tight',dpi=400,rasterized=True)
fig.savefig('{}.pdf'.format(filename),bbox_inches='tight',dpi=400,rasterized=True)
# ============================================================
# Further Functions
# Mitigation scenarios
def mitscen(emis,tstart,efold):
'''return emissions under a reduction scenario that starts at t=tstart and decays exponentially with e-folding timescale efold.
Assumes a timestep of Delta t = 1 yr; tstart is counted from the start of the series, with t_start = 0.
'''
emisreduce = np.zeros(emis.shape)
index = np.arange(emis.shape[0])
emisreduce[:] = emis
emisreduce[index>=tstart] = emis[tstart] * np.exp(-(index[tstart:]-tstart)/efold)
return emisreduce
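# Illustrative usage (added note, not part of the original script): for a
# constant emission series, reductions only begin at index tstart and fall by
# a factor e every efold years, e.g.
#
#     >>> emis = np.ones(300)
#     >>> reduced = mitscen(emis, tstart=250, efold=25)
#     >>> reduced[249], reduced[250], round(reduced[275], 3)
#     (1.0, 1.0, 0.368)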
# Radiative forcing
def getRadForcCO2(C):
'''radiative forcing due to CO2 relative to 1750'''
alpha = 5.35
C0 = 278 # ppm # pre-industrial CO2 (1750)
return alpha * np.log(C/C0)
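# Worked example (added note): with alpha = 5.35 W m^-2, doubling CO2 relative
# to the pre-industrial value C0 = 278 ppm gives
# getRadForcCO2(2 * 278) = 5.35 * ln(2) ~= 3.7 W m^-2, the standard 2xCO2
# forcing; note that the main script later scales this forcing by
# radForc_factor = 0.6.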
# Compute temperature response
def getDeltaTCO2_v1(EMIS_CO2_GtC,CO2_0,G_CO2,G_Temp,radForc_factor):
'''
compute Temperature response from emissions
radiative forcing is scaled up by a factor of 'radForc_factor'
'''
MA = 28.97 # kg kmol^{-1} # mean molecular weight of air
MC = 12.011 # kg kmol^{-1} # mean molecular weight of C
TM = 5.1352e18 # kg # total mass of atmosphere
factorC = (1/MC) * (MA/TM) * 1e18 # conversion from Gt to ppm
EMIS_CO2_ppm = factorC * EMIS_CO2_GtC
# Compute CO2 concentration as function of time
DeltaC_CO2_ppm = np.convolve(G_CO2,EMIS_CO2_ppm,mode='full')[:EMIS_CO2_ppm.shape[0]]
C_CO2_ppm = CO2_0 + DeltaC_CO2_ppm
# Compute radiative forcing relative to 1750
radForc_CO2 = getRadForcCO2(C_CO2_ppm) * radForc_factor
# Compute temperature perturbation
DeltaT_CO2 = np.convolve(G_Temp,radForc_CO2,mode='full')[:radForc_CO2.shape[0]]
res = {
'EMIS_GtC':EMIS_CO2_GtC,
'EMIS_ppm':EMIS_CO2_ppm,
'C_CO2_ppm':C_CO2_ppm,
'radForc_CO2':radForc_CO2,
'DeltaT_CO2':DeltaT_CO2
}
return res
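# Sanity check on the unit conversion above (added note): factorC evaluates to
# (1/12.011) * (28.97/5.1352e18) * 1e18 ~= 0.47 ppm per GtC, i.e. roughly
# 2.1 GtC of carbon emissions per ppm of atmospheric CO2, consistent with the
# commonly used conversion factor.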
# Joos et al., 2013 IRF for emissions to concentrations
def IRFexpFit(a0,a1,a2,a3,tau1,tau2,tau3,time):
'''exponential fit from Joos et al., 2011'''
return a0 + a1 * np.exp(-time/tau1) + a2 * np.exp(-time/tau2) + a3 * np.exp(-time/tau3)
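# Note (added for clarity): the fitted coefficients assigned later in the
# script satisfy a0 + a1 + a2 + a3 ~= 1, so IRFexpFit(..., time=0) ~= 1 (the
# full emission pulse is airborne at t = 0) and the response decays towards
# the asymptote a0 ~= 0.22 on multi-century timescales.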
# Fit exponential
def fitGfunc4(time,a1,t1):
return a1 * np.exp(-time/t1)
def devGGfit(params,time,Garr,Gfunc):
''' deviation between fit and data'''
return Gfunc(time,*params) - Garr
# ============================================================
# ============================================================
# DATA
# Load RCP Emission data
emisrcp26 = pd.read_table('../../DATA/RCP_Data/RCP3PD_EMISSIONS.DAT',skiprows=37,delim_whitespace=True,index_col=0,parse_dates=True,infer_datetime_format=True)
emisrcp26.index = pd.period_range('%i-01-01'%emisrcp26.index[0],'%i-01-01' % emisrcp26.index[-1],freq='a')
emisrcp45 = pd.read_table('../../DATA/RCP_Data/RCP45_EMISSIONS.DAT',skiprows=37,delim_whitespace=True,index_col=0,parse_dates=True,infer_datetime_format=True)
emisrcp45.index = pd.period_range('%i-01-01'%emisrcp45.index[0],'%i-01-01' % emisrcp45.index[-1],freq='a')
emisrcp60 = pd.read_table('../../DATA/RCP_Data/RCP6_EMISSIONS.DAT',skiprows=37,delim_whitespace=True,index_col=0,parse_dates=True,infer_datetime_format=True)
emisrcp60.index = pd.period_range('%i-01-01'%emisrcp60.index[0],'%i-01-01' % emisrcp60.index[-1],freq='a')
emisrcp85 = pd.read_table('../../DATA/RCP_Data/RCP85_EMISSIONS.DAT',skiprows=37,delim_whitespace=True,index_col=0,parse_dates=True,infer_datetime_format=True)
emisrcp85.index = pd.period_range('%i-01-01'%emisrcp85.index[0],'%i-01-01' % emisrcp85.index[-1],freq='a')
# ============================================================
# GREEN FUNCTIONS
# CARBON GREEN FUNCTION FROM JOOS ET AL., 2013
a0,a1,a2,a3,tau1,tau2,tau3=2.17278e-01, 2.24037e-01, 2.82381e-01, 2.76303e-01, 3.94409e+02, 3.65393e+01, 4.30365e+00
time1000 = np.arange(1000)
# For mean response
G_CO2 = IRFexpFit(a0,a1,a2,a3,tau1,tau2,tau3,time1000)
# Variance from Joos Data
pd100 = np.loadtxt('../../DATA/Carbon_Response/IRF_PD100_SMOOTHED_CO2.dat')
G_CO2_var = pd100[:1000,18]**2
# PLASIM Data
# add pre-industrial reference before perturbation at 0-th timestep for each member
T0 = 287.804719046 # K # reference temperature, pre-industrial
Tvar0 = 0.00843298290214 # K^2 # pre-industrial temperature variance
GMST_2co2 = np.zeros((201,201))
GMST_2co2[1:,:] = np.load('../../DATA/PlaSim_Ensembles/2co2_ym.npy')
GMST_2co2[0,:] = T0
GMST_1pct = np.zeros((201,201))
GMST_1pct[1:,:] = np.load('../../DATA/PlaSim_Ensembles/co2_1pr_ym.npy')
GMST_1pct[0,:] = T0
# ensemble mean
EXP_2co2 = GMST_2co2.mean(axis=1)
EXP_1pct = GMST_1pct.mean(axis=1)
# ensemble standard deviation
VAR_2co2 = GMST_2co2.var(axis=1)
VAR_1pct = GMST_1pct.var(axis=1)
VAR_2co2[0] = Tvar0
VAR_1pct[0] = Tvar0
# CO2 Forcing in ppm for abrupt and smooth scenarios
C0 = 278 # ppm
CO2_2co2 = C0 * np.ones(EXP_2co2.shape[0])
CO2_2co2[1:] = 2 * C0
CO2_1pct = C0 * 1.01**(np.arange(EXP_1pct.shape[0]))
CO2_1pct[CO2_1pct>2*C0] = 2*C0
# Radiative forcing in W m^-2 from CO2 Forcing
F2co2 = getRadForcCO2(CO2_2co2)
F1pct = getRadForcCO2(CO2_1pct)
# Compute Data based green functions
Garrd = 1/F2co2[1] * np.diff(EXP_2co2)
Garrd = np.array(list(Garrd)+[Garrd[-1]])
Garr_vard = 1/F2co2[1] * np.diff(VAR_2co2)
Garr_vard = np.array(list(Garr_vard)+[Garr_vard[-1]])
# For long simulations (longer than the simulations on which the Green function is based) we need to extend the Green function.
# We do so by exponential fits to the data and thereby generate response functions for simulations of up to 500 years duration.
# The data-based functions are used for as long as possible.
# Green function for mean response
time = np.arange(Garrd.shape[0])
par7 = leastsq(devGGfit,[1,2],args=(time[1:],Garrd[1:],fitGfunc4))[0]
time500 = np.arange(500)
G_Temp_i = fitGfunc4(time500,*par7)
G_Temp_i[0] = Garrd[0]
# Assign data-based Green function for t<=200 years
G_Temp = np.copy(G_Temp_i)
G_Temp[:Garrd.shape[0]] = Garrd
# Green function for variance of response
# Variance: Simple white noise with mean and variance from data-based G
time500 = np.arange(500)
Garr_var = np.random.normal(loc=Garr_vard.mean(),scale=Garr_vard.std(),size=len(time500))
Garr_var[:Garr_vard.shape[0]] = Garr_vard
# ============================================================
# SIMULATION RESULTS
# Compute temperature distributions for mitigation scenarios, based on RCP emissions, with exponential emission reduction starting in a given year
CO2_0 = 278.05158 # ppm # pre-industrial CO2 concentration
# RCP2.6
muT2100 = pd.Series(index=np.arange(2005,2099).astype('str'))
varT200_Tonly = pd.Series(index=np.arange(2005,2099).astype('str'))
for tstart in range(2005,2100):
emisco226 = mitscen(emisrcp26['FossilCO2'][:'2100'].values,tstart-1765,25)
res = getDeltaTCO2_v1(emisco226,CO2_0,G_CO2,G_Temp,0.6)
muT2100[str(tstart)] = res['DeltaT_CO2'][-1]
varT200_Tonly[str(tstart)] = Tvar0 + np.convolve(Garr_var,res['radForc_CO2'],mode='full')[:res['radForc_CO2'].shape[0]][-1]
T0926 = norm.ppf(0.9,muT2100,varT200_Tonly**0.5)
# RCP4.5
muT2100 = pd.Series(index=np.arange(2005,2099).astype('str'))
varT200_Tonly = pd.Series(index=np.arange(2005,2099).astype('str'))
for tstart in range(2005,2100):
emisco245 = mitscen(emisrcp45['FossilCO2'][:'2100'].values,tstart-1765,25)
res = getDeltaTCO2_v1(emisco245,CO2_0,G_CO2,G_Temp,0.6)
muT2100[str(tstart)] = res['DeltaT_CO2'][-1]
varT200_Tonly[str(tstart)] = Tvar0 + np.convolve(Garr_var,res['radForc_CO2'],mode='full')[:res['radForc_CO2'].shape[0]][-1]
T0945 = norm.ppf(0.9,muT2100,varT200_Tonly**0.5)
# RCP6.0
muT2100 = pd.Series(index=np.arange(2005,2099).astype('str'))
varT200_Tonly = pd.Series(index=np.arange(2005,2099).astype('str'))
for tstart in range(2005,2100):
emisco260 = mitscen(emisrcp60['FossilCO2'][:'2100'].values,tstart-1765,25)
res = getDeltaTCO2_v1(emisco260,CO2_0,G_CO2,G_Temp,0.6)
muT2100[str(tstart)] = res['DeltaT_CO2'][-1]
varT200_Tonly[str(tstart)] = Tvar0 + np.convolve(Garr_var,res['radForc_CO2'],mode='full')[:res['radForc_CO2'].shape[0]][-1]
T0960 = norm.ppf(0.9,muT2100,varT200_Tonly**0.5)
# RCP8.5
muT2100 = pd.Series(index=np.arange(2005,2099).astype('str'))
varT200_Tonly = pd.Series(index=np.arange(2005,2099).astype('str'))
for tstart in range(2005,2100):
emisco285 = mitscen(emisrcp85['FossilCO2'][:'2100'].values,tstart-1765,25)
res = getDeltaTCO2_v1(emisco285,CO2_0,G_CO2,G_Temp,0.6)
muT2100[str(tstart)] = res['DeltaT_CO2'][-1]
varT200_Tonly[str(tstart)] = Tvar0 + np.convolve(Garr_var,res['radForc_CO2'],mode='full')[:res['radForc_CO2'].shape[0]][-1]
T0985 = norm.ppf(0.9,muT2100,varT200_Tonly**0.5)
# Point of no return
T0926 = pd.Series(T0926,index=muT2100.index)
T0945 = pd.Series(T0945,index=muT2100.index)
T0960 = pd.Series(T0960,index=muT2100.index)
T0985 = pd.Series(T0985,index=muT2100.index)
# ============================================================
# PLOTTING
# ============================================================
# FIGURE 7a
fig,ax = newfig(1.0)
T0926.plot(ax=ax,label='RCP2.6')
T0945.plot(ax=ax,label='RCP4.5')
T0960.plot(ax=ax,label='RCP6.0')
T0985.plot(ax=ax,label='RCP8.5')
pd.Series(2*np.ones(T0985.shape),index=muT2100.index).plot(c='k',linestyle='--',label='')
ax.set_xlabel('year starting to reduce emissions')
ax.set_ylabel(r'$90\%$ Warming in 2100 relative to pre-industrial')
ax.set_title(r'PLASIM warming for exponential emissions decrease (25 yr)')
ax.legend()
#savefig(fig,'PLASIM_DeltaT_emisreduce6_2')
plt.show()
# ============================================
#FIGURE 7b
#PofnoR26 = T0926.index[np.where(T0926>=2)[0][0]]
PofnoR45 = int(T0945.index[np.where(T0945>=2)[0][0]])
PofnoR60 = int(T0960.index[np.where(T0960>=2)[0][0]])
PofnoR85 = int(T0985.index[np.where(T0985>=2)[0][0]])
# Plot concentrations for mitigation starting at point of no return
fig,ax = newfig(1.0)
years = np.arange(1765,2101)
# RCP2.6
resn26 = getDeltaTCO2_v1(emisrcp26['FossilCO2'][:'2100'].values,CO2_0,G_CO2,G_Temp,0.6)
ax.plot(years,resn26['C_CO2_ppm'],c='C0')
# RCP4.5
resn45 = getDeltaTCO2_v1(emisrcp45['FossilCO2'][:'2100'].values,CO2_0,G_CO2,G_Temp,0.6)
ax.plot(years,resn45['C_CO2_ppm'],c='C1')
emisco245 = mitscen(emisrcp45['FossilCO2'][:'2100'].values,PofnoR45-1765,25)
resy45 = getDeltaTCO2_v1(emisco245,CO2_0,G_CO2,G_Temp,0.6)
ax.plot(years,resy45['C_CO2_ppm'],'--',c='C1')
# RCP6.0
resn60 = getDeltaTCO2_v1(emisrcp60['FossilCO2'][:'2100'].values,CO2_0,G_CO2,G_Temp,0.6)
ax.plot(years,resn60['C_CO2_ppm'],c='C2')
emisco260 = mitscen(emisrcp60['FossilCO2'][:'2100'].values,PofnoR60-1765,25)
resy60 = getDeltaTCO2_v1(emisco260,CO2_0,G_CO2,G_Temp,0.6)
ax.plot(years,resy60['C_CO2_ppm'],'--',c='C2')
# RCP8.5
resn85 = getDeltaTCO2_v1(emisrcp85['FossilCO2'][:'2100'].values,CO2_0,G_CO2,G_Temp,0.6)
ax.plot(years,resn85['C_CO2_ppm'],c='C3')
emisco285 = mitscen(emisrcp85['FossilCO2'][:'2100'].values,PofnoR85-1765,25)
resy85 = getDeltaTCO2_v1(emisco285,CO2_0,G_CO2,G_Temp,0.6)
ax.plot(years,resy85['C_CO2_ppm'],'--',c='C3')
ax.plot(PofnoR45,resy45['C_CO2_ppm'][np.where(years==PofnoR45)],'o',label=r'RCP4.5, $\pi_t = %i$' % PofnoR45,c='C1')
ax.plot(PofnoR60,resy60['C_CO2_ppm'][np.where(years==PofnoR60)],'o',label=r'RCP6.0, $\pi_t = %i$' % PofnoR60,c='C2')
ax.plot(PofnoR85,resy85['C_CO2_ppm'][np.where(years==PofnoR85)],'o',label=r'RCP8.5, $\pi_t = %i$' % PofnoR85,c='C3')
ax.set_xlim(2000,2100)
ax.set_ylim(350,600)
ax.ticklabel_format(axis='x',scilimits=(-5,5))
ax.legend()
ax.set_xlabel('Year')
ax.set_ylabel(r'CO$_2$ (ppm)')
#savefig(fig,'PLASIM_PofnoR_CO2')
plt.show()
# ============================================================
# End of script
| gpl-3.0 |
sqvarfort/bad-boids | boids/boids.py | 1 | 4367 | """
Boids
-----------------
Simulates the behaviour of flocking animals, such as starlings.
This file contains the Boid class.
Adapted from a deliberately bad implementation of [Boids](http://dl.acm.org/citation.cfm?doid=37401.37406)
for use as an exercise on refactoring.
"""
from matplotlib import pyplot as plt
from matplotlib import animation
import numpy as np
import random
import yaml
import os
class Boids(object):
def __init__(self, boid_no, config):
# Open config file, raise an exception if yaml cannot read the text.
try:
input_data = yaml.load(open(os.path.join(os.path.dirname(__file__),config)))
except IOError:
print 'Could not read file.'
raise  # re-raise the original IOError; the quit() that followed was unreachable
if boid_no == 0:
raise ValueError('The number of boids must be greater than zero.')
# Assign config file values to variables
position_values = input_data[0]
velocity_values = input_data[1]
# Define input paramters
lower_pos_limits = np.array([int(position_values['xmin']), int(position_values['ymin'])])
upper_pos_limits = np.array([int(position_values['xmax']), int(position_values['ymax'])])
lower_vel_limits = np.array([int(velocity_values['vxmin']), int(velocity_values['vymin'])])
upper_vel_limits = np.array([int(velocity_values['vxmax']), int(velocity_values['vymax'])])
# Make 2xN arrays with positions[0] = x-values and positions[1] = y-values
self.positions=self.initialise(boid_no, lower_pos_limits, upper_pos_limits)
self.velocities=self.initialise(boid_no, lower_vel_limits, upper_vel_limits)
# Create figure
self.figure=plt.figure()
self.axes=plt.axes(xlim=(int(lower_pos_limits[0]),int(upper_pos_limits[0])+1000), ylim=(int(lower_pos_limits[1])-500,int(upper_pos_limits[1])+500)) #Axes will adapt with input parameters
self.scatter=self.axes.scatter(self.positions[0,:],self.positions[1,:])
plt.ylabel('$y$')
plt.title('Boids')
plt.xlabel('$x$')
plt.rcParams.update({'font.size': 40})
def initialise(self, count, lower_limits, upper_limits): # Initialise random values for positions and velocities over the specified range
width=upper_limits-lower_limits
return (lower_limits[:,np.newaxis] + np.random.rand(2, count)*width[:,np.newaxis])
def calculate_separations(self):
self.separations = self.positions[:,np.newaxis,:] - self.positions[:,:,np.newaxis] # Use broadcasting to calculate a matrix of pairwise separations
self.squared_displacements = self.separations * self.separations
self.square_distances = np.sum(self.squared_displacements, 0)
def fly_to_middle(self): # Make the Boids fly towards the middle
middle = np.mean(self.positions,1) # Calculate the middle of the flock
dir_to_middle = self.positions-middle[:, np.newaxis]
middle_strength = 0.01
self.velocities -= dir_to_middle*middle_strength
def avoid_collisions(self): # Include collision detection
self.calculate_separations() #update separation matrix
alert_distance = 100
far_away = self.square_distances > alert_distance # Create logical array
separations_if_close = np.copy(self.separations)
separations_if_close[0,:,:][far_away] = 0
separations_if_close[1,:,:][far_away] = 0
self.velocities += np.sum(separations_if_close,1) # Update velocities
def match_velocity(self):
self.calculate_separations() #update separation matrix
velocity_differences = self.velocities[:,np.newaxis,:] - self.velocities[:,:,np.newaxis] # Get an N x N matrix of pairwise velocity differences between boids
formation_flying_distance = 10000 # Squared distance within which boids try to match velocities
formation_flying_strength = 0.125
very_far = self.square_distances > formation_flying_distance # Create boolean matrix
velocity_differences_if_close = np.copy(velocity_differences)
velocity_differences_if_close[0,:,:][very_far] = 0
velocity_differences_if_close[1,:,:][very_far] = 0
self.velocities -= np.mean(velocity_differences_if_close, 1) * formation_flying_strength # update velocities
def update_boids(self): # Apply all methods
# Call all the methods used
self.fly_to_middle()
self.avoid_collisions()
self.match_velocity()
dt = 1 # Time constant to define iteration steps. This is probably inefficient
self.positions += dt * self.velocities # Update velocities
def animate(self, frame): # Parameters for the animation. Note that frame cannot be removed.
self.update_boids()
self.scatter.set_offsets(zip(self.positions[0, :],self.positions[1, :]))
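# Hypothetical driver (added sketch, not part of the original module): assuming
# a YAML config laid out as a list of two mappings -- position limits
# (xmin/xmax/ymin/ymax) followed by velocity limits (vxmin/vxmax/vymin/vymax),
# resolved relative to this module's directory -- the flock could be animated
# with matplotlib's FuncAnimation, e.g.
#
#     boids = Boids(50, 'config.yml')
#     anim = animation.FuncAnimation(boids.figure, boids.animate,
#                                    frames=200, interval=50)
#     plt.show()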
| mit |
kelseyoo14/Wander | venv_2_7/lib/python2.7/site-packages/pandas/core/categorical.py | 9 | 67232 | # pylint: disable=E1101,W0232
import numpy as np
from warnings import warn
import types
from pandas import compat, lib
from pandas.compat import u
from pandas.core.algorithms import factorize
from pandas.core.base import PandasObject, PandasDelegate, NoNewAttributesMixin
import pandas.core.common as com
from pandas.core.missing import interpolate_2d
from pandas.util.decorators import cache_readonly, deprecate_kwarg
from pandas.core.common import (ABCSeries, ABCIndexClass, ABCPeriodIndex, ABCCategoricalIndex,
isnull, notnull, is_dtype_equal,
is_categorical_dtype, is_integer_dtype, is_object_dtype,
_possibly_infer_to_datetimelike, get_dtype_kinds,
is_list_like, is_sequence, is_null_slice, is_bool,
_ensure_platform_int, _ensure_object, _ensure_int64,
_coerce_indexer_dtype, take_1d)
from pandas.core.dtypes import CategoricalDtype
from pandas.util.terminal import get_terminal_size
from pandas.core.config import get_option
def _cat_compare_op(op):
def f(self, other):
# On python2, you can usually compare any type to any type, and Categoricals can be
# seen as a custom type, but having different results depending whether categories are
# the same or not is kind of insane, so be a bit stricter here and use the python3 idea
# of comparing only things of equal type.
if not self.ordered:
if op in ['__lt__', '__gt__','__le__','__ge__']:
raise TypeError("Unordered Categoricals can only compare equality or not")
if isinstance(other, Categorical):
# Two Categoricals can only be compared if the categories are the same
if (len(self.categories) != len(other.categories)) or \
not ((self.categories == other.categories).all()):
raise TypeError("Categoricals can only be compared if 'categories' are the same")
if not (self.ordered == other.ordered):
raise TypeError("Categoricals can only be compared if 'ordered' is the same")
na_mask = (self._codes == -1) | (other._codes == -1)
f = getattr(self._codes, op)
ret = f(other._codes)
if na_mask.any():
# In other series, the leads to False, so do that here too
ret[na_mask] = False
return ret
# Numpy-1.9 and earlier may convert a scalar to a zerodim array during
# comparison operation when second arg has higher priority, e.g.
#
# cat[0] < cat
#
# With cat[0], for example, being ``np.int64(1)`` by the time it gets
# into this function would become ``np.array(1)``.
other = lib.item_from_zerodim(other)
if lib.isscalar(other):
if other in self.categories:
i = self.categories.get_loc(other)
return getattr(self._codes, op)(i)
else:
if op == '__eq__':
return np.repeat(False, len(self))
elif op == '__ne__':
return np.repeat(True, len(self))
else:
msg = "Cannot compare a Categorical for op {op} with a scalar, " \
"which is not a category."
raise TypeError(msg.format(op=op))
else:
# allow categorical vs object dtype array comparisons for equality
# these are only positional comparisons
if op in ['__eq__','__ne__']:
return getattr(np.array(self),op)(np.array(other))
msg = "Cannot compare a Categorical for op {op} with type {typ}. If you want to \n" \
"compare values, use 'np.asarray(cat) <op> other'."
raise TypeError(msg.format(op=op,typ=type(other)))
f.__name__ = op
return f
def maybe_to_categorical(array):
""" coerce to a categorical if a series is given """
if isinstance(array, (ABCSeries, ABCCategoricalIndex)):
return array._values
return array
_codes_doc = """The category codes of this categorical.
Level codes are an array of integers which are the positions of the real
values in the categories array.
There is no setter; use the other categorical methods and the normal item setter to change
values in the categorical.
"""
_categories_doc = """The categories of this categorical.
Setting assigns new values to each category (effectively a rename of
each individual category).
The assigned value has to be a list-like object. All items must be unique and the number of items
in the new categories must be the same as the number of items in the old categories.
Assigning to `categories` is an inplace operation!
Raises
------
ValueError
If the new categories do not validate as categories or if the number of new categories is
unequal to the number of old categories
See also
--------
rename_categories
reorder_categories
add_categories
remove_categories
remove_unused_categories
set_categories
"""
class Categorical(PandasObject):
"""
Represents a categorical variable in classic R / S-plus fashion
`Categoricals` can only take on a limited, and usually fixed, number
of possible values (`categories`). In contrast to statistical categorical
variables, a `Categorical` might have an order, but numerical operations
(additions, divisions, ...) are not possible.
All values of the `Categorical` are either in `categories` or `np.nan`.
Assigning values outside of `categories` will raise a `ValueError`. Order is
defined by the order of the `categories`, not lexical order of the values.
Parameters
----------
values : list-like
The values of the categorical. If categories are given, values not in categories will
be replaced with NaN.
categories : Index-like (unique), optional
The unique categories for this categorical. If not given, the categories are assumed
to be the unique values of values.
ordered : boolean, (default False)
Whether or not this categorical is treated as an ordered categorical. If not given,
the resulting categorical will not be ordered.
Attributes
----------
categories : Index
The categories of this categorical
codes : ndarray
The codes (integer positions, which point to the categories) of this categorical, read only.
ordered : boolean
Whether or not this Categorical is ordered.
Raises
------
ValueError
If the categories do not validate.
TypeError
If an explicit ``ordered=True`` is given but no `categories` and the `values` are
not sortable.
Examples
--------
>>> from pandas import Categorical
>>> Categorical([1, 2, 3, 1, 2, 3])
[1, 2, 3, 1, 2, 3]
Categories (3, int64): [1 < 2 < 3]
>>> Categorical(['a', 'b', 'c', 'a', 'b', 'c'])
[a, b, c, a, b, c]
Categories (3, object): [a < b < c]
>>> a = Categorical(['a','b','c','a','b','c'], ['c', 'b', 'a'], ordered=True)
>>> a.min()
'c'
"""
dtype = CategoricalDtype()
"""The dtype (always "category")"""
"""Whether or not this Categorical is ordered.
Only ordered `Categoricals` can be sorted (according to the order
of the categories) and have a min and max value.
See also
--------
Categorical.sort
Categorical.order
Categorical.min
Categorical.max
"""
# For comparisons, so that numpy uses our implementation of the compare ops, which raise
__array_priority__ = 1000
_typ = 'categorical'
def __init__(self, values, categories=None, ordered=False, name=None, fastpath=False,
levels=None):
if fastpath:
# fast path
self._codes = _coerce_indexer_dtype(values, categories)
self._categories = self._validate_categories(categories, fastpath=isinstance(categories, ABCIndexClass))
self._ordered = ordered
return
if not name is None:
msg = "the 'name' keyword is removed, use 'name' with consumers of the " \
"categorical instead (e.g. 'Series(cat, name=\"something\")'"
warn(msg, UserWarning, stacklevel=2)
# TODO: Remove after deprecation period in 2017/ after 0.18
if not levels is None:
warn("Creating a 'Categorical' with 'levels' is deprecated, use 'categories' instead",
FutureWarning, stacklevel=2)
if categories is None:
categories = levels
else:
raise ValueError("Cannot pass in both 'categories' and (deprecated) 'levels', "
"use only 'categories'", stacklevel=2)
# sanitize input
if is_categorical_dtype(values):
# we are either a Series or a CategoricalIndex
if isinstance(values, (ABCSeries, ABCCategoricalIndex)):
values = values._values
if ordered is None:
ordered = values.ordered
if categories is None:
categories = values.categories
values = values.__array__()
elif isinstance(values, ABCIndexClass):
pass
else:
# on numpy < 1.6 datetimelike get inferred to all i8 by _sanitize_array
# which is fine, but since factorize does this correctly no need here
# this is an issue because _sanitize_array also coerces np.nan to a string
# under certain versions of numpy as well
values = _possibly_infer_to_datetimelike(values, convert_dates=True)
if not isinstance(values, np.ndarray):
values = _convert_to_list_like(values)
from pandas.core.series import _sanitize_array
# On list with NaNs, int values will be converted to float. Use "object" dtype
# to prevent this. In the end objects will be casted to int/... in the category
# assignment step.
dtype = 'object' if isnull(values).any() else None
values = _sanitize_array(values, None, dtype=dtype)
if categories is None:
try:
codes, categories = factorize(values, sort=True)
except TypeError:
codes, categories = factorize(values, sort=False)
if ordered:
# raise, as we don't have a sortable data structure and so the user should
# give us one by specifying categories
raise TypeError("'values' is not ordered, please explicitly specify the "
"categories order by passing in a categories argument.")
except ValueError:
### FIXME ####
raise NotImplementedError("> 1 ndim Categorical are not supported at this time")
categories = self._validate_categories(categories)
else:
# there were two ways if categories are present
# - the old one, where each value is a int pointer to the levels array -> not anymore
# possible, but code outside of pandas could call us like that, so make some checks
# - the new one, where each value is also in the categories array (or np.nan)
# make sure that we always have the same type here, no matter what we get passed in
categories = self._validate_categories(categories)
codes = _get_codes_for_values(values, categories)
# TODO: check for old style usage. These warnings should be removes after 0.18/ in 2016
if is_integer_dtype(values) and not is_integer_dtype(categories):
warn("Values and categories have different dtypes. Did you mean to use\n"
"'Categorical.from_codes(codes, categories)'?", RuntimeWarning, stacklevel=2)
if len(values) and is_integer_dtype(values) and (codes == -1).all():
warn("None of the categories were found in values. Did you mean to use\n"
"'Categorical.from_codes(codes, categories)'?", RuntimeWarning, stacklevel=2)
self.set_ordered(ordered or False, inplace=True)
self._categories = categories
self._codes = _coerce_indexer_dtype(codes, categories)
def copy(self):
""" Copy constructor. """
return Categorical(values=self._codes.copy(),categories=self.categories,
ordered=self.ordered, fastpath=True)
def astype(self, dtype):
""" coerce this type to another dtype """
if is_categorical_dtype(dtype):
return self
return np.array(self, dtype=dtype)
@cache_readonly
def ndim(self):
"""Number of dimensions of the Categorical """
return self._codes.ndim
@cache_readonly
def size(self):
""" return the len of myself """
return len(self)
@cache_readonly
def itemsize(self):
""" return the size of a single category """
return self.categories.itemsize
def reshape(self, new_shape, **kwargs):
""" compat with .reshape """
return self
@property
def base(self):
""" compat, we are always our own object """
return None
@classmethod
def from_array(cls, data, **kwargs):
"""
Make a Categorical type from a single array-like object.
For internal compatibility with numpy arrays.
Parameters
----------
data : array-like
Can be an Index or array-like. The categories are assumed to be
the unique values of `data`.
"""
return Categorical(data, **kwargs)
@classmethod
def from_codes(cls, codes, categories, ordered=False, name=None):
"""
Make a Categorical type from codes and categories arrays.
This constructor is useful if you already have codes and categories and so do not need the
(computationally intensive) factorization step, which is usually done in the constructor.
If your data does not follow this convention, please use the normal constructor.
Parameters
----------
codes : array-like, integers
An integer array, where each integer points to a category in categories or -1 for NaN
categories : index-like
The categories for the categorical. Items need to be unique.
ordered : boolean, (default False)
Whether or not this categorical is treated as an ordered categorical. If not given,
the resulting categorical will be unordered.
"""
if not name is None:
msg = "the 'name' keyword is removed, use 'name' with consumers of the " \
"categorical instead (e.g. 'Series(cat, name=\"something\")'"
warn(msg, UserWarning, stacklevel=2)
try:
codes = np.asarray(codes, np.int64)
except:
raise ValueError("codes need to be convertible to an arrays of integers")
categories = cls._validate_categories(categories)
if len(codes) and (codes.max() >= len(categories) or codes.min() < -1):
raise ValueError("codes need to be between -1 and len(categories)-1")
return Categorical(codes, categories=categories, ordered=ordered, fastpath=True)
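# Illustrative sketch (added note, not part of pandas): from_codes pairs
# integer codes with a fixed list of categories, with -1 marking NaN, e.g.
#
#     >>> Categorical.from_codes([0, 1, 1, -1], categories=['a', 'b'])
#     [a, b, b, NaN]
#     Categories (2, object): [a, b]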
_codes = None
def _get_codes(self):
""" Get the codes.
Returns
-------
codes : integer array view
A non writable view of the `codes` array.
"""
v = self._codes.view()
v.flags.writeable = False
return v
def _set_codes(self, codes):
"""
Not settable by the user directly
"""
raise ValueError("cannot set Categorical codes directly")
codes = property(fget=_get_codes, fset=_set_codes, doc=_codes_doc)
def _get_labels(self):
"""
Get the category labels (deprecated).
Deprecated, use .codes!
"""
warn("'labels' is deprecated. Use 'codes' instead", FutureWarning, stacklevel=2)
return self.codes
labels = property(fget=_get_labels, fset=_set_codes)
_categories = None
@classmethod
def _validate_categories(cls, categories, fastpath=False):
"""
Validates that we have good categories
Parameters
----------
fastpath : boolean (default: False)
Don't perform validation of the categories for uniqueness or nulls
"""
if not isinstance(categories, ABCIndexClass):
dtype = None
if not hasattr(categories, "dtype"):
categories = _convert_to_list_like(categories)
# on categories with NaNs, int values would be converted to float.
# Use "object" dtype to prevent this.
if isnull(categories).any():
without_na = np.array([x for x in categories if notnull(x)])
with_na = np.array(categories)
if with_na.dtype != without_na.dtype:
dtype = "object"
from pandas import Index
categories = Index(categories, dtype=dtype)
if not fastpath:
# check properties of the categories
# we don't allow NaNs in the categories themselves
if categories.hasnans:
# NaNs in cats deprecated in 0.17, remove in 0.18 or 0.19 GH 10748
msg = ('\nSetting NaNs in `categories` is deprecated and '
'will be removed in a future version of pandas.')
warn(msg, FutureWarning, stacklevel=3)
# categories must be unique
if not categories.is_unique:
raise ValueError('Categorical categories must be unique')
return categories
def _set_categories(self, categories, fastpath=False):
""" Sets new categories
Parameters
----------
fastpath : boolean (default: False)
Don't perform validation of the categories for uniqueness or nulls
"""
categories = self._validate_categories(categories, fastpath=fastpath)
if not fastpath and not self._categories is None and len(categories) != len(self._categories):
raise ValueError("new categories need to have the same number of items than the old "
"categories!")
self._categories = categories
def _get_categories(self):
""" Gets the categories """
# categories is an Index, which is immutable -> no need to copy
return self._categories
categories = property(fget=_get_categories, fset=_set_categories, doc=_categories_doc)
def _set_levels(self, levels):
""" set new levels (deprecated, use "categories") """
warn("Assigning to 'levels' is deprecated, use 'categories'", FutureWarning, stacklevel=2)
self.categories = levels
def _get_levels(self):
""" Gets the levels (deprecated, use "categories") """
warn("Accessing 'levels' is deprecated, use 'categories'", FutureWarning, stacklevel=2)
return self.categories
# TODO: Remove after deprecation period in 2017/ after 0.18
levels = property(fget=_get_levels, fset=_set_levels)
_ordered = None
def _set_ordered(self, value):
""" Sets the ordered attribute to the boolean value """
warn("Setting 'ordered' directly is deprecated, use 'set_ordered'", FutureWarning,
stacklevel=2)
self.set_ordered(value, inplace=True)
def set_ordered(self, value, inplace=False):
"""
Sets the ordered attribute to the boolean value
Parameters
----------
value : boolean to set whether this categorical is ordered (True) or not (False)
inplace : boolean (default: False)
Whether or not to set the ordered attribute inplace or return a copy of this categorical
with ordered set to the value
"""
if not is_bool(value):
raise TypeError("ordered must be a boolean value")
cat = self if inplace else self.copy()
cat._ordered = value
if not inplace:
return cat
def as_ordered(self, inplace=False):
"""
Sets the Categorical to be ordered
Parameters
----------
inplace : boolean (default: False)
Whether or not to set the ordered attribute inplace or return a copy of this categorical
with ordered set to True
"""
return self.set_ordered(True, inplace=inplace)
def as_unordered(self, inplace=False):
"""
Sets the Categorical to be unordered
Parameters
----------
inplace : boolean (default: False)
Whether or not to set the ordered attribute inplace or return a copy of this categorical
with ordered set to False
"""
return self.set_ordered(False, inplace=inplace)
def _get_ordered(self):
""" Gets the ordered attribute """
return self._ordered
ordered = property(fget=_get_ordered, fset=_set_ordered)
def set_categories(self, new_categories, ordered=None, rename=False, inplace=False):
""" Sets the categories to the specified new_categories.
`new_categories` can include new categories (which will result in unused categories) or
remove old categories (which results in values set to NaN). If `rename==True`,
the categories will simply be renamed (fewer or more items than in the old categories will
result in values set to NaN or in unused categories respectively).
This method can be used to perform more than one action of adding, removing,
and reordering simultaneously and is therefore faster than performing the individual steps
via the more specialised methods.
On the other hand this method does not do checks (e.g., whether the old categories are
included in the new categories on a reorder), which can result in surprising changes, for
example when using special string dtypes on python3, which does not consider an S1 string
equal to a single-char python string.
Raises
------
ValueError
If new_categories does not validate as categories
Parameters
----------
new_categories : Index-like
The categories in new order.
ordered : boolean, (default: False)
Whether or not the categorical is treated as an ordered categorical. If not given,
do not change the ordered information.
rename : boolean (default: False)
Whether or not the new_categories should be considered as a rename of the old
categories or as reordered categories.
inplace : boolean (default: False)
Whether or not to reorder the categories inplace or return a copy of this categorical
with reordered categories.
Returns
-------
cat : Categorical with reordered categories or None if inplace.
See also
--------
rename_categories
reorder_categories
add_categories
remove_categories
remove_unused_categories
"""
new_categories = self._validate_categories(new_categories)
cat = self if inplace else self.copy()
if rename:
if not cat._categories is None and len(new_categories) < len(cat._categories):
# remove all _codes which are larger and set to -1/NaN
self._codes[self._codes >= len(new_categories)] = -1
else:
values = cat.__array__()
cat._codes = _get_codes_for_values(values, new_categories)
cat._categories = new_categories
if ordered is None:
ordered = self.ordered
cat.set_ordered(ordered, inplace=True)
if not inplace:
return cat
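# Illustrative sketch (added note, not part of pandas): with rename=False the
# values are re-matched against the new categories, while rename=True maps old
# categories to new ones positionally, e.g.
#
#     >>> c = Categorical(['a', 'b', 'a'])
#     >>> np.asarray(c.set_categories(['b', 'a', 'z']))        # 'z' is simply unused
#     array(['a', 'b', 'a'], dtype=object)
#     >>> np.asarray(c.set_categories(['b', 'a'], rename=True))  # positional rename
#     array(['b', 'a', 'b'], dtype=object)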
def rename_categories(self, new_categories, inplace=False):
""" Renames categories.
The new categories have to be a list-like object. All items must be unique and the number of
items in the new categories must be the same as the number of items in the old categories.
Raises
------
ValueError
If the new categories do not have the same number of items as the current categories
or do not validate as categories
Parameters
----------
new_categories : Index-like
The renamed categories.
inplace : boolean (default: False)
Whether or not to rename the categories inplace or return a copy of this categorical
with renamed categories.
Returns
-------
cat : Categorical with renamed categories added or None if inplace.
See also
--------
reorder_categories
add_categories
remove_categories
remove_unused_categories
set_categories
"""
cat = self if inplace else self.copy()
cat.categories = new_categories
if not inplace:
return cat
def reorder_categories(self, new_categories, ordered=None, inplace=False):
""" Reorders categories as specified in new_categories.
`new_categories` need to include all old categories and no new category items.
Raises
------
ValueError
If the new categories do not contain all old category items or if they contain any new ones
Parameters
----------
new_categories : Index-like
The categories in new order.
ordered : boolean, optional
Whether or not the categorical is treated as an ordered categorical. If not given,
do not change the ordered information.
inplace : boolean (default: False)
Whether or not to reorder the categories inplace or return a copy of this categorical
with reordered categories.
Returns
-------
cat : Categorical with reordered categories or None if inplace.
See also
--------
rename_categories
add_categories
remove_categories
remove_unused_categories
set_categories
"""
if set(self._categories) != set(new_categories):
raise ValueError("items in new_categories are not the same as in old categories")
return self.set_categories(new_categories, ordered=ordered, inplace=inplace)
def add_categories(self, new_categories, inplace=False):
""" Add new categories.
`new_categories` will be included at the last/highest place in the categories and will be
unused directly after this call.
Raises
------
ValueError
If the new categories include old categories or do not validate as categories
Parameters
----------
new_categories : category or list-like of category
The new categories to be included.
inplace : boolean (default: False)
Whether or not to add the categories inplace or return a copy of this categorical
with added categories.
Returns
-------
cat : Categorical with new categories added or None if inplace.
See also
--------
rename_categories
reorder_categories
remove_categories
remove_unused_categories
set_categories
"""
if not is_list_like(new_categories):
new_categories = [new_categories]
already_included = set(new_categories) & set(self._categories)
if len(already_included) != 0:
msg = "new categories must not include old categories: %s" % str(already_included)
raise ValueError(msg)
new_categories = list(self._categories) + list(new_categories)
cat = self if inplace else self.copy()
cat._categories = self._validate_categories(new_categories)
cat._codes = _coerce_indexer_dtype(cat._codes, new_categories)
if not inplace:
return cat
def remove_categories(self, removals, inplace=False):
""" Removes the specified categories.
`removals` must be included in the old categories. Values which were in the removed
categories will be set to NaN
Raises
------
ValueError
If the removals are not contained in the categories
Parameters
----------
removals : category or list of categories
The categories which should be removed.
inplace : boolean (default: False)
Whether or not to remove the categories inplace or return a copy of this categorical
with removed categories.
Returns
-------
cat : Categorical with removed categories or None if inplace.
See also
--------
rename_categories
reorder_categories
add_categories
remove_unused_categories
set_categories
"""
if not is_list_like(removals):
removals = [removals]
removal_set = set(list(removals))
not_included = removal_set - set(self._categories)
new_categories = [ c for c in self._categories if c not in removal_set ]
# GH 10156
if any(isnull(removals)):
not_included = [x for x in not_included if notnull(x)]
new_categories = [x for x in new_categories if notnull(x)]
if len(not_included) != 0:
raise ValueError("removals must all be in old categories: %s" % str(not_included))
return self.set_categories(new_categories, ordered=self.ordered, rename=False,
inplace=inplace)
def remove_unused_categories(self, inplace=False):
""" Removes categories which are not used.
Parameters
----------
inplace : boolean (default: False)
Whether or not to drop unused categories inplace or return a copy of this categorical
with unused categories dropped.
Returns
-------
cat : Categorical with unused categories dropped or None if inplace.
See also
--------
rename_categories
reorder_categories
add_categories
remove_categories
set_categories
"""
cat = self if inplace else self.copy()
idx, inv = np.unique(cat._codes, return_inverse=True)
if idx.size != 0 and idx[0] == -1: # na sentinel
idx, inv = idx[1:], inv - 1
cat._codes = inv
cat._categories = cat.categories.take(idx)
if not inplace:
return cat
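# Illustrative sketch (added note, not part of pandas):
#
#     >>> c = Categorical(['a', 'b', 'a'], categories=['a', 'b', 'c', 'd'])
#     >>> c.remove_unused_categories()
#     [a, b, a]
#     Categories (2, object): [a, b]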
__eq__ = _cat_compare_op('__eq__')
__ne__ = _cat_compare_op('__ne__')
__lt__ = _cat_compare_op('__lt__')
__gt__ = _cat_compare_op('__gt__')
__le__ = _cat_compare_op('__le__')
__ge__ = _cat_compare_op('__ge__')
# for Series/ndarray like compat
@property
def shape(self):
""" Shape of the Categorical.
For internal compatibility with numpy arrays.
Returns
-------
shape : tuple
"""
return tuple([len(self._codes)])
def shift(self, periods):
"""
Shift Categorical by desired number of periods.
Parameters
----------
periods : int
Number of periods to move, can be positive or negative
Returns
-------
shifted : Categorical
"""
# since categoricals always have ndim == 1, an axis parameter
# doesn't make any sense here.
codes = self.codes
if codes.ndim > 1:
raise NotImplementedError("Categorical with ndim > 1.")
if np.prod(codes.shape) and (periods != 0):
codes = np.roll(codes, com._ensure_platform_int(periods), axis=0)
if periods > 0:
codes[:periods] = -1
else:
codes[periods:] = -1
return Categorical.from_codes(codes,
categories=self.categories,
ordered=self.ordered)
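# Illustrative sketch (added note, not part of pandas): shifting introduces
# NaN codes at the vacated positions, e.g.
#
#     >>> c = Categorical(['a', 'b', 'c'])
#     >>> c.shift(1)
#     [NaN, a, b]
#     Categories (3, object): [a, b, c]
#     >>> c.shift(-1)
#     [b, c, NaN]
#     Categories (3, object): [a, b, c]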
def __array__(self, dtype=None):
"""
The numpy array interface.
Returns
-------
values : numpy array
A numpy array of either the specified dtype or, if dtype==None (default), the same
dtype as categorical.categories.dtype
"""
ret = take_1d(self.categories.values, self._codes)
if dtype and not is_dtype_equal(dtype,self.categories.dtype):
return np.asarray(ret, dtype)
return ret
def __setstate__(self, state):
"""Necessary for making this object picklable"""
if not isinstance(state, dict):
raise Exception('invalid pickle state')
# Provide compatibility with pre-0.15.0 Categoricals.
if '_codes' not in state and 'labels' in state:
state['_codes'] = state.pop('labels')
if '_categories' not in state and '_levels' in state:
state['_categories'] = \
self._validate_categories(state.pop('_levels'))
# 0.16.0 ordered change
if '_ordered' not in state:
# >= 0.15.0, < 0.16.0
if 'ordered' in state:
state['_ordered'] = state.pop('ordered')
else:
state['_ordered'] = False
for k, v in compat.iteritems(state):
setattr(self, k, v)
@property
def T(self):
return self
@property
def nbytes(self):
return self._codes.nbytes + self._categories.values.nbytes
def memory_usage(self, deep=False):
"""
Memory usage of my values
Parameters
----------
deep : bool
Introspect the data deeply, interrogate
`object` dtypes for system-level memory consumption
Returns
-------
bytes used
Notes
-----
Memory usage does not include memory consumed by elements that
are not components of the array if deep=False
See Also
--------
numpy.ndarray.nbytes
"""
return self._codes.nbytes + self._categories.memory_usage(deep=deep)
def searchsorted(self, v, side='left', sorter=None):
"""Find indices where elements should be inserted to maintain order.
Find the indices into a sorted Categorical `self` such that, if the
corresponding elements in `v` were inserted before the indices, the
order of `self` would be preserved.
Parameters
----------
v : array_like
Array-like values or a scalar value, to insert/search for in `self`.
side : {'left', 'right'}, optional
If 'left', the index of the first suitable location found is given.
If 'right', return the last such index. If there is no suitable
index, return either 0 or N (where N is the length of `a`).
sorter : 1-D array_like, optional
Optional array of integer indices that sort `self` into ascending
order. They are typically the result of ``np.argsort``.
Returns
-------
indices : array of ints
Array of insertion points with the same shape as `v`.
See Also
--------
Series.searchsorted
numpy.searchsorted
Notes
-----
Binary search is used to find the required insertion points.
Examples
--------
>>> x = pd.Categorical(['apple', 'bread', 'bread', 'cheese', 'milk' ])
[apple, bread, bread, cheese, milk]
Categories (4, object): [apple < bread < cheese < milk]
>>> x.searchsorted('bread')
array([1]) # Note: an array, not a scalar
>>> x.searchsorted(['bread'])
array([1])
>>> x.searchsorted(['bread', 'eggs'])
array([1, 4])
>>> x.searchsorted(['bread', 'eggs'], side='right')
array([3, 4]) # eggs before milk
>>> x = pd.Categorical(['apple', 'bread', 'bread', 'cheese', 'milk', 'donuts' ])
>>> x.searchsorted(['bread', 'eggs'], side='right', sorter=[0, 1, 2, 3, 5, 4])
array([3, 5]) # eggs after donuts, after switching milk and donuts
"""
if not self.ordered:
raise ValueError("Categorical not ordered\n"
"you can use .as_ordered() to change the Categorical to an ordered one\n")
from pandas.core.series import Series
values_as_codes = self.categories.values.searchsorted(Series(v).values, side)
return self.codes.searchsorted(values_as_codes, sorter=sorter)
def isnull(self):
"""
Detect missing values
Both missing values (-1 in .codes) and NA as a category are detected.
Returns
-------
a boolean array of whether my values are null
See also
--------
pandas.isnull : pandas version
Categorical.notnull : boolean inverse of Categorical.isnull
"""
ret = self._codes == -1
# String/object and float categories can hold np.nan
if self.categories.dtype.kind in ['S', 'O', 'f']:
if np.nan in self.categories:
nan_pos = np.where(isnull(self.categories))[0]
# we only have one NA in categories
ret = np.logical_or(ret , self._codes == nan_pos)
return ret
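# Illustrative sketch (added note, not part of pandas): missing values are
# detected whether they come from a -1 code or from NaN being a category, e.g.
#
#     >>> Categorical(['a', None, 'b']).isnull()
#     array([False,  True, False], dtype=bool)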
def notnull(self):
"""
Reverse of isnull
Both missing values (-1 in .codes) and NA as a category are detected as null.
Returns
-------
a boolean array of whether my values are not null
See also
--------
pandas.notnull : pandas version
Categorical.isnull : boolean inverse of Categorical.notnull
"""
return ~self.isnull()
def dropna(self):
"""
Return the Categorical without null values.
Both missing values (-1 in .codes) and NA as a category are detected.
NA is removed from the categories if present.
Returns
-------
valid : Categorical
"""
result = self[self.notnull()]
if isnull(result.categories).any():
result = result.remove_categories([np.nan])
return result
def value_counts(self, dropna=True):
"""
Returns a Series containing counts of each category.
Every category will have an entry, even those with a count of 0.
Parameters
----------
dropna : boolean, default True
Don't include counts of NaN, even if NaN is a category.
Returns
-------
counts : Series
"""
from numpy import bincount
from pandas.core.common import isnull
from pandas.core.series import Series
from pandas.core.index import CategoricalIndex
obj = self.remove_categories([np.nan]) \
if dropna and isnull(self.categories).any() else self
code, cat = obj._codes, obj.categories
ncat, mask = len(cat), 0 <= code
ix, clean = np.arange(ncat), mask.all()
if dropna or clean:
count = bincount(code if clean else code[mask], minlength=ncat)
else:
count = bincount(np.where(mask, code, ncat))
ix = np.append(ix, -1)
ix = Categorical(ix, categories=cat,
ordered=obj.ordered, fastpath=True)
return Series(count, index=CategoricalIndex(ix), dtype='int64')
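# Illustrative sketch (added note, not part of pandas): every category gets an
# entry, including those that never occur, e.g.
#
#     >>> Categorical(['a', 'a', 'b'], categories=['a', 'b', 'c']).value_counts()
#     a    2
#     b    1
#     c    0
#     dtype: int64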
def get_values(self):
""" Return the values.
For internal compatibility with pandas formatting.
Returns
-------
values : numpy array
A numpy array of the same dtype as categorical.categories.dtype or
Index if datetime / periods
"""
# if we are a datetime and period index, return Index to keep metadata
if com.is_datetimelike(self.categories):
return self.categories.take(self._codes)
return np.array(self)
def check_for_ordered(self, op):
""" assert that we are ordered """
if not self.ordered:
raise TypeError("Categorical is not ordered for operation {op}\n"
"you can use .as_ordered() to change the Categorical to an ordered one\n".format(op=op))
def argsort(self, ascending=True, **kwargs):
""" Implements ndarray.argsort.
For internal compatibility with numpy arrays.
Only ordered Categoricals can be argsorted!
Returns
-------
argsorted : numpy array
"""
result = np.argsort(self._codes.copy(), **kwargs)
if not ascending:
result = result[::-1]
return result
def sort_values(self, inplace=False, ascending=True, na_position='last'):
""" Sorts the Category by category value returning a new Categorical by default.
Only ordered Categoricals can be sorted!
Categorical.sort is the equivalent but sorts the Categorical inplace.
Parameters
----------
inplace : boolean, default False
Do operation in place.
ascending : boolean, default True
Sort ascending. Passing False sorts descending
na_position : {'first', 'last'} (optional, default='last')
'first' puts NaNs at the beginning
'last' puts NaNs at the end
Returns
-------
y : Category or None
See Also
--------
Category.sort
"""
if na_position not in ['last','first']:
raise ValueError('invalid na_position: {!r}'.format(na_position))
codes = np.sort(self._codes)
if not ascending:
codes = codes[::-1]
# NaN handling
na_mask = (codes==-1)
if na_mask.any():
n_nans = len(codes[na_mask])
if na_position=="first" and not ascending:
# in this case sort to the front
new_codes = codes.copy()
new_codes[0:n_nans] = -1
new_codes[n_nans:] = codes[~na_mask]
codes = new_codes
elif na_position=="last" and not ascending:
# ... and to the end
new_codes = codes.copy()
pos = len(codes)-n_nans
new_codes[0:pos] = codes[~na_mask]
new_codes[pos:] = -1
codes = new_codes
if inplace:
self._codes = codes
return
else:
return Categorical(values=codes,categories=self.categories, ordered=self.ordered,
fastpath=True)
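    # Illustrative sketch (not part of the original pandas source, output is
    # approximate) of how `na_position` interacts with descending sorts:
    #
    #   >>> c = Categorical(['b', np.nan, 'a'], categories=['a', 'b'], ordered=True)
    #   >>> c.sort_values(ascending=False)                       # NaN stays last
    #   [b, a, NaN]
    #   >>> c.sort_values(ascending=False, na_position='first')  # NaN moves to the front
    #   [NaN, b, a]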
def order(self, inplace=False, ascending=True, na_position='last'):
"""
DEPRECATED: use :meth:`Categorical.sort_values`
Sorts the Category by category value returning a new Categorical by default.
Only ordered Categoricals can be sorted!
Categorical.sort is the equivalent but sorts the Categorical inplace.
Parameters
----------
inplace : boolean, default False
Do operation in place.
ascending : boolean, default True
Sort ascending. Passing False sorts descending
na_position : {'first', 'last'} (optional, default='last')
'first' puts NaNs at the beginning
'last' puts NaNs at the end
Returns
-------
y : Category or None
See Also
--------
Category.sort
"""
warn("order is deprecated, use sort_values(...)",
FutureWarning, stacklevel=2)
return self.sort_values(inplace=inplace, ascending=ascending, na_position=na_position)
def sort(self, inplace=True, ascending=True, na_position='last'):
""" Sorts the Category inplace by category value.
Only ordered Categoricals can be sorted!
        Categorical.order is the equivalent but returns a new Categorical.
Parameters
----------
ascending : boolean, default True
Sort ascending. Passing False sorts descending
        inplace : boolean, default True
Do operation in place.
na_position : {'first', 'last'} (optional, default='last')
'first' puts NaNs at the beginning
'last' puts NaNs at the end
Returns
-------
y : Category or None
See Also
--------
Category.sort_values
"""
return self.sort_values(inplace=inplace, ascending=ascending,
na_position=na_position)
def ravel(self, order='C'):
""" Return a flattened (numpy) array.
For internal compatibility with numpy arrays.
Returns
-------
raveled : numpy array
"""
return np.array(self)
def view(self):
"""Return a view of myself.
For internal compatibility with numpy arrays.
Returns
-------
view : Categorical
Returns `self`!
"""
return self
def to_dense(self):
"""Return my 'dense' representation
For internal compatibility with numpy arrays.
Returns
-------
dense : array
"""
return np.asarray(self)
@deprecate_kwarg(old_arg_name='fill_value', new_arg_name='value')
def fillna(self, value=None, method=None, limit=None):
""" Fill NA/NaN values using the specified method.
Parameters
----------
method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None
Method to use for filling holes in reindexed Series
pad / ffill: propagate last valid observation forward to next valid
backfill / bfill: use NEXT valid observation to fill gap
value : scalar
Value to use to fill holes (e.g. 0)
limit : int, default None
(Not implemented yet for Categorical!)
If method is specified, this is the maximum number of consecutive
NaN values to forward/backward fill. In other words, if there is
a gap with more than this number of consecutive NaNs, it will only
be partially filled. If method is not specified, this is the
maximum number of entries along the entire axis where NaNs will be
filled.
Returns
-------
filled : Categorical with NA/NaN filled
"""
if value is None:
value = np.nan
if limit is not None:
raise NotImplementedError("specifying a limit for fillna has not "
"been implemented yet")
values = self._codes
# Make sure that we also get NA in categories
if self.categories.dtype.kind in ['S', 'O', 'f']:
if np.nan in self.categories:
values = values.copy()
nan_pos = np.where(isnull(self.categories))[0]
# we only have one NA in categories
values[values == nan_pos] = -1
# pad / bfill
if method is not None:
values = self.to_dense().reshape(-1, len(self))
values = interpolate_2d(
values, method, 0, None, value).astype(self.categories.dtype)[0]
values = _get_codes_for_values(values, self.categories)
else:
if not isnull(value) and value not in self.categories:
raise ValueError("fill value must be in categories")
mask = values==-1
if mask.any():
values = values.copy()
values[mask] = self.categories.get_loc(value)
return Categorical(values, categories=self.categories, ordered=self.ordered,
fastpath=True)
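    # Illustrative sketch (not part of the original pandas source): a scalar
    # fill value must already be one of the categories.
    #
    #   >>> c = Categorical(['a', np.nan, 'b'], categories=['a', 'b'])
    #   >>> c.fillna('a')              # -> [a, a, b]
    #   >>> c.fillna('z')              # -> ValueError: fill value must be in categories
    #   >>> c.fillna(method='ffill')   # propagates the last valid value forward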
def take_nd(self, indexer, allow_fill=True, fill_value=None):
""" Take the codes by the indexer, fill with the fill_value.
For internal compatibility with numpy arrays.
"""
# filling must always be None/nan here
# but is passed thru internally
assert isnull(fill_value)
codes = take_1d(self._codes, indexer, allow_fill=True, fill_value=-1)
result = Categorical(codes, categories=self.categories, ordered=self.ordered,
fastpath=True)
return result
take = take_nd
def _slice(self, slicer):
""" Return a slice of myself.
For internal compatibility with numpy arrays.
"""
# only allow 1 dimensional slicing, but can
        # in a 2-d case be passed (slice(None),....)
if isinstance(slicer, tuple) and len(slicer) == 2:
if not is_null_slice(slicer[0]):
raise AssertionError("invalid slicing for a 1-ndim categorical")
slicer = slicer[1]
_codes = self._codes[slicer]
return Categorical(values=_codes,categories=self.categories, ordered=self.ordered,
fastpath=True)
def __len__(self):
"""The length of this Categorical."""
return len(self._codes)
def __iter__(self):
"""Returns an Iterator over the values of this Categorical."""
return iter(self.get_values())
def _tidy_repr(self, max_vals=10, footer=True):
""" a short repr displaying only max_vals and an optional (but default footer) """
num = max_vals // 2
head = self[:num]._get_repr(length=False, footer=False)
tail = self[-(max_vals - num):]._get_repr(length=False,
footer=False)
result = '%s, ..., %s' % (head[:-1], tail[1:])
if footer:
result = '%s\n%s' % (result, self._repr_footer())
return compat.text_type(result)
def _repr_categories(self):
""" return the base repr for the categories """
max_categories = (10 if get_option("display.max_categories") == 0
else get_option("display.max_categories"))
from pandas.core import format as fmt
if len(self.categories) > max_categories:
num = max_categories // 2
head = fmt.format_array(self.categories[:num], None)
tail = fmt.format_array(self.categories[-num:], None)
category_strs = head + ["..."] + tail
else:
category_strs = fmt.format_array(self.categories, None)
# Strip all leading spaces, which format_array adds for columns...
category_strs = [x.strip() for x in category_strs]
return category_strs
def _repr_categories_info(self):
""" Returns a string representation of the footer."""
category_strs = self._repr_categories()
dtype = getattr(self.categories, 'dtype_str', str(self.categories.dtype))
levheader = "Categories (%d, %s): " % (len(self.categories), dtype)
width, height = get_terminal_size()
max_width = get_option("display.width") or width
if com.in_ipython_frontend():
# 0 = no breaks
max_width = 0
levstring = ""
start = True
cur_col_len = len(levheader) # header
sep_len, sep = (3, " < ") if self.ordered else (2, ", ")
linesep = sep.rstrip() + "\n" # remove whitespace
for val in category_strs:
if max_width != 0 and cur_col_len + sep_len + len(val) > max_width:
levstring += linesep + (" " * (len(levheader) + 1))
cur_col_len = len(levheader) + 1 # header + a whitespace
elif not start:
levstring += sep
cur_col_len += len(val)
levstring += val
start = False
        # replace " < ... < " with " ... " simply to save space
return levheader + "["+levstring.replace(" < ... < ", " ... ")+"]"
def _repr_footer(self):
return u('Length: %d\n%s') % (len(self), self._repr_categories_info())
def _get_repr(self, length=True, na_rep='NaN', footer=True):
from pandas.core import format as fmt
formatter = fmt.CategoricalFormatter(self,
length=length,
na_rep=na_rep,
footer=footer)
result = formatter.to_string()
return compat.text_type(result)
def __unicode__(self):
""" Unicode representation. """
_maxlen = 10
if len(self._codes) > _maxlen:
result = self._tidy_repr(_maxlen)
elif len(self._codes) > 0:
result = self._get_repr(length=len(self) > _maxlen)
else:
result = '[], %s' % self._get_repr(length=False,
footer=True,
).replace("\n",", ")
return result
def _maybe_coerce_indexer(self, indexer):
""" return an indexer coerced to the codes dtype """
if isinstance(indexer, np.ndarray) and indexer.dtype.kind == 'i':
indexer = indexer.astype(self._codes.dtype)
return indexer
def __getitem__(self, key):
""" Return an item. """
if isinstance(key, (int, np.integer)):
i = self._codes[key]
if i == -1:
return np.nan
else:
return self.categories[i]
else:
return Categorical(values=self._codes[key], categories=self.categories,
ordered=self.ordered, fastpath=True)
def __setitem__(self, key, value):
""" Item assignment.
Raises
------
ValueError
            If (one or more) value is not in categories or if an assigned `Categorical` does not
            have the same categories
"""
# require identical categories set
if isinstance(value, Categorical):
if not value.categories.equals(self.categories):
raise ValueError("Cannot set a Categorical with another, without identical "
"categories")
rvalue = value if is_list_like(value) else [value]
from pandas import Index
to_add = Index(rvalue).difference(self.categories)
# no assignments of values not in categories, but it's always ok to set something to np.nan
if len(to_add) and not isnull(to_add).all():
raise ValueError("cannot setitem on a Categorical with a new category,"
" set the categories first")
# set by position
if isinstance(key, (int, np.integer)):
pass
# tuple of indexers (dataframe)
elif isinstance(key, tuple):
# only allow 1 dimensional slicing, but can
            # in a 2-d case be passed (slice(None),....)
if len(key) == 2:
if not is_null_slice(key[0]):
raise AssertionError("invalid slicing for a 1-ndim categorical")
key = key[1]
elif len(key) == 1:
key = key[0]
else:
raise AssertionError("invalid slicing for a 1-ndim categorical")
# slicing in Series or Categorical
elif isinstance(key, slice):
pass
# Array of True/False in Series or Categorical
else:
            # There is a bug in numpy, which does not accept a Series as an indexer
# https://github.com/pydata/pandas/issues/6168
# https://github.com/numpy/numpy/issues/4240 -> fixed in numpy 1.9
# FIXME: remove when numpy 1.9 is the lowest numpy version pandas accepts...
key = np.asarray(key)
lindexer = self.categories.get_indexer(rvalue)
# FIXME: the following can be removed after https://github.com/pydata/pandas/issues/7820
# is fixed.
# float categories do currently return -1 for np.nan, even if np.nan is included in the
# index -> "repair" this here
if isnull(rvalue).any() and isnull(self.categories).any():
nan_pos = np.where(isnull(self.categories))[0]
lindexer[lindexer == -1] = nan_pos
lindexer = self._maybe_coerce_indexer(lindexer)
self._codes[key] = lindexer
#### reduction ops ####
def _reduce(self, op, name, axis=0, skipna=True, numeric_only=None,
filter_type=None, **kwds):
""" perform the reduction type operation """
func = getattr(self,name,None)
if func is None:
raise TypeError("Categorical cannot perform the operation {op}".format(op=name))
return func(numeric_only=numeric_only, **kwds)
def min(self, numeric_only=None, **kwargs):
""" The minimum value of the object.
Only ordered `Categoricals` have a minimum!
Raises
------
TypeError
If the `Categorical` is not `ordered`.
Returns
-------
min : the minimum of this `Categorical`
"""
self.check_for_ordered('min')
if numeric_only:
good = self._codes != -1
pointer = self._codes[good].min(**kwargs)
else:
pointer = self._codes.min(**kwargs)
if pointer == -1:
return np.nan
else:
return self.categories[pointer]
def max(self, numeric_only=None, **kwargs):
""" The maximum value of the object.
Only ordered `Categoricals` have a maximum!
Raises
------
TypeError
If the `Categorical` is not `ordered`.
Returns
-------
max : the maximum of this `Categorical`
"""
self.check_for_ordered('max')
if numeric_only:
good = self._codes != -1
pointer = self._codes[good].max(**kwargs)
else:
pointer = self._codes.max(**kwargs)
if pointer == -1:
return np.nan
else:
return self.categories[pointer]
def mode(self):
"""
Returns the mode(s) of the Categorical.
        Empty if nothing occurs at least 2 times. Always returns a `Categorical`, even
        if it contains only one value.
Returns
-------
modes : `Categorical` (sorted)
"""
import pandas.hashtable as htable
good = self._codes != -1
result = Categorical(sorted(htable.mode_int64(_ensure_int64(self._codes[good]))),
categories=self.categories,ordered=self.ordered, fastpath=True)
return result
def unique(self):
"""
        Return the ``Categorical`` whose ``categories`` and ``codes`` are unique.
Unused categories are NOT returned.
- unordered category: values and categories are sorted by appearance
order.
- ordered category: values are sorted by appearance order, categories
keeps existing order.
Returns
-------
unique values : ``Categorical``
"""
from pandas.core.nanops import unique1d
# unlike np.unique, unique1d does not sort
unique_codes = unique1d(self.codes)
cat = self.copy()
# keep nan in codes
cat._codes = unique_codes
# exclude nan from indexer for categories
take_codes = unique_codes[unique_codes != -1]
if self.ordered:
take_codes = sorted(take_codes)
return cat.set_categories(cat.categories.take(take_codes))
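    # Illustrative sketch (not part of the original pandas source, output is
    # approximate): for an unordered categorical both values and categories
    # follow appearance order.
    #
    #   >>> Categorical(list('baabc')).unique()
    #   [b, a, c]
    #   Categories (3, object): [b, a, c]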
def equals(self, other):
"""
Returns True if categorical arrays are equal.
Parameters
----------
other : `Categorical`
Returns
-------
are_equal : boolean
"""
return self.is_dtype_equal(other) and np.array_equal(self._codes, other._codes)
def is_dtype_equal(self, other):
"""
Returns True if categoricals are the same dtype
same categories, and same ordered
Parameters
----------
other : Categorical
Returns
-------
are_equal : boolean
"""
try:
return self.categories.equals(other.categories) and self.ordered == other.ordered
except (AttributeError, TypeError):
return False
def describe(self):
""" Describes this Categorical
Returns
-------
description: `DataFrame`
A dataframe with frequency and counts by category.
"""
counts = self.value_counts(dropna=False)
freqs = counts / float(counts.sum())
from pandas.tools.merge import concat
result = concat([counts,freqs],axis=1)
result.columns = ['counts','freqs']
result.index.name = 'categories'
return result
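    # Illustrative sketch (not part of the original pandas source, formatting
    # is approximate); NaN gets its own row because value_counts is called
    # with dropna=False.
    #
    #   >>> Categorical(['a', 'a', 'b', np.nan]).describe()
    #               counts  freqs
    #   categories
    #   a                2   0.50
    #   b                1   0.25
    #   NaN              1   0.25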
def repeat(self, repeats):
"""
Repeat elements of a Categorical.
See also
--------
numpy.ndarray.repeat
"""
codes = self._codes.repeat(repeats)
return Categorical(values=codes, categories=self.categories,
ordered=self.ordered, fastpath=True)
##### The Series.cat accessor #####
class CategoricalAccessor(PandasDelegate, NoNewAttributesMixin):
"""
Accessor object for categorical properties of the Series values.
    Be aware that assigning to `categories` is an inplace operation, while all methods return
    new categorical data by default (but can be called with `inplace=True`).
Examples
--------
>>> s.cat.categories
>>> s.cat.categories = list('abc')
>>> s.cat.rename_categories(list('cab'))
>>> s.cat.reorder_categories(list('cab'))
>>> s.cat.add_categories(['d','e'])
>>> s.cat.remove_categories(['d'])
>>> s.cat.remove_unused_categories()
>>> s.cat.set_categories(list('abcde'))
>>> s.cat.as_ordered()
>>> s.cat.as_unordered()
"""
def __init__(self, values, index):
self.categorical = values
self.index = index
self._freeze()
def _delegate_property_get(self, name):
return getattr(self.categorical, name)
def _delegate_property_set(self, name, new_values):
return setattr(self.categorical, name, new_values)
@property
def codes(self):
from pandas import Series
return Series(self.categorical.codes, index=self.index)
def _delegate_method(self, name, *args, **kwargs):
from pandas import Series
method = getattr(self.categorical, name)
res = method(*args, **kwargs)
        if res is not None:
return Series(res, index=self.index)
CategoricalAccessor._add_delegate_accessors(delegate=Categorical,
accessors=["categories", "ordered"],
typ='property')
CategoricalAccessor._add_delegate_accessors(delegate=Categorical,
accessors=["rename_categories",
"reorder_categories",
"add_categories",
"remove_categories",
"remove_unused_categories",
"set_categories",
"as_ordered",
"as_unordered"],
typ='method')
##### utility routines #####
def _get_codes_for_values(values, categories):
"""
utility routine to turn values into codes given the specified categories
"""
from pandas.core.algorithms import _get_data_algo, _hashtables
if not is_dtype_equal(values.dtype,categories.dtype):
values = _ensure_object(values)
categories = _ensure_object(categories)
(hash_klass, vec_klass), vals = _get_data_algo(values, _hashtables)
(_, _), cats = _get_data_algo(categories, _hashtables)
t = hash_klass(len(cats))
t.map_locations(cats)
return _coerce_indexer_dtype(t.lookup(vals), cats)
def _convert_to_list_like(list_like):
if hasattr(list_like, "dtype"):
return list_like
if isinstance(list_like, list):
return list_like
if (is_sequence(list_like) or isinstance(list_like, tuple)
or isinstance(list_like, types.GeneratorType)):
return list(list_like)
elif np.isscalar(list_like):
return [list_like]
else:
# is this reached?
return [list_like]
def _concat_compat(to_concat, axis=0):
"""Concatenate an object/categorical array of arrays, each of which is a
single dtype
Parameters
----------
to_concat : array of arrays
axis : int
Axis to provide concatenation in the current implementation this is
always 0, e.g. we only have 1D categoricals
Returns
-------
Categorical
A single array, preserving the combined dtypes
"""
def convert_categorical(x):
# coerce to object dtype
if is_categorical_dtype(x.dtype):
return x.get_values()
return x.ravel()
if get_dtype_kinds(to_concat) - set(['object', 'category']):
# convert to object type and perform a regular concat
from pandas.core.common import _concat_compat
return _concat_compat([np.array(x, copy=False, dtype=object)
for x in to_concat], axis=0)
# we could have object blocks and categoricals here
# if we only have a single categoricals then combine everything
# else its a non-compat categorical
categoricals = [x for x in to_concat if is_categorical_dtype(x.dtype)]
# validate the categories
categories = categoricals[0]
rawcats = categories.categories
for x in categoricals[1:]:
if not categories.is_dtype_equal(x):
raise ValueError("incompatible categories in categorical concat")
# we've already checked that all categoricals are the same, so if their
# length is equal to the input then we have all the same categories
if len(categoricals) == len(to_concat):
# concating numeric types is much faster than concating object types
# and fastpath takes a shorter path through the constructor
return Categorical(np.concatenate([x.codes for x in to_concat], axis=0),
rawcats,
ordered=categoricals[0].ordered,
fastpath=True)
else:
concatted = np.concatenate(list(map(convert_categorical, to_concat)),
axis=0)
return Categorical(concatted, rawcats)
| artistic-2.0 |
mne-tools/mne-tools.github.io | 0.11/_downloads/plot_decoding_sensors.py | 16 | 1976 | """
==========================
Decoding sensor space data
==========================
Decoding, a.k.a. MVPA or supervised machine learning applied to MEG
data in sensor space. Here the classifier is applied to every time
point.
"""
# Authors: Alexandre Gramfort <[email protected]>
# Jean-Remi King <[email protected]>
#
# License: BSD (3-clause)
import matplotlib.pyplot as plt
import mne
from mne import io
from mne.datasets import sample
from mne.decoding import TimeDecoding
print(__doc__)
data_path = sample.data_path()
plt.close('all')
###############################################################################
# Set parameters
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
tmin, tmax = -0.2, 0.5
event_id = dict(aud_l=1, vis_l=3)
# Setup for reading the raw data
raw = io.Raw(raw_fname, preload=True)
raw.filter(2, None, method='iir') # replace baselining with high-pass
events = mne.read_events(event_fname)
# Set up pick list: EEG + MEG - bad channels (modify to your needs)
raw.info['bads'] += ['MEG 2443', 'EEG 053'] # bads + 2 more
picks = mne.pick_types(raw.info, meg='grad', eeg=False, stim=True, eog=True,
exclude='bads')
# Read epochs
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True,
picks=picks, baseline=None, preload=True,
reject=dict(grad=4000e-13, eog=150e-6))
epochs_list = [epochs[k] for k in event_id]
mne.epochs.equalize_epoch_counts(epochs_list)
data_picks = mne.pick_types(epochs.info, meg=True, exclude='bads')
###############################################################################
# Setup decoding: default is linear SVC
td = TimeDecoding(predict_mode='cross-validation', n_jobs=1)
# Fit
td.fit(epochs)
# Compute accuracy
td.score(epochs)
# Plot scores across time
td.plot(title='Sensor space decoding')
| bsd-3-clause |
datapythonista/pandas | pandas/tests/indexes/datetimes/test_reindex.py | 4 | 2145 | from datetime import timedelta
import numpy as np
from pandas import (
DatetimeIndex,
date_range,
)
import pandas._testing as tm
class TestDatetimeIndexReindex:
def test_reindex_preserves_tz_if_target_is_empty_list_or_array(self):
# GH#7774
index = date_range("2013-01-01", periods=3, tz="US/Eastern")
assert str(index.reindex([])[0].tz) == "US/Eastern"
assert str(index.reindex(np.array([]))[0].tz) == "US/Eastern"
def test_reindex_with_same_tz_nearest(self):
# GH#32740
rng_a = date_range("2010-01-01", "2010-01-02", periods=24, tz="utc")
rng_b = date_range("2010-01-01", "2010-01-02", periods=23, tz="utc")
result1, result2 = rng_a.reindex(
rng_b, method="nearest", tolerance=timedelta(seconds=20)
)
expected_list1 = [
"2010-01-01 00:00:00",
"2010-01-01 01:05:27.272727272",
"2010-01-01 02:10:54.545454545",
"2010-01-01 03:16:21.818181818",
"2010-01-01 04:21:49.090909090",
"2010-01-01 05:27:16.363636363",
"2010-01-01 06:32:43.636363636",
"2010-01-01 07:38:10.909090909",
"2010-01-01 08:43:38.181818181",
"2010-01-01 09:49:05.454545454",
"2010-01-01 10:54:32.727272727",
"2010-01-01 12:00:00",
"2010-01-01 13:05:27.272727272",
"2010-01-01 14:10:54.545454545",
"2010-01-01 15:16:21.818181818",
"2010-01-01 16:21:49.090909090",
"2010-01-01 17:27:16.363636363",
"2010-01-01 18:32:43.636363636",
"2010-01-01 19:38:10.909090909",
"2010-01-01 20:43:38.181818181",
"2010-01-01 21:49:05.454545454",
"2010-01-01 22:54:32.727272727",
"2010-01-02 00:00:00",
]
expected1 = DatetimeIndex(
expected_list1, dtype="datetime64[ns, UTC]", freq=None
)
expected2 = np.array([0] + [-1] * 21 + [23], dtype=np.dtype("intp"))
tm.assert_index_equal(result1, expected1)
tm.assert_numpy_array_equal(result2, expected2)
| bsd-3-clause |
abimannans/scikit-learn | examples/applications/plot_stock_market.py | 227 | 8284 | """
=======================================
Visualizing the stock market structure
=======================================
This example employs several unsupervised learning techniques to extract
the stock market structure from variations in historical quotes.
The quantity that we use is the daily variation in quote price: quotes
that are linked tend to cofluctuate during a day.
.. _stock_market:
Learning a graph structure
--------------------------
We use sparse inverse covariance estimation to find which quotes are
correlated conditionally on the others. Specifically, sparse inverse
covariance gives us a graph, that is a list of connection. For each
symbol, the symbols that it is connected too are those useful to explain
its fluctuations.
Clustering
----------
We use clustering to group together quotes that behave similarly. Here,
amongst the :ref:`various clustering techniques <clustering>` available
in the scikit-learn, we use :ref:`affinity_propagation` as it does
not enforce equal-size clusters, and it can automatically choose the
number of clusters from the data.
Note that this gives us a different indication than the graph, as the
graph reflects conditional relations between variables, while the
clustering reflects marginal properties: variables clustered together can
be considered as having a similar impact at the level of the full stock
market.
Embedding in 2D space
---------------------
For visualization purposes, we need to lay out the different symbols on a
2D canvas. For this we use :ref:`manifold` techniques to retrieve 2D
embedding.
Visualization
-------------
The output of the 3 models is combined in a 2D graph where nodes
represent the stocks and edges the:
- cluster labels are used to define the color of the nodes
- the sparse covariance model is used to display the strength of the edges
- the 2D embedding is used to position the nodes in the plan
This example has a fair amount of visualization-related code, as
visualization is crucial here to display the graph. One of the challenges
is to position the labels while minimizing overlap. For this we use a
heuristic based on the direction of the nearest neighbor along each
axis.
"""
print(__doc__)
# Author: Gael Varoquaux [email protected]
# License: BSD 3 clause
import datetime
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import finance
from matplotlib.collections import LineCollection
from sklearn import cluster, covariance, manifold
###############################################################################
# Retrieve the data from Internet
# Choose a time period reasonably calm (not too long ago so that we get
# high-tech firms, and before the 2008 crash)
d1 = datetime.datetime(2003, 1, 1)
d2 = datetime.datetime(2008, 1, 1)
# kraft symbol has now changed from KFT to MDLZ in yahoo
symbol_dict = {
'TOT': 'Total',
'XOM': 'Exxon',
'CVX': 'Chevron',
'COP': 'ConocoPhillips',
'VLO': 'Valero Energy',
'MSFT': 'Microsoft',
'IBM': 'IBM',
'TWX': 'Time Warner',
'CMCSA': 'Comcast',
'CVC': 'Cablevision',
'YHOO': 'Yahoo',
'DELL': 'Dell',
'HPQ': 'HP',
'AMZN': 'Amazon',
'TM': 'Toyota',
'CAJ': 'Canon',
'MTU': 'Mitsubishi',
'SNE': 'Sony',
'F': 'Ford',
'HMC': 'Honda',
'NAV': 'Navistar',
'NOC': 'Northrop Grumman',
'BA': 'Boeing',
'KO': 'Coca Cola',
'MMM': '3M',
'MCD': 'Mc Donalds',
'PEP': 'Pepsi',
'MDLZ': 'Kraft Foods',
'K': 'Kellogg',
'UN': 'Unilever',
'MAR': 'Marriott',
'PG': 'Procter Gamble',
'CL': 'Colgate-Palmolive',
'GE': 'General Electrics',
'WFC': 'Wells Fargo',
'JPM': 'JPMorgan Chase',
'AIG': 'AIG',
'AXP': 'American express',
'BAC': 'Bank of America',
'GS': 'Goldman Sachs',
'AAPL': 'Apple',
'SAP': 'SAP',
'CSCO': 'Cisco',
'TXN': 'Texas instruments',
'XRX': 'Xerox',
    'LMT': 'Lockheed Martin',
'WMT': 'Wal-Mart',
'WBA': 'Walgreen',
'HD': 'Home Depot',
'GSK': 'GlaxoSmithKline',
'PFE': 'Pfizer',
'SNY': 'Sanofi-Aventis',
'NVS': 'Novartis',
'KMB': 'Kimberly-Clark',
'R': 'Ryder',
'GD': 'General Dynamics',
'RTN': 'Raytheon',
'CVS': 'CVS',
'CAT': 'Caterpillar',
'DD': 'DuPont de Nemours'}
symbols, names = np.array(list(symbol_dict.items())).T
quotes = [finance.quotes_historical_yahoo(symbol, d1, d2, asobject=True)
for symbol in symbols]
open = np.array([q.open for q in quotes]).astype(np.float)
close = np.array([q.close for q in quotes]).astype(np.float)
# The daily variations of the quotes are what carry most information
variation = close - open
###############################################################################
# Learn a graphical structure from the correlations
edge_model = covariance.GraphLassoCV()
# standardize the time series: using correlations rather than covariance
# is more efficient for structure recovery
X = variation.copy().T
X /= X.std(axis=0)
edge_model.fit(X)
###############################################################################
# Cluster using affinity propagation
_, labels = cluster.affinity_propagation(edge_model.covariance_)
n_labels = labels.max()
for i in range(n_labels + 1):
print('Cluster %i: %s' % ((i + 1), ', '.join(names[labels == i])))
###############################################################################
# Find a low-dimension embedding for visualization: find the best position of
# the nodes (the stocks) on a 2D plane
# We use a dense eigen_solver to achieve reproducibility (arpack is
# initiated with random vectors that we don't control). In addition, we
# use a large number of neighbors to capture the large-scale structure.
node_position_model = manifold.LocallyLinearEmbedding(
n_components=2, eigen_solver='dense', n_neighbors=6)
embedding = node_position_model.fit_transform(X.T).T
###############################################################################
# Visualization
plt.figure(1, facecolor='w', figsize=(10, 8))
plt.clf()
ax = plt.axes([0., 0., 1., 1.])
plt.axis('off')
# Display a graph of the partial correlations
partial_correlations = edge_model.precision_.copy()
d = 1 / np.sqrt(np.diag(partial_correlations))
partial_correlations *= d
partial_correlations *= d[:, np.newaxis]
non_zero = (np.abs(np.triu(partial_correlations, k=1)) > 0.02)
# Plot the nodes using the coordinates of our embedding
plt.scatter(embedding[0], embedding[1], s=100 * d ** 2, c=labels,
cmap=plt.cm.spectral)
# Plot the edges
start_idx, end_idx = np.where(non_zero)
#a sequence of (*line0*, *line1*, *line2*), where::
# linen = (x0, y0), (x1, y1), ... (xm, ym)
segments = [[embedding[:, start], embedding[:, stop]]
for start, stop in zip(start_idx, end_idx)]
values = np.abs(partial_correlations[non_zero])
lc = LineCollection(segments,
zorder=0, cmap=plt.cm.hot_r,
norm=plt.Normalize(0, .7 * values.max()))
lc.set_array(values)
lc.set_linewidths(15 * values)
ax.add_collection(lc)
# Add a label to each node. The challenge here is that we want to
# position the labels to avoid overlap with other labels
for index, (name, label, (x, y)) in enumerate(
zip(names, labels, embedding.T)):
dx = x - embedding[0]
dx[index] = 1
dy = y - embedding[1]
dy[index] = 1
this_dx = dx[np.argmin(np.abs(dy))]
this_dy = dy[np.argmin(np.abs(dx))]
if this_dx > 0:
horizontalalignment = 'left'
x = x + .002
else:
horizontalalignment = 'right'
x = x - .002
if this_dy > 0:
verticalalignment = 'bottom'
y = y + .002
else:
verticalalignment = 'top'
y = y - .002
plt.text(x, y, name, size=10,
horizontalalignment=horizontalalignment,
verticalalignment=verticalalignment,
bbox=dict(facecolor='w',
edgecolor=plt.cm.spectral(label / float(n_labels)),
alpha=.6))
plt.xlim(embedding[0].min() - .15 * embedding[0].ptp(),
embedding[0].max() + .10 * embedding[0].ptp(),)
plt.ylim(embedding[1].min() - .03 * embedding[1].ptp(),
embedding[1].max() + .03 * embedding[1].ptp())
plt.show()
| bsd-3-clause |
dopplershift/MetPy | src/metpy/deprecation.py | 1 | 11236 | """Matplotlib license for the deprecation module.
License agreement for matplotlib versions 1.3.0 and later
=========================================================
1. This LICENSE AGREEMENT is between the Matplotlib Development Team
("MDT"), and the Individual or Organization ("Licensee") accessing and
otherwise using matplotlib software in source or binary form and its
associated documentation.
2. Subject to the terms and conditions of this License Agreement, MDT
hereby grants Licensee a nonexclusive, royalty-free, world-wide license
to reproduce, analyze, test, perform and/or display publicly, prepare
derivative works, distribute, and otherwise use matplotlib
alone or in any derivative version, provided, however, that MDT's
License Agreement and MDT's notice of copyright, i.e., "Copyright (c)
2012- Matplotlib Development Team; All Rights Reserved" are retained in
matplotlib alone or in any derivative version prepared by
Licensee.
3. In the event Licensee prepares a derivative work that is based on or
incorporates matplotlib or any part thereof, and wants to
make the derivative work available to others as provided herein, then
Licensee hereby agrees to include in any such work a brief summary of
the changes made to matplotlib .
4. MDT is making matplotlib available to Licensee on an "AS
IS" basis. MDT MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, MDT MAKES NO AND
DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF MATPLOTLIB
WILL NOT INFRINGE ANY THIRD PARTY RIGHTS.
5. MDT SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF MATPLOTLIB
FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR
LOSS AS A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING
MATPLOTLIB , OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF
THE POSSIBILITY THEREOF.
6. This License Agreement will automatically terminate upon a material
breach of its terms and conditions.
7. Nothing in this License Agreement shall be deemed to create any
relationship of agency, partnership, or joint venture between MDT and
Licensee. This License Agreement does not grant permission to use MDT
trademarks or trade name in a trademark sense to endorse or promote
products or services of Licensee, or any third party.
8. By copying, installing or otherwise using matplotlib ,
Licensee agrees to be bound by the terms and conditions of this License
Agreement.
License agreement for matplotlib versions prior to 1.3.0
========================================================
1. This LICENSE AGREEMENT is between John D. Hunter ("JDH"), and the
Individual or Organization ("Licensee") accessing and otherwise using
matplotlib software in source or binary form and its associated
documentation.
2. Subject to the terms and conditions of this License Agreement, JDH
hereby grants Licensee a nonexclusive, royalty-free, world-wide license
to reproduce, analyze, test, perform and/or display publicly, prepare
derivative works, distribute, and otherwise use matplotlib
alone or in any derivative version, provided, however, that JDH's
License Agreement and JDH's notice of copyright, i.e., "Copyright (c)
2002-2011 John D. Hunter; All Rights Reserved" are retained in
matplotlib alone or in any derivative version prepared by
Licensee.
3. In the event Licensee prepares a derivative work that is based on or
incorporates matplotlib or any part thereof, and wants to
make the derivative work available to others as provided herein, then
Licensee hereby agrees to include in any such work a brief summary of
the changes made to matplotlib.
4. JDH is making matplotlib available to Licensee on an "AS
IS" basis. JDH MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, JDH MAKES NO AND
DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF MATPLOTLIB
WILL NOT INFRINGE ANY THIRD PARTY RIGHTS.
5. JDH SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF MATPLOTLIB
FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR
LOSS AS A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING
MATPLOTLIB , OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF
THE POSSIBILITY THEREOF.
6. This License Agreement will automatically terminate upon a material
breach of its terms and conditions.
7. Nothing in this License Agreement shall be deemed to create any
relationship of agency, partnership, or joint venture between JDH and
Licensee. This License Agreement does not grant permission to use JDH
trademarks or trade name in a trademark sense to endorse or promote
products or services of Licensee, or any third party.
8. By copying, installing or otherwise using matplotlib,
Licensee agrees to be bound by the terms and conditions of this License
Agreement.
"""
import functools
import warnings
class MetpyDeprecationWarning(UserWarning):
"""A class for issuing deprecation warnings for MetPy users.
In light of the fact that Python builtin DeprecationWarnings are ignored
by default as of Python 2.7 (see link below), this class was put in to
allow for the signaling of deprecation, but via UserWarnings which are not
ignored by default. Borrowed with love from matplotlib.
https://docs.python.org/dev/whatsnew/2.7.html#the-future-for-python-2-x
"""
pass
metpyDeprecation = MetpyDeprecationWarning # noqa: N816
def _generate_deprecation_message(since, message='', name='',
alternative='', pending=False,
obj_type='attribute',
addendum=''):
if not message:
if pending:
message = (
'The {} {} will be deprecated in a '
'future version.'.format(name, obj_type))
else:
message = (
'The {} {} was deprecated in version '
'{}.'.format(name, obj_type, since))
altmessage = ''
if alternative:
altmessage = f' Use {alternative} instead.'
message = message + altmessage
if addendum:
message += addendum
return message
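# Quick sketch of the strings this helper builds (illustrative comment, not
# part of the original module):
#
#   _generate_deprecation_message('0.6.0', name='foo', alternative='bar',
#                                 obj_type='function')
#   # -> 'The foo function was deprecated in version 0.6.0. Use bar instead.'
#
#   _generate_deprecation_message('0.6.0', name='foo', pending=True)
#   # -> 'The foo attribute will be deprecated in a future version.'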
def warn_deprecated(since, message='', name='', alternative='', pending=False,
obj_type='attribute', addendum=''):
"""Display deprecation warning in a standard way.
Parameters
----------
since : str
The release at which this API became deprecated.
message : str, optional
Override the default deprecation message. The format
specifier `%(name)s` may be used for the name of the function,
and `%(alternative)s` may be used in the deprecation message
to insert the name of an alternative to the deprecated
function. `%(obj_type)s` may be used to insert a friendly name
for the type of object being deprecated.
name : str, optional
The name of the deprecated object.
alternative : str, optional
An alternative function that the user may use in place of the
deprecated function. The deprecation warning will tell the user
about this alternative if provided.
pending : bool, optional
If True, uses a PendingDeprecationWarning instead of a
DeprecationWarning.
obj_type : str, optional
The object type being deprecated.
addendum : str, optional
Additional text appended directly to the final message.
Examples
--------
Basic example::
# To warn of the deprecation of "metpy.name_of_module"
warn_deprecated('0.6.0', name='metpy.name_of_module',
obj_type='module')
"""
message = _generate_deprecation_message(since, message, name, alternative,
pending, obj_type)
warnings.warn(message, metpyDeprecation, stacklevel=1)
def deprecated(since, message='', name='', alternative='', pending=False,
obj_type=None, addendum=''):
"""Mark a function or a class as deprecated.
Parameters
----------
since : str
The release at which this API became deprecated. This is
required.
message : str, optional
Override the default deprecation message. The format
specifier `%(name)s` may be used for the name of the object,
and `%(alternative)s` may be used in the deprecation message
to insert the name of an alternative to the deprecated
object. `%(obj_type)s` may be used to insert a friendly name
for the type of object being deprecated.
name : str, optional
The name of the deprecated object; if not provided the name
is automatically determined from the passed in object,
though this is useful in the case of renamed functions, where
the new function is just assigned to the name of the
deprecated function. For example::
def new_function():
...
oldFunction = new_function
alternative : str, optional
An alternative object that the user may use in place of the
deprecated object. The deprecation warning will tell the user
about this alternative if provided.
pending : bool, optional
If True, uses a PendingDeprecationWarning instead of a
DeprecationWarning.
addendum : str, optional
Additional text appended directly to the final message.
Examples
--------
Basic example::
@deprecated('1.4.0')
def the_function_to_deprecate():
pass
"""
def deprecate(obj, message=message, name=name, alternative=alternative,
pending=pending, addendum=addendum):
import textwrap
if not name:
name = obj.__name__
if isinstance(obj, type):
obj_type = 'class'
old_doc = obj.__doc__
func = obj.__init__
def finalize(wrapper, new_doc):
obj.__init__ = wrapper
return obj
else:
obj_type = 'function'
func = obj
old_doc = func.__doc__
def finalize(wrapper, new_doc):
wrapper = functools.wraps(func)(wrapper)
# wrapper.__doc__ = new_doc
return wrapper
message = _generate_deprecation_message(since, message, name,
alternative, pending,
obj_type, addendum)
def wrapper(*args, **kwargs):
warnings.warn(message, metpyDeprecation, stacklevel=2)
return func(*args, **kwargs)
old_doc = textwrap.dedent(old_doc or '').strip('\n')
message = message.strip()
new_doc = ('\n.. deprecated:: {}'
'\n {}\n\n'.format(since, message) + old_doc)
if not old_doc:
            # This is to prevent a spurious 'unexpected unindent' warning from
# docutils when the original docstring was blank.
new_doc += r'\ '
return finalize(wrapper, new_doc)
return deprecate
| bsd-3-clause |
jbaker92/logistic_control_variate | examples/synthetic.py | 1 | 2351 | import os
import pkg_resources
import pickle
import urllib
import numpy as np
import bz2
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split
from ..logistic_regression.logistic_regression import LogisticRegression
class Synthetic:
"""Example that fits the logistic regression model to a synthetic dataset for simple testing"""
def __init__(self, seed = None):
"""Load data into the object"""
self.data_dir = pkg_resources.resource_filename('logistic_control_variate', 'data/')
self.generate_data(seed)
# Holds logistic regression object for this example
self.lr = None
def fit(self,stepsize, n_iters = 10**4):
"""
Fit a Bayesian logistic regression model to the data using the LogisticRegression class.
Parameters:
stepsize - stepsize parameter for the stochastic gradient langevin dynamics
Returns:
lr - fitted LogisticRegression object
"""
self.lr = LogisticRegression( self.X_train, self.X_test, self.y_train, self.y_test )
self.lr.fit(stepsize, n_iters = n_iters)
def generate_data(self,seed):
"""Generate synthetic dataset using standard methods in scikit-learn"""
X, y = make_classification( n_samples = 12000, random_state = seed, n_features = 40, class_sep = 0.4, n_clusters_per_class = 2, weights = [0.2,0.8] )
# Add bias term
X = np.concatenate( ( np.ones( ( 12000, 1 ) ), X ), axis = 1 )
self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(
X, y, test_size = 2000, random_state = seed )
def log_loss_objective( self, stepsize_mat ):
n_rows = stepsize_mat.shape[0]
outputs = np.zeros( stepsize_mat.shape )
for i in range(n_rows):
stepsize_curr = stepsize_mat[i,0]
try:
self.fit(stepsize_curr,10**3)
            except FloatingPointError:
                # penalise stepsizes that cause the fit to diverge and move on
                outputs[i,0] = 8.0
                continue
            outputs[i,0] = np.array( self.lr.training_loss ).mean()
return outputs
if __name__ == '__main__':
example = Synthetic( 13 )
example.fit(1e-6)
llold, llnew = example.lr.postprocess()
print( np.mean( llold ) )
print( np.mean( llnew ) )
print( np.cov( llold ) )
print( np.cov( llnew ) )
| mit |
anooptoffy/Masters-Course-Work-Repository | Semester_2/Machine Perception/Assignment1/question_11.py | 2 | 1343 | import cv2
import numpy as np
from matplotlib import pyplot as plt
# function to perform lane detection
def detectRoads(image):
img_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
img_gr = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# smoothing to remove noise
blur = cv2.GaussianBlur(img_gr, (3, 3), 0)
# detect edges with sobel-x
sbl = cv2.Sobel(blur, -1, 1, 0, ksize=3)
# threshold to remove low intensity edges
sbl[sbl < 100] = 0
# detect lines with probabilistic HLT
lines = cv2.HoughLinesP(sbl, 1, np.pi / 180, 200, minLineLength=10, maxLineGap=5)
# plotting the lanes on original image
for line in lines:
[[x1, y1, x2, y2]] = line
cv2.line(img_rgb, (x1, y1), (x2, y2), (0, 255, 0), 5)
return img_rgb
roads = ["road1.png", "road2.png"]
channel_title = ["Original", "Result"]
fig = plt.figure()
k = 1
# display the results
for road in roads:
img = cv2.imread(road, cv2.IMREAD_COLOR)
img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
channel_image = detectRoads(img)
ax = fig.add_subplot(2, 2, k)
ax.imshow(img_rgb, cmap="gray")
ax.set_title(channel_title[0])
plt.axis("off")
k = k + 1
ax = fig.add_subplot(2, 2, k)
ax.set_title(channel_title[1])
ax.imshow(channel_image, cmap="gray")
k = k + 1
plt.axis("off")
plt.show()
| mit |
schlegelp/tanglegram | tanglegram/tangle.py | 1 | 28171 | # A Python package to plot tanglegrams
#
# Copyright (C) 2017 Philipp Schlegel
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import matplotlib.pyplot as plt
import scipy.cluster as sclust
import scipy.spatial.distance as sdist
import numpy as np
import pandas as pd
import pylab
import math
import logging
from itertools import product
from tqdm import trange, tqdm
__all__ = ['plot', 'entanglement', 'untangle']
# Set up logging
module_logger = logging.getLogger(__name__)
module_logger.setLevel(logging.INFO)
if not module_logger.handlers:
# Generate stream handler
sh = logging.StreamHandler()
sh.setLevel(logging.INFO)
# Create formatter and add it to the handlers
formatter = logging.Formatter(
'%(levelname)-5s : %(message)s (%(name)s)')
sh.setFormatter(formatter)
module_logger.addHandler(sh)
def plot(a, b, labelsA=None, labelsB=None, sort=True,
color_by_diff=True, link_kwargs={}, dend_kwargs={}, sort_kwargs={}):
"""Plot a tanglegram from two dendrograms.
Parameters
----------
(a,b) : pandas.DataFrame | scipy.cluster.hierarchy.linkage
Dendrograms to be compared. If DataFrame, will be
considered a distance matrix and linkage is
generated (see ``link_kwargs``).
(labelsA,labelsB) : list of str
If not provided and a/b are pandas Dataframes,
will try to extract from columns.
    sort :          bool | "random" | "step1side" | "step2side" | "permutations"
If True, will try rearranging dendrogram to
optimise pairing of similar values. You can provide
the exact method to use as a string. ``True``
defaults to "random". See ``untangle()`` for a
full description.
link_kwargs : dict, optional
Keyword arguments to be passed to ``scipy.cluster.hierarchy.linkage``
dend_kwargs : dict, optional
Keyword arguments to be passed to ``scipy.cluster.hierarchy.dendrogram``
sort_kwargs : dict, optional
Keyword arguments to be passed to ``tanglegram.untangle``
Returns
-------
matplotlib figure
"""
plt.style.use('ggplot')
if isinstance(a, pd.DataFrame):
module_logger.info('Generating linkage from distances')
link1 = sclust.hierarchy.linkage(sdist.squareform(a, checks=False), **link_kwargs)
if not labelsA:
labelsA = a.columns.tolist()
elif isinstance(a, np.ndarray):
link1 = a
else:
raise TypeError('Parameter `a` needs to be either pandas DataFrame or numpy array')
if isinstance(b, pd.DataFrame):
module_logger.info('Generating linkage from distances')
link2 = sclust.hierarchy.linkage(sdist.squareform(b, checks=False), **link_kwargs)
if not labelsB:
labelsB = b.columns.tolist()
elif isinstance(b, np.ndarray):
link2 = b
else:
raise TypeError('Parameter `b` needs to be either pandas DataFrame or numpy array')
if sort:
if not isinstance(sort, str):
sort = 'random'
link1, link2 = untangle(link1, link2,
labelsA, labelsB,
method=sort, **sort_kwargs)
fig = pylab.figure(figsize=(8, 8))
# Compute and plot left dendrogram.
ax1 = fig.add_axes([0.05, 0.1, 0.25, 0.8])
Z1 = sclust.hierarchy.dendrogram(link1, orientation='left', labels=labelsA, **dend_kwargs)
# Compute and plot right dendrogram.
ax2 = fig.add_axes([0.7, 0.1, 0.25, 0.8]) # [0.3, 0.71, 0.6, 0.2])
Z2 = sclust.hierarchy.dendrogram(link2, labels=labelsB, orientation='right', **dend_kwargs)
missing = list(set([l for l in Z1['ivl'] if l not in Z2['ivl']] + [l for l in Z2['ivl'] if l not in Z1['ivl']]))
if any(missing):
module_logger.warning('Labels {0} do not exist in both dendrograms'.format(missing))
# Generate middle plot with connecting lines
ax3 = fig.add_axes([0.4, 0.1, 0.2, 0.8])
ax3.axis('off')
ax3.set_xlim((0, 1))
# Get min and max y dimensions
max_y = max(ax1.viewLim.y1, ax2.viewLim.y1)
min_y = min(ax1.viewLim.y0, ax2.viewLim.y0)
# Make sure labels of both dendrograms have the same font size
ax1.set_yticklabels(ax1.get_yticklabels(), fontsize=8)
ax2.set_yticklabels(ax2.get_yticklabels(), fontsize=8)
ax1.set_xticklabels(ax1.get_xticklabels(), fontsize=8)
ax2.set_xticklabels(ax2.get_xticklabels(), fontsize=8)
# Make sure all y axes have same resolution
for _ in [ax3]: # [ax1,ax2,ax3]:
_.set_ylim((min_y, max_y))
# Now iterate over all left leaves
for ix_l, l in enumerate(Z1['ivl']):
# Skip if no corresponding element
if l not in Z2['ivl']:
continue
ix_r = Z2['ivl'].index(l)
coords_l = (ax3.viewLim.y1 - ax3.viewLim.y0) / (len(Z1['leaves'])) * (ix_l + .5)
coords_r = (ax3.viewLim.y1 - ax3.viewLim.y0) / (len(Z2['leaves'])) * (ix_r + .5)
if not color_by_diff:
c = 'black'
else:
v = max(round(.75 - math.fabs(ix_l - ix_r) / len(Z1['ivl']), 2), 0)
c = (v, v, v)
ax3.plot([0, 1], [coords_l, coords_r], '-', linewidth=1, c=c)
module_logger.info('Done. Use matplotlib.pyplot.show() to show plot.')
return fig
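# Illustrative usage sketch (not part of the original module; assumes the
# package exposes ``plot`` at the top level and that both distance matrices
# are symmetric, zero-diagonal and share the same labels):
#
#   import numpy as np
#   import pandas as pd
#   import matplotlib.pyplot as plt
#   import tanglegram as tg
#
#   labels = list('ABCDEF')
#   d1, d2 = np.random.rand(6, 6), np.random.rand(6, 6)
#   d1, d2 = (d1 + d1.T) / 2, (d2 + d2.T) / 2
#   np.fill_diagonal(d1, 0)
#   np.fill_diagonal(d2, 0)
#   dm1 = pd.DataFrame(d1, columns=labels, index=labels)
#   dm2 = pd.DataFrame(d2, columns=labels, index=labels)
#   fig = tg.plot(dm1, dm2, sort='step2side')
#   plt.show()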
def rotate(linkage, i, copy=True):
"""Rotate linkage at given hinge."""
# Make copy
if copy:
linkage = linkage.copy()
# Rotate
linkage[i][0], linkage[i][1] = linkage[i][1], linkage[i][0]
return linkage
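# Tiny sketch of what `rotate` does (illustrative; assumes the usual scipy
# linkage layout where each row is [left child, right child, distance, count]):
#
#   Z = np.array([[0., 1., 0.5, 2.],
#                 [2., 3., 1.0, 3.]])
#   rotate(Z, 1)[1, :2]    # children of the top hinge are swapped -> [3., 2.]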
def get_all_linkage(linkage, li_MID):
"""Generate all possible combinations of rotations for a given linkage.
Parameters
----------
linkage : scipy.cluster.hierarchy.linkage
li_MID : int
Index (from the top) of the linkage at which to stop rotating.
"""
length = len(linkage)
permutations = linkage.reshape(-1, length, 4)
i = length - 1
while i >= li_MID:
for item in permutations:
# Make copy
new = item.copy()
# Rotate
new[i][0], new[i][1] = new[i][1], new[i][0]
# Append this permutation
permutations = np.append(permutations, new)
permutations = permutations.reshape(-1, length, 4)
i -= 1
return permutations
class CachedGenerator:
"""Caches result of generator for re-use."""
def __init__(self, generator):
self.generator = generator
self._cache = []
def __iter__(self):
self.n = 0
return self
def __next__(self):
if self.n > (len(self._cache) - 1):
self._cache.append(next(self.generator))
value = self._cache[self.n]
self.n += 1
return value
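# Minimal usage sketch (illustrative, not part of the original module): the
# wrapped generator is advanced at most once, repeated iteration is replayed
# from the cache.
#
#   gen = CachedGenerator(i * i for i in range(3))
#   list(gen)    # -> [0, 1, 4]  (computed and cached)
#   list(gen)    # -> [0, 1, 4]  (served from the cache)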
def get_all_linkage_gen(linkage, stop, labels, start=0):
"""Generator for all possible combinations of rotations for a given linkage.
Parameters
----------
linkage : scipy.cluster.hierarchy.linkage
    stop :          int
Index of the linkage at which to stop rotating. Counts from
top.
labels : list
Labels for given linkage.
start : int
At what hinge to start returning permutations.
Yields
------
new_link : np.ndarray
A permutation of linkage rotations.
lindex : dict
The mapping of labels to leaf indices.
"""
length = len(linkage)
linkage = linkage.reshape(-1, length, 4)
# Invert to/from
start = length - 1 - start
stop = length - 1 - stop
i = length - 1
while i > stop:
# Use range because linkage will change in size as we edit it
for j in range(len(linkage)):
# This is the new linkage matrix
new = linkage[j].copy()
new[i][0], new[i][1] = new[i][1], new[i][0]
if i <= start:
# This is the leaf order
lindex = leaf_order(new, labels, as_dict=True)
yield new, lindex
linkage = np.append(linkage, new)
linkage = linkage.reshape(-1, length, 4)
i -= 1
def bottom_up(stop, link1, link2, labels1, labels2, L=1.5):
"""Rotate dendrogram from bottom to "stop" and find smallest entanglement."""
# Find leafs and entanglement of start position
lindex1 = leaf_order(link1, labels1, as_dict=True)
lindex2 = leaf_order(link2, labels2, as_dict=True)
min_entang = entanglement(lindex1, lindex2, L=L)
org_entang = float(min_entang)
# No go over each hinge/knot from bottom to "stop" and rotate it
for i in range(stop):
# Rotate left and right linkage
new1 = rotate(link1, i)
new2 = rotate(link2, i)
# Generate leafs for the new variants
lindexn1 = leaf_order(new1, labels1, as_dict=True)
lindexn2 = leaf_order(new2, labels2, as_dict=True)
# Now test pairwise entanglement
for j, lx1 in zip([link1, new1],
[lindex1, lindexn1]):
for k, lx2 in zip([link2, new2],
[lindex2, lindexn2]):
new_entang = entanglement(lx1, lx2, L=L)
if new_entang < min_entang:
min_entang = new_entang
link1 = j
link2 = k
lindex1 = lx1
lindex2 = lx2
improved = min_entang < org_entang
return link1, link2, min_entang, improved
def refine(best_linkage1, best_linkage2, min_entang, labels1, labels2, L=1.5):
"""Refine rotation to maximize horizontal lines."""
org_entang = float(min_entang)
lindex1 = leaf_order(best_linkage1, labels1, as_dict=True)
lindex2 = leaf_order(best_linkage2, labels2, as_dict=True)
# For each label
for k in list(lindex1):
find1 = lindex1[k]
find2 = lindex2[k]
# If this label is not aligned between left and right dendrogram
if find1 != find2:
# Find the first hinges for this label
knot1 = np.where(best_linkage1 == find1)[0][0]
knot2 = np.where(best_linkage2 == find2)[0][0]
# Rotate around these hinges
new1 = rotate(best_linkage1, knot1)
new2 = rotate(best_linkage2, knot2)
all1 = np.append([best_linkage1], [new1], axis=0)
all2 = np.append([best_linkage2], [new2], axis=0)
all1_lindices = []
for j in all1:
all1_lindices.append(leaf_order(j, labels1, as_dict=True))
all2_lindices = []
for k in all2:
all2_lindices.append(leaf_order(k, labels2, as_dict=True))
# Check if any of the new versions are better than the old
for j, lix1 in zip(all1, all1_lindices):
for k, lix2 in zip(all2, all2_lindices):
new_entang = entanglement(lix1, lix2, L=L)
if new_entang < min_entang:
min_entang = new_entang
best_linkage1 = j
best_linkage2 = k
lindex1 = lix1
lindex2 = lix2
improved = min_entang < org_entang
return best_linkage1, best_linkage2, min_entang, improved
def untangle(link1, link2, labels1, labels2, method='random', L=1.5, **kwargs):
"""Untangle two dendrograms using various methods.
Parameters
----------
link1,link2 : scipy.cluster.hierarchy.linkage
Linkages to untangle.
labels1,labels2 : list
Labels for link1 and link2, respectively.
    method :        "random" | "step1side" | "step2side" | "permutations"
Method to use for untangling. In order of increasing
run-time:
- "random" shuffles the dendrograms ``R`` times
- "step1side" turns every hinge in ``link1`` and
keeps ``link2`` fix
- "step2side" turns every hinge in both dendrograms
- "permutations" runs permutations of rotations for
both dendrograms (has ``O(n^2)^2`` complexity)
**kwargs
                    Passed to the respective untangling functions; see those
                    functions for the accepted keyword arguments.
"""
if method == 'random':
return untangle_random_search(link1, link2, labels1, labels2, L=L, **kwargs)
elif method == 'step1side':
return untangle_step_rotate_1side(link1, link2, labels1, labels2, L=L, **kwargs)
elif method == 'step2side':
return untangle_step_rotate_2side(link1, link2, labels1, labels2, L=L, **kwargs)
elif method == 'permutations':
return untangle_permutations(link1, link2, labels1, labels2, L=L, **kwargs)
else:
raise ValueError(f'Unknown method "{method}"')
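# Hedged usage sketch (illustrative, not part of the original module): the
# label lists must match the leaves of their respective linkages.
#
#   Z1 = sclust.hierarchy.linkage(np.random.rand(10, 3))
#   Z2 = sclust.hierarchy.linkage(np.random.rand(10, 3))
#   labels = [str(i) for i in range(10)]
#   Z1r, Z2r = untangle(Z1, Z2, labels, labels, method='step2side')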
def untangle_permutations(link1, link2, labels1, labels2, L=1.5, n_permute=-1,
target_ent=0, progress=True):
"""Untangle by greedily testing all possible permutations of rotations.
This algorithm has O(n^2)^2 complexity and can run very long! In brief:
1. Start at N = 1
2. Find all possible permutations of rotating the top N hinges
3. For each permutation, test the entanglement of rotating each individual
hinge from the bottom up to the top hinge.
4. Keep the combination of the best permutation + bottom-up rotations
5. Increase N by +1
6. Go back to step 2 and repeat until we reached our target entanglement.
Parameters
----------
link1,link2 : scipy.cluster.hierarchy.linkage
Linkages to (better) align by rotating.
labels1,labels2 : list
Labels for link1 and link2, respectively.
L : float
Distance norm used to calculate the entanglement.
Passed to ``entanglement()``.
n_permute : int
Number of hinges from to permute. Positive values count
from the top, negative from the bottom. The default of
-1 means that permutations will be run for all hinges.
target_ent : float [0-1]
Target entanglement.
progress : bool
Whether to show a progress bar.
Returns
-------
link1,link2
Reordered linkages.
"""
# TODO:
# - once all possible permutations are computed, we could test them using
# parallel processes
# Keep track of past entanglements
entang = [float('inf')]
bar_format = ("{l_bar}{bar}| [{elapsed}<{remaining}, "
"{rate_fmt}, N {postfix[0]}/{postfix[1]}, "
"entangl {postfix[2]:.4f}]")
if n_permute == 0:
raise ValueError('`n_permute` must not be zero')
elif n_permute < 0:
# Translate to count from top
n_permute = len(link1) - (n_permute + 1)
elif n_permute > len(link1):
        raise ValueError('`n_permute` must not be greater than the number of hinges')
with tqdm(desc='Searching', leave=False, total=2,
postfix=[1, n_permute, 1],
bar_format=bar_format, disable=not progress) as pbar:
for ix in range(1, n_permute):
# Keep track of minimal entanglement this round
min_entang = entang[-1]
if progress:
pbar.total = ((2**ix-1) - (2**(ix-1)-1))**2
pbar.n = 0
pbar.postfix[0] = ix
# Now test these combinations
link_gen1 = get_all_linkage_gen(link1, stop=ix, labels=labels1, start=ix-1)
for i, lindex1 in link_gen1:
link_gen2 = get_all_linkage_gen(link2, stop=ix, labels=labels2, start=ix-1)
for j, lindex2 in link_gen2:
best_linkage1 = i
best_linkage2 = j
# Now optimize from the bottom up to li_MID
# Coarse optimization
(best_linkage1,
best_linkage2,
this_entang,
improved1) = bottom_up(ix,
best_linkage1, best_linkage2,
labels1, labels2, L=L)
# Fine optimization
(best_linkage1,
best_linkage2,
this_entang,
improved2) = refine(best_linkage1, best_linkage2,
this_entang,
labels1, labels2, L=L)
# Keep this iteration if it's better than the previous
if this_entang < min_entang:
final_linkage1 = best_linkage1
final_linkage2 = best_linkage2
min_entang = this_entang
if progress:
pbar.postfix[2] = this_entang
if progress:
pbar.update()
# Stop if optimal entangle found
if min_entang <= target_ent:
break
# Stop if optimal entangle found
if min_entang <= target_ent:
break
            # Track how entanglement evolves
entang.append(min_entang)
# Convergence condition:
# If entanglement is optimal
if entang[-1] <= target_ent:
break
module_logger.info(f'Finished optimising at entanglement {entang[-1]:.2f}')
return final_linkage1, final_linkage2
def untangle_step_rotate_2side(link1, link2, labels1, labels2,
direction='down', L=1.5, max_n_iterations=10):
"""Untangle by stepwise rotating around all hinges in both dendrograms.
    This is a greedy forward algorithm that rotates the first dendrogram, then
the second, then the first again and so on until a locally optimal solution
is found. The break condition is either ``max_n_iterations`` reached or
no improved entanglement in two consecutive iterations.
Parameters
----------
link1,link2 : scipy.cluster.hierarchy.linkage
Linkages to (better) align by rotating.
labels1,labels2 : list
Labels for link1 and link2, respectively.
direction : "down" | "up"
Whether to start at the top and move down (default) or
        start at the leaves and move up.
L : float
Distance norm used to calculate the entanglement.
Passed to ``entanglement()``.
max_n_iterations : int
Max iterations (default = 10) to run.
Returns
-------
link1,link2
Reordered linkages.
"""
assert direction in ('down', 'up')
min_entang = float('inf')
for i in range(int(max_n_iterations)):
# Rotate the first dendrogram
link1, link2 = untangle_step_rotate_1side(link1, link2,
labels1, labels2,
L=L, direction=direction)
# Now rotate the second dendrogram
link2, link1 = untangle_step_rotate_1side(link2, link1,
labels2, labels1,
L=L, direction=direction)
# Get the new entanglement
lindex1 = leaf_order(link1, labels1, as_dict=True)
lindex2 = leaf_order(link2, labels2, as_dict=True)
# Get new entanglement
new_entang = entanglement(lindex1, lindex2, L=L)
# Stop if there is no improvement from the last iteration
if new_entang == min_entang:
break
else:
min_entang = new_entang
if min_entang == 0:
break
return link1, link2
def untangle_step_rotate_1side(link1, link2, labels1, labels2,
direction='down', L=1.5):
"""Untangle by stepwise rotating around all hinges in one dendrogram.
Parameters
----------
link1,link2 : scipy.cluster.hierarchy.linkage
Linkages to (better) align by rotating.
labels1,labels2 : list
Labels for link1 and link2, respectively.
direction : "down" | "up"
Whether to start at the top and move down (default) or
        start at the leaves and move up.
L : float
Distance norm used to calculate the entanglement.
Passed to ``entanglement()``.
Returns
-------
link1,link2
Reordered linkages.
"""
assert direction in ('down', 'up')
# Get label indices
lindex1 = leaf_order(link1, labels1, as_dict=True)
lindex2 = leaf_order(link2, labels2, as_dict=True)
# Get starting entanglement
min_entang = entanglement(lindex1, lindex2, L=L)
n_hinges = len(link1) - 1
for i in range(n_hinges):
if direction == 'down':
i = n_hinges - i
# Shuffle dendrograms
r_link1 = rotate(link1, i, copy=True)
# Get label indices
r_lindex1 = leaf_order(r_link1, labels1, as_dict=True)
# Get new entanglement
new_entang = entanglement(r_lindex1, lindex2, L=L)
        # Check if the new entanglement is better
if new_entang < min_entang:
min_entang = new_entang
link1 = r_link1
if min_entang == 0:
break
return link1, link2
def untangle_random_search(link1, link2, labels1, labels2, R=100, L=1.5):
"""Untangle dendrogram using a simple random search.
Shuffle trees and see if entanglement got better.
Parameters
----------
link1,link2 : scipy.cluster.hierarchy.linkage
Linkages to (better) align by shuffling.
labels1,labels2 : list
Labels for link1 and link2, respectively.
R : int
Number of shuffles to perform.
L : float
Distance norm used to calculate the entanglement.
Passed to ``entanglement()``.
Returns
-------
link1,link2
Reordered linkages.
"""
# Get label indices
lindex1 = leaf_order(link1, labels1, as_dict=True)
lindex2 = leaf_order(link2, labels2, as_dict=True)
# Get starting entanglement
min_entang = entanglement(lindex1, lindex2, L=L)
for i in range(int(R)):
# Shuffle dendrograms
s_link1 = shuffle_dendogram(link1)
s_link2 = shuffle_dendogram(link2)
# Get label indices
s_lindex1 = leaf_order(s_link1, labels1, as_dict=True)
s_lindex2 = leaf_order(s_link2, labels2, as_dict=True)
# Get new entanglement
new_entang = entanglement(s_lindex1, s_lindex2, L=L)
        # Check if the new entanglement is better
if new_entang < min_entang:
min_entang = new_entang
link1 = s_link1
link2 = s_link2
if min_entang == 0:
break
return link1, link2
def shuffle_dendogram(link, copy=True):
"""Randomly shuffle dendrogram.
Parameters
----------
link : scipy.cluster.hierarchy.linkage
Returns
-------
s_link : scipy.cluster.hierarchy.linkage
Shuffled linkage.
"""
assert isinstance(link, np.ndarray)
# How many hinges to rotate
n_rot = np.random.randint(len(link))
# Which hinges to rotate
to_rot = np.random.choice(np.arange(len(link)), n_rot, replace=False)
# Make a copy of the original
if copy:
s_link = link.copy()
else:
s_link = link
# Rotate hinges
s_link[to_rot, :2] = s_link[to_rot, :2][:, ::-1]
return s_link
def leaf_order(link, labels=None, as_dict=True):
"""Generate leaf label order for given linkage.
Parameters
----------
link : scipy.cluster.hierarchy.linkage
Linkage to get leaf label order for.
labels : list, optional
        If provided, return ordered labels; otherwise return indices.
as_dict : bool
If True (default), returns a dictionary mapping labels/indices
to leaf indices.
Returns
-------
dict
        If ``as_dict=True`` return as ``{'l1': 1, 'l2': 5, ...}``.
list
If ``as_dict=False`` return as ``['l4', 'l3', 'l1', ...]``.
"""
# This gives us the order of the original labels
leafs_ix = sclust.hierarchy.leaves_list(link)
if as_dict:
if not isinstance(labels, type(None)):
return dict(zip(labels, leafs_ix))
else:
return dict(zip(np.arange(len(leafs_ix)), leafs_ix))
else:
if not isinstance(labels, type(None)):
return np.asarray(labels)[leafs_ix]
else:
return leafs_ix
def entanglement(lindex1, lindex2, L=1.5):
"""Calculage average displacement of leafs in dendogram 1 and 2.
Entanglement is a measure between 1 (full entanglement) and 0 (no
entanglement). Ignores leafs that aren't present in both dendrograms.
Parameters
----------
lindex1,lindex2 : dict
Dictionaries mapping the labels of two dendrograms
to their indices.
L : any positive number
Distance norm to use for measuring the distance
between the two trees. Can be any positive number,
often one will want to use 0, 1, 1.5 or 2:
``sum(abs(x-y)^L)``.
"""
assert isinstance(lindex1, dict)
assert isinstance(lindex2, dict)
exist_in_both = list(set(lindex1) & set(lindex2))
if not exist_in_both:
raise ValueError('Not a single matching label in both dendrograms.')
# Absolute distance
dist = np.array([lindex1[l] - lindex2[l] for l in exist_in_both])
dist = np.abs(dist)
# Absolute entanglement
ent = np.sum(dist ** L)
# Worst case
ix = np.arange(max(len(lindex1), len(lindex2)))
worst = np.sum(np.abs(ix - ix[::-1]) ** L)
    # Normalized entanglement
return ent / worst
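# Small worked example (illustration only, not part of the original module):
# with lindex1 = {'A': 0, 'B': 1, 'C': 2} and lindex2 = {'A': 1, 'B': 0, 'C': 2}
# and L = 1.5, the displacements are [1, 1, 0], so ent = 1**1.5 + 1**1.5 = 2.0;
# the worst case is |0-2|**1.5 + |1-1|**1.5 + |2-0|**1.5 = about 5.657, giving
# an entanglement of roughly 2.0 / 5.657 = about 0.354.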
if __name__ == '__main__':
labelsA = ['A', 'B', 'C', 'D']
labelsB = ['B', 'A', 'C', 'D']
data = [[ 0, .1, .4, .3],
[.1, 0, .5, .6],
[ .4, .5, 0, .2],
[ .3, .6, .2, 0]]
mat1 = pd.DataFrame(data,
columns=labelsA,
index=labelsA)
mat2 = pd.DataFrame(data,
columns=labelsB,
index=labelsB)
# Plot tanglegram
fig = gen_tangle(mat1, mat2)
plt.show()
| gpl-3.0 |
mrahim/adni_petmr_analysis | classification_fmri_stacking.py | 1 | 2956 | # -*- coding: utf-8 -*-
"""
Compute a ridge that combines PET model and fMRI correlations.
The general formula is :
|Xw - y|^2 + alpha |w - lambda w_tep|^2
By making :
beta = w - lambda w_tep
We have (since w = beta + lambda w_tep, so Xw - y = X beta - (y - lambda X w_tep)) :
|X beta - (y - lambda X w_tep)|^2 + alpha |beta|^2
Created on Wed Jan 21 09:05:28 2015
@author: [email protected]
"""
import os, time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import RidgeCV, LogisticRegression
from sklearn.cross_validation import StratifiedShuffleSplit
from sklearn.datasets.base import Bunch
from fetch_data import fetch_adni_petmr
from fetch_data import set_cache_base_dir, set_features_base_dir,\
set_group_indices
def train_and_test(X, y, train, test):
x_train_stacked = []
x_test_stacked = []
coeffs = []
y_train, y_test = y[train], y[test]
for k in range(X.shape[2]):
x = X[...,k]
x_train, x_test = x[train], x[test]
rdg = RidgeCV(alphas=np.logspace(-3, 3, 7))
rdg.fit(x_train, y_train)
x_train_stacked.append(rdg.predict(x_train))
x_test_stacked.append(rdg.predict(x_test))
coeffs.append(rdg.coef_)
x_train_ = np.asarray(x_train_stacked).T
x_test_ = np.asarray(x_test_stacked).T
lgr = LogisticRegression()
lgr.fit(x_train_, y_train)
probas = lgr.decision_function(x_test_)
scores = lgr.score(x_test_, y_test)
coeff_lgr = lgr.coef_
B = Bunch(score=scores, proba=probas, coeff=coeffs, coeff_lgr=coeff_lgr)
ts = str(int(time.time()))
np.savez_compressed(os.path.join(CACHE_DIR, 'ridge_stacking_fmri_' + ts),
data=B)
return B
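# Hedged usage sketch (not part of the original script): ``train_and_test``
# expects X with shape (n_samples, n_features, n_rois) and a binary label
# vector y; a single split could be scored as, e.g.:
#   split = StratifiedShuffleSplit(y, n_iter=1, test_size=.2)
#   train, test = next(iter(split))
#   result = train_and_test(X, y, train, test)  # Bunch with .score, .proba, .coeff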
###########################################################################
###########################################################################
### set paths
CACHE_DIR = set_cache_base_dir()
FIG_DIR = os.path.join(CACHE_DIR, 'figures', 'petmr')
FEAT_DIR = set_features_base_dir()
FMRI_DIR = os.path.join(FEAT_DIR, 'smooth_preproc', 'fmri_subjects_68seeds')
### load dataset
dataset = fetch_adni_petmr()
fmri = dataset['func']
subj_list = dataset['subjects']
idx = set_group_indices(dataset['dx_group'])
idx_ = np.hstack((idx['EMCI'][0], idx['LMCI'][0]))
img_idx = np.hstack((idx['AD'][0], idx_))
X = []
print 'Loading data ...'
for i in img_idx:
X.append(np.load(os.path.join(FMRI_DIR, subj_list[i]+'.npz'))['corr'])
# X.shape = (n_samples, n_features, n_rois)
X = np.array(X)
y = np.ones(X.shape[0])
y[len(y) - len(idx_):] = 0
print 'Classification ...'
n_iter = 100
sss = StratifiedShuffleSplit(y, n_iter=n_iter, test_size=.2,
random_state=np.random.seed(42))
from joblib import Parallel, delayed
p = Parallel(n_jobs=10, verbose=5)(delayed(train_and_test)(X, y, train, test)\
for train, test in sss)
np.savez_compressed(os.path.join(CACHE_DIR, 'ridge_stacking_fmri_'+str(n_iter)),data=p)
| bsd-2-clause |
alexvmarch/atomic | exatomic/gaussian/tests/test_output.py | 3 | 7007 | # -*- coding: utf-8 -*-
# Copyright (c) 2015-2018, Exa Analytics Development Team
# Distributed under the terms of the Apache License 2.0
#import os
import numpy as np
import pandas as pd
from unittest import TestCase
from exatomic import gaussian
from exatomic.base import resource
from exatomic.gaussian import Output, Fchk
class TestFchk(TestCase):
def setUp(self):
self.mam1 = Fchk(resource('g09-ch3nh2-631g.fchk'))
self.mam2 = Fchk(resource('g09-ch3nh2-augccpvdz.fchk'))
def test_parse_atom(self):
self.mam1.parse_atom()
self.assertEqual(self.mam1.atom.shape[0], 7)
self.assertTrue(np.all(pd.notnull(self.mam1.atom)))
self.mam2.parse_atom()
self.assertEqual(self.mam2.atom.shape[0], 7)
self.assertTrue(np.all(pd.notnull(self.mam2.atom)))
def test_parse_basis_set(self):
self.mam1.parse_basis_set()
self.assertEqual(self.mam1.basis_set.shape[0], 32)
self.assertTrue(np.all(pd.notnull(self.mam1.basis_set)))
self.mam2.parse_basis_set()
self.assertEqual(self.mam2.basis_set.shape[0], 53)
self.assertTrue(np.all(pd.notnull(self.mam2.basis_set)))
def test_parse_orbital(self):
self.mam1.parse_orbital()
self.assertEqual(self.mam1.orbital.shape[0], 28)
self.assertTrue(np.all(pd.notnull(self.mam1.orbital)))
self.mam2.parse_orbital()
self.assertEqual(self.mam2.orbital.shape[0], 91)
self.assertTrue(np.all(pd.notnull(self.mam2.orbital)))
def test_parse_momatrix(self):
self.mam1.parse_momatrix()
self.assertEqual(self.mam1.momatrix.shape[0], 784)
self.assertTrue(np.all(pd.notnull(self.mam1.momatrix)))
self.mam2.parse_momatrix()
self.assertEqual(self.mam2.momatrix.shape[0], 8281)
self.assertTrue(np.all(pd.notnull(self.mam2.momatrix)))
def test_parse_basis_set_order(self):
self.mam1.parse_basis_set_order()
self.assertEqual(self.mam1.basis_set_order.shape[0], 28)
self.assertTrue(np.all(pd.notnull(self.mam1.basis_set_order)))
self.mam2.parse_basis_set_order()
self.assertEqual(self.mam2.basis_set_order.shape[0], 91)
self.assertTrue(np.all(pd.notnull(self.mam2.basis_set_order)))
def test_parse_frame(self):
self.mam1.parse_frame()
self.assertEqual(self.mam1.frame.shape[0], 1)
self.assertTrue(np.all(pd.notnull(self.mam1.frame)))
self.mam2.parse_frame()
self.assertEqual(self.mam2.frame.shape[0], 1)
self.assertTrue(np.all(pd.notnull(self.mam2.frame)))
def test_to_universe(self):
"""Test the to_universe method."""
mam1 = self.mam1.to_universe(ignore=True)
mam2 = self.mam2.to_universe(ignore=True)
for uni in [mam1, mam2]:
for attr in ['atom', 'basis_set', 'basis_set_order',
'momatrix', 'orbital', 'frame']:
self.assertTrue(hasattr(uni, attr))
class TestOutput(TestCase):
"""
This test ensures that the parsing functionality works on
a smattering of output files that were generated with the
Gaussian software package. Target syntax is for Gaussian
09.
"""
def setUp(self):
# TODO : add some cartesian basis set files
# a geometry optimization and
# maybe properties? like the frequency
# and tddft calcs
self.uo2 = Output(resource('g09-uo2.out'))
self.mam3 = Output(resource('g09-ch3nh2-631g.out'))
self.mam4 = Output(resource('g09-ch3nh2-augccpvdz.out'))
def test_parse_atom(self):
self.uo2.parse_atom()
self.assertEqual(self.uo2.atom.shape[0], 3)
self.assertTrue(np.all(pd.notnull(self.uo2.atom)))
self.mam3.parse_atom()
self.assertEqual(self.mam3.atom.shape[0], 7)
self.assertTrue(np.all(pd.notnull(self.mam3.atom)))
self.mam4.parse_atom()
self.assertEqual(self.mam4.atom.shape[0], 7)
self.assertTrue(np.all(pd.notnull(self.mam4.atom)))
def test_parse_basis_set(self):
self.uo2.parse_basis_set()
self.assertEqual(self.uo2.basis_set.shape[0], 49)
self.assertTrue(np.all(pd.notnull(self.uo2.basis_set)))
self.mam3.parse_basis_set()
self.assertEqual(self.mam3.basis_set.shape[0], 32)
self.assertTrue(np.all(pd.notnull(self.mam3.basis_set)))
self.mam4.parse_basis_set()
self.assertEqual(self.mam4.basis_set.shape[0], 53)
self.assertTrue(np.all(pd.notnull(self.mam4.basis_set)))
def test_parse_orbital(self):
self.uo2.parse_orbital()
self.assertEqual(self.uo2.orbital.shape[0], 141)
self.assertTrue(np.all(pd.notnull(self.uo2.orbital)))
self.mam3.parse_orbital()
self.assertEqual(self.mam3.orbital.shape[0], 28)
self.assertTrue(np.all(pd.notnull(self.mam3.orbital)))
self.mam4.parse_orbital()
self.assertEqual(self.mam4.orbital.shape[0], 91)
self.assertTrue(np.all(pd.notnull(self.mam4.orbital)))
def test_parse_momatrix(self):
self.uo2.parse_momatrix()
self.assertEqual(self.uo2.momatrix.shape[0], 19881)
self.assertTrue(np.all(pd.notnull(self.uo2.momatrix)))
self.mam3.parse_momatrix()
self.assertEqual(self.mam3.momatrix.shape[0], 784)
self.assertTrue(np.all(pd.notnull(self.mam3.momatrix)))
self.mam4.parse_momatrix()
self.assertEqual(self.mam4.momatrix.shape[0], 8281)
self.assertTrue(np.all(pd.notnull(self.mam4.momatrix)))
def test_parse_basis_set_order(self):
self.uo2.parse_basis_set_order()
self.assertEqual(self.uo2.basis_set_order.shape[0], 141)
self.assertTrue(np.all(pd.notnull(self.uo2.basis_set_order)))
self.mam3.parse_basis_set_order()
self.assertEqual(self.mam3.basis_set_order.shape[0], 28)
self.assertTrue(np.all(pd.notnull(self.mam3.basis_set_order)))
self.mam4.parse_basis_set_order()
self.assertEqual(self.mam4.basis_set_order.shape[0], 91)
self.assertTrue(np.all(pd.notnull(self.mam4.basis_set_order)))
def test_parse_frame(self):
self.uo2.parse_frame()
self.assertEqual(self.uo2.frame.shape[0], 1)
self.assertTrue(np.all(pd.notnull(self.uo2.frame)))
self.mam3.parse_frame()
self.assertEqual(self.mam3.frame.shape[0], 1)
self.assertTrue(np.all(pd.notnull(self.mam3.frame)))
self.mam4.parse_frame()
self.assertEqual(self.mam4.frame.shape[0], 1)
self.assertTrue(np.all(pd.notnull(self.mam4.frame)))
def test_to_universe(self):
"""Test the to_universe method."""
uo2 = self.uo2.to_universe(ignore=True)
mam3 = self.mam3.to_universe(ignore=True)
for uni in [uo2, mam3]:
for attr in ['atom', 'basis_set', 'basis_set_order',
'momatrix', 'orbital', 'frame']:
self.assertTrue(hasattr(uni, attr))
| apache-2.0 |
lmallin/coverage_test | python_venv/lib/python2.7/site-packages/pandas/tests/indexes/datetimes/test_formats.py | 15 | 1504 | from pandas import DatetimeIndex
import numpy as np
import pandas.util.testing as tm
import pandas as pd
def test_to_native_types():
index = DatetimeIndex(freq='1D', periods=3, start='2017-01-01')
# First, with no arguments.
expected = np.array(['2017-01-01', '2017-01-02',
'2017-01-03'], dtype=object)
result = index.to_native_types()
tm.assert_numpy_array_equal(result, expected)
# No NaN values, so na_rep has no effect
result = index.to_native_types(na_rep='pandas')
tm.assert_numpy_array_equal(result, expected)
# Make sure slicing works
expected = np.array(['2017-01-01', '2017-01-03'], dtype=object)
result = index.to_native_types([0, 2])
tm.assert_numpy_array_equal(result, expected)
# Make sure date formatting works
expected = np.array(['01-2017-01', '01-2017-02',
'01-2017-03'], dtype=object)
result = index.to_native_types(date_format='%m-%Y-%d')
tm.assert_numpy_array_equal(result, expected)
# NULL object handling should work
index = DatetimeIndex(['2017-01-01', pd.NaT, '2017-01-03'])
expected = np.array(['2017-01-01', 'NaT', '2017-01-03'], dtype=object)
result = index.to_native_types()
tm.assert_numpy_array_equal(result, expected)
expected = np.array(['2017-01-01', 'pandas',
'2017-01-03'], dtype=object)
result = index.to_native_types(na_rep='pandas')
tm.assert_numpy_array_equal(result, expected)
| mit |
gladk/trunk | examples/test/psd.py | 10 | 1794 | # encoding: utf-8
#
# demonstrate how to generate sphere packing based on arbitrary PSD (particle size distribution)
# show the difference between size-based and mass-based (≡ volume-based in our case) PSD
#
import matplotlib; matplotlib.rc('axes',grid=True)
from yade import pack
import pylab
# PSD given as points of piecewise-linear function
psdSizes,psdCumm=[.02,0.04,0.045,.05,.06,.08,.12],[0.,0.1,0.3,0.3,.3,.7,1.]
pylab.plot(psdSizes,psdCumm,label='prescribed mass PSD')
sp0=pack.SpherePack();
sp0.makeCloud((0,0,0),(1,1,1),psdSizes=psdSizes,psdCumm=psdCumm,distributeMass=True)
sp1=pack.SpherePack();
sp1.makeCloud((0,0,0),(1,1,1),psdSizes=psdSizes,psdCumm=psdCumm,distributeMass=True,num=5000)
sp2=pack.SpherePack();
sp2.makeCloud((0,0,0),(1,1,1),psdSizes=psdSizes,psdCumm=psdCumm,distributeMass=True,num=20000)
pylab.semilogx(*sp0.psd(bins=30,mass=True),label='Mass PSD of (free) %d random spheres'%len(sp0))
pylab.semilogx(*sp1.psd(bins=30,mass=True),label='Mass PSD of (imposed) %d random spheres'%len(sp1))
pylab.semilogx(*sp2.psd(bins=30,mass=True),label='Mass PSD of (imposed) %d random spheres (scaled down)'%len(sp2))
pylab.legend()
# uniform distribution of size (sp3) and of mass (sp4)
sp3=pack.SpherePack(); sp3.makeCloud((0,0,0),(1,1,1),rMean=0.03,rRelFuzz=2/3.,distributeMass=False);
sp4=pack.SpherePack(); sp4.makeCloud((0,0,0),(1,1,1),rMean=0.03,rRelFuzz=2/3.,distributeMass=True);
pylab.figure()
pylab.plot(*(sp3.psd(mass=True)+('g',)+sp4.psd(mass=True)+('r',)))
pylab.legend(['Mass PSD of size-uniform distribution','Mass PSD of mass-uniform distribution'])
pylab.figure()
pylab.plot(*(sp3.psd(mass=False)+('g',)+sp4.psd(mass=False)+('r',)))
pylab.legend(['Size PSD of size-uniform distribution','Size PSD of mass-uniform distribution'])
pylab.show()
pylab.show() | gpl-2.0 |
nvoron23/scikit-learn | examples/preprocessing/plot_function_transformer.py | 161 | 1949 | """
=========================================================
Using FunctionTransformer to select columns
=========================================================
Shows how to use a function transformer in a pipeline. If you know your
dataset's first principal component is irrelevant for a classification task,
you can use the FunctionTransformer to select all but the first column of the
PCA transformed data.
"""
import matplotlib.pyplot as plt
import numpy as np
from sklearn.cross_validation import train_test_split
from sklearn.decomposition import PCA
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import FunctionTransformer
def _generate_vector(shift=0.5, noise=15):
return np.arange(1000) + (np.random.rand(1000) - shift) * noise
def generate_dataset():
"""
This dataset is two lines with a slope ~ 1, where one has
a y offset of ~100
"""
return np.vstack((
np.vstack((
_generate_vector(),
_generate_vector() + 100,
)).T,
np.vstack((
_generate_vector(),
_generate_vector(),
)).T,
)), np.hstack((np.zeros(1000), np.ones(1000)))
def all_but_first_column(X):
return X[:, 1:]
def drop_first_component(X, y):
"""
Create a pipeline with PCA and the column selector and use it to
transform the dataset.
"""
pipeline = make_pipeline(
PCA(), FunctionTransformer(all_but_first_column),
)
X_train, X_test, y_train, y_test = train_test_split(X, y)
pipeline.fit(X_train, y_train)
return pipeline.transform(X_test), y_test
if __name__ == '__main__':
X, y = generate_dataset()
plt.scatter(X[:, 0], X[:, 1], c=y, s=50)
plt.show()
X_transformed, y_transformed = drop_first_component(*generate_dataset())
plt.scatter(
X_transformed[:, 0],
np.zeros(len(X_transformed)),
c=y_transformed,
s=50,
)
plt.show()
| bsd-3-clause |
isaacullah/AgModel | headless/AgModel_headless.py | 1 | 20056 | #!usr/bin/python
# Model Description
############################
# This model simulates a complex hunter-gatherer band making optimal foraging decisions between a high-ranked resource and a low-ranked resource. The high-ranked resource is rich, but hard to find and process, and potentially very scarce. The low-ranked resource is poor, but common and easy to find and process.
# !! THIS VERSION RUNS HEADLESS (NO GUI), AND CAN BE LOOPED BY AN EXTERNAL PROGRAM !!
## Notes
# Low-ranked resource parameterized for millet.
# Domestic millet produces 1014 kcal/kg, yielding 1000 kg/ha from seeds dispersed at 10 kg/ha, with 176,000 seeds per kilogram (so 1.76 million plants per ha), and a return rate of about 500 kcal/hr once encountered. Assume patch size is one hectare. Data from FAO: http://www.fao.org/ag/AGP/AGPC/doc/gbase/data/pf000280.htm
# Wild millet: Right now, just assuming everything is half as much as domestic millet.
# High ranked resource parameterized as deer, producing 1580 kcal/kg, with 100 kg yield per animal, and a return rate of about 10000 kcal/hr once encountered. Assuming two fawns per year and three days average search time when there are 1000 animals in the vicinity. Some of these are estimates from Bettinger 1991, and some come from USDA nutrition info for venison.
#Note that this model assumes storage of millet, but not of deer
#Changelog:
# v0.3 to v0.4
# selection is now balanced against diffusion in utilized patches.
# diffusion is now density dependent, so that it reduces as the percentage of domestic phenotype in the ecosystem increases.
# added docstrings to classes and functions (no functional changes)
# added variable 'cultiv' to the sparse CLI for added functionality
# DO NOT EDIT BELOW THIS LINE
#########################################################################################################################################
import os
import sys
import numpy as np
import pandas as pd
import argparse
#Set up sparse CLI
parser = argparse.ArgumentParser(description='This model simulates a complex hunter-gatherer band making optimal foraging decisions between a high-ranked resource and a low-ranked resource. The high-ranked resource is rich, but hard to find and process, and potentially very scarce. The low-ranked resource is poor, but common and easy to find and process.')
parser.add_argument('--hbirth', metavar='0.032', type=float, nargs='?', const=.032, default=.032, help='Enter the annual human per capita birth rate')
parser.add_argument('--mselect', metavar='0.03', type=float, nargs='?', const=.03, default=.03, help='Enter the coefficient of selection (e.g., the rate of change from wild-type to domestic type)')
parser.add_argument('--cultiv', metavar='2000', type=int, nargs='?', const=2000, default=2000, help='Enter the number of additional millet plants to added to a patch each year due to proto cultivation of the patch. The patch reduces by the same number if not exploited.')
parser.add_argument('--label', metavar='Z.ZZ', nargs='?', const='1.01', default='1.01', help='This is the experiment and run number. E.g., experiment 1, run 1, should look like: 1.01')
###############################################################
## EDIT THESE VARIABLES AS YOU SEE FIT
# HUMAN VARIABLES
people = 50 ## Enter the initial number of people in the band
maxpeople = 500 ## Enter the maximum human population (just to keep this in the realm of possibility, and to help set the y axis on the plot)
#THIS COMES FROM CLI NOW hbirth = 0.033 ## Enter the annual human per capita birth rate
hdeath = 0.03 ## Enter the annual human per capita death rate
starvthresh = 0.8 ## Enter the starvation threshold (percentage of the total kcal below which people are starving, and effective reproduction goes to 0)
hkcal = 547500.0 #for a 1500 kcal diet,730000.0 for a 2000kcal diet, or 1095000.0 for a 3000kcal diet ## Enter the number of kcals per year required per person
fhours = 4380 ## Enter the number of foraging hours available per person
hgratio = 0.35 ## Enter the ratio of hunters to gatherers in the population (allocates foraging type efforts)
# DEER VARIABLES
deer = 4000 ## Enter the initial number of deer in the hunting region
maxdeer = 6000 ## Enter the maximum number of deer that the region can sustain (carrying capacity) without human predation
dmigrants = 10 ## Enter the number of new deer that migrate into the territory each year (keeps deer pop from being totally wiped out)
dbirth = 0.065 ## Enter the annual per capita birth rate for deer
ddeath = 0.02 ## Enter the annual per capita natural death rate for deer
dret = 158000.0 ## Enter the return rate (number of kcals) per deer killed
ddsrch = 72.0 ## Enter the density dependent search costs for deer (hours time expended per recovery of one deer at the density "ddens")
ddens = 1000 ## Density of deer for which search cost "dsrch" is known
dpatch = 1.0 ## Number of individual deer encountered per discovery
dhndl = 25.0 ## Enter the handling costs for deer (hours handling time expended per deer once encountered)
# MILLET VARIABLES
millet = 750 ## Enter the number of millet patches in the gathering region (assume a patch is ~1ha)
#mout = 100.0 ## Enter the viable seed yield per plant
#mdud = .99 ## Enter the proportion of seeds that don't germinate due to predation or improper emplantation
mretw = 0.0507 ## Enter the return rate (number of kcals) per wild-type millet seed
mretd = 0.1014 ## Enter the return rate (number of kcals) per domestic-type millet seed
mprop = 0.98 ## Enter the starting proportion of wild-type to domestic-type millet (1.0 = all wild, 0.0 = all domestic)
#THIS COMES FROM CLI NOW mselect = 0.03 ## Enter the coefficient of selection (e.g., the rate of change from wild-type to domestic type)
mdiffus = 0.01 ## Enter the coefficient of diffusion for millet (the rate at which selected domestic traits disappear due to crossbreeding)
msrch = 1.0 ## Enter the search costs for millet (hours expended to find one patch of millet)
mpatch = 880000 ## Number of millet plants per patch at the start of the simulation (individuals encountered per discovery)
maxpatch = 1760000 ## Maximum number of millet plants that can be grown per patch (a bit of a teleology, but we need a stopping point for now)
mhndlw = 0.0001 ## Enter the handling costs for wild millet (hours handling time expended per seed once encountered)
mhndld = 0.00005 ## Enter the handling costs for domestic millet (hours handling time expended per seed once encountered)
# SIMULATION CONTROLS
years = 5000 ## Enter the number of years for which to run the simulation
texton = False ## True will run with output text to the terminal, false will run with without text output
# DO NOT EDIT BELOW THIS LINE
#############################################################
#Get values from command line variables
args = vars(parser.parse_args())
hbirth = args["hbirth"]
mselect = args["mselect"]
cultiv = args["cultiv"]
label = args["label"]
#Make some custom functions for the population dynamics
def babymaker(p, n): #p is the per capita birth rate, n is the population size
"""This is a class to pick the number of births in a given year. p is the per capita birth rate, n is the population size."""
babys = 0
for m in range(int(n)):
x = np.random.random()
if x < float(p):
babys = babys + 1
return(babys)
def deathdealer(p, n): #p is the per capita death rate, n is the population size
"""This is a class to pick the number of deaths in a given year. p is the per capita death rate, n is the population size."""
deaths = 0
for m in range(int(n)):
x = np.random.random()
if x < float(p):
deaths = deaths + 1
return(deaths)
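# Note (added for clarity, not in the original model): both helpers above count
# successes over n independent Bernoulli(p) trials, so they are distributionally
# equivalent to a single binomial draw, e.g. np.random.binomial(int(n), p).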
if __name__ == "__main__":
##### Setup the simulation
milletpatches = []
for patch in range(int(millet)): # set up a data container for our millet patches. They will all start out the same.
milletpatches.append([mpatch, mprop])
millet_df = pd.DataFrame(milletpatches, columns=['mpatch','mprop']) # turn this data container into a pandas dataframe for more efficient math and indexing
patchdens_ts = pd.DataFrame(index=range(1,int(millet+1)), columns=range(years+1)) # set up a blank pandas dataframe to catch patch density timeseries stats for possible output
patchprop_ts = pd.DataFrame(index=range(1,int(millet+1)), columns=range(years+1)) # set up a blank pandas dataframe to catch patch domestic proportion timeseries stats for possible output
patchdens_ts[0] = millet_df.mpatch # update with year 0 data
patchprop_ts[0] = millet_df.mprop # update with year 0 data
# set up some individual data containers for the output stats and plots
yr = [0]
hpop = [people]
hkcald = [0]
dpop = [deer]
mpop = [(millet * mpatch)/1000.]
dkil = [0]
mexp = [0]
mdom = [1 - mprop]
mdens = [mpatch/1000]
####### The simulation starts here.
for year in range(1,years+1): #this is the outer loop, that does things at an annual resolution, counting the years down for the simulation
if texton == True: print "Year: %s" % year
kcalneed = people * hkcal # find the number of kcals needed by the band this year
htimebudget = people * fhours * hgratio # find the hunting time budget for the band this year
gtimebudget = people * fhours * (1/hgratio) # find the gathering time budget for the band this year ##NOTE- excess hunting time will be used for gathering
deer_now = deer #set up a variable to track deer population exploitation this year
millet_now = millet #set up a variable to track millet patch exploitation this year
eatmillet = 0 #set up data container to count how many millet patches we ate this year
        eatdeer = 0 #set up data container to count how many deer we ate this year
while kcalneed > 0: #this is the inner loop, doing foraging within the year, until kcal need is satisfied
if deer_now <= 0 and millet_now <= 0:
if texton == True: print "Ate everything!!!"
break
#first calculate info about the current state of millet
mprop_now = np.mean(millet_df.mprop[0:millet_now]) #Note that we are taking the mean proportion acoss all remaining millet patches in the data array.
mpatch_now = np.mean(millet_df.mpatch[0:millet_now]) #Note that we are taking the mean number of individuals perpatch across all remaining patches in the millet data array. Note that we are reading off of the right end of the array list.
mret = (mretw * mprop_now) + (mretd * (1 - mprop_now)) #determine the actual kcal return for millet, based on the proportion of wild to domesticated.
mhndl = (mhndlw * mprop_now) + (mhndld * (1 - mprop_now)) #determine the actual handling time for millet, based on the proportion of wild to domesticated.
if deer_now <= 0:
deerscore = 0
else:
dsrch_now = ddsrch / (deer_now / ddens) #find the actual search time for the amount of deer at this time
deerscore = dret / (dsrch_now + (dhndl * dpatch)) #find the current return rate (kcal/hr) for deer.
if millet_now <= 0:
milletscore = 0
else:
milletscore = (mret * mpatch_now ) / (msrch + (mhndl * mpatch_now)) #find the current return rate (kcal/hr for millet.
#if texton == True; print deerscore, milletscore, kcalneed
if deerscore >= milletscore: #check to see whether the band should eat deer or millet at this moment
## eating deer, so update data containers accordingly
if htimebudget <= 0:
if texton == True: print "Ran out of hunting time this year, hopefully there is gathering time left"
deerscore = 0
pass
if deer_now <= 0: #if they killed all the deer, then go to millet if possible
if texton == True: print "Killed all the deer available this year, will try to make up the remainder of the diet with millet"
deerscore = 0.
pass
else:
kcalneed = kcalneed - dret ## QUESTION: should this be the return for a deer minus the search/handle costs?? Or is that included in the daily dietary need (i.e., the energy expended searching and processing foodstuffs)
htimebudget = htimebudget - (dsrch_now + (dhndl * dpatch))
eatdeer = eatdeer + dpatch
deer_now = deer_now - dpatch
else: ## eating millet, so update data containers accordingly
if gtimebudget <= 0:
if texton == True: print "Ran out of gathering time this year, hopefully there is hunting time left"
milletscore = 0
pass
elif gtimebudget <= 0 and htimebudget > 0:
if texton == True: print "Using remaining hunting time to gather millet"
kcalneed = kcalneed - (mret * mpatch_now)
htimebudget = htimebudget - msrch - (mhndl * mpatch_now)
eatmillet = eatmillet + 1
millet_now = millet_now - 1
elif gtimebudget <=0 and htimebudget <= 0:
if texton == True: print "Not enough hunting time left to use for gathering"
milletscore = 0
pass
else: pass
if millet_now <= 0: #if millet is all gone, then go back to deer
if texton == True: print "Harvested all available millet this year, will try to make up the remainder of the diet with deer."
milletscore = 0
pass
else:
kcalneed = kcalneed - (mret * mpatch_now)
gtimebudget = gtimebudget - msrch - (mhndl * mpatch_now)
eatmillet = eatmillet + 1
millet_now = millet_now - 1
if htimebudget <= 0 and gtimebudget <= 0: #check if they've run out of foraging time, and stop the loop if necessary.
if texton == True: print "Ran out of all foraging time for this year before gathering enough food."
break
if deer <= 0 and millet <= 0: #check if they've run out of food, and stop the loop if necessary.
if texton == True: print "Ate all the deer and all the millet this year before gathering enough food."
break
if deerscore <= 0 and milletscore <= 0: #check if they've run out of food, and stop the loop if necessary.
if texton == True:print "Ate all the deer and all the millet this year before gathering enough food."
break
####### Now that the band has foraged for a year, update human, deer, and millet populations, and implement selection
        if (people * hkcal) - kcalneed <= (people * hkcal * starvthresh): #Check if they starved this year; if so, only deaths occur (no births)
if texton == True: print "Starved a bit this year, no births will occur."
people = people - deathdealer(hdeath, people)
else: #otherwise, balance births and deaths, and adjust the population accordingly
people = people + babymaker(hbirth, people) - deathdealer(hdeath, people)
deer = deer_now + babymaker(dbirth, deer_now) - deathdealer(ddeath, deer_now) + dmigrants #Adjust the deer population by calculating the balance of natural births and deaths on the hunted population, and then add the migrants population
if people > maxpeople: people = maxpeople # don't allow human pop to exceed the limit we set
if deer > maxdeer: deer = maxdeer # don't allow deer pop to exceed natural carrying capacity
        #This part is a bit complicated. We are adjusting the proportions of wild to domestic millet in JUST the millet patches that were exploited this year. We are also adjusting the density of individuals in those patches. This is the effect of the "artificial selection" exhibited by humans while exploiting those patches. At the same time, we are implementing a "diffusion" of wild-type characteristics back to all the patches. If they are used, selection might outweigh diffusion. If they aren't being used, then just diffusion occurs. In this version of the model, diffusion is density dependent, and is adjusted by last year's proportion of domestic to non-domestic millets left in the population.
patch_adjust = [] # make a matrix to do the selection/diffusion on the individual patches based on if they got used or not.
currentmdiffus = mdiffus * (1 - mdom[-1])
for x in range(1,int(millet+1)):
if x < eatmillet:
patch_adjust.append([currentmdiffus-mselect, cultiv])
else:
patch_adjust.append([currentmdiffus, -cultiv])
patch_adjustdf = pd.DataFrame(patch_adjust, columns=['sel', 'cult']) #turn the matrix into a pandas dataframe for easy matrix math
#patch_adjustdf.to_csv("patch_changes.csv") ## this is here for debugging purposes. Uncomment if you want the patch adjustments to be written to a file.
millet_df['mpatch'] = millet_df['mpatch'] = millet_df['mpatch'].where((millet_df['mpatch'] + patch_adjustdf['cult'] > maxpatch - cultiv) | (millet_df['mpatch'] + patch_adjustdf['cult'] < mpatch + cultiv), other=millet_df['mpatch'] + patch_adjustdf['cult']) # adjust the patch density column, but only if the value will stay between mpatch and maxpatch.
millet_df['mprop'] = millet_df['mprop'].where((millet_df['mprop'] + patch_adjustdf['sel'] > 1 - currentmdiffus) | (millet_df['mprop'] + patch_adjustdf['sel'] < 0 + mselect), other=millet_df['mprop'] + patch_adjustdf['sel']) # adjust the selection coefficient column, but only if the value will stay between 1 and 0.
#millet_df.to_csv('supposed_new_patches.csv') ## this is here for debugging purposes. Uncomment if you want the new patches to be written to a file.
#update the patch time-series dataframes with the current year's data
patchdens_ts[year] = millet_df.mpatch
patchprop_ts[year] = millet_df.mprop
######## Okay, now update the data containers
yr.append(year)
hpop.append(people)
hkcald.append((people * hkcal) - kcalneed)
dpop.append(deer)
mpop.append(np.sum(millet_df.mpatch)/1000.)
dkil.append(eatdeer)
mexp.append(eatmillet)
mdom.append(1 - np.mean(millet_df.mprop))
mdens.append((np.mean(millet_df.mpatch))/1000.)
######
###### Simulation has ended, write stats
gsf = '%s%sSimulation_general_stats.%s.csv' % (os.getcwd(), os.sep, label)
mstatsout = pd.DataFrame(data=np.array([yr, hpop, hkcald, dpop, dkil, mpop, mexp, mdom, mdens]).T, columns = ["Year","Total Human Population","Human Kcal Deficit","Total Deer Population","Number of Deer Eaten","Total Millet Population (*10^3)","Number of Millet Patches Exploited","Proportion of Domestic-Type Millet","Average Millet Patch Density (*10^3)"]) # put the main stats data in a pandas data frame for easy formatting
mstatsout.to_csv(gsf, float_format='%.5f')
msf1 = '%s%sSimulation_millet_patch_density_stats.%s.csv' % (os.getcwd(), os.sep, label)
patchdens_ts.to_csv(msf1, float_format='%.5f')
msf2 = '%s%sSimulation_millet_patch_domestic_proportion_stats.%s.csv' % (os.getcwd(), os.sep, label)
patchprop_ts.to_csv(msf2, float_format='%.5f')
sys.exit(0)
| gpl-2.0 |
murali-munna/scikit-learn | examples/decomposition/plot_pca_3d.py | 354 | 2432 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Principal components analysis (PCA)
=========================================================
These figures aid in illustrating how a point cloud
can be very flat in one direction--which is where PCA
comes in to choose a direction that is not flat.
"""
print(__doc__)
# Authors: Gael Varoquaux
# Jaques Grobler
# Kevin Hughes
# License: BSD 3 clause
from sklearn.decomposition import PCA
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
###############################################################################
# Create the data
e = np.exp(1)
np.random.seed(4)
def pdf(x):
return 0.5 * (stats.norm(scale=0.25 / e).pdf(x)
+ stats.norm(scale=4 / e).pdf(x))
y = np.random.normal(scale=0.5, size=(30000))
x = np.random.normal(scale=0.5, size=(30000))
z = np.random.normal(scale=0.1, size=len(x))
density = pdf(x) * pdf(y)
pdf_z = pdf(5 * z)
density *= pdf_z
a = x + y
b = 2 * y
c = a - b + z
norm = np.sqrt(a.var() + b.var())
a /= norm
b /= norm
###############################################################################
# Plot the figures
def plot_figs(fig_num, elev, azim):
fig = plt.figure(fig_num, figsize=(4, 3))
plt.clf()
ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=elev, azim=azim)
ax.scatter(a[::10], b[::10], c[::10], c=density[::10], marker='+', alpha=.4)
Y = np.c_[a, b, c]
# Using SciPy's SVD, this would be:
# _, pca_score, V = scipy.linalg.svd(Y, full_matrices=False)
pca = PCA(n_components=3)
pca.fit(Y)
pca_score = pca.explained_variance_ratio_
V = pca.components_
x_pca_axis, y_pca_axis, z_pca_axis = V.T * pca_score / pca_score.min()
x_pca_axis, y_pca_axis, z_pca_axis = 3 * V.T
x_pca_plane = np.r_[x_pca_axis[:2], - x_pca_axis[1::-1]]
y_pca_plane = np.r_[y_pca_axis[:2], - y_pca_axis[1::-1]]
z_pca_plane = np.r_[z_pca_axis[:2], - z_pca_axis[1::-1]]
x_pca_plane.shape = (2, 2)
y_pca_plane.shape = (2, 2)
z_pca_plane.shape = (2, 2)
ax.plot_surface(x_pca_plane, y_pca_plane, z_pca_plane)
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
elev = -40
azim = -80
plot_figs(1, elev, azim)
elev = 30
azim = 20
plot_figs(2, elev, azim)
plt.show()
| bsd-3-clause |
jyeatman/dipy | doc/examples/piesno.py | 11 | 3970 | """
=============================
Noise estimation using PIESNO
=============================
Often, one is interested in estimating the noise in the diffusion signal. One
of the methods to do this is the Probabilistic Identification and Estimation of
Noise (PIESNO) framework [Koay2009]_. Using this method, one can detect the
standard deviation of the noise from diffusion-weighted imaging (DWI). PIESNO
also works with multiple channel DWI datasets that are acquired from N array
coils for both SENSE and GRAPPA reconstructions.
The PIESNO method works in two steps:
1) First, it finds voxels that are most likely background voxels. Intuitively,
these voxels have very similar diffusion-weighted intensities (up to some noise)
in the fourth dimension of the DWI dataset. White matter, gray matter or CSF
voxels have diffusion intensities that vary quite a lot across different
directions.
2) From these estimated background voxels and the input number of coils N,
PIESNO finds what sigma each Gaussian from each of the N coils would have
generated the observed Rician (N=1) or non-central Chi (N>1) distributed noise
profile in the DWI datasets.
PIESNO makes an important assumption: the Gaussian noise standard deviation is
assumed to be uniform. The noise is uniform across multiple slice locations or
across multiple images of the same location.
For the full details, please refer to the original paper.
In this example, we will demonstrate the use of PIESNO with a 3-shell data-set.
We start by importing necessary modules and functions and loading the data:
"""
import nibabel as nib
import numpy as np
from dipy.denoise.noise_estimate import piesno
from dipy.data import fetch_sherbrooke_3shell, read_sherbrooke_3shell
fetch_sherbrooke_3shell()
img, gtab = read_sherbrooke_3shell()
data = img.get_data()
"""
Now that we have fetched a dataset, we must call PIESNO with the right number
of coils used to acquire this dataset. It is also important to know what
was the parallel reconstruction algorithm used. Here, the data comes from a
GRAPPA reconstruction, was acquired with a 12-elements head coil available on
the Tim Trio Siemens, for which the 12 coil elements are combined into 4 groups
of 3 coil elements each. The signal is therefore received through 4 distinct
groups of receiver channels, yielding N = 4. Had we used a GE acquisition, we
would have used N=1 even if multiple channel coils are used because GE uses a
SENSE reconstruction, which has a Rician noise nature and thus N is always 1.
"""
sigma, mask = piesno(data, N=4, return_mask=True)
axial = data[:, :, data.shape[2] / 2, 0].T
axial_piesno = mask[:, :, data.shape[2] / 2].T
import matplotlib.pyplot as plt
fig, ax = plt.subplots(1, 2)
ax[0].imshow(axial, cmap='gray', origin='lower')
ax[0].set_title('Axial slice of the b=0 data')
ax[1].imshow(axial_piesno, cmap='gray', origin='lower')
ax[1].set_title('Background voxels from the data')
for a in ax:
a.set_axis_off()
plt.savefig('piesno.png', bbox_inches='tight')
"""
.. figure:: piesno.png
:align: center
**Showing the mid axial slice of the b=0 image (left) and estimated
background voxels (right) used to estimate the noise standard deviation**.
"""
nib.save(nib.Nifti1Image(mask, img.get_affine(), img.get_header()),
'mask_piesno.nii.gz')
print('The noise standard deviation is sigma= ', sigma)
print('The std of the background is =', np.std(data[mask[...,None].astype(np.bool)]))
"""
Here, we obtained a noise standard deviation of 7.26. For comparison, a simple
standard deviation of all voxels in the estimated mask (as done in the previous
example :ref:`example_snr_in_cc`) gives a value of 6.1.
"""
"""
.. [Koay2009] Koay C.G., E. Ozarslan, C. Pierpaoli. Probabilistic
Identification and Estimation of Noise (PIESNO): A
self-consistent approach and its applications in MRI.
JMR, 199(1):94-103, 2009.
.. include:: ../links_names.inc
"""
| bsd-3-clause |
krez13/scikit-learn | examples/tree/plot_tree_regression_multioutput.py | 22 | 1848 | """
===================================================================
Multi-output Decision Tree Regression
===================================================================
An example to illustrate multi-output regression with decision tree.
The :ref:`decision trees <tree>`
are used to predict simultaneously the noisy x and y observations of a circle
given a single underlying feature. As a result, it learns local linear
regressions approximating the circle.
We can see that if the maximum depth of the tree (controlled by the
`max_depth` parameter) is set too high, the decision trees learn too fine
details of the training data and learn from the noise, i.e. they overfit.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.tree import DecisionTreeRegressor
# Create a random dataset
rng = np.random.RandomState(1)
X = np.sort(200 * rng.rand(100, 1) - 100, axis=0)
y = np.array([np.pi * np.sin(X).ravel(), np.pi * np.cos(X).ravel()]).T
y[::5, :] += (0.5 - rng.rand(20, 2))
# Fit regression model
regr_1 = DecisionTreeRegressor(max_depth=2)
regr_2 = DecisionTreeRegressor(max_depth=5)
regr_3 = DecisionTreeRegressor(max_depth=8)
regr_1.fit(X, y)
regr_2.fit(X, y)
regr_3.fit(X, y)
# Predict
X_test = np.arange(-100.0, 100.0, 0.01)[:, np.newaxis]
y_1 = regr_1.predict(X_test)
y_2 = regr_2.predict(X_test)
y_3 = regr_3.predict(X_test)
# Plot the results
plt.figure()
s = 50
plt.scatter(y[:, 0], y[:, 1], c="navy", s=s, label="data")
plt.scatter(y_1[:, 0], y_1[:, 1], c="cornflowerblue", s=s, label="max_depth=2")
plt.scatter(y_2[:, 0], y_2[:, 1], c="c", s=s, label="max_depth=5")
plt.scatter(y_3[:, 0], y_3[:, 1], c="orange", s=s, label="max_depth=8")
plt.xlim([-6, 6])
plt.ylim([-6, 6])
plt.xlabel("data")
plt.ylabel("target")
plt.title("Multi-output Decision Tree Regression")
plt.legend()
plt.show()
| bsd-3-clause |
mrgloom/h2o-3 | h2o-py/h2o/model/model_base.py | 1 | 19585 | """
This module implements the base model class. All model classes inherit from this class.
"""
import h2o
from . import H2OFrame
from . import H2OConnection
class ModelBase(object):
def __init__(self, dest_key, model_json, metrics_class):
self._id = dest_key
self._model_json = model_json
self._metrics_class = metrics_class
self._is_xvalidated=False
self._xval_keys=None
# build Metric objects out of each metrics
for metric in ["training_metrics", "validation_metrics", "cross_validation_metrics"]:
if metric in model_json["output"]:
if model_json["output"][metric] is not None:
if metric=="cross_validation_metrics":
self._is_xvalidated=True
model_json["output"][metric] = metrics_class(model_json["output"][metric],metric,model_json["algo"])
if self._is_xvalidated: self._xval_keys= [i["name"] for i in model_json["output"]["cross_validation_models"]]
# build a useful dict of the params
self._params={}
for p in self._model_json["parameters"]: self._params[p["label"]]=p
@property
def params(self):
"""
Get the parameters and the actual/default values only.
:return: A dictionary of parameters used to build this model.
"""
params = {}
for p in self._params:
params[p] = {"default":self._params[p]["default_value"], "actual":self._params[p]["actual_value"]}
return params
@property
def full_parameters(self):
"""
Get the full specification of all parameters.
:return: a dictionary of parameters used to build this model.
"""
return self._params
def __repr__(self):
self.show()
return ""
def predict(self, test_data):
"""
Predict on a dataset.
:param test_data: Data to be predicted on.
:return: A new H2OFrame filled with predictions.
"""
if not test_data: raise ValueError("Must specify test data")
test_data._eager()
j = H2OConnection.post_json("Predictions/models/" + self._id + "/frames/" + test_data._id)
prediction_frame_id = j["model_metrics"][0]["predictions"]["frame_id"]["name"]
return h2o.get_frame(prediction_frame_id)
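  # Hedged usage sketch (illustrative names, not from the original docstrings):
  # given a trained model object `mdl` and an H2OFrame `test` on a running cluster,
  #   preds = mdl.predict(test)   # returns a new H2OFrame of predictions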
def is_cross_validated(self):
"""
:return: True if the model was cross-validated.
"""
return self._is_xvalidated
def xval_keys(self):
"""
:return: The model keys for the cross-validated model.
"""
return self._xval_keys
def get_xval_models(self,key=None):
"""
Return a Model object.
:param key: If None, return all cross-validated models; otherwise return the model that key points to.
:return: A model or list of models.
"""
return h2o.get_model(key) if key is not None else [h2o.get_model(k) for k in self._xval_keys]
@property
def xvals(self):
"""
Return a list of the cross-validated models.
:return: A list of models
"""
return self.get_xval_models()
def deepfeatures(self, test_data, layer):
"""
Return hidden layer details
:param test_data: Data to create a feature space on
:param layer: 0 index hidden layer
"""
if test_data is None: raise ValueError("Must specify test data")
test_data._eager()
j = H2OConnection.post_json("Predictions/models/" + self._id + "/frames/" + test_data._id, deep_features_hidden_layer=layer)
return h2o.get_frame(j["predictions_frame"]["name"])
def weights(self, matrix_id=0):
"""
Return the frame for the respective weight matrix
:param: matrix_id: an integer, ranging from 0 to number of layers, that specifies the weight matrix to return.
:return: an H2OFrame which represents the weight matrix identified by matrix_id
"""
num_weight_matrices = len(self._model_json['output']['weights'])
if matrix_id not in range(num_weight_matrices):
raise ValueError("Weight matrix does not exist. Model has {0} weight matrices (0-based indexing), but matrix {1} "
"was requested.".format(num_weight_matrices, matrix_id))
return h2o.get_frame(self._model_json['output']['weights'][matrix_id]['URL'].split('/')[3])
def biases(self, vector_id=0):
"""
Return the frame for the respective bias vector
:param: vector_id: an integer, ranging from 0 to number of layers, that specifies the bias vector to return.
:return: an H2OFrame which represents the bias vector identified by vector_id
"""
num_bias_vectors = len(self._model_json['output']['biases'])
if vector_id not in range(num_bias_vectors):
raise ValueError("Bias vector does not exist. Model has {0} bias vectors (0-based indexing), but vector {1} "
"was requested.".format(num_bias_vectors, vector_id))
return h2o.get_frame(self._model_json['output']['biases'][vector_id]['URL'].split('/')[3])
def model_performance(self, test_data=None, train=False, valid=False):
"""
Generate model metrics for this model on test_data.
:param test_data: Data set for which model metrics shall be computed against. Both train and valid arguments are ignored if test_data is not None.
:param train: Report the training metrics for the model. If the test_data is the training data, the training metrics are returned.
:param valid: Report the validation metrics for the model. If train and valid are True, then it defaults to True.
:return: An object of class H2OModelMetrics.
"""
if test_data is None:
if not train and not valid: train = True # default to train
if train: return self._model_json["output"]["training_metrics"]
if valid: return self._model_json["output"]["validation_metrics"]
else: # cases dealing with test_data not None
if not isinstance(test_data, H2OFrame):
raise ValueError("`test_data` must be of type H2OFrame. Got: " + type(test_data))
test_data._eager()
res = H2OConnection.post_json("ModelMetrics/models/" + self._id + "/frames/" + test_data._id)
# FIXME need to do the client-side filtering... PUBDEV-874: https://0xdata.atlassian.net/browse/PUBDEV-874
raw_metrics = None
for mm in res["model_metrics"]:
if mm["frame"]["name"] == test_data._id:
raw_metrics = mm
break
return self._metrics_class(raw_metrics,algo=self._model_json["algo"])
def score_history(self):
"""
Retrieve Model Score History
:return: the score history (H2OTwoDimTable)
"""
model = self._model_json["output"]
if 'scoring_history' in model.keys() and model["scoring_history"] != None:
s = model["scoring_history"]
if h2o.can_use_pandas():
import pandas
pandas.options.display.max_rows = 20
return pandas.DataFrame(s.cell_values,columns=s.col_header)
return model["scoring_history"]
else: print "No score history for this model"
def summary(self):
"""
Print a detailed summary of the model.
:return:
"""
model = self._model_json["output"]
if model["model_summary"]:
model["model_summary"].show() # H2OTwoDimTable object
def show(self):
"""
Print innards of model, without regards to type
:return: None
"""
model = self._model_json["output"]
print "Model Details"
print "============="
print self.__class__.__name__, ": ", self._model_json["algo_full_name"]
print "Model Key: ", self._id
self.summary()
print
# training metrics
tm = model["training_metrics"]
if tm: tm.show()
vm = model["validation_metrics"]
if vm: vm.show()
xm = model["cross_validation_metrics"]
if xm: xm.show()
if "scoring_history" in model.keys() and model["scoring_history"]: model["scoring_history"].show()
if "variable_importances" in model.keys() and model["variable_importances"]: model["variable_importances"].show()
def varimp(self, return_list=False):
"""
Pretty print the variable importances, or return them in a list
    :param return_list: if True, then return the variable importances in a list (ordered from most important to least
important). Each entry in the list is a 4-tuple of (variable, relative_importance, scaled_importance, percentage).
:return: None or ordered list
"""
model = self._model_json["output"]
if "variable_importances" in model.keys() and model["variable_importances"]:
if not return_list: return model["variable_importances"].show()
else: return model["variable_importances"].cell_values
else:
print "Warning: This model doesn't have variable importances"
def residual_deviance(self,train=False,valid=False,xval=False):
"""
    Retrieve the residual deviance if this model has the attribute, or None otherwise.
:param train: Get the residual deviance for the training set. If both train and valid are False, then train is selected by default.
:param valid: Get the residual deviance for the validation set. If both train and valid are True, then train is selected by default.
:return: Return the residual deviance, or None if it is not present.
"""
if xval: raise ValueError("Cross-validation metrics are not available.")
if not train and not valid: train = True
if train and valid: train = True
return self._model_json["output"]["training_metrics"].residual_deviance() if train else self._model_json["output"]["validation_metrics"].residual_deviance()
def residual_degrees_of_freedom(self,train=False,valid=False,xval=False):
"""
    Retrieve the residual degrees of freedom if this model has the attribute, or None otherwise.
:param train: Get the residual dof for the training set. If both train and valid are False, then train is selected by default.
:param valid: Get the residual dof for the validation set. If both train and valid are True, then train is selected by default.
:return: Return the residual dof, or None if it is not present.
"""
if xval: raise ValueError("Cross-validation metrics are not available.")
if not train and not valid: train = True
if train and valid: train = True
return self._model_json["output"]["training_metrics"].residual_degrees_of_freedom() if train else self._model_json["output"]["validation_metrics"].residual_degrees_of_freedom()
def null_deviance(self,train=False,valid=False,xval=False):
"""
Retrieve the null deviance if this model has the attribute, or None otherwise.
:param train: Get the null deviance for the training set. If both train and valid are False, then train is selected by default.
:param valid: Get the null deviance for the validation set. If both train and valid are True, then train is selected by default.
:return: Return the null deviance, or None if it is not present.
"""
if xval: raise ValueError("Cross-validation metrics are not available.")
if not train and not valid: train = True
if train and valid: train = True
return self._model_json["output"]["training_metrics"].null_deviance() if train else self._model_json["output"]["validation_metrics"].null_deviance()
def null_degrees_of_freedom(self,train=False,valid=False,xval=False):
"""
Retrieve the null degrees of freedom if this model has the attribute, or None otherwise.
:param train: Get the null dof for the training set. If both train and valid are False, then train is selected by default.
:param valid: Get the null dof for the validation set. If both train and valid are True, then train is selected by default.
:return: Return the null dof, or None if it is not present.
"""
if xval: raise ValueError("Cross-validation metrics are not available.")
if not train and not valid: train = True
if train and valid: train = True
return self._model_json["output"]["training_metrics"].null_degrees_of_freedom() if train else self._model_json["output"]["validation_metrics"].null_degrees_of_freedom()
def pprint_coef(self):
"""
Pretty print the coefficients table (includes normalized coefficients)
:return: None
"""
print self._model_json["output"]["coefficients_table"] # will return None if no coefs!
def coef(self):
"""
:return: Return the coefficients for this model.
"""
tbl = self._model_json["output"]["coefficients_table"]
if tbl is None: return None
tbl = tbl.cell_values
return {a[0]:a[1] for a in tbl}
def coef_norm(self):
"""
:return: Return the normalized coefficients
"""
tbl = self._model_json["output"]["coefficients_table"]
if tbl is None: return None
tbl = tbl.cell_values
return {a[0]:a[2] for a in tbl}
def r2(self, train=False, valid=False, xval=False):
"""
Return the R^2 for this regression model.
The R^2 value is defined to be 1 - MSE/var,
where var is computed as sigma*sigma.
If all are False (default), then return the training metric value.
If more than one option is set to True, then return a dictionary of metrics where the keys are "train", "valid",
and "xval"
:param train: If train is True, then return the R^2 value for the training data.
:param valid: If valid is True, then return the R^2 value for the validation data.
:param xval: If xval is True, then return the R^2 value for the cross validation data.
:return: The R^2 for this regression model.
"""
tm = ModelBase._get_metrics(self, train, valid, xval)
m = {}
for k,v in zip(tm.keys(),tm.values()): m[k] = None if v is None else v.r2()
return m.values()[0] if len(m) == 1 else m
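# Illustrative usage sketch (not part of the original file; `m` is a hypothetical model trained
# with a validation frame). The metric accessors below all share this train/valid/xval flag logic:
#   m.r2()                        # training R^2 (default when all flags are False)
#   m.r2(valid=True)              # validation R^2
#   m.r2(train=True, valid=True)  # dict of the form {"train": ..., "valid": ...}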
def mse(self, train=False, valid=False, xval=False):
"""
Get the MSE(s).
If all are False (default), then return the training metric value.
If more than one option is set to True, then return a dictionary of metrics where the keys are "train", "valid",
and "xval"
:param train: If train is True, then return the MSE value for the training data.
:param valid: If valid is True, then return the MSE value for the validation data.
:param xval: If xval is True, then return the MSE value for the cross validation data.
:return: The MSE for this regression model.
"""
tm = ModelBase._get_metrics(self, train, valid, xval)
m = {}
for k,v in zip(tm.keys(),tm.values()): m[k] = None if v is None else v.mse()
return m.values()[0] if len(m) == 1 else m
def logloss(self, train=False, valid=False, xval=False):
"""
Get the Log Loss(s).
If all are False (default), then return the training metric value.
If more than one option is set to True, then return a dictionary of metrics where the keys are "train", "valid",
and "xval"
:param train: If train is True, then return the Log Loss value for the training data.
:param valid: If valid is True, then return the Log Loss value for the validation data.
:param xval: If xval is True, then return the Log Loss value for the cross validation data.
:return: The Log Loss for this binomial model.
"""
tm = ModelBase._get_metrics(self, train, valid, xval)
m = {}
for k,v in zip(tm.keys(),tm.values()): m[k] = None if v is None else v.logloss()
return m.values()[0] if len(m) == 1 else m
def mean_residual_deviance(self, train=False, valid=False, xval=False):
"""
Get the Mean Residual Deviances(s).
If all are False (default), then return the training metric value.
If more than one option is set to True, then return a dictionary of metrics where the keys are "train", "valid",
and "xval"
:param train: If train is True, then return the Mean Residual Deviance value for the training data.
:param valid: If valid is True, then return the Mean Residual Deviance value for the validation data.
:param xval: If xval is True, then return the Mean Residual Deviance value for the cross validation data.
:return: The Mean Residual Deviance for this regression model.
"""
tm = ModelBase._get_metrics(self, train, valid, xval)
m = {}
for k,v in zip(tm.keys(),tm.values()): m[k] = None if v is None else v.mean_residual_deviance()
return m.values()[0] if len(m) == 1 else m
def auc(self, train=False, valid=False, xval=False):
"""
Get the AUC(s).
If all are False (default), then return the training metric value.
If more than one option is set to True, then return a dictionary of metrics where the keys are "train", "valid",
and "xval"
:param train: If train is True, then return the AUC value for the training data.
:param valid: If valid is True, then return the AUC value for the validation data.
:param xval: If xval is True, then return the AUC value for the cross validation data.
:return: The AUC.
"""
tm = ModelBase._get_metrics(self, train, valid, xval)
m = {}
for k,v in zip(tm.keys(),tm.values()): m[k] = None if v is None else v.auc()
return m.values()[0] if len(m) == 1 else m
def aic(self, train=False, valid=False, xval=False):
"""
Get the AIC(s).
If all are False (default), then return the training metric value.
If more than one option is set to True, then return a dictionary of metrics where the keys are "train", "valid",
and "xval"
:param train: If train is True, then return the AIC value for the training data.
:param valid: If valid is True, then return the AIC value for the validation data.
:param xval: If xval is True, then return the AIC value for the cross validation data.
:return: The AIC.
"""
tm = ModelBase._get_metrics(self, train, valid, xval)
m = {}
for k,v in zip(tm.keys(),tm.values()): m[k] = None if v is None else v.aic()
return m.values()[0] if len(m) == 1 else m
def giniCoef(self, train=False, valid=False, xval=False):
"""
Get the Gini Coefficient(s).
If all are False (default), then return the training metric value.
If more than one option is set to True, then return a dictionary of metrics where the keys are "train", "valid",
and "xval"
:param train: If train is True, then return the Gini Coefficient value for the training data.
:param valid: If valid is True, then return the Gini Coefficient value for the validation data.
:param xval: If xval is True, then return the Gini Coefficient value for the cross validation data.
:return: The Gini Coefficient for this binomial model.
"""
tm = ModelBase._get_metrics(self, train, valid, xval)
m = {}
for k,v in zip(tm.keys(),tm.values()): m[k] = None if v is None else v.giniCoef()
return m.values()[0] if len(m) == 1 else m
def download_pojo(self,path=""):
"""
Download the POJO for this model to the directory specified by path (no trailing slash!).
If path is "", then dump to screen.
:param model: Retrieve this model's scoring POJO.
:param path: An absolute path to the directory where POJO should be saved.
:return: None
"""
h2o.download_pojo(self,path) # call the "package" function
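# Illustrative usage sketch (not part of the original file; the model and path are hypothetical):
#   m.download_pojo()                  # dumps the POJO source to the screen
#   m.download_pojo(path="/tmp/pojo")  # writes the POJO .java source into that directory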
@staticmethod
def _get_metrics(o, train, valid, xval):
metrics = {}
if train: metrics["train"] = o._model_json["output"]["training_metrics"]
if valid: metrics["valid"] = o._model_json["output"]["validation_metrics"]
if xval : metrics["xval"] = o._model_json["output"]["cross_validation_metrics"]
if len(metrics) == 0: metrics["train"] = o._model_json["output"]["training_metrics"]
return metrics
# Delete from cluster as model goes out of scope
# def __del__(self):
# h2o.remove(self._id)
@staticmethod
def _has(dictionary, key):
return key in dictionary and dictionary[key] is not None
| apache-2.0 |
jkarnows/scikit-learn | benchmarks/bench_tree.py | 297 | 3617 | """
To run this, you'll need to have the following installed:
* scikit-learn
Does two benchmarks
First, we fix a training set, increase the number of
samples, and plot the time taken to fit and predict as a
function of the number of samples.
In the second benchmark, we increase the number of dimensions of the
training set, classify a sample and plot the time taken as a function
of the number of dimensions.
"""
import numpy as np
import pylab as pl
import gc
from datetime import datetime
# to store the results
scikit_classifier_results = []
scikit_regressor_results = []
mu_second = 0.0 + 10 ** 6 # number of microseconds in a second
def bench_scikit_tree_classifier(X, Y):
"""Benchmark with scikit-learn decision tree classifier"""
from sklearn.tree import DecisionTreeClassifier
gc.collect()
# start time
tstart = datetime.now()
clf = DecisionTreeClassifier()
clf.fit(X, Y).predict(X)
delta = (datetime.now() - tstart)
# stop time
scikit_classifier_results.append(
delta.seconds + delta.microseconds / mu_second)
def bench_scikit_tree_regressor(X, Y):
"""Benchmark with scikit-learn decision tree regressor"""
from sklearn.tree import DecisionTreeRegressor
gc.collect()
# start time
tstart = datetime.now()
clf = DecisionTreeRegressor()
clf.fit(X, Y).predict(X)
delta = (datetime.now() - tstart)
# stop time
scikit_regressor_results.append(
delta.seconds + delta.microseconds / mu_second)
if __name__ == '__main__':
print('============================================')
print('Warning: this is going to take a looong time')
print('============================================')
n = 10
step = 10000
n_samples = 10000
dim = 10
n_classes = 10
for i in range(n):
print('============================================')
print('Entering iteration %s of %s' % (i, n))
print('============================================')
n_samples += step
X = np.random.randn(n_samples, dim)
Y = np.random.randint(0, n_classes, (n_samples,))
bench_scikit_tree_classifier(X, Y)
Y = np.random.randn(n_samples)
bench_scikit_tree_regressor(X, Y)
xx = range(0, n * step, step)
pl.figure('scikit-learn tree benchmark results')
pl.subplot(211)
pl.title('Learning with varying number of samples')
pl.plot(xx, scikit_classifier_results, 'g-', label='classification')
pl.plot(xx, scikit_regressor_results, 'r-', label='regression')
pl.legend(loc='upper left')
pl.xlabel('number of samples')
pl.ylabel('Time (s)')
scikit_classifier_results = []
scikit_regressor_results = []
n = 10
step = 500
start_dim = 500
n_classes = 10
dim = start_dim
for i in range(0, n):
print('============================================')
print('Entering iteration %s of %s' % (i, n))
print('============================================')
dim += step
X = np.random.randn(100, dim)
Y = np.random.randint(0, n_classes, (100,))
bench_scikit_tree_classifier(X, Y)
Y = np.random.randn(100)
bench_scikit_tree_regressor(X, Y)
xx = np.arange(start_dim, start_dim + n * step, step)
pl.subplot(212)
pl.title('Learning in high dimensional spaces')
pl.plot(xx, scikit_classifier_results, 'g-', label='classification')
pl.plot(xx, scikit_regressor_results, 'r-', label='regression')
pl.legend(loc='upper left')
pl.xlabel('number of dimensions')
pl.ylabel('Time (s)')
pl.axis('tight')
pl.show()
| bsd-3-clause |
aaannndddyyy/Replicating-DeepMind | libraries/cuda-convnet2/convdata.py | 174 | 14675 | # Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from python_util.data import *
import numpy.random as nr
import numpy as n
import random as r
from time import time
from threading import Thread
from math import sqrt
import sys
#from matplotlib import pylab as pl
from PIL import Image
from StringIO import StringIO
from time import time
import itertools as it
class JPEGBatchLoaderThread(Thread):
def __init__(self, dp, batch_num, label_offset, list_out):
Thread.__init__(self)
self.list_out = list_out
self.label_offset = label_offset
self.dp = dp
self.batch_num = batch_num
@staticmethod
def load_jpeg_batch(rawdics, dp, label_offset):
if type(rawdics) != list:
rawdics = [rawdics]
nc_total = sum(len(r['data']) for r in rawdics)
jpeg_strs = list(it.chain.from_iterable(rd['data'] for rd in rawdics))
labels = list(it.chain.from_iterable(rd['labels'] for rd in rawdics))
img_mat = n.empty((nc_total * dp.data_mult, dp.inner_pixels * dp.num_colors), dtype=n.float32)
lab_mat = n.zeros((nc_total, dp.get_num_classes()), dtype=n.float32)
dp.convnet.libmodel.decodeJpeg(jpeg_strs, img_mat, dp.img_size, dp.inner_size, dp.test, dp.multiview)
lab_vec = n.tile(n.asarray([(l[nr.randint(len(l))] if len(l) > 0 else -1) + label_offset for l in labels], dtype=n.single).reshape((nc_total, 1)), (dp.data_mult,1))
for c in xrange(nc_total):
lab_mat[c, [z + label_offset for z in labels[c]]] = 1
lab_mat = n.tile(lab_mat, (dp.data_mult, 1))
return {'data': img_mat[:nc_total * dp.data_mult,:],
'labvec': lab_vec[:nc_total * dp.data_mult,:],
'labmat': lab_mat[:nc_total * dp.data_mult,:]}
def run(self):
rawdics = self.dp.get_batch(self.batch_num)
p = JPEGBatchLoaderThread.load_jpeg_batch(rawdics,
self.dp,
self.label_offset)
self.list_out.append(p)
class ColorNoiseMakerThread(Thread):
def __init__(self, pca_stdevs, pca_vecs, num_noise, list_out):
Thread.__init__(self)
self.pca_stdevs, self.pca_vecs = pca_stdevs, pca_vecs
self.num_noise = num_noise
self.list_out = list_out
def run(self):
noise = n.dot(nr.randn(self.num_noise, 3).astype(n.single) * self.pca_stdevs.T, self.pca_vecs.T)
self.list_out.append(noise)
class ImageDataProvider(LabeledDataProvider):
def __init__(self, data_dir, batch_range=None, init_epoch=1, init_batchnum=None, dp_params=None, test=False):
LabeledDataProvider.__init__(self, data_dir, batch_range, init_epoch, init_batchnum, dp_params, test)
self.data_mean = self.batch_meta['data_mean'].astype(n.single)
self.color_eig = self.batch_meta['color_pca'][1].astype(n.single)
self.color_stdevs = n.c_[self.batch_meta['color_pca'][0].astype(n.single)]
self.color_noise_coeff = dp_params['color_noise']
self.num_colors = 3
self.img_size = int(sqrt(self.batch_meta['num_vis'] / self.num_colors))
self.mini = dp_params['minibatch_size']
self.inner_size = dp_params['inner_size'] if dp_params['inner_size'] > 0 else self.img_size
self.inner_pixels = self.inner_size **2
self.border_size = (self.img_size - self.inner_size) / 2
self.multiview = dp_params['multiview_test'] and test
self.num_views = 5*2
self.data_mult = self.num_views if self.multiview else 1
self.batch_size = self.batch_meta['batch_size']
self.label_offset = 0 if 'label_offset' not in self.batch_meta else self.batch_meta['label_offset']
self.scalar_mean = dp_params['scalar_mean']
# Maintain pointers to previously-returned data matrices so they don't get garbage collected.
self.data = [None, None] # These are pointers to previously-returned data matrices
self.loader_thread, self.color_noise_thread = None, None
self.convnet = dp_params['convnet']
self.num_noise = self.batch_size
self.batches_generated, self.loaders_started = 0, 0
self.data_mean_crop = self.data_mean.reshape((self.num_colors,self.img_size,self.img_size))[:,self.border_size:self.border_size+self.inner_size,self.border_size:self.border_size+self.inner_size].reshape((1,3*self.inner_size**2))
if self.scalar_mean >= 0:
self.data_mean_crop = self.scalar_mean
def showimg(self, img):
from matplotlib import pylab as pl
pixels = img.shape[0] / 3
size = int(sqrt(pixels))
img = img.reshape((3,size,size)).swapaxes(0,2).swapaxes(0,1)
pl.imshow(img, interpolation='nearest')
pl.show()
def get_data_dims(self, idx=0):
if idx == 0:
return self.inner_size**2 * 3
if idx == 2:
return self.get_num_classes()
return 1
def start_loader(self, batch_idx):
self.load_data = []
self.loader_thread = JPEGBatchLoaderThread(self,
self.batch_range[batch_idx],
self.label_offset,
self.load_data)
self.loader_thread.start()
def start_color_noise_maker(self):
color_noise_list = []
self.color_noise_thread = ColorNoiseMakerThread(self.color_stdevs, self.color_eig, self.num_noise, color_noise_list)
self.color_noise_thread.start()
return color_noise_list
def set_labels(self, datadic):
pass
def get_data_from_loader(self):
if self.loader_thread is None:
self.start_loader(self.batch_idx)
self.loader_thread.join()
self.data[self.d_idx] = self.load_data[0]
self.start_loader(self.get_next_batch_idx())
else:
# Set the argument to join to 0 to re-enable batch reuse
self.loader_thread.join()
if not self.loader_thread.is_alive():
self.data[self.d_idx] = self.load_data[0]
self.start_loader(self.get_next_batch_idx())
#else:
# print "Re-using batch"
self.advance_batch()
def add_color_noise(self):
# At this point the data already has 0 mean.
# So I'm going to add noise to it, but I'm also going to scale down
# the original data. This is so that the overall scale of the training
# data doesn't become too different from the test data.
s = self.data[self.d_idx]['data'].shape
cropped_size = self.get_data_dims(0) / 3
ncases = s[0]
if self.color_noise_thread is None:
self.color_noise_list = self.start_color_noise_maker()
self.color_noise_thread.join()
self.color_noise = self.color_noise_list[0]
self.color_noise_list = self.start_color_noise_maker()
else:
self.color_noise_thread.join(0)
if not self.color_noise_thread.is_alive():
self.color_noise = self.color_noise_list[0]
self.color_noise_list = self.start_color_noise_maker()
self.data[self.d_idx]['data'] = self.data[self.d_idx]['data'].reshape((ncases*3, cropped_size))
self.color_noise = self.color_noise[:ncases,:].reshape((3*ncases, 1))
self.data[self.d_idx]['data'] += self.color_noise * self.color_noise_coeff
self.data[self.d_idx]['data'] = self.data[self.d_idx]['data'].reshape((ncases, 3* cropped_size))
self.data[self.d_idx]['data'] *= 1.0 / (1.0 + self.color_noise_coeff) # <--- NOTE: This is the slow line, 0.25sec. Down from 0.75sec when I used division.
def get_next_batch(self):
self.d_idx = self.batches_generated % 2
epoch, batchnum = self.curr_epoch, self.curr_batchnum
self.get_data_from_loader()
# Subtract mean
self.data[self.d_idx]['data'] -= self.data_mean_crop
if self.color_noise_coeff > 0 and not self.test:
self.add_color_noise()
self.batches_generated += 1
return epoch, batchnum, [self.data[self.d_idx]['data'].T, self.data[self.d_idx]['labvec'].T, self.data[self.d_idx]['labmat'].T]
# Takes as input an array returned by get_next_batch
# Returns a (numCases, imgSize, imgSize, 3) array which can be
# fed to pylab for plotting.
# This is used by shownet.py to plot test case predictions.
def get_plottable_data(self, data, add_mean=True):
mean = self.data_mean_crop.reshape((data.shape[0],1)) if data.flags.f_contiguous or self.scalar_mean else self.data_mean_crop.reshape((data.shape[0],1))
return n.require((data + (mean if add_mean else 0)).T.reshape(data.shape[1], 3, self.inner_size, self.inner_size).swapaxes(1,3).swapaxes(1,2) / 255.0, dtype=n.single)
class CIFARDataProvider(LabeledDataProvider):
def __init__(self, data_dir, batch_range=None, init_epoch=1, init_batchnum=None, dp_params=None, test=False):
LabeledDataProvider.__init__(self, data_dir, batch_range, init_epoch, init_batchnum, dp_params, test)
self.img_size = 32
self.num_colors = 3
self.inner_size = dp_params['inner_size'] if dp_params['inner_size'] > 0 else self.batch_meta['img_size']
self.border_size = (self.img_size - self.inner_size) / 2
self.multiview = dp_params['multiview_test'] and test
self.num_views = 9
self.scalar_mean = dp_params['scalar_mean']
self.data_mult = self.num_views if self.multiview else 1
self.data_dic = []
for i in batch_range:
self.data_dic += [unpickle(self.get_data_file_name(i))]
self.data_dic[-1]["labels"] = n.require(self.data_dic[-1]['labels'], dtype=n.single)
self.data_dic[-1]["labels"] = n.require(n.tile(self.data_dic[-1]["labels"].reshape((1, n.prod(self.data_dic[-1]["labels"].shape))), (1, self.data_mult)), requirements='C')
self.data_dic[-1]['data'] = n.require(self.data_dic[-1]['data'] - self.scalar_mean, dtype=n.single, requirements='C')
self.cropped_data = [n.zeros((self.get_data_dims(), self.data_dic[0]['data'].shape[1]*self.data_mult), dtype=n.single) for x in xrange(2)]
self.batches_generated = 0
self.data_mean = self.batch_meta['data_mean'].reshape((self.num_colors,self.img_size,self.img_size))[:,self.border_size:self.border_size+self.inner_size,self.border_size:self.border_size+self.inner_size].reshape((self.get_data_dims(), 1))
def get_next_batch(self):
epoch, batchnum = self.curr_epoch, self.curr_batchnum
self.advance_batch()
bidx = batchnum - self.batch_range[0]
cropped = self.cropped_data[self.batches_generated % 2]
self.__trim_borders(self.data_dic[bidx]['data'], cropped)
cropped -= self.data_mean
self.batches_generated += 1
return epoch, batchnum, [cropped, self.data_dic[bidx]['labels']]
def get_data_dims(self, idx=0):
return self.inner_size**2 * self.num_colors if idx == 0 else 1
# Takes as input an array returned by get_next_batch
# Returns a (numCases, imgSize, imgSize, 3) array which can be
# fed to pylab for plotting.
# This is used by shownet.py to plot test case predictions.
def get_plottable_data(self, data):
return n.require((data + self.data_mean).T.reshape(data.shape[1], 3, self.inner_size, self.inner_size).swapaxes(1,3).swapaxes(1,2) / 255.0, dtype=n.single)
def __trim_borders(self, x, target):
y = x.reshape(self.num_colors, self.img_size, self.img_size, x.shape[1])
if self.test: # don't need to loop over cases
if self.multiview:
start_positions = [(0,0), (0, self.border_size), (0, self.border_size*2),
(self.border_size, 0), (self.border_size, self.border_size), (self.border_size, self.border_size*2),
(self.border_size*2, 0), (self.border_size*2, self.border_size), (self.border_size*2, self.border_size*2)]
end_positions = [(sy+self.inner_size, sx+self.inner_size) for (sy,sx) in start_positions]
for i in xrange(self.num_views):
target[:,i * x.shape[1]:(i+1)* x.shape[1]] = y[:,start_positions[i][0]:end_positions[i][0],start_positions[i][1]:end_positions[i][1],:].reshape((self.get_data_dims(),x.shape[1]))
else:
pic = y[:,self.border_size:self.border_size+self.inner_size,self.border_size:self.border_size+self.inner_size, :] # just take the center for now
target[:,:] = pic.reshape((self.get_data_dims(), x.shape[1]))
else:
for c in xrange(x.shape[1]): # loop over cases
startY, startX = nr.randint(0,self.border_size*2 + 1), nr.randint(0,self.border_size*2 + 1)
endY, endX = startY + self.inner_size, startX + self.inner_size
pic = y[:,startY:endY,startX:endX, c]
if nr.randint(2) == 0: # also flip the image with 50% probability
pic = pic[:,:,::-1]
target[:,c] = pic.reshape((self.get_data_dims(),))
class DummyConvNetLogRegDataProvider(LabeledDummyDataProvider):
def __init__(self, data_dim):
LabeledDummyDataProvider.__init__(self, data_dim)
self.img_size = int(sqrt(data_dim/3))
def get_next_batch(self):
epoch, batchnum, dic = LabeledDummyDataProvider.get_next_batch(self)
dic = {'data': dic[0], 'labels': dic[1]}
print dic['data'].shape, dic['labels'].shape
return epoch, batchnum, [dic['data'], dic['labels']]
# Returns the dimensionality of the two data matrices returned by get_next_batch
def get_data_dims(self, idx=0):
return self.batch_meta['num_vis'] if idx == 0 else 1
| gpl-3.0 |
wlamond/scikit-learn | examples/covariance/plot_mahalanobis_distances.py | 348 | 6232 | r"""
================================================================
Robust covariance estimation and Mahalanobis distances relevance
================================================================
An example to show covariance estimation with the Mahalanobis
distances on Gaussian distributed data.
For Gaussian distributed data, the distance of an observation
:math:`x_i` to the mode of the distribution can be computed using its
Mahalanobis distance: :math:`d_{(\mu,\Sigma)}(x_i)^2 = (x_i -
\mu)'\Sigma^{-1}(x_i - \mu)` where :math:`\mu` and :math:`\Sigma` are
the location and the covariance of the underlying Gaussian
distribution.
In practice, :math:`\mu` and :math:`\Sigma` are replaced by some
estimates. The usual covariance maximum likelihood estimate is very
sensitive to the presence of outliers in the data set and, therefore,
so are the corresponding Mahalanobis distances. It is better to
use a robust estimator of covariance to guarantee that the estimation is
resistant to "erroneous" observations in the data set and that the
associated Mahalanobis distances accurately reflect the true
organisation of the observations.
The Minimum Covariance Determinant estimator is a robust,
high-breakdown point (i.e. it can be used to estimate the covariance
matrix of highly contaminated datasets, up to
:math:`\frac{n_\text{samples}-n_\text{features}-1}{2}` outliers)
estimator of covariance. The idea is to find
:math:`\frac{n_\text{samples}+n_\text{features}+1}{2}`
observations whose empirical covariance has the smallest determinant,
yielding a "pure" subset of observations from which to compute
standard estimates of location and covariance.
The Minimum Covariance Determinant estimator (MCD) has been introduced
by P. J. Rousseeuw in [1].
This example illustrates how the Mahalanobis distances are affected by
outlying data: observations drawn from a contaminating distribution
are not distinguishable from the observations coming from the real,
Gaussian distribution that one may want to work with. Using MCD-based
Mahalanobis distances, the two populations become
distinguishable. Associated applications include outlier detection,
observation ranking, clustering, ...
For visualization purposes, the cube roots of the Mahalanobis distances
are shown in the boxplot, as Wilson and Hilferty suggest [2].
[1] P. J. Rousseeuw. Least median of squares regression. J. Am.
Stat. Assoc., 79:871, 1984.
[2] Wilson, E. B., & Hilferty, M. M. (1931). The distribution of chi-square.
Proceedings of the National Academy of Sciences of the United States
of America, 17, 684-688.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.covariance import EmpiricalCovariance, MinCovDet
n_samples = 125
n_outliers = 25
n_features = 2
# generate data
gen_cov = np.eye(n_features)
gen_cov[0, 0] = 2.
X = np.dot(np.random.randn(n_samples, n_features), gen_cov)
# add some outliers
outliers_cov = np.eye(n_features)
outliers_cov[np.arange(1, n_features), np.arange(1, n_features)] = 7.
X[-n_outliers:] = np.dot(np.random.randn(n_outliers, n_features), outliers_cov)
# fit a Minimum Covariance Determinant (MCD) robust estimator to data
robust_cov = MinCovDet().fit(X)
# compare estimators learnt from the full data set with true parameters
emp_cov = EmpiricalCovariance().fit(X)
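# Illustrative sketch (not part of the original example): the squared Mahalanobis distance
# d^2 = (x - mu)' Sigma^{-1} (x - mu) of the first observation, computed by hand with plain
# numpy from the empirical estimates of location (mu) and covariance (Sigma).
centered = X[0] - emp_cov.location_
d2_manual = centered.dot(np.linalg.inv(emp_cov.covariance_)).dot(centered)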
###############################################################################
# Display results
fig = plt.figure()
plt.subplots_adjust(hspace=-.1, wspace=.4, top=.95, bottom=.05)
# Show data set
subfig1 = plt.subplot(3, 1, 1)
inlier_plot = subfig1.scatter(X[:, 0], X[:, 1],
color='black', label='inliers')
outlier_plot = subfig1.scatter(X[:, 0][-n_outliers:], X[:, 1][-n_outliers:],
color='red', label='outliers')
subfig1.set_xlim(subfig1.get_xlim()[0], 11.)
subfig1.set_title("Mahalanobis distances of a contaminated data set:")
# Show contours of the distance functions
xx, yy = np.meshgrid(np.linspace(plt.xlim()[0], plt.xlim()[1], 100),
np.linspace(plt.ylim()[0], plt.ylim()[1], 100))
zz = np.c_[xx.ravel(), yy.ravel()]
mahal_emp_cov = emp_cov.mahalanobis(zz)
mahal_emp_cov = mahal_emp_cov.reshape(xx.shape)
emp_cov_contour = subfig1.contour(xx, yy, np.sqrt(mahal_emp_cov),
cmap=plt.cm.PuBu_r,
linestyles='dashed')
mahal_robust_cov = robust_cov.mahalanobis(zz)
mahal_robust_cov = mahal_robust_cov.reshape(xx.shape)
robust_contour = subfig1.contour(xx, yy, np.sqrt(mahal_robust_cov),
cmap=plt.cm.YlOrBr_r, linestyles='dotted')
subfig1.legend([emp_cov_contour.collections[1], robust_contour.collections[1],
inlier_plot, outlier_plot],
['MLE dist', 'robust dist', 'inliers', 'outliers'],
loc="upper right", borderaxespad=0)
plt.xticks(())
plt.yticks(())
# Plot the scores for each point
emp_mahal = emp_cov.mahalanobis(X - np.mean(X, 0)) ** (0.33)
subfig2 = plt.subplot(2, 2, 3)
subfig2.boxplot([emp_mahal[:-n_outliers], emp_mahal[-n_outliers:]], widths=.25)
subfig2.plot(1.26 * np.ones(n_samples - n_outliers),
emp_mahal[:-n_outliers], '+k', markeredgewidth=1)
subfig2.plot(2.26 * np.ones(n_outliers),
emp_mahal[-n_outliers:], '+k', markeredgewidth=1)
subfig2.axes.set_xticklabels(('inliers', 'outliers'), size=15)
subfig2.set_ylabel(r"$\sqrt[3]{\rm{(Mahal. dist.)}}$", size=16)
subfig2.set_title("1. from non-robust estimates\n(Maximum Likelihood)")
plt.yticks(())
robust_mahal = robust_cov.mahalanobis(X - robust_cov.location_) ** (0.33)
subfig3 = plt.subplot(2, 2, 4)
subfig3.boxplot([robust_mahal[:-n_outliers], robust_mahal[-n_outliers:]],
widths=.25)
subfig3.plot(1.26 * np.ones(n_samples - n_outliers),
robust_mahal[:-n_outliers], '+k', markeredgewidth=1)
subfig3.plot(2.26 * np.ones(n_outliers),
robust_mahal[-n_outliers:], '+k', markeredgewidth=1)
subfig3.axes.set_xticklabels(('inliers', 'outliers'), size=15)
subfig3.set_ylabel(r"$\sqrt[3]{\rm{(Mahal. dist.)}}$", size=16)
subfig3.set_title("2. from robust estimates\n(Minimum Covariance Determinant)")
plt.yticks(())
plt.show()
| bsd-3-clause |
sonofmun/DissProject | Data_Production/make_dendro.py | 1 | 10814 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from collections import OrderedDict
from plotly import exceptions, optional_imports
from plotly.graph_objs import graph_objs
# Optional imports, may be None for users that only use our core functionality.
np = optional_imports.get_module('numpy')
scp = optional_imports.get_module('scipy')
sch = optional_imports.get_module('scipy.cluster.hierarchy')
scs = optional_imports.get_module('scipy.spatial')
def create_dendrogram(X, orientation="bottom", labels=None,
colorscale=None, distfun=None,
linkagefun=lambda x: sch.linkage(x, 'complete'),
threshold=None):
"""
BETA function that returns a dendrogram Plotly figure object.
:param (ndarray) X: Matrix of observations as array of arrays
:param (str) orientation: 'top', 'right', 'bottom', or 'left'
:param (list) labels: List of axis category labels(observation labels)
:param (list) colorscale: Optional colorscale for dendrogram tree
:param (function) distfun: Function to compute the pairwise distance from
the observations
:param (function) linkagefun: Function to compute the linkage matrix from
the pairwise distances
:param (double) threshold: The threshold below which the dendrogram will color groups.
See 'color_threshold' under scipy.cluster.hierarchy.dendrogram for more information.
Example 1: Simple bottom oriented dendrogram
```
import plotly.plotly as py
from plotly.figure_factory import create_dendrogram
import numpy as np
X = np.random.rand(10,10)
dendro = create_dendrogram(X)
plot_url = py.plot(dendro, filename='simple-dendrogram')
```
Example 2: Dendrogram to put on the left of the heatmap
```
import plotly.plotly as py
from plotly.figure_factory import create_dendrogram
import numpy as np
X = np.random.rand(5,5)
names = ['Jack', 'Oxana', 'John', 'Chelsea', 'Mark']
dendro = create_dendrogram(X, orientation='right', labels=names)
dendro['layout'].update({'width':700, 'height':500})
py.iplot(dendro, filename='vertical-dendrogram')
```
Example 3: Dendrogram with Pandas
```
import plotly.plotly as py
from plotly.figure_factory import create_dendrogram
import numpy as np
import pandas as pd
Index= ['A','B','C','D','E','F','G','H','I','J']
df = pd.DataFrame(abs(np.random.randn(10, 10)), index=Index)
fig = create_dendrogram(df, labels=Index)
url = py.plot(fig, filename='pandas-dendrogram')
```
"""
if not scp or not scs or not sch:
raise ImportError("FigureFactory.create_dendrogram requires scipy, \
scipy.spatial and scipy.hierarchy")
s = X.shape
if len(s) != 2:
raise exceptions.PlotlyError("X should be 2-dimensional array.")
if distfun is None:
distfun = scs.distance.pdist
dendrogram = _Dendrogram(X, orientation, labels, colorscale, threshold,
distfun=distfun, linkagefun=linkagefun)
return {'layout': dendrogram.layout,
'data': dendrogram.data}
class _Dendrogram(object):
"""Refer to FigureFactory.create_dendrogram() for docstring."""
def __init__(self, X, orientation='bottom', labels=None, colorscale=None, threshold=None,
width="100%", height="100%", xaxis='xaxis', yaxis='yaxis',
distfun=None,
linkagefun=lambda x: sch.linkage(x, 'complete')):
self.orientation = orientation
self.labels = labels
self.xaxis = xaxis
self.yaxis = yaxis
self.data = []
self.leaves = []
self.sign = {self.xaxis: 1, self.yaxis: 1}
self.layout = {self.xaxis: {}, self.yaxis: {}}
if self.orientation in ['left', 'bottom']:
self.sign[self.xaxis] = 1
else:
self.sign[self.xaxis] = -1
if self.orientation in ['right', 'bottom']:
self.sign[self.yaxis] = 1
else:
self.sign[self.yaxis] = -1
if distfun is None:
distfun = scs.distance.pdist
(dd_traces, xvals, yvals,
ordered_labels, leaves) = self.get_dendrogram_traces(X, colorscale,
distfun,
linkagefun, threshold)
self.labels = ordered_labels
self.leaves = leaves
yvals_flat = yvals.flatten()
xvals_flat = xvals.flatten()
self.zero_vals = []
for i in range(len(yvals_flat)):
if yvals_flat[i] == 0.0 and xvals_flat[i] not in self.zero_vals:
self.zero_vals.append(xvals_flat[i])
self.zero_vals.sort()
self.layout = self.set_figure_layout(width, height)
self.data = graph_objs.Data(dd_traces)
def get_color_dict(self, colorscale):
"""
Returns colorscale used for dendrogram tree clusters.
:param (list) colorscale: Colors to use for the plot in rgb format.
:rtype (dict): A dict of default colors mapped to the user colorscale.
"""
# These are the color codes returned for dendrograms
# We're replacing them with nicer colors
d = {'r': 'red',
'g': 'green',
'b': 'blue',
'c': 'cyan',
'm': 'magenta',
'y': 'yellow',
'k': 'black',
'w': 'white'}
default_colors = OrderedDict(sorted(d.items(), key=lambda t: t[0]))
if colorscale is None:
colorscale = [
'rgb(0,116,217)', # blue
'rgb(35,205,205)', # cyan
'rgb(61,153,112)', # green
'rgb(40,35,35)', # black
'rgb(133,20,75)', # magenta
'rgb(255,65,54)', # red
'rgb(255,255,255)', # white
'rgb(255,220,0)'] # yellow
for i in range(len(default_colors.keys())):
k = list(default_colors.keys())[i] # PY3 won't index keys
if i < len(colorscale):
default_colors[k] = colorscale[i]
return default_colors
def set_axis_layout(self, axis_key):
"""
Sets and returns default axis object for dendrogram figure.
:param (str) axis_key: E.g., 'xaxis', 'xaxis1', 'yaxis', yaxis1', etc.
:rtype (dict): An axis_key dictionary with set parameters.
"""
axis_defaults = {
'type': 'linear',
'ticks': 'outside',
'mirror': 'allticks',
'rangemode': 'tozero',
'showticklabels': True,
'zeroline': False,
'showgrid': False,
'showline': True,
}
if len(self.labels) != 0:
axis_key_labels = self.xaxis
if self.orientation in ['left', 'right']:
axis_key_labels = self.yaxis
if axis_key_labels not in self.layout:
self.layout[axis_key_labels] = {}
self.layout[axis_key_labels]['tickvals'] = \
[zv*self.sign[axis_key] for zv in self.zero_vals]
self.layout[axis_key_labels]['ticktext'] = self.labels
self.layout[axis_key_labels]['tickmode'] = 'array'
self.layout[axis_key].update(axis_defaults)
return self.layout[axis_key]
def set_figure_layout(self, width, height):
"""
Sets and returns default layout object for dendrogram figure.
"""
self.layout.update({
'showlegend': False,
'autosize': False,
'hovermode': 'closest',
'width': width,
'height': height
})
self.set_axis_layout(self.xaxis)
self.set_axis_layout(self.yaxis)
return self.layout
def get_dendrogram_traces(self, X, colorscale, distfun, linkagefun, threshold):
"""
Calculates all the elements needed for plotting a dendrogram.
:param (ndarray) X: Matrix of observations as array of arrays
:param (list) colorscale: Color scale for dendrogram tree clusters
:param (function) distfun: Function to compute the pairwise distance
from the observations
:param (function) linkagefun: Function to compute the linkage matrix
from the pairwise distances
:rtype (tuple): Contains all the traces in the following order:
(a) trace_list: List of Plotly trace objects for dendrogram tree
(b) icoord: All X points of the dendrogram tree as array of arrays
with length 4
(c) dcoord: All Y points of the dendrogram tree as array of arrays
with length 4
(d) ordered_labels: leaf labels in the order they are going to
appear on the plot
(e) P['leaves']: left-to-right traversal of the leaves
"""
d = distfun(X)
Z = linkagefun(d)
P = sch.dendrogram(Z, orientation=self.orientation, color_threshold=threshold,
labels=self.labels, no_plot=True)
icoord = scp.array(P['icoord'])
dcoord = scp.array(P['dcoord'])
ordered_labels = scp.array(P['ivl'])
color_list = scp.array(P['color_list'])
colors = self.get_color_dict(colorscale)
trace_list = []
for i in range(len(icoord)):
# xs and ys are arrays of 4 points that make up the '∩' shapes
# of the dendrogram tree
if self.orientation in ['top', 'bottom']:
xs = icoord[i]
else:
xs = dcoord[i]
if self.orientation in ['top', 'bottom']:
ys = dcoord[i]
else:
ys = icoord[i]
color_key = color_list[i]
trace = graph_objs.Scatter(
x=np.multiply(self.sign[self.xaxis], xs),
y=np.multiply(self.sign[self.yaxis], ys),
mode='lines',
marker=graph_objs.Marker(color=colors[color_key])
)
try:
x_index = int(self.xaxis[-1])
except ValueError:
x_index = ''
try:
y_index = int(self.yaxis[-1])
except ValueError:
y_index = ''
trace['xaxis'] = 'x' + x_index
trace['yaxis'] = 'y' + y_index
trace_list.append(trace)
return trace_list, icoord, dcoord, ordered_labels, P['leaves']
| gpl-3.0 |
Vimos/scikit-learn | sklearn/linear_model/tests/test_sparse_coordinate_descent.py | 94 | 10801 | import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import ignore_warnings
from sklearn.linear_model.coordinate_descent import (Lasso, ElasticNet,
LassoCV, ElasticNetCV)
def test_sparse_coef():
# Check that the sparse_coef property works
clf = ElasticNet()
clf.coef_ = [1, 2, 3]
assert_true(sp.isspmatrix(clf.sparse_coef_))
assert_equal(clf.sparse_coef_.toarray().tolist()[0], clf.coef_)
def test_normalize_option():
# Check that the normalize option in enet works
X = sp.csc_matrix([[-1], [0], [1]])
y = [-1, 0, 1]
clf_dense = ElasticNet(fit_intercept=True, normalize=True)
clf_sparse = ElasticNet(fit_intercept=True, normalize=True)
clf_dense.fit(X, y)
X = sp.csc_matrix(X)
clf_sparse.fit(X, y)
assert_almost_equal(clf_dense.dual_gap_, 0)
assert_array_almost_equal(clf_dense.coef_, clf_sparse.coef_)
def test_lasso_zero():
# Check that the sparse lasso can handle zero data without crashing
X = sp.csc_matrix((3, 1))
y = [0, 0, 0]
T = np.array([[1], [2], [3]])
clf = Lasso().fit(X, y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0])
assert_array_almost_equal(pred, [0, 0, 0])
assert_almost_equal(clf.dual_gap_, 0)
def test_enet_toy_list_input():
# Test ElasticNet for various values of alpha and l1_ratio with list X
X = np.array([[-1], [0], [1]])
X = sp.csc_matrix(X)
Y = [-1, 0, 1] # just a straight line
T = np.array([[2], [3], [4]]) # test sample
# this should be the same as unregularized least squares
clf = ElasticNet(alpha=0, l1_ratio=1.0)
# catch warning about alpha=0.
# this is discouraged but should work.
ignore_warnings(clf.fit)(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(pred, [2, 3, 4])
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.3, max_iter=1000)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.45454], 3)
assert_array_almost_equal(pred, [0.9090, 1.3636, 1.8181], 3)
assert_almost_equal(clf.dual_gap_, 0)
def test_enet_toy_explicit_sparse_input():
# Test ElasticNet for various values of alpha and l1_ratio with sparse X
f = ignore_warnings
# training samples
X = sp.lil_matrix((3, 1))
X[0, 0] = -1
# X[1, 0] = 0
X[2, 0] = 1
Y = [-1, 0, 1] # just a straight line (the identity function)
# test samples
T = sp.lil_matrix((3, 1))
T[0, 0] = 2
T[1, 0] = 3
T[2, 0] = 4
# this should be the same as lasso
clf = ElasticNet(alpha=0, l1_ratio=1.0)
f(clf.fit)(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(pred, [2, 3, 4])
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.3, max_iter=1000)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.45454], 3)
assert_array_almost_equal(pred, [0.9090, 1.3636, 1.8181], 3)
assert_almost_equal(clf.dual_gap_, 0)
def make_sparse_data(n_samples=100, n_features=100, n_informative=10, seed=42,
positive=False, n_targets=1):
random_state = np.random.RandomState(seed)
# build an ill-posed linear regression problem with many noisy features and
# comparatively few samples
# generate a ground truth model
w = random_state.randn(n_features, n_targets)
w[n_informative:] = 0.0 # only the top features are impacting the model
if positive:
w = np.abs(w)
X = random_state.randn(n_samples, n_features)
rnd = random_state.uniform(size=(n_samples, n_features))
X[rnd > 0.5] = 0.0 # 50% of zeros in input signal
# generate training ground truth labels
y = np.dot(X, w)
X = sp.csc_matrix(X)
if n_targets == 1:
y = np.ravel(y)
return X, y
def _test_sparse_enet_not_as_toy_dataset(alpha, fit_intercept, positive):
n_samples, n_features, max_iter = 100, 100, 1000
n_informative = 10
X, y = make_sparse_data(n_samples, n_features, n_informative,
positive=positive)
X_train, X_test = X[n_samples // 2:], X[:n_samples // 2]
y_train, y_test = y[n_samples // 2:], y[:n_samples // 2]
s_clf = ElasticNet(alpha=alpha, l1_ratio=0.8, fit_intercept=fit_intercept,
max_iter=max_iter, tol=1e-7, positive=positive,
warm_start=True)
s_clf.fit(X_train, y_train)
assert_almost_equal(s_clf.dual_gap_, 0, 4)
assert_greater(s_clf.score(X_test, y_test), 0.85)
# check the convergence is the same as the dense version
d_clf = ElasticNet(alpha=alpha, l1_ratio=0.8, fit_intercept=fit_intercept,
max_iter=max_iter, tol=1e-7, positive=positive,
warm_start=True)
d_clf.fit(X_train.toarray(), y_train)
assert_almost_equal(d_clf.dual_gap_, 0, 4)
assert_greater(d_clf.score(X_test, y_test), 0.85)
assert_almost_equal(s_clf.coef_, d_clf.coef_, 5)
assert_almost_equal(s_clf.intercept_, d_clf.intercept_, 5)
# check that the coefs are sparse
assert_less(np.sum(s_clf.coef_ != 0.0), 2 * n_informative)
def test_sparse_enet_not_as_toy_dataset():
_test_sparse_enet_not_as_toy_dataset(alpha=0.1, fit_intercept=False,
positive=False)
_test_sparse_enet_not_as_toy_dataset(alpha=0.1, fit_intercept=True,
positive=False)
_test_sparse_enet_not_as_toy_dataset(alpha=1e-3, fit_intercept=False,
positive=True)
_test_sparse_enet_not_as_toy_dataset(alpha=1e-3, fit_intercept=True,
positive=True)
def test_sparse_lasso_not_as_toy_dataset():
n_samples = 100
max_iter = 1000
n_informative = 10
X, y = make_sparse_data(n_samples=n_samples, n_informative=n_informative)
X_train, X_test = X[n_samples // 2:], X[:n_samples // 2]
y_train, y_test = y[n_samples // 2:], y[:n_samples // 2]
s_clf = Lasso(alpha=0.1, fit_intercept=False, max_iter=max_iter, tol=1e-7)
s_clf.fit(X_train, y_train)
assert_almost_equal(s_clf.dual_gap_, 0, 4)
assert_greater(s_clf.score(X_test, y_test), 0.85)
# check the convergence is the same as the dense version
d_clf = Lasso(alpha=0.1, fit_intercept=False, max_iter=max_iter, tol=1e-7)
d_clf.fit(X_train.toarray(), y_train)
assert_almost_equal(d_clf.dual_gap_, 0, 4)
assert_greater(d_clf.score(X_test, y_test), 0.85)
# check that the coefs are sparse
assert_equal(np.sum(s_clf.coef_ != 0.0), n_informative)
def test_enet_multitarget():
n_targets = 3
X, y = make_sparse_data(n_targets=n_targets)
estimator = ElasticNet(alpha=0.01, fit_intercept=True, precompute=None)
# XXX: There is a bug when precompute is not None!
estimator.fit(X, y)
coef, intercept, dual_gap = (estimator.coef_,
estimator.intercept_,
estimator.dual_gap_)
for k in range(n_targets):
estimator.fit(X, y[:, k])
assert_array_almost_equal(coef[k, :], estimator.coef_)
assert_array_almost_equal(intercept[k], estimator.intercept_)
assert_array_almost_equal(dual_gap[k], estimator.dual_gap_)
def test_path_parameters():
X, y = make_sparse_data()
max_iter = 50
n_alphas = 10
clf = ElasticNetCV(n_alphas=n_alphas, eps=1e-3, max_iter=max_iter,
l1_ratio=0.5, fit_intercept=False)
ignore_warnings(clf.fit)(X, y) # new params
assert_almost_equal(0.5, clf.l1_ratio)
assert_equal(n_alphas, clf.n_alphas)
assert_equal(n_alphas, len(clf.alphas_))
sparse_mse_path = clf.mse_path_
ignore_warnings(clf.fit)(X.toarray(), y) # compare with dense data
assert_almost_equal(clf.mse_path_, sparse_mse_path)
def test_same_output_sparse_dense_lasso_and_enet_cv():
X, y = make_sparse_data(n_samples=40, n_features=10)
for normalize in [True, False]:
clfs = ElasticNetCV(max_iter=100, cv=5, normalize=normalize)
ignore_warnings(clfs.fit)(X, y)
clfd = ElasticNetCV(max_iter=100, cv=5, normalize=normalize)
ignore_warnings(clfd.fit)(X.toarray(), y)
assert_almost_equal(clfs.alpha_, clfd.alpha_, 7)
assert_almost_equal(clfs.intercept_, clfd.intercept_, 7)
assert_array_almost_equal(clfs.mse_path_, clfd.mse_path_)
assert_array_almost_equal(clfs.alphas_, clfd.alphas_)
clfs = LassoCV(max_iter=100, cv=4, normalize=normalize)
ignore_warnings(clfs.fit)(X, y)
clfd = LassoCV(max_iter=100, cv=4, normalize=normalize)
ignore_warnings(clfd.fit)(X.toarray(), y)
assert_almost_equal(clfs.alpha_, clfd.alpha_, 7)
assert_almost_equal(clfs.intercept_, clfd.intercept_, 7)
assert_array_almost_equal(clfs.mse_path_, clfd.mse_path_)
assert_array_almost_equal(clfs.alphas_, clfd.alphas_)
def test_same_multiple_output_sparse_dense():
for normalize in [True, False]:
l = ElasticNet(normalize=normalize)
X = [[0, 1, 2, 3, 4],
[0, 2, 5, 8, 11],
[9, 10, 11, 12, 13],
[10, 11, 12, 13, 14]]
y = [[1, 2, 3, 4, 5],
[1, 3, 6, 9, 12],
[10, 11, 12, 13, 14],
[11, 12, 13, 14, 15]]
ignore_warnings(l.fit)(X, y)
sample = np.array([1, 2, 3, 4, 5]).reshape(1, -1)
predict_dense = l.predict(sample)
l_sp = ElasticNet(normalize=normalize)
X_sp = sp.coo_matrix(X)
ignore_warnings(l_sp.fit)(X_sp, y)
sample_sparse = sp.coo_matrix(sample)
predict_sparse = l_sp.predict(sample_sparse)
assert_array_almost_equal(predict_sparse, predict_dense)
| bsd-3-clause |
adamlek/distributionalsemantics | tester.py | 1 | 6716 | # -*- coding: utf-8 -*-
"""
Created on Tue Oct 25 18:51:47 2016
@author: Adam Ek
"""
from WSM import DataReader
from WSM import RandomVectorizer
from WSM import TermRelevance
from WSM import Contexter
from WSM import Similarity
from WSM import DataOptions
import tsne
import matplotlib.pyplot as plt
import numpy as np
import csv
import scipy.stats as st
def main():
# load_oldsave()
new_save()
def load_oldsave():
do = DataOptions()
vectors, documents, data_info = do.load('tester')
sim = Similarity(vectors)
words1 = ['the', 'when', 'speak', 'run', 'high', 'flow', 'love', 'he']
words2 = ['and', 'where', 'talk', 'walk', 'low', 'water', 'hate', 'she']
for w1, w2 in zip(words1, words2):
print(w1, w2)
s = sim.cosine_similarity(w1, w2)
print(s)
def new_save():
plotting = False
teststuff = True
save = False
# dataset1 = ['/home/usr1/git/dist_data/test_doc_5.txt']
# dataset1 = ['/home/usr1/git/dist_data/austen-emma.txt', '/home/usr1/git/dist_data/austen-persuasion.txt', '/home/usr1/git/dist_data/austen-sense.txt']
# dataset1 = ['/home/usr1/git/dist_data/reut1.txt', '/home/usr1/git/dist_data/reut2.txt']
# dataset1 = ['/home/usr1/git/dist_data/formatted2.txt', '/home/usr1/git/dist_data/reut1.txt', '/home/usr1/git/dist_data/reut2.txt']
dataset1 = ['/home/usr1/git/dist_data/form2.txt']
#DATAREADER
##################################################
dr = DataReader(docsentences=False)
#read the file dataset1 and output all sentences, all words, and information about word count/documents
sentences, vocabulary, documents = dr.preprocess_data(dataset1)
# for sent in sentences:
# print(sent, '\n')
print('sentences:\t', len(sentences))
t = 0
for v in documents:
t += sum(documents[v].values())
print('total tokens:\t', t)
print('total types:\t', len(vocabulary))
print('reading file done\n')
##################################################
#SETTINGS
w, t, s = 1, 0, 2
d, r = 1024, 6
si1, si2, si3 = 1, 5, 10
print('weighting:\t', w, t, s)
print('vectors: \t', d, r)
print('sizes: \t', si1, si2, si3)
##RANDOMVECTORIZER
###################################################
rv = RandomVectorizer(dimensions=d, random_elements=r)
#create word and random vectors for the strings in vocabulary
vectors = rv.vocabulary_vectorizer(vocabulary)
print('Vectors done')
###################################################
#
##WEIGHTER
###################################################
#init Weighter, with scheme 0 and don't do idf
tr = TermRelevance(documents, scheme=w, doidf=False, smooth_idf=False)
#weight the dictionary of vectors
vectors = tr.weight(vectors)
print('Weights done')
for v in tr.word_weights:
for d in documents:
cv = documents[d][v]
print(v, tr.word_weights[v], cv)
##################################################
#
##CONTEXTER
##################################################
#Init Contexter
cont1 = Contexter(vectors, contexttype=t, window=si1, context_scope=s)
# cont5 = Contexter(vectors, contexttype=t, window=si2, context_scope=s)
# cont10 = Contexter(vectors, contexttype=t, window=si3, context_scope=s)
vector_vocabulary1 = cont1.process_data(sentences)
# vector_vocabulary5 = cont5.process_data(sentences)
# vector_vocabulary10 = cont10.process_data(sentences)
#poor computah :()
print('Reading contexts done')
# cont_dict1 = cont1.vocabt
# cont_dict5 = cont5.vocabt
# cont_dict10 = cont10.vocabt
# ###PPMI of co-occurence
# xpmi1 = cont1.PPMImatrix(cont_dict1, documents)
# xpmi5 = cont5.PPMImatrix(cont_dict5, documents)
# xpmi10 = cont10.PPMImatrix(cont_dict10, documents)
###################################################
#
##DATAOPTIONS
###################################################
# # initialize DataOptions for saving/extracting information
# do = DataOptions()
# # save the data
# do.save('tester', vector_vocabulary5, documents, cont5.data_info, wgt.weight_setup)
###################################################
##SIMILARITY
###################################################
#Initialize similarity class
# sim1 = Similarity(vector_vocabulary1, pmi = xpmi1)
# sim5 = Similarity(vector_vocabulary5, pmi = xpmi5)
# sim10 = Similarity(vector_vocabulary10, pmi = xpmi10)
sim1 = Similarity(vector_vocabulary1)
# sim5 = Similarity(vector_vocabulary5)
# sim10 = Similarity(vector_vocabulary10)
if teststuff:
humanv = []
riv1 = []
riv5 = []
riv10 = []
with open('/home/usr1/git/dist_data/combined.csv') as f:
for i, ln in enumerate(f):
ln = ln.lower().rstrip().split(',')
try:
riv1.append(float(sim1.cosine_similarity(ln[0], ln[1])))
# riv5.append(float(sim5.cosine_similarity(ln[0], ln[1])))
# riv10.append(float(sim10.cosine_similarity(ln[0], ln[1])))
humanv.append(float(ln[2]))
except Exception as e:
continue
print(len(humanv), len(riv1), len(riv5), len(riv10))
print(st.stats.spearmanr(humanv, riv1))
print('pearson r, p-val', st.pearsonr(humanv,riv1), si1, '\n')
# print(st.stats.spearmanr(humanv, riv5))
# print('pearson r, p-val', st.pearsonr(humanv,riv5), si2, '\n')
# print(st.stats.spearmanr(humanv, riv10))
# print('pearson r, p-val', st.pearsonr(humanv,riv10), si3, '\n')
#PEARSON SPEARMAN TESTING
#################################################
# if save:
# with open('/home/usr1/git/dist_data/combined1.csv') as f:
# csv_w = csv.writer(f, delimiter=',')
# for i, v in humanv:
# scv_w.writerow(v, riv1[i], riv5[i], riv10[i])
#TSNE PLOTTING
##################################################
if plotting:
ar = []
lbs = []
for i, v in enumerate(vector_vocabulary1):
if i%100 == 0:
ar.append(vector_vocabulary1[v])
lbs.append(v)
Y = tsne.tsne(np.array(ar), 2, 50, 20.0)
fig, ax = plt.subplots()
ax.scatter(Y[:,0], Y[:,1], 20)
for i, name in enumerate(lbs):
ax.annotate(name, (Y[i][0], Y[i][1]))
plt.show()
if __name__ == '__main__':
main() | mit |
Denisolt/Tensorflow_Chat_Bot | local/lib/python2.7/site-packages/scipy/ndimage/filters.py | 24 | 42327 | # Copyright (C) 2003-2005 Peter J. Verveer
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# 3. The name of the author may not be used to endorse or promote
# products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import division, print_function, absolute_import
import math
import numpy
from . import _ni_support
from . import _nd_image
from scipy.misc import doccer
from scipy._lib._version import NumpyVersion
__all__ = ['correlate1d', 'convolve1d', 'gaussian_filter1d', 'gaussian_filter',
'prewitt', 'sobel', 'generic_laplace', 'laplace',
'gaussian_laplace', 'generic_gradient_magnitude',
'gaussian_gradient_magnitude', 'correlate', 'convolve',
'uniform_filter1d', 'uniform_filter', 'minimum_filter1d',
'maximum_filter1d', 'minimum_filter', 'maximum_filter',
'rank_filter', 'median_filter', 'percentile_filter',
'generic_filter1d', 'generic_filter']
_input_doc = \
"""input : array_like
Input array to filter."""
_axis_doc = \
"""axis : int, optional
The axis of `input` along which to calculate. Default is -1."""
_output_doc = \
"""output : array, optional
The `output` parameter passes an array in which to store the
filter output."""
_size_foot_doc = \
"""size : scalar or tuple, optional
See footprint, below
footprint : array, optional
Either `size` or `footprint` must be defined. `size` gives
the shape that is taken from the input array, at every element
position, to define the input to the filter function.
`footprint` is a boolean array that specifies (implicitly) a
shape, but also which of the elements within this shape will get
passed to the filter function. Thus ``size=(n,m)`` is equivalent
to ``footprint=np.ones((n,m))``. We adjust `size` to the number
of dimensions of the input array, so that, if the input array is
shape (10,10,10), and `size` is 2, then the actual size used is
(2,2,2).
"""
_mode_doc = \
"""mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional
The `mode` parameter determines how the array borders are
handled, where `cval` is the value when mode is equal to
'constant'. Default is 'reflect'"""
_cval_doc = \
"""cval : scalar, optional
Value to fill past edges of input if `mode` is 'constant'. Default
is 0.0"""
_origin_doc = \
"""origin : scalar, optional
The `origin` parameter controls the placement of the filter.
        Default is 0."""
_extra_arguments_doc = \
"""extra_arguments : sequence, optional
Sequence of extra positional arguments to pass to passed function"""
_extra_keywords_doc = \
"""extra_keywords : dict, optional
dict of extra keyword arguments to pass to passed function"""
docdict = {
'input': _input_doc,
'axis': _axis_doc,
'output': _output_doc,
'size_foot': _size_foot_doc,
'mode': _mode_doc,
'cval': _cval_doc,
'origin': _origin_doc,
'extra_arguments': _extra_arguments_doc,
'extra_keywords': _extra_keywords_doc,
}
docfiller = doccer.filldoc(docdict)
@docfiller
def correlate1d(input, weights, axis=-1, output=None, mode="reflect",
cval=0.0, origin=0):
"""Calculate a one-dimensional correlation along the given axis.
The lines of the array along the given axis are correlated with the
given weights.
Parameters
----------
%(input)s
weights : array
One-dimensional sequence of numbers.
%(axis)s
%(output)s
%(mode)s
%(cval)s
%(origin)s
"""
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
output, return_value = _ni_support._get_output(output, input)
weights = numpy.asarray(weights, dtype=numpy.float64)
if weights.ndim != 1 or weights.shape[0] < 1:
raise RuntimeError('no filter weights given')
if not weights.flags.contiguous:
weights = weights.copy()
axis = _ni_support._check_axis(axis, input.ndim)
if (len(weights) // 2 + origin < 0) or (len(weights) // 2 +
origin > len(weights)):
raise ValueError('invalid origin')
mode = _ni_support._extend_mode_to_code(mode)
_nd_image.correlate1d(input, weights, axis, output, mode, cval,
origin)
return return_value
@docfiller
def convolve1d(input, weights, axis=-1, output=None, mode="reflect",
cval=0.0, origin=0):
"""Calculate a one-dimensional convolution along the given axis.
The lines of the array along the given axis are convolved with the
given weights.
Parameters
----------
%(input)s
weights : ndarray
One-dimensional sequence of numbers.
%(axis)s
%(output)s
%(mode)s
%(cval)s
%(origin)s
Returns
-------
convolve1d : ndarray
Convolved array with same shape as input
"""
weights = weights[::-1]
origin = -origin
if not len(weights) & 1:
origin -= 1
return correlate1d(input, weights, axis, output, mode, cval, origin)
@docfiller
def gaussian_filter1d(input, sigma, axis=-1, order=0, output=None,
mode="reflect", cval=0.0, truncate=4.0):
"""One-dimensional Gaussian filter.
Parameters
----------
%(input)s
sigma : scalar
standard deviation for Gaussian kernel
%(axis)s
order : {0, 1, 2, 3}, optional
An order of 0 corresponds to convolution with a Gaussian
kernel. An order of 1, 2, or 3 corresponds to convolution with
the first, second or third derivatives of a Gaussian. Higher
order derivatives are not implemented
%(output)s
%(mode)s
%(cval)s
truncate : float, optional
Truncate the filter at this many standard deviations.
Default is 4.0.
Returns
-------
gaussian_filter1d : ndarray
"""
if order not in range(4):
raise ValueError('Order outside 0..3 not implemented')
sd = float(sigma)
# make the radius of the filter equal to truncate standard deviations
lw = int(truncate * sd + 0.5)
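    # e.g. sigma=2.0 with the default truncate=4.0 gives lw = int(8.5) = 8,
    # i.e. a symmetric kernel of 2 * 8 + 1 = 17 taps.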
weights = [0.0] * (2 * lw + 1)
weights[lw] = 1.0
sum = 1.0
sd = sd * sd
# calculate the kernel:
for ii in range(1, lw + 1):
tmp = math.exp(-0.5 * float(ii * ii) / sd)
weights[lw + ii] = tmp
weights[lw - ii] = tmp
sum += 2.0 * tmp
for ii in range(2 * lw + 1):
weights[ii] /= sum
# implement first, second and third order derivatives:
if order == 1: # first derivative
weights[lw] = 0.0
for ii in range(1, lw + 1):
x = float(ii)
tmp = -x / sd * weights[lw + ii]
weights[lw + ii] = -tmp
weights[lw - ii] = tmp
elif order == 2: # second derivative
weights[lw] *= -1.0 / sd
for ii in range(1, lw + 1):
x = float(ii)
tmp = (x * x / sd - 1.0) * weights[lw + ii] / sd
weights[lw + ii] = tmp
weights[lw - ii] = tmp
elif order == 3: # third derivative
weights[lw] = 0.0
sd2 = sd * sd
for ii in range(1, lw + 1):
x = float(ii)
tmp = (3.0 - x * x / sd) * x * weights[lw + ii] / sd2
weights[lw + ii] = -tmp
weights[lw - ii] = tmp
return correlate1d(input, weights, axis, output, mode, cval, 0)
@docfiller
def gaussian_filter(input, sigma, order=0, output=None,
mode="reflect", cval=0.0, truncate=4.0):
"""Multidimensional Gaussian filter.
Parameters
----------
%(input)s
sigma : scalar or sequence of scalars
Standard deviation for Gaussian kernel. The standard
deviations of the Gaussian filter are given for each axis as a
sequence, or as a single number, in which case it is equal for
all axes.
order : {0, 1, 2, 3} or sequence from same set, optional
The order of the filter along each axis is given as a sequence
of integers, or as a single number. An order of 0 corresponds
to convolution with a Gaussian kernel. An order of 1, 2, or 3
corresponds to convolution with the first, second or third
derivatives of a Gaussian. Higher order derivatives are not
implemented
%(output)s
%(mode)s
%(cval)s
truncate : float
Truncate the filter at this many standard deviations.
Default is 4.0.
Returns
-------
gaussian_filter : ndarray
Returned array of same shape as `input`.
Notes
-----
The multidimensional filter is implemented as a sequence of
one-dimensional convolution filters. The intermediate arrays are
stored in the same data type as the output. Therefore, for output
types with a limited precision, the results may be imprecise
because intermediate results may be stored with insufficient
precision.
Examples
--------
>>> from scipy.ndimage import gaussian_filter
>>> a = np.arange(50, step=2).reshape((5,5))
>>> a
array([[ 0, 2, 4, 6, 8],
[10, 12, 14, 16, 18],
[20, 22, 24, 26, 28],
[30, 32, 34, 36, 38],
[40, 42, 44, 46, 48]])
>>> gaussian_filter(a, sigma=1)
array([[ 4, 6, 8, 9, 11],
[10, 12, 14, 15, 17],
[20, 22, 24, 25, 27],
[29, 31, 33, 34, 36],
[35, 37, 39, 40, 42]])
"""
input = numpy.asarray(input)
output, return_value = _ni_support._get_output(output, input)
orders = _ni_support._normalize_sequence(order, input.ndim)
if not set(orders).issubset(set(range(4))):
        raise ValueError('Order outside 0..3 not implemented')
sigmas = _ni_support._normalize_sequence(sigma, input.ndim)
axes = list(range(input.ndim))
axes = [(axes[ii], sigmas[ii], orders[ii])
for ii in range(len(axes)) if sigmas[ii] > 1e-15]
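    # Axes with a negligible sigma (<= 1e-15) are skipped; the remaining axes
    # are smoothed one at a time with 1-D Gaussian filters, writing into
    # `output` and reusing it as the input for the next pass.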
if len(axes) > 0:
for axis, sigma, order in axes:
gaussian_filter1d(input, sigma, axis, order, output,
mode, cval, truncate)
input = output
else:
output[...] = input[...]
return return_value
@docfiller
def prewitt(input, axis=-1, output=None, mode="reflect", cval=0.0):
"""Calculate a Prewitt filter.
Parameters
----------
%(input)s
%(axis)s
%(output)s
%(mode)s
%(cval)s
Examples
--------
>>> from scipy import ndimage, misc
>>> import matplotlib.pyplot as plt
>>> ascent = misc.ascent()
>>> result = ndimage.prewitt(ascent)
>>> plt.gray() # show the filtered result in grayscale
>>> plt.imshow(result)
"""
input = numpy.asarray(input)
axis = _ni_support._check_axis(axis, input.ndim)
output, return_value = _ni_support._get_output(output, input)
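    # Separable Prewitt kernel: a central difference [-1, 0, 1] along `axis`,
    # then a uniform smoothing [1, 1, 1] along every remaining axis.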
correlate1d(input, [-1, 0, 1], axis, output, mode, cval, 0)
axes = [ii for ii in range(input.ndim) if ii != axis]
for ii in axes:
correlate1d(output, [1, 1, 1], ii, output, mode, cval, 0,)
return return_value
@docfiller
def sobel(input, axis=-1, output=None, mode="reflect", cval=0.0):
"""Calculate a Sobel filter.
Parameters
----------
%(input)s
%(axis)s
%(output)s
%(mode)s
%(cval)s
Examples
--------
>>> from scipy import ndimage, misc
>>> import matplotlib.pyplot as plt
>>> ascent = misc.ascent()
>>> result = ndimage.sobel(ascent)
>>> plt.gray() # show the filtered result in grayscale
>>> plt.imshow(result)
"""
input = numpy.asarray(input)
axis = _ni_support._check_axis(axis, input.ndim)
output, return_value = _ni_support._get_output(output, input)
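    # Same structure as the Prewitt filter, but with the triangular smoothing
    # kernel [1, 2, 1] along the non-derivative axes.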
correlate1d(input, [-1, 0, 1], axis, output, mode, cval, 0)
axes = [ii for ii in range(input.ndim) if ii != axis]
for ii in axes:
correlate1d(output, [1, 2, 1], ii, output, mode, cval, 0)
return return_value
@docfiller
def generic_laplace(input, derivative2, output=None, mode="reflect",
cval=0.0,
extra_arguments=(),
                    extra_keywords=None):
"""N-dimensional Laplace filter using a provided second derivative function
Parameters
----------
%(input)s
derivative2 : callable
Callable with the following signature::
derivative2(input, axis, output, mode, cval,
*extra_arguments, **extra_keywords)
See `extra_arguments`, `extra_keywords` below.
%(output)s
%(mode)s
%(cval)s
%(extra_keywords)s
%(extra_arguments)s
"""
if extra_keywords is None:
extra_keywords = {}
input = numpy.asarray(input)
output, return_value = _ni_support._get_output(output, input)
axes = list(range(input.ndim))
if len(axes) > 0:
derivative2(input, axes[0], output, mode, cval,
*extra_arguments, **extra_keywords)
for ii in range(1, len(axes)):
tmp = derivative2(input, axes[ii], output.dtype, mode, cval,
*extra_arguments, **extra_keywords)
output += tmp
else:
output[...] = input[...]
return return_value
@docfiller
def laplace(input, output=None, mode="reflect", cval=0.0):
"""N-dimensional Laplace filter based on approximate second derivatives.
Parameters
----------
%(input)s
%(output)s
%(mode)s
%(cval)s
Examples
--------
>>> from scipy import ndimage, misc
>>> import matplotlib.pyplot as plt
>>> ascent = misc.ascent()
>>> result = ndimage.laplace(ascent)
>>> plt.gray() # show the filtered result in grayscale
>>> plt.imshow(result)
"""
def derivative2(input, axis, output, mode, cval):
return correlate1d(input, [1, -2, 1], axis, output, mode, cval, 0)
return generic_laplace(input, derivative2, output, mode, cval)
@docfiller
def gaussian_laplace(input, sigma, output=None, mode="reflect",
cval=0.0, **kwargs):
"""Multidimensional Laplace filter using gaussian second derivatives.
Parameters
----------
%(input)s
sigma : scalar or sequence of scalars
The standard deviations of the Gaussian filter are given for
each axis as a sequence, or as a single number, in which case
it is equal for all axes.
%(output)s
%(mode)s
%(cval)s
Extra keyword arguments will be passed to gaussian_filter().
Examples
--------
>>> from scipy import ndimage, misc
>>> import matplotlib.pyplot as plt
>>> ascent = misc.ascent()
>>> fig = plt.figure()
>>> plt.gray() # show the filtered result in grayscale
>>> ax1 = fig.add_subplot(121) # left side
>>> ax2 = fig.add_subplot(122) # right side
>>> result = ndimage.gaussian_laplace(ascent, sigma=1)
>>> ax1.imshow(result)
>>> result = ndimage.gaussian_laplace(ascent, sigma=3)
>>> ax2.imshow(result)
>>> plt.show()
"""
input = numpy.asarray(input)
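    # Gaussian second derivative along one axis at a time: `order` is 2 on
    # the current axis and 0 elsewhere, and generic_laplace sums the results.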
def derivative2(input, axis, output, mode, cval, sigma, **kwargs):
order = [0] * input.ndim
order[axis] = 2
return gaussian_filter(input, sigma, order, output, mode, cval,
**kwargs)
return generic_laplace(input, derivative2, output, mode, cval,
extra_arguments=(sigma,),
extra_keywords=kwargs)
@docfiller
def generic_gradient_magnitude(input, derivative, output=None,
mode="reflect", cval=0.0,
                               extra_arguments=(), extra_keywords=None):
"""Gradient magnitude using a provided gradient function.
Parameters
----------
%(input)s
derivative : callable
Callable with the following signature::
derivative(input, axis, output, mode, cval,
*extra_arguments, **extra_keywords)
See `extra_arguments`, `extra_keywords` below.
`derivative` can assume that `input` and `output` are ndarrays.
Note that the output from `derivative` is modified inplace;
be careful to copy important inputs before returning them.
%(output)s
%(mode)s
%(cval)s
%(extra_keywords)s
%(extra_arguments)s
"""
if extra_keywords is None:
extra_keywords = {}
input = numpy.asarray(input)
output, return_value = _ni_support._get_output(output, input)
axes = list(range(input.ndim))
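    # Accumulate the squared derivative along each axis into `output`, then
    # take the square root, i.e. sqrt(sum_i (d input / d x_i)**2).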
if len(axes) > 0:
derivative(input, axes[0], output, mode, cval,
*extra_arguments, **extra_keywords)
numpy.multiply(output, output, output)
for ii in range(1, len(axes)):
tmp = derivative(input, axes[ii], output.dtype, mode, cval,
*extra_arguments, **extra_keywords)
numpy.multiply(tmp, tmp, tmp)
output += tmp
# This allows the sqrt to work with a different default casting
numpy.sqrt(output, output, casting='unsafe')
else:
output[...] = input[...]
return return_value
@docfiller
def gaussian_gradient_magnitude(input, sigma, output=None,
mode="reflect", cval=0.0, **kwargs):
"""Multidimensional gradient magnitude using Gaussian derivatives.
Parameters
----------
%(input)s
sigma : scalar or sequence of scalars
The standard deviations of the Gaussian filter are given for
each axis as a sequence, or as a single number, in which case
        it is equal for all axes.
%(output)s
%(mode)s
%(cval)s
Extra keyword arguments will be passed to gaussian_filter().
"""
input = numpy.asarray(input)
def derivative(input, axis, output, mode, cval, sigma, **kwargs):
order = [0] * input.ndim
order[axis] = 1
return gaussian_filter(input, sigma, order, output, mode,
cval, **kwargs)
return generic_gradient_magnitude(input, derivative, output, mode,
cval, extra_arguments=(sigma,),
extra_keywords=kwargs)
def _correlate_or_convolve(input, weights, output, mode, cval, origin,
convolution):
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
origins = _ni_support._normalize_sequence(origin, input.ndim)
weights = numpy.asarray(weights, dtype=numpy.float64)
wshape = [ii for ii in weights.shape if ii > 0]
if len(wshape) != input.ndim:
raise RuntimeError('filter weights array has incorrect shape.')
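    # For convolution, flip the kernel along every axis and mirror the origins
    # (with the usual one-sample shift on even-sized axes) so the result
    # matches a correlation with the reversed kernel.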
if convolution:
weights = weights[tuple([slice(None, None, -1)] * weights.ndim)]
for ii in range(len(origins)):
origins[ii] = -origins[ii]
if not weights.shape[ii] & 1:
origins[ii] -= 1
for origin, lenw in zip(origins, wshape):
if (lenw // 2 + origin < 0) or (lenw // 2 + origin > lenw):
raise ValueError('invalid origin')
if not weights.flags.contiguous:
weights = weights.copy()
output, return_value = _ni_support._get_output(output, input)
mode = _ni_support._extend_mode_to_code(mode)
_nd_image.correlate(input, weights, output, mode, cval, origins)
return return_value
@docfiller
def correlate(input, weights, output=None, mode='reflect', cval=0.0,
origin=0):
"""
Multi-dimensional correlation.
The array is correlated with the given kernel.
Parameters
----------
input : array-like
input array to filter
weights : ndarray
array of weights, same number of dimensions as input
output : array, optional
The ``output`` parameter passes an array in which to store the
filter output.
mode : {'reflect','constant','nearest','mirror', 'wrap'}, optional
The ``mode`` parameter determines how the array borders are
handled, where ``cval`` is the value when mode is equal to
'constant'. Default is 'reflect'
cval : scalar, optional
Value to fill past edges of input if ``mode`` is 'constant'. Default
is 0.0
origin : scalar, optional
The ``origin`` parameter controls the placement of the filter.
Default 0
See Also
--------
convolve : Convolve an image with a kernel.
"""
return _correlate_or_convolve(input, weights, output, mode, cval,
origin, False)
@docfiller
def convolve(input, weights, output=None, mode='reflect', cval=0.0,
origin=0):
"""
Multidimensional convolution.
The array is convolved with the given kernel.
Parameters
----------
input : array_like
Input array to filter.
weights : array_like
Array of weights, same number of dimensions as input
output : ndarray, optional
The `output` parameter passes an array in which to store the
filter output.
mode : {'reflect','constant','nearest','mirror', 'wrap'}, optional
the `mode` parameter determines how the array borders are
handled. For 'constant' mode, values beyond borders are set to be
`cval`. Default is 'reflect'.
cval : scalar, optional
Value to fill past edges of input if `mode` is 'constant'. Default
is 0.0
origin : array_like, optional
The `origin` parameter controls the placement of the filter,
relative to the centre of the current element of the input.
Default of 0 is equivalent to ``(0,)*input.ndim``.
Returns
-------
result : ndarray
The result of convolution of `input` with `weights`.
See Also
--------
correlate : Correlate an image with a kernel.
Notes
-----
Each value in result is :math:`C_i = \\sum_j{I_{i+k-j} W_j}`, where
W is the `weights` kernel,
j is the n-D spatial index over :math:`W`,
I is the `input` and k is the coordinate of the center of
W, specified by `origin` in the input parameters.
Examples
--------
    Perhaps the simplest case to understand is ``mode='constant', cval=0.0``,
    because in this case values beyond the borders of `input` (i.e. wherever
    the `weights` kernel, centered on any one value, extends beyond an edge
    of `input`) are simply taken to be zero.
>>> a = np.array([[1, 2, 0, 0],
... [5, 3, 0, 4],
... [0, 0, 0, 7],
... [9, 3, 0, 0]])
>>> k = np.array([[1,1,1],[1,1,0],[1,0,0]])
>>> from scipy import ndimage
>>> ndimage.convolve(a, k, mode='constant', cval=0.0)
array([[11, 10, 7, 4],
[10, 3, 11, 11],
[15, 12, 14, 7],
[12, 3, 7, 0]])
Setting ``cval=1.0`` is equivalent to padding the outer edge of `input`
with 1.0's (and then extracting only the original region of the result).
>>> ndimage.convolve(a, k, mode='constant', cval=1.0)
array([[13, 11, 8, 7],
[11, 3, 11, 14],
[16, 12, 14, 10],
[15, 6, 10, 5]])
With ``mode='reflect'`` (the default), outer values are reflected at the
edge of `input` to fill in missing values.
>>> b = np.array([[2, 0, 0],
... [1, 0, 0],
... [0, 0, 0]])
>>> k = np.array([[0,1,0], [0,1,0], [0,1,0]])
>>> ndimage.convolve(b, k, mode='reflect')
array([[5, 0, 0],
[3, 0, 0],
[1, 0, 0]])
This includes diagonally at the corners.
>>> k = np.array([[1,0,0],[0,1,0],[0,0,1]])
>>> ndimage.convolve(b, k)
array([[4, 2, 0],
[3, 2, 0],
[1, 1, 0]])
    With ``mode='nearest'``, the single nearest value to an edge of
    `input` is repeated as many times as needed to match the overlapping
    `weights`.
>>> c = np.array([[2, 0, 1],
... [1, 0, 0],
... [0, 0, 0]])
>>> k = np.array([[0, 1, 0],
... [0, 1, 0],
... [0, 1, 0],
... [0, 1, 0],
... [0, 1, 0]])
>>> ndimage.convolve(c, k, mode='nearest')
array([[7, 0, 3],
[5, 0, 2],
[3, 0, 1]])
"""
return _correlate_or_convolve(input, weights, output, mode, cval,
origin, True)
@docfiller
def uniform_filter1d(input, size, axis=-1, output=None,
mode="reflect", cval=0.0, origin=0):
"""Calculate a one-dimensional uniform filter along the given axis.
The lines of the array along the given axis are filtered with a
uniform filter of given size.
Parameters
----------
%(input)s
size : int
length of uniform filter
%(axis)s
%(output)s
%(mode)s
%(cval)s
%(origin)s
"""
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
axis = _ni_support._check_axis(axis, input.ndim)
if size < 1:
raise RuntimeError('incorrect filter size')
output, return_value = _ni_support._get_output(output, input)
if (size // 2 + origin < 0) or (size // 2 + origin >= size):
raise ValueError('invalid origin')
mode = _ni_support._extend_mode_to_code(mode)
_nd_image.uniform_filter1d(input, size, axis, output, mode, cval,
origin)
return return_value
@docfiller
def uniform_filter(input, size=3, output=None, mode="reflect",
cval=0.0, origin=0):
"""Multi-dimensional uniform filter.
Parameters
----------
%(input)s
size : int or sequence of ints, optional
The sizes of the uniform filter are given for each axis as a
sequence, or as a single number, in which case the size is
equal for all axes.
%(output)s
%(mode)s
%(cval)s
%(origin)s
Notes
-----
The multi-dimensional filter is implemented as a sequence of
one-dimensional uniform filters. The intermediate arrays are stored
in the same data type as the output. Therefore, for output types
with a limited precision, the results may be imprecise because
intermediate results may be stored with insufficient precision.
"""
input = numpy.asarray(input)
output, return_value = _ni_support._get_output(output, input)
sizes = _ni_support._normalize_sequence(size, input.ndim)
origins = _ni_support._normalize_sequence(origin, input.ndim)
axes = list(range(input.ndim))
axes = [(axes[ii], sizes[ii], origins[ii])
for ii in range(len(axes)) if sizes[ii] > 1]
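    # Axes with size 1 are no-ops and are skipped; the remaining axes are
    # smoothed with successive 1-D uniform filters, reusing `output` between
    # passes.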
if len(axes) > 0:
for axis, size, origin in axes:
uniform_filter1d(input, int(size), axis, output, mode,
cval, origin)
input = output
else:
output[...] = input[...]
return return_value
@docfiller
def minimum_filter1d(input, size, axis=-1, output=None,
mode="reflect", cval=0.0, origin=0):
"""Calculate a one-dimensional minimum filter along the given axis.
The lines of the array along the given axis are filtered with a
minimum filter of given size.
Parameters
----------
%(input)s
size : int
length along which to calculate 1D minimum
%(axis)s
%(output)s
%(mode)s
%(cval)s
%(origin)s
Notes
-----
This function implements the MINLIST algorithm [1]_, as described by
Richard Harter [2]_, and has a guaranteed O(n) performance, `n` being
the `input` length, regardless of filter size.
References
----------
.. [1] http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.42.2777
.. [2] http://www.richardhartersworld.com/cri/2001/slidingmin.html
"""
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
axis = _ni_support._check_axis(axis, input.ndim)
if size < 1:
raise RuntimeError('incorrect filter size')
output, return_value = _ni_support._get_output(output, input)
if (size // 2 + origin < 0) or (size // 2 + origin >= size):
raise ValueError('invalid origin')
mode = _ni_support._extend_mode_to_code(mode)
_nd_image.min_or_max_filter1d(input, size, axis, output, mode, cval,
origin, 1)
return return_value
@docfiller
def maximum_filter1d(input, size, axis=-1, output=None,
mode="reflect", cval=0.0, origin=0):
"""Calculate a one-dimensional maximum filter along the given axis.
The lines of the array along the given axis are filtered with a
maximum filter of given size.
Parameters
----------
%(input)s
size : int
Length along which to calculate the 1-D maximum.
%(axis)s
%(output)s
%(mode)s
%(cval)s
%(origin)s
Returns
-------
maximum1d : ndarray, None
Maximum-filtered array with same shape as input.
None if `output` is not None
Notes
-----
This function implements the MAXLIST algorithm [1]_, as described by
Richard Harter [2]_, and has a guaranteed O(n) performance, `n` being
the `input` length, regardless of filter size.
References
----------
.. [1] http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.42.2777
.. [2] http://www.richardhartersworld.com/cri/2001/slidingmin.html
"""
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
axis = _ni_support._check_axis(axis, input.ndim)
if size < 1:
raise RuntimeError('incorrect filter size')
output, return_value = _ni_support._get_output(output, input)
if (size // 2 + origin < 0) or (size // 2 + origin >= size):
raise ValueError('invalid origin')
mode = _ni_support._extend_mode_to_code(mode)
_nd_image.min_or_max_filter1d(input, size, axis, output, mode, cval,
origin, 0)
return return_value
def _min_or_max_filter(input, size, footprint, structure, output, mode,
cval, origin, minimum):
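    # Decide whether the filter is separable: with no structuring element and
    # a footprint that is either omitted (only `size` given) or a solid box of
    # ones, the N-D filter can be applied as a sequence of 1-D min/max filters.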
if structure is None:
if footprint is None:
if size is None:
raise RuntimeError("no footprint provided")
separable = True
else:
footprint = numpy.asarray(footprint)
footprint = footprint.astype(bool)
if numpy.alltrue(numpy.ravel(footprint), axis=0):
size = footprint.shape
footprint = None
separable = True
else:
separable = False
else:
structure = numpy.asarray(structure, dtype=numpy.float64)
separable = False
if footprint is None:
footprint = numpy.ones(structure.shape, bool)
else:
footprint = numpy.asarray(footprint)
footprint = footprint.astype(bool)
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
output, return_value = _ni_support._get_output(output, input)
origins = _ni_support._normalize_sequence(origin, input.ndim)
if separable:
sizes = _ni_support._normalize_sequence(size, input.ndim)
axes = list(range(input.ndim))
axes = [(axes[ii], sizes[ii], origins[ii])
for ii in range(len(axes)) if sizes[ii] > 1]
if minimum:
filter_ = minimum_filter1d
else:
filter_ = maximum_filter1d
if len(axes) > 0:
for axis, size, origin in axes:
filter_(input, int(size), axis, output, mode, cval, origin)
input = output
else:
output[...] = input[...]
else:
fshape = [ii for ii in footprint.shape if ii > 0]
if len(fshape) != input.ndim:
raise RuntimeError('footprint array has incorrect shape.')
for origin, lenf in zip(origins, fshape):
if (lenf // 2 + origin < 0) or (lenf // 2 + origin >= lenf):
raise ValueError('invalid origin')
if not footprint.flags.contiguous:
footprint = footprint.copy()
if structure is not None:
if len(structure.shape) != input.ndim:
raise RuntimeError('structure array has incorrect shape')
if not structure.flags.contiguous:
structure = structure.copy()
mode = _ni_support._extend_mode_to_code(mode)
_nd_image.min_or_max_filter(input, footprint, structure, output,
mode, cval, origins, minimum)
return return_value
@docfiller
def minimum_filter(input, size=None, footprint=None, output=None,
mode="reflect", cval=0.0, origin=0):
"""Calculates a multi-dimensional minimum filter.
Parameters
----------
%(input)s
%(size_foot)s
%(output)s
%(mode)s
%(cval)s
%(origin)s
"""
return _min_or_max_filter(input, size, footprint, None, output, mode,
cval, origin, 1)
@docfiller
def maximum_filter(input, size=None, footprint=None, output=None,
mode="reflect", cval=0.0, origin=0):
"""Calculates a multi-dimensional maximum filter.
Parameters
----------
%(input)s
%(size_foot)s
%(output)s
%(mode)s
%(cval)s
%(origin)s
"""
return _min_or_max_filter(input, size, footprint, None, output, mode,
cval, origin, 0)
@docfiller
def _rank_filter(input, rank, size=None, footprint=None, output=None,
mode="reflect", cval=0.0, origin=0, operation='rank'):
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
origins = _ni_support._normalize_sequence(origin, input.ndim)
if footprint is None:
if size is None:
raise RuntimeError("no footprint or filter size provided")
sizes = _ni_support._normalize_sequence(size, input.ndim)
footprint = numpy.ones(sizes, dtype=bool)
else:
footprint = numpy.asarray(footprint, dtype=bool)
fshape = [ii for ii in footprint.shape if ii > 0]
if len(fshape) != input.ndim:
raise RuntimeError('filter footprint array has incorrect shape.')
for origin, lenf in zip(origins, fshape):
if (lenf // 2 + origin < 0) or (lenf // 2 + origin >= lenf):
raise ValueError('invalid origin')
if not footprint.flags.contiguous:
footprint = footprint.copy()
filter_size = numpy.where(footprint, 1, 0).sum()
if operation == 'median':
rank = filter_size // 2
elif operation == 'percentile':
percentile = rank
if percentile < 0.0:
percentile += 100.0
if percentile < 0 or percentile > 100:
raise RuntimeError('invalid percentile')
if percentile == 100.0:
rank = filter_size - 1
else:
rank = int(float(filter_size) * percentile / 100.0)
if rank < 0:
rank += filter_size
if rank < 0 or rank >= filter_size:
raise RuntimeError('rank not within filter footprint size')
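    # e.g. a full 3x3 footprint has filter_size = 9: the median is rank 4,
    # percentile=25 maps to rank int(9 * 0.25) = 2, and ranks 0 and 8 reduce
    # to the minimum and maximum filters handled below.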
if rank == 0:
return minimum_filter(input, None, footprint, output, mode, cval,
origins)
elif rank == filter_size - 1:
return maximum_filter(input, None, footprint, output, mode, cval,
origins)
else:
output, return_value = _ni_support._get_output(output, input)
mode = _ni_support._extend_mode_to_code(mode)
_nd_image.rank_filter(input, rank, footprint, output, mode, cval,
origins)
return return_value
@docfiller
def rank_filter(input, rank, size=None, footprint=None, output=None,
mode="reflect", cval=0.0, origin=0):
"""Calculates a multi-dimensional rank filter.
Parameters
----------
%(input)s
rank : int
        The rank parameter may be less than zero, i.e., rank = -1
indicates the largest element.
%(size_foot)s
%(output)s
%(mode)s
%(cval)s
%(origin)s
"""
return _rank_filter(input, rank, size, footprint, output, mode, cval,
origin, 'rank')
@docfiller
def median_filter(input, size=None, footprint=None, output=None,
mode="reflect", cval=0.0, origin=0):
"""
Calculates a multidimensional median filter.
Parameters
----------
%(input)s
%(size_foot)s
%(output)s
%(mode)s
%(cval)s
%(origin)s
Returns
-------
median_filter : ndarray
Return of same shape as `input`.
"""
return _rank_filter(input, 0, size, footprint, output, mode, cval,
origin, 'median')
@docfiller
def percentile_filter(input, percentile, size=None, footprint=None,
output=None, mode="reflect", cval=0.0, origin=0):
"""Calculates a multi-dimensional percentile filter.
Parameters
----------
%(input)s
percentile : scalar
        The percentile parameter may be less than zero, i.e.,
percentile = -20 equals percentile = 80
%(size_foot)s
%(output)s
%(mode)s
%(cval)s
%(origin)s
"""
return _rank_filter(input, percentile, size, footprint, output, mode,
cval, origin, 'percentile')
@docfiller
def generic_filter1d(input, function, filter_size, axis=-1,
output=None, mode="reflect", cval=0.0, origin=0,
                     extra_arguments=(), extra_keywords=None):
"""Calculate a one-dimensional filter along the given axis.
`generic_filter1d` iterates over the lines of the array, calling the
given function at each line. The arguments of the line are the
input line, and the output line. The input and output lines are 1D
double arrays. The input line is extended appropriately according
to the filter size and origin. The output line must be modified
in-place with the result.
Parameters
----------
%(input)s
function : callable
Function to apply along given axis.
filter_size : scalar
Length of the filter.
%(axis)s
%(output)s
%(mode)s
%(cval)s
%(origin)s
%(extra_arguments)s
%(extra_keywords)s
"""
if extra_keywords is None:
extra_keywords = {}
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
output, return_value = _ni_support._get_output(output, input)
if filter_size < 1:
raise RuntimeError('invalid filter size')
axis = _ni_support._check_axis(axis, input.ndim)
if (filter_size // 2 + origin < 0) or (filter_size // 2 + origin >=
filter_size):
raise ValueError('invalid origin')
mode = _ni_support._extend_mode_to_code(mode)
_nd_image.generic_filter1d(input, function, filter_size, axis, output,
mode, cval, origin, extra_arguments, extra_keywords)
return return_value
@docfiller
def generic_filter(input, function, size=None, footprint=None,
output=None, mode="reflect", cval=0.0, origin=0,
                   extra_arguments=(), extra_keywords=None):
"""Calculates a multi-dimensional filter using the given function.
At each element the provided function is called. The input values
within the filter footprint at that element are passed to the function
as a 1D array of double values.
Parameters
----------
%(input)s
function : callable
Function to apply at each element.
%(size_foot)s
%(output)s
%(mode)s
%(cval)s
%(origin)s
%(extra_arguments)s
%(extra_keywords)s
"""
if extra_keywords is None:
extra_keywords = {}
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
origins = _ni_support._normalize_sequence(origin, input.ndim)
if footprint is None:
if size is None:
raise RuntimeError("no footprint or filter size provided")
sizes = _ni_support._normalize_sequence(size, input.ndim)
footprint = numpy.ones(sizes, dtype=bool)
else:
footprint = numpy.asarray(footprint)
footprint = footprint.astype(bool)
fshape = [ii for ii in footprint.shape if ii > 0]
if len(fshape) != input.ndim:
raise RuntimeError('filter footprint array has incorrect shape.')
for origin, lenf in zip(origins, fshape):
if (lenf // 2 + origin < 0) or (lenf // 2 + origin >= lenf):
raise ValueError('invalid origin')
if not footprint.flags.contiguous:
footprint = footprint.copy()
output, return_value = _ni_support._get_output(output, input)
mode = _ni_support._extend_mode_to_code(mode)
_nd_image.generic_filter(input, function, footprint, output, mode,
cval, origins, extra_arguments, extra_keywords)
return return_value
| gpl-3.0 |
scott-maddox/openbandparams | src/openbandparams/examples/Plot_Conduction_Band_Offset_vs_Lattice_Constant.py | 1 | 3735 | #
# Copyright (c) 2013-2014, Scott J Maddox
#
# This file is part of openbandparams.
#
# openbandparams is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# openbandparams is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with openbandparams. If not, see <http://www.gnu.org/licenses/>.
#
#############################################################################
# Make sure we import the local openbandparams version
import os
import sys
sys.path.insert(0,
os.path.abspath(os.path.join(os.path.dirname(__file__), '../..')))
from openbandparams import *
import matplotlib.pyplot as plt
import numpy
T = 300
T_lattice = 300
# initialize the plot
fig = plt.figure()
ax = fig.add_subplot(111)
plt.xlabel('Lattice Parameter at %g K ($\AA$)' % T_lattice)
plt.ylabel('Conduction Band Offsets at %g K (eV)' % T)
# Define colors
red = '#FE0303'
green = '#04A004'
blue = '#0404FF'
red_green = '#8D8D04'
red_blue = '#8D048D'
green_blue = '#04AEAE'
# list the binaries
phosphide_binaries = [AlP, GaP, InP] # red
arsenide_binaries = [AlAs, GaAs, InAs] # green
antimonide_binaries = [AlSb, GaSb, InSb] # blue
# list the ternaries
phosphide_ternaries = [AlGaP, AlInP, GaInP] # red
arsenide_ternaries = [AlGaAs, AlInAs, GaInAs] # green
antimonide_ternaries = [AlGaSb, AlInSb, GaInSb] # blue
phosphide_arsenide_ternaries = [AlPAs, GaPAs, InPAs] # red + green
phosphide_antimonide_ternaries = [AlPSb, GaPSb, InPSb] # red + blue
arsenide_antimonide_ternaries = [AlAsSb, GaAsSb, InAsSb] # green + blue
# plot the ternaries
fractions = numpy.linspace(0, 1, 1000)
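# The conduction band edge is computed as the valence band offset plus the
# band gap, VBO(T) + Eg(T), evaluated at 1000 compositions per ternary.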
for ternaries, color in [(phosphide_ternaries, red),
(arsenide_ternaries, green),
(antimonide_ternaries, blue),
(phosphide_arsenide_ternaries, red_green),
(phosphide_antimonide_ternaries, red_blue),
(arsenide_antimonide_ternaries, green_blue)]:
for ternary in ternaries:
ax.plot([ternary(x=f).a(T=T_lattice) for f in fractions],
[ternary(x=f).VBO(T=T) + ternary(x=f).Eg(T=T)
for f in fractions],
color=color,
linewidth=1.2)
# plot and label the binaries
x = []
y = []
label = []
for binaries, color in [(phosphide_binaries, red),
(arsenide_binaries, green),
(antimonide_binaries, blue)]:
ax.plot([b.a(T=T_lattice) for b in binaries],
[b.VBO(T=T) + b.Eg(T=T) for b in binaries],
color=color,
linestyle=' ',
marker='o',
markersize=4,
markeredgecolor=color)
x.extend([b.a(T=T_lattice) for b in binaries])
y.extend([b.VBO(T=T) + b.Eg(T=T) for b in binaries])
label.extend([b.name for b in binaries])
for x, y, label in zip(x, y, label):
ax.annotate(label, xy=(x, y), xytext=(-5, 5), ha='right', va='bottom',
bbox=dict(linewidth=0, fc='white', alpha=0.9),
textcoords='offset points')
xmin, xmax = plt.xlim()
plt.xlim(xmin - 0.05, xmax)
if __name__ == '__main__':
import sys
if len(sys.argv) > 1:
output_filename = sys.argv[1]
plt.savefig(output_filename)
else:
plt.show() | agpl-3.0 |
rs2/pandas | pandas/tests/series/test_cumulative.py | 7 | 5549 | """
Tests for Series cumulative operations.
See also
--------
tests.frame.test_cumulative
"""
from itertools import product
import numpy as np
import pytest
import pandas as pd
import pandas._testing as tm
def _check_accum_op(name, series, check_dtype=True):
func = getattr(np, name)
tm.assert_numpy_array_equal(
func(series).values, func(np.array(series)), check_dtype=check_dtype
)
# with missing values
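    # (every other element is NaN): the accumulated values at the non-NaN
    # positions should match accumulating the series with the NaNs dropped,
    # i.e. the default skipna=True behaviour of the cumulative ops.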
ts = series.copy()
ts[::2] = np.NaN
result = func(ts)[1::2]
expected = func(np.array(ts.dropna()))
tm.assert_numpy_array_equal(result.values, expected, check_dtype=False)
class TestSeriesCumulativeOps:
def test_cumsum(self, datetime_series):
_check_accum_op("cumsum", datetime_series)
def test_cumprod(self, datetime_series):
_check_accum_op("cumprod", datetime_series)
def test_cummin(self, datetime_series):
tm.assert_numpy_array_equal(
datetime_series.cummin().values,
np.minimum.accumulate(np.array(datetime_series)),
)
ts = datetime_series.copy()
ts[::2] = np.NaN
result = ts.cummin()[1::2]
expected = np.minimum.accumulate(ts.dropna())
result.index = result.index._with_freq(None)
tm.assert_series_equal(result, expected)
def test_cummax(self, datetime_series):
tm.assert_numpy_array_equal(
datetime_series.cummax().values,
np.maximum.accumulate(np.array(datetime_series)),
)
ts = datetime_series.copy()
ts[::2] = np.NaN
result = ts.cummax()[1::2]
expected = np.maximum.accumulate(ts.dropna())
result.index = result.index._with_freq(None)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("tz", [None, "US/Pacific"])
def test_cummin_datetime64(self, tz):
s = pd.Series(
pd.to_datetime(
["NaT", "2000-1-2", "NaT", "2000-1-1", "NaT", "2000-1-3"]
).tz_localize(tz)
)
expected = pd.Series(
pd.to_datetime(
["NaT", "2000-1-2", "NaT", "2000-1-1", "NaT", "2000-1-1"]
).tz_localize(tz)
)
result = s.cummin(skipna=True)
tm.assert_series_equal(expected, result)
expected = pd.Series(
pd.to_datetime(
["NaT", "2000-1-2", "2000-1-2", "2000-1-1", "2000-1-1", "2000-1-1"]
).tz_localize(tz)
)
result = s.cummin(skipna=False)
tm.assert_series_equal(expected, result)
@pytest.mark.parametrize("tz", [None, "US/Pacific"])
def test_cummax_datetime64(self, tz):
s = pd.Series(
pd.to_datetime(
["NaT", "2000-1-2", "NaT", "2000-1-1", "NaT", "2000-1-3"]
).tz_localize(tz)
)
expected = pd.Series(
pd.to_datetime(
["NaT", "2000-1-2", "NaT", "2000-1-2", "NaT", "2000-1-3"]
).tz_localize(tz)
)
result = s.cummax(skipna=True)
tm.assert_series_equal(expected, result)
expected = pd.Series(
pd.to_datetime(
["NaT", "2000-1-2", "2000-1-2", "2000-1-2", "2000-1-2", "2000-1-3"]
).tz_localize(tz)
)
result = s.cummax(skipna=False)
tm.assert_series_equal(expected, result)
def test_cummin_timedelta64(self):
s = pd.Series(pd.to_timedelta(["NaT", "2 min", "NaT", "1 min", "NaT", "3 min"]))
expected = pd.Series(
pd.to_timedelta(["NaT", "2 min", "NaT", "1 min", "NaT", "1 min"])
)
result = s.cummin(skipna=True)
tm.assert_series_equal(expected, result)
expected = pd.Series(
pd.to_timedelta(["NaT", "2 min", "2 min", "1 min", "1 min", "1 min"])
)
result = s.cummin(skipna=False)
tm.assert_series_equal(expected, result)
def test_cummax_timedelta64(self):
s = pd.Series(pd.to_timedelta(["NaT", "2 min", "NaT", "1 min", "NaT", "3 min"]))
expected = pd.Series(
pd.to_timedelta(["NaT", "2 min", "NaT", "2 min", "NaT", "3 min"])
)
result = s.cummax(skipna=True)
tm.assert_series_equal(expected, result)
expected = pd.Series(
pd.to_timedelta(["NaT", "2 min", "2 min", "2 min", "2 min", "3 min"])
)
result = s.cummax(skipna=False)
tm.assert_series_equal(expected, result)
def test_cummethods_bool(self):
# GH#6270
a = pd.Series([False, False, False, True, True, False, False])
b = ~a
c = pd.Series([False] * len(b))
d = ~c
methods = {
"cumsum": np.cumsum,
"cumprod": np.cumprod,
"cummin": np.minimum.accumulate,
"cummax": np.maximum.accumulate,
}
args = product((a, b, c, d), methods)
for s, method in args:
expected = pd.Series(methods[method](s.values))
result = getattr(s, method)()
tm.assert_series_equal(result, expected)
e = pd.Series([False, True, np.nan, False])
cse = pd.Series([0, 1, np.nan, 1], dtype=object)
cpe = pd.Series([False, 0, np.nan, 0])
cmin = pd.Series([False, False, np.nan, False])
cmax = pd.Series([False, True, np.nan, True])
expecteds = {"cumsum": cse, "cumprod": cpe, "cummin": cmin, "cummax": cmax}
for method in methods:
res = getattr(e, method)()
tm.assert_series_equal(res, expecteds[method])
| bsd-3-clause |
uglyboxer/linear_neuron | net-p3/lib/python3.5/site-packages/matplotlib/blocking_input.py | 11 | 11636 | """
This provides several classes used for blocking interaction with figure
windows:
:class:`BlockingInput`
creates a callable object to retrieve events in a blocking way for
interactive sessions
:class:`BlockingKeyMouseInput`
creates a callable object to retrieve key or mouse clicks in a blocking
way for interactive sessions.
Note: Subclass of BlockingInput. Used by waitforbuttonpress
:class:`BlockingMouseInput`
creates a callable object to retrieve mouse clicks in a blocking way for
interactive sessions.
Note: Subclass of BlockingInput. Used by ginput
:class:`BlockingContourLabeler`
creates a callable object to retrieve mouse clicks in a blocking way that
will then be used to place labels on a ContourSet
Note: Subclass of BlockingMouseInput. Used by clabel
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from matplotlib import verbose
from matplotlib.cbook import is_sequence_of_strings
import matplotlib.lines as mlines
class BlockingInput(object):
"""
Class that creates a callable object to retrieve events in a
blocking way.
"""
def __init__(self, fig, eventslist=()):
self.fig = fig
assert is_sequence_of_strings(
eventslist), "Requires a sequence of event name strings"
self.eventslist = eventslist
def on_event(self, event):
"""
Event handler that will be passed to the current figure to
retrieve events.
"""
# Add a new event to list - using a separate function is
# overkill for the base class, but this is consistent with
# subclasses
self.add_event(event)
verbose.report("Event %i" % len(self.events))
# This will extract info from events
self.post_event()
# Check if we have enough events already
if len(self.events) >= self.n and self.n > 0:
self.fig.canvas.stop_event_loop()
def post_event(self):
"""For baseclass, do nothing but collect events"""
pass
def cleanup(self):
"""Disconnect all callbacks"""
for cb in self.callbacks:
self.fig.canvas.mpl_disconnect(cb)
self.callbacks = []
def add_event(self, event):
"""For base class, this just appends an event to events."""
self.events.append(event)
def pop_event(self, index=-1):
"""
This removes an event from the event list. Defaults to
removing last event, but an index can be supplied. Note that
this does not check that there are events, much like the
        normal pop method. If no events exist, this will throw an
exception.
"""
self.events.pop(index)
def pop(self, index=-1):
self.pop_event(index)
pop.__doc__ = pop_event.__doc__
def __call__(self, n=1, timeout=30):
"""
Blocking call to retrieve n events
"""
assert isinstance(n, int), "Requires an integer argument"
self.n = n
self.events = []
self.callbacks = []
# Ensure that the figure is shown
self.fig.show()
# connect the events to the on_event function call
for n in self.eventslist:
self.callbacks.append(
self.fig.canvas.mpl_connect(n, self.on_event))
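        # Block in the backend's event loop until n events have been collected
        # (or the timeout expires); callbacks are disconnected in the finally
        # clause even if the loop is interrupted.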
try:
# Start event loop
self.fig.canvas.start_event_loop(timeout=timeout)
finally: # Run even on exception like ctrl-c
# Disconnect the callbacks
self.cleanup()
# Return the events in this case
return self.events
class BlockingMouseInput(BlockingInput):
"""
Class that creates a callable object to retrieve mouse clicks in a
blocking way.
This class will also retrieve keyboard clicks and treat them like
appropriate mouse clicks (delete and backspace are like mouse button 3,
enter is like mouse button 2 and all others are like mouse button 1).
"""
button_add = 1
button_pop = 3
button_stop = 2
def __init__(self, fig, mouse_add=1, mouse_pop=3, mouse_stop=2):
BlockingInput.__init__(self, fig=fig,
eventslist=('button_press_event',
'key_press_event'))
self.button_add = mouse_add
self.button_pop = mouse_pop
self.button_stop = mouse_stop
def post_event(self):
"""
This will be called to process events
"""
assert len(self.events) > 0, "No events yet"
if self.events[-1].name == 'key_press_event':
self.key_event()
else:
self.mouse_event()
def mouse_event(self):
'''Process a mouse click event'''
event = self.events[-1]
button = event.button
if button == self.button_pop:
self.mouse_event_pop(event)
elif button == self.button_stop:
self.mouse_event_stop(event)
else:
self.mouse_event_add(event)
def key_event(self):
'''
Process a key click event. This maps certain keys to appropriate
mouse click events.
'''
event = self.events[-1]
if event.key is None:
# at least in mac os X gtk backend some key returns None.
return
key = event.key.lower()
if key in ['backspace', 'delete']:
self.mouse_event_pop(event)
elif key in ['escape', 'enter']:
# on windows XP and wxAgg, the enter key doesn't seem to register
self.mouse_event_stop(event)
else:
self.mouse_event_add(event)
def mouse_event_add(self, event):
"""
Will be called for any event involving a button other than
button 2 or 3. This will add a click if it is inside axes.
"""
if event.inaxes:
self.add_click(event)
else: # If not a valid click, remove from event list
BlockingInput.pop(self, -1)
def mouse_event_stop(self, event):
"""
Will be called for any event involving button 2.
Button 2 ends blocking input.
"""
# Remove last event just for cleanliness
BlockingInput.pop(self, -1)
# This will exit even if not in infinite mode. This is
# consistent with MATLAB and sometimes quite useful, but will
# require the user to test how many points were actually
# returned before using data.
self.fig.canvas.stop_event_loop()
def mouse_event_pop(self, event):
"""
Will be called for any event involving button 3.
Button 3 removes the last click.
"""
# Remove this last event
BlockingInput.pop(self, -1)
# Now remove any existing clicks if possible
if len(self.events) > 0:
self.pop(event, -1)
def add_click(self, event):
"""
This add the coordinates of an event to the list of clicks
"""
self.clicks.append((event.xdata, event.ydata))
verbose.report("input %i: %f,%f" %
(len(self.clicks), event.xdata, event.ydata))
# If desired plot up click
if self.show_clicks:
line = mlines.Line2D([event.xdata], [event.ydata],
marker='+', color='r')
event.inaxes.add_line(line)
self.marks.append(line)
self.fig.canvas.draw()
def pop_click(self, event, index=-1):
"""
This removes a click from the list of clicks. Defaults to
removing the last click.
"""
self.clicks.pop(index)
if self.show_clicks:
mark = self.marks.pop(index)
mark.remove()
self.fig.canvas.draw()
            # NOTE: I do NOT understand why the above 3 lines do not work
# for the keyboard backspace event on windows XP wxAgg.
# maybe event.inaxes here is a COPY of the actual axes?
def pop(self, event, index=-1):
"""
This removes a click and the associated event from the object.
Defaults to removing the last click, but any index can be
supplied.
"""
self.pop_click(event, index)
BlockingInput.pop(self, index)
def cleanup(self, event=None):
# clean the figure
if self.show_clicks:
for mark in self.marks:
mark.remove()
self.marks = []
self.fig.canvas.draw()
# Call base class to remove callbacks
BlockingInput.cleanup(self)
def __call__(self, n=1, timeout=30, show_clicks=True):
"""
Blocking call to retrieve n coordinate pairs through mouse
clicks.
"""
self.show_clicks = show_clicks
self.clicks = []
self.marks = []
BlockingInput.__call__(self, n=n, timeout=timeout)
return self.clicks
class BlockingContourLabeler(BlockingMouseInput):
"""
Class that creates a callable object that uses mouse clicks or key
clicks on a figure window to place contour labels.
"""
def __init__(self, cs):
self.cs = cs
BlockingMouseInput.__init__(self, fig=cs.ax.figure)
def add_click(self, event):
self.button1(event)
def pop_click(self, event, index=-1):
self.button3(event)
def button1(self, event):
"""
This will be called if an event involving a button other than
        2 or 3 occurs. This will add a label to a contour.
"""
# Shorthand
if event.inaxes == self.cs.ax:
self.cs.add_label_near(event.x, event.y, self.inline,
inline_spacing=self.inline_spacing,
transform=False)
self.fig.canvas.draw()
else: # Remove event if not valid
BlockingInput.pop(self)
def button3(self, event):
"""
This will be called if button 3 is clicked. This will remove
a label if not in inline mode. Unfortunately, if one is doing
inline labels, then there is currently no way to fix the
broken contour - once humpty-dumpty is broken, he can't be put
back together. In inline mode, this does nothing.
"""
if self.inline:
pass
else:
self.cs.pop_label()
self.cs.ax.figure.canvas.draw()
def __call__(self, inline, inline_spacing=5, n=-1, timeout=-1):
self.inline = inline
self.inline_spacing = inline_spacing
BlockingMouseInput.__call__(self, n=n, timeout=timeout,
show_clicks=False)
class BlockingKeyMouseInput(BlockingInput):
"""
Class that creates a callable object to retrieve a single mouse or
keyboard click
"""
def __init__(self, fig):
BlockingInput.__init__(self, fig=fig, eventslist=(
'button_press_event', 'key_press_event'))
def post_event(self):
"""
Determines if it is a key event
"""
assert len(self.events) > 0, "No events yet"
self.keyormouse = self.events[-1].name == 'key_press_event'
def __call__(self, timeout=30):
"""
Blocking call to retrieve a single mouse or key click
Returns True if key click, False if mouse, or None if timeout
"""
self.keyormouse = None
BlockingInput.__call__(self, n=1, timeout=timeout)
return self.keyormouse
| mit |
kuleshov/deep-learning-models | models/dcgan.py | 1 | 9901 | import time
import pickle
import numpy as np
import matplotlib.pyplot as plt
from collections import OrderedDict
import theano
import theano.tensor as T
import lasagne
from lasagne.layers import batch_norm
from model import Model
from helpers import *
# ----------------------------------------------------------------------------
class DCGAN(Model):
def __init__(self, n_dim, n_out, n_chan=1, n_superbatch=12800, model='bernoulli',
opt_alg='adam', opt_params={'lr' : 1e-3, 'b1': 0.9, 'b2': 0.99}):
# save model that wil be created
self.model = model
# create shared data variables
train_set_x = theano.shared(np.empty((n_superbatch, n_chan, n_dim, n_dim), dtype=theano.config.floatX), borrow=False)
val_set_x = theano.shared(np.empty((n_superbatch, n_chan, n_dim, n_dim), dtype=theano.config.floatX), borrow=False)
# create y-variables
train_set_y = theano.shared(np.empty((n_superbatch,), dtype=theano.config.floatX), borrow=False)
val_set_y = theano.shared(np.empty((n_superbatch,), dtype=theano.config.floatX), borrow=False)
train_set_y_int, val_set_y_int = T.cast(train_set_y, 'int32'), T.cast(val_set_y, 'int32')
# create input vars
X = T.tensor4(dtype=theano.config.floatX)
Z = T.matrix(dtype=theano.config.floatX)
idx1, idx2 = T.lscalar(), T.lscalar()
self.inputs = (X, Z, idx1, idx2)
# create lasagne model
self.network = self.create_model(X, Z, n_dim, n_out, n_chan)
l_g, l_d = self.network
# create objectives
loss_g, loss_d, p_real, p_fake = self.create_objectives(deterministic=False)
_, _, p_real_test, p_fake_test = self.create_objectives(deterministic=True)
# load params
params_g, params_d = self.get_params()
# create gradients
grads_g = theano.grad(loss_g, params_g)
grads_d = theano.grad(loss_d, params_d)
# create updates
alpha = T.scalar(dtype=theano.config.floatX) # adjustable learning rate
updates_g = self.create_updates(grads_g, params_g, alpha, opt_alg, opt_params)
updates_d = self.create_updates(grads_d, params_d, alpha, opt_alg, opt_params)
updates = OrderedDict(updates_g.items() + updates_d.items())
# create methods for training / prediction
self.train = theano.function([Z, idx1, idx2, alpha], [p_real, p_fake],
updates=updates, givens={X : train_set_x[idx1:idx2]})
self.loss = theano.function([X, Z], [p_real, p_fake])
self.loss_test = theano.function([X, Z], [p_real_test, p_fake_test])
self.gen = theano.function([Z], lasagne.layers.get_output(l_g, deterministic=True))
# save config
self.n_dim = n_dim
self.n_out = n_out
self.n_superbatch = n_superbatch
self.alg = opt_alg
# save data variables
self.train_set_x = train_set_x
self.train_set_y = train_set_y
self.val_set_x = val_set_x
self.val_set_y = val_set_y
self.data_loaded = False
# save neural network
self.params = self.get_params()
def fit(self, X_train, Y_train, X_val, Y_val, n_epoch=10, n_batch=100, logname='run'):
"""Train the model"""
alpha = 1.0 # learning rate, which can be adjusted later
n_data = len(X_train)
n_superbatch = self.n_superbatch
for epoch in range(n_epoch):
# In each epoch, we do a full pass over the training data:
train_batches, train_err, train_acc = 0, 0, 0
start_time = time.time()
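            # Learning-rate schedule: alpha stays at 1.0 for the first half of
            # training, then decays linearly via alpha = 2 * (1 - epoch/n_epoch).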
if epoch >= n_epoch // 2:
progress = float(epoch) / n_epoch
alpha = 2*(1 - progress)
# iterate over superbatches to save time on GPU memory transfer
for X_sb, Y_sb in self.iterate_superbatches(X_train, Y_train, n_superbatch, datatype='train', shuffle=True):
for idx1, idx2 in iterate_minibatch_idx(len(X_sb), n_batch):
noise = lasagne.utils.floatX(np.random.rand(n_batch, 100))
p_real, p_fake = self.train(noise, idx1, idx2, alpha)
# collect metrics
train_batches += 1
train_err += p_real
train_acc += p_fake
if train_batches % 5 == 0:
n_total = epoch * n_data + n_batch * train_batches
metrics = [n_total, train_err / train_batches, train_acc / train_batches]
log_metrics(logname, metrics)
samples = self.gen(lasagne.utils.floatX(np.random.rand(42, 100)))
plt.imsave('mnist_samples.png',
(samples.reshape(6, 7, 28, 28)
.transpose(0, 2, 1, 3)
.reshape(6*28, 7*28)),
cmap='gray')
print "Epoch {} of {} took {:.3f}s ({} minibatches)".format(
epoch + 1, n_epoch, time.time() - start_time, train_batches)
# make a full pass over the training data and record metrics:
Z_train = lasagne.utils.floatX(np.random.rand(len(X_train), 100))
Z_val = lasagne.utils.floatX(np.random.rand(len(X_val), 100))
# train_err, train_acc = evaluate(self.loss, X_train, Z_train, batchsize=1000)
train_err /= train_batches
train_acc /= train_batches
val_err, val_acc = evaluate(self.loss_test, X_val, Z_val, batchsize=1000)
print " training loss/acc:\t\t{:.6f}\t{:.6f}".format(train_err, train_acc)
print " validation loss/acc:\t\t{:.6f}\t{:.6f}".format(val_err, val_acc)
metrics = [ epoch, train_err, train_acc, val_err, val_acc ]
log_metrics(logname + '.val', metrics)
def create_model(self, X, Z, n_dim, n_out, n_chan=1):
# params
n_lat = 100 # latent variables
n_g_hid1 = 1024 # size of hidden layer in generator layer 1
n_g_hid2 = 128 # size of hidden layer in generator layer 2
n_out = n_dim * n_dim * n_chan # total dimensionality of output
if self.model == 'gaussian':
raise Exception('Gaussian variables currently nor supported in GAN')
# create the generator network
l_g_in = lasagne.layers.InputLayer(shape=(None, n_lat), input_var=Z)
l_g_hid1 = batch_norm(lasagne.layers.DenseLayer(l_g_in, n_g_hid1))
l_g_hid2 = batch_norm(lasagne.layers.DenseLayer(l_g_hid1, n_g_hid2*7*7))
l_g_hid2 = lasagne.layers.ReshapeLayer(l_g_hid2, ([0], n_g_hid2, 7, 7))
l_g_dc1 = batch_norm(Deconv2DLayer(l_g_hid2, 64, 5, stride=2, pad=2))
l_g = Deconv2DLayer(l_g_dc1, n_chan, 5, stride=2, pad=2,
nonlinearity=lasagne.nonlinearities.sigmoid)
print ("Generator output:", l_g.output_shape)
# create the discriminator network
lrelu = lasagne.nonlinearities.LeakyRectify(0.2)
l_d_in = lasagne.layers.InputLayer(shape=(None, n_chan, n_dim, n_dim),
input_var=X)
l_d_hid1 = batch_norm(lasagne.layers.Conv2DLayer(
l_d_in, num_filters=64, filter_size=5, stride=2, pad=2,
nonlinearity=lrelu, name='l_d_hid1'))
l_d_hid2 = batch_norm(lasagne.layers.Conv2DLayer(
l_d_hid1, num_filters=128, filter_size=5, stride=2, pad=2,
nonlinearity=lrelu, name='l_d_hid2'))
l_d_hid3 = batch_norm(lasagne.layers.DenseLayer(l_d_hid2, 1024, nonlinearity=lrelu))
l_d = lasagne.layers.DenseLayer(l_d_hid3, 1, nonlinearity=lasagne.nonlinearities.sigmoid)
print ("Discriminator output:", l_d.output_shape)
return l_g, l_d
def create_objectives(self, deterministic=False):
# load network
l_g, l_d = self.network
# load ouput
g = lasagne.layers.get_output(l_g, deterministic=deterministic)
d_real = lasagne.layers.get_output(l_d, deterministic=deterministic)
d_fake = lasagne.layers.get_output(l_d, g, deterministic=deterministic)
# define loss
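        # Non-saturating GAN losses: the generator minimizes -log D(G(z)),
        # while the discriminator minimizes -log D(x) - log(1 - D(G(z))).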
loss_g = lasagne.objectives.binary_crossentropy(d_fake, 1).mean()
loss_d = ( lasagne.objectives.binary_crossentropy(d_real, 1)
+ lasagne.objectives.binary_crossentropy(d_fake, 0) ).mean()
# compute and store discriminator probabilities
p_real = (d_real > 0.5).mean()
p_fake = (d_fake < 0.5).mean()
return loss_g, loss_d, p_real, p_fake
def get_params(self):
l_g, l_d = self.network
params_g = lasagne.layers.get_all_params(l_g, trainable=True)
params_d = lasagne.layers.get_all_params(l_d, trainable=True)
return params_g, params_d
class Deconv2DLayer(lasagne.layers.Layer):
def __init__(self, incoming, num_filters, filter_size, stride=1, pad=0,
nonlinearity=lasagne.nonlinearities.rectify, **kwargs):
super(Deconv2DLayer, self).__init__(incoming, **kwargs)
self.num_filters = num_filters
self.filter_size = lasagne.utils.as_tuple(filter_size, 2, int)
self.stride = lasagne.utils.as_tuple(stride, 2, int)
self.pad = lasagne.utils.as_tuple(pad, 2, int)
self.W = self.add_param(lasagne.init.Orthogonal(),
(self.input_shape[1], num_filters) + self.filter_size,
name='W')
self.b = self.add_param(lasagne.init.Constant(0),
(num_filters,),
name='b')
if nonlinearity is None:
nonlinearity = lasagne.nonlinearities.identity
self.nonlinearity = nonlinearity
def get_output_shape_for(self, input_shape):
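        # per-dimension output size: i*s - 2*p + f - 1, i.e. the standard
        # transposed-convolution formula (i - 1)*s - 2*p + f with an implicit
        # output padding of s - 1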
shape = tuple(i*s - 2*p + f - 1
for i, s, p, f in zip(input_shape[2:],
self.stride,
self.pad,
self.filter_size))
return (input_shape[0], self.num_filters) + shape
def get_output_for(self, input, **kwargs):
op = T.nnet.abstract_conv.AbstractConv2d_gradInputs(
imshp=self.output_shape,
kshp=(self.input_shape[1], self.num_filters) + self.filter_size,
subsample=self.stride, border_mode=self.pad)
conved = op(self.W, input, self.output_shape[2:])
if self.b is not None:
conved += self.b.dimshuffle('x', 0, 'x', 'x')
return self.nonlinearity(conved) | mit |
luoyetx/mxnet | example/gluon/kaggle_k_fold_cross_validation.py | 26 | 6854 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# This example provides an end-to-end pipeline for a common Kaggle competition.
# The entire pipeline includes common utilities such as k-fold cross validation
# and data pre-processing.
#
# Specifically, the example studies the `House Prices: Advanced Regression
# Techniques` challenge as a case study.
#
# The link to the problem on Kaggle:
# https://www.kaggle.com/c/house-prices-advanced-regression-techniques
import numpy as np
import pandas as pd
from mxnet import autograd
from mxnet import gluon
from mxnet import ndarray as nd
# After logging in www.kaggle.com, the training and testing data sets can be downloaded at:
# https://www.kaggle.com/c/house-prices-advanced-regression-techniques/download/train.csv
# https://www.kaggle.com/c/house-prices-advanced-regression-techniques/download/test.csv
train = pd.read_csv("train.csv")
test = pd.read_csv("test.csv")
all_X = pd.concat((train.loc[:, 'MSSubClass':'SaleCondition'],
test.loc[:, 'MSSubClass':'SaleCondition']))
# Get all the numerical features and apply standardization.
numeric_feas = all_X.dtypes[all_X.dtypes != "object"].index
all_X[numeric_feas] = all_X[numeric_feas].apply(lambda x:
(x - x.mean()) / (x.std()))
# Convert categorical feature values to numerical (including N/A).
all_X = pd.get_dummies(all_X, dummy_na=True)
# Approximate N/A feature value by the mean value of the current feature.
all_X = all_X.fillna(all_X.mean())
num_train = train.shape[0]
# Convert data formats to NDArrays to feed into gluon.
X_train = all_X[:num_train].as_matrix()
X_test = all_X[num_train:].as_matrix()
y_train = train.SalePrice.as_matrix()
X_train = nd.array(X_train)
y_train = nd.array(y_train)
y_train = y_train.reshape((num_train, 1))
X_test = nd.array(X_test)
square_loss = gluon.loss.L2Loss()
def get_rmse_log(net, X_train, y_train):
"""Gets root mse between the logarithms of the prediction and the truth."""
num_train = X_train.shape[0]
clipped_preds = nd.clip(net(X_train), 1, float('inf'))
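    # gluon's L2Loss is 0.5 * (pred - label)^2, hence the factor of 2 below to
    # recover the mean squared error of the log prices before taking the root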
return np.sqrt(2 * nd.sum(square_loss(
nd.log(clipped_preds), nd.log(y_train))).asscalar() / num_train)
def get_net():
"""Gets a neural network. Better results are obtained with modifications."""
net = gluon.nn.Sequential()
with net.name_scope():
net.add(gluon.nn.Dense(50, activation="relu"))
net.add(gluon.nn.Dense(1))
net.initialize()
return net
def train(net, X_train, y_train, epochs, verbose_epoch, learning_rate,
weight_decay, batch_size):
"""Trains the model."""
dataset_train = gluon.data.ArrayDataset(X_train, y_train)
data_iter_train = gluon.data.DataLoader(dataset_train, batch_size,
shuffle=True)
trainer = gluon.Trainer(net.collect_params(), 'adam',
{'learning_rate': learning_rate,
'wd': weight_decay})
net.initialize(force_reinit=True)
for epoch in range(epochs):
for data, label in data_iter_train:
with autograd.record():
output = net(data)
loss = square_loss(output, label)
loss.backward()
trainer.step(batch_size)
avg_loss = get_rmse_log(net, X_train, y_train)
if epoch > verbose_epoch:
print("Epoch %d, train loss: %f" % (epoch, avg_loss))
return avg_loss
def k_fold_cross_valid(k, epochs, verbose_epoch, X_train, y_train,
learning_rate, weight_decay, batch_size):
"""Conducts k-fold cross validation for the model."""
assert k > 1
fold_size = X_train.shape[0] // k
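    # note: with integer division, any remainder samples beyond k * fold_size
    # are left out of every fold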
train_loss_sum = 0.0
test_loss_sum = 0.0
for test_idx in range(k):
X_val_test = X_train[test_idx * fold_size: (test_idx + 1) *
fold_size, :]
y_val_test = y_train[test_idx * fold_size: (test_idx + 1) * fold_size]
val_train_defined = False
for i in range(k):
if i != test_idx:
X_cur_fold = X_train[i * fold_size: (i + 1) * fold_size, :]
y_cur_fold = y_train[i * fold_size: (i + 1) * fold_size]
if not val_train_defined:
X_val_train = X_cur_fold
y_val_train = y_cur_fold
val_train_defined = True
else:
X_val_train = nd.concat(X_val_train, X_cur_fold, dim=0)
y_val_train = nd.concat(y_val_train, y_cur_fold, dim=0)
net = get_net()
train_loss = train(net, X_val_train, y_val_train, epochs, verbose_epoch,
learning_rate, weight_decay, batch_size)
train_loss_sum += train_loss
test_loss = get_rmse_log(net, X_val_test, y_val_test)
print("Test loss: %f" % test_loss)
test_loss_sum += test_loss
return train_loss_sum / k, test_loss_sum / k
# The sets of parameters. Better results are obtained with modifications.
# These parameters can be fine-tuned with k-fold cross-validation.
k = 5
epochs = 100
verbose_epoch = 95
learning_rate = 0.3
weight_decay = 100
batch_size = 100
train_loss, test_loss = \
k_fold_cross_valid(k, epochs, verbose_epoch, X_train, y_train,
learning_rate, weight_decay, batch_size)
print("%d-fold validation: Avg train loss: %f, Avg test loss: %f" %
(k, train_loss, test_loss))
def learn(epochs, verbose_epoch, X_train, y_train, test, learning_rate,
weight_decay, batch_size):
"""Trains the model and predicts on the test data set."""
net = get_net()
_ = train(net, X_train, y_train, epochs, verbose_epoch, learning_rate,
weight_decay, batch_size)
preds = net(X_test).asnumpy()
test['SalePrice'] = pd.Series(preds.reshape(1, -1)[0])
submission = pd.concat([test['Id'], test['SalePrice']], axis=1)
submission.to_csv('submission.csv', index=False)
learn(epochs, verbose_epoch, X_train, y_train, test, learning_rate,
weight_decay, batch_size)
| apache-2.0 |
alexandonian/lightning | lightning_fuse/tsne.py | 1 | 5780 | #
# tsne.py
#
# Implementation of t-SNE in Python. The implementation was tested on Python 2.7.10, and it requires a working
# installation of NumPy. The implementation comes with an example on the MNIST dataset. In order to plot the
# results of this example, a working installation of matplotlib is required.
#
# The example can be run by executing: `ipython tsne.py`
#
#
# Created by Laurens van der Maaten on 20-12-08.
# Copyright (c) 2008 Tilburg University. All rights reserved.
import numpy as Math
import pylab as Plot
def Hbeta(D=Math.array([]), beta=1.0):
"""Compute the perplexity and the P-row for a specific value of the precision of a Gaussian distribution."""
# Compute P-row and corresponding perplexity
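	# H is the Shannon entropy (in nats) of the row distribution, so exp(H) is
	# its perplexity; x2p tunes beta until H matches log(perplexity)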
P = Math.exp(-D.copy() * beta)
sumP = sum(P)
H = Math.log(sumP) + beta * Math.sum(D * P) / sumP
P = P / sumP
return H, P
def x2p(X=Math.array([]), tol=1e-5, perplexity=30.0):
"""Performs a binary search to get P-values in such a way that each conditional Gaussian has the same perplexity."""
# Initialize some variables
print "Computing pairwise distances..."
(n, d) = X.shape
sum_X = Math.sum(Math.square(X), 1)
D = Math.add(Math.add(-2 * Math.dot(X, X.T), sum_X).T, sum_X)
P = Math.zeros((n, n))
beta = Math.ones((n, 1))
logU = Math.log(perplexity)
# Loop over all datapoints
for i in range(n):
# Print progress
if i % 500 == 0:
print "Computing P-values for point ", i, " of ", n, "..."
# Compute the Gaussian kernel and entropy for the current precision
betamin = -Math.inf
betamax = Math.inf
Di = D[i, Math.concatenate((Math.r_[0:i], Math.r_[i + 1:n]))]
(H, thisP) = Hbeta(Di, beta[i])
# Evaluate whether the perplexity is within tolerance
Hdiff = H - logU
tries = 0
while Math.abs(Hdiff) > tol and tries < 50:
# If not, increase or decrease precision
if Hdiff > 0:
betamin = beta[i].copy()
if betamax == Math.inf or betamax == -Math.inf:
beta[i] = beta[i] * 2
else:
beta[i] = (beta[i] + betamax) / 2
else:
betamax = beta[i].copy()
if betamin == Math.inf or betamin == -Math.inf:
beta[i] = beta[i] / 2
else:
beta[i] = (beta[i] + betamin) / 2
# Recompute the values
(H, thisP) = Hbeta(Di, beta[i])
Hdiff = H - logU
tries = tries + 1
# Set the final row of P
P[i, Math.concatenate((Math.r_[0:i], Math.r_[i + 1:n]))] = thisP
# Return final P-matrix
print "Mean value of sigma: ", Math.mean(Math.sqrt(1 / beta))
return P
def pca(X=Math.array([]), no_dims=50):
"""Runs PCA on the NxD array X in order to reduce its dimensionality to no_dims dimensions."""
print "Preprocessing the data using PCA..."
(n, d) = X.shape
X = X - Math.tile(Math.mean(X, 0), (n, 1))
(l, M) = Math.linalg.eig(Math.dot(X.T, X))
Y = Math.dot(X, M[:, 0:no_dims])
return Y
def tsne(X=Math.array([]), no_dims=2, initial_dims=50, perplexity=30.0):
"""Runs t-SNE on the dataset in the NxD array X to reduce its dimensionality to no_dims dimensions.
The syntax of the function is Y = tsne.tsne(X, no_dims, perplexity), where X is an NxD NumPy array."""
# Check inputs
if isinstance(no_dims, float):
print "Error: array X should have type float."
return -1
if round(no_dims) != no_dims:
print "Error: number of dimensions should be an integer."
return -1
# Initialize variables
X = pca(X, initial_dims).real
(n, d) = X.shape
max_iter = 1000
initial_momentum = 0.5
final_momentum = 0.8
eta = 500
min_gain = 0.01
Y = Math.random.randn(n, no_dims)
dY = Math.zeros((n, no_dims))
iY = Math.zeros((n, no_dims))
gains = Math.ones((n, no_dims))
# Compute P-values
P = x2p(X, 1e-5, perplexity)
P = P + Math.transpose(P)
P = P / Math.sum(P)
P = P * 4; # early exaggeration
P = Math.maximum(P, 1e-12)
# Run iterations
for iter in range(max_iter):
# Compute pairwise affinities
sum_Y = Math.sum(Math.square(Y), 1)
num = 1 / (1 + Math.add(Math.add(-2 * Math.dot(Y, Y.T), sum_Y).T, sum_Y))
num[range(n), range(n)] = 0
Q = num / Math.sum(num)
Q = Math.maximum(Q, 1e-12)
# Compute gradient
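		# KL gradient: dC/dy_i = 4 * sum_j (p_ij - q_ij) * (y_i - y_j) / (1 + |y_i - y_j|^2);
		# the constant factor 4 is omitted here and effectively absorbed by eta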
PQ = P - Q
for i in range(n):
dY[i, :] = Math.sum(Math.tile(PQ[:, i] * num[:, i], (no_dims, 1)).T * (Y[i, :] - Y), 0)
# Perform the update
if iter < 20:
momentum = initial_momentum
else:
momentum = final_momentum
gains = (gains + 0.2) * ((dY > 0) != (iY > 0)) + (gains * 0.8) * ((dY > 0) == (iY > 0))
gains[gains < min_gain] = min_gain
iY = momentum * iY - eta * (gains * dY)
Y = Y + iY
Y = Y - Math.tile(Math.mean(Y, 0), (n, 1))
# Compute current value of cost function
if (iter + 1) % 10 == 0:
C = Math.sum(P * Math.log(P / Q))
print "Iteration ", (iter + 1), ": error is ", C
# Stop lying about P-values
if iter == 100:
P = P / 4
# Return solution
return Y
if __name__ == "__main__":
print "Run Y = tsne.tsne(X, no_dims, perplexity) to perform t-SNE on your dataset."
print "Running example on 2,500 MNIST digits..."
X = Math.loadtxt("mnist2500_X.txt");
labels = Math.loadtxt("mnist2500_labels.txt");
Y = tsne(X, 2, 50, 20.0);
Plot.scatter(Y[:, 0], Y[:, 1], 20, labels);
Plot.show();
| apache-2.0 |
bmcfee/librosa | librosa/version.py | 2 | 1389 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Version info"""
import sys
import importlib
short_version = "0.8"
version = "0.8.1"
def __get_mod_version(modname):
try:
if modname in sys.modules:
mod = sys.modules[modname]
else:
mod = importlib.import_module(modname)
try:
return mod.__version__
except AttributeError:
return "installed, no version number available"
except ImportError:
return None
def show_versions():
"""Return the version information for all librosa dependencies."""
core_deps = [
"audioread",
"numpy",
"scipy",
"sklearn",
"joblib",
"decorator",
"soundfile",
"resampy",
"numba",
]
extra_deps = [
"numpydoc",
"sphinx",
"sphinx_rtd_theme",
"sphinxcontrib.versioning",
"sphinx-gallery",
"pytest",
"pytest-mpl",
"pytest-cov",
"matplotlib",
"presets",
]
print("INSTALLED VERSIONS")
print("------------------")
print("python: {}\n".format(sys.version))
print("librosa: {}\n".format(version))
for dep in core_deps:
print("{}: {}".format(dep, __get_mod_version(dep)))
print("")
for dep in extra_deps:
print("{}: {}".format(dep, __get_mod_version(dep)))
| isc |
OshynSong/scikit-learn | benchmarks/bench_multilabel_metrics.py | 276 | 7138 | #!/usr/bin/env python
"""
A comparison of multilabel target formats and metrics over them
"""
from __future__ import division
from __future__ import print_function
from timeit import timeit
from functools import partial
import itertools
import argparse
import sys
import matplotlib.pyplot as plt
import scipy.sparse as sp
import numpy as np
from sklearn.datasets import make_multilabel_classification
from sklearn.metrics import (f1_score, accuracy_score, hamming_loss,
jaccard_similarity_score)
from sklearn.utils.testing import ignore_warnings
METRICS = {
'f1': partial(f1_score, average='micro'),
'f1-by-sample': partial(f1_score, average='samples'),
'accuracy': accuracy_score,
'hamming': hamming_loss,
'jaccard': jaccard_similarity_score,
}
FORMATS = {
'sequences': lambda y: [list(np.flatnonzero(s)) for s in y],
'dense': lambda y: y,
'csr': lambda y: sp.csr_matrix(y),
'csc': lambda y: sp.csc_matrix(y),
}
@ignore_warnings
def benchmark(metrics=tuple(v for k, v in sorted(METRICS.items())),
formats=tuple(v for k, v in sorted(FORMATS.items())),
samples=1000, classes=4, density=.2,
n_times=5):
"""Times metric calculations for a number of inputs
Parameters
----------
metrics : array-like of callables (1d or 0d)
The metric functions to time.
formats : array-like of callables (1d or 0d)
These may transform a dense indicator matrix into multilabel
representation.
samples : array-like of ints (1d or 0d)
The number of samples to generate as input.
classes : array-like of ints (1d or 0d)
The number of classes in the input.
    density : array-like of floats (1d or 0d)
The density of positive labels in the input.
n_times : int
Time calling the metric n_times times.
Returns
-------
array of floats shaped like (metrics, formats, samples, classes, density)
Time in seconds.
"""
metrics = np.atleast_1d(metrics)
samples = np.atleast_1d(samples)
classes = np.atleast_1d(classes)
density = np.atleast_1d(density)
formats = np.atleast_1d(formats)
out = np.zeros((len(metrics), len(formats), len(samples), len(classes),
len(density)), dtype=float)
it = itertools.product(samples, classes, density)
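    # out[k, j].flat[i] relies on C-order flattening, which matches the order in
    # which itertools.product enumerates (samples, classes, density)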
for i, (s, c, d) in enumerate(it):
_, y_true = make_multilabel_classification(n_samples=s, n_features=1,
n_classes=c, n_labels=d * c,
random_state=42)
_, y_pred = make_multilabel_classification(n_samples=s, n_features=1,
n_classes=c, n_labels=d * c,
random_state=84)
for j, f in enumerate(formats):
f_true = f(y_true)
f_pred = f(y_pred)
for k, metric in enumerate(metrics):
t = timeit(partial(metric, f_true, f_pred), number=n_times)
out[k, j].flat[i] = t
return out
def _tabulate(results, metrics, formats):
"""Prints results by metric and format
Uses the last ([-1]) value of other fields
"""
column_width = max(max(len(k) for k in formats) + 1, 8)
first_width = max(len(k) for k in metrics)
head_fmt = ('{:<{fw}s}' + '{:>{cw}s}' * len(formats))
row_fmt = ('{:<{fw}s}' + '{:>{cw}.3f}' * len(formats))
print(head_fmt.format('Metric', *formats,
cw=column_width, fw=first_width))
for metric, row in zip(metrics, results[:, :, -1, -1, -1]):
print(row_fmt.format(metric, *row,
cw=column_width, fw=first_width))
def _plot(results, metrics, formats, title, x_ticks, x_label,
format_markers=('x', '|', 'o', '+'),
metric_colors=('c', 'm', 'y', 'k', 'g', 'r', 'b')):
"""
Plot the results by metric, format and some other variable given by
x_label
"""
fig = plt.figure('scikit-learn multilabel metrics benchmarks')
plt.title(title)
ax = fig.add_subplot(111)
for i, metric in enumerate(metrics):
for j, format in enumerate(formats):
ax.plot(x_ticks, results[i, j].flat,
label='{}, {}'.format(metric, format),
marker=format_markers[j],
color=metric_colors[i % len(metric_colors)])
ax.set_xlabel(x_label)
ax.set_ylabel('Time (s)')
ax.legend()
plt.show()
if __name__ == "__main__":
ap = argparse.ArgumentParser()
ap.add_argument('metrics', nargs='*', default=sorted(METRICS),
help='Specifies metrics to benchmark, defaults to all. '
'Choices are: {}'.format(sorted(METRICS)))
ap.add_argument('--formats', nargs='+', choices=sorted(FORMATS),
help='Specifies multilabel formats to benchmark '
'(defaults to all).')
ap.add_argument('--samples', type=int, default=1000,
help='The number of samples to generate')
ap.add_argument('--classes', type=int, default=10,
help='The number of classes')
ap.add_argument('--density', type=float, default=.2,
help='The average density of labels per sample')
ap.add_argument('--plot', choices=['classes', 'density', 'samples'],
default=None,
help='Plot time with respect to this parameter varying '
'up to the specified value')
ap.add_argument('--n-steps', default=10, type=int,
help='Plot this many points for each metric')
ap.add_argument('--n-times',
default=5, type=int,
help="Time performance over n_times trials")
args = ap.parse_args()
if args.plot is not None:
max_val = getattr(args, args.plot)
if args.plot in ('classes', 'samples'):
min_val = 2
else:
min_val = 0
steps = np.linspace(min_val, max_val, num=args.n_steps + 1)[1:]
if args.plot in ('classes', 'samples'):
steps = np.unique(np.round(steps).astype(int))
setattr(args, args.plot, steps)
if args.metrics is None:
args.metrics = sorted(METRICS)
if args.formats is None:
args.formats = sorted(FORMATS)
results = benchmark([METRICS[k] for k in args.metrics],
[FORMATS[k] for k in args.formats],
args.samples, args.classes, args.density,
args.n_times)
_tabulate(results, args.metrics, args.formats)
if args.plot is not None:
print('Displaying plot', file=sys.stderr)
title = ('Multilabel metrics with %s' %
', '.join('{0}={1}'.format(field, getattr(args, field))
for field in ['samples', 'classes', 'density']
if args.plot != field))
_plot(results, args.metrics, args.formats, title, steps, args.plot)
| bsd-3-clause |
cloud-fan/spark | python/pyspark/pandas/utils.py | 3 | 33908 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Commonly used utils in pandas-on-Spark.
"""
import functools
from collections import OrderedDict
from contextlib import contextmanager
import os
from typing import (
Any,
Callable,
Dict,
Iterator,
List,
Optional,
Tuple,
Union,
TYPE_CHECKING,
cast,
no_type_check,
overload,
)
import warnings
from pyspark import sql as spark
from pyspark.sql import functions as F
from pyspark.sql.types import DoubleType
import pandas as pd
from pandas.api.types import is_list_like
# For running doctests and reference resolution in PyCharm.
from pyspark import pandas as ps # noqa: F401
from pyspark.pandas.typedef.typehints import as_spark_type
if TYPE_CHECKING:
# This is required in old Python 3.5 to prevent circular reference.
from pyspark.pandas.base import IndexOpsMixin # noqa: F401 (SPARK-34943)
from pyspark.pandas.frame import DataFrame # noqa: F401 (SPARK-34943)
from pyspark.pandas.internal import InternalFrame # noqa: F401 (SPARK-34943)
from pyspark.pandas.series import Series # noqa: F401 (SPARK-34943)
ERROR_MESSAGE_CANNOT_COMBINE = (
"Cannot combine the series or dataframe because it comes from a different dataframe. "
"In order to allow this operation, enable 'compute.ops_on_diff_frames' option."
)
SPARK_CONF_ARROW_ENABLED = "spark.sql.execution.arrow.pyspark.enabled"
def same_anchor(
this: Union["DataFrame", "IndexOpsMixin", "InternalFrame"],
that: Union["DataFrame", "IndexOpsMixin", "InternalFrame"],
) -> bool:
"""
Check if the anchors of the given DataFrame or Series are the same or not.
"""
from pyspark.pandas.base import IndexOpsMixin
from pyspark.pandas.frame import DataFrame
from pyspark.pandas.internal import InternalFrame
if isinstance(this, InternalFrame):
this_internal = this
else:
assert isinstance(this, (DataFrame, IndexOpsMixin)), type(this)
this_internal = this._internal
if isinstance(that, InternalFrame):
that_internal = that
else:
assert isinstance(that, (DataFrame, IndexOpsMixin)), type(that)
that_internal = that._internal
return (
this_internal.spark_frame is that_internal.spark_frame
and this_internal.index_level == that_internal.index_level
and all(
spark_column_equals(this_scol, that_scol)
for this_scol, that_scol in zip(
this_internal.index_spark_columns, that_internal.index_spark_columns
)
)
)
def combine_frames(
this: "DataFrame",
*args: Union["DataFrame", "Series"],
how: str = "full",
preserve_order_column: bool = False
) -> "DataFrame":
"""
This method combines `this` DataFrame with a different `that` DataFrame or
Series from a different DataFrame.
    It returns a DataFrame that has prefix `this_` and `that_` to distinguish
    the column names coming from each DataFrame.
It internally performs a join operation which can be expensive in general.
So, if `compute.ops_on_diff_frames` option is False,
this method throws an exception.
"""
from pyspark.pandas.config import get_option
from pyspark.pandas.frame import DataFrame
from pyspark.pandas.internal import (
InternalField,
InternalFrame,
HIDDEN_COLUMNS,
NATURAL_ORDER_COLUMN_NAME,
SPARK_INDEX_NAME_FORMAT,
)
from pyspark.pandas.series import Series
if all(isinstance(arg, Series) for arg in args):
assert all(
same_anchor(arg, args[0]) for arg in args
), "Currently only one different DataFrame (from given Series) is supported"
        assert not same_anchor(this, args[0]), "We don't need to combine. All series are in this."
that = args[0]._psdf[list(args)]
elif len(args) == 1 and isinstance(args[0], DataFrame):
assert isinstance(args[0], DataFrame)
assert not same_anchor(
this, args[0]
), "We don't need to combine. `this` and `that` are same."
that = args[0]
else:
raise AssertionError("args should be single DataFrame or " "single/multiple Series")
if get_option("compute.ops_on_diff_frames"):
def resolve(internal: InternalFrame, side: str) -> InternalFrame:
rename = lambda col: "__{}_{}".format(side, col)
internal = internal.resolved_copy
sdf = internal.spark_frame
sdf = internal.spark_frame.select(
*[
scol_for(sdf, col).alias(rename(col))
for col in sdf.columns
if col not in HIDDEN_COLUMNS
],
*HIDDEN_COLUMNS
)
return internal.copy(
spark_frame=sdf,
index_spark_columns=[
scol_for(sdf, rename(col)) for col in internal.index_spark_column_names
],
index_fields=[
field.copy(name=rename(field.name)) for field in internal.index_fields
],
data_spark_columns=[
scol_for(sdf, rename(col)) for col in internal.data_spark_column_names
],
data_fields=[field.copy(name=rename(field.name)) for field in internal.data_fields],
)
this_internal = resolve(this._internal, "this")
that_internal = resolve(that._internal, "that")
this_index_map = list(
zip(
this_internal.index_spark_column_names,
this_internal.index_names,
this_internal.index_fields,
)
)
that_index_map = list(
zip(
that_internal.index_spark_column_names,
that_internal.index_names,
that_internal.index_fields,
)
)
assert len(this_index_map) == len(that_index_map)
join_scols = []
merged_index_scols = []
# Note that the order of each element in index_map is guaranteed according to the index
# level.
this_and_that_index_map = list(zip(this_index_map, that_index_map))
this_sdf = this_internal.spark_frame.alias("this")
that_sdf = that_internal.spark_frame.alias("that")
# If the same named index is found, that's used.
index_column_names = []
index_use_extension_dtypes = []
for (
i,
((this_column, this_name, this_field), (that_column, that_name, that_field)),
) in enumerate(this_and_that_index_map):
if this_name == that_name:
# We should merge the Spark columns into one
# to mimic pandas' behavior.
this_scol = scol_for(this_sdf, this_column)
that_scol = scol_for(that_sdf, that_column)
join_scol = this_scol == that_scol
join_scols.append(join_scol)
column_name = SPARK_INDEX_NAME_FORMAT(i)
index_column_names.append(column_name)
index_use_extension_dtypes.append(
any(field.is_extension_dtype for field in [this_field, that_field])
)
merged_index_scols.append(
F.when(this_scol.isNotNull(), this_scol).otherwise(that_scol).alias(column_name)
)
else:
raise ValueError("Index names must be exactly matched currently.")
assert len(join_scols) > 0, "cannot join with no overlapping index names"
joined_df = this_sdf.join(that_sdf, on=join_scols, how=how)
if preserve_order_column:
order_column = [scol_for(this_sdf, NATURAL_ORDER_COLUMN_NAME)]
else:
order_column = []
joined_df = joined_df.select(
*merged_index_scols,
*(
scol_for(this_sdf, this_internal.spark_column_name_for(label))
for label in this_internal.column_labels
),
*(
scol_for(that_sdf, that_internal.spark_column_name_for(label))
for label in that_internal.column_labels
),
*order_column
)
index_spark_columns = [scol_for(joined_df, col) for col in index_column_names]
index_columns = set(index_column_names)
new_data_columns = [
col
for col in joined_df.columns
if col not in index_columns and col != NATURAL_ORDER_COLUMN_NAME
]
schema = joined_df.select(*index_spark_columns, *new_data_columns).schema
index_fields = [
InternalField.from_struct_field(struct_field, use_extension_dtypes=use_extension_dtypes)
for struct_field, use_extension_dtypes in zip(
schema.fields[: len(index_spark_columns)], index_use_extension_dtypes
)
]
data_fields = [
InternalField.from_struct_field(
struct_field, use_extension_dtypes=field.is_extension_dtype
)
for struct_field, field in zip(
schema.fields[len(index_spark_columns) :],
this_internal.data_fields + that_internal.data_fields,
)
]
level = max(this_internal.column_labels_level, that_internal.column_labels_level)
def fill_label(label: Optional[Tuple]) -> List:
if label is None:
return ([""] * (level - 1)) + [None]
else:
return ([""] * (level - len(label))) + list(label)
column_labels = [
tuple(["this"] + fill_label(label)) for label in this_internal.column_labels
] + [tuple(["that"] + fill_label(label)) for label in that_internal.column_labels]
column_label_names = (
cast(List[Optional[Tuple]], [None]) * (1 + level - this_internal.column_labels_level)
) + this_internal.column_label_names
return DataFrame(
InternalFrame(
spark_frame=joined_df,
index_spark_columns=index_spark_columns,
index_names=this_internal.index_names,
index_fields=index_fields,
column_labels=column_labels,
data_spark_columns=[scol_for(joined_df, col) for col in new_data_columns],
data_fields=data_fields,
column_label_names=column_label_names,
)
)
else:
raise ValueError(ERROR_MESSAGE_CANNOT_COMBINE)
def align_diff_frames(
resolve_func: Callable[["DataFrame", List[Tuple], List[Tuple]], Tuple["Series", Tuple]],
this: "DataFrame",
that: "DataFrame",
fillna: bool = True,
how: str = "full",
preserve_order_column: bool = False,
) -> "DataFrame":
"""
This method aligns two different DataFrames with a given `func`. Columns are resolved and
handled within the given `func`.
To use this, `compute.ops_on_diff_frames` should be True, for now.
:param resolve_func: Takes aligned (joined) DataFrame, the column of the current DataFrame, and
the column of another DataFrame. It returns an iterable that produces Series.
>>> from pyspark.pandas.config import set_option, reset_option
>>>
>>> set_option("compute.ops_on_diff_frames", True)
>>>
>>> psdf1 = ps.DataFrame({'a': [9, 8, 7, 6, 5, 4, 3, 2, 1]})
>>> psdf2 = ps.DataFrame({'a': [9, 8, 7, 6, 5, 4, 3, 2, 1]})
>>>
>>> def func(psdf, this_column_labels, that_column_labels):
... psdf # conceptually this is A + B.
...
... # Within this function, Series from A or B can be performed against `psdf`.
... this_label = this_column_labels[0] # this is ('a',) from psdf1.
... that_label = that_column_labels[0] # this is ('a',) from psdf2.
... new_series = (psdf[this_label] - psdf[that_label]).rename(str(this_label))
...
... # This new series will be placed in new DataFrame.
... yield (new_series, this_label)
>>>
>>>
>>> align_diff_frames(func, psdf1, psdf2).sort_index()
a
0 0
1 0
2 0
3 0
4 0
5 0
6 0
7 0
8 0
>>> reset_option("compute.ops_on_diff_frames")
:param this: a DataFrame to align
:param that: another DataFrame to align
:param fillna: If True, it fills missing values in non-common columns in both `this` and `that`.
        Otherwise, the non-common columns are left as they are.
:param how: join way. In addition, it affects how `resolve_func` resolves the column conflict.
- full: `resolve_func` should resolve only common columns from 'this' and 'that' DataFrames.
For instance, if 'this' has columns A, B, C and that has B, C, D, `this_columns` and
'that_columns' in this function are B, C and B, C.
- left: `resolve_func` should resolve columns including that columns.
For instance, if 'this' has columns A, B, C and that has B, C, D, `this_columns` is
B, C but `that_columns` are B, C, D.
- inner: Same as 'full' mode; however, internally performs inner join instead.
:return: Aligned DataFrame
"""
from pyspark.pandas.frame import DataFrame
assert how == "full" or how == "left" or how == "inner"
this_column_labels = this._internal.column_labels
that_column_labels = that._internal.column_labels
common_column_labels = set(this_column_labels).intersection(that_column_labels)
# 1. Perform the join given two dataframes.
combined = combine_frames(this, that, how=how, preserve_order_column=preserve_order_column)
# 2. Apply the given function to transform the columns in a batch and keep the new columns.
combined_column_labels = combined._internal.column_labels
that_columns_to_apply = []
this_columns_to_apply = []
additional_that_columns = []
columns_to_keep = []
column_labels_to_keep = []
for combined_label in combined_column_labels:
for common_label in common_column_labels:
if combined_label == tuple(["this", *common_label]):
this_columns_to_apply.append(combined_label)
break
elif combined_label == tuple(["that", *common_label]):
that_columns_to_apply.append(combined_label)
break
else:
if how == "left" and combined_label in [
tuple(["that", *label]) for label in that_column_labels
]:
# In this case, we will drop `that_columns` in `columns_to_keep` but passes
# it later to `func`. `func` should resolve it.
# Note that adding this into a separate list (`additional_that_columns`)
# is intentional so that `this_columns` and `that_columns` can be paired.
additional_that_columns.append(combined_label)
elif fillna:
columns_to_keep.append(F.lit(None).cast(DoubleType()).alias(str(combined_label)))
column_labels_to_keep.append(combined_label)
else:
columns_to_keep.append(combined._psser_for(combined_label))
column_labels_to_keep.append(combined_label)
that_columns_to_apply += additional_that_columns
# Should extract columns to apply and do it in a batch in case
# it adds new columns for example.
if len(this_columns_to_apply) > 0 or len(that_columns_to_apply) > 0:
psser_set, column_labels_set = zip(
*resolve_func(combined, this_columns_to_apply, that_columns_to_apply)
)
columns_applied = list(psser_set)
column_labels_applied = list(column_labels_set)
else:
columns_applied = []
column_labels_applied = []
applied = DataFrame(
combined._internal.with_new_columns(
columns_applied + columns_to_keep,
column_labels=column_labels_applied + column_labels_to_keep,
)
) # type: DataFrame
# 3. Restore the names back and deduplicate columns.
this_labels = OrderedDict()
# Add columns in an order of its original frame.
for this_label in this_column_labels:
for new_label in applied._internal.column_labels:
if new_label[1:] not in this_labels and this_label == new_label[1:]:
this_labels[new_label[1:]] = new_label
# After that, we will add the rest columns.
other_labels = OrderedDict()
for new_label in applied._internal.column_labels:
if new_label[1:] not in this_labels:
other_labels[new_label[1:]] = new_label
psdf = applied[list(this_labels.values()) + list(other_labels.values())]
psdf.columns = psdf.columns.droplevel()
return psdf
def is_testing() -> bool:
"""Indicates whether Spark is currently running tests."""
return "SPARK_TESTING" in os.environ
def default_session(conf: Optional[Dict[str, Any]] = None) -> spark.SparkSession:
if conf is None:
conf = dict()
builder = spark.SparkSession.builder.appName("pandas-on-Spark")
for key, value in conf.items():
builder = builder.config(key, value)
# Currently, pandas-on-Spark is dependent on such join due to 'compute.ops_on_diff_frames'
# configuration. This is needed with Spark 3.0+.
builder.config("spark.sql.analyzer.failAmbiguousSelfJoin", False)
if is_testing():
builder.config("spark.executor.allowSparkContext", False)
return builder.getOrCreate()
@contextmanager
def sql_conf(
pairs: Dict[str, Any], *, spark: Optional[spark.SparkSession] = None
) -> Iterator[None]:
"""
    A convenient context manager to set `value` to the Spark SQL configuration `key` and
    then restore it when it exits.
"""
assert isinstance(pairs, dict), "pairs should be a dictionary."
if spark is None:
spark = default_session()
keys = pairs.keys()
new_values = pairs.values()
old_values = [spark.conf.get(key, None) for key in keys]
for key, new_value in zip(keys, new_values):
spark.conf.set(key, new_value)
try:
yield
finally:
for key, old_value in zip(keys, old_values):
if old_value is None:
spark.conf.unset(key)
else:
spark.conf.set(key, old_value)
def validate_arguments_and_invoke_function(
pobj: Union[pd.DataFrame, pd.Series],
pandas_on_spark_func: Callable,
pandas_func: Callable,
input_args: Dict,
) -> Any:
"""
Invokes a pandas function.
This is created because different versions of pandas support different parameters, and as a
result when we code against the latest version, our users might get a confusing
"got an unexpected keyword argument" error if they are using an older version of pandas.
This function validates all the arguments, removes the ones that are not supported if they
are simply the default value (i.e. most likely the user didn't explicitly specify it). It
    throws a TypeError if the user explicitly specifies an argument that is not supported by the
pandas version available.
For example usage, look at DataFrame.to_html().
:param pobj: the pandas DataFrame or Series to operate on
:param pandas_on_spark_func: pandas-on-Spark function, used to get default parameter values
:param pandas_func: pandas function, used to check whether pandas supports all the arguments
:param input_args: arguments to pass to the pandas function, often created by using locals().
Make sure locals() call is at the top of the function so it captures only
input parameters, rather than local variables.
:return: whatever pandas_func returns
"""
import inspect
# Makes a copy since whatever passed in is likely created by locals(), and we can't delete
# 'self' key from that.
args = input_args.copy()
del args["self"]
if "kwargs" in args:
# explode kwargs
kwargs = args["kwargs"]
del args["kwargs"]
args = {**args, **kwargs}
pandas_on_spark_params = inspect.signature(pandas_on_spark_func).parameters
pandas_params = inspect.signature(pandas_func).parameters
for param in pandas_on_spark_params.values():
if param.name not in pandas_params:
if args[param.name] == param.default:
del args[param.name]
else:
raise TypeError(
(
"The pandas version [%s] available does not support parameter '%s' "
+ "for function '%s'."
)
% (pd.__version__, param.name, pandas_func.__name__)
)
args["self"] = pobj
return pandas_func(**args)
@no_type_check
def lazy_property(fn: Callable[[Any], Any]) -> property:
"""
Decorator that makes a property lazy-evaluated.
Copied from https://stevenloria.com/lazy-properties/
"""
attr_name = "_lazy_" + fn.__name__
@property
@functools.wraps(fn)
def wrapped_lazy_property(self):
if not hasattr(self, attr_name):
setattr(self, attr_name, fn(self))
return getattr(self, attr_name)
def deleter(self):
if hasattr(self, attr_name):
delattr(self, attr_name)
return wrapped_lazy_property.deleter(deleter)
def scol_for(sdf: spark.DataFrame, column_name: str) -> spark.Column:
"""Return Spark Column for the given column name."""
return sdf["`{}`".format(column_name)]
def column_labels_level(column_labels: List[Tuple]) -> int:
"""Return the level of the column index."""
if len(column_labels) == 0:
return 1
else:
levels = set(1 if label is None else len(label) for label in column_labels)
assert len(levels) == 1, levels
return list(levels)[0]
def name_like_string(name: Optional[Union[Any, Tuple]]) -> str:
"""
Return the name-like strings from str or tuple of str
Examples
--------
>>> name = 'abc'
>>> name_like_string(name)
'abc'
>>> name = ('abc',)
>>> name_like_string(name)
'abc'
>>> name = ('a', 'b', 'c')
>>> name_like_string(name)
'(a, b, c)'
"""
if name is None:
name = ("__none__",)
elif is_list_like(name):
name = tuple([str(n) for n in name])
else:
name = (str(name),)
return ("(%s)" % ", ".join(name)) if len(name) > 1 else name[0]
def is_name_like_tuple(value: Any, allow_none: bool = True, check_type: bool = False) -> bool:
"""
    Check that the given tuple can be used as a name.
Examples
--------
>>> is_name_like_tuple(('abc',))
True
>>> is_name_like_tuple((1,))
True
>>> is_name_like_tuple(('abc', 1, None))
True
>>> is_name_like_tuple(('abc', 1, None), check_type=True)
True
>>> is_name_like_tuple((1.0j,))
True
>>> is_name_like_tuple(tuple())
False
>>> is_name_like_tuple((list('abc'),))
False
>>> is_name_like_tuple(('abc', 1, None), allow_none=False)
False
>>> is_name_like_tuple((1.0j,), check_type=True)
False
"""
if value is None:
return allow_none
elif not isinstance(value, tuple):
return False
elif len(value) == 0:
return False
elif not allow_none and any(v is None for v in value):
return False
elif any(is_list_like(v) or isinstance(v, slice) for v in value):
return False
elif check_type:
return all(
v is None or as_spark_type(type(v), raise_error=False) is not None for v in value
)
else:
return True
def is_name_like_value(
value: Any, allow_none: bool = True, allow_tuple: bool = True, check_type: bool = False
) -> bool:
"""
Check the given value is like a name.
Examples
--------
>>> is_name_like_value('abc')
True
>>> is_name_like_value(1)
True
>>> is_name_like_value(None)
True
>>> is_name_like_value(('abc',))
True
>>> is_name_like_value(1.0j)
True
>>> is_name_like_value(list('abc'))
False
>>> is_name_like_value(None, allow_none=False)
False
>>> is_name_like_value(('abc',), allow_tuple=False)
False
>>> is_name_like_value(1.0j, check_type=True)
False
"""
if value is None:
return allow_none
elif isinstance(value, tuple):
return allow_tuple and is_name_like_tuple(
value, allow_none=allow_none, check_type=check_type
)
elif is_list_like(value) or isinstance(value, slice):
return False
elif check_type:
return as_spark_type(type(value), raise_error=False) is not None
else:
return True
def validate_axis(axis: Optional[Union[int, str]] = 0, none_axis: int = 0) -> int:
"""Check the given axis is valid."""
# convert to numeric axis
axis = cast(
Dict[Optional[Union[int, str]], int], {None: none_axis, "index": 0, "columns": 1}
).get(axis, axis)
if axis in (none_axis, 0, 1):
return cast(int, axis)
else:
raise ValueError("No axis named {0}".format(axis))
def validate_bool_kwarg(value: Any, arg_name: str) -> Optional[bool]:
"""Ensures that argument passed in arg_name is of type bool."""
if not (isinstance(value, bool) or value is None):
raise TypeError(
'For argument "{}" expected type bool, received '
"type {}.".format(arg_name, type(value).__name__)
)
return value
def validate_how(how: str) -> str:
"""Check the given how for join is valid."""
if how == "full":
warnings.warn(
"Warning: While pandas-on-Spark will accept 'full', you should use 'outer' "
+ "instead to be compatible with the pandas merge API",
UserWarning,
)
if how == "outer":
# 'outer' in pandas equals 'full' in Spark
how = "full"
if how not in ("inner", "left", "right", "full"):
raise ValueError(
"The 'how' parameter has to be amongst the following values: ",
"['inner', 'left', 'right', 'outer']",
)
return how
@overload
def verify_temp_column_name(df: spark.DataFrame, column_name_or_label: str) -> str:
...
@overload
def verify_temp_column_name(
df: "DataFrame", column_name_or_label: Union[Any, Tuple]
) -> Union[Any, Tuple]:
...
def verify_temp_column_name(
df: Union["DataFrame", spark.DataFrame], column_name_or_label: Union[Any, Tuple]
) -> Union[Any, Tuple]:
"""
Verify that the given column name does not exist in the given pandas-on-Spark or
Spark DataFrame.
The temporary column names should start and end with `__`. In addition, `column_name_or_label`
expects a single string, or column labels when `df` is a pandas-on-Spark DataFrame.
>>> psdf = ps.DataFrame({("x", "a"): ['a', 'b', 'c']})
>>> psdf["__dummy__"] = 0
>>> psdf[("", "__dummy__")] = 1
>>> psdf # doctest: +NORMALIZE_WHITESPACE
x __dummy__
a __dummy__
0 a 0 1
1 b 0 1
2 c 0 1
>>> verify_temp_column_name(psdf, '__tmp__')
('__tmp__', '')
>>> verify_temp_column_name(psdf, ('', '__tmp__'))
('', '__tmp__')
>>> verify_temp_column_name(psdf, '__dummy__')
Traceback (most recent call last):
...
AssertionError: ... `(__dummy__, )` ...
>>> verify_temp_column_name(psdf, ('', '__dummy__'))
Traceback (most recent call last):
...
AssertionError: ... `(, __dummy__)` ...
>>> verify_temp_column_name(psdf, 'dummy')
Traceback (most recent call last):
...
AssertionError: ... should be empty or start and end with `__`: ('dummy', '')
>>> verify_temp_column_name(psdf, ('', 'dummy'))
Traceback (most recent call last):
...
AssertionError: ... should be empty or start and end with `__`: ('', 'dummy')
>>> internal = psdf._internal.resolved_copy
>>> sdf = internal.spark_frame
>>> sdf.select(internal.data_spark_columns).show() # doctest: +NORMALIZE_WHITESPACE
+------+---------+-------------+
|(x, a)|__dummy__|(, __dummy__)|
+------+---------+-------------+
| a| 0| 1|
| b| 0| 1|
| c| 0| 1|
+------+---------+-------------+
>>> verify_temp_column_name(sdf, '__tmp__')
'__tmp__'
>>> verify_temp_column_name(sdf, '__dummy__')
Traceback (most recent call last):
...
AssertionError: ... `__dummy__` ... '(x, a)', '__dummy__', '(, __dummy__)', ...
>>> verify_temp_column_name(sdf, ('', '__dummy__'))
Traceback (most recent call last):
...
AssertionError: <class 'tuple'>
>>> verify_temp_column_name(sdf, 'dummy')
Traceback (most recent call last):
...
AssertionError: ... should start and end with `__`: dummy
"""
from pyspark.pandas.frame import DataFrame
if isinstance(df, DataFrame):
if isinstance(column_name_or_label, str):
column_name = column_name_or_label
level = df._internal.column_labels_level
column_name_or_label = tuple([column_name_or_label] + ([""] * (level - 1)))
else:
column_name = name_like_string(column_name_or_label)
assert any(len(label) > 0 for label in column_name_or_label) and all(
label == "" or (label.startswith("__") and label.endswith("__"))
for label in column_name_or_label
), "The temporary column name should be empty or start and end with `__`: {}".format(
column_name_or_label
)
assert all(
column_name_or_label != label for label in df._internal.column_labels
), "The given column name `{}` already exists in the pandas-on-Spark DataFrame: {}".format(
name_like_string(column_name_or_label), df.columns
)
df = df._internal.resolved_copy.spark_frame
else:
assert isinstance(column_name_or_label, str), type(column_name_or_label)
assert column_name_or_label.startswith("__") and column_name_or_label.endswith(
"__"
), "The temporary column name should start and end with `__`: {}".format(
column_name_or_label
)
column_name = column_name_or_label
assert isinstance(df, spark.DataFrame), type(df)
assert (
column_name not in df.columns
), "The given column name `{}` already exists in the Spark DataFrame: {}".format(
column_name, df.columns
)
return column_name_or_label
def spark_column_equals(left: spark.Column, right: spark.Column) -> bool:
"""
Check both `left` and `right` have the same expressions.
>>> spark_column_equals(F.lit(0), F.lit(0))
True
>>> spark_column_equals(F.lit(0) + 1, F.lit(0) + 1)
True
>>> spark_column_equals(F.lit(0) + 1, F.lit(0) + 2)
False
>>> sdf1 = ps.DataFrame({"x": ['a', 'b', 'c']}).to_spark()
>>> spark_column_equals(sdf1["x"] + 1, sdf1["x"] + 1)
True
>>> sdf2 = ps.DataFrame({"x": ['a', 'b', 'c']}).to_spark()
>>> spark_column_equals(sdf1["x"] + 1, sdf2["x"] + 1)
False
"""
return left._jc.equals(right._jc) # type: ignore
def compare_null_first(
left: spark.Column,
right: spark.Column,
comp: Callable[[spark.Column, spark.Column], spark.Column],
) -> spark.Column:
return (left.isNotNull() & right.isNotNull() & comp(left, right)) | (
left.isNull() & right.isNotNull()
)
def compare_null_last(
left: spark.Column,
right: spark.Column,
comp: Callable[[spark.Column, spark.Column], spark.Column],
) -> spark.Column:
return (left.isNotNull() & right.isNotNull() & comp(left, right)) | (
left.isNotNull() & right.isNull()
)
def compare_disallow_null(
left: spark.Column,
right: spark.Column,
comp: Callable[[spark.Column, spark.Column], spark.Column],
) -> spark.Column:
return left.isNotNull() & right.isNotNull() & comp(left, right)
def compare_allow_null(
left: spark.Column,
right: spark.Column,
comp: Callable[[spark.Column, spark.Column], spark.Column],
) -> spark.Column:
return left.isNull() | right.isNull() | comp(left, right)
def _test() -> None:
import os
import doctest
import sys
from pyspark.sql import SparkSession
import pyspark.pandas.utils
os.chdir(os.environ["SPARK_HOME"])
globs = pyspark.pandas.utils.__dict__.copy()
globs["ps"] = pyspark.pandas
spark = (
SparkSession.builder.master("local[4]").appName("pyspark.pandas.utils tests").getOrCreate()
)
(failure_count, test_count) = doctest.testmod(
pyspark.pandas.utils,
globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE,
)
spark.stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
| apache-2.0 |
ssaeger/scikit-learn | sklearn/utils/tests/test_class_weight.py | 50 | 13151 | import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.datasets import make_blobs
from sklearn.utils.class_weight import compute_class_weight
from sklearn.utils.class_weight import compute_sample_weight
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_warns
def test_compute_class_weight():
# Test (and demo) compute_class_weight.
y = np.asarray([2, 2, 2, 3, 3, 4])
classes = np.unique(y)
cw = assert_warns(DeprecationWarning,
compute_class_weight, "auto", classes, y)
assert_almost_equal(cw.sum(), classes.shape)
assert_true(cw[0] < cw[1] < cw[2])
cw = compute_class_weight("balanced", classes, y)
# total effect of samples is preserved
class_counts = np.bincount(y)[2:]
assert_almost_equal(np.dot(cw, class_counts), y.shape[0])
assert_true(cw[0] < cw[1] < cw[2])
def test_compute_class_weight_not_present():
# Raise error when y does not contain all class labels
classes = np.arange(4)
y = np.asarray([0, 0, 0, 1, 1, 2])
assert_raises(ValueError, compute_class_weight, "auto", classes, y)
assert_raises(ValueError, compute_class_weight, "balanced", classes, y)
# Raise error when y has items not in classes
classes = np.arange(2)
assert_raises(ValueError, compute_class_weight, "auto", classes, y)
assert_raises(ValueError, compute_class_weight, "balanced", classes, y)
assert_raises(ValueError, compute_class_weight, {0: 1., 1: 2.}, classes, y)
def test_compute_class_weight_dict():
classes = np.arange(3)
class_weights = {0: 1.0, 1: 2.0, 2: 3.0}
y = np.asarray([0, 0, 1, 2])
cw = compute_class_weight(class_weights, classes, y)
# When the user specifies class weights, compute_class_weights should just
# return them.
assert_array_almost_equal(np.asarray([1.0, 2.0, 3.0]), cw)
# When a class weight is specified that isn't in classes, a ValueError
# should get raised
msg = 'Class label 4 not present.'
class_weights = {0: 1.0, 1: 2.0, 2: 3.0, 4: 1.5}
assert_raise_message(ValueError, msg, compute_class_weight, class_weights,
classes, y)
msg = 'Class label -1 not present.'
class_weights = {-1: 5.0, 0: 1.0, 1: 2.0, 2: 3.0}
assert_raise_message(ValueError, msg, compute_class_weight, class_weights,
classes, y)
def test_compute_class_weight_invariance():
# Test that results with class_weight="balanced" is invariant wrt
# class imbalance if the number of samples is identical.
# The test uses a balanced two class dataset with 100 datapoints.
# It creates three versions, one where class 1 is duplicated
# resulting in 150 points of class 1 and 50 of class 0,
# one where there are 50 points in class 1 and 150 in class 0,
# and one where there are 100 points of each class (this one is balanced
# again).
# With balancing class weights, all three should give the same model.
X, y = make_blobs(centers=2, random_state=0)
# create dataset where class 1 is duplicated twice
X_1 = np.vstack([X] + [X[y == 1]] * 2)
y_1 = np.hstack([y] + [y[y == 1]] * 2)
# create dataset where class 0 is duplicated twice
X_0 = np.vstack([X] + [X[y == 0]] * 2)
y_0 = np.hstack([y] + [y[y == 0]] * 2)
# duplicate everything
X_ = np.vstack([X] * 2)
y_ = np.hstack([y] * 2)
# results should be identical
logreg1 = LogisticRegression(class_weight="balanced").fit(X_1, y_1)
logreg0 = LogisticRegression(class_weight="balanced").fit(X_0, y_0)
logreg = LogisticRegression(class_weight="balanced").fit(X_, y_)
assert_array_almost_equal(logreg1.coef_, logreg0.coef_)
assert_array_almost_equal(logreg.coef_, logreg0.coef_)
def test_compute_class_weight_auto_negative():
# Test compute_class_weight when labels are negative
# Test with balanced class labels.
classes = np.array([-2, -1, 0])
y = np.asarray([-1, -1, 0, 0, -2, -2])
cw = assert_warns(DeprecationWarning, compute_class_weight, "auto",
classes, y)
assert_almost_equal(cw.sum(), classes.shape)
assert_equal(len(cw), len(classes))
assert_array_almost_equal(cw, np.array([1., 1., 1.]))
cw = compute_class_weight("balanced", classes, y)
assert_equal(len(cw), len(classes))
assert_array_almost_equal(cw, np.array([1., 1., 1.]))
# Test with unbalanced class labels.
y = np.asarray([-1, 0, 0, -2, -2, -2])
cw = assert_warns(DeprecationWarning, compute_class_weight, "auto",
classes, y)
assert_almost_equal(cw.sum(), classes.shape)
assert_equal(len(cw), len(classes))
assert_array_almost_equal(cw, np.array([0.545, 1.636, 0.818]), decimal=3)
cw = compute_class_weight("balanced", classes, y)
assert_equal(len(cw), len(classes))
class_counts = np.bincount(y + 2)
assert_almost_equal(np.dot(cw, class_counts), y.shape[0])
assert_array_almost_equal(cw, [2. / 3, 2., 1.])
def test_compute_class_weight_auto_unordered():
# Test compute_class_weight when classes are unordered
classes = np.array([1, 0, 3])
y = np.asarray([1, 0, 0, 3, 3, 3])
cw = assert_warns(DeprecationWarning, compute_class_weight, "auto",
classes, y)
assert_almost_equal(cw.sum(), classes.shape)
assert_equal(len(cw), len(classes))
assert_array_almost_equal(cw, np.array([1.636, 0.818, 0.545]), decimal=3)
cw = compute_class_weight("balanced", classes, y)
class_counts = np.bincount(y)[classes]
assert_almost_equal(np.dot(cw, class_counts), y.shape[0])
assert_array_almost_equal(cw, [2., 1., 2. / 3])
def test_compute_sample_weight():
# Test (and demo) compute_sample_weight.
# Test with balanced classes
y = np.asarray([1, 1, 1, 2, 2, 2])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
sample_weight = compute_sample_weight("balanced", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with user-defined weights
sample_weight = compute_sample_weight({1: 2, 2: 1}, y)
assert_array_almost_equal(sample_weight, [2., 2., 2., 1., 1., 1.])
# Test with column vector of balanced classes
y = np.asarray([[1], [1], [1], [2], [2], [2]])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
sample_weight = compute_sample_weight("balanced", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with unbalanced classes
y = np.asarray([1, 1, 1, 2, 2, 2, 3])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
expected_auto = np.asarray([.6, .6, .6, .6, .6, .6, 1.8])
assert_array_almost_equal(sample_weight, expected_auto)
sample_weight = compute_sample_weight("balanced", y)
expected_balanced = np.array([0.7777, 0.7777, 0.7777, 0.7777, 0.7777, 0.7777, 2.3333])
assert_array_almost_equal(sample_weight, expected_balanced, decimal=4)
# Test with `None` weights
sample_weight = compute_sample_weight(None, y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 1.])
# Test with multi-output of balanced classes
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
sample_weight = compute_sample_weight("balanced", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with multi-output with user-defined weights
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
sample_weight = compute_sample_weight([{1: 2, 2: 1}, {0: 1, 1: 2}], y)
assert_array_almost_equal(sample_weight, [2., 2., 2., 2., 2., 2.])
# Test with multi-output of unbalanced classes
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1], [3, -1]])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, expected_auto ** 2)
sample_weight = compute_sample_weight("balanced", y)
assert_array_almost_equal(sample_weight, expected_balanced ** 2, decimal=3)
def test_compute_sample_weight_with_subsample():
# Test compute_sample_weight with subsamples specified.
# Test with balanced classes and all samples present
y = np.asarray([1, 1, 1, 2, 2, 2])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
sample_weight = compute_sample_weight("balanced", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with column vector of balanced classes and all samples present
y = np.asarray([[1], [1], [1], [2], [2], [2]])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
sample_weight = compute_sample_weight("balanced", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with a subsample
y = np.asarray([1, 1, 1, 2, 2, 2])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y, range(4))
assert_array_almost_equal(sample_weight, [.5, .5, .5, 1.5, 1.5, 1.5])
sample_weight = compute_sample_weight("balanced", y, range(4))
assert_array_almost_equal(sample_weight, [2. / 3, 2. / 3,
2. / 3, 2., 2., 2.])
# Test with a bootstrap subsample
y = np.asarray([1, 1, 1, 2, 2, 2])
sample_weight = assert_warns(DeprecationWarning, compute_sample_weight,
"auto", y, [0, 1, 1, 2, 2, 3])
expected_auto = np.asarray([1 / 3., 1 / 3., 1 / 3., 5 / 3., 5 / 3., 5 / 3.])
assert_array_almost_equal(sample_weight, expected_auto)
sample_weight = compute_sample_weight("balanced", y, [0, 1, 1, 2, 2, 3])
expected_balanced = np.asarray([0.6, 0.6, 0.6, 3., 3., 3.])
assert_array_almost_equal(sample_weight, expected_balanced)
# Test with a bootstrap subsample for multi-output
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
sample_weight = assert_warns(DeprecationWarning, compute_sample_weight,
"auto", y, [0, 1, 1, 2, 2, 3])
assert_array_almost_equal(sample_weight, expected_auto ** 2)
sample_weight = compute_sample_weight("balanced", y, [0, 1, 1, 2, 2, 3])
assert_array_almost_equal(sample_weight, expected_balanced ** 2)
# Test with a missing class
y = np.asarray([1, 1, 1, 2, 2, 2, 3])
sample_weight = assert_warns(DeprecationWarning, compute_sample_weight,
"auto", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 0.])
sample_weight = compute_sample_weight("balanced", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 0.])
# Test with a missing class for multi-output
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1], [2, 2]])
sample_weight = assert_warns(DeprecationWarning, compute_sample_weight,
"auto", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 0.])
sample_weight = compute_sample_weight("balanced", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 0.])
def test_compute_sample_weight_errors():
# Test compute_sample_weight raises errors expected.
# Invalid preset string
y = np.asarray([1, 1, 1, 2, 2, 2])
y_ = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
assert_raises(ValueError, compute_sample_weight, "ni", y)
assert_raises(ValueError, compute_sample_weight, "ni", y, range(4))
assert_raises(ValueError, compute_sample_weight, "ni", y_)
assert_raises(ValueError, compute_sample_weight, "ni", y_, range(4))
# Not "auto" for subsample
assert_raises(ValueError,
compute_sample_weight, {1: 2, 2: 1}, y, range(4))
# Not a list or preset for multi-output
assert_raises(ValueError, compute_sample_weight, {1: 2, 2: 1}, y_)
# Incorrect length list for multi-output
assert_raises(ValueError, compute_sample_weight, [{1: 2, 2: 1}], y_)
| bsd-3-clause |
abhisg/scikit-learn | doc/tutorial/text_analytics/solutions/exercise_02_sentiment.py | 254 | 2795 | """Build a sentiment analysis / polarity model
Sentiment analysis can be cast as a binary text classification problem,
that is, fitting a linear classifier on features extracted from the text
of the user messages so as to guess whether the opinion of the author is
positive or negative.
In this example we will use a movie review dataset.
"""
# Author: Olivier Grisel <[email protected]>
# License: Simplified BSD
import sys
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.svm import LinearSVC
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
from sklearn.datasets import load_files
from sklearn.cross_validation import train_test_split
from sklearn import metrics
if __name__ == "__main__":
# NOTE: we put the following in a 'if __name__ == "__main__"' protected
# block to be able to use a multi-core grid search that also works under
# Windows, see: http://docs.python.org/library/multiprocessing.html#windows
# The multiprocessing module is used as the backend of joblib.Parallel
# that is used when n_jobs != 1 in GridSearchCV
# the training data folder must be passed as first argument
movie_reviews_data_folder = sys.argv[1]
dataset = load_files(movie_reviews_data_folder, shuffle=False)
print("n_samples: %d" % len(dataset.data))
# split the dataset in training and test set:
docs_train, docs_test, y_train, y_test = train_test_split(
dataset.data, dataset.target, test_size=0.25, random_state=None)
# TASK: Build a vectorizer / classifier pipeline that filters out tokens
# that are too rare or too frequent
pipeline = Pipeline([
('vect', TfidfVectorizer(min_df=3, max_df=0.95)),
('clf', LinearSVC(C=1000)),
])
# TASK: Build a grid search to find out whether unigrams or bigrams are
# more useful.
# Fit the pipeline on the training set using grid search for the parameters
parameters = {
'vect__ngram_range': [(1, 1), (1, 2)],
}
grid_search = GridSearchCV(pipeline, parameters, n_jobs=-1)
grid_search.fit(docs_train, y_train)
# TASK: print the cross-validated scores for the each parameters set
# explored by the grid search
print(grid_search.grid_scores_)
# TASK: Predict the outcome on the testing set and store it in a variable
# named y_predicted
y_predicted = grid_search.predict(docs_test)
# Print the classification report
print(metrics.classification_report(y_test, y_predicted,
target_names=dataset.target_names))
# Print and plot the confusion matrix
cm = metrics.confusion_matrix(y_test, y_predicted)
print(cm)
# import matplotlib.pyplot as plt
# plt.matshow(cm)
# plt.show()
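    # Example invocation (the dataset path below is illustrative); the folder
    # must contain one sub-directory per class, as expected by load_files:
    #     python exercise_02_sentiment.py ./txt_sentoken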
| bsd-3-clause |
unnati-xyz/ensemble-package | ensembles/Examples/Decision Tree/Decision Tree.py | 1 | 2388 |
# coding: utf-8
# In[ ]:
import ensembles as en
import pandas as pd
import numpy as np
import xgboost as xgb
import category_encoders as ce
from sklearn import datasets, linear_model, preprocessing, grid_search
from sklearn.preprocessing import Imputer, PolynomialFeatures, StandardScaler
from sklearn.tree import DecisionTreeClassifier
from sklearn.cross_validation import KFold
from sklearn.ensemble import RandomForestClassifier
from sklearn.cross_validation import StratifiedKFold, KFold
from sklearn.preprocessing import OneHotEncoder
from sklearn.externals import joblib
from keras.layers import Dense, Activation, Dropout
from keras.models import Sequential
from keras.regularizers import l2, activity_l2
from sklearn.metrics import roc_auc_score, average_precision_score, f1_score, log_loss, accuracy_score, mean_absolute_error, mean_squared_error, r2_score
from sklearn.cross_validation import train_test_split
from joblib import Parallel, delayed
from sklearn.pipeline import Pipeline
from hyperopt import hp, fmin, tpe, STATUS_OK, Trials
from hyperas import optim
from hyperas.distributions import choice, uniform, conditional
from functools import partial
np.random.seed(1338)
# In[2]:
#Setting the parameters for the Decision Tree Model
# # Example 1
# In[3]:
#Default Values
param_dt = en.parameter_set_decision_tree()
print(param_dt)
# # Example 2
# In[4]:
#Setting max_depth, rest are default values
param_dt = en.parameter_set_decision_tree(max_depth = [6])
print(param_dt)
# # Example 3
# In[5]:
#Setting max_depth, criterion, rest are default values
#Hyper parameter optimisation - max_depth
param_dt = en.parameter_set_decision_tree(max_depth = [6, 10, 12], criterion = ['entropy'])
print(param_dt)
# # Example 4
# In[6]:
#Setting max_depth, splitter, rest are default values
#Hyper parameter optimisation - max_depth
#Hyper parameter optimisation - splitter
param_dt = en.parameter_set_decision_tree(max_depth = [6, 10, 12], splitter = ['best', 'random'])
print(param_dt)
# # Example 5
# In[7]:
#Setting max_depth, splitter, presort; rest are default values
#Hyper parameter optimisation - max_depth
#Hyper parameter optimisation - splitter
param_dt = en.parameter_set_decision_tree(max_depth = [6, 10, 12, 15], splitter = ['best', 'random'], presort = True)
print(param_dt)
| mit |
aje/POT | docs/source/auto_examples/plot_gromov_barycenter.py | 4 | 7325 | # -*- coding: utf-8 -*-
"""
=====================================
Gromov-Wasserstein Barycenter example
=====================================
This example is designed to show how to use the Gromov-Wasserstein distance
computation in POT.
"""
# Author: Erwan Vautier <[email protected]>
# Nicolas Courty <[email protected]>
#
# License: MIT License
import numpy as np
import scipy as sp
import scipy.ndimage as spi
import matplotlib.pylab as pl
from sklearn import manifold
from sklearn.decomposition import PCA
import ot
##############################################################################
# Smacof MDS
# ----------
#
# This function finds an embedding of points from a dissimilarity matrix,
# such as the ones produced by the barycenter computation below
def smacof_mds(C, dim, max_iter=3000, eps=1e-9):
"""
    Returns an interpolated point cloud following the dissimilarity matrix C
    using SMACOF multidimensional scaling (MDS) in a target space of the
    specified dimension
Parameters
----------
C : ndarray, shape (ns, ns)
dissimilarity matrix
dim : int
dimension of the targeted space
max_iter : int
Maximum number of iterations of the SMACOF algorithm for a single run
eps : float
        relative tolerance w.r.t. stress to declare convergence
Returns
-------
npos : ndarray, shape (R, dim)
        Embedded coordinates of the interpolated point cloud (defined up to
        one isometry)
"""
rng = np.random.RandomState(seed=3)
mds = manifold.MDS(
dim,
max_iter=max_iter,
eps=1e-9,
dissimilarity='precomputed',
n_init=1)
pos = mds.fit(C).embedding_
nmds = manifold.MDS(
2,
max_iter=max_iter,
eps=1e-9,
dissimilarity="precomputed",
random_state=rng,
n_init=1)
npos = nmds.fit_transform(C, init=pos)
return npos
##############################################################################
# Data preparation
# ----------------
#
# The four distributions are constructed from 4 simple images
def im2mat(I):
"""Converts and image to matrix (one pixel per line)"""
return I.reshape((I.shape[0] * I.shape[1], I.shape[2]))
square = spi.imread('../data/square.png').astype(np.float64)[:, :, 2] / 256
cross = spi.imread('../data/cross.png').astype(np.float64)[:, :, 2] / 256
triangle = spi.imread('../data/triangle.png').astype(np.float64)[:, :, 2] / 256
star = spi.imread('../data/star.png').astype(np.float64)[:, :, 2] / 256
shapes = [square, cross, triangle, star]
S = 4
xs = [[] for i in range(S)]
for nb in range(4):
for i in range(8):
for j in range(8):
if shapes[nb][i, j] < 0.95:
xs[nb].append([j, 8 - i])
xs = np.array([np.array(xs[0]), np.array(xs[1]),
np.array(xs[2]), np.array(xs[3])])
##############################################################################
# Barycenter computation
# ----------------------
ns = [len(xs[s]) for s in range(S)]
n_samples = 30
"""Compute all distances matrices for the four shapes"""
Cs = [sp.spatial.distance.cdist(xs[s], xs[s]) for s in range(S)]
Cs = [cs / cs.max() for cs in Cs]
ps = [ot.unif(ns[s]) for s in range(S)]
p = ot.unif(n_samples)
lambdast = [[float(i) / 3, float(3 - i) / 3] for i in [1, 2]]
Ct01 = [0 for i in range(2)]
for i in range(2):
    Ct01[i] = ot.gromov.gromov_barycenters(n_samples, [Cs[0], Cs[1]],
                                           [ps[0], ps[1]], p, lambdast[i],
                                           'square_loss',  # 5e-4,
                                           max_iter=100, tol=1e-3)
Ct02 = [0 for i in range(2)]
for i in range(2):
    Ct02[i] = ot.gromov.gromov_barycenters(n_samples, [Cs[0], Cs[2]],
                                           [ps[0], ps[2]], p, lambdast[i],
                                           'square_loss',  # 5e-4,
                                           max_iter=100, tol=1e-3)
Ct13 = [0 for i in range(2)]
for i in range(2):
    Ct13[i] = ot.gromov.gromov_barycenters(n_samples, [Cs[1], Cs[3]],
                                           [ps[1], ps[3]], p, lambdast[i],
                                           'square_loss',  # 5e-4,
                                           max_iter=100, tol=1e-3)
Ct23 = [0 for i in range(2)]
for i in range(2):
    Ct23[i] = ot.gromov.gromov_barycenters(n_samples, [Cs[2], Cs[3]],
                                           [ps[2], ps[3]], p, lambdast[i],
                                           'square_loss',  # 5e-4,
                                           max_iter=100, tol=1e-3)
##############################################################################
# Visualization
# -------------
#
# The PCA helps in getting consistency between the rotations
clf = PCA(n_components=2)
npos = [0, 0, 0, 0]
npos = [smacof_mds(Cs[s], 2) for s in range(S)]
npost01 = [0, 0]
npost01 = [smacof_mds(Ct01[s], 2) for s in range(2)]
npost01 = [clf.fit_transform(npost01[s]) for s in range(2)]
npost02 = [0, 0]
npost02 = [smacof_mds(Ct02[s], 2) for s in range(2)]
npost02 = [clf.fit_transform(npost02[s]) for s in range(2)]
npost13 = [0, 0]
npost13 = [smacof_mds(Ct13[s], 2) for s in range(2)]
npost13 = [clf.fit_transform(npost13[s]) for s in range(2)]
npost23 = [0, 0]
npost23 = [smacof_mds(Ct23[s], 2) for s in range(2)]
npost23 = [clf.fit_transform(npost23[s]) for s in range(2)]
fig = pl.figure(figsize=(10, 10))
ax1 = pl.subplot2grid((4, 4), (0, 0))
pl.xlim((-1, 1))
pl.ylim((-1, 1))
ax1.scatter(npos[0][:, 0], npos[0][:, 1], color='r')
ax2 = pl.subplot2grid((4, 4), (0, 1))
pl.xlim((-1, 1))
pl.ylim((-1, 1))
ax2.scatter(npost01[1][:, 0], npost01[1][:, 1], color='b')
ax3 = pl.subplot2grid((4, 4), (0, 2))
pl.xlim((-1, 1))
pl.ylim((-1, 1))
ax3.scatter(npost01[0][:, 0], npost01[0][:, 1], color='b')
ax4 = pl.subplot2grid((4, 4), (0, 3))
pl.xlim((-1, 1))
pl.ylim((-1, 1))
ax4.scatter(npos[1][:, 0], npos[1][:, 1], color='r')
ax5 = pl.subplot2grid((4, 4), (1, 0))
pl.xlim((-1, 1))
pl.ylim((-1, 1))
ax5.scatter(npost02[1][:, 0], npost02[1][:, 1], color='b')
ax6 = pl.subplot2grid((4, 4), (1, 3))
pl.xlim((-1, 1))
pl.ylim((-1, 1))
ax6.scatter(npost13[1][:, 0], npost13[1][:, 1], color='b')
ax7 = pl.subplot2grid((4, 4), (2, 0))
pl.xlim((-1, 1))
pl.ylim((-1, 1))
ax7.scatter(npost02[0][:, 0], npost02[0][:, 1], color='b')
ax8 = pl.subplot2grid((4, 4), (2, 3))
pl.xlim((-1, 1))
pl.ylim((-1, 1))
ax8.scatter(npost13[0][:, 0], npost13[0][:, 1], color='b')
ax9 = pl.subplot2grid((4, 4), (3, 0))
pl.xlim((-1, 1))
pl.ylim((-1, 1))
ax9.scatter(npos[2][:, 0], npos[2][:, 1], color='r')
ax10 = pl.subplot2grid((4, 4), (3, 1))
pl.xlim((-1, 1))
pl.ylim((-1, 1))
ax10.scatter(npost23[1][:, 0], npost23[1][:, 1], color='b')
ax11 = pl.subplot2grid((4, 4), (3, 2))
pl.xlim((-1, 1))
pl.ylim((-1, 1))
ax11.scatter(npost23[0][:, 0], npost23[0][:, 1], color='b')
ax12 = pl.subplot2grid((4, 4), (3, 3))
pl.xlim((-1, 1))
pl.ylim((-1, 1))
ax12.scatter(npos[3][:, 0], npos[3][:, 1], color='r')
| mit |
UW-Hydro/RVIC | rvic/core/plots.py | 2 | 5060 | # -*- coding: utf-8 -*-
'''
plots.py
'''
import os
import logging
from .log import LOG_NAME
import numpy as np
from datetime import date
try:
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
matplotlib_available = True
try:
from mpl_toolkits.basemap import Basemap
        basemap_available = True
except ImportError:
basemap_available = False
except ImportError:
matplotlib_available = False
# -------------------------------------------------------------------- #
# create logger
log = logging.getLogger(LOG_NAME)
# -------------------------------------------------------------------- #
# -------------------------------------------------------------------- #
def uhs(data, title, case_id, plot_dir):
'''
Plot diagnostic plot showing all unit hydrographs
'''
pfname = _make_filename(title, case_id, plot_dir)
fig = plt.figure()
plt.plot(data)
plt.title(title)
plt.xlabel('timesteps')
plt.ylabel('unit-hydrograph')
fig.savefig(pfname)
plt.close()
return pfname
# -------------------------------------------------------------------- #
# -------------------------------------------------------------------- #
def _fractions_grid(data, dom_x, dom_y, title, case_id, plot_dir):
'''
Plot diagnostic plots of fraction variables
'''
# ---------------------------------------------------------------- #
# Plot Fractions
pfname = _make_filename(title, case_id, plot_dir)
mask = data <= 0.0
data = np.ma.array(data, mask=mask)
cmap = matplotlib.cm.cool
cmap.set_bad(color='w')
fig = plt.figure()
plt.pcolormesh(data, cmap=cmap)
plt.autoscale(tight=True)
plt.axis('tight')
plt.colorbar()
plt.title(title)
plt.xlabel('x')
plt.ylabel('y')
plt.ylim([0, dom_y.shape[0]])
plt.xlim([0, dom_x.shape[1]])
fig.savefig(pfname)
plt.close()
# ---------------------------------------------------------------- #
return pfname
# -------------------------------------------------------------------- #
# -------------------------------------------------------------------- #
def _fractions_map(data, dom_x, dom_y, title, case_id, plot_dir):
'''
Plot diagnostic plots of fraction variables using Basemap
'''
# ---------------------------------------------------------------- #
# Plot Fractions
pfname = _make_filename(title, case_id, plot_dir)
fig = plt.figure(figsize=(8, 8))
fig.add_axes([0.1, 0.1, 0.8, 0.8])
dom_x[dom_x < 0] += 360.0
mask = data <= 0.0
data = np.ma.array(data, mask=mask)
cmap = matplotlib.cm.cool
cmap.set_bad(color='w')
# define projection
midx = int(dom_x.shape[1] / 2)
midy = int(dom_x.shape[0] / 2)
projection = {'projection': 'stere',
'lon_0': dom_x[-1, midx],
'lat_0': dom_y[midy, midx],
'llcrnrlat': dom_y[-1, 0],
'urcrnrlat': dom_y[0, -1],
'llcrnrlon': dom_x[-1, 0],
'urcrnrlon': dom_x[0, -1],
'resolution': 'l'}
log.debug('Projection: %s', projection)
m = Basemap(**projection)
m.drawcoastlines()
m.drawcountries()
# draw parallels.
parallels = np.arange(-90., 90, 10.)
m.drawparallels(parallels, labels=[1, 0, 0, 0])
# draw meridians
meridians = np.arange(-180., 180., 10.)
m.drawmeridians(meridians, labels=[0, 0, 0, 1])
x, y = m(dom_x, dom_y) # compute map proj coordinates.
cs = m.pcolormesh(x, y, data, cmap=cmap)
m.colorbar(cs, location='right', pad='5%')
plt.title(title)
fig.savefig(pfname)
plt.close()
# ---------------------------------------------------------------- #
return pfname
# -------------------------------------------------------------------- #
# -------------------------------------------------------------------- #
def _make_filename(title, case_id, plot_dir):
today = date.today().strftime('%Y%m%d')
file_name = '{0}_{1}_{2}.png'.format(title.lower().replace(' ', '_'),
case_id.lower().replace(' ', '_'),
today)
pfname = os.path.join(plot_dir, file_name)
return pfname
# -------------------------------------------------------------------- #
# -------------------------------------------------------------------- #
def _fractions_dummy(*args):
'''
Pass on plotting
'''
return 'None <-- could not import matplotlib'
# -------------------------------------------------------------------- #
# -------------------------------------------------------------------- #
# Set function aliases
if matplotlib_available and basemap_available:
fractions = _fractions_map
elif matplotlib_available and not basemap_available:
fractions = _fractions_grid
elif not matplotlib_available or not basemap_available:
fractions = _fractions_dummy
# -------------------------------------------------------------------- #
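# Hypothetical call site (array names are illustrative): whichever
# implementation was selected above is invoked through the same interface,
# e.g.
#
#     fname = fractions(frac_data, dom_lon, dom_lat, 'Fraction of domain',
#                       case_id, plot_dir)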
| gpl-3.0 |
js850/PyGMIN | pygmin/wham/histogram_reweighting1d.py | 1 | 4952 | import numpy as np #to access np.exp() not built int exp
#import timeseries # for timeseries analysis
#import commands
#import pdb;
#import pickle
from wham_potential import WhamPotential
#import matplotlib.pyplot as plt
#from matplotlib.pyplot import *
import wham_utils
class wham1d:
""" class to combine 1d histograms of energy E at
multiple temperatures into one best estimate for the histogram
input will be:
    Tlist      # Tlist[k] is the temperature of the k'th simulation (replica)
    binenergy = zeros(nebins, float64)          # lower edge of each energy bin
    visits1d = zeros([nrep, nebins], integer)   # 1d histograms of data, one row per replica
"""
#=============================================================================================
# Constructor.
#=============================================================================================
def __init__(self, Tlist, binenergy, visits1d):
#define some parameters
self.k_B=1.
self.nrep = len(Tlist)
self.nebins = len(binenergy)
self.Tlist = np.array(Tlist, dtype = np.float64)
self.binenergy = np.array(binenergy, dtype = np.float64)
self.visits1d = np.array(visits1d, dtype = np.integer)
def minimize(self):
nreps = self.nrep
nbins = self.nebins
visitsT = (self.visits1d)
#print "min vis", np.min(visitsT)
self.logP = np.where( visitsT != 0, np.log( visitsT ), 0 )
#print "minlogp", np.min(self.logP)
self.reduced_energy = self.binenergy[np.newaxis,:] / (self.Tlist[:,np.newaxis] * self.k_B)
self.whampot = WhamPotential(self.logP, self.reduced_energy)
X = np.random.rand( nreps + nbins )
E = self.whampot.getEnergy(X)
#print "energy", E
#print "quenching"
try:
from pygmin.optimize import mylbfgs as quench
ret = quench(X, self.whampot, iprint=-1, maxstep=1e4)
except ImportError:
from pygmin.optimize import lbfgs_scipy as quench
ret = quench(X, self.whampot)
#print "quench energy", ret.energy
X = ret.coords
self.logn_E = X[nreps:]
self.w_i_final = X[:nreps]
def globalMinimization(self):
"""
        In experimentation I've never been able to find more than one minimum.
"""
nreps = self.nrep
nbins = self.nebins
visitsT = (self.visits1d)
#print "min vis", np.min(visitsT)
self.logP = np.where( visitsT != 0, np.log( visitsT ), 0 )
#print "minlogp", np.min(self.logP)
self.reduced_energy = self.binenergy[np.newaxis,:] / (self.Tlist[:,np.newaxis] * self.k_B)
self.whampot = WhamPotential(self.logP, self.reduced_energy)
X = np.random.rand( nreps + nbins )
E = self.whampot.getEnergy(X)
print "energy", E
print "quenching"
from pygmin.optimize import lbfgs_scipy as quench
ret = quench(X, self.whampot)
print "quench energy", ret.energy
from pygmin.basinhopping import BasinHopping
from pygmin.takestep.displace import RandomDisplacement
takestep = RandomDisplacement(stepsize=10)
takestep.useAdaptiveStep()
takestep.adaptive_class.f = 1.5 #i have no idea what a good stepsize should be
bh = BasinHopping(X, self.whampot, takestep )
import matplotlib.pyplot as plt
for i in range(10):
bh.run(2000)
self.logn_E = bh.coords[nreps:]
cvdata = self.calc_Cv(400)
plt.plot(cvdata[:,0], cvdata[:,5], '-')
plt.show()
X = bh.coords
self.logn_E = X[nreps:]
self.w_i_final = X[:nreps]
# def calc_Cv_no_wham(self):
# """ """
def calc_Cv_new(self, NDOF, TRANGE=None, NTEMP=100):
from pygmin.thermodynamics import dos_to_cv
dT = (self.Tlist[-1] - self.Tlist[0]) / NTEMP
Tlist = np.arange(self.Tlist[0], self.Tlist[-1], dT)
# print self.logn_E
lZ, U, U2, Cv = dos_to_cv(self.binenergy, self.logn_E, Tlist, K=NDOF)
cvdata = np.zeros([len(Tlist), 6])
cvdata[:,0] = Tlist
cvdata[:,1] = lZ
cvdata[:,2] = U # average potential energy
cvdata[:,3] = U2
cvdata[:,5] = Cv
eavg = U + float(NDOF) / 2 * Tlist # average energy including the kinetic degrees of freedom
cvdata[:,4] = eavg
return cvdata
def calc_Cv(self, NDOF, TRANGE=None, NTEMP=100, use_log_sum=None):
# return self.calc_Cv_new(NDOF, TRANGE, NTEMP)
return wham_utils.calc_Cv(self.logn_E, self.visits1d, self.binenergy,
NDOF, self.Tlist, self.k_B, TRANGE, NTEMP, use_log_sum=use_log_sum)
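# Hypothetical usage sketch (temperatures, bins and the visits array are
# illustrative, not taken from any particular data set):
#
#     Tlist = [0.5, 1.0, 2.0]                      # one temperature per replica
#     binenergy = np.linspace(-100., 0., 200)      # lower edges of the energy bins
#     visits1d = np.loadtxt("visits.dat")          # shape (nrep, nebins) histogram counts
#     wham = wham1d(Tlist, binenergy, visits1d)
#     wham.minimize()                              # solve for the density of states
#     cvdata = wham.calc_Cv(NDOF)                  # NDOF: number of degrees of freedom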
| gpl-3.0 |
zuotingbing/spark | python/pyspark/sql/tests/test_pandas_udf_typehints.py | 6 | 12109 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import unittest
import inspect
from pyspark.sql.functions import mean, lit
from pyspark.testing.sqlutils import ReusedSQLTestCase, \
have_pandas, have_pyarrow, pandas_requirement_message, \
pyarrow_requirement_message
from pyspark.sql.pandas.typehints import infer_eval_type
from pyspark.sql.pandas.functions import pandas_udf, PandasUDFType
if have_pandas:
import pandas as pd
from pandas.util.testing import assert_frame_equal
python_requirement_message = "Pandas UDFs with type hints are supported with Python 3.6+."
@unittest.skipIf(
not have_pandas or not have_pyarrow or sys.version_info[:2] < (3, 6),
pandas_requirement_message or pyarrow_requirement_message or python_requirement_message)
class PandasUDFTypeHintsTests(ReusedSQLTestCase):
    # Note that we should remove `exec` once we drop Python 2 in this class.
def setUp(self):
self.local = {'pd': pd}
def test_type_annotation_scalar(self):
exec(
"def func(col: pd.Series) -> pd.Series: pass",
self.local)
self.assertEqual(
infer_eval_type(inspect.signature(self.local['func'])), PandasUDFType.SCALAR)
exec(
"def func(col: pd.DataFrame, col1: pd.Series) -> pd.DataFrame: pass",
self.local)
self.assertEqual(
infer_eval_type(inspect.signature(self.local['func'])), PandasUDFType.SCALAR)
exec(
"def func(col: pd.DataFrame, *args: pd.Series) -> pd.Series: pass",
self.local)
self.assertEqual(
infer_eval_type(inspect.signature(self.local['func'])), PandasUDFType.SCALAR)
exec(
"def func(col: pd.Series, *args: pd.Series, **kwargs: pd.DataFrame) -> pd.Series:\n"
" pass",
self.local)
self.assertEqual(
infer_eval_type(inspect.signature(self.local['func'])), PandasUDFType.SCALAR)
exec(
"def func(col: pd.Series, *, col2: pd.DataFrame) -> pd.DataFrame:\n"
" pass",
self.local)
self.assertEqual(
infer_eval_type(inspect.signature(self.local['func'])), PandasUDFType.SCALAR)
exec(
"from typing import Union\n"
"def func(col: Union[pd.Series, pd.DataFrame], *, col2: pd.DataFrame) -> pd.Series:\n"
" pass",
self.local)
self.assertEqual(
infer_eval_type(inspect.signature(self.local['func'])), PandasUDFType.SCALAR)
def test_type_annotation_scalar_iter(self):
exec(
"from typing import Iterator\n"
"def func(iter: Iterator[pd.Series]) -> Iterator[pd.Series]: pass",
self.local)
self.assertEqual(
infer_eval_type(inspect.signature(self.local['func'])), PandasUDFType.SCALAR_ITER)
exec(
"from typing import Iterator, Tuple\n"
"def func(iter: Iterator[Tuple[pd.DataFrame, pd.Series]]) -> Iterator[pd.DataFrame]:\n"
" pass",
self.local)
self.assertEqual(
infer_eval_type(inspect.signature(self.local['func'])), PandasUDFType.SCALAR_ITER)
exec(
"from typing import Iterator, Tuple\n"
"def func(iter: Iterator[Tuple[pd.DataFrame, ...]]) -> Iterator[pd.Series]: pass",
self.local)
self.assertEqual(
infer_eval_type(inspect.signature(self.local['func'])), PandasUDFType.SCALAR_ITER)
exec(
"from typing import Iterator, Tuple, Union\n"
"def func(iter: Iterator[Tuple[Union[pd.DataFrame, pd.Series], ...]])"
" -> Iterator[pd.Series]: pass",
self.local)
self.assertEqual(
infer_eval_type(inspect.signature(self.local['func'])), PandasUDFType.SCALAR_ITER)
def test_type_annotation_group_agg(self):
exec(
"def func(col: pd.Series) -> str: pass",
self.local)
self.assertEqual(
infer_eval_type(inspect.signature(self.local['func'])), PandasUDFType.GROUPED_AGG)
exec(
"def func(col: pd.DataFrame, col1: pd.Series) -> int: pass",
self.local)
self.assertEqual(
infer_eval_type(inspect.signature(self.local['func'])), PandasUDFType.GROUPED_AGG)
exec(
"from pyspark.sql import Row\n"
"def func(col: pd.DataFrame, *args: pd.Series) -> Row: pass",
self.local)
self.assertEqual(
infer_eval_type(inspect.signature(self.local['func'])), PandasUDFType.GROUPED_AGG)
exec(
"def func(col: pd.Series, *args: pd.Series, **kwargs: pd.DataFrame) -> str:\n"
" pass",
self.local)
self.assertEqual(
infer_eval_type(inspect.signature(self.local['func'])), PandasUDFType.GROUPED_AGG)
exec(
"def func(col: pd.Series, *, col2: pd.DataFrame) -> float:\n"
" pass",
self.local)
self.assertEqual(
infer_eval_type(inspect.signature(self.local['func'])), PandasUDFType.GROUPED_AGG)
exec(
"from typing import Union\n"
"def func(col: Union[pd.Series, pd.DataFrame], *, col2: pd.DataFrame) -> float:\n"
" pass",
self.local)
self.assertEqual(
infer_eval_type(inspect.signature(self.local['func'])), PandasUDFType.GROUPED_AGG)
def test_type_annotation_negative(self):
exec(
"def func(col: str) -> pd.Series: pass",
self.local)
self.assertRaisesRegex(
NotImplementedError,
"Unsupported signature.*str",
infer_eval_type, inspect.signature(self.local['func']))
exec(
"def func(col: pd.DataFrame, col1: int) -> pd.DataFrame: pass",
self.local)
self.assertRaisesRegex(
NotImplementedError,
"Unsupported signature.*int",
infer_eval_type, inspect.signature(self.local['func']))
exec(
"from typing import Union\n"
"def func(col: Union[pd.DataFrame, str], col1: int) -> pd.DataFrame: pass",
self.local)
self.assertRaisesRegex(
NotImplementedError,
"Unsupported signature.*str",
infer_eval_type, inspect.signature(self.local['func']))
exec(
"from typing import Tuple\n"
"def func(col: pd.Series) -> Tuple[pd.DataFrame]: pass",
self.local)
self.assertRaisesRegex(
NotImplementedError,
"Unsupported signature.*Tuple",
infer_eval_type, inspect.signature(self.local['func']))
exec(
"def func(col, *args: pd.Series) -> pd.Series: pass",
self.local)
self.assertRaisesRegex(
ValueError,
"should be specified.*Series",
infer_eval_type, inspect.signature(self.local['func']))
exec(
"def func(col: pd.Series, *args: pd.Series, **kwargs: pd.DataFrame):\n"
" pass",
self.local)
self.assertRaisesRegex(
ValueError,
"should be specified.*Series",
infer_eval_type, inspect.signature(self.local['func']))
exec(
"def func(col: pd.Series, *, col2) -> pd.DataFrame:\n"
" pass",
self.local)
self.assertRaisesRegex(
ValueError,
"should be specified.*Series",
infer_eval_type, inspect.signature(self.local['func']))
def test_scalar_udf_type_hint(self):
df = self.spark.range(10).selectExpr("id", "id as v")
exec(
"import typing\n"
"def plus_one(v: typing.Union[pd.Series, pd.DataFrame]) -> pd.Series:\n"
" return v + 1",
self.local)
plus_one = pandas_udf("long")(self.local["plus_one"])
actual = df.select(plus_one(df.v).alias("plus_one"))
expected = df.selectExpr("(v + 1) as plus_one")
assert_frame_equal(expected.toPandas(), actual.toPandas())
def test_scalar_iter_udf_type_hint(self):
df = self.spark.range(10).selectExpr("id", "id as v")
exec(
"import typing\n"
"def plus_one(itr: typing.Iterator[pd.Series]) -> typing.Iterator[pd.Series]:\n"
" for s in itr:\n"
" yield s + 1",
self.local)
plus_one = pandas_udf("long")(self.local["plus_one"])
actual = df.select(plus_one(df.v).alias("plus_one"))
expected = df.selectExpr("(v + 1) as plus_one")
assert_frame_equal(expected.toPandas(), actual.toPandas())
def test_group_agg_udf_type_hint(self):
df = self.spark.range(10).selectExpr("id", "id as v")
exec(
"import numpy as np\n"
"def weighted_mean(v: pd.Series, w: pd.Series) -> float:\n"
" return np.average(v, weights=w)",
self.local)
weighted_mean = pandas_udf("double")(self.local["weighted_mean"])
actual = df.groupby('id').agg(weighted_mean(df.v, lit(1.0))).sort('id')
expected = df.groupby('id').agg(mean(df.v).alias('weighted_mean(v, 1.0)')).sort('id')
assert_frame_equal(expected.toPandas(), actual.toPandas())
def test_ignore_type_hint_in_group_apply_in_pandas(self):
df = self.spark.range(10)
exec(
"def pandas_plus_one(v: pd.DataFrame) -> pd.DataFrame:\n"
" return v + 1",
self.local)
pandas_plus_one = self.local["pandas_plus_one"]
actual = df.groupby('id').applyInPandas(pandas_plus_one, schema=df.schema).sort('id')
expected = df.selectExpr("id + 1 as id")
assert_frame_equal(expected.toPandas(), actual.toPandas())
def test_ignore_type_hint_in_cogroup_apply_in_pandas(self):
df = self.spark.range(10)
exec(
"def pandas_plus_one(left: pd.DataFrame, right: pd.DataFrame) -> pd.DataFrame:\n"
" return left + 1",
self.local)
pandas_plus_one = self.local["pandas_plus_one"]
actual = df.groupby('id').cogroup(
self.spark.range(10).groupby("id")
).applyInPandas(pandas_plus_one, schema=df.schema).sort('id')
expected = df.selectExpr("id + 1 as id")
assert_frame_equal(expected.toPandas(), actual.toPandas())
def test_ignore_type_hint_in_map_in_pandas(self):
df = self.spark.range(10)
exec(
"from typing import Iterator\n"
"def pandas_plus_one(iter: Iterator[pd.DataFrame]) -> Iterator[pd.DataFrame]:\n"
" return map(lambda v: v + 1, iter)",
self.local)
pandas_plus_one = self.local["pandas_plus_one"]
actual = df.mapInPandas(pandas_plus_one, schema=df.schema)
expected = df.selectExpr("id + 1 as id")
assert_frame_equal(expected.toPandas(), actual.toPandas())
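# For reference, a minimal sketch of the type-hint style these tests exercise
# (the column name is illustrative): the Python type hints alone determine the
# pandas UDF eval type, so no PandasUDFType argument is needed.
#
#     @pandas_udf("long")
#     def pandas_plus_one(s: pd.Series) -> pd.Series:
#         return s + 1
#
#     spark.range(3).select(pandas_plus_one("id")).show()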
if __name__ == "__main__":
from pyspark.sql.tests.test_pandas_udf_typehints import *
try:
import xmlrunner
testRunner = xmlrunner.XMLTestRunner(output='target/test-reports', verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
| apache-2.0 |
bartvm/pylearn2 | pylearn2/scripts/datasets/browse_norb.py | 44 | 15741 | #!/usr/bin/env python
"""
A browser for the NORB and small NORB datasets. Navigate the images by
choosing the values for the label vector. Note that for the 'big' NORB
dataset, you can only set the first 5 label dimensions. You can then cycle
through the 3-12 images that fit those labels.
"""
import sys
import os
import argparse
import numpy
import warnings
try:
import matplotlib
from matplotlib import pyplot
except ImportError as import_error:
warnings.warn("Can't use this script without matplotlib.")
matplotlib = None
pyplot = None
from pylearn2.datasets.new_norb import NORB
from pylearn2.utils import safe_zip, serial
def _parse_args():
parser = argparse.ArgumentParser(
description="Browser for NORB dataset.")
parser.add_argument('--which_norb',
type=str,
required=False,
choices=('big', 'small'),
help="'Selects the (big) NORB, or the Small NORB.")
parser.add_argument('--which_set',
type=str,
required=False,
choices=('train', 'test', 'both'),
help="'train', or 'test'")
parser.add_argument('--pkl',
type=str,
required=False,
help=".pkl file of NORB dataset")
parser.add_argument('--stereo_viewer',
action='store_true',
help="Swaps left and right stereo images, so you "
"can see them in 3D by crossing your eyes.")
parser.add_argument('--no_norm',
action='store_true',
help="Don't normalize pixel values")
result = parser.parse_args()
if (result.pkl is not None) == (result.which_norb is not None or
result.which_set is not None):
print("Must supply either --pkl, or both --which_norb and "
"--which_set.")
sys.exit(1)
if (result.which_norb is None) != (result.which_set is None):
print("When not supplying --pkl, you must supply both "
"--which_norb and --which_set.")
sys.exit(1)
if result.pkl is not None:
if not result.pkl.endswith('.pkl'):
print("--pkl must be a filename that ends in .pkl")
sys.exit(1)
if not os.path.isfile(result.pkl):
print("couldn't find --pkl file '%s'" % result.pkl)
sys.exit(1)
return result
def _make_grid_to_short_label(dataset):
"""
Returns an array x such that x[a][b] gives label index a's b'th unique
value. In other words, it maps label grid indices a, b to the
corresponding label value.
"""
unique_values = [sorted(list(frozenset(column)))
for column
in dataset.y[:, :5].transpose()]
# If dataset contains blank images, removes the '-1' labels
# corresponding to blank images, since they aren't contained in the
# label grid.
category_index = dataset.label_name_to_index['category']
unique_categories = unique_values[category_index]
category_to_name = dataset.label_to_value_funcs[category_index]
if any(category_to_name(category) == 'blank'
for category in unique_categories):
for d in range(1, len(unique_values)):
assert unique_values[d][0] == -1, ("unique_values: %s" %
str(unique_values))
unique_values[d] = unique_values[d][1:]
return unique_values
def _get_blank_label(dataset):
"""
Returns the label vector associated with blank images.
If dataset is a Small NORB (i.e. it has no blank images), this returns
None.
"""
category_index = dataset.label_name_to_index['category']
category_to_name = dataset.label_to_value_funcs[category_index]
blank_label = 5
try:
blank_name = category_to_name(blank_label)
except ValueError:
        # Returns None if there is no 'blank' category (e.g. if we're using
        # the small NORB dataset).
return None
assert blank_name == 'blank'
blank_rowmask = dataset.y[:, category_index] == blank_label
blank_labels = dataset.y[blank_rowmask, :]
if not blank_rowmask.any():
return None
if not numpy.all(blank_labels[0, :] == blank_labels[1:, :]):
raise ValueError("Expected all labels of category 'blank' to have "
"the same value, but they differed.")
return blank_labels[0, :].copy()
def _make_label_to_row_indices(labels):
"""
Returns a map from short labels (the first 5 elements of the label
vector) to the list of row indices of rows in the dense design matrix
with that label.
For Small NORB, all unique short labels have exactly one row index.
For big NORB, a short label can have 0-N row indices.
"""
result = {}
for row_index, label in enumerate(labels):
short_label = tuple(label[:5])
if result.get(short_label, None) is None:
result[short_label] = []
result[short_label].append(row_index)
return result
def main():
"""Top-level function."""
args = _parse_args()
if args.pkl is not None:
dataset = serial.load(args.pkl)
else:
dataset = NORB(args.which_norb, args.which_set)
# Indexes into the first 5 labels, which live on a 5-D grid.
grid_indices = [0, ] * 5
grid_to_short_label = _make_grid_to_short_label(dataset)
# Maps 5-D label vector to a list of row indices for dataset.X, dataset.y
# that have those labels.
label_to_row_indices = _make_label_to_row_indices(dataset.y)
# Indexes into the row index lists returned by label_to_row_indices.
object_image_index = [0, ]
blank_image_index = [0, ]
blank_label = _get_blank_label(dataset)
# Index into grid_indices currently being edited
grid_dimension = [0, ]
dataset_is_stereo = 's' in dataset.view_converter.axes
figure, all_axes = pyplot.subplots(1,
3 if dataset_is_stereo else 2,
squeeze=True,
figsize=(10, 3.5))
set_name = (os.path.split(args.pkl)[1] if args.which_set is None
else "%sing set" % args.which_set)
figure.canvas.set_window_title("NORB dataset (%s)" % set_name)
label_text = figure.suptitle('Up/down arrows choose label, '
'left/right arrows change it',
x=0.1,
horizontalalignment="left")
# Hides axes' tick marks
for axes in all_axes:
axes.get_xaxis().set_visible(False)
axes.get_yaxis().set_visible(False)
text_axes, image_axes = (all_axes[0], all_axes[1:])
image_captions = (('left', 'right') if dataset_is_stereo
else ('mono image', ))
if args.stereo_viewer:
image_captions = tuple(reversed(image_captions))
for image_ax, caption in safe_zip(image_axes, image_captions):
image_ax.set_title(caption)
text_axes.set_frame_on(False) # Hides background of text_axes
def is_blank(grid_indices):
assert len(grid_indices) == 5
assert all(x >= 0 for x in grid_indices)
ci = dataset.label_name_to_index['category'] # category index
category = grid_to_short_label[ci][grid_indices[ci]]
category_name = dataset.label_to_value_funcs[ci](category)
return category_name == 'blank'
def get_short_label(grid_indices):
"""
Returns the first 5 elements of the label vector pointed to by
grid_indices. We use the first 5, since they're the labels used by
both the 'big' and Small NORB datasets.
"""
# Need to special-case the 'blank' category, since it lies outside of
# the grid.
if is_blank(grid_indices): # won't happen with SmallNORB
return tuple(blank_label[:5])
else:
return tuple(grid_to_short_label[i][g]
for i, g in enumerate(grid_indices))
def get_row_indices(grid_indices):
short_label = get_short_label(grid_indices)
return label_to_row_indices.get(short_label, None)
axes_to_pixels = {}
def redraw(redraw_text, redraw_images):
row_indices = get_row_indices(grid_indices)
if row_indices is None:
row_index = None
image_index = 0
num_images = 0
else:
image_index = (blank_image_index
if is_blank(grid_indices)
else object_image_index)[0]
row_index = row_indices[image_index]
num_images = len(row_indices)
def draw_text():
if row_indices is None:
padding_length = dataset.y.shape[1] - len(grid_indices)
current_label = (tuple(get_short_label(grid_indices)) +
(0, ) * padding_length)
else:
current_label = dataset.y[row_index, :]
label_names = dataset.label_index_to_name
label_values = [label_to_value(label) for label_to_value, label
in safe_zip(dataset.label_to_value_funcs,
current_label)]
lines = ['%s: %s' % (t, v)
for t, v
in safe_zip(label_names, label_values)]
if dataset.y.shape[1] > 5:
# Inserts image number & blank line between editable and
# fixed labels.
lines = (lines[:5] +
['No such image' if num_images == 0
else 'image: %d of %d' % (image_index + 1,
num_images),
'\n'] +
lines[5:])
# prepends the current index's line with an arrow.
lines[grid_dimension[0]] = '==> ' + lines[grid_dimension[0]]
text_axes.clear()
# "transAxes": 0, 0 = bottom-left, 1, 1 at upper-right.
text_axes.text(0, 0.5, # coords
'\n'.join(lines),
verticalalignment='center',
transform=text_axes.transAxes)
def draw_images():
if row_indices is None:
for axis in image_axes:
axis.clear()
else:
data_row = dataset.X[row_index:row_index + 1, :]
axes_names = dataset.view_converter.axes
assert len(axes_names) in (4, 5)
assert axes_names[0] == 'b'
assert axes_names[-3] == 0
assert axes_names[-2] == 1
assert axes_names[-1] == 'c'
def draw_image(image, axes):
assert len(image.shape) == 2
norm = matplotlib.colors.NoNorm() if args.no_norm else None
axes_to_pixels[axes] = image
axes.imshow(image, norm=norm, cmap='gray')
if 's' in axes_names:
image_pair = \
dataset.get_topological_view(mat=data_row,
single_tensor=True)
# Shaves off the singleton dimensions
# (batch # and channel #), leaving just 's', 0, and 1.
image_pair = tuple(image_pair[0, :, :, :, 0])
if args.stereo_viewer:
image_pair = tuple(reversed(image_pair))
for axis, image in safe_zip(image_axes, image_pair):
draw_image(image, axis)
else:
image = dataset.get_topological_view(mat=data_row)
image = image[0, :, :, 0]
draw_image(image, image_axes[0])
if redraw_text:
draw_text()
if redraw_images:
draw_images()
figure.canvas.draw()
default_status_text = ("mouseover image%s for pixel values" %
("" if len(image_axes) == 1 else "s"))
status_text = figure.text(0.5, 0.1, default_status_text)
def on_mouse_motion(event):
original_text = status_text.get_text()
if event.inaxes not in image_axes:
status_text.set_text(default_status_text)
else:
pixels = axes_to_pixels[event.inaxes]
row = int(event.ydata + .5)
col = int(event.xdata + .5)
status_text.set_text("Pixel value: %g" % pixels[row, col])
if status_text.get_text != original_text:
figure.canvas.draw()
def on_key_press(event):
def add_mod(arg, step, size):
return (arg + size + step) % size
def incr_index_type(step):
num_dimensions = len(grid_indices)
if dataset.y.shape[1] > 5:
# If dataset is big NORB, add one for the image index
num_dimensions += 1
grid_dimension[0] = add_mod(grid_dimension[0],
step,
num_dimensions)
def incr_index(step):
assert step in (0, -1, 1), ("Step was %d" % step)
image_index = (blank_image_index
if is_blank(grid_indices)
else object_image_index)
if grid_dimension[0] == 5: # i.e. the image index
row_indices = get_row_indices(grid_indices)
if row_indices is None:
image_index[0] = 0
else:
# increment the image index
image_index[0] = add_mod(image_index[0],
step,
len(row_indices))
else:
# increment one of the grid indices
gd = grid_dimension[0]
grid_indices[gd] = add_mod(grid_indices[gd],
step,
len(grid_to_short_label[gd]))
row_indices = get_row_indices(grid_indices)
if row_indices is None:
image_index[0] = 0
else:
# some grid indices have 2 images instead of 3.
image_index[0] = min(image_index[0], len(row_indices))
# Disables left/right key if we're currently showing a blank,
# and the current index type is neither 'category' (0) nor
# 'image number' (5)
disable_left_right = (is_blank(grid_indices) and
not (grid_dimension[0] in (0, 5)))
if event.key == 'up':
incr_index_type(-1)
redraw(True, False)
elif event.key == 'down':
incr_index_type(1)
redraw(True, False)
elif event.key == 'q':
sys.exit(0)
elif not disable_left_right:
if event.key == 'left':
incr_index(-1)
redraw(True, True)
elif event.key == 'right':
incr_index(1)
redraw(True, True)
figure.canvas.mpl_connect('key_press_event', on_key_press)
figure.canvas.mpl_connect('motion_notify_event', on_mouse_motion)
redraw(True, True)
pyplot.show()
if __name__ == '__main__':
main()
| bsd-3-clause |
foreversand/QSTK | QSTK/qstklearn/mldiagnostics.py | 7 | 2315 | # (c) 2011, 2012 Georgia Tech Research Corporation
# This source code is released under the New BSD license. Please see
# http://wiki.quantsoftware.org/index.php?title=QSTK_License
# for license details.
#
# Created on Month day, Year
#
# @author: Vishal Shekhar
# @contact: [email protected]
# @summary: ML Algo Diagnostic Utility (plots performance of the Algo on Train Vs CV sets)
#
import copy
import numpy as np
import matplotlib.pyplot as plt
from pylab import *
class MLDiagnostics:
"""
This class can be used to produce learning curves.
These are plots of evolution of Training Error and Cross Validation Error across lambda(in general a control param for model complexity).
This plot can help diagnose if the ML algorithmic model has high bias or a high variance problem and can
thus help decide the next course of action.
In general, ML Algorithm is of the form,
Y=f(t,X) + lambdaVal*|t|
where Y is the output, t is the model parameter vector, lambdaVal is the regularization parameter.
|t| is the size of model parameter vector.
"""
def __init__(self,learner,Xtrain,Ytrain,Xcv,Ycv,lambdaArray):
self.learner = learner
self.Xtrain = Xtrain
self.Ytrain = Ytrain
self.Xcv = Xcv
self.Ycv = Ycv
self.lambdaArray = lambdaArray
self.ErrTrain = np.zeros((len(lambdaArray),1))
self.ErrCV = copy.copy(self.ErrTrain)
def avgsqerror(self,Y,Ypred):
return np.sum((Y-Ypred)**2)/len(Y)
def plotCurves(self,filename):
        # The errors are computed once per value of the regularization
        # parameter, so plot them against lambdaArray.
        Xrange = self.lambdaArray
        plt.plot(Xrange, self.ErrTrain, label="Train Error")
        plt.plot(Xrange, self.ErrCV, label="CV Error")
        plt.legend()
        plt.title('Learning Curves')
        plt.xlabel('lambda (regularization parameter)')
        plt.ylabel('Average Error')
plt.draw()
savefig(filename,format='pdf')
def runDiagnostics(self,filename):
for i,lambdaVal in zip(range(len(self.lambdaArray)),self.lambdaArray):
learner = copy.copy(self.learner())# is deep copy required
# setLambda needs to be a supported function for all ML strategies.
learner.setLambda(lambdaVal)
learner.addEvidence(self.Xtrain,self.Ytrain)
YtrPred = learner.query(self.Xtrain)
self.ErrTrain[i] = self.avgsqerror(self.Ytrain,YtrPred)
YcvPred = learner.query(self.Xcv)
self.ErrCV[i] = self.avgsqerror(self.Ycv,YcvPred)
self.plotCurves(filename)
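# Hypothetical usage sketch (the learner class and data arrays are
# illustrative): the learner must expose setLambda(), addEvidence() and
# query(), as assumed by runDiagnostics() above.
#
#     diag = MLDiagnostics(SomeLearner, Xtrain, Ytrain, Xcv, Ycv,
#                          lambdaArray=[0.01, 0.1, 1.0, 10.0])
#     diag.runDiagnostics("learning_curves.pdf")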
| bsd-3-clause |
clemkoa/scikit-learn | sklearn/mixture/gmm.py | 8 | 32593 | """
Gaussian Mixture Models.
This implementation corresponds to frequentist (non-Bayesian) formulation
of Gaussian Mixture Models.
"""
# Author: Ron Weiss <[email protected]>
# Fabian Pedregosa <[email protected]>
# Bertrand Thirion <[email protected]>
# Important note for the deprecation cleaning of 0.20 :
# All the functions and classes of this file have been deprecated in 0.18.
# When you remove this file please also remove the related files
# - 'sklearn/mixture/dpgmm.py'
# - 'sklearn/mixture/test_dpgmm.py'
# - 'sklearn/mixture/test_gmm.py'
from time import time
import numpy as np
from scipy import linalg
from ..base import BaseEstimator
from ..utils import check_random_state, check_array, deprecated
from ..utils.fixes import logsumexp
from ..utils.validation import check_is_fitted
from .. import cluster
from sklearn.externals.six.moves import zip
EPS = np.finfo(float).eps
@deprecated("The function log_multivariate_normal_density is deprecated in 0.18"
" and will be removed in 0.20.")
def log_multivariate_normal_density(X, means, covars, covariance_type='diag'):
"""Compute the log probability under a multivariate Gaussian distribution.
Parameters
----------
X : array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row corresponds to a
single data point.
means : array_like, shape (n_components, n_features)
List of n_features-dimensional mean vectors for n_components Gaussians.
Each row corresponds to a single mean vector.
covars : array_like
List of n_components covariance parameters for each Gaussian. The shape
depends on `covariance_type`:
(n_components, n_features) if 'spherical',
(n_features, n_features) if 'tied',
(n_components, n_features) if 'diag',
(n_components, n_features, n_features) if 'full'
covariance_type : string
Type of the covariance parameters. Must be one of
'spherical', 'tied', 'diag', 'full'. Defaults to 'diag'.
Returns
-------
lpr : array_like, shape (n_samples, n_components)
Array containing the log probabilities of each data point in
X under each of the n_components multivariate Gaussian distributions.
"""
log_multivariate_normal_density_dict = {
'spherical': _log_multivariate_normal_density_spherical,
'tied': _log_multivariate_normal_density_tied,
'diag': _log_multivariate_normal_density_diag,
'full': _log_multivariate_normal_density_full}
return log_multivariate_normal_density_dict[covariance_type](
X, means, covars)
@deprecated("The function sample_gaussian is deprecated in 0.18"
" and will be removed in 0.20."
" Use numpy.random.multivariate_normal instead.")
def sample_gaussian(mean, covar, covariance_type='diag', n_samples=1,
random_state=None):
"""Generate random samples from a Gaussian distribution.
Parameters
----------
mean : array_like, shape (n_features,)
Mean of the distribution.
covar : array_like
Covariance of the distribution. The shape depends on `covariance_type`:
scalar if 'spherical',
(n_features) if 'diag',
(n_features, n_features) if 'tied', or 'full'
covariance_type : string, optional
Type of the covariance parameters. Must be one of
'spherical', 'tied', 'diag', 'full'. Defaults to 'diag'.
n_samples : int, optional
Number of samples to generate. Defaults to 1.
Returns
-------
X : array
Randomly generated sample. The shape depends on `n_samples`:
(n_features,) if `1`
(n_features, n_samples) otherwise
"""
    return _sample_gaussian(mean, covar, covariance_type=covariance_type,
                            n_samples=n_samples, random_state=random_state)
def _sample_gaussian(mean, covar, covariance_type='diag', n_samples=1,
random_state=None):
rng = check_random_state(random_state)
n_dim = len(mean)
rand = rng.randn(n_dim, n_samples)
if n_samples == 1:
rand.shape = (n_dim,)
if covariance_type == 'spherical':
rand *= np.sqrt(covar)
elif covariance_type == 'diag':
rand = np.dot(np.diag(np.sqrt(covar)), rand)
else:
s, U = linalg.eigh(covar)
s.clip(0, out=s) # get rid of tiny negatives
np.sqrt(s, out=s)
U *= s
rand = np.dot(U, rand)
return (rand.T + mean).T
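# Minimal sketch (values are illustrative): drawing 5 points from a 2-d
# diagonal Gaussian through the private helper above returns an array of
# shape (n_features, n_samples), here (2, 5).
#
#     pts = _sample_gaussian(np.zeros(2), np.ones(2), 'diag', n_samples=5,
#                            random_state=0)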
class _GMMBase(BaseEstimator):
"""Gaussian Mixture Model.
Representation of a Gaussian mixture model probability distribution.
This class allows for easy evaluation of, sampling from, and
maximum-likelihood estimation of the parameters of a GMM distribution.
Initializes parameters such that every mixture component has zero
mean and identity covariance.
Read more in the :ref:`User Guide <gmm>`.
Parameters
----------
n_components : int, optional
Number of mixture components. Defaults to 1.
covariance_type : string, optional
String describing the type of covariance parameters to
use. Must be one of 'spherical', 'tied', 'diag', 'full'.
Defaults to 'diag'.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
min_covar : float, optional
Floor on the diagonal of the covariance matrix to prevent
overfitting. Defaults to 1e-3.
tol : float, optional
Convergence threshold. EM iterations will stop when average
gain in log-likelihood is below this threshold. Defaults to 1e-3.
n_iter : int, optional
Number of EM iterations to perform.
n_init : int, optional
        Number of initializations to perform. The best result is kept.
params : string, optional
Controls which parameters are updated in the training
process. Can contain any combination of 'w' for weights,
'm' for means, and 'c' for covars. Defaults to 'wmc'.
init_params : string, optional
Controls which parameters are updated in the initialization
process. Can contain any combination of 'w' for weights,
'm' for means, and 'c' for covars. Defaults to 'wmc'.
verbose : int, default: 0
Enable verbose output. If 1 then it always prints the current
initialization and iteration step. If greater than 1 then
it prints additionally the change and time needed for each step.
Attributes
----------
weights_ : array, shape (`n_components`,)
This attribute stores the mixing weights for each mixture component.
means_ : array, shape (`n_components`, `n_features`)
Mean parameters for each mixture component.
covars_ : array
Covariance parameters for each mixture component. The shape
depends on `covariance_type`::
(n_components, n_features) if 'spherical',
(n_features, n_features) if 'tied',
(n_components, n_features) if 'diag',
(n_components, n_features, n_features) if 'full'
converged_ : bool
True when convergence was reached in fit(), False otherwise.
See Also
--------
DPGMM : Infinite gaussian mixture model, using the Dirichlet
process, fit with a variational algorithm
VBGMM : Finite gaussian mixture model fit with a variational
algorithm, better for situations where there might be too little
data to get a good estimate of the covariance matrix.
Examples
--------
>>> import numpy as np
>>> from sklearn import mixture
>>> np.random.seed(1)
>>> g = mixture.GMM(n_components=2)
>>> # Generate random observations with two modes centered on 0
>>> # and 10 to use for training.
>>> obs = np.concatenate((np.random.randn(100, 1),
... 10 + np.random.randn(300, 1)))
>>> g.fit(obs) # doctest: +NORMALIZE_WHITESPACE
GMM(covariance_type='diag', init_params='wmc', min_covar=0.001,
n_components=2, n_init=1, n_iter=100, params='wmc',
random_state=None, tol=0.001, verbose=0)
>>> np.round(g.weights_, 2)
array([ 0.75, 0.25])
>>> np.round(g.means_, 2)
array([[ 10.05],
[ 0.06]])
>>> np.round(g.covars_, 2) # doctest: +SKIP
array([[[ 1.02]],
[[ 0.96]]])
>>> g.predict([[0], [2], [9], [10]]) # doctest: +ELLIPSIS
array([1, 1, 0, 0]...)
>>> np.round(g.score([[0], [2], [9], [10]]), 2)
array([-2.19, -4.58, -1.75, -1.21])
>>> # Refit the model on new data (initial parameters remain the
>>> # same), this time with an even split between the two modes.
>>> g.fit(20 * [[0]] + 20 * [[10]]) # doctest: +NORMALIZE_WHITESPACE
GMM(covariance_type='diag', init_params='wmc', min_covar=0.001,
n_components=2, n_init=1, n_iter=100, params='wmc',
random_state=None, tol=0.001, verbose=0)
>>> np.round(g.weights_, 2)
array([ 0.5, 0.5])
"""
def __init__(self, n_components=1, covariance_type='diag',
random_state=None, tol=1e-3, min_covar=1e-3,
n_iter=100, n_init=1, params='wmc', init_params='wmc',
verbose=0):
self.n_components = n_components
self.covariance_type = covariance_type
self.tol = tol
self.min_covar = min_covar
self.random_state = random_state
self.n_iter = n_iter
self.n_init = n_init
self.params = params
self.init_params = init_params
self.verbose = verbose
if covariance_type not in ['spherical', 'tied', 'diag', 'full']:
raise ValueError('Invalid value for covariance_type: %s' %
covariance_type)
if n_init < 1:
raise ValueError('GMM estimation requires at least one run')
def _get_covars(self):
"""Covariance parameters for each mixture component.
The shape depends on ``cvtype``::
(n_states, n_features) if 'spherical',
(n_features, n_features) if 'tied',
(n_states, n_features) if 'diag',
(n_states, n_features, n_features) if 'full'
"""
if self.covariance_type == 'full':
return self.covars_
elif self.covariance_type == 'diag':
return [np.diag(cov) for cov in self.covars_]
elif self.covariance_type == 'tied':
return [self.covars_] * self.n_components
elif self.covariance_type == 'spherical':
return [np.diag(cov) for cov in self.covars_]
def _set_covars(self, covars):
"""Provide values for covariance."""
covars = np.asarray(covars)
_validate_covars(covars, self.covariance_type, self.n_components)
self.covars_ = covars
def score_samples(self, X):
"""Return the per-sample likelihood of the data under the model.
Compute the log probability of X under the model and
return the posterior distribution (responsibilities) of each
mixture component for each element of X.
Parameters
----------
X : array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
logprob : array_like, shape (n_samples,)
Log probabilities of each data point in X.
responsibilities : array_like, shape (n_samples, n_components)
Posterior probabilities of each mixture component for each
observation
"""
check_is_fitted(self, 'means_')
X = check_array(X)
if X.ndim == 1:
X = X[:, np.newaxis]
if X.size == 0:
return np.array([]), np.empty((0, self.n_components))
if X.shape[1] != self.means_.shape[1]:
raise ValueError('The shape of X is not compatible with self')
lpr = (log_multivariate_normal_density(X, self.means_, self.covars_,
self.covariance_type) +
np.log(self.weights_))
logprob = logsumexp(lpr, axis=1)
responsibilities = np.exp(lpr - logprob[:, np.newaxis])
return logprob, responsibilities
def score(self, X, y=None):
"""Compute the log probability under the model.
Parameters
----------
X : array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
logprob : array_like, shape (n_samples,)
Log probabilities of each data point in X
"""
logprob, _ = self.score_samples(X)
return logprob
def predict(self, X):
"""Predict label for data.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = (n_samples,) component memberships
"""
logprob, responsibilities = self.score_samples(X)
return responsibilities.argmax(axis=1)
def predict_proba(self, X):
"""Predict posterior probability of data under each Gaussian
in the model.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
responsibilities : array-like, shape = (n_samples, n_components)
Returns the probability of the sample for each Gaussian
(state) in the model.
"""
logprob, responsibilities = self.score_samples(X)
return responsibilities
def sample(self, n_samples=1, random_state=None):
"""Generate random samples from the model.
Parameters
----------
n_samples : int, optional
Number of samples to generate. Defaults to 1.
Returns
-------
X : array_like, shape (n_samples, n_features)
List of samples
"""
check_is_fitted(self, 'means_')
if random_state is None:
random_state = self.random_state
random_state = check_random_state(random_state)
weight_cdf = np.cumsum(self.weights_)
X = np.empty((n_samples, self.means_.shape[1]))
rand = random_state.rand(n_samples)
# decide which component to use for each sample
comps = weight_cdf.searchsorted(rand)
# for each component, generate all needed samples
for comp in range(self.n_components):
# occurrences of current component in X
comp_in_X = (comp == comps)
# number of those occurrences
num_comp_in_X = comp_in_X.sum()
if num_comp_in_X > 0:
if self.covariance_type == 'tied':
cv = self.covars_
elif self.covariance_type == 'spherical':
cv = self.covars_[comp][0]
else:
cv = self.covars_[comp]
X[comp_in_X] = _sample_gaussian(
self.means_[comp], cv, self.covariance_type,
num_comp_in_X, random_state=random_state).T
return X
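    # Note on sample() above: component indices are drawn by inverse-CDF
    # sampling (searchsorted over the cumulative weights), and all samples
    # assigned to a given component are then generated in one vectorised
    # call to _sample_gaussian.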
def fit_predict(self, X, y=None):
"""Fit and then predict labels for data.
Warning: Due to the final maximization step in the EM algorithm,
with low iterations the prediction may not be 100% accurate.
.. versionadded:: 0.17
*fit_predict* method in Gaussian Mixture Model.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = (n_samples,) component memberships
"""
return self._fit(X, y).argmax(axis=1)
def _fit(self, X, y=None, do_prediction=False):
"""Estimate model parameters with the EM algorithm.
        An initialization step is performed before entering the
expectation-maximization (EM) algorithm. If you want to avoid
this step, set the keyword argument init_params to the empty
string '' when creating the GMM object. Likewise, if you would
like just to do an initialization, set n_iter=0.
Parameters
----------
X : array_like, shape (n, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
responsibilities : array, shape (n_samples, n_components)
Posterior probabilities of each mixture component for each
observation.
"""
# initialization step
X = check_array(X, dtype=np.float64, ensure_min_samples=2,
estimator=self)
if X.shape[0] < self.n_components:
raise ValueError(
'GMM estimation with %s components, but got only %s samples' %
(self.n_components, X.shape[0]))
max_log_prob = -np.infty
if self.verbose > 0:
print('Expectation-maximization algorithm started.')
for init in range(self.n_init):
if self.verbose > 0:
print('Initialization ' + str(init + 1))
start_init_time = time()
if 'm' in self.init_params or not hasattr(self, 'means_'):
self.means_ = cluster.KMeans(
n_clusters=self.n_components,
random_state=self.random_state).fit(X).cluster_centers_
if self.verbose > 1:
print('\tMeans have been initialized.')
if 'w' in self.init_params or not hasattr(self, 'weights_'):
self.weights_ = np.tile(1.0 / self.n_components,
self.n_components)
if self.verbose > 1:
print('\tWeights have been initialized.')
if 'c' in self.init_params or not hasattr(self, 'covars_'):
cv = np.cov(X.T) + self.min_covar * np.eye(X.shape[1])
if not cv.shape:
cv.shape = (1, 1)
self.covars_ = \
distribute_covar_matrix_to_match_covariance_type(
cv, self.covariance_type, self.n_components)
if self.verbose > 1:
print('\tCovariance matrices have been initialized.')
# EM algorithms
current_log_likelihood = None
# reset self.converged_ to False
self.converged_ = False
for i in range(self.n_iter):
if self.verbose > 0:
print('\tEM iteration ' + str(i + 1))
start_iter_time = time()
prev_log_likelihood = current_log_likelihood
# Expectation step
log_likelihoods, responsibilities = self.score_samples(X)
current_log_likelihood = log_likelihoods.mean()
# Check for convergence.
if prev_log_likelihood is not None:
change = abs(current_log_likelihood - prev_log_likelihood)
if self.verbose > 1:
print('\t\tChange: ' + str(change))
if change < self.tol:
self.converged_ = True
if self.verbose > 0:
print('\t\tEM algorithm converged.')
break
# Maximization step
self._do_mstep(X, responsibilities, self.params,
self.min_covar)
if self.verbose > 1:
print('\t\tEM iteration ' + str(i + 1) + ' took {0:.5f}s'.format(
time() - start_iter_time))
# if the results are better, keep it
if self.n_iter:
if current_log_likelihood > max_log_prob:
max_log_prob = current_log_likelihood
best_params = {'weights': self.weights_,
'means': self.means_,
'covars': self.covars_}
if self.verbose > 1:
print('\tBetter parameters were found.')
if self.verbose > 1:
print('\tInitialization ' + str(init + 1) + ' took {0:.5f}s'.format(
time() - start_init_time))
        # check that at least one initialization produced a finite
        # likelihood given the initial parameters
if np.isneginf(max_log_prob) and self.n_iter:
raise RuntimeError(
"EM algorithm was never able to compute a valid likelihood " +
"given initial parameters. Try different init parameters " +
"(or increasing n_init) or check for degenerate data.")
if self.n_iter:
self.covars_ = best_params['covars']
self.means_ = best_params['means']
self.weights_ = best_params['weights']
else: # self.n_iter == 0 occurs when using GMM within HMM
# Need to make sure that there are responsibilities to output
# Output zeros because it was just a quick initialization
responsibilities = np.zeros((X.shape[0], self.n_components))
return responsibilities
def fit(self, X, y=None):
"""Estimate model parameters with the EM algorithm.
        An initialization step is performed before entering the
expectation-maximization (EM) algorithm. If you want to avoid
this step, set the keyword argument init_params to the empty
string '' when creating the GMM object. Likewise, if you would
like just to do an initialization, set n_iter=0.
Parameters
----------
X : array_like, shape (n, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
self
"""
self._fit(X, y)
return self
def _do_mstep(self, X, responsibilities, params, min_covar=0):
"""Perform the Mstep of the EM algorithm and return the cluster weights.
"""
weights = responsibilities.sum(axis=0)
weighted_X_sum = np.dot(responsibilities.T, X)
inverse_weights = 1.0 / (weights[:, np.newaxis] + 10 * EPS)
if 'w' in params:
self.weights_ = (weights / (weights.sum() + 10 * EPS) + EPS)
if 'm' in params:
self.means_ = weighted_X_sum * inverse_weights
if 'c' in params:
covar_mstep_func = _covar_mstep_funcs[self.covariance_type]
self.covars_ = covar_mstep_func(
self, X, responsibilities, weighted_X_sum, inverse_weights,
min_covar)
return weights
def _n_parameters(self):
"""Return the number of free parameters in the model."""
ndim = self.means_.shape[1]
if self.covariance_type == 'full':
cov_params = self.n_components * ndim * (ndim + 1) / 2.
elif self.covariance_type == 'diag':
cov_params = self.n_components * ndim
elif self.covariance_type == 'tied':
cov_params = ndim * (ndim + 1) / 2.
elif self.covariance_type == 'spherical':
cov_params = self.n_components
mean_params = ndim * self.n_components
return int(cov_params + mean_params + self.n_components - 1)
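    # Note on _n_parameters() above: the total is cov_params + mean_params
    # + (n_components - 1).  A full symmetric covariance has
    # ndim * (ndim + 1) / 2 free entries per component, each mean has ndim
    # entries, and only n_components - 1 mixture weights are free because
    # the weights sum to one.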
def bic(self, X):
"""Bayesian information criterion for the current model fit
and the proposed data.
Parameters
----------
X : array of shape(n_samples, n_dimensions)
Returns
-------
bic : float (the lower the better)
"""
return (-2 * self.score(X).sum() +
self._n_parameters() * np.log(X.shape[0]))
def aic(self, X):
"""Akaike information criterion for the current model fit
and the proposed data.
Parameters
----------
X : array of shape(n_samples, n_dimensions)
Returns
-------
aic : float (the lower the better)
"""
        return -2 * self.score(X).sum() + 2 * self._n_parameters()
@deprecated("The class GMM is deprecated in 0.18 and will be "
            "removed in 0.20. Use class GaussianMixture instead.")
class GMM(_GMMBase):
"""
Legacy Gaussian Mixture Model
.. deprecated:: 0.18
This class will be removed in 0.20.
Use :class:`sklearn.mixture.GaussianMixture` instead.
"""
def __init__(self, n_components=1, covariance_type='diag',
random_state=None, tol=1e-3, min_covar=1e-3,
n_iter=100, n_init=1, params='wmc', init_params='wmc',
verbose=0):
super(GMM, self).__init__(
n_components=n_components, covariance_type=covariance_type,
random_state=random_state, tol=tol, min_covar=min_covar,
n_iter=n_iter, n_init=n_init, params=params,
init_params=init_params, verbose=verbose)
#########################################################################
# some helper routines
#########################################################################
def _log_multivariate_normal_density_diag(X, means, covars):
"""Compute Gaussian log-density at X for a diagonal model."""
n_samples, n_dim = X.shape
lpr = -0.5 * (n_dim * np.log(2 * np.pi) + np.sum(np.log(covars), 1)
+ np.sum((means ** 2) / covars, 1)
- 2 * np.dot(X, (means / covars).T)
+ np.dot(X ** 2, (1.0 / covars).T))
return lpr
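# Note on the vectorised expression above: for a single component with mean m
# and diagonal covariance c,
#   log N(x | m, diag(c)) = -0.5 * (D * log(2 * pi) + sum_d log(c_d)
#                                   + sum_d (x_d - m_d) ** 2 / c_d)
# and expanding the quadratic term into x**2 / c - 2 * x * m / c + m**2 / c
# gives exactly the four array terms summed in `lpr`.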
def _log_multivariate_normal_density_spherical(X, means, covars):
"""Compute Gaussian log-density at X for a spherical model."""
cv = covars.copy()
if covars.ndim == 1:
cv = cv[:, np.newaxis]
if cv.shape[1] == 1:
cv = np.tile(cv, (1, X.shape[-1]))
return _log_multivariate_normal_density_diag(X, means, cv)
def _log_multivariate_normal_density_tied(X, means, covars):
"""Compute Gaussian log-density at X for a tied model."""
cv = np.tile(covars, (means.shape[0], 1, 1))
return _log_multivariate_normal_density_full(X, means, cv)
def _log_multivariate_normal_density_full(X, means, covars, min_covar=1.e-7):
"""Log probability for full covariance matrices."""
n_samples, n_dim = X.shape
nmix = len(means)
log_prob = np.empty((n_samples, nmix))
for c, (mu, cv) in enumerate(zip(means, covars)):
try:
cv_chol = linalg.cholesky(cv, lower=True)
except linalg.LinAlgError:
# The model is most probably stuck in a component with too
            # few observations; we need to reinitialize this component
try:
cv_chol = linalg.cholesky(cv + min_covar * np.eye(n_dim),
lower=True)
except linalg.LinAlgError:
raise ValueError("'covars' must be symmetric, "
"positive-definite")
cv_log_det = 2 * np.sum(np.log(np.diagonal(cv_chol)))
cv_sol = linalg.solve_triangular(cv_chol, (X - mu).T, lower=True).T
log_prob[:, c] = - .5 * (np.sum(cv_sol ** 2, axis=1) +
n_dim * np.log(2 * np.pi) + cv_log_det)
return log_prob
def _validate_covars(covars, covariance_type, n_components):
"""Do basic checks on matrix covariance sizes and values."""
from scipy import linalg
if covariance_type == 'spherical':
if len(covars) != n_components:
            raise ValueError("'spherical' covars must have length n_components")
elif np.any(covars <= 0):
            raise ValueError("'spherical' covars must be positive")
elif covariance_type == 'tied':
if covars.shape[0] != covars.shape[1]:
raise ValueError("'tied' covars must have shape (n_dim, n_dim)")
elif (not np.allclose(covars, covars.T)
or np.any(linalg.eigvalsh(covars) <= 0)):
raise ValueError("'tied' covars must be symmetric, "
"positive-definite")
elif covariance_type == 'diag':
if len(covars.shape) != 2:
raise ValueError("'diag' covars must have shape "
"(n_components, n_dim)")
elif np.any(covars <= 0):
            raise ValueError("'diag' covars must be positive")
elif covariance_type == 'full':
if len(covars.shape) != 3:
raise ValueError("'full' covars must have shape "
"(n_components, n_dim, n_dim)")
elif covars.shape[1] != covars.shape[2]:
raise ValueError("'full' covars must have shape "
"(n_components, n_dim, n_dim)")
for n, cv in enumerate(covars):
if (not np.allclose(cv, cv.T)
or np.any(linalg.eigvalsh(cv) <= 0)):
raise ValueError("component %d of 'full' covars must be "
"symmetric, positive-definite" % n)
else:
raise ValueError("covariance_type must be one of " +
"'spherical', 'tied', 'diag', 'full'")
@deprecated("The function distribute_covar_matrix_to_match_covariance_type "
            "is deprecated in 0.18 and will be removed in 0.20.")
def distribute_covar_matrix_to_match_covariance_type(
tied_cv, covariance_type, n_components):
"""Create all the covariance matrices from a given template."""
if covariance_type == 'spherical':
cv = np.tile(tied_cv.mean() * np.ones(tied_cv.shape[1]),
(n_components, 1))
elif covariance_type == 'tied':
cv = tied_cv
elif covariance_type == 'diag':
cv = np.tile(np.diag(tied_cv), (n_components, 1))
elif covariance_type == 'full':
cv = np.tile(tied_cv, (n_components, 1, 1))
else:
raise ValueError("covariance_type must be one of " +
"'spherical', 'tied', 'diag', 'full'")
return cv
def _covar_mstep_diag(gmm, X, responsibilities, weighted_X_sum, norm,
min_covar):
"""Perform the covariance M step for diagonal cases."""
avg_X2 = np.dot(responsibilities.T, X * X) * norm
avg_means2 = gmm.means_ ** 2
avg_X_means = gmm.means_ * weighted_X_sum * norm
return avg_X2 - 2 * avg_X_means + avg_means2 + min_covar
def _covar_mstep_spherical(*args):
"""Perform the covariance M step for spherical cases."""
cv = _covar_mstep_diag(*args)
return np.tile(cv.mean(axis=1)[:, np.newaxis], (1, cv.shape[1]))
def _covar_mstep_full(gmm, X, responsibilities, weighted_X_sum, norm,
min_covar):
"""Perform the covariance M step for full cases."""
# Eq. 12 from K. Murphy, "Fitting a Conditional Linear Gaussian
# Distribution"
n_features = X.shape[1]
cv = np.empty((gmm.n_components, n_features, n_features))
for c in range(gmm.n_components):
post = responsibilities[:, c]
mu = gmm.means_[c]
diff = X - mu
with np.errstate(under='ignore'):
# Underflow Errors in doing post * X.T are not important
avg_cv = np.dot(post * diff.T, diff) / (post.sum() + 10 * EPS)
cv[c] = avg_cv + min_covar * np.eye(n_features)
return cv
def _covar_mstep_tied(gmm, X, responsibilities, weighted_X_sum, norm,
min_covar):
"""Perform the covariance M step for tied cases."""
# Eq. 15 from K. Murphy, "Fitting a Conditional Linear Gaussian
# Distribution"
avg_X2 = np.dot(X.T, X)
avg_means2 = np.dot(gmm.means_.T, weighted_X_sum)
out = avg_X2 - avg_means2
out *= 1. / X.shape[0]
out.flat[::len(out) + 1] += min_covar
return out
_covar_mstep_funcs = {'spherical': _covar_mstep_spherical,
'diag': _covar_mstep_diag,
'tied': _covar_mstep_tied,
'full': _covar_mstep_full,
}
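# ---------------------------------------------------------------------------
# Usage sketch for the estimator defined above.  `_gmm_usage_example` is a
# hypothetical helper added for illustration only (it is not part of the
# scikit-learn API and is never called here); the data and parameter values
# are made up.
def _gmm_usage_example():
    rng = np.random.RandomState(0)
    # two well-separated one-dimensional clusters
    obs = np.concatenate((rng.randn(100, 1), 10 + rng.randn(300, 1)))
    g = GMM(n_components=2, covariance_type='diag', n_iter=100)
    g.fit(obs)
    print('weights:', np.round(g.weights_, 2))
    print('means:', np.round(g.means_, 2))
    print('labels for [[0], [10]]:', g.predict([[0], [10]]))
    return g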
| bsd-3-clause |
MartinSavc/scikit-learn | sklearn/datasets/mldata.py | 309 | 7838 | """Automatically download MLdata datasets."""
# Copyright (c) 2011 Pietro Berkes
# License: BSD 3 clause
import os
from os.path import join, exists
import re
import numbers
try:
# Python 2
from urllib2 import HTTPError
from urllib2 import quote
from urllib2 import urlopen
except ImportError:
# Python 3+
from urllib.error import HTTPError
from urllib.parse import quote
from urllib.request import urlopen
import numpy as np
import scipy as sp
from scipy import io
from shutil import copyfileobj
from .base import get_data_home, Bunch
MLDATA_BASE_URL = "http://mldata.org/repository/data/download/matlab/%s"
def mldata_filename(dataname):
    """Convert a raw name for a data set into an mldata.org filename."""
dataname = dataname.lower().replace(' ', '-')
return re.sub(r'[().]', '', dataname)
def fetch_mldata(dataname, target_name='label', data_name='data',
transpose_data=True, data_home=None):
"""Fetch an mldata.org data set
    If the file does not exist yet, it is downloaded from mldata.org.
mldata.org does not have an enforced convention for storing data or
naming the columns in a data set. The default behavior of this function
works well with the most common cases:
1) data values are stored in the column 'data', and target values in the
column 'label'
2) alternatively, the first column stores target values, and the second
data values
    3) the data array is stored as `n_features x n_samples`, and thus needs
to be transposed to match the `sklearn` standard
    Keyword arguments allow adapting these defaults to specific data sets
(see parameters `target_name`, `data_name`, `transpose_data`, and
the examples below).
mldata.org data sets may have multiple columns, which are stored in the
Bunch object with their original name.
Parameters
----------
dataname:
Name of the data set on mldata.org,
e.g.: "leukemia", "Whistler Daily Snowfall", etc.
        The raw name is automatically converted to an mldata.org URL.
target_name: optional, default: 'label'
Name or index of the column containing the target values.
data_name: optional, default: 'data'
Name or index of the column containing the data.
transpose_data: optional, default: True
If True, transpose the downloaded data array.
data_home: optional, default: None
Specify another download and cache folder for the data sets. By default
all scikit learn data is stored in '~/scikit_learn_data' subfolders.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn, 'target', the classification labels,
'DESCR', the full description of the dataset, and
'COL_NAMES', the original names of the dataset columns.
Examples
--------
Load the 'iris' dataset from mldata.org:
>>> from sklearn.datasets.mldata import fetch_mldata
>>> import tempfile
>>> test_data_home = tempfile.mkdtemp()
>>> iris = fetch_mldata('iris', data_home=test_data_home)
>>> iris.target.shape
(150,)
>>> iris.data.shape
(150, 4)
Load the 'leukemia' dataset from mldata.org, which needs to be transposed
    to respect the sklearn axes convention:
>>> leuk = fetch_mldata('leukemia', transpose_data=True,
... data_home=test_data_home)
>>> leuk.data.shape
(72, 7129)
Load an alternative 'iris' dataset, which has different names for the
columns:
>>> iris2 = fetch_mldata('datasets-UCI iris', target_name=1,
... data_name=0, data_home=test_data_home)
>>> iris3 = fetch_mldata('datasets-UCI iris',
... target_name='class', data_name='double0',
... data_home=test_data_home)
>>> import shutil
>>> shutil.rmtree(test_data_home)
"""
# normalize dataset name
dataname = mldata_filename(dataname)
# check if this data set has been already downloaded
data_home = get_data_home(data_home=data_home)
data_home = join(data_home, 'mldata')
if not exists(data_home):
os.makedirs(data_home)
matlab_name = dataname + '.mat'
filename = join(data_home, matlab_name)
# if the file does not exist, download it
if not exists(filename):
urlname = MLDATA_BASE_URL % quote(dataname)
try:
mldata_url = urlopen(urlname)
except HTTPError as e:
if e.code == 404:
e.msg = "Dataset '%s' not found on mldata.org." % dataname
raise
# store Matlab file
try:
with open(filename, 'w+b') as matlab_file:
copyfileobj(mldata_url, matlab_file)
except:
os.remove(filename)
raise
mldata_url.close()
# load dataset matlab file
with open(filename, 'rb') as matlab_file:
matlab_dict = io.loadmat(matlab_file, struct_as_record=True)
# -- extract data from matlab_dict
# flatten column names
col_names = [str(descr[0])
for descr in matlab_dict['mldata_descr_ordering'][0]]
# if target or data names are indices, transform then into names
if isinstance(target_name, numbers.Integral):
target_name = col_names[target_name]
if isinstance(data_name, numbers.Integral):
data_name = col_names[data_name]
# rules for making sense of the mldata.org data format
# (earlier ones have priority):
# 1) there is only one array => it is "data"
# 2) there are multiple arrays
# a) copy all columns in the bunch, using their column name
# b) if there is a column called `target_name`, set "target" to it,
# otherwise set "target" to first column
# c) if there is a column called `data_name`, set "data" to it,
# otherwise set "data" to second column
dataset = {'DESCR': 'mldata.org dataset: %s' % dataname,
'COL_NAMES': col_names}
# 1) there is only one array => it is considered data
if len(col_names) == 1:
data_name = col_names[0]
dataset['data'] = matlab_dict[data_name]
# 2) there are multiple arrays
else:
for name in col_names:
dataset[name] = matlab_dict[name]
if target_name in col_names:
del dataset[target_name]
dataset['target'] = matlab_dict[target_name]
else:
del dataset[col_names[0]]
dataset['target'] = matlab_dict[col_names[0]]
if data_name in col_names:
del dataset[data_name]
dataset['data'] = matlab_dict[data_name]
else:
del dataset[col_names[1]]
dataset['data'] = matlab_dict[col_names[1]]
# set axes to sklearn conventions
if transpose_data:
dataset['data'] = dataset['data'].T
if 'target' in dataset:
if not sp.sparse.issparse(dataset['target']):
dataset['target'] = dataset['target'].squeeze()
return Bunch(**dataset)
# The following is used by nosetests to setup the docstring tests fixture
def setup_module(module):
# setup mock urllib2 module to avoid downloading from mldata.org
from sklearn.utils.testing import install_mldata_mock
install_mldata_mock({
'iris': {
'data': np.empty((150, 4)),
'label': np.empty(150),
},
'datasets-uci-iris': {
'double0': np.empty((150, 4)),
'class': np.empty((150,)),
},
'leukemia': {
'data': np.empty((72, 7129)),
},
})
def teardown_module(module):
from sklearn.utils.testing import uninstall_mldata_mock
uninstall_mldata_mock()
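# ---------------------------------------------------------------------------
# Usage sketch: `_mldata_usage_example` is a hypothetical helper added for
# illustration only (not part of the scikit-learn API and never called here).
# It shows how a raw dataset name is normalized and turned into a download
# URL without performing any network access.
def _mldata_usage_example():
    name = mldata_filename('Whistler Daily Snowfall')
    print(name)                           # 'whistler-daily-snowfall'
    print(MLDATA_BASE_URL % quote(name))
    return name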
| bsd-3-clause |
pkreissl/espresso | doc/tutorials/convert.py | 1 | 12724 | #
# Copyright (C) 2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
This script processes Jupyter notebooks. External Python scripts
can be inserted as new code cells (e.g. solutions to exercises).
Hidden solutions from the ``exercise2`` plugin can be converted
to code cells. The notebook may also be executed, if necessary
with modified global variables to reduce runtime. The processed
notebook can then be converted to HTML externally.
"""
import argparse
import nbformat
import re
import os
import ast
import sys
import uuid
sys.path.append('@CMAKE_SOURCE_DIR@/testsuite/scripts')
import importlib_wrapper as iw
def get_code_cells(nb):
return [c['source'] for c in nb['cells'] if c['cell_type'] == 'code']
def set_code_cells(nb, new_cells):
i = 0
for c in nb['cells']:
if c['cell_type'] == 'code':
c['source'] = new_cells[i]
i += 1
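# Sketch (hypothetical helper, not used by the CLI below): build an in-memory
# notebook and show how get_code_cells / set_code_cells round-trip the cell
# sources.
def _demo_cell_roundtrip():
    nb = nbformat.v4.new_notebook()
    nb['cells'].append(nbformat.v4.new_code_cell(source='print("hello")'))
    sources = get_code_cells(nb)
    set_code_cells(nb, [s.replace('hello', 'world') for s in sources])
    return get_code_cells(nb)  # ['print("world")']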
def add_cell_from_script(nb, filepath):
"""
    Create a new code cell at the end of a notebook and populate it with
the content of a script.
"""
with open(filepath, encoding='utf-8') as f:
code = f.read()
# remove ESPResSo copyright header
m = re.search('# Copyright \(C\) [\d\-,]+ The ESPResSo project\n.+?'
'If not, see <http://www\.gnu\.org/licenses/>\.\n', code, re.DOTALL)
if m and all(x.startswith('#') for x in m.group(0).strip().split('\n')):
code = re.sub('^(#\n)+', '', code.replace(m.group(0), ''), re.M)
# strip first component in relative paths
code = re.sub('(?<=[\'\"])\.\./', './', code)
# create new cells
filename = os.path.relpath(os.path.realpath(filepath))
if len(filename) > len(filepath):
filename = filepath
cell_md = nbformat.v4.new_markdown_cell(source='Solution from ' + filename)
nb['cells'].append(cell_md)
cell_code = nbformat.v4.new_code_cell(source=code.strip())
nb['cells'].append(cell_code)
def remove_empty_cells(nb):
for i in range(len(nb['cells']) - 1, 0, -1):
cell = nb['cells'][i]
if cell['source'].strip() == '':
nb['cells'].pop(i)
def disable_plot_interactivity(nb):
"""
Replace all occurrences of the magic command ``%matplotlib notebook``
by ``%matplotlib inline``.
"""
for cell in nb['cells']:
if cell['cell_type'] == 'code' and 'matplotlib' in cell['source']:
cell['source'] = re.sub('^%matplotlib +notebook',
'%matplotlib inline',
cell['source'], flags=re.M)
def split_matplotlib_cells(nb):
"""
If a cell imports matplotlib, split the cell to keep the
import statement separate from the code that uses matplotlib.
This prevents a known bug in the Jupyter backend which causes
the plot object to be represented as a string instead of a canvas
when created in the cell where matplotlib is imported for the
first time (https://github.com/jupyter/notebook/issues/3523).
"""
for i in range(len(nb['cells']) - 1, -1, -1):
cell = nb['cells'][i]
if cell['cell_type'] == 'code' and 'matplotlib' in cell['source']:
code = iw.protect_ipython_magics(cell['source'])
# split cells after matplotlib imports
mapping = iw.delimit_statements(code)
tree = ast.parse(code)
visitor = iw.GetMatplotlibPyplot()
visitor.visit(tree)
if visitor.matplotlib_first:
code = iw.deprotect_ipython_magics(code)
lines = code.split('\n')
lineno_end = mapping[visitor.matplotlib_first]
split_code = '\n'.join(lines[lineno_end:]).lstrip('\n')
if split_code:
new_cell = nbformat.v4.new_code_cell(source=split_code)
nb['cells'].insert(i + 1, new_cell)
lines = lines[:lineno_end]
nb['cells'][i]['source'] = '\n'.join(lines).rstrip('\n')
def convert_exercise2_to_code(nb):
"""
Walk through the notebook cells and convert exercise2 Markdown cells
containing fenced python code to exercise2 code cells.
"""
for i, cell in enumerate(nb['cells']):
if 'solution2' in cell['metadata']:
cell['metadata']['solution2'] = 'shown'
# convert solution markdown cells into code cells
if cell['cell_type'] == 'markdown' and 'solution2' in cell['metadata'] \
and 'solution2_first' not in cell['metadata']:
lines = cell['source'].strip().split('\n')
if lines[0].strip() == '```python' and lines[-1].strip() == '```':
source = '\n'.join(lines[1:-1]).strip()
nb['cells'][i] = nbformat.v4.new_code_cell(source=source)
nb['cells'][i]['metadata'] = cell['metadata']
nb['cells'][i]['metadata']['solution2'] = 'shown'
def convert_exercise2_to_markdown(nb):
"""
Walk through the notebook cells and convert exercise2 Python cells
to exercise2 Markdown cells using a fenced code block.
"""
for i, cell in enumerate(nb['cells']):
if 'solution2' in cell['metadata']:
cell['metadata']['solution2'] = 'hidden'
# convert solution code cells into markdown cells
if cell['cell_type'] == 'code' and 'solution2' in cell['metadata']:
content = '```python\n' + cell['source'] + '\n```'
nb['cells'][i] = nbformat.v4.new_markdown_cell(source=content)
nb['cells'][i]['metadata'] = cell['metadata']
nb['cells'][i]['metadata']['solution2'] = 'hidden'
def apply_autopep8(nb):
import yaml
import autopep8
def get_autopep8_options():
options = {'aggressive': 0, 'ignore': [], 'max_line_length': 120}
with open('@CMAKE_SOURCE_DIR@/.pre-commit-config.yaml') as f:
pre_config = yaml.safe_load(f)
for repo in pre_config['repos']:
for hook in repo['hooks']:
if hook['id'] == 'autopep8':
for arg in hook['args']:
if arg == '--aggressive':
options['aggressive'] += 1
elif arg.startswith('--ignore='):
                            options['ignore'] = arg.split('=', 1)[1].split(',')
return options
return options
pep8_opts = get_autopep8_options()
for cell in nb['cells']:
source = None
header = ''
footer = ''
if cell['cell_type'] == 'code':
source = cell['source']
elif cell['cell_type'] == 'markdown' and 'solution2' in cell['metadata']:
lines = cell['source'].strip().split('\n')
if lines[0].strip() == '```python' and lines[-1].strip() == '```':
source = '\n'.join(lines[1:-1])
header = lines[0] + '\n'
footer = '\n' + lines[-1]
if source is not None:
source = autopep8.fix_code(source, options=pep8_opts).strip()
cell['source'] = header + source + footer
def execute_notebook(nb, src, cell_separator, notebook_filepath):
"""
Run the notebook in a python3 kernel. The ESPResSo visualizers are
disabled to prevent the kernel from crashing and to allow running
the notebook in a CI environment.
"""
import nbconvert.preprocessors
notebook_dirname = os.path.dirname(notebook_filepath)
# disable OpenGL/Mayavi GUI
src_no_gui = iw.mock_es_visualization(src)
# update notebook with new code
set_code_cells(nb, src_no_gui.split(cell_separator))
# execute notebook
ep = nbconvert.preprocessors.ExecutePreprocessor(
timeout=20 * 60, kernel_name='python3')
ep.preprocess(nb, {'metadata': {'path': notebook_dirname}})
# restore notebook with code before the GUI removal step
set_code_cells(nb, src.split(cell_separator))
def handle_ci_case(args):
notebook_filepath = args.input
if args.output:
notebook_filepath_edited = args.output
else:
notebook_filepath_edited = notebook_filepath + '~'
# parse original notebook
with open(notebook_filepath, encoding='utf-8') as f:
nb = nbformat.read(f, as_version=4)
# add new cells containing the solutions
if args.scripts:
for filepath in args.scripts:
add_cell_from_script(nb, filepath)
# convert solution cells to code cells
if args.exercise2:
convert_exercise2_to_code(nb)
# remove empty cells (e.g. those below exercise2 cells)
if args.remove_empty_cells:
remove_empty_cells(nb)
# disable plot interactivity
disable_plot_interactivity(nb)
# guard against a jupyter bug involving matplotlib
split_matplotlib_cells(nb)
if args.substitutions or args.execute:
# substitute global variables
cell_separator = '\n##{}\n'.format(uuid.uuid4().hex)
src = cell_separator.join(get_code_cells(nb))
new_values = args.substitutions or []
parameters = dict(x.split('=', 1) for x in new_values)
src = iw.substitute_variable_values(src, strings_as_is=True,
keep_original=False, **parameters)
set_code_cells(nb, src.split(cell_separator))
if args.execute:
execute_notebook(nb, src, cell_separator, args.input)
# write edited notebook
with open(notebook_filepath_edited, 'w', encoding='utf-8') as f:
nbformat.write(nb, f)
def handle_exercise2_case(args):
# parse original notebook
with open(args.input, encoding='utf-8') as f:
nb = nbformat.read(f, as_version=4)
if args.to_md:
convert_exercise2_to_markdown(nb)
elif args.to_py:
convert_exercise2_to_code(nb)
elif args.pep8:
convert_exercise2_to_code(nb)
apply_autopep8(nb)
convert_exercise2_to_markdown(nb)
# write edited notebook
with open(args.input, 'w', encoding='utf-8') as f:
nbformat.write(nb, f)
parser = argparse.ArgumentParser(description='Process Jupyter notebooks.',
epilog=__doc__)
subparsers = parser.add_subparsers(help='Submodules')
# CI module
parser_ci = subparsers.add_parser(
'ci', help='module for CI (variable substitution, code execution, etc.)')
parser_ci.add_argument('--input', type=str, required=True,
help='path to the original Jupyter notebook')
parser_ci.add_argument('--output', type=str,
help='path to the processed Jupyter notebook')
parser_ci.add_argument('--substitutions', nargs='*',
help='variables to substitute')
parser_ci.add_argument('--scripts', nargs='*',
help='scripts to insert in new cells')
parser_ci.add_argument('--exercise2', action='store_true',
help='convert exercise2 solutions into code cells')
parser_ci.add_argument('--remove-empty-cells', action='store_true',
help='remove empty cells')
parser_ci.add_argument('--execute', action='store_true',
help='run the notebook')
parser_ci.set_defaults(callback=handle_ci_case)
# exercise2 module
parser_exercise2 = subparsers.add_parser(
'exercise2', help='module for exercise2 conversion (Markdown <-> Python)')
parser_exercise2.add_argument('input', type=str, help='path to the Jupyter '
'notebook (in-place conversion)')
group_exercise2 = parser_exercise2.add_mutually_exclusive_group(required=True)
group_exercise2.add_argument('--to-md', action='store_true',
help='convert solution cells to Markdown')
group_exercise2.add_argument('--to-py', action='store_true',
help='convert solution cells to Python')
group_exercise2.add_argument('--pep8', action='store_true',
help='apply autopep8 formatting')
parser_exercise2.set_defaults(callback=handle_exercise2_case)
if __name__ == "__main__":
args = parser.parse_args()
args.callback(args)
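# Example invocations of this script (file names and substitution values are
# hypothetical):
#   python convert.py ci --input tutorial.ipynb --output tutorial.run.ipynb \
#       --substitutions n_steps=100 --execute
#   python convert.py exercise2 --to-py tutorial.ipynb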
| gpl-3.0 |
rcolasanti/CompaniesHouseScraper | DVLACompanyNmeMatchCoHoAPI.py | 1 | 4759 |
import requests
import json
import numpy as np
import pandas as pd
import CoHouseToken
from difflib import SequenceMatcher
# In[3]:
def exactMatch(line1, line2):
line1=line1.upper().rstrip()
line2=line2.upper().rstrip()
#print("|"+line1+"|"+line2+"|",line1==line2)
return line1==line2
def aStopWord(word):
return word.upper().replace("COMPANY","CO").replace("LIMITED","LTD").replace("&","AND").rstrip()
def spaces(word):
w = word.upper().replace("/"," ")
w = w.replace("."," ").replace(","," ").replace("-"," ").rstrip()
return w
def removeAStopWord(word):
w = word.upper().replace("LTD"," ").replace("CO"," ").replace("AND"," ").replace("("," ").replace("/"," ")
w = w.replace(")"," ").replace("."," ").replace(","," ").replace("-"," ").rstrip()
return w
def removeABlank(word):
w = word.replace(" ","")
return w
def removeABracket (line):
flag = False
word=""
for a in line:
if a=="(":
flag = True
a=""
if a==")":
a=""
flag = False
if flag:
a=""
word+=a
return word
def stopWord(line1, line2):
line1=aStopWord(line1)
line2=aStopWord(line2)
#print("|"+line1+"|"+line2+"|",line1==line2)
return line1==line2
def removeStopWord(line1, line2):
line1=spaces(line1)
line2=spaces(line2)
line1=aStopWord(line1)
line2=aStopWord(line2)
line1=removeAStopWord(line1)
line2=removeAStopWord(line2)
#print("|"+line1+"|"+line2+"|",line1==line2)
return line1==line2
def removeBlanks(line1, line2):
line1=spaces(line1)
line2=spaces(line2)
line1=aStopWord(line1)
line2=aStopWord(line2)
line1=removeAStopWord(line1)
line2=removeAStopWord(line2)
line1=removeABlank(line1)
line2=removeABlank(line2)
return line1==line2
def removeBrackets(line1, line2):
line1=removeABracket(line1)
line2=removeABracket(line2)
line1=spaces(line1)
line2=spaces(line2)
line1=aStopWord(line1)
line2=aStopWord(line2)
line1=removeAStopWord(line1)
line2=removeAStopWord(line2)
line1=removeABlank(line1)
line2=removeABlank(line2)
#print("|"+line1+"|"+line2+"|",line1==line2)
return line1==line2
def strip(line1, line2):
line1=removeABracket(line1)
line2=removeABracket(line2)
line1=spaces(line1)
line2=spaces(line2)
line1=aStopWord(line1)
line2=aStopWord(line2)
line1=removeAStopWord(line1)
line2=removeAStopWord(line2)
line1=removeABlank(line1)
line2=removeABlank(line2)
return line1,line2
def match(company,results):
for i in results['items']:
line = i['title']
number = i['company_number']
if(exactMatch(company,line)):
return True,line,number
for i in results['items']:
line = i['title']
number = i['company_number']
if(stopWord(company,line)):
return True,line,number
for i in results['items']:
line = i['title']
number = i['company_number']
if(removeStopWord(company,line)):
return True,line,number
for i in results['items']:
line = i['title']
number = i['company_number']
if(removeBlanks(company,line)):
return True,line,number
for i in results['items']:
line = i['title']
number = i['company_number']
if(removeBrackets(company,line)):
return True,line,number
#old_match(company,results)
return False,"",""
def main(args):
print(args[0])
search_url ="https://api.companieshouse.gov.uk/search/companies?q="
token = CoHouseToken.getToken()
pw = ''
base_url = 'https://api.companieshouse.gov.uk'
file = args[1]
df = pd.read_csv(file)
companies = df.Organisation
found = open("found.csv",'w')
missing = open("missing.csv",'w')
count=0
for c in companies:
c =c.upper().replace("&","AND")
c = c.split(" T/A ")[0]
c = c.split("WAS ")[0]
c= spaces(c)
url=search_url+c
#print(url)
results = json.loads(requests.get(url, auth=(token,pw)).text)
res,line,number = match(c,results)
if res:
found.write("%s,%s,%s,\n"%(c,line,number))
print("*",end="")
count+=1
else:
missing.write("%s\n"%(c))
print(".")
found.close()
missing.close()
print()
print(count/len(companies))
return 0
if __name__ == '__main__':
import sys
sys.exit(main(sys.argv))
| gpl-3.0 |
mhdella/scikit-learn | sklearn/datasets/species_distributions.py | 198 | 7923 | """
=============================
Species distribution dataset
=============================
This dataset represents the geographic distribution of species.
The dataset is provided by Phillips et. al. (2006).
The two species are:
- `"Bradypus variegatus"
<http://www.iucnredlist.org/apps/redlist/details/3038/0>`_ ,
the Brown-throated Sloth.
- `"Microryzomys minutus"
<http://www.iucnredlist.org/apps/redlist/details/13408/0>`_ ,
also known as the Forest Small Rice Rat, a rodent that lives in Peru,
   Colombia, Ecuador, and Venezuela.
References:
* `"Maximum entropy modeling of species geographic distributions"
<http://www.cs.princeton.edu/~schapire/papers/ecolmod.pdf>`_
S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling,
190:231-259, 2006.
Notes:
* See examples/applications/plot_species_distribution_modeling.py
for an example of using this dataset
"""
# Authors: Peter Prettenhofer <[email protected]>
# Jake Vanderplas <[email protected]>
#
# License: BSD 3 clause
from io import BytesIO
from os import makedirs
from os.path import join
from os.path import exists
try:
# Python 2
from urllib2 import urlopen
PY2 = True
except ImportError:
# Python 3
from urllib.request import urlopen
PY2 = False
import numpy as np
from sklearn.datasets.base import get_data_home, Bunch
from sklearn.externals import joblib
DIRECTORY_URL = "http://www.cs.princeton.edu/~schapire/maxent/datasets/"
SAMPLES_URL = join(DIRECTORY_URL, "samples.zip")
COVERAGES_URL = join(DIRECTORY_URL, "coverages.zip")
DATA_ARCHIVE_NAME = "species_coverage.pkz"
def _load_coverage(F, header_length=6, dtype=np.int16):
"""Load a coverage file from an open file object.
This will return a numpy array of the given dtype
"""
header = [F.readline() for i in range(header_length)]
make_tuple = lambda t: (t.split()[0], float(t.split()[1]))
header = dict([make_tuple(line) for line in header])
M = np.loadtxt(F, dtype=dtype)
nodata = header[b'NODATA_value']
if nodata != -9999:
print(nodata)
M[nodata] = -9999
return M
def _load_csv(F):
"""Load csv file.
Parameters
----------
F : file object
CSV file open in byte mode.
Returns
-------
rec : np.ndarray
record array representing the data
"""
if PY2:
# Numpy recarray wants Python 2 str but not unicode
names = F.readline().strip().split(',')
else:
# Numpy recarray wants Python 3 str but not bytes...
names = F.readline().decode('ascii').strip().split(',')
rec = np.loadtxt(F, skiprows=0, delimiter=',', dtype='a22,f4,f4')
rec.dtype.names = names
return rec
def construct_grids(batch):
"""Construct the map grid from the batch object
Parameters
----------
batch : Batch object
The object returned by :func:`fetch_species_distributions`
Returns
-------
(xgrid, ygrid) : 1-D arrays
The grid corresponding to the values in batch.coverages
"""
# x,y coordinates for corner cells
xmin = batch.x_left_lower_corner + batch.grid_size
xmax = xmin + (batch.Nx * batch.grid_size)
ymin = batch.y_left_lower_corner + batch.grid_size
ymax = ymin + (batch.Ny * batch.grid_size)
# x coordinates of the grid cells
xgrid = np.arange(xmin, xmax, batch.grid_size)
# y coordinates of the grid cells
ygrid = np.arange(ymin, ymax, batch.grid_size)
return (xgrid, ygrid)
def fetch_species_distributions(data_home=None,
download_if_missing=True):
"""Loader for species distribution dataset from Phillips et. al. (2006)
Read more in the :ref:`User Guide <datasets>`.
Parameters
----------
data_home : optional, default: None
Specify another download and cache folder for the datasets. By default
all scikit learn data is stored in '~/scikit_learn_data' subfolders.
download_if_missing: optional, True by default
If False, raise a IOError if the data is not locally available
instead of trying to download the data from the source site.
Returns
--------
The data is returned as a Bunch object with the following attributes:
coverages : array, shape = [14, 1592, 1212]
These represent the 14 features measured at each point of the map grid.
The latitude/longitude values for the grid are discussed below.
Missing data is represented by the value -9999.
train : record array, shape = (1623,)
The training points for the data. Each point has three fields:
- train['species'] is the species name
- train['dd long'] is the longitude, in degrees
- train['dd lat'] is the latitude, in degrees
test : record array, shape = (619,)
The test points for the data. Same format as the training data.
Nx, Ny : integers
The number of longitudes (x) and latitudes (y) in the grid
x_left_lower_corner, y_left_lower_corner : floats
The (x,y) position of the lower-left corner, in degrees
grid_size : float
The spacing between points of the grid, in degrees
Notes
------
This dataset represents the geographic distribution of species.
The dataset is provided by Phillips et. al. (2006).
The two species are:
- `"Bradypus variegatus"
<http://www.iucnredlist.org/apps/redlist/details/3038/0>`_ ,
the Brown-throated Sloth.
- `"Microryzomys minutus"
<http://www.iucnredlist.org/apps/redlist/details/13408/0>`_ ,
also known as the Forest Small Rice Rat, a rodent that lives in Peru,
        Colombia, Ecuador, and Venezuela.
References
----------
* `"Maximum entropy modeling of species geographic distributions"
<http://www.cs.princeton.edu/~schapire/papers/ecolmod.pdf>`_
S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling,
190:231-259, 2006.
Notes
-----
* See examples/applications/plot_species_distribution_modeling.py
for an example of using this dataset with scikit-learn
"""
data_home = get_data_home(data_home)
if not exists(data_home):
makedirs(data_home)
# Define parameters for the data files. These should not be changed
# unless the data model changes. They will be saved in the npz file
# with the downloaded data.
extra_params = dict(x_left_lower_corner=-94.8,
Nx=1212,
y_left_lower_corner=-56.05,
Ny=1592,
grid_size=0.05)
dtype = np.int16
if not exists(join(data_home, DATA_ARCHIVE_NAME)):
print('Downloading species data from %s to %s' % (SAMPLES_URL,
data_home))
X = np.load(BytesIO(urlopen(SAMPLES_URL).read()))
for f in X.files:
fhandle = BytesIO(X[f])
if 'train' in f:
train = _load_csv(fhandle)
if 'test' in f:
test = _load_csv(fhandle)
print('Downloading coverage data from %s to %s' % (COVERAGES_URL,
data_home))
X = np.load(BytesIO(urlopen(COVERAGES_URL).read()))
coverages = []
for f in X.files:
fhandle = BytesIO(X[f])
print(' - converting', f)
coverages.append(_load_coverage(fhandle))
coverages = np.asarray(coverages, dtype=dtype)
bunch = Bunch(coverages=coverages,
test=test,
train=train,
**extra_params)
joblib.dump(bunch, join(data_home, DATA_ARCHIVE_NAME), compress=9)
else:
bunch = joblib.load(join(data_home, DATA_ARCHIVE_NAME))
return bunch
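# ---------------------------------------------------------------------------
# Usage sketch: `_construct_grids_example` is a hypothetical helper added for
# illustration only (not part of the scikit-learn API and never called here).
# It builds the longitude/latitude grid for a toy Batch-like object without
# downloading the real coverages; the numbers are made up.
def _construct_grids_example():
    toy = Bunch(x_left_lower_corner=-94.8, y_left_lower_corner=-56.05,
                Nx=4, Ny=3, grid_size=0.05)
    xgrid, ygrid = construct_grids(toy)
    return xgrid, ygrid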
| bsd-3-clause |
valexandersaulys/prudential_insurance_kaggle | venv/lib/python2.7/site-packages/sklearn/ensemble/__init__.py | 217 | 1307 | """
The :mod:`sklearn.ensemble` module includes ensemble-based methods for
classification and regression.
"""
from .base import BaseEnsemble
from .forest import RandomForestClassifier
from .forest import RandomForestRegressor
from .forest import RandomTreesEmbedding
from .forest import ExtraTreesClassifier
from .forest import ExtraTreesRegressor
from .bagging import BaggingClassifier
from .bagging import BaggingRegressor
from .weight_boosting import AdaBoostClassifier
from .weight_boosting import AdaBoostRegressor
from .gradient_boosting import GradientBoostingClassifier
from .gradient_boosting import GradientBoostingRegressor
from .voting_classifier import VotingClassifier
from . import bagging
from . import forest
from . import weight_boosting
from . import gradient_boosting
from . import partial_dependence
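# Usage sketch: `_ensemble_usage_example` is a hypothetical helper added for
# illustration only (not exported and never called here).  The estimators
# imported above follow the usual scikit-learn fit/predict interface; the
# data below is synthetic.
def _ensemble_usage_example():
    import numpy as np
    rng = np.random.RandomState(0)
    X = rng.randn(100, 4)
    y = (X[:, 0] + X[:, 1] > 0).astype(int)
    clf = RandomForestClassifier(n_estimators=10, random_state=0).fit(X, y)
    return clf.predict(X[:5])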
__all__ = ["BaseEnsemble",
"RandomForestClassifier", "RandomForestRegressor",
"RandomTreesEmbedding", "ExtraTreesClassifier",
"ExtraTreesRegressor", "BaggingClassifier",
"BaggingRegressor", "GradientBoostingClassifier",
"GradientBoostingRegressor", "AdaBoostClassifier",
"AdaBoostRegressor", "VotingClassifier",
"bagging", "forest", "gradient_boosting",
"partial_dependence", "weight_boosting"]
| gpl-2.0 |
h2educ/scikit-learn | sklearn/metrics/tests/test_ranking.py | 127 | 40813 | from __future__ import division, print_function
import numpy as np
from itertools import product
import warnings
from scipy.sparse import csr_matrix
from sklearn import datasets
from sklearn import svm
from sklearn import ensemble
from sklearn.datasets import make_multilabel_classification
from sklearn.random_projection import sparse_random_matrix
from sklearn.utils.validation import check_array, check_consistent_length
from sklearn.utils.validation import check_random_state
from sklearn.utils.testing import assert_raises, clean_warning_registry
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns
from sklearn.metrics import auc
from sklearn.metrics import average_precision_score
from sklearn.metrics import coverage_error
from sklearn.metrics import label_ranking_average_precision_score
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import label_ranking_loss
from sklearn.metrics import roc_auc_score
from sklearn.metrics import roc_curve
from sklearn.metrics.base import UndefinedMetricWarning
###############################################################################
# Utilities for testing
def make_prediction(dataset=None, binary=False):
    """Make some classification predictions on a toy dataset using an SVC
    If binary is True, restrict to a binary classification problem instead of a
multiclass classification problem
"""
if dataset is None:
# import some data to play with
dataset = datasets.load_iris()
X = dataset.data
y = dataset.target
if binary:
# restrict to a binary classification task
X, y = X[y < 2], y[y < 2]
n_samples, n_features = X.shape
p = np.arange(n_samples)
rng = check_random_state(37)
rng.shuffle(p)
X, y = X[p], y[p]
half = int(n_samples / 2)
# add noisy features to make the problem harder and avoid perfect results
rng = np.random.RandomState(0)
X = np.c_[X, rng.randn(n_samples, 200 * n_features)]
# run classifier, get class probabilities and label predictions
clf = svm.SVC(kernel='linear', probability=True, random_state=0)
probas_pred = clf.fit(X[:half], y[:half]).predict_proba(X[half:])
if binary:
# only interested in probabilities of the positive case
# XXX: do we really want a special API for the binary case?
probas_pred = probas_pred[:, 1]
y_pred = clf.predict(X[half:])
y_true = y[half:]
return y_true, y_pred, probas_pred
###############################################################################
# Tests
def _auc(y_true, y_score):
"""Alternative implementation to check for correctness of
`roc_auc_score`."""
pos_label = np.unique(y_true)[1]
# Count the number of times positive samples are correctly ranked above
# negative samples.
pos = y_score[y_true == pos_label]
neg = y_score[y_true != pos_label]
diff_matrix = pos.reshape(1, -1) - neg.reshape(-1, 1)
n_correct = np.sum(diff_matrix > 0)
return n_correct / float(len(pos) * len(neg))
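# Worked example for the pairwise-ranking view of AUC used in _auc above
# (a sketch, not an assertion run by the test suite): with
# y_true = [0, 0, 1, 1] and y_score = [0.1, 0.4, 0.35, 0.8] there are
# 2 * 2 = 4 positive/negative pairs, of which 3 are ranked correctly
# (0.35 > 0.1, 0.8 > 0.1, 0.8 > 0.4), so _auc returns 3 / 4 = 0.75.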
def _average_precision(y_true, y_score):
"""Alternative implementation to check for correctness of
`average_precision_score`."""
pos_label = np.unique(y_true)[1]
n_pos = np.sum(y_true == pos_label)
order = np.argsort(y_score)[::-1]
y_score = y_score[order]
y_true = y_true[order]
score = 0
for i in range(len(y_score)):
if y_true[i] == pos_label:
# Compute precision up to document i
# i.e, percentage of relevant documents up to document i.
prec = 0
for j in range(0, i + 1):
if y_true[j] == pos_label:
prec += 1.0
prec /= (i + 1.0)
score += prec
return score / n_pos
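# Worked example for _average_precision above (a sketch, not an assertion run
# by the test suite): with y_true = [0, 0, 1, 1] and
# y_score = [0.1, 0.4, 0.35, 0.8] the positives land at ranks 1 and 3 of the
# descending-score ordering, so the helper averages the precisions 1 / 1 and
# 2 / 3 and returns (1 + 2 / 3) / 2 ~= 0.83.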
def test_roc_curve():
# Test Area under Receiver Operating Characteristic (ROC) curve
y_true, _, probas_pred = make_prediction(binary=True)
fpr, tpr, thresholds = roc_curve(y_true, probas_pred)
roc_auc = auc(fpr, tpr)
expected_auc = _auc(y_true, probas_pred)
assert_array_almost_equal(roc_auc, expected_auc, decimal=2)
assert_almost_equal(roc_auc, roc_auc_score(y_true, probas_pred))
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_end_points():
    # Make sure that roc_curve returns a curve starting at 0 and ending at
# 1 even in corner cases
rng = np.random.RandomState(0)
y_true = np.array([0] * 50 + [1] * 50)
y_pred = rng.randint(3, size=100)
fpr, tpr, thr = roc_curve(y_true, y_pred)
assert_equal(fpr[0], 0)
assert_equal(fpr[-1], 1)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thr.shape)
def test_roc_returns_consistency():
# Test whether the returned threshold matches up with tpr
# make small toy dataset
y_true, _, probas_pred = make_prediction(binary=True)
fpr, tpr, thresholds = roc_curve(y_true, probas_pred)
# use the given thresholds to determine the tpr
tpr_correct = []
for t in thresholds:
tp = np.sum((probas_pred >= t) & y_true)
p = np.sum(y_true)
tpr_correct.append(1.0 * tp / p)
# compare tpr and tpr_correct to see if the thresholds' order was correct
assert_array_almost_equal(tpr, tpr_correct, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_nonrepeating_thresholds():
# Test to ensure that we don't return spurious repeating thresholds.
# Duplicated thresholds can arise due to machine precision issues.
dataset = datasets.load_digits()
X = dataset['data']
y = dataset['target']
# This random forest classifier can only return probabilities
# significant to two decimal places
clf = ensemble.RandomForestClassifier(n_estimators=100, random_state=0)
# How well can the classifier predict whether a digit is less than 5?
# This task contributes floating point roundoff errors to the probabilities
train, test = slice(None, None, 2), slice(1, None, 2)
probas_pred = clf.fit(X[train], y[train]).predict_proba(X[test])
y_score = probas_pred[:, :5].sum(axis=1) # roundoff errors begin here
y_true = [yy < 5 for yy in y[test]]
# Check for repeating values in the thresholds
fpr, tpr, thresholds = roc_curve(y_true, y_score)
assert_equal(thresholds.size, np.unique(np.round(thresholds, 2)).size)
def test_roc_curve_multi():
# roc_curve not applicable for multi-class problems
y_true, _, probas_pred = make_prediction(binary=False)
assert_raises(ValueError, roc_curve, y_true, probas_pred)
def test_roc_curve_confidence():
# roc_curve for confidence scores
y_true, _, probas_pred = make_prediction(binary=True)
fpr, tpr, thresholds = roc_curve(y_true, probas_pred - 0.5)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.90, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_hard():
# roc_curve for hard decisions
y_true, pred, probas_pred = make_prediction(binary=True)
# always predict one
trivial_pred = np.ones(y_true.shape)
fpr, tpr, thresholds = roc_curve(y_true, trivial_pred)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.50, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
# always predict zero
trivial_pred = np.zeros(y_true.shape)
fpr, tpr, thresholds = roc_curve(y_true, trivial_pred)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.50, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
# hard decisions
fpr, tpr, thresholds = roc_curve(y_true, pred)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.78, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_one_label():
y_true = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
y_pred = [0, 1, 0, 1, 0, 1, 0, 1, 0, 1]
# assert there are warnings
w = UndefinedMetricWarning
fpr, tpr, thresholds = assert_warns(w, roc_curve, y_true, y_pred)
# all true labels, all fpr should be nan
assert_array_equal(fpr,
np.nan * np.ones(len(thresholds)))
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
# assert there are warnings
fpr, tpr, thresholds = assert_warns(w, roc_curve,
[1 - x for x in y_true],
y_pred)
# all negative labels, all tpr should be nan
assert_array_equal(tpr,
np.nan * np.ones(len(thresholds)))
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_toydata():
# Binary classification
y_true = [0, 1]
y_score = [0, 1]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1])
assert_array_almost_equal(fpr, [1, 1])
assert_almost_equal(roc_auc, 1.)
y_true = [0, 1]
y_score = [1, 0]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1, 1])
assert_array_almost_equal(fpr, [0, 0, 1])
assert_almost_equal(roc_auc, 0.)
y_true = [1, 0]
y_score = [1, 1]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1])
assert_array_almost_equal(fpr, [0, 1])
assert_almost_equal(roc_auc, 0.5)
y_true = [1, 0]
y_score = [1, 0]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1])
assert_array_almost_equal(fpr, [1, 1])
assert_almost_equal(roc_auc, 1.)
y_true = [1, 0]
y_score = [0.5, 0.5]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1])
assert_array_almost_equal(fpr, [0, 1])
assert_almost_equal(roc_auc, .5)
y_true = [0, 0]
y_score = [0.25, 0.75]
tpr, fpr, _ = roc_curve(y_true, y_score)
assert_raises(ValueError, roc_auc_score, y_true, y_score)
assert_array_almost_equal(tpr, [0., 0.5, 1.])
assert_array_almost_equal(fpr, [np.nan, np.nan, np.nan])
y_true = [1, 1]
y_score = [0.25, 0.75]
tpr, fpr, _ = roc_curve(y_true, y_score)
assert_raises(ValueError, roc_auc_score, y_true, y_score)
assert_array_almost_equal(tpr, [np.nan, np.nan])
assert_array_almost_equal(fpr, [0.5, 1.])
# Multi-label classification task
y_true = np.array([[0, 1], [0, 1]])
y_score = np.array([[0, 1], [0, 1]])
assert_raises(ValueError, roc_auc_score, y_true, y_score, average="macro")
assert_raises(ValueError, roc_auc_score, y_true, y_score,
average="weighted")
assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), 1.)
assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), 1.)
y_true = np.array([[0, 1], [0, 1]])
y_score = np.array([[0, 1], [1, 0]])
assert_raises(ValueError, roc_auc_score, y_true, y_score, average="macro")
assert_raises(ValueError, roc_auc_score, y_true, y_score,
average="weighted")
assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), 0.5)
assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), 0.5)
y_true = np.array([[1, 0], [0, 1]])
y_score = np.array([[0, 1], [1, 0]])
assert_almost_equal(roc_auc_score(y_true, y_score, average="macro"), 0)
assert_almost_equal(roc_auc_score(y_true, y_score, average="weighted"), 0)
assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), 0)
assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), 0)
y_true = np.array([[1, 0], [0, 1]])
y_score = np.array([[0.5, 0.5], [0.5, 0.5]])
assert_almost_equal(roc_auc_score(y_true, y_score, average="macro"), .5)
assert_almost_equal(roc_auc_score(y_true, y_score, average="weighted"), .5)
assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), .5)
assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), .5)
def test_auc():
# Test Area Under Curve (AUC) computation
x = [0, 1]
y = [0, 1]
assert_array_almost_equal(auc(x, y), 0.5)
x = [1, 0]
y = [0, 1]
assert_array_almost_equal(auc(x, y), 0.5)
x = [1, 0, 0]
y = [0, 1, 1]
assert_array_almost_equal(auc(x, y), 0.5)
x = [0, 1]
y = [1, 1]
assert_array_almost_equal(auc(x, y), 1)
x = [0, 0.5, 1]
y = [0, 0.5, 1]
assert_array_almost_equal(auc(x, y), 0.5)
def test_auc_duplicate_values():
# Test Area Under Curve (AUC) computation with duplicate values
# auc() was previously sorting the x and y arrays according to the indices
# from numpy.argsort(x), which was reordering the tied 0's in this example
# and resulting in an incorrect area computation. This test detects the
# error.
x = [-2.0, 0.0, 0.0, 0.0, 1.0]
y1 = [2.0, 0.0, 0.5, 1.0, 1.0]
y2 = [2.0, 1.0, 0.0, 0.5, 1.0]
y3 = [2.0, 1.0, 0.5, 0.0, 1.0]
for y in (y1, y2, y3):
assert_array_almost_equal(auc(x, y, reorder=True), 3.0)
def test_auc_errors():
# Incompatible shapes
assert_raises(ValueError, auc, [0.0, 0.5, 1.0], [0.1, 0.2])
# Too few x values
assert_raises(ValueError, auc, [0.0], [0.1])
# x is not in order
assert_raises(ValueError, auc, [1.0, 0.0, 0.5], [0.0, 0.0, 0.0])
def test_auc_score_non_binary_class():
# Test that roc_auc_score function returns an error when trying
# to compute AUC for non-binary class values.
rng = check_random_state(404)
y_pred = rng.rand(10)
# y_true contains only one class value
y_true = np.zeros(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
y_true = np.ones(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
y_true = -np.ones(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
# y_true contains three different class values
y_true = rng.randint(0, 3, size=10)
assert_raise_message(ValueError, "multiclass format is not supported",
roc_auc_score, y_true, y_pred)
clean_warning_registry()
with warnings.catch_warnings(record=True):
rng = check_random_state(404)
y_pred = rng.rand(10)
# y_true contains only one class value
y_true = np.zeros(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
y_true = np.ones(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
y_true = -np.ones(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
# y_true contains three different class values
y_true = rng.randint(0, 3, size=10)
assert_raise_message(ValueError, "multiclass format is not supported",
roc_auc_score, y_true, y_pred)
def test_precision_recall_curve():
y_true, _, probas_pred = make_prediction(binary=True)
_test_precision_recall_curve(y_true, probas_pred)
# Use {-1, 1} for labels; make sure original labels aren't modified
y_true[np.where(y_true == 0)] = -1
y_true_copy = y_true.copy()
_test_precision_recall_curve(y_true, probas_pred)
assert_array_equal(y_true_copy, y_true)
labels = [1, 0, 0, 1]
predict_probas = [1, 2, 3, 4]
p, r, t = precision_recall_curve(labels, predict_probas)
assert_array_almost_equal(p, np.array([0.5, 0.33333333, 0.5, 1., 1.]))
assert_array_almost_equal(r, np.array([1., 0.5, 0.5, 0.5, 0.]))
assert_array_almost_equal(t, np.array([1, 2, 3, 4]))
assert_equal(p.size, r.size)
assert_equal(p.size, t.size + 1)
def test_precision_recall_curve_pos_label():
y_true, _, probas_pred = make_prediction(binary=False)
pos_label = 2
p, r, thresholds = precision_recall_curve(y_true,
probas_pred[:, pos_label],
pos_label=pos_label)
p2, r2, thresholds2 = precision_recall_curve(y_true == pos_label,
probas_pred[:, pos_label])
assert_array_almost_equal(p, p2)
assert_array_almost_equal(r, r2)
assert_array_almost_equal(thresholds, thresholds2)
assert_equal(p.size, r.size)
assert_equal(p.size, thresholds.size + 1)
def _test_precision_recall_curve(y_true, probas_pred):
    # Test Precision-Recall and area under the PR curve
p, r, thresholds = precision_recall_curve(y_true, probas_pred)
precision_recall_auc = auc(r, p)
assert_array_almost_equal(precision_recall_auc, 0.85, 2)
assert_array_almost_equal(precision_recall_auc,
average_precision_score(y_true, probas_pred))
assert_almost_equal(_average_precision(y_true, probas_pred),
precision_recall_auc, 1)
assert_equal(p.size, r.size)
assert_equal(p.size, thresholds.size + 1)
# Smoke test in the case of proba having only one value
p, r, thresholds = precision_recall_curve(y_true,
np.zeros_like(probas_pred))
precision_recall_auc = auc(r, p)
assert_array_almost_equal(precision_recall_auc, 0.75, 3)
assert_equal(p.size, r.size)
assert_equal(p.size, thresholds.size + 1)
def test_precision_recall_curve_errors():
# Contains non-binary labels
assert_raises(ValueError, precision_recall_curve,
[0, 1, 2], [[0.0], [1.0], [1.0]])
def test_precision_recall_curve_toydata():
with np.errstate(all="raise"):
# Binary classification
y_true = [0, 1]
y_score = [0, 1]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [1, 1])
assert_array_almost_equal(r, [1, 0])
assert_almost_equal(auc_prc, 1.)
y_true = [0, 1]
y_score = [1, 0]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [0.5, 0., 1.])
assert_array_almost_equal(r, [1., 0., 0.])
assert_almost_equal(auc_prc, 0.25)
y_true = [1, 0]
y_score = [1, 1]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [0.5, 1])
assert_array_almost_equal(r, [1., 0])
assert_almost_equal(auc_prc, .75)
y_true = [1, 0]
y_score = [1, 0]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [1, 1])
assert_array_almost_equal(r, [1, 0])
assert_almost_equal(auc_prc, 1.)
y_true = [1, 0]
y_score = [0.5, 0.5]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [0.5, 1])
assert_array_almost_equal(r, [1, 0.])
assert_almost_equal(auc_prc, .75)
y_true = [0, 0]
y_score = [0.25, 0.75]
assert_raises(Exception, precision_recall_curve, y_true, y_score)
assert_raises(Exception, average_precision_score, y_true, y_score)
y_true = [1, 1]
y_score = [0.25, 0.75]
p, r, _ = precision_recall_curve(y_true, y_score)
assert_almost_equal(average_precision_score(y_true, y_score), 1.)
assert_array_almost_equal(p, [1., 1., 1.])
assert_array_almost_equal(r, [1, 0.5, 0.])
# Multi-label classification task
y_true = np.array([[0, 1], [0, 1]])
y_score = np.array([[0, 1], [0, 1]])
assert_raises(Exception, average_precision_score, y_true, y_score,
average="macro")
assert_raises(Exception, average_precision_score, y_true, y_score,
average="weighted")
assert_almost_equal(average_precision_score(y_true, y_score,
average="samples"), 1.)
assert_almost_equal(average_precision_score(y_true, y_score,
average="micro"), 1.)
y_true = np.array([[0, 1], [0, 1]])
y_score = np.array([[0, 1], [1, 0]])
assert_raises(Exception, average_precision_score, y_true, y_score,
average="macro")
assert_raises(Exception, average_precision_score, y_true, y_score,
average="weighted")
assert_almost_equal(average_precision_score(y_true, y_score,
average="samples"), 0.625)
assert_almost_equal(average_precision_score(y_true, y_score,
average="micro"), 0.625)
y_true = np.array([[1, 0], [0, 1]])
y_score = np.array([[0, 1], [1, 0]])
assert_almost_equal(average_precision_score(y_true, y_score,
average="macro"), 0.25)
assert_almost_equal(average_precision_score(y_true, y_score,
average="weighted"), 0.25)
assert_almost_equal(average_precision_score(y_true, y_score,
average="samples"), 0.25)
assert_almost_equal(average_precision_score(y_true, y_score,
average="micro"), 0.25)
y_true = np.array([[1, 0], [0, 1]])
y_score = np.array([[0.5, 0.5], [0.5, 0.5]])
assert_almost_equal(average_precision_score(y_true, y_score,
average="macro"), 0.75)
assert_almost_equal(average_precision_score(y_true, y_score,
average="weighted"), 0.75)
assert_almost_equal(average_precision_score(y_true, y_score,
average="samples"), 0.75)
assert_almost_equal(average_precision_score(y_true, y_score,
average="micro"), 0.75)
def test_score_scale_invariance():
# Test that average_precision_score and roc_auc_score are invariant by
# the scaling or shifting of probabilities
y_true, _, probas_pred = make_prediction(binary=True)
roc_auc = roc_auc_score(y_true, probas_pred)
roc_auc_scaled = roc_auc_score(y_true, 100 * probas_pred)
roc_auc_shifted = roc_auc_score(y_true, probas_pred - 10)
assert_equal(roc_auc, roc_auc_scaled)
assert_equal(roc_auc, roc_auc_shifted)
pr_auc = average_precision_score(y_true, probas_pred)
pr_auc_scaled = average_precision_score(y_true, 100 * probas_pred)
pr_auc_shifted = average_precision_score(y_true, probas_pred - 10)
assert_equal(pr_auc, pr_auc_scaled)
assert_equal(pr_auc, pr_auc_shifted)
def check_lrap_toy(lrap_score):
    # Check on several small examples that it works
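    # Added illustrative derivation (not part of the original test): for
    # y_true = [[1, 0, 1]] and y_score = [[0.25, 0.5, 0.75]], the relevant
    # labels sit at ranks 1 and 3 when scores are sorted decreasingly; they
    # contribute 1/1 and 2/3 respectively, so the expected score is
    # (1 / 1 + 2 / 3) / 2, which is the value asserted further below.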
assert_almost_equal(lrap_score([[0, 1]], [[0.25, 0.75]]), 1)
assert_almost_equal(lrap_score([[0, 1]], [[0.75, 0.25]]), 1 / 2)
assert_almost_equal(lrap_score([[1, 1]], [[0.75, 0.25]]), 1)
assert_almost_equal(lrap_score([[0, 0, 1]], [[0.25, 0.5, 0.75]]), 1)
assert_almost_equal(lrap_score([[0, 1, 0]], [[0.25, 0.5, 0.75]]), 1 / 2)
assert_almost_equal(lrap_score([[0, 1, 1]], [[0.25, 0.5, 0.75]]), 1)
assert_almost_equal(lrap_score([[1, 0, 0]], [[0.25, 0.5, 0.75]]), 1 / 3)
assert_almost_equal(lrap_score([[1, 0, 1]], [[0.25, 0.5, 0.75]]),
(2 / 3 + 1 / 1) / 2)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.25, 0.5, 0.75]]),
(2 / 3 + 1 / 2) / 2)
assert_almost_equal(lrap_score([[0, 0, 1]], [[0.75, 0.5, 0.25]]), 1 / 3)
assert_almost_equal(lrap_score([[0, 1, 0]], [[0.75, 0.5, 0.25]]), 1 / 2)
assert_almost_equal(lrap_score([[0, 1, 1]], [[0.75, 0.5, 0.25]]),
(1 / 2 + 2 / 3) / 2)
assert_almost_equal(lrap_score([[1, 0, 0]], [[0.75, 0.5, 0.25]]), 1)
assert_almost_equal(lrap_score([[1, 0, 1]], [[0.75, 0.5, 0.25]]),
(1 + 2 / 3) / 2)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.75, 0.5, 0.25]]), 1)
assert_almost_equal(lrap_score([[1, 1, 1]], [[0.75, 0.5, 0.25]]), 1)
assert_almost_equal(lrap_score([[0, 0, 1]], [[0.5, 0.75, 0.25]]), 1 / 3)
assert_almost_equal(lrap_score([[0, 1, 0]], [[0.5, 0.75, 0.25]]), 1)
assert_almost_equal(lrap_score([[0, 1, 1]], [[0.5, 0.75, 0.25]]),
(1 + 2 / 3) / 2)
assert_almost_equal(lrap_score([[1, 0, 0]], [[0.5, 0.75, 0.25]]), 1 / 2)
assert_almost_equal(lrap_score([[1, 0, 1]], [[0.5, 0.75, 0.25]]),
(1 / 2 + 2 / 3) / 2)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.5, 0.75, 0.25]]), 1)
assert_almost_equal(lrap_score([[1, 1, 1]], [[0.5, 0.75, 0.25]]), 1)
# Tie handling
assert_almost_equal(lrap_score([[1, 0]], [[0.5, 0.5]]), 0.5)
assert_almost_equal(lrap_score([[0, 1]], [[0.5, 0.5]]), 0.5)
assert_almost_equal(lrap_score([[1, 1]], [[0.5, 0.5]]), 1)
assert_almost_equal(lrap_score([[0, 0, 1]], [[0.25, 0.5, 0.5]]), 0.5)
assert_almost_equal(lrap_score([[0, 1, 0]], [[0.25, 0.5, 0.5]]), 0.5)
assert_almost_equal(lrap_score([[0, 1, 1]], [[0.25, 0.5, 0.5]]), 1)
assert_almost_equal(lrap_score([[1, 0, 0]], [[0.25, 0.5, 0.5]]), 1 / 3)
assert_almost_equal(lrap_score([[1, 0, 1]], [[0.25, 0.5, 0.5]]),
(2 / 3 + 1 / 2) / 2)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.25, 0.5, 0.5]]),
(2 / 3 + 1 / 2) / 2)
assert_almost_equal(lrap_score([[1, 1, 1]], [[0.25, 0.5, 0.5]]), 1)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.5, 0.5, 0.5]]), 2 / 3)
assert_almost_equal(lrap_score([[1, 1, 1, 0]], [[0.5, 0.5, 0.5, 0.5]]),
3 / 4)
def check_zero_or_all_relevant_labels(lrap_score):
random_state = check_random_state(0)
for n_labels in range(2, 5):
y_score = random_state.uniform(size=(1, n_labels))
y_score_ties = np.zeros_like(y_score)
# No relevant labels
y_true = np.zeros((1, n_labels))
assert_equal(lrap_score(y_true, y_score), 1.)
assert_equal(lrap_score(y_true, y_score_ties), 1.)
# Only relevant labels
y_true = np.ones((1, n_labels))
assert_equal(lrap_score(y_true, y_score), 1.)
assert_equal(lrap_score(y_true, y_score_ties), 1.)
# Degenerate case: only one label
assert_almost_equal(lrap_score([[1], [0], [1], [0]],
[[0.5], [0.5], [0.5], [0.5]]), 1.)
def check_lrap_error_raised(lrap_score):
    # Raise a ValueError if the input does not have an appropriate format
assert_raises(ValueError, lrap_score,
[0, 1, 0], [0.25, 0.3, 0.2])
assert_raises(ValueError, lrap_score, [0, 1, 2],
[[0.25, 0.75, 0.0], [0.7, 0.3, 0.0], [0.8, 0.2, 0.0]])
assert_raises(ValueError, lrap_score, [(0), (1), (2)],
[[0.25, 0.75, 0.0], [0.7, 0.3, 0.0], [0.8, 0.2, 0.0]])
    # Check that y_true.shape != y_score.shape raises the proper exception
assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [0, 1])
assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [[0, 1]])
assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [[0], [1]])
assert_raises(ValueError, lrap_score, [[0, 1]], [[0, 1], [0, 1]])
assert_raises(ValueError, lrap_score, [[0], [1]], [[0, 1], [0, 1]])
assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [[0], [1]])
def check_lrap_only_ties(lrap_score):
# Check tie handling in score
# Basic check with only ties and increasing label space
for n_labels in range(2, 10):
y_score = np.ones((1, n_labels))
        # Check for a growing number of consecutive relevant labels
for n_relevant in range(1, n_labels):
# Check for a bunch of positions
for pos in range(n_labels - n_relevant):
y_true = np.zeros((1, n_labels))
y_true[0, pos:pos + n_relevant] = 1
assert_almost_equal(lrap_score(y_true, y_score),
n_relevant / n_labels)
def check_lrap_without_tie_and_increasing_score(lrap_score):
    # Check that label ranking average precision works without ties:
    # basic check with an increasing label space size and strictly
    # decreasing scores
for n_labels in range(2, 10):
y_score = n_labels - (np.arange(n_labels).reshape((1, n_labels)) + 1)
# First and last
y_true = np.zeros((1, n_labels))
y_true[0, 0] = 1
y_true[0, -1] = 1
assert_almost_equal(lrap_score(y_true, y_score),
(2 / n_labels + 1) / 2)
        # Check for a growing number of consecutive relevant labels
for n_relevant in range(1, n_labels):
            # Check for a bunch of positions
for pos in range(n_labels - n_relevant):
y_true = np.zeros((1, n_labels))
y_true[0, pos:pos + n_relevant] = 1
assert_almost_equal(lrap_score(y_true, y_score),
sum((r + 1) / ((pos + r + 1) * n_relevant)
for r in range(n_relevant)))
def _my_lrap(y_true, y_score):
"""Simple implementation of label ranking average precision"""
check_consistent_length(y_true, y_score)
y_true = check_array(y_true)
y_score = check_array(y_score)
n_samples, n_labels = y_true.shape
score = np.empty((n_samples, ))
for i in range(n_samples):
        # The best rank corresponds to 1. Ranks higher than 1 are worse.
        # The best inverse ranking corresponds to n_labels.
unique_rank, inv_rank = np.unique(y_score[i], return_inverse=True)
n_ranks = unique_rank.size
rank = n_ranks - inv_rank
        # Ranks need to be corrected to take ties into account,
        # e.g. two labels tied for rank 1 are both assigned rank 2.
corr_rank = np.bincount(rank, minlength=n_ranks + 1).cumsum()
rank = corr_rank[rank]
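        # Added worked example: for y_score[i] = [0.5, 0.5, 0.2], np.unique
        # yields inv_rank = [1, 1, 0], the uncorrected ranks are [1, 1, 2],
        # and corr_rank = [0, 2, 3] maps them to [2, 2, 3], i.e. the two tied
        # top scores both end up with rank 2.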
relevant = y_true[i].nonzero()[0]
if relevant.size == 0 or relevant.size == n_labels:
score[i] = 1
continue
score[i] = 0.
for label in relevant:
            # Count the number of relevant labels with a rank at least as
            # good (i.e. a smaller or equal rank), including the label itself.
n_ranked_above = sum(rank[r] <= rank[label] for r in relevant)
# Weight by the rank of the actual label
score[i] += n_ranked_above / rank[label]
score[i] /= relevant.size
return score.mean()
def check_alternative_lrap_implementation(lrap_score, n_classes=5,
n_samples=20, random_state=0):
_, y_true = make_multilabel_classification(n_features=1,
allow_unlabeled=False,
random_state=random_state,
n_classes=n_classes,
n_samples=n_samples)
# Score with ties
y_score = sparse_random_matrix(n_components=y_true.shape[0],
n_features=y_true.shape[1],
random_state=random_state)
if hasattr(y_score, "toarray"):
y_score = y_score.toarray()
score_lrap = label_ranking_average_precision_score(y_true, y_score)
score_my_lrap = _my_lrap(y_true, y_score)
assert_almost_equal(score_lrap, score_my_lrap)
# Uniform score
random_state = check_random_state(random_state)
y_score = random_state.uniform(size=(n_samples, n_classes))
score_lrap = label_ranking_average_precision_score(y_true, y_score)
score_my_lrap = _my_lrap(y_true, y_score)
assert_almost_equal(score_lrap, score_my_lrap)
def test_label_ranking_avp():
for fn in [label_ranking_average_precision_score, _my_lrap]:
yield check_lrap_toy, fn
yield check_lrap_without_tie_and_increasing_score, fn
yield check_lrap_only_ties, fn
yield check_zero_or_all_relevant_labels, fn
yield check_lrap_error_raised, label_ranking_average_precision_score
for n_samples, n_classes, random_state in product((1, 2, 8, 20),
(2, 5, 10),
range(1)):
yield (check_alternative_lrap_implementation,
label_ranking_average_precision_score,
n_classes, n_samples, random_state)
def test_coverage_error():
# Toy case
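    # Added illustration: coverage_error([[1, 0, 1]], [[0.25, 0.5, 0.75]])
    # equals 3 below because the worst-ranked relevant label (index 0, score
    # 0.25) is only covered once the top three scores are included.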
assert_almost_equal(coverage_error([[0, 1]], [[0.25, 0.75]]), 1)
assert_almost_equal(coverage_error([[0, 1]], [[0.75, 0.25]]), 2)
assert_almost_equal(coverage_error([[1, 1]], [[0.75, 0.25]]), 2)
assert_almost_equal(coverage_error([[0, 0]], [[0.75, 0.25]]), 0)
assert_almost_equal(coverage_error([[0, 0, 0]], [[0.25, 0.5, 0.75]]), 0)
assert_almost_equal(coverage_error([[0, 0, 1]], [[0.25, 0.5, 0.75]]), 1)
assert_almost_equal(coverage_error([[0, 1, 0]], [[0.25, 0.5, 0.75]]), 2)
assert_almost_equal(coverage_error([[0, 1, 1]], [[0.25, 0.5, 0.75]]), 2)
assert_almost_equal(coverage_error([[1, 0, 0]], [[0.25, 0.5, 0.75]]), 3)
assert_almost_equal(coverage_error([[1, 0, 1]], [[0.25, 0.5, 0.75]]), 3)
assert_almost_equal(coverage_error([[1, 1, 0]], [[0.25, 0.5, 0.75]]), 3)
assert_almost_equal(coverage_error([[1, 1, 1]], [[0.25, 0.5, 0.75]]), 3)
assert_almost_equal(coverage_error([[0, 0, 0]], [[0.75, 0.5, 0.25]]), 0)
assert_almost_equal(coverage_error([[0, 0, 1]], [[0.75, 0.5, 0.25]]), 3)
assert_almost_equal(coverage_error([[0, 1, 0]], [[0.75, 0.5, 0.25]]), 2)
assert_almost_equal(coverage_error([[0, 1, 1]], [[0.75, 0.5, 0.25]]), 3)
assert_almost_equal(coverage_error([[1, 0, 0]], [[0.75, 0.5, 0.25]]), 1)
assert_almost_equal(coverage_error([[1, 0, 1]], [[0.75, 0.5, 0.25]]), 3)
assert_almost_equal(coverage_error([[1, 1, 0]], [[0.75, 0.5, 0.25]]), 2)
assert_almost_equal(coverage_error([[1, 1, 1]], [[0.75, 0.5, 0.25]]), 3)
assert_almost_equal(coverage_error([[0, 0, 0]], [[0.5, 0.75, 0.25]]), 0)
assert_almost_equal(coverage_error([[0, 0, 1]], [[0.5, 0.75, 0.25]]), 3)
assert_almost_equal(coverage_error([[0, 1, 0]], [[0.5, 0.75, 0.25]]), 1)
assert_almost_equal(coverage_error([[0, 1, 1]], [[0.5, 0.75, 0.25]]), 3)
assert_almost_equal(coverage_error([[1, 0, 0]], [[0.5, 0.75, 0.25]]), 2)
assert_almost_equal(coverage_error([[1, 0, 1]], [[0.5, 0.75, 0.25]]), 3)
assert_almost_equal(coverage_error([[1, 1, 0]], [[0.5, 0.75, 0.25]]), 2)
assert_almost_equal(coverage_error([[1, 1, 1]], [[0.5, 0.75, 0.25]]), 3)
    # Non-trivial cases
assert_almost_equal(coverage_error([[0, 1, 0], [1, 1, 0]],
[[0.1, 10., -3], [0, 1, 3]]),
(1 + 3) / 2.)
assert_almost_equal(coverage_error([[0, 1, 0], [1, 1, 0], [0, 1, 1]],
[[0.1, 10, -3], [0, 1, 3], [0, 2, 0]]),
(1 + 3 + 3) / 3.)
assert_almost_equal(coverage_error([[0, 1, 0], [1, 1, 0], [0, 1, 1]],
[[0.1, 10, -3], [3, 1, 3], [0, 2, 0]]),
(1 + 3 + 3) / 3.)
def test_coverage_tie_handling():
assert_almost_equal(coverage_error([[0, 0]], [[0.5, 0.5]]), 0)
assert_almost_equal(coverage_error([[1, 0]], [[0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[0, 1]], [[0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[1, 1]], [[0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[0, 0, 0]], [[0.25, 0.5, 0.5]]), 0)
assert_almost_equal(coverage_error([[0, 0, 1]], [[0.25, 0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[0, 1, 0]], [[0.25, 0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[0, 1, 1]], [[0.25, 0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[1, 0, 0]], [[0.25, 0.5, 0.5]]), 3)
assert_almost_equal(coverage_error([[1, 0, 1]], [[0.25, 0.5, 0.5]]), 3)
assert_almost_equal(coverage_error([[1, 1, 0]], [[0.25, 0.5, 0.5]]), 3)
assert_almost_equal(coverage_error([[1, 1, 1]], [[0.25, 0.5, 0.5]]), 3)
def test_label_ranking_loss():
assert_almost_equal(label_ranking_loss([[0, 1]], [[0.25, 0.75]]), 0)
assert_almost_equal(label_ranking_loss([[0, 1]], [[0.75, 0.25]]), 1)
assert_almost_equal(label_ranking_loss([[0, 0, 1]], [[0.25, 0.5, 0.75]]),
0)
assert_almost_equal(label_ranking_loss([[0, 1, 0]], [[0.25, 0.5, 0.75]]),
1 / 2)
assert_almost_equal(label_ranking_loss([[0, 1, 1]], [[0.25, 0.5, 0.75]]),
0)
assert_almost_equal(label_ranking_loss([[1, 0, 0]], [[0.25, 0.5, 0.75]]),
2 / 2)
assert_almost_equal(label_ranking_loss([[1, 0, 1]], [[0.25, 0.5, 0.75]]),
1 / 2)
assert_almost_equal(label_ranking_loss([[1, 1, 0]], [[0.25, 0.5, 0.75]]),
2 / 2)
# Undefined metrics - the ranking doesn't matter
assert_almost_equal(label_ranking_loss([[0, 0]], [[0.75, 0.25]]), 0)
assert_almost_equal(label_ranking_loss([[1, 1]], [[0.75, 0.25]]), 0)
assert_almost_equal(label_ranking_loss([[0, 0]], [[0.5, 0.5]]), 0)
assert_almost_equal(label_ranking_loss([[1, 1]], [[0.5, 0.5]]), 0)
assert_almost_equal(label_ranking_loss([[0, 0, 0]], [[0.5, 0.75, 0.25]]),
0)
assert_almost_equal(label_ranking_loss([[1, 1, 1]], [[0.5, 0.75, 0.25]]),
0)
assert_almost_equal(label_ranking_loss([[0, 0, 0]], [[0.25, 0.5, 0.5]]),
0)
assert_almost_equal(label_ranking_loss([[1, 1, 1]], [[0.25, 0.5, 0.5]]), 0)
    # Non-trivial cases
assert_almost_equal(label_ranking_loss([[0, 1, 0], [1, 1, 0]],
[[0.1, 10., -3], [0, 1, 3]]),
(0 + 2 / 2) / 2.)
assert_almost_equal(label_ranking_loss(
[[0, 1, 0], [1, 1, 0], [0, 1, 1]],
[[0.1, 10, -3], [0, 1, 3], [0, 2, 0]]),
(0 + 2 / 2 + 1 / 2) / 3.)
assert_almost_equal(label_ranking_loss(
[[0, 1, 0], [1, 1, 0], [0, 1, 1]],
[[0.1, 10, -3], [3, 1, 3], [0, 2, 0]]),
(0 + 2 / 2 + 1 / 2) / 3.)
# Sparse csr matrices
assert_almost_equal(label_ranking_loss(
csr_matrix(np.array([[0, 1, 0], [1, 1, 0]])),
[[0.1, 10, -3], [3, 1, 3]]),
(0 + 2 / 2) / 2.)
def test_ranking_appropriate_input_shape():
    # Check that y_true.shape != y_score.shape raises the proper exception
assert_raises(ValueError, label_ranking_loss, [[0, 1], [0, 1]], [0, 1])
assert_raises(ValueError, label_ranking_loss, [[0, 1], [0, 1]], [[0, 1]])
assert_raises(ValueError, label_ranking_loss,
[[0, 1], [0, 1]], [[0], [1]])
assert_raises(ValueError, label_ranking_loss, [[0, 1]], [[0, 1], [0, 1]])
assert_raises(ValueError, label_ranking_loss,
[[0], [1]], [[0, 1], [0, 1]])
assert_raises(ValueError, label_ranking_loss, [[0, 1], [0, 1]], [[0], [1]])
def test_ranking_loss_ties_handling():
# Tie handling
assert_almost_equal(label_ranking_loss([[1, 0]], [[0.5, 0.5]]), 1)
assert_almost_equal(label_ranking_loss([[0, 1]], [[0.5, 0.5]]), 1)
assert_almost_equal(label_ranking_loss([[0, 0, 1]], [[0.25, 0.5, 0.5]]),
1 / 2)
assert_almost_equal(label_ranking_loss([[0, 1, 0]], [[0.25, 0.5, 0.5]]),
1 / 2)
assert_almost_equal(label_ranking_loss([[0, 1, 1]], [[0.25, 0.5, 0.5]]), 0)
assert_almost_equal(label_ranking_loss([[1, 0, 0]], [[0.25, 0.5, 0.5]]), 1)
assert_almost_equal(label_ranking_loss([[1, 0, 1]], [[0.25, 0.5, 0.5]]), 1)
assert_almost_equal(label_ranking_loss([[1, 1, 0]], [[0.25, 0.5, 0.5]]), 1)
| bsd-3-clause |
timcera/wdmtoolbox | wdmtoolbox/wdmtoolbox.py | 1 | 19910 | # -*- coding: utf-8 -*-
"""The component functions of the wdmtoolbox.
Used to manipulate Watershed Data Management files for time-series.
"""
from __future__ import print_function
import datetime
import os
import sys
# Third party imports
from builtins import range, str
from collections import OrderedDict
import mando
import numpy as np
import pandas as pd
from dateutil.parser import parse as dateparser
from mando.rst_text_formatter import RSTHelpFormatter
# Local imports
# Load in WDM subroutines
from tstoolbox import tsutils
from . import wdmutil
WDM = wdmutil.WDM()
def _describedsn(wdmpath, dsn):
"""Private function used by routines that need a description of DSN."""
return WDM.describe_dsn(wdmpath, int(dsn))
def _copy_dsn(inwdmpath, indsn, outwdmpath, outdsn):
"""Copy a DSN label and data."""
_copy_dsn_label(inwdmpath, indsn, outwdmpath, outdsn)
nts = WDM.read_dsn(inwdmpath, indsn)
if len(nts) > 0:
WDM.write_dsn(outwdmpath, int(outdsn), nts)
def _copy_dsn_label(inwdmpath, indsn, outwdmpath, outdsn):
"""Copy a DSN label."""
WDM.copydsnlabel(inwdmpath, indsn, outwdmpath, outdsn)
def _copydsn_core(inwdmpath, indsn, outwdmpath, outdsn, func, overwrite=False):
if overwrite is True:
deletedsn(outwdmpath, outdsn)
if inwdmpath == outwdmpath:
import tempfile
tempdir = tempfile.mkdtemp()
tmpwdmpath = os.path.join(tempdir, "temp.wdm")
createnewwdm(tmpwdmpath)
func(inwdmpath, indsn, tmpwdmpath, outdsn)
func(tmpwdmpath, outdsn, outwdmpath, outdsn)
os.remove(tmpwdmpath)
os.removedirs(tempdir)
else:
func(inwdmpath, indsn, outwdmpath, outdsn)
@mando.command(formatter_class=RSTHelpFormatter, doctype="numpy")
@tsutils.doc(tsutils.docstrings)
def copydsnlabel(inwdmpath, indsn, outwdmpath, outdsn, overwrite=False):
"""Make a copy of a DSN label (no data).
Parameters
----------
inwdmpath
Path to input WDM
file.
indsn
Source
DSN.
outwdmpath
Path to clean copy WDM
file.
outdsn
Target
DSN.
overwrite
Whether to overwrite the target DSN if it
exists.
"""
_copydsn_core(
        inwdmpath, indsn, outwdmpath, outdsn, _copy_dsn_label, overwrite=overwrite
)
@mando.command(formatter_class=RSTHelpFormatter, doctype="numpy")
@tsutils.doc(tsutils.docstrings)
def copydsn(inwdmpath, indsn, outwdmpath, outdsn, overwrite=False):
"""Make a copy of a DSN.
Parameters
----------
inwdmpath
Path to input WDM
file.
indsn
Source
DSN.
outwdmpath
Path to clean copy WDM
file.
outdsn
Target
DSN.
overwrite
Whether to overwrite the target DSN if it
exists.
"""
    _copydsn_core(inwdmpath, indsn, outwdmpath, outdsn, _copy_dsn, overwrite=overwrite)
@mando.command(formatter_class=RSTHelpFormatter, doctype="numpy")
@tsutils.doc(tsutils.docstrings)
def cleancopywdm(inwdmpath, outwdmpath, overwrite=False):
"""Make a clean copy of a WDM file.
Parameters
----------
inwdmpath
Path to input WDM
file.
outwdmpath
Path to clean copy WDM
file.
overwrite
Whether to overwrite an existing
outwdmpath.
"""
if inwdmpath == outwdmpath:
raise ValueError(
tsutils.error_wrapper(
"""
The "inwdmpath" cannot be the same as "outwdmpath".
"""
)
)
createnewwdm(outwdmpath, overwrite=overwrite)
activedsn = []
for i in range(1, 32000):
try:
activedsn.append(_describedsn(inwdmpath, i)["dsn"])
except wdmutil.WDMError:
continue
# Copy labels (which copies DSN metadata and data)
for i in activedsn:
try:
_copy_dsn(inwdmpath, i, outwdmpath, i)
except wdmutil.WDMError:
pass
@mando.command(formatter_class=RSTHelpFormatter, doctype="numpy")
@tsutils.doc(tsutils.docstrings)
def renumberdsn(wdmpath, olddsn, newdsn):
"""Renumber olddsn to newdsn.
Parameters
----------
wdmpath : str
Path and WDM
filename.
olddsn : int
Old DSN to
renumber.
newdsn : int
New DSN to change old DSN
to.
"""
WDM.renumber_dsn(wdmpath, olddsn, newdsn)
@mando.command(formatter_class=RSTHelpFormatter, doctype="numpy")
@tsutils.doc(tsutils.docstrings)
def deletedsn(wdmpath, dsn):
"""Delete DSN.
Parameters
----------
wdmpath
Path and WDM
filename.
dsn
DSN to
delete.
"""
WDM.delete_dsn(wdmpath, dsn)
@mando.command(formatter_class=RSTHelpFormatter, doctype="numpy")
@tsutils.doc(tsutils.docstrings)
def wdmtoswmm5rdii(wdmpath, *dsns, **kwds):
"""Print out DSN data to the screen in SWMM5 RDII format.
Parameters
----------
wdmpath
Path and WDM
filename.
dsns
The Data Set Numbers in the WDM
file.
{start_date}
{end_date}
"""
start_date = kwds.setdefault("start_date", None)
end_date = kwds.setdefault("end_date", None)
    # Need to make sure that all DSNs have the same interval and all are
    # within the start and end dates.
collect_tcodes = {}
collect_tsteps = {}
collect_keys = []
for dsn in dsns:
dsn_desc = _describedsn(wdmpath, dsn)
collect_tcodes[dsn_desc["tcode"]] = 1
collect_tsteps[dsn_desc["tstep"]] = 1
if start_date:
assert dateparser(start_date) >= dateparser(dsn_desc["start_date"])
if end_date:
assert dateparser(end_date) <= dateparser(dsn_desc["end_date"])
collect_keys.append((dsn_desc["dsn"], dsn_desc["location"]))
assert len(collect_tcodes) == 1
assert len(collect_tsteps) == 1
collect_tcodes = list(collect_tcodes.keys())[0]
collect_tsteps = list(collect_tsteps.keys())[0]
collected_start_dates = []
collected_end_dates = []
collected_ts = {}
for dsn, location in collect_keys:
tmp = WDM.read_dsn(wdmpath, int(dsn), start_date=start_date, end_date=end_date)
collected_start_dates.append(tmp.index[0])
collected_end_dates.append(tmp.index[-1])
collected_ts[(dsn, location)] = tmp.values
maptcode = {1: 1, 2: 60, 3: 3600, 4: 86400}
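    # Descriptive note: maptcode converts the WDM time-unit code (1=second,
    # 2=minute, 3=hour, 4=day) into seconds, so the header line printed below
    # is the time step expressed in seconds.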
print("SWMM5")
print("RDII dump of DSNS {0} from {1}".format(dsns, wdmpath))
print(maptcode[collect_tcodes] * collect_tsteps)
print(1)
print("FLOW CFS")
print(len(dsns))
for dsn, location in collect_keys:
print("{0}_{1}".format(dsn, location))
print("Node Year Mon Day Hr Min Sec Flow")
# Can pick any time series because they should all have the same interval
# and start and end dates.
for dex, date in enumerate(tmp.index):
for dsn, location in collect_keys:
print(
"{0}_{1} {2} {3:02} {4:02} {5:02} {6:02} {7:02} {8}".format(
dsn,
location,
date.year,
date.month,
date.day,
date.hour,
date.minute,
date.second,
collected_ts[(dsn, location)][dex],
)
)
def extract(*wdmpath, **kwds):
"""Print out DSN data to the screen with ISO-8601 dates.
This is the API version also used by 'extract_cli'
"""
# Adapt to both forms of presenting wdm files and DSNs
# Old form '... file.wdm 101 102 103 ...'
    # New form '... file.wdm,101 adifferentfile.wdm,101 ...'
try:
start_date = kwds.pop("start_date")
except KeyError:
start_date = None
try:
end_date = kwds.pop("end_date")
except KeyError:
end_date = None
if len(kwds) > 0:
raise ValueError(
tsutils.error_wrapper(
"""
The only allowed keywords are start_date and end_date. You
have given {0}.
""".format(
kwds
)
)
)
labels = []
for lab in wdmpath:
if "," in str(lab):
labels.append(lab.split(","))
else:
if lab == wdmpath[0]:
continue
labels.append([wdmpath[0], lab])
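    # Added note: after this loop both call styles yield the same structure,
    # e.g. ('file.wdm', '101', '102') and ('file.wdm,101', 'file.wdm,102')
    # both become [['file.wdm', '101'], ['file.wdm', '102']].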
result = pd.DataFrame()
cnt = 0
for ind, lab in enumerate(labels):
wdmpath = lab[0]
dsn = lab[1]
nts = WDM.read_dsn(wdmpath, int(dsn), start_date=start_date, end_date=end_date)
if nts.columns[0] in result.columns:
cnt = cnt + 1
nts.columns = ["{0}_{1}".format(nts.columns[0], cnt)]
result = result.join(nts, how="outer")
return tsutils.asbestfreq(result)
@mando.command("extract", formatter_class=RSTHelpFormatter, doctype="numpy")
@tsutils.doc(tsutils.docstrings)
def extract_cli(start_date=None, end_date=None, *wdmpath):
"""Print out DSN data to the screen with ISO-8601 dates.
Parameters
----------
wdmpath
Path and WDM filename followed by space separated list of
DSNs. For example::
'file.wdm 234 345 456'
OR
`wdmpath` can be space separated sets of 'wdmpath,dsn'.
'file.wdm,101 file2.wdm,104 file.wdm,227'
{start_date}
{end_date}
"""
return tsutils._printiso(
extract(*wdmpath, start_date=start_date, end_date=end_date)
)
@mando.command(formatter_class=RSTHelpFormatter, doctype="numpy")
@tsutils.doc(tsutils.docstrings)
def wdmtostd(wdmpath, *dsns, **kwds): # start_date=None, end_date=None):
"""DEPRECATED: New scripts use 'extract'. Will be removed in the future."""
return extract(wdmpath, *dsns, **kwds)
@mando.command(formatter_class=RSTHelpFormatter, doctype="numpy")
@tsutils.doc(tsutils.docstrings)
def describedsn(wdmpath, dsn):
"""Print out a description of a single DSN.
Parameters
----------
wdmpath
Path and WDM
filename.
dsn
The Data Set Number in the WDM
file.
"""
print(_describedsn(wdmpath, dsn))
@mando.command("listdsns", formatter_class=RSTHelpFormatter, doctype="numpy")
@tsutils.doc(tsutils.docstrings)
def listdsns_cli(wdmpath):
"""Print out a table describing all DSNs in the WDM.
Parameters
----------
wdmpath
Path and WDM
filename.
"""
nvars = listdsns(wdmpath)
collect = OrderedDict()
for _, testv in nvars.items():
for key in [
"DSN",
"SCENARIO",
"LOCATION",
"CONSTITUENT",
"TSTYPE",
"START_DATE",
"END_DATE",
"TCODE",
"TSTEP",
]:
collect.setdefault(key, []).append(testv[key.lower()])
return tsutils._printiso(collect, tablefmt="plain")
def listdsns(wdmpath):
"""Print out a table describing all DSNs in the WDM."""
if not os.path.exists(wdmpath):
raise ValueError(
tsutils.error_wrapper(
"""
File {0} does not exist.
""".format(
wdmpath
)
)
)
collect = OrderedDict()
for i in range(1, 32001):
try:
testv = _describedsn(wdmpath, i)
except wdmutil.WDMError:
continue
collect[i] = testv
return collect
@mando.command(formatter_class=RSTHelpFormatter, doctype="numpy")
@tsutils.doc(tsutils.docstrings)
def createnewwdm(wdmpath, overwrite=False):
"""Create a new WDM file, optional to overwrite.
Parameters
----------
wdmpath
Path and WDM
filename.
overwrite
Defaults to not overwrite existing
file.
"""
WDM.create_new_wdm(wdmpath, overwrite=overwrite)
@mando.command(formatter_class=RSTHelpFormatter, doctype="numpy")
@tsutils.doc(tsutils.docstrings)
def createnewdsn(
wdmpath,
dsn,
tstype="",
base_year=1900,
tcode=4,
tsstep=1,
statid="",
scenario="",
location="",
description="",
constituent="",
tsfill=-999.0,
):
"""Create a new DSN.
Parameters
----------
wdmpath
Path and WDM filename. HSPF is limited to a path
and WDM file name of 64 characters. 'wdmtoolbox' is
only limited by the command line limits.
dsn
The Data Set Number in the WDM file. This number
must be greater or equal to 1 and less than or equal
to 32000. HSPF can only use for input or output
DSNs of 1 to 9999, inclusive.
tstype
Time series type. Can be any 4 character string, but if not
specified defaults to first 4 characters of 'constituent'. Must
match what is used in HSPF UCI file.
Limited to 4 characters.
base_year
Base year of time series, defaults to 1900. The DSN will not
accept any time-stamps before this date.
tcode
Time series code, (1=second, 2=minute, 3=hour, 4=day, 5=month,
6=year) defaults to 4=daily.
tsstep
Time series steps, defaults (and almost always is)
1.
statid
The station name, defaults to
''.
Limited to 16 characters.
scenario
The name of the scenario, defaults to ''. Can be anything, but
typically, 'OBSERVED' for calibration and input time-series and
'SIMULATE' for HSPF results.
Limited to 8 characters.
location
The location, defaults to
''.
Limited to 8 characters.
description
Descriptive text, defaults to
''.
Limited to 48 characters.
constituent
The constituent that the time series represents, defaults to
''.
Limited to 8 characters.
tsfill
The value used as placeholder for missing
values.
"""
if tstype == "" and len(constituent) > 0:
tstype = constituent[:4]
WDM.create_new_dsn(
wdmpath,
int(dsn),
tstype=tstype,
base_year=base_year,
tcode=tcode,
tsstep=tsstep,
statid=statid,
scenario=scenario,
location=location,
description=description,
constituent=constituent,
tsfill=tsfill,
)
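# Hypothetical command-line sketch (added for illustration, not from the
# original module): mando exposes the functions above as sub-commands, so a
# session could look roughly like
#
#   wdmtoolbox createnewwdm data.wdm
#   wdmtoolbox createnewdsn data.wdm 101 --tcode=3 --constituent=FLOW
#   wdmtoolbox csvtowdm data.wdm 101 --input_ts=flow.csv
#
# The exact option spellings depend on how mando renders each signature.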
@mando.command(formatter_class=RSTHelpFormatter, doctype="numpy")
@tsutils.doc(tsutils.docstrings)
def hydhrseqtowdm(wdmpath, dsn, input_ts=sys.stdin, start_century=1900):
"""Write HYDHR sequential file to a DSN.
Parameters
----------
wdmpath
Path and WDM
filename.
dsn
The Data Set Number in the WDM
file.
input_ts
Input filename, defaults to standard
input.
start_century
Since 2 digit years are used, need century, defaults
to 1900.
"""
import pandas as pd
dsn = int(dsn)
if isinstance(input_ts, str):
input_ts = open(input_ts, "r")
dates = np.array([])
data = np.array([])
for line in input_ts:
words = line[8:]
words = words.split()
year = int(words[0]) + start_century
month = int(words[1])
day = int(words[2])
ampmflag = int(words[3])
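        # ampmflag 1 marks the first twelve values of the day (hours 0-11),
        # ampmflag 2 the last twelve (hours 12-23), as handled below.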
if int(words[0]) == 99 and month == 12 and day == 31 and ampmflag == 2:
start_century = start_century + 100
data = np.append(data, [float(i) for i in words[4:16]])
try:
if ampmflag == 1:
dates = np.append(
dates,
[datetime.datetime(year, month, day, i) for i in range(0, 12)],
)
if ampmflag == 2:
dates = np.append(
dates,
[datetime.datetime(year, month, day, i) for i in range(12, 24)],
)
except ValueError:
print(start_century, line)
data = pd.DataFrame(data, index=dates)
_writetodsn(wdmpath, dsn, data)
@mando.command(formatter_class=RSTHelpFormatter, doctype="numpy")
@tsutils.doc(tsutils.docstrings)
def stdtowdm(wdmpath, dsn, infile="-"):
"""DEPRECATED: Use 'csvtowdm'."""
csvtowdm(wdmpath, dsn, input_ts=infile)
@mando.command(formatter_class=RSTHelpFormatter, doctype="numpy")
@tsutils.doc(tsutils.docstrings)
def csvtowdm(
wdmpath,
dsn,
start_date=None,
end_date=None,
columns=None,
force_freq=None,
groupby=None,
round_index=None,
clean=False,
target_units=None,
source_units=None,
input_ts="-",
):
"""Write data from a CSV file to a DSN.
File can have comma separated
'year', 'month', 'day', 'hour', 'minute', 'second', 'value'
OR
'date/time string', 'value'
Parameters
----------
wdmpath
Path and WDM
filename.
dsn
The Data Set Number in the WDM
file.
{input_ts}
{start_date}
{end_date}
{columns}
{force_freq}
{groupby}
{round_index}
{clean}
{target_units}
{source_units}
"""
tsd = tsutils.common_kwds(
input_ts,
start_date=start_date,
end_date=end_date,
pick=columns,
force_freq=force_freq,
groupby=groupby,
round_index=round_index,
clean=clean,
target_units=target_units,
source_units=source_units,
)
if len(tsd.columns) > 1:
raise ValueError(
tsutils.error_wrapper(
"""
The input data set must contain only 1 time series.
You gave {0}.
""".format(
len(tsd.columns)
)
)
)
_writetodsn(wdmpath, dsn, tsd)
def _writetodsn(wdmpath, dsn, data):
"""Local function to write Pandas data frame to DSN."""
data = tsutils.asbestfreq(data)
infer = data.index.freqstr
pandacode = infer.lstrip("0123456789")
tstep = infer[: infer.find(pandacode)]
try:
tstep = int(tstep)
except ValueError:
tstep = 1
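    # Added example: a pandas frequency string such as "15T" splits into
    # pandacode "T" (minute) with tstep 15, while a bare "D" has no leading
    # digits so tstep falls back to 1.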
invmapcode = {
1: "second",
2: "minute",
3: "hour",
4: "day",
5: "month",
6: "annual",
}
mapcode = {
"A": 6, # annual
"A-DEC": 6, # annual
"AS": 6, # annual start
"M": 5, # month
"MS": 5, # month start
"D": 4, # day
"H": 3, # hour
"T": 2, # minute
"S": 1, # second
}
try:
finterval = mapcode[pandacode]
except KeyError:
raise KeyError(
"""
*
* wdmtoolbox only understands PANDAS time intervals of :
* 'A', 'AS', 'A-DEC' for annual,
* 'M', 'MS' for monthly,
* 'D', 'H', 'T', 'S' for day, hour, minute, and second.
* wdmtoolbox thinks this series is {0}.
*
""".format(
pandacode
)
)
# Convert string to int
dsn = int(dsn)
# Make sure that input data metadata matches target DSN
desc_dsn = _describedsn(wdmpath, dsn)
dsntcode = desc_dsn["tcode"]
if finterval != dsntcode:
raise ValueError(
tsutils.error_wrapper(
"""
The DSN {2} has a tcode of {0} ({3}),
but the data has a tcode of {1} ({4}).
""".format(
dsntcode,
finterval,
dsn,
invmapcode[dsntcode],
invmapcode[finterval],
)
)
)
dsntstep = desc_dsn["tstep"]
if dsntstep != tstep:
raise ValueError(
tsutils.error_wrapper(
"""
The DSN has a tstep of {0}, but the data has a tstep of {1}.
""".format(
dsntstep, tstep
)
)
)
WDM.write_dsn(wdmpath, dsn, data)
def main():
"""Run the main function."""
if not os.path.exists("debug_wdmtoolbox"):
sys.tracebacklimit = 0
mando.main()
if __name__ == "__main__":
main()
| bsd-3-clause |
DistrictDataLabs/yellowbrick | tests/test_classifier/conftest.py | 1 | 3458 | # tests.test_classifier.conftest
# Provides fixtures for the classification tests module.
#
# Author: Benjamin Bengfort
# Created: Fri Mar 23 18:07:00 2018 -0400
#
# Copyright (C) 2018 The scikit-yb developers
# For license information, see LICENSE.txt
#
# ID: conftest.py [1e04216] [email protected] $
"""
Provides fixtures for the classification tests module.
"""
##########################################################################
## Imports
##########################################################################
import pytest
from tests.fixtures import Dataset, Split
from yellowbrick.exceptions import NotFitted
from sklearn.exceptions import NotFittedError
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split as tts
##########################################################################
## Assertion Helpers
##########################################################################
ATTRS = ["classes_", "class_count_", "score_"]
def assert_not_fitted(estimator, attrs=ATTRS, X_test=None):
"""
Check that the estimator is not fitted by ensuring it does not have
any of the attributes specified in attrs. If X_test is specified,
it is passed to predict, which must also raise a NotFitted exception.
"""
__traceback_hide__ = True
for attr in attrs:
msg = "model is fitted, has {} attribute".format(attr)
assert not hasattr(estimator, attr), msg
if X_test is not None:
with pytest.raises((NotFitted, NotFittedError)):
estimator.predict(X_test)
def assert_fitted(estimator, attrs=ATTRS, X_test=None):
"""
Check that the estimator is fitted by ensuring it does have the attributes
passed in attrs. If X_test is specified, it is passed to predict which
must not raise a NotFitted exception.
"""
__traceback_hide__ = True
for attr in attrs:
msg = "model is not fitted, does not have {} attribute".format(attr)
assert hasattr(estimator, attr), msg
if X_test is not None:
try:
estimator.predict(X_test)
except (NotFitted, NotFittedError):
pytest.fail("estimator not fitted raised from predict")
##########################################################################
## Fixtures
##########################################################################
@pytest.fixture(scope="class")
def binary(request):
"""
Creates a random binary classification dataset fixture
"""
X, y = make_classification(
n_samples=500,
n_features=20,
n_informative=8,
n_redundant=2,
n_classes=2,
n_clusters_per_class=3,
random_state=87,
)
X_train, X_test, y_train, y_test = tts(X, y, test_size=0.2, random_state=93)
dataset = Dataset(Split(X_train, X_test), Split(y_train, y_test))
request.cls.binary = dataset
@pytest.fixture(scope="class")
def multiclass(request):
"""
Creates a random multiclass classification dataset fixture
"""
X, y = make_classification(
n_samples=500,
n_features=20,
n_informative=8,
n_redundant=2,
n_classes=6,
n_clusters_per_class=3,
random_state=87,
)
X_train, X_test, y_train, y_test = tts(X, y, test_size=0.2, random_state=93)
dataset = Dataset(Split(X_train, X_test), Split(y_train, y_test))
request.cls.multiclass = dataset
| apache-2.0 |
davidbroadwater/nyc-subway-datascience-project | project_4/exercise_2/data_visualization.py | 1 | 3577 | from pandas import *
from ggplot import *
def plot_weather_data(turnstile_weather):
'''
Use ggplot to make another data visualization focused on the MTA and weather
data we used in assignment #3. You should make a type of visualization different
than you did in exercise #1, and try to use the data in a different way (e.g., if you
made a lineplot concerning ridership and time of day in exercise #1, maybe look at weather
and try to make a histogram in exercise #2).
You should feel free to implement something that we discussed in class
(e.g., scatterplots, line plots, or histograms) or attempt to implement
something more advanced if you'd like. Here are some suggestions for things
to investigate and illustrate:
* Ridership by time of day or day of week
* How ridership varies based on Subway station
* Which stations have more exits or entries at different times of day
If you'd like to learn more about ggplot and its capabilities, take
a look at the documentation at:
https://pypi.python.org/pypi/ggplot/
You can check out:
https://www.dropbox.com/s/meyki2wl9xfa7yk/turnstile_data_master_with_weather.csv
To see all the columns and data points included in the turnstile_weather
dataframe.
However, due to the limitation of our Amazon EC2 server, we are giving you about 1/3
of the actual data in the turnstile_weather dataframe
'''
def bin_subway_hours(hour):
hour_formatted = int(hour//3)
return hour_formatted
def reformat_subway_dates(date):
date_formatted = int(datetime.strftime(datetime.strptime(date, "%Y-%m-%d"),'%w'))
return date_formatted
def weekend_or_weekday_test(day_of_week):
        if day_of_week == 0 or day_of_week == 6:
is_weekend = 1
else:
is_weekend = 0
return is_weekend
set_option('chained_assignment', None)
turnstile_weather['day_of_week'] = turnstile_weather['DATEn'].map(reformat_subway_dates)
set_option('chained_assignment', None)
turnstile_weather['weekend_or_weekday'] = turnstile_weather['day_of_week'].map(weekend_or_weekday_test)
set_option('chained_assignment', None)
turnstile_weather['hour_bin'] = turnstile_weather['Hour'].map(bin_subway_hours)
hourly_averages = turnstile_weather.groupby(['hour_bin', 'weekend_or_weekday'])['ENTRIESn_hourly'].mean()
    hourly_averages = hourly_averages.reset_index()
#print hourly_averages
plot = ggplot(aes(x='hour_bin',y='ENTRIESn_hourly', color='weekend_or_weekday'),data=hourly_averages) + \
geom_point() + \
geom_line() + \
ggtitle('Average NYC Subway Ridership by Hour and Weekend (Blue) vs Weekday (Red)') + \
xlab('Hour of the Day') + \
ylab('Average Number of Riders') +\
xlim(-0.1, 7.1) +\
scale_x_continuous( labels=("","0-2","3-5", "6-8", "9-11", "12-14", "15-17", "18-20","21-23"))
#plot = ggplot(turnstile_weather, aes('EXITSn_hourly', 'ENTRIESn_hourly')) + stat_smooth(span=.15, color='black', se=True)+ geom_point(color='lightblue') + ggtitle("MTA Entries By The Hour!") + xlab('Exits') + ylab('Entries')
return plot
if __name__ == "__main__":
    image = "plot.png"
    # Assumed input file name (taken from the dataset referenced in the
    # docstring above); adjust the path as needed.
    input_filename = "turnstile_data_master_with_weather.csv"
    with open(image, "wb") as f:
        turnstile_weather = read_csv(input_filename)
turnstile_weather['datetime'] = turnstile_weather['DATEn'] + ' ' + turnstile_weather['TIMEn']
gg = plot_weather_data(turnstile_weather)
ggsave(f, gg) | mit |
shakamunyi/tensorflow | tensorflow/contrib/learn/python/learn/estimators/base_test.py | 2 | 6418 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test base estimators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import tempfile
import numpy as np
import tensorflow as tf
from tensorflow.contrib.learn.python import learn
from tensorflow.contrib.learn.python.learn import datasets
from tensorflow.contrib.learn.python.learn.estimators import base
from tensorflow.contrib.learn.python.learn.estimators._sklearn import accuracy_score
from tensorflow.contrib.learn.python.learn.estimators._sklearn import log_loss
from tensorflow.contrib.learn.python.learn.estimators._sklearn import mean_squared_error
class BaseTest(tf.test.TestCase):
"""Test base estimators."""
def testOneDim(self):
random.seed(42)
x = np.random.rand(1000)
y = 2 * x + 3
feature_columns = learn.infer_real_valued_columns_from_input(x)
regressor = learn.LinearRegressor(feature_columns=feature_columns)
regressor.fit(x, y, max_steps=100)
score = mean_squared_error(y, np.array(list(regressor.predict(x))))
self.assertLess(score, 1.0, "Failed with score = {0}".format(score))
def testIris(self):
iris = datasets.load_iris()
classifier = learn.LinearClassifier(
feature_columns=learn.infer_real_valued_columns_from_input(iris.data),
n_classes=3)
classifier.fit(iris.data, [x for x in iris.target], max_steps=100)
score = accuracy_score(iris.target, list(classifier.predict(iris.data)))
self.assertGreater(score, 0.7, "Failed with score = {0}".format(score))
def testIrisAllVariables(self):
iris = datasets.load_iris()
classifier = learn.LinearClassifier(
feature_columns=learn.infer_real_valued_columns_from_input(iris.data),
n_classes=3)
classifier.fit(iris.data, [x for x in iris.target], max_steps=100)
self.assertEqual(
classifier.get_variable_names(),
["centered_bias_weight",
"centered_bias_weight/Adagrad",
"global_step",
# Double slashes appear because the column name is empty. If it was not
# empty, the variable names would be "linear/column_name/weight" etc.
"linear//weight",
"linear//weight/Ftrl",
"linear//weight/Ftrl_1",
"linear/bias_weight",
"linear/bias_weight/Ftrl",
"linear/bias_weight/Ftrl_1"])
def testIrisSummaries(self):
iris = datasets.load_iris()
output_dir = tempfile.mkdtemp() + "learn_tests/"
classifier = learn.LinearClassifier(
feature_columns=learn.infer_real_valued_columns_from_input(iris.data),
n_classes=3, model_dir=output_dir)
classifier.fit(iris.data, iris.target, max_steps=100)
score = accuracy_score(iris.target, list(classifier.predict(iris.data)))
self.assertGreater(score, 0.5, "Failed with score = {0}".format(score))
# TODO(ipolosukhin): Check that summaries are correctly written.
def testIrisContinueTraining(self):
iris = datasets.load_iris()
classifier = learn.LinearClassifier(
feature_columns=learn.infer_real_valued_columns_from_input(iris.data),
n_classes=3)
classifier.fit(iris.data, iris.target, steps=100)
score1 = accuracy_score(iris.target, list(classifier.predict(iris.data)))
classifier.fit(iris.data, iris.target, steps=500)
score2 = accuracy_score(iris.target, list(classifier.predict(iris.data)))
self.assertGreater(
score2, score1,
"Failed with score2 {0} <= score1 {1}".format(score2, score1))
def testIrisStreaming(self):
iris = datasets.load_iris()
def iris_data():
while True:
for x in iris.data:
yield x
def iris_predict_data():
for x in iris.data:
yield x
def iris_target():
while True:
for y in iris.target:
yield y
classifier = learn.LinearClassifier(
feature_columns=learn.infer_real_valued_columns_from_input(iris.data),
n_classes=3)
classifier.fit(iris_data(), iris_target(), max_steps=500)
score1 = accuracy_score(iris.target,
list(classifier.predict(iris.data)))
score2 = accuracy_score(iris.target,
list(classifier.predict(iris_predict_data())))
self.assertGreater(score1, 0.5, "Failed with score = {0}".format(score1))
self.assertEqual(score2, score1, "Scores from {0} iterator doesn't "
"match score {1} from full "
"data.".format(score2, score1))
def testIris_proba(self):
# If sklearn available.
if log_loss:
random.seed(42)
iris = datasets.load_iris()
classifier = learn.LinearClassifier(
feature_columns=learn.infer_real_valued_columns_from_input(iris.data),
n_classes=3)
classifier.fit(iris.data, iris.target, max_steps=250)
score = log_loss(iris.target, list(classifier.predict_proba(iris.data)))
self.assertLess(score, 0.8, "Failed with score = {0}".format(score))
def testBoston(self):
random.seed(42)
boston = datasets.load_boston()
regressor = learn.LinearRegressor(
feature_columns=learn.infer_real_valued_columns_from_input(boston.data))
regressor.fit(boston.data, boston.target, max_steps=500)
score = mean_squared_error(
boston.target, np.array(list(regressor.predict(boston.data))))
self.assertLess(score, 150, "Failed with score = {0}".format(score))
def testUnfitted(self):
estimator = learn.TensorFlowEstimator(model_fn=None, n_classes=1)
with self.assertRaises(base.NotFittedError):
estimator.predict([1, 2, 3])
with self.assertRaises(base.NotFittedError):
estimator.save("/tmp/path")
if __name__ == "__main__":
tf.test.main()
| apache-2.0 |
datapythonista/pandas | pandas/tests/reshape/merge/test_merge_cross.py | 4 | 2807 | import pytest
from pandas import DataFrame
import pandas._testing as tm
from pandas.core.reshape.merge import (
MergeError,
merge,
)
@pytest.mark.parametrize(
("input_col", "output_cols"), [("b", ["a", "b"]), ("a", ["a_x", "a_y"])]
)
def test_merge_cross(input_col, output_cols):
# GH#5401
left = DataFrame({"a": [1, 3]})
right = DataFrame({input_col: [3, 4]})
left_copy = left.copy()
right_copy = right.copy()
result = merge(left, right, how="cross")
expected = DataFrame({output_cols[0]: [1, 1, 3, 3], output_cols[1]: [3, 4, 3, 4]})
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(left, left_copy)
tm.assert_frame_equal(right, right_copy)
@pytest.mark.parametrize(
"kwargs",
[
{"left_index": True},
{"right_index": True},
{"on": "a"},
{"left_on": "a"},
{"right_on": "b"},
],
)
def test_merge_cross_error_reporting(kwargs):
# GH#5401
left = DataFrame({"a": [1, 3]})
right = DataFrame({"b": [3, 4]})
msg = (
"Can not pass on, right_on, left_on or set right_index=True or "
"left_index=True"
)
with pytest.raises(MergeError, match=msg):
merge(left, right, how="cross", **kwargs)
def test_merge_cross_mixed_dtypes():
# GH#5401
left = DataFrame(["a", "b", "c"], columns=["A"])
right = DataFrame(range(2), columns=["B"])
result = merge(left, right, how="cross")
expected = DataFrame({"A": ["a", "a", "b", "b", "c", "c"], "B": [0, 1, 0, 1, 0, 1]})
tm.assert_frame_equal(result, expected)
def test_merge_cross_more_than_one_column():
# GH#5401
left = DataFrame({"A": list("ab"), "B": [2, 1]})
right = DataFrame({"C": range(2), "D": range(4, 6)})
result = merge(left, right, how="cross")
expected = DataFrame(
{
"A": ["a", "a", "b", "b"],
"B": [2, 2, 1, 1],
"C": [0, 1, 0, 1],
"D": [4, 5, 4, 5],
}
)
tm.assert_frame_equal(result, expected)
def test_merge_cross_null_values(nulls_fixture):
# GH#5401
left = DataFrame({"a": [1, nulls_fixture]})
right = DataFrame({"b": ["a", "b"], "c": [1.0, 2.0]})
result = merge(left, right, how="cross")
expected = DataFrame(
{
"a": [1, 1, nulls_fixture, nulls_fixture],
"b": ["a", "b", "a", "b"],
"c": [1.0, 2.0, 1.0, 2.0],
}
)
tm.assert_frame_equal(result, expected)
def test_join_cross_error_reporting():
# GH#5401
left = DataFrame({"a": [1, 3]})
right = DataFrame({"a": [3, 4]})
msg = (
"Can not pass on, right_on, left_on or set right_index=True or "
"left_index=True"
)
with pytest.raises(MergeError, match=msg):
left.join(right, how="cross", on="a")
| bsd-3-clause |
xzh86/scikit-learn | sklearn/decomposition/tests/test_pca.py | 199 | 10949 | import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn import datasets
from sklearn.decomposition import PCA
from sklearn.decomposition import RandomizedPCA
from sklearn.decomposition.pca import _assess_dimension_
from sklearn.decomposition.pca import _infer_dimension_
iris = datasets.load_iris()
def test_pca():
# PCA on dense arrays
pca = PCA(n_components=2)
X = iris.data
X_r = pca.fit(X).transform(X)
np.testing.assert_equal(X_r.shape[1], 2)
X_r2 = pca.fit_transform(X)
assert_array_almost_equal(X_r, X_r2)
pca = PCA()
pca.fit(X)
assert_almost_equal(pca.explained_variance_ratio_.sum(), 1.0, 3)
X_r = pca.transform(X)
X_r2 = pca.fit_transform(X)
assert_array_almost_equal(X_r, X_r2)
# Test get_covariance and get_precision with n_components == n_features
# with n_components < n_features and with n_components == 0
for n_components in [0, 2, X.shape[1]]:
pca.n_components = n_components
pca.fit(X)
cov = pca.get_covariance()
precision = pca.get_precision()
assert_array_almost_equal(np.dot(cov, precision),
np.eye(X.shape[1]), 12)
def test_whitening():
# Check that PCA output has unit-variance
rng = np.random.RandomState(0)
n_samples = 100
n_features = 80
n_components = 30
rank = 50
# some low rank data with correlated features
X = np.dot(rng.randn(n_samples, rank),
np.dot(np.diag(np.linspace(10.0, 1.0, rank)),
rng.randn(rank, n_features)))
    # the component-wise variance of the first 50 features is 3 times the
    # mean component-wise variance of the remaining 30 features
X[:, :50] *= 3
assert_equal(X.shape, (n_samples, n_features))
# the component-wise variance is thus highly varying:
assert_almost_equal(X.std(axis=0).std(), 43.9, 1)
for this_PCA, copy in [(x, y) for x in (PCA, RandomizedPCA)
for y in (True, False)]:
# whiten the data while projecting to the lower dim subspace
X_ = X.copy() # make sure we keep an original across iterations.
pca = this_PCA(n_components=n_components, whiten=True, copy=copy)
# test fit_transform
X_whitened = pca.fit_transform(X_.copy())
assert_equal(X_whitened.shape, (n_samples, n_components))
X_whitened2 = pca.transform(X_)
assert_array_almost_equal(X_whitened, X_whitened2)
assert_almost_equal(X_whitened.std(axis=0), np.ones(n_components))
assert_almost_equal(X_whitened.mean(axis=0), np.zeros(n_components))
X_ = X.copy()
pca = this_PCA(n_components=n_components, whiten=False,
copy=copy).fit(X_)
X_unwhitened = pca.transform(X_)
assert_equal(X_unwhitened.shape, (n_samples, n_components))
# in that case the output components still have varying variances
assert_almost_equal(X_unwhitened.std(axis=0).std(), 74.1, 1)
# we always center, so no test for non-centering.
def test_explained_variance():
    # Check that PCA and RandomizedPCA report consistent explained variance
rng = np.random.RandomState(0)
n_samples = 100
n_features = 80
X = rng.randn(n_samples, n_features)
pca = PCA(n_components=2).fit(X)
rpca = RandomizedPCA(n_components=2, random_state=42).fit(X)
assert_array_almost_equal(pca.explained_variance_,
rpca.explained_variance_, 1)
assert_array_almost_equal(pca.explained_variance_ratio_,
rpca.explained_variance_ratio_, 3)
# compare to empirical variances
X_pca = pca.transform(X)
assert_array_almost_equal(pca.explained_variance_,
np.var(X_pca, axis=0))
X_rpca = rpca.transform(X)
assert_array_almost_equal(rpca.explained_variance_,
np.var(X_rpca, axis=0))
def test_pca_check_projection():
# Test that the projection of data is correct
rng = np.random.RandomState(0)
n, p = 100, 3
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5])
Xt = 0.1 * rng.randn(1, p) + np.array([3, 4, 5])
Yt = PCA(n_components=2).fit(X).transform(Xt)
Yt /= np.sqrt((Yt ** 2).sum())
assert_almost_equal(np.abs(Yt[0][0]), 1., 1)
def test_pca_inverse():
# Test that the projection of data can be inverted
rng = np.random.RandomState(0)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
    # sanity check: we can recover the original data from the transformed
    # signal (since the data is almost of rank n_components)
pca = PCA(n_components=2).fit(X)
Y = pca.transform(X)
Y_inverse = pca.inverse_transform(Y)
assert_almost_equal(X, Y_inverse, decimal=3)
# same as above with whitening (approximate reconstruction)
pca = PCA(n_components=2, whiten=True)
pca.fit(X)
Y = pca.transform(X)
Y_inverse = pca.inverse_transform(Y)
assert_almost_equal(X, Y_inverse, decimal=3)
def test_pca_validation():
X = [[0, 1], [1, 0]]
for n_components in [-1, 3]:
assert_raises(ValueError, PCA(n_components).fit, X)
def test_randomized_pca_check_projection():
# Test that the projection by RandomizedPCA on dense data is correct
rng = np.random.RandomState(0)
n, p = 100, 3
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5])
Xt = 0.1 * rng.randn(1, p) + np.array([3, 4, 5])
Yt = RandomizedPCA(n_components=2, random_state=0).fit(X).transform(Xt)
Yt /= np.sqrt((Yt ** 2).sum())
assert_almost_equal(np.abs(Yt[0][0]), 1., 1)
def test_randomized_pca_check_list():
# Test that the projection by RandomizedPCA on list data is correct
X = [[1.0, 0.0], [0.0, 1.0]]
X_transformed = RandomizedPCA(n_components=1,
random_state=0).fit(X).transform(X)
assert_equal(X_transformed.shape, (2, 1))
assert_almost_equal(X_transformed.mean(), 0.00, 2)
assert_almost_equal(X_transformed.std(), 0.71, 2)
def test_randomized_pca_inverse():
# Test that RandomizedPCA is inversible on dense data
rng = np.random.RandomState(0)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
    # same sanity check as for dense PCA: recover the original data from the
    # transformed signal (since the data is almost of rank n_components)
pca = RandomizedPCA(n_components=2, random_state=0).fit(X)
Y = pca.transform(X)
Y_inverse = pca.inverse_transform(Y)
assert_almost_equal(X, Y_inverse, decimal=2)
# same as above with whitening (approximate reconstruction)
pca = RandomizedPCA(n_components=2, whiten=True,
random_state=0).fit(X)
Y = pca.transform(X)
Y_inverse = pca.inverse_transform(Y)
relative_max_delta = (np.abs(X - Y_inverse) / np.abs(X).mean()).max()
assert_almost_equal(relative_max_delta, 0.11, decimal=2)
def test_pca_dim():
# Check automated dimensionality setting
rng = np.random.RandomState(0)
n, p = 100, 5
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5, 1, 2])
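    # n_components='mle' asks PCA to choose the dimensionality automatically
    # using Minka's MLE; the data above is rank one plus noise, so a single
    # component should be selected.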
pca = PCA(n_components='mle').fit(X)
assert_equal(pca.n_components, 'mle')
assert_equal(pca.n_components_, 1)
def test_infer_dim_1():
    # Build data that is essentially rank one plus isotropic noise and check
    # that the log-likelihood computed by _assess_dimension_ is close to its
    # maximum at k=1.
n, p = 1000, 5
rng = np.random.RandomState(0)
X = (rng.randn(n, p) * .1 + rng.randn(n, 1) * np.array([3, 4, 5, 1, 2])
+ np.array([1, 0, 7, 4, 6]))
pca = PCA(n_components=p)
pca.fit(X)
spect = pca.explained_variance_
ll = []
for k in range(p):
ll.append(_assess_dimension_(spect, k, n, p))
ll = np.array(ll)
assert_greater(ll[1], ll.max() - .01 * n)
def test_infer_dim_2():
    # Build data with two mean-shifted subgroups (effective rank at least two)
    # and check that _infer_dimension_ detects more than one component.
n, p = 1000, 5
rng = np.random.RandomState(0)
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5, 1, 2])
X[10:20] += np.array([6, 0, 7, 2, -1])
pca = PCA(n_components=p)
pca.fit(X)
spect = pca.explained_variance_
assert_greater(_infer_dimension_(spect, n, p), 1)
def test_infer_dim_3():
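    # Three mean-shifted subgroups give the data an effective rank of at
    # least three, so the inferred dimension should exceed two.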
n, p = 100, 5
rng = np.random.RandomState(0)
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5, 1, 2])
X[10:20] += np.array([6, 0, 7, 2, -1])
X[30:40] += 2 * np.array([-1, 1, -1, 1, -1])
pca = PCA(n_components=p)
pca.fit(X)
spect = pca.explained_variance_
assert_greater(_infer_dimension_(spect, n, p), 2)
def test_infer_dim_by_explained_variance():
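    # A float in (0, 1) for n_components is interpreted as the fraction of
    # variance to retain: fitting keeps the smallest number of components
    # whose cumulative explained_variance_ratio_ reaches that fraction.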
X = iris.data
pca = PCA(n_components=0.95)
pca.fit(X)
assert_equal(pca.n_components, 0.95)
assert_equal(pca.n_components_, 2)
pca = PCA(n_components=0.01)
pca.fit(X)
assert_equal(pca.n_components, 0.01)
assert_equal(pca.n_components_, 1)
rng = np.random.RandomState(0)
# more features than samples
X = rng.rand(5, 20)
pca = PCA(n_components=.5).fit(X)
assert_equal(pca.n_components, 0.5)
assert_equal(pca.n_components_, 2)
def test_pca_score():
# Test that probabilistic PCA scoring yields a reasonable score
n, p = 1000, 3
rng = np.random.RandomState(0)
X = rng.randn(n, p) * .1 + np.array([3, 4, 5])
pca = PCA(n_components=2)
pca.fit(X)
ll1 = pca.score(X)
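    # h is the expected log-likelihood per sample under the true generative
    # model: minus the differential entropy of an isotropic Gaussian with
    # sigma = 0.1, summed over the p dimensions.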
h = -0.5 * np.log(2 * np.pi * np.exp(1) * 0.1 ** 2) * p
np.testing.assert_almost_equal(ll1 / h, 1, 0)
def test_pca_score2():
    # Test that probabilistic PCA correctly separates different datasets
n, p = 100, 3
rng = np.random.RandomState(0)
X = rng.randn(n, p) * .1 + np.array([3, 4, 5])
pca = PCA(n_components=2)
pca.fit(X)
ll1 = pca.score(X)
ll2 = pca.score(rng.randn(n, p) * .2 + np.array([3, 4, 5]))
assert_greater(ll1, ll2)
# Test that it gives the same scores if whiten=True
pca = PCA(n_components=2, whiten=True)
pca.fit(X)
ll2 = pca.score(X)
assert_almost_equal(ll1, ll2)
def test_pca_score3():
# Check that probabilistic PCA selects the right model
n, p = 200, 3
rng = np.random.RandomState(0)
Xl = (rng.randn(n, p) + rng.randn(n, 1) * np.array([3, 4, 5])
+ np.array([1, 0, 7]))
Xt = (rng.randn(n, p) + rng.randn(n, 1) * np.array([3, 4, 5])
+ np.array([1, 0, 7]))
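    # Both datasets share a single dominant latent direction, so the
    # held-out log-likelihood should peak at one retained component.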
ll = np.zeros(p)
for k in range(p):
pca = PCA(n_components=k)
pca.fit(Xl)
ll[k] = pca.score(Xt)
assert_true(ll.argmax() == 1)
| bsd-3-clause |
Phobia0ptik/ThinkStats2 | code/hypothesis.py | 75 | 10162 | """This file contains code used in "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2010 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function, division
import nsfg
import nsfg2
import first
import thinkstats2
import thinkplot
import copy
import random
import numpy as np
import matplotlib.pyplot as pyplot
class CoinTest(thinkstats2.HypothesisTest):
"""Tests the hypothesis that a coin is fair."""
def TestStatistic(self, data):
"""Computes the test statistic.
data: data in whatever form is relevant
"""
heads, tails = data
test_stat = abs(heads - tails)
return test_stat
def RunModel(self):
"""Run the model of the null hypothesis.
returns: simulated data
"""
heads, tails = self.data
n = heads + tails
sample = [random.choice('HT') for _ in range(n)]
hist = thinkstats2.Hist(sample)
data = hist['H'], hist['T']
return data
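# Illustrative usage (a sketch, not part of the original module):
#
#     ct = CoinTest((140, 110))
#     p_value = ct.PValue()
#
# A small p-value suggests the observed head/tail imbalance is unlikely
# under a fair coin.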
class DiffMeansPermute(thinkstats2.HypothesisTest):
"""Tests a difference in means by permutation."""
def TestStatistic(self, data):
"""Computes the test statistic.
data: data in whatever form is relevant
"""
group1, group2 = data
test_stat = abs(group1.mean() - group2.mean())
return test_stat
def MakeModel(self):
"""Build a model of the null hypothesis.
"""
group1, group2 = self.data
self.n, self.m = len(group1), len(group2)
self.pool = np.hstack((group1, group2))
def RunModel(self):
"""Run the model of the null hypothesis.
returns: simulated data
"""
np.random.shuffle(self.pool)
data = self.pool[:self.n], self.pool[self.n:]
return data
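# Illustrative usage (a sketch, not part of the original module): the data is
# a pair of numeric sequences, and PValue estimates how often a random
# relabeling of the pooled values yields a difference in means at least as
# large as the observed one, e.g.:
#
#     ht = DiffMeansPermute((firsts.prglngth.values, others.prglngth.values))
#     p_value = ht.PValue(iters=1000)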
class DiffMeansOneSided(DiffMeansPermute):
"""Tests a one-sided difference in means by permutation."""
def TestStatistic(self, data):
"""Computes the test statistic.
data: data in whatever form is relevant
"""
group1, group2 = data
test_stat = group1.mean() - group2.mean()
return test_stat
class DiffStdPermute(DiffMeansPermute):
"""Tests a one-sided difference in standard deviation by permutation."""
def TestStatistic(self, data):
"""Computes the test statistic.
data: data in whatever form is relevant
"""
group1, group2 = data
test_stat = group1.std() - group2.std()
return test_stat
class CorrelationPermute(thinkstats2.HypothesisTest):
"""Tests correlations by permutation."""
def TestStatistic(self, data):
"""Computes the test statistic.
data: tuple of xs and ys
"""
xs, ys = data
test_stat = abs(thinkstats2.Corr(xs, ys))
return test_stat
def RunModel(self):
"""Run the model of the null hypothesis.
returns: simulated data
"""
xs, ys = self.data
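        # Permuting one variable while holding the other fixed simulates the
        # null hypothesis of no association between them.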
xs = np.random.permutation(xs)
return xs, ys
class DiceTest(thinkstats2.HypothesisTest):
"""Tests whether a six-sided die is fair."""
def TestStatistic(self, data):
"""Computes the test statistic.
data: list of frequencies
"""
observed = data
n = sum(observed)
expected = np.ones(6) * n / 6
test_stat = sum(abs(observed - expected))
return test_stat
def RunModel(self):
"""Run the model of the null hypothesis.
returns: simulated data
"""
n = sum(self.data)
        values = [1, 2, 3, 4, 5, 6]
rolls = np.random.choice(values, n, replace=True)
hist = thinkstats2.Hist(rolls)
freqs = hist.Freqs(values)
return freqs
class DiceChiTest(DiceTest):
"""Tests a six-sided die using a chi-squared statistic."""
def TestStatistic(self, data):
"""Computes the test statistic.
data: list of frequencies
"""
observed = data
n = sum(observed)
expected = np.ones(6) * n / 6
test_stat = sum((observed - expected)**2 / expected)
return test_stat
class PregLengthTest(thinkstats2.HypothesisTest):
"""Tests difference in pregnancy length using a chi-squared statistic."""
def TestStatistic(self, data):
"""Computes the test statistic.
data: pair of lists of pregnancy lengths
"""
firsts, others = data
stat = self.ChiSquared(firsts) + self.ChiSquared(others)
return stat
def ChiSquared(self, lengths):
"""Computes the chi-squared statistic.
lengths: sequence of lengths
returns: float
"""
hist = thinkstats2.Hist(lengths)
observed = np.array(hist.Freqs(self.values))
expected = self.expected_probs * len(lengths)
stat = sum((observed - expected)**2 / expected)
return stat
def MakeModel(self):
"""Build a model of the null hypothesis.
"""
firsts, others = self.data
self.n = len(firsts)
self.pool = np.hstack((firsts, others))
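        # Under the null hypothesis both groups share one distribution, so the
        # expected probabilities come from the pooled lengths, restricted to
        # weeks 35 through 43.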
pmf = thinkstats2.Pmf(self.pool)
self.values = range(35, 44)
self.expected_probs = np.array(pmf.Probs(self.values))
def RunModel(self):
"""Run the model of the null hypothesis.
returns: simulated data
"""
np.random.shuffle(self.pool)
data = self.pool[:self.n], self.pool[self.n:]
return data
def RunDiceTest():
"""Tests whether a die is fair.
"""
data = [8, 9, 19, 5, 8, 11]
dt = DiceTest(data)
print('dice test', dt.PValue(iters=10000))
dt = DiceChiTest(data)
print('dice chi test', dt.PValue(iters=10000))
def FalseNegRate(data, num_runs=1000):
"""Computes the chance of a false negative based on resampling.
data: pair of sequences
num_runs: how many experiments to simulate
returns: float false negative rate
"""
group1, group2 = data
count = 0
for i in range(num_runs):
sample1 = thinkstats2.Resample(group1)
sample2 = thinkstats2.Resample(group2)
ht = DiffMeansPermute((sample1, sample2))
p_value = ht.PValue(iters=101)
if p_value > 0.05:
count += 1
return count / num_runs
def PrintTest(p_value, ht):
"""Prints results from a hypothesis test.
p_value: float
ht: HypothesisTest
"""
print('p-value =', p_value)
print('actual =', ht.actual)
print('ts max =', ht.MaxTestStat())
def RunTests(data, iters=1000):
"""Runs several tests on the given data.
data: pair of sequences
iters: number of iterations to run
"""
# test the difference in means
ht = DiffMeansPermute(data)
p_value = ht.PValue(iters=iters)
print('\nmeans permute two-sided')
PrintTest(p_value, ht)
ht.PlotCdf()
thinkplot.Save(root='hypothesis1',
title='Permutation test',
xlabel='difference in means (weeks)',
ylabel='CDF',
legend=False)
# test the difference in means one-sided
ht = DiffMeansOneSided(data)
p_value = ht.PValue(iters=iters)
print('\nmeans permute one-sided')
PrintTest(p_value, ht)
# test the difference in std
ht = DiffStdPermute(data)
p_value = ht.PValue(iters=iters)
print('\nstd permute one-sided')
PrintTest(p_value, ht)
def ReplicateTests():
"""Replicates tests with the new NSFG data."""
live, firsts, others = nsfg2.MakeFrames()
# compare pregnancy lengths
print('\nprglngth2')
data = firsts.prglngth.values, others.prglngth.values
ht = DiffMeansPermute(data)
p_value = ht.PValue(iters=1000)
print('means permute two-sided')
PrintTest(p_value, ht)
print('\nbirth weight 2')
data = (firsts.totalwgt_lb.dropna().values,
others.totalwgt_lb.dropna().values)
ht = DiffMeansPermute(data)
p_value = ht.PValue(iters=1000)
print('means permute two-sided')
PrintTest(p_value, ht)
# test correlation
live2 = live.dropna(subset=['agepreg', 'totalwgt_lb'])
data = live2.agepreg.values, live2.totalwgt_lb.values
ht = CorrelationPermute(data)
p_value = ht.PValue()
print('\nage weight correlation 2')
PrintTest(p_value, ht)
# compare pregnancy lengths (chi-squared)
data = firsts.prglngth.values, others.prglngth.values
ht = PregLengthTest(data)
p_value = ht.PValue()
print('\npregnancy length chi-squared 2')
PrintTest(p_value, ht)
def main():
thinkstats2.RandomSeed(17)
# run the coin test
ct = CoinTest((140, 110))
pvalue = ct.PValue()
print('coin test p-value', pvalue)
# compare pregnancy lengths
print('\nprglngth')
live, firsts, others = first.MakeFrames()
data = firsts.prglngth.values, others.prglngth.values
RunTests(data)
# compare birth weights
print('\nbirth weight')
data = (firsts.totalwgt_lb.dropna().values,
others.totalwgt_lb.dropna().values)
ht = DiffMeansPermute(data)
p_value = ht.PValue(iters=1000)
print('means permute two-sided')
PrintTest(p_value, ht)
# test correlation
live2 = live.dropna(subset=['agepreg', 'totalwgt_lb'])
data = live2.agepreg.values, live2.totalwgt_lb.values
ht = CorrelationPermute(data)
p_value = ht.PValue()
print('\nage weight correlation')
print('n=', len(live2))
PrintTest(p_value, ht)
# run the dice test
RunDiceTest()
# compare pregnancy lengths (chi-squared)
data = firsts.prglngth.values, others.prglngth.values
ht = PregLengthTest(data)
p_value = ht.PValue()
print('\npregnancy length chi-squared')
PrintTest(p_value, ht)
# compute the false negative rate for difference in pregnancy length
data = firsts.prglngth.values, others.prglngth.values
neg_rate = FalseNegRate(data)
print('false neg rate', neg_rate)
# run the tests with new nsfg data
ReplicateTests()
if __name__ == "__main__":
main()
| gpl-3.0 |