repo_name (string, 6-112 chars) | path (string, 4-204 chars) | copies (string, 1-3 chars) | size (string, 4-6 chars) | content (string, 714-810k chars) | license (string, 15 classes)
---|---|---|---|---|---
ZhiangChen/bendix_dnn | front_radar/contractive_autoencoder.py | 1 | 6044 |
#!/usr/bin/env python
'''
Zhiang Chen
Dec, 2016
'''
from six.moves import cPickle as pickle
import matplotlib.pyplot as plt
import os
import tensorflow as tf
import numpy as np
import time
'''Load Data'''
wd = os.getcwd()
file_name = wd+'/front_dist_data'
with open(file_name, 'rb') as f:
save = pickle.load(f)
pos_data = save['pos_data']
neg_data = save['neg_data']
del save
#print('pos_data: ',pos_data.shape)
#print('neg_data: ',neg_data.shape)
pos_nm, pos_dim = pos_data.shape
neg_nm, neg_dim = neg_data.shape
assert pos_dim == neg_dim
data_dim = pos_dim
dataset = np.concatenate((pos_data, neg_data), axis=0)
pos_labels = np.asarray([[1,0]]).repeat(pos_nm,axis=0)
neg_labels = np.asarray([[0,1]]).repeat(neg_nm,axis=0)
labels = np.concatenate((pos_labels, neg_labels), axis=0)
#print(np.amax(dataset))
#print(np.amin(dataset))
'''Randomize Data'''
def randomize(dataset, labels):
permutation = np.random.permutation(labels.shape[0])
shuffled_dataset = dataset[permutation,:]
shuffled_labels = labels[permutation,:]
return shuffled_dataset, shuffled_labels
dataset, labels = randomize(dataset, labels)
'''
for i in range(3):
index = np.random.randint(dataset.shape[0])
plt.plot(dataset[index,:])
print('label',labels[index,:])
plt.show()
'''
'''Assign Dataset'''
data_nm = dataset.shape[0]
train_dataset = dataset[0:int(0.9*data_nm),:].astype(np.float32)
train_labels = labels[0:int(0.9*data_nm),:].astype(np.float32)
valid_dataset = dataset[int(0.6*data_nm):int(0.8*data_nm),:].astype(np.float32)
valid_labels = labels[int(0.6*data_nm):int(0.8*data_nm),:].astype(np.float32)
test_dataset = dataset[int(0.8*data_nm):,:].astype(np.float32)
test_labels = labels[int(0.8*data_nm):,:].astype(np.float32)
print('Training set', train_dataset.shape, train_labels.shape)
print('Validation set', valid_dataset.shape, valid_labels.shape)
print('Test set', test_dataset.shape, test_labels.shape)
'''Define MSE & accuracy'''
def MSE(predictions, labels):
return np.mean((predictions-labels)**2)
def accuracy(predictions, labels):
return (100.0 * np.sum(np.argmax(predictions, 1) == np.argmax(labels, 1))/ predictions.shape[0])
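# A minimal sketch (not part of the original script) exercising the two helpers
# above on toy arrays; the `_demo_metrics` name is made up for this illustration.
def _demo_metrics():
    preds = np.asarray([[0.9, 0.1], [0.2, 0.8]])
    targets = np.asarray([[1.0, 0.0], [0.0, 1.0]])
    # MSE averages the squared element-wise error; accuracy compares argmax rows.
    return MSE(preds, targets), accuracy(preds, targets)  # -> (0.025, 100.0)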
'''Build Net'''
batch_size = 20
hidden1_nm = 15
hidden2_nm = 3
hidden3_nm = 10
hidden_nm_r = 3
graph = tf.Graph()
with graph.as_default():
tf_train_dataset = tf.placeholder(tf.float32, shape=(batch_size,data_dim))
tf_valid_dataset = tf.constant(valid_dataset)
tf_test_dataset = tf.constant(test_dataset)
e_weights1 = tf.Variable(tf.truncated_normal([20,hidden1_nm], stddev=1.0))
e_biases1 = tf.Variable(tf.zeros([hidden1_nm]))
e_weights2 = tf.Variable(tf.truncated_normal([hidden1_nm,hidden2_nm], stddev=1.0))
e_biases2 = tf.Variable(tf.zeros([hidden2_nm]))
d_weights1 = tf.Variable(tf.truncated_normal([hidden2_nm,hidden1_nm], stddev=1.0))
d_biases1 = tf.Variable(tf.zeros([hidden1_nm]))
d_weights2 = tf.Variable(tf.truncated_normal([hidden1_nm,20], stddev=1.0))
d_biases2 = tf.Variable(tf.zeros([20]))
e_weights1_r = tf.Variable(tf.truncated_normal([5,hidden_nm_r], stddev=1.0))
e_biases1_r = tf.Variable(tf.zeros([hidden_nm_r]))
d_weights1_r = tf.Variable(tf.truncated_normal([hidden_nm_r,5], stddev=1.0))
d_biases1_r = tf.Variable(tf.zeros([5]))
weights1 = tf.Variable(tf.truncated_normal([hidden2_nm*2,hidden3_nm], stddev=1.0))
biases1 = tf.Variable(tf.zeros([hidden3_nm]))
weights2 = tf.Variable(tf.truncated_normal([hidden3_nm,2], stddev=1.0))
biases2 = tf.Variable(tf.zeros([2]))
global_step = tf.Variable(0) # count the number of steps taken.
saver = tf.train.Saver()
def encoder(data_f, data_r):
hidden_in = tf.matmul(data_f, e_weights1) + e_biases1
hidden_out = tf.nn.sigmoid(hidden_in)
hidden_in = tf.matmul(hidden_out, e_weights2) + e_biases2
hidden_out_f = tf.nn.sigmoid(hidden_in)
hidden_in = tf.matmul(data_r, e_weights1_r) + e_biases1_r
hidden_out_r = tf.nn.sigmoid(hidden_in)
return hidden_out_f, hidden_out_r
def decoder(data_f, data_r):
hidden_in = tf.matmul(data_f, d_weights1) + d_biases1
hidden_out = tf.nn.sigmoid(hidden_in)
hidden_in = tf.matmul(hidden_out, d_weights2) + d_biases2
hidden_out_f = hidden_in
hidden_in = tf.matmul(data_r, d_weights1_r) + d_biases1_r
hidden_out_r = hidden_in
return hidden_out_f, hidden_out_r
def reconstruction(data):
data_f, data_r = data[:,:20], data[:,20:]
representation_f, representation_r = encoder(data_f,data_r)
pred_data_f, pred_data_r = decoder(representation_f, representation_r)
pred_data = tf.concat(1,[pred_data_f,pred_data_r])
return pred_data
cost = tf.reduce_mean(tf.pow(tf_train_dataset - reconstruction(tf_train_dataset), 2))
optimizer = tf.train.RMSPropOptimizer(0.01).minimize(cost)
valid_prediction = reconstruction(tf_valid_dataset)
test_prediction = reconstruction(tf_test_dataset)
start_time = time.time()
nm_steps = 15000
with tf.Session(graph=graph) as session:
tf.initialize_all_variables().run()
print('Initialized')
for step in range(nm_steps):
offset = (step * batch_size) % (train_labels.shape[0] - batch_size)
batch_data = train_dataset[offset:(offset + batch_size), :]
batch_labels = train_labels[offset:(offset + batch_size), :]
feed_dict = {tf_train_dataset : batch_data}
_, l = session.run([optimizer, cost], feed_dict=feed_dict)
if (step % 500 == 0):
print('*'*40)
print('Minibatch loss at step %d: %f' % (step, l))
print('Validation MSE: %f' % MSE(valid_prediction.eval(), valid_dataset))
print('Test MSE: %f' % MSE(test_prediction.eval(), test_dataset))
end_time = time.time()
duration = (end_time - start_time)/60
print("Excution time: %0.2fmin" % duration)
save_path = saver.save(session, "autoencoder.ckpt")
print("Model saved in file: %s" % save_path)
i_test = 0
for i_test in np.random.randint(test_dataset.shape[0],size=10).tolist():
plt.plot(test_dataset[i_test,:],color='red')
prd_test = reconstruction(test_dataset[i_test,:].reshape(-1,25)).eval()[0,:]
plt.plot(prd_test,color='blue')
plt.ylim([-0.5,0.5])
plt.show()
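# A hedged sketch (not in the original script): reload the checkpoint written by
# `saver.save` above into a fresh session on the same graph and re-evaluate the
# test reconstruction error. The `_demo_restore` helper name is illustrative.
def _demo_restore(checkpoint_path="autoencoder.ckpt"):
    with tf.Session(graph=graph) as restored_session:
        saver.restore(restored_session, checkpoint_path)
        return MSE(test_prediction.eval(session=restored_session), test_dataset)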
| mit
mssalvador/ReadData | src/GridSearchLogRegAndKmeans.py | 1 | 8418 |
'''
Created on Feb 7, 2017
@author: svanhmic
'''
from sklearn.linear_model import LogisticRegression as skLogistic
from pyspark.mllib.linalg import Vectors as oVector, VectorUDT as oVectorUDT
from pyspark.sql.types import StringType
from pyspark.sql import functions as F
from pyspark.sql import SQLContext
from pyspark import SparkContext
from spark_sklearn import GridSearchCV,Converter
from sklearn.cluster import KMeans as skKmeans
from sklearn.model_selection import train_test_split
import pandas as pd
import numpy as np
import json
import sys
def getConfusion(label,prediction):
diff = abs(label-prediction)
if (diff == 0) and (label == 0):
return "TN"
elif (diff == 0) and (label == 1):
return "TP"
elif (diff == 1) and (label == 0):
return "FP"
elif (diff == 1) and (label == 1):
return "FN"
else:
return "Excluded"
subUdf = F.udf(lambda x,y: getConfusion(x,y),StringType())
def computeConfusion(df):
cols = [F.col(i) for i in ("cvrNummer","label","predictionLogReg")]
return (df
.select(*cols,subUdf(F.col("label"),F.col("predictionLogReg")).alias("diff"))
.groupby().pivot("diff",["TP","TN","FN","FP","Excluded"]).count()
.withColumn(col=((F.col("TP")+F.col("TN"))/(df.count()-F.col("Excluded"))),colName="accuracy")
)
def getStatus(df):
return computeConfusion(df)
def showStats(df):
accuracyDf = computeConfusion(df)
print(accuracyDf.head())
accDf = (accuracyDf
.select("accuracy")
.collect()[0][0])
print("Accuracy: "+str(accDf))
def createPandasDf(sc,df,featuresCol="features",idCol="cvrNummer",*y,**x):
'''
improved convert to pandas dataframe
index and features are the bare minimum
'''
#print(type(df))
dfCols = df.columns
columnsInX = [i for i in list(x.values()) if i in dfCols]
columnsInY = [i for i in y if (i in dfCols) and (i not in columnsInX ) ]
allColumns = columnsInX+columnsInY
try:
allColumns.remove(featuresCol)
allColumns.remove(idCol)
except ValueError:
print("no extra cols")
featDict = checkVectorTypes(df.schema,featuresCol)
toOldVectorUdf = isVectorMLLib(featDict[1])
conv = Converter(sc)
return conv.toPandas(df.select([idCol,toOldVectorUdf(featDict[0]).alias(featuresCol)]+allColumns))
def checkVectorTypes(schema,featureList="features"):
assert schema.needConversion() == True , "it is not good"
dd = json.loads(schema.json())
mappedJson = map(lambda x: x["name"],dd["fields"])
assert featureList in mappedJson, featureList+" is not there."
try:
return list(map(lambda x: (x["name"],x["type"]["pyClass"]),filter(lambda x: x["name"] == featureList,dd["fields"])))[0]
except KeyError:
print("hmm")
return list(map(lambda x: (x["name"],x["type"]["class"]),filter(lambda x: x["name"] == featureList,dd["fields"])))[0]
def isVectorMLLib(typ):
if typ in "pyspark.ml.linalg.VectorUDT":
return F.udf(lambda x: oVector.dense(x.toArray()),oVectorUDT())
else:
return F.udf(lambda x: x,oVectorUDT())
def computeMaxSlope(slopes):
'''
Basically computes the simple slope of a series of points
Input:
slopes - npvector containing x,y vals
Output:
highest parameter pair slope
'''
diffSlops = np.diff(slopes[:,1])
diffs = list(zip(slopes[1:,0],diffSlops))
RowCol = np.where(diffs == np.max(diffSlops))
return diffs[RowCol[0][0]]
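# A hedged usage sketch (not in the original module): computeMaxSlope returns the
# (parameter, delta) pair whose first difference in the score column is largest,
# which onePass below uses as an elbow heuristic. The toy array is illustrative.
def _demo_compute_max_slope():
    toy_scores = np.array([[2, 10.0], [3, 4.0], [4, 3.5], [5, 3.4]])
    # differences of the score column are [-6.0, -0.5, -0.1]; the largest (-0.1)
    # pairs with n_clusters = 5, so that (parameter, delta) tuple is returned
    return computeMaxSlope(toy_scores)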
def labelOutliers(df,predictionCol="predictionKmeans",labelCol="label",threshold = 0.005,iterations=1):
'''
This method will label the data as outliers in the dataframe
Input:
df - pandas dataframe
Output:
df_out - pandas dataframe with outliers as labels
'''
assert "isOutlier" not in df.column, "Error isOutlier is not "
groupedClusters = df.groupby(predictionCol,as_index=False).count()
outliers = groupedClusters[groupedClusters[labelCol]<=threshold*len(df)]
#print(outliers["predictionKmeans"].values.tolist())
df.ix[df["predictionKmeans"].isin(outliers["predictionKmeans"].values.tolist()),"isOutlier"] = iterations
return df
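# A hedged usage sketch (not in the original module), assuming an older pandas
# release where DataFrame.ix is still available; the toy frame is illustrative.
def _demo_label_outliers():
    toy = pd.DataFrame({"predictionKmeans": [0, 0, 0, 0, 1],
                        "label": [1, 0, 1, 0, 1]})
    # cluster 1 holds a single row (20% of the data), below the 25% threshold,
    # so only that row gets isOutlier == 1
    return labelOutliers(toy, threshold=0.25, iterations=1)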
def trainGridAndEval(sc,df,featureCol="features",parms = {'n_clusters':(2,3,4),}):
'''
This method should contain the kmeans gridsearch function from spark_sklearn,
such that the grid search is parallelized.
Input:
df- Spark dataframe. Df should contain features in a ml dense vector format or mllib format
parms - Dictionary with parameters for the machine learning algorithm
**dfParams - Dictionary containing various information, e.g. column names for the feature and id columns
more can be added
output: returns the same data frame with predictions.
'''
#args is used here!
#panScaledDf = createPandasDf(df,featureCol,idCol,dfParams)
#Initialize the gridsearch
gs = GridSearchCV(sc,skKmeans(random_state=True),parms,error_score="numeric",cv=10)
#print(type(x))
#print(type(features))
#model =
return gs.fit(df[featureCol].tolist()) #panScaledDf.assign(predict=model.predict(x))
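# A hedged call sketch (not in the original module): the returned GridSearchCV
# exposes grid_scores_ as (params, mean_score, ...) tuples, which is what the
# elbow heuristic in onePass below consumes. `sc` and `pandas_df` are assumed to
# be a live SparkContext and a pandas frame with a "features" vector column.
def _demo_grid_search(sc, pandas_df):
    model = trainGridAndEval(sc, pandas_df, featureCol="features",
                             parms={'n_clusters': (2, 3, 4)})
    return [(g[0]["n_clusters"], g[1]) for g in model.grid_scores_]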
def onePass(sc,df,params={'n_clusters':(2,3),},featureCol="features",idCol="cvrNummer",labelCol="label",*extraDf):
'''
Run one full pass: cluster with the elbow-selected KMeans, label small clusters as outliers, then train a logistic regression on the remaining points.
'''
#check that df has the isOutlier column
assert ("isOutlier" in df.columns), "isOutlier is not in data frame: "+str(df.columns)
#create holdout dataframe to previous iterations
notOutliersDf = df[df["isOutlier"] == 0]
outliersDf = df[df["isOutlier"] > 0]
try:
del notOutliersDf["predictionKmeans"]
del notOutliersDf["predictionLogReg"]
except:
print(str(df.columns))
#use trainGridAndEval to find the best parameters for the clustering method
model = trainGridAndEval(sc,notOutliersDf,featureCol = featureCol,parms = params)
#extract the "best" parameters using the elbow-method
means = map(lambda x: [x[0]["n_clusters"],x[1]],model.grid_scores_)
clusters = np.array(list(means))
bestClusterParams = computeMaxSlope(clusters)
#re-fit KMeans with the elbow-selected parameter; the best elbow parameter
#cannot be pulled directly out of spark_sklearn yet.
length = len(notOutliersDf)
bestKmeans = skKmeans(n_clusters = int(bestClusterParams[0]),random_state = True)
bestpredictionKmeans = bestKmeans.fit_predict(notOutliersDf[featureCol].tolist())
panScaledDf = notOutliersDf.assign(predictionKmeans = bestpredictionKmeans)
#commence the cutoff
groupedClusters = panScaledDf.groupby("predictionKmeans",as_index = False).count()
outliers = groupedClusters[groupedClusters["label"] <= float(extraDf[1])*length]
#print(outliers["predictionKmeans"].values.tolist())
panScaledDf.ix[panScaledDf["predictionKmeans"].isin(outliers["predictionKmeans"].values.tolist()),"isOutlier"] = int(extraDf[0])
panScaledDf = panScaledDf.assign(predictionLogReg = np.full(length,np.nan))
#print(outliers.columns)
#commence the logistic regression training
trainPDf = panScaledDf[panScaledDf["isOutlier"] == 0]
notOutliersDfTrainCv,notOutliersDfTest = train_test_split(trainPDf,test_size = 0.2,stratify=trainPDf["label"])
#concatenate the two hold-out dataframes
outliersDf = outliersDf.append(panScaledDf[panScaledDf["isOutlier"] > 0],ignore_index = True)
print("Remaining data points: "+str(len(notOutliersDfTrainCv)))
print("Total data points: "+str(len(panScaledDf)))
print("Excluded data points: "+str(len(outliersDf)))
logisticParams = {"C":(0.1,0.333,0.666,0.999),}
logisticGS = GridSearchCV(sc,skLogistic(),logisticParams)
bestpredictionLogReg = logisticGS.fit(notOutliersDfTrainCv[featureCol].tolist(),notOutliersDfTrainCv[labelCol])
notOutliersDfTest = notOutliersDfTest.assign(predictionLogReg = bestpredictionLogReg.predict(notOutliersDfTest[featureCol].tolist()))
return pd.concat([notOutliersDfTrainCv,notOutliersDfTest,outliersDf])
if __name__ == '__main__':
sc = SparkContext(appName="regnData")
sqlContext = SQLContext(sc)
PATH = "/home/svanhmic/workspace/data/DABAI/sparkdata/json"
| apache-2.0
lambokini/SmartSTrader | tremolo/assets/foundation.py | 1 | 6793 |
#!/usr/bin/env python
#-*- coding:utf-8 -*-
from numpy import int32
class Asset (object):
"""
Manages the data for each asset. This class is abstract.
"""
@classmethod
def isRealAsset(cls):
"""
Returns True if this class is not an abstract class
but a module that actually works.
"""
return False
@classmethod
def getType(cls):
"""
Specifies the type that this class's get() returns. If the value is single, an int or float is returned;
if it is not single, a tuple is returned.
However, the most important value (such as the opening price) must always be placed first.
"""
raise NotImplementedError("the method 'gettype()' is not implemented.")
def __init__(self, parent):
self.parent = parent
self.isLocked = False
self.lock_time = 0
def get(self, t=-1):
"""
Returns the value of the asset at time t. The kind of value returned follows the type given by getType().
If t is -1, the "latest" value of the asset is returned.
"""
raise NotImplementedError("the method 'get()' is not implemented.")
def getPreviousData(self, t, n=5):
"""
Fetches the data from time t back to n steps earlier and returns it as a list.
"""
tp = self.getType()
dat = []
if tp == tuple:
dat = [self.get(t-i)[0] for i in xrange(n)]
elif tp == int or tp == float:
dat = [self.get(t-i) for i in xrange(n)]
return dat
def getScalar(self, t):
"""
Regardless of the kind of value that get() returns,
returns a value at time t that is guaranteed to be a scalar.
"""
tp = self.getType()
if tp == tuple:
return self.get(t)[0]
else:
return self.get(t)
def __call__(self, t): return self.get(t)
def getDescription(self):
"""
Returns a description of this object. The returned string differs depending on the state of the instance.
"""
raise NotImplementedError("the method 'getDescription()' is not implemented.")
description = property(getDescription)
def max(self, start, end):
"""
Returns the maximum value over the given range of t.
This method can be used without overriding it, but it is slow.
"""
t = self.getType()
if t == tuple:
return max( [self.get(t)[0] for t in xrange(start, end)] )
elif t == float or t == int:
return max( [self.get(t) for t in xrange(start, end)] )
else: raise TypeError("'getType()' returns '%s', not tuple,float,and int." % str(t))
def min(self, start, end):
"""
Returns the minimum value over the given range of t.
This method can be used without overriding it, but it is slow.
"""
t = self.getType()
if t == tuple:
return min( [self.get(t)[0] for t in xrange(start, end)] )
elif t == float or t == int:
return min( [self.get(t) for t in xrange(start, end)] )
else: raise TypeError("'getType()' returns '%s', not tuple,float,and int." % str(t))
def getCandlestick(self, start, end):
"""
Returns a tuple of the open, high, low and close over the given range of t.
"""
s = self.get(start)
h = self.max(start, end)
l = self.min(start, end)
e = self.get(end-1)
return (s,h,l,e)
def lock(self, t):
"""
Locks the object so that information at and after the given t
can no longer be accessed.
"""
if t >= 0:
self.isLocked = True
self.lock_time = t
elif t == -1 : self.isLocked = False
else: raise IndexError("list index out of range.")
def _check(self, t):
"""
Checks whether the object is actually locked.
Also raises an exception if an attempt is made to access information at or after t.
"""
if not self.isLocked: return
if not (type(t) == int or type(t) == int32):
raise TypeError("the variable 't' is not valid.")
elif self.lock_time < t:
raise TypeError("this object is locked(t=%i)." % t)
elif (t != -1) and (t < 0):
raise IndexError("list index out of range.")
def unlock(self):
"""
Unlocks the object.
"""
self.isLocked = False
def plot(self, start, end, legend=True, **kwargs):
"""
Plots over the given range.
start : the value at which to start
end : the value at which to end
legend : whether to insert a legend. Defaults to True.
Any additional keyword arguments are passed on to matplotlib's plot().
"""
from pylab import plot, arange
from pylab import legend as leg
from matplotlib.font_manager import FontProperties
x = arange(start, end)
y = []
if self.getType() == tuple:
y = [self.get(t)[0] for t in xrange(start, end)]
else:
y = [self.get(t) for t in xrange(start, end)]
if legend:
plot(x, y, label=self.getDescription() ,**kwargs)
leg(loc=0, prop=FontProperties(size=10))
else:
plot(x, y, **kwargs)
class Assets (Asset):
"""
A parent class that bundles Asset instances together.
"""
@classmethod
def isRealAsset(cls): return True
@classmethod
def getType(cls): return float
def __init__(self):
"""コンストラクタです."""
Asset.__init__(self, self)
self.children = []
self.__index = 0
def get(self, t=-1):
"""各資産の平均値を返します."""
return sum([x.get(t) for x in self.children]) / float( len(self) )
def getDescription(self):
return "Assets(%i)" % len(self.children)
def addAsset(self, asset):
"""子となるAssetインスタンスを追加します."""
if issubclass(asset.__class__, Asset):
self.children.append(asset)
else: raise TypeError("value 'asset' is not valid.")
def lock(self, t):
"""オブジェクトの子をすべてロックします."""
for x in self.children: x.lock(t)
def unlock(self):
"""Unlocks all of this object's children."""
for x in self.children: x.unlock()
def __getitem__(self, key):
"""Returns the child at the given index."""
if (type(key) == int) and (0 <= key < len(self.children)):
return self.children[key]
else: raise IndexError("list index out of range.")
def plot(self, start, end, legend=True, **kwargs):
"""
Plots every asset over the given range.
"""
for child in self.children: child.plot(start, end, legend, **kwargs)
def __call__(self, t=-1): return self.get(t)
def __str__(self): return self.getDescription()
def __iter__(self): return self
def next(self):
if self.__index >= len(self.children):
self.__index = 0
raise StopIteration
result = self.children[self.__index]
self.__index += 1
return result
def __len__(self): return len(self.children)
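# A hedged illustration (not part of the original module): a minimal concrete
# Asset backed by a plain list, showing the get()/getType()/getDescription()
# contract the abstract base above expects. The class name is made up.
class _DemoListAsset(Asset):
    @classmethod
    def isRealAsset(cls): return True
    @classmethod
    def getType(cls): return float
    def __init__(self, parent, values):
        Asset.__init__(self, parent)
        self.values = list(values)
    def get(self, t=-1):
        self._check(t)
        return float(self.values[t])
    def getDescription(self):
        return "DemoListAsset(%i points)" % len(self.values)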
| mit
joshgabriel/dft-crossfilter | CompleteApp/crossfilter_prec_app/old_mains/working_main.py | 3 | 7787 |
from os.path import dirname, join
import pandas as pd
import numpy as np
from bokeh.layouts import row, widgetbox, column
from bokeh.models import Select, Div, Column, HoverTool, ColumnDataSource
from bokeh.palettes import Spectral5
from bokeh.plotting import curdoc, figure
from bokeh.sampledata.periodic_table import elements
##### loading the data which the API needs to take care of ######
#from bokeh.sampledata.autompg import autompg
#df = autompg.copy()
#print (df.columns)
SIZES = list(range(6, 22, 3))
COLORS = Spectral5
# data cleanup
#df.cyl = [str(x) for x in df.cyl]
#df.origin = [ORIGINS[x-1] for x in df.origin]
#df['year'] = [str(x) for x in df.yr]
#del df['yr']
#df['mfr'] = [x.split()[0] for x in df.name]
#df.loc[df.mfr=='chevy', 'mfr'] = 'chevrolet'
#df.loc[df.mfr=='chevroelt', 'mfr'] = 'chevrolet'
#df.loc[df.mfr=='maxda', 'mfr'] = 'mazda'
#df.loc[df.mfr=='mercedes-benz', 'mfr'] = 'mercedes'
#df.loc[df.mfr=='toyouta', 'mfr'] = 'toyota'
#df.loc[df.mfr=='vokswagen', 'mfr'] = 'volkswagen'
#df.loc[df.mfr=='vw', 'mfr'] = 'volkswagen'
#del df['name']
df_obs = pd.read_csv('./Data/Data.csv')
# single reference standard this can be an on request
# basis input as well
#df_ref = pd.read_json('./Data/Ref.json')
# dividing data into general discretes, continuous and quantileables
columns = sorted(df_obs.columns) #+ sorted(df.columns)
#print columns
print ([(x, df_obs[x].dtype) for x in columns])
#print ([(x,df[x].dtype) for x in sorted(df.columns)])
discrete = [x for x in columns if df_obs[x].dtype == object] #+ [x for x in columns if df_obs[x].dtype == object]
print (discrete)
continuous = [x for x in columns if x not in discrete]
#print continuous
#quantileable = [x for x in continuous if len(df[x].unique()) > 20]
#print quantileable
####################################################################
##divide data into plottables and non plottables (aggregates or 3D plottables) ,
#keep it to 2D plottables for now, this is known from the column names themselves
plottables = ['k-point', 'value', 'smearing']
non_plottables = [ x for x in columns if x not in plottables ] # for aggregates
_elements = list(np.unique(df_obs['element']))
exchanges = list(np.unique(df_obs['exchange']))
properties = list(np.unique(df_obs['property']))
codes = list(np.unique(df_obs['code']))
# which sets of k-point and value to string together ? any unit transformations on the dataset values or k-point
## have another dataframe (mongo collection) for the reference standards used to compute the accuracy (uniquely identified by the element; the SAME standard should apply to all codes/exchanges/elements).
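# A hedged sketch (not in the original app) of the crossfilter step the comments
# above describe: narrow df_obs to the code/element currently selected in the
# widgets defined further down before handing the columns to the plot.
def _filtered_observations():
    mask = (df_obs['code'] == code.value) & (df_obs['element'] == element.value)
    return df_obs[mask]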
def create_figure():
# original autompg test
xs =df_obs[x.value].values
# print (type(sorted(set(xs))))
# read the data from the df
# xs = df_obs[x.value].values
ys = df_obs[y.value].values
x_title = x.value.title()
y_title = y.value.title()
kw = dict()
# if x.value in continuous:
# kw['x_range'] = sorted(set(xs))
# print (type(kw['x_range']))
# if y.value in continuous:
# kw['y_range'] = sorted(set(ys))
# print (type(kw['y_range']))
kw['title'] = "%s vs %s" % (x_title, y_title)
p = figure(plot_height=600, plot_width=800, tools='pan,box_zoom,reset,hover', **kw)
p.xaxis.axis_label = x_title
p.yaxis.axis_label = y_title
# sets the xaxis
if x.value in continuous:
p.xaxis.major_label_orientation = pd.np.pi / 4
sz = 9
if size.value != 'None':
groups = pd.qcut(df[size.value].values, len(SIZES))
sz = [SIZES[xx] for xx in groups.codes]
c = "#31AADE"
if color.value != 'None':
groups = pd.qcut(df[color.value].values, len(COLORS))
c = [COLORS[xx] for xx in groups.codes]
p.circle(x=xs, y=ys, color=c, size=sz, line_color="white", alpha=1.0, hover_color='blue', hover_alpha=1.0)
return p
def update(attr, old, new):
layout.children[3] = create_figure()
############## Header Content from description.html #################
content_filename = join(dirname(__file__), "description.html")
description = Div(text=open(content_filename).read(),
render_as_text=False, width=600)
romans = ["I", "II", "III", "IV", "V", "VI", "VII"]
elements["atomic mass"] = elements["atomic mass"].astype(str)
elements["period"] = [romans[x-1] for x in elements.period]
elements = elements[elements.group != "-"]
group_range = [str(x) for x in range(1, 19)]
colormap = {
"alkali metal" : "#a6cee3",
"alkaline earth metal" : "#1f78b4",
"halogen" : "#fdbf6f",
"metal" : "#b2df8a",
"metalloid" : "#33a02c",
"noble gas" : "#bbbb88",
"nonmetal" : "#baa2a6",
"transition metal" : "#e08e79",
}
source = ColumnDataSource(
data=dict(
group=[str(x) for x in elements["group"]],
period=[str(y) for y in elements["period"]],
symx=[str(x)+":0.1" for x in elements["group"]],
numbery=[str(x)+":0.8" for x in elements["period"]],
massy=[str(x)+":0.15" for x in elements["period"]],
namey=[str(x)+":0.3" for x in elements["period"]],
sym=elements["symbol"],
name=elements["name"],
cpk=elements["CPK"],
atomic_number=elements["atomic number"],
electronic=elements["electronic configuration"],
mass=elements["atomic mass"],
type=elements["metal"],
type_color=[colormap[x] for x in elements["metal"]],
)
)
ptable = figure(title="Periodic Table", tools="hover,save",
x_range=group_range, y_range=list(reversed(romans)))
ptable.plot_width = 1200
ptable.toolbar_location = None
ptable.outline_line_color = None
ptable.rect("group", "period", 0.9, 0.9, source=source,
fill_alpha=0.6, color="type_color")
text_props = {
"source": source,
"angle": 0,
"color": "black",
"text_align": "left",
"text_baseline": "middle"
}
ptable.text(x="symx", y="period", text="sym",
text_font_style="bold", text_font_size="15pt", **text_props)
ptable.text(x="symx", y="numbery", text="atomic_number",
text_font_size="9pt", **text_props)
ptable.text(x="symx", y="namey", text="name",
text_font_size="6pt", **text_props)
ptable.text(x="symx", y="massy", text="mass",
text_font_size="5pt", **text_props)
ptable.grid.grid_line_color = None
ptable.select_one(HoverTool).tooltips = [
("name", "@name"),
("atomic number", "@atomic_number"),
("type", "@type"),
("atomic mass", "@mass"),
("electronic configuration", "@electronic"),
]
######### CREATES CROSSFILTER ##########################
# decide if all columns or crossfilter down to sub properties
# The crossfilter widgets
# first select code this crossfilters the available options to
# available exchanges and elements
code = Select(title='Code', value='vasp', options=codes)
code.on_change('value', update)
# second select exchange
element = Select(title='Element', value='Cu', options=_elements)
element.on_change('value', update)
exchange = Select()
# The plotter widgets
x = Select(title='X-Axis', value='k-point', options=plottables)
x.on_change('value', update)
y = Select(title='Y-Axis', value='value', options=plottables)
y.on_change('value', update)
z = Select(title='Z-Axis', value='None', options=plottables)
z.on_change('value', update)
size = Select(title='Size', value='None', options=['None'] )
size.on_change('value', update)
color = Select(title='Color', value='None', options=['None'] )
color.on_change('value', update)
controls = widgetbox([x, y, color, size], width=200)
layout = column(description, ptable, controls, create_figure())
curdoc().add_root(layout)
curdoc().title = "DFT Benchmark"
| mit
xuewei4d/scikit-learn | asv_benchmarks/benchmarks/metrics.py | 12 | 1420 |
from sklearn.metrics.pairwise import pairwise_distances
from .common import Benchmark
from .datasets import _random_dataset
class PairwiseDistancesBenchmark(Benchmark):
"""
Benchmarks for pairwise distances.
"""
param_names = ['representation', 'metric', 'n_jobs']
params = (['dense', 'sparse'],
['cosine', 'euclidean', 'manhattan', 'correlation'],
Benchmark.n_jobs_vals)
def setup(self, *params):
representation, metric, n_jobs = params
if representation == 'sparse' and metric == 'correlation':
raise NotImplementedError
if Benchmark.data_size == 'large':
if metric in ('manhattan', 'correlation'):
n_samples = 8000
else:
n_samples = 24000
else:
if metric in ('manhattan', 'correlation'):
n_samples = 4000
else:
n_samples = 12000
data = _random_dataset(n_samples=n_samples,
representation=representation)
self.X, self.X_val, self.y, self.y_val = data
self.pdist_params = {'metric': metric,
'n_jobs': n_jobs}
def time_pairwise_distances(self, *args):
pairwise_distances(self.X, **self.pdist_params)
def peakmem_pairwise_distances(self, *args):
pairwise_distances(self.X, **self.pdist_params)
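# A hedged smoke-test sketch (not part of the benchmark suite): asv normally
# drives this class, but its hooks can be called directly, assuming the module
# is imported as part of the asv_benchmarks package so the relative imports
# above resolve.
def _smoke_test_pairwise_distances():
    bench = PairwiseDistancesBenchmark()
    bench.setup('dense', 'euclidean', 1)
    bench.time_pairwise_distances()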
| bsd-3-clause
marcsans/cnn-physics-perception | phy/lib/python2.7/site-packages/matplotlib/sphinxext/tests/tinypages/conf.py | 14 | 8466 |
# -*- coding: utf-8 -*-
#
# tinypages documentation build configuration file, created by
# sphinx-quickstart on Tue Mar 18 11:58:34 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
from os.path import join as pjoin, abspath
import sphinx
from distutils.version import LooseVersion
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0, abspath(pjoin('..', '..')))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['matplotlib.sphinxext.plot_directive']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'tinypages'
copyright = u'2014, Matplotlib developers'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
if LooseVersion(sphinx.__version__) >= LooseVersion('1.3'):
html_theme = 'classic'
else:
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'tinypagesdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'tinypages.tex', u'tinypages Documentation',
u'Matplotlib developers', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'tinypages', u'tinypages Documentation',
[u'Matplotlib developers'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'tinypages', u'tinypages Documentation',
u'Matplotlib developers', 'tinypages', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
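# A hedged usage sketch (not part of the original conf.py): a source tree using
# this configuration is typically built with the standard sphinx-build CLI; the
# helper name and directory arguments below are illustrative only.
def _build_docs_sketch(src_dir='.', out_dir='_build/html'):
    import subprocess
    subprocess.check_call(['sphinx-build', '-b', 'html', src_dir, out_dir])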
| mit
GuessWhoSamFoo/pandas | pandas/tests/io/parser/test_header.py | 2 | 14241 |
# -*- coding: utf-8 -*-
"""
Tests that the file header is properly handled or inferred
during parsing for all of the parsers defined in parsers.py
"""
from collections import namedtuple
import numpy as np
import pytest
from pandas.compat import StringIO, u
from pandas.errors import ParserError
from pandas import DataFrame, Index, MultiIndex
import pandas.util.testing as tm
def test_read_with_bad_header(all_parsers):
parser = all_parsers
msg = r"but only \d+ lines in file"
with pytest.raises(ValueError, match=msg):
s = StringIO(",,")
parser.read_csv(s, header=[10])
@pytest.mark.parametrize("header", [True, False])
def test_bool_header_arg(all_parsers, header):
# see gh-6114
parser = all_parsers
data = """\
MyColumn
a
b
a
b"""
msg = "Passing a bool to header is invalid"
with pytest.raises(TypeError, match=msg):
parser.read_csv(StringIO(data), header=header)
def test_no_header_prefix(all_parsers):
parser = all_parsers
data = """1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
result = parser.read_csv(StringIO(data), prefix="Field", header=None)
expected = DataFrame([[1, 2, 3, 4, 5], [6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]],
columns=["Field0", "Field1", "Field2",
"Field3", "Field4"])
tm.assert_frame_equal(result, expected)
def test_header_with_index_col(all_parsers):
parser = all_parsers
data = """foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
names = ["A", "B", "C"]
result = parser.read_csv(StringIO(data), names=names)
expected = DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]],
index=["foo", "bar", "baz"],
columns=["A", "B", "C"])
tm.assert_frame_equal(result, expected)
def test_header_not_first_line(all_parsers):
parser = all_parsers
data = """got,to,ignore,this,line
got,to,ignore,this,line
index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
"""
data2 = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
"""
result = parser.read_csv(StringIO(data), header=2, index_col=0)
expected = parser.read_csv(StringIO(data2), header=0, index_col=0)
tm.assert_frame_equal(result, expected)
def test_header_multi_index(all_parsers):
parser = all_parsers
expected = tm.makeCustomDataframe(
5, 3, r_idx_nlevels=2, c_idx_nlevels=4)
data = """\
C0,,C_l0_g0,C_l0_g1,C_l0_g2
C1,,C_l1_g0,C_l1_g1,C_l1_g2
C2,,C_l2_g0,C_l2_g1,C_l2_g2
C3,,C_l3_g0,C_l3_g1,C_l3_g2
R0,R1,,,
R_l0_g0,R_l1_g0,R0C0,R0C1,R0C2
R_l0_g1,R_l1_g1,R1C0,R1C1,R1C2
R_l0_g2,R_l1_g2,R2C0,R2C1,R2C2
R_l0_g3,R_l1_g3,R3C0,R3C1,R3C2
R_l0_g4,R_l1_g4,R4C0,R4C1,R4C2
"""
result = parser.read_csv(StringIO(data), header=[0, 1, 2, 3],
index_col=[0, 1])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("kwargs,msg", [
(dict(index_col=["foo", "bar"]), ("index_col must only contain "
"row numbers when specifying "
"a multi-index header")),
(dict(index_col=[0, 1], names=["foo", "bar"]), ("cannot specify names "
"when specifying a "
"multi-index header")),
(dict(index_col=[0, 1], usecols=["foo", "bar"]), ("cannot specify "
"usecols when "
"specifying a "
"multi-index header")),
])
def test_header_multi_index_invalid(all_parsers, kwargs, msg):
data = """\
C0,,C_l0_g0,C_l0_g1,C_l0_g2
C1,,C_l1_g0,C_l1_g1,C_l1_g2
C2,,C_l2_g0,C_l2_g1,C_l2_g2
C3,,C_l3_g0,C_l3_g1,C_l3_g2
R0,R1,,,
R_l0_g0,R_l1_g0,R0C0,R0C1,R0C2
R_l0_g1,R_l1_g1,R1C0,R1C1,R1C2
R_l0_g2,R_l1_g2,R2C0,R2C1,R2C2
R_l0_g3,R_l1_g3,R3C0,R3C1,R3C2
R_l0_g4,R_l1_g4,R4C0,R4C1,R4C2
"""
parser = all_parsers
with pytest.raises(ValueError, match=msg):
parser.read_csv(StringIO(data), header=[0, 1, 2, 3], **kwargs)
_TestTuple = namedtuple("names", ["first", "second"])
@pytest.mark.parametrize("kwargs", [
dict(header=[0, 1]),
dict(skiprows=3,
names=[("a", "q"), ("a", "r"), ("a", "s"),
("b", "t"), ("c", "u"), ("c", "v")]),
dict(skiprows=3,
names=[_TestTuple("a", "q"), _TestTuple("a", "r"),
_TestTuple("a", "s"), _TestTuple("b", "t"),
_TestTuple("c", "u"), _TestTuple("c", "v")])
])
def test_header_multi_index_common_format1(all_parsers, kwargs):
parser = all_parsers
expected = DataFrame([[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]],
index=["one", "two"],
columns=MultiIndex.from_tuples(
[("a", "q"), ("a", "r"), ("a", "s"),
("b", "t"), ("c", "u"), ("c", "v")]))
data = """,a,a,a,b,c,c
,q,r,s,t,u,v
,,,,,,
one,1,2,3,4,5,6
two,7,8,9,10,11,12"""
result = parser.read_csv(StringIO(data), index_col=0, **kwargs)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("kwargs", [
dict(header=[0, 1]),
dict(skiprows=2,
names=[("a", "q"), ("a", "r"), ("a", "s"),
("b", "t"), ("c", "u"), ("c", "v")]),
dict(skiprows=2,
names=[_TestTuple("a", "q"), _TestTuple("a", "r"),
_TestTuple("a", "s"), _TestTuple("b", "t"),
_TestTuple("c", "u"), _TestTuple("c", "v")])
])
def test_header_multi_index_common_format2(all_parsers, kwargs):
parser = all_parsers
expected = DataFrame([[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]],
index=["one", "two"],
columns=MultiIndex.from_tuples(
[("a", "q"), ("a", "r"), ("a", "s"),
("b", "t"), ("c", "u"), ("c", "v")]))
data = """,a,a,a,b,c,c
,q,r,s,t,u,v
one,1,2,3,4,5,6
two,7,8,9,10,11,12"""
result = parser.read_csv(StringIO(data), index_col=0, **kwargs)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("kwargs", [
dict(header=[0, 1]),
dict(skiprows=2,
names=[("a", "q"), ("a", "r"), ("a", "s"),
("b", "t"), ("c", "u"), ("c", "v")]),
dict(skiprows=2,
names=[_TestTuple("a", "q"), _TestTuple("a", "r"),
_TestTuple("a", "s"), _TestTuple("b", "t"),
_TestTuple("c", "u"), _TestTuple("c", "v")])
])
def test_header_multi_index_common_format3(all_parsers, kwargs):
parser = all_parsers
expected = DataFrame([[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]],
index=["one", "two"],
columns=MultiIndex.from_tuples(
[("a", "q"), ("a", "r"), ("a", "s"),
("b", "t"), ("c", "u"), ("c", "v")]))
expected = expected.reset_index(drop=True)
data = """a,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = parser.read_csv(StringIO(data), index_col=None, **kwargs)
tm.assert_frame_equal(result, expected)
def test_header_multi_index_common_format_malformed1(all_parsers):
parser = all_parsers
expected = DataFrame(np.array(
[[2, 3, 4, 5, 6], [8, 9, 10, 11, 12]], dtype="int64"),
index=Index([1, 7]),
columns=MultiIndex(levels=[[u("a"), u("b"), u("c")],
[u("r"), u("s"), u("t"),
u("u"), u("v")]],
codes=[[0, 0, 1, 2, 2], [0, 1, 2, 3, 4]],
names=[u("a"), u("q")]))
data = """a,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = parser.read_csv(StringIO(data), header=[0, 1], index_col=0)
tm.assert_frame_equal(expected, result)
def test_header_multi_index_common_format_malformed2(all_parsers):
parser = all_parsers
expected = DataFrame(np.array(
[[2, 3, 4, 5, 6], [8, 9, 10, 11, 12]], dtype="int64"),
index=Index([1, 7]),
columns=MultiIndex(levels=[[u("a"), u("b"), u("c")],
[u("r"), u("s"), u("t"),
u("u"), u("v")]],
codes=[[0, 0, 1, 2, 2], [0, 1, 2, 3, 4]],
names=[None, u("q")]))
data = """,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = parser.read_csv(StringIO(data), header=[0, 1], index_col=0)
tm.assert_frame_equal(expected, result)
def test_header_multi_index_common_format_malformed3(all_parsers):
parser = all_parsers
expected = DataFrame(np.array(
[[3, 4, 5, 6], [9, 10, 11, 12]], dtype="int64"),
index=MultiIndex(levels=[[1, 7], [2, 8]],
codes=[[0, 1], [0, 1]]),
columns=MultiIndex(levels=[[u("a"), u("b"), u("c")],
[u("s"), u("t"), u("u"), u("v")]],
codes=[[0, 1, 2, 2], [0, 1, 2, 3]],
names=[None, u("q")]))
data = """,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = parser.read_csv(StringIO(data), header=[0, 1], index_col=[0, 1])
tm.assert_frame_equal(expected, result)
@pytest.mark.parametrize("data,header", [
("1,2,3\n4,5,6", None),
("foo,bar,baz\n1,2,3\n4,5,6", 0),
])
def test_header_names_backward_compat(all_parsers, data, header):
# see gh-2539
parser = all_parsers
expected = parser.read_csv(StringIO("1,2,3\n4,5,6"),
names=["a", "b", "c"])
result = parser.read_csv(StringIO(data), names=["a", "b", "c"],
header=header)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("kwargs", [
dict(), dict(index_col=False)
])
def test_read_only_header_no_rows(all_parsers, kwargs):
# See gh-7773
parser = all_parsers
expected = DataFrame(columns=["a", "b", "c"])
result = parser.read_csv(StringIO("a,b,c"), **kwargs)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("kwargs,names", [
(dict(), [0, 1, 2, 3, 4]),
(dict(prefix="X"), ["X0", "X1", "X2", "X3", "X4"]),
(dict(names=["foo", "bar", "baz", "quux", "panda"]),
["foo", "bar", "baz", "quux", "panda"])
])
def test_no_header(all_parsers, kwargs, names):
parser = all_parsers
data = """1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
expected = DataFrame([[1, 2, 3, 4, 5],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]], columns=names)
result = parser.read_csv(StringIO(data), header=None, **kwargs)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("header", [
["a", "b"],
"string_header"
])
def test_non_int_header(all_parsers, header):
# see gh-16338
msg = "header must be integer or list of integers"
data = """1,2\n3,4"""
parser = all_parsers
with pytest.raises(ValueError, match=msg):
parser.read_csv(StringIO(data), header=header)
def test_singleton_header(all_parsers):
# see gh-7757
data = """a,b,c\n0,1,2\n1,2,3"""
parser = all_parsers
expected = DataFrame({"a": [0, 1], "b": [1, 2], "c": [2, 3]})
result = parser.read_csv(StringIO(data), header=[0])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("data,expected", [
("A,A,A,B\none,one,one,two\n0,40,34,0.1",
DataFrame([[0, 40, 34, 0.1]],
columns=MultiIndex.from_tuples(
[("A", "one"), ("A", "one.1"),
("A", "one.2"), ("B", "two")]))),
("A,A,A,B\none,one,one.1,two\n0,40,34,0.1",
DataFrame([[0, 40, 34, 0.1]],
columns=MultiIndex.from_tuples(
[("A", "one"), ("A", "one.1"),
("A", "one.1.1"), ("B", "two")]))),
("A,A,A,B,B\none,one,one.1,two,two\n0,40,34,0.1,0.1",
DataFrame([[0, 40, 34, 0.1, 0.1]],
columns=MultiIndex.from_tuples(
[("A", "one"), ("A", "one.1"),
("A", "one.1.1"), ("B", "two"),
("B", "two.1")])))
])
def test_mangles_multi_index(all_parsers, data, expected):
# see gh-18062
parser = all_parsers
result = parser.read_csv(StringIO(data), header=[0, 1])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("index_col", [None, [0]])
@pytest.mark.parametrize("columns", [None,
(["", "Unnamed"]),
(["Unnamed", ""]),
(["Unnamed", "NotUnnamed"])])
def test_multi_index_unnamed(all_parsers, index_col, columns):
# see gh-23687
#
# When specifying a multi-index header, make sure that
# we don't error just because one of the rows in our header
# has ALL column names containing the string "Unnamed". The
# correct condition to check is whether the row contains
# ALL columns that did not have names (and instead were given
# placeholder ones).
parser = all_parsers
header = [0, 1]
if index_col is None:
data = ",".join(columns or ["", ""]) + "\n0,1\n2,3\n4,5\n"
else:
data = (",".join([""] + (columns or ["", ""])) +
"\n,0,1\n0,2,3\n1,4,5\n")
if columns is None:
msg = (r"Passed header=\[0,1\] are too "
r"many rows for this multi_index of columns")
with pytest.raises(ParserError, match=msg):
parser.read_csv(StringIO(data), header=header,
index_col=index_col)
else:
result = parser.read_csv(StringIO(data), header=header,
index_col=index_col)
template = "Unnamed: {i}_level_0"
exp_columns = []
for i, col in enumerate(columns):
if not col: # Unnamed.
col = template.format(i=i if index_col is None else i + 1)
exp_columns.append(col)
columns = MultiIndex.from_tuples(zip(exp_columns, ["0", "1"]))
expected = DataFrame([[2, 3], [4, 5]], columns=columns)
tm.assert_frame_equal(result, expected)
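# A hedged additional example (not in the original suite) mirroring
# test_header_multi_index_common_format3: a two-row header with no index column
# parses into a MultiIndex of (group, field) tuples over a default RangeIndex.
def test_multi_index_header_sketch(all_parsers):
    parser = all_parsers
    data = "a,a,b\nx,y,z\n1,2,3"
    result = parser.read_csv(StringIO(data), header=[0, 1], index_col=None)
    expected = DataFrame([[1, 2, 3]],
                         columns=MultiIndex.from_tuples(
                             [("a", "x"), ("a", "y"), ("b", "z")]))
    tm.assert_frame_equal(result, expected)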
| bsd-3-clause
mrgloom/DL4H | lucid.py | 3 | 1135 |
import numpy
import cPickle
from dnn import add_fit_and_score, DropoutNet, RegularizedNet, train_models
if __name__ == "__main__":
add_fit_and_score(DropoutNet)
add_fit_and_score(RegularizedNet)
from sklearn.preprocessing import LabelEncoder
import joblib
((X_train, y_train), (X_dev, y_dev), (X_test, y_test), lb) = joblib.load(
"LUCID_words.joblib")
nwords = len(lb.classes_)
print "building the model..."
train_models(X_train, y_train, X_test, y_test, X_train.shape[1],
nwords, x_dev=X_dev, y_dev=y_dev,
numpy_rng=numpy.random.RandomState(123),
svms=False, nb=False, deepnn=True, use_dropout=False, n_epochs=1000,
verbose=True, plot=True, name='_lucid_words_dnn_ReLUs_L2')
train_models(X_train, y_train, X_test, y_test, X_train.shape[1],
nwords, x_dev=X_dev, y_dev=y_dev,
numpy_rng=numpy.random.RandomState(123),
svms=False, nb=False, deepnn=True, use_dropout=True, n_epochs=1000,
verbose=True, plot=True, name='_lucid_words_dnn_dropout_ReLUs')
| mit
mbayon/TFG-MachineLearning | vbig/lib/python2.7/site-packages/sklearn/tests/test_naive_bayes.py | 11 | 21805 |
import pickle
from io import BytesIO
import numpy as np
import scipy.sparse
from sklearn.datasets import load_digits, load_iris
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score
from sklearn.externals.six.moves import zip
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_warns
from sklearn.naive_bayes import GaussianNB, BernoulliNB, MultinomialNB
# Data is just 6 separable points in the plane
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
y = np.array([1, 1, 1, 2, 2, 2])
# A bit more random tests
rng = np.random.RandomState(0)
X1 = rng.normal(size=(10, 3))
y1 = (rng.normal(size=(10)) > 0).astype(np.int)
# Data is 6 random integer points in a 100 dimensional space classified to
# three classes.
X2 = rng.randint(5, size=(6, 100))
y2 = np.array([1, 1, 2, 2, 3, 3])
def test_gnb():
# Gaussian Naive Bayes classification.
# This checks that GaussianNB implements fit and predict and returns
# correct values for a simple toy dataset.
clf = GaussianNB()
y_pred = clf.fit(X, y).predict(X)
assert_array_equal(y_pred, y)
y_pred_proba = clf.predict_proba(X)
y_pred_log_proba = clf.predict_log_proba(X)
assert_array_almost_equal(np.log(y_pred_proba), y_pred_log_proba, 8)
# Test whether label mismatch between target y and classes raises
# an Error
# FIXME Remove this test once the more general partial_fit tests are merged
assert_raises(ValueError, GaussianNB().partial_fit, X, y, classes=[0, 1])
def test_gnb_prior():
# Test whether class priors are properly set.
clf = GaussianNB().fit(X, y)
assert_array_almost_equal(np.array([3, 3]) / 6.0,
clf.class_prior_, 8)
clf.fit(X1, y1)
# Check that the class priors sum to 1
assert_array_almost_equal(clf.class_prior_.sum(), 1)
def test_gnb_sample_weight():
"""Test whether sample weights are properly used in GNB. """
# Sample weights all being 1 should not change results
sw = np.ones(6)
clf = GaussianNB().fit(X, y)
clf_sw = GaussianNB().fit(X, y, sw)
assert_array_almost_equal(clf.theta_, clf_sw.theta_)
assert_array_almost_equal(clf.sigma_, clf_sw.sigma_)
# Fitting twice with half sample-weights should result
# in same result as fitting once with full weights
sw = rng.rand(y.shape[0])
clf1 = GaussianNB().fit(X, y, sample_weight=sw)
clf2 = GaussianNB().partial_fit(X, y, classes=[1, 2], sample_weight=sw / 2)
clf2.partial_fit(X, y, sample_weight=sw / 2)
assert_array_almost_equal(clf1.theta_, clf2.theta_)
assert_array_almost_equal(clf1.sigma_, clf2.sigma_)
# Check that duplicate entries and correspondingly increased sample
# weights yield the same result
ind = rng.randint(0, X.shape[0], 20)
sample_weight = np.bincount(ind, minlength=X.shape[0])
clf_dupl = GaussianNB().fit(X[ind], y[ind])
clf_sw = GaussianNB().fit(X, y, sample_weight)
assert_array_almost_equal(clf_dupl.theta_, clf_sw.theta_)
assert_array_almost_equal(clf_dupl.sigma_, clf_sw.sigma_)
def test_gnb_neg_priors():
"""Test whether an error is raised in case of negative priors"""
clf = GaussianNB(priors=np.array([-1., 2.]))
assert_raises(ValueError, clf.fit, X, y)
def test_gnb_priors():
"""Test whether the class prior override is properly used"""
clf = GaussianNB(priors=np.array([0.3, 0.7])).fit(X, y)
assert_array_almost_equal(clf.predict_proba([[-0.1, -0.1]]),
np.array([[0.825303662161683,
0.174696337838317]]), 8)
assert_array_equal(clf.class_prior_, np.array([0.3, 0.7]))
def test_gnb_wrong_nb_priors():
""" Test whether an error is raised if the number of prior is different
from the number of class"""
clf = GaussianNB(priors=np.array([.25, .25, .25, .25]))
assert_raises(ValueError, clf.fit, X, y)
def test_gnb_prior_greater_one():
"""Test if an error is raised if the sum of prior greater than one"""
clf = GaussianNB(priors=np.array([2., 1.]))
assert_raises(ValueError, clf.fit, X, y)
def test_gnb_prior_large_bias():
"""Test if good prediction when class prior favor largely one class"""
clf = GaussianNB(priors=np.array([0.01, 0.99]))
clf.fit(X, y)
assert_equal(clf.predict([[-0.1, -0.1]]), np.array([2]))
def test_check_update_with_no_data():
""" Test when the partial fit is called without any data"""
# Create an empty array
prev_points = 100
mean = 0.
var = 1.
x_empty = np.empty((0, X.shape[1]))
tmean, tvar = GaussianNB._update_mean_variance(prev_points, mean,
var, x_empty)
assert_equal(tmean, mean)
assert_equal(tvar, var)
def test_gnb_pfit_wrong_nb_features():
"""Test whether an error is raised when the number of feature changes
between two partial fit"""
clf = GaussianNB()
# Fit for the first time the GNB
clf.fit(X, y)
# Partial fit a second time with an incoherent X
assert_raises(ValueError, clf.partial_fit, np.hstack((X, X)), y)
def test_discrete_prior():
# Test whether class priors are properly set.
for cls in [BernoulliNB, MultinomialNB]:
clf = cls().fit(X2, y2)
assert_array_almost_equal(np.log(np.array([2, 2, 2]) / 6.0),
clf.class_log_prior_, 8)
def test_mnnb():
# Test Multinomial Naive Bayes classification.
# This checks that MultinomialNB implements fit and predict and returns
# correct values for a simple toy dataset.
for X in [X2, scipy.sparse.csr_matrix(X2)]:
# Check the ability to predict the learning set.
clf = MultinomialNB()
assert_raises(ValueError, clf.fit, -X, y2)
y_pred = clf.fit(X, y2).predict(X)
assert_array_equal(y_pred, y2)
# Verify that np.log(clf.predict_proba(X)) gives the same results as
# clf.predict_log_proba(X)
y_pred_proba = clf.predict_proba(X)
y_pred_log_proba = clf.predict_log_proba(X)
assert_array_almost_equal(np.log(y_pred_proba), y_pred_log_proba, 8)
# Check that incremental fitting yields the same results
clf2 = MultinomialNB()
clf2.partial_fit(X[:2], y2[:2], classes=np.unique(y2))
clf2.partial_fit(X[2:5], y2[2:5])
clf2.partial_fit(X[5:], y2[5:])
y_pred2 = clf2.predict(X)
assert_array_equal(y_pred2, y2)
y_pred_proba2 = clf2.predict_proba(X)
y_pred_log_proba2 = clf2.predict_log_proba(X)
assert_array_almost_equal(np.log(y_pred_proba2), y_pred_log_proba2, 8)
assert_array_almost_equal(y_pred_proba2, y_pred_proba)
assert_array_almost_equal(y_pred_log_proba2, y_pred_log_proba)
# Partial fit on the whole data at once should be the same as fit too
clf3 = MultinomialNB()
clf3.partial_fit(X, y2, classes=np.unique(y2))
y_pred3 = clf3.predict(X)
assert_array_equal(y_pred3, y2)
y_pred_proba3 = clf3.predict_proba(X)
y_pred_log_proba3 = clf3.predict_log_proba(X)
assert_array_almost_equal(np.log(y_pred_proba3), y_pred_log_proba3, 8)
assert_array_almost_equal(y_pred_proba3, y_pred_proba)
assert_array_almost_equal(y_pred_log_proba3, y_pred_log_proba)
def check_partial_fit(cls):
clf1 = cls()
clf1.fit([[0, 1], [1, 0]], [0, 1])
clf2 = cls()
clf2.partial_fit([[0, 1], [1, 0]], [0, 1], classes=[0, 1])
assert_array_equal(clf1.class_count_, clf2.class_count_)
assert_array_equal(clf1.feature_count_, clf2.feature_count_)
clf3 = cls()
clf3.partial_fit([[0, 1]], [0], classes=[0, 1])
clf3.partial_fit([[1, 0]], [1])
assert_array_equal(clf1.class_count_, clf3.class_count_)
assert_array_equal(clf1.feature_count_, clf3.feature_count_)
def test_discretenb_partial_fit():
for cls in [MultinomialNB, BernoulliNB]:
yield check_partial_fit, cls
def test_gnb_partial_fit():
clf = GaussianNB().fit(X, y)
clf_pf = GaussianNB().partial_fit(X, y, np.unique(y))
assert_array_almost_equal(clf.theta_, clf_pf.theta_)
assert_array_almost_equal(clf.sigma_, clf_pf.sigma_)
assert_array_almost_equal(clf.class_prior_, clf_pf.class_prior_)
clf_pf2 = GaussianNB().partial_fit(X[0::2, :], y[0::2], np.unique(y))
clf_pf2.partial_fit(X[1::2], y[1::2])
assert_array_almost_equal(clf.theta_, clf_pf2.theta_)
assert_array_almost_equal(clf.sigma_, clf_pf2.sigma_)
assert_array_almost_equal(clf.class_prior_, clf_pf2.class_prior_)
def test_discretenb_pickle():
# Test picklability of discrete naive Bayes classifiers
for cls in [BernoulliNB, MultinomialNB, GaussianNB]:
clf = cls().fit(X2, y2)
y_pred = clf.predict(X2)
store = BytesIO()
pickle.dump(clf, store)
clf = pickle.load(BytesIO(store.getvalue()))
assert_array_equal(y_pred, clf.predict(X2))
if cls is not GaussianNB:
# TODO re-enable me when partial_fit is implemented for GaussianNB
# Test pickling of estimator trained with partial_fit
clf2 = cls().partial_fit(X2[:3], y2[:3], classes=np.unique(y2))
clf2.partial_fit(X2[3:], y2[3:])
store = BytesIO()
pickle.dump(clf2, store)
clf2 = pickle.load(BytesIO(store.getvalue()))
assert_array_equal(y_pred, clf2.predict(X2))
def test_input_check_fit():
# Test input checks for the fit method
for cls in [BernoulliNB, MultinomialNB, GaussianNB]:
# check shape consistency for number of samples at fit time
assert_raises(ValueError, cls().fit, X2, y2[:-1])
# check shape consistency for number of input features at predict time
clf = cls().fit(X2, y2)
assert_raises(ValueError, clf.predict, X2[:, :-1])
def test_input_check_partial_fit():
for cls in [BernoulliNB, MultinomialNB]:
# check shape consistency
assert_raises(ValueError, cls().partial_fit, X2, y2[:-1],
classes=np.unique(y2))
# classes is required for first call to partial fit
assert_raises(ValueError, cls().partial_fit, X2, y2)
# check consistency of consecutive classes values
clf = cls()
clf.partial_fit(X2, y2, classes=np.unique(y2))
assert_raises(ValueError, clf.partial_fit, X2, y2,
classes=np.arange(42))
# check consistency of input shape for partial_fit
assert_raises(ValueError, clf.partial_fit, X2[:, :-1], y2)
# check consistency of input shape for predict
assert_raises(ValueError, clf.predict, X2[:, :-1])
def test_discretenb_predict_proba():
# Test discrete NB classes' probability scores
# The 100s below distinguish Bernoulli from multinomial.
# FIXME: write a test to show this.
X_bernoulli = [[1, 100, 0], [0, 1, 0], [0, 100, 1]]
X_multinomial = [[0, 1], [1, 3], [4, 0]]
# test binary case (1-d output)
y = [0, 0, 2] # 2 is regression test for binary case, 02e673
for cls, X in zip([BernoulliNB, MultinomialNB],
[X_bernoulli, X_multinomial]):
clf = cls().fit(X, y)
assert_equal(clf.predict(X[-1:]), 2)
assert_equal(clf.predict_proba([X[0]]).shape, (1, 2))
assert_array_almost_equal(clf.predict_proba(X[:2]).sum(axis=1),
np.array([1., 1.]), 6)
# test multiclass case (2-d output, must sum to one)
y = [0, 1, 2]
for cls, X in zip([BernoulliNB, MultinomialNB],
[X_bernoulli, X_multinomial]):
clf = cls().fit(X, y)
assert_equal(clf.predict_proba(X[0:1]).shape, (1, 3))
assert_equal(clf.predict_proba(X[:2]).shape, (2, 3))
assert_almost_equal(np.sum(clf.predict_proba([X[1]])), 1)
assert_almost_equal(np.sum(clf.predict_proba([X[-1]])), 1)
assert_almost_equal(np.sum(np.exp(clf.class_log_prior_)), 1)
assert_almost_equal(np.sum(np.exp(clf.intercept_)), 1)
def test_discretenb_uniform_prior():
# Test whether discrete NB classes fit a uniform prior
# when fit_prior=False and class_prior=None
for cls in [BernoulliNB, MultinomialNB]:
clf = cls()
clf.set_params(fit_prior=False)
clf.fit([[0], [0], [1]], [0, 0, 1])
prior = np.exp(clf.class_log_prior_)
assert_array_equal(prior, np.array([.5, .5]))
def test_discretenb_provide_prior():
# Test whether discrete NB classes use provided prior
for cls in [BernoulliNB, MultinomialNB]:
clf = cls(class_prior=[0.5, 0.5])
clf.fit([[0], [0], [1]], [0, 0, 1])
prior = np.exp(clf.class_log_prior_)
assert_array_equal(prior, np.array([.5, .5]))
# Inconsistent number of classes with prior
assert_raises(ValueError, clf.fit, [[0], [1], [2]], [0, 1, 2])
assert_raises(ValueError, clf.partial_fit, [[0], [1]], [0, 1],
classes=[0, 1, 1])
def test_discretenb_provide_prior_with_partial_fit():
# Test whether discrete NB classes use provided prior
# when using partial_fit
iris = load_iris()
iris_data1, iris_data2, iris_target1, iris_target2 = train_test_split(
iris.data, iris.target, test_size=0.4, random_state=415)
for cls in [BernoulliNB, MultinomialNB]:
for prior in [None, [0.3, 0.3, 0.4]]:
clf_full = cls(class_prior=prior)
clf_full.fit(iris.data, iris.target)
clf_partial = cls(class_prior=prior)
clf_partial.partial_fit(iris_data1, iris_target1,
classes=[0, 1, 2])
clf_partial.partial_fit(iris_data2, iris_target2)
assert_array_almost_equal(clf_full.class_log_prior_,
clf_partial.class_log_prior_)
def test_sample_weight_multiclass():
for cls in [BernoulliNB, MultinomialNB]:
# check shape consistency for number of samples at fit time
yield check_sample_weight_multiclass, cls
def check_sample_weight_multiclass(cls):
X = [
[0, 0, 1],
[0, 1, 1],
[0, 1, 1],
[1, 0, 0],
]
y = [0, 0, 1, 2]
sample_weight = np.array([1, 1, 2, 2], dtype=np.float64)
sample_weight /= sample_weight.sum()
clf = cls().fit(X, y, sample_weight=sample_weight)
assert_array_equal(clf.predict(X), [0, 1, 1, 2])
# Check sample weight using the partial_fit method
clf = cls()
clf.partial_fit(X[:2], y[:2], classes=[0, 1, 2],
sample_weight=sample_weight[:2])
clf.partial_fit(X[2:3], y[2:3], sample_weight=sample_weight[2:3])
clf.partial_fit(X[3:], y[3:], sample_weight=sample_weight[3:])
assert_array_equal(clf.predict(X), [0, 1, 1, 2])
def test_sample_weight_mnb():
clf = MultinomialNB()
clf.fit([[1, 2], [1, 2], [1, 0]],
[0, 0, 1],
sample_weight=[1, 1, 4])
assert_array_equal(clf.predict([[1, 0]]), [1])
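# (With sample weights (1, 1, 4) the effective class counts are 2 and 4,
# so the fitted prior of the positive class should be 4 / 6 = 2 / 3; the
# check below verifies exactly that.)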
positive_prior = np.exp(clf.intercept_[0])
assert_array_almost_equal([1 - positive_prior, positive_prior],
[1 / 3., 2 / 3.])
def test_coef_intercept_shape():
# coef_ and intercept_ should have shapes as in other linear models.
# Non-regression test for issue #2127.
X = [[1, 0, 0], [1, 1, 1]]
y = [1, 2] # binary classification
for clf in [MultinomialNB(), BernoulliNB()]:
clf.fit(X, y)
assert_equal(clf.coef_.shape, (1, 3))
assert_equal(clf.intercept_.shape, (1,))
def test_check_accuracy_on_digits():
# Non-regression test to make sure that any further refactoring / optim
# of the NB models does not harm the performance on a slightly non-linearly
# separable dataset
digits = load_digits()
X, y = digits.data, digits.target
binary_3v8 = np.logical_or(digits.target == 3, digits.target == 8)
X_3v8, y_3v8 = X[binary_3v8], y[binary_3v8]
# Multinomial NB
scores = cross_val_score(MultinomialNB(alpha=10), X, y, cv=10)
assert_greater(scores.mean(), 0.86)
scores = cross_val_score(MultinomialNB(alpha=10), X_3v8, y_3v8, cv=10)
assert_greater(scores.mean(), 0.94)
# Bernoulli NB
scores = cross_val_score(BernoulliNB(alpha=10), X > 4, y, cv=10)
assert_greater(scores.mean(), 0.83)
scores = cross_val_score(BernoulliNB(alpha=10), X_3v8 > 4, y_3v8, cv=10)
assert_greater(scores.mean(), 0.92)
# Gaussian NB
scores = cross_val_score(GaussianNB(), X, y, cv=10)
assert_greater(scores.mean(), 0.77)
scores = cross_val_score(GaussianNB(), X_3v8, y_3v8, cv=10)
assert_greater(scores.mean(), 0.86)
def test_feature_log_prob_bnb():
# Test for issue #4268.
# Tests that the feature log prob value computed by BernoulliNB when
# alpha=1.0 is equal to the expression given in Manning, Raghavan,
# and Schuetze's "Introduction to Information Retrieval" book:
# http://nlp.stanford.edu/IR-book/html/htmledition/the-bernoulli-model-1.html
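# With alpha=1 (Laplace smoothing) the Bernoulli estimate is
#   P(x_i = 1 | c) = (N_{c,i} + 1) / (N_c + 2),
# which is exactly what the manual num/denom computation below reproduces.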
X = np.array([[0, 0, 0], [1, 1, 0], [0, 1, 0], [1, 0, 1], [0, 1, 0]])
Y = np.array([0, 0, 1, 2, 2])
# Fit Bernoulli NB w/ alpha = 1.0
clf = BernoulliNB(alpha=1.0)
clf.fit(X, Y)
# Manually form the (log) numerator and denominator that
# constitute P(feature presence | class)
num = np.log(clf.feature_count_ + 1.0)
denom = np.tile(np.log(clf.class_count_ + 2.0), (X.shape[1], 1)).T
# Check manual estimate matches
assert_array_almost_equal(clf.feature_log_prob_, (num - denom))
def test_bnb():
# Tests that BernoulliNB when alpha=1.0 gives the same values as
# those given for the toy example in Manning, Raghavan, and
# Schuetze's "Introduction to Information Retrieval" book:
# http://nlp.stanford.edu/IR-book/html/htmledition/the-bernoulli-model-1.html
# Training data points are:
# Chinese Beijing Chinese (class: China)
# Chinese Chinese Shanghai (class: China)
# Chinese Macao (class: China)
# Tokyo Japan Chinese (class: Japan)
# Features are Beijing, Chinese, Japan, Macao, Shanghai, and Tokyo
X = np.array([[1, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 1, 0, 1, 0, 0],
[0, 1, 1, 0, 0, 1]])
# Classes are China (0), Japan (1)
Y = np.array([0, 0, 0, 1])
# Fit BernoulliNB w/ alpha = 1.0
clf = BernoulliNB(alpha=1.0)
clf.fit(X, Y)
# Check the class prior is correct
class_prior = np.array([0.75, 0.25])
assert_array_almost_equal(np.exp(clf.class_log_prior_), class_prior)
# Check the feature probabilities are correct
feature_prob = np.array([[0.4, 0.8, 0.2, 0.4, 0.4, 0.2],
[1/3.0, 2/3.0, 2/3.0, 1/3.0, 1/3.0, 2/3.0]])
assert_array_almost_equal(np.exp(clf.feature_log_prob_), feature_prob)
# Testing data point is:
# Chinese Chinese Chinese Tokyo Japan
X_test = np.array([[0, 1, 1, 0, 0, 1]])
# Check the predictive probabilities are correct
unnorm_predict_proba = np.array([[0.005183999999999999,
0.02194787379972565]])
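# (Sanity check, spelled out here for readability: under the Bernoulli model
# the China entry is P(China) * prod_i [p_i if x_i else 1 - p_i]
#   = 0.75 * 0.6 * 0.8 * 0.2 * 0.6 * 0.6 * 0.2 ~= 0.005184,
# using the China row of feature_prob above and the X_test features.)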
predict_proba = unnorm_predict_proba / np.sum(unnorm_predict_proba)
assert_array_almost_equal(clf.predict_proba(X_test), predict_proba)
def test_naive_bayes_scale_invariance():
# Scaling the data should not change the prediction results
iris = load_iris()
X, y = iris.data, iris.target
labels = [GaussianNB().fit(f * X, y).predict(f * X)
for f in [1E-10, 1, 1E10]]
assert_array_equal(labels[0], labels[1])
assert_array_equal(labels[1], labels[2])
def test_alpha():
# Setting alpha=0 should not output nan results when p(x_i|y_j)=0 occurs
X = np.array([[1, 0], [1, 1]])
y = np.array([0, 1])
nb = BernoulliNB(alpha=0.)
assert_warns(UserWarning, nb.partial_fit, X, y, classes=[0, 1])
assert_warns(UserWarning, nb.fit, X, y)
prob = np.array([[1, 0], [0, 1]])
assert_array_almost_equal(nb.predict_proba(X), prob)
nb = MultinomialNB(alpha=0.)
assert_warns(UserWarning, nb.partial_fit, X, y, classes=[0, 1])
assert_warns(UserWarning, nb.fit, X, y)
prob = np.array([[2./3, 1./3], [0, 1]])
assert_array_almost_equal(nb.predict_proba(X), prob)
# Test sparse X
X = scipy.sparse.csr_matrix(X)
nb = BernoulliNB(alpha=0.)
assert_warns(UserWarning, nb.fit, X, y)
prob = np.array([[1, 0], [0, 1]])
assert_array_almost_equal(nb.predict_proba(X), prob)
nb = MultinomialNB(alpha=0.)
assert_warns(UserWarning, nb.fit, X, y)
prob = np.array([[2./3, 1./3], [0, 1]])
assert_array_almost_equal(nb.predict_proba(X), prob)
# Test for alpha < 0
X = np.array([[1, 0], [1, 1]])
y = np.array([0, 1])
expected_msg = ('Smoothing parameter alpha = -1.0e-01. '
'alpha should be > 0.')
b_nb = BernoulliNB(alpha=-0.1)
m_nb = MultinomialNB(alpha=-0.1)
assert_raise_message(ValueError, expected_msg, b_nb.fit, X, y)
assert_raise_message(ValueError, expected_msg, m_nb.fit, X, y)
b_nb = BernoulliNB(alpha=-0.1)
m_nb = MultinomialNB(alpha=-0.1)
assert_raise_message(ValueError, expected_msg, b_nb.partial_fit,
X, y, classes=[0, 1])
assert_raise_message(ValueError, expected_msg, m_nb.partial_fit,
X, y, classes=[0, 1])
|
mit
|
blond-admin/BLonD
|
blond/input_parameters/ring_options.py
|
2
|
22443
|
# coding: utf8
# Copyright 2014-2017 CERN. This software is distributed under the
# terms of the GNU General Public License version 3 (GPL Version 3),
# copied verbatim in the file LICENSE.md.
# In applying this license, CERN does not waive the privileges and immunities
# granted to it by virtue of its status as an Intergovernmental Organization or
# submit itself to any jurisdiction.
# Project website: http://blond.web.cern.ch/
'''
**Function(s) for pre-processing input data**
:Authors: **Helga Timko**, **Alexandre Lasheen**, **Danilo Quartullo**,
**Simon Albright**
'''
from __future__ import division
from builtins import str, range
import numpy as np
import matplotlib.pyplot as plt
from scipy.constants import c
from scipy.interpolate import splrep, splev
from ..plots.plot import fig_folder
class RingOptions(object):
r""" Class to preprocess the synchronous data for Ring, interpolating it to
every turn.
Parameters
----------
interpolation : str
Interpolation options for the data points. Available options are
'linear' (default), 'cubic', and 'derivative'
smoothing : float
Smoothing value for 'cubic' interpolation
flat_bottom : int
Number of turns to be added on flat bottom; default is 0. Constant
extrapolation is used for the synchronous data
flat_top : int
Number of turns to be added on flat top; default is 0. Constant
extrapolation is used for the synchronous data
t_start : int
Starting index from which the time array input should be taken into
account; default is 0
t_end : int
Last index up to which the time array input should be taken into
account; default is -1
plot : bool
Option to plot interpolated arrays; default is False
figdir : str
Directory to save optional plot; default is 'fig'
figname : str
Figure name to save optional plot; default is 'preprocess_ramp'
sampling : int
Decimation value for plotting; default is 1
"""
def __init__(self, interpolation='linear', smoothing=0, flat_bottom=0,
flat_top=0, t_start=None, t_end=None, plot=False,
figdir='fig', figname='preprocess_ramp', sampling=1):
if interpolation in ['linear', 'cubic', 'derivative']:
self.interpolation = str(interpolation)
else:
#InputDataError
raise RuntimeError("ERROR: Interpolation scheme in " +
"PreprocessRamp not recognised. Aborting...")
self.smoothing = float(smoothing)
if flat_bottom < 0:
#MomentumError
raise RuntimeError("ERROR: flat_bottom value in PreprocessRamp" +
" not recognised. Aborting...")
else:
self.flat_bottom = int(flat_bottom)
if flat_top < 0:
#MomentumError
raise RuntimeError("ERROR: flat_top value in PreprocessRamp" +
" not recognised. Aborting...")
else:
self.flat_top = int(flat_top)
self.t_start = t_start
self.t_end = t_end
if (plot is True) or (plot is False):
self.plot = bool(plot)
else:
#TypeError
raise RuntimeError("ERROR: plot value in PreprocessRamp" +
" not recognised. Aborting...")
self.figdir = str(figdir)
self.figname = str(figname)
if sampling > 0:
self.sampling = int(sampling)
else:
#TypeError
raise RuntimeError("ERROR: sampling value in PreprocessRamp" +
" not recognised. Aborting...")
def reshape_data(self, input_data, n_turns, n_sections,
interp_time='t_rev', input_to_momentum=False,
synchronous_data_type='momentum', mass=None, charge=None,
circumference=None, bending_radius=None):
r"""Checks whether the user input is consistent with the expectation
for the Ring object. The possibilities are detailed in the documentation
of the Ring object.
Parameters
----------
input_data : Ring.synchronous_data, Ring.alpha_0,1,2
Main input data to reshape
n_turns : Ring.n_turns
Number of turns of the simulation. Note that if input_data is
passed as a tuple it is expected to be a program; in that case
the number of turns may not correspond to the input one and
will be overwritten
n_sections : Ring.n_sections
The number of sections of the ring. The simulation is stopped
if the input_data shape does not correspond to the expected number
of sections.
interp_time : str or float or float array [n_turns+1]
Optional : defines the time on which the program will be
interpolated. If 't_rev' is passed and if the input_data is
momentum (see input_to_momentum option) the momentum program
is interpolated on the revolution period (see preprocess()
function). If a float or a float array is passed, the program
is interpolated on that input ; default is 't_rev'
input_to_momentum : bool
Optional : flags if the input_data is the momentum program, the
options defined below become necessary for conversion
synchronous_data_type : str
Optional : to be passed to the convert_data function if
input_to_momentum ; default is 'momentum'
mass : Ring.Particle.mass
Optional : the mass of the particles in [eV/c**2] ; default is None
charge : Ring.Particle.charge
Optional : the charge of the particles in units of [e] ;
default is None
circumference : Ring.circumference
Optional : the circumference of the ring ; default is None
bending_radius : Ring.bending_radius
Optional : the bending radius of magnets ; default is None
Returns
-------
output_data
Returns the data with the adequate shape for the Ring object
"""
# TO BE IMPLEMENTED: if you pass a filename the function reads the file
# and reshapes the data
if isinstance(input_data, str):
pass
# If single float, expands the value to match the input number of turns
# and sections
if isinstance(input_data, float) or isinstance(input_data, int):
input_data = float(input_data)
if input_to_momentum:
input_data = convert_data(input_data, mass, charge,
synchronous_data_type,
bending_radius)
output_data = input_data * np.ones((n_sections, n_turns+1))
# If tuple, separate time and synchronous data and check data
elif isinstance(input_data, tuple):
output_data = []
# If there is only one section, it is expected that the user passes
# a tuple with (time, data). However, the user can also pass a
# tuple whose size is the number of sections, as ((time, data), ),
# and the if condition below takes this into account
if (n_sections == 1) and (len(input_data) > 1):
input_data = (input_data, )
if len(input_data) != n_sections:
#InputDataError
raise RuntimeError("ERROR in Ring: the input data " +
"does not match the number of sections")
# Loops over all the sections to interpolate the programs and appends
# the results to the output_data list, which is afterwards
# converted to a numpy array
for index_section in range(n_sections):
input_data_time = input_data[index_section][0]
input_data_values = input_data[index_section][1]
if input_to_momentum:
input_data_values = convert_data(input_data_values, mass,
charge,
synchronous_data_type,
bending_radius)
if len(input_data_time) \
!= len(input_data_values):
#InputDataError
raise RuntimeError("ERROR in Ring: synchronous data " +
"does not match the time data")
if input_to_momentum and (interp_time == 't_rev'):
output_data.append(self.preprocess(
mass,
circumference,
input_data_time,
input_data_values)[1])
elif isinstance(interp_time, float) or \
isinstance(interp_time, int):
interp_time = float(interp_time)
interp_time = np.arange(
input_data_time[0],
input_data_time[-1],
interp_time)
output_data.append(np.interp(
interp_time,
input_data_time,
input_data_values))
elif isinstance(interp_time, np.ndarray):
output_data.append(np.interp(
interp_time,
input_data_time,
input_data_values))
output_data = np.array(output_data, ndmin=2, dtype=float)
# If array/list, compares with the input number of turns and
# if synchronous_data is a single value converts it into a (n_turns+1)
# array
elif isinstance(input_data, np.ndarray) or \
isinstance(input_data, list):
input_data = np.array(input_data, ndmin=2, dtype=float)
if input_to_momentum:
input_data = convert_data(input_data, mass, charge,
synchronous_data_type,
bending_radius)
output_data = np.zeros((n_sections, n_turns+1), dtype=float)
# If the number of points is exactly n_sections, this means that the
# program for each section is a single constant value; the array is
# reshaped to [n_sections, 1] so that the expansion below succeeds
if input_data.size == n_sections:
input_data = input_data.reshape((n_sections, 1))
if len(input_data) != n_sections:
#InputDataError
raise RuntimeError("ERROR in Ring: the input data " +
"does not match the number of sections")
for index_section in range(len(input_data)):
if len(input_data[index_section]) == 1:
output_data[index_section] = input_data[index_section] * \
np.ones(n_turns+1)
elif len(input_data[index_section]) == (n_turns+1):
output_data[index_section] = np.array(
input_data[index_section])
else:
#InputDataError
raise RuntimeError("ERROR in Ring: The input data " +
"does not match the proper length " +
"(n_turns+1)")
return output_data
def preprocess(self, mass, circumference, time, momentum):
r"""Function to pre-process acceleration ramp data, interpolating it to
every turn. Currently it works only if the number of RF sections is
equal to one, to be extended for multiple RF sections.
Parameters
----------
mass : float
Particle mass [eV]
circumference : float
Ring circumference [m]
time : float array
Time points [s] corresponding to momentum data
momentum : float array
Particle momentum [eV/c]
Returns
-------
float array
Cumulative time [s]
float array
Interpolated momentum [eV/c]
"""
# Some checks on the options
if ((self.t_start is not None) and (self.t_start < time[0])) or \
((self.t_end is not None) and (self.t_end > time[-1])):
#InputDataError
raise RuntimeError("ERROR: [t_start, t_end] should be " +
"included in the passed time array.")
# Obtain flat bottom data, extrapolate to constant
beta_0 = np.sqrt(1/(1 + (mass/momentum[0])**2))
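# (beta = p / E = 1 / sqrt(1 + (m/p)**2), the relativistic velocity factor)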
T0 = circumference/(beta_0*c) # Initial revolution period [s]
shift = time[0] - self.flat_bottom*T0
time_interp = shift + T0*np.arange(0, self.flat_bottom+1)
beta_interp = beta_0*np.ones(self.flat_bottom+1)
momentum_interp = momentum[0]*np.ones(self.flat_bottom+1)
time_interp = time_interp.tolist()
beta_interp = beta_interp.tolist()
momentum_interp = momentum_interp.tolist()
time_start_ramp = np.max(time[momentum == momentum[0]])
time_end_ramp = np.min(time[momentum == momentum[-1]])
# Interpolate data recursively
if self.interpolation == 'linear':
time_interp.append(time_interp[-1]
+ circumference/(beta_interp[0]*c))
i = self.flat_bottom
for k in range(1, len(time)):
while time_interp[i+1] <= time[k]:
momentum_interp.append(
momentum[k-1] + (momentum[k] - momentum[k-1]) *
(time_interp[i+1] - time[k-1]) /
(time[k] - time[k-1]))
beta_interp.append(
np.sqrt(1/(1 + (mass/momentum_interp[i+1])**2)))
time_interp.append(
time_interp[i+1] + circumference/(beta_interp[i+1]*c))
i += 1
elif self.interpolation == 'cubic':
interp_function_momentum = splrep(
time[(time >= time_start_ramp) * (time <= time_end_ramp)],
momentum[(time >= time_start_ramp) * (time <= time_end_ramp)],
s=self.smoothing)
i = self.flat_bottom
time_interp.append(
time_interp[-1] + circumference / (beta_interp[0]*c))
while time_interp[i] <= time[-1]:
if (time_interp[i+1] < time_start_ramp):
momentum_interp.append(momentum[0])
beta_interp.append(
np.sqrt(1/(1 + (mass/momentum_interp[i+1])**2)))
time_interp.append(
time_interp[i+1] + circumference/(beta_interp[i+1]*c))
elif (time_interp[i+1] > time_end_ramp):
momentum_interp.append(momentum[-1])
beta_interp.append(
np.sqrt(1/(1 + (mass/momentum_interp[i+1])**2)))
time_interp.append(
time_interp[i+1] + circumference/(beta_interp[i+1]*c))
else:
momentum_interp.append(
splev(time_interp[i+1], interp_function_momentum))
beta_interp.append(
np.sqrt(1/(1 + (mass/momentum_interp[i+1])**2)))
time_interp.append(
time_interp[i+1] + circumference/(beta_interp[i+1]*c))
i += 1
# Interpolate momentum in 1st derivative to maintain smooth B-dot
elif self.interpolation == 'derivative':
momentum_initial = momentum_interp[0]
momentum_derivative = np.gradient(momentum)/np.gradient(time)
momentum_derivative_interp = [0]*self.flat_bottom + \
[momentum_derivative[0]]
integral_point = momentum_initial
i = self.flat_bottom
time_interp.append(
time_interp[-1] + circumference/(beta_interp[0]*c))
while time_interp[i] <= time[-1]:
derivative_point = np.interp(time_interp[i+1], time,
momentum_derivative)
momentum_derivative_interp.append(derivative_point)
integral_point += (time_interp[i+1] - time_interp[i]) \
* derivative_point
momentum_interp.append(integral_point)
beta_interp.append(
np.sqrt(1/(1 + (mass/momentum_interp[i+1])**2)))
time_interp.append(
time_interp[i+1] + circumference/(beta_interp[i+1]*c))
i += 1
# Adjust result to get flat top energy correct as derivation and
# integration leads to ~10^-8 error in flat top momentum
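# (i.e. momentum_interp is rescaled affinely so that its first and last
# points match momentum[0] and momentum[-1] exactly)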
momentum_interp = np.asarray(momentum_interp)
momentum_interp -= momentum_interp[0]
momentum_interp /= momentum_interp[-1]
momentum_interp *= momentum[-1] - momentum[0]
momentum_interp += momentum[0]
time_interp.pop()
time_interp = np.asarray(time_interp)
beta_interp = np.asarray(beta_interp)
momentum_interp = np.asarray(momentum_interp)
# Obtain flat top data, extrapolate to constant
if self.flat_top > 0:
time_interp = np.append(
time_interp,
time_interp[-1] + circumference*np.arange(1, self.flat_top+1)
/ (beta_interp[-1]*c))
beta_interp = np.append(
beta_interp, beta_interp[-1]*np.ones(self.flat_top))
momentum_interp = np.append(
momentum_interp,
momentum_interp[-1]*np.ones(self.flat_top))
# Cutting the input momentum on the desired cycle time
if self.t_start is not None:
initial_index = np.min(np.where(time_interp >= self.t_start)[0])
else:
initial_index = 0
if self.t_end is not None:
final_index = np.max(np.where(time_interp <= self.t_end)[0])+1
else:
final_index = len(time_interp)
time_interp = time_interp[initial_index:final_index]
momentum_interp = momentum_interp[initial_index:final_index]
if self.plot:
# Directory where longitudinal_plots will be stored
fig_folder(self.figdir)
# Plot
plt.figure(1, figsize=(8, 6))
ax = plt.axes([0.15, 0.1, 0.8, 0.8])
ax.plot(time_interp[::self.sampling],
momentum_interp[::self.sampling],
label='Interpolated momentum')
ax.plot(time, momentum, '.', label='input momentum', color='r',
markersize=0.5)
ax.set_xlabel("Time [s]")
ax.set_ylabel("p [eV]")
ax.ticklabel_format(style='sci', axis='x', scilimits=(0, 0))
ax.ticklabel_format(style='sci', axis='y', scilimits=(0, 0))
ax.legend = plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3,
ncol=2, mode="expand", borderaxespad=0.)
# Save figure
fign = self.figdir + '/preprocess_' + self.figname
plt.savefig(fign)
plt.clf()
return time_interp, momentum_interp
def convert_data(synchronous_data, mass, charge,
synchronous_data_type='momentum', bending_radius=None):
""" Function to convert synchronous data (i.e. energy program of the
synchrotron) into momentum.
Parameters
----------
synchronous_data : float array
The synchronous data to be converted to momentum
mass : float or Particle.mass
The mass of the particles in [eV/c**2]
charge : int or Particle.charge
The charge of the particles in units of [e]
synchronous_data_type : str
Type of input for the synchronous data ; can be 'momentum',
'total energy', 'kinetic energy' or 'bending field' (last case
requires bending_radius to be defined)
bending_radius : float
Bending radius in [m] in case synchronous_data_type is
'bending field'
Returns
-------
momentum : float array
The input synchronous_data converted into momentum [eV/c]
"""
if synchronous_data_type == 'momentum':
momentum = synchronous_data
elif synchronous_data_type == 'total energy':
momentum = np.sqrt(synchronous_data**2 - mass**2)
elif synchronous_data_type == 'kinetic energy':
momentum = np.sqrt((synchronous_data+mass)**2 - mass**2)
elif synchronous_data_type == 'bending field':
if bending_radius is None:
#InputDataError
raise RuntimeError("ERROR in Ring: bending_radius is not " +
"defined and is required to compute " +
"momentum")
momentum = synchronous_data*bending_radius*charge*c
else:
#InputDataError
raise RuntimeError("ERROR in Ring: Synchronous data" +
" type not recognized!")
return momentum
def load_data(filename, ignore=0, delimiter=None):
r"""Helper function to load column-by-column data from a txt file to numpy
arrays.
Parameters
----------
filename : str
Name of the file containing the data.
ignore : int
Number of lines to ignore from the head of the file.
delimiter : str
Delimiting character between columns.
Returns
-------
list of arrays
Input data, column by column.
"""
# delimiter=None (whitespace-separated) must be passed through unchanged;
# wrapping it in str() would turn it into the literal string 'None'
data = np.loadtxt(str(filename), skiprows=int(ignore),
delimiter=None if delimiter is None else str(delimiter))
return [np.ascontiguousarray(data[:, i]) for i in range(len(data[0]))]
|
gpl-3.0
|
yonglehou/scikit-learn
|
examples/cluster/plot_kmeans_stability_low_dim_dense.py
|
338
|
4324
|
"""
============================================================
Empirical evaluation of the impact of k-means initialization
============================================================
Evaluate the ability of k-means initialization strategies to make
the algorithm's convergence robust, as measured by the relative standard
deviation of the inertia of the clustering (i.e. the sum of distances
to the nearest cluster center).
The first plot shows the best inertia reached for each combination
of the model (``KMeans`` or ``MiniBatchKMeans``) and the init method
(``init="random"`` or ``init="kmeans++"``) for increasing values of the
``n_init`` parameter that controls the number of initializations.
The second plot demonstrates a single run of the ``MiniBatchKMeans``
estimator using ``init="random"`` and ``n_init=1``. This run leads to
a bad convergence (local optimum), with estimated centers stuck
between ground truth clusters.
The dataset used for evaluation is a 2D grid of widely spaced isotropic
Gaussian clusters.
"""
print(__doc__)
# Author: Olivier Grisel <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from sklearn.utils import shuffle
from sklearn.utils import check_random_state
from sklearn.cluster import MiniBatchKMeans
from sklearn.cluster import KMeans
random_state = np.random.RandomState(0)
# Number of runs (with randomly generated datasets) for each strategy, so as
# to be able to compute an estimate of the standard deviation
n_runs = 5
# k-means models can do several random inits so as to be able to trade
# CPU time for convergence robustness
n_init_range = np.array([1, 5, 10, 15, 20])
# Datasets generation parameters
n_samples_per_center = 100
grid_size = 3
scale = 0.1
n_clusters = grid_size ** 2
def make_data(random_state, n_samples_per_center, grid_size, scale):
random_state = check_random_state(random_state)
centers = np.array([[i, j]
for i in range(grid_size)
for j in range(grid_size)])
n_clusters_true, n_features = centers.shape
noise = random_state.normal(
scale=scale, size=(n_samples_per_center, centers.shape[1]))
X = np.concatenate([c + noise for c in centers])
y = np.concatenate([[i] * n_samples_per_center
for i in range(n_clusters_true)])
return shuffle(X, y, random_state=random_state)
# Part 1: Quantitative evaluation of various init methods
fig = plt.figure()
plots = []
legends = []
cases = [
(KMeans, 'k-means++', {}),
(KMeans, 'random', {}),
(MiniBatchKMeans, 'k-means++', {'max_no_improvement': 3}),
(MiniBatchKMeans, 'random', {'max_no_improvement': 3, 'init_size': 500}),
]
for factory, init, params in cases:
print("Evaluation of %s with %s init" % (factory.__name__, init))
inertia = np.empty((len(n_init_range), n_runs))
for run_id in range(n_runs):
X, y = make_data(run_id, n_samples_per_center, grid_size, scale)
for i, n_init in enumerate(n_init_range):
km = factory(n_clusters=n_clusters, init=init, random_state=run_id,
n_init=n_init, **params).fit(X)
inertia[i, run_id] = km.inertia_
p = plt.errorbar(n_init_range, inertia.mean(axis=1), inertia.std(axis=1))
plots.append(p[0])
legends.append("%s with %s init" % (factory.__name__, init))
plt.xlabel('n_init')
plt.ylabel('inertia')
plt.legend(plots, legends)
plt.title("Mean inertia for various k-means init across %d runs" % n_runs)
# Part 2: Qualitative visual inspection of the convergence
X, y = make_data(random_state, n_samples_per_center, grid_size, scale)
km = MiniBatchKMeans(n_clusters=n_clusters, init='random', n_init=1,
random_state=random_state).fit(X)
fig = plt.figure()
for k in range(n_clusters):
my_members = km.labels_ == k
color = cm.spectral(float(k) / n_clusters, 1)
plt.plot(X[my_members, 0], X[my_members, 1], 'o', marker='.', c=color)
cluster_center = km.cluster_centers_[k]
plt.plot(cluster_center[0], cluster_center[1], 'o',
markerfacecolor=color, markeredgecolor='k', markersize=6)
plt.title("Example cluster allocation with a single random init\n"
"with MiniBatchKMeans")
plt.show()
|
bsd-3-clause
|
xiaoxiamii/scikit-learn
|
sklearn/feature_selection/variance_threshold.py
|
238
|
2594
|
# Author: Lars Buitinck <[email protected]>
# License: 3-clause BSD
import numpy as np
from ..base import BaseEstimator
from .base import SelectorMixin
from ..utils import check_array
from ..utils.sparsefuncs import mean_variance_axis
from ..utils.validation import check_is_fitted
class VarianceThreshold(BaseEstimator, SelectorMixin):
"""Feature selector that removes all low-variance features.
This feature selection algorithm looks only at the features (X), not the
desired outputs (y), and can thus be used for unsupervised learning.
Read more in the :ref:`User Guide <variance_threshold>`.
Parameters
----------
threshold : float, optional
Features with a training-set variance lower than this threshold will
be removed. The default is to keep all features with non-zero variance,
i.e. remove the features that have the same value in all samples.
Attributes
----------
variances_ : array, shape (n_features,)
Variances of individual features.
Examples
--------
The following dataset has integer features, two of which are the same
in every sample. These are removed with the default setting for threshold::
>>> X = [[0, 2, 0, 3], [0, 1, 4, 3], [0, 1, 1, 3]]
>>> selector = VarianceThreshold()
>>> selector.fit_transform(X)
array([[2, 0],
[1, 4],
[1, 1]])
"""
def __init__(self, threshold=0.):
self.threshold = threshold
def fit(self, X, y=None):
"""Learn empirical variances from X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Sample vectors from which to compute variances.
y : any
Ignored. This parameter exists only for compatibility with
sklearn.pipeline.Pipeline.
Returns
-------
self
"""
X = check_array(X, ('csr', 'csc'), dtype=np.float64)
if hasattr(X, "toarray"): # sparse matrix
_, self.variances_ = mean_variance_axis(X, axis=0)
else:
self.variances_ = np.var(X, axis=0)
if np.all(self.variances_ <= self.threshold):
msg = "No feature in X meets the variance threshold {0:.5f}"
if X.shape[0] == 1:
msg += " (X contains only one sample)"
raise ValueError(msg.format(self.threshold))
return self
def _get_support_mask(self):
check_is_fitted(self, 'variances_')
return self.variances_ > self.threshold
|
bsd-3-clause
|
shahankhatch/scikit-learn
|
examples/linear_model/plot_logistic_l1_l2_sparsity.py
|
384
|
2601
|
"""
==============================================
L1 Penalty and Sparsity in Logistic Regression
==============================================
Comparison of the sparsity (percentage of zero coefficients) of solutions when
the L1 and L2 penalties are used for different values of C. We can see that large
values of C give more freedom to the model. Conversely, smaller values of C
constrain the model more. In the L1 penalty case, this leads to sparser
solutions.
We classify 8x8 images of digits into two classes: 0-4 against 5-9.
The visualization shows coefficients of the models for varying C.
"""
print(__doc__)
# Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Andreas Mueller <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression
from sklearn import datasets
from sklearn.preprocessing import StandardScaler
digits = datasets.load_digits()
X, y = digits.data, digits.target
X = StandardScaler().fit_transform(X)
# classify small against large digits
y = (y > 4).astype(np.int)
# Set regularization parameter
for i, C in enumerate((100, 1, 0.01)):
# turn down tolerance for short training time
clf_l1_LR = LogisticRegression(C=C, penalty='l1', tol=0.01)
clf_l2_LR = LogisticRegression(C=C, penalty='l2', tol=0.01)
clf_l1_LR.fit(X, y)
clf_l2_LR.fit(X, y)
coef_l1_LR = clf_l1_LR.coef_.ravel()
coef_l2_LR = clf_l2_LR.coef_.ravel()
# coef_l1_LR contains zeros due to the
# L1 sparsity inducing norm
sparsity_l1_LR = np.mean(coef_l1_LR == 0) * 100
sparsity_l2_LR = np.mean(coef_l2_LR == 0) * 100
print("C=%.2f" % C)
print("Sparsity with L1 penalty: %.2f%%" % sparsity_l1_LR)
print("score with L1 penalty: %.4f" % clf_l1_LR.score(X, y))
print("Sparsity with L2 penalty: %.2f%%" % sparsity_l2_LR)
print("score with L2 penalty: %.4f" % clf_l2_LR.score(X, y))
l1_plot = plt.subplot(3, 2, 2 * i + 1)
l2_plot = plt.subplot(3, 2, 2 * (i + 1))
if i == 0:
l1_plot.set_title("L1 penalty")
l2_plot.set_title("L2 penalty")
l1_plot.imshow(np.abs(coef_l1_LR.reshape(8, 8)), interpolation='nearest',
cmap='binary', vmax=1, vmin=0)
l2_plot.imshow(np.abs(coef_l2_LR.reshape(8, 8)), interpolation='nearest',
cmap='binary', vmax=1, vmin=0)
plt.text(-8, 3, "C = %.2f" % C)
l1_plot.set_xticks(())
l1_plot.set_yticks(())
l2_plot.set_xticks(())
l2_plot.set_yticks(())
plt.show()
|
bsd-3-clause
|
zuku1985/scikit-learn
|
sklearn/linear_model/tests/test_ransac.py
|
17
|
20395
|
import numpy as np
from scipy import sparse
from numpy.testing import assert_equal, assert_raises
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_equal
from sklearn.utils import check_random_state
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises_regexp
from sklearn.linear_model import LinearRegression, RANSACRegressor, Lasso
from sklearn.linear_model.ransac import _dynamic_max_trials
# Generate coordinates of line
X = np.arange(-200, 200)
y = 0.2 * X + 20
data = np.column_stack([X, y])
# Add some faulty data
outliers = np.array((10, 30, 200))
data[outliers[0], :] = (1000, 1000)
data[outliers[1], :] = (-1000, -1000)
data[outliers[2], :] = (-100, -50)
X = data[:, 0][:, np.newaxis]
y = data[:, 1]
def test_ransac_inliers_outliers():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
# Estimate parameters of corrupted data
ransac_estimator.fit(X, y)
# Ground truth / reference inlier mask
ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
).astype(np.bool_)
ref_inlier_mask[outliers] = False
assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
def test_ransac_is_data_valid():
def is_data_valid(X, y):
assert_equal(X.shape[0], 2)
assert_equal(y.shape[0], 2)
return False
rng = np.random.RandomState(0)
X = rng.rand(10, 2)
y = rng.rand(10, 1)
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5,
is_data_valid=is_data_valid,
random_state=0)
assert_raises(ValueError, ransac_estimator.fit, X, y)
def test_ransac_is_model_valid():
def is_model_valid(estimator, X, y):
assert_equal(X.shape[0], 2)
assert_equal(y.shape[0], 2)
return False
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5,
is_model_valid=is_model_valid,
random_state=0)
assert_raises(ValueError, ransac_estimator.fit, X, y)
def test_ransac_max_trials():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, max_trials=0,
random_state=0)
assert_raises(ValueError, ransac_estimator.fit, X, y)
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, max_trials=11,
random_state=0)
assert getattr(ransac_estimator, 'n_trials_', None) is None
ransac_estimator.fit(X, y)
assert_equal(ransac_estimator.n_trials_, 2)
def test_ransac_stop_n_inliers():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, stop_n_inliers=2,
random_state=0)
ransac_estimator.fit(X, y)
assert_equal(ransac_estimator.n_trials_, 1)
def test_ransac_stop_score():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, stop_score=0,
random_state=0)
ransac_estimator.fit(X, y)
assert_equal(ransac_estimator.n_trials_, 1)
def test_ransac_score():
X = np.arange(100)[:, None]
y = np.zeros((100, ))
y[0] = 1
y[1] = 100
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=0.5, random_state=0)
ransac_estimator.fit(X, y)
assert_equal(ransac_estimator.score(X[2:], y[2:]), 1)
assert_less(ransac_estimator.score(X[:2], y[:2]), 1)
def test_ransac_predict():
X = np.arange(100)[:, None]
y = np.zeros((100, ))
y[0] = 1
y[1] = 100
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=0.5, random_state=0)
ransac_estimator.fit(X, y)
assert_equal(ransac_estimator.predict(X), np.zeros(100))
def test_ransac_resid_thresh_no_inliers():
# When residual_threshold=0.0 there are no inliers and a
# ValueError with a message should be raised
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=0.0, random_state=0,
max_trials=5)
msg = ("RANSAC could not find a valid consensus set")
assert_raises_regexp(ValueError, msg, ransac_estimator.fit, X, y)
assert_equal(ransac_estimator.n_skips_no_inliers_, 5)
assert_equal(ransac_estimator.n_skips_invalid_data_, 0)
assert_equal(ransac_estimator.n_skips_invalid_model_, 0)
def test_ransac_no_valid_data():
def is_data_valid(X, y):
return False
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator,
is_data_valid=is_data_valid,
max_trials=5)
msg = ("RANSAC could not find a valid consensus set")
assert_raises_regexp(ValueError, msg, ransac_estimator.fit, X, y)
assert_equal(ransac_estimator.n_skips_no_inliers_, 0)
assert_equal(ransac_estimator.n_skips_invalid_data_, 5)
assert_equal(ransac_estimator.n_skips_invalid_model_, 0)
def test_ransac_no_valid_model():
def is_model_valid(estimator, X, y):
return False
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator,
is_model_valid=is_model_valid,
max_trials=5)
msg = ("RANSAC could not find a valid consensus set")
assert_raises_regexp(ValueError, msg, ransac_estimator.fit, X, y)
assert_equal(ransac_estimator.n_skips_no_inliers_, 0)
assert_equal(ransac_estimator.n_skips_invalid_data_, 0)
assert_equal(ransac_estimator.n_skips_invalid_model_, 5)
def test_ransac_exceed_max_skips():
def is_data_valid(X, y):
return False
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator,
is_data_valid=is_data_valid,
max_trials=5,
max_skips=3)
msg = ("RANSAC skipped more iterations than `max_skips`")
assert_raises_regexp(ValueError, msg, ransac_estimator.fit, X, y)
assert_equal(ransac_estimator.n_skips_no_inliers_, 0)
assert_equal(ransac_estimator.n_skips_invalid_data_, 4)
assert_equal(ransac_estimator.n_skips_invalid_model_, 0)
def test_ransac_warn_exceed_max_skips():
global cause_skip
cause_skip = False
def is_data_valid(X, y):
global cause_skip
if not cause_skip:
cause_skip = True
return True
else:
return False
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator,
is_data_valid=is_data_valid,
max_skips=3,
max_trials=5)
assert_warns(UserWarning, ransac_estimator.fit, X, y)
assert_equal(ransac_estimator.n_skips_no_inliers_, 0)
assert_equal(ransac_estimator.n_skips_invalid_data_, 4)
assert_equal(ransac_estimator.n_skips_invalid_model_, 0)
def test_ransac_sparse_coo():
X_sparse = sparse.coo_matrix(X)
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
ransac_estimator.fit(X_sparse, y)
ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
).astype(np.bool_)
ref_inlier_mask[outliers] = False
assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
def test_ransac_sparse_csr():
X_sparse = sparse.csr_matrix(X)
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
ransac_estimator.fit(X_sparse, y)
ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
).astype(np.bool_)
ref_inlier_mask[outliers] = False
assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
def test_ransac_sparse_csc():
X_sparse = sparse.csc_matrix(X)
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
ransac_estimator.fit(X_sparse, y)
ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
).astype(np.bool_)
ref_inlier_mask[outliers] = False
assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
def test_ransac_none_estimator():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
ransac_none_estimator = RANSACRegressor(None, 2, 5, random_state=0)
ransac_estimator.fit(X, y)
ransac_none_estimator.fit(X, y)
assert_array_almost_equal(ransac_estimator.predict(X),
ransac_none_estimator.predict(X))
def test_ransac_min_n_samples():
base_estimator = LinearRegression()
ransac_estimator1 = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
ransac_estimator2 = RANSACRegressor(base_estimator,
min_samples=2. / X.shape[0],
residual_threshold=5, random_state=0)
ransac_estimator3 = RANSACRegressor(base_estimator, min_samples=-1,
residual_threshold=5, random_state=0)
ransac_estimator4 = RANSACRegressor(base_estimator, min_samples=5.2,
residual_threshold=5, random_state=0)
ransac_estimator5 = RANSACRegressor(base_estimator, min_samples=2.0,
residual_threshold=5, random_state=0)
ransac_estimator6 = RANSACRegressor(base_estimator,
residual_threshold=5, random_state=0)
ransac_estimator7 = RANSACRegressor(base_estimator,
min_samples=X.shape[0] + 1,
residual_threshold=5, random_state=0)
ransac_estimator1.fit(X, y)
ransac_estimator2.fit(X, y)
ransac_estimator5.fit(X, y)
ransac_estimator6.fit(X, y)
assert_array_almost_equal(ransac_estimator1.predict(X),
ransac_estimator2.predict(X))
assert_array_almost_equal(ransac_estimator1.predict(X),
ransac_estimator5.predict(X))
assert_array_almost_equal(ransac_estimator1.predict(X),
ransac_estimator6.predict(X))
assert_raises(ValueError, ransac_estimator3.fit, X, y)
assert_raises(ValueError, ransac_estimator4.fit, X, y)
assert_raises(ValueError, ransac_estimator7.fit, X, y)
def test_ransac_multi_dimensional_targets():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
# 3-D target values
yyy = np.column_stack([y, y, y])
# Estimate parameters of corrupted data
ransac_estimator.fit(X, yyy)
# Ground truth / reference inlier mask
ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
).astype(np.bool_)
ref_inlier_mask[outliers] = False
assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
# XXX: Remove in 0.20
def test_ransac_residual_metric():
residual_metric1 = lambda dy: np.sum(np.abs(dy), axis=1)
residual_metric2 = lambda dy: np.sum(dy ** 2, axis=1)
yyy = np.column_stack([y, y, y])
base_estimator = LinearRegression()
ransac_estimator0 = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
ransac_estimator1 = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0,
residual_metric=residual_metric1)
ransac_estimator2 = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0,
residual_metric=residual_metric2)
# multi-dimensional
ransac_estimator0.fit(X, yyy)
assert_warns(DeprecationWarning, ransac_estimator1.fit, X, yyy)
assert_warns(DeprecationWarning, ransac_estimator2.fit, X, yyy)
assert_array_almost_equal(ransac_estimator0.predict(X),
ransac_estimator1.predict(X))
assert_array_almost_equal(ransac_estimator0.predict(X),
ransac_estimator2.predict(X))
# one-dimensional
ransac_estimator0.fit(X, y)
assert_warns(DeprecationWarning, ransac_estimator2.fit, X, y)
assert_array_almost_equal(ransac_estimator0.predict(X),
ransac_estimator2.predict(X))
def test_ransac_residual_loss():
loss_multi1 = lambda y_true, y_pred: np.sum(np.abs(y_true - y_pred), axis=1)
loss_multi2 = lambda y_true, y_pred: np.sum((y_true - y_pred) ** 2, axis=1)
loss_mono = lambda y_true, y_pred : np.abs(y_true - y_pred)
yyy = np.column_stack([y, y, y])
base_estimator = LinearRegression()
ransac_estimator0 = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
ransac_estimator1 = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0,
loss=loss_multi1)
ransac_estimator2 = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0,
loss=loss_multi2)
# multi-dimensional
ransac_estimator0.fit(X, yyy)
ransac_estimator1.fit(X, yyy)
ransac_estimator2.fit(X, yyy)
assert_array_almost_equal(ransac_estimator0.predict(X),
ransac_estimator1.predict(X))
assert_array_almost_equal(ransac_estimator0.predict(X),
ransac_estimator2.predict(X))
# one-dimensional
ransac_estimator0.fit(X, y)
ransac_estimator2.loss = loss_mono
ransac_estimator2.fit(X, y)
assert_array_almost_equal(ransac_estimator0.predict(X),
ransac_estimator2.predict(X))
ransac_estimator3 = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0,
loss="squared_loss")
ransac_estimator3.fit(X, y)
assert_array_almost_equal(ransac_estimator0.predict(X),
ransac_estimator3.predict(X))
def test_ransac_default_residual_threshold():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
random_state=0)
# Estimate parameters of corrupted data
ransac_estimator.fit(X, y)
# Ground truth / reference inlier mask
ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
).astype(np.bool_)
ref_inlier_mask[outliers] = False
assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
def test_ransac_dynamic_max_trials():
# Numbers hand-calculated and confirmed on page 119 (Table 4.3) in
# Hartley, R.~I. and Zisserman, A., 2004,
# Multiple View Geometry in Computer Vision, Second Edition,
# Cambridge University Press, ISBN: 0521540518
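# As a reference, these values follow from the standard RANSAC trial count
#   N = ceil(log(1 - probability) / log(1 - w**min_samples)),
# with w = n_inliers / n_samples; e.g. for w = 0.95, min_samples = 2 and
# probability = 0.99, log(0.01) / log(1 - 0.95**2) ~= 1.98, hence N = 2.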
# e = 0%, min_samples = X
assert_equal(_dynamic_max_trials(100, 100, 2, 0.99), 1)
# e = 5%, min_samples = 2
assert_equal(_dynamic_max_trials(95, 100, 2, 0.99), 2)
# e = 10%, min_samples = 2
assert_equal(_dynamic_max_trials(90, 100, 2, 0.99), 3)
# e = 30%, min_samples = 2
assert_equal(_dynamic_max_trials(70, 100, 2, 0.99), 7)
# e = 50%, min_samples = 2
assert_equal(_dynamic_max_trials(50, 100, 2, 0.99), 17)
# e = 5%, min_samples = 8
assert_equal(_dynamic_max_trials(95, 100, 8, 0.99), 5)
# e = 10%, min_samples = 8
assert_equal(_dynamic_max_trials(90, 100, 8, 0.99), 9)
# e = 30%, min_samples = 8
assert_equal(_dynamic_max_trials(70, 100, 8, 0.99), 78)
# e = 50%, min_samples = 8
assert_equal(_dynamic_max_trials(50, 100, 8, 0.99), 1177)
# e = 0%, min_samples = 10
assert_equal(_dynamic_max_trials(1, 100, 10, 0), 0)
assert_equal(_dynamic_max_trials(1, 100, 10, 1), float('inf'))
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
stop_probability=-0.1)
assert_raises(ValueError, ransac_estimator.fit, X, y)
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
stop_probability=1.1)
assert_raises(ValueError, ransac_estimator.fit, X, y)
def test_ransac_fit_sample_weight():
ransac_estimator = RANSACRegressor(random_state=0)
n_samples = y.shape[0]
weights = np.ones(n_samples)
ransac_estimator.fit(X, y, weights)
# sanity check
assert_equal(ransac_estimator.inlier_mask_.shape[0], n_samples)
ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
).astype(np.bool_)
ref_inlier_mask[outliers] = False
# check that mask is correct
assert_array_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
# check that fit(X) = fit([X1, X2, X3],sample_weight = [n1, n2, n3]) where
# X = X1 repeated n1 times, X2 repeated n2 times and so forth
random_state = check_random_state(0)
X_ = random_state.randint(0, 200, [10, 1])
y_ = np.ndarray.flatten(0.2 * X_ + 2)
sample_weight = random_state.randint(0, 10, 10)
outlier_X = random_state.randint(0, 1000, [1, 1])
outlier_weight = random_state.randint(0, 10, 1)
outlier_y = random_state.randint(-1000, 0, 1)
X_flat = np.append(np.repeat(X_, sample_weight, axis=0),
np.repeat(outlier_X, outlier_weight, axis=0), axis=0)
y_flat = np.ndarray.flatten(np.append(np.repeat(y_, sample_weight, axis=0),
np.repeat(outlier_y, outlier_weight, axis=0),
axis=0))
ransac_estimator.fit(X_flat, y_flat)
ref_coef_ = ransac_estimator.estimator_.coef_
sample_weight = np.append(sample_weight, outlier_weight)
X_ = np.append(X_, outlier_X, axis=0)
y_ = np.append(y_, outlier_y)
ransac_estimator.fit(X_, y_, sample_weight)
assert_almost_equal(ransac_estimator.estimator_.coef_, ref_coef_)
# check that if base_estimator.fit doesn't support
# sample_weight, raises error
base_estimator = Lasso()
ransac_estimator = RANSACRegressor(base_estimator)
assert_raises(ValueError, ransac_estimator.fit, X, y, weights)
|
bsd-3-clause
|
hugobowne/scikit-learn
|
examples/cluster/plot_digits_linkage.py
|
369
|
2959
|
"""
=============================================================================
Various Agglomerative Clustering on a 2D embedding of digits
=============================================================================
An illustration of various linkage options for agglomerative clustering on
a 2D embedding of the digits dataset.
The goal of this example is to show intuitively how the metrics behave, and
not to find good clusters for the digits. This is why the example works on a
2D embedding.
What this example shows us is the "rich get richer" behavior of
agglomerative clustering, which tends to create uneven cluster sizes.
This behavior is especially pronounced for the average linkage strategy,
which ends up with a couple of singleton clusters.
"""
# Authors: Gael Varoquaux
# License: BSD 3 clause (C) INRIA 2014
print(__doc__)
from time import time
import numpy as np
from scipy import ndimage
from matplotlib import pyplot as plt
from sklearn import manifold, datasets
digits = datasets.load_digits(n_class=10)
X = digits.data
y = digits.target
n_samples, n_features = X.shape
np.random.seed(0)
def nudge_images(X, y):
# Having a larger dataset shows more clearly the behavior of the
# methods, but we multiply the size of the dataset only by 2, as the
# cost of the hierarchical clustering methods is strongly
# super-linear in n_samples
shift = lambda x: ndimage.shift(x.reshape((8, 8)),
.3 * np.random.normal(size=2),
mode='constant',
).ravel()
X = np.concatenate([X, np.apply_along_axis(shift, 1, X)])
Y = np.concatenate([y, y], axis=0)
return X, Y
X, y = nudge_images(X, y)
#----------------------------------------------------------------------
# Visualize the clustering
def plot_clustering(X_red, X, labels, title=None):
x_min, x_max = np.min(X_red, axis=0), np.max(X_red, axis=0)
X_red = (X_red - x_min) / (x_max - x_min)
plt.figure(figsize=(6, 4))
for i in range(X_red.shape[0]):
plt.text(X_red[i, 0], X_red[i, 1], str(y[i]),
color=plt.cm.spectral(labels[i] / 10.),
fontdict={'weight': 'bold', 'size': 9})
plt.xticks([])
plt.yticks([])
if title is not None:
plt.title(title, size=17)
plt.axis('off')
plt.tight_layout()
#----------------------------------------------------------------------
# 2D embedding of the digits dataset
print("Computing embedding")
X_red = manifold.SpectralEmbedding(n_components=2).fit_transform(X)
print("Done.")
from sklearn.cluster import AgglomerativeClustering
for linkage in ('ward', 'average', 'complete'):
clustering = AgglomerativeClustering(linkage=linkage, n_clusters=10)
t0 = time()
clustering.fit(X_red)
print("%s : %.2fs" % (linkage, time() - t0))
plot_clustering(X_red, X, clustering.labels_, "%s linkage" % linkage)
plt.show()
|
bsd-3-clause
|
billy-inn/scikit-learn
|
sklearn/linear_model/omp.py
|
127
|
30417
|
"""Orthogonal matching pursuit algorithms
"""
# Author: Vlad Niculae
#
# License: BSD 3 clause
import warnings
from distutils.version import LooseVersion
import numpy as np
from scipy import linalg
from scipy.linalg.lapack import get_lapack_funcs
from .base import LinearModel, _pre_fit
from ..base import RegressorMixin
from ..utils import as_float_array, check_array, check_X_y
from ..cross_validation import check_cv
from ..externals.joblib import Parallel, delayed
import scipy
solve_triangular_args = {}
if LooseVersion(scipy.__version__) >= LooseVersion('0.12'):
# check_finite=False is an optimization available only in scipy >=0.12
solve_triangular_args = {'check_finite': False}
premature = """ Orthogonal matching pursuit ended prematurely due to linear
dependence in the dictionary. The requested precision might not have been met.
"""
def _cholesky_omp(X, y, n_nonzero_coefs, tol=None, copy_X=True,
return_path=False):
"""Orthogonal Matching Pursuit step using the Cholesky decomposition.
Parameters
----------
X : array, shape (n_samples, n_features)
Input dictionary. Columns are assumed to have unit norm.
y : array, shape (n_samples,)
Input targets
n_nonzero_coefs : int
Targeted number of non-zero elements
tol : float
Targeted squared error, if not None overrides n_nonzero_coefs.
copy_X : bool, optional
Whether the design matrix X must be copied by the algorithm. A false
value is only helpful if X is already Fortran-ordered, otherwise a
copy is made anyway.
return_path : bool, optional. Default: False
Whether to return every value of the nonzero coefficients along the
forward path. Useful for cross-validation.
Returns
-------
gamma : array, shape (n_nonzero_coefs,)
Non-zero elements of the solution
idx : array, shape (n_nonzero_coefs,)
Indices of the positions of the elements in gamma within the solution
vector
coef : array, shape (n_features, n_nonzero_coefs)
The first k values of column k correspond to the coefficient value
for the active features at that step. The lower left triangle contains
garbage. Only returned if ``return_path=True``.
n_active : int
Number of active features at convergence.
"""
if copy_X:
X = X.copy('F')
else: # even if we are allowed to overwrite, still copy it if bad order
X = np.asfortranarray(X)
min_float = np.finfo(X.dtype).eps
nrm2, swap = linalg.get_blas_funcs(('nrm2', 'swap'), (X,))
potrs, = get_lapack_funcs(('potrs',), (X,))
alpha = np.dot(X.T, y)
residual = y
gamma = np.empty(0)
n_active = 0
indices = np.arange(X.shape[1]) # keeping track of swapping
max_features = X.shape[1] if tol is not None else n_nonzero_coefs
if solve_triangular_args:
# new scipy, don't need to initialize because check_finite=False
L = np.empty((max_features, max_features), dtype=X.dtype)
else:
# old scipy, we need the garbage upper triangle to be non-Inf
L = np.zeros((max_features, max_features), dtype=X.dtype)
L[0, 0] = 1.
if return_path:
coefs = np.empty_like(L)
while True:
lam = np.argmax(np.abs(np.dot(X.T, residual)))
if lam < n_active or alpha[lam] ** 2 < min_float:
# atom already selected or inner product too small
warnings.warn(premature, RuntimeWarning, stacklevel=2)
break
if n_active > 0:
# Updates the Cholesky decomposition of X' X
L[n_active, :n_active] = np.dot(X[:, :n_active].T, X[:, lam])
linalg.solve_triangular(L[:n_active, :n_active],
L[n_active, :n_active],
trans=0, lower=1,
overwrite_b=True,
**solve_triangular_args)
v = nrm2(L[n_active, :n_active]) ** 2
if 1 - v <= min_float: # selected atoms are dependent
warnings.warn(premature, RuntimeWarning, stacklevel=2)
break
L[n_active, n_active] = np.sqrt(1 - v)
X.T[n_active], X.T[lam] = swap(X.T[n_active], X.T[lam])
alpha[n_active], alpha[lam] = alpha[lam], alpha[n_active]
indices[n_active], indices[lam] = indices[lam], indices[n_active]
n_active += 1
# solves LL'x = y as a composition of two triangular systems
gamma, _ = potrs(L[:n_active, :n_active], alpha[:n_active], lower=True,
overwrite_b=False)
if return_path:
coefs[:n_active, n_active - 1] = gamma
residual = y - np.dot(X[:, :n_active], gamma)
if tol is not None and nrm2(residual) ** 2 <= tol:
break
elif n_active == max_features:
break
if return_path:
return gamma, indices[:n_active], coefs[:, :n_active], n_active
else:
return gamma, indices[:n_active], n_active
def _gram_omp(Gram, Xy, n_nonzero_coefs, tol_0=None, tol=None,
copy_Gram=True, copy_Xy=True, return_path=False):
"""Orthogonal Matching Pursuit step on a precomputed Gram matrix.
    This function uses the Cholesky decomposition method.
Parameters
----------
Gram : array, shape (n_features, n_features)
Gram matrix of the input data matrix
Xy : array, shape (n_features,)
Input targets
n_nonzero_coefs : int
Targeted number of non-zero elements
tol_0 : float
Squared norm of y, required if tol is not None.
tol : float
Targeted squared error, if not None overrides n_nonzero_coefs.
copy_Gram : bool, optional
Whether the gram matrix must be copied by the algorithm. A false
value is only helpful if it is already Fortran-ordered, otherwise a
copy is made anyway.
copy_Xy : bool, optional
Whether the covariance vector Xy must be copied by the algorithm.
If False, it may be overwritten.
return_path : bool, optional. Default: False
Whether to return every value of the nonzero coefficients along the
forward path. Useful for cross-validation.
Returns
-------
gamma : array, shape (n_nonzero_coefs,)
Non-zero elements of the solution
idx : array, shape (n_nonzero_coefs,)
Indices of the positions of the elements in gamma within the solution
vector
coefs : array, shape (n_features, n_nonzero_coefs)
The first k values of column k correspond to the coefficient value
for the active features at that step. The lower left triangle contains
garbage. Only returned if ``return_path=True``.
n_active : int
Number of active features at convergence.
"""
Gram = Gram.copy('F') if copy_Gram else np.asfortranarray(Gram)
if copy_Xy:
Xy = Xy.copy()
min_float = np.finfo(Gram.dtype).eps
nrm2, swap = linalg.get_blas_funcs(('nrm2', 'swap'), (Gram,))
potrs, = get_lapack_funcs(('potrs',), (Gram,))
indices = np.arange(len(Gram)) # keeping track of swapping
alpha = Xy
tol_curr = tol_0
delta = 0
gamma = np.empty(0)
n_active = 0
max_features = len(Gram) if tol is not None else n_nonzero_coefs
if solve_triangular_args:
# new scipy, don't need to initialize because check_finite=False
L = np.empty((max_features, max_features), dtype=Gram.dtype)
else:
# old scipy, we need the garbage upper triangle to be non-Inf
L = np.zeros((max_features, max_features), dtype=Gram.dtype)
L[0, 0] = 1.
if return_path:
coefs = np.empty_like(L)
while True:
lam = np.argmax(np.abs(alpha))
if lam < n_active or alpha[lam] ** 2 < min_float:
# selected same atom twice, or inner product too small
warnings.warn(premature, RuntimeWarning, stacklevel=3)
break
if n_active > 0:
L[n_active, :n_active] = Gram[lam, :n_active]
linalg.solve_triangular(L[:n_active, :n_active],
L[n_active, :n_active],
trans=0, lower=1,
overwrite_b=True,
**solve_triangular_args)
v = nrm2(L[n_active, :n_active]) ** 2
if 1 - v <= min_float: # selected atoms are dependent
warnings.warn(premature, RuntimeWarning, stacklevel=3)
break
L[n_active, n_active] = np.sqrt(1 - v)
Gram[n_active], Gram[lam] = swap(Gram[n_active], Gram[lam])
Gram.T[n_active], Gram.T[lam] = swap(Gram.T[n_active], Gram.T[lam])
indices[n_active], indices[lam] = indices[lam], indices[n_active]
Xy[n_active], Xy[lam] = Xy[lam], Xy[n_active]
n_active += 1
# solves LL'x = y as a composition of two triangular systems
gamma, _ = potrs(L[:n_active, :n_active], Xy[:n_active], lower=True,
overwrite_b=False)
if return_path:
coefs[:n_active, n_active - 1] = gamma
beta = np.dot(Gram[:, :n_active], gamma)
alpha = Xy - beta
if tol is not None:
tol_curr += delta
delta = np.inner(gamma, beta[:n_active])
tol_curr -= delta
if abs(tol_curr) <= tol:
break
elif n_active == max_features:
break
if return_path:
return gamma, indices[:n_active], coefs[:, :n_active], n_active
else:
return gamma, indices[:n_active], n_active
def orthogonal_mp(X, y, n_nonzero_coefs=None, tol=None, precompute=False,
copy_X=True, return_path=False,
return_n_iter=False):
"""Orthogonal Matching Pursuit (OMP)
Solves n_targets Orthogonal Matching Pursuit problems.
An instance of the problem has the form:
When parametrized by the number of non-zero coefficients using
`n_nonzero_coefs`:
argmin ||y - X\gamma||^2 subject to ||\gamma||_0 <= n_{nonzero coefs}
When parametrized by error using the parameter `tol`:
argmin ||\gamma||_0 subject to ||y - X\gamma||^2 <= tol
Read more in the :ref:`User Guide <omp>`.
Parameters
----------
X : array, shape (n_samples, n_features)
Input data. Columns are assumed to have unit norm.
y : array, shape (n_samples,) or (n_samples, n_targets)
Input targets
n_nonzero_coefs : int
Desired number of non-zero entries in the solution. If None (by
default) this value is set to 10% of n_features.
tol : float
Maximum norm of the residual. If not None, overrides n_nonzero_coefs.
precompute : {True, False, 'auto'},
Whether to perform precomputations. Improves performance when n_targets
or n_samples is very large.
copy_X : bool, optional
Whether the design matrix X must be copied by the algorithm. A false
value is only helpful if X is already Fortran-ordered, otherwise a
copy is made anyway.
return_path : bool, optional. Default: False
Whether to return every value of the nonzero coefficients along the
forward path. Useful for cross-validation.
return_n_iter : bool, optional default False
Whether or not to return the number of iterations.
Returns
-------
coef : array, shape (n_features,) or (n_features, n_targets)
Coefficients of the OMP solution. If `return_path=True`, this contains
the whole coefficient path. In this case its shape is
(n_features, n_features) or (n_features, n_targets, n_features) and
iterating over the last axis yields coefficients in increasing order
of active features.
n_iters : array-like or int
Number of active features across every target. Returned only if
`return_n_iter` is set to True.
See also
--------
OrthogonalMatchingPursuit
orthogonal_mp_gram
lars_path
decomposition.sparse_encode
Notes
-----
Orthogonal matching pursuit was introduced in G. Mallat, Z. Zhang,
Matching pursuits with time-frequency dictionaries, IEEE Transactions on
Signal Processing, Vol. 41, No. 12. (December 1993), pp. 3397-3415.
(http://blanche.polytechnique.fr/~mallat/papiers/MallatPursuit93.pdf)
This implementation is based on Rubinstein, R., Zibulevsky, M. and Elad,
M., Efficient Implementation of the K-SVD Algorithm using Batch Orthogonal
Matching Pursuit Technical Report - CS Technion, April 2008.
http://www.cs.technion.ac.il/~ronrubin/Publications/KSVD-OMP-v2.pdf
"""
X = check_array(X, order='F', copy=copy_X)
copy_X = False
if y.ndim == 1:
y = y.reshape(-1, 1)
y = check_array(y)
if y.shape[1] > 1: # subsequent targets will be affected
copy_X = True
if n_nonzero_coefs is None and tol is None:
# default for n_nonzero_coefs is 0.1 * n_features
# but at least one.
n_nonzero_coefs = max(int(0.1 * X.shape[1]), 1)
if tol is not None and tol < 0:
raise ValueError("Epsilon cannot be negative")
if tol is None and n_nonzero_coefs <= 0:
raise ValueError("The number of atoms must be positive")
if tol is None and n_nonzero_coefs > X.shape[1]:
raise ValueError("The number of atoms cannot be more than the number "
"of features")
if precompute == 'auto':
precompute = X.shape[0] > X.shape[1]
if precompute:
G = np.dot(X.T, X)
G = np.asfortranarray(G)
Xy = np.dot(X.T, y)
if tol is not None:
norms_squared = np.sum((y ** 2), axis=0)
else:
norms_squared = None
return orthogonal_mp_gram(G, Xy, n_nonzero_coefs, tol, norms_squared,
copy_Gram=copy_X, copy_Xy=False,
return_path=return_path)
if return_path:
coef = np.zeros((X.shape[1], y.shape[1], X.shape[1]))
else:
coef = np.zeros((X.shape[1], y.shape[1]))
n_iters = []
for k in range(y.shape[1]):
out = _cholesky_omp(
X, y[:, k], n_nonzero_coefs, tol,
copy_X=copy_X, return_path=return_path)
if return_path:
_, idx, coefs, n_iter = out
coef = coef[:, :, :len(idx)]
for n_active, x in enumerate(coefs.T):
coef[idx[:n_active + 1], k, n_active] = x[:n_active + 1]
else:
x, idx, n_iter = out
coef[idx, k] = x
n_iters.append(n_iter)
if y.shape[1] == 1:
n_iters = n_iters[0]
if return_n_iter:
return np.squeeze(coef), n_iters
else:
return np.squeeze(coef)
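# Editorial note: the following sketch is an illustrative usage example added for
# clarity; it is not part of the original module. It assumes the contract stated
# above (columns of X have unit norm); the dictionary sizes, seed, and active
# atoms below are arbitrary demo choices.
def _orthogonal_mp_usage_sketch():
    """Recover a 3-sparse signal from a random unit-norm dictionary (illustrative)."""
    rng = np.random.RandomState(0)
    X_demo = rng.randn(50, 100)
    X_demo /= np.sqrt(np.sum(X_demo ** 2, axis=0))  # normalize columns to unit norm
    true_coef = np.zeros(100)
    true_coef[[5, 17, 42]] = [1.5, -2.0, 0.7]       # three active atoms
    y_demo = np.dot(X_demo, true_coef)              # noiseless targets
    coef = orthogonal_mp(X_demo, y_demo, n_nonzero_coefs=3)
    return np.flatnonzero(coef)                     # expected support: [5, 17, 42]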
def orthogonal_mp_gram(Gram, Xy, n_nonzero_coefs=None, tol=None,
norms_squared=None, copy_Gram=True,
copy_Xy=True, return_path=False,
return_n_iter=False):
"""Gram Orthogonal Matching Pursuit (OMP)
Solves n_targets Orthogonal Matching Pursuit problems using only
the Gram matrix X.T * X and the product X.T * y.
Read more in the :ref:`User Guide <omp>`.
Parameters
----------
Gram : array, shape (n_features, n_features)
Gram matrix of the input data: X.T * X
Xy : array, shape (n_features,) or (n_features, n_targets)
Input targets multiplied by X: X.T * y
n_nonzero_coefs : int
Desired number of non-zero entries in the solution. If None (by
default) this value is set to 10% of n_features.
tol : float
Maximum norm of the residual. If not None, overrides n_nonzero_coefs.
norms_squared : array-like, shape (n_targets,)
Squared L2 norms of the lines of y. Required if tol is not None.
copy_Gram : bool, optional
Whether the gram matrix must be copied by the algorithm. A false
value is only helpful if it is already Fortran-ordered, otherwise a
copy is made anyway.
copy_Xy : bool, optional
Whether the covariance vector Xy must be copied by the algorithm.
If False, it may be overwritten.
return_path : bool, optional. Default: False
Whether to return every value of the nonzero coefficients along the
forward path. Useful for cross-validation.
return_n_iter : bool, optional default False
Whether or not to return the number of iterations.
Returns
-------
coef : array, shape (n_features,) or (n_features, n_targets)
Coefficients of the OMP solution. If `return_path=True`, this contains
the whole coefficient path. In this case its shape is
(n_features, n_features) or (n_features, n_targets, n_features) and
iterating over the last axis yields coefficients in increasing order
of active features.
n_iters : array-like or int
Number of active features across every target. Returned only if
`return_n_iter` is set to True.
See also
--------
OrthogonalMatchingPursuit
orthogonal_mp
lars_path
decomposition.sparse_encode
Notes
-----
Orthogonal matching pursuit was introduced in G. Mallat, Z. Zhang,
Matching pursuits with time-frequency dictionaries, IEEE Transactions on
Signal Processing, Vol. 41, No. 12. (December 1993), pp. 3397-3415.
(http://blanche.polytechnique.fr/~mallat/papiers/MallatPursuit93.pdf)
This implementation is based on Rubinstein, R., Zibulevsky, M. and Elad,
M., Efficient Implementation of the K-SVD Algorithm using Batch Orthogonal
Matching Pursuit Technical Report - CS Technion, April 2008.
http://www.cs.technion.ac.il/~ronrubin/Publications/KSVD-OMP-v2.pdf
"""
Gram = check_array(Gram, order='F', copy=copy_Gram)
Xy = np.asarray(Xy)
if Xy.ndim > 1 and Xy.shape[1] > 1:
# or subsequent target will be affected
copy_Gram = True
if Xy.ndim == 1:
Xy = Xy[:, np.newaxis]
if tol is not None:
norms_squared = [norms_squared]
if n_nonzero_coefs is None and tol is None:
n_nonzero_coefs = int(0.1 * len(Gram))
if tol is not None and norms_squared is None:
raise ValueError('Gram OMP needs the precomputed norms in order '
'to evaluate the error sum of squares.')
if tol is not None and tol < 0:
raise ValueError("Epsilon cannot be negative")
if tol is None and n_nonzero_coefs <= 0:
raise ValueError("The number of atoms must be positive")
if tol is None and n_nonzero_coefs > len(Gram):
raise ValueError("The number of atoms cannot be more than the number "
"of features")
if return_path:
coef = np.zeros((len(Gram), Xy.shape[1], len(Gram)))
else:
coef = np.zeros((len(Gram), Xy.shape[1]))
n_iters = []
for k in range(Xy.shape[1]):
out = _gram_omp(
Gram, Xy[:, k], n_nonzero_coefs,
norms_squared[k] if tol is not None else None, tol,
copy_Gram=copy_Gram, copy_Xy=copy_Xy,
return_path=return_path)
if return_path:
_, idx, coefs, n_iter = out
coef = coef[:, :, :len(idx)]
for n_active, x in enumerate(coefs.T):
coef[idx[:n_active + 1], k, n_active] = x[:n_active + 1]
else:
x, idx, n_iter = out
coef[idx, k] = x
n_iters.append(n_iter)
if Xy.shape[1] == 1:
n_iters = n_iters[0]
if return_n_iter:
return np.squeeze(coef), n_iters
else:
return np.squeeze(coef)
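# Editorial note: illustrative sketch (not part of the original module) showing
# that the Gram variant reproduces orthogonal_mp when fed G = X.T X and
# Xy = X.T y. Shapes, seed, and the number of atoms are arbitrary demo choices.
def _orthogonal_mp_gram_usage_sketch():
    """Check that the Gram-based solver matches the X/y-based solver (illustrative)."""
    rng = np.random.RandomState(1)
    X_demo = rng.randn(40, 30)
    X_demo /= np.sqrt(np.sum(X_demo ** 2, axis=0))  # unit-norm columns
    y_demo = rng.randn(40)
    coef_xy = orthogonal_mp(X_demo, y_demo, n_nonzero_coefs=5)
    coef_gram = orthogonal_mp_gram(np.dot(X_demo.T, X_demo),
                                   np.dot(X_demo.T, y_demo),
                                   n_nonzero_coefs=5)
    return np.allclose(coef_xy, coef_gram)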
class OrthogonalMatchingPursuit(LinearModel, RegressorMixin):
"""Orthogonal Matching Pursuit model (OMP)
Parameters
----------
n_nonzero_coefs : int, optional
Desired number of non-zero entries in the solution. If None (by
default) this value is set to 10% of n_features.
tol : float, optional
Maximum norm of the residual. If not None, overrides n_nonzero_coefs.
fit_intercept : boolean, optional
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional
If False, the regressors X are assumed to be already normalized.
precompute : {True, False, 'auto'}, default 'auto'
Whether to use a precomputed Gram and Xy matrix to speed up
calculations. Improves performance when `n_targets` or `n_samples` is
very large. Note that if you already have such matrices, you can pass
them directly to the fit method.
Read more in the :ref:`User Guide <omp>`.
Attributes
----------
coef_ : array, shape (n_features,) or (n_features, n_targets)
parameter vector (w in the formula)
intercept_ : float or array, shape (n_targets,)
independent term in decision function.
n_iter_ : int or array-like
Number of active features across every target.
Notes
-----
Orthogonal matching pursuit was introduced in G. Mallat, Z. Zhang,
Matching pursuits with time-frequency dictionaries, IEEE Transactions on
Signal Processing, Vol. 41, No. 12. (December 1993), pp. 3397-3415.
(http://blanche.polytechnique.fr/~mallat/papiers/MallatPursuit93.pdf)
This implementation is based on Rubinstein, R., Zibulevsky, M. and Elad,
M., Efficient Implementation of the K-SVD Algorithm using Batch Orthogonal
Matching Pursuit Technical Report - CS Technion, April 2008.
http://www.cs.technion.ac.il/~ronrubin/Publications/KSVD-OMP-v2.pdf
See also
--------
orthogonal_mp
orthogonal_mp_gram
lars_path
Lars
LassoLars
decomposition.sparse_encode
"""
def __init__(self, n_nonzero_coefs=None, tol=None, fit_intercept=True,
normalize=True, precompute='auto'):
self.n_nonzero_coefs = n_nonzero_coefs
self.tol = tol
self.fit_intercept = fit_intercept
self.normalize = normalize
self.precompute = precompute
def fit(self, X, y):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
Returns
-------
self : object
returns an instance of self.
"""
X, y = check_X_y(X, y, multi_output=True, y_numeric=True)
n_features = X.shape[1]
X, y, X_mean, y_mean, X_std, Gram, Xy = \
_pre_fit(X, y, None, self.precompute, self.normalize,
self.fit_intercept, copy=True)
if y.ndim == 1:
y = y[:, np.newaxis]
if self.n_nonzero_coefs is None and self.tol is None:
# default for n_nonzero_coefs is 0.1 * n_features
# but at least one.
self.n_nonzero_coefs_ = max(int(0.1 * n_features), 1)
else:
self.n_nonzero_coefs_ = self.n_nonzero_coefs
if Gram is False:
coef_, self.n_iter_ = orthogonal_mp(
X, y, self.n_nonzero_coefs_, self.tol,
precompute=False, copy_X=True,
return_n_iter=True)
else:
norms_sq = np.sum(y ** 2, axis=0) if self.tol is not None else None
coef_, self.n_iter_ = orthogonal_mp_gram(
Gram, Xy=Xy, n_nonzero_coefs=self.n_nonzero_coefs_,
tol=self.tol, norms_squared=norms_sq,
copy_Gram=True, copy_Xy=True,
return_n_iter=True)
self.coef_ = coef_.T
self._set_intercept(X_mean, y_mean, X_std)
return self
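# Editorial note: illustrative estimator-level sketch (not part of the original
# module). The synthetic data below is a hypothetical example; only the public
# API documented above (fit, coef_, n_iter_) is exercised.
def _omp_estimator_usage_sketch():
    """Fit OrthogonalMatchingPursuit on a small synthetic problem (illustrative)."""
    rng = np.random.RandomState(2)
    X_demo = rng.randn(60, 20)
    w_true = np.zeros(20)
    w_true[[1, 7]] = [3.0, -1.5]
    y_demo = np.dot(X_demo, w_true) + 0.01 * rng.randn(60)  # mildly noisy targets
    model = OrthogonalMatchingPursuit(n_nonzero_coefs=2).fit(X_demo, y_demo)
    return model.coef_, model.n_iter_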
def _omp_path_residues(X_train, y_train, X_test, y_test, copy=True,
fit_intercept=True, normalize=True, max_iter=100):
"""Compute the residues on left-out data for a full LARS path
Parameters
-----------
X_train : array, shape (n_samples, n_features)
The data to fit the LARS on
y_train : array, shape (n_samples)
The target variable to fit LARS on
X_test : array, shape (n_samples, n_features)
The data to compute the residues on
y_test : array, shape (n_samples)
The target variable to compute the residues on
copy : boolean, optional
Whether X_train, X_test, y_train and y_test should be copied. If
False, they may be overwritten.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
    normalize : boolean, optional, default True
If True, the regressors X will be normalized before regression.
max_iter : integer, optional
Maximum numbers of iterations to perform, therefore maximum features
to include. 100 by default.
Returns
-------
residues: array, shape (n_samples, max_features)
Residues of the prediction on the test data
"""
if copy:
X_train = X_train.copy()
y_train = y_train.copy()
X_test = X_test.copy()
y_test = y_test.copy()
if fit_intercept:
X_mean = X_train.mean(axis=0)
X_train -= X_mean
X_test -= X_mean
y_mean = y_train.mean(axis=0)
y_train = as_float_array(y_train, copy=False)
y_train -= y_mean
y_test = as_float_array(y_test, copy=False)
y_test -= y_mean
if normalize:
norms = np.sqrt(np.sum(X_train ** 2, axis=0))
nonzeros = np.flatnonzero(norms)
X_train[:, nonzeros] /= norms[nonzeros]
coefs = orthogonal_mp(X_train, y_train, n_nonzero_coefs=max_iter, tol=None,
precompute=False, copy_X=False,
return_path=True)
if coefs.ndim == 1:
coefs = coefs[:, np.newaxis]
if normalize:
coefs[nonzeros] /= norms[nonzeros][:, np.newaxis]
return np.dot(coefs.T, X_test.T) - y_test
class OrthogonalMatchingPursuitCV(LinearModel, RegressorMixin):
"""Cross-validated Orthogonal Matching Pursuit model (OMP)
Parameters
----------
copy : bool, optional
Whether the design matrix X must be copied by the algorithm. A false
value is only helpful if X is already Fortran-ordered, otherwise a
copy is made anyway.
fit_intercept : boolean, optional
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional
If False, the regressors X are assumed to be already normalized.
max_iter : integer, optional
Maximum numbers of iterations to perform, therefore maximum features
to include. 10% of ``n_features`` but at least 5 if available.
cv : cross-validation generator, optional
see :mod:`sklearn.cross_validation`. If ``None`` is passed, default to
a 5-fold strategy
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs
verbose : boolean or integer, optional
Sets the verbosity amount
Read more in the :ref:`User Guide <omp>`.
Attributes
----------
intercept_ : float or array, shape (n_targets,)
Independent term in decision function.
coef_ : array, shape (n_features,) or (n_features, n_targets)
Parameter vector (w in the problem formulation).
n_nonzero_coefs_ : int
Estimated number of non-zero coefficients giving the best mean squared
error over the cross-validation folds.
n_iter_ : int or array-like
Number of active features across every target for the model refit with
the best hyperparameters got by cross-validating across all folds.
See also
--------
orthogonal_mp
orthogonal_mp_gram
lars_path
Lars
LassoLars
OrthogonalMatchingPursuit
LarsCV
LassoLarsCV
decomposition.sparse_encode
"""
def __init__(self, copy=True, fit_intercept=True, normalize=True,
max_iter=None, cv=None, n_jobs=1, verbose=False):
self.copy = copy
self.fit_intercept = fit_intercept
self.normalize = normalize
self.max_iter = max_iter
self.cv = cv
self.n_jobs = n_jobs
self.verbose = verbose
def fit(self, X, y):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like, shape [n_samples, n_features]
Training data.
y : array-like, shape [n_samples]
Target values.
Returns
-------
self : object
returns an instance of self.
"""
X, y = check_X_y(X, y, y_numeric=True)
X = as_float_array(X, copy=False, force_all_finite=False)
cv = check_cv(self.cv, X, y, classifier=False)
max_iter = (min(max(int(0.1 * X.shape[1]), 5), X.shape[1])
if not self.max_iter
else self.max_iter)
cv_paths = Parallel(n_jobs=self.n_jobs, verbose=self.verbose)(
delayed(_omp_path_residues)(
X[train], y[train], X[test], y[test], self.copy,
self.fit_intercept, self.normalize, max_iter)
for train, test in cv)
min_early_stop = min(fold.shape[0] for fold in cv_paths)
mse_folds = np.array([(fold[:min_early_stop] ** 2).mean(axis=1)
for fold in cv_paths])
best_n_nonzero_coefs = np.argmin(mse_folds.mean(axis=0)) + 1
self.n_nonzero_coefs_ = best_n_nonzero_coefs
omp = OrthogonalMatchingPursuit(n_nonzero_coefs=best_n_nonzero_coefs,
fit_intercept=self.fit_intercept,
normalize=self.normalize)
omp.fit(X, y)
self.coef_ = omp.coef_
self.intercept_ = omp.intercept_
self.n_iter_ = omp.n_iter_
return self
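# Editorial note: illustrative cross-validated sketch (not part of the original
# module). cv=3, the seed, and the data shapes are arbitrary demo choices.
def _omp_cv_usage_sketch():
    """Select the number of atoms by cross-validation (illustrative)."""
    rng = np.random.RandomState(3)
    X_demo = rng.randn(80, 15)
    w_true = np.zeros(15)
    w_true[[0, 4, 9]] = [2.0, -1.0, 0.5]
    y_demo = np.dot(X_demo, w_true) + 0.05 * rng.randn(80)
    model = OrthogonalMatchingPursuitCV(cv=3).fit(X_demo, y_demo)
    return model.n_nonzero_coefs_  # sparsity level with best CV error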
|
bsd-3-clause
|
vorwerkc/pymatgen
|
pymatgen/analysis/diffraction/tem.py
|
3
|
27128
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
# Credit to Dr. Shyue Ping Ong for the template of the calculator
"""
This module implements a TEM pattern calculator.
"""
import json
import os
from collections import namedtuple
from fractions import Fraction
from functools import lru_cache
from typing import Dict, List, Tuple, cast, Union
import numpy as np
import pandas as pd
import plotly.graph_objs as go
import scipy.constants as sc
from pymatgen.analysis.diffraction.core import AbstractDiffractionPatternCalculator
from pymatgen.core.structure import Structure
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from pymatgen.util.string import latexify_spacegroup, unicodeify_spacegroup
with open(os.path.join(os.path.dirname(__file__), "atomic_scattering_params.json")) as f:
ATOMIC_SCATTERING_PARAMS = json.load(f)
__author__ = "Frank Wan, Jason Liang"
__copyright__ = "Copyright 2020, The Materials Project"
__version__ = "0.22"
__maintainer__ = "Jason Liang"
__email__ = "[email protected], [email protected]"
__date__ = "03/31/2020"
class TEMCalculator(AbstractDiffractionPatternCalculator):
"""
Computes the TEM pattern of a crystal structure for multiple Laue zones.
    Code partially inspired by the XRD calculation implementation. X-ray factor to electron factor
    conversion based on the International Tables for Crystallography.
#TODO: Could add "number of iterations", "magnification", "critical value of beam",
"twin direction" for certain materials, "sample thickness", and "excitation error s"
"""
def __init__(
self,
symprec: float = None,
voltage: float = 200,
beam_direction: Tuple[int, int, int] = (0, 0, 1),
camera_length: int = 160,
debye_waller_factors: Dict[str, float] = None,
cs: float = 1,
) -> None:
"""
Args:
symprec (float): Symmetry precision for structure refinement. If
set to 0, no refinement is done. Otherwise, refinement is
performed using spglib with provided precision.
voltage (float): The wavelength is a function of the TEM microscope's
voltage. By default, set to 200 kV. Units in kV.
beam_direction (tuple): The direction of the electron beam fired onto the sample.
By default, set to [0,0,1], which corresponds to the normal direction
of the sample plane.
camera_length (int): The distance from the sample to the projected diffraction pattern.
By default, set to 160 cm. Units in cm.
debye_waller_factors ({element symbol: float}): Allows the
specification of Debye-Waller factors. Note that these
factors are temperature dependent.
            cs (float): The spherical aberration coefficient. Set by default to 1 mm.
"""
self.symprec = symprec
self.voltage = voltage
self.beam_direction = beam_direction
self.camera_length = camera_length
self.debye_waller_factors = debye_waller_factors or {}
self.cs = cs
@lru_cache(1)
def wavelength_rel(self) -> float:
"""
Calculates the wavelength of the electron beam with relativistic kinematic effects taken
into account.
Args:
none
Returns:
Relativistic Wavelength (in angstroms)
"""
wavelength_rel = (
sc.h
/ np.sqrt(
2 * sc.m_e * sc.e * 1000 * self.voltage * (1 + (sc.e * 1000 * self.voltage) / (2 * sc.m_e * sc.c ** 2))
)
* (10 ** 10)
)
return wavelength_rel
@staticmethod
def generate_points(coord_left: int = -10, coord_right: int = 10) -> np.ndarray:
"""
        Generates a grid of 3D integer points spanning a cube.
Args:
coord_left (int): The minimum coordinate value.
coord_right (int): The maximum coordinate value.
Returns:
Numpy 2d array
"""
points = [0, 0, 0]
coord_values = np.arange(coord_left, coord_right + 1)
points[0], points[1], points[2] = np.meshgrid(coord_values, coord_values, coord_values)
points_matrix = (np.ravel(points[i]) for i in range(0, 3))
result = np.vstack(list(points_matrix)).transpose()
return result
def zone_axis_filter(
self, points: Union[List[Tuple[int, int, int]], np.ndarray], laue_zone: int = 0
    ) -> List[Tuple[int, int, int]]:
"""
Filters out all points that exist within the specified Laue zone according to the zone axis rule.
Args:
points (np.ndarray): The list of points to be filtered.
laue_zone (int): The desired Laue zone.
Returns:
list of 3-tuples
"""
if any(isinstance(n, tuple) for n in points):
return list(points)
if len(points) == 0:
return []
filtered = np.where(np.dot(np.array(self.beam_direction), np.transpose(points)) == laue_zone)
result = points[filtered]
result_tuples = cast(List[Tuple[int, int, int]], [tuple(x) for x in result.tolist()])
return result_tuples
def get_interplanar_spacings(
self, structure: Structure, points: Union[List[Tuple[int, int, int]], np.ndarray]
) -> Dict[Tuple[int, int, int], float]:
"""
Args:
structure (Structure): the input structure.
points (tuple): the desired hkl indices.
Returns:
Dict of hkl to its interplanar spacing, in angstroms (float).
"""
points_filtered = self.zone_axis_filter(points)
if (0, 0, 0) in points_filtered:
points_filtered.remove((0, 0, 0))
interplanar_spacings_val = np.array(list(map(lambda x: structure.lattice.d_hkl(x), points_filtered)))
interplanar_spacings = dict(zip(points_filtered, interplanar_spacings_val))
return interplanar_spacings
def bragg_angles(
self, interplanar_spacings: Dict[Tuple[int, int, int], float]
) -> Dict[Tuple[int, int, int], float]:
"""
Gets the Bragg angles for every hkl point passed in (where n = 1).
Args:
interplanar_spacings (dict): dictionary of hkl to interplanar spacing
Returns:
dict of hkl plane (3-tuple) to Bragg angle in radians (float)
"""
plane = list(interplanar_spacings.keys())
interplanar_spacings_val = np.array(list(interplanar_spacings.values()))
bragg_angles_val = np.arcsin(self.wavelength_rel() / (2 * interplanar_spacings_val))
bragg_angles = dict(zip(plane, bragg_angles_val))
return bragg_angles
def get_s2(self, bragg_angles: Dict[Tuple[int, int, int], float]) -> Dict[Tuple[int, int, int], float]:
"""
Calculates the s squared parameter (= square of sin theta over lambda) for each hkl plane.
Args:
bragg_angles (Dict): The bragg angles for each hkl plane.
Returns:
            Dict of hkl plane to its s2 parameter (the square of sin theta over lambda).
"""
plane = list(bragg_angles.keys())
bragg_angles_val = np.array(list(bragg_angles.values()))
s2_val = (np.sin(bragg_angles_val) / self.wavelength_rel()) ** 2
s2 = dict(zip(plane, s2_val))
return s2
def x_ray_factors(
self, structure: Structure, bragg_angles: Dict[Tuple[int, int, int], float]
) -> Dict[str, Dict[Tuple[int, int, int], float]]:
"""
Calculates x-ray factors, which are required to calculate atomic scattering factors. Method partially inspired
by the equivalent process in the xrd module.
Args:
structure (Structure): The input structure.
bragg_angles (Dict): Dictionary of hkl plane to Bragg angle.
Returns:
dict of atomic symbol to another dict of hkl plane to x-ray factor (in angstroms).
"""
x_ray_factors = {}
s2 = self.get_s2(bragg_angles)
atoms = structure.composition.elements
scattering_factors_for_atom = {}
for atom in atoms:
coeffs = np.array(ATOMIC_SCATTERING_PARAMS[atom.symbol])
for plane in bragg_angles:
scattering_factor_curr = atom.Z - 41.78214 * s2[plane] * np.sum(
coeffs[:, 0] * np.exp(-coeffs[:, 1] * s2[plane]), axis=None
)
scattering_factors_for_atom[plane] = scattering_factor_curr
x_ray_factors[atom.symbol] = scattering_factors_for_atom
scattering_factors_for_atom = {}
return x_ray_factors
def electron_scattering_factors(
self, structure: Structure, bragg_angles: Dict[Tuple[int, int, int], float]
) -> Dict[str, Dict[Tuple[int, int, int], float]]:
"""
Calculates atomic scattering factors for electrons using the Mott-Bethe formula (1st order Born approximation).
Args:
structure (Structure): The input structure.
bragg_angles (dict of 3-tuple to float): The Bragg angles for each hkl plane.
Returns:
dict from atomic symbol to another dict of hkl plane to factor (in angstroms)
"""
electron_scattering_factors = {}
x_ray_factors = self.x_ray_factors(structure, bragg_angles)
s2 = self.get_s2(bragg_angles)
atoms = structure.composition.elements
prefactor = 0.023934
scattering_factors_for_atom = {}
for atom in atoms:
for plane in bragg_angles:
scattering_factor_curr = prefactor * (atom.Z - x_ray_factors[atom.symbol][plane]) / s2[plane]
scattering_factors_for_atom[plane] = scattering_factor_curr
electron_scattering_factors[atom.symbol] = scattering_factors_for_atom
scattering_factors_for_atom = {}
return electron_scattering_factors
def cell_scattering_factors(
self, structure: Structure, bragg_angles: Dict[Tuple[int, int, int], float]
) -> Dict[Tuple[int, int, int], int]:
"""
Calculates the scattering factor for the whole cell.
Args:
structure (Structure): The input structure.
bragg_angles (dict of 3-tuple to float): The Bragg angles for each hkl plane.
Returns:
dict of hkl plane (3-tuple) to scattering factor (in angstroms).
"""
cell_scattering_factors = {}
electron_scattering_factors = self.electron_scattering_factors(structure, bragg_angles)
scattering_factor_curr = 0
for plane in bragg_angles:
for site in structure:
for sp, occu in site.species.items():
g_dot_r = np.dot(np.array(plane), np.transpose(site.frac_coords))
scattering_factor_curr += electron_scattering_factors[sp.symbol][plane] * np.exp(
2j * np.pi * g_dot_r
)
cell_scattering_factors[plane] = scattering_factor_curr
scattering_factor_curr = 0
return cell_scattering_factors
def cell_intensity(
self, structure: Structure, bragg_angles: Dict[Tuple[int, int, int], float]
) -> Dict[Tuple[int, int, int], float]:
"""
Calculates cell intensity for each hkl plane. For simplicity's sake, take I = |F|**2.
Args:
structure (Structure): The input structure.
bragg_angles (dict of 3-tuple to float): The Bragg angles for each hkl plane.
Returns:
dict of hkl plane to cell intensity
"""
csf = self.cell_scattering_factors(structure, bragg_angles)
plane = bragg_angles.keys()
csf_val = np.array(list(csf.values()))
cell_intensity_val = (csf_val * csf_val.conjugate()).real
cell_intensity = dict(zip(plane, cell_intensity_val))
return cell_intensity
def get_pattern(
self,
structure: Structure,
scaled: bool = None,
two_theta_range: Tuple[float, float] = None,
) -> pd.DataFrame:
"""
Returns all relevant TEM DP info in a pandas dataframe.
Args:
structure (Structure): The input structure.
scaled (boolean): Required value for inheritance, does nothing in TEM pattern
two_theta_range (Tuple): Required value for inheritance, does nothing in TEM pattern
Returns:
PandasDataFrame
"""
if self.symprec:
finder = SpacegroupAnalyzer(structure, symprec=self.symprec)
structure = finder.get_refined_structure()
points = self.generate_points(-10, 11)
tem_dots = self.tem_dots(structure, points)
field_names = [
"Position",
"(hkl)",
"Intensity (norm)",
"Film radius",
"Interplanar Spacing",
]
rows_list = []
for dot in tem_dots:
dict1 = {
"Pos": dot.position,
"(hkl)": dot.hkl,
"Intnsty (norm)": dot.intensity,
"Film rad": dot.film_radius,
"Interplanar Spacing": dot.d_spacing,
}
rows_list.append(dict1)
df = pd.DataFrame(rows_list, columns=field_names)
return df
def normalized_cell_intensity(
self, structure: Structure, bragg_angles: Dict[Tuple[int, int, int], float]
) -> Dict[Tuple[int, int, int], float]:
"""
Normalizes the cell_intensity dict to 1, for use in plotting.
Args:
structure (Structure): The input structure.
bragg_angles (dict of 3-tuple to float): The Bragg angles for each hkl plane.
Returns:
dict of hkl plane to normalized cell intensity
"""
normalized_cell_intensity = {}
cell_intensity = self.cell_intensity(structure, bragg_angles)
max_intensity = max(cell_intensity.values())
norm_factor = 1 / max_intensity
for plane in cell_intensity:
normalized_cell_intensity[plane] = cell_intensity[plane] * norm_factor
return normalized_cell_intensity
def is_parallel(
self,
structure: Structure,
plane: Tuple[int, int, int],
other_plane: Tuple[int, int, int],
) -> bool:
"""
Checks if two hkl planes are parallel in reciprocal space.
Args:
structure (Structure): The input structure.
plane (3-tuple): The first plane to be compared.
other_plane (3-tuple): The other plane to be compared.
Returns:
boolean
"""
phi = self.get_interplanar_angle(structure, plane, other_plane)
return phi in (180, 0) or np.isnan(phi)
def get_first_point(self, structure: Structure, points: list) -> Dict[Tuple[int, int, int], float]:
"""
Gets the first point to be plotted in the 2D DP, corresponding to maximum d/minimum R.
Args:
structure (Structure): The input structure.
points (list): All points to be checked.
Returns:
dict of a hkl plane to max interplanar distance.
"""
max_d = -100.0
max_d_plane = (0, 0, 1)
points = self.zone_axis_filter(points)
spacings = self.get_interplanar_spacings(structure, points)
for plane in sorted(spacings.keys()):
if spacings[plane] > max_d:
max_d_plane = plane
max_d = spacings[plane]
return {max_d_plane: max_d}
@staticmethod
def get_interplanar_angle(structure: Structure, p1: Tuple[int, int, int], p2: Tuple[int, int, int]) -> float:
"""
Returns the interplanar angle (in degrees) between the normal of two crystal planes.
Formulas from International Tables for Crystallography Volume C pp. 2-9.
Args:
structure (Structure): The input structure.
p1 (3-tuple): plane 1
p2 (3-tuple): plane 2
Returns:
float
"""
a, b, c = structure.lattice.a, structure.lattice.b, structure.lattice.c
alpha, beta, gamma = (
np.deg2rad(structure.lattice.alpha),
np.deg2rad(structure.lattice.beta),
np.deg2rad(structure.lattice.gamma),
)
v = structure.lattice.volume
a_star = b * c * np.sin(alpha) / v
b_star = a * c * np.sin(beta) / v
c_star = a * b * np.sin(gamma) / v
cos_alpha_star = (np.cos(beta) * np.cos(gamma) - np.cos(alpha)) / (np.sin(beta) * np.sin(gamma))
cos_beta_star = (np.cos(alpha) * np.cos(gamma) - np.cos(beta)) / (np.sin(alpha) * np.sin(gamma))
cos_gamma_star = (np.cos(alpha) * np.cos(beta) - np.cos(gamma)) / (np.sin(alpha) * np.sin(beta))
r1_norm = np.sqrt(
p1[0] ** 2 * a_star ** 2
+ p1[1] ** 2 * b_star ** 2
+ p1[2] ** 2 * c_star ** 2
+ 2 * p1[0] * p1[1] * a_star * b_star * cos_gamma_star
+ 2 * p1[0] * p1[2] * a_star * c_star * cos_beta_star
            + 2 * p1[1] * p1[2] * b_star * c_star * cos_alpha_star
)
r2_norm = np.sqrt(
p2[0] ** 2 * a_star ** 2
+ p2[1] ** 2 * b_star ** 2
+ p2[2] ** 2 * c_star ** 2
+ 2 * p2[0] * p2[1] * a_star * b_star * cos_gamma_star
+ 2 * p2[0] * p2[2] * a_star * c_star * cos_beta_star
            + 2 * p2[1] * p2[2] * b_star * c_star * cos_alpha_star
)
r1_dot_r2 = (
p1[0] * p2[0] * a_star ** 2
+ p1[1] * p2[1] * b_star ** 2
+ p1[2] * p2[2] * c_star ** 2
+ (p1[0] * p2[1] + p2[0] * p1[1]) * a_star * b_star * cos_gamma_star
            + (p1[0] * p2[2] + p2[0] * p1[2]) * a_star * c_star * cos_beta_star
+ (p1[1] * p2[2] + p2[1] * p1[2]) * b_star * c_star * cos_alpha_star
)
phi = np.arccos(r1_dot_r2 / (r1_norm * r2_norm))
return np.rad2deg(phi)
@staticmethod
def get_plot_coeffs(
p1: Tuple[int, int, int],
p2: Tuple[int, int, int],
p3: Tuple[int, int, int],
) -> np.ndarray:
"""
Calculates coefficients of the vector addition required to generate positions for each DP point
by the Moore-Penrose inverse method.
Args:
p1 (3-tuple): The first point. Fixed.
p2 (3-tuple): The second point. Fixed.
            p3 (3-tuple): The point whose coefficients are to be calculated.
Returns:
Numpy array
"""
a = np.array([[p1[0], p2[0]], [p1[1], p2[1]], [p1[2], p2[2]]])
b = np.array([[p3[0], p3[1], p3[2]]]).T
a_pinv = np.linalg.pinv(a)
x = np.dot(a_pinv, b)
return np.ravel(x)
def get_positions(self, structure: Structure, points: list) -> Dict[Tuple[int, int, int], np.ndarray]:
"""
Calculates all the positions of each hkl point in the 2D diffraction pattern by vector addition.
Distance in centimeters.
Args:
structure (Structure): The input structure.
points (list): All points to be checked.
Returns:
dict of hkl plane to xy-coordinates.
"""
positions = {}
points = self.zone_axis_filter(points)
# first is the max_d, min_r
first_point_dict = self.get_first_point(structure, points)
for point in first_point_dict:
first_point = point
first_d = first_point_dict[point]
spacings = self.get_interplanar_spacings(structure, points)
# second is the first non-parallel-to-first-point vector when sorted.
# note 000 is "parallel" to every plane vector.
for plane in sorted(spacings.keys()):
second_point, second_d = plane, spacings[plane]
if not self.is_parallel(structure, first_point, second_point):
break
p1 = first_point
p2 = second_point
if (0, 0, 0) in points:
points.remove((0, 0, 0))
points.remove(first_point)
points.remove(second_point)
positions[(0, 0, 0)] = np.array([0, 0])
r1 = self.wavelength_rel() * self.camera_length / first_d
positions[first_point] = np.array([r1, 0])
r2 = self.wavelength_rel() * self.camera_length / second_d
phi = np.deg2rad(self.get_interplanar_angle(structure, first_point, second_point))
positions[second_point] = np.array([r2 * np.cos(phi), r2 * np.sin(phi)])
for plane in points:
coeffs = self.get_plot_coeffs(p1, p2, plane)
pos = np.array(
[
coeffs[0] * positions[first_point][0] + coeffs[1] * positions[second_point][0],
coeffs[0] * positions[first_point][1] + coeffs[1] * positions[second_point][1],
]
)
positions[plane] = pos
points.append((0, 0, 0))
points.append(first_point)
points.append(second_point)
return positions
def tem_dots(self, structure: Structure, points) -> List:
"""
Generates all TEM_dot as named tuples that will appear on the 2D diffraction pattern.
Args:
structure (Structure): The input structure.
points (list): All points to be checked.
Returns:
list of TEM_dots
"""
dots = []
interplanar_spacings = self.get_interplanar_spacings(structure, points)
bragg_angles = self.bragg_angles(interplanar_spacings)
cell_intensity = self.normalized_cell_intensity(structure, bragg_angles)
positions = self.get_positions(structure, points)
for plane in cell_intensity.keys():
dot = namedtuple("dot", ["position", "hkl", "intensity", "film_radius", "d_spacing"])
position = positions[plane]
hkl = plane
intensity = cell_intensity[plane]
film_radius = 0.91 * (10 ** -3 * self.cs * self.wavelength_rel() ** 3) ** Fraction("1/4")
d_spacing = interplanar_spacings[plane]
tem_dot = dot(position, hkl, intensity, film_radius, d_spacing)
dots.append(tem_dot)
return dots
def get_plot_2d(self, structure: Structure) -> go.Figure:
"""
Generates the 2D diffraction pattern of the input structure.
Args:
structure (Structure): The input structure.
Returns:
Figure
"""
if self.symprec:
finder = SpacegroupAnalyzer(structure, symprec=self.symprec)
structure = finder.get_refined_structure()
points = self.generate_points(-10, 11)
tem_dots = self.tem_dots(structure, points)
xs = []
ys = []
hkls = []
intensities = []
for dot in tem_dots:
xs.append(dot.position[0])
ys.append(dot.position[1])
hkls.append(str(dot.hkl))
intensities.append(dot.intensity)
hkls = list(map(unicodeify_spacegroup, list(map(latexify_spacegroup, hkls))))
data = [
go.Scatter(
x=xs,
y=ys,
text=hkls,
hoverinfo="text",
mode="markers",
marker=dict(
size=8,
cmax=1,
cmin=0,
color=intensities,
colorscale=[[0, "black"], [1.0, "white"]],
),
showlegend=False,
),
go.Scatter(
x=[0],
y=[0],
text="(0, 0, 0): Direct beam",
hoverinfo="text",
mode="markers",
marker=dict(size=14, cmax=1, cmin=0, color="white"),
showlegend=False,
),
]
layout = go.Layout(
title="2D Diffraction Pattern<br>Beam Direction: " + "".join(str(e) for e in self.beam_direction),
font=dict(size=14, color="#7f7f7f"),
hovermode="closest",
xaxis=dict(
range=[-4, 4],
showgrid=False,
zeroline=False,
showline=False,
ticks="",
showticklabels=False,
),
yaxis=dict(
range=[-4, 4],
showgrid=False,
zeroline=False,
showline=False,
ticks="",
showticklabels=False,
),
width=550,
height=550,
paper_bgcolor="rgba(100,110,110,0.5)",
plot_bgcolor="black",
)
fig = go.Figure(data=data, layout=layout)
return fig
def get_plot_2d_concise(self, structure: Structure) -> go.Figure:
"""
        Generates a concise 2D diffraction pattern of the input structure, smaller in size and
        without layout annotations. Does not display the figure.
Args:
structure (Structure): The input structure.
Returns:
Figure
"""
if self.symprec:
finder = SpacegroupAnalyzer(structure, symprec=self.symprec)
structure = finder.get_refined_structure()
points = self.generate_points(-10, 11)
tem_dots = self.tem_dots(structure, points)
xs = []
ys = []
hkls = []
intensities = []
for dot in tem_dots:
if dot.hkl != (0, 0, 0):
xs.append(dot.position[0])
ys.append(dot.position[1])
hkls.append(dot.hkl)
intensities.append(dot.intensity)
data = [
go.Scatter(
x=xs,
y=ys,
text=hkls,
mode="markers",
hoverinfo="skip",
marker=dict(
size=4,
cmax=1,
cmin=0,
color=intensities,
colorscale=[[0, "black"], [1.0, "white"]],
),
showlegend=False,
)
]
layout = go.Layout(
xaxis=dict(
range=[-4, 4],
showgrid=False,
zeroline=False,
showline=False,
ticks="",
showticklabels=False,
),
yaxis=dict(
range=[-4, 4],
showgrid=False,
zeroline=False,
showline=False,
ticks="",
showticklabels=False,
),
plot_bgcolor="black",
margin={"l": 0, "r": 0, "t": 0, "b": 0},
width=121,
height=121,
)
fig = go.Figure(data=data, layout=layout)
fig.layout.update(showlegend=False)
return fig
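# Editorial note: illustrative usage sketch (not part of the original module).
# The CsCl-type cubic structure below is a hypothetical example input; any
# pymatgen Structure works. get_pattern returns a pandas DataFrame and
# get_plot_2d a plotly Figure, as documented above.
def _tem_calculator_usage_sketch():
    """Compute a TEM diffraction pattern for a simple cubic structure (illustrative)."""
    from pymatgen.core.lattice import Lattice

    lattice = Lattice.cubic(4.1)  # hypothetical lattice parameter, in angstroms
    structure = Structure(lattice, ["Cs", "Cl"], [[0, 0, 0], [0.5, 0.5, 0.5]])
    calc = TEMCalculator(voltage=200, beam_direction=(0, 0, 1))
    pattern_df = calc.get_pattern(structure)  # tabulated spot positions/intensities
    figure = calc.get_plot_2d(structure)      # interactive 2D diffraction pattern
    return pattern_df, figure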
|
mit
|
vshtanko/scikit-learn
|
sklearn/linear_model/tests/test_sparse_coordinate_descent.py
|
244
|
9986
|
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import ignore_warnings
from sklearn.linear_model.coordinate_descent import (Lasso, ElasticNet,
LassoCV, ElasticNetCV)
def test_sparse_coef():
    # Check that the sparse_coef property works
clf = ElasticNet()
clf.coef_ = [1, 2, 3]
assert_true(sp.isspmatrix(clf.sparse_coef_))
assert_equal(clf.sparse_coef_.toarray().tolist()[0], clf.coef_)
def test_normalize_option():
# Check that the normalize option in enet works
X = sp.csc_matrix([[-1], [0], [1]])
y = [-1, 0, 1]
clf_dense = ElasticNet(fit_intercept=True, normalize=True)
clf_sparse = ElasticNet(fit_intercept=True, normalize=True)
clf_dense.fit(X, y)
X = sp.csc_matrix(X)
clf_sparse.fit(X, y)
assert_almost_equal(clf_dense.dual_gap_, 0)
assert_array_almost_equal(clf_dense.coef_, clf_sparse.coef_)
def test_lasso_zero():
# Check that the sparse lasso can handle zero data without crashing
X = sp.csc_matrix((3, 1))
y = [0, 0, 0]
T = np.array([[1], [2], [3]])
clf = Lasso().fit(X, y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0])
assert_array_almost_equal(pred, [0, 0, 0])
assert_almost_equal(clf.dual_gap_, 0)
def test_enet_toy_list_input():
# Test ElasticNet for various values of alpha and l1_ratio with list X
X = np.array([[-1], [0], [1]])
X = sp.csc_matrix(X)
Y = [-1, 0, 1] # just a straight line
T = np.array([[2], [3], [4]]) # test sample
# this should be the same as unregularized least squares
clf = ElasticNet(alpha=0, l1_ratio=1.0)
# catch warning about alpha=0.
# this is discouraged but should work.
ignore_warnings(clf.fit)(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(pred, [2, 3, 4])
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.3, max_iter=1000)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.45454], 3)
assert_array_almost_equal(pred, [0.9090, 1.3636, 1.8181], 3)
assert_almost_equal(clf.dual_gap_, 0)
def test_enet_toy_explicit_sparse_input():
# Test ElasticNet for various values of alpha and l1_ratio with sparse X
f = ignore_warnings
# training samples
X = sp.lil_matrix((3, 1))
X[0, 0] = -1
# X[1, 0] = 0
X[2, 0] = 1
Y = [-1, 0, 1] # just a straight line (the identity function)
# test samples
T = sp.lil_matrix((3, 1))
T[0, 0] = 2
T[1, 0] = 3
T[2, 0] = 4
# this should be the same as lasso
clf = ElasticNet(alpha=0, l1_ratio=1.0)
f(clf.fit)(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(pred, [2, 3, 4])
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.3, max_iter=1000)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.45454], 3)
assert_array_almost_equal(pred, [0.9090, 1.3636, 1.8181], 3)
assert_almost_equal(clf.dual_gap_, 0)
def make_sparse_data(n_samples=100, n_features=100, n_informative=10, seed=42,
positive=False, n_targets=1):
random_state = np.random.RandomState(seed)
# build an ill-posed linear regression problem with many noisy features and
# comparatively few samples
# generate a ground truth model
w = random_state.randn(n_features, n_targets)
w[n_informative:] = 0.0 # only the top features are impacting the model
if positive:
w = np.abs(w)
X = random_state.randn(n_samples, n_features)
rnd = random_state.uniform(size=(n_samples, n_features))
X[rnd > 0.5] = 0.0 # 50% of zeros in input signal
# generate training ground truth labels
y = np.dot(X, w)
X = sp.csc_matrix(X)
if n_targets == 1:
y = np.ravel(y)
return X, y
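# Editorial note: illustrative sketch (not part of the original test module)
# showing the intended use of make_sparse_data: the returned X is a scipy CSC
# matrix that the coordinate-descent estimators exercised below accept directly.
# The alpha and problem sizes are arbitrary demo choices.
def _make_sparse_data_usage_sketch():
    """Fit a sparse-input Lasso on the synthetic ill-posed problem (illustrative)."""
    X, y = make_sparse_data(n_samples=50, n_features=80, n_informative=5, seed=0)
    model = Lasso(alpha=0.1, max_iter=1000).fit(X, y)
    return np.sum(model.coef_ != 0.0)  # number of selected features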
def _test_sparse_enet_not_as_toy_dataset(alpha, fit_intercept, positive):
n_samples, n_features, max_iter = 100, 100, 1000
n_informative = 10
X, y = make_sparse_data(n_samples, n_features, n_informative,
positive=positive)
X_train, X_test = X[n_samples // 2:], X[:n_samples // 2]
y_train, y_test = y[n_samples // 2:], y[:n_samples // 2]
s_clf = ElasticNet(alpha=alpha, l1_ratio=0.8, fit_intercept=fit_intercept,
max_iter=max_iter, tol=1e-7, positive=positive,
warm_start=True)
s_clf.fit(X_train, y_train)
assert_almost_equal(s_clf.dual_gap_, 0, 4)
assert_greater(s_clf.score(X_test, y_test), 0.85)
# check the convergence is the same as the dense version
d_clf = ElasticNet(alpha=alpha, l1_ratio=0.8, fit_intercept=fit_intercept,
max_iter=max_iter, tol=1e-7, positive=positive,
warm_start=True)
d_clf.fit(X_train.toarray(), y_train)
assert_almost_equal(d_clf.dual_gap_, 0, 4)
assert_greater(d_clf.score(X_test, y_test), 0.85)
assert_almost_equal(s_clf.coef_, d_clf.coef_, 5)
assert_almost_equal(s_clf.intercept_, d_clf.intercept_, 5)
# check that the coefs are sparse
assert_less(np.sum(s_clf.coef_ != 0.0), 2 * n_informative)
def test_sparse_enet_not_as_toy_dataset():
_test_sparse_enet_not_as_toy_dataset(alpha=0.1, fit_intercept=False,
positive=False)
_test_sparse_enet_not_as_toy_dataset(alpha=0.1, fit_intercept=True,
positive=False)
_test_sparse_enet_not_as_toy_dataset(alpha=1e-3, fit_intercept=False,
positive=True)
_test_sparse_enet_not_as_toy_dataset(alpha=1e-3, fit_intercept=True,
positive=True)
def test_sparse_lasso_not_as_toy_dataset():
n_samples = 100
max_iter = 1000
n_informative = 10
X, y = make_sparse_data(n_samples=n_samples, n_informative=n_informative)
X_train, X_test = X[n_samples // 2:], X[:n_samples // 2]
y_train, y_test = y[n_samples // 2:], y[:n_samples // 2]
s_clf = Lasso(alpha=0.1, fit_intercept=False, max_iter=max_iter, tol=1e-7)
s_clf.fit(X_train, y_train)
assert_almost_equal(s_clf.dual_gap_, 0, 4)
assert_greater(s_clf.score(X_test, y_test), 0.85)
# check the convergence is the same as the dense version
d_clf = Lasso(alpha=0.1, fit_intercept=False, max_iter=max_iter, tol=1e-7)
d_clf.fit(X_train.toarray(), y_train)
assert_almost_equal(d_clf.dual_gap_, 0, 4)
assert_greater(d_clf.score(X_test, y_test), 0.85)
# check that the coefs are sparse
assert_equal(np.sum(s_clf.coef_ != 0.0), n_informative)
def test_enet_multitarget():
n_targets = 3
X, y = make_sparse_data(n_targets=n_targets)
estimator = ElasticNet(alpha=0.01, fit_intercept=True, precompute=None)
# XXX: There is a bug when precompute is not None!
estimator.fit(X, y)
coef, intercept, dual_gap = (estimator.coef_,
estimator.intercept_,
estimator.dual_gap_)
for k in range(n_targets):
estimator.fit(X, y[:, k])
assert_array_almost_equal(coef[k, :], estimator.coef_)
assert_array_almost_equal(intercept[k], estimator.intercept_)
assert_array_almost_equal(dual_gap[k], estimator.dual_gap_)
def test_path_parameters():
X, y = make_sparse_data()
max_iter = 50
n_alphas = 10
clf = ElasticNetCV(n_alphas=n_alphas, eps=1e-3, max_iter=max_iter,
l1_ratio=0.5, fit_intercept=False)
ignore_warnings(clf.fit)(X, y) # new params
assert_almost_equal(0.5, clf.l1_ratio)
assert_equal(n_alphas, clf.n_alphas)
assert_equal(n_alphas, len(clf.alphas_))
sparse_mse_path = clf.mse_path_
ignore_warnings(clf.fit)(X.toarray(), y) # compare with dense data
assert_almost_equal(clf.mse_path_, sparse_mse_path)
def test_same_output_sparse_dense_lasso_and_enet_cv():
X, y = make_sparse_data(n_samples=40, n_features=10)
for normalize in [True, False]:
clfs = ElasticNetCV(max_iter=100, cv=5, normalize=normalize)
ignore_warnings(clfs.fit)(X, y)
clfd = ElasticNetCV(max_iter=100, cv=5, normalize=normalize)
ignore_warnings(clfd.fit)(X.toarray(), y)
assert_almost_equal(clfs.alpha_, clfd.alpha_, 7)
assert_almost_equal(clfs.intercept_, clfd.intercept_, 7)
assert_array_almost_equal(clfs.mse_path_, clfd.mse_path_)
assert_array_almost_equal(clfs.alphas_, clfd.alphas_)
clfs = LassoCV(max_iter=100, cv=4, normalize=normalize)
ignore_warnings(clfs.fit)(X, y)
clfd = LassoCV(max_iter=100, cv=4, normalize=normalize)
ignore_warnings(clfd.fit)(X.toarray(), y)
assert_almost_equal(clfs.alpha_, clfd.alpha_, 7)
assert_almost_equal(clfs.intercept_, clfd.intercept_, 7)
assert_array_almost_equal(clfs.mse_path_, clfd.mse_path_)
assert_array_almost_equal(clfs.alphas_, clfd.alphas_)
|
bsd-3-clause
|
mlyundin/scikit-learn
|
examples/exercises/plot_iris_exercise.py
|
323
|
1602
|
"""
================================
SVM Exercise
================================
A tutorial exercise for using different SVM kernels.
This exercise is used in the :ref:`using_kernels_tut` part of the
:ref:`supervised_learning_tut` section of the :ref:`stat_learn_tut_index`.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets, svm
iris = datasets.load_iris()
X = iris.data
y = iris.target
X = X[y != 0, :2]
y = y[y != 0]
n_sample = len(X)
np.random.seed(0)
order = np.random.permutation(n_sample)
X = X[order]
y = y[order].astype(float)
X_train = X[:int(0.9 * n_sample)]
y_train = y[:int(0.9 * n_sample)]
X_test = X[int(0.9 * n_sample):]
y_test = y[int(0.9 * n_sample):]
# fit the model
for fig_num, kernel in enumerate(('linear', 'rbf', 'poly')):
clf = svm.SVC(kernel=kernel, gamma=10)
clf.fit(X_train, y_train)
plt.figure(fig_num)
plt.clf()
plt.scatter(X[:, 0], X[:, 1], c=y, zorder=10, cmap=plt.cm.Paired)
# Circle out the test data
plt.scatter(X_test[:, 0], X_test[:, 1], s=80, facecolors='none', zorder=10)
plt.axis('tight')
x_min = X[:, 0].min()
x_max = X[:, 0].max()
y_min = X[:, 1].min()
y_max = X[:, 1].max()
XX, YY = np.mgrid[x_min:x_max:200j, y_min:y_max:200j]
Z = clf.decision_function(np.c_[XX.ravel(), YY.ravel()])
# Put the result into a color plot
Z = Z.reshape(XX.shape)
plt.pcolormesh(XX, YY, Z > 0, cmap=plt.cm.Paired)
plt.contour(XX, YY, Z, colors=['k', 'k', 'k'], linestyles=['--', '-', '--'],
levels=[-.5, 0, .5])
plt.title(kernel)
plt.show()
|
bsd-3-clause
|
ibis-project/ibis
|
ibis/backends/dask/tests/test_core.py
|
1
|
5059
|
from typing import Any
import dask.dataframe as dd
import pytest
from dask.dataframe.utils import tm
from multipledispatch.conflict import ambiguities
import ibis
import ibis.common.exceptions as com
import ibis.expr.datatypes as dt
import ibis.expr.operations as ops
from ibis.backends.pandas.dispatch import execute_node as pandas_execute_node
from ibis.expr.scope import Scope
from ..client import DaskClient
from ..core import execute, is_computable_input
from ..dispatch import execute_node, post_execute, pre_execute
pytestmark = pytest.mark.dask
@pytest.mark.parametrize('func', [execute_node, pre_execute, post_execute])
def test_no_execute_ambiguities(func):
assert not ambiguities(func.funcs)
def test_from_dataframe(dataframe, ibis_table, core_client):
t = ibis.dask.from_dataframe(dataframe)
result = t.execute()
expected = ibis_table.execute()
tm.assert_frame_equal(result, expected)
t = ibis.dask.from_dataframe(dataframe, name='foo')
expected = ibis_table.execute()
tm.assert_frame_equal(result, expected)
client = core_client
t = ibis.dask.from_dataframe(dataframe, name='foo', client=client)
expected = ibis_table.execute()
tm.assert_frame_equal(result, expected)
def test_pre_execute_basic():
"""
Test that pre_execute has intercepted execution and provided its own
scope dict
"""
@pre_execute.register(ops.Add)
def pre_execute_test(op, *clients, scope=None, **kwargs):
return Scope({op: 4}, None)
one = ibis.literal(1)
expr = one + one
result = execute(expr)
assert result == 4
del pre_execute.funcs[(ops.Add,)]
pre_execute.reorder()
pre_execute._cache.clear()
def test_execute_parameter_only():
param = ibis.param('int64')
result = execute(param, params={param: 42})
assert result == 42
def test_missing_data_sources():
t = ibis.table([('a', 'string')])
expr = t.a.length()
with pytest.raises(com.UnboundExpressionError):
execute(expr)
def test_missing_data_on_custom_client():
class MyClient(DaskClient):
def table(self, name):
return ops.DatabaseTable(
name, ibis.schema([('a', 'int64')]), self
).to_expr()
con = MyClient(ibis.dask, {})
t = con.table('t')
with pytest.raises(
NotImplementedError,
match=(
'Could not find signature for execute_node: '
'<DatabaseTable, MyClient>'
),
):
con.execute(t)
def test_post_execute_called_on_joins(dataframe, core_client, ibis_table):
count = [0]
@post_execute.register(ops.InnerJoin, dd.DataFrame)
def tmp_left_join_exe(op, lhs, **kwargs):
count[0] += 1
return lhs
left = ibis_table
right = left.view()
join = left.join(right, 'plain_strings')[left.plain_int64]
result = join.execute()
assert result is not None
assert len(result.index) > 0
assert count[0] == 1
def test_is_computable_input():
class MyObject:
def __init__(self, value: float) -> None:
self.value = value
def __getattr__(self, name: str) -> Any:
return getattr(self.value, name)
def __hash__(self) -> int:
return hash((type(self), self.value))
def __eq__(self, other):
return (
isinstance(other, type(self))
and isinstance(self, type(other))
and self.value == other.value
)
def __float__(self) -> float:
return self.value
@execute_node.register(ops.Add, int, MyObject)
def add_int_my_object(op, left, right, **kwargs):
return left + right.value
# This multimethod must be implemented to play nicely with other value
# types like columns and literals. In other words, for a custom
# non-expression object to play nicely it must somehow map to one of the
# types in ibis/expr/datatypes.py
@dt.infer.register(MyObject)
def infer_my_object(_, **kwargs):
return dt.float64
@is_computable_input.register(MyObject)
def is_computable_input_my_object(_):
return True
one = ibis.literal(1)
two = MyObject(2.0)
assert is_computable_input(two)
three = one + two
four = three + 1
result = execute(four)
assert result == 4.0
del execute_node[ops.Add, int, MyObject]
execute_node.reorder()
execute_node._cache.clear()
del dt.infer.funcs[(MyObject,)]
dt.infer.reorder()
dt.infer._cache.clear()
def test_scope_look_up():
# test if scope could lookup items properly
scope = Scope()
one_day = ibis.interval(days=1).op()
one_hour = ibis.interval(hours=1).op()
scope = scope.merge_scope(Scope({one_day: 1}, None))
assert scope.get_value(one_hour) is None
assert scope.get_value(one_day) is not None
def test_new_dispatcher():
types = (ops.TableColumn, dd.DataFrame)
assert execute_node.dispatch(*types) is not None
assert pandas_execute_node.dispatch(*types) is None
|
apache-2.0
|
xhochy/arrow
|
dev/archery/archery/tests/test_docker.py
|
1
|
15422
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import collections
import os
import re
import subprocess
from unittest import mock
import pytest
from archery.docker import DockerCompose
from archery.testing import assert_subprocess_calls, override_env, PartialEnv
missing_service_compose_yml = """
version: '3.5'
x-hierarchy:
- foo:
- sub-foo:
- sub-sub-foo
- another-sub-sub-foo
- bar:
- sub-bar
- baz
services:
foo:
image: dummy
sub-sub-foo:
image: dummy
another-sub-sub-foo:
image: dummy
bar:
image: dummy
sub-bar:
image: dummy
baz:
image: dummy
"""
missing_node_compose_yml = """
version: '3.5'
x-hierarchy:
- foo:
- sub-foo:
- sub-sub-foo
- another-sub-sub-foo
- bar
- baz
services:
foo:
image: dummy
sub-foo:
image: dummy
sub-sub-foo:
image: dummy
another-sub-sub-foo:
image: dummy
bar:
image: dummy
sub-bar:
image: dummy
baz:
image: dummy
"""
ok_compose_yml = """
version: '3.5'
x-hierarchy:
- foo:
- sub-foo:
- sub-sub-foo
- another-sub-sub-foo
- bar:
- sub-bar
- baz
services:
foo:
image: dummy
sub-foo:
image: dummy
sub-sub-foo:
image: dummy
another-sub-sub-foo:
image: dummy
bar:
image: dummy
sub-bar:
image: dummy
baz:
image: dummy
"""
arrow_compose_yml = """
version: '3.5'
x-with-gpus:
- ubuntu-cuda
x-hierarchy:
- conda-cpp:
- conda-python:
- conda-python-pandas
- conda-python-dask
- ubuntu-cpp:
- ubuntu-cpp-cmake32
- ubuntu-c-glib:
- ubuntu-ruby
- ubuntu-cuda
services:
conda-cpp:
image: dummy
conda-python:
image: dummy
conda-python-pandas:
image: dummy
conda-python-dask:
image: dummy
ubuntu-cpp:
image: dummy
ubuntu-cpp-cmake32:
image: dummy
ubuntu-c-glib:
image: dummy
ubuntu-ruby:
image: dummy
ubuntu-cuda:
image: dummy-cuda
environment:
CUDA_ENV: 1
OTHER_ENV: 2
volumes:
- /host:/container
command: /bin/bash -c "echo 1 > /tmp/dummy && cat /tmp/dummy"
"""
arrow_compose_env = {
'UBUNTU': '20.04', # overridden below
'PYTHON': '3.6',
'PANDAS': 'latest',
'DASK': 'latest', # overridden below
}
def create_config(directory, yml_content, env_content=None):
env_path = directory / '.env'
config_path = directory / 'docker-compose.yml'
with config_path.open('w') as fp:
fp.write(yml_content)
if env_content is not None:
with env_path.open('w') as fp:
for k, v in env_content.items():
fp.write("{}={}\n".format(k, v))
return config_path
def format_run(args):
cmd = ["run", "--rm"]
if isinstance(args, str):
return " ".join(cmd + [args])
else:
return cmd + args
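# Illustrative test added for clarity (a hedged sketch, not part of the original
# suite): format_run prepends the standard `run --rm` prefix, joining string
# arguments into a single command string and keeping list arguments as a list.
def test_format_run_examples():
    assert format_run("conda-cpp") == "run --rm conda-cpp"
    assert format_run(["conda-cpp", "bash"]) == ["run", "--rm", "conda-cpp", "bash"]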
@pytest.fixture
def arrow_compose_path(tmpdir):
return create_config(tmpdir, arrow_compose_yml, arrow_compose_env)
def test_config_validation(tmpdir):
config_path = create_config(tmpdir, missing_service_compose_yml)
msg = "`sub-foo` is defined in `x-hierarchy` bot not in `services`"
with pytest.raises(ValueError, match=msg):
DockerCompose(config_path)
config_path = create_config(tmpdir, missing_node_compose_yml)
msg = "`sub-bar` is defined in `services` but not in `x-hierarchy`"
with pytest.raises(ValueError, match=msg):
DockerCompose(config_path)
config_path = create_config(tmpdir, ok_compose_yml)
DockerCompose(config_path) # no issue
def assert_docker_calls(compose, expected_args):
base_command = ['docker']
expected_commands = []
for args in expected_args:
if isinstance(args, str):
args = re.split(r"\s", args)
expected_commands.append(base_command + args)
return assert_subprocess_calls(expected_commands, check=True)
def assert_compose_calls(compose, expected_args, env=mock.ANY):
base_command = ['docker-compose', '--file', str(compose.config_path)]
expected_commands = []
for args in expected_args:
if isinstance(args, str):
args = re.split(r"\s", args)
expected_commands.append(base_command + args)
return assert_subprocess_calls(expected_commands, check=True, env=env)
def test_arrow_example_validation_passes(arrow_compose_path):
DockerCompose(arrow_compose_path)
def test_compose_default_params_and_env(arrow_compose_path):
compose = DockerCompose(arrow_compose_path, params=dict(
UBUNTU='18.04',
DASK='master'
))
assert compose.dotenv == arrow_compose_env
assert compose.params == {
'UBUNTU': '18.04',
'DASK': 'master',
}
def test_forwarding_env_variables(arrow_compose_path):
expected_calls = [
"pull --ignore-pull-failures conda-cpp",
"build conda-cpp",
]
expected_env = PartialEnv(
MY_CUSTOM_VAR_A='a',
MY_CUSTOM_VAR_B='b'
)
with override_env({'MY_CUSTOM_VAR_A': 'a', 'MY_CUSTOM_VAR_B': 'b'}):
compose = DockerCompose(arrow_compose_path)
with assert_compose_calls(compose, expected_calls, env=expected_env):
assert os.environ['MY_CUSTOM_VAR_A'] == 'a'
assert os.environ['MY_CUSTOM_VAR_B'] == 'b'
compose.pull('conda-cpp')
compose.build('conda-cpp')
def test_compose_pull(arrow_compose_path):
compose = DockerCompose(arrow_compose_path)
expected_calls = [
"pull --ignore-pull-failures conda-cpp",
]
with assert_compose_calls(compose, expected_calls):
compose.pull('conda-cpp')
expected_calls = [
"pull --ignore-pull-failures conda-cpp",
"pull --ignore-pull-failures conda-python",
"pull --ignore-pull-failures conda-python-pandas"
]
with assert_compose_calls(compose, expected_calls):
compose.pull('conda-python-pandas')
expected_calls = [
"pull --ignore-pull-failures conda-cpp",
"pull --ignore-pull-failures conda-python",
]
with assert_compose_calls(compose, expected_calls):
compose.pull('conda-python-pandas', pull_leaf=False)
def test_compose_pull_params(arrow_compose_path):
expected_calls = [
"pull --ignore-pull-failures conda-cpp",
"pull --ignore-pull-failures conda-python",
]
compose = DockerCompose(arrow_compose_path, params=dict(UBUNTU='18.04'))
expected_env = PartialEnv(PYTHON='3.6', PANDAS='latest')
with assert_compose_calls(compose, expected_calls, env=expected_env):
compose.pull('conda-python-pandas', pull_leaf=False)
def test_compose_build(arrow_compose_path):
compose = DockerCompose(arrow_compose_path)
expected_calls = [
"build conda-cpp",
]
with assert_compose_calls(compose, expected_calls):
compose.build('conda-cpp')
expected_calls = [
"build --no-cache conda-cpp"
]
with assert_compose_calls(compose, expected_calls):
compose.build('conda-cpp', use_cache=False)
expected_calls = [
"build conda-cpp",
"build conda-python",
"build conda-python-pandas"
]
with assert_compose_calls(compose, expected_calls):
compose.build('conda-python-pandas')
expected_calls = [
"build --no-cache conda-cpp",
"build --no-cache conda-python",
"build --no-cache conda-python-pandas",
]
with assert_compose_calls(compose, expected_calls):
compose.build('conda-python-pandas', use_cache=False)
expected_calls = [
"build conda-cpp",
"build conda-python",
"build --no-cache conda-python-pandas",
]
with assert_compose_calls(compose, expected_calls):
compose.build('conda-python-pandas', use_cache=True,
use_leaf_cache=False)
def test_compose_build_params(arrow_compose_path):
expected_calls = [
"build ubuntu-cpp",
]
compose = DockerCompose(arrow_compose_path, params=dict(UBUNTU='18.04'))
expected_env = PartialEnv(UBUNTU="18.04")
with assert_compose_calls(compose, expected_calls, env=expected_env):
compose.build('ubuntu-cpp')
compose = DockerCompose(arrow_compose_path, params=dict(UBUNTU='16.04'))
expected_env = PartialEnv(UBUNTU="16.04")
with assert_compose_calls(compose, expected_calls, env=expected_env):
compose.build('ubuntu-cpp')
expected_calls = [
"build --no-cache conda-cpp",
"build --no-cache conda-python",
"build --no-cache conda-python-pandas",
]
compose = DockerCompose(arrow_compose_path, params=dict(UBUNTU='18.04'))
expected_env = PartialEnv(PYTHON='3.6', PANDAS='latest')
with assert_compose_calls(compose, expected_calls, env=expected_env):
compose.build('conda-python-pandas', use_cache=False)
def test_compose_run(arrow_compose_path):
expected_calls = [
format_run("conda-cpp"),
]
compose = DockerCompose(arrow_compose_path)
with assert_compose_calls(compose, expected_calls):
compose.run('conda-cpp')
expected_calls = [
format_run("conda-python")
]
expected_env = PartialEnv(PYTHON='3.6')
with assert_compose_calls(compose, expected_calls, env=expected_env):
compose.run('conda-python')
compose = DockerCompose(arrow_compose_path, params=dict(PYTHON='3.8'))
expected_env = PartialEnv(PYTHON='3.8')
with assert_compose_calls(compose, expected_calls, env=expected_env):
compose.run('conda-python')
compose = DockerCompose(arrow_compose_path, params=dict(PYTHON='3.8'))
for command in ["bash", "echo 1"]:
expected_calls = [
format_run(["conda-python", command]),
]
expected_env = PartialEnv(PYTHON='3.8')
with assert_compose_calls(compose, expected_calls, env=expected_env):
compose.run('conda-python', command)
expected_calls = [
(
format_run("-e CONTAINER_ENV_VAR_A=a -e CONTAINER_ENV_VAR_B=b "
"conda-python")
)
]
compose = DockerCompose(arrow_compose_path)
expected_env = PartialEnv(PYTHON='3.6')
with assert_compose_calls(compose, expected_calls, env=expected_env):
env = collections.OrderedDict([
("CONTAINER_ENV_VAR_A", "a"),
("CONTAINER_ENV_VAR_B", "b")
])
compose.run('conda-python', env=env)
expected_calls = [
(
format_run("--volume /host/build:/build --volume "
"/host/ccache:/ccache:delegated conda-python")
)
]
compose = DockerCompose(arrow_compose_path)
with assert_compose_calls(compose, expected_calls):
volumes = ("/host/build:/build", "/host/ccache:/ccache:delegated")
compose.run('conda-python', volumes=volumes)
def test_compose_run_force_pull_and_build(arrow_compose_path):
compose = DockerCompose(arrow_compose_path)
expected_calls = [
"pull --ignore-pull-failures conda-cpp",
format_run("conda-cpp")
]
with assert_compose_calls(compose, expected_calls):
compose.run('conda-cpp', force_pull=True)
expected_calls = [
"build conda-cpp",
format_run("conda-cpp")
]
with assert_compose_calls(compose, expected_calls):
compose.run('conda-cpp', force_build=True)
expected_calls = [
"pull --ignore-pull-failures conda-cpp",
"build conda-cpp",
format_run("conda-cpp")
]
with assert_compose_calls(compose, expected_calls):
compose.run('conda-cpp', force_pull=True, force_build=True)
expected_calls = [
"pull --ignore-pull-failures conda-cpp",
"pull --ignore-pull-failures conda-python",
"pull --ignore-pull-failures conda-python-pandas",
"build conda-cpp",
"build conda-python",
"build conda-python-pandas",
format_run("conda-python-pandas bash")
]
with assert_compose_calls(compose, expected_calls):
compose.run('conda-python-pandas', command='bash', force_build=True,
force_pull=True)
expected_calls = [
"pull --ignore-pull-failures conda-cpp",
"pull --ignore-pull-failures conda-python",
"build conda-cpp",
"build conda-python",
"build --no-cache conda-python-pandas",
format_run("conda-python-pandas bash")
]
with assert_compose_calls(compose, expected_calls):
compose.run('conda-python-pandas', command='bash', force_build=True,
force_pull=True, use_leaf_cache=False)
def test_compose_push(arrow_compose_path):
compose = DockerCompose(arrow_compose_path, params=dict(PYTHON='3.8'))
expected_env = PartialEnv(PYTHON="3.8")
expected_calls = [
mock.call(["docker", "login", "-u", "user", "-p", "pass"], check=True),
]
for image in ["conda-cpp", "conda-python", "conda-python-pandas"]:
expected_calls.append(
mock.call(["docker-compose", "--file", str(compose.config_path),
"push", image], check=True, env=expected_env)
)
with assert_subprocess_calls(expected_calls):
compose.push('conda-python-pandas', user='user', password='pass')
def test_compose_error(arrow_compose_path):
compose = DockerCompose(arrow_compose_path, params=dict(
PYTHON='3.8',
PANDAS='master'
))
error = subprocess.CalledProcessError(99, [])
with mock.patch('subprocess.run', side_effect=error):
with pytest.raises(RuntimeError) as exc:
compose.run('conda-cpp')
exception_message = str(exc.value)
assert "exited with a non-zero exit code 99" in exception_message
assert "PANDAS: latest" in exception_message
assert "export PANDAS=master" in exception_message
def test_image_with_gpu(arrow_compose_path):
compose = DockerCompose(arrow_compose_path)
expected_calls = [
[
"run", "--rm", "-it", "--gpus", "all",
"-e", "CUDA_ENV=1",
"-e", "OTHER_ENV=2",
"-v", "/host:/container:rw",
"dummy-cuda",
"/bin/bash", "-c", "echo 1 > /tmp/dummy && cat /tmp/dummy"
]
]
with assert_docker_calls(compose, expected_calls):
compose.run('ubuntu-cuda', force_pull=False, force_build=False)
def test_listing_images(arrow_compose_path):
compose = DockerCompose(arrow_compose_path)
assert sorted(compose.images()) == [
'conda-cpp',
'conda-python',
'conda-python-dask',
'conda-python-pandas',
'ubuntu-c-glib',
'ubuntu-cpp',
'ubuntu-cpp-cmake32',
'ubuntu-cuda',
'ubuntu-ruby',
]
|
apache-2.0
|
kagayakidan/scikit-learn
|
sklearn/preprocessing/tests/test_function_transformer.py
|
176
|
2169
|
from nose.tools import assert_equal
import numpy as np
from sklearn.preprocessing import FunctionTransformer
def _make_func(args_store, kwargs_store, func=lambda X, *a, **k: X):
def _func(X, *args, **kwargs):
args_store.append(X)
args_store.extend(args)
kwargs_store.update(kwargs)
return func(X)
return _func
def test_delegate_to_func():
# (args|kwargs)_store will hold the positional and keyword arguments
# passed to the function inside the FunctionTransformer.
args_store = []
kwargs_store = {}
X = np.arange(10).reshape((5, 2))
np.testing.assert_array_equal(
FunctionTransformer(_make_func(args_store, kwargs_store)).transform(X),
X,
'transform should have returned X unchanged',
)
# The function should only have received X.
assert_equal(
args_store,
[X],
'Incorrect positional arguments passed to func: {args}'.format(
args=args_store,
),
)
assert_equal(
kwargs_store,
{},
'Unexpected keyword arguments passed to func: {args}'.format(
args=kwargs_store,
),
)
# reset the argument stores.
args_store[:] = [] # python2 compatible inplace list clear.
kwargs_store.clear()
y = object()
np.testing.assert_array_equal(
FunctionTransformer(
_make_func(args_store, kwargs_store),
pass_y=True,
).transform(X, y),
X,
'transform should have returned X unchanged',
)
# The function should have received X and y.
assert_equal(
args_store,
[X, y],
'Incorrect positional arguments passed to func: {args}'.format(
args=args_store,
),
)
assert_equal(
kwargs_store,
{},
'Unexpected keyword arguments passed to func: {args}'.format(
args=kwargs_store,
),
)
def test_np_log():
X = np.arange(10).reshape((5, 2))
# Test that the numpy.log example still works.
np.testing.assert_array_equal(
FunctionTransformer(np.log1p).transform(X),
np.log1p(X),
)
|
bsd-3-clause
|
frank-tancf/scikit-learn
|
examples/mixture/plot_gmm_covariances.py
|
13
|
4262
|
"""
===============
GMM covariances
===============
Demonstration of several covariances types for Gaussian mixture models.
See :ref:`gmm` for more information on the estimator.
Although GMMs are often used for clustering, we can compare the obtained
clusters with the actual classes from the dataset. We initialize the means
of the Gaussians with the means of the classes from the training set to make
this comparison valid.
We plot predicted labels on both training and held out test data using a
variety of GMM covariance types on the iris dataset.
We compare GMMs with spherical, diagonal, full, and tied covariance
matrices in increasing order of performance. Although one would
expect full covariance to perform best in general, it is prone to
overfitting on small datasets and does not generalize well to held out
test data.
On the plots, train data is shown as dots, while test data is shown as
crosses. The iris dataset is four-dimensional. Only the first two
dimensions are shown here, and thus some points are separated in other
dimensions.
"""
print(__doc__)
# Author: Ron Weiss <[email protected]>, Gael Varoquaux
# License: BSD 3 clause
# $Id$
import matplotlib.pyplot as plt
import matplotlib as mpl
import numpy as np
from sklearn import datasets
from sklearn.model_selection import StratifiedKFold
from sklearn.externals.six.moves import xrange
from sklearn.mixture import GMM
colors = ['navy', 'turquoise', 'darkorange']
def make_ellipses(gmm, ax):
for n, color in enumerate(colors):
v, w = np.linalg.eigh(gmm._get_covars()[n][:2, :2])
u = w[0] / np.linalg.norm(w[0])
angle = np.arctan2(u[1], u[0])
angle = 180 * angle / np.pi # convert to degrees
v *= 9
ell = mpl.patches.Ellipse(gmm.means_[n, :2], v[0], v[1],
180 + angle, color=color)
ell.set_clip_box(ax.bbox)
ell.set_alpha(0.5)
ax.add_artist(ell)
iris = datasets.load_iris()
# Break up the dataset into non-overlapping training (75%) and testing
# (25%) sets.
skf = StratifiedKFold(n_splits=4)
# Only take the first fold.
train_index, test_index = next(iter(skf.split(iris.data, iris.target)))
X_train = iris.data[train_index]
y_train = iris.target[train_index]
X_test = iris.data[test_index]
y_test = iris.target[test_index]
n_classes = len(np.unique(y_train))
# Try GMMs using different types of covariances.
estimators = dict((covar_type,
GMM(n_components=n_classes, covariance_type=covar_type,
init_params='wc', n_iter=20))
for covar_type in ['spherical', 'diag', 'tied', 'full'])
n_estimators = len(estimators)
plt.figure(figsize=(3 * n_estimators / 2, 6))
plt.subplots_adjust(bottom=.01, top=0.95, hspace=.15, wspace=.05,
left=.01, right=.99)
for index, (name, estimator) in enumerate(estimators.items()):
# Since we have class labels for the training data, we can
# initialize the GMM parameters in a supervised manner.
estimator.means_ = np.array([X_train[y_train == i].mean(axis=0)
for i in xrange(n_classes)])
# Train the other parameters using the EM algorithm.
estimator.fit(X_train)
h = plt.subplot(2, n_estimators // 2, index + 1)
make_ellipses(estimator, h)
for n, color in enumerate(colors):
data = iris.data[iris.target == n]
plt.scatter(data[:, 0], data[:, 1], s=0.8, color=color,
label=iris.target_names[n])
# Plot the test data with crosses
for n, color in enumerate(colors):
data = X_test[y_test == n]
plt.scatter(data[:, 0], data[:, 1], marker='x', color=color)
y_train_pred = estimator.predict(X_train)
train_accuracy = np.mean(y_train_pred.ravel() == y_train.ravel()) * 100
plt.text(0.05, 0.9, 'Train accuracy: %.1f' % train_accuracy,
transform=h.transAxes)
y_test_pred = estimator.predict(X_test)
test_accuracy = np.mean(y_test_pred.ravel() == y_test.ravel()) * 100
plt.text(0.05, 0.8, 'Test accuracy: %.1f' % test_accuracy,
transform=h.transAxes)
plt.xticks(())
plt.yticks(())
plt.title(name)
plt.legend(loc='lower right', prop=dict(size=12))
plt.show()
|
bsd-3-clause
|
patverga/torch-relation-extraction
|
bin/analysis/plot-sent-len-bar.py
|
1
|
1198
|
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import matplotlib.colors
import sys
matplotlib.rc('text', usetex=True)
fontsize = 22
font = {'family' : 'serif',
'serif' : 'Times Roman',
'size' : fontsize}
matplotlib.rc('font', **font)
output_dir = "doc/naacl2016/"
# load in data
xlabels = ["$<3$", "$<5$", "$\geq 5$", "$\geq 10$"]
uschema_f1s = [0.24300966, 0.3140496, 0.20021415, 0.10432721]
lstm_f1s = [0.17596321, 0.28711897, 0.25862822, 0.14799798]
colors=['0.75', '0.25']
bar_width = 0.25
# initialize figures
fig1 = plt.figure()
ax1 = fig1.add_subplot(111)
ax1.set_title("LSTM + USchema F1: Varying Pattern Length", fontsize=fontsize)
ax1.set_ylabel("F1")
ax1.set_xlabel("Pattern Length")
inds = np.arange(len(uschema_f1s))
ax1.set_xticks(inds + bar_width)
ax1.set_xticklabels(xlabels)
lstm_bar = ax1.bar(inds+bar_width, lstm_f1s, bar_width, color=colors[1])
uschema_bar = ax1.bar(inds, uschema_f1s, bar_width, color=colors[0])
plt.tight_layout()
# add legend
ax1.legend((lstm_bar[0], uschema_bar[0]), ('LSTM', 'USchema'), fontsize=18)
fig1.savefig("%s/f1-vary-pat-length.pdf" % (output_dir), bbox_inches='tight')
plt.show()
|
mit
|
PanDAWMS/panda-server
|
pandaserver/userinterface/Client.py
|
1
|
78409
|
'''
client methods
'''
import os
import re
import sys
import gzip
import uuid
try:
from urllib import urlencode
except ImportError:
from urllib.parse import urlencode
import socket
import getpass
import tempfile
try:
import cPickle as pickle
except ImportError:
import pickle
import json
from pandaserver.srvcore.CoreUtils import commands_get_status_output
# configuration
try:
baseURL = os.environ['PANDA_URL']
except Exception:
baseURL = 'http://pandaserver.cern.ch:25080/server/panda'
try:
baseURLSSL = os.environ['PANDA_URL_SSL']
except Exception:
baseURLSSL = 'https://pandaserver.cern.ch:25443/server/panda'
# exit code
EC_Failed = 255
# panda server URLs
if 'PANDA_URL_MAP' in os.environ:
serverURLs = {'default' : {'URL' : baseURL,
'URLSSL' : baseURLSSL},
}
# decode envvar to map
try:
for tmpCompStr in os.environ['PANDA_URL_MAP'].split('|'):
tmpKey,tmpURL,tmpURLSSL = tmpCompStr.split(',')
# append
serverURLs[tmpKey] = {'URL' : tmpURL,
'URLSSL' : tmpURLSSL}
except Exception:
pass
else:
# default
serverURLs = {'default' : {'URL' : baseURL,
'URLSSL' : baseURLSSL},
'CERN' : {'URL' : 'http://pandaserver.cern.ch:25080/server/panda',
'URLSSL' : 'https://pandaserver.cern.ch:25443/server/panda'},
}
# bamboo
baseURLBAMBOO = 'http://pandabamboo.cern.ch:25070/bamboo/bamboo'
# wrapper for pickle with python 3
def pickle_dumps(obj):
return pickle.dumps(obj, protocol=0)
def pickle_loads(obj_string):
try:
return pickle.loads(obj_string.encode())
except Exception:
return pickle.loads(obj_string)
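# Hedged illustration (added, not part of the original module): the wrappers
# above give a protocol-0 round trip that copes with both str and bytes
# payloads, which is what the server may hand back.
def _example_pickle_round_trip():
    blob = pickle_dumps({'PandaID': 4711})
    assert pickle_loads(blob) == {'PandaID': 4711}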
# get URL
def _getURL(type,srvID=None):
if srvID in serverURLs:
urls = serverURLs[srvID]
else:
urls = serverURLs['default']
return urls[type]
# get Panda srvIDs
def getPandas():
srvs = list(serverURLs)
# remove 'default'
try:
srvs.remove('default')
except Exception:
pass
return srvs
# look for a grid proxy certificate
def _x509():
# see X509_USER_PROXY
try:
return os.environ['X509_USER_PROXY']
except Exception:
pass
# see the default place
x509 = '/tmp/x509up_u%s' % os.getuid()
if os.access(x509,os.R_OK):
return x509
# no valid proxy certificate
# FIXME
print("No valid grid proxy certificate found")
return ''
# curl class
class _Curl:
# constructor
def __init__(self):
# path to curl
self.path = 'curl'
# verification of the host certificate
self.verifyHost = True
# request a compressed response
self.compress = True
# SSL cert/key
self.sslCert = ''
self.sslKey = ''
# verbose
self.verbose = False
# use json
self.use_json = False
# GET method
def get(self,url,data):
# make command
com = '%s --silent --get' % self.path
if not self.verifyHost:
com += ' --insecure'
elif 'X509_CERT_DIR' in os.environ:
com += ' --capath %s' % os.environ['X509_CERT_DIR']
elif os.path.exists('/etc/grid-security/certificates'):
com += ' --capath /etc/grid-security/certificates'
if self.compress:
com += ' --compressed'
if self.sslCert != '':
com += ' --cert %s' % self.sslCert
com += ' --cacert %s' % self.sslCert
if self.sslKey != '':
com += ' --key %s' % self.sslKey
# timeout
com += ' -m 600'
# json
if self.use_json:
com += ' -H "Accept: application/json"'
# data
strData = ''
for key in data:
strData += 'data="%s"\n' % urlencode({key:data[key]})
# write data to temporary config file
try:
tmpName = os.environ['PANDA_TMP']
except Exception:
tmpName = '/tmp'
tmpName += '/%s_%s' % (getpass.getuser(), str(uuid.uuid4()))
tmpFile = open(tmpName,'w')
tmpFile.write(strData)
tmpFile.close()
com += ' --config %s' % tmpName
com += ' %s' % url
# execute
if self.verbose:
print(com)
print(strData)
ret = commands_get_status_output(com)
# remove temporary file
os.remove(tmpName)
if ret[0] != 0:
ret = (ret[0]%255,ret[1])
if self.verbose:
print(ret)
return ret
# POST method
def post(self, url, data, via_file=False):
# make command
com = '%s --silent' % self.path
if not self.verifyHost:
com += ' --insecure'
elif 'X509_CERT_DIR' in os.environ:
com += ' --capath %s' % os.environ['X509_CERT_DIR']
elif os.path.exists('/etc/grid-security/certificates'):
com += ' --capath /etc/grid-security/certificates'
if self.compress:
com += ' --compressed'
if self.sslCert != '':
com += ' --cert %s' % self.sslCert
com += ' --cacert %s' % self.sslCert
if self.sslKey != '':
com += ' --key %s' % self.sslKey
# timeout
com += ' -m 600'
# json
if self.use_json:
com += ' -H "Accept: application/json"'
# data
strData = ''
for key in data:
strData += 'data="%s"\n' % urlencode({key:data[key]})
# write data to temporary config file
try:
tmpName = os.environ['PANDA_TMP']
except Exception:
tmpName = '/tmp'
tmpName += '/%s_%s' % (getpass.getuser(), str(uuid.uuid4()))
tmpNameOut = '{0}.out'.format(tmpName)
tmpFile = open(tmpName,'w')
tmpFile.write(strData)
tmpFile.close()
com += ' --config %s' % tmpName
if via_file:
com += ' -o {0}'.format(tmpNameOut)
com += ' %s' % url
# execute
if self.verbose:
print(com)
print(strData)
s,o = commands_get_status_output(com)
if via_file:
with open(tmpNameOut, 'rb') as f:
ret = (s, f.read())
os.remove(tmpNameOut)
else:
ret = (s, o)
# remove temporary file
os.remove(tmpName)
if ret[0] != 0:
ret = (ret[0]%255,ret[1])
if self.verbose:
print(ret)
return ret
# PUT method
def put(self,url,data):
# make command
com = '%s --silent' % self.path
if not self.verifyHost:
com += ' --insecure'
elif 'X509_CERT_DIR' in os.environ:
com += ' --capath %s' % os.environ['X509_CERT_DIR']
elif os.path.exists('/etc/grid-security/certificates'):
com += ' --capath /etc/grid-security/certificates'
if self.compress:
com += ' --compressed'
if self.sslCert != '':
com += ' --cert %s' % self.sslCert
com += ' --cacert %s' % self.sslCert
if self.sslKey != '':
com += ' --key %s' % self.sslKey
# emulate PUT
for key in data:
com += ' -F "%s=@%s"' % (key,data[key])
com += ' %s' % url
# execute
if self.verbose:
print(com)
ret = commands_get_status_output(com)
if ret[0] != 0:
ret = (ret[0]%255,ret[1])
if self.verbose:
print(ret)
return ret
'''
Client API
'''
# use web cache
def useWebCache():
"""Switch to use web cache for some read-only requests so that the number
of hits to the back-end database is reduced.
args:
returns:
"""
global baseURL
baseURL = re.sub('25080','25085',baseURL)
global serverURLs
for tmpKey in serverURLs:
tmpVal = serverURLs[tmpKey]
tmpVal['URL'] = baseURL
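# Hedged usage sketch (added for illustration): with the default server URL the
# switch simply moves read-only traffic from port 25080 to the cache port 25085.
def _example_use_web_cache():
    useWebCache()
    # assumes the default pandaserver.cern.ch URLs rather than a custom PANDA_URL
    assert ':25085/' in baseURL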
# submit jobs
def submitJobs(jobs,srvID=None,toPending=False):
"""Submit jobs
args:
jobs: the list of JobSpecs
srvID: obsolete
toPending: set True if jobs need to be pending state for the
two-staged submission mechanism
returns:
status code
0: communication succeeded to the panda server
255: communication failure
return code
True: request is processed
False: not processed
"""
# set hostname
hostname = socket.getfqdn()
for job in jobs:
job.creationHost = hostname
# serialize
strJobs = pickle_dumps(jobs)
# instantiate curl
curl = _Curl()
curl.sslCert = _x509()
curl.sslKey = _x509()
# execute
url = _getURL('URLSSL',srvID) + '/submitJobs'
data = {'jobs':strJobs}
if toPending:
data['toPending'] = True
status,output = curl.post(url,data)
if status!=0:
print(output)
return status,output
try:
return status,pickle_loads(output)
except Exception:
type, value, traceBack = sys.exc_info()
errStr = "ERROR submitJobs : %s %s" % (type,value)
print(errStr)
return EC_Failed,output+'\n'+errStr
# run task assignment
def runTaskAssignment(jobs):
"""Run the task brokerage
args:
jobs: list of typical JobSpecs for tasks to be assigned
returns:
status code
0: communication succeeded to the panda server
255: communication failure
return code
True: request is processed
False: not processed
"""
# set hostname
hostname = socket.getfqdn()
for job in jobs:
job.creationHost = hostname
# serialize
strJobs = pickle_dumps(jobs)
# instantiate curl
curl = _Curl()
curl.sslCert = _x509()
curl.sslKey = _x509()
# execute
url = baseURLSSL + '/runTaskAssignment'
data = {'jobs':strJobs}
status,output = curl.post(url,data)
if status!=0:
print(output)
return status,output
try:
return status,pickle_loads(output)
except Exception:
type, value, traceBack = sys.exc_info()
errStr = "ERROR runTaskAssignment : %s %s" % (type,value)
print(errStr)
return EC_Failed,output+'\n'+errStr
# get job status
def getJobStatus(ids, use_json=False):
"""Get job status
args:
ids: the list of PandaIDs
use_json: using json instead of pickle
returns:
status code
0: communication succeeded to the panda server
255: communication failure
the list of JobSpecs (or Nones for non-existing PandaIDs)
"""
# serialize
if use_json:
strIDs = json.dumps(ids)
else:
strIDs = pickle_dumps(ids)
# instantiate curl
curl = _Curl()
curl.use_json = use_json
# execute
url = _getURL('URL') + '/getJobStatus'
data = {'ids':strIDs}
status,output = curl.post(url, data, via_file=True)
try:
if use_json:
return status, json.loads(output)
return status,pickle_loads(output)
except Exception as e:
errStr = "ERROR getJobStatus : %s" % str(e)
print(errStr)
return EC_Failed,output+'\n'+errStr
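# Hedged usage sketch (added for illustration, not part of the original API):
# assumes a reachable PanDA server and that the PandaIDs below are placeholders;
# the jobStatus attribute access assumes the usual JobSpec layout.
def _example_get_job_status(panda_ids=(4711, 4712)):
    status, job_specs = getJobStatus(list(panda_ids))
    if status != 0:
        print("communication failure: %s" % str(job_specs))
        return
    for panda_id, spec in zip(panda_ids, job_specs):
        # entries are None for non-existing PandaIDs
        print(panda_id, 'not found' if spec is None else spec.jobStatus)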
# get PandaID with jobexeID
def getPandaIDwithJobExeID(ids):
"""Get the list of PandaIDs corresponding to a given jobExecutionIDs
args:
ids: list of jobExecutionIDs
returns:
status code
0: communication succeeded to the panda server
255: communication failure
the list of PandaIDs (or Nones for non-existing IDs)
"""
# serialize
strIDs = pickle_dumps(ids)
# instantiate curl
curl = _Curl()
# execute
url = _getURL('URL') + '/getPandaIDwithJobExeID'
data = {'ids':strIDs}
status,output = curl.post(url,data)
try:
return status,pickle_loads(output)
except Exception:
type, value, traceBack = sys.exc_info()
errStr = "ERROR getPandaIDwithJobExeID : %s %s" % (type,value)
print(errStr)
return EC_Failed,output+'\n'+errStr
# get assigning task
def getAssigningTask():
"""Get the list of IDs of tasks which are being assigned by the
task brokerage
args:
returns:
status code
0: communication succeeded to the panda server
255: communication failure
the list of taskIDs
"""
# instantiate curl
curl = _Curl()
# execute
url = baseURL + '/getAssigningTask'
status,output = curl.get(url,{})
try:
return status,pickle_loads(output)
except Exception:
print(output)
type, value, traceBack = sys.exc_info()
errStr = "ERROR getAssigningTask : %s %s" % (type,value)
print(errStr)
return EC_Failed,output+'\n'+errStr
# get assigned cloud for tasks
def seeCloudTask(ids):
"""Check to which clouds the tasks are assigned
args:
ids: the list of taskIDs
returns:
status code
0: communication succeeded to the panda server
255: communication failure
the list of clouds (or Nones if tasks are not yet assigned)
raises:
EC_Failed: if communication failure to the panda server
"""
# serialize
strIDs = pickle_dumps(ids)
# instantiate curl
curl = _Curl()
# execute
url = baseURL + '/seeCloudTask'
data = {'ids':strIDs}
status,output = curl.post(url,data)
try:
return status,pickle_loads(output)
except Exception:
type, value, traceBack = sys.exc_info()
errStr = "ERROR seeCloudTask : %s %s" % (type,value)
print(errStr)
return EC_Failed,output+'\n'+errStr
# kill jobs
def killJobs(ids,code=None,verbose=False,srvID=None,useMailAsID=False,keepUnmerged=False, jobSubStatus=None):
"""Kill jobs. Normal users can kill only their own jobs.
People with production VOMS role can kill any jobs.
Running jobs are killed when next heartbeat comes from the pilot.
Set code=9 if running jobs need to be killed immediately.
args:
ids: the list of PandaIDs
code: specify why the jobs are killed
2: expire
3: aborted
4: expire in waiting
7: retry by server
8: rebrokerage
9: force kill
50: kill by JEDI
91: kill user jobs with prod role
verbose: set True to see what's going on
srvID: obsolete
useMailAsID: obsolete
keepUnmerged: set True not to cancel unmerged jobs when pmerge is killed.
jobSubStatus: set job sub status if any
returns:
status code
0: communication succeeded to the panda server
255: communication failure
the list of results for the individual jobs
"""
# serialize
strIDs = pickle_dumps(ids)
# instantiate curl
curl = _Curl()
curl.sslCert = _x509()
curl.sslKey = _x509()
curl.verbose = verbose
# execute
url = _getURL('URLSSL',srvID) + '/killJobs'
data = {'ids':strIDs,'code':code,'useMailAsID':useMailAsID}
killOpts = ''
if keepUnmerged:
killOpts += 'keepUnmerged,'
if jobSubStatus is not None:
killOpts += 'jobSubStatus={0},'.format(jobSubStatus)
data['killOpts'] = killOpts[:-1]
status,output = curl.post(url,data)
try:
return status,pickle_loads(output)
except Exception:
type, value, traceBack = sys.exc_info()
errStr = "ERROR killJobs : %s %s" % (type,value)
print(errStr)
return EC_Failed,output+'\n'+errStr
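# Hedged usage sketch (added for illustration): code=9 kills running jobs
# immediately, as described in the docstring above. The PandaIDs are
# placeholders and a proxy with sufficient privileges is assumed.
def _example_force_kill(panda_ids=(4711, 4712)):
    status, results = killJobs(list(panda_ids), code=9, verbose=True)
    if status == 0:
        print(results)
    return status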
# reassign jobs
def reassignJobs(ids,forPending=False,firstSubmission=None):
"""Triggers reassignment of jobs. This is not effective if jobs were preassigned to sites before being submitted.
args:
ids: the list of taskIDs
forPending: set True if pending jobs are reassigned
firstSubmission: set True if first jobs are submitted for a task, or False if not
returns:
status code
0: communication succeeded to the panda server
255: communication failure
return code
True: request is processed
False: not processed
"""
# serialize
strIDs = pickle_dumps(ids)
# instantiate curl
curl = _Curl()
curl.sslCert = _x509()
curl.sslKey = _x509()
# execute
url = baseURLSSL + '/reassignJobs'
data = {'ids':strIDs}
if forPending:
data['forPending'] = True
if firstSubmission is not None:
data['firstSubmission'] = firstSubmission
status,output = curl.post(url,data)
try:
return status,pickle_loads(output)
except Exception:
type, value, traceBack = sys.exc_info()
errStr = "ERROR reassignJobs : %s %s" % (type,value)
print(errStr)
return EC_Failed,"stat=%s err=%s %s" % (status,output,errStr)
# query PandaIDs (obsolete)
def queryPandaIDs(ids):
# serialize
strIDs = pickle_dumps(ids)
# instantiate curl
curl = _Curl()
# execute
url = baseURL + '/queryPandaIDs'
data = {'ids':strIDs}
status,output = curl.post(url,data)
try:
return status,pickle_loads(output)
except Exception:
type, value, traceBack = sys.exc_info()
errStr = "ERROR queryPandaIDs : %s %s" % (type,value)
print(errStr)
return EC_Failed,output+'\n'+errStr
# query job info per cloud (obsolete)
def queryJobInfoPerCloud(cloud,schedulerID=None):
# instantiate curl
curl = _Curl()
# execute
url = baseURL + '/queryJobInfoPerCloud'
data = {'cloud':cloud}
if schedulerID is not None:
data['schedulerID'] = schedulerID
status,output = curl.post(url,data)
try:
return status,pickle_loads(output)
except Exception:
type, value, traceBack = sys.exc_info()
errStr = "ERROR queryJobInfoPerCloud : %s %s" % (type,value)
print(errStr)
return EC_Failed,output+'\n'+errStr
# get job statistics
def getJobStatistics(sourcetype=None):
"""Get job statistics
args:
sourcetype: type of jobs
all: all jobs
analysis: analysis jobs
production: production jobs
returns:
status code
0: communication succeeded to the panda server
255: communication failure
map of the number of jobs per job status in each site
"""
# instantiate curl
curl = _Curl()
# execute
ret = {}
for srvID in getPandas():
url = _getURL('URL',srvID) + '/getJobStatistics'
data = {}
if sourcetype is not None:
data['sourcetype'] = sourcetype
status,output = curl.get(url,data)
try:
tmpRet = status,pickle_loads(output)
if status != 0:
return tmpRet
except Exception:
print(output)
type, value, traceBack = sys.exc_info()
errStr = "ERROR getJobStatistics : %s %s" % (type,value)
print(errStr)
return EC_Failed,output+'\n'+errStr
# gather
for tmpCloud in tmpRet[1]:
tmpVal = tmpRet[1][tmpCloud]
if tmpCloud not in ret:
# append cloud values
ret[tmpCloud] = tmpVal
else:
# sum statistics
for tmpStatus in tmpVal:
tmpCount = tmpVal[tmpStatus]
if tmpStatus in ret[tmpCloud]:
ret[tmpCloud][tmpStatus] += tmpCount
else:
ret[tmpCloud][tmpStatus] = tmpCount
return 0,ret
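# Hedged usage sketch (added for illustration): collapsing the per-site map
# returned above into one total per job status; assumes the call succeeds.
def _example_total_job_counts(sourcetype='analysis'):
    status, stats = getJobStatistics(sourcetype)
    if status != 0:
        return {}
    totals = {}
    for site_map in stats.values():
        for job_status, count in site_map.items():
            totals[job_status] = totals.get(job_status, 0) + count
    return totals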
# get job statistics for Bamboo
def getJobStatisticsForBamboo(useMorePG=False):
"""Get job statistics for Bamboo
args:
useMorePG: set True if fine-grained classification is required
returns:
status code
0: communication succeeded to the panda server
255: communication failure
map of the number of jobs per job status in each site
"""
# instantiate curl
curl = _Curl()
# execute
ret = {}
for srvID in getPandas():
url = _getURL('URL',srvID) + '/getJobStatisticsForBamboo'
data = {}
if useMorePG is not False:
data['useMorePG'] = useMorePG
status,output = curl.get(url,data)
try:
tmpRet = status,pickle_loads(output)
if status != 0:
return tmpRet
except Exception:
print(output)
type, value, traceBack = sys.exc_info()
errStr = "ERROR getJobStatisticsForBamboo : %s %s" % (type,value)
print(errStr)
return EC_Failed,output+'\n'+errStr
# gather
for tmpCloud in tmpRet[1]:
tmpMap = tmpRet[1][tmpCloud]
if tmpCloud not in ret:
# append cloud values
ret[tmpCloud] = tmpMap
else:
# sum statistics
for tmpPType in tmpMap:
tmpVal = tmpMap[tmpPType]
if tmpPType not in ret[tmpCloud]:
ret[tmpCloud][tmpPType] = tmpVal
else:
for tmpStatus in tmpVal:
tmpCount = tmpVal[tmpStatus]
if tmpStatus in ret[tmpCloud][tmpPType]:
ret[tmpCloud][tmpPType][tmpStatus] += tmpCount
else:
ret[tmpCloud][tmpPType][tmpStatus] = tmpCount
return 0,ret
# get highest prio jobs
def getHighestPrioJobStat(perPG=False,useMorePG=False):
"""Get the number of jobs with the highest priorities in each combination of cloud and processingType
args:
perPG: set True if grouped by processingGroup instead of processingType
useMorePG: set True if fine-grained classification is required
returns:
status code
0: communication succeeded to the panda server
255: communication failure
map of the number of jobs and priorities in each combination of cloud and processingType (or processingGroup)
"""
# instantiate curl
curl = _Curl()
# execute
ret = {}
url = baseURL + '/getHighestPrioJobStat'
data = {'perPG':perPG}
if useMorePG is not False:
data['useMorePG'] = useMorePG
status,output = curl.get(url,data)
try:
return status,pickle_loads(output)
except Exception:
print(output)
type, value, traceBack = sys.exc_info()
errStr = "ERROR getHighestPrioJobStat : %s %s" % (type,value)
print(errStr)
return EC_Failed,output+'\n'+errStr
# get jobs updated recently
def getJobsToBeUpdated(limit=5000,lockedby='',srvID=None):
"""Get the list of jobs which have been recently updated.
args:
limit: the maximum number of jobs
lockedby: name of the machinery which submitted jobs
srvID: obsolete
returns:
status code
0: communication succeeded to the panda server
255: communication failure
the list of PandaIDs
"""
# instantiate curl
curl = _Curl()
# execute
url = _getURL('URL',srvID) + '/getJobsToBeUpdated'
status,output = curl.get(url,{'limit':limit,'lockedby':lockedby})
try:
return status,pickle_loads(output)
except Exception:
print(output)
type, value, traceBack = sys.exc_info()
errStr = "ERROR getJobsToBeUpdated : %s %s" % (type,value)
print(errStr)
return EC_Failed,output+'\n'+errStr
# update prodDBUpdateTimes
def updateProdDBUpdateTimes(params,verbose=False,srvID=None):
"""Update timestamp of jobs when update info is propagated to another database
args:
params: map of PandaID and jobStatus and timestamp
verbose: set True to see what's going on
srvID: obsolete
returns:
status code
0: communication succeeded to the panda server
255: communication failure
return code
True: request is processed
False: not processed
"""
# serialize
strPar = pickle_dumps(params)
# instantiate curl
curl = _Curl()
curl.sslCert = _x509()
curl.sslKey = _x509()
curl.verbose = verbose
# execute
url = _getURL('URLSSL',srvID) + '/updateProdDBUpdateTimes'
data = {'params':strPar}
status,output = curl.post(url,data)
try:
return status,pickle_loads(output)
except Exception:
type, value, traceBack = sys.exc_info()
errStr = "ERROR updateProdDBUpdateTimes : %s %s" % (type,value)
print(errStr)
return EC_Failed,output+'\n'+errStr
# get PandaID at site
def getPandaIDsSite(site,status,limit=500):
"""Get the list of jobs in a job status at at a site
args:
site: site name
status: job status
limit: maximum number of jobs
returns:
status code
0: communication succeeded to the panda server
255: communication failure
the list of PandaIDs
"""
# instantiate curl
curl = _Curl()
# execute
url = baseURL + '/getPandaIDsSite'
status,output = curl.get(url,{'site':site,'status':status,'limit':limit})
try:
return status,pickle_loads(output)
except Exception:
print(output)
type, value, traceBack = sys.exc_info()
errStr = "ERROR getPandaIDsSite : %s %s" % (type,value)
print(errStr)
return EC_Failed,output+'\n'+errStr
# get job statistics per site
def getJobStatisticsPerSite(predefined=False,workingGroup='',countryGroup='',jobType='',minPriority=None,
readArchived=None):
"""Get job statistics with job attributes
args:
predefined: get jobs which are assigned to sites before being submitted
workingGroup: comma-separated list of workingGroups
countryGroup: comma-separated list of countryGroups
jobType: type of jobs
all: all jobs
analysis: analysis jobs
production: production jobs
minPriority: get jobs with higher priorities than this value
readArchived: get jobs with finished/failed/cancelled state in addition
returns:
status code
0: communication succeeded to the panda server
255: communication failure
map of the number of jobs per job status in each site
"""
# instantiate curl
curl = _Curl()
# execute
ret = {}
for srvID in getPandas():
url = _getURL('URL',srvID) + '/getJobStatisticsPerSite'
data = {'predefined':predefined}
if workingGroup not in ['',None]:
data['workingGroup'] = workingGroup
if countryGroup not in ['',None]:
data['countryGroup'] = countryGroup
if jobType not in ['',None]:
data['jobType'] = jobType
if minPriority not in ['',None]:
data['minPriority'] = minPriority
if readArchived not in ['',None]:
data['readArchived'] = readArchived
status,output = curl.get(url,data)
try:
tmpRet = status,pickle_loads(output)
if status != 0:
return tmpRet
except Exception:
print(output)
type, value, traceBack = sys.exc_info()
errStr = "ERROR getJobStatisticsPerSite : %s %s" % (type,value)
print(errStr)
return EC_Failed,output+'\n'+errStr
# gather
for tmpSite in tmpRet[1]:
tmpVal = tmpRet[1][tmpSite]
if tmpSite not in ret:
# append site values
ret[tmpSite] = tmpVal
else:
# sum statistics
for tmpStatus in tmpVal:
tmpCount = tmpVal[tmpStatus]
if tmpStatus in ret[tmpSite]:
ret[tmpSite][tmpStatus] += tmpCount
else:
ret[tmpSite][tmpStatus] = tmpCount
return 0,ret
# get job statistics per site with label
def getJobStatisticsWithLabel(site=''):
"""Get job statistics per prodSourceLabel
args:
site: comma-separated list of sites. An empty string for all sites.
returns:
status code
0: communication succeeded to the panda server
255: communication failure
map of the number of jobs per job status and prodSourceLabel in each site
"""
# instantiate curl
curl = _Curl()
# execute
url = baseURL + '/getJobStatisticsWithLabel'
data = {}
if site not in ['',None]:
data['site'] = site
status,output = curl.get(url,data)
try:
return status,pickle_loads(output)
except Exception:
print(output)
type, value, traceBack = sys.exc_info()
errStr = "ERROR getJobStatisticsWithLabel : %s %s" % (type,value)
print(errStr)
return EC_Failed,output+'\n'+errStr
# get the number of waiting jobs per site and user (obsolete)
def getJobStatisticsPerUserSite():
# instantiate curl
curl = _Curl()
# execute
url = baseURL + '/getJobStatisticsPerUserSite'
data = {}
status,output = curl.get(url,data)
try:
return status,pickle_loads(output)
except Exception:
print(output)
type, value, traceBack = sys.exc_info()
errStr = "ERROR getJobStatisticsPerUserSite : %s %s" % (type,value)
print(errStr)
return EC_Failed,output+'\n'+errStr
# get job statistics per site and resource
def getJobStatisticsPerSiteResource(timeWindow=None):
"""Get job statistics with job attributes
args:
timeWindow: count the number of jobs that finished/failed/were cancelled in the last N minutes. 12*60 by default
returns:
status code
0: communication succeeded to the panda server
255: communication failure
map of the number of jobs per job status in each site and resource
"""
# instantiate curl
curl = _Curl()
# execute
url = baseURL + '/getJobStatisticsPerSiteResource'
data = {}
if timeWindow is not None:
data['timeWindow'] = timeWindow
status,output = curl.get(url,data)
try:
return status,json.loads(output)
except Exception:
print(output)
type, value, traceBack = sys.exc_info()
errStr = "ERROR getJobStatisticsPerSiteResource : %s %s" % (type,value)
print(errStr)
return EC_Failed,output+'\n'+errStr
# get job statistics per site, label, and resource
def get_job_statistics_per_site_label_resource(time_window=None):
"""Get job statistics per site, label, and resource
args:
time_window: count the number of jobs that finished/failed/were cancelled in the last N minutes. 12*60 by default
returns:
status code
0: communication succeeded to the panda server
255: communication failure
map of the number of jobs per job status in each site and resource
"""
# instantiate curl
curl = _Curl()
# execute
url = baseURL + '/get_job_statistics_per_site_label_resource'
data = {}
if time_window is not None:
data['time_window'] = time_window
status,output = curl.get(url,data)
try:
return status,json.loads(output)
except Exception as e:
print(output)
errStr = "ERROR get_job_statistics_per_site_label_resource : %s" % str(e)
print(errStr)
return EC_Failed,output+'\n'+errStr
# query last files in datasets
def queryLastFilesInDataset(datasets):
"""Get names of files which have the largest serial number in each dataset
args:
datasets: the list of dataset names
returns:
status code
0: communication succeeded to the panda server
255: communication failure
map of the dataset name and the file name
"""
# serialize
strDSs = pickle_dumps(datasets)
# instantiate curl
curl = _Curl()
# execute
url = baseURL + '/queryLastFilesInDataset'
data = {'datasets':strDSs}
status,output = curl.post(url,data)
try:
return status,pickle_loads(output)
except Exception:
type, value, traceBack = sys.exc_info()
print("ERROR queryLastFilesInDataset : %s %s" % (type,value))
return EC_Failed,None
# insert sandbox file info
def insertSandboxFileInfo(userName,fileName,fileSize,checkSum,verbose=False):
"""Insert infomation of input sandbox
args:
userName: the name of the user
fileName: the file name
fileSize: the file size
checkSum: md5sum of the file
verbose: set True to see what's going on
returns:
status code
0: communication succeeded to the panda server
else: communication failure
"""
# instantiate curl
curl = _Curl()
curl.sslCert = _x509()
curl.sslKey = _x509()
curl.verbose = verbose
# execute
url = baseURLSSL + '/insertSandboxFileInfo'
data = {'userName':userName,'fileName':fileName,'fileSize':fileSize,'checkSum':checkSum}
return curl.post(url,data)
# upload input sandbox file
def putFile(file):
"""Upload input sandbox
args:
file: the file name
returns:
status code
0: communication succeeded to the panda server
else: communication failure
"""
# instantiate curl
curl = _Curl()
curl.sslCert = _x509()
curl.sslKey = _x509()
# execute
url = baseURLSSL + '/putFile'
data = {'file':file}
return curl.put(url,data)
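# Hedged usage sketch (added for illustration): uploading a local sandbox
# tarball; the path is a placeholder and a valid grid proxy is assumed.
def _example_upload_sandbox(path='/tmp/mySandbox.tar.gz'):
    status, output = putFile(path)
    if status != 0:
        print("upload failed: %s" % output)
    return status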
# delete file (obsolete)
def deleteFile(file):
# instantiate curl
curl = _Curl()
curl.sslCert = _x509()
curl.sslKey = _x509()
# execute
url = baseURLSSL + '/deleteFile'
data = {'file':file}
return curl.post(url,data)
# touch file (obsolete)
def touchFile(sourceURL,filename):
# instantiate curl
curl = _Curl()
curl.sslCert = _x509()
curl.sslKey = _x509()
# execute
url = sourceURL + '/server/panda/touchFile'
data = {'filename':filename}
return curl.post(url,data)
# get site specs
def getSiteSpecs(siteType=None):
"""Get list of site specifications
args:
siteType: type of sites
None: all sites
analysis: analysis sites
production: production sites
returns:
status code
0: communication succeeded to the panda server
255: communication failure
map of site and attributes
"""
# instantiate curl
curl = _Curl()
# execute
url = baseURL + '/getSiteSpecs'
data = {}
if siteType is not None:
data = {'siteType':siteType}
status,output = curl.get(url,data)
try:
return status,pickle_loads(output)
except Exception:
type, value, traceBack = sys.exc_info()
errStr = "ERROR getSiteSpecs : %s %s" % (type,value)
print(errStr)
return EC_Failed,output+'\n'+errStr
# get cloud specs
def getCloudSpecs():
"""Get list of cloud specifications
args:
returns:
status code
0: communication succeeded to the panda server
255: communication failure
map of cloud and attributes
"""
# instantiate curl
curl = _Curl()
# execute
url = baseURL + '/getCloudSpecs'
status,output = curl.get(url,{})
try:
return status,pickle_loads(output)
except Exception:
type, value, traceBack = sys.exc_info()
errStr = "ERROR getCloudSpecs : %s %s" % (type,value)
print(errStr)
return EC_Failed,output+'\n'+errStr
# get nPilots (obsolete)
def getNumPilots():
# instantiate curl
curl = _Curl()
# execute
url = baseURL + '/getNumPilots'
status,output = curl.get(url,{})
try:
return status,pickle_loads(output)
except Exception:
type, value, traceBack = sys.exc_info()
errStr = "ERROR getNumPilots : %s %s" % (type,value)
print(errStr)
return EC_Failed,output+'\n'+errStr
# get a list of DN/myproxy pass phrase/queued job count at a site
def getNUserJobs(siteName):
"""Get a list of DN/myproxy pass phrase/queued job count at a site. production or pilot role is required
args:
siteName: the site name
returns:
status code
0: communication succeeded to the panda server
else: communication failure
a dictionary of DN, myproxy pass phrase, queued job count, hostname of myproxy server
"""
# instantiate curl
curl = _Curl()
curl.sslCert = _x509()
curl.sslKey = _x509()
# execute
url = baseURLSSL + '/getNUserJobs'
data = {'siteName':siteName}
status,output = curl.get(url,data)
try:
return status,pickle_loads(output)
except Exception:
type, value, traceBack = sys.exc_info()
errStr = "ERROR getNUserJobs : %s %s" % (type,value)
print(errStr)
return EC_Failed,output+'\n'+errStr
# run brokerage
def runBrokerage(sites,atlasRelease,cmtConfig=None):
"""Run brokerage
args:
sites: the list of candidate sites
atlasRelease: version number of SW release
cmtConfig: cmt config
returns:
status code
0: communication succeeded to the panda server
else: communication failure
the name of the selected site
"""
# serialize
strSites = pickle_dumps(sites)
# instantiate curl
curl = _Curl()
# execute
url = baseURL + '/runBrokerage'
data = {'sites':strSites,
'atlasRelease':atlasRelease}
if cmtConfig is not None:
data['cmtConfig'] = cmtConfig
return curl.get(url,data)
# get RW
def getRW(priority=0):
"""Get the amount of workload queued in each cloud
args:
priority: workload with higher priorities than this value
returns:
status code
0: communication succeeded to the panda server
255: communication failure
map of cloud and the amount of workload
"""
# instantiate curl
curl = _Curl()
# execute
url = baseURLBAMBOO + '/getRW'
# get RWs for high priority tasks
data = {'priority':priority}
status,output = curl.get(url,data)
try:
return status,pickle_loads(output)
except Exception:
type, value, traceBack = sys.exc_info()
errStr = "ERROR getRW : %s %s" % (type,value)
print(errStr)
return EC_Failed,output+'\n'+errStr
# change job priorities (obsolete)
def changeJobPriorities(newPrioMap):
# serialize
newPrioMapStr = pickle_dumps(newPrioMap)
# instantiate curl
curl = _Curl()
curl.sslCert = _x509()
curl.sslKey = _x509()
# execute
url = baseURLSSL + '/changeJobPriorities'
data = {'newPrioMap':newPrioMapStr}
status,output = curl.post(url,data)
try:
return status,pickle_loads(output)
except Exception:
errtype,errvalue = sys.exc_info()[:2]
errStr = "ERROR changeJobPriorities : %s %s" % (errtype,errvalue)
return EC_Failed,output+'\n'+errStr
# insert task params
def insertTaskParams(taskParams):
"""Insert task parameters
args:
taskParams: a dictionary of task parameters
returns:
status code
0: communication succeeded to the panda server
255: communication failure
tuple of return code and JediTaskID
True: request is processed
False: not processed
"""
# serialize
taskParamsStr = json.dumps(taskParams)
# instantiate curl
curl = _Curl()
curl.sslCert = _x509()
curl.sslKey = _x509()
# execute
url = baseURLSSL + '/insertTaskParams'
data = {'taskParams':taskParamsStr}
status,output = curl.post(url,data)
try:
return status,pickle_loads(output)
except Exception:
errtype,errvalue = sys.exc_info()[:2]
errStr = "ERROR insertTaskParams : %s %s" % (errtype,errvalue)
return EC_Failed,output+'\n'+errStr
# kill task
def killTask(jediTaskID):
"""Kill a task
args:
jediTaskID: jediTaskID of the task to be killed
returns:
status code
0: communication succeeded to the panda server
255: communication failure
tuple of return code and diagnostic message
0: request is registered
1: server error
2: task not found
3: permission denied
4: irrelevant task status
100: non SSL connection
101: irrelevant taskID
"""
# instantiate curl
curl = _Curl()
curl.sslCert = _x509()
curl.sslKey = _x509()
# execute
url = baseURLSSL + '/killTask'
data = {'jediTaskID':jediTaskID}
data['properErrorCode'] = True
status,output = curl.post(url,data)
try:
return status,pickle_loads(output)
except Exception:
errtype,errvalue = sys.exc_info()[:2]
errStr = "ERROR killTask : %s %s" % (errtype,errvalue)
return EC_Failed,output+'\n'+errStr
# finish task
def finishTask(jediTaskID,soft=False):
"""Finish a task
args:
jediTaskID: jediTaskID of the task to be finished
soft: If True, new jobs are not generated and the task is
finished once all remaining jobs are done.
If False, all remaining jobs are killed and then the
task is finished
returns:
status code
0: communication succeeded to the panda server
255: communication failure
tuple of return code and diagnostic message
0: request is registered
1: server error
2: task not found
3: permission denied
4: irrelevant task status
100: non SSL connection
101: irrelevant taskID
"""
# instantiate curl
curl = _Curl()
curl.sslCert = _x509()
curl.sslKey = _x509()
# execute
url = baseURLSSL + '/finishTask'
data = {'jediTaskID':jediTaskID}
data['properErrorCode'] = True
if soft:
data['soft'] = True
status,output = curl.post(url,data)
try:
return status,pickle_loads(output)
except Exception:
errtype,errvalue = sys.exc_info()[:2]
errStr = "ERROR finishTask : %s %s" % (errtype,errvalue)
return EC_Failed,output+'\n'+errStr
# reassign task to a site
def reassignTaskToSite(jediTaskID,site,mode=None):
"""Reassign a task to a site. Existing jobs are killed and new jobs are generated at the site
args:
jediTaskID: jediTaskID of the task to be reassigned
site: the site name where the task is reassigned
mode: If soft, only defined/waiting/assigned/activated jobs are killed. If nokill, no jobs are killed. All jobs are killed by default.
returns:
status code
0: communication succeeded to the panda server
255: communication failure
tuple of return code and diagnostic message
0: request is registered
1: server error
2: task not found
3: permission denied
4: irrelevant task status
100: non SSL connection
101: irrelevant taskID
"""
maxSite = 60
if site is not None and len(site) > maxSite:
return EC_Failed,'site parameter is too long > {0}chars'.format(maxSite)
# instantiate curl
curl = _Curl()
curl.sslCert = _x509()
curl.sslKey = _x509()
# execute
url = baseURLSSL + '/reassignTask'
data = {'jediTaskID':jediTaskID,'site':site}
if mode is not None:
data['mode'] = mode
status,output = curl.post(url,data)
try:
return status,pickle_loads(output)
except Exception:
errtype,errvalue = sys.exc_info()[:2]
errStr = "ERROR reassignTaskToSite : %s %s" % (errtype,errvalue)
return EC_Failed,output+'\n'+errStr
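
# Illustrative usage sketch (not part of the original client): taskID and site
# name are placeholders; mode='nokill' reassigns without killing existing jobs,
# as described in the docstring above.
#
#   status, ret = reassignTaskToSite(12345678, 'SOME_SITE', mode='nokill')
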
# reassign task to a cloud
def reassignTaskToCloud(jediTaskID,cloud,mode=None):
"""Reassign a task to a cloud. Existing jobs are killed and new jobs are generated in the cloud
args:
jediTaskID: jediTaskID of the task to be reassigned
cloud: the cloud name where the task is reassigned
mode: If soft, only defined/waiting/assigned/activated jobs are killed. If nokill, no jobs are killed. All jobs are killed by default.
returns:
status code
0: communication succeeded to the panda server
255: communication failure
tuple of return code and diagnostic message
0: request is registered
1: server error
2: task not found
3: permission denied
4: irrelevant task status
100: non SSL connection
101: irrelevant taskID
"""
# instantiate curl
curl = _Curl()
curl.sslCert = _x509()
curl.sslKey = _x509()
# execute
url = baseURLSSL + '/reassignTask'
data = {'jediTaskID':jediTaskID,'cloud':cloud}
if mode is not None:
data['mode'] = mode
status,output = curl.post(url,data)
try:
return status,pickle_loads(output)
except Exception:
errtype,errvalue = sys.exc_info()[:2]
errStr = "ERROR reassignTaskToCloud : %s %s" % (errtype,errvalue)
return EC_Failed,output+'\n'+errStr
# reassign task to a nucleus
def reassignTaskToNucleus(jediTaskID,nucleus,mode=None):
"""Reassign a task to a nucleus. Existing jobs are killed and new jobs are generated in the cloud
args:
jediTaskID: jediTaskID of the task to be reassigned
nucleus: the nucleus name where the task is reassigned
mode: If soft, only defined/waiting/assigned/activated jobs are killed. If nokill, no jobs are killed. All jobs are killed by default.
returns:
status code
0: communication succeeded to the panda server
255: communication failure
tuple of return code and diagnostic message
0: request is registered
1: server error
2: task not found
3: permission denied
4: irrelevant task status
100: non SSL connection
101: irrelevant taskID
"""
# instantiate curl
curl = _Curl()
curl.sslCert = _x509()
curl.sslKey = _x509()
# execute
url = baseURLSSL + '/reassignTask'
data = {'jediTaskID':jediTaskID,'nucleus':nucleus}
if mode is not None:
data['mode'] = mode
status,output = curl.post(url,data)
try:
return status,pickle_loads(output)
except Exception:
errtype,errvalue = sys.exc_info()[:2]
errStr = "ERROR reassignTaskToCloud : %s %s" % (errtype,errvalue)
return EC_Failed,output+'\n'+errStr
# upload log
def uploadLog(logStr,logFileName):
"""Upload sandbox
args:
logStr: log message
logFileName: name of log file
returns:
status code
0: communication succeeded to the panda server
else: communication failure
"""
# instantiate curl
curl = _Curl()
curl.sslCert = _x509()
curl.sslKey = _x509()
# write log to a tmp file
fh = tempfile.NamedTemporaryFile(delete=False)
gfh = gzip.open(fh.name,mode='wb')
if sys.version_info[0] >= 3:
logStr = logStr.encode('utf-8')
gfh.write(logStr)
gfh.close()
# execute
url = baseURLSSL + '/uploadLog'
data = {'file':'{0};filename={1}'.format(fh.name,logFileName)}
retVal = curl.put(url,data)
os.unlink(fh.name)
return retVal
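
# Illustrative usage sketch (not part of the original client): the log text is
# gzipped into a temporary file and PUT to the server under the given file
# name; both values below are placeholders chosen by the caller.
#
#   ret = uploadLog('payload failed with exit code 1\n', 'my_task.log')
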
# change task priority
def changeTaskPriority(jediTaskID,newPriority):
"""Change task priority
args:
jediTaskID: jediTaskID of the task to change the priority
newPriority: new task priority
returns:
status code
0: communication succeeded to the panda server
255: communication failure
return code
0: unknown task
1: succeeded
None: database error
"""
# instantiate curl
curl = _Curl()
curl.sslCert = _x509()
curl.sslKey = _x509()
# execute
url = baseURLSSL + '/changeTaskPriority'
data = {'jediTaskID':jediTaskID,
'newPriority':newPriority}
status,output = curl.post(url,data)
try:
return status,pickle_loads(output)
except Exception:
errtype,errvalue = sys.exc_info()[:2]
errStr = "ERROR changeTaskPriority : %s %s" % (errtype,errvalue)
return EC_Failed,output+'\n'+errStr
# set debug mode
def setDebugMode(pandaID,modeOn):
"""Turn debug mode on/off for a job
args:
pandaID: PandaID of the job
        modeOn: True to turn debug mode on; False to turn it off
returns:
status code
0: communication succeeded to the panda server
another: communication failure
error message
"""
# instantiate curl
curl = _Curl()
curl.sslCert = _x509()
curl.sslKey = _x509()
# execute
url = baseURLSSL + '/setDebugMode'
data = {'pandaID':pandaID,
'modeOn':modeOn}
return curl.post(url,data)
# retry task
def retryTask(jediTaskID, verbose=False, noChildRetry=False, discardEvents=False, disable_staging_mode=False):
"""Retry task
args:
        jediTaskID: jediTaskID of the task to retry
        verbose: set True to see what's going on
        noChildRetry: True not to retry child tasks
        discardEvents: discard events
        disable_staging_mode: disable staging mode
returns:
status code
0: communication succeeded to the panda server
255: communication failure
tuple of return code and diagnostic message
0: request is registered
1: server error
2: task not found
3: permission denied
4: irrelevant task status
100: non SSL connection
101: irrelevant taskID
"""
# instantiate curl
curl = _Curl()
curl.sslCert = _x509()
curl.sslKey = _x509()
curl.verbose = verbose
# execute
url = baseURLSSL + '/retryTask'
data = {'jediTaskID':jediTaskID}
data['properErrorCode'] = True
if noChildRetry:
data['noChildRetry'] = True
if discardEvents:
data['discardEvents'] = True
if disable_staging_mode:
data['disable_staging_mode'] = True
status,output = curl.post(url,data)
try:
return status,pickle_loads(output)
except Exception:
errtype,errvalue = sys.exc_info()[:2]
errStr = "ERROR retryTask : %s %s" % (errtype,errvalue)
return EC_Failed,output+'\n'+errStr
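
# Illustrative usage sketch (not part of the original client): retries a task
# without retrying its child tasks; 12345678 is a placeholder jediTaskID.
#
#   status, ret = retryTask(12345678, noChildRetry=True)
#   if status == 0:
#       ret_code, diag = ret
#       print('retryTask -> %s: %s' % (ret_code, diag))
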
# reload input
def reloadInput(jediTaskID,verbose=False):
"""Retry task
args:
jediTaskID: jediTaskID of the task to retry
returns:
status code
0: communication succeeded to the panda server
255: communication failure
tuple of return code and diagnostic message
0: request is registered
1: server error
2: task not found
3: permission denied
4: irrelevant task status
100: non SSL connection
101: irrelevant taskID
"""
# instantiate curl
curl = _Curl()
curl.sslCert = _x509()
curl.sslKey = _x509()
curl.verbose = verbose
# execute
url = baseURLSSL + '/reloadInput'
data = {'jediTaskID':jediTaskID}
status,output = curl.post(url,data)
try:
return status,pickle_loads(output)
except Exception:
errtype,errvalue = sys.exc_info()[:2]
errStr = "ERROR reloadInput : %s %s" % (errtype,errvalue)
return EC_Failed,output+'\n'+errStr
# change task walltime
def changeTaskWalltime(jediTaskID,wallTime):
"""Change task priority
args:
jediTaskID: jediTaskID of the task to change the priority
wallTime: new walltime for the task
returns:
status code
0: communication succeeded to the panda server
255: communication failure
return code
0: unknown task
1: succeeded
None: database error
"""
# instantiate curl
curl = _Curl()
curl.sslCert = _x509()
curl.sslKey = _x509()
# execute
url = baseURLSSL + '/changeTaskAttributePanda'
data = {'jediTaskID':jediTaskID,
'attrName':'wallTime',
'attrValue':wallTime}
status,output = curl.post(url,data)
try:
return status,pickle_loads(output)
except Exception:
errtype,errvalue = sys.exc_info()[:2]
errStr = "ERROR changeTaskWalltime : %s %s" % (errtype,errvalue)
return EC_Failed,output+'\n'+errStr
# change task cputime
def changeTaskCputime(jediTaskID,cpuTime):
"""Change task cpuTime
args:
        jediTaskID: jediTaskID of the task to change the cputime
cpuTime: new cputime for the task
returns:
status code
0: communication succeeded to the panda server
255: communication failure
return code
0: unknown task
1: succeeded
None: database error
"""
# instantiate curl
curl = _Curl()
curl.sslCert = _x509()
curl.sslKey = _x509()
# execute
url = baseURLSSL + '/changeTaskAttributePanda'
data = {'jediTaskID':jediTaskID,
'attrName':'cpuTime',
'attrValue':cpuTime}
status,output = curl.post(url,data)
try:
return status,pickle_loads(output)
except Exception:
errtype,errvalue = sys.exc_info()[:2]
errStr = "ERROR changeTaskCputime : %s %s" % (errtype,errvalue)
return EC_Failed,output+'\n'+errStr
# change task RAM count
def changeTaskRamCount(jediTaskID,ramCount):
"""Change task priority
args:
jediTaskID: jediTaskID of the task to change the priority
ramCount: new ramCount for the task
returns:
status code
0: communication succeeded to the panda server
255: communication failure
return code
0: unknown task
1: succeeded
None: database error
"""
# instantiate curl
curl = _Curl()
curl.sslCert = _x509()
curl.sslKey = _x509()
# execute
url = baseURLSSL + '/changeTaskAttributePanda'
data = {'jediTaskID':jediTaskID,
'attrName':'ramCount',
'attrValue':ramCount}
status,output = curl.post(url,data)
try:
return status,pickle_loads(output)
except Exception:
errtype,errvalue = sys.exc_info()[:2]
errStr = "ERROR changeTaskRamCount : %s %s" % (errtype,errvalue)
return EC_Failed,output+'\n'+errStr
# change task attribute
def changeTaskAttribute(jediTaskID,attrName,attrValue):
"""Change task attribute
args:
jediTaskID: jediTaskID of the task to change the attribute
attrName: attribute name
attrValue: new value for the attribute
returns:
status code
0: communication succeeded to the panda server
255: communication failure
        return: a tuple of return code and message
0: unknown task
1: succeeded
2: disallowed to update the attribute
None: database error
"""
# instantiate curl
curl = _Curl()
curl.sslCert = _x509()
curl.sslKey = _x509()
# execute
url = baseURLSSL + '/changeTaskAttributePanda'
data = {'jediTaskID':jediTaskID,
'attrName':attrName,
'attrValue':attrValue}
status,output = curl.post(url,data)
try:
return status,pickle_loads(output)
except Exception:
errtype,errvalue = sys.exc_info()[:2]
errStr = "ERROR changeTaskAttributePanda : %s %s" % (errtype,errvalue)
return EC_Failed,output+'\n'+errStr
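
# Illustrative usage sketch (not part of the original client): the set of
# attribute names accepted by the server is not listed in this module;
# 'ramCount' is used here only because the dedicated helper above exposes it.
#
#   status, ret = changeTaskAttribute(12345678, 'ramCount', 4000)
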
# change split rule for task
def changeTaskSplitRule(jediTaskID,ruleName,ruleValue):
"""Change split rule fo task
args:
jediTaskID: jediTaskID of the task to change the rule
ruleName: rule name
ruleValue: new value for the rule
returns:
status code
0: communication succeeded to the panda server
255: communication failure
        return: a tuple of return code and message
0: unknown task
1: succeeded
2: disallowed to update the attribute
None: database error
"""
# instantiate curl
curl = _Curl()
curl.sslCert = _x509()
curl.sslKey = _x509()
# execute
url = baseURLSSL + '/changeTaskSplitRulePanda'
data = {'jediTaskID':jediTaskID,
'attrName':ruleName,
'attrValue':ruleValue}
status,output = curl.post(url,data)
try:
return status,pickle_loads(output)
except Exception:
errtype,errvalue = sys.exc_info()[:2]
errStr = "ERROR changeTaskSplitRule : %s %s" % (errtype,errvalue)
return EC_Failed,output+'\n'+errStr
# pause task
def pauseTask(jediTaskID,verbose=False):
"""Pause task
args:
jediTaskID: jediTaskID of the task to pause
returns:
status code
0: communication succeeded to the panda server
255: communication failure
tuple of return code and diagnostic message
0: request is registered
1: server error
2: task not found
3: permission denied
4: irrelevant task status
100: non SSL connection
101: irrelevant taskID
"""
# instantiate curl
curl = _Curl()
curl.sslCert = _x509()
curl.sslKey = _x509()
curl.verbose = verbose
# execute
url = baseURLSSL + '/pauseTask'
data = {'jediTaskID':jediTaskID}
status,output = curl.post(url,data)
try:
return status,pickle_loads(output)
except Exception:
errtype,errvalue = sys.exc_info()[:2]
errStr = "ERROR pauseTask : %s %s" % (errtype,errvalue)
return EC_Failed,output+'\n'+errStr
# resume task
def resumeTask(jediTaskID,verbose=False):
"""Resume task
args:
jediTaskID: jediTaskID of the task to release
returns:
status code
0: communication succeeded to the panda server
255: communication failure
tuple of return code and diagnostic message
0: request is registered
1: server error
2: task not found
3: permission denied
4: irrelevant task status
100: non SSL connection
101: irrelevant taskID
"""
# instantiate curl
curl = _Curl()
curl.sslCert = _x509()
curl.sslKey = _x509()
curl.verbose = verbose
# execute
url = baseURLSSL + '/resumeTask'
data = {'jediTaskID':jediTaskID}
status,output = curl.post(url,data)
try:
return status,pickle_loads(output)
except Exception:
errtype,errvalue = sys.exc_info()[:2]
errStr = "ERROR resumeTask : %s %s" % (errtype,errvalue)
return EC_Failed,output+'\n'+errStr
# avalanche task
def avalancheTask(jediTaskID,verbose=False):
"""force avalanche for task
args:
jediTaskID: jediTaskID of the task to avalanche
returns:
status code
0: communication succeeded to the panda server
255: communication failure
tuple of return code and diagnostic message
0: request is registered
1: server error
2: task not found
3: permission denied
4: irrelevant task status
100: non SSL connection
101: irrelevant taskID
"""
# instantiate curl
curl = _Curl()
curl.sslCert = _x509()
curl.sslKey = _x509()
curl.verbose = verbose
# execute
url = baseURLSSL + '/avalancheTask'
data = {'jediTaskID':jediTaskID}
status,output = curl.post(url,data)
try:
return status,pickle_loads(output)
except Exception:
errtype,errvalue = sys.exc_info()[:2]
errStr = "ERROR resumeTask : %s %s" % (errtype,errvalue)
return EC_Failed,output+'\n'+errStr
# increase attempt number for unprocessed files
def increaseAttemptNr(jediTaskID,increase):
"""Change task priority
args:
jediTaskID: jediTaskID of the task to increase attempt numbers
increase: increase for attempt numbers
returns:
status code
0: communication succeeded to the panda server
255: communication failure
return code
0: succeeded
1: unknown task
2: invalid task status
3: permission denied
4: wrong parameter
None: database error
"""
# instantiate curl
curl = _Curl()
curl.sslCert = _x509()
curl.sslKey = _x509()
# execute
url = baseURLSSL + '/increaseAttemptNrPanda'
data = {'jediTaskID':jediTaskID,
'increasedNr':increase}
status,output = curl.post(url,data)
try:
return status,pickle_loads(output)
except Exception:
errtype,errvalue = sys.exc_info()[:2]
errStr = "ERROR increaseAttemptNr : %s %s" % (errtype,errvalue)
return EC_Failed,output+'\n'+errStr
# kill unfinished jobs
def killUnfinishedJobs(jediTaskID,code=None,verbose=False,srvID=None,useMailAsID=False):
"""Kill unfinished jobs in a task. Normal users can kill only their own jobs.
People with production VOMS role can kill any jobs.
Running jobs are killed when next heartbeat comes from the pilot.
Set code=9 if running jobs need to be killed immediately.
args:
jediTaskID: the taskID of the task
code: specify why the jobs are killed
2: expire
3: aborted
4: expire in waiting
7: retry by server
8: rebrokerage
9: force kill
50: kill by JEDI
91: kill user jobs with prod role
verbose: set True to see what's going on
srvID: obsolete
useMailAsID: obsolete
returns:
status code
0: communication succeeded to the panda server
255: communication failure
the list of clouds (or Nones if tasks are not yet assigned)
"""
# instantiate curl
curl = _Curl()
curl.sslCert = _x509()
curl.sslKey = _x509()
curl.verbose = verbose
# execute
url = _getURL('URLSSL',srvID) + '/killUnfinishedJobs'
data = {'jediTaskID':jediTaskID,'code':code,'useMailAsID':useMailAsID}
status,output = curl.post(url,data)
try:
return status,pickle_loads(output)
except Exception:
type, value, traceBack = sys.exc_info()
errStr = "ERROR killUnfinishedJobs : %s %s" % (type,value)
print(errStr)
return EC_Failed,output+'\n'+errStr
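
# Illustrative usage sketch (not part of the original client): code=9 asks for
# running jobs to be killed immediately, as listed in the docstring above;
# the jediTaskID is a placeholder.
#
#   status, ret = killUnfinishedJobs(12345678, code=9, verbose=True)
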
# trigger task brokerage
def triggerTaskBrokerage(jediTaskID):
"""Trigger task brokerge
args:
jediTaskID: jediTaskID of the task to change the attribute
returns:
status code
0: communication succeeded to the panda server
255: communication failure
        return: a tuple of return code and message
0: unknown task
1: succeeded
None: database error
"""
# instantiate curl
curl = _Curl()
curl.sslCert = _x509()
curl.sslKey = _x509()
# execute
url = baseURLSSL + '/changeTaskModTimePanda'
data = {'jediTaskID':jediTaskID,
'diffValue':-12}
status,output = curl.post(url,data)
try:
return status,pickle_loads(output)
except Exception:
errtype,errvalue = sys.exc_info()[:2]
errStr = "ERROR triggerTaskBrokerage : %s %s" % (errtype,errvalue)
return EC_Failed,output+'\n'+errStr
# get PanDA IDs with TaskID
def getPandaIDsWithTaskID(jediTaskID):
"""Get PanDA IDs with TaskID
args:
        jediTaskID: jediTaskID of the task to get the list of PanDA IDs
returns:
status code
0: communication succeeded to the panda server
255: communication failure
the list of PanDA IDs
"""
# instantiate curl
curl = _Curl()
# execute
url = baseURL + '/getPandaIDsWithTaskID'
data = {'jediTaskID':jediTaskID}
status,output = curl.post(url,data)
try:
return status,pickle_loads(output)
except Exception:
type, value, traceBack = sys.exc_info()
errStr = "ERROR getPandaIDsWithTaskID : %s %s" % (type,value)
print(errStr)
return EC_Failed,output+'\n'+errStr
# reactivate task
def reactivateTask(jediTaskID):
"""Reactivate task
args:
jediTaskID: jediTaskID of the task to be reactivated
returns:
status code
0: communication succeeded to the panda server
255: communication failure
        return: a tuple of return code and message
0: unknown task
1: succeeded
None: database error
"""
# instantiate curl
curl = _Curl()
curl.sslCert = _x509()
curl.sslKey = _x509()
# execute
url = baseURLSSL + '/reactivateTask'
data = {'jediTaskID':jediTaskID}
status,output = curl.post(url,data)
try:
return status,pickle_loads(output)
except Exception:
errtype,errvalue = sys.exc_info()[:2]
errStr = "ERROR reactivateTask : %s %s" % (errtype,errvalue)
return EC_Failed,output+'\n'+errStr
# get task status TaskID
def getTaskStatus(jediTaskID):
"""Get task status
args:
        jediTaskID: jediTaskID of the task to get the status
returns:
status code
0: communication succeeded to the panda server
255: communication failure
the status string
"""
# instantiate curl
curl = _Curl()
# execute
url = baseURL + '/getTaskStatus'
data = {'jediTaskID':jediTaskID}
status,output = curl.post(url,data)
try:
return status,pickle_loads(output)
except Exception:
type, value, traceBack = sys.exc_info()
errStr = "ERROR getTaskStatus : %s %s" % (type,value)
print(errStr)
return EC_Failed,output+'\n'+errStr
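
# Illustrative usage sketch (not part of the original client): this endpoint is
# served from baseURL (no SSL client certificate needed); the taskID is a
# placeholder.
#
#   status, task_status = getTaskStatus(12345678)
#   if status == 0:
#       print('task is in status %s' % task_status)
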
# reassign specified tasks (and their jobs) to a new share
def reassignShare(jedi_task_ids, share, reassign_running=False):
"""
args:
jedi_task_ids: task ids to act on
share: share to be applied to jeditaskids
returns:
status code
0: communication succeeded to the panda server
255: communication failure
return: a tuple of return code and message
1: logical error
0: success
None: database error
"""
# instantiate curl
curl = _Curl()
curl.sslCert = _x509()
curl.sslKey = _x509()
jedi_task_ids_pickle = pickle_dumps(jedi_task_ids)
change_running_pickle = pickle_dumps(reassign_running)
# execute
url = baseURLSSL + '/reassignShare'
data = {'jedi_task_ids_pickle': jedi_task_ids_pickle,
'share': share,
'reassign_running': change_running_pickle}
status, output = curl.post(url, data)
try:
return status, pickle_loads(output)
except Exception:
err_type, err_value = sys.exc_info()[:2]
err_str = "ERROR reassignShare : {0} {1}".format(err_type, err_value)
return EC_Failed, '{0}\n{1}'.format(output, err_str)
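
# Illustrative usage sketch (not part of the original client): the task IDs and
# the share name are placeholders; set reassign_running=True to also move jobs
# that are already running.
#
#   status, ret = reassignShare([12345678, 12345679], 'SOME_GLOBAL_SHARE')
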
# list tasks in a particular share and optionally status
def listTasksInShare(gshare, status='running'):
"""
args:
gshare: global share
status: task status, running by default
returns:
status code
0: communication succeeded to the panda server
255: communication failure
return: a tuple of return code and jedi_task_ids
1: logical error
0: success
None: database error
"""
# instantiate curl
curl = _Curl()
curl.sslCert = _x509()
curl.sslKey = _x509()
# execute
url = baseURLSSL + '/listTasksInShare'
data = {'gshare': gshare,
'status': status}
status, output = curl.post(url, data)
try:
return status, pickle_loads(output)
except Exception:
err_type, err_value = sys.exc_info()[:2]
err_str = "ERROR listTasksInShare : {0} {1}".format(err_type, err_value)
return EC_Failed, '{0}\n{1}'.format(output, err_str)
# get taskParamsMap with TaskID
def getTaskParamsMap(jediTaskID):
"""Get task status
args:
jediTaskID: jediTaskID of the task to get taskParamsMap
returns:
status code
0: communication succeeded to the panda server
255: communication failure
return: a tuple of return code and taskParamsMap
1: logical error
0: success
None: database error
"""
# instantiate curl
curl = _Curl()
# execute
url = baseURL + '/getTaskParamsMap'
data = {'jediTaskID':jediTaskID}
status,output = curl.post(url,data)
try:
return status,pickle_loads(output)
except Exception:
type, value, traceBack = sys.exc_info()
errStr = "ERROR getTaskParamsMap : %s %s" % (type,value)
print(errStr)
return EC_Failed,output+'\n'+errStr
# set num slots for workload provisioning
def setNumSlotsForWP(pandaQueueName, numSlots, gshare=None, resourceType=None, validPeriod=None):
"""Set num slots for workload provisioning
args:
pandaQueueName: Panda Queue name
numSlots: the number of slots. 0 to dynamically set based on the number of starting jobs
gshare: global share. None to set for any global share (default)
resourceType: resource type. None to set for any resource type (default)
validPeriod: How long the rule is valid in days. None if no expiration (default)
returns:
status code
0: communication succeeded to the panda server
255: communication failure
tuple of return code and diagnostic message
0: succeeded
1: server error
100: non SSL connection
101: missing production role
102: type error for some parameters
"""
# instantiate curl
curl = _Curl()
curl.sslCert = _x509()
curl.sslKey = _x509()
# execute
url = baseURLSSL + '/setNumSlotsForWP'
data = {'pandaQueueName': pandaQueueName,
'numSlots': numSlots}
if gshare is not None:
data['gshare'] = gshare
if resourceType is not None:
data['resourceType'] = resourceType
if validPeriod is not None:
data['validPeriod'] = validPeriod
status,output = curl.post(url, data)
try:
return status, json.loads(output)
except Exception:
errtype,errvalue = sys.exc_info()[:2]
errStr = "ERROR setNumSlotsForWP : %s %s" % (errtype,errvalue)
return EC_Failed, output+'\n'+errStr
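
# Illustrative usage sketch (not part of the original client): the queue name
# is a placeholder; numSlots=0 lets the server size the slots from the number
# of starting jobs, per the docstring above.
#
#   status, ret = setNumSlotsForWP('SOME_PANDA_QUEUE', 0, validPeriod=7)
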
# enable jumbo jobs
def enableJumboJobs(jediTaskID, totalJumboJobs=1, nJumboPerSite=1):
"""Enable jumbo jobs
args:
jediTaskID: jediTaskID of the task
totalJumboJobs: The total number of active jumbo jobs produced for the task. Use 0 to disable jumbo jobs for the task
nJumboPerSite: The number of active jumbo jobs per site
returns:
status code
0: communication succeeded to the panda server
255: communication failure
tuple of return code and diagnostic message
0: succeeded
1: server error
100: non SSL connection
101: missing production role
102: type error for some parameters
"""
# instantiate curl
curl = _Curl()
curl.sslCert = _x509()
curl.sslKey = _x509()
# execute
url = baseURLSSL + '/enableJumboJobs'
data = {'jediTaskID': jediTaskID,
'nJumboJobs': totalJumboJobs,
'nJumboPerSite': nJumboPerSite}
status,output = curl.post(url, data)
try:
return status, json.loads(output)
except Exception:
errtype,errvalue = sys.exc_info()[:2]
errStr = "ERROR /enableJumboJobs : %s %s" % (errtype,errvalue)
return EC_Failed, output+'\n'+errStr
# get Global Share status
def getGShareStatus():
"""
returns:
status code
0: communication succeeded to the panda server
255: communication failure
tuple of return code and diagnostic message
0: succeeded
1: server error
100: non SSL connection
101: missing production role
102: type error for some parameters
"""
# instantiate curl
curl = _Curl()
curl.sslCert = _x509()
curl.sslKey = _x509()
# execute
url = baseURLSSL + '/getGShareStatus'
status, output = curl.post(url, {})
try:
return status, json.loads(output)
except Exception:
err_type,err_value = sys.exc_info()[:2]
err_str = "ERROR /getGShareStatus : %s %s" % (err_type, err_value)
return EC_Failed, output+'\n' + err_str
# send a harvester command to panda server in order sweep a panda queue
def sweepPQ(panda_queue, status_list, ce_list, submission_host_list):
"""
args:
panda_queue: panda queue name
status_list: list with statuses to sweep, e.g. ['submitted']
ce_list: list of CEs belonging to the site or 'ALL'
submission_host_list: list of submission hosts this applies or 'ALL'
returns:
status code
0: communication succeeded to the panda server
255: communication failure
return: a tuple of return code and message
False: logical error
True: success
"""
# instantiate curl
curl = _Curl()
curl.sslCert = _x509()
curl.sslKey = _x509()
panda_queue_json = json.dumps(panda_queue)
status_list_json = json.dumps(status_list)
ce_list_json = json.dumps(ce_list)
submission_host_list_json = json.dumps(submission_host_list)
# execute
url = baseURLSSL + '/sweepPQ'
data = {'panda_queue': panda_queue_json,
'status_list': status_list_json,
'ce_list': ce_list_json,
'submission_host_list': submission_host_list_json
}
status, output = curl.post(url, data)
try:
return status, json.loads(output)
except Exception:
err_type, err_value = sys.exc_info()[:2]
err_str = "ERROR sweepPQ : {0} {1}".format(err_type, err_value)
return EC_Failed, '{0}\n{1}'.format(output, err_str)
# send a command to a job
def send_command_to_job(panda_id, com):
"""
args:
panda_id: PandaID of the job
com: a command string passed to the pilot. max 250 chars
returns:
status code
0: communication succeeded to the panda server
255: communication failure
return: a tuple of return code and message
False: failed
True: the command received
"""
# instantiate curl
curl = _Curl()
curl.sslCert = _x509()
curl.sslKey = _x509()
# execute
url = baseURLSSL + '/send_command_to_job'
data = {'panda_id': panda_id,
'com': com
}
status, output = curl.post(url, data)
try:
return status, json.loads(output)
except Exception as e:
err_str = "ERROR send_command_to_job : {}".format(str(e))
return EC_Failed, '{0}\n{1}'.format(output, err_str)
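
# Illustrative usage sketch (not part of the original client): both values are
# placeholders; the command string is free text handed to the pilot and must
# stay under 250 characters.
#
#   status, ret = send_command_to_job(4294967296, 'some command for the pilot')
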
|
apache-2.0
|
belltailjp/scikit-learn
|
examples/datasets/plot_random_multilabel_dataset.py
|
93
|
3460
|
"""
==============================================
Plot randomly generated multilabel dataset
==============================================
This illustrates the `datasets.make_multilabel_classification` dataset
generator. Each sample consists of counts of two features (up to 50 in
total), which are differently distributed in each of two classes.
Points are labeled as follows, where Y means the class is present:
===== ===== ===== ======
1 2 3 Color
===== ===== ===== ======
Y N N Red
N Y N Blue
N N Y Yellow
Y Y N Purple
Y N Y Orange
  N      Y      Y    Green
Y Y Y Brown
===== ===== ===== ======
A star marks the expected sample for each class; its size reflects the
probability of selecting that class label.
The left and right examples highlight the ``n_labels`` parameter:
more of the samples in the right plot have 2 or 3 labels.
Note that this two-dimensional example is very degenerate:
generally the number of features would be much greater than the
"document length", while here we have much larger documents than vocabulary.
Similarly, with ``n_classes > n_features``, it is much less likely that a
feature distinguishes a particular class.
"""
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_multilabel_classification as make_ml_clf
print(__doc__)
COLORS = np.array(['!',
'#FF3333', # red
'#0198E1', # blue
'#BF5FFF', # purple
'#FCD116', # yellow
'#FF7216', # orange
'#4DBD33', # green
'#87421F' # brown
])
# Use same random seed for multiple calls to make_multilabel_classification to
# ensure same distributions
RANDOM_SEED = np.random.randint(2 ** 10)
def plot_2d(ax, n_labels=1, n_classes=3, length=50):
X, Y, p_c, p_w_c = make_ml_clf(n_samples=150, n_features=2,
n_classes=n_classes, n_labels=n_labels,
length=length, allow_unlabeled=False,
return_indicator=True,
return_distributions=True,
random_state=RANDOM_SEED)
ax.scatter(X[:, 0], X[:, 1], color=COLORS.take((Y * [1, 2, 4]
).sum(axis=1)),
marker='.')
ax.scatter(p_w_c[0] * length, p_w_c[1] * length,
marker='*', linewidth=.5, edgecolor='black',
s=20 + 1500 * p_c ** 2,
color=COLORS.take([1, 2, 4]))
ax.set_xlabel('Feature 0 count')
return p_c, p_w_c
_, (ax1, ax2) = plt.subplots(1, 2, sharex='row', sharey='row', figsize=(8, 4))
plt.subplots_adjust(bottom=.15)
p_c, p_w_c = plot_2d(ax1, n_labels=1)
ax1.set_title('n_labels=1, length=50')
ax1.set_ylabel('Feature 1 count')
plot_2d(ax2, n_labels=3)
ax2.set_title('n_labels=3, length=50')
ax2.set_xlim(left=0, auto=True)
ax2.set_ylim(bottom=0, auto=True)
plt.show()
print('The data was generated from (random_state=%d):' % RANDOM_SEED)
print('Class', 'P(C)', 'P(w0|C)', 'P(w1|C)', sep='\t')
for k, p, p_w in zip(['red', 'blue', 'yellow'], p_c, p_w_c.T):
print('%s\t%0.2f\t%0.2f\t%0.2f' % (k, p, p_w[0], p_w[1]))
|
bsd-3-clause
|
massmutual/scikit-learn
|
examples/cluster/plot_digits_linkage.py
|
369
|
2959
|
"""
=============================================================================
Various Agglomerative Clustering on a 2D embedding of digits
=============================================================================
An illustration of various linkage options for agglomerative clustering on
a 2D embedding of the digits dataset.
The goal of this example is to show intuitively how the metrics behave, and
not to find good clusters for the digits. This is why the example works on a
2D embedding.
What this example shows us is the behavior "rich getting richer" of
agglomerative clustering that tends to create uneven cluster sizes.
This behavior is especially pronounced for the average linkage strategy,
that ends up with a couple of singleton clusters.
"""
# Authors: Gael Varoquaux
# License: BSD 3 clause (C) INRIA 2014
print(__doc__)
from time import time
import numpy as np
from scipy import ndimage
from matplotlib import pyplot as plt
from sklearn import manifold, datasets
digits = datasets.load_digits(n_class=10)
X = digits.data
y = digits.target
n_samples, n_features = X.shape
np.random.seed(0)
def nudge_images(X, y):
# Having a larger dataset shows more clearly the behavior of the
# methods, but we multiply the size of the dataset only by 2, as the
# cost of the hierarchical clustering methods are strongly
# super-linear in n_samples
shift = lambda x: ndimage.shift(x.reshape((8, 8)),
.3 * np.random.normal(size=2),
mode='constant',
).ravel()
X = np.concatenate([X, np.apply_along_axis(shift, 1, X)])
Y = np.concatenate([y, y], axis=0)
return X, Y
X, y = nudge_images(X, y)
#----------------------------------------------------------------------
# Visualize the clustering
def plot_clustering(X_red, X, labels, title=None):
x_min, x_max = np.min(X_red, axis=0), np.max(X_red, axis=0)
X_red = (X_red - x_min) / (x_max - x_min)
plt.figure(figsize=(6, 4))
for i in range(X_red.shape[0]):
plt.text(X_red[i, 0], X_red[i, 1], str(y[i]),
color=plt.cm.spectral(labels[i] / 10.),
fontdict={'weight': 'bold', 'size': 9})
plt.xticks([])
plt.yticks([])
if title is not None:
plt.title(title, size=17)
plt.axis('off')
plt.tight_layout()
#----------------------------------------------------------------------
# 2D embedding of the digits dataset
print("Computing embedding")
X_red = manifold.SpectralEmbedding(n_components=2).fit_transform(X)
print("Done.")
from sklearn.cluster import AgglomerativeClustering
for linkage in ('ward', 'average', 'complete'):
clustering = AgglomerativeClustering(linkage=linkage, n_clusters=10)
t0 = time()
clustering.fit(X_red)
print("%s : %.2fs" % (linkage, time() - t0))
plot_clustering(X_red, X, clustering.labels_, "%s linkage" % linkage)
plt.show()
|
bsd-3-clause
|
wwf5067/statsmodels
|
statsmodels/iolib/tests/test_foreign.py
|
25
|
7274
|
"""
Tests for iolib/foreign.py
"""
import os
import warnings
from datetime import datetime
from numpy.testing import *
import numpy as np
from pandas import DataFrame, isnull
import pandas.util.testing as ptesting
from statsmodels.compat.python import BytesIO, asbytes
import statsmodels.api as sm
from statsmodels.iolib.foreign import (StataWriter, genfromdta,
_datetime_to_stata_elapsed, _stata_elapsed_date_to_datetime)
from statsmodels.datasets import macrodata
import pandas
pandas_old = int(pandas.__version__.split('.')[1]) < 9
# Test precisions
DECIMAL_4 = 4
DECIMAL_3 = 3
curdir = os.path.dirname(os.path.abspath(__file__))
def test_genfromdta():
#Test genfromdta vs. results/macrodta.npy created with genfromtxt.
#NOTE: Stata handles data very oddly. Round tripping from csv to dta
# to ndarray 2710.349 (csv) -> 2510.2491 (stata) -> 2710.34912109375
# (dta/ndarray)
from .results.macrodata import macrodata_result as res2
res1 = genfromdta(curdir+'/../../datasets/macrodata/macrodata.dta')
assert_array_equal(res1 == res2, True)
def test_genfromdta_pandas():
from pandas.util.testing import assert_frame_equal
dta = macrodata.load_pandas().data
curdir = os.path.dirname(os.path.abspath(__file__))
res1 = sm.iolib.genfromdta(curdir+'/../../datasets/macrodata/macrodata.dta',
pandas=True)
res1 = res1.astype(float)
assert_frame_equal(res1, dta)
def test_stata_writer_structured():
buf = BytesIO()
dta = macrodata.load().data
dtype = dta.dtype
dta = dta.astype(np.dtype([('year', int),
('quarter', int)] + dtype.descr[2:]))
writer = StataWriter(buf, dta)
writer.write_file()
buf.seek(0)
dta2 = genfromdta(buf)
assert_array_equal(dta, dta2)
def test_stata_writer_array():
buf = BytesIO()
dta = macrodata.load().data
dta = DataFrame.from_records(dta)
dta.columns = ["v%d" % i for i in range(1,15)]
writer = StataWriter(buf, dta.values)
writer.write_file()
buf.seek(0)
dta2 = genfromdta(buf)
dta = dta.to_records(index=False)
assert_array_equal(dta, dta2)
def test_missing_roundtrip():
buf = BytesIO()
dta = np.array([(np.nan, np.inf, "")],
dtype=[("double_miss", float), ("float_miss", np.float32),
("string_miss", "a1")])
writer = StataWriter(buf, dta)
writer.write_file()
buf.seek(0)
dta = genfromdta(buf, missing_flt=np.nan)
assert_(isnull(dta[0][0]))
assert_(isnull(dta[0][1]))
assert_(dta[0][2] == asbytes(""))
dta = genfromdta(os.path.join(curdir, "results/data_missing.dta"),
missing_flt=-999)
assert_(np.all([dta[0][i] == -999 for i in range(5)]))
def test_stata_writer_pandas():
buf = BytesIO()
dta = macrodata.load().data
dtype = dta.dtype
#as of 0.9.0 pandas only supports i8 and f8
dta = dta.astype(np.dtype([('year', 'i8'),
('quarter', 'i8')] + dtype.descr[2:]))
dta4 = dta.astype(np.dtype([('year', 'i4'),
('quarter', 'i4')] + dtype.descr[2:]))
dta = DataFrame.from_records(dta)
dta4 = DataFrame.from_records(dta4)
# dta is int64 'i8' given to Stata writer
writer = StataWriter(buf, dta)
writer.write_file()
buf.seek(0)
dta2 = genfromdta(buf)
dta5 = DataFrame.from_records(dta2)
# dta2 is int32 'i4' returned from Stata reader
if dta5.dtypes[1] is np.dtype('int64'):
ptesting.assert_frame_equal(dta.reset_index(), dta5)
else:
# don't check index because it has different size, int32 versus int64
ptesting.assert_frame_equal(dta4, dta5[dta5.columns[1:]])
def test_stata_writer_unicode():
# make sure to test with characters outside the latin-1 encoding
pass
@dec.skipif(pandas_old)
def test_genfromdta_datetime():
results = [(datetime(2006, 11, 19, 23, 13, 20), 1479596223000,
datetime(2010, 1, 20), datetime(2010, 1, 8), datetime(2010, 1, 1),
datetime(1974, 7, 1), datetime(2010, 1, 1), datetime(2010, 1, 1)),
(datetime(1959, 12, 31, 20, 3, 20), -1479590, datetime(1953, 10, 2),
datetime(1948, 6, 10), datetime(1955, 1, 1), datetime(1955, 7, 1),
datetime(1955, 1, 1), datetime(2, 1, 1))]
with warnings.catch_warnings(record=True) as w:
dta = genfromdta(os.path.join(curdir, "results/time_series_examples.dta"))
assert_(len(w) == 1) # should get a warning for that format.
assert_array_equal(dta[0].tolist(), results[0])
assert_array_equal(dta[1].tolist(), results[1])
with warnings.catch_warnings(record=True):
dta = genfromdta(os.path.join(curdir, "results/time_series_examples.dta"),
pandas=True)
assert_array_equal(dta.irow(0).tolist(), results[0])
assert_array_equal(dta.irow(1).tolist(), results[1])
def test_date_converters():
ms = [-1479597200000, -1e6, -1e5, -100, 1e5, 1e6, 1479597200000]
days = [-1e5, -1200, -800, -365, -50, 0, 50, 365, 800, 1200, 1e5]
weeks = [-1e4, -1e2, -53, -52, -51, 0, 51, 52, 53, 1e2, 1e4]
months = [-1e4, -1e3, -100, -13, -12, -11, 0, 11, 12, 13, 100, 1e3, 1e4]
quarter = [-100, -50, -5, -4, -3, 0, 3, 4, 5, 50, 100]
half = [-50, 40, 30, 10, 3, 2, 1, 0, 1, 2, 3, 10, 30, 40, 50]
year = [1, 50, 500, 1000, 1500, 1975, 2075]
for i in ms:
assert_equal(_datetime_to_stata_elapsed(
_stata_elapsed_date_to_datetime(i, "tc"), "tc"), i)
for i in days:
assert_equal(_datetime_to_stata_elapsed(
_stata_elapsed_date_to_datetime(i, "td"), "td"), i)
for i in weeks:
assert_equal(_datetime_to_stata_elapsed(
_stata_elapsed_date_to_datetime(i, "tw"), "tw"), i)
for i in months:
assert_equal(_datetime_to_stata_elapsed(
_stata_elapsed_date_to_datetime(i, "tm"), "tm"), i)
for i in quarter:
assert_equal(_datetime_to_stata_elapsed(
_stata_elapsed_date_to_datetime(i, "tq"), "tq"), i)
for i in half:
assert_equal(_datetime_to_stata_elapsed(
_stata_elapsed_date_to_datetime(i, "th"), "th"), i)
for i in year:
assert_equal(_datetime_to_stata_elapsed(
_stata_elapsed_date_to_datetime(i, "ty"), "ty"), i)
@dec.skipif(pandas_old)
def test_datetime_roundtrip():
dta = np.array([(1, datetime(2010, 1, 1), 2),
(2, datetime(2010, 2, 1), 3),
(4, datetime(2010, 3, 1), 5)],
dtype=[('var1', float), ('var2', object), ('var3', float)])
buf = BytesIO()
writer = StataWriter(buf, dta, {"var2" : "tm"})
writer.write_file()
buf.seek(0)
dta2 = genfromdta(buf)
assert_equal(dta, dta2)
dta = DataFrame.from_records(dta)
buf = BytesIO()
writer = StataWriter(buf, dta, {"var2" : "tm"})
writer.write_file()
buf.seek(0)
dta2 = genfromdta(buf, pandas=True)
ptesting.assert_frame_equal(dta, dta2.drop('index', axis=1))
if __name__ == "__main__":
import nose
nose.runmodule(argv=[__file__,'-vvs','-x','--pdb'],
exit=False)
|
bsd-3-clause
|
mantidproject/mantid
|
qt/applications/workbench/workbench/plotting/plotscriptgenerator/test/test_plotscriptgeneratoraxes.py
|
3
|
6462
|
# Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2019 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
# This file is part of the mantid workbench.
import unittest
import matplotlib as mpl
mpl.use('Agg') # noqa
import matplotlib.pyplot as plt
from matplotlib.ticker import LogFormatterSciNotation, ScalarFormatter
from unittest.mock import Mock
from workbench.plotting.plotscriptgenerator.axes import (generate_axis_limit_commands,
generate_axis_label_commands)
from workbench.plotting.plotscriptgenerator import generate_script
class PlotGeneratorAxisTest(unittest.TestCase):
def test_generate_axis_label_commands_only_returns_commands_for_labels_that_are_set(self):
mock_ax = Mock(get_xlabel=lambda: '', get_ylabel=lambda: 'y')
expected = ["set_ylabel('y')"]
self.assertEqual(expected, generate_axis_label_commands(mock_ax))
def test_generate_axis_label_commands_returns_empty_list_when_no_labels_set(self):
mock_ax = Mock(get_xlabel=lambda: '', get_ylabel=lambda: '')
self.assertEqual([], generate_axis_label_commands(mock_ax))
def test_generate_axis_label_commands_returns_x_and_y_command_if_both_labels_set(self):
mock_ax = Mock(get_xlabel=lambda: "X", get_ylabel=lambda: "Y")
expected = ["set_xlabel('X')", "set_ylabel('Y')"]
actual = generate_axis_label_commands(mock_ax)
self.assertEqual(expected, actual)
def test_generate_axis_label_commands_returns_only_x_command_if_y_label_not_set(self):
mock_ax = Mock(get_xlabel=lambda: "X", get_ylabel=lambda: "")
expected = ["set_xlabel('X')"]
actual = generate_axis_label_commands(mock_ax)
self.assertEqual(expected, actual)
def test_generate_axis_label_commands_returns_empty_list_if_no_labels_set(self):
mock_ax = Mock(get_xlabel=lambda: "", get_ylabel=lambda: "")
actual = generate_axis_label_commands(mock_ax)
self.assertEqual([], actual)
def test_generate_axis_limit_commands_returns_empty_list_if_limits_not_changed(self):
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot([-10, 10], [1, 2])
self.assertEqual([], generate_axis_limit_commands(ax))
plt.close()
del fig
def test_generate_axis_limit_commands_returns_x_limit_command_if_x_limit_changed(self):
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot([-10, 10], [1, 2])
ax.set_xlim([-5, 5])
self.assertEqual(['set_xlim([-5.0, 5.0])'], generate_axis_limit_commands(ax))
plt.close()
del fig
def test_generate_axis_limit_commands_returns_x_and_y_limit_commands_if_limits_changed(self):
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot([-10, 10], [1, 2])
ax.set_xlim([-5, 5])
ax.set_ylim([0, 4])
self.assertEqual(['set_xlim([-5.0, 5.0])', 'set_ylim([0.0, 4.0])'],
generate_axis_limit_commands(ax))
plt.close()
del fig
def test_generate_tick_commands_for_tiled_plot(self):
"""
Check that the tick commands are generated for every plot in the figure.
"""
fig, axes = plt.subplots(ncols=2, nrows=2, subplot_kw={'projection': 'mantid'})
for ax in fig.get_axes():
ax.plot([-10, 10], [1, 2])
script = generate_script(fig)
# Should be axes[i][j].tick_params for multiple subplots.
self.assertNotIn("axes.tick_params", script)
self.assertIn("axes[0][0].tick_params", script)
self.assertIn("axes[0][1].tick_params", script)
self.assertIn("axes[1][0].tick_params", script)
self.assertIn("axes[1][1].tick_params", script)
plt.close()
del fig
def test_generate_tick_format_commands(self):
"""
Check that the tick format commands are correctly generated if they are set different to the default.
"""
fig, axes = plt.subplots(ncols=2, nrows=2, subplot_kw={'projection': 'mantid'})
for ax in fig.get_axes():
ax.plot([-10, 10], [1, 2])
# Only change the major formatter for one of the x axes and one of the y axes.
# The rest will be default, and shouldn't generate any lines in the script.
axes[0][1].xaxis.set_major_formatter(LogFormatterSciNotation())
axes[1][0].yaxis.set_major_formatter(LogFormatterSciNotation())
script = generate_script(fig)
# Check the import is there exactly once.
self.assertEqual(script.count("from matplotlib.ticker import"), 1)
# We only set the major formatter for axes[0][1].xaxis, so the command should only be present there.
self.assertNotIn("axes[0][0].xaxis.set_major_formatter", script)
self.assertIn("axes[0][1].xaxis.set_major_formatter", script)
self.assertNotIn("axes[1][0].xaxis.set_major_formatter", script)
self.assertNotIn("axes[1][1].xaxis.set_major_formatter", script)
# We only set the major formatter for axes[1][0].yaxis, so the command should only be present there.
self.assertNotIn("axes[0][0].yaxis.set_major_formatter", script)
self.assertNotIn("axes[0][1].yaxis.set_major_formatter", script)
self.assertIn("axes[1][0].yaxis.set_major_formatter", script)
self.assertNotIn("axes[1][1].yaxis.set_major_formatter", script)
def test_generate_tick_format_commands_log_scale(self):
"""
Check that tick format commands are correctly generated for a log axis scale.
"""
fig = plt.figure()
axes = fig.add_subplot(1, 1, 1, projection='mantid')
axes.plot([-10, 10], [1, 2])
axes.set_yscale('log')
axes.yaxis.set_major_formatter(ScalarFormatter())
script = generate_script(fig)
# Scalar format is not the default for log axis, so should appear in script.
self.assertIn("axes.yaxis.set_major_formatter(ScalarFormatter(", script)
# LogFormatterSciNotation is default for log axis, so shouldn't appear in script for minor or major ticks.
self.assertNotIn("_formatter(LogFormatterSciNotation", script)
if __name__ == '__main__':
unittest.main()
|
gpl-3.0
|
winklerand/pandas
|
pandas/io/gbq.py
|
2
|
3915
|
""" Google BigQuery support """
def _try_import():
# since pandas is a dependency of pandas-gbq
# we need to import on first use
try:
import pandas_gbq
except ImportError:
# give a nice error message
raise ImportError("Load data from Google BigQuery\n"
"\n"
"the pandas-gbq package is not installed\n"
"see the docs: https://pandas-gbq.readthedocs.io\n"
"\n"
"you can install via pip or conda:\n"
"pip install pandas-gbq\n"
"conda install pandas-gbq -c conda-forge\n")
return pandas_gbq
def read_gbq(query, project_id=None, index_col=None, col_order=None,
reauth=False, verbose=True, private_key=None, dialect='legacy',
**kwargs):
r"""Load data from Google BigQuery.
The main method a user calls to execute a Query in Google BigQuery
and read results into a pandas DataFrame.
This function requires the `pandas-gbq package
<https://pandas-gbq.readthedocs.io>`__.
Authentication to the Google BigQuery service is via OAuth 2.0.
- If "private_key" is not provided:
By default "application default credentials" are used.
If default application credentials are not found or are restrictive,
user account credentials are used. In this case, you will be asked to
grant permissions for product name 'pandas GBQ'.
- If "private_key" is provided:
Service account credentials will be used to authenticate.
Parameters
----------
query : str
SQL-Like Query to return data values
project_id : str
Google BigQuery Account project ID.
index_col : str (optional)
Name of result column to use for index in results DataFrame
col_order : list(str) (optional)
List of BigQuery column names in the desired order for results
DataFrame
reauth : boolean (default False)
Force Google BigQuery to reauthenticate the user. This is useful
if multiple accounts are used.
verbose : boolean (default True)
Verbose output
private_key : str (optional)
Service account private key in JSON format. Can be file path
or string contents. This is useful for remote server
        authentication (e.g. Jupyter/IPython notebook on a remote host)
dialect : {'legacy', 'standard'}, default 'legacy'
'legacy' : Use BigQuery's legacy SQL dialect.
'standard' : Use BigQuery's standard SQL, which is
compliant with the SQL 2011 standard. For more information
see `BigQuery SQL Reference
<https://cloud.google.com/bigquery/sql-reference/>`__
`**kwargs` : Arbitrary keyword arguments
configuration (dict): query config parameters for job processing.
For example:
configuration = {'query': {'useQueryCache': False}}
For more information see `BigQuery SQL Reference
<https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.query>`__
Returns
-------
df: DataFrame
DataFrame representing results of query
"""
pandas_gbq = _try_import()
return pandas_gbq.read_gbq(
query, project_id=project_id,
index_col=index_col, col_order=col_order,
reauth=reauth, verbose=verbose,
private_key=private_key,
dialect=dialect,
**kwargs)
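
# Illustrative usage sketch (not part of this module): project and table names
# are placeholders, and the call requires the pandas-gbq package plus valid
# Google credentials.
#
#   df = read_gbq("SELECT name, COUNT(*) AS n FROM `my_dataset.my_table` "
#                 "GROUP BY name",
#                 project_id='my-gcp-project', dialect='standard')
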
def to_gbq(dataframe, destination_table, project_id, chunksize=10000,
verbose=True, reauth=False, if_exists='fail', private_key=None):
pandas_gbq = _try_import()
pandas_gbq.to_gbq(dataframe, destination_table, project_id,
chunksize=chunksize,
verbose=verbose, reauth=reauth,
if_exists=if_exists, private_key=private_key)
|
bsd-3-clause
|
clarkfitzg/seaborn
|
seaborn/distributions.py
|
21
|
28328
|
"""Plotting functions for visualizing distributions."""
from __future__ import division
import numpy as np
from scipy import stats
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
import warnings
from six import string_types
try:
import statsmodels.nonparametric.api as smnp
_has_statsmodels = True
except ImportError:
_has_statsmodels = False
from .utils import set_hls_values, iqr, _kde_support
from .palettes import color_palette, blend_palette
from .axisgrid import JointGrid
def _freedman_diaconis_bins(a):
"""Calculate number of hist bins using Freedman-Diaconis rule."""
# From http://stats.stackexchange.com/questions/798/
a = np.asarray(a)
h = 2 * iqr(a) / (len(a) ** (1 / 3))
    # fall back to sqrt(n) bins if iqr is 0
if h == 0:
return np.sqrt(a.size)
else:
return np.ceil((a.max() - a.min()) / h)
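
# Worked example (not part of the original module): for 1000 standard-normal
# samples the IQR is about 1.35, so h = 2 * 1.35 / 1000 ** (1 / 3) is roughly
# 0.27 and (max - min) / h suggests on the order of 20-30 bins.
#
#   import numpy as np
#   n_bins = _freedman_diaconis_bins(np.random.randn(1000))
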
def distplot(a, bins=None, hist=True, kde=True, rug=False, fit=None,
hist_kws=None, kde_kws=None, rug_kws=None, fit_kws=None,
color=None, vertical=False, norm_hist=False, axlabel=None,
label=None, ax=None):
"""Flexibly plot a univariate distribution of observations.
This function combines the matplotlib ``hist`` function (with automatic
calculation of a good default bin size) with the seaborn :func:`kdeplot`
and :func:`rugplot` functions. It can also fit ``scipy.stats``
distributions and plot the estimated PDF over the data.
Parameters
----------
a : Series, 1d-array, or list.
Observed data. If this is a Series object with a ``name`` attribute,
the name will be used to label the data axis.
bins : argument for matplotlib hist(), or None, optional
Specification of hist bins, or None to use Freedman-Diaconis rule.
hist : bool, optional
Whether to plot a (normed) histogram.
kde : bool, optional
Whether to plot a gaussian kernel density estimate.
rug : bool, optional
Whether to draw a rugplot on the support axis.
fit : random variable object, optional
        An object with a `fit` method, returning a tuple that can be passed
        to a `pdf` method as positional arguments following a grid of values
        to evaluate the pdf on.
{hist, kde, rug, fit}_kws : dictionaries, optional
Keyword arguments for underlying plotting functions.
color : matplotlib color, optional
Color to plot everything but the fitted curve in.
vertical : bool, optional
        If True, observed values are on the y-axis.
    norm_hist : bool, optional
If True, the histogram height shows a density rather than a count.
This is implied if a KDE or fitted density is plotted.
axlabel : string, False, or None, optional
Name for the support axis label. If None, will try to get it
        from a.name. If False, do not set a label.
label : string, optional
        Legend label for the relevant component of the plot
ax : matplotlib axis, optional
if provided, plot on this axis
Returns
-------
ax : matplotlib Axes
Returns the Axes object with the plot for further tweaking.
See Also
--------
kdeplot : Show a univariate or bivariate distribution with a kernel
density estimate.
rugplot : Draw small vertical lines to show each observation in a
distribution.
Examples
--------
Show a default plot with a kernel density estimate and histogram with bin
size determined automatically with a reference rule:
.. plot::
:context: close-figs
>>> import seaborn as sns, numpy as np
>>> sns.set(rc={"figure.figsize": (8, 4)}); np.random.seed(0)
>>> x = np.random.randn(100)
>>> ax = sns.distplot(x)
Use Pandas objects to get an informative axis label:
.. plot::
:context: close-figs
>>> import pandas as pd
>>> x = pd.Series(x, name="x variable")
>>> ax = sns.distplot(x)
    Plot the distribution with a kernel density estimate and rug plot:
.. plot::
:context: close-figs
>>> ax = sns.distplot(x, rug=True, hist=False)
Plot the distribution with a histogram and maximum likelihood gaussian
distribution fit:
.. plot::
:context: close-figs
>>> from scipy.stats import norm
>>> ax = sns.distplot(x, fit=norm, kde=False)
Plot the distribution on the vertical axis:
.. plot::
:context: close-figs
>>> ax = sns.distplot(x, vertical=True)
Change the color of all the plot elements:
.. plot::
:context: close-figs
>>> sns.set_color_codes()
>>> ax = sns.distplot(x, color="y")
Pass specific parameters to the underlying plot functions:
.. plot::
:context: close-figs
>>> ax = sns.distplot(x, rug=True, rug_kws={"color": "g"},
... kde_kws={"color": "k", "lw": 3, "label": "KDE"},
... hist_kws={"histtype": "step", "linewidth": 3,
... "alpha": 1, "color": "g"})
"""
if ax is None:
ax = plt.gca()
# Intelligently label the support axis
label_ax = bool(axlabel)
if axlabel is None and hasattr(a, "name"):
axlabel = a.name
if axlabel is not None:
label_ax = True
# Make a a 1-d array
a = np.asarray(a).squeeze()
# Decide if the hist is normed
norm_hist = norm_hist or kde or (fit is not None)
# Handle dictionary defaults
if hist_kws is None:
hist_kws = dict()
if kde_kws is None:
kde_kws = dict()
if rug_kws is None:
rug_kws = dict()
if fit_kws is None:
fit_kws = dict()
# Get the color from the current color cycle
if color is None:
if vertical:
line, = ax.plot(0, a.mean())
else:
line, = ax.plot(a.mean(), 0)
color = line.get_color()
line.remove()
# Plug the label into the right kwarg dictionary
if label is not None:
if hist:
hist_kws["label"] = label
elif kde:
kde_kws["label"] = label
elif rug:
rug_kws["label"] = label
elif fit:
fit_kws["label"] = label
if hist:
if bins is None:
bins = min(_freedman_diaconis_bins(a), 50)
hist_kws.setdefault("alpha", 0.4)
hist_kws.setdefault("normed", norm_hist)
orientation = "horizontal" if vertical else "vertical"
hist_color = hist_kws.pop("color", color)
ax.hist(a, bins, orientation=orientation,
color=hist_color, **hist_kws)
if hist_color != color:
hist_kws["color"] = hist_color
if kde:
kde_color = kde_kws.pop("color", color)
kdeplot(a, vertical=vertical, ax=ax, color=kde_color, **kde_kws)
if kde_color != color:
kde_kws["color"] = kde_color
if rug:
rug_color = rug_kws.pop("color", color)
axis = "y" if vertical else "x"
rugplot(a, axis=axis, ax=ax, color=rug_color, **rug_kws)
if rug_color != color:
rug_kws["color"] = rug_color
if fit is not None:
fit_color = fit_kws.pop("color", "#282828")
gridsize = fit_kws.pop("gridsize", 200)
cut = fit_kws.pop("cut", 3)
clip = fit_kws.pop("clip", (-np.inf, np.inf))
bw = stats.gaussian_kde(a).scotts_factor() * a.std(ddof=1)
x = _kde_support(a, bw, gridsize, cut, clip)
params = fit.fit(a)
pdf = lambda x: fit.pdf(x, *params)
y = pdf(x)
if vertical:
x, y = y, x
ax.plot(x, y, color=fit_color, **fit_kws)
if fit_color != "#282828":
fit_kws["color"] = fit_color
if label_ax:
if vertical:
ax.set_ylabel(axlabel)
else:
ax.set_xlabel(axlabel)
return ax
def _univariate_kdeplot(data, shade, vertical, kernel, bw, gridsize, cut,
clip, legend, ax, cumulative=False, **kwargs):
"""Plot a univariate kernel density estimate on one of the axes."""
# Sort out the clipping
if clip is None:
clip = (-np.inf, np.inf)
# Calculate the KDE
if _has_statsmodels:
# Prefer using statsmodels for kernel flexibility
x, y = _statsmodels_univariate_kde(data, kernel, bw,
gridsize, cut, clip,
cumulative=cumulative)
else:
# Fall back to scipy if missing statsmodels
if kernel != "gau":
kernel = "gau"
msg = "Kernel other than `gau` requires statsmodels."
warnings.warn(msg, UserWarning)
if cumulative:
raise ImportError("Cumulative distributions are currently"
"only implemented in statsmodels."
"Please install statsmodels.")
x, y = _scipy_univariate_kde(data, bw, gridsize, cut, clip)
# Make sure the density is nonnegative
y = np.amax(np.c_[np.zeros_like(y), y], axis=1)
# Flip the data if the plot should be on the y axis
if vertical:
x, y = y, x
# Check if a label was specified in the call
label = kwargs.pop("label", None)
# Otherwise check if the data object has a name
if label is None and hasattr(data, "name"):
label = data.name
# Decide if we're going to add a legend
legend = label is not None and legend
label = "_nolegend_" if label is None else label
# Use the active color cycle to find the plot color
line, = ax.plot(x, y, **kwargs)
color = line.get_color()
line.remove()
kwargs.pop("color", None)
# Draw the KDE plot and, optionally, shade
ax.plot(x, y, color=color, label=label, **kwargs)
alpha = kwargs.get("alpha", 0.25)
if shade:
if vertical:
ax.fill_betweenx(y, 1e-12, x, color=color, alpha=alpha)
else:
ax.fill_between(x, 1e-12, y, color=color, alpha=alpha)
# Draw the legend here
if legend:
ax.legend(loc="best")
return ax
def _statsmodels_univariate_kde(data, kernel, bw, gridsize, cut, clip,
cumulative=False):
"""Compute a univariate kernel density estimate using statsmodels."""
fft = kernel == "gau"
kde = smnp.KDEUnivariate(data)
kde.fit(kernel, bw, fft, gridsize=gridsize, cut=cut, clip=clip)
if cumulative:
grid, y = kde.support, kde.cdf
else:
grid, y = kde.support, kde.density
return grid, y
def _scipy_univariate_kde(data, bw, gridsize, cut, clip):
"""Compute a univariate kernel density estimate using scipy."""
try:
kde = stats.gaussian_kde(data, bw_method=bw)
except TypeError:
kde = stats.gaussian_kde(data)
if bw != "scott": # scipy default
msg = ("Ignoring bandwidth choice, "
"please upgrade scipy to use a different bandwidth.")
warnings.warn(msg, UserWarning)
if isinstance(bw, string_types):
bw = "scotts" if bw == "scott" else bw
bw = getattr(kde, "%s_factor" % bw)()
grid = _kde_support(data, bw, gridsize, cut, clip)
y = kde(grid)
return grid, y
def _bivariate_kdeplot(x, y, filled, fill_lowest,
kernel, bw, gridsize, cut, clip,
axlabel, ax, **kwargs):
"""Plot a joint KDE estimate as a bivariate contour plot."""
# Determine the clipping
if clip is None:
clip = [(-np.inf, np.inf), (-np.inf, np.inf)]
elif np.ndim(clip) == 1:
clip = [clip, clip]
# Calculate the KDE
if _has_statsmodels:
xx, yy, z = _statsmodels_bivariate_kde(x, y, bw, gridsize, cut, clip)
else:
xx, yy, z = _scipy_bivariate_kde(x, y, bw, gridsize, cut, clip)
# Plot the contours
n_levels = kwargs.pop("n_levels", 10)
cmap = kwargs.get("cmap", "BuGn" if filled else "BuGn_d")
if isinstance(cmap, string_types):
if cmap.endswith("_d"):
pal = ["#333333"]
pal.extend(color_palette(cmap.replace("_d", "_r"), 2))
cmap = blend_palette(pal, as_cmap=True)
else:
cmap = mpl.cm.get_cmap(cmap)
kwargs["cmap"] = cmap
contour_func = ax.contourf if filled else ax.contour
cset = contour_func(xx, yy, z, n_levels, **kwargs)
if filled and not fill_lowest:
cset.collections[0].set_alpha(0)
kwargs["n_levels"] = n_levels
# Label the axes
if hasattr(x, "name") and axlabel:
ax.set_xlabel(x.name)
if hasattr(y, "name") and axlabel:
ax.set_ylabel(y.name)
return ax
def _statsmodels_bivariate_kde(x, y, bw, gridsize, cut, clip):
"""Compute a bivariate kde using statsmodels."""
if isinstance(bw, string_types):
bw_func = getattr(smnp.bandwidths, "bw_" + bw)
x_bw = bw_func(x)
y_bw = bw_func(y)
bw = [x_bw, y_bw]
elif np.isscalar(bw):
bw = [bw, bw]
if isinstance(x, pd.Series):
x = x.values
if isinstance(y, pd.Series):
y = y.values
kde = smnp.KDEMultivariate([x, y], "cc", bw)
x_support = _kde_support(x, kde.bw[0], gridsize, cut, clip[0])
y_support = _kde_support(y, kde.bw[1], gridsize, cut, clip[1])
xx, yy = np.meshgrid(x_support, y_support)
z = kde.pdf([xx.ravel(), yy.ravel()]).reshape(xx.shape)
return xx, yy, z
def _scipy_bivariate_kde(x, y, bw, gridsize, cut, clip):
"""Compute a bivariate kde using scipy."""
data = np.c_[x, y]
kde = stats.gaussian_kde(data.T)
data_std = data.std(axis=0, ddof=1)
if isinstance(bw, string_types):
bw = "scotts" if bw == "scott" else bw
bw_x = getattr(kde, "%s_factor" % bw)() * data_std[0]
bw_y = getattr(kde, "%s_factor" % bw)() * data_std[1]
elif np.isscalar(bw):
bw_x, bw_y = bw, bw
else:
msg = ("Cannot specify a different bandwidth for each dimension "
"with the scipy backend. You should install statsmodels.")
raise ValueError(msg)
x_support = _kde_support(data[:, 0], bw_x, gridsize, cut, clip[0])
y_support = _kde_support(data[:, 1], bw_y, gridsize, cut, clip[1])
xx, yy = np.meshgrid(x_support, y_support)
z = kde([xx.ravel(), yy.ravel()]).reshape(xx.shape)
return xx, yy, z
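def _scipy_bivariate_kde_sketch():
    # Illustrative sketch, not part of the original module: how the scipy
    # backend above evaluates a joint density on a grid. The helper name and
    # the plain linspace supports (used instead of _kde_support) are
    # assumptions for illustration only.
    import numpy as np
    from scipy import stats
    x = np.random.randn(300)
    y = 0.5 * x + np.random.randn(300)
    kde = stats.gaussian_kde(np.c_[x, y].T)
    xx, yy = np.meshgrid(np.linspace(x.min(), x.max(), 50),
                         np.linspace(y.min(), y.max(), 50))
    # Evaluate on the flattened grid and reshape back, as _scipy_bivariate_kde
    # does above.
    z = kde(np.vstack([xx.ravel(), yy.ravel()])).reshape(xx.shape)
    return xx, yy, z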
def kdeplot(data, data2=None, shade=False, vertical=False, kernel="gau",
bw="scott", gridsize=100, cut=3, clip=None, legend=True,
cumulative=False, shade_lowest=True, ax=None, **kwargs):
"""Fit and plot a univariate or bivariate kernel density estimate.
Parameters
----------
data : 1d array-like
Input data.
data2: 1d array-like
Second input data. If present, a bivariate KDE will be estimated.
shade : bool, optional
If True, shade in the area under the KDE curve (or draw with filled
contours when data is bivariate).
vertical : bool
If True, density is on x-axis.
kernel : {'gau' | 'cos' | 'biw' | 'epa' | 'tri' | 'triw' }, optional
Code for shape of kernel to fit with. Bivariate KDE can only use
gaussian kernel.
bw : {'scott' | 'silverman' | scalar | pair of scalars }, optional
Name of reference method to determine kernel size, scalar factor,
or scalar for each dimension of the bivariate plot.
gridsize : int, optional
Number of discrete points in the evaluation grid.
cut : scalar, optional
Draw the estimate to cut * bw from the extreme data points.
clip : pair of scalars, or pair of pair of scalars, optional
Lower and upper bounds for datapoints used to fit KDE. Can provide
a pair of (low, high) bounds for bivariate plots.
    legend : bool, optional
If True, add a legend or label the axes when possible.
cumulative : bool
If True, draw the cumulative distribution estimated by the kde.
shade_lowest : bool
If True, shade the lowest contour of a bivariate KDE plot. Not
relevant when drawing a univariate plot or when ``shade=False``.
Setting this to ``False`` can be useful when you want multiple
densities on the same Axes.
ax : matplotlib axis, optional
Axis to plot on, otherwise uses current axis.
kwargs : key, value pairings
Other keyword arguments are passed to ``plt.plot()`` or
``plt.contour{f}`` depending on whether a univariate or bivariate
plot is being drawn.
Returns
-------
ax : matplotlib Axes
Axes with plot.
See Also
--------
distplot: Flexibly plot a univariate distribution of observations.
jointplot: Plot a joint dataset with bivariate and marginal distributions.
Examples
--------
Plot a basic univariate density:
.. plot::
:context: close-figs
>>> import numpy as np; np.random.seed(10)
>>> import seaborn as sns; sns.set(color_codes=True)
>>> mean, cov = [0, 2], [(1, .5), (.5, 1)]
>>> x, y = np.random.multivariate_normal(mean, cov, size=50).T
>>> ax = sns.kdeplot(x)
Shade under the density curve and use a different color:
.. plot::
:context: close-figs
>>> ax = sns.kdeplot(x, shade=True, color="r")
Plot a bivariate density:
.. plot::
:context: close-figs
>>> ax = sns.kdeplot(x, y)
Use filled contours:
.. plot::
:context: close-figs
>>> ax = sns.kdeplot(x, y, shade=True)
Use more contour levels and a different color palette:
.. plot::
:context: close-figs
>>> ax = sns.kdeplot(x, y, n_levels=30, cmap="Purples_d")
    Use a narrower bandwidth:
.. plot::
:context: close-figs
>>> ax = sns.kdeplot(x, bw=.15)
Plot the density on the vertical axis:
.. plot::
:context: close-figs
>>> ax = sns.kdeplot(y, vertical=True)
Limit the density curve within the range of the data:
.. plot::
:context: close-figs
>>> ax = sns.kdeplot(x, cut=0)
Plot two shaded bivariate densities:
.. plot::
:context: close-figs
>>> iris = sns.load_dataset("iris")
>>> setosa = iris.loc[iris.species == "setosa"]
>>> virginica = iris.loc[iris.species == "virginica"]
>>> ax = sns.kdeplot(setosa.sepal_width, setosa.sepal_length,
... cmap="Reds", shade=True, shade_lowest=False)
>>> ax = sns.kdeplot(virginica.sepal_width, virginica.sepal_length,
... cmap="Blues", shade=True, shade_lowest=False)
"""
if ax is None:
ax = plt.gca()
data = data.astype(np.float64)
if data2 is not None:
data2 = data2.astype(np.float64)
bivariate = False
if isinstance(data, np.ndarray) and np.ndim(data) > 1:
bivariate = True
x, y = data.T
elif isinstance(data, pd.DataFrame) and np.ndim(data) > 1:
bivariate = True
x = data.iloc[:, 0].values
y = data.iloc[:, 1].values
elif data2 is not None:
bivariate = True
x = data
y = data2
if bivariate and cumulative:
raise TypeError("Cumulative distribution plots are not"
"supported for bivariate distributions.")
if bivariate:
ax = _bivariate_kdeplot(x, y, shade, shade_lowest,
kernel, bw, gridsize, cut, clip, legend,
ax, **kwargs)
else:
ax = _univariate_kdeplot(data, shade, vertical, kernel, bw,
gridsize, cut, clip, legend, ax,
cumulative=cumulative, **kwargs)
return ax
def rugplot(a, height=.05, axis="x", ax=None, **kwargs):
"""Plot datapoints in an array as sticks on an axis.
Parameters
----------
a : vector
1D array of observations.
height : scalar, optional
Height of ticks as proportion of the axis.
axis : {'x' | 'y'}, optional
Axis to draw rugplot on.
ax : matplotlib axes
Axes to draw plot into; otherwise grabs current axes.
kwargs : key, value mappings
Other keyword arguments are passed to ``axvline`` or ``axhline``.
Returns
-------
ax : matplotlib axes
The Axes object with the plot on it.
"""
if ax is None:
ax = plt.gca()
a = np.asarray(a)
vertical = kwargs.pop("vertical", axis == "y")
func = ax.axhline if vertical else ax.axvline
kwargs.setdefault("linewidth", 1)
for pt in a:
func(pt, 0, height, **kwargs)
return ax
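def _rugplot_usage_sketch():
    # Illustrative usage sketch, not part of the original module: draw a rug
    # of 30 random observations on a fresh Axes. Extra keyword arguments such
    # as ``color`` are forwarded to ``axvline`` by rugplot. The data here are
    # made up for illustration.
    import numpy as np
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots()
    rugplot(np.random.randn(30), height=.1, ax=ax, color="k")
    return fig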
def jointplot(x, y, data=None, kind="scatter", stat_func=stats.pearsonr,
color=None, size=6, ratio=5, space=.2,
dropna=True, xlim=None, ylim=None,
joint_kws=None, marginal_kws=None, annot_kws=None, **kwargs):
"""Draw a plot of two variables with bivariate and univariate graphs.
This function provides a convenient interface to the :class:`JointGrid`
class, with several canned plot kinds. This is intended to be a fairly
lightweight wrapper; if you need more flexibility, you should use
:class:`JointGrid` directly.
Parameters
----------
x, y : strings or vectors
Data or names of variables in ``data``.
data : DataFrame, optional
DataFrame when ``x`` and ``y`` are variable names.
kind : { "scatter" | "reg" | "resid" | "kde" | "hex" }, optional
Kind of plot to draw.
stat_func : callable or None
Function used to calculate a statistic about the relationship and
annotate the plot. Should map `x` and `y` either to a single value
or to a (value, p) tuple. Set to ``None`` if you don't want to
annotate the plot.
color : matplotlib color, optional
Color used for the plot elements.
size : numeric, optional
Size of the figure (it will be square).
ratio : numeric, optional
Ratio of joint axes size to marginal axes height.
space : numeric, optional
Space between the joint and marginal axes
dropna : bool, optional
If True, remove observations that are missing from ``x`` and ``y``.
{x, y}lim : two-tuples, optional
Axis limits to set before plotting.
{joint, marginal, annot}_kws : dicts
Additional keyword arguments for the plot components.
kwargs : key, value pairs
Additional keyword arguments are passed to the function used to
draw the plot on the joint Axes, superseding items in the
``joint_kws`` dictionary.
Returns
-------
grid : :class:`JointGrid`
:class:`JointGrid` object with the plot on it.
See Also
--------
JointGrid : The Grid class used for drawing this plot. Use it directly if
you need more flexibility.
Examples
--------
Draw a scatterplot with marginal histograms:
.. plot::
:context: close-figs
>>> import numpy as np, pandas as pd; np.random.seed(0)
>>> import seaborn as sns; sns.set(style="white", color_codes=True)
>>> tips = sns.load_dataset("tips")
>>> g = sns.jointplot(x="total_bill", y="tip", data=tips)
Add regression and kernel density fits:
.. plot::
:context: close-figs
>>> g = sns.jointplot("total_bill", "tip", data=tips, kind="reg")
Replace the scatterplot with a joint histogram using hexagonal bins:
.. plot::
:context: close-figs
>>> g = sns.jointplot("total_bill", "tip", data=tips, kind="hex")
Replace the scatterplots and histograms with density estimates and align
the marginal Axes tightly with the joint Axes:
.. plot::
:context: close-figs
>>> iris = sns.load_dataset("iris")
>>> g = sns.jointplot("sepal_width", "petal_length", data=iris,
... kind="kde", space=0, color="g")
Use a different statistic for the annotation:
.. plot::
:context: close-figs
>>> from scipy.stats import spearmanr
>>> g = sns.jointplot("size", "total_bill", data=tips,
... stat_func=spearmanr, color="m")
Draw a scatterplot, then add a joint density estimate:
.. plot::
:context: close-figs
>>> g = (sns.jointplot("sepal_length", "sepal_width",
... data=iris, color="k")
... .plot_joint(sns.kdeplot, zorder=0, n_levels=6))
Pass vectors in directly without using Pandas, then name the axes:
.. plot::
:context: close-figs
>>> x, y = np.random.randn(2, 300)
>>> g = (sns.jointplot(x, y, kind="hex", stat_func=None)
... .set_axis_labels("x", "y"))
Draw a smaller figure with more space devoted to the marginal plots:
.. plot::
:context: close-figs
>>> g = sns.jointplot("total_bill", "tip", data=tips,
... size=5, ratio=3, color="g")
Pass keyword arguments down to the underlying plots:
.. plot::
:context: close-figs
>>> g = sns.jointplot("petal_length", "sepal_length", data=iris,
... marginal_kws=dict(bins=15, rug=True),
... annot_kws=dict(stat="r"),
... s=40, edgecolor="w", linewidth=1)
"""
# Set up empty default kwarg dicts
if joint_kws is None:
joint_kws = {}
joint_kws.update(kwargs)
if marginal_kws is None:
marginal_kws = {}
if annot_kws is None:
annot_kws = {}
# Make a colormap based off the plot color
if color is None:
color = color_palette()[0]
color_rgb = mpl.colors.colorConverter.to_rgb(color)
colors = [set_hls_values(color_rgb, l=l) for l in np.linspace(1, 0, 12)]
cmap = blend_palette(colors, as_cmap=True)
# Initialize the JointGrid object
grid = JointGrid(x, y, data, dropna=dropna,
size=size, ratio=ratio, space=space,
xlim=xlim, ylim=ylim)
# Plot the data using the grid
if kind == "scatter":
joint_kws.setdefault("color", color)
grid.plot_joint(plt.scatter, **joint_kws)
marginal_kws.setdefault("kde", False)
marginal_kws.setdefault("color", color)
grid.plot_marginals(distplot, **marginal_kws)
elif kind.startswith("hex"):
x_bins = _freedman_diaconis_bins(grid.x)
y_bins = _freedman_diaconis_bins(grid.y)
gridsize = int(np.mean([x_bins, y_bins]))
joint_kws.setdefault("gridsize", gridsize)
joint_kws.setdefault("cmap", cmap)
grid.plot_joint(plt.hexbin, **joint_kws)
marginal_kws.setdefault("kde", False)
marginal_kws.setdefault("color", color)
grid.plot_marginals(distplot, **marginal_kws)
elif kind.startswith("kde"):
joint_kws.setdefault("shade", True)
joint_kws.setdefault("cmap", cmap)
grid.plot_joint(kdeplot, **joint_kws)
marginal_kws.setdefault("shade", True)
marginal_kws.setdefault("color", color)
grid.plot_marginals(kdeplot, **marginal_kws)
elif kind.startswith("reg"):
from .linearmodels import regplot
marginal_kws.setdefault("color", color)
grid.plot_marginals(distplot, **marginal_kws)
joint_kws.setdefault("color", color)
grid.plot_joint(regplot, **joint_kws)
elif kind.startswith("resid"):
from .linearmodels import residplot
joint_kws.setdefault("color", color)
grid.plot_joint(residplot, **joint_kws)
x, y = grid.ax_joint.collections[0].get_offsets().T
marginal_kws.setdefault("color", color)
marginal_kws.setdefault("kde", False)
distplot(x, ax=grid.ax_marg_x, **marginal_kws)
distplot(y, vertical=True, fit=stats.norm, ax=grid.ax_marg_y,
**marginal_kws)
stat_func = None
else:
msg = "kind must be either 'scatter', 'reg', 'resid', 'kde', or 'hex'"
raise ValueError(msg)
if stat_func is not None:
grid.annotate(stat_func, **annot_kws)
return grid
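def _single_color_cmap_sketch(color="b"):
    # Illustrative sketch, not part of the original module: build a sequential
    # colormap fading from white to ``color``, mirroring the construction used
    # inside jointplot above. It reuses the same set_hls_values and
    # blend_palette helpers that this module already relies on.
    import numpy as np
    import matplotlib as mpl
    color_rgb = mpl.colors.colorConverter.to_rgb(color)
    colors = [set_hls_values(color_rgb, l=l) for l in np.linspace(1, 0, 12)]
    return blend_palette(colors, as_cmap=True)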
|
bsd-3-clause
|
hainm/scikit-learn
|
sklearn/datasets/tests/test_svmlight_format.py
|
228
|
11221
|
from bz2 import BZ2File
import gzip
from io import BytesIO
import numpy as np
import os
import shutil
from tempfile import NamedTemporaryFile
from sklearn.externals.six import b
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import raises
from sklearn.utils.testing import assert_in
import sklearn
from sklearn.datasets import (load_svmlight_file, load_svmlight_files,
dump_svmlight_file)
currdir = os.path.dirname(os.path.abspath(__file__))
datafile = os.path.join(currdir, "data", "svmlight_classification.txt")
multifile = os.path.join(currdir, "data", "svmlight_multilabel.txt")
invalidfile = os.path.join(currdir, "data", "svmlight_invalid.txt")
invalidfile2 = os.path.join(currdir, "data", "svmlight_invalid_order.txt")
def test_load_svmlight_file():
X, y = load_svmlight_file(datafile)
# test X's shape
assert_equal(X.indptr.shape[0], 7)
assert_equal(X.shape[0], 6)
assert_equal(X.shape[1], 21)
assert_equal(y.shape[0], 6)
# test X's non-zero values
for i, j, val in ((0, 2, 2.5), (0, 10, -5.2), (0, 15, 1.5),
(1, 5, 1.0), (1, 12, -3),
(2, 20, 27)):
assert_equal(X[i, j], val)
# tests X's zero values
assert_equal(X[0, 3], 0)
assert_equal(X[0, 5], 0)
assert_equal(X[1, 8], 0)
assert_equal(X[1, 16], 0)
assert_equal(X[2, 18], 0)
# test can change X's values
X[0, 2] *= 2
assert_equal(X[0, 2], 5)
# test y
assert_array_equal(y, [1, 2, 3, 4, 1, 2])
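def _svmlight_format_sketch():
    # Illustrative sketch, not part of the original test suite: the
    # svmlight/libsvm text format stores one sample per line as
    # "<label> <index>:<value> ...", with one-based indices when
    # zero_based=False. The tiny in-memory file below (made up for
    # illustration) round-trips through load_svmlight_file using the imports
    # already available in this module.
    f = BytesIO(b("1 1:2.5 3:-1.0\n-1 2:4.0\n"))
    X, y = load_svmlight_file(f, zero_based=False)
    assert_array_equal(y, [1, -1])
    assert_equal(X.shape, (2, 3))
    assert_equal(X[0, 0], 2.5)
    assert_equal(X[1, 1], 4.0)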
def test_load_svmlight_file_fd():
# test loading from file descriptor
X1, y1 = load_svmlight_file(datafile)
fd = os.open(datafile, os.O_RDONLY)
try:
X2, y2 = load_svmlight_file(fd)
assert_array_equal(X1.data, X2.data)
assert_array_equal(y1, y2)
finally:
os.close(fd)
def test_load_svmlight_file_multilabel():
X, y = load_svmlight_file(multifile, multilabel=True)
assert_equal(y, [(0, 1), (2,), (), (1, 2)])
def test_load_svmlight_files():
X_train, y_train, X_test, y_test = load_svmlight_files([datafile] * 2,
dtype=np.float32)
assert_array_equal(X_train.toarray(), X_test.toarray())
assert_array_equal(y_train, y_test)
assert_equal(X_train.dtype, np.float32)
assert_equal(X_test.dtype, np.float32)
X1, y1, X2, y2, X3, y3 = load_svmlight_files([datafile] * 3,
dtype=np.float64)
assert_equal(X1.dtype, X2.dtype)
assert_equal(X2.dtype, X3.dtype)
assert_equal(X3.dtype, np.float64)
def test_load_svmlight_file_n_features():
X, y = load_svmlight_file(datafile, n_features=22)
    # test X's shape
assert_equal(X.indptr.shape[0], 7)
assert_equal(X.shape[0], 6)
assert_equal(X.shape[1], 22)
# test X's non-zero values
for i, j, val in ((0, 2, 2.5), (0, 10, -5.2),
(1, 5, 1.0), (1, 12, -3)):
assert_equal(X[i, j], val)
# 21 features in file
assert_raises(ValueError, load_svmlight_file, datafile, n_features=20)
def test_load_compressed():
X, y = load_svmlight_file(datafile)
with NamedTemporaryFile(prefix="sklearn-test", suffix=".gz") as tmp:
tmp.close() # necessary under windows
with open(datafile, "rb") as f:
shutil.copyfileobj(f, gzip.open(tmp.name, "wb"))
Xgz, ygz = load_svmlight_file(tmp.name)
# because we "close" it manually and write to it,
# we need to remove it manually.
os.remove(tmp.name)
assert_array_equal(X.toarray(), Xgz.toarray())
assert_array_equal(y, ygz)
with NamedTemporaryFile(prefix="sklearn-test", suffix=".bz2") as tmp:
tmp.close() # necessary under windows
with open(datafile, "rb") as f:
shutil.copyfileobj(f, BZ2File(tmp.name, "wb"))
Xbz, ybz = load_svmlight_file(tmp.name)
# because we "close" it manually and write to it,
# we need to remove it manually.
os.remove(tmp.name)
assert_array_equal(X.toarray(), Xbz.toarray())
assert_array_equal(y, ybz)
@raises(ValueError)
def test_load_invalid_file():
load_svmlight_file(invalidfile)
@raises(ValueError)
def test_load_invalid_order_file():
load_svmlight_file(invalidfile2)
@raises(ValueError)
def test_load_zero_based():
f = BytesIO(b("-1 4:1.\n1 0:1\n"))
load_svmlight_file(f, zero_based=False)
def test_load_zero_based_auto():
data1 = b("-1 1:1 2:2 3:3\n")
data2 = b("-1 0:0 1:1\n")
f1 = BytesIO(data1)
X, y = load_svmlight_file(f1, zero_based="auto")
assert_equal(X.shape, (1, 3))
f1 = BytesIO(data1)
f2 = BytesIO(data2)
X1, y1, X2, y2 = load_svmlight_files([f1, f2], zero_based="auto")
assert_equal(X1.shape, (1, 4))
assert_equal(X2.shape, (1, 4))
def test_load_with_qid():
# load svmfile with qid attribute
data = b("""
3 qid:1 1:0.53 2:0.12
2 qid:1 1:0.13 2:0.1
7 qid:2 1:0.87 2:0.12""")
X, y = load_svmlight_file(BytesIO(data), query_id=False)
assert_array_equal(y, [3, 2, 7])
assert_array_equal(X.toarray(), [[.53, .12], [.13, .1], [.87, .12]])
res1 = load_svmlight_files([BytesIO(data)], query_id=True)
res2 = load_svmlight_file(BytesIO(data), query_id=True)
for X, y, qid in (res1, res2):
assert_array_equal(y, [3, 2, 7])
assert_array_equal(qid, [1, 1, 2])
assert_array_equal(X.toarray(), [[.53, .12], [.13, .1], [.87, .12]])
@raises(ValueError)
def test_load_invalid_file2():
load_svmlight_files([datafile, invalidfile, datafile])
@raises(TypeError)
def test_not_a_filename():
# in python 3 integers are valid file opening arguments (taken as unix
# file descriptors)
load_svmlight_file(.42)
@raises(IOError)
def test_invalid_filename():
load_svmlight_file("trou pic nic douille")
def test_dump():
Xs, y = load_svmlight_file(datafile)
Xd = Xs.toarray()
# slicing a csr_matrix can unsort its .indices, so test that we sort
# those correctly
Xsliced = Xs[np.arange(Xs.shape[0])]
for X in (Xs, Xd, Xsliced):
for zero_based in (True, False):
for dtype in [np.float32, np.float64, np.int32]:
f = BytesIO()
# we need to pass a comment to get the version info in;
# LibSVM doesn't grok comments so they're not put in by
# default anymore.
dump_svmlight_file(X.astype(dtype), y, f, comment="test",
zero_based=zero_based)
f.seek(0)
comment = f.readline()
try:
comment = str(comment, "utf-8")
except TypeError: # fails in Python 2.x
pass
assert_in("scikit-learn %s" % sklearn.__version__, comment)
comment = f.readline()
try:
comment = str(comment, "utf-8")
except TypeError: # fails in Python 2.x
pass
assert_in(["one", "zero"][zero_based] + "-based", comment)
X2, y2 = load_svmlight_file(f, dtype=dtype,
zero_based=zero_based)
assert_equal(X2.dtype, dtype)
assert_array_equal(X2.sorted_indices().indices, X2.indices)
if dtype == np.float32:
assert_array_almost_equal(
# allow a rounding error at the last decimal place
Xd.astype(dtype), X2.toarray(), 4)
else:
assert_array_almost_equal(
# allow a rounding error at the last decimal place
Xd.astype(dtype), X2.toarray(), 15)
assert_array_equal(y, y2)
def test_dump_multilabel():
X = [[1, 0, 3, 0, 5],
[0, 0, 0, 0, 0],
[0, 5, 0, 1, 0]]
y = [[0, 1, 0], [1, 0, 1], [1, 1, 0]]
f = BytesIO()
dump_svmlight_file(X, y, f, multilabel=True)
f.seek(0)
# make sure it dumps multilabel correctly
assert_equal(f.readline(), b("1 0:1 2:3 4:5\n"))
assert_equal(f.readline(), b("0,2 \n"))
assert_equal(f.readline(), b("0,1 1:5 3:1\n"))
def test_dump_concise():
one = 1
two = 2.1
three = 3.01
exact = 1.000000000000001
# loses the last decimal place
almost = 1.0000000000000001
X = [[one, two, three, exact, almost],
[1e9, 2e18, 3e27, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]]
y = [one, two, three, exact, almost]
f = BytesIO()
dump_svmlight_file(X, y, f)
f.seek(0)
# make sure it's using the most concise format possible
assert_equal(f.readline(),
b("1 0:1 1:2.1 2:3.01 3:1.000000000000001 4:1\n"))
assert_equal(f.readline(), b("2.1 0:1000000000 1:2e+18 2:3e+27\n"))
assert_equal(f.readline(), b("3.01 \n"))
assert_equal(f.readline(), b("1.000000000000001 \n"))
assert_equal(f.readline(), b("1 \n"))
f.seek(0)
# make sure it's correct too :)
X2, y2 = load_svmlight_file(f)
assert_array_almost_equal(X, X2.toarray())
assert_array_equal(y, y2)
def test_dump_comment():
X, y = load_svmlight_file(datafile)
X = X.toarray()
f = BytesIO()
ascii_comment = "This is a comment\nspanning multiple lines."
dump_svmlight_file(X, y, f, comment=ascii_comment, zero_based=False)
f.seek(0)
X2, y2 = load_svmlight_file(f, zero_based=False)
assert_array_almost_equal(X, X2.toarray())
assert_array_equal(y, y2)
# XXX we have to update this to support Python 3.x
utf8_comment = b("It is true that\n\xc2\xbd\xc2\xb2 = \xc2\xbc")
f = BytesIO()
assert_raises(UnicodeDecodeError,
dump_svmlight_file, X, y, f, comment=utf8_comment)
unicode_comment = utf8_comment.decode("utf-8")
f = BytesIO()
dump_svmlight_file(X, y, f, comment=unicode_comment, zero_based=False)
f.seek(0)
X2, y2 = load_svmlight_file(f, zero_based=False)
assert_array_almost_equal(X, X2.toarray())
assert_array_equal(y, y2)
f = BytesIO()
assert_raises(ValueError,
dump_svmlight_file, X, y, f, comment="I've got a \0.")
def test_dump_invalid():
X, y = load_svmlight_file(datafile)
f = BytesIO()
y2d = [y]
assert_raises(ValueError, dump_svmlight_file, X, y2d, f)
f = BytesIO()
assert_raises(ValueError, dump_svmlight_file, X, y[:-1], f)
def test_dump_query_id():
# test dumping a file with query_id
X, y = load_svmlight_file(datafile)
X = X.toarray()
query_id = np.arange(X.shape[0]) // 2
f = BytesIO()
dump_svmlight_file(X, y, f, query_id=query_id, zero_based=True)
f.seek(0)
X1, y1, query_id1 = load_svmlight_file(f, query_id=True, zero_based=True)
assert_array_almost_equal(X, X1.toarray())
assert_array_almost_equal(y, y1)
assert_array_almost_equal(query_id, query_id1)
|
bsd-3-clause
|
IssamLaradji/scikit-learn
|
sklearn/cluster/tests/test_k_means.py
|
8
|
25170
|
"""Testing for K-means"""
import sys
import numpy as np
from scipy import sparse as sp
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import if_not_mac_os
from sklearn.utils.extmath import row_norms
from sklearn.metrics.cluster import v_measure_score
from sklearn.cluster import KMeans, k_means
from sklearn.cluster import MiniBatchKMeans
from sklearn.cluster.k_means_ import _labels_inertia
from sklearn.cluster.k_means_ import _mini_batch_step
from sklearn.datasets.samples_generator import make_blobs
from sklearn.externals.six.moves import cStringIO as StringIO
# non centered, sparse centers to check the clustering on non-trivial data
centers = np.array([
[0.0, 5.0, 0.0, 0.0, 0.0],
[1.0, 1.0, 4.0, 0.0, 0.0],
[1.0, 0.0, 0.0, 5.0, 1.0],
])
n_samples = 100
n_clusters, n_features = centers.shape
X, true_labels = make_blobs(n_samples=n_samples, centers=centers,
cluster_std=1., random_state=42)
X_csr = sp.csr_matrix(X)
def test_kmeans_dtype():
rnd = np.random.RandomState(0)
X = rnd.normal(size=(40, 2))
X = (X * 10).astype(np.uint8)
km = KMeans(n_init=1).fit(X)
pred_x = assert_warns(RuntimeWarning, km.predict, X)
assert_array_equal(km.labels_, pred_x)
def test_labels_assignment_and_inertia():
    # pure numpy implementation as an easily auditable reference ("gold")
    # implementation
rng = np.random.RandomState(42)
noisy_centers = centers + rng.normal(size=centers.shape)
labels_gold = - np.ones(n_samples, dtype=np.int)
mindist = np.empty(n_samples)
mindist.fill(np.infty)
for center_id in range(n_clusters):
dist = np.sum((X - noisy_centers[center_id]) ** 2, axis=1)
labels_gold[dist < mindist] = center_id
mindist = np.minimum(dist, mindist)
inertia_gold = mindist.sum()
assert_true((mindist >= 0.0).all())
assert_true((labels_gold != -1).all())
# perform label assignment using the dense array input
x_squared_norms = (X ** 2).sum(axis=1)
labels_array, inertia_array = _labels_inertia(
X, x_squared_norms, noisy_centers)
assert_array_almost_equal(inertia_array, inertia_gold)
assert_array_equal(labels_array, labels_gold)
# perform label assignment using the sparse CSR input
x_squared_norms_from_csr = row_norms(X_csr, squared=True)
labels_csr, inertia_csr = _labels_inertia(
X_csr, x_squared_norms_from_csr, noisy_centers)
assert_array_almost_equal(inertia_csr, inertia_gold)
assert_array_equal(labels_csr, labels_gold)
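def _reference_labels_inertia_sketch(X, centers):
    # Illustrative helper, not part of the original tests: the same pure-numpy
    # reference computation as above, packaged as a function. Each sample is
    # assigned to its closest center; the inertia is the sum of squared
    # distances to the assigned centers. Relies on the module-level numpy
    # import.
    dist = ((X[:, np.newaxis, :] - centers[np.newaxis, :, :]) ** 2).sum(axis=2)
    return dist.argmin(axis=1), dist.min(axis=1).sum()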
def test_minibatch_update_consistency():
"""Check that dense and sparse minibatch update give the same results"""
rng = np.random.RandomState(42)
old_centers = centers + rng.normal(size=centers.shape)
new_centers = old_centers.copy()
new_centers_csr = old_centers.copy()
counts = np.zeros(new_centers.shape[0], dtype=np.int32)
counts_csr = np.zeros(new_centers.shape[0], dtype=np.int32)
x_squared_norms = (X ** 2).sum(axis=1)
x_squared_norms_csr = row_norms(X_csr, squared=True)
buffer = np.zeros(centers.shape[1], dtype=np.double)
buffer_csr = np.zeros(centers.shape[1], dtype=np.double)
# extract a small minibatch
X_mb = X[:10]
X_mb_csr = X_csr[:10]
x_mb_squared_norms = x_squared_norms[:10]
x_mb_squared_norms_csr = x_squared_norms_csr[:10]
# step 1: compute the dense minibatch update
old_inertia, incremental_diff = _mini_batch_step(
X_mb, x_mb_squared_norms, new_centers, counts,
buffer, 1, None, random_reassign=False)
assert_greater(old_inertia, 0.0)
# compute the new inertia on the same batch to check that it decreased
labels, new_inertia = _labels_inertia(
X_mb, x_mb_squared_norms, new_centers)
assert_greater(new_inertia, 0.0)
assert_less(new_inertia, old_inertia)
# check that the incremental difference computation is matching the
# final observed value
effective_diff = np.sum((new_centers - old_centers) ** 2)
assert_almost_equal(incremental_diff, effective_diff)
# step 2: compute the sparse minibatch update
old_inertia_csr, incremental_diff_csr = _mini_batch_step(
X_mb_csr, x_mb_squared_norms_csr, new_centers_csr, counts_csr,
buffer_csr, 1, None, random_reassign=False)
assert_greater(old_inertia_csr, 0.0)
# compute the new inertia on the same batch to check that it decreased
labels_csr, new_inertia_csr = _labels_inertia(
X_mb_csr, x_mb_squared_norms_csr, new_centers_csr)
assert_greater(new_inertia_csr, 0.0)
assert_less(new_inertia_csr, old_inertia_csr)
# check that the incremental difference computation is matching the
# final observed value
effective_diff = np.sum((new_centers_csr - old_centers) ** 2)
assert_almost_equal(incremental_diff_csr, effective_diff)
# step 3: check that sparse and dense updates lead to the same results
assert_array_equal(labels, labels_csr)
assert_array_almost_equal(new_centers, new_centers_csr)
assert_almost_equal(incremental_diff, incremental_diff_csr)
assert_almost_equal(old_inertia, old_inertia_csr)
assert_almost_equal(new_inertia, new_inertia_csr)
def _check_fitted_model(km):
    # check that the number of cluster centers and distinct labels match
# the expectation
centers = km.cluster_centers_
assert_equal(centers.shape, (n_clusters, n_features))
labels = km.labels_
assert_equal(np.unique(labels).shape[0], n_clusters)
    # check that the label assignment is perfect (up to a permutation)
assert_equal(v_measure_score(true_labels, labels), 1.0)
assert_greater(km.inertia_, 0.0)
# check error on dataset being too small
assert_raises(ValueError, km.fit, [[0., 1.]])
def test_k_means_plus_plus_init():
km = KMeans(init="k-means++", n_clusters=n_clusters,
random_state=42).fit(X)
_check_fitted_model(km)
def test_k_means_check_fitted():
km = KMeans(n_clusters=n_clusters, random_state=42)
assert_raises(AttributeError, km._check_fitted)
def test_k_means_new_centers():
# Explore the part of the code where a new center is reassigned
X = np.array([[0, 0, 1, 1],
[0, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 1, 0, 0]])
labels = [0, 1, 2, 1, 1, 2]
bad_centers = np.array([[+0, 1, 0, 0],
[.2, 0, .2, .2],
[+0, 0, 0, 0]])
km = KMeans(n_clusters=3, init=bad_centers, n_init=1, max_iter=10,
random_state=1)
for this_X in (X, sp.coo_matrix(X)):
km.fit(this_X)
this_labels = km.labels_
# Reorder the labels so that the first instance is in cluster 0,
# the second in cluster 1, ...
this_labels = np.unique(this_labels, return_index=True)[1][this_labels]
np.testing.assert_array_equal(this_labels, labels)
def _has_blas_lib(libname):
from numpy.distutils.system_info import get_info
return libname in get_info('blas_opt').get('libraries', [])
@if_not_mac_os()
def test_k_means_plus_plus_init_2_jobs():
if _has_blas_lib('openblas'):
raise SkipTest('Multi-process bug with OpenBLAS (see issue #636)')
km = KMeans(init="k-means++", n_clusters=n_clusters, n_jobs=2,
random_state=42).fit(X)
_check_fitted_model(km)
def test_k_means_precompute_distances_flag():
    # check that an error is raised if the precompute_distances flag is not
    # supported
km = KMeans(precompute_distances="wrong")
assert_raises(ValueError, km.fit, X)
def test_k_means_plus_plus_init_sparse():
km = KMeans(init="k-means++", n_clusters=n_clusters, random_state=42)
km.fit(X_csr)
_check_fitted_model(km)
def test_k_means_random_init():
km = KMeans(init="random", n_clusters=n_clusters, random_state=42)
km.fit(X)
_check_fitted_model(km)
def test_k_means_random_init_sparse():
km = KMeans(init="random", n_clusters=n_clusters, random_state=42)
km.fit(X_csr)
_check_fitted_model(km)
def test_k_means_plus_plus_init_not_precomputed():
km = KMeans(init="k-means++", n_clusters=n_clusters, random_state=42,
precompute_distances=False).fit(X)
_check_fitted_model(km)
def test_k_means_random_init_not_precomputed():
km = KMeans(init="random", n_clusters=n_clusters, random_state=42,
precompute_distances=False).fit(X)
_check_fitted_model(km)
def test_k_means_perfect_init():
km = KMeans(init=centers.copy(), n_clusters=n_clusters, random_state=42,
n_init=1)
km.fit(X)
_check_fitted_model(km)
def test_mb_k_means_plus_plus_init_dense_array():
mb_k_means = MiniBatchKMeans(init="k-means++", n_clusters=n_clusters,
random_state=42)
mb_k_means.fit(X)
_check_fitted_model(mb_k_means)
def test_mb_kmeans_verbose():
mb_k_means = MiniBatchKMeans(init="k-means++", n_clusters=n_clusters,
random_state=42, verbose=1)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
mb_k_means.fit(X)
finally:
sys.stdout = old_stdout
def test_mb_k_means_plus_plus_init_sparse_matrix():
mb_k_means = MiniBatchKMeans(init="k-means++", n_clusters=n_clusters,
random_state=42)
mb_k_means.fit(X_csr)
_check_fitted_model(mb_k_means)
def test_minibatch_init_with_large_k():
mb_k_means = MiniBatchKMeans(init='k-means++', init_size=10, n_clusters=20)
    # Check that a warning is raised, as the number of clusters is larger
    # than the init_size
assert_warns(RuntimeWarning, mb_k_means.fit, X)
def test_minibatch_k_means_random_init_dense_array():
# increase n_init to make random init stable enough
mb_k_means = MiniBatchKMeans(init="random", n_clusters=n_clusters,
random_state=42, n_init=10).fit(X)
_check_fitted_model(mb_k_means)
def test_minibatch_k_means_random_init_sparse_csr():
# increase n_init to make random init stable enough
mb_k_means = MiniBatchKMeans(init="random", n_clusters=n_clusters,
random_state=42, n_init=10).fit(X_csr)
_check_fitted_model(mb_k_means)
def test_minibatch_k_means_perfect_init_dense_array():
mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
random_state=42, n_init=1).fit(X)
_check_fitted_model(mb_k_means)
def test_minibatch_k_means_init_multiple_runs_with_explicit_centers():
mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
random_state=42, n_init=10)
assert_warns(RuntimeWarning, mb_k_means.fit, X)
def test_minibatch_k_means_perfect_init_sparse_csr():
mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
random_state=42, n_init=1).fit(X_csr)
_check_fitted_model(mb_k_means)
def test_minibatch_sensible_reassign_fit():
# check if identical initial clusters are reassigned
# also a regression test for when there are more desired reassignments than
# samples.
zeroed_X, true_labels = make_blobs(n_samples=100, centers=5,
cluster_std=1., random_state=42)
zeroed_X[::2, :] = 0
mb_k_means = MiniBatchKMeans(n_clusters=20, batch_size=10, random_state=42,
verbose=10, init="random")
mb_k_means.fit(zeroed_X)
# there should not be too many exact zero cluster centers
assert_greater(mb_k_means.cluster_centers_.any(axis=1).sum(), 10)
# do the same with batch-size > X.shape[0] (regression test)
mb_k_means = MiniBatchKMeans(n_clusters=20, batch_size=201,
random_state=42, verbose=10, init="random")
mb_k_means.fit(zeroed_X)
# there should not be too many exact zero cluster centers
assert_greater(mb_k_means.cluster_centers_.any(axis=1).sum(), 10)
def test_minibatch_sensible_reassign_partial_fit():
zeroed_X, true_labels = make_blobs(n_samples=n_samples, centers=5,
cluster_std=1., random_state=42)
zeroed_X[::2, :] = 0
mb_k_means = MiniBatchKMeans(n_clusters=20, random_state=42, verbose=10,
init="random")
for i in range(100):
mb_k_means.partial_fit(zeroed_X)
# there should not be too many exact zero cluster centers
assert_greater(mb_k_means.cluster_centers_.any(axis=1).sum(), 10)
def test_minibatch_reassign():
# Give a perfect initialization, but a large reassignment_ratio,
# as a result all the centers should be reassigned and the model
    # should no longer be good
for this_X in (X, X_csr):
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, batch_size=100,
random_state=42)
mb_k_means.fit(this_X)
score_before = mb_k_means.score(this_X)
try:
old_stdout = sys.stdout
sys.stdout = StringIO()
# Turn on verbosity to smoke test the display code
_mini_batch_step(this_X, (X ** 2).sum(axis=1),
mb_k_means.cluster_centers_,
mb_k_means.counts_,
np.zeros(X.shape[1], np.double),
False, distances=np.zeros(X.shape[0]),
random_reassign=True, random_state=42,
reassignment_ratio=1, verbose=True)
finally:
sys.stdout = old_stdout
assert_greater(score_before, mb_k_means.score(this_X))
# Give a perfect initialization, with a small reassignment_ratio,
# no center should be reassigned
for this_X in (X, X_csr):
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, batch_size=100,
init=centers.copy(),
random_state=42, n_init=1)
mb_k_means.fit(this_X)
clusters_before = mb_k_means.cluster_centers_
# Turn on verbosity to smoke test the display code
_mini_batch_step(this_X, (X ** 2).sum(axis=1),
mb_k_means.cluster_centers_,
mb_k_means.counts_,
np.zeros(X.shape[1], np.double),
False, distances=np.zeros(X.shape[0]),
random_reassign=True, random_state=42,
reassignment_ratio=1e-15)
assert_array_almost_equal(clusters_before, mb_k_means.cluster_centers_)
def test_minibatch_with_many_reassignments():
# Test for the case that the number of clusters to reassign is bigger
# than the batch_size
n_samples = 550
rnd = np.random.RandomState(42)
X = rnd.uniform(size=(n_samples, 10))
# Check that the fit works if n_clusters is bigger than the batch_size.
# Run the test with 550 clusters and 550 samples, because it turned out
    # that these values ensure that the number of clusters to reassign
# is always bigger than the batch_size
n_clusters = 550
MiniBatchKMeans(n_clusters=n_clusters,
batch_size=100,
init_size=n_samples,
random_state=42).fit(X)
def test_sparse_mb_k_means_callable_init():
def test_init(X, k, random_state):
return centers
# Small test to check that giving the wrong number of centers
# raises a meaningful error
assert_raises(ValueError,
MiniBatchKMeans(init=test_init, random_state=42).fit, X_csr)
# Now check that the fit actually works
mb_k_means = MiniBatchKMeans(n_clusters=3, init=test_init,
random_state=42).fit(X_csr)
_check_fitted_model(mb_k_means)
def test_mini_batch_k_means_random_init_partial_fit():
km = MiniBatchKMeans(n_clusters=n_clusters, init="random", random_state=42)
# use the partial_fit API for online learning
for X_minibatch in np.array_split(X, 10):
km.partial_fit(X_minibatch)
# compute the labeling on the complete dataset
labels = km.predict(X)
assert_equal(v_measure_score(true_labels, labels), 1.0)
def test_minibatch_default_init_size():
mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
batch_size=10, random_state=42,
n_init=1).fit(X)
assert_equal(mb_k_means.init_size_, 3 * mb_k_means.batch_size)
_check_fitted_model(mb_k_means)
def test_minibatch_tol():
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, batch_size=10,
random_state=42, tol=.01).fit(X)
_check_fitted_model(mb_k_means)
def test_minibatch_set_init_size():
mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
init_size=666, random_state=42,
n_init=1).fit(X)
assert_equal(mb_k_means.init_size, 666)
assert_equal(mb_k_means.init_size_, n_samples)
_check_fitted_model(mb_k_means)
def test_k_means_invalid_init():
km = KMeans(init="invalid", n_init=1, n_clusters=n_clusters)
assert_raises(ValueError, km.fit, X)
def test_mini_match_k_means_invalid_init():
km = MiniBatchKMeans(init="invalid", n_init=1, n_clusters=n_clusters)
assert_raises(ValueError, km.fit, X)
def test_k_means_copyx():
"""Check if copy_x=False returns nearly equal X after de-centering."""
my_X = X.copy()
km = KMeans(copy_x=False, n_clusters=n_clusters, random_state=42)
km.fit(my_X)
_check_fitted_model(km)
# check if my_X is centered
assert_array_almost_equal(my_X, X)
def test_k_means_non_collapsed():
"""Check k_means with a bad initialization does not yield a singleton
Starting with bad centers that are quickly ignored should not
result in a repositioning of the centers to the center of mass that
would lead to collapsed centers which in turns make the clustering
dependent of the numerical unstabilities.
"""
my_X = np.array([[1.1, 1.1], [0.9, 1.1], [1.1, 0.9], [0.9, 1.1]])
array_init = np.array([[1.0, 1.0], [5.0, 5.0], [-5.0, -5.0]])
km = KMeans(init=array_init, n_clusters=3, random_state=42, n_init=1)
km.fit(my_X)
    # centers must not have collapsed
assert_equal(len(np.unique(km.labels_)), 3)
centers = km.cluster_centers_
assert_true(np.linalg.norm(centers[0] - centers[1]) >= 0.1)
assert_true(np.linalg.norm(centers[0] - centers[2]) >= 0.1)
assert_true(np.linalg.norm(centers[1] - centers[2]) >= 0.1)
def test_predict():
km = KMeans(n_clusters=n_clusters, random_state=42)
km.fit(X)
# sanity check: predict centroid labels
pred = km.predict(km.cluster_centers_)
assert_array_equal(pred, np.arange(n_clusters))
# sanity check: re-predict labeling for training set samples
pred = km.predict(X)
assert_array_equal(pred, km.labels_)
# re-predict labels for training set using fit_predict
pred = km.fit_predict(X)
assert_array_equal(pred, km.labels_)
def test_score():
km1 = KMeans(n_clusters=n_clusters, max_iter=1, random_state=42)
s1 = km1.fit(X).score(X)
km2 = KMeans(n_clusters=n_clusters, max_iter=10, random_state=42)
s2 = km2.fit(X).score(X)
assert_greater(s2, s1)
def test_predict_minibatch_dense_input():
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, random_state=40).fit(X)
# sanity check: predict centroid labels
pred = mb_k_means.predict(mb_k_means.cluster_centers_)
assert_array_equal(pred, np.arange(n_clusters))
# sanity check: re-predict labeling for training set samples
pred = mb_k_means.predict(X)
assert_array_equal(mb_k_means.predict(X), mb_k_means.labels_)
def test_predict_minibatch_kmeanspp_init_sparse_input():
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, init='k-means++',
n_init=10).fit(X_csr)
# sanity check: re-predict labeling for training set samples
assert_array_equal(mb_k_means.predict(X_csr), mb_k_means.labels_)
# sanity check: predict centroid labels
pred = mb_k_means.predict(mb_k_means.cluster_centers_)
assert_array_equal(pred, np.arange(n_clusters))
    # check that models trained on sparse input also work for dense input at
# predict time
assert_array_equal(mb_k_means.predict(X), mb_k_means.labels_)
def test_predict_minibatch_random_init_sparse_input():
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, init='random',
n_init=10).fit(X_csr)
# sanity check: re-predict labeling for training set samples
assert_array_equal(mb_k_means.predict(X_csr), mb_k_means.labels_)
# sanity check: predict centroid labels
pred = mb_k_means.predict(mb_k_means.cluster_centers_)
assert_array_equal(pred, np.arange(n_clusters))
    # check that models trained on sparse input also work for dense input at
# predict time
assert_array_equal(mb_k_means.predict(X), mb_k_means.labels_)
def test_input_dtypes():
X_list = [[0, 0], [10, 10], [12, 9], [-1, 1], [2, 0], [8, 10]]
X_int = np.array(X_list, dtype=np.int32)
X_int_csr = sp.csr_matrix(X_int)
init_int = X_int[:2]
fitted_models = [
KMeans(n_clusters=2).fit(X_list),
KMeans(n_clusters=2).fit(X_int),
KMeans(n_clusters=2, init=init_int, n_init=1).fit(X_list),
KMeans(n_clusters=2, init=init_int, n_init=1).fit(X_int),
# mini batch kmeans is very unstable on such a small dataset hence
# we use many inits
MiniBatchKMeans(n_clusters=2, n_init=10, batch_size=2).fit(X_list),
MiniBatchKMeans(n_clusters=2, n_init=10, batch_size=2).fit(X_int),
MiniBatchKMeans(n_clusters=2, n_init=10, batch_size=2).fit(X_int_csr),
MiniBatchKMeans(n_clusters=2, batch_size=2,
init=init_int, n_init=1).fit(X_list),
MiniBatchKMeans(n_clusters=2, batch_size=2,
init=init_int, n_init=1).fit(X_int),
MiniBatchKMeans(n_clusters=2, batch_size=2,
init=init_int, n_init=1).fit(X_int_csr),
]
expected_labels = [0, 1, 1, 0, 0, 1]
scores = np.array([v_measure_score(expected_labels, km.labels_)
for km in fitted_models])
assert_array_equal(scores, np.ones(scores.shape[0]))
def test_transform():
km = KMeans(n_clusters=n_clusters)
km.fit(X)
X_new = km.transform(km.cluster_centers_)
for c in range(n_clusters):
assert_equal(X_new[c, c], 0)
for c2 in range(n_clusters):
if c != c2:
assert_greater(X_new[c, c2], 0)
def test_fit_transform():
X1 = KMeans(n_clusters=3, random_state=51).fit(X).transform(X)
X2 = KMeans(n_clusters=3, random_state=51).fit_transform(X)
assert_array_equal(X1, X2)
def test_n_init():
"""Check that increasing the number of init increases the quality"""
n_runs = 5
n_init_range = [1, 5, 10]
inertia = np.zeros((len(n_init_range), n_runs))
for i, n_init in enumerate(n_init_range):
for j in range(n_runs):
km = KMeans(n_clusters=n_clusters, init="random", n_init=n_init,
random_state=j).fit(X)
inertia[i, j] = km.inertia_
inertia = inertia.mean(axis=1)
failure_msg = ("Inertia %r should be decreasing"
" when n_init is increasing.") % list(inertia)
for i in range(len(n_init_range) - 1):
assert_true(inertia[i] >= inertia[i + 1], failure_msg)
def test_k_means_function():
# test calling the k_means function directly
# catch output
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
cluster_centers, labels, inertia = k_means(X, n_clusters=n_clusters,
verbose=True)
finally:
sys.stdout = old_stdout
centers = cluster_centers
assert_equal(centers.shape, (n_clusters, n_features))
assert_equal(np.unique(labels).shape[0], n_clusters)
    # check that the label assignment is perfect (up to a permutation)
assert_equal(v_measure_score(true_labels, labels), 1.0)
assert_greater(inertia, 0.0)
# check warning when centers are passed
assert_warns(RuntimeWarning, k_means, X, n_clusters=n_clusters,
init=centers)
    # too many clusters desired
assert_raises(ValueError, k_means, X, n_clusters=X.shape[0] + 1)
|
bsd-3-clause
|
mmottahedi/neuralnilm_prototype
|
scripts/e401.py
|
2
|
23899
|
from __future__ import print_function, division
import matplotlib
import logging
from sys import stdout
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
from neuralnilm import (Net, RealApplianceSource,
BLSTMLayer, DimshuffleLayer,
BidirectionalRecurrentLayer)
from neuralnilm.source import standardise, discretize, fdiff, power_and_fdiff
from neuralnilm.experiment import run_experiment, init_experiment
from neuralnilm.net import TrainingError
from neuralnilm.layers import MixtureDensityLayer
from neuralnilm.objectives import (scaled_cost, mdn_nll,
scaled_cost_ignore_inactive, ignore_inactive,
scaled_cost3)
from neuralnilm.plot import MDNPlotter, CentralOutputPlotter
from neuralnilm.updates import clipped_nesterov_momentum
from lasagne.nonlinearities import sigmoid, rectify, tanh, identity
from lasagne.objectives import mse, binary_crossentropy
from lasagne.init import Uniform, Normal, Identity
from lasagne.layers import (LSTMLayer, DenseLayer, Conv1DLayer,
ReshapeLayer, FeaturePoolLayer, RecurrentLayer)
from lasagne.layers.batch_norm import BatchNormLayer
from lasagne.updates import nesterov_momentum, momentum
from functools import partial
import os
import __main__
from copy import deepcopy
from math import sqrt
import numpy as np
import theano.tensor as T
import gc
"""
e400
'learn_init': False
independently_centre_inputs : True
e401
input is in range [0,1]
"""
NAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0]
#PATH = "/homes/dk3810/workspace/python/neuralnilm/figures"
PATH = "/data/dk3810/figures"
SAVE_PLOT_INTERVAL = 500
GRADIENT_STEPS = 100
source_dict = dict(
filename='/data/dk3810/ukdale.h5',
appliances=[
['fridge freezer', 'fridge', 'freezer'],
'hair straighteners',
'television',
'dish washer',
['washer dryer', 'washing machine']
],
max_appliance_powers=[300, 500, 200, 2500, 2400],
max_input_power=5900,
on_power_thresholds=[5] * 5,
min_on_durations=[60, 60, 60, 1800, 1800],
min_off_durations=[12, 12, 12, 1800, 600],
window=("2013-06-01", "2014-07-01"),
seq_length=512,
# random_window=64,
output_one_appliance=True,
boolean_targets=False,
train_buildings=[1],
validation_buildings=[1],
skip_probability=0.9,
one_target_per_seq=False,
n_seq_per_batch=64,
subsample_target=4,
include_diff=False,
include_power=True,
clip_appliance_power=True,
target_is_prediction=False,
# independently_center_inputs=True,
# standardise_input=True,
standardise_targets=True,
unit_variance_targets=True,
input_padding=2,
lag=0
# classification=True
# reshape_target_to_2D=True
# input_stats={'mean': np.array([ 0.05526326], dtype=np.float32),
# 'std': np.array([ 0.12636775], dtype=np.float32)},
# target_stats={
# 'mean': np.array([ 0.04066789, 0.01881946,
# 0.24639061, 0.17608672, 0.10273963],
# dtype=np.float32),
# 'std': np.array([ 0.11449792, 0.07338708,
# 0.26608968, 0.33463112, 0.21250485],
# dtype=np.float32)}
)
N = 50
net_dict = dict(
save_plot_interval=SAVE_PLOT_INTERVAL,
# loss_function=partial(ignore_inactive, loss_func=mdn_nll, seq_length=SEQ_LENGTH),
# loss_function=lambda x, t: mdn_nll(x, t).mean(),
loss_function=lambda x, t: mse(x, t).mean(),
# loss_function=lambda x, t: binary_crossentropy(x, t).mean(),
# loss_function=partial(scaled_cost, loss_func=mse),
# loss_function=ignore_inactive,
# loss_function=partial(scaled_cost3, ignore_inactive=False),
# updates_func=momentum,
updates_func=clipped_nesterov_momentum,
updates_kwargs={'clip_range': (0, 10)},
learning_rate=1e-5,
learning_rate_changes_by_iteration={
1000: 1e-6,
2000: 1e-7
# 800: 1e-4
# 500: 1e-3
# 4000: 1e-03,
# 6000: 5e-06,
# 7000: 1e-06
# 2000: 5e-06
# 3000: 1e-05
# 7000: 5e-06,
# 10000: 1e-06,
# 15000: 5e-07,
# 50000: 1e-07
},
do_save_activations=True,
# auto_reshape=False,
# plotter=CentralOutputPlotter
# plotter=MDNPlotter
)
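def _scheduled_learning_rate_sketch(iteration, base_rate=1e-5, changes=None):
    # Illustrative helper, not part of the original script: one plausible
    # reading of learning_rate_changes_by_iteration above, i.e. the most
    # recent threshold at or below the current iteration wins. Whether
    # neuralnilm applies the schedule exactly this way is an assumption.
    if changes is None:
        changes = {1000: 1e-6, 2000: 1e-7}
    rate = base_rate
    for threshold in sorted(changes):
        if iteration >= threshold:
            rate = changes[threshold]
    return rate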
def exp_a(name):
source_dict_copy = deepcopy(source_dict)
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
net_dict_copy['layers_config']= [
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1)
},
{
'type': Conv1DLayer,
'num_filters': 40,
'filter_length': 2,
'stride': 1,
'nonlinearity': identity,
'b': None,
'border_mode': 'same'
},
{
'type': BatchNormLayer,
'nonlinearity': rectify
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1)
},
{
'type': BidirectionalRecurrentLayer,
'num_units': 40,
'gradient_steps': GRADIENT_STEPS,
'nonlinearity': identity,
'b': None,
'W_hid_to_hid': Identity(scale=0.5),
'learn_init': False, 'precompute_input': False
},
{
'type': BatchNormLayer,
'axes': (0, 1),
'nonlinearity': rectify
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1)
},
{
'type': Conv1DLayer,
'num_filters': 40,
'filter_length': 4,
'stride': 4,
'nonlinearity': identity,
'b': None
},
{
'type': BatchNormLayer,
'nonlinearity': rectify
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1)
},
{
'type': BidirectionalRecurrentLayer,
'num_units': 40,
'gradient_steps': GRADIENT_STEPS,
'nonlinearity': identity,
'b': None,
'W_hid_to_hid': Identity(scale=0.5),
'learn_init': False, 'precompute_input': False
},
{
'type': BatchNormLayer,
'axes': (0, 1),
'nonlinearity': rectify
},
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': T.nnet.softplus
}
]
net = Net(**net_dict_copy)
return net
def exp_b(name):
"""
tanh first layer, all others are rectify
"""
source_dict_copy = deepcopy(source_dict)
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
net_dict_copy['layers_config']= [
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1)
},
{
'type': Conv1DLayer,
'num_filters': 40,
'filter_length': 2,
'stride': 1,
'nonlinearity': identity,
'b': None,
'border_mode': 'same'
},
{
'type': BatchNormLayer,
'nonlinearity': tanh
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1)
},
{
'type': BidirectionalRecurrentLayer,
'num_units': 40,
'gradient_steps': GRADIENT_STEPS,
'nonlinearity': identity,
'b': None,
'W_hid_to_hid': Identity(scale=0.5),
'learn_init': False, 'precompute_input': False
},
{
'type': BatchNormLayer,
'axes': (0, 1),
'nonlinearity': rectify
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1)
},
{
'type': Conv1DLayer,
'num_filters': 40,
'filter_length': 4,
'stride': 4,
'nonlinearity': identity,
'b': None
},
{
'type': BatchNormLayer,
'nonlinearity': rectify
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1)
},
{
'type': BidirectionalRecurrentLayer,
'num_units': 40,
'gradient_steps': GRADIENT_STEPS,
'nonlinearity': identity,
'b': None,
'W_hid_to_hid': Identity(scale=0.5),
'learn_init': False, 'precompute_input': False
},
{
'type': BatchNormLayer,
'axes': (0, 1),
'nonlinearity': rectify
},
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': T.nnet.softplus
}
]
net = Net(**net_dict_copy)
return net
def exp_c(name):
"""
tanh all the way through. Identity init of RNNs
"""
source_dict_copy = deepcopy(source_dict)
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
net_dict_copy['layers_config']= [
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1)
},
{
'type': Conv1DLayer,
'num_filters': 40,
'filter_length': 2,
'stride': 1,
'nonlinearity': identity,
'b': None,
'border_mode': 'same'
},
{
'type': BatchNormLayer,
'nonlinearity': tanh
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1)
},
{
'type': BidirectionalRecurrentLayer,
'num_units': 40,
'gradient_steps': GRADIENT_STEPS,
'nonlinearity': identity,
'b': None,
'W_hid_to_hid': Identity(scale=0.5),
'learn_init': False, 'precompute_input': False
},
{
'type': BatchNormLayer,
'axes': (0, 1),
'nonlinearity': tanh
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1)
},
{
'type': Conv1DLayer,
'num_filters': 40,
'filter_length': 4,
'stride': 4,
'nonlinearity': identity,
'b': None
},
{
'type': BatchNormLayer,
'nonlinearity': tanh
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1)
},
{
'type': BidirectionalRecurrentLayer,
'num_units': 40,
'gradient_steps': GRADIENT_STEPS,
'nonlinearity': identity,
'b': None,
'W_hid_to_hid': Identity(scale=0.5),
'learn_init': False, 'precompute_input': False
},
{
'type': BatchNormLayer,
'axes': (0, 1),
'nonlinearity': tanh
},
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': T.nnet.softplus
}
]
net = Net(**net_dict_copy)
return net
def exp_d(name):
"""
e380 (tanh all the way though, default inits) with batch norm
"""
source_dict_copy = deepcopy(source_dict)
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
net_dict_copy['layers_config']= [
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1)
},
{
'type': Conv1DLayer,
'num_filters': 40,
'filter_length': 2,
'stride': 1,
'nonlinearity': identity,
'b': None,
'border_mode': 'same'
},
{
'type': BatchNormLayer,
'nonlinearity': tanh
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1)
},
{
'type': BidirectionalRecurrentLayer,
'num_units': 40,
'gradient_steps': GRADIENT_STEPS,
'nonlinearity': identity,
'b': None,
'learn_init': False, 'precompute_input': False
},
{
'type': BatchNormLayer,
'axes': (0, 1),
'nonlinearity': tanh
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1)
},
{
'type': Conv1DLayer,
'num_filters': 40,
'filter_length': 4,
'stride': 4,
'nonlinearity': identity,
'b': None
},
{
'type': BatchNormLayer,
'nonlinearity': tanh
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1)
},
{
'type': BidirectionalRecurrentLayer,
'num_units': 40,
'gradient_steps': GRADIENT_STEPS,
'nonlinearity': identity,
'b': None,
'learn_init': False, 'precompute_input': False
},
{
'type': BatchNormLayer,
'axes': (0, 1),
'nonlinearity': tanh
},
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': T.nnet.softplus
}
]
net = Net(**net_dict_copy)
return net
def exp_e(name):
"""
e380 again
"""
source_dict_copy = deepcopy(source_dict)
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
net_dict_copy['layers_config']= [
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1)
},
{
'type': Conv1DLayer,
'num_filters': 40,
'filter_length': 2,
'stride': 1,
'nonlinearity': tanh,
'border_mode': 'same'
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1)
},
{
'type': BidirectionalRecurrentLayer,
'num_units': 40,
'gradient_steps': GRADIENT_STEPS,
'nonlinearity': tanh,
'learn_init': False, 'precompute_input': False
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1)
},
{
'type': Conv1DLayer,
'num_filters': 40,
'filter_length': 4,
'stride': 4,
'nonlinearity': tanh
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1)
},
{
'type': BidirectionalRecurrentLayer,
'num_units': 40,
'gradient_steps': GRADIENT_STEPS,
'nonlinearity': tanh,
'learn_init': False, 'precompute_input': False
},
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': T.nnet.softplus
}
]
net = Net(**net_dict_copy)
return net
def exp_f(name):
# two dense layers at start, batch norm
source_dict_copy = deepcopy(source_dict)
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
net_dict_copy['layers_config']= [
{
'type': DenseLayer,
'num_units': 40,
'nonlinearity': identity,
'W': Normal(std=1),
'b': None
},
{
'type': BatchNormLayer,
'nonlinearity': tanh,
            'axes': (1,)
},
{
'type': DenseLayer,
'num_units': 40,
'nonlinearity': identity,
'b': None
},
{
'type': BatchNormLayer,
'nonlinearity': tanh,
            'axes': (1,)
},
{
'type': BidirectionalRecurrentLayer,
'num_units': 40,
'gradient_steps': GRADIENT_STEPS,
'nonlinearity': identity,
'b': None,
'learn_init': False, 'precompute_input': False
},
{
'type': BatchNormLayer,
'axes': (0, 1),
'nonlinearity': tanh
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1)
},
{
'type': Conv1DLayer,
'num_filters': 40,
'filter_length': 4,
'stride': 4,
'nonlinearity': identity,
'b': None
},
{
'type': BatchNormLayer,
'nonlinearity': tanh
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1)
},
{
'type': BidirectionalRecurrentLayer,
'num_units': 40,
'gradient_steps': GRADIENT_STEPS,
'nonlinearity': identity,
'b': None,
'learn_init': False, 'precompute_input': False
},
{
'type': BatchNormLayer,
'axes': (0, 1),
'nonlinearity': tanh
},
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': T.nnet.softplus
}
]
net = Net(**net_dict_copy)
return net
def exp_g(name):
# two dense layers at start, no batch norm
source_dict_copy = deepcopy(source_dict)
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
net_dict_copy['layers_config']= [
{
'type': DenseLayer,
'num_units': 40,
'nonlinearity': tanh,
'W': Normal(std=1)
},
{
'type': DenseLayer,
'num_units': 40,
'nonlinearity': tanh
},
{
'type': BidirectionalRecurrentLayer,
'num_units': 40,
'gradient_steps': GRADIENT_STEPS,
'nonlinearity': tanh,
'learn_init': False, 'precompute_input': False
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1)
},
{
'type': Conv1DLayer,
'num_filters': 40,
'filter_length': 4,
'stride': 4,
'nonlinearity': tanh
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1)
},
{
'type': BidirectionalRecurrentLayer,
'num_units': 40,
'gradient_steps': GRADIENT_STEPS,
'nonlinearity': tanh,
'learn_init': False, 'precompute_input': False
},
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': T.nnet.softplus
}
]
net = Net(**net_dict_copy)
return net
def exp_h(name):
# replace tanh with sigmoid
source_dict_copy = deepcopy(source_dict)
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
net_dict_copy['layers_config']= [
{
'type': DenseLayer,
'num_units': 40,
'nonlinearity': sigmoid,
'W': Normal(std=1)
},
{
'type': DenseLayer,
'num_units': 40,
'nonlinearity': sigmoid
},
{
'type': BidirectionalRecurrentLayer,
'num_units': 40,
'gradient_steps': GRADIENT_STEPS,
'nonlinearity': sigmoid,
'learn_init': False, 'precompute_input': False
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1)
},
{
'type': Conv1DLayer,
'num_filters': 40,
'filter_length': 4,
'stride': 4,
'nonlinearity': sigmoid
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1)
},
{
'type': BidirectionalRecurrentLayer,
'num_units': 40,
'gradient_steps': GRADIENT_STEPS,
'nonlinearity': sigmoid,
'learn_init': False, 'precompute_input': False
},
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': T.nnet.softplus
}
]
net = Net(**net_dict_copy)
return net
def exp_i(name):
# e59 (but with softplus output, BiRNN, gradient_steps, learn_init=false,
# conv1d stride=4 not 5), single target appliance
source_dict_copy = deepcopy(source_dict)
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source,
learning_rate=1e-2
))
net_dict_copy['layers_config']= [
{
'type': DenseLayer,
'num_units': 50,
'nonlinearity': sigmoid,
'W': Uniform(25),
'b': Uniform(25)
},
{
'type': DenseLayer,
'num_units': 50,
'nonlinearity': sigmoid,
'W': Uniform(10),
'b': Uniform(10)
},
{
'type': BidirectionalRecurrentLayer,
'num_units': 40,
'W_in_to_hid': Uniform(5),
'gradient_steps': GRADIENT_STEPS,
'nonlinearity': sigmoid,
'learn_init': False,
'precompute_input': False
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1)
},
{
'type': Conv1DLayer,
'num_filters': 20,
'filter_length': 4,
'stride': 4,
'nonlinearity': sigmoid
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1)
},
{
'type': BidirectionalRecurrentLayer,
'num_units': 80,
'W_in_to_hid': Uniform(5),
'gradient_steps': GRADIENT_STEPS,
'nonlinearity': sigmoid,
'learn_init': False,
'precompute_input': False
},
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': T.nnet.softplus
}
]
net = Net(**net_dict_copy)
return net
def main():
# EXPERIMENTS = list('abcdefghijklmnopqrstuvwxyz')
# EXPERIMENTS = list('abcdefghi')
EXPERIMENTS = list('i')
for experiment in EXPERIMENTS:
full_exp_name = NAME + experiment
func_call = init_experiment(PATH, experiment, full_exp_name)
logger = logging.getLogger(full_exp_name)
try:
net = eval(func_call)
run_experiment(net, epochs=5000)
except KeyboardInterrupt:
logger.info("KeyboardInterrupt")
break
except Exception as exception:
logger.exception("Exception")
# raise
else:
del net.source.train_activations
gc.collect()
finally:
logging.shutdown()
if __name__ == "__main__":
main()
|
mit
|
CVML/scikit-learn
|
sklearn/decomposition/tests/test_incremental_pca.py
|
297
|
8265
|
"""Tests for Incremental PCA."""
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn import datasets
from sklearn.decomposition import PCA, IncrementalPCA
iris = datasets.load_iris()
def test_incremental_pca():
# Incremental PCA on dense arrays.
X = iris.data
batch_size = X.shape[0] // 3
ipca = IncrementalPCA(n_components=2, batch_size=batch_size)
pca = PCA(n_components=2)
pca.fit_transform(X)
X_transformed = ipca.fit_transform(X)
np.testing.assert_equal(X_transformed.shape, (X.shape[0], 2))
assert_almost_equal(ipca.explained_variance_ratio_.sum(),
pca.explained_variance_ratio_.sum(), 1)
for n_components in [1, 2, X.shape[1]]:
ipca = IncrementalPCA(n_components, batch_size=batch_size)
ipca.fit(X)
cov = ipca.get_covariance()
precision = ipca.get_precision()
assert_array_almost_equal(np.dot(cov, precision),
np.eye(X.shape[1]))
def test_incremental_pca_check_projection():
# Test that the projection of data is correct.
rng = np.random.RandomState(1999)
n, p = 100, 3
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5])
Xt = 0.1 * rng.randn(1, p) + np.array([3, 4, 5])
# Get the reconstruction of the generated data X
# Note that Xt has the same "components" as X, just separated
# This is what we want to ensure is recreated correctly
Yt = IncrementalPCA(n_components=2).fit(X).transform(Xt)
# Normalize
Yt /= np.sqrt((Yt ** 2).sum())
    # Make sure that the first element of Yt is ~1; this means
    # the reconstruction worked as expected
assert_almost_equal(np.abs(Yt[0][0]), 1., 1)
def test_incremental_pca_inverse():
# Test that the projection of data can be inverted.
rng = np.random.RandomState(1999)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
# same check that we can find the original data from the transformed
# signal (since the data is almost of rank n_components)
ipca = IncrementalPCA(n_components=2, batch_size=10).fit(X)
Y = ipca.transform(X)
Y_inverse = ipca.inverse_transform(Y)
assert_almost_equal(X, Y_inverse, decimal=3)
def test_incremental_pca_validation():
# Test that n_components is >=1 and <= n_features.
X = [[0, 1], [1, 0]]
for n_components in [-1, 0, .99, 3]:
assert_raises(ValueError, IncrementalPCA(n_components,
batch_size=10).fit, X)
def test_incremental_pca_set_params():
    # Test that changing n_components via set_params between partial_fit calls
    # raises an error, and that restoring the original value works again.
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 20
X = rng.randn(n_samples, n_features)
X2 = rng.randn(n_samples, n_features)
X3 = rng.randn(n_samples, n_features)
ipca = IncrementalPCA(n_components=20)
ipca.fit(X)
# Decreasing number of components
ipca.set_params(n_components=10)
assert_raises(ValueError, ipca.partial_fit, X2)
# Increasing number of components
ipca.set_params(n_components=15)
assert_raises(ValueError, ipca.partial_fit, X3)
# Returning to original setting
ipca.set_params(n_components=20)
ipca.partial_fit(X)
def test_incremental_pca_num_features_change():
    # Test that changing the number of features between fit and partial_fit
    # raises an error.
rng = np.random.RandomState(1999)
n_samples = 100
X = rng.randn(n_samples, 20)
X2 = rng.randn(n_samples, 50)
ipca = IncrementalPCA(n_components=None)
ipca.fit(X)
assert_raises(ValueError, ipca.partial_fit, X2)
def test_incremental_pca_batch_signs():
# Test that components_ sign is stable over batch sizes.
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 3
X = rng.randn(n_samples, n_features)
all_components = []
batch_sizes = np.arange(10, 20)
for batch_size in batch_sizes:
ipca = IncrementalPCA(n_components=None, batch_size=batch_size).fit(X)
all_components.append(ipca.components_)
for i, j in zip(all_components[:-1], all_components[1:]):
assert_almost_equal(np.sign(i), np.sign(j), decimal=6)
def test_incremental_pca_batch_values():
# Test that components_ values are stable over batch sizes.
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 3
X = rng.randn(n_samples, n_features)
all_components = []
batch_sizes = np.arange(20, 40, 3)
for batch_size in batch_sizes:
ipca = IncrementalPCA(n_components=None, batch_size=batch_size).fit(X)
all_components.append(ipca.components_)
for i, j in zip(all_components[:-1], all_components[1:]):
assert_almost_equal(i, j, decimal=1)
def test_incremental_pca_partial_fit():
# Test that fit and partial_fit get equivalent results.
rng = np.random.RandomState(1999)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
# same check that we can find the original data from the transformed
# signal (since the data is almost of rank n_components)
batch_size = 10
ipca = IncrementalPCA(n_components=2, batch_size=batch_size).fit(X)
pipca = IncrementalPCA(n_components=2, batch_size=batch_size)
# Add one to make sure endpoint is included
batch_itr = np.arange(0, n + 1, batch_size)
for i, j in zip(batch_itr[:-1], batch_itr[1:]):
pipca.partial_fit(X[i:j, :])
assert_almost_equal(ipca.components_, pipca.components_, decimal=3)
def test_incremental_pca_against_pca_iris():
    # Test that IncrementalPCA and PCA agree approximately (up to a sign flip).
X = iris.data
Y_pca = PCA(n_components=2).fit_transform(X)
Y_ipca = IncrementalPCA(n_components=2, batch_size=25).fit_transform(X)
assert_almost_equal(np.abs(Y_pca), np.abs(Y_ipca), 1)
def test_incremental_pca_against_pca_random_data():
    # Test that IncrementalPCA and PCA agree approximately (up to a sign flip).
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 3
X = rng.randn(n_samples, n_features) + 5 * rng.rand(1, n_features)
Y_pca = PCA(n_components=3).fit_transform(X)
Y_ipca = IncrementalPCA(n_components=3, batch_size=25).fit_transform(X)
assert_almost_equal(np.abs(Y_pca), np.abs(Y_ipca), 1)
def test_explained_variances():
# Test that PCA and IncrementalPCA calculations match
X = datasets.make_low_rank_matrix(1000, 100, tail_strength=0.,
effective_rank=10, random_state=1999)
prec = 3
n_samples, n_features = X.shape
for nc in [None, 99]:
pca = PCA(n_components=nc).fit(X)
ipca = IncrementalPCA(n_components=nc, batch_size=100).fit(X)
assert_almost_equal(pca.explained_variance_, ipca.explained_variance_,
decimal=prec)
assert_almost_equal(pca.explained_variance_ratio_,
ipca.explained_variance_ratio_, decimal=prec)
assert_almost_equal(pca.noise_variance_, ipca.noise_variance_,
decimal=prec)
def test_whitening():
# Test that PCA and IncrementalPCA transforms match to sign flip.
X = datasets.make_low_rank_matrix(1000, 10, tail_strength=0.,
effective_rank=2, random_state=1999)
prec = 3
n_samples, n_features = X.shape
for nc in [None, 9]:
pca = PCA(whiten=True, n_components=nc).fit(X)
ipca = IncrementalPCA(whiten=True, n_components=nc,
batch_size=250).fit(X)
Xt_pca = pca.transform(X)
Xt_ipca = ipca.transform(X)
assert_almost_equal(np.abs(Xt_pca), np.abs(Xt_ipca), decimal=prec)
Xinv_ipca = ipca.inverse_transform(Xt_ipca)
Xinv_pca = pca.inverse_transform(Xt_pca)
assert_almost_equal(X, Xinv_ipca, decimal=prec)
assert_almost_equal(X, Xinv_pca, decimal=prec)
assert_almost_equal(Xinv_pca, Xinv_ipca, decimal=prec)
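# A minimal usage sketch (not part of the test suite; shapes and batch size are
# arbitrary choices): it mirrors the fit/partial_fit equivalence exercised above
# by streaming mini-batches through IncrementalPCA.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    X_demo = rng.randn(200, 10)
    ipca_demo = IncrementalPCA(n_components=3)
    for start in range(0, X_demo.shape[0], 50):
        ipca_demo.partial_fit(X_demo[start:start + 50])  # one mini-batch at a time
    X_reduced = ipca_demo.transform(X_demo)              # project onto 3 components
    print(X_reduced.shape)                               # (200, 3)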
|
bsd-3-clause
|
lancezlin/ml_template_py
|
lib/python2.7/site-packages/pandas/io/sas/sas7bdat.py
|
7
|
26963
|
"""
Read SAS7BDAT files
Based on code written by Jared Hobbs:
https://bitbucket.org/jaredhobbs/sas7bdat
See also:
https://github.com/BioStatMatt/sas7bdat
Partial documentation of the file format:
https://cran.r-project.org/web/packages/sas7bdat/vignettes/sas7bdat.pdf
Reference for binary data compression:
http://collaboration.cmc.ec.gc.ca/science/rpn/biblio/ddj/Website/articles/CUJ/1992/9210/ross/ross.htm
"""
import pandas as pd
from pandas import compat
from pandas.io.common import get_filepath_or_buffer, BaseIterator
import numpy as np
import struct
import pandas.io.sas.sas_constants as const
from pandas.io.sas.saslib import Parser
class _subheader_pointer(object):
pass
class _column(object):
pass
# SAS7BDATReader reads a SAS data file stored in SAS7BDAT format.
class SAS7BDATReader(BaseIterator):
"""
Read SAS files in SAS7BDAT format.
Parameters
----------
path_or_buf : path name or buffer
Name of SAS file or file-like object pointing to SAS file
contents.
index : column identifier, defaults to None
Column to use as index.
convert_dates : boolean, defaults to True
        Attempt to convert dates to Pandas datetime values. Note that some
        SAS date formats may not be supported.
blank_missing : boolean, defaults to True
Convert empty strings to missing values (SAS uses blanks to
indicate missing character variables).
chunksize : int, defaults to None
Return SAS7BDATReader object for iterations, returns chunks
with given number of lines.
encoding : string, defaults to None
String encoding.
convert_text : bool, defaults to True
If False, text variables are left as raw bytes.
convert_header_text : bool, defaults to True
If False, header text, including column names, are left as raw
bytes.
"""
def __init__(self, path_or_buf, index=None, convert_dates=True,
blank_missing=True, chunksize=None, encoding=None,
convert_text=True, convert_header_text=True):
self.index = index
self.convert_dates = convert_dates
self.blank_missing = blank_missing
self.chunksize = chunksize
self.encoding = encoding
self.convert_text = convert_text
self.convert_header_text = convert_header_text
self.default_encoding = "latin-1"
self.compression = ""
self.column_names_strings = []
self.column_names = []
self.column_types = []
self.column_formats = []
self.columns = []
self._current_page_data_subheader_pointers = []
self._cached_page = None
self._column_data_lengths = []
self._column_data_offsets = []
        self._current_row_in_file_index = 0
        self._current_row_on_page_index = 0
self._path_or_buf, _, _ = get_filepath_or_buffer(path_or_buf)
if isinstance(self._path_or_buf, compat.string_types):
self._path_or_buf = open(self._path_or_buf, 'rb')
self.handle = self._path_or_buf
self._get_properties()
self._parse_metadata()
def close(self):
try:
self.handle.close()
except AttributeError:
pass
def _get_properties(self):
# Check magic number
self._path_or_buf.seek(0)
self._cached_page = self._path_or_buf.read(288)
if self._cached_page[0:len(const.magic)] != const.magic:
self.close()
raise ValueError("magic number mismatch (not a SAS file?)")
# Get alignment information
align1, align2 = 0, 0
buf = self._read_bytes(const.align_1_offset, const.align_1_length)
if buf == const.u64_byte_checker_value:
align2 = const.align_2_value
self.U64 = True
self._int_length = 8
self._page_bit_offset = const.page_bit_offset_x64
self._subheader_pointer_length = const.subheader_pointer_length_x64
else:
self.U64 = False
self._page_bit_offset = const.page_bit_offset_x86
self._subheader_pointer_length = const.subheader_pointer_length_x86
self._int_length = 4
buf = self._read_bytes(const.align_2_offset, const.align_2_length)
if buf == const.align_1_checker_value:
align1 = const.align_2_value
total_align = align1 + align2
# Get endianness information
buf = self._read_bytes(const.endianness_offset,
const.endianness_length)
if buf == b'\x01':
self.byte_order = "<"
else:
self.byte_order = ">"
# Get encoding information
buf = self._read_bytes(const.encoding_offset, const.encoding_length)[0]
if buf in const.encoding_names:
self.file_encoding = const.encoding_names[buf]
else:
self.file_encoding = "unknown (code=%s)" % str(buf)
# Get platform information
buf = self._read_bytes(const.platform_offset, const.platform_length)
if buf == b'1':
self.platform = "unix"
elif buf == b'2':
self.platform = "windows"
else:
self.platform = "unknown"
buf = self._read_bytes(const.dataset_offset, const.dataset_length)
self.name = buf.rstrip(b'\x00 ')
if self.convert_header_text:
self.name = self.name.decode(
self.encoding or self.default_encoding)
buf = self._read_bytes(const.file_type_offset, const.file_type_length)
self.file_type = buf.rstrip(b'\x00 ')
if self.convert_header_text:
self.file_type = self.file_type.decode(
self.encoding or self.default_encoding)
# Timestamp is epoch 01/01/1960
epoch = pd.datetime(1960, 1, 1)
x = self._read_float(const.date_created_offset + align1,
const.date_created_length)
self.date_created = epoch + pd.to_timedelta(x, unit='s')
x = self._read_float(const.date_modified_offset + align1,
const.date_modified_length)
self.date_modified = epoch + pd.to_timedelta(x, unit='s')
self.header_length = self._read_int(const.header_size_offset + align1,
const.header_size_length)
# Read the rest of the header into cached_page.
buf = self._path_or_buf.read(self.header_length - 288)
self._cached_page += buf
if len(self._cached_page) != self.header_length:
self.close()
raise ValueError("The SAS7BDAT file appears to be truncated.")
self._page_length = self._read_int(const.page_size_offset + align1,
const.page_size_length)
self._page_count = self._read_int(const.page_count_offset + align1,
const.page_count_length)
buf = self._read_bytes(const.sas_release_offset + total_align,
const.sas_release_length)
self.sas_release = buf.rstrip(b'\x00 ')
if self.convert_header_text:
self.sas_release = self.sas_release.decode(
self.encoding or self.default_encoding)
buf = self._read_bytes(const.sas_server_type_offset + total_align,
const.sas_server_type_length)
self.server_type = buf.rstrip(b'\x00 ')
if self.convert_header_text:
self.server_type = self.server_type.decode(
self.encoding or self.default_encoding)
buf = self._read_bytes(const.os_version_number_offset + total_align,
const.os_version_number_length)
self.os_version = buf.rstrip(b'\x00 ')
if self.convert_header_text:
self.os_version = self.os_version.decode(
self.encoding or self.default_encoding)
buf = self._read_bytes(const.os_name_offset + total_align,
const.os_name_length)
buf = buf.rstrip(b'\x00 ')
if len(buf) > 0:
self.os_name = buf.decode(self.encoding or self.default_encoding)
else:
buf = self._read_bytes(const.os_maker_offset + total_align,
const.os_maker_length)
self.os_name = buf.rstrip(b'\x00 ')
if self.convert_header_text:
self.os_name = self.os_name.decode(
self.encoding or self.default_encoding)
def __next__(self):
da = self.read(nrows=self.chunksize or 1)
if da is None:
raise StopIteration
return da
# Read a single float of the given width (4 or 8).
def _read_float(self, offset, width):
if width not in (4, 8):
self.close()
raise ValueError("invalid float width")
buf = self._read_bytes(offset, width)
fd = "f" if width == 4 else "d"
return struct.unpack(self.byte_order + fd, buf)[0]
# Read a single signed integer of the given width (1, 2, 4 or 8).
def _read_int(self, offset, width):
if width not in (1, 2, 4, 8):
self.close()
raise ValueError("invalid int width")
buf = self._read_bytes(offset, width)
it = {1: "b", 2: "h", 4: "l", 8: "q"}[width]
iv = struct.unpack(self.byte_order + it, buf)[0]
return iv
def _read_bytes(self, offset, length):
if self._cached_page is None:
self._path_or_buf.seek(offset)
buf = self._path_or_buf.read(length)
if len(buf) < length:
self.close()
msg = "Unable to read {:d} bytes from file position {:d}."
raise ValueError(msg.format(length, offset))
return buf
else:
if offset + length > len(self._cached_page):
self.close()
raise ValueError("The cached page is too small.")
return self._cached_page[offset:offset + length]
def _parse_metadata(self):
done = False
while not done:
self._cached_page = self._path_or_buf.read(self._page_length)
if len(self._cached_page) <= 0:
break
if len(self._cached_page) != self._page_length:
self.close()
raise ValueError(
"Failed to read a meta data page from the SAS file.")
done = self._process_page_meta()
def _process_page_meta(self):
self._read_page_header()
pt = [const.page_meta_type, const.page_amd_type] + const.page_mix_types
if self._current_page_type in pt:
self._process_page_metadata()
return ((self._current_page_type in [256] + const.page_mix_types) or
(self._current_page_data_subheader_pointers is not None))
def _read_page_header(self):
bit_offset = self._page_bit_offset
tx = const.page_type_offset + bit_offset
self._current_page_type = self._read_int(tx, const.page_type_length)
tx = const.block_count_offset + bit_offset
self._current_page_block_count = self._read_int(
tx, const.block_count_length)
tx = const.subheader_count_offset + bit_offset
self._current_page_subheaders_count = (
self._read_int(tx, const.subheader_count_length))
def _process_page_metadata(self):
bit_offset = self._page_bit_offset
for i in range(self._current_page_subheaders_count):
pointer = self._process_subheader_pointers(
const.subheader_pointers_offset + bit_offset, i)
if pointer.length == 0:
continue
if pointer.compression == const.truncated_subheader_id:
continue
subheader_signature = self._read_subheader_signature(
pointer.offset)
subheader_index = (
self._get_subheader_index(subheader_signature,
pointer.compression, pointer.ptype))
self._process_subheader(subheader_index, pointer)
def _get_subheader_index(self, signature, compression, ptype):
index = const.subheader_signature_to_index.get(signature)
if index is None:
f1 = ((compression == const.compressed_subheader_id) or
(compression == 0))
f2 = (ptype == const.compressed_subheader_type)
if (self.compression != "") and f1 and f2:
index = const.index.dataSubheaderIndex
else:
self.close()
raise ValueError("Unknown subheader signature")
return index
def _process_subheader_pointers(self, offset, subheader_pointer_index):
subheader_pointer_length = self._subheader_pointer_length
total_offset = (offset +
subheader_pointer_length * subheader_pointer_index)
subheader_offset = self._read_int(total_offset, self._int_length)
total_offset += self._int_length
subheader_length = self._read_int(total_offset, self._int_length)
total_offset += self._int_length
subheader_compression = self._read_int(total_offset, 1)
total_offset += 1
subheader_type = self._read_int(total_offset, 1)
x = _subheader_pointer()
x.offset = subheader_offset
x.length = subheader_length
x.compression = subheader_compression
x.ptype = subheader_type
return x
def _read_subheader_signature(self, offset):
subheader_signature = self._read_bytes(offset, self._int_length)
return subheader_signature
def _process_subheader(self, subheader_index, pointer):
offset = pointer.offset
length = pointer.length
if subheader_index == const.index.rowSizeIndex:
processor = self._process_rowsize_subheader
elif subheader_index == const.index.columnSizeIndex:
processor = self._process_columnsize_subheader
elif subheader_index == const.index.columnTextIndex:
processor = self._process_columntext_subheader
elif subheader_index == const.index.columnNameIndex:
processor = self._process_columnname_subheader
elif subheader_index == const.index.columnAttributesIndex:
processor = self._process_columnattributes_subheader
elif subheader_index == const.index.formatAndLabelIndex:
processor = self._process_format_subheader
elif subheader_index == const.index.columnListIndex:
processor = self._process_columnlist_subheader
elif subheader_index == const.index.subheaderCountsIndex:
processor = self._process_subheader_counts
elif subheader_index == const.index.dataSubheaderIndex:
self._current_page_data_subheader_pointers.append(pointer)
return
else:
raise ValueError("unknown subheader index")
processor(offset, length)
def _process_rowsize_subheader(self, offset, length):
int_len = self._int_length
lcs_offset = offset
lcp_offset = offset
if self.U64:
lcs_offset += 682
lcp_offset += 706
else:
lcs_offset += 354
lcp_offset += 378
self.row_length = self._read_int(
offset + const.row_length_offset_multiplier * int_len, int_len)
self.row_count = self._read_int(
offset + const.row_count_offset_multiplier * int_len, int_len)
self.col_count_p1 = self._read_int(
offset + const.col_count_p1_multiplier * int_len, int_len)
self.col_count_p2 = self._read_int(
offset + const.col_count_p2_multiplier * int_len, int_len)
mx = const.row_count_on_mix_page_offset_multiplier * int_len
self._mix_page_row_count = self._read_int(offset + mx, int_len)
self._lcs = self._read_int(lcs_offset, 2)
self._lcp = self._read_int(lcp_offset, 2)
def _process_columnsize_subheader(self, offset, length):
int_len = self._int_length
offset += int_len
self.column_count = self._read_int(offset, int_len)
if (self.col_count_p1 + self.col_count_p2 !=
self.column_count):
print("Warning: column count mismatch (%d + %d != %d)\n",
self.col_count_p1, self.col_count_p2, self.column_count)
# Unknown purpose
def _process_subheader_counts(self, offset, length):
pass
def _process_columntext_subheader(self, offset, length):
offset += self._int_length
text_block_size = self._read_int(offset, const.text_block_size_length)
buf = self._read_bytes(offset, text_block_size)
cname_raw = buf[0:text_block_size].rstrip(b"\x00 ")
cname = cname_raw
if self.convert_header_text:
cname = cname.decode(self.encoding or self.default_encoding)
self.column_names_strings.append(cname)
if len(self.column_names_strings) == 1:
compression_literal = ""
for cl in const.compression_literals:
if cl in cname_raw:
compression_literal = cl
self.compression = compression_literal
offset -= self._int_length
offset1 = offset + 16
if self.U64:
offset1 += 4
buf = self._read_bytes(offset1, self._lcp)
compression_literal = buf.rstrip(b"\x00")
if compression_literal == "":
self._lcs = 0
offset1 = offset + 32
if self.U64:
offset1 += 4
buf = self._read_bytes(offset1, self._lcp)
self.creator_proc = buf[0:self._lcp]
elif compression_literal == const.rle_compression:
offset1 = offset + 40
if self.U64:
offset1 += 4
buf = self._read_bytes(offset1, self._lcp)
self.creator_proc = buf[0:self._lcp]
elif self._lcs > 0:
self._lcp = 0
offset1 = offset + 16
if self.U64:
offset1 += 4
buf = self._read_bytes(offset1, self._lcs)
self.creator_proc = buf[0:self._lcp]
if self.convert_header_text:
if hasattr(self, "creator_proc"):
self.creator_proc = self.creator_proc.decode(
self.encoding or self.default_encoding)
def _process_columnname_subheader(self, offset, length):
int_len = self._int_length
offset += int_len
column_name_pointers_count = (length - 2 * int_len - 12) // 8
for i in range(column_name_pointers_count):
text_subheader = offset + const.column_name_pointer_length * \
(i + 1) + const.column_name_text_subheader_offset
col_name_offset = offset + const.column_name_pointer_length * \
(i + 1) + const.column_name_offset_offset
col_name_length = offset + const.column_name_pointer_length * \
(i + 1) + const.column_name_length_offset
idx = self._read_int(
text_subheader, const.column_name_text_subheader_length)
col_offset = self._read_int(
col_name_offset, const.column_name_offset_length)
col_len = self._read_int(
col_name_length, const.column_name_length_length)
name_str = self.column_names_strings[idx]
self.column_names.append(name_str[col_offset:col_offset + col_len])
def _process_columnattributes_subheader(self, offset, length):
int_len = self._int_length
column_attributes_vectors_count = (
length - 2 * int_len - 12) // (int_len + 8)
self.column_types = np.empty(
column_attributes_vectors_count, dtype=np.dtype('S1'))
self._column_data_lengths = np.empty(
column_attributes_vectors_count, dtype=np.int64)
self._column_data_offsets = np.empty(
column_attributes_vectors_count, dtype=np.int64)
for i in range(column_attributes_vectors_count):
col_data_offset = (offset + int_len +
const.column_data_offset_offset +
i * (int_len + 8))
col_data_len = (offset + 2 * int_len +
const.column_data_length_offset +
i * (int_len + 8))
col_types = (offset + 2 * int_len +
const.column_type_offset + i * (int_len + 8))
x = self._read_int(col_data_offset, int_len)
self._column_data_offsets[i] = x
x = self._read_int(col_data_len, const.column_data_length_length)
self._column_data_lengths[i] = x
x = self._read_int(col_types, const.column_type_length)
if x == 1:
self.column_types[i] = b'd'
else:
self.column_types[i] = b's'
def _process_columnlist_subheader(self, offset, length):
# unknown purpose
pass
def _process_format_subheader(self, offset, length):
int_len = self._int_length
text_subheader_format = (
offset +
const.column_format_text_subheader_index_offset +
3 * int_len)
col_format_offset = (offset +
const.column_format_offset_offset +
3 * int_len)
col_format_len = (offset +
const.column_format_length_offset +
3 * int_len)
text_subheader_label = (
offset +
const.column_label_text_subheader_index_offset +
3 * int_len)
col_label_offset = (offset +
const.column_label_offset_offset +
3 * int_len)
col_label_len = offset + const.column_label_length_offset + 3 * int_len
x = self._read_int(text_subheader_format,
const.column_format_text_subheader_index_length)
format_idx = min(x, len(self.column_names_strings) - 1)
format_start = self._read_int(
col_format_offset, const.column_format_offset_length)
format_len = self._read_int(
col_format_len, const.column_format_length_length)
label_idx = self._read_int(
text_subheader_label,
const.column_label_text_subheader_index_length)
label_idx = min(label_idx, len(self.column_names_strings) - 1)
label_start = self._read_int(
col_label_offset, const.column_label_offset_length)
label_len = self._read_int(col_label_len,
const.column_label_length_length)
label_names = self.column_names_strings[label_idx]
column_label = label_names[label_start: label_start + label_len]
format_names = self.column_names_strings[format_idx]
column_format = format_names[format_start: format_start + format_len]
current_column_number = len(self.columns)
col = _column()
col.col_id = current_column_number
col.name = self.column_names[current_column_number]
col.label = column_label
col.format = column_format
col.ctype = self.column_types[current_column_number]
col.length = self._column_data_lengths[current_column_number]
self.column_formats.append(column_format)
self.columns.append(col)
def read(self, nrows=None):
if (nrows is None) and (self.chunksize is not None):
nrows = self.chunksize
elif nrows is None:
nrows = self.row_count
if self._current_row_in_file_index >= self.row_count:
return None
m = self.row_count - self._current_row_in_file_index
if nrows > m:
nrows = m
nd = (self.column_types == b'd').sum()
ns = (self.column_types == b's').sum()
self._string_chunk = np.empty((ns, nrows), dtype=np.object)
self._byte_chunk = np.empty((nd, 8 * nrows), dtype=np.uint8)
self._current_row_in_chunk_index = 0
p = Parser(self)
p.read(nrows)
rslt = self._chunk_to_dataframe()
if self.index is not None:
rslt = rslt.set_index(self.index)
return rslt
def _read_next_page(self):
self._current_page_data_subheader_pointers = []
self._cached_page = self._path_or_buf.read(self._page_length)
if len(self._cached_page) <= 0:
return True
elif len(self._cached_page) != self._page_length:
self.close()
msg = ("failed to read complete page from file "
"(read {:d} of {:d} bytes)")
raise ValueError(msg.format(len(self._cached_page),
self._page_length))
self._read_page_header()
if self._current_page_type == const.page_meta_type:
self._process_page_metadata()
        pt = [const.page_meta_type, const.page_data_type]
        pt += const.page_mix_types
if self._current_page_type not in pt:
return self._read_next_page()
return False
def _chunk_to_dataframe(self):
n = self._current_row_in_chunk_index
m = self._current_row_in_file_index
ix = range(m - n, m)
rslt = pd.DataFrame(index=ix)
js, jb = 0, 0
for j in range(self.column_count):
name = self.column_names[j]
if self.column_types[j] == b'd':
rslt[name] = self._byte_chunk[jb, :].view(
dtype=self.byte_order + 'd')
rslt[name] = np.asarray(rslt[name], dtype=np.float64)
if self.convert_dates and (self.column_formats[j] == "MMDDYY"):
epoch = pd.datetime(1960, 1, 1)
rslt[name] = epoch + pd.to_timedelta(rslt[name], unit='d')
jb += 1
elif self.column_types[j] == b's':
rslt[name] = self._string_chunk[js, :]
if self.convert_text and (self.encoding is not None):
rslt[name] = rslt[name].str.decode(
self.encoding or self.default_encoding)
if self.blank_missing:
ii = rslt[name].str.len() == 0
rslt.loc[ii, name] = np.nan
js += 1
else:
self.close()
raise ValueError("unknown column type %s" %
self.column_types[j])
return rslt
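# A minimal usage sketch (illustrative only; "example.sas7bdat" is a hypothetical
# path and the chunksize is an arbitrary choice). In practice this reader is
# usually reached through pandas.read_sas.
if __name__ == "__main__":
    reader = SAS7BDATReader("example.sas7bdat", chunksize=10000,
                            convert_dates=True, blank_missing=True)
    chunks = [chunk for chunk in reader]       # BaseIterator yields DataFrames
    frame = pd.concat(chunks, ignore_index=True)
    reader.close()
    print(frame.shape)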
|
mit
|
Aasmi/scikit-learn
|
sklearn/neighbors/approximate.py
|
128
|
22351
|
"""Approximate nearest neighbor search"""
# Author: Maheshakya Wijewardena <[email protected]>
# Joel Nothman <[email protected]>
import numpy as np
import warnings
from scipy import sparse
from .base import KNeighborsMixin, RadiusNeighborsMixin
from ..base import BaseEstimator
from ..utils.validation import check_array
from ..utils import check_random_state
from ..metrics.pairwise import pairwise_distances
from ..random_projection import GaussianRandomProjection
__all__ = ["LSHForest"]
HASH_DTYPE = '>u4'
MAX_HASH_SIZE = np.dtype(HASH_DTYPE).itemsize * 8
def _find_matching_indices(tree, bin_X, left_mask, right_mask):
"""Finds indices in sorted array of integers.
Most significant h bits in the binary representations of the
integers are matched with the items' most significant h bits.
"""
left_index = np.searchsorted(tree, bin_X & left_mask)
right_index = np.searchsorted(tree, bin_X | right_mask,
side='right')
return left_index, right_index
def _find_longest_prefix_match(tree, bin_X, hash_size,
left_masks, right_masks):
"""Find the longest prefix match in tree for each query in bin_X
Most significant bits are considered as the prefix.
"""
hi = np.empty_like(bin_X, dtype=np.intp)
hi.fill(hash_size)
lo = np.zeros_like(bin_X, dtype=np.intp)
res = np.empty_like(bin_X, dtype=np.intp)
left_idx, right_idx = _find_matching_indices(tree, bin_X,
left_masks[hi],
right_masks[hi])
found = right_idx > left_idx
res[found] = lo[found] = hash_size
r = np.arange(bin_X.shape[0])
kept = r[lo < hi] # indices remaining in bin_X mask
while kept.shape[0]:
mid = (lo.take(kept) + hi.take(kept)) // 2
left_idx, right_idx = _find_matching_indices(tree,
bin_X.take(kept),
left_masks[mid],
right_masks[mid])
found = right_idx > left_idx
mid_found = mid[found]
lo[kept[found]] = mid_found + 1
res[kept[found]] = mid_found
hi[kept[~found]] = mid[~found]
kept = r[lo < hi]
return res
class ProjectionToHashMixin(object):
"""Turn a transformed real-valued array into a hash"""
@staticmethod
def _to_hash(projected):
if projected.shape[1] % 8 != 0:
raise ValueError('Require reduced dimensionality to be a multiple '
'of 8 for hashing')
# XXX: perhaps non-copying operation better
out = np.packbits((projected > 0).astype(int)).view(dtype=HASH_DTYPE)
return out.reshape(projected.shape[0], -1)
def fit_transform(self, X, y=None):
self.fit(X)
return self.transform(X)
def transform(self, X, y=None):
return self._to_hash(super(ProjectionToHashMixin, self).transform(X))
class GaussianRandomProjectionHash(ProjectionToHashMixin,
GaussianRandomProjection):
"""Use GaussianRandomProjection to produce a cosine LSH fingerprint"""
def __init__(self,
n_components=8,
random_state=None):
super(GaussianRandomProjectionHash, self).__init__(
n_components=n_components,
random_state=random_state)
def _array_of_arrays(list_of_arrays):
"""Creates an array of array from list of arrays."""
out = np.empty(len(list_of_arrays), dtype=object)
out[:] = list_of_arrays
return out
class LSHForest(BaseEstimator, KNeighborsMixin, RadiusNeighborsMixin):
"""Performs approximate nearest neighbor search using LSH forest.
LSH Forest: Locality Sensitive Hashing forest [1] is an alternative
    to vanilla approximate nearest neighbor search methods.
LSH forest data structure has been implemented using sorted
arrays and binary search and 32 bit fixed-length hashes.
Random projection is used as the hash family which approximates
cosine distance.
The cosine distance is defined as ``1 - cosine_similarity``: the lowest
value is 0 (identical point) but it is bounded above by 2 for the farthest
points. Its value does not depend on the norm of the vector points but
only on their relative angles.
Read more in the :ref:`User Guide <approximate_nearest_neighbors>`.
Parameters
----------
n_estimators : int (default = 10)
Number of trees in the LSH Forest.
min_hash_match : int (default = 4)
lowest hash length to be searched when candidate selection is
performed for nearest neighbors.
n_candidates : int (default = 10)
Minimum number of candidates evaluated per estimator, assuming enough
items meet the `min_hash_match` constraint.
n_neighbors : int (default = 5)
Number of neighbors to be returned from query function when
it is not provided to the :meth:`kneighbors` method.
    radius : float, optional (default = 1.0)
        Radius from the data point to its neighbors. This is the parameter
        space to use by default for the :meth:`radius_neighbors` queries.
radius_cutoff_ratio : float, optional (default = 0.9)
        A value in the range [0, 1]. Radius neighbors will be searched until
the ratio between total neighbors within the radius and the total
candidates becomes less than this value unless it is terminated by
hash length reaching `min_hash_match`.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
hash_functions_ : list of GaussianRandomProjectionHash objects
Hash function g(p,x) for a tree is an array of 32 randomly generated
        float arrays with the same dimension as the data set. This array is
stored in GaussianRandomProjectionHash object and can be obtained
from ``components_`` attribute.
trees_ : array, shape (n_estimators, n_samples)
Each tree (corresponding to a hash function) contains an array of
sorted hashed values. The array representation may change in future
versions.
original_indices_ : array, shape (n_estimators, n_samples)
Original indices of sorted hashed values in the fitted index.
References
----------
.. [1] M. Bawa, T. Condie and P. Ganesan, "LSH Forest: Self-Tuning
Indexes for Similarity Search", WWW '05 Proceedings of the
14th international conference on World Wide Web, 651-660,
2005.
Examples
--------
>>> from sklearn.neighbors import LSHForest
>>> X_train = [[5, 5, 2], [21, 5, 5], [1, 1, 1], [8, 9, 1], [6, 10, 2]]
>>> X_test = [[9, 1, 6], [3, 1, 10], [7, 10, 3]]
>>> lshf = LSHForest()
>>> lshf.fit(X_train) # doctest: +NORMALIZE_WHITESPACE
LSHForest(min_hash_match=4, n_candidates=50, n_estimators=10,
n_neighbors=5, radius=1.0, radius_cutoff_ratio=0.9,
random_state=None)
>>> distances, indices = lshf.kneighbors(X_test, n_neighbors=2)
>>> distances # doctest: +ELLIPSIS
array([[ 0.069..., 0.149...],
[ 0.229..., 0.481...],
[ 0.004..., 0.014...]])
>>> indices
array([[1, 2],
[2, 0],
[4, 0]])
"""
def __init__(self, n_estimators=10, radius=1.0, n_candidates=50,
n_neighbors=5, min_hash_match=4, radius_cutoff_ratio=.9,
random_state=None):
self.n_estimators = n_estimators
self.radius = radius
self.random_state = random_state
self.n_candidates = n_candidates
self.n_neighbors = n_neighbors
self.min_hash_match = min_hash_match
self.radius_cutoff_ratio = radius_cutoff_ratio
def _compute_distances(self, query, candidates):
"""Computes the cosine distance.
Distance is from the query to points in the candidates array.
Returns argsort of distances in the candidates
array and sorted distances.
"""
if candidates.shape == (0,):
# needed since _fit_X[np.array([])] doesn't work if _fit_X sparse
return np.empty(0, dtype=np.int), np.empty(0, dtype=float)
if sparse.issparse(self._fit_X):
candidate_X = self._fit_X[candidates]
else:
candidate_X = self._fit_X.take(candidates, axis=0, mode='clip')
distances = pairwise_distances(query, candidate_X,
metric='cosine')[0]
distance_positions = np.argsort(distances)
distances = distances.take(distance_positions, mode='clip', axis=0)
return distance_positions, distances
def _generate_masks(self):
"""Creates left and right masks for all hash lengths."""
tri_size = MAX_HASH_SIZE + 1
# Called once on fitting, output is independent of hashes
left_mask = np.tril(np.ones((tri_size, tri_size), dtype=int))[:, 1:]
right_mask = left_mask[::-1, ::-1]
self._left_mask = np.packbits(left_mask).view(dtype=HASH_DTYPE)
self._right_mask = np.packbits(right_mask).view(dtype=HASH_DTYPE)
def _get_candidates(self, query, max_depth, bin_queries, n_neighbors):
"""Performs the Synchronous ascending phase.
Returns an array of candidates, their distance ranks and
distances.
"""
index_size = self._fit_X.shape[0]
# Number of candidates considered including duplicates
# XXX: not sure whether this is being calculated correctly wrt
# duplicates from different iterations through a single tree
n_candidates = 0
candidate_set = set()
min_candidates = self.n_candidates * self.n_estimators
while (max_depth > self.min_hash_match and
(n_candidates < min_candidates or
len(candidate_set) < n_neighbors)):
left_mask = self._left_mask[max_depth]
right_mask = self._right_mask[max_depth]
for i in range(self.n_estimators):
start, stop = _find_matching_indices(self.trees_[i],
bin_queries[i],
left_mask, right_mask)
n_candidates += stop - start
candidate_set.update(
self.original_indices_[i][start:stop].tolist())
max_depth -= 1
candidates = np.fromiter(candidate_set, count=len(candidate_set),
dtype=np.intp)
        # If there are not enough candidates, fill up uniformly from the
        # unselected indices.
if candidates.shape[0] < n_neighbors:
warnings.warn(
"Number of candidates is not sufficient to retrieve"
" %i neighbors with"
" min_hash_match = %i. Candidates are filled up"
" uniformly from unselected"
" indices." % (n_neighbors, self.min_hash_match))
remaining = np.setdiff1d(np.arange(0, index_size), candidates)
to_fill = n_neighbors - candidates.shape[0]
candidates = np.concatenate((candidates, remaining[:to_fill]))
ranks, distances = self._compute_distances(query,
candidates.astype(int))
return (candidates[ranks[:n_neighbors]],
distances[:n_neighbors])
def _get_radius_neighbors(self, query, max_depth, bin_queries, radius):
"""Finds radius neighbors from the candidates obtained.
Their distances from query are smaller than radius.
Returns radius neighbors and distances.
"""
ratio_within_radius = 1
threshold = 1 - self.radius_cutoff_ratio
total_candidates = np.array([], dtype=int)
total_neighbors = np.array([], dtype=int)
total_distances = np.array([], dtype=float)
while (max_depth > self.min_hash_match and
ratio_within_radius > threshold):
left_mask = self._left_mask[max_depth]
right_mask = self._right_mask[max_depth]
candidates = []
for i in range(self.n_estimators):
start, stop = _find_matching_indices(self.trees_[i],
bin_queries[i],
left_mask, right_mask)
candidates.extend(
self.original_indices_[i][start:stop].tolist())
candidates = np.setdiff1d(candidates, total_candidates)
total_candidates = np.append(total_candidates, candidates)
ranks, distances = self._compute_distances(query, candidates)
m = np.searchsorted(distances, radius, side='right')
positions = np.searchsorted(total_distances, distances[:m])
total_neighbors = np.insert(total_neighbors, positions,
candidates[ranks[:m]])
total_distances = np.insert(total_distances, positions,
distances[:m])
ratio_within_radius = (total_neighbors.shape[0] /
float(total_candidates.shape[0]))
max_depth = max_depth - 1
return total_neighbors, total_distances
def fit(self, X, y=None):
"""Fit the LSH forest on the data.
This creates binary hashes of input data points by getting the
dot product of input points and hash_function then
transforming the projection into a binary string array based
on the sign (positive/negative) of the projection.
A sorted array of binary hashes is created.
Parameters
----------
X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
self : object
Returns self.
"""
self._fit_X = check_array(X, accept_sparse='csr')
# Creates a g(p,x) for each tree
self.hash_functions_ = []
self.trees_ = []
self.original_indices_ = []
rng = check_random_state(self.random_state)
int_max = np.iinfo(np.int32).max
for i in range(self.n_estimators):
# This is g(p,x) for a particular tree.
# Builds a single tree. Hashing is done on an array of data points.
# `GaussianRandomProjection` is used for hashing.
            # `n_components` = hash size and `n_features` = n_dim.
hasher = GaussianRandomProjectionHash(MAX_HASH_SIZE,
rng.randint(0, int_max))
hashes = hasher.fit_transform(self._fit_X)[:, 0]
original_index = np.argsort(hashes)
bin_hashes = hashes[original_index]
self.original_indices_.append(original_index)
self.trees_.append(bin_hashes)
self.hash_functions_.append(hasher)
self._generate_masks()
return self
def _query(self, X):
"""Performs descending phase to find maximum depth."""
# Calculate hashes of shape (n_samples, n_estimators, [hash_size])
bin_queries = np.asarray([hasher.transform(X)[:, 0]
for hasher in self.hash_functions_])
bin_queries = np.rollaxis(bin_queries, 1)
# descend phase
depths = [_find_longest_prefix_match(tree, tree_queries, MAX_HASH_SIZE,
self._left_mask, self._right_mask)
for tree, tree_queries in zip(self.trees_,
np.rollaxis(bin_queries, 1))]
return bin_queries, np.max(depths, axis=0)
def kneighbors(self, X, n_neighbors=None, return_distance=True):
"""Returns n_neighbors of approximate nearest neighbors.
Parameters
----------
X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single query.
    n_neighbors : int, optional (default = None)
Number of neighbors required. If not provided, this will
return the number specified at the initialization.
    return_distance : boolean, optional (default = True)
Returns the distances of neighbors if set to True.
Returns
-------
dist : array, shape (n_samples, n_neighbors)
Array representing the cosine distances to each point,
only present if return_distance=True.
ind : array, shape (n_samples, n_neighbors)
Indices of the approximate nearest points in the population
matrix.
"""
if not hasattr(self, 'hash_functions_'):
raise ValueError("estimator should be fitted.")
if n_neighbors is None:
n_neighbors = self.n_neighbors
X = check_array(X, accept_sparse='csr')
neighbors, distances = [], []
bin_queries, max_depth = self._query(X)
for i in range(X.shape[0]):
neighs, dists = self._get_candidates(X[i], max_depth[i],
bin_queries[i],
n_neighbors)
neighbors.append(neighs)
distances.append(dists)
if return_distance:
return np.array(distances), np.array(neighbors)
else:
return np.array(neighbors)
def radius_neighbors(self, X, radius=None, return_distance=True):
"""Finds the neighbors within a given radius of a point or points.
Return the indices and distances of some points from the dataset
lying in a ball with size ``radius`` around the points of the query
array. Points lying on the boundary are included in the results.
The result points are *not* necessarily sorted by distance to their
query point.
LSH Forest being an approximate method, some true neighbors from the
indexed dataset might be missing from the results.
Parameters
----------
X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single query.
radius : float
Limiting distance of neighbors to return.
(default is the value passed to the constructor).
    return_distance : boolean, optional (default = True)
Returns the distances of neighbors if set to True.
Returns
-------
dist : array, shape (n_samples,) of arrays
Each element is an array representing the cosine distances
to some points found within ``radius`` of the respective query.
Only present if ``return_distance=True``.
ind : array, shape (n_samples,) of arrays
Each element is an array of indices for neighbors within ``radius``
of the respective query.
"""
if not hasattr(self, 'hash_functions_'):
raise ValueError("estimator should be fitted.")
if radius is None:
radius = self.radius
X = check_array(X, accept_sparse='csr')
neighbors, distances = [], []
bin_queries, max_depth = self._query(X)
for i in range(X.shape[0]):
neighs, dists = self._get_radius_neighbors(X[i], max_depth[i],
bin_queries[i], radius)
neighbors.append(neighs)
distances.append(dists)
if return_distance:
return _array_of_arrays(distances), _array_of_arrays(neighbors)
else:
return _array_of_arrays(neighbors)
def partial_fit(self, X, y=None):
"""
Inserts new data into the already fitted LSH Forest.
Cost is proportional to new total size, so additions
should be batched.
Parameters
----------
X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
New data point to be inserted into the LSH Forest.
"""
X = check_array(X, accept_sparse='csr')
if not hasattr(self, 'hash_functions_'):
return self.fit(X)
if X.shape[1] != self._fit_X.shape[1]:
raise ValueError("Number of features in X and"
" fitted array does not match.")
n_samples = X.shape[0]
n_indexed = self._fit_X.shape[0]
for i in range(self.n_estimators):
bin_X = self.hash_functions_[i].transform(X)[:, 0]
# gets the position to be added in the tree.
positions = self.trees_[i].searchsorted(bin_X)
# adds the hashed value into the tree.
self.trees_[i] = np.insert(self.trees_[i],
positions, bin_X)
# add the entry into the original_indices_.
self.original_indices_[i] = np.insert(self.original_indices_[i],
positions,
np.arange(n_indexed,
n_indexed +
n_samples))
# adds the entry into the input_array.
if sparse.issparse(X) or sparse.issparse(self._fit_X):
self._fit_X = sparse.vstack((self._fit_X, X))
else:
self._fit_X = np.row_stack((self._fit_X, X))
return self
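# A minimal usage sketch (illustrative only, random data; parameter values are
# arbitrary choices): build an index, run a radius query, then grow the index
# with partial_fit.
if __name__ == "__main__":
    rng = np.random.RandomState(42)
    X_index = rng.rand(100, 10)
    lshf = LSHForest(n_estimators=10, random_state=42).fit(X_index)
    dists, ind = lshf.radius_neighbors(rng.rand(2, 10), radius=0.3)
    print([len(i) for i in ind])         # number of neighbors per query point
    lshf.partial_fit(rng.rand(20, 10))   # append 20 more points to the index
    print(lshf._fit_X.shape)             # (120, 10); private attr, shown only to
                                         # confirm the index grew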
|
bsd-3-clause
|
MatthieuBizien/scikit-learn
|
examples/model_selection/plot_roc_crossval.py
|
12
|
3461
|
"""
=============================================================
Receiver Operating Characteristic (ROC) with cross validation
=============================================================
Example of Receiver Operating Characteristic (ROC) metric to evaluate
classifier output quality using cross-validation.
ROC curves typically feature true positive rate on the Y axis, and false
positive rate on the X axis. This means that the top left corner of the plot is
the "ideal" point - a false positive rate of zero, and a true positive rate of
one. This is not very realistic, but it does mean that a larger area under the
curve (AUC) is usually better.
The "steepness" of ROC curves is also important, since it is ideal to maximize
the true positive rate while minimizing the false positive rate.
This example shows the ROC response of different datasets, created from K-fold
cross-validation. Taking all of these curves, it is possible to calculate the
mean area under curve, and see the variance of the curve when the
training set is split into different subsets. This roughly shows how the
classifier output is affected by changes in the training data, and how
different the splits generated by K-fold cross-validation are from one another.
.. note::
See also :func:`sklearn.metrics.auc_score`,
:func:`sklearn.model_selection.cross_val_score`,
:ref:`example_model_selection_plot_roc.py`,
"""
print(__doc__)
import numpy as np
from scipy import interp
import matplotlib.pyplot as plt
from itertools import cycle
from sklearn import svm, datasets
from sklearn.metrics import roc_curve, auc
from sklearn.model_selection import StratifiedKFold
###############################################################################
# Data IO and generation
# import some data to play with
iris = datasets.load_iris()
X = iris.data
y = iris.target
X, y = X[y != 2], y[y != 2]
n_samples, n_features = X.shape
# Add noisy features
random_state = np.random.RandomState(0)
X = np.c_[X, random_state.randn(n_samples, 200 * n_features)]
###############################################################################
# Classification and ROC analysis
# Run classifier with cross-validation and plot ROC curves
cv = StratifiedKFold(n_splits=6)
classifier = svm.SVC(kernel='linear', probability=True,
random_state=random_state)
mean_tpr = 0.0
mean_fpr = np.linspace(0, 1, 100)
colors = cycle(['cyan', 'indigo', 'seagreen', 'yellow', 'blue', 'darkorange'])
lw = 2
i = 0
for (train, test), color in zip(cv.split(X, y), colors):
probas_ = classifier.fit(X[train], y[train]).predict_proba(X[test])
    # Compute ROC curve and area under the curve
fpr, tpr, thresholds = roc_curve(y[test], probas_[:, 1])
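    # Interpolate this fold's TPR onto the shared mean_fpr grid so the folds can
    # be averaged point-wise; mean_tpr[0] is then reset so the mean curve starts
    # at (0, 0).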
mean_tpr += interp(mean_fpr, fpr, tpr)
mean_tpr[0] = 0.0
roc_auc = auc(fpr, tpr)
plt.plot(fpr, tpr, lw=lw, color=color,
label='ROC fold %d (area = %0.2f)' % (i, roc_auc))
i += 1
plt.plot([0, 1], [0, 1], linestyle='--', lw=lw, color='k',
label='Luck')
mean_tpr /= cv.get_n_splits(X, y)
mean_tpr[-1] = 1.0
mean_auc = auc(mean_fpr, mean_tpr)
plt.plot(mean_fpr, mean_tpr, color='g', linestyle='--',
label='Mean ROC (area = %0.2f)' % mean_auc, lw=lw)
plt.xlim([-0.05, 1.05])
plt.ylim([-0.05, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic example')
plt.legend(loc="lower right")
plt.show()
|
bsd-3-clause
|
iamkingmaker/trading-with-python
|
lib/vixFutures.py
|
79
|
4157
|
# -*- coding: utf-8 -*-
"""
set of tools for working with VIX futures
@author: Jev Kuznetsov
Licence: GPL v2
"""
import datetime as dt
from pandas import *
import os
import urllib2
#from csvDatabase import HistDataCsv
m_codes = dict(zip(range(1,13),['F','G','H','J','K','M','N','Q','U','V','X','Z'])) #month codes of the futures
monthToCode = dict(zip(range(1,len(m_codes)+1),m_codes))
def getCboeData(year,month):
''' download data from cboe '''
fName = "CFE_{0}{1}_VX.csv".format(m_codes[month],str(year)[-2:])
urlStr = "http://cfe.cboe.com/Publish/ScheduledTask/MktData/datahouse/{0}".format(fName)
try:
lines = urllib2.urlopen(urlStr).readlines()
except Exception, e:
s = "Failed to download:\n{0}".format(e);
print s
    # first column is the date, second is the futures symbol; skip these
header = lines[0].strip().split(',')[2:]
dates = []
data = [[] for i in range(len(header))]
for line in lines[1:]:
fields = line.strip().split(',')
dates.append(datetime.strptime( fields[0],'%m/%d/%Y'))
for i,field in enumerate(fields[2:]):
data[i].append(float(field))
data = dict(zip(header,data))
df = DataFrame(data=data, index=Index(dates))
return df
class Future(object):
''' vix future class '''
def __init__(self,year,month):
self.year = year
self.month = month
self.expiration = self._calculateExpirationDate()
self.cboeData = None # daily cboe data
self.intradayDb = None # intraday database (csv)
def _calculateExpirationDate(self):
        ''' calculate the expiration date of the future (not 100% reliable) '''
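        # VIX futures settle 30 days before the third Friday of the following
        # month (the SPX option expiration used in the VIX calculation): step to
        # the first day of month+1, locate the third Friday, then back up 30 days.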
t = dt.date(self.year,self.month,1)+datetools.relativedelta(months=1)
offset = datetools.Week(weekday=4)
        if t.weekday() != 4:
t_new = t+3*offset
else:
t_new = t+2*offset
t_new = t_new-datetools.relativedelta(days=30)
return t_new
def getCboeData(self, dataDir=None, forceUpdate=False):
        ''' download daily (interday) CBOE data.
        Specify dataDir to save the data to csv; the data will not be
        downloaded if the csv file is already present. This can be
        overridden by setting forceUpdate to True.
        '''
if dataDir is not None:
fileFound = os.path.exists(self._csvFilename(dataDir))
if forceUpdate or not fileFound:
self.cboeData = getCboeData(self.year, self.month)
self.to_csv(dataDir)
else:
self.cboeData = DataFrame.from_csv(self._csvFilename(dataDir))
else:
self.cboeData = getCboeData(self.year, self.month)
return self.cboeData
def updateIntradayDb(self,dbDir):
#self.intradayDb =
pass
def to_csv(self,dataDir):
''' save to csv in given dir. Filename is automatically generated '''
self.cboeData.to_csv(self._csvFilename(dataDir))
@property
def dates(self):
''' trading days derived from cboe data '''
if self.cboeData is not None:
dates = [d.date() for d in self.cboeData.index]
else:
dates = None
return dates
def _csvFilename(self,dataDir):
fName = "VIX_future_%i_%i.csv" % (self.year, self.month)
return os.path.join(dataDir,fName)
def __repr__(self):
s = 'Vix future [%i-%i (%s)] exp: %s\n' % (self.year, self.month,monthToCode[self.month], self.expiration.strftime("%B, %d %Y (%A)"))
s+= 'Cboe data: %i days'% len(self.cboeData) if self.cboeData is not None else 'No data downloaded yet'
return s
if __name__ == '__main__':
print 'testing vix futures'
year = 2012
month = 12
f = Future(year,month)
f.getCboeData()
print f
|
bsd-3-clause
|
BoltzmannBrain/nupic
|
external/linux32/lib/python2.6/site-packages/matplotlib/pylab.py
|
70
|
10245
|
"""
This is a procedural interface to the matplotlib object-oriented
plotting library.
The following plotting commands are provided; the majority have
Matlab(TM) analogs and similar arguments.
_Plotting commands
acorr - plot the autocorrelation function
annotate - annotate something in the figure
arrow - add an arrow to the axes
axes - Create a new axes
axhline - draw a horizontal line across axes
axvline - draw a vertical line across axes
axhspan - draw a horizontal bar across axes
axvspan - draw a vertical bar across axes
axis - Set or return the current axis limits
bar - make a bar chart
barh - a horizontal bar chart
broken_barh - a set of horizontal bars with gaps
box - set the axes frame on/off state
boxplot - make a box and whisker plot
cla - clear current axes
clabel - label a contour plot
clf - clear a figure window
clim - adjust the color limits of the current image
close - close a figure window
colorbar - add a colorbar to the current figure
cohere - make a plot of coherence
contour - make a contour plot
contourf - make a filled contour plot
csd - make a plot of cross spectral density
delaxes - delete an axes from the current figure
draw - Force a redraw of the current figure
errorbar - make an errorbar graph
figlegend - make legend on the figure rather than the axes
figimage - make a figure image
figtext - add text in figure coords
figure - create or change active figure
fill - make filled polygons
findobj - recursively find all objects matching some criteria
gca - return the current axes
gcf - return the current figure
gci - get the current image, or None
getp - get a handle graphics property
grid - set whether gridding is on
hist - make a histogram
hold - set the axes hold state
ioff - turn interaction mode off
ion - turn interaction mode on
isinteractive - return True if interaction mode is on
imread - load image file into array
imshow - plot image data
ishold - return the hold state of the current axes
legend - make an axes legend
loglog - a log log plot
matshow - display a matrix in a new figure preserving aspect
pcolor - make a pseudocolor plot
pcolormesh - make a pseudocolor plot using a quadrilateral mesh
pie - make a pie chart
plot - make a line plot
plot_date - plot dates
plotfile - plot column data from an ASCII tab/space/comma delimited file
pie - pie charts
polar - make a polar plot on a PolarAxes
psd - make a plot of power spectral density
quiver - make a direction field (arrows) plot
rc - control the default params
rgrids - customize the radial grids and labels for polar
savefig - save the current figure
scatter - make a scatter plot
setp - set a handle graphics property
semilogx - log x axis
semilogy - log y axis
show - show the figures
specgram - a spectrogram plot
spy - plot sparsity pattern using markers or image
stem - make a stem plot
subplot - make a subplot (numrows, numcols, axesnum)
subplots_adjust - change the params controlling the subplot positions of current figure
subplot_tool - launch the subplot configuration tool
suptitle - add a figure title
table - add a table to the plot
text - add some text at location x,y to the current axes
thetagrids - customize the radial theta grids and labels for polar
title - add a title to the current axes
xcorr - plot the autocorrelation function of x and y
xlim - set/get the xlimits
ylim - set/get the ylimits
xticks - set/get the xticks
yticks - set/get the yticks
xlabel - add an xlabel to the current axes
ylabel - add a ylabel to the current axes
autumn - set the default colormap to autumn
bone - set the default colormap to bone
cool - set the default colormap to cool
copper - set the default colormap to copper
flag - set the default colormap to flag
gray - set the default colormap to gray
hot - set the default colormap to hot
hsv - set the default colormap to hsv
jet - set the default colormap to jet
pink - set the default colormap to pink
prism - set the default colormap to prism
spring - set the default colormap to spring
summer - set the default colormap to summer
winter - set the default colormap to winter
spectral - set the default colormap to spectral
_Event handling
connect - register an event handler
disconnect - remove a connected event handler
_Matrix commands
cumprod - the cumulative product along a dimension
cumsum - the cumulative sum along a dimension
detrend - remove the mean or best fit line from an array
diag - the k-th diagonal of matrix
diff - the n-th difference of an array
eig - the eigenvalues and eigen vectors of v
eye - a matrix where the k-th diagonal is ones, else zero
find - return the indices where a condition is nonzero
fliplr - flip a matrix left/right (reverse the order of its columns)
flipud - flip a matrix up/down (reverse the order of its rows)
linspace - a linear spaced vector of N values from min to max inclusive
logspace - a log spaced vector of N values from min to max inclusive
meshgrid - repeat x and y to make regular matrices
ones - an array of ones
rand - an array from the uniform distribution [0,1]
randn - an array from the normal distribution
rot90 - rotate matrix k*90 degrees counterclockwise
squeeze - squeeze an array removing any dimensions of length 1
tri - a triangular matrix
tril - a lower triangular matrix
triu - an upper triangular matrix
vander - the Vandermonde matrix of vector x
svd - singular value decomposition
zeros - a matrix of zeros
_Probability
levypdf - The levy probability density function from the char. func.
normpdf - The Gaussian probability density function
rand - random numbers from the uniform distribution
randn - random numbers from the normal distribution
_Statistics
corrcoef - correlation coefficient
cov - covariance matrix
amax - the maximum along dimension m
mean - the mean along dimension m
median - the median along dimension m
amin - the minimum along dimension m
norm - the norm of vector x
prod - the product along dimension m
ptp - the max-min along dimension m
std - the standard deviation along dimension m
asum - the sum along dimension m
_Time series analysis
bartlett - M-point Bartlett window
blackman - M-point Blackman window
cohere - the coherence using average periodogram
csd - the cross spectral density using average periodogram
fft - the fast Fourier transform of vector x
hamming - M-point Hamming window
hanning - M-point Hanning window
hist - compute the histogram of x
kaiser - M length Kaiser window
psd - the power spectral density using average periodiogram
sinc - the sinc function of array x
_Dates
date2num - convert python datetimes to numeric representation
drange - create an array of numbers for date plots
num2date - convert numeric type (float days since 0001) to datetime
_Other
angle - the angle of a complex array
griddata - interpolate irregularly distributed data to a regular grid
load - load ASCII data into array
polyfit - fit x, y to an n-th order polynomial
polyval - evaluate an n-th order polynomial
roots - the roots of the polynomial coefficients in p
save - save an array to an ASCII file
trapz - trapezoidal integration
__end
"""
import sys, warnings
from cbook import flatten, is_string_like, exception_to_str, popd, \
silent_list, iterable, dedent
import numpy as np
from numpy import ma
from matplotlib import mpl # pulls in most modules
from matplotlib.dates import date2num, num2date,\
datestr2num, strpdate2num, drange,\
epoch2num, num2epoch, mx2num,\
DateFormatter, IndexDateFormatter, DateLocator,\
RRuleLocator, YearLocator, MonthLocator, WeekdayLocator,\
DayLocator, HourLocator, MinuteLocator, SecondLocator,\
rrule, MO, TU, WE, TH, FR, SA, SU, YEARLY, MONTHLY,\
WEEKLY, DAILY, HOURLY, MINUTELY, SECONDLY, relativedelta
import matplotlib.dates
# bring all the symbols in so folks can import them from
# pylab in one fell swoop
from matplotlib.mlab import window_hanning, window_none,\
conv, detrend, detrend_mean, detrend_none, detrend_linear,\
polyfit, polyval, entropy, normpdf, griddata,\
levypdf, find, trapz, prepca, rem, norm, orth, rank,\
sqrtm, prctile, center_matrix, rk4, exp_safe, amap,\
sum_flat, mean_flat, rms_flat, l1norm, l2norm, norm, frange,\
diagonal_matrix, base_repr, binary_repr, log2, ispower2,\
bivariate_normal, load, save
from matplotlib.mlab import stineman_interp, slopes, \
stineman_interp, inside_poly, poly_below, poly_between, \
is_closed_polygon, path_length, distances_along_curve, vector_lengths
from numpy import *
from numpy.fft import *
from numpy.random import *
from numpy.linalg import *
from matplotlib.mlab import window_hanning, window_none, conv, detrend, demean, \
detrend_mean, detrend_none, detrend_linear, entropy, normpdf, levypdf, \
find, longest_contiguous_ones, longest_ones, prepca, prctile, prctile_rank, \
center_matrix, rk4, bivariate_normal, get_xyz_where, get_sparse_matrix, dist, \
dist_point_to_segment, segments_intersect, fftsurr, liaupunov, movavg, \
save, load, exp_safe, \
amap, rms_flat, l1norm, l2norm, norm_flat, frange, diagonal_matrix, identity, \
base_repr, binary_repr, log2, ispower2, fromfunction_kw, rem, norm, orth, rank, sqrtm,\
mfuncC, approx_real, rec_append_field, rec_drop_fields, rec_join, csv2rec, rec2csv, isvector
from matplotlib.pyplot import *
# provide the recommended module abbrevs in the pylab namespace
import matplotlib.pyplot as plt
import numpy as np
|
agpl-3.0
|
yaukwankiu/armor
|
geometry/regrid_backup.py
|
1
|
4680
|
#regrid.py
# to redraw the grids, to interpolate, etc
"""
USE:
from armor import pattern
from armor.geometry import regrid
reload(pattern); reload(regrid)
a = pattern.a ; c = pattern.c ; a.load(); c.load(); e = regrid.regrid(a,c)
e.show()
"""
""" input: DBZ object, new_horizontal_dimension, new_vertical_dimension,
, new coords for the lowerleft corner
output: new DBZ object"""
import numpy as np
from armor import defaultParameters
from armor.defaultParameters import *
from armor import pattern
DBZ = pattern.DBZ
def interpolate(arr_old, arr_new, I_old, J_old):
"""
input: array, i, j
output: value
        (int(x), int(y)+1)  +     +  (int(x)+1, int(y)+1)
                               (x,y)
        (int(x), int(y))    +     +  (int(x)+1, int(y))
be careful - floor(x)=ceil(x)=x for integer x,
so we really want floor(x) and floor(x)+1
"""
I = I_old.copy()
J = J_old.copy()
arr_new2 = arr_new * 0
arr_new2 += (-999)
height_new, width_new = arr_new.shape
height_old, width_old = arr_old.shape
# set all out-of-bounds to (0,0) for convenience
I = (I>=0) * (I<height_old-1) * I #e.g. i>=0 and i<=4 for i=[0,1,2,3,4], width=5
J = (J>=0) * (J<width_old -1) * J
# the loopings are necessary since we don't know beforehand where the (i_old, j_old)
# would land
for i in range(height_new):
for j in range(width_new):
i0 = int(I[i,j])
j0 = int(J[i,j])
i1 = i0 + 1
j1 = j0 + 1
            i_frac = I[i, j] % 1
            j_frac = J[i, j] % 1
f00 = arr_old[i0,j0]
f01 = arr_old[i0,j1]
f10 = arr_old[i1,j0]
f11 = arr_old[i1,j1]
            arr_new2[i, j] = (1-i_frac)*(1-j_frac) * f00 + \
                             (1-i_frac)*(  j_frac) * f01 + \
                             (  i_frac)*(1-j_frac) * f10 + \
                             (  i_frac)*(  j_frac) * f11
return arr_new2
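# Illustrative sanity check for the bilinear weights above (a sketch with
# made-up numbers, not part of the original module): querying a 2x2 array at
# the fractional position (0.5, 0.5) weights its four corners equally:
#   arr_old = np.array([[0., 1.], [2., 3.]])
#   interpolate(arr_old, np.zeros((1, 1)), np.array([[0.5]]), np.array([[0.5]]))
#   # -> array([[1.5]])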
def regrid(a, b):
"""
a is the object to be resized
b provides the relevant shape information for the process
"""
gridSizeOld = a.matrix.shape
gridSizeNew = b.matrix.shape
height, width = gridSizeNew
X, Y = np.meshgrid(range(width), range(height))
J, I = X, Y
# I, J = I_new, J_new
a_new = DBZ(name=a.name+"rescaled to "+str(gridSizeNew),
matrix = np.zeros(gridSizeNew),
lowerLeftCornerLatitudeLongitude=b.lowerLeftCornerLatitudeLongitude,
)
latOld, longOld = a.lowerLeftCornerLatitudeLongitude
latNew, longNew = b.lowerLeftCornerLatitudeLongitude
latDegreePerGridOld = 1.*(a.upperRightCornerLatitudeLongitude[0]-latOld)/gridSizeOld[0]
longDegreePerGridOld= 1.*(a.upperRightCornerLatitudeLongitude[1]-longOld)/gridSizeOld[1]
latDegreePerGridNew = 1.*(b.upperRightCornerLatitudeLongitude[0]-latOld)/gridSizeNew[0]
    longDegreePerGridNew= 1.*(b.upperRightCornerLatitudeLongitude[1]-longOld)/gridSizeNew[1]
#I_old = (1.* I/gridSizeNew[0]+latNew -latOld) * gridSizeOld[0] # this is wrong
#J_old = (1.* J/gridSizeNew[0]+latNew -latOld) * gridSizeOld[0] # we should convert
# with the degree per grid
# as the replacement below
I_old = (1.* I*latDegreePerGridNew +latNew -latOld) / latDegreePerGridOld
J_old = (1.* J*longDegreePerGridNew +longNew -longOld) /longDegreePerGridOld
# debug
print I, J
print I_old, J_old, I_old.shape
print "latDegreePerGridOld , longDegreePerGridOld", latDegreePerGridOld , longDegreePerGridOld
print "latDegreePerGridNew , longDegreePerGridNew", latDegreePerGridNew , longDegreePerGridNew
print "gridSizeOld", gridSizeOld
print "gridSizeNew", gridSizeNew
print "I_old[0,0], J_old[0,0]", I_old[0,0], J_old[0,0]
testmat = np.zeros((1000,1000))
for ii in range(I_old.shape[0]):
for jj in range(I_old.shape[1]):
            testmat[int(I_old[ii,jj]*(I_old[ii,jj]>0)), int(J_old[ii,jj]*(J_old[ii,jj]>0))] = 1
from matplotlib import pyplot as plt
plt.imshow(testmat)
plt.show()
# end debug
arr_old = a.matrix
arr_new = np.zeros((height, width))
a_new.matrix = interpolate(arr_old, arr_new, I_old, J_old)
return a_new
########################
# samples
a = pattern.a
c = pattern.c
|
cc0-1.0
|
JaviMerino/trappy
|
trappy/plot_utils.py
|
2
|
9332
|
# Copyright 2015-2016 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Small functions to help with plots"""
# pylint: disable=star-args
from matplotlib import pyplot as plt
import os
import re
from trappy.wa import SysfsExtractor
GOLDEN_RATIO = 1.618034
def normalize_title(title, opt_title):
    """Return a string that contains the title, prefixed by opt_title if
    opt_title is not the empty string
    See test_normalize_title() for usage
    """
    if opt_title != "":
title = opt_title + " - " + title
return title
def set_lim(lim, get_lim_f, set_lim_f):
    """Set x or y limits of the plot
lim can be a tuple containing the limits or the string "default"
or "range". "default" does nothing and uses matplotlib default.
"range" extends the current margin by 10%. This is useful since
the default xlim and ylim of the plots sometimes make it harder to
see data that is just in the margin.
"""
if lim == "default":
return
if lim == "range":
cur_lim = get_lim_f()
lim = (cur_lim[0] - 0.1 * (cur_lim[1] - cur_lim[0]),
cur_lim[1] + 0.1 * (cur_lim[1] - cur_lim[0]))
set_lim_f(lim[0], lim[1])
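# For example, if get_lim_f() returns (0, 10) and lim is "range", the span is 10,
# so set_lim() extends each side by 1.0 and calls set_lim_f(-1.0, 11.0).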
def set_xlim(ax, xlim):
"""Set the xlim of the plot
See set_lim() for the details
"""
set_lim(xlim, ax.get_xlim, ax.set_xlim)
def set_ylim(ax, ylim):
"""Set the ylim of the plot
See set_lim() for the details
"""
set_lim(ylim, ax.get_ylim, ax.set_ylim)
def pre_plot_setup(width=None, height=None, ncols=1, nrows=1):
"""initialize a figure
width and height are the height and width of each row of plots.
For 1x1 plots, that's the height and width of the plot. This
function should be called before any calls to plot()
"""
if height is None:
if width is None:
height = 6
width = 10
else:
height = width / GOLDEN_RATIO
else:
if width is None:
width = height * GOLDEN_RATIO
height *= nrows
_, axis = plt.subplots(ncols=ncols, nrows=nrows, figsize=(width, height))
    # Needed for multirow plots to not overlap with each other
plt.tight_layout(h_pad=3.5)
return axis
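# Sizing example: pre_plot_setup(width=10, nrows=2) derives a per-row height of
# 10 / GOLDEN_RATIO (about 6.2), multiplies it by nrows, and creates the figure
# with figsize=(10, ~12.4).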
def post_plot_setup(ax, title="", xlabel=None, ylabel=None, xlim="default",
ylim="range"):
"""Set xlabel, ylabel title, xlim and ylim of the plot
This has to be called after calls to .plot(). The default ylim is
    to extend it by 10% because the matplotlib default makes it hard to see
    values that are close to the margins
"""
if xlabel is not None:
ax.set_xlabel(xlabel)
if ylabel is not None:
ax.set_ylabel(ylabel)
if title:
ax.set_title(title)
set_ylim(ax, ylim)
set_xlim(ax, xlim)
def number_freq_plots(runs, map_label):
"""Calculate the number of plots needed for allfreq plots and frequency
histogram plots
"""
num_cpu_plots = len(map_label)
has_devfreq_data = False
for run in runs:
if len(run.devfreq_in_power.data_frame) > 0:
has_devfreq_data = True
break
num_freq_plots = num_cpu_plots
if has_devfreq_data:
num_freq_plots += 1
return num_freq_plots
def plot_temperature(runs, width=None, height=None, ylim="range"):
"""Plot temperatures
runs is an array of FTrace() instances. Extract the control_temp
from the governor data and plot the temperatures reported by the
thermal framework. The governor doesn't track temperature when
it's off, so the thermal framework trace is more reliable.
"""
ax = pre_plot_setup(width, height)
for run in runs:
current_temp = run.thermal_governor.data_frame["current_temperature"]
delta_temp = run.thermal_governor.data_frame["delta_temperature"]
control_series = (current_temp + delta_temp) / 1000
try:
run.thermal.plot_temperature(control_temperature=control_series,
ax=ax, legend_label=run.name)
except ValueError:
run.thermal_governor.plot_temperature(ax=ax, legend_label=run.name)
post_plot_setup(ax, title="Temperature", ylim=ylim)
plt.legend(loc="best")
def plot_hist(data, ax, title, unit, bins, xlabel, xlim, ylim):
"""Plot a histogram"""
mean = data.mean()
std = data.std()
title += " (mean = {:.2f}{}, std = {:.2f})".format(mean, unit, std)
xlabel += " ({})".format(unit)
data.hist(ax=ax, bins=bins)
post_plot_setup(ax, title=title, xlabel=xlabel, ylabel="count", xlim=xlim,
ylim=ylim)
def plot_load(runs, map_label, width=None, height=None):
"""Make a multiplot of all the loads"""
num_runs = len(runs)
axis = pre_plot_setup(width=width, height=height, ncols=num_runs, nrows=2)
if num_runs == 1:
axis = [axis]
else:
axis = zip(*axis)
for ax, run in zip(axis, runs):
run.plot_load(map_label, title=run.name, ax=ax[0])
run.plot_normalized_load(map_label, title=run.name, ax=ax[1])
def plot_allfreqs(runs, map_label, width=None, height=None):
"""Make a multicolumn plots of the allfreqs plots of each run"""
num_runs = len(runs)
nrows = number_freq_plots(runs, map_label)
axis = pre_plot_setup(width=width, height=height, nrows=nrows,
ncols=num_runs)
if num_runs == 1:
if nrows == 1:
axis = [[axis]]
else:
axis = [axis]
elif nrows == 1:
axis = [[ax] for ax in axis]
else:
axis = zip(*axis)
for ax, run in zip(axis, runs):
run.plot_allfreqs(map_label, ax=ax)
def plot_controller(runs, width=None, height=None):
"""Make a multicolumn plot of the pid controller of each run"""
num_runs = len(runs)
axis = pre_plot_setup(width=width, height=height, ncols=num_runs)
if num_runs == 1:
axis = [axis]
for ax, run in zip(axis, runs):
run.pid_controller.plot_controller(title=run.name, ax=ax)
def plot_weighted_input_power(runs, actor_order, width=None, height=None):
"""Make a multicolumn plot of the weighted input power of each run"""
actor_weights = []
for run in runs:
run_path = os.path.dirname(run.trace_path)
sysfs = SysfsExtractor(run_path)
thermal_params = sysfs.get_parameters()
sorted_weights = []
for param in sorted(thermal_params):
if re.match(r"cdev\d+_weight", param):
sorted_weights.append(thermal_params[param])
actor_weights.append(zip(actor_order, sorted_weights))
# Do nothing if we don't have actor weights for any run
if not any(actor_weights):
return
num_runs = len(runs)
axis = pre_plot_setup(width=width, height=height, ncols=num_runs)
if num_runs == 1:
axis = [axis]
for ax, run, weights in zip(axis, runs, actor_weights):
run.thermal_governor.plot_weighted_input_power(weights, title=run.name,
ax=ax)
def plot_input_power(runs, actor_order, width=None, height=None):
"""Make a multicolumn plot of the input power of each run"""
num_runs = len(runs)
axis = pre_plot_setup(width=width, height=height, ncols=num_runs)
if num_runs == 1:
axis = [axis]
for ax, run in zip(axis, runs):
run.thermal_governor.plot_input_power(actor_order, title=run.name,
ax=ax)
plot_weighted_input_power(runs, actor_order, width, height)
def plot_output_power(runs, actor_order, width=None, height=None):
"""Make a multicolumn plot of the output power of each run"""
num_runs = len(runs)
axis = pre_plot_setup(width=width, height=height, ncols=num_runs)
if num_runs == 1:
axis = [axis]
for ax, run in zip(axis, runs):
run.thermal_governor.plot_output_power(actor_order, title=run.name,
ax=ax)
def plot_freq_hists(runs, map_label):
"""Plot frequency histograms of multiple runs"""
num_runs = len(runs)
nrows = 2 * number_freq_plots(runs, map_label)
axis = pre_plot_setup(ncols=num_runs, nrows=nrows)
if num_runs == 1:
axis = [axis]
else:
axis = zip(*axis)
for ax, run in zip(axis, runs):
run.plot_freq_hists(map_label, ax=ax)
def plot_temperature_hist(runs):
"""Plot temperature histograms for all the runs"""
num_runs = 0
for run in runs:
if len(run.thermal.data_frame):
num_runs += 1
if num_runs == 0:
return
axis = pre_plot_setup(ncols=num_runs)
if num_runs == 1:
axis = [axis]
for ax, run in zip(axis, runs):
run.thermal.plot_temperature_hist(ax, run.name)
|
apache-2.0
|
mkuron/espresso
|
doc/tutorials/04-lattice_boltzmann/scripts/part4_plot.py
|
1
|
1625
|
#
# Copyright (C) 2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import numpy as np
import matplotlib.pyplot as plt
plt.ion()
# get the simulated data points
sim = np.array(node_v_list)
# x-axis: in ESPResSo, LB nodes are shifted by 0.5 agrid
pos = [(i + 0.5) * agrid for i in range(len(sim))]
# analytical curve: the box width is not box_l, but box_l - 2 * wall_offset
# also, the velocity is zero beyond the walls
ana = np.array([max(0, force_density / 2. / visc
* ((box_l - 2 * wall_offset)**2 / 4. - (x - box_l / 2.)**2)) for x in pos])
# estimate a scaling factor as the mean ratio of analytical to simulated values at the non-zero points
nonzero = np.nonzero(sim)
fit = ana[nonzero] / sim[nonzero]
scaling = np.mean(fit)
# plot
plt.figure(figsize=(10, 6), dpi=80)
plt.plot(pos, ana, label='analytical')
plt.plot(pos, scaling * sim, '+', label='simulated')
plt.xlabel(r'$x$-axis ($\AA$)', fontsize=20)
plt.ylabel(r'LB fluid velocity $u_y(x)$ ($\AA/s$)', fontsize=20)
plt.legend(fontsize=20)
plt.show()
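# Note: this plotting part assumes that node_v_list, agrid, force_density, visc,
# box_l and wall_offset are already defined (e.g. by the simulation part of the
# tutorial). The analytical curve is the plane Poiseuille profile
#   u_y(x) = force_density / (2 * visc) * ((w / 2)**2 - (x - box_l / 2)**2)
# with channel width w = box_l - 2 * wall_offset, clipped to zero inside the walls.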
|
gpl-3.0
|
nbarba/py3DRec
|
uncalibrated_rec.py
|
1
|
21294
|
import numpy as np
from numpy.linalg import inv
from numpy.linalg import svd
from numpy.linalg import eig
from numpy.linalg import det
from scipy.optimize import leastsq, least_squares, fmin
import pandas as pd
import time
import random
import os.path
import argparse
from epipolar_geometry import EpipolarGeometry
from model import RecModel
from image_sequence import ImageSequence
class UncalibratedReconstruction:
'''
Class that contains high level methods to perform 3D reconstruction from a sequence of uncalibrated images.
'''
def __init__(self, sequence_length, width, height, triang_method=0, opt_triang=0, opt_f=1, self_foc=0):
'''
Constructor
Args:
sequence_length: number of images (views)
width: width of the images
height: height of the images
triang_method: triangulation method (0: standard, 1: polynomial)
opt_triang: optimize initial 3D point estimate
opt_f: optimize fundamental matrix estimation
self_foc: for self-calibration, type of focal length expected across views (0: fixed, 1: varying )
'''
self._eg_utils = EpipolarGeometry()
        # things that are needed throughout the class
self._sequence_length = sequence_length
self._width = width
self._height = height
self._mm = (width + height) / 2
self._triangulation_method = triang_method
self._optimize_triangulation = opt_triang
self._optimize_f = opt_f
self._self_foc = self_foc
def two_view_geometry_computation(self, view1_feat2D, view2_feat2D):
'''
Method to compute the fundamental matrix and epipoles for two views
Args:
view1_feat2D: 2D feature coordinates in view 1
view2_feat2D: 2D feature coordinates in view 2
Returns:
F: the fundamental matrix
epipole_1: view1 epipole
epipole_2: view2 epipoles
'''
F = self._eg_utils.fundamental_matrix(view1_feat2D, view2_feat2D, self._optimize_f)
epipole_1 = self._eg_utils.get_epipole(F)
epipole_2 = self._eg_utils.get_epipole(np.transpose(F))
return F, epipole_1, epipole_2
def compute_reference_frame(self, epipole, F):
'''
Method to compute the reference frame of the reconstruction (i.e. plane at infinity in an affine or metric space).
Args:
epipole: the epipole
F: the fundamental matrix
Returns:
p: the reference plane
h: the homography [e]xF
'''
H = self._eg_utils.compute_homography(epipole, F) # compute the homography [e]xF
# get the reference plane
p = np.sum(np.divide(np.eye(3) - H, np.transpose(np.asarray([epipole, epipole, epipole]))), axis=0) / 3
# adjust reference plane to make the first two projection matrices as equal as possible
p = fmin(self.init_plane, np.append(p, 1), xtol=1e-25, ftol=1e-25, args=(H.real, epipole.real))
p = p[0:3]
return p, H
def init_plane(self, p, H, epi):
'''
Error function to make the difference between the first two projection matrices as small as possible
        Note: assuming that the two views used for the initial reconstruction are not too far apart (thus their projection matrices are almost equal) has proven to give good results
Args:
p: the reference plane (i.e. plane at infinity)
H: homography [e]x[F]
            epi: epipole
Returns:
error: difference between two projection matrices
'''
epi = np.reshape(epi, (3, 1))
p = np.reshape(p, (1, 4))
t = p[0, 0:3]
t = np.reshape(t, (1, 3))
error = sum(sum(abs(H + epi.dot(t) - p[0, 3] * np.eye(3))))
return error
def estimate_initial_projection_matrices(self, H, epipole_2, p):
'''
Method to estimate the projection matrices for the two views (i.e. P1=[I | 0], P2=[H+epi1|e])
Args:
H: homography [e]x[F]
epipole_2: epipole in the 2nd view
p: the reference plane of the reconstruction (i.e. plane at infinity)
Returns:
P: projection matrices for these two views
'''
P = np.zeros((3, 4, self._sequence_length))
P[:, :, 0] = [[1, 0, 0, 0], [0, 1, 0, 0],
                      [0, 0, 1, 0]]  # P1=[I | 0], i.e. first frame aligned with world frame
epi_tmp = np.reshape(epipole_2, (3, 1)) # P2=[H+epi1|e]
P[:, :3, 1] = H + epi_tmp.dot(np.reshape(p, (1, 3)))
P[:, 3, 1] = epipole_2
P[:, :, 1] = P[:, :, 1] / P[2, 2, 1]
return P
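    # Background note (standard two-view result, see e.g. Hartley & Zisserman,
    # "Multiple View Geometry"): any camera pair consistent with a fundamental
    # matrix F can be written in the canonical form P1 = [I | 0] and
    # P2 = [[e2]x F + e2 * p^T | e2], which is what the matrices built above
    # encode, with H = [e2]x F and p the reference plane chosen in
    # compute_reference_frame().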
def get_initial_structure(self, feat_2D, P, epipole_1, epipole_2, F):
'''
Method to get an initial 3D structure (i.e. 3D point cloud), from the first two projection matrices through triangulation.
Args:
feat_2D: 2D feature coordinates for all images
P: projection matrices for all views (only the first two views are used)
epipole_1: view 1 epipole
epipole_2: view 2 epipole
F: fundamental matrix
Returns:
points3D: 3D point cloud
'''
number_of_features = feat_2D.shape[2]
points3D = np.zeros(shape=[number_of_features, 4])
for i in range(0, number_of_features):
if (self._triangulation_method == 0):
x = self._eg_utils.triangulate_points(feat_2D[0, :, i], feat_2D[1, :, i], P[:, :, 0], P[:, :, 1])
x = x[0:3]
elif (self._triangulation_method == 1):
                x = self._eg_utils.polynomial_triangulation(feat_2D[0, :, i], feat_2D[1, :, i], epipole_1, epipole_2, F,
P[:, :, 0], P[:, :, 1])
x = x[0:3] / x[3] # normalize
if (self._optimize_triangulation == 1):
# refine 3D point estimation (due to noise, lines of sight may not intersect perfectly). Minimizations should be carried out in the images
# and not in the projective 3D space, thus the reprojection error is used.
x = fmin(self.refine_3d_point, x, xtol=1e-25, ftol=1e-25, full_output=0,
args=(P[:, :, 0], P[:, :, 1], feat_2D[0, :, i], feat_2D[1, :, i]))
points3D[i, :] = np.append(x, 1)
return points3D
def refine_3d_point(self, point3D, P1, P2, view1_feat2D, view2_feat2D):
'''
Method to compute the reprojection error of a 3D point in two views
Args:
point3D: 3D point cloud
P1: projection matrix of view 1
P2: projection matrix of view 2
view1_feat2D: 2D feature coordinates in view 1
            view2_feat2D: 2D feature coordinates in view 2
Returns:
error: the reprojection error
'''
point3D = np.append(point3D, 1)
error = sum(
self.compute_reprojection_error_point(P1, point3D, view1_feat2D) + self.compute_reprojection_error_point(P2,
point3D,
view2_feat2D))
return error
def projective_pose_estimation(self, feat_2D, P, points3D):
'''
Method to add views using an initial 3D structure, i.e. compute the projection matrices for all the additional views (the first two are already
estimated in previous steps)
Args:
feat_2D: 2D feature coordinates for all images
P: projection matrices
points3d: 3D point cloud
Returns:
P: projection matrices for all views
'''
number_of_features = feat_2D.shape[2]
AA = np.zeros(shape=[2 * number_of_features, 12])
for i in range(2, self._sequence_length):
for j in range(0, number_of_features):
AA[2 * j, 0:4] = points3D[j]
AA[2 * j, 8:12] = -feat_2D[i, 0, j] * points3D[j]
AA[2 * j + 1, 4:8] = points3D[j]
AA[2 * j + 1, 8:12] = -feat_2D[i, 1, j] * points3D[j]
U, s, Vh = svd(AA)
V = np.transpose(Vh)
VV = V[0:12, 11]
VV = VV / VV[10]
VV = np.delete(VV, 10)
# refine the estimate for the i-th projection matrix
result = least_squares(self._eg_utils.refine_projection_matrix, VV, args=(points3D, feat_2D[i, :, :]))
VV = result.x
Pr = np.zeros(shape=[3, 4])
Pr[0, :] = VV[0:4]
Pr[1, :] = VV[4:8]
Pr[2, :] = np.append(np.append(VV[8:10], 1), VV[10])
P[:, :, i] = Pr
return P
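    # The linear system assembled above is the standard DLT resection: writing
    # the unknown projection matrix with rows p1, p2, p3, each 2D/3D
    # correspondence x ~ P*X contributes the two equations
    #   p1.X - x*(p3.X) = 0   and   p2.X - y*(p3.X) = 0,
    # which are rows AA[2j] and AA[2j+1]; the stacked 12-vector (p1, p2, p3) is
    # then taken from the last column of V in the SVD, i.e. the null space of AA.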
def bundle_adjustment(self, feat_2D, P, feat3D):
'''
Method to refine structure and motion, i.e. refine the projection matrices and 3D points using the reprojection error
Args:
feat_2D: 2D feature coordinates for all images
P: projection matrices
points3d: 3D point cloud
Returns:
P: the refined projection matrices
feat3D: the refined 3D point cloud
error: the reprojection error
'''
number_of_features = feat_2D.shape[2]
# The vector to be optimized
X = np.reshape(P[:, :, 0], (1, 12))
# Append the projection matrices...
for i in range(1, self._sequence_length):
X = np.append(X, np.reshape(P[:, :, i], (1, 12)))
X = np.delete(X, [10, 22, (self._sequence_length - 1) * 12 + 10])
# ...and then append the 3D points
X = np.append(X, np.reshape(feat3D[:, 0:3], number_of_features * self._sequence_length))
# Optimize using Levenberg-Marquardt
result = least_squares(self._eg_utils.overall_reprojection_error, X, max_nfev=1000, method='lm',
args=([feat_2D]))
X = result.x
error = np.power(sum(self._eg_utils.overall_reprojection_error(X, feat_2D)), 2)
# get the refined projection matrices from the optimal vector
for i in range(0, self._sequence_length):
P[:, :, i] = np.reshape(X[0 + i * 11:12 + i * 11], (3, 4))
P[2, 3, i] = P[2, 2, i]
P[2, 2, i] = 1
# get the refined 3D coordinates from the optimal vector
feat3D[:, 0:3] = np.reshape(
X[self._sequence_length * 11:self._sequence_length * 11 + self._sequence_length * number_of_features * 3],
(number_of_features, 3))
Tp1 = np.vstack([P[:, :, 0], [0, 0, 0, 1]])
for i in range(0, self._sequence_length):
P[:, :, i] = P[:, :, i].dot(inv(Tp1))
feat3D = Tp1.dot(np.transpose(feat3D))
feat3D = np.transpose(feat3D / feat3D[3, :])
return P, feat3D, error
def self_calibration(self, P):
'''
Self calibration using the procedure described in
M. Pollefeys, R. Koch and L. Van Gool, "Self-Calibration and Metric Reconstruction in spite of Varying and Unknown Internal Camera Parameters", Proc. International Conference on Computer Vision, Narosa Publishing House, pp.90-95, 1998.
Args:
P: projection matrices
Returns:
Tm: transformation matrix that will transform from the projective space to metric space
            K: camera intrinsic parameters for each view
error: the reprojection error
'''
# setup the system of equations
AAA = np.zeros(shape=[4 * self._sequence_length - 4, 6])
for i in range(0, self._sequence_length - 1):
P_tmp = P[:, :, i + 1]
AAA[0 + 4 * i, :] = [(-np.power(P_tmp[1, 1], 2) + np.power(P_tmp[0, 1], 2) - np.power(P_tmp[1, 0],
2) + np.power(
P_tmp[0, 0], 2)), (-2 * P_tmp[1, 0] * P_tmp[1, 3] + 2 * P_tmp[0, 0] * P_tmp[0, 3]),
(-2 * P_tmp[1, 1] * P_tmp[1, 3] + 2 * P_tmp[0, 1] * P_tmp[0, 3]),
(2 * P_tmp[0, 2] * P_tmp[0, 3] - 2 * P_tmp[1, 2] * P_tmp[1, 3]),
(-np.power(P_tmp[1, 3], 2) + np.power(P_tmp[0, 3], 2)),
(-np.power(P_tmp[1, 2], 2) + np.power(P_tmp[0, 2], 2))]
AAA[1 + 4 * i, :] = [(P_tmp[1, 0] * P_tmp[0, 0] + P_tmp[1, 1] * P_tmp[0, 1]),
(P_tmp[1, 0] * P_tmp[0, 3] + P_tmp[1, 3] * P_tmp[0, 0]),
(P_tmp[1, 1] * P_tmp[0, 3] + P_tmp[1, 3] * P_tmp[0, 1]),
(P_tmp[1, 2] * P_tmp[0, 3] + P_tmp[1, 3] * P_tmp[0, 2]), P_tmp[1, 3] * P_tmp[0, 3],
P_tmp[1, 2] * P_tmp[0, 2]]
AAA[2 + 4 * i, :] = [(P_tmp[2, 0] * P_tmp[0, 0] + P_tmp[2, 1] * P_tmp[0, 1]),
(P_tmp[2, 0] * P_tmp[0, 3] + P_tmp[2, 3] * P_tmp[0, 0]),
(P_tmp[2, 1] * P_tmp[0, 3] + P_tmp[2, 3] * P_tmp[0, 1]),
(P_tmp[2, 2] * P_tmp[0, 3] + P_tmp[2, 3] * P_tmp[0, 2]), P_tmp[2, 3] * P_tmp[0, 3],
P_tmp[2, 2] * P_tmp[0, 2]]
AAA[3 + 4 * i, :] = [(P_tmp[2, 0] * P_tmp[1, 0] + P_tmp[2, 1] * P_tmp[1, 1]),
(P_tmp[2, 0] * P_tmp[1, 3] + P_tmp[2, 3] * P_tmp[1, 0]),
(P_tmp[2, 1] * P_tmp[1, 3] + P_tmp[2, 3] * P_tmp[1, 1]),
(P_tmp[2, 2] * P_tmp[1, 3] + P_tmp[2, 3] * P_tmp[1, 2]), P_tmp[2, 3] * P_tmp[1, 3],
P_tmp[2, 2] * P_tmp[1, 2]]
U, s, Vh = svd(AAA)
V = np.transpose(Vh)
x = V[0:5, 5] / V[5, 5]
jj = np.sign(x[0])
b = x * np.sign(x[0])
# initial estimate of the absolute conic
W = np.asarray([[b[0], 0, 0, b[1]], [0, b[0], 0, b[2]], [0, 0, 1, b[3]], [b[1], b[2], b[3], b[4]]])
# initial estimate of the focal lengths
y = np.ones(shape=[self._sequence_length, 1])
for i in range(0, self._sequence_length):
y[i] = np.sqrt(np.abs(P[0, :, i].dot(W).dot(np.transpose(P[0, :, i]))) / (
P[2, :, i].dot(W).dot(np.transpose(P[2, :, i]))))
if (self._self_foc == 0):
# optimize for fixed focal lengths
pp2 = np.asarray([-b[1] / b[0], -b[2] / b[0], -b[3]])
x = np.hstack((sum(y) / y.shape[0], pp2))
x = fmin(self.fixed_f_error, x, args=(P, self._sequence_length))
error = self.fixed_f_error(x, P, self._sequence_length)
            # fill out the camera intrinsic parameters.
K = np.zeros((3, 3, self._sequence_length))
for i in range(0, self._sequence_length):
K[:, :, i] = np.eye(3)
K[0, 0, i] = x[0] * self._mm
K[1, 1, i] = x[0] * self._mm
K[0, 2, i] = self._width
K[1, 2, i] = self._height
inf_plane = x[1:4]
            # construct the transformation matrix that will take us from the projective space to the metric space
a = inv([[x[0], 0, 0], [0, x[0], 0], [0, 0, 1]])
a = np.asarray(a) * jj
tmp = np.asarray([0, 0, 0])
tmp = np.reshape(tmp, (3, 1))
Tm = np.append(a, tmp, 1)
Tm = np.vstack((Tm, np.append(inf_plane, 1)))
else:
            # optimize for varying focal lengths (to do)
print("not yet supported")
return Tm, K, error
def fixed_f_error(self, x, P, n):
'''
Error function for the self-calibration error when the focal lengths are fixed (i.e. we
        assume the same focal lengths across the image sequence)
'''
K1 = np.eye(3)
K1[0, 0] = x[0]
K1[1, 1] = x[0]
pp = np.asarray(x[1:4])
pp = np.reshape(pp, (3, 1))
W = np.asarray(np.append(K1.dot(np.transpose(K1)), -K1.dot(np.transpose(K1)).dot(pp), 1))
tmp = np.append((np.transpose(-pp).dot(K1).dot(np.transpose(K1))),
np.transpose(pp).dot(K1).dot(np.transpose(K1)).dot(pp))
W = np.vstack((W, tmp))
a = K1.dot(np.transpose(K1))
b = a / np.sqrt(np.trace(np.transpose(a).dot(a)))
error = 0
for i in range(0, n - 1):
P_tmp = P[:, :, i + 1]
c = P_tmp.dot(W).dot(np.transpose(P_tmp))
d = c / np.sqrt(np.trace(np.transpose(c) * c))
error = error + np.trace(np.transpose((b - d)).dot(b - d))
return error
def convert_to_metric_space(self, Tm, feat3D, P, K):
'''
Transform the 3D points and projective matrices to the metric space
Args:
            Tm: transformation matrix for transforming from projective to metric space
feat3D: 3D point cloud
P: projection matrices (for all views)
K: camera intrisic parameters (for all views)
Returns:
feat3D: 3D point cloud in metric space
            P: projection matrices in metric space
'''
# transform the projective 3d coordinates to metric
InvT = Tm.dot(np.eye(4))
a = inv(InvT)
InvT = InvT * a[3, 3]
feat3D = InvT.dot(np.transpose(feat3D))
feat3D = np.transpose(feat3D / feat3D[3, :])
        # Rescale the projection matrices back to pixel coordinates (width, height) instead of the normalized (-1, 1) range
tmp = np.eye(3)
tmp[0, 0] = self._mm
tmp[1, 1] = self._mm
tmp[0, 2] = self._width
tmp[1, 2] = self._height
for i in range(0, self._sequence_length):
P[:, :, i] = P[:, :, i].dot(inv(InvT))
P[:, :, i] = tmp.dot(P[:, :, i])
a = det(inv(K[:, :, i]).dot(P[0:3, 0:3, i]))
P[:, :, i] = P[:, :, i] * np.sign(a) / np.power(abs(a), 0.333)
return feat3D, P
def main(input_file, show):
print("--------------------------------")
print(" Uncalibrated 3D Reconstruction ")
print("")
print("--------------------------------")
sequence = ImageSequence(input_file)
start_time = time.time()
rec_engine = UncalibratedReconstruction(sequence.length, sequence.width, sequence.height)
# normalize coordinates
norm_feat_2d = sequence.get_normalized_coordinates()
print("> Estimating fundamental matrix....")
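    # Step 1: estimate the fundamental matrix and the epipoles from the first two views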
F, epipole_1, epipole_2 = rec_engine.two_view_geometry_computation(norm_feat_2d[0], norm_feat_2d[1])
print("> Computing reference plane....")
# Step 2: compute the reconstruction reference plane using the epipole in the second image
p, H = rec_engine.compute_reference_frame(epipole_2, F)
print("> Estimating projection matrices for first two views....")
# Step 3: Estimate projection matrices for the first two views
P = rec_engine.estimate_initial_projection_matrices(H, epipole_2, p)
print("> 3D point estimate triangulation....")
# Step 4: triangulate points to get an initial estimate of the 3D point cloud
feat3D = rec_engine.get_initial_structure(norm_feat_2d, P, epipole_1, epipole_2, F)
print("> Estimating projection matrices for additional views....")
# Step 5: Use the 3D point estimates to estimate the projection matrices of the remaining views
P = rec_engine.projective_pose_estimation(norm_feat_2d, P, feat3D)
print("> Bundle Adjustment....")
    # Step 6: Optimize 3D points and projection matrices using the reprojection error
P, feat3D, error = rec_engine.bundle_adjustment(norm_feat_2d, P, feat3D)
print(" - Bundle adjustment error: ", error)
print("> Self-calibration")
    # Step 7: Self-calibration
Tm, K, error = rec_engine.self_calibration(P)
print(" - Self-calibration error: ", error)
    print(" - Transformation Matrix (Projective -> Metric): ")
    print(Tm)
print("> Converting to metric space")
metric_feat3D, metric_P = rec_engine.convert_to_metric_space(Tm, feat3D, P, K)
print("> Saving model...")
recModel = RecModel()
recModel.P = P
recModel.points3D = metric_feat3D
recModel.Tm = Tm
np.savetxt('rec_model_cloud.txt', metric_feat3D, delimiter=',')
print(" - 3D point cloud saved in rec_model_cloud.txt ")
recModel.export_stl_file('reconstructed_model.stl')
print(" - STL model saved in reconstructed_model.stl")
if (show == True):
sequence.show()
print("> 3D reconstruction completed in " + str(round(time.time() - start_time, 1)) + " sec!")
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='3D Reconstruction from uncalibrated images')
parser.add_argument('--input_file', metavar='path', required=True,
help='Input file containing image point correspondences')
parser.add_argument('--show', required=False, action="store_true",
help="Display the image sequence with the 2D features")
args = parser.parse_args()
main(input_file=args.input_file, show=args.show)
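# Example invocation (the input file name here is hypothetical):
#   python uncalibrated_rec.py --input_file sequence_2d_points.txt --show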
|
mit
|
Clyde-fare/scikit-learn
|
examples/model_selection/grid_search_text_feature_extraction.py
|
253
|
4158
|
"""
==========================================================
Sample pipeline for text feature extraction and evaluation
==========================================================
The dataset used in this example is the 20 newsgroups dataset which will be
automatically downloaded and then cached and reused for the document
classification example.
You can adjust the number of categories by giving their names to the dataset
loader or setting them to None to get all 20 of them.
Here is a sample output of a run on a quad-core machine::
Loading 20 newsgroups dataset for categories:
['alt.atheism', 'talk.religion.misc']
1427 documents
2 categories
Performing grid search...
pipeline: ['vect', 'tfidf', 'clf']
parameters:
{'clf__alpha': (1.0000000000000001e-05, 9.9999999999999995e-07),
'clf__n_iter': (10, 50, 80),
'clf__penalty': ('l2', 'elasticnet'),
'tfidf__use_idf': (True, False),
'vect__max_n': (1, 2),
'vect__max_df': (0.5, 0.75, 1.0),
'vect__max_features': (None, 5000, 10000, 50000)}
done in 1737.030s
Best score: 0.940
Best parameters set:
clf__alpha: 9.9999999999999995e-07
clf__n_iter: 50
clf__penalty: 'elasticnet'
tfidf__use_idf: True
vect__max_n: 2
vect__max_df: 0.75
vect__max_features: 50000
"""
# Author: Olivier Grisel <[email protected]>
# Peter Prettenhofer <[email protected]>
# Mathieu Blondel <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from pprint import pprint
from time import time
import logging
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.linear_model import SGDClassifier
from sklearn.grid_search import GridSearchCV
from sklearn.pipeline import Pipeline
print(__doc__)
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
###############################################################################
# Load some categories from the training set
categories = [
'alt.atheism',
'talk.religion.misc',
]
# Uncomment the following to do the analysis on all the categories
#categories = None
print("Loading 20 newsgroups dataset for categories:")
print(categories)
data = fetch_20newsgroups(subset='train', categories=categories)
print("%d documents" % len(data.filenames))
print("%d categories" % len(data.target_names))
print()
###############################################################################
# define a pipeline combining a text feature extractor with a simple
# classifier
pipeline = Pipeline([
('vect', CountVectorizer()),
('tfidf', TfidfTransformer()),
('clf', SGDClassifier()),
])
# uncommenting more parameters will give better exploring power but will
# increase processing time in a combinatorial way
parameters = {
'vect__max_df': (0.5, 0.75, 1.0),
#'vect__max_features': (None, 5000, 10000, 50000),
'vect__ngram_range': ((1, 1), (1, 2)), # unigrams or bigrams
#'tfidf__use_idf': (True, False),
#'tfidf__norm': ('l1', 'l2'),
'clf__alpha': (0.00001, 0.000001),
'clf__penalty': ('l2', 'elasticnet'),
#'clf__n_iter': (10, 50, 80),
}
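# With the settings above the grid has 3 (max_df) * 2 (ngram_range) * 2 (alpha)
# * 2 (penalty) = 24 parameter combinations; GridSearchCV fits the pipeline once
# per combination and per cross-validation fold (3-fold by default in this
# sklearn version), i.e. roughly 72 fits, plus a final refit on the best setting.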
if __name__ == "__main__":
# multiprocessing requires the fork to happen in a __main__ protected
# block
# find the best parameters for both the feature extraction and the
# classifier
grid_search = GridSearchCV(pipeline, parameters, n_jobs=-1, verbose=1)
print("Performing grid search...")
print("pipeline:", [name for name, _ in pipeline.steps])
print("parameters:")
pprint(parameters)
t0 = time()
grid_search.fit(data.data, data.target)
print("done in %0.3fs" % (time() - t0))
print()
print("Best score: %0.3f" % grid_search.best_score_)
print("Best parameters set:")
best_parameters = grid_search.best_estimator_.get_params()
for param_name in sorted(parameters.keys()):
print("\t%s: %r" % (param_name, best_parameters[param_name]))
|
bsd-3-clause
|
jmschrei/scikit-learn
|
sklearn/naive_bayes.py
|
11
|
28770
|
# -*- coding: utf-8 -*-
"""
The :mod:`sklearn.naive_bayes` module implements Naive Bayes algorithms. These
are supervised learning methods based on applying Bayes' theorem with strong
(naive) feature independence assumptions.
"""
# Author: Vincent Michel <[email protected]>
# Minor fixes by Fabian Pedregosa
# Amit Aides <[email protected]>
# Yehuda Finkelstein <[email protected]>
# Lars Buitinck <[email protected]>
# Jan Hendrik Metzen <[email protected]>
# (parts based on earlier work by Mathieu Blondel)
#
# License: BSD 3 clause
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import issparse
from .base import BaseEstimator, ClassifierMixin
from .preprocessing import binarize
from .preprocessing import LabelBinarizer
from .preprocessing import label_binarize
from .utils import check_X_y, check_array
from .utils.extmath import safe_sparse_dot, logsumexp
from .utils.multiclass import _check_partial_fit_first_call
from .utils.fixes import in1d
from .utils.validation import check_is_fitted
from .externals import six
__all__ = ['BernoulliNB', 'GaussianNB', 'MultinomialNB']
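# All estimators in this module score a sample x = (x_1, ..., x_n) with the
# naive Bayes rule: under the conditional-independence assumption,
#   log P(c | x) = log P(c) + sum_i log P(x_i | c) + const,
# which is the quantity each subclass returns from _joint_log_likelihood();
# predict() then takes the argmax of that score over the classes.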
class BaseNB(six.with_metaclass(ABCMeta, BaseEstimator, ClassifierMixin)):
"""Abstract base class for naive Bayes estimators"""
@abstractmethod
def _joint_log_likelihood(self, X):
"""Compute the unnormalized posterior log probability of X
I.e. ``log P(c) + log P(x|c)`` for all rows x of X, as an array-like of
        shape [n_samples, n_classes].
Input is passed to _joint_log_likelihood as-is by predict,
predict_proba and predict_log_proba.
"""
def predict(self, X):
"""
Perform classification on an array of test vectors X.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = [n_samples]
Predicted target values for X
"""
jll = self._joint_log_likelihood(X)
return self.classes_[np.argmax(jll, axis=1)]
def predict_log_proba(self, X):
"""
Return log-probability estimates for the test vector X.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array-like, shape = [n_samples, n_classes]
Returns the log-probability of the samples for each class in
the model. The columns correspond to the classes in sorted
order, as they appear in the attribute `classes_`.
"""
jll = self._joint_log_likelihood(X)
# normalize by P(x) = P(f_1, ..., f_n)
log_prob_x = logsumexp(jll, axis=1)
return jll - np.atleast_2d(log_prob_x).T
def predict_proba(self, X):
"""
Return probability estimates for the test vector X.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array-like, shape = [n_samples, n_classes]
Returns the probability of the samples for each class in
the model. The columns correspond to the classes in sorted
order, as they appear in the attribute `classes_`.
"""
return np.exp(self.predict_log_proba(X))
class GaussianNB(BaseNB):
"""
Gaussian Naive Bayes (GaussianNB)
Can perform online updates to model parameters via `partial_fit` method.
For details on algorithm used to update feature means and variance online,
see Stanford CS tech report STAN-CS-79-773 by Chan, Golub, and LeVeque:
http://i.stanford.edu/pub/cstr/reports/cs/tr/79/773/CS-TR-79-773.pdf
Read more in the :ref:`User Guide <gaussian_naive_bayes>`.
Attributes
----------
class_prior_ : array, shape (n_classes,)
probability of each class.
class_count_ : array, shape (n_classes,)
number of training samples observed in each class.
theta_ : array, shape (n_classes, n_features)
mean of each feature per class
sigma_ : array, shape (n_classes, n_features)
variance of each feature per class
Examples
--------
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> Y = np.array([1, 1, 1, 2, 2, 2])
>>> from sklearn.naive_bayes import GaussianNB
>>> clf = GaussianNB()
>>> clf.fit(X, Y)
GaussianNB()
>>> print(clf.predict([[-0.8, -1]]))
[1]
>>> clf_pf = GaussianNB()
>>> clf_pf.partial_fit(X, Y, np.unique(Y))
GaussianNB()
>>> print(clf_pf.predict([[-0.8, -1]]))
[1]
"""
def fit(self, X, y, sample_weight=None):
"""Fit Gaussian Naive Bayes according to X, y
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape (n_samples,)
Target values.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : object
Returns self.
"""
X, y = check_X_y(X, y)
return self._partial_fit(X, y, np.unique(y), _refit=True,
sample_weight=sample_weight)
@staticmethod
def _update_mean_variance(n_past, mu, var, X, sample_weight=None):
"""Compute online update of Gaussian mean and variance.
Given starting sample count, mean, and variance, a new set of
points X, and optionally sample weights, return the updated mean and
variance. (NB - each dimension (column) in X is treated as independent
-- you get variance, not covariance).
Can take scalar mean and variance, or vector mean and variance to
simultaneously update a number of independent Gaussians.
See Stanford CS tech report STAN-CS-79-773 by Chan, Golub, and LeVeque:
http://i.stanford.edu/pub/cstr/reports/cs/tr/79/773/CS-TR-79-773.pdf
Parameters
----------
n_past : int
Number of samples represented in old mean and variance. If sample
weights were given, this should contain the sum of sample
weights represented in old mean and variance.
mu : array-like, shape (number of Gaussians,)
Means for Gaussians in original set.
var : array-like, shape (number of Gaussians,)
Variances for Gaussians in original set.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples (1. for unweighted).
Returns
-------
total_mu : array-like, shape (number of Gaussians,)
Updated mean for each Gaussian over the combined set.
total_var : array-like, shape (number of Gaussians,)
Updated variance for each Gaussian over the combined set.
"""
if X.shape[0] == 0:
return mu, var
# Compute (potentially weighted) mean and variance of new datapoints
if sample_weight is not None:
n_new = float(sample_weight.sum())
new_mu = np.average(X, axis=0, weights=sample_weight / n_new)
new_var = np.average((X - new_mu) ** 2, axis=0,
weights=sample_weight / n_new)
else:
n_new = X.shape[0]
new_var = np.var(X, axis=0)
new_mu = np.mean(X, axis=0)
if n_past == 0:
return new_mu, new_var
n_total = float(n_past + n_new)
# Combine mean of old and new data, taking into consideration
# (weighted) number of observations
total_mu = (n_new * new_mu + n_past * mu) / n_total
# Combine variance of old and new data, taking into consideration
# (weighted) number of observations. This is achieved by combining
# the sum-of-squared-differences (ssd)
old_ssd = n_past * var
new_ssd = n_new * new_var
total_ssd = (old_ssd + new_ssd +
(n_past / float(n_new * n_total)) *
(n_new * mu - n_new * new_mu) ** 2)
total_var = total_ssd / n_total
return total_mu, total_var
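    # Sanity-check sketch (not part of the original file): for any split of a
    # data matrix into an "old" batch A and a "new" batch B,
    #   _update_mean_variance(len(A), A.mean(0), A.var(0), B)
    # should match np.mean / np.var over np.vstack([A, B]), since the combined
    # variance is rebuilt from the two sums of squared differences plus the
    # between-batch correction term above.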
def partial_fit(self, X, y, classes=None, sample_weight=None):
"""Incremental fit on a batch of samples.
This method is expected to be called several times consecutively
on different chunks of a dataset so as to implement out-of-core
or online learning.
This is especially useful when the whole dataset is too big to fit in
memory at once.
This method has some performance and numerical stability overhead,
hence it is better to call partial_fit on chunks of data that are
as large as possible (as long as fitting in the memory budget) to
hide the overhead.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples,)
Target values.
classes : array-like, shape (n_classes,)
List of all the classes that can possibly appear in the y vector.
Must be provided at the first call to partial_fit, can be omitted
in subsequent calls.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : object
Returns self.
"""
return self._partial_fit(X, y, classes, _refit=False,
sample_weight=sample_weight)
def _partial_fit(self, X, y, classes=None, _refit=False,
sample_weight=None):
"""Actual implementation of Gaussian NB fitting.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples,)
Target values.
classes : array-like, shape (n_classes,)
List of all the classes that can possibly appear in the y vector.
Must be provided at the first call to partial_fit, can be omitted
in subsequent calls.
_refit: bool
If true, act as though this were the first time we called
_partial_fit (ie, throw away any past fitting and start over).
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : object
Returns self.
"""
X, y = check_X_y(X, y)
# If the ratio of data variance between dimensions is too small, it
# will cause numerical errors. To address this, we artificially
# boost the variance by epsilon, a small fraction of the standard
# deviation of the largest dimension.
epsilon = 1e-9 * np.var(X, axis=0).max()
if _refit:
self.classes_ = None
if _check_partial_fit_first_call(self, classes):
# This is the first call to partial_fit:
# initialize various cumulative counters
n_features = X.shape[1]
n_classes = len(self.classes_)
self.theta_ = np.zeros((n_classes, n_features))
self.sigma_ = np.zeros((n_classes, n_features))
self.class_prior_ = np.zeros(n_classes)
self.class_count_ = np.zeros(n_classes)
else:
if X.shape[1] != self.theta_.shape[1]:
msg = "Number of features %d does not match previous data %d."
raise ValueError(msg % (X.shape[1], self.theta_.shape[1]))
# Put epsilon back in each time
self.sigma_[:, :] -= epsilon
classes = self.classes_
unique_y = np.unique(y)
unique_y_in_classes = in1d(unique_y, classes)
if not np.all(unique_y_in_classes):
raise ValueError("The target label(s) %s in y do not exist in the "
"initial classes %s" %
(y[~unique_y_in_classes], classes))
for y_i in unique_y:
i = classes.searchsorted(y_i)
X_i = X[y == y_i, :]
if sample_weight is not None:
sw_i = sample_weight[y == y_i]
N_i = sw_i.sum()
else:
sw_i = None
N_i = X_i.shape[0]
new_theta, new_sigma = self._update_mean_variance(
self.class_count_[i], self.theta_[i, :], self.sigma_[i, :],
X_i, sw_i)
self.theta_[i, :] = new_theta
self.sigma_[i, :] = new_sigma
self.class_count_[i] += N_i
self.sigma_[:, :] += epsilon
self.class_prior_[:] = self.class_count_ / np.sum(self.class_count_)
return self
def _joint_log_likelihood(self, X):
check_is_fitted(self, "classes_")
X = check_array(X)
joint_log_likelihood = []
for i in range(np.size(self.classes_)):
jointi = np.log(self.class_prior_[i])
n_ij = - 0.5 * np.sum(np.log(2. * np.pi * self.sigma_[i, :]))
n_ij -= 0.5 * np.sum(((X - self.theta_[i, :]) ** 2) /
(self.sigma_[i, :]), 1)
joint_log_likelihood.append(jointi + n_ij)
joint_log_likelihood = np.array(joint_log_likelihood).T
return joint_log_likelihood
class BaseDiscreteNB(BaseNB):
"""Abstract base class for naive Bayes on discrete/categorical data
Any estimator based on this class should provide:
__init__
_joint_log_likelihood(X) as per BaseNB
"""
def _update_class_log_prior(self, class_prior=None):
n_classes = len(self.classes_)
if class_prior is not None:
if len(class_prior) != n_classes:
raise ValueError("Number of priors must match number of"
" classes.")
self.class_log_prior_ = np.log(class_prior)
elif self.fit_prior:
# empirical prior, with sample_weight taken into account
self.class_log_prior_ = (np.log(self.class_count_)
- np.log(self.class_count_.sum()))
else:
self.class_log_prior_ = np.zeros(n_classes) - np.log(n_classes)
def partial_fit(self, X, y, classes=None, sample_weight=None):
"""Incremental fit on a batch of samples.
This method is expected to be called several times consecutively
on different chunks of a dataset so as to implement out-of-core
or online learning.
This is especially useful when the whole dataset is too big to fit in
memory at once.
This method has some performance overhead hence it is better to call
partial_fit on chunks of data that are as large as possible
(as long as fitting in the memory budget) to hide the overhead.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target values.
classes : array-like, shape = [n_classes]
List of all the classes that can possibly appear in the y vector.
Must be provided at the first call to partial_fit, can be omitted
in subsequent calls.
sample_weight : array-like, shape = [n_samples], optional
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : object
Returns self.
"""
X = check_array(X, accept_sparse='csr', dtype=np.float64)
_, n_features = X.shape
if _check_partial_fit_first_call(self, classes):
# This is the first call to partial_fit:
# initialize various cumulative counters
n_effective_classes = len(classes) if len(classes) > 1 else 2
self.class_count_ = np.zeros(n_effective_classes, dtype=np.float64)
self.feature_count_ = np.zeros((n_effective_classes, n_features),
dtype=np.float64)
elif n_features != self.coef_.shape[1]:
msg = "Number of features %d does not match previous data %d."
raise ValueError(msg % (n_features, self.coef_.shape[-1]))
Y = label_binarize(y, classes=self.classes_)
if Y.shape[1] == 1:
Y = np.concatenate((1 - Y, Y), axis=1)
n_samples, n_classes = Y.shape
if X.shape[0] != Y.shape[0]:
msg = "X.shape[0]=%d and y.shape[0]=%d are incompatible."
raise ValueError(msg % (X.shape[0], y.shape[0]))
# label_binarize() returns arrays with dtype=np.int64.
# We convert it to np.float64 to support sample_weight consistently
Y = Y.astype(np.float64)
if sample_weight is not None:
sample_weight = np.atleast_2d(sample_weight)
Y *= check_array(sample_weight).T
class_prior = self.class_prior
# Count raw events from data before updating the class log prior
# and feature log probas
self._count(X, Y)
# XXX: OPTIM: we could introduce a public finalization method to
# be called by the user explicitly just once after several consecutive
# calls to partial_fit and prior any call to predict[_[log_]proba]
# to avoid computing the smooth log probas at each call to partial fit
self._update_feature_log_prob()
self._update_class_log_prior(class_prior=class_prior)
return self
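# Illustrative sketch (editor's addition, not part of scikit-learn): typical
# out-of-core use of partial_fit -- `classes` must cover every label that can
# ever appear and must be given on the first call; chunk_iterator() below is a
# hypothetical generator yielding (X_chunk, y_chunk) pairs.
# >>> clf = MultinomialNB()
# >>> all_classes = np.array([0, 1, 2])
# >>> for X_chunk, y_chunk in chunk_iterator():
# ...     clf.partial_fit(X_chunk, y_chunk, classes=all_classes)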
def fit(self, X, y, sample_weight=None):
"""Fit Naive Bayes classifier according to X, y
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target values.
sample_weight : array-like, shape = [n_samples], optional
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : object
Returns self.
"""
X, y = check_X_y(X, y, 'csr')
_, n_features = X.shape
labelbin = LabelBinarizer()
Y = labelbin.fit_transform(y)
self.classes_ = labelbin.classes_
if Y.shape[1] == 1:
Y = np.concatenate((1 - Y, Y), axis=1)
# LabelBinarizer().fit_transform() returns arrays with dtype=np.int64.
# We convert it to np.float64 to support sample_weight consistently;
# this means we also don't have to cast X to floating point
Y = Y.astype(np.float64)
if sample_weight is not None:
sample_weight = np.atleast_2d(sample_weight)
Y *= check_array(sample_weight).T
class_prior = self.class_prior
# Count raw events from data before updating the class log prior
# and feature log probas
n_effective_classes = Y.shape[1]
self.class_count_ = np.zeros(n_effective_classes, dtype=np.float64)
self.feature_count_ = np.zeros((n_effective_classes, n_features),
dtype=np.float64)
self._count(X, Y)
self._update_feature_log_prob()
self._update_class_log_prior(class_prior=class_prior)
return self
# XXX The following is a stopgap measure; we need to set the dimensions
# of class_log_prior_ and feature_log_prob_ correctly.
def _get_coef(self):
return (self.feature_log_prob_[1:]
if len(self.classes_) == 2 else self.feature_log_prob_)
def _get_intercept(self):
return (self.class_log_prior_[1:]
if len(self.classes_) == 2 else self.class_log_prior_)
coef_ = property(_get_coef)
intercept_ = property(_get_intercept)
class MultinomialNB(BaseDiscreteNB):
"""
Naive Bayes classifier for multinomial models
The multinomial Naive Bayes classifier is suitable for classification with
discrete features (e.g., word counts for text classification). The
multinomial distribution normally requires integer feature counts. However,
in practice, fractional counts such as tf-idf may also work.
Read more in the :ref:`User Guide <multinomial_naive_bayes>`.
Parameters
----------
alpha : float, optional (default=1.0)
Additive (Laplace/Lidstone) smoothing parameter
(0 for no smoothing).
fit_prior : boolean
Whether to learn class prior probabilities or not.
If false, a uniform prior will be used.
class_prior : array-like, size (n_classes,)
Prior probabilities of the classes. If specified the priors are not
adjusted according to the data.
Attributes
----------
class_log_prior_ : array, shape (n_classes, )
Smoothed empirical log probability for each class.
intercept_ : property
Mirrors ``class_log_prior_`` for interpreting MultinomialNB
as a linear model.
feature_log_prob_ : array, shape (n_classes, n_features)
Empirical log probability of features
given a class, ``P(x_i|y)``.
coef_ : property
Mirrors ``feature_log_prob_`` for interpreting MultinomialNB
as a linear model.
class_count_ : array, shape (n_classes,)
Number of samples encountered for each class during fitting. This
value is weighted by the sample weight when provided.
feature_count_ : array, shape (n_classes, n_features)
Number of samples encountered for each (class, feature)
during fitting. This value is weighted by the sample weight when
provided.
Examples
--------
>>> import numpy as np
>>> X = np.random.randint(5, size=(6, 100))
>>> y = np.array([1, 2, 3, 4, 5, 6])
>>> from sklearn.naive_bayes import MultinomialNB
>>> clf = MultinomialNB()
>>> clf.fit(X, y)
MultinomialNB(alpha=1.0, class_prior=None, fit_prior=True)
>>> print(clf.predict(X[2:3]))
[3]
Notes
-----
For the rationale behind the names `coef_` and `intercept_`, i.e.
naive Bayes as a linear classifier, see J. Rennie et al. (2003),
Tackling the poor assumptions of naive Bayes text classifiers, ICML.
References
----------
C.D. Manning, P. Raghavan and H. Schuetze (2008). Introduction to
Information Retrieval. Cambridge University Press, pp. 234-265.
http://nlp.stanford.edu/IR-book/html/htmledition/naive-bayes-text-classification-1.html
"""
def __init__(self, alpha=1.0, fit_prior=True, class_prior=None):
self.alpha = alpha
self.fit_prior = fit_prior
self.class_prior = class_prior
def _count(self, X, Y):
"""Count and smooth feature occurrences."""
if np.any((X.data if issparse(X) else X) < 0):
raise ValueError("Input X must be non-negative")
self.feature_count_ += safe_sparse_dot(Y.T, X)
self.class_count_ += Y.sum(axis=0)
def _update_feature_log_prob(self):
"""Apply smoothing to raw counts and recompute log probabilities"""
smoothed_fc = self.feature_count_ + self.alpha
smoothed_cc = smoothed_fc.sum(axis=1)
self.feature_log_prob_ = (np.log(smoothed_fc)
- np.log(smoothed_cc.reshape(-1, 1)))
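# Editor's note (illustrative): this is Lidstone/Laplace smoothing,
#   P(x_j | y=c) = (N_cj + alpha) / (N_c + alpha * n_features).
# For example, with alpha=1 and per-class feature counts [2, 0, 1]
# (so N_c=3, n_features=3) the smoothed probabilities are [3/6, 1/6, 2/6]
# before taking the logarithm.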
def _joint_log_likelihood(self, X):
"""Calculate the posterior log probability of the samples X"""
check_is_fitted(self, "classes_")
X = check_array(X, accept_sparse='csr')
return (safe_sparse_dot(X, self.feature_log_prob_.T)
+ self.class_log_prior_)
class BernoulliNB(BaseDiscreteNB):
"""Naive Bayes classifier for multivariate Bernoulli models.
Like MultinomialNB, this classifier is suitable for discrete data. The
difference is that while MultinomialNB works with occurrence counts,
BernoulliNB is designed for binary/boolean features.
Read more in the :ref:`User Guide <bernoulli_naive_bayes>`.
Parameters
----------
alpha : float, optional (default=1.0)
Additive (Laplace/Lidstone) smoothing parameter
(0 for no smoothing).
binarize : float or None, optional
Threshold for binarizing (mapping to booleans) of sample features.
If None, input is presumed to already consist of binary vectors.
fit_prior : boolean
Whether to learn class prior probabilities or not.
If false, a uniform prior will be used.
class_prior : array-like, size=[n_classes,]
Prior probabilities of the classes. If specified the priors are not
adjusted according to the data.
Attributes
----------
class_log_prior_ : array, shape = [n_classes]
Log probability of each class (smoothed).
feature_log_prob_ : array, shape = [n_classes, n_features]
Empirical log probability of features given a class, P(x_i|y).
class_count_ : array, shape = [n_classes]
Number of samples encountered for each class during fitting. This
value is weighted by the sample weight when provided.
feature_count_ : array, shape = [n_classes, n_features]
Number of samples encountered for each (class, feature)
during fitting. This value is weighted by the sample weight when
provided.
Examples
--------
>>> import numpy as np
>>> X = np.random.randint(2, size=(6, 100))
>>> Y = np.array([1, 2, 3, 4, 4, 5])
>>> from sklearn.naive_bayes import BernoulliNB
>>> clf = BernoulliNB()
>>> clf.fit(X, Y)
BernoulliNB(alpha=1.0, binarize=0.0, class_prior=None, fit_prior=True)
>>> print(clf.predict(X[2:3]))
[3]
References
----------
C.D. Manning, P. Raghavan and H. Schuetze (2008). Introduction to
Information Retrieval. Cambridge University Press, pp. 234-265.
http://nlp.stanford.edu/IR-book/html/htmledition/the-bernoulli-model-1.html
A. McCallum and K. Nigam (1998). A comparison of event models for naive
Bayes text classification. Proc. AAAI/ICML-98 Workshop on Learning for
Text Categorization, pp. 41-48.
V. Metsis, I. Androutsopoulos and G. Paliouras (2006). Spam filtering with
naive Bayes -- Which naive Bayes? 3rd Conf. on Email and Anti-Spam (CEAS).
"""
def __init__(self, alpha=1.0, binarize=.0, fit_prior=True,
class_prior=None):
self.alpha = alpha
self.binarize = binarize
self.fit_prior = fit_prior
self.class_prior = class_prior
def _count(self, X, Y):
"""Count and smooth feature occurrences."""
if self.binarize is not None:
X = binarize(X, threshold=self.binarize)
self.feature_count_ += safe_sparse_dot(Y.T, X)
self.class_count_ += Y.sum(axis=0)
def _update_feature_log_prob(self):
"""Apply smoothing to raw counts and recompute log probabilities"""
smoothed_fc = self.feature_count_ + self.alpha
smoothed_cc = self.class_count_ + self.alpha * 2
self.feature_log_prob_ = (np.log(smoothed_fc)
- np.log(smoothed_cc.reshape(-1, 1)))
def _joint_log_likelihood(self, X):
"""Calculate the posterior log probability of the samples X"""
check_is_fitted(self, "classes_")
X = check_array(X, accept_sparse='csr')
if self.binarize is not None:
X = binarize(X, threshold=self.binarize)
n_classes, n_features = self.feature_log_prob_.shape
n_samples, n_features_X = X.shape
if n_features_X != n_features:
raise ValueError("Expected input with %d features, got %d instead"
% (n_features, n_features_X))
neg_prob = np.log(1 - np.exp(self.feature_log_prob_))
# Compute neg_prob · (1 - X).T as ∑neg_prob - X · neg_prob
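# Editor's note expanding the identity above: for binary features,
#   log P(x | y=c) = sum_j [ x_j*log(p_cj) + (1 - x_j)*log(1 - p_cj) ]
#                  = sum_j log(1 - p_cj) + sum_j x_j*(log(p_cj) - log(1 - p_cj)),
# which is why X is multiplied by (feature_log_prob_ - neg_prob) and the
# per-class constant neg_prob.sum(axis=1) is added once below.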
jll = safe_sparse_dot(X, (self.feature_log_prob_ - neg_prob).T)
jll += self.class_log_prior_ + neg_prob.sum(axis=1)
return jll
|
bsd-3-clause
|
gladk/woodem
|
py/pre/cylTriax.py
|
3
|
26693
|
# encoding: utf-8
from woo.dem import *
from woo.fem import *
import woo.core
import woo.dem
import woo.pyderived
import woo.pack
import woo
import math
from minieigen import *
import numpy
class CylTriaxTest(woo.core.Preprocessor,woo.pyderived.PyWooObject):
r'''
Preprocessor for cylindrical triaxial test with membrane. The test is run in 3 stages:
* compaction, where random loose packing of spheres is compressed to attain the :math:`\sigma_{\rm iso}` (:obj:`sigIso`) pressure in all directions; during this stage, the cylindrical boundary is rigid and resized along main axes (so it can become (slightly) elliptical); friction is turned off during this stage to achieve better compacity; the compaction finishes when stress level is sufficiently close to the desired one, and unbalanced force drops below :obj:`maxUnbalanced`.
* Membrane stabilization: once the compression is done, membrane around the cylinder is activated -- loaded with surface pressure and made flexible. Friction is activated at this moment. The cylinder may deform axially (stress-controlled), but lateral deformation is now due to membrane-particle interaction. This stage finishes when unbalanced force drops below 1/10th of :obj:`maxUnbalanced` (the reason is that membrane motion is not considered when computing unbalanced force, only mononodal particles are). Surface pressure is adjusted so that the value of lateral stress (in terms of global stress tensor) is close to :obj:`sigIso`. At the same time, friction is increased from initial zero values
* Triaxial compression: displacement-controlled compression along the ``z`` axis, with strain rate increasing until :obj:`maxRates` is reached; the test finishes when axial strain attains :obj:`stopStrain`; during the triaxial phase, lateral pressure is exerted by surface load of membrane elements.
Membrane thickness :obj:`memThick` should be set carefully. The article :cite:`Molenkamp1981` discusses membrane thickness relative to maximum grain size, depending on the ratio of grain stiffness and applied stress.
Supports are from the same material as *particles*, but they may have their friction reduced (when :obj:`suppTanPhi` is given).
.. warning:: There are (unfortunately) quite a few tunables which must be tinkered with to get the desired result (those are in the *Tunables* section: :obj:`dtSafety`, :obj:`massFactor`, :obj:`model.damping <woo.models.ContactModelSelector.damping>`, :obj:`maxUnbalanced`). Several factors are also hard-set in the code, hoping that they will work in different scenarios than those which were tested.
.. youtube:: Li13NrIyMYU
'''
_classTraits=None
_PAT=woo.pyderived.PyAttrTrait # less typing
_attrTraits=[
##
_PAT(Vector2,'htDiam',Vector2(.06,.04),unit='m',startGroup='Geometry & control',doc='Initial size of the cylinder (height and diameter)'),
_PAT(float,'memThick',-1.0,unit='m',doc='Membrane thickness; if negative, relative to largest particle diameter'),
_PAT(float,'cylDiv',40,'Number of segments for cylinder (first component)'),
_PAT(float,'sigIso',-500e3,unit='Pa',doc='Isotropic compaction stress, and lateral stress during the triaxial phase'),
_PAT(float,'stopStrain',-.2,unit=r'%',doc='Goal value of axial deformation in the triaxial phase'),
_PAT(Vector2,'maxRates',(2e-1,1.),'Maximum strain rates during the compaction phase (for all axes), and during the triaxial phase in the axial sense.'),
## materials
_PAT(
woo.models.ContactModelSelector,'model',woo.models.ContactModelSelector(name='linear',damping=.5,numMat=(1,2),matDesc=['particles','membrane'],mats=[
FrictMat(young=0.3e9,ktDivKn=.2,tanPhi=.4,density=1e8),
FrictMat(young=1.1e6,ktDivKn=.2,tanPhi=.4,density=1e8)
]),
startGroup='Materials',
doc='Select contact model. The first material is for particles; the second, optional, material, is for the membrane (the first material is used if there is no second one, but its friction is nevertheless reduced during the compaction phase to :obj:`suppTanPhi`).'
),
_PAT([Vector2,],'psd',[(2e-3,0),(2.5e-3,.2),(4e-3,1.)],unit=['mm','%'],psd=True,doc='Particle size distribution of particles; first value is diameter, second is cumulative mass fraction.'),
_PAT([woo.dem.SphereClumpGeom],'clumps',[],"Clump definitions (if empty, use spheres, not clumps)"),
_PAT(str,'spheresFrom','',existingFilename=True,doc='Instead of generating spheres, load them from file (space-separated colums with x,y,z,r entries). The initial cylinder is made to fit inside the packing\'s axis-aligned bounding box (the user is responsible for having those spheres inside cylinder). Cylinder geometry (:obj:`htDiam`) and particle sizes (:obj:`psd` and :obj:`clumps`) are ignored.\n\n.. note:: :obj:`packCacheDir` is still used as usual to cache packings after compaction (to disable packing cache, set it to empty string), and will take precedence over :obj:`spheresFrom` if compacted packing for the same parameters is already cached.'),
_PAT(float,'suppTanPhi',float('nan'),'Friction at supports; if NaN, the same as for particles is used. Supports use the same material as particles otherwise.'),
## output
_PAT(str,'reportFmt',"/tmp/{tid}.xhtml",filename=True,startGroup="Outputs",doc="Report output format; :obj:`Scene.tags <woo.core.Scene.tags>` can be used."),
_PAT(str,'packCacheDir',".",dirname=True,doc="Directory where to store pre-generated feed packings; if empty, packing will be re-generated every time."),
_PAT(str,'saveFmt',"/tmp/{tid}-{stage}.bin.gz",filename=True,doc='Savefile format; keys are :obj:`Scene.tags <woo.core.Scene.tags>`; additionally ``{stage}`` will be replaced by ``pre-triax`` after membrane stabilization (right before the triaxial compression actually starts) and ``done`` at the very end.'),
_PAT(int,'vtkStep',0,'Periodicity of saving VTK exports'),
_PAT(str,'vtkFmt','/tmp/{title}.{id}-',filename=True,doc='Prefix for VTK exports'),
#_PAT(int,'backupSaveTime',1800,doc='How often to save backup of the simulation (0 or negative to disable)'),
## tunables
_PAT(float,'dtSafety',.9,startGroup='Tunables',doc='Safety factor, stored in :obj:`woo.core.Scene.dtSafety` and used for computing the initial timestep as well as by :obj:`woo.dem.DynDt` later during the simulation.'),
_PAT(float,'massFactor',10.,'Multiply real mass of particles by this number to obtain the :obj:`woo.dem.WeirdTriaxControl.mass` control parameter'),
_PAT(float,'maxUnbalanced',.05,'Maximum unbalanced force at the end of compaction'),
]
def __init__(self,**kw):
woo.core.Preprocessor.__init__(self)
self.wooPyInit(self.__class__,woo.core.Preprocessor,**kw)
def __call__(self):
# preprocessor builds the simulation when called
return prepareCylTriax(self)
def mkFacetCyl(aabb,cylDiv,suppMat,sideMat,suppMask,sideMask,suppBlock,sideBlock,sideThick,mass,inertia):
'Make a closed cylinder from facets. Z is the axis of the cylinder. The position is determined by aabb; the cylinder may be elliptical if the x and y dimensions differ. Return a list of particles and a list of nodes. The first two nodes in the list are the bottom and top central nodes. cylDiv is a tuple specifying the division in the circumferential and axial directions respectively.'
r1,r2=.5*aabb.sizes()[0],.5*aabb.sizes()[1]
C=aabb.center()
zMin,zMax=aabb.min[2],aabb.max[2]
centrals=[woo.core.Node(pos=Vector3(C[0],C[1],zMin)),woo.core.Node(pos=Vector3(C[0],C[1],zMax))]
for c in centrals:
c.dem=woo.dem.DemData()
c.dem.mass=mass
c.dem.inertia=inertia
c.dem.blocked='xyzXYZ'
retParticles=[]
# nodes on the perimeter
thetas=numpy.linspace(2*math.pi,0,num=cylDiv[0],endpoint=False)
xxyy=[Vector2(r1*math.cos(th)+C[0],r2*math.sin(th)+C[1]) for th in thetas]
zz=numpy.linspace(zMin,zMax,num=cylDiv[1],endpoint=True)
nnn=[[woo.core.Node(pos=Vector3(xy[0],xy[1],z)) for xy in xxyy] for z in zz]
for i,nn in enumerate(nnn):
if i==0 or i==(len(nnn)-1): blocked=suppBlock
else: blocked=sideBlock
for n in nn:
n.dem=woo.dem.DemData()
n.dem.mass=mass
n.dem.inertia=inertia
n.dem.blocked=blocked
def mkCap(nn,central,mask,mat):
ret=[]
NaN=float('nan')
for i in range(len(nn)):
# with NaN in fakeVel[0], local linear (interpolated) velocity is zero even if nodes move
# that's what we need at supports, which stretch to the membrane's edge,
# but that is not any physical motion
ret.append(woo.dem.Particle(material=mat,shape=Facet(nodes=[nn[i],nn[(i+1)%len(nn)],central],fakeVel=Vector3(NaN,NaN,NaN)),mask=mask))
nn[i].dem.addParRef(ret[-1])
nn[(i+1)%len(nn)].dem.addParRef(ret[-1])
central.dem.addParRef(ret[-1])
return ret
retParticles+=mkCap(nnn[0],central=centrals[0],mask=suppMask,mat=suppMat)
retParticles+=mkCap(list(reversed(nnn[-1])),central=centrals[-1],mask=suppMask,mat=suppMat) # reverse to have normals outside
def mkAround(nnAC,nnBD,mask,mat,halfThick):
ret=[]
for i in range(len(nnAC)):
A,B,C,D=nnAC[i],nnBD[i],nnAC[(i+1)%len(nnAC)],nnBD[(i+1)%len(nnBD)]
ret+=[woo.dem.Particle(material=mat,shape=Membrane(nodes=fNodes,halfThick=halfThick),mask=mask) for fNodes in ((A,B,D),(A,D,C))]
for n in (A,B,D): n.dem.addParRef(ret[-2])
for n in (A,D,C): n.dem.addParRef(ret[-1])
return ret
for i in range(0,len(nnn)-1):
retParticles+=mkAround(nnn[i],nnn[i+1],mask=sideMask,mat=sideMat,halfThick=.5*sideThick)
for p in retParticles: p.shape.wire=True
import itertools
return retParticles,centrals+list(itertools.chain.from_iterable(nnn))
def prepareCylTriax(pre):
import woo
margin=1.5
rad,ht=.5*pre.htDiam[1],pre.htDiam[0]
bot,top=margin*ht,(1+margin)*ht
xymin=Vector2(margin*rad,margin*rad)
xymax=Vector2((margin+2)*rad,(margin+2)*rad)
xydomain=Vector2((2*margin+2)*rad,(2*margin+2)*rad)
xymid=.5*xydomain
S=woo.core.Scene(fields=[DemField()])
for a in ['reportFmt','packCacheDir','saveFmt','vtkFmt']: setattr(pre,a,woo.utils.fixWindowsPath(getattr(pre,a)))
S.pre=pre.deepcopy()
S.periodic=True
S.cell.setBox(xydomain[0],xydomain[1],(2*margin+1)*ht)
meshMask=0b0011
spheMask=0b0001
loneMask=0b0010
S.dem.loneMask=loneMask
# save materials for later manipulation
S.lab.parMat=pre.model.mats[0]
S.lab.memMat=(pre.model.mats[1] if len(pre.model.mats)>1 else pre.model.mats[0].deepcopy())
S.lab.suppMat=pre.model.mats[0].deepcopy()
S.lab.suppMat.tanPhi=pre.suppTanPhi
## generate particles inside cylinder
# radius minus polygonal imprecision (circle segment), minus halfThickness of the membrane
if pre.memThick<0: pre.memThick*=-pre.psd[-1][0]
innerRad=rad-rad*(1.-math.cos(.5*2*math.pi/pre.cylDiv))-.5*pre.memThick
S.lab.memThick=pre.memThick
if pre.packCacheDir:
import hashlib,os
compactMemoize=pre.packCacheDir+'/'+hashlib.sha1(pre.dumps(format='expr')+'ver3').hexdigest()+'.triax-compact'
print 'Compaction memoize file is ',compactMemoize
else: compactMemoize='' # no memoize file
if pre.packCacheDir and os.path.exists(compactMemoize):
print 'Using memoized compact state'
sp=woo.pack.SpherePack()
sp.load(compactMemoize)
meshAabb=eval(sp.userData)
S.lab.compactMemoize=None # none means we just loaded that file
sp.toSimulation(S,mat=S.lab.parMat)
else:
if pre.spheresFrom:
pack=woo.pack.SpherePack()
pack.load(pre.spheresFrom)
else:
pack=woo.pack.randomLoosePsd(predicate=woo.pack.inCylinder(centerBottom=(xymid[0],xymid[1],bot),centerTop=(xymid[0],xymid[1],top),radius=innerRad),psd=pre.psd,mat=S.lab.parMat,clumps=pre.clumps,returnSpherePack=True)
pack.toSimulation(S)
meshAabb=AlignedBox3((xymin[0],xymin[1],bot),(xymax[0],xymax[1],top))
S.lab.compactMemoize=compactMemoize
sumParMass=sum([p.mass for p in S.dem.par])
# create mesh (supports+membrane)
cylDivHt=int(round(ht/(2*math.pi*rad/pre.cylDiv))) # try to be as square as possible
nodeMass=(ht/cylDivHt)**2*pre.memThick*S.lab.memMat.density # approx mass of square slab of our size
nodeInertia=((3/4.)*(nodeMass/math.pi))**(5/3.)*(6/15.)*math.pi # inertia of a sphere with the same mass
particles,nodes=mkFacetCyl(
aabb=meshAabb,
cylDiv=(pre.cylDiv,cylDivHt),
suppMask=meshMask,sideMask=meshMask,
sideBlock='xyzXYZ',suppBlock='xyzXYZ',
mass=nodeMass,inertia=nodeInertia*Vector3(1,1,1),
suppMat=S.lab.suppMat,sideMat=S.lab.memMat,
sideThick=pre.memThick,
)
S.lab.cylNodes=nodes
S.dem.par.add(particles,nodes=True) # add all nodes (may be the same automatic?)
#S.dt=pre.pWaveSafety*woo.utils.pWaveDt(S,noClumps=True)
S.dtSafety=pre.dtSafety
if pre.clumps: print 'WARNING: clumps used, Scene.dt might not be correct; lower CylTriaxTest.dtSafety (currently %g) if the simulation is unstable'%(pre.dtSafety)
# setup engines
S.engines=[
WeirdTriaxControl(goal=(pre.sigIso,pre.sigIso,pre.sigIso),maxStrainRate=(pre.maxRates[0],pre.maxRates[0],pre.maxRates[0]),relVol=math.pi*innerRad**2*ht/S.cell.volume,stressMask=0b0111,maxUnbalanced=pre.maxUnbalanced,mass=pre.massFactor*sumParMass,doneHook='import woo.pre.cylTriax; woo.pre.cylTriax.compactionDone(S)',label='triax',absStressTol=1e4,relStressTol=1e-2),
]+DemField.minimalEngines(model=pre.model,lawKw=dict(noFrict=True))+[
#InsertionSortCollider([Bo1_Sphere_Aabb(),Bo1_Facet_Aabb()],verletDist=-.05),
#ContactLoop(
# [Cg2_Sphere_Sphere_L6Geom(),Cg2_Facet_Sphere_L6Geom()],
# [Cp2_FrictMat_FrictPhys()],
# [Law2_L6Geom_FrictPhys_IdealElPl(noFrict=True,label='contactLaw')],
# applyForces=True,label='contactLoop'
#),
IntraForce([
In2_Sphere_ElastMat(),
In2_Membrane_ElastMat(bending=True)],
label='intraForce',dead=True # ContactLoop applies forces during compaction
),
# run the same as addPlotData
MeshVolume(mask=S.dem.loneMask,stepPeriod=20,label='meshVolume',dead=False),
woo.core.PyRunner(20,'import woo.pre.cylTriax; woo.pre.cylTriax.addPlotData(S)'),
VtkExport(out=pre.vtkFmt,stepPeriod=pre.vtkStep,what=VtkExport.all,dead=True,label='vtk'),
#Leapfrog(damping=pre.damping,reset=True,label='leapfrog'),
#DynDt(stepPeriod=500),
]
S.lab.stage='compact'
## if spheres were loaded externally, compaction is done just now
##
if S.lab.compactMemoize==None: compactionDone(S)
try:
import woo.gl
S.gl(woo.gl.Renderer(dispScale=(5,5,2),rotScale=0,cell=False),woo.gl.Gl1_DemField(),woo.gl.Gl1_CPhys(),woo.gl.Gl1_Membrane(phiSplit=False,phiWd=1,relPhi=0.,uScale=0.,slices=-1,wire=True),woo.gl.Gl1_Facet(wd=2,slices=-1))
except ImportError: pass
return S
def addPlotData(S):
assert S.lab.stage in ('compact','stabilize','triax')
import woo
t=S.lab.triax
# global stress tensor
sxx,syy,szz=t.stress.diagonal()
dotE=S.cell.gradV.diagonal()
dotEMax=t.maxStrainRate
# net volume, without membrane thickness
vol=S.lab.meshVolume.netVol
# current radial stress
srr=.5*(sxx+syy)
# mean stress
p=t.stress.diagonal().sum()/3.
# deviatoric stress
q=szz-.5*(sxx+syy)
qDivP=(q/p if p!=0 else float('nan'))
if S.lab.stage in ('compact','stabilize'):
## t.strain is log(l/l0) for all components
exx,eyy,ezz=t.strain
err=.5*(exx+eyy)
# volumetric strain is not defined directly, and it is not needed either
eVol=float('nan')
else:
# triaxial phase:
# only axial strain (ezz) and volumetric strain (eVol) are known
#
# set the initial volume, if not yet done
if not hasattr(S.lab,'netVol0'): S.lab.netVol0=S.lab.meshVolume.netVol
# axial strain is known; xy components irrelevant (inactive)
ezz=t.strain[2]
# current net volume / initial net volume
eVol=math.log(vol/S.lab.netVol0)
# radial strain
err=.5*(eVol-ezz)
# undefined
exx=eyy=float('nan')
# deviatoric strain
eDev=ezz-(1/3.)*(2*err+ezz) # FIXME: is this correct?!
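# editor's note (not in the original): in the triaxial phase eVol=2*err+ezz by
# construction, so the expression above equals ezz-eVol/3 = (2/3)*(ezz-err),
# i.e. the axial component of the deviatoric strain tensor (the usual triaxial
# deviatoric strain measure), which addresses the FIXME on the line above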
surfLoad=(float('nan') if S.lab.stage=='compact' else S.lab.surfLoad)
S.plot.addData(unbalanced=woo.utils.unbalancedForce(),i=S.step,
sxx=sxx,syy=syy,
srr=.5*(sxx+syy),szz=szz,
surfLoad=surfLoad,
exx=exx,eyy=eyy,
err=err,ezz=ezz,
dotE=dotE,dotErr=.5*(dotE[0]+dotE[1]),
dotEMax=dotEMax,
dotEMax_z_neg=-dotEMax[2],
eDev=eDev,eVol=eVol,
vol=vol,
p=p,q=q,qDivP=qDivP,
isTriax=(1 if S.lab.stage=='triax' else 0), # to be able to filter data
grossVol=S.lab.meshVolume.vol,
parTanPhi=S.lab.parMat.tanPhi,
memTanPhi=S.lab.memMat.tanPhi,
suppTanPhi=S.lab.suppMat.tanPhi
# save all available energy data
#Etot=O.energy.total()#,**O.energy
)
if not S.plot.plots:
S.plot.plots={
'i':('unbalanced',None,'parTanPhi','memTanPhi','suppTanPhi'),'i ':('srr','szz','surfLoad'),' i':('err','ezz','eVol'),' i ':('vol','grossVol'),'i  ':('dotE_z','dotEMax_z','dotEMax_z_neg')
# energy plot
#' i ':(O.energy.keys,None,'Etot'),
}
S.plot.xylabels={'i ':('step','Stress [Pa]',),' i':('step','Strains [-]','Strains [-]')}
S.plot.labels={
'sxx':r'$\sigma_{xx}$','syy':r'$\sigma_{yy}$','szz':r'$\sigma_{zz}$','srr':r'$\sigma_{rr}$','surfLoad':r'$\sigma_{\rm hydro}$',
'exx':r'$\varepsilon_{xx}$','eyy':r'$\varepsilon_{yy}$','ezz':r'$\varepsilon_{zz}$','err':r'$\varepsilon_{rr}$','eVol':r'$\varepsilon_{v}$','vol':'net volume','grossVol':'midplane volume',
'dotE_x':r'$\dot\varepsilon_{xx}$','dotE_y':r'$\dot\varepsilon_{yy}$','dotE_z':r'$\dot\varepsilon_{zz}$','dotE_rr':r'$\dot\varepsilon_{rr}$','dotEMax_z':r'$\dot\varepsilon_{zz}^{\rm max}$','dotEMax_z_neg':r'$-\dot\varepsilon_{zz}^{\rm max}$'
}
if S.lab.meshVolume.netVol>0:
S.lab.triax.relVol=S.lab.meshVolume.netVol/S.cell.volume
if S.lab.stage=='stabilize':
stable=True
#
# adjust surface load so that we approach the right value
#
sl=S.lab.surfLoad-.002*(srr-S.pre.sigIso)
#print 'Old',S.lab.surfLoad,'new',sl,'(desired',S.pre.sigIso,'current',srr,')'
del S.lab.surfLoad
S.lab.surfLoad=sl
#print 'Changing surface load to ',S.lab.surfLoad,', srr is',srr
for p in S.dem.par:
if isinstance(p.shape,Membrane): p.shape.surfLoad=S.lab.surfLoad
## 2% tolerance on stress
if (srr-S.pre.sigIso)/abs(S.pre.sigIso)>2e-2: stable=False
for m,tp in [(S.lab.parMat,S.lab.parTanPhi),(S.lab.memMat,S.lab.memTanPhi),(S.lab.suppMat,S.lab.suppTanPhi)]:
if m.tanPhi<tp:
m.tanPhi=min(m.tanPhi+.002*tp,tp)
stable=False
# once the membrane is stabilized, decrease strain rate as well
if stable:
# decrease max strain rate along z
# to avoid gross oscillations
if t.maxStrainRate[2]>.01*S.pre.maxRates[1]:
t.maxStrainRate[2]=max(t.maxStrainRate[2]-.01*S.pre.maxRates[0],.01*S.pre.maxRates[1])
stable=False
## don't do this, can take forever
# and then wait for strain rate to drop what will be applied next
# we go down to 1/1000th, that's where we start during the triaxial test then...
# wait for strain rate to settle down
# if abs(S.cell.gradV[2,2])>.001*S.pre.maxRates[1]: stable=False
# green light for triax to finish
if stable: S.lab.triax.goal[0]=0
if S.lab.stage=='triax':
t.maxStrainRate[2]=min(t.maxStrainRate[2]+.001*S.pre.maxRates[1],S.pre.maxRates[1])
def velocityFieldPlots(S,nameBase):
import woo
from woo import post2d
flattener=post2d.CylinderFlatten(useRef=False,axis=2,origin=(.5*S.cell.size[0],.5*S.cell.size[1],(.6/2.2*S.cell.size[2])))
#maxVel=float('inf') #5e-2
#exVel=lambda p: p.vel if p.vel.norm()<=maxVel else p.vel/(p.vel.norm()/maxVel)
exVel=lambda p: p.vel
exVelNorm=lambda p: exVel(p).norm()
from matplotlib.figure import Figure
fVRaw=Figure(); ax=fVRaw.add_subplot(1,1,1)
post2d.plot(post2d.data(S,exVel,flattener),axes=ax,alpha=.3,minlength=.3,cmap='jet')
fV2=Figure(); ax=fV2.add_subplot(1,1,1)
post2d.plot(post2d.data(S,exVel,flattener,stDev=.5*S.pre.psd[0][0],div=(80,80)),axes=ax,minlength=.6,cmap='jet')
fV1=Figure(); ax=fV1.add_subplot(1,1,1)
post2d.plot(post2d.data(S,exVelNorm,flattener,stDev=.5*S.pre.psd[0][0],div=(80,80)),axes=ax,cmap='jet')
outs=[]
for name,fig in [('particle-velocity',fVRaw),('smooth-velocity',fV2),('smooth-velocity-norm',fV1)]:
out=nameBase+'.%s.png'%name
fig.savefig(out)
outs.append(out)
return outs
def membraneStabilized(S):
print 'Membrane stabilized at step',S.step,'with surface pressure',S.lab.surfLoad
S.lab.triax.goal=(0,0,S.pre.stopStrain)
S.lab.triax.stressMask=0b0000 # all strain-controlled
S.lab.triax.maxStrainRate=(0,0,.001*S.pre.maxRates[1])
S.lab.triax.maxUnbalanced=10 # don't care, just compress until done
S.lab.leapfrog.damping=S.pre.model.damping
S.lab.triax.doneHook='import woo.pre.cylTriax; woo.pre.cylTriax.triaxDone(S)'
# this is the real ref config now
S.cell.trsf=Matrix3.Identity
S.cell.refHSize=S.cell.hSize
try:
import woo.gl
woo.gl.Gl1_DemField.updateRefPos=True
except ImportError: pass
# not sure if this does any good actually
for n in S.dem.nodes: n.dem.vel=n.dem.angVel=Vector3.Zero
del S.lab.stage # avoid warning
S.lab.stage='triax'
# this is no longer needed, tanPhi is constant now
S.lab.contactLoop.updatePhys=False
if S.pre.saveFmt:
out=S.pre.saveFmt.format(stage='pre-triax',S=S,**(dict(S.tags)))
print 'Saving to',out
S.save(out)
def compactionDone(S):
if S.lab.compactMemoize: print 'Compaction done at step',S.step
import woo
t=S.lab.triax
# set the current cell configuration to be the reference one
S.cell.trsf=Matrix3.Identity
S.cell.refHSize=S.cell.hSize
t.maxUnbalanced=.1*S.pre.maxUnbalanced # need more stability for triax?
S.lab.leapfrog.damping=.7 # increase damping to a larger value
t.goal=(1,0,S.pre.sigIso) # this will be set to 0 once all friction angles are OK
t.maxStrainRate=(0,0,S.pre.maxRates[0])
t.doneHook='import woo.pre.cylTriax; woo.pre.cylTriax.membraneStabilized(S)'
t.stressMask=0b0100 # z is stress-controlled, xy strain-controlled
# allow faster deformation along x,y to better maintain stresses
# make the membrane flexible: apply force on the membrane
S.lab.contactLoop.applyForces=False
S.lab.intraForce.dead=False
S.lab.meshVolume.dead=False
S.lab.vtk.dead=(S.pre.vtkStep>0 and S.pre.vtkFmt!='')
# free the nodes
top,bot=S.lab.cylNodes[:2]
tol=1e-3*abs(top.pos[2]-bot.pos[2])
for n in S.lab.cylNodes[2:]:
# supports may move in-plane, and also may rotate
if abs(n.pos[2]-top.pos[2])<tol or abs(n.pos[2]-bot.pos[2])<tol:
n.dem.blocked='z'
else: n.dem.blocked=''
# add surface load
S.lab.surfLoad=S.pre.sigIso*(1-(.5*S.lab.memThick)/(.5*S.pre.htDiam[1]))
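# editor's note (interpretation, not from the original author): the factor
# (1 - (memThick/2)/r) scales the nominal sigIso by the ratio of the membrane
# midplane radius (r - memThick/2) to the specimen radius r = htDiam[1]/2,
# presumably to compensate for the membrane half-thickness when the pressure
# is applied as a surface load on the membrane elements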
print 'Initial surface load',S.lab.surfLoad
for p in S.dem.par:
if isinstance(p.shape,Membrane): p.shape.surfLoad=S.lab.surfLoad
# set velocity to 0 (so that when loading packing, the conditions are the same)
for n in S.dem.nodes: n.dem.vel=n.dem.angVel=Vector3.Zero
# restore friction: friction dissipates a lot of energy, and also creates stress decrease along +z
# which we want to have here, not when the membrane is already stable and the triax itself starts
S.lab.contactLaw.noFrict=False
S.lab.contactLoop.updatePhys=True # force updating CPhys at every step
# save desired values of friction angle
S.lab.parTanPhi=S.lab.parMat.tanPhi
S.lab.memTanPhi=S.lab.memMat.tanPhi
S.lab.suppTanPhi=S.lab.suppMat.tanPhi
# and set it to zero
S.lab.parMat.tanPhi=S.lab.memMat.tanPhi=S.lab.suppMat.tanPhi=0
if S.lab.compactMemoize: # if None or '', don't save
#S.save('/tmp/compact.gz')
aabb=AlignedBox3()
for n in S.lab.cylNodes: aabb.extend(n.pos)
sp=woo.pack.SpherePack()
sp.fromSimulation(S)
sp.userData=str(aabb)
sp.save(S.lab.compactMemoize)
print 'Saved compacted packing to',S.lab.compactMemoize
del S.lab.stage # avoid warning
S.lab.stage='stabilize'
def plotBatchResults(db,titleRegex=None,out=None,stressPath=True,sorter=None):
'Hook called from woo.batch.writeResults'
import re,math,woo.batch,os
from matplotlib.figure import Figure
from matplotlib.backends.backend_agg import FigureCanvasAgg
results=woo.batch.dbReadResults(db)
if sorter: results=sorter(results)
if out==None: out='%s.pdf'%re.sub('\.results$','',db)
from matplotlib.ticker import FuncFormatter
kiloPascal=FuncFormatter(lambda x,pos=0: '%g'%(1e-3*x))
percent=FuncFormatter(lambda x,pos=0: '%g'%(1e2*x))
if stressPath:
fig1,fig2,fig3=311,312,313
fig=Figure(figsize=(8,20))
else:
fig1,fig2=121,122
fig=Figure(figsize=(8,4))
fig.subplots_adjust(left=.1,right=.98,bottom=.15,top=.9,wspace=.2,hspace=.25)
canvas=FigureCanvasAgg(fig)
ed_qp=fig.add_subplot(fig1)
ed_qp.set_xlabel(r'$\varepsilon_d$ [%]')
ed_qp.set_ylabel(r'$q/p$')
ed_qp.xaxis.set_major_formatter(percent)
ed_qp.grid(True)
ed_ev=fig.add_subplot(fig2)
ed_ev.set_xlabel(r'$\varepsilon_d$ [%]')
ed_ev.set_ylabel(r'$\varepsilon_v$ [%]')
ed_ev.xaxis.set_major_formatter(percent)
ed_ev.yaxis.set_major_formatter(percent)
ed_ev.grid(True)
if stressPath:
p_q=fig.add_subplot(fig3)
p_q.set_xlabel(r'$p$ [kPa]')
p_q.set_ylabel(r'$q$ [kPa]')
p_q.xaxis.set_major_formatter(kiloPascal)
p_q.yaxis.set_major_formatter(kiloPascal)
p_q.grid(True)
titlesSkipped,titlesIncluded=[],[]
for res in results:
series,pre=res['series'],res['pre']
title=res['title'] if res['title'] else res['sceneId']
if titleRegex and not re.match(titleRegex,title):
titlesSkipped.append(title)
continue
titlesIncluded.append(title)
isTriax=series['isTriax']
# skip the very first number, since that's the transitioning step and strains are still at their old value
ed=series['eDev'][isTriax==1][1:]
ev=series['eVol'][isTriax==1][1:]
p=series['p'][isTriax==1][1:]
q=series['q'][isTriax==1][1:]
qDivP=series['qDivP'][isTriax==1][1:]
ed_qp.plot(ed,qDivP,label=title,alpha=.6)
ed_ev.plot(ed,ev,label=title,alpha=.6)
if stressPath:
p_q.plot(p,q,label=title,alpha=.6)
if not titlesIncluded:
raise RuntimeError('No simulations in %s%s found.'%(db,(' matching %s'%titleRegex if titleRegex else '')))
ed_qp.invert_xaxis()
ed_ev.invert_xaxis()
ed_ev.invert_yaxis()
if stressPath:
p_q.invert_xaxis()
p_q.invert_yaxis()
for ax,loc in [(ed_qp,'lower right'),(ed_ev,'lower right')]+([(p_q,'upper left')] if stressPath else []):
l=ax.legend(loc=loc,labelspacing=.2,prop={'size':7})
l.get_frame().set_alpha(.4)
fig.savefig(out)
print 'Included simulations:',', '.join(titlesIncluded)
if titlesSkipped: print 'Skipped simulations:',', '.join(titlesSkipped)
print 'Batch figure saved to file://%s'%os.path.abspath(out)
def triaxDone(S):
print 'Triaxial done at step',S.step
if S.pre.saveFmt:
out=S.pre.saveFmt.format(stage='done',S=S,**(dict(S.tags)))
print 'Saving to',out
S.save(out)
S.stop()
import woo.utils
(repName,figs)=woo.utils.htmlReport(S,S.pre.reportFmt,'Cylindrical triaxial test',afterHead='',figures=[(None,f) for f in S.plot.plot(noShow=True,subPlots=False)],svgEmbed=True,show=True)
woo.batch.writeResults(S,defaultDb='cylTriax.hdf5',series=S.plot.data,postHooks=[plotBatchResults],simulationName='cylTriax',report='file://'+repName)
|
gpl-2.0
|
FerranGarcia/shape_learning
|
tools/log_replay.py
|
3
|
5727
|
#! /usr/bin/env python
import numpy as np
import matplotlib.pyplot as plt
from ast import literal_eval
import sys
import re
from collections import OrderedDict
from shape_learning.shape_learner_manager import ShapeLearnerManager
from shape_learning.shape_learner import SettingsStruct
from shape_learning.shape_modeler import ShapeModeler
import os.path
import argparse
parser = argparse.ArgumentParser(description='Learn a collection of letters in parallel')
parser.add_argument('word', action="store",
help='The word to be learnt')
action = re.compile('.*INFO - (?P<letter>\w):.*(?P<type>demonstration|generated|learning).*arams: (?P<params>\[[\de., -]*\]). Path: (?P<path>\[[\de., -]*\])')
demo_letters = {}
def generateSettings(shapeType):
paramsToVary = [3]
initialBounds_stdDevMultiples = np.array([[-6, 6]])
doGroupwiseComparison = True
initialParamValue = np.NaN
initialBounds = np.array([[np.NaN, np.NaN]])
init_datasetFile = init_datasetDirectory + '/' + shapeType + '.dat'
update_datasetFile = update_datasetDirectory + '/' + shapeType + '.dat'
demo_datasetFile = demo_datasetDirectory + '/' + shapeType + '.dat'
if not os.path.exists(init_datasetFile):
raise RuntimeError("Dataset not found for shape" + shapeType)
if not os.path.exists(update_datasetFile):
try:
with open(update_datasetFile, 'w') as f:
pass
except IOError:
raise RuntimeError("no writing permission for file"+update_datasetFile)
if not os.path.exists(demo_datasetFile):
try:
with open(demo_datasetFile, 'w') as f:
pass
except IOError:
raise RuntimeError("no writing permission for file"+demo_datasetFile)
try:
datasetParam = init_datasetDirectory + '/params.dat'
with open(datasetParam, 'r') as f:
line = f.readline()
test = line.replace('[','').replace(']\n','')==shapeType
while test==False:
line = f.readline()
if line:
test = line.replace('[','').replace(']\n','')==shapeType
else:
break
if test:
u = f.readline().replace('\n','')
initialParamValue = [(float)(s) for s in u.split(',')]
else:
initialParamValue = 0.0
print("parameters not found for shape "+ shapeType +'\n'+'Default : 0.0')
except IOError:
raise RuntimeError("no reading permission for file"+datasetParam)
settings = SettingsStruct(shape_learning = shapeType,
paramsToVary = paramsToVary,
doGroupwiseComparison = True,
initDatasetFile = init_datasetFile,
updateDatasetFiles = [update_datasetFile,demo_datasetFile],
paramFile = datasetParam,
initialBounds = initialBounds,
initialBounds_stdDevMultiples = initialBounds_stdDevMultiples,
initialParamValue = initialParamValue,
minParamDiff = 0.4)
return settings
def showShape(shape ):
plt.figure(1)
plt.clf()
ShapeModeler.normaliseAndShowShape(shape)
if __name__ == "__main__":
plt.ion()
with open(sys.argv[1], 'r') as log:
for line in log.readlines():
found = action.search(line)
if found:
letter = found.group('letter')
type = found.group('type')
params = literal_eval(found.group('params'))
path = literal_eval(found.group('path'))
if type=='demonstration':
demo_letters.setdefault(letter,[]).append(path)
# if there is a bug inside the log file, e.g. the letter doesn't match the shape,
# then we want to show the shape and enter the correct letter by hand:
#-------------------------------------------------------------------
#userShape = path
#userShape = np.reshape(userShape, (-1, 1))
#showShape(userShape)
#letter = raw_input('letter ? ')
for letter, value in demo_letters.items():
for path in value:
userShape = path
userShape = np.reshape(userShape, (-1, 1))
import inspect
fileName = inspect.getsourcefile(ShapeModeler)
installDirectory = fileName.split('/lib')[0]
init_datasetDirectory = installDirectory + '/share/shape_learning/letter_model_datasets/alexis_set_for_children'
update_datasetDirectory = installDirectory + '/share/shape_learning/letter_model_datasets/alexis_set_for_children'
demo_datasetDirectory = installDirectory + '/share/shape_learning/letter_model_datasets/diego_set'
if not os.path.exists(init_datasetDirectory):
raise RuntimeError("initial dataset directory not found !")
if not os.path.exists(update_datasetDirectory):
os.makedirs(update_datasetDirectory)
wordManager = ShapeLearnerManager(generateSettings)
wordSeenBefore = wordManager.newCollection(letter)
shape = wordManager.startNextShapeLearner()
# learning :
print('Received demo for letter ' + letter)
shape = wordManager.respondToDemonstration(0, userShape)
wordManager.save_all(0)
wordManager.save_params(0)
|
isc
|
jaspreetj/SA_based_FPGA_placer
|
SAv3.py
|
1
|
10052
|
#Author: Jaspreet Jhoja
#contact:[email protected]
import random,copy, statistics, timeit, threading, math
from math import *
import numpy as np
import matplotlib.pyplot as plt
import plot as pt
import queue as Queue
print("SIMULATED ANNEALING BASED PLACER")
files = ['cm138a.txt', 'cm150a.txt', 'cm151a.txt', 'cm162a.txt', 'alu2.txt', 'C880.txt',
'e64.txt', 'apex1.txt', 'cps.txt', 'paira.txt', 'pairb.txt', 'apex4.txt']
for i in range(len(files)):
print('['+str(i)+']'+' - '+ files[i])
choice = input("choose files to run")
gui_choice = input("Do you want to see the progress in a GUI? y/n")
#if you want to use custom iterations and temperature, define here
user_iterations = 0
user_temp = 0
#set hybrid to 1 to run a mix of temperature-update strategies
hybrid = 0
#for choice in range(len(files)):
for i in range(1):
filename = files[int(choice)]
print(filename)
global nets, nodes, grid, netsn, nodesn, plot_x, plot_y
nets = [] #net details
nodes = {} #store all nodes in a dictionary
grid = [] #stores grid size
netsn = 0 #number of nets
nodesn = 0 #number of nodes
optimum = {}#optimum results
plot_x = []
plot_y = []
old_swap = [None, None]#previously swapped nodes
new_swap = [None, None] #currently proposed moves to swap
## Simulated Annealing variables
current_cost = 0 #initial or current cost
new_cost = 0 #new proposed cost
old_temp = 0 #previous temperature
current_temp = 0 #current or initial temperature
iterations = 0 #iterations
##################### NOTES ###################
#to get sinks for a node
#get nodedata by nodes[number][0]
#get sinks list by nodes[number][1]
#function to read file
def readfile(filename):
global grid, netsn, nodesn, nets, nodes
#split lines to read one by one
lines = open(filename).read().splitlines()
#extract grid
grid = [int(lines[0].split(' ')[-1]),int(lines[0].split(' ')[-2])]
nets = []
#iterate lines, extract number of nets and individual net nodes
for i in range(len(lines)):
if(i==0):
netsn = int(lines[i].split(' ')[-3]) #extract number of nets
nodesn = int(lines[i].split(' ')[0]) #extract number of nodes
#generate coordinates for nodes which we will use for cost eval
coordinates = []
for c in range(grid[0]):
for r in range(grid[1]):
coordinates.append([c,r*2])
#based on number of nodes, create dictionary keys
for each_node in range(grid[0]*grid[1]):
nodes[str(each_node)] = [coordinates[each_node],[]]
else:
#separate the net details and put them in a list
temp = list(filter(None,lines[i].split(' ')[1:]))
if(len(temp)>0):
nets.append(temp)
# associate nodes to their connections
source =temp[0]
sinks = temp[1:]
for each_sink in sinks:
nodes[source][1].append([each_sink])
# for nodes with no sinks, set None as their sink so no arrows emerge from those nodes
for each_node in nodes:
sink_list = nodes[str(each_node)][1]
if(len(sink_list)==0):
nodes[str(each_node)][1].append(None)
#run the read function
readfile(filename)
# select two nodes which have not been repeated in the previous swap
def select_nodes(nodes_dict, previous_swaps):
new_lot = []
while True:
if(len(new_lot)==2):
#check if i am swapping two unoccupied slots
a = new_lot[0]
b = new_lot[1]
coor_a = nodes_dict[a][0][0]
coor_b = nodes_dict[b][0][0]
if(coor_a == None and coor_b == None):
print(new_lot)
new_lot = []
else:
return new_lot
new_node = random.choice([x for x in range(grid[0]*grid[1]) if x not in previous_swaps])
new_lot.append(str(new_node))
# accept moves
def make_swap(nodes_dict,swap):
a = swap[0]
b = swap[1]
coor_a = nodes_dict[a][0]
coor_b = nodes_dict[b][0]
nodes_dict[a][0] = coor_b
nodes_dict[b][0] = coor_a
return(nodes_dict)
#function to calculate cost
def calculate_cost(nodes_dict, nets):
cost = []
for each_net in nets:
net_x = []
net_y = []
dx = 0
dy = 0
for each_node in each_net:
data = nodes_dict[each_node][0]
net_x.append(data[0])
net_y.append(data[1])
#calculate half-perimeter
dx = abs(max(net_x) - min(net_x))
dy = abs(max(net_y) - min(net_y))
cost.append(dx+dy)
return(sum(cost))
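# illustrative example (editor's addition): for a net whose nodes sit at
# (0,0), (2,4) and (3,2), the half-perimeter wirelength is
# |3-0| + |4-0| = 7; the total cost is the sum of this value over all nets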
#timer function
start_time = timeit.default_timer()
#setup SA
if(user_iterations == 0):
iterations = int(10*((nodesn)**(4/3)))
else:
iterations = user_iterations
initial_cost = calculate_cost(nodes, nets)
sigma = 0 #std dev of cost of accepted solutions
sigma_list = [] #list to store solutions
r_val = []
#set initial temperature
if(user_temp == 0):
for i in range(50):
sigma_node = copy.deepcopy(nodes)
sigma_swap = select_nodes(sigma_node, old_swap)
old_swap = sigma_swap
sigma_node = make_swap(sigma_node, sigma_swap)
temp_cost = calculate_cost(sigma_node, nets)
if(temp_cost<initial_cost):
sigma_list.append(temp_cost)
#calculate the standard deviation of accepted sigma values
sigma = statistics.stdev(sigma_list)
current_temp = 20*sigma
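# editor's note (assumption about intent): starting at T0 = 20*stdev of the
# improving trial costs is a common heuristic for picking an initial
# temperature hot enough that most moves are accepted early on; the 50 trial
# swaps above only probe the cost landscape on deep copies, so `nodes` itself
# is left untouched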
print(initial_cost, current_temp, iterations)
old_swap=[None, None]
#start with simulated annealing
#start plotting
if(gui_choice == "y"):
queue = Queue.Queue()
plot_thread = threading.Thread(target=pt.plotter, args=(queue, ))
plot_thread.start()
#check if cost is being repeated
isrepeating = 0
#record the best (lowest-cost) placement seen so far
optimum = nodes
while current_temp!=0:
sigma_list = []
for i in range(iterations):
current_cost = calculate_cost(nodes, nets)
#copy nodes data
temp_nodes = copy.deepcopy(nodes)
#get nodes to swap for temp_nodes
new_swap = select_nodes(temp_nodes, old_swap)
old_swap = new_swap
#modify node data
temp_nodes = make_swap(temp_nodes, new_swap)
#get cost for new swap
new_cost = calculate_cost(temp_nodes, nets)
dc = new_cost - current_cost
#if good
if(dc<0):
nodes = temp_nodes
sigma_list.append(new_cost)
#update best
#if bad
else:
r = random.random()
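# editor's note: standard Metropolis acceptance rule -- an uphill move of
# size dc is accepted with probability exp(-dc/T)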
if(r< math.e**(-dc/current_temp)):
nodes = temp_nodes
sigma_list.append(new_cost)
if(calculate_cost(nodes, nets)<calculate_cost(optimum, nets)):
optimum = nodes
#current_temp = 0.98 *current_temp
#acceptance ratio of moves accepted to total tried
R_accept = len(sigma_list)/iterations
previous_temp = copy.deepcopy(current_temp)
if(0.96 < R_accept):
alpha = 0.5
elif(0.8 < R_accept and R_accept<=0.96):
alpha = 0.9
elif(0.05 < R_accept and R_accept<=0.8):
if(iterations==500):
alpha = 0.98
else:
alpha = 0.95
elif(R_accept<=0.05):
alpha = 0.8
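# editor's note (hedged comparison): this acceptance-ratio-driven choice of
# alpha closely resembles the adaptive annealing schedule used in the VPR
# placer (Betz & Rose), where the temperature is reduced more aggressively
# when the acceptance rate is very high or very low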
r_val.append(alpha)
try:
if(hybrid == 1):
#check if temperature is stuck
if(isrepeating ==5):
current_temp = alpha*current_temp
isrepeating = 0
elif(isrepeating >=10):
current_temp = 0
else:
sigma = statistics.stdev(sigma_list)
current_temp = current_temp *math.e**(-0.7*(current_temp/sigma))
isrepeating = 0
else:
current_temp = alpha*current_temp
isrepeating = 0
except Exception as e:
pass
#COMMENT OUT THE NEXT LINE IF YOU DON'T WANT ANY UPDATES
print(alpha,calculate_cost(nodes, nets), current_temp )
if(str(previous_temp)[:7] == str(current_temp)[:7]):
isrepeating = isrepeating + 1
#print(isrepeating)
if(current_temp<5e-6):
current_temp = 0
#add for plotting
if(gui_choice == "y"):
pt.update_data_sync(current_temp, calculate_cost(nodes, nets))
queue.put("GO")
# print(calculate_cost(nodes,nets), current_temp)
final_cost = calculate_cost(nodes, nets)
elapsed = timeit.default_timer() - start_time
print("time elapsed : ", elapsed)
print("final cost :", final_cost)
if(gui_choice == 'y'):
queue.put('BYE')
|
mit
|
StongeEtienne/dipy
|
dipy/data/__init__.py
|
1
|
12766
|
"""
Read test or example data
"""
from __future__ import division, print_function, absolute_import
import sys
import json
from nibabel import load
from os.path import join as pjoin, dirname
import gzip
import numpy as np
from dipy.core.gradients import GradientTable, gradient_table
from dipy.core.sphere import Sphere, HemiSphere
from dipy.sims.voxel import SticksAndBall
from dipy.data.fetcher import (fetch_scil_b0,
read_scil_b0,
fetch_stanford_hardi,
read_stanford_hardi,
fetch_taiwan_ntu_dsi,
read_taiwan_ntu_dsi,
fetch_sherbrooke_3shell,
read_sherbrooke_3shell,
fetch_isbi2013_2shell,
read_isbi2013_2shell,
read_stanford_labels,
fetch_syn_data,
read_syn_data,
fetch_stanford_t1,
read_stanford_t1,
fetch_stanford_pve_maps,
read_stanford_pve_maps,
fetch_viz_icons,
read_viz_icons,
fetch_bundles_2_subjects,
read_bundles_2_subjects,
fetch_cenir_multib,
read_cenir_multib,
fetch_mni_template,
read_mni_template)
from ..utils.arrfuncs import as_native_array
from dipy.tracking.streamline import relist_streamlines
if sys.version_info[0] < 3:
import cPickle
def loads_compat(bytes):
return cPickle.loads(bytes)
else: # Python 3
import pickle
# Need to load pickles saved in Python 2
def loads_compat(bytes):
return pickle.loads(bytes, encoding='latin1')
DATA_DIR = pjoin(dirname(__file__), 'files')
SPHERE_FILES = {
'symmetric362': pjoin(DATA_DIR, 'evenly_distributed_sphere_362.npz'),
'symmetric642': pjoin(DATA_DIR, 'evenly_distributed_sphere_642.npz'),
'symmetric724': pjoin(DATA_DIR, 'evenly_distributed_sphere_724.npz'),
'repulsion724': pjoin(DATA_DIR, 'repulsion724.npz'),
'repulsion100': pjoin(DATA_DIR, 'repulsion100.npz')
}
class DataError(Exception):
pass
def get_sim_voxels(name='fib1'):
""" provide some simulated voxel data
Parameters
------------
name : str, which file?
'fib0', 'fib1' or 'fib2'
Returns
---------
dix : dictionary, where dix['data'] returns a 2d array
where every row is a simulated voxel with different orientation
Examples
----------
>>> from dipy.data import get_sim_voxels
>>> sv=get_sim_voxels('fib1')
>>> sv['data'].shape == (100, 102)
True
>>> sv['fibres']
'1'
>>> sv['gradients'].shape == (102, 3)
True
>>> sv['bvals'].shape == (102,)
True
>>> sv['snr']
'60'
>>> sv2=get_sim_voxels('fib2')
>>> sv2['fibres']
'2'
>>> sv2['snr']
'80'
Notes
-------
These sim voxels were provided by M.M. Correia using Rician noise.
"""
if name == 'fib0':
fname = pjoin(DATA_DIR, 'fib0.pkl.gz')
if name == 'fib1':
fname = pjoin(DATA_DIR, 'fib1.pkl.gz')
if name == 'fib2':
fname = pjoin(DATA_DIR, 'fib2.pkl.gz')
return loads_compat(gzip.open(fname, 'rb').read())
def get_skeleton(name='C1'):
""" provide skeletons generated from Local Skeleton Clustering (LSC)
Parameters
-----------
name : str, 'C1' or 'C3'
Returns
-------
dix : dictionary
Examples
---------
>>> from dipy.data import get_skeleton
>>> C=get_skeleton('C1')
>>> len(C.keys())
117
>>> for c in C: break
>>> sorted(C[c].keys())
['N', 'hidden', 'indices', 'most']
"""
if name == 'C1':
fname = pjoin(DATA_DIR, 'C1.pkl.gz')
if name == 'C3':
fname = pjoin(DATA_DIR, 'C3.pkl.gz')
return loads_compat(gzip.open(fname, 'rb').read())
def get_sphere(name='symmetric362'):
''' provide triangulated spheres
Parameters
------------
name : str
which sphere - one of:
* 'symmetric362'
* 'symmetric642'
* 'symmetric724'
* 'repulsion724'
* 'repulsion100'
Returns
-------
sphere : a dipy.core.sphere.Sphere class instance
Examples
--------
>>> import numpy as np
>>> from dipy.data import get_sphere
>>> sphere = get_sphere('symmetric362')
>>> verts, faces = sphere.vertices, sphere.faces
>>> verts.shape == (362, 3)
True
>>> faces.shape == (720, 3)
True
>>> verts, faces = get_sphere('not a sphere name') #doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
DataError: No sphere called "not a sphere name"
'''
fname = SPHERE_FILES.get(name)
if fname is None:
raise DataError('No sphere called "%s"' % name)
res = np.load(fname)
# Set to native byte order to avoid errors in compiled routines for
# big-endian platforms, when using these spheres.
return Sphere(xyz=as_native_array(res['vertices']),
faces=as_native_array(res['faces']))
default_sphere = HemiSphere.from_sphere(get_sphere('symmetric724'))
small_sphere = HemiSphere.from_sphere(get_sphere('symmetric362'))
def get_data(name='small_64D'):
""" provides filenames of some test datasets or other useful parametrisations
Parameters
----------
name : str
the filename/s of which dataset to return, one of:
'small_64D' small region of interest nifti,bvecs,bvals 64 directions
'small_101D' small region of interest nifti,bvecs,bvals 101 directions
'aniso_vox' volume with anisotropic voxel size as Nifti
'fornix' 300 tracks in Trackvis format (from Pittsburgh
Brain Competition)
'gqi_vectors' the scanner wave vectors needed for a GQI acquisitions
of 101 directions tested on Siemens 3T Trio
'small_25' small ROI (10x8x2) DTI data (b value 2000, 25 directions)
'test_piesno' slice of N=8, K=14 diffusion data
'reg_c' small 2D image used for validating registration
'reg_o' small 2D image used for validation registration
'cb_2' two vectorized cingulum bundles
Returns
-------
fnames : tuple
filenames for dataset
Examples
----------
>>> import numpy as np
>>> from dipy.data import get_data
>>> fimg,fbvals,fbvecs=get_data('small_101D')
>>> bvals=np.loadtxt(fbvals)
>>> bvecs=np.loadtxt(fbvecs).T
>>> import nibabel as nib
>>> img=nib.load(fimg)
>>> data=img.get_data()
>>> data.shape == (6, 10, 10, 102)
True
>>> bvals.shape == (102,)
True
>>> bvecs.shape == (102, 3)
True
"""
if name == 'small_64D':
fbvals = pjoin(DATA_DIR, 'small_64D.bvals.npy')
fbvecs = pjoin(DATA_DIR, 'small_64D.gradients.npy')
fimg = pjoin(DATA_DIR, 'small_64D.nii')
return fimg, fbvals, fbvecs
if name == '55dir_grad.bvec':
return pjoin(DATA_DIR, '55dir_grad.bvec')
if name == 'small_101D':
fbvals = pjoin(DATA_DIR, 'small_101D.bval')
fbvecs = pjoin(DATA_DIR, 'small_101D.bvec')
fimg = pjoin(DATA_DIR, 'small_101D.nii.gz')
return fimg, fbvals, fbvecs
if name == 'aniso_vox':
return pjoin(DATA_DIR, 'aniso_vox.nii.gz')
if name == 'fornix':
return pjoin(DATA_DIR, 'tracks300.trk')
if name == 'gqi_vectors':
return pjoin(DATA_DIR, 'ScannerVectors_GQI101.txt')
if name == 'dsi515btable':
return pjoin(DATA_DIR, 'dsi515_b_table.txt')
if name == 'dsi4169btable':
return pjoin(DATA_DIR, 'dsi4169_b_table.txt')
if name == 'grad514':
return pjoin(DATA_DIR, 'grad_514.txt')
if name == "small_25":
fbvals = pjoin(DATA_DIR, 'small_25.bval')
fbvecs = pjoin(DATA_DIR, 'small_25.bvec')
fimg = pjoin(DATA_DIR, 'small_25.nii.gz')
return fimg, fbvals, fbvecs
if name == "S0_10":
fimg = pjoin(DATA_DIR, 'S0_10slices.nii.gz')
return fimg
if name == "test_piesno":
fimg = pjoin(DATA_DIR, 'test_piesno.nii.gz')
return fimg
if name == "reg_c":
return pjoin(DATA_DIR, 'C.npy')
if name == "reg_o":
return pjoin(DATA_DIR, 'circle.npy')
if name == 'cb_2':
return pjoin(DATA_DIR, 'cb_2.npz')
if name == "t1_coronal_slice":
return pjoin(DATA_DIR, 't1_coronal_slice.npy')
def _gradient_from_file(filename):
"""Reads a gradient file saved as a text file compatible with np.loadtxt
and saved in the dipy data directory"""
def gtab_getter():
gradfile = pjoin(DATA_DIR, filename)
grad = np.loadtxt(gradfile, delimiter=',')
gtab = GradientTable(grad)
return gtab
return gtab_getter
get_3shell_gtab = _gradient_from_file("gtab_3shell.txt")
get_isbi2013_2shell_gtab = _gradient_from_file("gtab_isbi2013_2shell.txt")
get_gtab_taiwan_dsi = _gradient_from_file("gtab_taiwan_dsi.txt")
def dsi_voxels():
fimg, fbvals, fbvecs = get_data('small_101D')
bvals = np.loadtxt(fbvals)
bvecs = np.loadtxt(fbvecs).T
img = load(fimg)
data = img.get_data()
gtab = gradient_table(bvals, bvecs)
return data, gtab
def dsi_deconv_voxels():
gtab = gradient_table(np.loadtxt(get_data('dsi515btable')))
data = np.zeros((2, 2, 2, 515))
for ix in range(2):
for iy in range(2):
for iz in range(2):
data[ix, iy, iz], dirs = SticksAndBall(gtab,
d=0.0015,
S0=100,
angles=[(0, 0),
(90, 0)],
fractions=[50, 50],
snr=None)
return data, gtab
def mrtrix_spherical_functions():
"""Spherical functions represented by spherical harmonic coefficients and
evaluated on a discrete sphere.
Returns
-------
func_coef : array (2, 3, 4, 45)
Functions represented by the coefficients associated with the
mrtrix spherical harmonic basis of order 8.
func_discrete : array (2, 3, 4, 81)
Functions evaluated on `sphere`.
sphere : Sphere
The discrete sphere, points on the surface of a unit sphere, used to
evaluate the functions.
Notes
-----
These coefficients were obtained by using the dwi2SH command of mrtrix.
"""
func_discrete = load(pjoin(DATA_DIR, "func_discrete.nii.gz")).get_data()
func_coef = load(pjoin(DATA_DIR, "func_coef.nii.gz")).get_data()
gradients = np.loadtxt(pjoin(DATA_DIR, "sphere_grad.txt"))
# gradients[0] and the first volume of func_discrete,
# func_discrete[..., 0], are associated with the b=0 signal.
# gradients[:, 3] are the b-values for each gradient/volume.
sphere = Sphere(xyz=gradients[1:, :3])
return func_coef, func_discrete[..., 1:], sphere
dipy_cmaps = None
def get_cmap(name):
"""Makes a callable, similar to maptlotlib.pyplot.get_cmap"""
global dipy_cmaps
if dipy_cmaps is None:
filename = pjoin(DATA_DIR, "dipy_colormaps.json")
with open(filename) as f:
dipy_cmaps = json.load(f)
desc = dipy_cmaps.get(name)
if desc is None:
return None
def simple_cmap(v):
"""Emulates matplotlib colormap callable"""
rgba = np.ones((len(v), 4))
for i, color in enumerate(('red', 'green', 'blue')):
x, y0, y1 = zip(*desc[color])
# Matplotlib allows more complex colormaps, but for users who do
# not have Matplotlib dipy makes a few simple colormaps available.
            # These colormaps are simple because y0 == y1, and therefore we
# ignore y1 here.
rgba[:, i] = np.interp(v, x, y0)
return rgba
return simple_cmap
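# A hedged usage sketch for get_cmap (comments only; the available names
# depend on the entries shipped in dipy_colormaps.json -- "blues" here is
# just an assumed example):
# >>> cmap = get_cmap("blues")
# >>> rgba = cmap(np.linspace(0, 1, 5))   # (5, 4) array of RGBA values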
def two_cingulum_bundles():
fname = get_data('cb_2')
res = np.load(fname)
cb1 = relist_streamlines(res['points'], res['offsets'])
cb2 = relist_streamlines(res['points2'], res['offsets2'])
return cb1, cb2
def matlab_life_results():
matlab_rmse = np.load(pjoin(DATA_DIR, 'life_matlab_rmse.npy'))
matlab_weights = np.load(pjoin(DATA_DIR, 'life_matlab_weights.npy'))
return matlab_rmse, matlab_weights
|
bsd-3-clause
|
cjayb/mne-python
|
mne/externals/tqdm/_tqdm/gui.py
|
14
|
11601
|
"""
GUI progressbar decorator for iterators.
Includes a default (x)range iterator printing to stderr.
Usage:
>>> from tqdm.gui import trange[, tqdm]
>>> for i in trange(10): #same as: for i in tqdm(xrange(10))
... ...
"""
# future division is important to divide integers and get as
# a result precise floating numbers (instead of truncated int)
from __future__ import division, absolute_import
# import compatibility functions and utilities
from .utils import _range
# to inherit from the tqdm class
from .std import tqdm as std_tqdm
from .std import TqdmExperimentalWarning
from warnings import warn
__author__ = {"github.com/": ["casperdcl", "lrq3000"]}
__all__ = ['tqdm_gui', 'tgrange', 'tqdm', 'trange']
class tqdm_gui(std_tqdm): # pragma: no cover
"""
Experimental GUI version of tqdm!
"""
# TODO: @classmethod: write() on GUI?
def __init__(self, *args, **kwargs):
import matplotlib as mpl
import matplotlib.pyplot as plt
from collections import deque
kwargs['gui'] = True
super(tqdm_gui, self).__init__(*args, **kwargs)
# Initialize the GUI display
if self.disable or not kwargs['gui']:
return
warn('GUI is experimental/alpha', TqdmExperimentalWarning, stacklevel=2)
self.mpl = mpl
self.plt = plt
self.sp = None
# Remember if external environment uses toolbars
self.toolbar = self.mpl.rcParams['toolbar']
self.mpl.rcParams['toolbar'] = 'None'
self.mininterval = max(self.mininterval, 0.5)
self.fig, ax = plt.subplots(figsize=(9, 2.2))
# self.fig.subplots_adjust(bottom=0.2)
total = len(self)
if total is not None:
self.xdata = []
self.ydata = []
self.zdata = []
else:
self.xdata = deque([])
self.ydata = deque([])
self.zdata = deque([])
self.line1, = ax.plot(self.xdata, self.ydata, color='b')
self.line2, = ax.plot(self.xdata, self.zdata, color='k')
ax.set_ylim(0, 0.001)
if total is not None:
ax.set_xlim(0, 100)
ax.set_xlabel('percent')
self.fig.legend((self.line1, self.line2), ('cur', 'est'),
loc='center right')
# progressbar
self.hspan = plt.axhspan(0, 0.001,
xmin=0, xmax=0, color='g')
else:
# ax.set_xlim(-60, 0)
ax.set_xlim(0, 60)
ax.invert_xaxis()
ax.set_xlabel('seconds')
ax.legend(('cur', 'est'), loc='lower left')
ax.grid()
# ax.set_xlabel('seconds')
ax.set_ylabel((self.unit if self.unit else 'it') + '/s')
if self.unit_scale:
plt.ticklabel_format(style='sci', axis='y',
scilimits=(0, 0))
ax.yaxis.get_offset_text().set_x(-0.15)
# Remember if external environment is interactive
self.wasion = plt.isinteractive()
plt.ion()
self.ax = ax
def __iter__(self):
# TODO: somehow allow the following:
# if not self.gui:
# return super(tqdm_gui, self).__iter__()
iterable = self.iterable
if self.disable:
for obj in iterable:
yield obj
return
# ncols = self.ncols
mininterval = self.mininterval
maxinterval = self.maxinterval
miniters = self.miniters
dynamic_miniters = self.dynamic_miniters
last_print_t = self.last_print_t
last_print_n = self.last_print_n
n = self.n
# dynamic_ncols = self.dynamic_ncols
smoothing = self.smoothing
avg_time = self.avg_time
time = self._time
for obj in iterable:
yield obj
# Update and possibly print the progressbar.
# Note: does not call self.update(1) for speed optimisation.
n += 1
# check counter first to avoid calls to time()
if n - last_print_n >= self.miniters:
miniters = self.miniters # watch monitoring thread changes
delta_t = time() - last_print_t
if delta_t >= mininterval:
cur_t = time()
delta_it = n - last_print_n
# EMA (not just overall average)
if smoothing and delta_t and delta_it:
rate = delta_t / delta_it
avg_time = self.ema(rate, avg_time, smoothing)
self.avg_time = avg_time
self.n = n
self.display()
# If no `miniters` was specified, adjust automatically
# to the max iteration rate seen so far between 2 prints
if dynamic_miniters:
if maxinterval and delta_t >= maxinterval:
# Adjust miniters to time interval by rule of 3
if mininterval:
# Set miniters to correspond to mininterval
miniters = delta_it * mininterval / delta_t
else:
# Set miniters to correspond to maxinterval
miniters = delta_it * maxinterval / delta_t
elif smoothing:
# EMA-weight miniters to converge
# towards the timeframe of mininterval
rate = delta_it
if mininterval and delta_t:
rate *= mininterval / delta_t
miniters = self.ema(rate, miniters, smoothing)
else:
# Maximum nb of iterations between 2 prints
miniters = max(miniters, delta_it)
# Store old values for next call
self.n = self.last_print_n = last_print_n = n
self.last_print_t = last_print_t = cur_t
self.miniters = miniters
# Closing the progress bar.
# Update some internal variables for close().
self.last_print_n = last_print_n
self.n = n
self.miniters = miniters
self.close()
def update(self, n=1):
# if not self.gui:
# return super(tqdm_gui, self).close()
if self.disable:
return
if n < 0:
self.last_print_n += n # for auto-refresh logic to work
self.n += n
# check counter first to reduce calls to time()
if self.n - self.last_print_n >= self.miniters:
delta_t = self._time() - self.last_print_t
if delta_t >= self.mininterval:
cur_t = self._time()
delta_it = self.n - self.last_print_n # >= n
# elapsed = cur_t - self.start_t
# EMA (not just overall average)
if self.smoothing and delta_t and delta_it:
rate = delta_t / delta_it
self.avg_time = self.ema(
rate, self.avg_time, self.smoothing)
self.display()
# If no `miniters` was specified, adjust automatically to the
# maximum iteration rate seen so far between two prints.
# e.g.: After running `tqdm.update(5)`, subsequent
# calls to `tqdm.update()` will only cause an update after
# at least 5 more iterations.
if self.dynamic_miniters:
if self.maxinterval and delta_t >= self.maxinterval:
if self.mininterval:
self.miniters = delta_it * self.mininterval \
/ delta_t
else:
self.miniters = delta_it * self.maxinterval \
/ delta_t
elif self.smoothing:
self.miniters = self.smoothing * delta_it * \
(self.mininterval / delta_t
if self.mininterval and delta_t
else 1) + \
(1 - self.smoothing) * self.miniters
else:
self.miniters = max(self.miniters, delta_it)
# Store old values for next call
self.last_print_n = self.n
self.last_print_t = cur_t
def close(self):
# if not self.gui:
# return super(tqdm_gui, self).close()
if self.disable:
return
self.disable = True
with self.get_lock():
self._instances.remove(self)
# Restore toolbars
self.mpl.rcParams['toolbar'] = self.toolbar
# Return to non-interactive mode
if not self.wasion:
self.plt.ioff()
if not self.leave:
self.plt.close(self.fig)
def display(self):
n = self.n
cur_t = self._time()
elapsed = cur_t - self.start_t
delta_it = n - self.last_print_n
delta_t = cur_t - self.last_print_t
# Inline due to multiple calls
total = self.total
xdata = self.xdata
ydata = self.ydata
zdata = self.zdata
ax = self.ax
line1 = self.line1
line2 = self.line2
# instantaneous rate
y = delta_it / delta_t
# overall rate
z = n / elapsed
# update line data
xdata.append(n * 100.0 / total if total else cur_t)
ydata.append(y)
zdata.append(z)
# Discard old values
# xmin, xmax = ax.get_xlim()
# if (not total) and elapsed > xmin * 1.1:
if (not total) and elapsed > 66:
xdata.popleft()
ydata.popleft()
zdata.popleft()
ymin, ymax = ax.get_ylim()
if y > ymax or z > ymax:
ymax = 1.1 * y
ax.set_ylim(ymin, ymax)
ax.figure.canvas.draw()
if total:
line1.set_data(xdata, ydata)
line2.set_data(xdata, zdata)
try:
poly_lims = self.hspan.get_xy()
except AttributeError:
self.hspan = self.plt.axhspan(
0, 0.001, xmin=0, xmax=0, color='g')
poly_lims = self.hspan.get_xy()
poly_lims[0, 1] = ymin
poly_lims[1, 1] = ymax
poly_lims[2] = [n / total, ymax]
poly_lims[3] = [poly_lims[2, 0], ymin]
if len(poly_lims) > 4:
poly_lims[4, 1] = ymin
self.hspan.set_xy(poly_lims)
else:
t_ago = [cur_t - i for i in xdata]
line1.set_data(t_ago, ydata)
line2.set_data(t_ago, zdata)
ax.set_title(self.format_meter(
n, total, elapsed, 0,
self.desc, self.ascii, self.unit, self.unit_scale,
1 / self.avg_time if self.avg_time else None, self.bar_format,
self.postfix, self.unit_divisor),
fontname="DejaVu Sans Mono", fontsize=11)
self.plt.pause(1e-9)
def tgrange(*args, **kwargs):
"""
A shortcut for `tqdm.gui.tqdm(xrange(*args), **kwargs)`.
On Python3+, `range` is used instead of `xrange`.
"""
return tqdm_gui(_range(*args), **kwargs)
# Aliases
tqdm = tqdm_gui
trange = tgrange
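# A hedged usage sketch (comments only; requires an interactive matplotlib
# backend, and mirrors the module docstring above):
# >>> from time import sleep
# >>> for _ in tgrange(100):
# ...     sleep(0.01)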
|
bsd-3-clause
|
CGATOxford/CGATPipelines
|
obsolete/reports/pipeline_capseq/trackers/macs_replicated_intervals.py
|
1
|
7054
|
import os
import sys
import re
import types
import itertools
import matplotlib.pyplot as plt
import numpy
import scipy.stats
import numpy.ma
import Stats
import Histogram
from CGATReport.Tracker import *
from cpgReport import *
##########################################################################
class replicatedIntervalSummary(cpgTracker):
"""Summary stats of intervals called by the peak finder. """
mPattern = "_replicated_intervals$"
def __call__(self, track, slice=None):
data = self.getRow(
"SELECT COUNT(*) as Intervals, round(AVG(length),0) as Mean_length, round(AVG(nprobes),0) as Mean_reads FROM %(track)s_replicated_intervals" % locals())
return data
##########################################################################
class replicatedIntervalLengths(cpgTracker):
"""Distribution of interval length. """
mPattern = "_replicated_intervals$"
def __call__(self, track, slice=None):
data = self.getAll(
"SELECT length FROM %(track)s_replicated_intervals" % locals())
return data
##########################################################################
class replicatedIntervalPeakValues(cpgTracker):
"""Distribution of maximum interval coverage (the number of reads at peak). """
mPattern = "_replicated_intervals$"
def __call__(self, track, slice=None):
data = self.getAll(
"SELECT peakval FROM %(track)s_replicated_intervals" % locals())
return data
##########################################################################
class replicatedIntervalAverageValues(cpgTracker):
"""Distribution of average coverage (the average number of reads within the interval) """
mPattern = "_replicated_intervals$"
def __call__(self, track, slice=None):
data = self.getAll(
"SELECT avgval FROM %(track)s_replicated_intervals" % locals())
return data
##########################################################################
class replicatedIntervalFoldChange(cpgTracker):
"""return fold changes for all intervals. """
mPattern = "_replicated_intervals$"
def __call__(self, track, slice=None):
data = self.getAll(
"SELECT fold FROM %(track)s_replicated_intervals" % locals())
return data
##########################################################################
##########################################################################
##########################################################################
class replicatedIntervalPeakLocation(cpgTracker):
mPattern = "_replicated_intervals$"
def __call__(self, track, slice=None):
data1 = self.getValues(
"SELECT (PeakCenter - start) / CAST( Length as FLOAT) - 0.5 FROM %(track)s_replicated_intervals" % locals())
data2 = self.getValues(
"SELECT (end - PeakCenter) / CAST( Length as FLOAT) - 0.5 FROM %(track)s_replicated_intervals" % locals())
return {"distance": data1 + data2}
##########################################################################
class replicatedIntervalPeakDistance(cpgTracker):
mPattern = "_replicated_intervals$"
def __call__(self, track, slice=None):
data1 = self.getValues(
"SELECT PeakCenter - start FROM %(track)s_replicated_intervals" % locals())
data2 = self.getValues(
"SELECT end - PeakCenter FROM %(track)s_replicated_intervals" % locals())
return {"distance": data1 + data2}
##########################################################################
##########################################################################
##########################################################################
class replicatedIntervalCpGDensity(cpgTracker):
pattern = "(.*)_replicated_composition"
def __call__(self, track, slice=None):
data1 = self.getValues(
"SELECT pCpG FROM %(track)s_replicated_composition" % locals())
data2 = self.getValues(
"SELECT pCpG FROM %(track)s_replicated_composition_control" % locals())
data3 = self.getValues(
"SELECT pCpG FROM %(track)s_replicated_composition_flanking5" % locals())
data4 = self.getValues(
"SELECT pCpG FROM %(track)s_replicated_composition_flanking3" % locals())
return odict(list(zip(("CAPseq composition", "Control composition", "5` Flank Composition", "3` Flank Composition"), (data1, data2, data3, data4))))
##########################################################################
class replicatedIntervalCpGObsExp1(cpgTracker):
pattern = "(.*)_replicated_composition"
def __call__(self, track, slice=None):
data1 = self.getValues(
"SELECT CpG_ObsExp1 FROM %(track)s_replicated_composition" % locals())
data2 = self.getValues(
"SELECT CpG_ObsExp1 FROM %(track)s_replicated_composition_control" % locals())
data3 = self.getValues(
"SELECT CpG_ObsExp1 FROM %(track)s_replicated_composition_flanking5" % locals())
data4 = self.getValues(
"SELECT CpG_ObsExp1 FROM %(track)s_replicated_composition_flanking3" % locals())
return odict(list(zip(("CAPseq composition", "Control composition", "5` Flank Composition", "3` Flank Composition"), (data1, data2, data3, data4))))
##########################################################################
class replicatedIntervalCpGObsExp2(cpgTracker):
pattern = "(.*)_replicated_composition"
def __call__(self, track, slice=None):
data1 = self.getValues(
"SELECT CpG_ObsExp FROM %(track)s_replicated_composition" % locals())
data2 = self.getValues(
"SELECT CpG_ObsExp FROM %(track)s_replicated_composition_control" % locals())
data3 = self.getValues(
"SELECT CpG_ObsExp FROM %(track)s_replicated_composition_flanking5" % locals())
data4 = self.getValues(
"SELECT CpG_ObsExp FROM %(track)s_replicated_composition_flanking3" % locals())
return odict(list(zip(("CAPseq composition", "Control composition", "5` Flank Composition", "3` Flank Composition"), (data1, data2, data3, data4))))
##########################################################################
class replicatedIntervalGCContent(cpgTracker):
pattern = "(.*)_replicated_composition"
def __call__(self, track, slice=None):
data1 = self.getValues(
"SELECT pGC FROM %(track)s_replicated_composition" % locals())
data2 = self.getValues(
"SELECT pGC FROM %(track)s_replicated_composition_control" % locals())
data3 = self.getValues(
"SELECT pGC FROM %(track)s_replicated_composition_flanking5" % locals())
data4 = self.getValues(
"SELECT pGC FROM %(track)s_replicated_composition_flanking3" % locals())
return odict(list(zip(("CAPseq composition", "Control composition", "5` Flank Composition", "3` Flank Composition"), (data1, data2, data3, data4))))
|
mit
|
rchatterjee/word2keypress
|
src/word2keypress/typos.py
|
1
|
7083
|
from __future__ import print_function
import os, sys, re, json
from collections import defaultdict
import numpy as np
import pandas as pd
try:
from word2keypress.weight_matrix import WEIGHT_MATRIX
from word2keypress.weighted_edist import (
STARTSTR, ENDSTR, KB, BLANK, SHIFT_KEY, CAPS_KEY, all_edits, _editdist)
except ImportError:
from weight_matrix import WEIGHT_MATRIX
from weighted_edist import (
STARTSTR, ENDSTR, KB, BLANK, SHIFT_KEY, CAPS_KEY, all_edits, _editdist
)
EDIT_DIST_CUTOFF = 1
WEIGHT_MATRIX = [
(e,w) for e, w in WEIGHT_MATRIX
if _editdist(e[0], e[1], limit=EDIT_DIST_CUTOFF)[1]
]
# giant_regex = re.compile('|'.join(
# re.escape(l) for ((l,r),w) in WEIGHT_MATRIX))
def allowed_edits(pw_key_str):
"""
    Returns all the edits that are allowed for pw_key_str: an edit
    (l -> r) is allowed if l appears in pw_key_str. The result is the
    filtered WEIGHT_MATRIX, sorted by descending weight.
"""
if not pw_key_str.startswith(STARTSTR):
pw_key_str = STARTSTR + pw_key_str + ENDSTR
# print(pw_key_str)
return sorted(
[((l,r), w) for ((l,r),w) in WEIGHT_MATRIX
if l.replace(BLANK, '') in pw_key_str],
key=lambda x: x[1], reverse=True
)
def edit_locs(pw_key_str, l):
matched_indexes = [
(m.start(), m.end())
for m in re.finditer('({})'.format(re.escape(l.replace(BLANK, ''))),
pw_key_str)
if m.start()<len(pw_key_str) and m.end()>0
]
return matched_indexes
def apply_edit(pw_key_str, e):
"""
    Applies the edit e to pw_key_str wherever it is possible.
    If there are multiple matching locations, each resulting typo is
    yielded with weight 1/(number of matched locations).
"""
l, r = e
matched_indexes = edit_locs(pw_key_str, l)
    assert matched_indexes, "Wow!! matched index is empty for pw={}, e={}"\
        .format(pw_key_str, e)
# Choose one index at random from the possible options
# i = np.random.randint(0, len(matched_indexes))
# pos_s, pos_e = matched_indexes[i]
# if BLANK in l:
# typo_key_str = _insert_edit(pw_key_str, l, r, pos_s, pos_e)
# else:
for m in matched_indexes:
pos_s, pos_e = m
typo_key_str = pw_key_str[:pos_s] + r + pw_key_str[pos_e:]
yield typo_key_str.replace(BLANK, ''), 1.0/len(matched_indexes)
def num_typos(n, ed):
# type: (int, int) -> int
assert ed>=0, "edit distance should be no less than 0. Got = {}".format(ed)
t = (2*96)**ed
a = n+1
for i in range(2, ed+1):
a *= (n+i)
return a*t
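# Worked example of the count above (comment only, not part of the original
# module): for a 6-character password at edit distance 1,
# num_typos(6, 1) = (2*96)**1 * (6+1) = 192 * 7 = 1344 candidate typos.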
def get_prob(rpw, tpw):
"""
    Probability that rpw is mistyped as tpw: find all the edit locations
    and sum their probabilities.
"""
edits = set(all_edits(rpw, tpw, N=1, edit_cutoff=1))
pw_key_str = STARTSTR + KB.word_to_keyseq(rpw) + ENDSTR
E = allowed_edits(pw_key_str)
s = float(sum(x[1] for x in E))
if(s==0): return 0.0
# print("s = {} (len(E)={})".format(s, len(E)))
# print(edits)
total_ed1_typos_estimate = 2*96*(len(rpw) + 1)
f = 1.0/num_typos(len(rpw), 1 if edits else 2)
for e, w in E:
if e not in edits: continue
for typo_key_str, w_frac in apply_edit(pw_key_str, e):
typo_key_str = typo_key_str.strip(STARTSTR).strip(ENDSTR)
typo = KB.keyseq_to_word(typo_key_str)
if typo == tpw:
f += w*w_frac
return f/s
def get_topk_typos(pw, k=10):
"""
Returns top k typos of the word pw
"""
pw_key_str = STARTSTR + KB.word_to_keyseq(pw) + ENDSTR
E = sorted(allowed_edits(pw_key_str), key=lambda x: x[1]*len(x[0][0]),
reverse=True)
tt = defaultdict(float)
s = float(sum(x[1] for x in E))
# print("s = {} (len(E)={})".format(s, len(E)))
i = 0
debug_pw = {pw.swapcase()}
while len(tt)<k*len(pw)*10 and i <len(E):
e, w = E[i]
for typo_key_str, w_frac in apply_edit(pw_key_str, e):
typo_key_str = typo_key_str.strip(STARTSTR).strip(ENDSTR)
typo = KB.keyseq_to_word(typo_key_str)
tt[typo] += w * w_frac/s
# if typo in debug_pw:
# print("{!r} ->> {} ({}, {})".format(typo_key_str, e, w, w*w_frac/s))
i += 1
return sorted(tt.items(), key=lambda x: x[1], reverse=True)[:k]
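# A hedged usage sketch of the two query functions above (comments only;
# the actual values depend on WEIGHT_MATRIX):
# >>> typos = get_topk_typos('password', k=5)   # list of (typo, weight) pairs
# >>> p = get_prob('password', 'PASSWORD')      # estimated mistyping probability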
def read_typos(f_name):
d = pd.read_csv(f_name, skipinitialspace=False)\
.astype(str)
d_ts = d[d.rpw != d.tpw].sample(int(0.03*len(d.index)), random_state=435)
return d_ts
def test_model_rank(train_f):
from pyxdameraulevenshtein import damerau_levenshtein_distance as dldist
d_ts = read_typos(train_f)
a = np.array([get_prob(rpw, tpw)
for rpw, tpw in zip(d_ts.rpw, d_ts.tpw)
if dldist(rpw.lower(), tpw.lower())<=1])
a = a[a>0]
rank = []
for rpw, tpw in zip(d_ts.rpw, d_ts.tpw):
if dldist(rpw.lower(), tpw.lower())>1: continue
k = 20
typos = [tp for tp, w in get_topk_typos(rpw, k)]
if tpw in typos:
rank.append(typos.index(tpw)+1)
else:
rank.append(k)
print("Avg_rank: ", sum(rank)/float(len(rank)*k))
print(d_ts.shape, a.shape, a.mean(), a.std())
return a
def test_model_likelihood(train_f):
from pyxdameraulevenshtein import damerau_levenshtein_distance as dldist
d_ts = read_typos(train_f)
ed = d_ts.apply(lambda r: dldist(r.rpw, r.tpw), axis=1)
probs = d_ts[ed<=1].apply(lambda r: get_prob(r.rpw, r.tpw), axis=1)
likelihood = np.log(probs[probs>0]).mean()
return likelihood
if __name__ == '__main__':
USAGE = """Usage:
$ {} [options] [arguments]
-allowed-edits <rpw> : returns the allowed edits of rpw
-sample <password>: samples typos for the password from the model
-prob <rpw> <tpw>: probability of rpw -> tpw
-topktypos <rpw> [<n>] : returns n (default 10) typos of rpw
-test <typo-fname> : Tests the efficacy of the model, ~/pwtypos-code/typodata/typos.csv
-keypress <rpw> : Return the keypress representation of
""".format(sys.argv[0])
if len(sys.argv)<=1:
print(USAGE)
exit(1)
if sys.argv[1] == '-allowed-edits':
pw = KB.word_to_keyseq(sys.argv[2])
(l,r),w = WEIGHT_MATRIX[0]
assert l.replace(BLANK, '') in pw, "{!r}, {} {}"\
.format(l.replace(BLANK, ''), pw, w)
print(allowed_edits(pw))
elif sys.argv[1] == '-sample':
pw = sys.argv[2]
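        # NOTE: sample_typos is not defined or imported in this module, so
        # this '-sample' branch would raise NameError if invoked as written.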
print("{} --> {}".format(pw, len(set((sample_typos(pw, 100))))))
elif sys.argv[1] == '-topktypos':
pw = sys.argv[2]
n = int(sys.argv[3]) if len(sys.argv)>3 else 10
typos = get_topk_typos(pw, n)
print('\n'.join("{}: {:.5f}".format(x,y) for x, y in typos))
print("{} --> {}".format(pw, len(typos)))
elif sys.argv[1] == '-prob':
print(get_prob(sys.argv[2], sys.argv[3]))
elif sys.argv[1] == '-test':
# test_model_rank(sys.argv[2])
print("Log-Likelihood: ", test_model_likelihood(sys.argv[2]))
elif sys.argv[1] == '-keypress':
print(repr(KB.word_to_keyseq(sys.argv[2])))
else:
print(USAGE)
|
mit
|
AtsushiSakai/PythonRobotics
|
PathPlanning/BugPlanning/bug.py
|
1
|
12636
|
"""
Bug Planning
author: Sarim Mehdi([email protected])
Source: https://sites.google.com/site/ece452bugalgorithms/
"""
import numpy as np
import matplotlib.pyplot as plt
show_animation = True
class BugPlanner:
def __init__(self, start_x, start_y, goal_x, goal_y, obs_x, obs_y):
self.goal_x = goal_x
self.goal_y = goal_y
self.obs_x = obs_x
self.obs_y = obs_y
self.r_x = [start_x]
self.r_y = [start_y]
self.out_x = []
self.out_y = []
for o_x, o_y in zip(obs_x, obs_y):
for add_x, add_y in zip([1, 0, -1, -1, -1, 0, 1, 1],
[1, 1, 1, 0, -1, -1, -1, 0]):
cand_x, cand_y = o_x+add_x, o_y+add_y
valid_point = True
for _x, _y in zip(obs_x, obs_y):
if cand_x == _x and cand_y == _y:
valid_point = False
break
if valid_point:
self.out_x.append(cand_x), self.out_y.append(cand_y)
def mov_normal(self):
return self.r_x[-1] + np.sign(self.goal_x - self.r_x[-1]), \
self.r_y[-1] + np.sign(self.goal_y - self.r_y[-1])
def mov_to_next_obs(self, visited_x, visited_y):
for add_x, add_y in zip([1, 0, -1, 0], [0, 1, 0, -1]):
c_x, c_y = self.r_x[-1] + add_x, self.r_y[-1] + add_y
for _x, _y in zip(self.out_x, self.out_y):
use_pt = True
if c_x == _x and c_y == _y:
for v_x, v_y in zip(visited_x, visited_y):
if c_x == v_x and c_y == v_y:
use_pt = False
break
if use_pt:
return c_x, c_y, False
if not use_pt:
break
return self.r_x[-1], self.r_y[-1], True
def bug0(self):
"""
Greedy algorithm where you move towards goal
until you hit an obstacle. Then you go around it
(pick an arbitrary direction), until it is possible
for you to start moving towards goal in a greedy manner again
"""
mov_dir = 'normal'
cand_x, cand_y = -np.inf, -np.inf
if show_animation:
plt.plot(self.obs_x, self.obs_y, ".k")
plt.plot(self.r_x[-1], self.r_y[-1], "og")
plt.plot(self.goal_x, self.goal_y, "xb")
plt.plot(self.out_x, self.out_y, ".")
plt.grid(True)
plt.title('BUG 0')
for x_ob, y_ob in zip(self.out_x, self.out_y):
if self.r_x[-1] == x_ob and self.r_y[-1] == y_ob:
mov_dir = 'obs'
break
visited_x, visited_y = [], []
while True:
if self.r_x[-1] == self.goal_x and \
self.r_y[-1] == self.goal_y:
break
if mov_dir == 'normal':
cand_x, cand_y = self.mov_normal()
if mov_dir == 'obs':
cand_x, cand_y, _ = self.mov_to_next_obs(visited_x, visited_y)
if mov_dir == 'normal':
found_boundary = False
for x_ob, y_ob in zip(self.out_x, self.out_y):
if cand_x == x_ob and cand_y == y_ob:
self.r_x.append(cand_x), self.r_y.append(cand_y)
visited_x[:], visited_y[:] = [], []
visited_x.append(cand_x), visited_y.append(cand_y)
mov_dir = 'obs'
found_boundary = True
break
if not found_boundary:
self.r_x.append(cand_x), self.r_y.append(cand_y)
elif mov_dir == 'obs':
can_go_normal = True
for x_ob, y_ob in zip(self.obs_x, self.obs_y):
if self.mov_normal()[0] == x_ob and \
self.mov_normal()[1] == y_ob:
can_go_normal = False
break
if can_go_normal:
mov_dir = 'normal'
else:
self.r_x.append(cand_x), self.r_y.append(cand_y)
visited_x.append(cand_x), visited_y.append(cand_y)
if show_animation:
plt.plot(self.r_x, self.r_y, "-r")
plt.pause(0.001)
if show_animation:
plt.show()
def bug1(self):
"""
Move towards goal in a greedy manner.
When you hit an obstacle, you go around it and
back to where you hit the obstacle initially.
Then, you go to the point on the obstacle that is
closest to your goal and you start moving towards
goal in a greedy manner from that new point.
"""
mov_dir = 'normal'
cand_x, cand_y = -np.inf, -np.inf
exit_x, exit_y = -np.inf, -np.inf
dist = np.inf
back_to_start = False
second_round = False
if show_animation:
plt.plot(self.obs_x, self.obs_y, ".k")
plt.plot(self.r_x[-1], self.r_y[-1], "og")
plt.plot(self.goal_x, self.goal_y, "xb")
plt.plot(self.out_x, self.out_y, ".")
plt.grid(True)
plt.title('BUG 1')
for xob, yob in zip(self.out_x, self.out_y):
if self.r_x[-1] == xob and self.r_y[-1] == yob:
mov_dir = 'obs'
break
visited_x, visited_y = [], []
while True:
if self.r_x[-1] == self.goal_x and \
self.r_y[-1] == self.goal_y:
break
if mov_dir == 'normal':
cand_x, cand_y = self.mov_normal()
if mov_dir == 'obs':
cand_x, cand_y, back_to_start = \
self.mov_to_next_obs(visited_x, visited_y)
if mov_dir == 'normal':
found_boundary = False
for x_ob, y_ob in zip(self.out_x, self.out_y):
if cand_x == x_ob and cand_y == y_ob:
self.r_x.append(cand_x), self.r_y.append(cand_y)
visited_x[:], visited_y[:] = [], []
visited_x.append(cand_x), visited_y.append(cand_y)
mov_dir = 'obs'
dist = np.inf
back_to_start = False
second_round = False
found_boundary = True
break
if not found_boundary:
self.r_x.append(cand_x), self.r_y.append(cand_y)
elif mov_dir == 'obs':
                d = np.linalg.norm(np.array([cand_x, cand_y]) -
                                   np.array([self.goal_x,
                                             self.goal_y]))
if d < dist and not second_round:
exit_x, exit_y = cand_x, cand_y
dist = d
if back_to_start and not second_round:
second_round = True
del self.r_x[-len(visited_x):]
del self.r_y[-len(visited_y):]
visited_x[:], visited_y[:] = [], []
self.r_x.append(cand_x), self.r_y.append(cand_y)
visited_x.append(cand_x), visited_y.append(cand_y)
if cand_x == exit_x and \
cand_y == exit_y and \
second_round:
mov_dir = 'normal'
if show_animation:
plt.plot(self.r_x, self.r_y, "-r")
plt.pause(0.001)
if show_animation:
plt.show()
def bug2(self):
"""
Move towards goal in a greedy manner.
When you hit an obstacle, you go around it and
keep track of your distance from the goal.
If the distance from your goal was decreasing before
and now it starts increasing, that means the current
point is probably the closest point to the
goal (this may or may not be true because the algorithm
doesn't explore the entire boundary around the obstacle).
So, you depart from this point and continue towards the
goal in a greedy manner
"""
mov_dir = 'normal'
cand_x, cand_y = -np.inf, -np.inf
if show_animation:
plt.plot(self.obs_x, self.obs_y, ".k")
plt.plot(self.r_x[-1], self.r_y[-1], "og")
plt.plot(self.goal_x, self.goal_y, "xb")
plt.plot(self.out_x, self.out_y, ".")
straight_x, straight_y = [self.r_x[-1]], [self.r_y[-1]]
hit_x, hit_y = [], []
while True:
if straight_x[-1] == self.goal_x and \
straight_y[-1] == self.goal_y:
break
c_x = straight_x[-1] + np.sign(self.goal_x - straight_x[-1])
c_y = straight_y[-1] + np.sign(self.goal_y - straight_y[-1])
for x_ob, y_ob in zip(self.out_x, self.out_y):
if c_x == x_ob and c_y == y_ob:
hit_x.append(c_x), hit_y.append(c_y)
break
straight_x.append(c_x), straight_y.append(c_y)
if show_animation:
plt.plot(straight_x, straight_y, ",")
plt.plot(hit_x, hit_y, "d")
plt.grid(True)
plt.title('BUG 2')
for x_ob, y_ob in zip(self.out_x, self.out_y):
if self.r_x[-1] == x_ob and self.r_y[-1] == y_ob:
mov_dir = 'obs'
break
visited_x, visited_y = [], []
while True:
if self.r_x[-1] == self.goal_x \
and self.r_y[-1] == self.goal_y:
break
if mov_dir == 'normal':
cand_x, cand_y = self.mov_normal()
if mov_dir == 'obs':
cand_x, cand_y, _ = self.mov_to_next_obs(visited_x, visited_y)
if mov_dir == 'normal':
found_boundary = False
for x_ob, y_ob in zip(self.out_x, self.out_y):
if cand_x == x_ob and cand_y == y_ob:
self.r_x.append(cand_x), self.r_y.append(cand_y)
visited_x[:], visited_y[:] = [], []
visited_x.append(cand_x), visited_y.append(cand_y)
del hit_x[0]
del hit_y[0]
mov_dir = 'obs'
found_boundary = True
break
if not found_boundary:
self.r_x.append(cand_x), self.r_y.append(cand_y)
elif mov_dir == 'obs':
self.r_x.append(cand_x), self.r_y.append(cand_y)
visited_x.append(cand_x), visited_y.append(cand_y)
for i_x, i_y in zip(range(len(hit_x)), range(len(hit_y))):
if cand_x == hit_x[i_x] and cand_y == hit_y[i_y]:
del hit_x[i_x]
del hit_y[i_y]
mov_dir = 'normal'
break
if show_animation:
plt.plot(self.r_x, self.r_y, "-r")
plt.pause(0.001)
if show_animation:
plt.show()
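# A minimal usage sketch with made-up coordinates (comments only; main() below
# builds the full demo map):
# >>> planner = BugPlanner(start_x=0, start_y=0, goal_x=10, goal_y=10,
# ...                      obs_x=[5, 5, 5], obs_y=[4, 5, 6])
# >>> planner.bug2()   # bug0() and bug1() follow the same call pattern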
def main(bug_0, bug_1, bug_2):
# set obstacle positions
o_x, o_y = [], []
s_x = 0.0
s_y = 0.0
g_x = 167.0
g_y = 50.0
for i in range(20, 40):
for j in range(20, 40):
o_x.append(i)
o_y.append(j)
for i in range(60, 100):
for j in range(40, 80):
o_x.append(i)
o_y.append(j)
for i in range(120, 140):
for j in range(80, 100):
o_x.append(i)
o_y.append(j)
for i in range(80, 140):
for j in range(0, 20):
o_x.append(i)
o_y.append(j)
for i in range(0, 20):
for j in range(60, 100):
o_x.append(i)
o_y.append(j)
for i in range(20, 40):
for j in range(80, 100):
o_x.append(i)
o_y.append(j)
for i in range(120, 160):
for j in range(40, 60):
o_x.append(i)
o_y.append(j)
if bug_0:
my_Bug = BugPlanner(s_x, s_y, g_x, g_y, o_x, o_y)
my_Bug.bug0()
if bug_1:
my_Bug = BugPlanner(s_x, s_y, g_x, g_y, o_x, o_y)
my_Bug.bug1()
if bug_2:
my_Bug = BugPlanner(s_x, s_y, g_x, g_y, o_x, o_y)
my_Bug.bug2()
if __name__ == '__main__':
main(bug_0=True, bug_1=False, bug_2=False)
|
mit
|
arkadoel/AprendiendoPython
|
Udacity_tutorial/p_pandas.py
|
1
|
1925
|
import pandas as pd
'''
The following code is to help you play with the concept of Series in Pandas.
You can think of Series as a one-dimensional object that is similar to
an array, list, or column in a database. By default, it will assign an
index label to each item in the Series ranging from 0 to N, where N is
the number of items in the Series minus one.
Please feel free to play around with the concept of Series and see what it does
*This playground is inspired by Greg Reda's post on Intro to Pandas Data Structures:
http://www.gregreda.com/2013/10/26/intro-to-pandas-data-structures/
'''
# Change False to True to create a Series object
if False:
series = pd.Series(['Dave', 'Cheng-Han', 'Udacity', 42, -1789710578])
    print(series)
'''
You can also manually assign indices to the items in the Series when
creating the series
'''
# Change False to True to see custom index in action
if True:
series = pd.Series(['Dave', 'Cheng-Han', 359, 9001],
index=['Instructor', 'Curriculum Manager',
'Course Number', 'Power Level'])
    print(series)
'''
You can use index to select specific items from the Series
'''
# Change False to True to see Series indexing in action
if False:
series = pd.Series(['Dave', 'Cheng-Han', 359, 9001],
index=['Instructor', 'Curriculum Manager',
'Course Number', 'Power Level'])
    print(series['Instructor'])
    print("")
    print(series[['Instructor', 'Curriculum Manager', 'Course Number']])
'''
You can also use boolean operators to select specific items from the Series
'''
# Change False to True to see boolean indexing in action
if False:
cuteness = pd.Series([1, 2, 3, 4, 5], index=['Cockroach', 'Fish', 'Mini Pig',
'Puppy', 'Kitten'])
    print(cuteness > 3)
    print("")
    print(cuteness[cuteness > 3])
|
gpl-3.0
|
MickyDowns/deep-theano-rnn-lstm-car
|
plotting.py
|
4
|
2469
|
"""
Take the data in the results folder and plot it so we can stop using stupid
Excel.
"""
import glob
import os
import csv
import matplotlib.pyplot as plt
import numpy as np
def movingaverage(y, window_size):
"""
Moving average function from:
http://stackoverflow.com/questions/11352047/finding-moving-average-from-data-points-in-python
"""
window = np.ones(int(window_size))/float(window_size)
return np.convolve(y, window, 'same')
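# Worked example of the smoothing above (comment only): with a window of 3,
# [1, 2, 3, 4, 5] becomes [1.0, 2.0, 3.0, 4.0, 3.0]; the edges are damped
# because 'same'-mode convolution pads with zeros.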
def readable_output(filename):
readable = ''
# Example:
# learn_data-1000-1000-32-10000.csv
f_parts = filename.split('-')
if f_parts[0] == 'learn_data':
readable += 'distance: '
else:
readable += 'loss: '
readable += f_parts[1] + ', ' + f_parts[2] + ' | '
readable += f_parts[3] + ' | '
readable += f_parts[4].split('.')[0]
return readable
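# Worked example (comment only), following the filename pattern noted above:
# >>> readable_output('learn_data-1000-1000-32-10000.csv')
# 'distance: 1000, 1000 | 32 | 10000'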
def plot_file(f, type='loss'):
with open(f, 'r') as csvfile:
reader = csv.reader(csvfile)
# Turn our column into an array.
y = []
for row in reader:
if type == 'loss':
y.append(float(row[0]))
else:
y.append(float(row[1]))
# Running tests will be empty.
if len(y) == 0:
return
print(readable_output(f))
# Get the moving average so the graph isn't so crazy.
if type == 'loss':
window = 100
else:
window = 10
y_av = movingaverage(y, window)
# Use our moving average to get some metrics.
arr = np.array(y_av)
if type == 'loss':
print("%f\t%f\n" % (arr.min(), arr.mean()))
else:
print("%f\t%f\n" % (arr.max(), arr.mean()))
# Plot it.
plt.clf() # Clear.
plt.title(f)
# The -50 removes an artificial drop at the end caused by the moving
# average.
if type == 'loss':
plt.plot(y_av[:-50])
plt.ylabel('Smoothed Loss')
plt.ylim(0, 5000)
plt.xlim(0, 250000)
else:
plt.plot(y_av[:-5])
plt.ylabel('Smoothed Distance')
plt.ylim(0, 4000)
plt.savefig(f + '.png', bbox_inches='tight')
if __name__ == "__main__":
# Get our loss result files.
os.chdir("results/sonar-frames")
for f in glob.glob("learn*.csv"):
plot_file(f, 'learn')
for f in glob.glob("loss*.csv"):
plot_file(f, 'loss')
|
mit
|
perimosocordiae/scipy
|
scipy/optimize/_shgo_lib/triangulation.py
|
11
|
21463
|
import numpy as np
import copy
class Complex:
def __init__(self, dim, func, func_args=(), symmetry=False, bounds=None,
g_cons=None, g_args=()):
self.dim = dim
self.bounds = bounds
self.symmetry = symmetry # TODO: Define the functions to be used
# here in init to avoid if checks
self.gen = 0
self.perm_cycle = 0
# Every cell is stored in a list of its generation,
# e.g., the initial cell is stored in self.H[0]
        # 1st gen new cells are stored in self.H[1] etc.
# When a cell is subgenerated it is removed from this list
self.H = [] # Storage structure of cells
# Cache of all vertices
self.V = VertexCache(func, func_args, bounds, g_cons, g_args)
# Generate n-cube here:
self.n_cube(dim, symmetry=symmetry)
        # TODO: Assign functions to the complex instead
if symmetry:
self.generation_cycle = 1
# self.centroid = self.C0()[-1].x
# self.C0.centroid = self.centroid
else:
self.add_centroid()
self.H.append([])
self.H[0].append(self.C0)
self.hgr = self.C0.homology_group_rank()
self.hgrd = 0 # Complex group rank differential
# self.hgr = self.C0.hg_n
# Build initial graph
self.graph_map()
self.performance = []
self.performance.append(0)
self.performance.append(0)
def __call__(self):
return self.H
def n_cube(self, dim, symmetry=False, printout=False):
"""
Generate the simplicial triangulation of the N-D hypercube
containing 2**n vertices
"""
origin = list(np.zeros(dim, dtype=int))
self.origin = origin
supremum = list(np.ones(dim, dtype=int))
self.supremum = supremum
# tuple versions for indexing
origintuple = tuple(origin)
supremumtuple = tuple(supremum)
x_parents = [origintuple]
if symmetry:
self.C0 = Simplex(0, 0, 0, self.dim) # Initial cell object
self.C0.add_vertex(self.V[origintuple])
i_s = 0
self.perm_symmetry(i_s, x_parents, origin)
self.C0.add_vertex(self.V[supremumtuple])
else:
self.C0 = Cell(0, 0, origin, supremum) # Initial cell object
self.C0.add_vertex(self.V[origintuple])
self.C0.add_vertex(self.V[supremumtuple])
i_parents = []
self.perm(i_parents, x_parents, origin)
if printout:
print("Initial hyper cube:")
for v in self.C0():
v.print_out()
def perm(self, i_parents, x_parents, xi):
# TODO: Cut out of for if outside linear constraint cutting planes
xi_t = tuple(xi)
# Construct required iterator
iter_range = [x for x in range(self.dim) if x not in i_parents]
for i in iter_range:
i2_parents = copy.copy(i_parents)
i2_parents.append(i)
xi2 = copy.copy(xi)
xi2[i] = 1
# Make new vertex list a hashable tuple
xi2_t = tuple(xi2)
# Append to cell
self.C0.add_vertex(self.V[xi2_t])
# Connect neighbors and vice versa
# Parent point
self.V[xi2_t].connect(self.V[xi_t])
# Connect all family of simplices in parent containers
for x_ip in x_parents:
self.V[xi2_t].connect(self.V[x_ip])
x_parents2 = copy.copy(x_parents)
x_parents2.append(xi_t)
# Permutate
self.perm(i2_parents, x_parents2, xi2)
def perm_symmetry(self, i_s, x_parents, xi):
# TODO: Cut out of for if outside linear constraint cutting planes
xi_t = tuple(xi)
xi2 = copy.copy(xi)
xi2[i_s] = 1
# Make new vertex list a hashable tuple
xi2_t = tuple(xi2)
# Append to cell
self.C0.add_vertex(self.V[xi2_t])
# Connect neighbors and vice versa
# Parent point
self.V[xi2_t].connect(self.V[xi_t])
# Connect all family of simplices in parent containers
for x_ip in x_parents:
self.V[xi2_t].connect(self.V[x_ip])
x_parents2 = copy.copy(x_parents)
x_parents2.append(xi_t)
i_s += 1
if i_s == self.dim:
return
# Permutate
self.perm_symmetry(i_s, x_parents2, xi2)
def add_centroid(self):
"""Split the central edge between the origin and supremum of
a cell and add the new vertex to the complex"""
self.centroid = list(
(np.array(self.origin) + np.array(self.supremum)) / 2.0)
self.C0.add_vertex(self.V[tuple(self.centroid)])
self.C0.centroid = self.centroid
# Disconnect origin and supremum
self.V[tuple(self.origin)].disconnect(self.V[tuple(self.supremum)])
# Connect centroid to all other vertices
for v in self.C0():
self.V[tuple(self.centroid)].connect(self.V[tuple(v.x)])
self.centroid_added = True
return
# Construct incidence array:
def incidence(self):
if self.centroid_added:
self.structure = np.zeros([2 ** self.dim + 1, 2 ** self.dim + 1],
dtype=int)
else:
self.structure = np.zeros([2 ** self.dim, 2 ** self.dim],
dtype=int)
for v in self.HC.C0():
for v2 in v.nn:
self.structure[v.index, v2.index] = 1
return
# A more sparse incidence generator:
def graph_map(self):
""" Make a list of size 2**n + 1 where an entry is a vertex
incidence, each list element contains a list of indexes
        corresponding to that entry's neighbors"""
self.graph = [[v2.index for v2 in v.nn] for v in self.C0()]
# Graph structure method:
# 0. Capture the indices of the initial cell.
# 1. Generate new origin and supremum scalars based on current generation
# 2. Generate a new set of vertices corresponding to a new
# "origin" and "supremum"
# 3. Connected based on the indices of the previous graph structure
# 4. Disconnect the edges in the original cell
def sub_generate_cell(self, C_i, gen):
"""Subgenerate a cell `C_i` of generation `gen` and
homology group rank `hgr`."""
origin_new = tuple(C_i.centroid)
centroid_index = len(C_i()) - 1
# If not gen append
try:
self.H[gen]
except IndexError:
self.H.append([])
# Generate subcubes using every extreme vertex in C_i as a supremum
# and the centroid of C_i as the origin
H_new = [] # list storing all the new cubes split from C_i
for i, v in enumerate(C_i()[:-1]):
supremum = tuple(v.x)
H_new.append(
self.construct_hypercube(origin_new, supremum, gen, C_i.hg_n))
for i, connections in enumerate(self.graph):
# Present vertex V_new[i]; connect to all connections:
if i == centroid_index: # Break out of centroid
break
for j in connections:
C_i()[i].disconnect(C_i()[j])
# Destroy the old cell
if C_i is not self.C0: # Garbage collector does this anyway; not needed
del C_i
# TODO: Recalculate all the homology group ranks of each cell
return H_new
def split_generation(self):
"""
Run sub_generate_cell for every cell in the current complex self.gen
"""
no_splits = False # USED IN SHGO
try:
for c in self.H[self.gen]:
if self.symmetry:
# self.sub_generate_cell_symmetry(c, self.gen + 1)
self.split_simplex_symmetry(c, self.gen + 1)
else:
self.sub_generate_cell(c, self.gen + 1)
except IndexError:
no_splits = True # USED IN SHGO
self.gen += 1
return no_splits # USED IN SHGO
def construct_hypercube(self, origin, supremum, gen, hgr,
printout=False):
"""
Build a hypercube with triangulations symmetric to C0.
Parameters
----------
origin : vec
supremum : vec (tuple)
gen : generation
hgr : parent homology group rank
"""
# Initiate new cell
v_o = np.array(origin)
v_s = np.array(supremum)
C_new = Cell(gen, hgr, origin, supremum)
C_new.centroid = tuple((v_o + v_s) * .5)
# Build new indexed vertex list
V_new = []
for i, v in enumerate(self.C0()[:-1]):
v_x = np.array(v.x)
sub_cell_t1 = v_o - v_o * v_x
sub_cell_t2 = v_s * v_x
vec = sub_cell_t1 + sub_cell_t2
vec = tuple(vec)
C_new.add_vertex(self.V[vec])
V_new.append(vec)
# Add new centroid
C_new.add_vertex(self.V[C_new.centroid])
V_new.append(C_new.centroid)
# Connect new vertices #TODO: Thread into other loop; no need for V_new
for i, connections in enumerate(self.graph):
# Present vertex V_new[i]; connect to all connections:
for j in connections:
self.V[V_new[i]].connect(self.V[V_new[j]])
if printout:
print("A sub hyper cube with:")
print("origin: {}".format(origin))
print("supremum: {}".format(supremum))
for v in C_new():
v.print_out()
# Append the new cell to the to complex
self.H[gen].append(C_new)
return C_new
def split_simplex_symmetry(self, S, gen):
"""
Split a hypersimplex S into two sub simplices by building a hyperplane
which connects to a new vertex on an edge (the longest edge in
dim = {2, 3}) and every other vertex in the simplex that is not
connected to the edge being split.
This function utilizes the knowledge that the problem is specified
        with symmetric constraints.
        The longest edge is tracked by an ordering of the
        vertices in every simplex; the edge between the first and second
        vertex is the longest edge, to be split in the next iteration.
"""
# If not gen append
try:
self.H[gen]
except IndexError:
self.H.append([])
# Find new vertex.
# V_new_x = tuple((np.array(C()[0].x) + np.array(C()[1].x)) / 2.0)
s = S()
firstx = s[0].x
lastx = s[-1].x
V_new = self.V[tuple((np.array(firstx) + np.array(lastx)) / 2.0)]
# Disconnect old longest edge
self.V[firstx].disconnect(self.V[lastx])
# Connect new vertices to all other vertices
for v in s[:]:
v.connect(self.V[V_new.x])
# New "lower" simplex
S_new_l = Simplex(gen, S.hg_n, self.generation_cycle,
self.dim)
S_new_l.add_vertex(s[0])
S_new_l.add_vertex(V_new) # Add new vertex
for v in s[1:-1]: # Add all other vertices
S_new_l.add_vertex(v)
# New "upper" simplex
S_new_u = Simplex(gen, S.hg_n, S.generation_cycle, self.dim)
# First vertex on new long edge
S_new_u.add_vertex(s[S_new_u.generation_cycle + 1])
for v in s[1:-1]: # Remaining vertices
S_new_u.add_vertex(v)
for k, v in enumerate(s[1:-1]): # iterate through inner vertices
if k == S.generation_cycle:
S_new_u.add_vertex(V_new)
else:
S_new_u.add_vertex(v)
S_new_u.add_vertex(s[-1]) # Second vertex on new long edge
self.H[gen].append(S_new_l)
self.H[gen].append(S_new_u)
return
# Plots
def plot_complex(self):
"""
Here, C is the LIST of simplexes S in the
        2- or 3-D complex.
To plot a single simplex S in a set C, use e.g., [C[0]]
"""
from matplotlib import pyplot # type: ignore[import]
if self.dim == 2:
pyplot.figure()
for C in self.H:
for c in C:
for v in c():
if self.bounds is None:
x_a = np.array(v.x, dtype=float)
else:
x_a = np.array(v.x, dtype=float)
for i in range(len(self.bounds)):
x_a[i] = (x_a[i] * (self.bounds[i][1]
- self.bounds[i][0])
+ self.bounds[i][0])
# logging.info('v.x_a = {}'.format(x_a))
pyplot.plot([x_a[0]], [x_a[1]], 'o')
xlines = []
ylines = []
for vn in v.nn:
if self.bounds is None:
xn_a = np.array(vn.x, dtype=float)
else:
xn_a = np.array(vn.x, dtype=float)
for i in range(len(self.bounds)):
xn_a[i] = (xn_a[i] * (self.bounds[i][1]
- self.bounds[i][0])
+ self.bounds[i][0])
# logging.info('vn.x = {}'.format(vn.x))
xlines.append(xn_a[0])
ylines.append(xn_a[1])
xlines.append(x_a[0])
ylines.append(x_a[1])
pyplot.plot(xlines, ylines)
if self.bounds is None:
pyplot.ylim([-1e-2, 1 + 1e-2])
pyplot.xlim([-1e-2, 1 + 1e-2])
else:
pyplot.ylim(
[self.bounds[1][0] - 1e-2, self.bounds[1][1] + 1e-2])
pyplot.xlim(
[self.bounds[0][0] - 1e-2, self.bounds[0][1] + 1e-2])
pyplot.show()
elif self.dim == 3:
fig = pyplot.figure()
ax = fig.add_subplot(111, projection='3d')
for C in self.H:
for c in C:
for v in c():
x = []
y = []
z = []
# logging.info('v.x = {}'.format(v.x))
x.append(v.x[0])
y.append(v.x[1])
z.append(v.x[2])
for vn in v.nn:
x.append(vn.x[0])
y.append(vn.x[1])
z.append(vn.x[2])
x.append(v.x[0])
y.append(v.x[1])
z.append(v.x[2])
# logging.info('vn.x = {}'.format(vn.x))
ax.plot(x, y, z, label='simplex')
pyplot.show()
else:
print("dimension higher than 3 or wrong complex format")
return
class VertexGroup:
def __init__(self, p_gen, p_hgr):
self.p_gen = p_gen # parent generation
self.p_hgr = p_hgr # parent homology group rank
self.hg_n = None
self.hg_d = None
# Maybe add parent homology group rank total history
# This is the sum off all previously split cells
# cumulatively throughout its entire history
self.C = []
def __call__(self):
return self.C
def add_vertex(self, V):
if V not in self.C:
self.C.append(V)
def homology_group_rank(self):
"""
Returns the homology group order of the current cell
"""
if self.hg_n is None:
self.hg_n = sum(1 for v in self.C if v.minimiser())
return self.hg_n
def homology_group_differential(self):
"""
Returns the difference between the current homology group of the
cell and its parent group
"""
if self.hg_d is None:
            self.hg_d = self.hg_n - self.p_hgr
        return self.hg_d
def polytopial_sperner_lemma(self):
"""
Returns the number of stationary points theoretically contained in the
        cell based on information currently known about the cell
"""
pass
def print_out(self):
"""
Print the current cell to console
"""
for v in self():
v.print_out()
class Cell(VertexGroup):
"""
Contains a cell that is symmetric to the initial hypercube triangulation
"""
def __init__(self, p_gen, p_hgr, origin, supremum):
super().__init__(p_gen, p_hgr)
self.origin = origin
self.supremum = supremum
self.centroid = None # (Not always used)
# TODO: self.bounds
class Simplex(VertexGroup):
"""
Contains a simplex that is symmetric to the initial symmetry constrained
hypersimplex triangulation
"""
def __init__(self, p_gen, p_hgr, generation_cycle, dim):
super().__init__(p_gen, p_hgr)
self.generation_cycle = (generation_cycle + 1) % (dim - 1)
class Vertex:
def __init__(self, x, bounds=None, func=None, func_args=(), g_cons=None,
g_cons_args=(), nn=None, index=None):
self.x = x
self.order = sum(x)
x_a = np.array(x, dtype=float)
if bounds is not None:
for i, (lb, ub) in enumerate(bounds):
x_a[i] = x_a[i] * (ub - lb) + lb
# TODO: Make saving the array structure optional
self.x_a = x_a
# Note Vertex is only initiated once for all x so only
# evaluated once
if func is not None:
self.feasible = True
if g_cons is not None:
for g, args in zip(g_cons, g_cons_args):
if g(self.x_a, *args) < 0.0:
self.f = np.inf
self.feasible = False
break
if self.feasible:
self.f = func(x_a, *func_args)
if nn is not None:
self.nn = nn
else:
self.nn = set()
self.fval = None
self.check_min = True
# Index:
if index is not None:
self.index = index
def __hash__(self):
return hash(self.x)
def connect(self, v):
if v is not self and v not in self.nn:
self.nn.add(v)
v.nn.add(self)
if self.minimiser():
v._min = False
v.check_min = False
# TEMPORARY
self.check_min = True
v.check_min = True
def disconnect(self, v):
if v in self.nn:
self.nn.remove(v)
v.nn.remove(self)
self.check_min = True
v.check_min = True
def minimiser(self):
"""Check whether this vertex is strictly less than all its neighbors"""
if self.check_min:
self._min = all(self.f < v.f for v in self.nn)
self.check_min = False
return self._min
def print_out(self):
print("Vertex: {}".format(self.x))
constr = 'Connections: '
for vc in self.nn:
constr += '{} '.format(vc.x)
print(constr)
print('Order = {}'.format(self.order))
class VertexCache:
def __init__(self, func, func_args=(), bounds=None, g_cons=None,
g_cons_args=(), indexed=True):
self.cache = {}
self.func = func
self.g_cons = g_cons
self.g_cons_args = g_cons_args
self.func_args = func_args
self.bounds = bounds
self.nfev = 0
self.size = 0
if indexed:
self.index = -1
def __getitem__(self, x, indexed=True):
try:
return self.cache[x]
except KeyError:
if indexed:
self.index += 1
xval = Vertex(x, bounds=self.bounds,
func=self.func, func_args=self.func_args,
g_cons=self.g_cons,
g_cons_args=self.g_cons_args,
index=self.index)
else:
xval = Vertex(x, bounds=self.bounds,
func=self.func, func_args=self.func_args,
g_cons=self.g_cons,
g_cons_args=self.g_cons_args)
# logging.info("New generated vertex at x = {}".format(x))
# NOTE: Surprisingly high performance increase if logging is commented out
self.cache[x] = xval
# TODO: Check
if self.func is not None:
if self.g_cons is not None:
if xval.feasible:
self.nfev += 1
self.size += 1
else:
self.size += 1
else:
self.nfev += 1
self.size += 1
return self.cache[x]
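# A hedged usage sketch (comments only; the quadratic objective is an assumed
# example, not part of this module):
# >>> hc = Complex(2, lambda x: x[0] ** 2 + x[1] ** 2, bounds=[(-1, 1), (-1, 1)])
# >>> hc.split_generation()   # subdivide every cell of the current generation
# >>> hc.plot_complex()       # visualise the refined 2-D triangulation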
|
bsd-3-clause
|
jayhetee/pandashells
|
pandashells/test/config_lib_tests.py
|
8
|
2148
|
#! /usr/bin/env python
import os
import json
from unittest import TestCase
from pandashells.lib import config_lib
class GlobalArgTests(TestCase):
def test_home_path_looks_right(self):
"""
        The path to the user's home directory looks right
"""
home = os.path.expanduser('~')
self.assertEqual(config_lib.HOME, home)
def test_default_opt_dict_exists(self):
"""
The dictionary of default options exists
"""
self.assertTrue(len(config_lib.DEFAULT_DICT) > 0)
class GetConfigTests(TestCase):
def setUp(self):
if os.path.isfile(config_lib.CONFIG_FILE_NAME):
os.system('cp {f} {f}_orig'.format(f=config_lib.CONFIG_FILE_NAME))
def tearDown(self):
if os.path.isfile(config_lib.CONFIG_FILE_NAME + '_orig'):
os.system('mv {f}_orig {f}'.format(f=config_lib.CONFIG_FILE_NAME))
else: # pragma: no cover
os.system('rm {f}'.format(f=config_lib.CONFIG_FILE_NAME))
def test_set_config_creates_file(self):
"""
set_config() function writes to file
"""
expected_dict = {'name': 'John'}
config_lib.set_config(expected_dict)
saved_dict = json.loads(open(config_lib.CONFIG_FILE_NAME).read())
self.assertEqual(expected_dict, saved_dict)
def test_get_config_non_existent_file(self):
"""
get_config() creates config file when it doesn't exist
"""
if os.path.isfile(config_lib.CONFIG_FILE_NAME):
os.system('rm {}'.format(config_lib.CONFIG_FILE_NAME))
config = config_lib.get_config()
self.assertEqual(config_lib.DEFAULT_DICT, config)
def test_get_config_existing_file(self):
"""
get_config() reads existing file
"""
if os.path.isfile(config_lib.CONFIG_FILE_NAME):
os.system('rm {}'.format(config_lib.CONFIG_FILE_NAME))
test_config = {'name': 'Bill'}
with open(config_lib.CONFIG_FILE_NAME, 'w') as f:
f.write(json.dumps(test_config))
config = config_lib.get_config()
self.assertEqual(test_config, config)
|
bsd-2-clause
|
xiaoxq/apollo
|
modules/tools/realtime_plot/xyitem.py
|
3
|
6868
|
#!/usr/bin/env python3
###############################################################################
# Copyright 2017 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
"""
X Y Item
"""
import math
import numpy as np
from matplotlib import lines
from matplotlib import patches
class Xyitem(object):
"""XY item to plot"""
def __init__(self, ax, windowsize, vehiclelength, title, xlabel, ylabel):
self.ax = ax
self.windowsize = windowsize
self.vehiclelength = vehiclelength
self.ax.set_title(title)
self.ax.set_xlabel(xlabel, fontsize=10)
self.ax.set_ylabel(ylabel, fontsize=10)
self.lines = []
self.pathstartx = []
self.pathstarty = []
self.carxhist = []
self.caryhist = []
self.targetx = []
self.targety = []
self.pathstartidx = -1
self.carxyhistidx = -1
self.carposidx = -1
self.targethistidx = -1
self.axx = float('inf')
self.axy = float('inf')
self.planningavailable = False
def reset(self):
"""Reset"""
del self.pathstartx[:]
del self.pathstarty[:]
del self.carxhist[:]
del self.caryhist[:]
del self.targetx[:]
del self.targety[:]
self.ax.cla()
self.pathstartidx = -1
self.carxyhistidx = -1
self.carposidx = -1
self.targethistidx = -1
self.axx = float('inf')
self.axy = float('inf')
self.planningavailable = False
def new_planning(self, time, x, y):
"""new planning"""
self.planningtime = time
self.planningx = x
self.planningy = y
self.pathstartx.append(x[0])
self.pathstarty.append(y[0])
if self.pathstartidx == -1:
self.ax.plot(
self.pathstartx,
self.pathstarty,
color='red',
marker='*',
ls='None')
self.pathstartidx = len(self.ax.lines) - 1
self.current_line = lines.Line2D(x, y, color='red', lw=1.5)
self.ax.add_line(self.current_line)
else:
self.ax.lines[self.pathstartidx].set_data(self.pathstartx,
self.pathstarty)
self.current_line.set_data(x, y)
self.planningavailable = True
def new_carstatus(self, time, x, y, heading, steer_angle, autodriving):
"""new carstatus"""
self.carxhist.append(x)
self.caryhist.append(y)
angle = math.degrees(heading) - 90
carcolor = 'red' if autodriving else 'blue'
if self.carxyhistidx == -1:
self.ax.plot(self.carxhist, self.caryhist, color="blue")
self.carxyhistidx = len(self.ax.lines) - 1
self.ax.plot(
self.carxhist,
self.caryhist,
marker=(3, 0, angle),
markersize=20,
mfc=carcolor)
self.carposidx = len(self.ax.lines) - 1
else:
self.ax.lines[self.carxyhistidx].set_data(self.carxhist,
self.caryhist)
self.ax.lines[self.carposidx].set_data(x, y)
self.ax.lines[self.carposidx].set_marker((3, 0, angle))
self.ax.lines[self.carposidx].set_mfc(carcolor)
self.ax.patches[0].remove()
if self.planningavailable:
xtarget = np.interp(time, self.planningtime, self.planningx)
self.targetx.append(xtarget)
ytarget = np.interp(time, self.planningtime, self.planningy)
self.targety.append(ytarget)
if self.targethistidx == -1:
self.ax.plot(self.targetx, self.targety, color="green", lw=1.5)
self.targethistidx = len(self.ax.lines) - 1
else:
self.ax.lines[self.targethistidx].set_data(
self.targetx, self.targety)
self.ax.add_patch(self.gen_steer_curve(x, y, heading, steer_angle))
# Update Window X, Y Axis Limits
xcenter = x + math.cos(heading) * 40
ycenter = y + math.sin(heading) * 40
if xcenter >= (self.axx + 20) or xcenter <= (self.axx - 20) or \
ycenter >= (self.axy + 20) or ycenter <= (self.axy - 20):
scale = self.ax.get_window_extent(
)._transform._boxout._bbox.get_points()[1]
original = self.ax.get_position().get_points()
finalscale = (original[1] - original[0]) * scale
ratio = finalscale[1] / finalscale[0]
self.axx = xcenter
self.axy = ycenter
self.ax.set_xlim(
[xcenter - self.windowsize, xcenter + self.windowsize])
self.ax.set_ylim([
ycenter - self.windowsize * ratio,
ycenter + self.windowsize * ratio
])
def gen_steer_curve(self, x, y, heading, steer_angle):
"""Generate Steering Curve to predict car trajectory"""
if abs(math.tan(math.radians(steer_angle))) > 0.0001:
R = self.vehiclelength / math.tan(math.radians(steer_angle))
else:
R = 100000
radius = abs(R)
lengthangle = 7200 / (2 * math.pi * radius)
if R >= 0:
centerangle = math.pi / 2 + heading
startangle = math.degrees(heading - math.pi / 2)
theta1 = 0
theta2 = lengthangle
else:
centerangle = heading - math.pi / 2
startangle = math.degrees(math.pi / 2 + heading)
theta1 = -lengthangle
theta2 = 0
centerx = x + math.cos(centerangle) * radius
centery = y + math.sin(centerangle) * radius
curve = patches.Arc(
(centerx, centery),
2 * radius,
2 * radius,
angle=startangle,
theta1=theta1,
theta2=theta2,
linewidth=2,
zorder=2)
return curve
def draw_lines(self):
"""plot lines"""
for polygon in self.ax.patches:
self.ax.draw_artist(polygon)
for line in self.ax.lines:
self.ax.draw_artist(line)
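# A hedged usage sketch (comments only; the figure setup is an assumption --
# the real caller lives in the realtime_plot tool, not in this module):
# >>> import matplotlib.pyplot as plt
# >>> fig, ax = plt.subplots()
# >>> item = Xyitem(ax, windowsize=40, vehiclelength=2.8,
# ...               title='Trajectory', xlabel='x (m)', ylabel='y (m)')
# >>> item.new_planning(time=[0.0, 1.0], x=[0.0, 1.0], y=[0.0, 0.5])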
|
apache-2.0
|
losonczylab/Zaremba_NatNeurosci_2017
|
enrichment_model/enrichment_model_plotting.py
|
1
|
7208
|
import matplotlib.pyplot as plt
import cPickle as pkl
import numpy as np
import seaborn.apionly as sns
from lab.plotting import histogram
def enrichment(positions):
distances = np.abs(positions[np.isfinite(positions)])
return np.mean(distances), np.std(distances) / np.sqrt(len(distances))
def calc_enrichment(pos, masks):
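    """Per-repeat enrichment at each iteration: pi/2 minus the mean absolute
    distance of the masked positions."""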
enrich = []
for rep_positions, rep_masks in zip(pos, masks):
enrich.append(
[np.pi / 2 - enrichment(iter_positions[iter_mask])[0]
for iter_positions, iter_mask in zip(
rep_positions, rep_masks)])
return enrich
def calc_final_distributions(pos, masks):
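    """Pool the masked positions from the final iteration of every repeat."""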
final_dist = []
for rep_positions, rep_masks in zip(pos, masks):
final_dist.extend(rep_positions[-1][rep_masks[-1]].tolist())
return final_dist
def plot_enrichment(ax, enrichment, color, title='', rad=True):
ax.plot(range(9), np.mean(enrichment, axis=0), color=color)
ax.plot(range(9), np.percentile(enrichment, 5, axis=0), ls='--',
color=color)
ax.plot(range(9), np.percentile(enrichment, 95, axis=0), ls='--',
color=color)
ax.fill_between(
range(9), np.percentile(enrichment, 5, axis=0),
np.percentile(enrichment, 95, axis=0), facecolor=color, alpha=0.5)
sns.despine(ax=ax)
ax.tick_params(length=3, pad=2, direction='out')
ax.set_xlim(-0.5, 8.5)
if rad:
ax.set_ylim(-0.15, 0.5)
ax.set_ylabel('Enrichment (rad)')
else:
ax.set_ylim(-0.15, 0.10 * 2 * np.pi)
y_ticks = np.array(['0', '0.05', '0.10'])
ax.set_yticks(y_ticks.astype('float') * 2 * np.pi)
ax.set_yticklabels(y_ticks)
ax.set_ylabel('Enrichment (fraction of belt)')
ax.set_xlabel("Iteration ('session' #)")
ax.set_title(title)
def plot_final_distributions(
ax, final_dists, colors, labels=None, title='', rad=True):
if labels is None:
labels = [None] * len(final_dists)
for final_dist, color, label in zip(final_dists, colors, labels):
histogram(
ax, final_dist, bins=50, range=(-np.pi, np.pi),
color=color, filled=False, plot_mean=False, normed=True,
label=label)
ax.tick_params(length=3, pad=2, direction='out')
ax.axvline(ls='--', color='0.3')
ax.set_xlim(-np.pi, np.pi)
if rad:
ax.set_xlabel('Distance from reward (rad)')
else:
ax.set_xlabel('Distance from reward (fraction of belt)')
ax.set_xticks([-np.pi, -np.pi / 2, 0, np.pi / 2, np.pi])
ax.set_xticklabels(['-0.50', '-0.25', '0', '0.25', '0.50'])
ax.set_ylim(0, 0.3)
ax.set_ylabel('Normalized density')
ax.set_title(title)
def plot_parameters(axs, model, enrich):
positions = np.linspace(-np.pi, np.pi, 1000)
bs, ks = model.shift_mean_var(positions)
recur = model.recur_by_position(positions)
axs[0].plot(positions, recur)
axs[0].set_xlim(-np.pi, np.pi)
axs[0].set_ylim(0., 1.)
axs[0].set_xlabel('Position')
axs[0].set_ylabel('Recurrence probability')
axs[1].plot(positions, bs)
axs[1].set_xlim(-np.pi, np.pi)
axs[1].set_xlabel('Position')
axs[1].set_ylabel('Offset')
axs[2].plot(positions, 1 / ks)
axs[2].set_xlim(-np.pi, np.pi)
axs[2].set_xlabel('Position')
axs[2].set_ylabel('Variance')
axs[3].plot(range(9), np.mean(enrich, axis=0), color='b')
axs[3].fill_between(
range(9), np.percentile(enrich, 5, axis=0),
np.percentile(enrich, 95, axis=0), facecolor='b', alpha=0.5)
axs[3].set_xlabel('Iteration')
axs[3].set_ylabel('Enrichment (rad)')
def plot_models(
models, model_labels=None, n_cells=1000, n_runs=100, n_iterations=8):
if model_labels is None:
model_labels = ['Model {}'.format(idx) for idx in range(len(models))]
fig, axs = plt.subplots(4, len(models), figsize=(10, 10))
models[0].initialize(n_cells=n_cells)
for model in models[1:]:
model.initialize_like(models[0])
initial_mask = models[0].mask
initial_positions = models[0].positions
masks = []
positions = []
enrichment = []
for model, model_axs in zip(models, axs.T):
masks.append([])
positions.append([])
for _ in range(n_runs):
model.initialize(
initial_mask=initial_mask, initial_positions=initial_positions)
model.run(n_iterations)
masks[-1].append(model._masks)
positions[-1].append(model._positions)
enrichment.append(calc_enrichment(positions[-1], masks[-1]))
plot_parameters(model_axs, model, enrichment[-1])
for ax in axs[:, 1:].flat:
ax.set_ylabel('')
for ax in axs[:2, :].flat:
ax.set_xlabel('')
for label, ax in zip(model_labels, axs[0]):
ax.set_title(label)
offset_min, offset_max = np.inf, -np.inf
for ax in axs[1]:
offset_min = min(offset_min, ax.get_ylim()[0])
offset_max = max(offset_max, ax.get_ylim()[1])
for ax in axs[1]:
ax.set_ylim(offset_min, offset_max)
var_min, var_max = np.inf, -np.inf
for ax in axs[2]:
var_min = min(var_min, ax.get_ylim()[0])
var_max = max(var_max, ax.get_ylim()[1])
for ax in axs[2]:
ax.set_ylim(var_min, var_max)
enrich_min, enrich_max = np.inf, -np.inf
for ax in axs[3]:
enrich_min = min(enrich_min, ax.get_ylim()[0])
enrich_max = max(enrich_max, ax.get_ylim()[1])
for ax in axs[3]:
ax.set_ylim(enrich_min, enrich_max)
return fig
if __name__ == '__main__':
import enrichment_model as em
import enrichment_model_theoretical as emt
params_path_A = '/analysis/Jeff/Df16A/Df_remap_paper_v2/data/enrichment_model/Df_model_params_A.pkl'
params_path_B = '/analysis/Jeff/Df16A/Df_remap_paper_v2/data/enrichment_model/Df_model_params_B.pkl'
params_path_C = '/analysis/Jeff/Df16A/Df_remap_paper_v2/data/enrichment_model/Df_model_params_C.pkl'
#
# WT to theoretical
#
# WT_params_path = params_path_C
# WT_params = pkl.load(open(WT_params_path, 'r'))
# WT_model = em.EnrichmentModel2(**WT_params)
# recur_model = emt.EnrichmentModel2_recur(
# kappa=1, span=0.8, mean_recur=0.4, **WT_params)
# offset_model = emt.EnrichmentModel2_offset(alpha=0.25, **WT_params)
# var_model = emt.EnrichmentModel2_var(
# kappa=1, alpha=10, mean_k=3, **WT_params)
# models = [WT_model, recur_model, offset_model, var_model]
# model_labels = ['WT model', 'Stable recurrence', 'Shift towards reward',
# 'Stable position']
params_A = pkl.load(open(params_path_A, 'r'))
params_B = pkl.load(open(params_path_B, 'r'))
params_C = pkl.load(open(params_path_C, 'r'))
model_A = em.EnrichmentModel2(**params_A)
model_B = em.EnrichmentModel2(**params_B)
model_C = em.EnrichmentModel2(**params_C)
models = [model_A, model_B, model_C]
model_labels = ['A', 'B', 'C']
fig = plot_models(
models, model_labels, n_cells=1000, n_runs=100, n_iterations=8)
fig.savefig('Df_model_parameters.pdf')
from pudb import set_trace
set_trace()
|
mit
|
heplesser/nest-simulator
|
pynest/examples/spatial/conncon_targets.py
|
20
|
2449
|
# -*- coding: utf-8 -*-
#
# conncon_targets.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
Connect two populations with convergent projection and rectangular mask, visualize connections from source perspective
-----------------------------------------------------------------------------------------------------------------------
Create two populations of iaf_psc_alpha neurons on a 30x30 grid
BCCN Tutorial @ CNS*09
Hans Ekkehard Plesser, UMB
"""
import nest
import matplotlib.pyplot as plt
import numpy as np
nest.ResetKernel()
pos = nest.spatial.grid(shape=[30, 30], extent=[3., 3.], edge_wrap=True)
########################################################################
# create and connect two populations
a = nest.Create('iaf_psc_alpha', positions=pos)
b = nest.Create('iaf_psc_alpha', positions=pos)
cdict = {'rule': 'pairwise_bernoulli',
'p': 0.5,
'use_on_source': True,
'mask': {'rectangular': {'lower_left': [-0.2, -0.5],
'upper_right': [0.2, 0.5]}}}
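# 'use_on_source': True applies the rectangular mask and probability on the
# source side of each connection, matching the convergent projection described
# in the module docstring.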
nest.Connect(a, b,
conn_spec=cdict,
syn_spec={'weight': nest.random.uniform(0.5, 2.)})
#####################################################################
# first, clear existing figure, get current figure
plt.clf()
fig = plt.gcf()
# plot targets of two source neurons into same figure, with mask
for src_index in [30 * 15 + 15, 0]:
# obtain node id for center
src = a[src_index:src_index + 1]
nest.PlotTargets(src, b, mask=cdict['mask'], fig=fig)
# beautify
plt.axes().set_xticks(np.arange(-1.5, 1.55, 0.5))
plt.axes().set_yticks(np.arange(-1.5, 1.55, 0.5))
plt.grid(True)
plt.axis([-2.0, 2.0, -2.0, 2.0])
plt.axes().set_aspect('equal', 'box')
plt.title('Connection targets')
plt.show()
# plt.savefig('conncon_targets.pdf')
|
gpl-2.0
|
marcocaccin/scikit-learn
|
sklearn/feature_extraction/tests/test_feature_hasher.py
|
258
|
2861
|
from __future__ import unicode_literals
import numpy as np
from sklearn.feature_extraction import FeatureHasher
from nose.tools import assert_raises, assert_true
from numpy.testing import assert_array_equal, assert_equal
def test_feature_hasher_dicts():
h = FeatureHasher(n_features=16)
assert_equal("dict", h.input_type)
raw_X = [{"dada": 42, "tzara": 37}, {"gaga": 17}]
X1 = FeatureHasher(n_features=16).transform(raw_X)
gen = (iter(d.items()) for d in raw_X)
X2 = FeatureHasher(n_features=16, input_type="pair").transform(gen)
assert_array_equal(X1.toarray(), X2.toarray())
def test_feature_hasher_strings():
# mix byte and Unicode strings; note that "foo" is a duplicate in row 0
raw_X = [["foo", "bar", "baz", "foo".encode("ascii")],
["bar".encode("ascii"), "baz", "quux"]]
for lg_n_features in (7, 9, 11, 16, 22):
n_features = 2 ** lg_n_features
it = (x for x in raw_X) # iterable
h = FeatureHasher(n_features, non_negative=True, input_type="string")
X = h.transform(it)
assert_equal(X.shape[0], len(raw_X))
assert_equal(X.shape[1], n_features)
assert_true(np.all(X.data > 0))
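        # "foo" appears twice in row 0 (as str and as bytes, which are expected
        # to hash to the same bucket), so row 0 sums to 4 over 3 stored entries,
        # row 1 sums to 3 over 3 entries, and nnz is 6 in total.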
assert_equal(X[0].sum(), 4)
assert_equal(X[1].sum(), 3)
assert_equal(X.nnz, 6)
def test_feature_hasher_pairs():
raw_X = (iter(d.items()) for d in [{"foo": 1, "bar": 2},
{"baz": 3, "quux": 4, "foo": -1}])
h = FeatureHasher(n_features=16, input_type="pair")
x1, x2 = h.transform(raw_X).toarray()
x1_nz = sorted(np.abs(x1[x1 != 0]))
x2_nz = sorted(np.abs(x2[x2 != 0]))
assert_equal([1, 2], x1_nz)
assert_equal([1, 3, 4], x2_nz)
def test_hash_empty_input():
n_features = 16
raw_X = [[], (), iter(range(0))]
h = FeatureHasher(n_features=n_features, input_type="string")
X = h.transform(raw_X)
assert_array_equal(X.A, np.zeros((len(raw_X), n_features)))
def test_hasher_invalid_input():
assert_raises(ValueError, FeatureHasher, input_type="gobbledygook")
assert_raises(ValueError, FeatureHasher, n_features=-1)
assert_raises(ValueError, FeatureHasher, n_features=0)
assert_raises(TypeError, FeatureHasher, n_features='ham')
h = FeatureHasher(n_features=np.uint16(2 ** 6))
assert_raises(ValueError, h.transform, [])
assert_raises(Exception, h.transform, [[5.5]])
assert_raises(Exception, h.transform, [[None]])
def test_hasher_set_params():
# Test delayed input validation in fit (useful for grid search).
hasher = FeatureHasher()
hasher.set_params(n_features=np.inf)
assert_raises(TypeError, hasher.fit)
def test_hasher_zeros():
# Assert that no zeros are materialized in the output.
X = FeatureHasher().transform([{'foo': 0}])
assert_equal(X.data.shape, (0,))
|
bsd-3-clause
|
BiaDarkia/scikit-learn
|
examples/linear_model/plot_polynomial_interpolation.py
|
168
|
2088
|
#!/usr/bin/env python
"""
========================
Polynomial interpolation
========================
This example demonstrates how to approximate a function with a polynomial of
degree n_degree by using ridge regression. Concretely, from n_samples 1d
points, it suffices to build the Vandermonde matrix, which is n_samples x
(n_degree + 1) and has the following form:
[[1, x_1, x_1 ** 2, x_1 ** 3, ...],
[1, x_2, x_2 ** 2, x_2 ** 3, ...],
...]
Intuitively, this matrix can be interpreted as a matrix of pseudo features (the
points raised to some power). The matrix is akin to (but different from) the
matrix induced by a polynomial kernel.
This example shows that you can do non-linear regression with a linear model,
using a pipeline to add non-linear features. Kernel methods extend this idea
and can induce very high (even infinite) dimensional feature spaces.
"""
print(__doc__)
# Author: Mathieu Blondel
# Jake Vanderplas
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import Ridge
from sklearn.preprocessing import PolynomialFeatures
from sklearn.pipeline import make_pipeline
def f(x):
""" function to approximate by polynomial interpolation"""
return x * np.sin(x)
# generate points used to plot
x_plot = np.linspace(0, 10, 100)
# generate points and keep a subset of them
x = np.linspace(0, 10, 100)
rng = np.random.RandomState(0)
rng.shuffle(x)
x = np.sort(x[:20])
y = f(x)
# create matrix versions of these arrays
X = x[:, np.newaxis]
X_plot = x_plot[:, np.newaxis]
colors = ['teal', 'yellowgreen', 'gold']
lw = 2
plt.plot(x_plot, f(x_plot), color='cornflowerblue', linewidth=lw,
label="ground truth")
plt.scatter(x, y, color='navy', s=30, marker='o', label="training points")
for count, degree in enumerate([3, 4, 5]):
model = make_pipeline(PolynomialFeatures(degree), Ridge())
model.fit(X, y)
y_plot = model.predict(X_plot)
plt.plot(x_plot, y_plot, color=colors[count], linewidth=lw,
label="degree %d" % degree)
plt.legend(loc='lower left')
plt.show()
|
bsd-3-clause
|
JackKelly/pda
|
scripts/plot_activity.py
|
1
|
2034
|
#!/usr/bin/python
from __future__ import print_function, division
import matplotlib.pyplot as plt
import pda.dataset as ds
from pda.channel import indicies_of_periods
import numpy as np
from matplotlib.ticker import MultipleLocator
import pandas as pd
"""
Plots a large bitmap showing activity for all channels over the entire
duration of the dataset.
"""
PWR_ON_THRESHOLD = 4 # watts
MIN_DAYS_PER_CHAN = 10
DATA_DIR = '/data/mine/vadeec/jack-merged/'
#DATA_DIR = '/data/mine/vadeec/jack/137'
print("Loading dataset...")
dataset = ds.load_dataset(DATA_DIR)
# create pd.DataFrame of all channels
print("Creating DataFrame...")
chans = []
for channel in dataset:
chans.append((channel.name, channel.series))
df = pd.DataFrame.from_items(chans)
print("Creating bitmap...")
day_range, day_boundaries = indicies_of_periods(df.index, 'D')
N_DAYS = day_range.size - 1
N_CHANNELS = df.columns.size
MINS_PER_DAY = 24 * 60
WIDTH = MINS_PER_DAY # 1 pixel per minute
HEIGHT = N_DAYS * N_CHANNELS
bitmap = np.zeros((HEIGHT, WIDTH), dtype=np.float)
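# Bitmap layout: each row is one (day, channel) pair -- N_CHANNELS rows per
# day -- and each column is one minute of that day.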
for day_i in range(N_DAYS):
day = day_range[day_i]
try:
start_index, end_index = day_boundaries[day]
except KeyError:
# No data available for this day
continue
data_for_day = df[start_index:end_index]
data_for_day_minutely = data_for_day.resample('T', how='max').to_period()
midnight = day.asfreq('T', how='start')
first_minute = data_for_day_minutely.index[0] - midnight
last_minute = data_for_day_minutely.index[-1] - midnight
for chan_i in range(N_CHANNELS):
on = data_for_day_minutely.ix[:,chan_i] > PWR_ON_THRESHOLD
y = HEIGHT - chan_i - (day_i * N_CHANNELS) - 1
try:
bitmap[y][first_minute:last_minute+1] = on
except ValueError as e:
print(e, day, "likely because there's a gap in the day's data")
break
# import ipdb; ipdb.set_trace()
fig = plt.figure(figsize=(10,10))
ax = fig.add_subplot(1,1,1)
im = ax.imshow(bitmap, interpolation='nearest')
plt.show()
|
apache-2.0
|
americanhanko/dotfiles
|
config/ipython_config.py
|
1
|
22308
|
# Configuration file for ipython.
#------------------------------------------------------------------------------
# InteractiveShellApp(Configurable) configuration
#------------------------------------------------------------------------------
# A Mixin for applications that start InteractiveShell instances.
#
# Provides configurables for loading extensions and executing files as part of
# configuring a Shell environment.
#
# The following methods should be called by the :meth:`initialize` method of the
# subclass:
#
# - :meth:`init_path`
# - :meth:`init_shell` (to be implemented by the subclass)
# - :meth:`init_gui_pylab`
# - :meth:`init_extensions`
# - :meth:`init_code`
# Execute the given command string.
#c.InteractiveShellApp.code_to_run = ''
# Run the file referenced by the PYTHONSTARTUP environment variable at IPython
# startup.
#c.InteractiveShellApp.exec_PYTHONSTARTUP = True
# List of files to run at IPython startup.
#c.InteractiveShellApp.exec_files = []
# lines of code to run at IPython startup.
c.InteractiveShellApp.exec_lines = ['%autoreload 2']
c.InteractiveShellApp.exec_lines.append(
'print("Warning: disable autoreload in ipython_config.py to improve performance.")')
# A list of dotted module names of IPython extensions to load.
c.InteractiveShellApp.extensions = ['autoreload']
# dotted module name of an IPython extension to load.
#c.InteractiveShellApp.extra_extension = ''
# A file to be run
#c.InteractiveShellApp.file_to_run = ''
# Enable GUI event loop integration with any of ('glut', 'gtk', 'gtk2', 'gtk3',
# 'osx', 'pyglet', 'qt', 'qt4', 'qt5', 'tk', 'wx', 'gtk2', 'qt4').
#c.InteractiveShellApp.gui = None
# Should variables loaded at startup (by startup files, exec_lines, etc.) be
# hidden from tools like %who?
#c.InteractiveShellApp.hide_initial_ns = True
# Configure matplotlib for interactive use with the default matplotlib backend.
#c.InteractiveShellApp.matplotlib = None
# Run the module as a script.
#c.InteractiveShellApp.module_to_run = ''
# Pre-load matplotlib and numpy for interactive use, selecting a particular
# matplotlib backend and loop integration.
#c.InteractiveShellApp.pylab = None
# If true, IPython will populate the user namespace with numpy, pylab, etc. and
# an ``import *`` is done from numpy and pylab, when using pylab mode.
#
# When False, pylab mode should not import any names into the user namespace.
#c.InteractiveShellApp.pylab_import_all = True
# Reraise exceptions encountered loading IPython extensions?
#c.InteractiveShellApp.reraise_ipython_extension_failures = False
#------------------------------------------------------------------------------
# Application(SingletonConfigurable) configuration
#------------------------------------------------------------------------------
# This is an application.
# The date format used by logging formatters for %(asctime)s
#c.Application.log_datefmt = '%Y-%m-%d %H:%M:%S'
# The Logging format template
#c.Application.log_format = '[%(name)s]%(highlevel)s %(message)s'
# Set the log level by value or name.
#c.Application.log_level = 30
#------------------------------------------------------------------------------
# BaseIPythonApplication(Application) configuration
#------------------------------------------------------------------------------
# IPython: an enhanced interactive Python shell.
# Whether to create profile dir if it doesn't exist
#c.BaseIPythonApplication.auto_create = False
# Whether to install the default config files into the profile dir. If a new
# profile is being created, and IPython contains config files for that profile,
# then they will be staged into the new directory. Otherwise, default config
# files will be automatically generated.
#c.BaseIPythonApplication.copy_config_files = False
# Path to an extra config file to load.
#
# If specified, load this config file in addition to any other IPython config.
#c.BaseIPythonApplication.extra_config_file = ''
# The name of the IPython directory. This directory is used for logging
# configuration (through profiles), history storage, etc. The default is usually
# $HOME/.ipython. This option can also be specified through the environment
# variable IPYTHONDIR.
#c.BaseIPythonApplication.ipython_dir = ''
# Whether to overwrite existing config files when copying
#c.BaseIPythonApplication.overwrite = False
# The IPython profile to use.
#c.BaseIPythonApplication.profile = 'default'
# Create a massive crash report when IPython encounters what may be an internal
# error. The default is to append a short message to the usual traceback
#c.BaseIPythonApplication.verbose_crash = False
#------------------------------------------------------------------------------
# TerminalIPythonApp(BaseIPythonApplication,InteractiveShellApp) configuration
#------------------------------------------------------------------------------
# Whether to display a banner upon starting IPython.
#c.TerminalIPythonApp.display_banner = True
# If a command or file is given via the command-line, e.g. 'ipython foo.py',
# start an interactive shell after executing the file or command.
#c.TerminalIPythonApp.force_interact = False
# Start IPython quickly by skipping the loading of config files.
#c.TerminalIPythonApp.quick = False
#------------------------------------------------------------------------------
# InteractiveShell(SingletonConfigurable) configuration
#------------------------------------------------------------------------------
# An enhanced, interactive shell for Python.
# 'all', 'last', 'last_expr' or 'none', specifying which nodes should be run
# interactively (displaying output from expressions).
#c.InteractiveShell.ast_node_interactivity = 'last_expr'
# A list of ast.NodeTransformer subclass instances, which will be applied to
# user input before code is run.
#c.InteractiveShell.ast_transformers = []
# Make IPython automatically call any callable object even if you didn't type
# explicit parentheses. For example, 'str 43' becomes 'str(43)' automatically.
# The value can be '0' to disable the feature, '1' for 'smart' autocall, where
# it is not applied if there are no more arguments on the line, and '2' for
# 'full' autocall, where all callable objects are automatically called (even if
# no arguments are present).
#c.InteractiveShell.autocall = 0
# Autoindent IPython code entered interactively.
c.InteractiveShell.autoindent = True
# Enable magic commands to be called without the leading %.
c.InteractiveShell.automagic = True
# The part of the banner to be printed before the profile
#c.InteractiveShell.banner1 = 'Python 3.6.1 (default, Mar 23 2017, 16:49:06) \nType "copyright", "credits" or "license" for more information.\n\nIPython 5.3.0 -- An enhanced Interactive Python.\n? -> Introduction and overview of IPython\'s features.\n%quickref -> Quick reference.\nhelp -> Python\'s own help system.\nobject? -> Details about \'object\', use \'object??\' for extra details.\n'
# The part of the banner to be printed after the profile
#c.InteractiveShell.banner2 = ''
# Set the size of the output cache. The default is 1000, you can change it
# permanently in your config file. Setting it to 0 completely disables the
# caching system, and the minimum value accepted is 20 (if you provide a value
# less than 20, it is reset to 0 and a warning is issued). This limit is
# defined because otherwise you'll spend more time re-flushing a too small cache
# than working
#c.InteractiveShell.cache_size = 1000
# Use colors for displaying information about objects. Because this information
# is passed through a pager (like 'less'), and some pagers get confused with
# color codes, this capability can be turned off.
c.InteractiveShell.color_info = True
# Set the color scheme (NoColor, Neutral, Linux, or LightBG).
c.InteractiveShell.colors = 'Linux'
##
#c.InteractiveShell.debug = False
# **Deprecated**
#
# Will be removed in IPython 6.0
#
# Enable deep (recursive) reloading by default. IPython can use the deep_reload
# module which reloads changes in modules recursively (it replaces the reload()
# function, so you don't need to change anything to use it). `deep_reload`
# forces a full reload of modules whose code may have changed, which the default
# reload() function does not. When deep_reload is off, IPython will use the
# normal reload(), but deep_reload will still be available as dreload().
#c.InteractiveShell.deep_reload = False
# Don't call post-execute functions that have failed in the past.
#c.InteractiveShell.disable_failing_post_execute = False
# If True, anything that would be passed to the pager will be displayed as
# regular output instead.
#c.InteractiveShell.display_page = False
# (Provisional API) enables html representation in mime bundles sent to pagers.
#c.InteractiveShell.enable_html_pager = False
# Total length of command history
#c.InteractiveShell.history_length = 10000
# The number of saved history entries to be loaded into the history buffer at
# startup.
#c.InteractiveShell.history_load_length = 1000
##
#c.InteractiveShell.ipython_dir = ''
# Start logging to the given file in append mode. Use `logfile` to specify a log
# file to **overwrite** logs to.
#c.InteractiveShell.logappend = ''
# The name of the logfile to use.
#c.InteractiveShell.logfile = ''
# Start logging to the default log file in overwrite mode. Use `logappend` to
# specify a log file to **append** logs to.
#c.InteractiveShell.logstart = False
##
#c.InteractiveShell.object_info_string_level = 0
# Automatically call the pdb debugger after every exception.
#c.InteractiveShell.pdb = False
# Deprecated since IPython 4.0 and ignored since 5.0, set
# TerminalInteractiveShell.prompts object directly.
# c.InteractiveShell.prompt_in1 = 'In [\\#]: '
# Deprecated since IPython 4.0 and ignored since 5.0, set
# TerminalInteractiveShell.prompts object directly.
#c.InteractiveShell.prompt_in2 = ' .\\D.: '
# Deprecated since IPython 4.0 and ignored since 5.0, set
# TerminalInteractiveShell.prompts object directly.
# c.InteractiveShell.prompt_out = 'Out[\\#]: '
# Deprecated since IPython 4.0 and ignored since 5.0, set
# TerminalInteractiveShell.prompts object directly.
#c.InteractiveShell.prompts_pad_left = True
##
#c.InteractiveShell.quiet = False
##
#c.InteractiveShell.separate_in = '\n'
##
#c.InteractiveShell.separate_out = ''
##
#c.InteractiveShell.separate_out2 = ''
# Show rewritten input, e.g. for autocall.
#c.InteractiveShell.show_rewritten_input = True
# Enables rich html representation of docstrings. (This requires the docrepr
# module).
#c.InteractiveShell.sphinxify_docstring = False
##
#c.InteractiveShell.wildcards_case_sensitive = True
##
#c.InteractiveShell.xmode = 'Context'
#------------------------------------------------------------------------------
# TerminalInteractiveShell(InteractiveShell) configuration
#------------------------------------------------------------------------------
# Set to confirm when you try to exit IPython with an EOF (Control-D in Unix,
# Control-Z/Enter in Windows). By typing 'exit' or 'quit', you can force a
# direct exit without any confirmation.
#c.TerminalInteractiveShell.confirm_exit = True
# Options for displaying tab completions, 'column', 'multicolumn', and
# 'readlinelike'. These options are for `prompt_toolkit`, see `prompt_toolkit`
# documentation for more information.
#c.TerminalInteractiveShell.display_completions = 'multicolumn'
# Shortcut style to use at the prompt. 'vi' or 'emacs'.
#c.TerminalInteractiveShell.editing_mode = 'emacs'
# Set the editor used by IPython (default to $EDITOR/vi/notepad).
#c.TerminalInteractiveShell.editor = '/usr/local/bin/atom'
# Enable vi (v) or Emacs (C-X C-E) shortcuts to open an external editor. This is
# in addition to the F2 binding, which is always enabled.
#c.TerminalInteractiveShell.extra_open_editor_shortcuts = False
# Highlight matching brackets.
#c.TerminalInteractiveShell.highlight_matching_brackets = True
# The name or class of a Pygments style to use for syntax
# highlighting:
# default, emacs, friendly, colorful, autumn, murphy, manni, monokai, perldoc, pastie, borland, trac, native, fruity, bw, vim, vs, tango, rrt, xcode, igor, paraiso-light, paraiso-dark, lovelace, algol, algol_nu, arduino, rainbow_dash, abap
#c.TerminalInteractiveShell.highlighting_style = traitlets.Undefined
# Override highlighting format for specific tokens
#c.TerminalInteractiveShell.highlighting_style_overrides = {}
# Enable mouse support in the prompt
#c.TerminalInteractiveShell.mouse_support = False
# Class used to generate Prompt token for prompt_toolkit
#c.TerminalInteractiveShell.prompts_class = 'IPython.terminal.prompts.Prompts'
# Use `raw_input` for the REPL, without completion, multiline input, and prompt
# colors.
#
# Useful when controlling IPython as a subprocess, and piping STDIN/OUT/ERR.
# Known usages are: IPython's own testing machinery, and emacs inferior-shell
# integration through elpy.
#
# This mode defaults to `True` if the `IPY_TEST_SIMPLE_PROMPT` environment
# variable is set, or the current terminal is not a tty.
#c.TerminalInteractiveShell.simple_prompt = False
# Number of lines at the bottom of the screen to reserve for the completion menu
#c.TerminalInteractiveShell.space_for_menu = 6
# Automatically set the terminal title
#c.TerminalInteractiveShell.term_title = True
# Use 24bit colors instead of 256 colors in prompt highlighting. If your
# terminal supports true color, the following command should print 'TRUECOLOR'
# in orange: printf "\x1b[38;2;255;100;0mTRUECOLOR\x1b[0m\n"
#c.TerminalInteractiveShell.true_color = False
#------------------------------------------------------------------------------
# HistoryAccessor(HistoryAccessorBase) configuration
#------------------------------------------------------------------------------
# Access the history database without adding to it.
#
# This is intended for use by standalone history tools. IPython shells use
# HistoryManager, below, which is a subclass of this.
# Options for configuring the SQLite connection
#
# These options are passed as keyword args to sqlite3.connect when establishing
# database connections.
#c.HistoryAccessor.connection_options = {}
# enable the SQLite history
#
# set enabled=False to disable the SQLite history, in which case there will be
# no stored history, no SQLite connection, and no background saving thread.
# This may be necessary in some threaded environments where IPython is embedded.
#c.HistoryAccessor.enabled = True
# Path to file to use for SQLite history database.
#
# By default, IPython will put the history database in the IPython profile
# directory. If you would rather share one history among profiles, you can set
# this value in each, so that they are consistent.
#
# Due to an issue with fcntl, SQLite is known to misbehave on some NFS mounts.
# If you see IPython hanging, try setting this to something on a local disk,
# e.g::
#
# ipython --HistoryManager.hist_file=/tmp/ipython_hist.sqlite
#
# you can also use the specific value `:memory:` (including the colons at both
# ends but not the backticks), to avoid creating a history file.
#c.HistoryAccessor.hist_file = ''
#------------------------------------------------------------------------------
# HistoryManager(HistoryAccessor) configuration
#------------------------------------------------------------------------------
# A class to organize all history-related functionality in one place.
# Write to database every x commands (higher values save disk access & power).
# Values of 1 or less effectively disable caching.
#c.HistoryManager.db_cache_size = 0
# Should the history database include output? (default: no)
#c.HistoryManager.db_log_output = False
#------------------------------------------------------------------------------
# ProfileDir(LoggingConfigurable) configuration
#------------------------------------------------------------------------------
# An object to manage the profile directory and its resources.
#
# The profile directory is used by all IPython applications, to manage
# configuration, logging and security.
#
# This object knows how to find, create and manage these directories. This
# should be used by any code that wants to handle profiles.
# Set the profile location directly. This overrides the logic used by the
# `profile` option.
#c.ProfileDir.location = ''
#------------------------------------------------------------------------------
# BaseFormatter(Configurable) configuration
#------------------------------------------------------------------------------
# A base formatter class that is configurable.
#
# This formatter should usually be used as the base class of all formatters. It
# is a traited :class:`Configurable` class and includes an extensible API for
# users to determine how their objects are formatted. The following logic is
# used to find a function to format a given object.
#
# 1. The object is introspected to see if it has a method with the name
#    :attr:`print_method`. If it does, that object is passed to that method
#    for formatting.
# 2. If no print method is found, three internal dictionaries are consulted
#    to find a print method: :attr:`singleton_printers`, :attr:`type_printers`
# and :attr:`deferred_printers`.
#
# Users should use these dictionaries to register functions that will be used to
# compute the format data for their objects (if those objects don't have the
# special print methods). The easiest way of using these dictionaries is through
# the :meth:`for_type` and :meth:`for_type_by_name` methods.
#
# If no function/callable is found to compute the format data, ``None`` is
# returned and this format type is not used.
##
#c.BaseFormatter.deferred_printers = {}
##
#c.BaseFormatter.enabled = True
##
#c.BaseFormatter.singleton_printers = {}
##
#c.BaseFormatter.type_printers = {}
#------------------------------------------------------------------------------
# PlainTextFormatter(BaseFormatter) configuration
#------------------------------------------------------------------------------
# The default pretty-printer.
#
# This uses :mod:`IPython.lib.pretty` to compute the format data of the object.
# If the object cannot be pretty printed, :func:`repr` is used. See the
# documentation of :mod:`IPython.lib.pretty` for details on how to write pretty
# printers. Here is a simple example::
#
# def dtype_pprinter(obj, p, cycle):
# if cycle:
# return p.text('dtype(...)')
# if hasattr(obj, 'fields'):
# if obj.fields is None:
# p.text(repr(obj))
# else:
# p.begin_group(7, 'dtype([')
# for i, field in enumerate(obj.descr):
# if i > 0:
# p.text(',')
# p.breakable()
# p.pretty(field)
# p.end_group(7, '])')
##
#c.PlainTextFormatter.float_precision = ''
# Truncate large collections (lists, dicts, tuples, sets) to this size.
#
# Set to 0 to disable truncation.
#c.PlainTextFormatter.max_seq_length = 1000
##
#c.PlainTextFormatter.max_width = 79
##
#c.PlainTextFormatter.newline = '\n'
##
#c.PlainTextFormatter.pprint = True
##
#c.PlainTextFormatter.verbose = False
#------------------------------------------------------------------------------
# Completer(Configurable) configuration
#------------------------------------------------------------------------------
# Activate greedy completion. PENDING DEPRECATION: this is now mostly taken
# care of by Jedi.
#
# This will enable completion on elements of lists, results of function calls,
# etc., but can be unsafe because the code is actually evaluated on TAB.
#c.Completer.greedy = False
#------------------------------------------------------------------------------
# IPCompleter(Completer) configuration
#------------------------------------------------------------------------------
# Extension of the completer class with IPython-specific features
# DEPRECATED as of version 5.0.
#
# Instruct the completer to use __all__ for the completion
#
# Specifically, when completing on ``object.<tab>``.
#
# When True: only those names in obj.__all__ will be included.
#
# When False [default]: the __all__ attribute is ignored
#c.IPCompleter.limit_to__all__ = False
# Whether to merge completion results into a single list
#
# If False, only the completion results from the first non-empty completer will
# be returned.
#c.IPCompleter.merge_completions = True
# Instruct the completer to omit private method names
#
# Specifically, when completing on ``object.<tab>``.
#
# When 2 [default]: all names that start with '_' will be excluded.
#
# When 1: all 'magic' names (``__foo__``) will be excluded.
#
# When 0: nothing will be excluded.
#c.IPCompleter.omit__names = 2
#------------------------------------------------------------------------------
# ScriptMagics(Magics) configuration
#------------------------------------------------------------------------------
# Magics for talking to scripts
#
# This defines a base `%%script` cell magic for running a cell with a program in
# a subprocess, and registers a few top-level magics that call %%script with
# common interpreters.
# Extra script cell magics to define
#
# This generates simple wrappers of `%%script foo` as `%%foo`.
#
# If you want to add script magics that aren't on your path, specify them in
# script_paths
#c.ScriptMagics.script_magics = []
# Dict mapping short 'ruby' names to full paths, such as '/opt/secret/bin/ruby'
#
# Only necessary for items in script_magics where the default path will not find
# the right interpreter.
#c.ScriptMagics.script_paths = {}
#------------------------------------------------------------------------------
# StoreMagics(Magics) configuration
#------------------------------------------------------------------------------
# Lightweight persistence for python variables.
#
# Provides the %store magic.
# If True, any %store-d variables will be automatically restored when IPython
# starts.
#c.StoreMagics.autorestore = False
|
mit
|
mblondel/scikit-learn
|
examples/linear_model/lasso_dense_vs_sparse_data.py
|
348
|
1862
|
"""
==============================
Lasso on dense and sparse data
==============================
We show that linear_model.Lasso provides the same results for dense and sparse
data and that in the case of sparse data the speed is improved.
"""
print(__doc__)
from time import time
from scipy import sparse
from scipy import linalg
from sklearn.datasets.samples_generator import make_regression
from sklearn.linear_model import Lasso
###############################################################################
# The two Lasso implementations on Dense data
print("--- Dense matrices")
X, y = make_regression(n_samples=200, n_features=5000, random_state=0)
X_sp = sparse.coo_matrix(X)
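# Note: X_sp merely wraps the same fully dense values in a sparse container;
# the real sparse speed-up only shows up in the second section below, where
# most entries are first set to zero.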
alpha = 1
sparse_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=1000)
dense_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=1000)
t0 = time()
sparse_lasso.fit(X_sp, y)
print("Sparse Lasso done in %fs" % (time() - t0))
t0 = time()
dense_lasso.fit(X, y)
print("Dense Lasso done in %fs" % (time() - t0))
print("Distance between coefficients : %s"
% linalg.norm(sparse_lasso.coef_ - dense_lasso.coef_))
###############################################################################
# The two Lasso implementations on Sparse data
print("--- Sparse matrices")
Xs = X.copy()
Xs[Xs < 2.5] = 0.0
Xs = sparse.coo_matrix(Xs)
Xs = Xs.tocsc()
print("Matrix density : %s %%" % (Xs.nnz / float(X.size) * 100))
alpha = 0.1
sparse_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=10000)
dense_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=10000)
t0 = time()
sparse_lasso.fit(Xs, y)
print("Sparse Lasso done in %fs" % (time() - t0))
t0 = time()
dense_lasso.fit(Xs.toarray(), y)
print("Dense Lasso done in %fs" % (time() - t0))
print("Distance between coefficients : %s"
% linalg.norm(sparse_lasso.coef_ - dense_lasso.coef_))
|
bsd-3-clause
|
raghavrv/scikit-learn
|
sklearn/preprocessing/data.py
|
3
|
92338
|
# Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Olivier Grisel <[email protected]>
# Andreas Mueller <[email protected]>
# Eric Martin <[email protected]>
# Giorgio Patrini <[email protected]>
# License: BSD 3 clause
from __future__ import division
from itertools import chain, combinations
import numbers
import warnings
from itertools import combinations_with_replacement as combinations_w_r
import numpy as np
from scipy import sparse
from scipy import stats
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..externals.six import string_types
from ..utils import check_array
from ..utils.extmath import row_norms
from ..utils.extmath import _incremental_mean_and_var
from ..utils.sparsefuncs_fast import (inplace_csr_row_normalize_l1,
inplace_csr_row_normalize_l2)
from ..utils.sparsefuncs import (inplace_column_scale,
mean_variance_axis, incr_mean_variance_axis,
min_max_axis)
from ..utils.validation import (check_is_fitted, check_random_state,
FLOAT_DTYPES)
BOUNDS_THRESHOLD = 1e-7
zip = six.moves.zip
map = six.moves.map
range = six.moves.range
__all__ = [
'Binarizer',
'KernelCenterer',
'MinMaxScaler',
'MaxAbsScaler',
'Normalizer',
'OneHotEncoder',
'RobustScaler',
'StandardScaler',
'QuantileTransformer',
'add_dummy_feature',
'binarize',
'normalize',
'scale',
'robust_scale',
'maxabs_scale',
'minmax_scale',
'quantile_transform',
]
def _handle_zeros_in_scale(scale, copy=True):
''' Makes sure that whenever scale is zero, we handle it correctly.
This happens in most scalers when we have constant features.'''
# if we are fitting on 1D arrays, scale might be a scalar
if np.isscalar(scale):
if scale == .0:
scale = 1.
return scale
elif isinstance(scale, np.ndarray):
if copy:
# New array to avoid side-effects
scale = scale.copy()
scale[scale == 0.0] = 1.0
return scale
def scale(X, axis=0, with_mean=True, with_std=True, copy=True):
"""Standardize a dataset along any axis
Center to the mean and component wise scale to unit variance.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
X : {array-like, sparse matrix}
The data to center and scale.
axis : int (0 by default)
axis used to compute the means and standard deviations along. If 0,
independently standardize each feature, otherwise (if 1) standardize
each sample.
with_mean : boolean, True by default
If True, center the data before scaling.
with_std : boolean, True by default
If True, scale the data to unit variance (or equivalently,
unit standard deviation).
copy : boolean, optional, default True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSC matrix and if axis is 1).
Notes
-----
This implementation will refuse to center scipy.sparse matrices
since it would make them non-sparse and would potentially crash the
program with memory exhaustion problems.
Instead the caller is expected to either set explicitly
`with_mean=False` (in that case, only variance scaling will be
performed on the features of the CSC matrix) or to call `X.toarray()`
if he/she expects the materialized dense array to fit in memory.
To avoid memory copy the caller should pass a CSC matrix.
For a comparison of the different scalers, transformers, and normalizers,
see :ref:`examples/preprocessing/plot_all_scaling.py
<sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
See also
--------
    StandardScaler: Performs scaling to unit variance using the ``Transformer`` API
(e.g. as part of a preprocessing :class:`sklearn.pipeline.Pipeline`).
""" # noqa
X = check_array(X, accept_sparse='csc', copy=copy, ensure_2d=False,
warn_on_dtype=True, estimator='the scale function',
dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` instead"
" See docstring for motivation and alternatives.")
if axis != 0:
raise ValueError("Can only scale sparse matrix on axis=0, "
" got axis=%d" % axis)
if with_std:
_, var = mean_variance_axis(X, axis=0)
var = _handle_zeros_in_scale(var, copy=False)
inplace_column_scale(X, 1 / np.sqrt(var))
else:
X = np.asarray(X)
if with_mean:
mean_ = np.mean(X, axis)
if with_std:
scale_ = np.std(X, axis)
# Xr is a view on the original array that enables easy use of
# broadcasting on the axis in which we are interested in
Xr = np.rollaxis(X, axis)
if with_mean:
Xr -= mean_
mean_1 = Xr.mean(axis=0)
# Verify that mean_1 is 'close to zero'. If X contains very
# large values, mean_1 can also be very large, due to a lack of
# precision of mean_. In this case, a pre-scaling of the
# concerned feature is efficient, for instance by its mean or
# maximum.
if not np.allclose(mean_1, 0):
warnings.warn("Numerical issues were encountered "
"when centering the data "
"and might not be solved. Dataset may "
"contain too large values. You may need "
"to prescale your features.")
Xr -= mean_1
if with_std:
scale_ = _handle_zeros_in_scale(scale_, copy=False)
Xr /= scale_
if with_mean:
mean_2 = Xr.mean(axis=0)
# If mean_2 is not 'close to zero', it comes from the fact that
# scale_ is very small so that mean_2 = mean_1/scale_ > 0, even
# if mean_1 was close to zero. The problem is thus essentially
# due to the lack of precision of mean_. A solution is then to
# subtract the mean again:
if not np.allclose(mean_2, 0):
warnings.warn("Numerical issues were encountered "
"when scaling the data "
"and might not be solved. The standard "
"deviation of the data is probably "
"very close to 0. ")
Xr -= mean_2
return X
class MinMaxScaler(BaseEstimator, TransformerMixin):
"""Transforms features by scaling each feature to a given range.
This estimator scales and translates each feature individually such
that it is in the given range on the training set, i.e. between
zero and one.
The transformation is given by::
X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
X_scaled = X_std * (max - min) + min
where min, max = feature_range.
This transformation is often used as an alternative to zero mean,
unit variance scaling.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
feature_range : tuple (min, max), default=(0, 1)
Desired range of transformed data.
copy : boolean, optional, default True
Set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array).
Attributes
----------
min_ : ndarray, shape (n_features,)
Per feature adjustment for minimum.
scale_ : ndarray, shape (n_features,)
Per feature relative scaling of the data.
.. versionadded:: 0.17
*scale_* attribute.
data_min_ : ndarray, shape (n_features,)
Per feature minimum seen in the data
.. versionadded:: 0.17
*data_min_*
data_max_ : ndarray, shape (n_features,)
Per feature maximum seen in the data
.. versionadded:: 0.17
*data_max_*
data_range_ : ndarray, shape (n_features,)
Per feature range ``(data_max_ - data_min_)`` seen in the data
.. versionadded:: 0.17
*data_range_*
See also
--------
minmax_scale: Equivalent function without the estimator API.
Notes
-----
For a comparison of the different scalers, transformers, and normalizers,
see :ref:`examples/preprocessing/plot_all_scaling.py
<sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
"""
def __init__(self, feature_range=(0, 1), copy=True):
self.feature_range = feature_range
self.copy = copy
def _reset(self):
"""Reset internal data-dependent state of the scaler, if necessary.
__init__ parameters are not touched.
"""
        # Checking one attribute is enough, because they are all set together
# in partial_fit
if hasattr(self, 'scale_'):
del self.scale_
del self.min_
del self.n_samples_seen_
del self.data_min_
del self.data_max_
del self.data_range_
def fit(self, X, y=None):
"""Compute the minimum and maximum to be used for later scaling.
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to compute the per-feature minimum and maximum
used for later scaling along the features axis.
"""
# Reset internal state before fitting
self._reset()
return self.partial_fit(X, y)
def partial_fit(self, X, y=None):
"""Online computation of min and max on X for later scaling.
All of X is processed as a single batch. This is intended for cases
        when `fit` is not feasible due to a very large number of `n_samples`
or because X is read from a continuous stream.
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to compute the mean and standard deviation
used for later scaling along the features axis.
y : Passthrough for ``Pipeline`` compatibility.
"""
feature_range = self.feature_range
if feature_range[0] >= feature_range[1]:
raise ValueError("Minimum of desired feature range must be smaller"
" than maximum. Got %s." % str(feature_range))
if sparse.issparse(X):
raise TypeError("MinMaxScaler does no support sparse input. "
"You may consider to use MaxAbsScaler instead.")
X = check_array(X, copy=self.copy, warn_on_dtype=True,
estimator=self, dtype=FLOAT_DTYPES)
data_min = np.min(X, axis=0)
data_max = np.max(X, axis=0)
# First pass
if not hasattr(self, 'n_samples_seen_'):
self.n_samples_seen_ = X.shape[0]
# Next steps
else:
data_min = np.minimum(self.data_min_, data_min)
data_max = np.maximum(self.data_max_, data_max)
self.n_samples_seen_ += X.shape[0]
data_range = data_max - data_min
self.scale_ = ((feature_range[1] - feature_range[0]) /
_handle_zeros_in_scale(data_range))
self.min_ = feature_range[0] - data_min * self.scale_
self.data_min_ = data_min
self.data_max_ = data_max
self.data_range_ = data_range
return self
def transform(self, X):
"""Scaling features of X according to feature_range.
Parameters
----------
X : array-like, shape [n_samples, n_features]
Input data that will be transformed.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, copy=self.copy, dtype=FLOAT_DTYPES)
X *= self.scale_
X += self.min_
return X
def inverse_transform(self, X):
"""Undo the scaling of X according to feature_range.
Parameters
----------
X : array-like, shape [n_samples, n_features]
Input data that will be transformed. It cannot be sparse.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, copy=self.copy, dtype=FLOAT_DTYPES)
X -= self.min_
X /= self.scale_
return X
def minmax_scale(X, feature_range=(0, 1), axis=0, copy=True):
"""Transforms features by scaling each feature to a given range.
This estimator scales and translates each feature individually such
that it is in the given range on the training set, i.e. between
zero and one.
The transformation is given by::
X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
X_scaled = X_std * (max - min) + min
where min, max = feature_range.
This transformation is often used as an alternative to zero mean,
unit variance scaling.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
.. versionadded:: 0.17
*minmax_scale* function interface
to :class:`sklearn.preprocessing.MinMaxScaler`.
Parameters
----------
feature_range : tuple (min, max), default=(0, 1)
Desired range of transformed data.
axis : int (0 by default)
axis used to scale along. If 0, independently scale each feature,
otherwise (if 1) scale each sample.
copy : boolean, optional, default is True
Set to False to perform inplace scaling and avoid a copy (if the input
is already a numpy array).
See also
--------
    MinMaxScaler: Performs scaling to a given range using the ``Transformer`` API
(e.g. as part of a preprocessing :class:`sklearn.pipeline.Pipeline`).
Notes
-----
For a comparison of the different scalers, transformers, and normalizers,
see :ref:`examples/preprocessing/plot_all_scaling.py
<sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
""" # noqa
# Unlike the scaler object, this function allows 1d input.
# If copy is required, it will be done inside the scaler object.
X = check_array(X, copy=False, ensure_2d=False, warn_on_dtype=True,
dtype=FLOAT_DTYPES)
original_ndim = X.ndim
if original_ndim == 1:
X = X.reshape(X.shape[0], 1)
s = MinMaxScaler(feature_range=feature_range, copy=copy)
if axis == 0:
X = s.fit_transform(X)
else:
X = s.fit_transform(X.T).T
if original_ndim == 1:
X = X.ravel()
return X
class StandardScaler(BaseEstimator, TransformerMixin):
"""Standardize features by removing the mean and scaling to unit variance
Centering and scaling happen independently on each feature by computing
the relevant statistics on the samples in the training set. Mean and
standard deviation are then stored to be used on later data using the
`transform` method.
Standardization of a dataset is a common requirement for many
machine learning estimators: they might behave badly if the
    individual features do not more or less look like standard normally
distributed data (e.g. Gaussian with 0 mean and unit variance).
For instance many elements used in the objective function of
a learning algorithm (such as the RBF kernel of Support Vector
Machines or the L1 and L2 regularizers of linear models) assume that
all features are centered around 0 and have variance in the same
order. If a feature has a variance that is orders of magnitude larger
that others, it might dominate the objective function and make the
estimator unable to learn from other features correctly as expected.
This scaler can also be applied to sparse CSR or CSC matrices by passing
`with_mean=False` to avoid breaking the sparsity structure of the data.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
with_mean : boolean, True by default
If True, center the data before scaling.
This does not work (and will raise an exception) when attempted on
sparse matrices, because centering them entails building a dense
matrix which in common use cases is likely to be too large to fit in
memory.
with_std : boolean, True by default
If True, scale the data to unit variance (or equivalently,
unit standard deviation).
copy : boolean, optional, default True
If False, try to avoid a copy and do inplace scaling instead.
This is not guaranteed to always work inplace; e.g. if the data is
not a NumPy array or scipy.sparse CSR matrix, a copy may still be
returned.
Attributes
----------
scale_ : ndarray, shape (n_features,)
Per feature relative scaling of the data.
.. versionadded:: 0.17
*scale_*
mean_ : array of floats with shape [n_features]
The mean value for each feature in the training set.
var_ : array of floats with shape [n_features]
The variance for each feature in the training set. Used to compute
`scale_`
n_samples_seen_ : int
The number of samples processed by the estimator. Will be reset on
new calls to fit, but increments across ``partial_fit`` calls.
See also
--------
scale: Equivalent function without the estimator API.
:class:`sklearn.decomposition.PCA`
Further removes the linear correlation across features with 'whiten=True'.
Notes
-----
For a comparison of the different scalers, transformers, and normalizers,
see :ref:`examples/preprocessing/plot_all_scaling.py
<sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
""" # noqa
def __init__(self, copy=True, with_mean=True, with_std=True):
self.with_mean = with_mean
self.with_std = with_std
self.copy = copy
def _reset(self):
"""Reset internal data-dependent state of the scaler, if necessary.
__init__ parameters are not touched.
"""
        # Checking one attribute is enough, because they are all set together
# in partial_fit
if hasattr(self, 'scale_'):
del self.scale_
del self.n_samples_seen_
del self.mean_
del self.var_
def fit(self, X, y=None):
"""Compute the mean and std to be used for later scaling.
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data used to compute the mean and standard deviation
used for later scaling along the features axis.
y : Passthrough for ``Pipeline`` compatibility.
"""
# Reset internal state before fitting
self._reset()
return self.partial_fit(X, y)
def partial_fit(self, X, y=None):
"""Online computation of mean and std on X for later scaling.
All of X is processed as a single batch. This is intended for cases
        when `fit` is not feasible due to a very large number of `n_samples`
or because X is read from a continuous stream.
The algorithm for incremental mean and std is given in Equation 1.5a,b
in Chan, Tony F., Gene H. Golub, and Randall J. LeVeque. "Algorithms
for computing the sample variance: Analysis and recommendations."
The American Statistician 37.3 (1983): 242-247:
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data used to compute the mean and standard deviation
used for later scaling along the features axis.
y : Passthrough for ``Pipeline`` compatibility.
"""
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
warn_on_dtype=True, estimator=self, dtype=FLOAT_DTYPES)
# Even in the case of `with_mean=False`, we update the mean anyway
# This is needed for the incremental computation of the var
# See incr_mean_variance_axis and _incremental_mean_variance_axis
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` "
"instead. See docstring for motivation and alternatives.")
if self.with_std:
# First pass
if not hasattr(self, 'n_samples_seen_'):
self.mean_, self.var_ = mean_variance_axis(X, axis=0)
self.n_samples_seen_ = X.shape[0]
# Next passes
else:
self.mean_, self.var_, self.n_samples_seen_ = \
incr_mean_variance_axis(X, axis=0,
last_mean=self.mean_,
last_var=self.var_,
last_n=self.n_samples_seen_)
else:
self.mean_ = None
self.var_ = None
else:
# First pass
if not hasattr(self, 'n_samples_seen_'):
self.mean_ = .0
self.n_samples_seen_ = 0
if self.with_std:
self.var_ = .0
else:
self.var_ = None
self.mean_, self.var_, self.n_samples_seen_ = \
_incremental_mean_and_var(X, self.mean_, self.var_,
self.n_samples_seen_)
if self.with_std:
self.scale_ = _handle_zeros_in_scale(np.sqrt(self.var_))
else:
self.scale_ = None
return self
def transform(self, X, y='deprecated', copy=None):
"""Perform standardization by centering and scaling
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to scale along the features axis.
y : (ignored)
.. deprecated:: 0.19
This parameter will be removed in 0.21.
"""
if not isinstance(y, string_types) or y != 'deprecated':
warnings.warn("The parameter y on transform() is "
"deprecated since 0.19 and will be removed in 0.21",
DeprecationWarning)
check_is_fitted(self, 'scale_')
copy = copy if copy is not None else self.copy
X = check_array(X, accept_sparse='csr', copy=copy, warn_on_dtype=True,
estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` "
"instead. See docstring for motivation and alternatives.")
if self.scale_ is not None:
inplace_column_scale(X, 1 / self.scale_)
else:
if self.with_mean:
X -= self.mean_
if self.with_std:
X /= self.scale_
return X
def inverse_transform(self, X, copy=None):
"""Scale back the data to the original representation
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to scale along the features axis.
"""
check_is_fitted(self, 'scale_')
copy = copy if copy is not None else self.copy
if sparse.issparse(X):
if self.with_mean:
                raise ValueError(
                    "Cannot uncenter sparse matrices: pass `with_mean=False` "
                    "instead. See docstring for motivation and alternatives.")
if not sparse.isspmatrix_csr(X):
X = X.tocsr()
copy = False
if copy:
X = X.copy()
if self.scale_ is not None:
inplace_column_scale(X, self.scale_)
else:
X = np.asarray(X)
if copy:
X = X.copy()
if self.with_std:
X *= self.scale_
if self.with_mean:
X += self.mean_
return X
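# Illustrative usage sketch (added for clarity; not part of the original
# scikit-learn source): after fitting, each column of the transformed data has
# zero mean and unit variance, using the mean_/scale_ attributes stored above.
#
#   >>> import numpy as np
#   >>> from sklearn.preprocessing import StandardScaler
#   >>> X = np.array([[0., 0.], [0., 0.], [1., 1.], [1., 1.]])
#   >>> scaler = StandardScaler().fit(X)
#   >>> scaler.mean_                       # per-feature mean
#   array([ 0.5,  0.5])
#   >>> scaler.transform(X)                # (X - mean_) / scale_
#   array([[-1., -1.],
#          [-1., -1.],
#          [ 1.,  1.],
#          [ 1.,  1.]])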
class MaxAbsScaler(BaseEstimator, TransformerMixin):
"""Scale each feature by its maximum absolute value.
This estimator scales and translates each feature individually such
that the maximal absolute value of each feature in the
training set will be 1.0. It does not shift/center the data, and
thus does not destroy any sparsity.
This scaler can also be applied to sparse CSR or CSC matrices.
.. versionadded:: 0.17
Parameters
----------
copy : boolean, optional, default is True
Set to False to perform inplace scaling and avoid a copy (if the input
is already a numpy array).
Attributes
----------
scale_ : ndarray, shape (n_features,)
Per feature relative scaling of the data.
.. versionadded:: 0.17
*scale_* attribute.
max_abs_ : ndarray, shape (n_features,)
Per feature maximum absolute value.
n_samples_seen_ : int
The number of samples processed by the estimator. Will be reset on
new calls to fit, but increments across ``partial_fit`` calls.
See also
--------
maxabs_scale: Equivalent function without the estimator API.
Notes
-----
For a comparison of the different scalers, transformers, and normalizers,
see :ref:`examples/preprocessing/plot_all_scaling.py
<sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
"""
def __init__(self, copy=True):
self.copy = copy
def _reset(self):
"""Reset internal data-dependent state of the scaler, if necessary.
__init__ parameters are not touched.
"""
        # Checking one attribute is enough, because they are all set together
        # in partial_fit
if hasattr(self, 'scale_'):
del self.scale_
del self.n_samples_seen_
del self.max_abs_
def fit(self, X, y=None):
"""Compute the maximum absolute value to be used for later scaling.
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data used to compute the per-feature minimum and maximum
used for later scaling along the features axis.
"""
# Reset internal state before fitting
self._reset()
return self.partial_fit(X, y)
def partial_fit(self, X, y=None):
"""Online computation of max absolute value of X for later scaling.
All of X is processed as a single batch. This is intended for cases
when `fit` is not feasible due to very large number of `n_samples`
or because X is read from a continuous stream.
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data used to compute the mean and standard deviation
used for later scaling along the features axis.
y : Passthrough for ``Pipeline`` compatibility.
"""
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
mins, maxs = min_max_axis(X, axis=0)
max_abs = np.maximum(np.abs(mins), np.abs(maxs))
else:
max_abs = np.abs(X).max(axis=0)
# First pass
if not hasattr(self, 'n_samples_seen_'):
self.n_samples_seen_ = X.shape[0]
# Next passes
else:
max_abs = np.maximum(self.max_abs_, max_abs)
self.n_samples_seen_ += X.shape[0]
self.max_abs_ = max_abs
self.scale_ = _handle_zeros_in_scale(max_abs)
return self
def transform(self, X):
"""Scale the data
Parameters
----------
X : {array-like, sparse matrix}
The data that should be scaled.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
inplace_column_scale(X, 1.0 / self.scale_)
else:
X /= self.scale_
return X
def inverse_transform(self, X):
"""Scale back the data to the original representation
Parameters
----------
X : {array-like, sparse matrix}
The data that should be transformed back.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
inplace_column_scale(X, self.scale_)
else:
X *= self.scale_
return X
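# Illustrative usage sketch (added for clarity; not part of the original
# scikit-learn source): each column is divided by its maximum absolute value,
# so the result lies in [-1, 1] and zero entries stay zero.
#
#   >>> import numpy as np
#   >>> from sklearn.preprocessing import MaxAbsScaler
#   >>> X = np.array([[1., -2.], [2., 0.], [0., 4.]])
#   >>> MaxAbsScaler().fit_transform(X)    # column max-abs values are 2 and 4
#   array([[ 0.5, -0.5],
#          [ 1. ,  0. ],
#          [ 0. ,  1. ]])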
def maxabs_scale(X, axis=0, copy=True):
"""Scale each feature to the [-1, 1] range without breaking the sparsity.
This estimator scales each feature individually such
that the maximal absolute value of each feature in the
training set will be 1.0.
This scaler can also be applied to sparse CSR or CSC matrices.
Parameters
----------
axis : int (0 by default)
axis used to scale along. If 0, independently scale each feature,
otherwise (if 1) scale each sample.
copy : boolean, optional, default is True
Set to False to perform inplace scaling and avoid a copy (if the input
is already a numpy array).
See also
--------
    MaxAbsScaler: Performs scaling to the [-1, 1] range using the ``Transformer`` API
(e.g. as part of a preprocessing :class:`sklearn.pipeline.Pipeline`).
Notes
-----
For a comparison of the different scalers, transformers, and normalizers,
see :ref:`examples/preprocessing/plot_all_scaling.py
<sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
""" # noqa
# Unlike the scaler object, this function allows 1d input.
# If copy is required, it will be done inside the scaler object.
X = check_array(X, accept_sparse=('csr', 'csc'), copy=False,
ensure_2d=False, dtype=FLOAT_DTYPES)
original_ndim = X.ndim
if original_ndim == 1:
X = X.reshape(X.shape[0], 1)
s = MaxAbsScaler(copy=copy)
if axis == 0:
X = s.fit_transform(X)
else:
X = s.fit_transform(X.T).T
if original_ndim == 1:
X = X.ravel()
return X
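# Illustrative usage sketch (added for clarity; not part of the original
# scikit-learn source): unlike the MaxAbsScaler estimator, the functional form
# also accepts 1d input, which is reshaped, scaled and raveled back above.
#
#   >>> from sklearn.preprocessing import maxabs_scale
#   >>> maxabs_scale([1., -2., 4.])        # max absolute value is 4
#   array([ 0.25, -0.5 ,  1.  ])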
class RobustScaler(BaseEstimator, TransformerMixin):
"""Scale features using statistics that are robust to outliers.
This Scaler removes the median and scales the data according to
the quantile range (defaults to IQR: Interquartile Range).
The IQR is the range between the 1st quartile (25th quantile)
and the 3rd quartile (75th quantile).
Centering and scaling happen independently on each feature (or each
sample, depending on the `axis` argument) by computing the relevant
statistics on the samples in the training set. Median and interquartile
range are then stored to be used on later data using the `transform`
method.
Standardization of a dataset is a common requirement for many
machine learning estimators. Typically this is done by removing the mean
and scaling to unit variance. However, outliers can often influence the
sample mean / variance in a negative way. In such cases, the median and
the interquartile range often give better results.
.. versionadded:: 0.17
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
with_centering : boolean, True by default
If True, center the data before scaling.
This does not work (and will raise an exception) when attempted on
sparse matrices, because centering them entails building a dense
matrix which in common use cases is likely to be too large to fit in
memory.
with_scaling : boolean, True by default
If True, scale the data to interquartile range.
quantile_range : tuple (q_min, q_max), 0.0 < q_min < q_max < 100.0
Default: (25.0, 75.0) = (1st quantile, 3rd quantile) = IQR
Quantile range used to calculate ``scale_``.
.. versionadded:: 0.18
copy : boolean, optional, default is True
If False, try to avoid a copy and do inplace scaling instead.
This is not guaranteed to always work inplace; e.g. if the data is
not a NumPy array or scipy.sparse CSR matrix, a copy may still be
returned.
Attributes
----------
center_ : array of floats
The median value for each feature in the training set.
scale_ : array of floats
The (scaled) interquartile range for each feature in the training set.
.. versionadded:: 0.17
*scale_* attribute.
See also
--------
robust_scale: Equivalent function without the estimator API.
:class:`sklearn.decomposition.PCA`
Further removes the linear correlation across features with
'whiten=True'.
Notes
-----
For a comparison of the different scalers, transformers, and normalizers,
see :ref:`examples/preprocessing/plot_all_scaling.py
<sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
https://en.wikipedia.org/wiki/Median_(statistics)
https://en.wikipedia.org/wiki/Interquartile_range
"""
def __init__(self, with_centering=True, with_scaling=True,
quantile_range=(25.0, 75.0), copy=True):
self.with_centering = with_centering
self.with_scaling = with_scaling
self.quantile_range = quantile_range
self.copy = copy
def _check_array(self, X, copy):
"""Makes sure centering is not enabled for sparse matrices."""
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if self.with_centering:
raise ValueError(
"Cannot center sparse matrices: use `with_centering=False`"
" instead. See docstring for motivation and alternatives.")
return X
def fit(self, X, y=None):
"""Compute the median and quantiles to be used for scaling.
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to compute the median and quantiles
used for later scaling along the features axis.
"""
if sparse.issparse(X):
raise TypeError("RobustScaler cannot be fitted on sparse inputs")
X = self._check_array(X, self.copy)
if self.with_centering:
self.center_ = np.median(X, axis=0)
if self.with_scaling:
q_min, q_max = self.quantile_range
if not 0 <= q_min <= q_max <= 100:
raise ValueError("Invalid quantile range: %s" %
str(self.quantile_range))
q = np.percentile(X, self.quantile_range, axis=0)
self.scale_ = (q[1] - q[0])
self.scale_ = _handle_zeros_in_scale(self.scale_, copy=False)
return self
def transform(self, X):
"""Center and scale the data
Parameters
----------
X : array-like
The data used to scale along the specified axis.
"""
if self.with_centering:
check_is_fitted(self, 'center_')
if self.with_scaling:
check_is_fitted(self, 'scale_')
X = self._check_array(X, self.copy)
if sparse.issparse(X):
if self.with_scaling:
inplace_column_scale(X, 1.0 / self.scale_)
else:
if self.with_centering:
X -= self.center_
if self.with_scaling:
X /= self.scale_
return X
def inverse_transform(self, X):
"""Scale back the data to the original representation
Parameters
----------
X : array-like
The data used to scale along the specified axis.
"""
if self.with_centering:
check_is_fitted(self, 'center_')
if self.with_scaling:
check_is_fitted(self, 'scale_')
X = self._check_array(X, self.copy)
if sparse.issparse(X):
if self.with_scaling:
inplace_column_scale(X, self.scale_)
else:
if self.with_scaling:
X *= self.scale_
if self.with_centering:
X += self.center_
return X
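# Illustrative usage sketch (added for clarity; not part of the original
# scikit-learn source): the median and IQR are barely affected by a single
# extreme value, unlike the mean and standard deviation used by StandardScaler.
#
#   >>> import numpy as np
#   >>> from sklearn.preprocessing import RobustScaler
#   >>> X = np.array([[1.], [2.], [3.], [4.], [100.]])   # 100. is an outlier
#   >>> scaler = RobustScaler().fit(X)
#   >>> scaler.center_, scaler.scale_      # median and interquartile range
#   (array([ 3.]), array([ 2.]))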
def robust_scale(X, axis=0, with_centering=True, with_scaling=True,
quantile_range=(25.0, 75.0), copy=True):
"""Standardize a dataset along any axis
    Center to the median and component-wise scale
according to the interquartile range.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
X : array-like
The data to center and scale.
axis : int (0 by default)
axis used to compute the medians and IQR along. If 0,
independently scale each feature, otherwise (if 1) scale
each sample.
with_centering : boolean, True by default
If True, center the data before scaling.
with_scaling : boolean, True by default
        If True, scale the data to the interquartile range.
quantile_range : tuple (q_min, q_max), 0.0 < q_min < q_max < 100.0
Default: (25.0, 75.0) = (1st quantile, 3rd quantile) = IQR
Quantile range used to calculate ``scale_``.
.. versionadded:: 0.18
copy : boolean, optional, default is True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix and if axis is 1).
Notes
-----
This implementation will refuse to center scipy.sparse matrices
since it would make them non-sparse and would potentially crash the
program with memory exhaustion problems.
Instead the caller is expected to either set explicitly
`with_centering=False` (in that case, only variance scaling will be
performed on the features of the CSR matrix) or to call `X.toarray()`
if he/she expects the materialized dense array to fit in memory.
To avoid memory copy the caller should pass a CSR matrix.
For a comparison of the different scalers, transformers, and normalizers,
see :ref:`examples/preprocessing/plot_all_scaling.py
<sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
See also
--------
RobustScaler: Performs centering and scaling using the ``Transformer`` API
(e.g. as part of a preprocessing :class:`sklearn.pipeline.Pipeline`).
"""
s = RobustScaler(with_centering=with_centering, with_scaling=with_scaling,
quantile_range=quantile_range, copy=copy)
if axis == 0:
return s.fit_transform(X)
else:
return s.fit_transform(X.T).T
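# Illustrative usage sketch (added for clarity; not part of the original
# scikit-learn source): robust_scale(X) with axis=0 matches
# RobustScaler().fit_transform(X); the outlier ends up far outside [-1, 1]
# but does not distort the scaling of the inliers.
#
#   >>> import numpy as np
#   >>> from sklearn.preprocessing import robust_scale
#   >>> robust_scale(np.array([[1.], [2.], [3.], [4.], [100.]])).ravel()
#   array([ -1. ,  -0.5,   0. ,   0.5,  48.5])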
class PolynomialFeatures(BaseEstimator, TransformerMixin):
"""Generate polynomial and interaction features.
Generate a new feature matrix consisting of all polynomial combinations
of the features with degree less than or equal to the specified degree.
For example, if an input sample is two dimensional and of the form
[a, b], the degree-2 polynomial features are [1, a, b, a^2, ab, b^2].
Parameters
----------
degree : integer
The degree of the polynomial features. Default = 2.
interaction_only : boolean, default = False
If true, only interaction features are produced: features that are
products of at most ``degree`` *distinct* input features (so not
``x[1] ** 2``, ``x[0] * x[2] ** 3``, etc.).
include_bias : boolean
If True (default), then include a bias column, the feature in which
all polynomial powers are zero (i.e. a column of ones - acts as an
intercept term in a linear model).
Examples
--------
>>> X = np.arange(6).reshape(3, 2)
>>> X
array([[0, 1],
[2, 3],
[4, 5]])
>>> poly = PolynomialFeatures(2)
>>> poly.fit_transform(X)
array([[ 1., 0., 1., 0., 0., 1.],
[ 1., 2., 3., 4., 6., 9.],
[ 1., 4., 5., 16., 20., 25.]])
>>> poly = PolynomialFeatures(interaction_only=True)
>>> poly.fit_transform(X)
array([[ 1., 0., 1., 0.],
[ 1., 2., 3., 6.],
[ 1., 4., 5., 20.]])
Attributes
----------
powers_ : array, shape (n_output_features, n_input_features)
powers_[i, j] is the exponent of the jth input in the ith output.
n_input_features_ : int
The total number of input features.
n_output_features_ : int
The total number of polynomial output features. The number of output
features is computed by iterating over all suitably sized combinations
of input features.
Notes
-----
Be aware that the number of features in the output array scales
polynomially in the number of features of the input array, and
exponentially in the degree. High degrees can cause overfitting.
See :ref:`examples/linear_model/plot_polynomial_interpolation.py
<sphx_glr_auto_examples_linear_model_plot_polynomial_interpolation.py>`
"""
def __init__(self, degree=2, interaction_only=False, include_bias=True):
self.degree = degree
self.interaction_only = interaction_only
self.include_bias = include_bias
@staticmethod
def _combinations(n_features, degree, interaction_only, include_bias):
comb = (combinations if interaction_only else combinations_w_r)
start = int(not include_bias)
return chain.from_iterable(comb(range(n_features), i)
for i in range(start, degree + 1))
@property
def powers_(self):
check_is_fitted(self, 'n_input_features_')
combinations = self._combinations(self.n_input_features_, self.degree,
self.interaction_only,
self.include_bias)
return np.vstack(np.bincount(c, minlength=self.n_input_features_)
for c in combinations)
def get_feature_names(self, input_features=None):
"""
Return feature names for output features
Parameters
----------
input_features : list of string, length n_features, optional
String names for input features if available. By default,
"x0", "x1", ... "xn_features" is used.
Returns
-------
output_feature_names : list of string, length n_output_features
"""
powers = self.powers_
if input_features is None:
input_features = ['x%d' % i for i in range(powers.shape[1])]
feature_names = []
for row in powers:
inds = np.where(row)[0]
if len(inds):
name = " ".join("%s^%d" % (input_features[ind], exp)
if exp != 1 else input_features[ind]
for ind, exp in zip(inds, row[inds]))
else:
name = "1"
feature_names.append(name)
return feature_names
def fit(self, X, y=None):
"""
Compute number of output features.
"""
n_samples, n_features = check_array(X).shape
combinations = self._combinations(n_features, self.degree,
self.interaction_only,
self.include_bias)
self.n_input_features_ = n_features
self.n_output_features_ = sum(1 for _ in combinations)
return self
def transform(self, X):
"""Transform data to polynomial features
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data to transform, row by row.
Returns
-------
XP : np.ndarray shape [n_samples, NP]
The matrix of features, where NP is the number of polynomial
features generated from the combination of inputs.
"""
check_is_fitted(self, ['n_input_features_', 'n_output_features_'])
X = check_array(X, dtype=FLOAT_DTYPES)
n_samples, n_features = X.shape
if n_features != self.n_input_features_:
raise ValueError("X shape does not match training shape")
# allocate output data
XP = np.empty((n_samples, self.n_output_features_), dtype=X.dtype)
combinations = self._combinations(n_features, self.degree,
self.interaction_only,
self.include_bias)
for i, c in enumerate(combinations):
XP[:, i] = X[:, c].prod(1)
return XP
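# Illustrative usage sketch (added for clarity; not part of the original
# scikit-learn source): powers_ and get_feature_names() describe how each
# output column of transform() is built from the input features.
#
#   >>> import numpy as np
#   >>> from sklearn.preprocessing import PolynomialFeatures
#   >>> poly = PolynomialFeatures(degree=2).fit(np.zeros((1, 2)))
#   >>> poly.get_feature_names()
#   ['1', 'x0', 'x1', 'x0^2', 'x0 x1', 'x1^2']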
def normalize(X, norm='l2', axis=1, copy=True, return_norm=False):
"""Scale input vectors individually to unit norm (vector length).
Read more in the :ref:`User Guide <preprocessing_normalization>`.
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data to normalize, element by element.
scipy.sparse matrices should be in CSR format to avoid an
        unnecessary copy.
norm : 'l1', 'l2', or 'max', optional ('l2' by default)
The norm to use to normalize each non zero sample (or each non-zero
feature if axis is 0).
axis : 0 or 1, optional (1 by default)
axis used to normalize the data along. If 1, independently normalize
each sample, otherwise (if 0) normalize each feature.
copy : boolean, optional, default True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix and if axis is 1).
return_norm : boolean, default False
whether to return the computed norms
Returns
-------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
Normalized input X.
norms : array, shape [n_samples] if axis=1 else [n_features]
An array of norms along given axis for X.
When X is sparse, a NotImplementedError will be raised
for norm 'l1' or 'l2'.
See also
--------
Normalizer: Performs normalization using the ``Transformer`` API
(e.g. as part of a preprocessing :class:`sklearn.pipeline.Pipeline`).
Notes
-----
For a comparison of the different scalers, transformers, and normalizers,
see :ref:`examples/preprocessing/plot_all_scaling.py
<sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
"""
if norm not in ('l1', 'l2', 'max'):
raise ValueError("'%s' is not a supported norm" % norm)
if axis == 0:
sparse_format = 'csc'
elif axis == 1:
sparse_format = 'csr'
else:
raise ValueError("'%d' is not a supported axis" % axis)
X = check_array(X, sparse_format, copy=copy,
estimator='the normalize function', dtype=FLOAT_DTYPES)
if axis == 0:
X = X.T
if sparse.issparse(X):
if return_norm and norm in ('l1', 'l2'):
raise NotImplementedError("return_norm=True is not implemented "
"for sparse matrices with norm 'l1' "
"or norm 'l2'")
if norm == 'l1':
inplace_csr_row_normalize_l1(X)
elif norm == 'l2':
inplace_csr_row_normalize_l2(X)
elif norm == 'max':
_, norms = min_max_axis(X, 1)
norms_elementwise = norms.repeat(np.diff(X.indptr))
mask = norms_elementwise != 0
X.data[mask] /= norms_elementwise[mask]
else:
if norm == 'l1':
norms = np.abs(X).sum(axis=1)
elif norm == 'l2':
norms = row_norms(X)
elif norm == 'max':
norms = np.max(X, axis=1)
norms = _handle_zeros_in_scale(norms, copy=False)
X /= norms[:, np.newaxis]
if axis == 0:
X = X.T
if return_norm:
return X, norms
else:
return X
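# Illustrative usage sketch (added for clarity; not part of the original
# scikit-learn source): with the default l2 norm every non-zero row of the
# result has unit Euclidean length; all-zero rows are left untouched.
#
#   >>> import numpy as np
#   >>> from sklearn.preprocessing import normalize
#   >>> normalize(np.array([[3., 4.], [0., 0.]]))
#   array([[ 0.6,  0.8],
#          [ 0. ,  0. ]])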
class Normalizer(BaseEstimator, TransformerMixin):
"""Normalize samples individually to unit norm.
Each sample (i.e. each row of the data matrix) with at least one
non zero component is rescaled independently of other samples so
that its norm (l1 or l2) equals one.
This transformer is able to work both with dense numpy arrays and
scipy.sparse matrix (use CSR format if you want to avoid the burden of
a copy / conversion).
    Scaling inputs to unit norms is a common operation for text
    classification or clustering. For instance, the dot
product of two l2-normalized TF-IDF vectors is the cosine similarity
of the vectors and is the base similarity metric for the Vector
Space Model commonly used by the Information Retrieval community.
Read more in the :ref:`User Guide <preprocessing_normalization>`.
Parameters
----------
norm : 'l1', 'l2', or 'max', optional ('l2' by default)
The norm to use to normalize each non zero sample.
copy : boolean, optional, default True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix).
Notes
-----
This estimator is stateless (besides constructor parameters), the
fit method does nothing but is useful when used in a pipeline.
For a comparison of the different scalers, transformers, and normalizers,
see :ref:`examples/preprocessing/plot_all_scaling.py
<sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
See also
--------
normalize: Equivalent function without the estimator API.
"""
def __init__(self, norm='l2', copy=True):
self.norm = norm
self.copy = copy
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
"""
X = check_array(X, accept_sparse='csr')
return self
def transform(self, X, y='deprecated', copy=None):
"""Scale each non zero row of X to unit norm
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data to normalize, row by row. scipy.sparse matrices should be
            in CSR format to avoid an unnecessary copy.
y : (ignored)
.. deprecated:: 0.19
This parameter will be removed in 0.21.
"""
if not isinstance(y, string_types) or y != 'deprecated':
warnings.warn("The parameter y on transform() is "
"deprecated since 0.19 and will be removed in 0.21",
DeprecationWarning)
copy = copy if copy is not None else self.copy
X = check_array(X, accept_sparse='csr')
return normalize(X, norm=self.norm, axis=1, copy=copy)
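# Illustrative usage sketch (added for clarity; not part of the original
# scikit-learn source): Normalizer is stateless, so fit() only validates the
# input; the estimator form exists mainly for use inside a Pipeline.
#
#   >>> import numpy as np
#   >>> from sklearn.preprocessing import Normalizer
#   >>> Normalizer(norm='l1').fit_transform(np.array([[1., 3.], [2., 2.]]))
#   array([[ 0.25,  0.75],
#          [ 0.5 ,  0.5 ]])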
def binarize(X, threshold=0.0, copy=True):
"""Boolean thresholding of array-like or scipy.sparse matrix
Read more in the :ref:`User Guide <preprocessing_binarization>`.
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data to binarize, element by element.
scipy.sparse matrices should be in CSR or CSC format to avoid an
        unnecessary copy.
threshold : float, optional (0.0 by default)
Feature values below or equal to this are replaced by 0, above it by 1.
Threshold may not be less than 0 for operations on sparse matrices.
copy : boolean, optional, default True
set to False to perform inplace binarization and avoid a copy
(if the input is already a numpy array or a scipy.sparse CSR / CSC
matrix and if axis is 1).
See also
--------
Binarizer: Performs binarization using the ``Transformer`` API
(e.g. as part of a preprocessing :class:`sklearn.pipeline.Pipeline`).
"""
X = check_array(X, accept_sparse=['csr', 'csc'], copy=copy)
if sparse.issparse(X):
if threshold < 0:
raise ValueError('Cannot binarize a sparse matrix with threshold '
'< 0')
cond = X.data > threshold
not_cond = np.logical_not(cond)
X.data[cond] = 1
X.data[not_cond] = 0
X.eliminate_zeros()
else:
cond = X > threshold
not_cond = np.logical_not(cond)
X[cond] = 1
X[not_cond] = 0
return X
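# Illustrative usage sketch (added for clarity; not part of the original
# scikit-learn source): values strictly greater than the threshold become 1,
# everything else becomes 0.
#
#   >>> import numpy as np
#   >>> from sklearn.preprocessing import binarize
#   >>> binarize(np.array([[0.4, 0.6], [1.5, -2.0]]), threshold=0.5)
#   array([[ 0.,  1.],
#          [ 1.,  0.]])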
class Binarizer(BaseEstimator, TransformerMixin):
"""Binarize data (set feature values to 0 or 1) according to a threshold
Values greater than the threshold map to 1, while values less than
or equal to the threshold map to 0. With the default threshold of 0,
only positive values map to 1.
Binarization is a common operation on text count data where the
analyst can decide to only consider the presence or absence of a
feature rather than a quantified number of occurrences for instance.
It can also be used as a pre-processing step for estimators that
consider boolean random variables (e.g. modelled using the Bernoulli
distribution in a Bayesian setting).
Read more in the :ref:`User Guide <preprocessing_binarization>`.
Parameters
----------
threshold : float, optional (0.0 by default)
Feature values below or equal to this are replaced by 0, above it by 1.
Threshold may not be less than 0 for operations on sparse matrices.
copy : boolean, optional, default True
set to False to perform inplace binarization and avoid a copy (if
the input is already a numpy array or a scipy.sparse CSR matrix).
Notes
-----
If the input is a sparse matrix, only the non-zero values are subject
to update by the Binarizer class.
This estimator is stateless (besides constructor parameters), the
fit method does nothing but is useful when used in a pipeline.
See also
--------
binarize: Equivalent function without the estimator API.
"""
def __init__(self, threshold=0.0, copy=True):
self.threshold = threshold
self.copy = copy
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
"""
check_array(X, accept_sparse='csr')
return self
def transform(self, X, y='deprecated', copy=None):
"""Binarize each element of X
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data to binarize, element by element.
scipy.sparse matrices should be in CSR format to avoid an
            unnecessary copy.
y : (ignored)
.. deprecated:: 0.19
This parameter will be removed in 0.21.
"""
if not isinstance(y, string_types) or y != 'deprecated':
warnings.warn("The parameter y on transform() is "
"deprecated since 0.19 and will be removed in 0.21",
DeprecationWarning)
copy = copy if copy is not None else self.copy
return binarize(X, threshold=self.threshold, copy=copy)
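# Illustrative usage sketch (added for clarity; not part of the original
# scikit-learn source): with the default threshold of 0 the Binarizer records
# presence/absence, e.g. turning word counts into binary indicator features.
#
#   >>> import numpy as np
#   >>> from sklearn.preprocessing import Binarizer
#   >>> Binarizer().fit_transform(np.array([[3., 0., 1.], [0., 2., 0.]]))
#   array([[ 1.,  0.,  1.],
#          [ 0.,  1.,  0.]])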
class KernelCenterer(BaseEstimator, TransformerMixin):
"""Center a kernel matrix
Let K(x, z) be a kernel defined by phi(x)^T phi(z), where phi is a
function mapping x to a Hilbert space. KernelCenterer centers (i.e.,
    normalizes to have zero mean) the data without explicitly computing phi(x).
It is equivalent to centering phi(x) with
sklearn.preprocessing.StandardScaler(with_std=False).
Read more in the :ref:`User Guide <kernel_centering>`.
"""
def fit(self, K, y=None):
"""Fit KernelCenterer
Parameters
----------
K : numpy array of shape [n_samples, n_samples]
Kernel matrix.
Returns
-------
self : returns an instance of self.
"""
K = check_array(K, dtype=FLOAT_DTYPES)
n_samples = K.shape[0]
self.K_fit_rows_ = np.sum(K, axis=0) / n_samples
self.K_fit_all_ = self.K_fit_rows_.sum() / n_samples
return self
def transform(self, K, y='deprecated', copy=True):
"""Center kernel matrix.
Parameters
----------
K : numpy array of shape [n_samples1, n_samples2]
Kernel matrix.
y : (ignored)
.. deprecated:: 0.19
This parameter will be removed in 0.21.
copy : boolean, optional, default True
Set to False to perform inplace computation.
Returns
-------
K_new : numpy array of shape [n_samples1, n_samples2]
"""
if not isinstance(y, string_types) or y != 'deprecated':
warnings.warn("The parameter y on transform() is "
"deprecated since 0.19 and will be removed in 0.21",
DeprecationWarning)
check_is_fitted(self, 'K_fit_all_')
K = check_array(K, copy=copy, dtype=FLOAT_DTYPES)
K_pred_cols = (np.sum(K, axis=1) /
self.K_fit_rows_.shape[0])[:, np.newaxis]
K -= self.K_fit_rows_
K -= K_pred_cols
K += self.K_fit_all_
return K
@property
def _pairwise(self):
return True
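# Illustrative usage sketch (added for clarity; not part of the original
# scikit-learn source): centering a linear kernel K = X.dot(X.T) is equivalent
# to computing the linear kernel on mean-centered X, without forming Xc.
#
#   >>> import numpy as np
#   >>> from sklearn.preprocessing import KernelCenterer
#   >>> X = np.array([[1., 2.], [3., 4.], [5., 0.]])
#   >>> K = X.dot(X.T)
#   >>> Xc = X - X.mean(axis=0)
#   >>> np.allclose(KernelCenterer().fit_transform(K), Xc.dot(Xc.T))
#   True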
def add_dummy_feature(X, value=1.0):
"""Augment dataset with an additional dummy feature.
This is useful for fitting an intercept term with implementations which
cannot otherwise fit it directly.
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
Data.
value : float
Value to use for the dummy feature.
Returns
-------
X : {array, sparse matrix}, shape [n_samples, n_features + 1]
Same data with dummy feature added as first column.
Examples
--------
>>> from sklearn.preprocessing import add_dummy_feature
>>> add_dummy_feature([[0, 1], [1, 0]])
array([[ 1., 0., 1.],
[ 1., 1., 0.]])
"""
X = check_array(X, accept_sparse=['csc', 'csr', 'coo'], dtype=FLOAT_DTYPES)
n_samples, n_features = X.shape
shape = (n_samples, n_features + 1)
if sparse.issparse(X):
if sparse.isspmatrix_coo(X):
# Shift columns to the right.
col = X.col + 1
# Column indices of dummy feature are 0 everywhere.
col = np.concatenate((np.zeros(n_samples), col))
# Row indices of dummy feature are 0, ..., n_samples-1.
row = np.concatenate((np.arange(n_samples), X.row))
# Prepend the dummy feature n_samples times.
data = np.concatenate((np.ones(n_samples) * value, X.data))
return sparse.coo_matrix((data, (row, col)), shape)
elif sparse.isspmatrix_csc(X):
# Shift index pointers since we need to add n_samples elements.
indptr = X.indptr + n_samples
# indptr[0] must be 0.
indptr = np.concatenate((np.array([0]), indptr))
# Row indices of dummy feature are 0, ..., n_samples-1.
indices = np.concatenate((np.arange(n_samples), X.indices))
# Prepend the dummy feature n_samples times.
data = np.concatenate((np.ones(n_samples) * value, X.data))
return sparse.csc_matrix((data, indices, indptr), shape)
else:
klass = X.__class__
return klass(add_dummy_feature(X.tocoo(), value))
else:
return np.hstack((np.ones((n_samples, 1)) * value, X))
def _transform_selected(X, transform, selected="all", copy=True):
"""Apply a transform function to portion of selected features
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
Dense array or sparse matrix.
transform : callable
A callable transform(X) -> X_transformed
copy : boolean, optional
Copy X even if it could be avoided.
    selected : "all" or array of indices or mask
Specify which features to apply the transform to.
Returns
-------
X : array or sparse matrix, shape=(n_samples, n_features_new)
"""
X = check_array(X, accept_sparse='csc', copy=copy, dtype=FLOAT_DTYPES)
if isinstance(selected, six.string_types) and selected == "all":
return transform(X)
if len(selected) == 0:
return X
n_features = X.shape[1]
ind = np.arange(n_features)
sel = np.zeros(n_features, dtype=bool)
sel[np.asarray(selected)] = True
not_sel = np.logical_not(sel)
n_selected = np.sum(sel)
if n_selected == 0:
# No features selected.
return X
elif n_selected == n_features:
# All features selected.
return transform(X)
else:
X_sel = transform(X[:, ind[sel]])
X_not_sel = X[:, ind[not_sel]]
if sparse.issparse(X_sel) or sparse.issparse(X_not_sel):
return sparse.hstack((X_sel, X_not_sel))
else:
return np.hstack((X_sel, X_not_sel))
class OneHotEncoder(BaseEstimator, TransformerMixin):
"""Encode categorical integer features using a one-hot aka one-of-K scheme.
The input to this transformer should be a matrix of integers, denoting
the values taken on by categorical (discrete) features. The output will be
a sparse matrix where each column corresponds to one possible value of one
feature. It is assumed that input features take on values in the range
[0, n_values).
This encoding is needed for feeding categorical data to many scikit-learn
estimators, notably linear models and SVMs with the standard kernels.
Note: a one-hot encoding of y labels should use a LabelBinarizer
instead.
Read more in the :ref:`User Guide <preprocessing_categorical_features>`.
Parameters
----------
n_values : 'auto', int or array of ints
Number of values per feature.
- 'auto' : determine value range from training data.
- int : number of categorical values per feature.
Each feature value should be in ``range(n_values)``
- array : ``n_values[i]`` is the number of categorical values in
``X[:, i]``. Each feature value should be
in ``range(n_values[i])``
categorical_features : "all" or array of indices or mask
Specify what features are treated as categorical.
- 'all' (default): All features are treated as categorical.
- array of indices: Array of categorical feature indices.
- mask: Array of length n_features and with dtype=bool.
Non-categorical features are always stacked to the right of the matrix.
dtype : number type, default=np.float
Desired dtype of output.
sparse : boolean, default=True
Will return sparse matrix if set True else will return an array.
handle_unknown : str, 'error' or 'ignore'
        Whether to raise an error or ignore if an unknown categorical feature is
present during transform.
Attributes
----------
active_features_ : array
Indices for active features, meaning values that actually occur
in the training set. Only available when n_values is ``'auto'``.
feature_indices_ : array of shape (n_features,)
Indices to feature ranges.
Feature ``i`` in the original data is mapped to features
from ``feature_indices_[i]`` to ``feature_indices_[i+1]``
(and then potentially masked by `active_features_` afterwards)
n_values_ : array of shape (n_features,)
Maximum number of values per feature.
Examples
--------
Given a dataset with three features and four samples, we let the encoder
find the maximum value per feature and transform the data to a binary
one-hot encoding.
>>> from sklearn.preprocessing import OneHotEncoder
>>> enc = OneHotEncoder()
>>> enc.fit([[0, 0, 3], [1, 1, 0], [0, 2, 1], \
[1, 0, 2]]) # doctest: +ELLIPSIS
OneHotEncoder(categorical_features='all', dtype=<... 'numpy.float64'>,
handle_unknown='error', n_values='auto', sparse=True)
>>> enc.n_values_
array([2, 3, 4])
>>> enc.feature_indices_
array([0, 2, 5, 9])
>>> enc.transform([[0, 1, 1]]).toarray()
array([[ 1., 0., 0., 1., 0., 0., 1., 0., 0.]])
See also
--------
sklearn.feature_extraction.DictVectorizer : performs a one-hot encoding of
dictionary items (also handles string-valued features).
sklearn.feature_extraction.FeatureHasher : performs an approximate one-hot
encoding of dictionary items or strings.
sklearn.preprocessing.LabelBinarizer : binarizes labels in a one-vs-all
fashion.
sklearn.preprocessing.MultiLabelBinarizer : transforms between iterable of
iterables and a multilabel format, e.g. a (samples x classes) binary
matrix indicating the presence of a class label.
sklearn.preprocessing.LabelEncoder : encodes labels with values between 0
and n_classes-1.
"""
def __init__(self, n_values="auto", categorical_features="all",
dtype=np.float64, sparse=True, handle_unknown='error'):
self.n_values = n_values
self.categorical_features = categorical_features
self.dtype = dtype
self.sparse = sparse
self.handle_unknown = handle_unknown
def fit(self, X, y=None):
"""Fit OneHotEncoder to X.
Parameters
----------
X : array-like, shape [n_samples, n_feature]
Input array of type int.
Returns
-------
self
"""
self.fit_transform(X)
return self
def _fit_transform(self, X):
"""Assumes X contains only categorical features."""
X = check_array(X, dtype=np.int)
if np.any(X < 0):
raise ValueError("X needs to contain only non-negative integers.")
n_samples, n_features = X.shape
if (isinstance(self.n_values, six.string_types) and
self.n_values == 'auto'):
n_values = np.max(X, axis=0) + 1
elif isinstance(self.n_values, numbers.Integral):
if (np.max(X, axis=0) >= self.n_values).any():
raise ValueError("Feature out of bounds for n_values=%d"
% self.n_values)
n_values = np.empty(n_features, dtype=np.int)
n_values.fill(self.n_values)
else:
try:
n_values = np.asarray(self.n_values, dtype=int)
except (ValueError, TypeError):
raise TypeError("Wrong type for parameter `n_values`. Expected"
" 'auto', int or array of ints, got %r"
                                % type(self.n_values))
if n_values.ndim < 1 or n_values.shape[0] != X.shape[1]:
raise ValueError("Shape mismatch: if n_values is an array,"
" it has to be of shape (n_features,).")
self.n_values_ = n_values
n_values = np.hstack([[0], n_values])
indices = np.cumsum(n_values)
self.feature_indices_ = indices
column_indices = (X + indices[:-1]).ravel()
row_indices = np.repeat(np.arange(n_samples, dtype=np.int32),
n_features)
data = np.ones(n_samples * n_features)
out = sparse.coo_matrix((data, (row_indices, column_indices)),
shape=(n_samples, indices[-1]),
dtype=self.dtype).tocsr()
if (isinstance(self.n_values, six.string_types) and
self.n_values == 'auto'):
mask = np.array(out.sum(axis=0)).ravel() != 0
active_features = np.where(mask)[0]
out = out[:, active_features]
self.active_features_ = active_features
return out if self.sparse else out.toarray()
def fit_transform(self, X, y=None):
"""Fit OneHotEncoder to X, then transform X.
Equivalent to self.fit(X).transform(X), but more convenient and more
efficient. See fit for the parameters, transform for the return value.
"""
return _transform_selected(X, self._fit_transform,
self.categorical_features, copy=True)
def _transform(self, X):
"""Assumes X contains only categorical features."""
X = check_array(X, dtype=np.int)
if np.any(X < 0):
raise ValueError("X needs to contain only non-negative integers.")
n_samples, n_features = X.shape
indices = self.feature_indices_
if n_features != indices.shape[0] - 1:
raise ValueError("X has different shape than during fitting."
" Expected %d, got %d."
% (indices.shape[0] - 1, n_features))
# We use only those categorical features of X that are known using fit.
        # i.e. those whose values are less than n_values_, using mask.
# This means, if self.handle_unknown is "ignore", the row_indices and
# col_indices corresponding to the unknown categorical feature are
# ignored.
mask = (X < self.n_values_).ravel()
if np.any(~mask):
if self.handle_unknown not in ['error', 'ignore']:
                raise ValueError("handle_unknown should be either 'error' or "
                                 "'ignore', got %s" % self.handle_unknown)
if self.handle_unknown == 'error':
raise ValueError("unknown categorical feature present %s "
"during transform." % X.ravel()[~mask])
column_indices = (X + indices[:-1]).ravel()[mask]
row_indices = np.repeat(np.arange(n_samples, dtype=np.int32),
n_features)[mask]
data = np.ones(np.sum(mask))
out = sparse.coo_matrix((data, (row_indices, column_indices)),
shape=(n_samples, indices[-1]),
dtype=self.dtype).tocsr()
if (isinstance(self.n_values, six.string_types) and
self.n_values == 'auto'):
out = out[:, self.active_features_]
return out if self.sparse else out.toarray()
def transform(self, X):
"""Transform X using one-hot encoding.
Parameters
----------
X : array-like, shape [n_samples, n_features]
Input array of type int.
Returns
-------
X_out : sparse matrix if sparse=True else a 2-d array, dtype=int
Transformed input.
"""
return _transform_selected(X, self._transform,
self.categorical_features, copy=True)
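# Illustrative usage sketch (added for clarity; not part of the original
# scikit-learn source): with handle_unknown='ignore', category values not seen
# during fit are simply dropped from the encoding (all zeros in that feature's
# block) instead of raising an error.
#
#   >>> import numpy as np
#   >>> from sklearn.preprocessing import OneHotEncoder
#   >>> enc = OneHotEncoder(handle_unknown='ignore', sparse=False)
#   >>> enc = enc.fit(np.array([[0], [1]]))
#   >>> enc.transform(np.array([[0], [2]]))   # the value 2 was never seen
#   array([[ 1.,  0.],
#          [ 0.,  0.]])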
class QuantileTransformer(BaseEstimator, TransformerMixin):
"""Transform features using quantiles information.
This method transforms the features to follow a uniform or a normal
distribution. Therefore, for a given feature, this transformation tends
to spread out the most frequent values. It also reduces the impact of
(marginal) outliers: this is therefore a robust preprocessing scheme.
The transformation is applied on each feature independently.
    The cumulative distribution function of a feature is used to project the
original values. Features values of new/unseen data that fall below
or above the fitted range will be mapped to the bounds of the output
distribution. Note that this transform is non-linear. It may distort linear
correlations between variables measured at the same scale but renders
variables measured at different scales more directly comparable.
Read more in the :ref:`User Guide <preprocessing_transformer>`.
Parameters
----------
n_quantiles : int, optional (default=1000)
Number of quantiles to be computed. It corresponds to the number
        of landmarks used to discretize the cumulative distribution function.
output_distribution : str, optional (default='uniform')
Marginal distribution for the transformed data. The choices are
'uniform' (default) or 'normal'.
ignore_implicit_zeros : bool, optional (default=False)
Only applies to sparse matrices. If True, the sparse entries of the
matrix are discarded to compute the quantile statistics. If False,
these entries are treated as zeros.
subsample : int, optional (default=1e5)
Maximum number of samples used to estimate the quantiles for
computational efficiency. Note that the subsampling procedure may
differ for value-identical sparse and dense matrices.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by np.random. Note that this is used by subsampling and smoothing
noise.
copy : boolean, optional, (default=True)
Set to False to perform inplace transformation and avoid a copy (if the
input is already a numpy array).
Attributes
----------
quantiles_ : ndarray, shape (n_quantiles, n_features)
        The values corresponding to the quantiles of reference.
    references_ : ndarray, shape (n_quantiles,)
        Quantiles of references.
Examples
--------
>>> import numpy as np
>>> from sklearn.preprocessing import QuantileTransformer
>>> rng = np.random.RandomState(0)
>>> X = np.sort(rng.normal(loc=0.5, scale=0.25, size=(25, 1)), axis=0)
>>> qt = QuantileTransformer(n_quantiles=10, random_state=0)
>>> qt.fit_transform(X) # doctest: +ELLIPSIS
array([...])
See also
--------
quantile_transform : Equivalent function without the estimator API.
StandardScaler : perform standardization that is faster, but less robust
to outliers.
RobustScaler : perform robust standardization that removes the influence
of outliers but does not put outliers and inliers on the same scale.
Notes
-----
For a comparison of the different scalers, transformers, and normalizers,
see :ref:`examples/preprocessing/plot_all_scaling.py
<sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
"""
def __init__(self, n_quantiles=1000, output_distribution='uniform',
ignore_implicit_zeros=False, subsample=int(1e5),
random_state=None, copy=True):
self.n_quantiles = n_quantiles
self.output_distribution = output_distribution
self.ignore_implicit_zeros = ignore_implicit_zeros
self.subsample = subsample
self.random_state = random_state
self.copy = copy
def _dense_fit(self, X, random_state):
"""Compute percentiles for dense matrices.
Parameters
----------
X : ndarray, shape (n_samples, n_features)
The data used to scale along the features axis.
"""
if self.ignore_implicit_zeros:
warnings.warn("'ignore_implicit_zeros' takes effect only with"
" sparse matrix. This parameter has no effect.")
n_samples, n_features = X.shape
        # for compatibility issues with numpy<=1.8.X, references
        # need to be a list of values scaled between 0 and 100
references = (self.references_ * 100).tolist()
self.quantiles_ = []
for col in X.T:
if self.subsample < n_samples:
subsample_idx = random_state.choice(n_samples,
size=self.subsample,
replace=False)
col = col.take(subsample_idx, mode='clip')
self.quantiles_.append(np.percentile(col, references))
self.quantiles_ = np.transpose(self.quantiles_)
def _sparse_fit(self, X, random_state):
"""Compute percentiles for sparse matrices.
Parameters
----------
X : sparse matrix CSC, shape (n_samples, n_features)
The data used to scale along the features axis. The sparse matrix
needs to be nonnegative.
"""
n_samples, n_features = X.shape
        # for compatibility issues with numpy<=1.8.X, references
        # need to be a list of values scaled between 0 and 100
references = list(map(lambda x: x * 100, self.references_))
self.quantiles_ = []
for feature_idx in range(n_features):
column_nnz_data = X.data[X.indptr[feature_idx]:
X.indptr[feature_idx + 1]]
if len(column_nnz_data) > self.subsample:
column_subsample = (self.subsample * len(column_nnz_data) //
n_samples)
if self.ignore_implicit_zeros:
column_data = np.zeros(shape=column_subsample,
dtype=X.dtype)
else:
column_data = np.zeros(shape=self.subsample, dtype=X.dtype)
column_data[:column_subsample] = random_state.choice(
column_nnz_data, size=column_subsample, replace=False)
else:
if self.ignore_implicit_zeros:
column_data = np.zeros(shape=len(column_nnz_data),
dtype=X.dtype)
else:
column_data = np.zeros(shape=n_samples, dtype=X.dtype)
column_data[:len(column_nnz_data)] = column_nnz_data
if not column_data.size:
# if no nnz, an error will be raised for computing the
# quantiles. Force the quantiles to be zeros.
self.quantiles_.append([0] * len(references))
else:
self.quantiles_.append(
np.percentile(column_data, references))
self.quantiles_ = np.transpose(self.quantiles_)
def fit(self, X, y=None):
"""Compute the quantiles used for transforming.
Parameters
----------
X : ndarray or sparse matrix, shape (n_samples, n_features)
The data used to scale along the features axis. If a sparse
matrix is provided, it will be converted into a sparse
``csc_matrix``. Additionally, the sparse matrix needs to be
nonnegative if `ignore_implicit_zeros` is False.
Returns
-------
self : object
Returns self
"""
if self.n_quantiles <= 0:
raise ValueError("Invalid value for 'n_quantiles': %d. "
"The number of quantiles must be at least one."
% self.n_quantiles)
if self.subsample <= 0:
raise ValueError("Invalid value for 'subsample': %d. "
"The number of subsamples must be at least one."
% self.subsample)
if self.n_quantiles > self.subsample:
raise ValueError("The number of quantiles cannot be greater than"
" the number of samples used. Got {} quantiles"
" and {} samples.".format(self.n_quantiles,
self.subsample))
X = self._check_inputs(X)
rng = check_random_state(self.random_state)
# Create the quantiles of reference
self.references_ = np.linspace(0, 1, self.n_quantiles,
endpoint=True)
if sparse.issparse(X):
self._sparse_fit(X, rng)
else:
self._dense_fit(X, rng)
return self
def _transform_col(self, X_col, quantiles, inverse):
"""Private function to transform a single feature"""
if self.output_distribution == 'normal':
output_distribution = 'norm'
else:
output_distribution = self.output_distribution
output_distribution = getattr(stats, output_distribution)
        # older versions of scipy do not handle tuple as fill_value;
        # clipping the value before transform solves the issue
if not inverse:
lower_bound_x = quantiles[0]
upper_bound_x = quantiles[-1]
lower_bound_y = 0
upper_bound_y = 1
else:
lower_bound_x = 0
upper_bound_x = 1
lower_bound_y = quantiles[0]
upper_bound_y = quantiles[-1]
# for inverse transform, match a uniform PDF
X_col = output_distribution.cdf(X_col)
# find index for lower and higher bounds
lower_bounds_idx = (X_col - BOUNDS_THRESHOLD <
lower_bound_x)
upper_bounds_idx = (X_col + BOUNDS_THRESHOLD >
upper_bound_x)
if not inverse:
# Interpolate in one direction and in the other and take the
# mean. This is in case of repeated values in the features
# and hence repeated quantiles
#
            # If we don't do this, only one extreme of the duplicated values
            # is used (the upper when interpolating in ascending order, and
            # the lower in descending order). We take the mean of these two
X_col = .5 * (np.interp(X_col, quantiles, self.references_)
- np.interp(-X_col, -quantiles[::-1],
-self.references_[::-1]))
else:
X_col = np.interp(X_col, self.references_, quantiles)
X_col[upper_bounds_idx] = upper_bound_y
X_col[lower_bounds_idx] = lower_bound_y
# for forward transform, match the output PDF
if not inverse:
X_col = output_distribution.ppf(X_col)
# find the value to clip the data to avoid mapping to
# infinity. Clip such that the inverse transform will be
# consistent
clip_min = output_distribution.ppf(BOUNDS_THRESHOLD -
np.spacing(1))
clip_max = output_distribution.ppf(1 - (BOUNDS_THRESHOLD -
np.spacing(1)))
X_col = np.clip(X_col, clip_min, clip_max)
return X_col
def _check_inputs(self, X, accept_sparse_negative=False):
"""Check inputs before fit and transform"""
X = check_array(X, accept_sparse='csc', copy=self.copy,
dtype=[np.float64, np.float32])
        # we only accept a positive sparse matrix when ignore_implicit_zeros is
        # False and when we call fit or transform.
if (not accept_sparse_negative and not self.ignore_implicit_zeros and
(sparse.issparse(X) and np.any(X.data < 0))):
raise ValueError('QuantileTransformer only accepts non-negative'
' sparse matrices.')
# check the output PDF
if self.output_distribution not in ('normal', 'uniform'):
raise ValueError("'output_distribution' has to be either 'normal'"
" or 'uniform'. Got '{}' instead.".format(
self.output_distribution))
return X
def _check_is_fitted(self, X):
"""Check the inputs before transforming"""
check_is_fitted(self, 'quantiles_')
        # check that the dimensions of X are consistent with the fitted data
if X.shape[1] != self.quantiles_.shape[1]:
raise ValueError('X does not have the same number of features as'
' the previously fitted data. Got {} instead of'
' {}.'.format(X.shape[1],
self.quantiles_.shape[1]))
def _transform(self, X, inverse=False):
"""Forward and inverse transform.
Parameters
----------
X : ndarray, shape (n_samples, n_features)
The data used to scale along the features axis.
inverse : bool, optional (default=False)
If False, apply forward transform. If True, apply
inverse transform.
Returns
-------
X : ndarray, shape (n_samples, n_features)
Projected data
"""
if sparse.issparse(X):
for feature_idx in range(X.shape[1]):
column_slice = slice(X.indptr[feature_idx],
X.indptr[feature_idx + 1])
X.data[column_slice] = self._transform_col(
X.data[column_slice], self.quantiles_[:, feature_idx],
inverse)
else:
for feature_idx in range(X.shape[1]):
X[:, feature_idx] = self._transform_col(
X[:, feature_idx], self.quantiles_[:, feature_idx],
inverse)
return X
def transform(self, X):
"""Feature-wise transformation of the data.
Parameters
----------
X : ndarray or sparse matrix, shape (n_samples, n_features)
The data used to scale along the features axis. If a sparse
matrix is provided, it will be converted into a sparse
``csc_matrix``. Additionally, the sparse matrix needs to be
nonnegative if `ignore_implicit_zeros` is False.
Returns
-------
Xt : ndarray or sparse matrix, shape (n_samples, n_features)
The projected data.
"""
X = self._check_inputs(X)
self._check_is_fitted(X)
return self._transform(X, inverse=False)
def inverse_transform(self, X):
"""Back-projection to the original space.
        Parameters
        ----------
        X : ndarray or sparse matrix, shape (n_samples, n_features)
The data used to scale along the features axis. If a sparse
matrix is provided, it will be converted into a sparse
``csc_matrix``. Additionally, the sparse matrix needs to be
nonnegative if `ignore_implicit_zeros` is False.
Returns
-------
Xt : ndarray or sparse matrix, shape (n_samples, n_features)
The projected data.
"""
X = self._check_inputs(X, accept_sparse_negative=True)
self._check_is_fitted(X)
return self._transform(X, inverse=True)
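# Illustrative usage sketch (added for clarity; not part of the original
# scikit-learn source): with the default uniform output distribution the
# transformed values always lie in [0, 1], regardless of the input scale.
#
#   >>> import numpy as np
#   >>> from sklearn.preprocessing import QuantileTransformer
#   >>> rng = np.random.RandomState(0)
#   >>> X = rng.lognormal(size=(100, 1))      # heavily skewed data
#   >>> Xt = QuantileTransformer(n_quantiles=10, random_state=0).fit_transform(X)
#   >>> bool(Xt.min() >= 0.0) and bool(Xt.max() <= 1.0)
#   True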
def quantile_transform(X, axis=0, n_quantiles=1000,
output_distribution='uniform',
ignore_implicit_zeros=False,
subsample=int(1e5),
random_state=None,
copy=False):
"""Transform features using quantiles information.
This method transforms the features to follow a uniform or a normal
distribution. Therefore, for a given feature, this transformation tends
to spread out the most frequent values. It also reduces the impact of
(marginal) outliers: this is therefore a robust preprocessing scheme.
The transformation is applied on each feature independently.
    The cumulative distribution function of a feature is used to project the
original values. Features values of new/unseen data that fall below
or above the fitted range will be mapped to the bounds of the output
distribution. Note that this transform is non-linear. It may distort linear
correlations between variables measured at the same scale but renders
variables measured at different scales more directly comparable.
Read more in the :ref:`User Guide <preprocessing_transformer>`.
Parameters
----------
X : array-like, sparse matrix
The data to transform.
axis : int, (default=0)
Axis used to compute the means and standard deviations along. If 0,
transform each feature, otherwise (if 1) transform each sample.
n_quantiles : int, optional (default=1000)
Number of quantiles to be computed. It corresponds to the number
        of landmarks used to discretize the cumulative distribution function.
output_distribution : str, optional (default='uniform')
Marginal distribution for the transformed data. The choices are
'uniform' (default) or 'normal'.
ignore_implicit_zeros : bool, optional (default=False)
Only applies to sparse matrices. If True, the sparse entries of the
matrix are discarded to compute the quantile statistics. If False,
these entries are treated as zeros.
subsample : int, optional (default=1e5)
Maximum number of samples used to estimate the quantiles for
computational efficiency. Note that the subsampling procedure may
differ for value-identical sparse and dense matrices.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by np.random. Note that this is used by subsampling and smoothing
noise.
    copy : boolean, optional, (default=False)
Set to False to perform inplace transformation and avoid a copy (if the
input is already a numpy array).
Attributes
----------
quantiles_ : ndarray, shape (n_quantiles, n_features)
        The values corresponding to the quantiles of reference.
    references_ : ndarray, shape (n_quantiles,)
        Quantiles of references.
Examples
--------
>>> import numpy as np
>>> from sklearn.preprocessing import quantile_transform
>>> rng = np.random.RandomState(0)
>>> X = np.sort(rng.normal(loc=0.5, scale=0.25, size=(25, 1)), axis=0)
>>> quantile_transform(X, n_quantiles=10, random_state=0)
... # doctest: +ELLIPSIS
array([...])
See also
--------
QuantileTransformer : Performs quantile-based scaling using the
``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`).
scale : perform standardization that is faster, but less robust
to outliers.
robust_scale : perform robust standardization that removes the influence
of outliers but does not put outliers and inliers on the same scale.
Notes
-----
For a comparison of the different scalers, transformers, and normalizers,
see :ref:`examples/preprocessing/plot_all_scaling.py
<sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
"""
n = QuantileTransformer(n_quantiles=n_quantiles,
output_distribution=output_distribution,
subsample=subsample,
ignore_implicit_zeros=ignore_implicit_zeros,
random_state=random_state,
copy=copy)
if axis == 0:
return n.fit_transform(X)
elif axis == 1:
return n.fit_transform(X.T).T
else:
raise ValueError("axis should be either equal to 0 or 1. Got"
" axis={}".format(axis))
|
bsd-3-clause
|
unsiloai/syntaxnet-ops-hack
|
tensorflow/contrib/learn/__init__.py
|
42
|
2596
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# TODO(ptucker,ipolosukhin): Improve descriptions.
"""High level API for learning.
See the @{$python/contrib.learn} guide.
@@BaseEstimator
@@Estimator
@@Trainable
@@Evaluable
@@KMeansClustering
@@ModeKeys
@@ModelFnOps
@@MetricSpec
@@PredictionKey
@@DNNClassifier
@@DNNEstimator
@@DNNRegressor
@@DNNLinearCombinedRegressor
@@DNNLinearCombinedEstimator
@@DNNLinearCombinedClassifier
@@DynamicRnnEstimator
@@LinearClassifier
@@LinearEstimator
@@LinearRegressor
@@LogisticRegressor
@@StateSavingRnnEstimator
@@SVM
@@SKCompat
@@Head
@@multi_class_head
@@multi_label_head
@@binary_svm_head
@@regression_head
@@poisson_regression_head
@@multi_head
@@no_op_train_fn
@@Experiment
@@ExportStrategy
@@TaskType
@@NanLossDuringTrainingError
@@RunConfig
@@evaluate
@@infer
@@run_feeds
@@run_n
@@train
@@extract_dask_data
@@extract_dask_labels
@@extract_pandas_data
@@extract_pandas_labels
@@extract_pandas_matrix
@@infer_real_valued_columns_from_input
@@infer_real_valued_columns_from_input_fn
@@read_batch_examples
@@read_batch_features
@@read_batch_record_features
@@read_keyed_batch_examples
@@read_keyed_batch_examples_shared_queue
@@read_keyed_batch_features
@@read_keyed_batch_features_shared_queue
@@InputFnOps
@@ProblemType
@@build_parsing_serving_input_fn
@@make_export_strategy
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=wildcard-import
from tensorflow.contrib.learn.python.learn import *
# pylint: enable=wildcard-import
from tensorflow.contrib.learn.python.learn import learn_runner_lib as learn_runner
from tensorflow.python.util.all_util import remove_undocumented
_allowed_symbols = ['datasets', 'head', 'io', 'learn_runner', 'models',
'monitors', 'NotFittedError', 'ops', 'preprocessing',
'utils', 'graph_actions']
remove_undocumented(__name__, _allowed_symbols)
|
apache-2.0
|
ClimbsRocks/scikit-learn
|
examples/linear_model/plot_huber_vs_ridge.py
|
127
|
2206
|
"""
=======================================================
HuberRegressor vs Ridge on dataset with strong outliers
=======================================================
Fit Ridge and HuberRegressor on a dataset with outliers.
The example shows that the predictions in ridge are strongly influenced
by the outliers present in the dataset. The Huber regressor is less
influenced by the outliers since the model uses the linear loss for these.
As the parameter epsilon is increased for the Huber regressor, the decision
function approaches that of the ridge.
"""
# Authors: Manoj Kumar [email protected]
# License: BSD 3 clause
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_regression
from sklearn.linear_model import HuberRegressor, Ridge
# Generate toy data.
rng = np.random.RandomState(0)
X, y = make_regression(n_samples=20, n_features=1, random_state=0, noise=4.0,
bias=100.0)
# Add four strong outliers to the dataset.
X_outliers = rng.normal(0, 0.5, size=(4, 1))
y_outliers = rng.normal(0, 2.0, size=4)
X_outliers[:2, :] += X.max() + X.mean() / 4.
X_outliers[2:, :] += X.min() - X.mean() / 4.
y_outliers[:2] += y.min() - y.mean() / 4.
y_outliers[2:] += y.max() + y.mean() / 4.
X = np.vstack((X, X_outliers))
y = np.concatenate((y, y_outliers))
plt.plot(X, y, 'b.')
# Fit the huber regressor over a series of epsilon values.
colors = ['r-', 'b-', 'y-', 'm-']
x = np.linspace(X.min(), X.max(), 7)
epsilon_values = [1.35, 1.5, 1.75, 1.9]
for k, epsilon in enumerate(epsilon_values):
huber = HuberRegressor(fit_intercept=True, alpha=0.0, max_iter=100,
epsilon=epsilon)
huber.fit(X, y)
coef_ = huber.coef_ * x + huber.intercept_
plt.plot(x, coef_, colors[k], label="huber loss, %s" % epsilon)
# Fit a ridge regressor to compare it to huber regressor.
ridge = Ridge(fit_intercept=True, alpha=0.0, random_state=0, normalize=True)
ridge.fit(X, y)
coef_ridge = ridge.coef_
coef_ = ridge.coef_ * x + ridge.intercept_
plt.plot(x, coef_, 'g-', label="ridge regression")
plt.title("Comparison of HuberRegressor vs Ridge")
plt.xlabel("X")
plt.ylabel("y")
plt.legend(loc=0)
plt.show()
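# Editorial addition (hedged): a small numeric follow-up to the plot above. It
# refits a HuberRegressor with its default epsilon and compares slopes with the
# ridge fit from earlier in this script; exact values depend on the random
# outliers generated above.
huber_default = HuberRegressor(fit_intercept=True, alpha=0.0)
huber_default.fit(X, y)
print("Huber slope (default epsilon): %.2f" % huber_default.coef_[0])
print("Ridge slope: %.2f" % ridge.coef_[0])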
|
bsd-3-clause
|
gyglim/Recipes
|
papers/preactivation_and_wide_resnet/utils.py
|
3
|
5026
|
import random
import cPickle
import numpy as np
from sklearn.utils import shuffle
from sklearn.cross_validation import train_test_split
PIXELS = 32
PAD_CROP = 4
PAD_PIXELS = PIXELS + (PAD_CROP * 2)
imageSize = PIXELS * PIXELS
num_features = imageSize * 3
# ##################### Load data from CIFAR-10 dataset #######################
# this code assumes the cifar dataset from 'https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz'
# has been extracted in a 'data' folder within the working directory
def load_pickle_data_cv():
fo_1 = open('data/cifar-10-batches-py/data_batch_1', 'rb')
fo_2 = open('data/cifar-10-batches-py/data_batch_2', 'rb')
fo_3 = open('data/cifar-10-batches-py/data_batch_3', 'rb')
fo_4 = open('data/cifar-10-batches-py/data_batch_4', 'rb')
fo_5 = open('data/cifar-10-batches-py/data_batch_5', 'rb')
dict_1 = cPickle.load(fo_1)
fo_1.close()
dict_2 = cPickle.load(fo_2)
fo_2.close()
dict_3 = cPickle.load(fo_3)
fo_3.close()
dict_4 = cPickle.load(fo_4)
fo_4.close()
dict_5 = cPickle.load(fo_5)
fo_5.close()
data_1 = dict_1['data']
data_2 = dict_2['data']
data_3 = dict_3['data']
data_4 = dict_4['data']
data_5 = dict_5['data']
labels_1 = dict_1['labels']
labels_2 = dict_2['labels']
labels_3 = dict_3['labels']
labels_4 = dict_4['labels']
labels_5 = dict_5['labels']
X_train = np.vstack((data_1, data_2, data_3, data_4, data_5))
y_train = np.hstack((labels_1, labels_2, labels_3, labels_4, labels_5)).astype('int32')
X_train, y_train = shuffle(X_train, y_train)
X_train, X_test, y_train, y_test = train_test_split(X_train, y_train, test_size=0.1)
X_train = X_train.reshape(X_train.shape[0], 3, PIXELS, PIXELS).astype('float32')
X_test = X_test.reshape(X_test.shape[0], 3, PIXELS, PIXELS).astype('float32')
# subtract per-pixel mean
pixel_mean = np.mean(X_train, axis=0)
print pixel_mean
np.save('data/pixel_mean.npy', pixel_mean)
X_train -= pixel_mean
X_test -= pixel_mean
return X_train, X_test, y_train, y_test
def load_pickle_data_test():
fo_test = open('data/cifar-10-batches-py/test_batch', 'rb')
dict_test = cPickle.load(fo_test)
fo_test.close()
test_X = dict_test['data']
test_y = dict_test['labels']
test_y = np.hstack(test_y).astype('int32')
test_X = test_X.reshape(test_X.shape[0], 3, PIXELS, PIXELS).astype('float32')
pixel_mean = np.load('data/pixel_mean.npy')
test_X -= pixel_mean
return test_X, test_y
def batch_iterator_train_crop_flip(data, y, batchsize, train_fn):
'''
Data augmentation batch iterator for feeding images into CNN.
Pads each image with 4 pixels on every side.
Randomly crops image with original image shape from padded image. Effectively translating it.
Flips image lr with probability 0.5.
'''
n_samples = data.shape[0]
    # Shuffle indices of the training data, so we can draw batches from random indices instead of shuffling the whole dataset
indx = np.random.permutation(xrange(n_samples))
loss = []
acc_train = 0.
for i in range((n_samples + batchsize - 1) // batchsize):
sl = slice(i * batchsize, (i + 1) * batchsize)
X_batch = data[indx[sl]]
y_batch = y[indx[sl]]
# pad and crop settings
trans_1 = random.randint(0, (PAD_CROP*2))
trans_2 = random.randint(0, (PAD_CROP*2))
crop_x1 = trans_1
crop_x2 = (PIXELS + trans_1)
crop_y1 = trans_2
crop_y2 = (PIXELS + trans_2)
# flip left-right choice
flip_lr = random.randint(0,1)
# set empty copy to hold augmented images so that we don't overwrite
X_batch_aug = np.copy(X_batch)
# for each image in the batch do the augmentation
for j in range(X_batch.shape[0]):
# for each image channel
for k in range(X_batch.shape[1]):
# pad and crop images
img_pad = np.pad(X_batch_aug[j,k], pad_width=((PAD_CROP,PAD_CROP), (PAD_CROP,PAD_CROP)), mode='constant')
X_batch_aug[j,k] = img_pad[crop_x1:crop_x2, crop_y1:crop_y2]
# flip left-right if chosen
if flip_lr == 1:
X_batch_aug[j,k] = np.fliplr(X_batch_aug[j,k])
# fit model on each batch
loss.append(train_fn(X_batch_aug, y_batch))
return np.mean(loss)
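def _augmentation_demo():
    '''
    Editorial sketch (not part of the original utils): shows the same
    pad -> random crop -> optional left-right flip performed by
    batch_iterator_train_crop_flip, on a single dummy channel. It reuses the
    PIXELS / PAD_CROP constants defined above; the image content is random and
    only meant to show that the augmented output keeps the original shape.
    '''
    img = np.random.rand(PIXELS, PIXELS).astype('float32')
    trans_1 = random.randint(0, PAD_CROP * 2)
    trans_2 = random.randint(0, PAD_CROP * 2)
    img_pad = np.pad(img, pad_width=((PAD_CROP, PAD_CROP), (PAD_CROP, PAD_CROP)), mode='constant')
    img_aug = img_pad[trans_1:trans_1 + PIXELS, trans_2:trans_2 + PIXELS]
    if random.randint(0, 1) == 1:
        img_aug = np.fliplr(img_aug)
    assert img_aug.shape == (PIXELS, PIXELS)
    return img_aug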
def batch_iterator_valid(data_test, y_test, batchsize, valid_fn):
'''
Batch iterator for fine tuning network, no augmentation.
'''
n_samples_valid = data_test.shape[0]
loss_valid = []
acc_valid = []
for i in range((n_samples_valid + batchsize - 1) // batchsize):
sl = slice(i * batchsize, (i + 1) * batchsize)
X_batch_test = data_test[sl]
y_batch_test = y_test[sl]
loss_vv, acc_vv = valid_fn(X_batch_test, y_batch_test)
loss_valid.append(loss_vv)
acc_valid.append(acc_vv)
return np.mean(loss_valid), np.mean(acc_valid)
|
mit
|
siutanwong/scikit-learn
|
sklearn/tests/test_base.py
|
216
|
7045
|
# Author: Gael Varoquaux
# License: BSD 3 clause
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_raises
from sklearn.base import BaseEstimator, clone, is_classifier
from sklearn.svm import SVC
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
from sklearn.utils import deprecated
#############################################################################
# A few test classes
class MyEstimator(BaseEstimator):
def __init__(self, l1=0, empty=None):
self.l1 = l1
self.empty = empty
class K(BaseEstimator):
def __init__(self, c=None, d=None):
self.c = c
self.d = d
class T(BaseEstimator):
def __init__(self, a=None, b=None):
self.a = a
self.b = b
class DeprecatedAttributeEstimator(BaseEstimator):
def __init__(self, a=None, b=None):
self.a = a
if b is not None:
DeprecationWarning("b is deprecated and renamed 'a'")
self.a = b
@property
@deprecated("Parameter 'b' is deprecated and renamed to 'a'")
def b(self):
return self._b
class Buggy(BaseEstimator):
" A buggy estimator that does not set its parameters right. "
def __init__(self, a=None):
self.a = 1
class NoEstimator(object):
def __init__(self):
pass
def fit(self, X=None, y=None):
return self
def predict(self, X=None):
return None
class VargEstimator(BaseEstimator):
"""Sklearn estimators shouldn't have vargs."""
def __init__(self, *vargs):
pass
#############################################################################
# The tests
def test_clone():
# Tests that clone creates a correct deep copy.
# We create an estimator, make a copy of its original state
# (which, in this case, is the current state of the estimator),
# and check that the obtained copy is a correct deep copy.
from sklearn.feature_selection import SelectFpr, f_classif
selector = SelectFpr(f_classif, alpha=0.1)
new_selector = clone(selector)
assert_true(selector is not new_selector)
assert_equal(selector.get_params(), new_selector.get_params())
selector = SelectFpr(f_classif, alpha=np.zeros((10, 2)))
new_selector = clone(selector)
assert_true(selector is not new_selector)
def test_clone_2():
# Tests that clone doesn't copy everything.
# We first create an estimator, give it an own attribute, and
# make a copy of its original state. Then we check that the copy doesn't
# have the specific attribute we manually added to the initial estimator.
from sklearn.feature_selection import SelectFpr, f_classif
selector = SelectFpr(f_classif, alpha=0.1)
selector.own_attribute = "test"
new_selector = clone(selector)
assert_false(hasattr(new_selector, "own_attribute"))
def test_clone_buggy():
# Check that clone raises an error on buggy estimators.
buggy = Buggy()
buggy.a = 2
assert_raises(RuntimeError, clone, buggy)
no_estimator = NoEstimator()
assert_raises(TypeError, clone, no_estimator)
varg_est = VargEstimator()
assert_raises(RuntimeError, clone, varg_est)
def test_clone_empty_array():
# Regression test for cloning estimators with empty arrays
clf = MyEstimator(empty=np.array([]))
clf2 = clone(clf)
assert_array_equal(clf.empty, clf2.empty)
clf = MyEstimator(empty=sp.csr_matrix(np.array([[0]])))
clf2 = clone(clf)
assert_array_equal(clf.empty.data, clf2.empty.data)
def test_clone_nan():
# Regression test for cloning estimators with default parameter as np.nan
clf = MyEstimator(empty=np.nan)
clf2 = clone(clf)
assert_true(clf.empty is clf2.empty)
def test_repr():
# Smoke test the repr of the base estimator.
my_estimator = MyEstimator()
repr(my_estimator)
test = T(K(), K())
assert_equal(
repr(test),
"T(a=K(c=None, d=None), b=K(c=None, d=None))"
)
some_est = T(a=["long_params"] * 1000)
assert_equal(len(repr(some_est)), 415)
def test_str():
# Smoke test the str of the base estimator
my_estimator = MyEstimator()
str(my_estimator)
def test_get_params():
test = T(K(), K())
assert_true('a__d' in test.get_params(deep=True))
assert_true('a__d' not in test.get_params(deep=False))
test.set_params(a__d=2)
assert_true(test.a.d == 2)
assert_raises(ValueError, test.set_params, a__a=2)
def test_get_params_deprecated():
# deprecated attribute should not show up as params
est = DeprecatedAttributeEstimator(a=1)
assert_true('a' in est.get_params())
assert_true('a' in est.get_params(deep=True))
assert_true('a' in est.get_params(deep=False))
assert_true('b' not in est.get_params())
assert_true('b' not in est.get_params(deep=True))
assert_true('b' not in est.get_params(deep=False))
def test_is_classifier():
svc = SVC()
assert_true(is_classifier(svc))
assert_true(is_classifier(GridSearchCV(svc, {'C': [0.1, 1]})))
assert_true(is_classifier(Pipeline([('svc', svc)])))
assert_true(is_classifier(Pipeline([('svc_cv',
GridSearchCV(svc, {'C': [0.1, 1]}))])))
def test_set_params():
# test nested estimator parameter setting
clf = Pipeline([("svc", SVC())])
# non-existing parameter in svc
assert_raises(ValueError, clf.set_params, svc__stupid_param=True)
# non-existing parameter of pipeline
assert_raises(ValueError, clf.set_params, svm__stupid_param=True)
# we don't currently catch if the things in pipeline are estimators
# bad_pipeline = Pipeline([("bad", NoEstimator())])
# assert_raises(AttributeError, bad_pipeline.set_params,
# bad__stupid_param=True)
def test_score_sample_weight():
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import DecisionTreeRegressor
from sklearn import datasets
rng = np.random.RandomState(0)
# test both ClassifierMixin and RegressorMixin
estimators = [DecisionTreeClassifier(max_depth=2),
DecisionTreeRegressor(max_depth=2)]
sets = [datasets.load_iris(),
datasets.load_boston()]
for est, ds in zip(estimators, sets):
est.fit(ds.data, ds.target)
# generate random sample weights
sample_weight = rng.randint(1, 10, size=len(ds.target))
# check that the score with and without sample weights are different
assert_not_equal(est.score(ds.data, ds.target),
est.score(ds.data, ds.target,
sample_weight=sample_weight),
msg="Unweighted and weighted scores "
"are unexpectedly equal")
|
bsd-3-clause
|
gigglesninja/senior-design
|
MissionPlanner/Lib/site-packages/scipy/misc/common.py
|
53
|
10116
|
"""
Functions which are common and require SciPy Base and Level 1 SciPy
(special, linalg)
"""
from numpy import exp, asarray, arange, newaxis, hstack, product, array, \
where, zeros, extract, place, pi, sqrt, eye, poly1d, dot, r_
__all__ = ['factorial','factorial2','factorialk','comb',
'central_diff_weights', 'derivative', 'pade', 'lena']
# XXX: the factorial functions could move to scipy.special, and the others
# to numpy perhaps?
def factorial(n,exact=0):
"""
The factorial function, n! = special.gamma(n+1).
If exact is 0, then floating point precision is used, otherwise
exact long integer is computed.
- Array argument accepted only for exact=0 case.
- If n<0, the return value is 0.
Parameters
----------
n : int or array_like of ints
Calculate ``n!``. Arrays are only supported with `exact` set
to False. If ``n < 0``, the return value is 0.
exact : bool, optional
The result can be approximated rapidly using the gamma-formula
above. If `exact` is set to True, calculate the
answer exactly using integer arithmetic. Default is False.
Returns
-------
nf : float or int
Factorial of `n`, as an integer or a float depending on `exact`.
Examples
--------
>>> arr = np.array([3,4,5])
>>> sc.factorial(arr, exact=False)
array([ 6., 24., 120.])
>>> sc.factorial(5, exact=True)
120L
"""
if exact:
if n < 0:
return 0L
val = 1L
for k in xrange(1,n+1):
val *= k
return val
else:
from scipy import special
n = asarray(n)
sv = special.errprint(0)
vals = special.gamma(n+1)
sv = special.errprint(sv)
return where(n>=0,vals,0)
def factorial2(n, exact=False):
"""
Double factorial.
This is the factorial with every second value skipped, i.e.,
``7!! = 7 * 5 * 3 * 1``. It can be approximated numerically as::
        n!! = special.gamma(n/2+1)*2**((n+1)/2)/sqrt(pi)    n odd
            = 2**(n/2) * (n/2)!                             n even
Parameters
----------
n : int or array_like
Calculate ``n!!``. Arrays are only supported with `exact` set
to False. If ``n < 0``, the return value is 0.
exact : bool, optional
The result can be approximated rapidly using the gamma-formula
above (default). If `exact` is set to True, calculate the
answer exactly using integer arithmetic.
Returns
-------
nff : float or int
Double factorial of `n`, as an int or a float depending on
`exact`.
Examples
--------
>>> factorial2(7, exact=False)
array(105.00000000000001)
>>> factorial2(7, exact=True)
105L
"""
if exact:
if n < -1:
return 0L
if n <= 0:
return 1L
val = 1L
for k in xrange(n,0,-2):
val *= k
return val
else:
from scipy import special
n = asarray(n)
vals = zeros(n.shape,'d')
cond1 = (n % 2) & (n >= -1)
cond2 = (1-(n % 2)) & (n >= -1)
oddn = extract(cond1,n)
evenn = extract(cond2,n)
nd2o = oddn / 2.0
nd2e = evenn / 2.0
place(vals,cond1,special.gamma(nd2o+1)/sqrt(pi)*pow(2.0,nd2o+0.5))
place(vals,cond2,special.gamma(nd2e+1) * pow(2.0,nd2e))
return vals
def factorialk(n,k,exact=1):
"""
    n(!!...!) = multifactorial of order k (the '!' repeated k times)
Parameters
----------
    n : int, array_like
        Calculate multifactorial. Arrays are only supported with exact
        set to False. If n < 0, the return value is 0.
    k : int
        Order of the multifactorial.
exact : bool, optional
If exact is set to True, calculate the answer exactly using
integer arithmetic.
Returns
-------
val : int
Multi factorial of n.
Raises
------
NotImplementedError
        Raised when `exact` is False.
Examples
--------
>>> sc.factorialk(5, 1, exact=True)
120L
>>> sc.factorialk(5, 3, exact=True)
10L
"""
if exact:
if n < 1-k:
return 0L
if n<=0:
return 1L
val = 1L
for j in xrange(n,0,-k):
val = val*j
return val
else:
raise NotImplementedError
def comb(N,k,exact=0):
"""
The number of combinations of N things taken k at a time.
This is often expressed as "N choose k".
Parameters
----------
N : int, array
Number of things.
k : int, array
Number of elements taken.
exact : int, optional
If exact is 0, then floating point precision is used, otherwise
exact long integer is computed.
Returns
-------
val : int, array
The total number of combinations.
Notes
-----
- Array arguments accepted only for exact=0 case.
- If k > N, N < 0, or k < 0, then a 0 is returned.
Examples
--------
>>> k = np.array([3, 4])
>>> n = np.array([10, 10])
>>> sc.comb(n, k, exact=False)
array([ 120., 210.])
>>> sc.comb(10, 3, exact=True)
120L
"""
if exact:
if (k > N) or (N < 0) or (k < 0):
return 0L
val = 1L
for j in xrange(min(k, N-k)):
val = (val*(N-j))//(j+1)
return val
else:
from scipy import special
k,N = asarray(k), asarray(N)
lgam = special.gammaln
cond = (k <= N) & (N >= 0) & (k >= 0)
sv = special.errprint(0)
vals = exp(lgam(N+1) - lgam(N-k+1) - lgam(k+1))
sv = special.errprint(sv)
return where(cond, vals, 0.0)
def central_diff_weights(Np, ndiv=1):
"""
Return weights for an Np-point central derivative of order ndiv
assuming equally-spaced function points.
If weights are in the vector w, then
    derivative is w[0] * f(x-ho*dx) + ... + w[-1] * f(x+ho*dx)
Notes
-----
Can be inaccurate for large number of points.
"""
if Np < ndiv + 1:
raise ValueError("Number of points must be at least the derivative order + 1.")
if Np % 2 == 0:
raise ValueError("The number of points must be odd.")
from scipy import linalg
ho = Np >> 1
x = arange(-ho,ho+1.0)
x = x[:,newaxis]
X = x**0.0
for k in range(1,Np):
X = hstack([X,x**k])
w = product(arange(1,ndiv+1),axis=0)*linalg.inv(X)[ndiv]
return w
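def _central_diff_weights_demo():
    """
    Editorial sketch (not part of the original module): checks the weights
    above against the familiar 3-point central difference stencil
    f'(x) ~= (f(x+dx) - f(x-dx)) / (2*dx), i.e. weights (-1/2, 0, 1/2).
    """
    w = central_diff_weights(3, 1)
    assert (abs(w - array([-0.5, 0.0, 0.5])) < 1e-12).all()
    return w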
def derivative(func, x0, dx=1.0, n=1, args=(), order=3):
"""
Find the n-th derivative of a function at point x0.
Given a function, use a central difference formula with spacing `dx` to
compute the n-th derivative at `x0`.
Parameters
----------
func : function
Input function.
x0 : float
The point at which nth derivative is found.
    dx : float, optional
        Spacing.
    n : int, optional
        Order of the derivative. Default is 1.
    args : tuple, optional
        Arguments to pass to `func`.
    order : int, optional
        Number of points to use; must be odd.
Notes
-----
    Decreasing the step size too much can result in round-off error.
Examples
--------
>>> def x2(x):
... return x*x
...
>>> derivative(x2, 2)
4.0
"""
if order < n + 1:
raise ValueError("'order' (the number of points used to compute the derivative), "
"must be at least the derivative order 'n' + 1.")
if order % 2 == 0:
raise ValueError("'order' (the number of points used to compute the derivative) "
"must be odd.")
# pre-computed for n=1 and 2 and low-order for speed.
if n==1:
if order == 3:
weights = array([-1,0,1])/2.0
elif order == 5:
weights = array([1,-8,0,8,-1])/12.0
elif order == 7:
weights = array([-1,9,-45,0,45,-9,1])/60.0
elif order == 9:
weights = array([3,-32,168,-672,0,672,-168,32,-3])/840.0
else:
weights = central_diff_weights(order,1)
elif n==2:
if order == 3:
weights = array([1,-2.0,1])
elif order == 5:
weights = array([-1,16,-30,16,-1])/12.0
elif order == 7:
weights = array([2,-27,270,-490,270,-27,2])/180.0
elif order == 9:
weights = array([-9,128,-1008,8064,-14350,8064,-1008,128,-9])/5040.0
else:
weights = central_diff_weights(order,2)
else:
weights = central_diff_weights(order, n)
val = 0.0
ho = order >> 1
for k in range(order):
val += weights[k]*func(x0+(k-ho)*dx,*args)
return val / product((dx,)*n,axis=0)
def pade(an, m):
"""Given Taylor series coefficients in an, return a Pade approximation to
the function as the ratio of two polynomials p / q where the order of q is m.
"""
from scipy import linalg
an = asarray(an)
N = len(an) - 1
n = N-m
if (n < 0):
raise ValueError("Order of q <m> must be smaller than len(an)-1.")
Akj = eye(N+1,n+1)
Bkj = zeros((N+1,m),'d')
for row in range(1,m+1):
Bkj[row,:row] = -(an[:row])[::-1]
for row in range(m+1,N+1):
Bkj[row,:] = -(an[row-m:row])[::-1]
C = hstack((Akj,Bkj))
pq = dot(linalg.inv(C),an)
p = pq[:n+1]
q = r_[1.0,pq[n+1:]]
return poly1d(p[::-1]), poly1d(q[::-1])
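def _pade_exp_demo():
    """
    Editorial sketch (not part of the original module): builds the [2/2] Pade
    approximant of exp(x) from its Taylor coefficients 1, 1, 1/2, 1/6, 1/24
    using pade() above, and checks it against exp at a small argument.
    """
    an = [1.0, 1.0, 1.0/2, 1.0/6, 1.0/24]
    p, q = pade(an, 2)
    x = 0.1
    approx = p(x) / q(x)
    assert abs(approx - exp(x)) < 1e-6
    return approx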
def lena():
"""
Get classic image processing example image, Lena, at 8-bit grayscale
bit-depth, 512 x 512 size.
Parameters
----------
None
Returns
-------
lena : ndarray
Lena image
Examples
--------
>>> import scipy.misc
>>> lena = scipy.misc.lena()
>>> lena.shape
(512, 512)
>>> lena.max()
245
>>> lena.dtype
dtype('int32')
>>> import matplotlib.pyplot as plt
>>> plt.gray()
>>> plt.imshow(lena)
>>> plt.show()
"""
import cPickle, os
fname = os.path.join(os.path.dirname(__file__),'lena.dat')
f = open(fname,'rb')
lena = array(cPickle.load(f))
f.close()
return lena
|
gpl-2.0
|
unnikrishnankgs/va
|
venv/lib/python3.5/site-packages/tensorflow/models/skip_thoughts/skip_thoughts/vocabulary_expansion.py
|
18
|
7370
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Compute an expanded vocabulary of embeddings using a word2vec model.
This script loads the word embeddings from a trained skip-thoughts model and
from a trained word2vec model (typically with a larger vocabulary). It trains a
linear regression model without regularization to learn a linear mapping from
the word2vec embedding space to the skip-thoughts embedding space. The model is
then applied to all words in the word2vec vocabulary, yielding vectors in the
skip-thoughts word embedding space for the union of the two vocabularies.
The linear regression task is to learn a parameter matrix W to minimize
|| X - Y * W ||^2,
where X is a matrix of skip-thoughts embeddings of shape [num_words, dim1],
Y is a matrix of word2vec embeddings of shape [num_words, dim2], and W is a
matrix of shape [dim2, dim1].
This is based on the "Translation Matrix" method from the paper:
"Exploiting Similarities among Languages for Machine Translation"
Tomas Mikolov, Quoc V. Le, Ilya Sutskever
https://arxiv.org/abs/1309.4168
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os.path
import gensim.models
import numpy as np
import sklearn.linear_model
import tensorflow as tf
FLAGS = tf.flags.FLAGS
tf.flags.DEFINE_string("skip_thoughts_model", None,
"Checkpoint file or directory containing a checkpoint "
"file.")
tf.flags.DEFINE_string("skip_thoughts_vocab", None,
"Path to vocabulary file containing a list of newline-"
"separated words where the word id is the "
"corresponding 0-based index in the file.")
tf.flags.DEFINE_string("word2vec_model", None,
"File containing a word2vec model in binary format.")
tf.flags.DEFINE_string("output_dir", None, "Output directory.")
tf.logging.set_verbosity(tf.logging.INFO)
def _load_skip_thoughts_embeddings(checkpoint_path):
"""Loads the embedding matrix from a skip-thoughts model checkpoint.
Args:
checkpoint_path: Model checkpoint file or directory containing a checkpoint
file.
Returns:
word_embedding: A numpy array of shape [vocab_size, embedding_dim].
Raises:
ValueError: If no checkpoint file matches checkpoint_path.
"""
if tf.gfile.IsDirectory(checkpoint_path):
checkpoint_file = tf.train.latest_checkpoint(checkpoint_path)
if not checkpoint_file:
raise ValueError("No checkpoint file found in %s" % checkpoint_path)
else:
checkpoint_file = checkpoint_path
tf.logging.info("Loading skip-thoughts embedding matrix from %s",
checkpoint_file)
reader = tf.train.NewCheckpointReader(checkpoint_file)
word_embedding = reader.get_tensor("word_embedding")
tf.logging.info("Loaded skip-thoughts embedding matrix of shape %s",
word_embedding.shape)
return word_embedding
def _load_vocabulary(filename):
"""Loads a vocabulary file.
Args:
filename: Path to text file containing newline-separated words.
Returns:
vocab: A dictionary mapping word to word id.
"""
tf.logging.info("Reading vocabulary from %s", filename)
vocab = collections.OrderedDict()
with tf.gfile.GFile(filename, mode="r") as f:
for i, line in enumerate(f):
word = line.decode("utf-8").strip()
assert word not in vocab, "Attempting to add word twice: %s" % word
vocab[word] = i
tf.logging.info("Read vocabulary of size %d", len(vocab))
return vocab
def _expand_vocabulary(skip_thoughts_emb, skip_thoughts_vocab, word2vec):
"""Runs vocabulary expansion on a skip-thoughts model using a word2vec model.
Args:
skip_thoughts_emb: A numpy array of shape [skip_thoughts_vocab_size,
skip_thoughts_embedding_dim].
skip_thoughts_vocab: A dictionary of word to id.
word2vec: An instance of gensim.models.Word2Vec.
Returns:
combined_emb: A dictionary mapping words to embedding vectors.
"""
# Find words shared between the two vocabularies.
tf.logging.info("Finding shared words")
shared_words = [w for w in word2vec.vocab if w in skip_thoughts_vocab]
# Select embedding vectors for shared words.
tf.logging.info("Selecting embeddings for %d shared words", len(shared_words))
shared_st_emb = skip_thoughts_emb[[
skip_thoughts_vocab[w] for w in shared_words
]]
shared_w2v_emb = word2vec[shared_words]
# Train a linear regression model on the shared embedding vectors.
tf.logging.info("Training linear regression model")
model = sklearn.linear_model.LinearRegression()
model.fit(shared_w2v_emb, shared_st_emb)
# Create the expanded vocabulary.
tf.logging.info("Creating embeddings for expanded vocabuary")
combined_emb = collections.OrderedDict()
for w in word2vec.vocab:
# Ignore words with underscores (spaces).
if "_" not in w:
w_emb = model.predict(word2vec[w].reshape(1, -1))
combined_emb[w] = w_emb.reshape(-1)
for w in skip_thoughts_vocab:
combined_emb[w] = skip_thoughts_emb[skip_thoughts_vocab[w]]
tf.logging.info("Created expanded vocabulary of %d words", len(combined_emb))
return combined_emb
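def _translation_matrix_demo():
  """Editorial sketch (not part of the original script): a toy illustration of
  the linear mapping learned above. All names and shapes here are made up; it
  only shows that sklearn's LinearRegression recovers W exactly when the
  skip-thoughts embeddings X are a noise-free linear function of the word2vec
  embeddings Y, i.e. X = Y * W.
  """
  rng = np.random.RandomState(0)
  dim2, dim1, num_words = 5, 3, 200
  w_true = rng.randn(dim2, dim1)
  y = rng.randn(num_words, dim2)  # stand-in for word2vec embeddings
  x = np.dot(y, w_true)  # stand-in for skip-thoughts embeddings
  model = sklearn.linear_model.LinearRegression()
  model.fit(y, x)
  # model.coef_ has shape [dim1, dim2]; its transpose approximates W.
  assert np.allclose(model.coef_.T, w_true, atol=1e-8)
  return model.coef_.T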
def main(unused_argv):
if not FLAGS.skip_thoughts_model:
raise ValueError("--skip_thoughts_model is required.")
if not FLAGS.skip_thoughts_vocab:
raise ValueError("--skip_thoughts_vocab is required.")
if not FLAGS.word2vec_model:
raise ValueError("--word2vec_model is required.")
if not FLAGS.output_dir:
raise ValueError("--output_dir is required.")
if not tf.gfile.IsDirectory(FLAGS.output_dir):
tf.gfile.MakeDirs(FLAGS.output_dir)
# Load the skip-thoughts embeddings and vocabulary.
skip_thoughts_emb = _load_skip_thoughts_embeddings(FLAGS.skip_thoughts_model)
skip_thoughts_vocab = _load_vocabulary(FLAGS.skip_thoughts_vocab)
# Load the Word2Vec model.
word2vec = gensim.models.Word2Vec.load_word2vec_format(
FLAGS.word2vec_model, binary=True)
# Run vocabulary expansion.
embedding_map = _expand_vocabulary(skip_thoughts_emb, skip_thoughts_vocab,
word2vec)
# Save the output.
vocab = embedding_map.keys()
vocab_file = os.path.join(FLAGS.output_dir, "vocab.txt")
with tf.gfile.GFile(vocab_file, "w") as f:
f.write("\n".join(vocab))
tf.logging.info("Wrote vocabulary file to %s", vocab_file)
embeddings = np.array(embedding_map.values())
embeddings_file = os.path.join(FLAGS.output_dir, "embeddings.npy")
np.save(embeddings_file, embeddings)
tf.logging.info("Wrote embeddings file to %s", embeddings_file)
if __name__ == "__main__":
tf.app.run()
|
bsd-2-clause
|
gmierz/pupil-lib
|
pupillib/core/workers/processors/trigger_processor.py
|
1
|
9947
|
'''
(*)~---------------------------------------------------------------------------
This file is part of Pupil-lib.
Pupil-lib is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Pupil-lib is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Pupil-lib. If not, see <https://www.gnu.org/licenses/>.
Copyright (C) 2018 Gregory W. Mierzwinski
---------------------------------------------------------------------------~(*)
'''
import os
import threading
import numpy as np
from pupillib.core.utilities.MPLogger import MultiProcessingLog
from pupillib.core.workers.processors.decorator_registrar import *
# Imports for pre and post processing functions go below this line and above
# the end line below. This is the recommended method of adding new and long
# pre and post processing functions. Import them from the folder and run
# them with some sort of main function. Also, they must only ever accept two
# parameters. Use the config dictionary to modify what you get without
# complicating the code.
#
# --------------------------- Imports start line ----------------------------#
from pupillib.core.workers.processors.processing_functions.testing_functions import *
from matplotlib import pyplot as plt
from pupillib.core.utilities.utilities import *
# --------------------------- Imports end line ----------------------------#
class TriggerDefaults():
@staticmethod
def pre_defaults():
return []
@staticmethod
def post_defaults():
return [{'name': 'custom_resample', 'config': [{'srate': 256}]},
{'name': 'rm_baseline', 'config': []},
{'name': 'get_percent_change', 'config': []}]
class TriggerProcessor():
def __init__(self):
self.logger = MultiProcessingLog.get_logger()
pre = makeregistrar()
post = makeregistrar()
@pre
def tester(trigger_data, config):
print('helloooooo')
@post
def tester2(trigger_data, config):
print('done.')
@post
def tester3(trigger_data, config):
a_test_to_do('Print this!')
# Testing/demo function.
@pre
def get_sums(trigger_data, config):
args = config['config']
args1 = args[0]['srate']
print('get_sums got: ' + str(args1))
print('Result: ' + str(int(args1) + 10))
return trigger_data
@post
def custom_resample(trigger_data, config):
args = config['config']
logger = MultiProcessingLog.get_logger()
testing = trigger_data['config']['testing']
# Get srate
srate = args[0]['srate']
proc_trial_data = trigger_data['trials']
proc_trial_data = {
trial_name: trial_info for trial_name, trial_info in proc_trial_data.items()
if 'trial' in trial_info
and 'timestamps' in trial_info['trial']
and 'data' in trial_info['trial']
and len(trial_info['trial']['timestamps']) > 0
and len(trial_info['trial']['data']) > 0
}
if len(proc_trial_data) <= 0:
return trigger_data
for trial_num, trial_info in proc_trial_data.items():
if 'reject' in trial_info and trial_info['reject']:
continue
times = trial_info['trial']['timestamps']
stimes = np.asarray(times) - times[0]
                new_xrange = np.linspace(stimes[0], stimes[-1], num=int(srate*(stimes[-1]-stimes[0])))
trial_info['trial']['data'] = np.interp(
new_xrange,
stimes,
trial_info['trial']['data']
)
trial_info['trial']['timestamps'] = new_xrange
if 'trial_proc' in trial_info:
trial_info['trial_proc']['data'] = np.interp(
new_xrange,
stimes,
trial_info['trial_proc']['data']
)
trial_info['trial_proc']['timestamps'] = new_xrange
return trigger_data
# This function should only be run after the
# custom resampling phase.
@post
def rm_baseline(trigger_data, config):
args = config['config']
logger = MultiProcessingLog.get_logger()
testing = trigger_data['config']['testing']
proc_trial_data = trigger_data['trials']
new_trial_data = copy.deepcopy(proc_trial_data)
baseline_range = trigger_data['config']['baseline']
if not baseline_range:
return trigger_data
for trial_num, trial_info in proc_trial_data.items():
if 'baseline_mean' in new_trial_data[trial_num]:
continue
times = copy.deepcopy(trial_info['trial']['timestamps'])
data = copy.deepcopy(trial_info['trial']['data'])
# Subtract initial
times = times - times[0]
total_time = times[-1]
# Check to make sure the baseline range is OK.
if baseline_range[0] < times[0]:
raise Exception("Error: Cannot have a negative baseline range start. All trials start at 0. ")
if baseline_range[1] > total_time:
raise Exception("Error: Cannot have a baseline range that exceeds the total time of the trial. ")
# Get the initial point, then the final point, with all points in
# between as the baseline mean for each trial.
bmean = 0
pcount = 0
found_first = False
for time_ind in range(len(times)-1):
# While we have not found the first point, continue looking
if not found_first:
if times[time_ind] <= baseline_range[0] < times[time_ind+1]:
pcount += 1
if baseline_range[0] == times[time_ind]:
bmean += data[time_ind]
else:
bmean += linear_approx(data[time_ind], times[time_ind],
data[time_ind+1], times[time_ind+1],
baseline_range[0])
found_first = True
continue
# Check if we have the final point area, if we do, get it and
# finish looking for points.
if times[time_ind] <= baseline_range[1] < times[time_ind+1]:
pcount += 1
if baseline_range[1] == times[time_ind]:
bmean += data[time_ind]
else:
bmean += linear_approx(data[time_ind], times[time_ind],
data[time_ind + 1], times[time_ind + 1],
baseline_range[1])
break
# We get here when we're in between the first and final points.
pcount += 1
bmean += data[time_ind]
# For each trial, calculate the baseline removed data and store the baseline mean.
new_trial_data[trial_num]['baseline_mean'] = bmean/pcount
new_trial_data[trial_num]['trial_rmbaseline']['data'] = data - new_trial_data[trial_num]['baseline_mean']
trigger_data['trials'] = new_trial_data
return trigger_data
# Calculates the percent change data for each trial.
@post
def get_percent_change(trigger_data, config):
proc_trial_data = trigger_data['trials']
pcs = {}
if not trigger_data['config']['baseline']:
return trigger_data
# Get the baseline means if it wasn't already calculated.
if len(proc_trial_data) > 0:
for trial_num, trial_info in proc_trial_data.items():
if 'baseline_mean' not in trial_info:
trigger_data = rm_baseline(trigger_data, {})
break
proc_trial_data = trigger_data['trials']
for trial_num, trial_info in proc_trial_data.items():
bmean = trial_info['baseline_mean']
data = copy.deepcopy(trial_info['trial_rmbaseline']['data'])
if bmean and bmean != 0:
pcs[trial_num] = data / bmean
else:
self.logger.send('WARNING', 'Baseline mean is 0 or undefined for a trial for name: ' + trial_info['config']['name'],
os.getpid(), threading.get_ident())
self.logger.send('WARNING', 'Not computing percent change for name: ' + trial_info['config']['name'],
os.getpid(), threading.get_ident())
pcs[trial_num] = data
trigger_data['trials'][trial_num]['reject'] = True
for trial_num in pcs:
trigger_data['trials'][trial_num]['trial_pc']['data'] = pcs[trial_num]
return trigger_data
self.pre_processing = pre
self.post_processing = post
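def _percent_change_demo():
    '''
    Editorial sketch (not part of Pupil-lib): a minimal numpy illustration of
    the baseline-removal and percent-change steps implemented above, on a
    made-up trace. The real rm_baseline also interpolates at the edges of the
    baseline window; here the baseline mean is simply the average of the first
    three samples.
    '''
    data = np.array([4.0, 4.0, 4.0, 6.0, 8.0])
    bmean = data[:3].mean()  # baseline mean over the baseline window (4.0)
    rmbaseline = data - bmean  # [0, 0, 0, 2, 4]
    percent_change = rmbaseline / bmean  # [0, 0, 0, 0.5, 1.0]
    return bmean, rmbaseline, percent_change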
|
gpl-3.0
|
rdipietro/tensorflow
|
tensorflow/contrib/learn/python/learn/dataframe/transforms/in_memory_source.py
|
82
|
6157
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Sources for numpy arrays and pandas DataFrames."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.learn.python.learn.dataframe import transform
from tensorflow.contrib.learn.python.learn.dataframe.queues import feeding_functions
class BaseInMemorySource(transform.TensorFlowTransform):
"""Abstract parent class for NumpySource and PandasSource."""
def __init__(self,
data,
num_threads=None,
enqueue_size=None,
batch_size=None,
queue_capacity=None,
shuffle=False,
min_after_dequeue=None,
seed=None,
data_name="in_memory_data"):
super(BaseInMemorySource, self).__init__()
self._data = data
self._num_threads = 1 if num_threads is None else num_threads
self._batch_size = (32 if batch_size is None else batch_size)
self._enqueue_size = max(1, int(self._batch_size / self._num_threads)
) if enqueue_size is None else enqueue_size
self._queue_capacity = (self._batch_size * 10 if queue_capacity is None else
queue_capacity)
self._shuffle = shuffle
self._min_after_dequeue = (batch_size if min_after_dequeue is None else
min_after_dequeue)
self._seed = seed
self._data_name = data_name
@transform.parameter
def data(self):
return self._data
@transform.parameter
def num_threads(self):
return self._num_threads
@transform.parameter
def enqueue_size(self):
return self._enqueue_size
@transform.parameter
def batch_size(self):
return self._batch_size
@transform.parameter
def queue_capacity(self):
return self._queue_capacity
@transform.parameter
def shuffle(self):
return self._shuffle
@transform.parameter
def min_after_dequeue(self):
return self._min_after_dequeue
@transform.parameter
def seed(self):
return self._seed
@transform.parameter
def data_name(self):
return self._data_name
@property
def input_valency(self):
return 0
def _apply_transform(self, transform_input, **kwargs):
queue = feeding_functions.enqueue_data(self.data,
self.queue_capacity,
self.shuffle,
self.min_after_dequeue,
num_threads=self.num_threads,
seed=self.seed,
name=self.data_name,
enqueue_size=self.enqueue_size,
num_epochs=kwargs.get("num_epochs"))
dequeued = queue.dequeue_many(self.batch_size)
# TODO(jamieas): dequeue and dequeue_many will soon return a list regardless
# of the number of enqueued tensors. Remove the following once that change
# is in place.
if not isinstance(dequeued, (tuple, list)):
dequeued = (dequeued,)
# pylint: disable=not-callable
return self.return_type(*dequeued)
class NumpySource(BaseInMemorySource):
"""A zero-input Transform that produces a single column from a numpy array."""
@property
def name(self):
return "NumpySource"
@property
def _output_names(self):
return ("index", "value")
class OrderedDictNumpySource(BaseInMemorySource):
"""A zero-input Transform that produces Series from a dict of numpy arrays."""
def __init__(self,
ordered_dict_of_arrays,
num_threads=None,
enqueue_size=None,
batch_size=None,
queue_capacity=None,
shuffle=False,
min_after_dequeue=None,
seed=None,
data_name="pandas_data"):
if "index" in ordered_dict_of_arrays.keys():
raise ValueError("Column name `index` is reserved.")
super(OrderedDictNumpySource, self).__init__(ordered_dict_of_arrays,
num_threads, enqueue_size,
batch_size, queue_capacity,
shuffle, min_after_dequeue,
seed, data_name)
@property
def name(self):
return "OrderedDictNumpySource"
@property
def _output_names(self):
return tuple(["index"] + list(self._data.keys()))
class PandasSource(BaseInMemorySource):
"""A zero-input Transform that produces Series from a DataFrame."""
def __init__(self,
dataframe,
num_threads=None,
enqueue_size=None,
batch_size=None,
queue_capacity=None,
shuffle=False,
min_after_dequeue=None,
seed=None,
data_name="pandas_data"):
if "index" in dataframe.columns:
raise ValueError("Column name `index` is reserved.")
super(PandasSource, self).__init__(dataframe, num_threads, enqueue_size,
batch_size, queue_capacity, shuffle,
min_after_dequeue, seed, data_name)
@property
def name(self):
return "PandasSource"
@property
def _output_names(self):
return tuple(["index"] + self._data.columns.tolist())
|
apache-2.0
|
pratapvardhan/pandas
|
pandas/core/sorting.py
|
2
|
16139
|
""" miscellaneous sorting / groupby utilities """
import numpy as np
from pandas.compat import long, string_types, PY3
from pandas.core.dtypes.common import (
_ensure_platform_int,
_ensure_int64,
is_list_like,
is_categorical_dtype)
from pandas.core.dtypes.cast import infer_dtype_from_array
from pandas.core.dtypes.missing import isna
import pandas.core.algorithms as algorithms
from pandas._libs import lib, algos, hashtable
from pandas._libs.hashtable import unique_label_indices
_INT64_MAX = np.iinfo(np.int64).max
def get_group_index(labels, shape, sort, xnull):
"""
For the particular label_list, gets the offsets into the hypothetical list
representing the totally ordered cartesian product of all possible label
combinations, *as long as* this space fits within int64 bounds;
otherwise, though group indices identify unique combinations of
labels, they cannot be deconstructed.
- If `sort`, rank of returned ids preserve lexical ranks of labels.
i.e. returned id's can be used to do lexical sort on labels;
- If `xnull` nulls (-1 labels) are passed through.
Parameters
----------
labels: sequence of arrays
Integers identifying levels at each location
shape: sequence of ints same length as labels
Number of unique levels at each location
sort: boolean
If the ranks of returned ids should match lexical ranks of labels
xnull: boolean
If true nulls are excluded. i.e. -1 values in the labels are
passed through
Returns
-------
An array of type int64 where two elements are equal if their corresponding
    labels are equal at all locations.
"""
def _int64_cut_off(shape):
acc = long(1)
for i, mul in enumerate(shape):
acc *= long(mul)
if not acc < _INT64_MAX:
return i
return len(shape)
def maybe_lift(lab, size):
# promote nan values (assigned -1 label in lab array)
# so that all output values are non-negative
return (lab + 1, size + 1) if (lab == -1).any() else (lab, size)
labels = map(_ensure_int64, labels)
if not xnull:
labels, shape = map(list, zip(*map(maybe_lift, labels, shape)))
labels = list(labels)
shape = list(shape)
# Iteratively process all the labels in chunks sized so less
# than _INT64_MAX unique int ids will be required for each chunk
while True:
# how many levels can be done without overflow:
nlev = _int64_cut_off(shape)
# compute flat ids for the first `nlev` levels
stride = np.prod(shape[1:nlev], dtype='i8')
out = stride * labels[0].astype('i8', subok=False, copy=False)
for i in range(1, nlev):
if shape[i] == 0:
stride = 0
else:
stride //= shape[i]
out += labels[i] * stride
if xnull: # exclude nulls
mask = labels[0] == -1
for lab in labels[1:nlev]:
mask |= lab == -1
out[mask] = -1
if nlev == len(shape): # all levels done!
break
# compress what has been done so far in order to avoid overflow
# to retain lexical ranks, obs_ids should be sorted
comp_ids, obs_ids = compress_group_index(out, sort=sort)
labels = [comp_ids] + labels[nlev:]
shape = [len(obs_ids)] + shape[nlev:]
return out
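def _get_group_index_demo():
    """
    Editorial sketch (not part of pandas): a worked example for
    get_group_index above. With two label arrays and shape (2, 3), the flat id
    of a location is label0 * 3 + label1, so labels ([0, 1, 1], [0, 2, 1])
    map to [0, 5, 4].
    """
    labels = [np.array([0, 1, 1]), np.array([0, 2, 1])]
    ids = get_group_index(labels, shape=[2, 3], sort=True, xnull=True)
    assert list(ids) == [0, 5, 4]
    return ids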
def get_compressed_ids(labels, sizes):
"""
Group_index is offsets into cartesian product of all possible labels. This
space can be huge, so this function compresses it, by computing offsets
(comp_ids) into the list of unique labels (obs_group_ids).
Parameters
----------
labels : list of label arrays
sizes : list of size of the levels
Returns
-------
tuple of (comp_ids, obs_group_ids)
"""
ids = get_group_index(labels, sizes, sort=True, xnull=False)
return compress_group_index(ids, sort=True)
def is_int64_overflow_possible(shape):
the_prod = long(1)
for x in shape:
the_prod *= long(x)
return the_prod >= _INT64_MAX
def decons_group_index(comp_labels, shape):
# reconstruct labels
if is_int64_overflow_possible(shape):
# at some point group indices are factorized,
# and may not be deconstructed here! wrong path!
raise ValueError('cannot deconstruct factorized group indices!')
label_list = []
factor = 1
y = 0
x = comp_labels
for i in reversed(range(len(shape))):
labels = (x - y) % (factor * shape[i]) // factor
np.putmask(labels, comp_labels < 0, -1)
label_list.append(labels)
y = labels * factor
factor *= shape[i]
return label_list[::-1]
def decons_obs_group_ids(comp_ids, obs_ids, shape, labels, xnull):
"""
reconstruct labels from observed group ids
Parameters
----------
xnull: boolean,
if nulls are excluded; i.e. -1 labels are passed through
"""
if not xnull:
lift = np.fromiter(((a == -1).any() for a in labels), dtype='i8')
shape = np.asarray(shape, dtype='i8') + lift
if not is_int64_overflow_possible(shape):
# obs ids are deconstructable! take the fast route!
out = decons_group_index(obs_ids, shape)
return out if xnull or not lift.any() \
else [x - y for x, y in zip(out, lift)]
i = unique_label_indices(comp_ids)
i8copy = lambda a: a.astype('i8', subok=False, copy=True)
return [i8copy(lab[i]) for lab in labels]
def indexer_from_factorized(labels, shape, compress=True):
ids = get_group_index(labels, shape, sort=True, xnull=False)
if not compress:
ngroups = (ids.size and ids.max()) + 1
else:
ids, obs = compress_group_index(ids, sort=True)
ngroups = len(obs)
return get_group_index_sorter(ids, ngroups)
def lexsort_indexer(keys, orders=None, na_position='last'):
from pandas.core.arrays import Categorical
labels = []
shape = []
if isinstance(orders, bool):
orders = [orders] * len(keys)
elif orders is None:
orders = [True] * len(keys)
for key, order in zip(keys, orders):
# we are already a Categorical
if is_categorical_dtype(key):
c = key
# create the Categorical
else:
c = Categorical(key, ordered=True)
if na_position not in ['last', 'first']:
raise ValueError('invalid na_position: {!r}'.format(na_position))
n = len(c.categories)
codes = c.codes.copy()
mask = (c.codes == -1)
if order: # ascending
if na_position == 'last':
codes = np.where(mask, n, codes)
elif na_position == 'first':
codes += 1
else: # not order means descending
if na_position == 'last':
codes = np.where(mask, n, n - codes - 1)
elif na_position == 'first':
codes = np.where(mask, 0, n - codes)
if mask.any():
n += 1
shape.append(n)
labels.append(codes)
return indexer_from_factorized(labels, shape)
def nargsort(items, kind='quicksort', ascending=True, na_position='last'):
"""
This is intended to be a drop-in replacement for np.argsort which
handles NaNs. It adds ascending and na_position parameters.
GH #6399, #5231
"""
# specially handle Categorical
if is_categorical_dtype(items):
return items.argsort(ascending=ascending, kind=kind)
items = np.asanyarray(items)
idx = np.arange(len(items))
mask = isna(items)
non_nans = items[~mask]
non_nan_idx = idx[~mask]
nan_idx = np.nonzero(mask)[0]
if not ascending:
non_nans = non_nans[::-1]
non_nan_idx = non_nan_idx[::-1]
indexer = non_nan_idx[non_nans.argsort(kind=kind)]
if not ascending:
indexer = indexer[::-1]
# Finally, place the NaNs at the end or the beginning according to
# na_position
if na_position == 'last':
indexer = np.concatenate([indexer, nan_idx])
elif na_position == 'first':
indexer = np.concatenate([nan_idx, indexer])
else:
raise ValueError('invalid na_position: {!r}'.format(na_position))
return indexer
class _KeyMapper(object):
"""
Ease my suffering. Map compressed group id -> key tuple
"""
def __init__(self, comp_ids, ngroups, levels, labels):
self.levels = levels
self.labels = labels
self.comp_ids = comp_ids.astype(np.int64)
self.k = len(labels)
self.tables = [hashtable.Int64HashTable(ngroups)
for _ in range(self.k)]
self._populate_tables()
def _populate_tables(self):
for labs, table in zip(self.labels, self.tables):
table.map(self.comp_ids, labs.astype(np.int64))
def get_key(self, comp_id):
return tuple(level[table.get_item(comp_id)]
for table, level in zip(self.tables, self.levels))
def get_flattened_iterator(comp_ids, ngroups, levels, labels):
# provide "flattened" iterator for multi-group setting
mapper = _KeyMapper(comp_ids, ngroups, levels, labels)
return [mapper.get_key(i) for i in range(ngroups)]
def get_indexer_dict(label_list, keys):
""" return a diction of {labels} -> {indexers} """
shape = list(map(len, keys))
group_index = get_group_index(label_list, shape, sort=True, xnull=True)
ngroups = ((group_index.size and group_index.max()) + 1) \
if is_int64_overflow_possible(shape) \
else np.prod(shape, dtype='i8')
sorter = get_group_index_sorter(group_index, ngroups)
sorted_labels = [lab.take(sorter) for lab in label_list]
group_index = group_index.take(sorter)
return lib.indices_fast(sorter, group_index, keys, sorted_labels)
# ----------------------------------------------------------------------
# sorting levels...cleverly?
def get_group_index_sorter(group_index, ngroups):
"""
algos.groupsort_indexer implements `counting sort` and it is at least
O(ngroups), where
ngroups = prod(shape)
shape = map(len, keys)
that is, linear in the number of combinations (cartesian product) of unique
values of groupby keys. This can be huge when doing multi-key groupby.
np.argsort(kind='mergesort') is O(count x log(count)) where count is the
length of the data-frame;
Both algorithms are `stable` sort and that is necessary for correctness of
groupby operations. e.g. consider:
df.groupby(key)[col].transform('first')
"""
count = len(group_index)
alpha = 0.0 # taking complexities literally; there may be
beta = 1.0 # some room for fine-tuning these parameters
do_groupsort = (count > 0 and ((alpha + beta * ngroups) <
(count * np.log(count))))
if do_groupsort:
sorter, _ = algos.groupsort_indexer(_ensure_int64(group_index),
ngroups)
return _ensure_platform_int(sorter)
else:
return group_index.argsort(kind='mergesort')
def compress_group_index(group_index, sort=True):
"""
Group_index is offsets into cartesian product of all possible labels. This
space can be huge, so this function compresses it, by computing offsets
(comp_ids) into the list of unique labels (obs_group_ids).
"""
size_hint = min(len(group_index), hashtable._SIZE_HINT_LIMIT)
table = hashtable.Int64HashTable(size_hint)
group_index = _ensure_int64(group_index)
# note, group labels come out ascending (ie, 1,2,3 etc)
comp_ids, obs_group_ids = table.get_labels_groupby(group_index)
if sort and len(obs_group_ids) > 0:
obs_group_ids, comp_ids = _reorder_by_uniques(obs_group_ids, comp_ids)
return comp_ids, obs_group_ids
def _reorder_by_uniques(uniques, labels):
# sorter is index where elements ought to go
sorter = uniques.argsort()
# reverse_indexer is where elements came from
reverse_indexer = np.empty(len(sorter), dtype=np.int64)
reverse_indexer.put(sorter, np.arange(len(sorter)))
mask = labels < 0
# move labels to right locations (ie, unsort ascending labels)
labels = algorithms.take_nd(reverse_indexer, labels, allow_fill=False)
np.putmask(labels, mask, -1)
# sort observed ids
uniques = algorithms.take_nd(uniques, sorter, allow_fill=False)
return uniques, labels
def safe_sort(values, labels=None, na_sentinel=-1, assume_unique=False):
"""
Sort ``values`` and reorder corresponding ``labels``.
``values`` should be unique if ``labels`` is not None.
Safe for use with mixed types (int, str), orders ints before strs.
.. versionadded:: 0.19.0
Parameters
----------
values : list-like
Sequence; must be unique if ``labels`` is not None.
labels : list_like
Indices to ``values``. All out of bound indices are treated as
"not found" and will be masked with ``na_sentinel``.
na_sentinel : int, default -1
Value in ``labels`` to mark "not found".
Ignored when ``labels`` is None.
assume_unique : bool, default False
When True, ``values`` are assumed to be unique, which can speed up
the calculation. Ignored when ``labels`` is None.
Returns
-------
ordered : ndarray
Sorted ``values``
new_labels : ndarray
Reordered ``labels``; returned when ``labels`` is not None.
Raises
------
TypeError
* If ``values`` is not list-like or if ``labels`` is neither None
nor list-like
* If ``values`` cannot be sorted
ValueError
* If ``labels`` is not None and ``values`` contain duplicates.
"""
if not is_list_like(values):
raise TypeError("Only list-like objects are allowed to be passed to"
"safe_sort as values")
if not isinstance(values, np.ndarray):
# don't convert to string types
dtype, _ = infer_dtype_from_array(values)
values = np.asarray(values, dtype=dtype)
def sort_mixed(values):
# order ints before strings, safe in py3
str_pos = np.array([isinstance(x, string_types) for x in values],
dtype=bool)
nums = np.sort(values[~str_pos])
strs = np.sort(values[str_pos])
return np.concatenate([nums, np.asarray(strs, dtype=object)])
sorter = None
if PY3 and lib.infer_dtype(values) == 'mixed-integer':
# unorderable in py3 if mixed str/int
ordered = sort_mixed(values)
else:
try:
sorter = values.argsort()
ordered = values.take(sorter)
except TypeError:
# try this anyway
ordered = sort_mixed(values)
# labels:
if labels is None:
return ordered
if not is_list_like(labels):
raise TypeError("Only list-like objects or None are allowed to be"
"passed to safe_sort as labels")
labels = _ensure_platform_int(np.asarray(labels))
from pandas import Index
if not assume_unique and not Index(values).is_unique:
raise ValueError("values should be unique if labels is not None")
if sorter is None:
# mixed types
(hash_klass, _), values = algorithms._get_data_algo(
values, algorithms._hashtables)
t = hash_klass(len(values))
t.map_locations(values)
sorter = _ensure_platform_int(t.lookup(ordered))
reverse_indexer = np.empty(len(sorter), dtype=np.int_)
reverse_indexer.put(sorter, np.arange(len(sorter)))
mask = (labels < -len(values)) | (labels >= len(values)) | \
(labels == na_sentinel)
# (Out of bound indices will be masked with `na_sentinel` next, so we may
# deal with them here without performance loss using `mode='wrap'`.)
new_labels = reverse_indexer.take(labels, mode='wrap')
np.putmask(new_labels, mask, na_sentinel)
return ordered, _ensure_platform_int(new_labels)
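def _safe_sort_demo():
    """
    Editorial sketch (not part of pandas): a small worked example for
    safe_sort above. The values [3, 1, 2] sort to [1, 2, 3]; the labels, which
    index into the original values, are remapped so they keep pointing at the
    same elements of the sorted array.
    """
    values = np.array([3, 1, 2])
    labels = [1, 2, 0, 2]  # refers to values 1, 2, 3, 2
    ordered, new_labels = safe_sort(values, labels)
    assert list(ordered) == [1, 2, 3]
    assert list(new_labels) == [0, 1, 2, 1]  # still refers to 1, 2, 3, 2
    return ordered, new_labels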
|
bsd-3-clause
|
IDEALLab/design_embeddings_jmd_2016
|
3d_visual.py
|
1
|
1470
|
"""
Visualizes design parameters in a 3D space.
Author(s): Wei Chen ([email protected])
"""
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn.decomposition import PCA
from sklearn.metrics import mean_squared_error
import numpy as np
from sklearn.manifold import Isomap
from util import pick_k
from parametric_space import initialize
X = initialize(raw_data=1)
pca = PCA(n_components=3)
F = pca.fit_transform(X)
# Reconstruction error
X_rec = pca.inverse_transform(F)
err = mean_squared_error(X, X_rec)
print 'Reconstruction error: ', err
#k_opt = pick_k(X, 3)
#F = Isomap(n_neighbors=k_opt, n_components=3).fit_transform(X)
# 3D Plot
fig3d = plt.figure()
ax3d = fig3d.add_subplot(111, projection = '3d')
# Create cubic bounding box to simulate equal aspect ratio
max_range = np.array([F[:,0].max()-F[:,0].min(), F[:,1].max()-F[:,1].min(), F[:,2].max()-F[:,2].min()]).max()
Xb = 0.5*max_range*np.mgrid[-1:2:2,-1:2:2,-1:2:2][0].flatten() + 0.5*(F[:,0].max()+F[:,0].min())
Yb = 0.5*max_range*np.mgrid[-1:2:2,-1:2:2,-1:2:2][1].flatten() + 0.5*(F[:,1].max()+F[:,1].min())
Zb = 0.5*max_range*np.mgrid[-1:2:2,-1:2:2,-1:2:2][2].flatten() + 0.5*(F[:,2].max()+F[:,2].min())
ax3d.scatter(Xb, Yb, Zb, c='white', alpha=0)
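# A hedged alternative (assuming matplotlib >= 3.3, newer than this script
# targets): the invisible bounding-box scatter above can be replaced with
#     ax3d.set_box_aspect((1, 1, 1))
# which enforces an equal aspect ratio directly.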
ax3d.scatter(F[:,0], F[:,1], F[:,2])
import matplotlib
matplotlib.rcParams.update({'font.size': 22})
ax3d.set_xticks([])
ax3d.set_yticks([])
ax3d.set_zticks([])
plt.show()
|
mit
|
lewisc/spark-tk
|
regression-tests/sparktkregtests/testcases/models/naive_bayes_test.py
|
10
|
6233
|
# vim: set encoding=utf-8
# Copyright (c) 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Tests Naive Bayes Model against known values. """
import unittest
from sparktkregtests.lib import sparktk_test
from pyspark import SparkContext
from pyspark.mllib import classification
from pyspark.mllib.regression import LabeledPoint
class NaiveBayes(sparktk_test.SparkTKTestCase):
def setUp(self):
"""Build the frames needed for the tests."""
super(NaiveBayes, self).setUp()
dataset = self.get_file("naive_bayes.csv")
schema = [("label", int),
("f1", int),
("f2", int),
("f3", int)]
self.frame = self.context.frame.import_csv(dataset, schema=schema)
def test_model_train_empty_feature(self):
"""Test empty string for training features throws errors."""
with self.assertRaisesRegexp(Exception,
"observationColumn must not be null nor empty"):
self.context.models.classification.naive_bayes.train(self.frame,
"",
"label")
    def test_model_train_empty_label_column(self):
        """Test empty string for label column throws an error."""
with self.assertRaisesRegexp(Exception,
"labelColumn must not be null nor empty"):
self.context.models.classification.naive_bayes.train(self.frame,
"['f1', 'f2', 'f3']",
"")
def test_model_test(self):
"""Test training intializes theta, pi and labels"""
model = self.context.models.classification.naive_bayes.train(self.frame,
['f1', 'f2', 'f3'],
"label")
res = model.test(self.frame)
true_pos = float(res.confusion_matrix["Predicted_Pos"]["Actual_Pos"])
false_neg = float(res.confusion_matrix["Predicted_Neg"]["Actual_Pos"])
false_pos = float(res.confusion_matrix["Predicted_Pos"]["Actual_Neg"])
true_neg = float(res.confusion_matrix["Predicted_Neg"]["Actual_Neg"])
recall = true_pos / (false_neg + true_pos)
precision = true_pos / (false_pos + true_pos)
f_measure = float(2) / (float(1/precision) + float(1/recall))
accuracy = float(true_pos + true_neg) / self.frame.count()
self.assertAlmostEqual(res.recall, recall)
self.assertAlmostEqual(res.precision, precision)
self.assertAlmostEqual(res.f_measure, f_measure)
self.assertAlmostEqual(res.accuracy, accuracy)
def test_model_publish_bayes(self):
"""Test training intializes theta, pi and labels"""
model = self.context.models.classification.naive_bayes.train(self.frame,
['f1', 'f2', 'f3'],
"label")
file_name = self.get_name("naive_bayes")
path = model.export_to_mar(self.get_export_file(file_name))
self.assertIn("hdfs", path)
self.assertIn("naive_bayes", path)
    def test_model_predict_against_pyspark(self):
        """Compare sparktk model predictions with pyspark's naive bayes."""
# we will compare pyspark's result with sparktks for predict
# points will be an array of pyspark LabelPoints
points = []
# the location of the dataset
location = self.get_local_dataset("naive_bayes.csv")
# we have to build a dataset for pyspark
# pyspark expects an rdd of LabelPoints for
# its NaiveBayes model
with open(location, 'r') as datafile:
lines = datafile.read().split('\n')
dataset = []
# for each line, split into columns and
# create a label point object out of each line
for line in lines:
if line is not "":
line = map(int, line.split(","))
label = line[0]
features = line[1:4]
lp = LabeledPoint(label, features)
points.append(lp)
# use pyspark context to parallelize
dataframe = self.context.sc.parallelize(points)
# create a pyspark model from the data and a sparktk model
pyspark_model = classification.NaiveBayes.train(dataframe, 1.0)
model = self.context.models.classification.naive_bayes.train(self.frame,
['f1', 'f2', 'f3'],
"label")
# use our sparktk model to predict, download to pandas for
# ease of comparison
predicted_frame = model.predict(self.frame, ['f1', 'f2', 'f3'])
analysis = predicted_frame.to_pandas()
# iterate through the sparktk result and compare the prediction
# with pyspark's prediction
for index, row in analysis.iterrows():
# extract the features
features = [row["f1"], row["f2"], row["f3"]]
# use the features to get pyspark's result
pyspark_result = pyspark_model.predict(features)
self.assertEqual(row["predicted_class"], pyspark_result)
if __name__ == '__main__':
unittest.main()
|
apache-2.0
|
annoviko/pyclustering
|
pyclustering/core/tests/__init__.py
|
1
|
3091
|
"""!
@brief Unit-test runner for core wrapper.
@authors Andrei Novikov ([email protected])
@date 2014-2020
@copyright BSD-3-Clause
"""
import unittest
from pyclustering.tests.suite_holder import suite_holder
# Generate images without having a window appear.
import matplotlib
matplotlib.use('Agg')
from pyclustering.core.tests import ut_package as core_package_unit_tests
import os
import warnings
from pyclustering.core.definitions import PATH_PYCLUSTERING_CCORE_LIBRARY
from pyclustering.core.wrapper import ccore_library
class remove_library(object):
"""!
@brief Decorator for tests where ccore library should be removed.
"""
def __init__(self, call_object):
self.call_object = call_object
def __call__(self, *args):
test_result = True
try:
os.rename(PATH_PYCLUSTERING_CCORE_LIBRARY, PATH_PYCLUSTERING_CCORE_LIBRARY + "_corrupted")
warnings.filterwarnings("ignore", category=ResourceWarning)
ccore_library.initialize()
self.call_object(args)
except os.error:
warnings.warn("Test skipped: no rights to rename C/C++ pyclustering library for testing.")
return
except:
test_result = False
os.rename(PATH_PYCLUSTERING_CCORE_LIBRARY + "_corrupted", PATH_PYCLUSTERING_CCORE_LIBRARY)
ccore_library.initialize()
warnings.filterwarnings("default", category=ResourceWarning)
if test_result is False:
raise AssertionError("Test failed")
class corrupt_library(object):
"""!
@brief Decorator for tests where ccore library should be corrupted.
"""
def __init__(self, call_object):
self.call_object = call_object
def __create_corrupted_library(self, filepath):
with open(filepath, 'wb') as binary_file_descriptor:
binary_file_descriptor.write(bytes("corrupted binary library", 'UTF-8'))
def __remove_corrupted_library(self, filepath):
os.remove(filepath)
def __call__(self, *args):
try:
os.rename(PATH_PYCLUSTERING_CCORE_LIBRARY, PATH_PYCLUSTERING_CCORE_LIBRARY + "_corrupted")
except os.error:
warnings.warn("Test skipped: no rights to rename C/C++ pyclustering library for testing.")
return
self.__create_corrupted_library(PATH_PYCLUSTERING_CCORE_LIBRARY)
warnings.filterwarnings("ignore", category=ResourceWarning)
ccore_library.initialize()
self.call_object(args)
self.__remove_corrupted_library(PATH_PYCLUSTERING_CCORE_LIBRARY)
os.rename(PATH_PYCLUSTERING_CCORE_LIBRARY + "_corrupted", PATH_PYCLUSTERING_CCORE_LIBRARY)
ccore_library.initialize()
warnings.filterwarnings("default", category=ResourceWarning)
class core_tests(suite_holder):
def __init__(self):
super().__init__()
core_tests.fill_suite(self.get_suite())
@staticmethod
def fill_suite(core_suite):
core_suite.addTests(unittest.TestLoader().loadTestsFromModule(core_package_unit_tests))
|
gpl-3.0
|
cjayb/mne-python
|
mne/decoding/search_light.py
|
6
|
27438
|
# Author: Jean-Remi King <[email protected]>
#
# License: BSD (3-clause)
import numpy as np
from .mixin import TransformerMixin
from .base import BaseEstimator, _check_estimator
from ..fixes import _get_check_scoring
from ..parallel import parallel_func
from ..utils import (_validate_type, array_split_idx, ProgressBar,
verbose, fill_doc)
@fill_doc
class SlidingEstimator(BaseEstimator, TransformerMixin):
"""Search Light.
Fit, predict and score a series of models to each subset of the dataset
along the last dimension. Each entry in the last dimension is referred
to as a task.
Parameters
----------
base_estimator : object
The base estimator to iteratively fit on a subset of the dataset.
scoring : callable, str, default None
Score function (or loss function) with signature
``score_func(y, y_pred, **kwargs)``.
Note that the predict_method is automatically identified if scoring is
a string (e.g. scoring="roc_auc" calls predict_proba) but is not
automatically set if scoring is a callable (e.g.
scoring=sklearn.metrics.roc_auc_score).
%(n_jobs)s
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
%(verbose)s
Attributes
----------
estimators_ : array-like, shape (n_tasks,)
List of fitted scikit-learn estimators (one per task).
"""
def __init__(self, base_estimator, scoring=None, n_jobs=1,
verbose=None): # noqa: D102
_check_estimator(base_estimator)
self._estimator_type = getattr(base_estimator, "_estimator_type", None)
self.base_estimator = base_estimator
self.n_jobs = n_jobs
self.scoring = scoring
self.verbose = verbose
_validate_type(self.n_jobs, 'int', 'n_jobs')
def __repr__(self): # noqa: D105
repr_str = '<' + super(SlidingEstimator, self).__repr__()
if hasattr(self, 'estimators_'):
repr_str = repr_str[:-1]
repr_str += ', fitted with %i estimators' % len(self.estimators_)
return repr_str + '>'
@verbose # to use class value
def fit(self, X, y, **fit_params):
"""Fit a series of independent estimators to the dataset.
Parameters
----------
X : array, shape (n_samples, nd_features, n_tasks)
The training input samples. For each data slice, a clone estimator
is fitted independently. The feature dimension can be
multidimensional e.g.
X.shape = (n_samples, n_features_1, n_features_2, n_tasks).
y : array, shape (n_samples,) | (n_samples, n_targets)
The target values.
**fit_params : dict of string -> object
Parameters to pass to the fit method of the estimator.
Returns
-------
self : object
Return self.
"""
self._check_Xy(X, y)
self.estimators_ = list()
self.fit_params = fit_params
# For fitting, the parallelization is across estimators.
parallel, p_func, n_jobs = parallel_func(_sl_fit, self.n_jobs,
verbose=False)
n_jobs = min(n_jobs, X.shape[-1])
mesg = 'Fitting %s' % (self.__class__.__name__,)
with ProgressBar(X.shape[-1], mesg=mesg) as pb:
estimators = parallel(
p_func(self.base_estimator, split, y, pb.subset(pb_idx),
**fit_params)
for pb_idx, split in array_split_idx(X, n_jobs, axis=-1))
# Each parallel job can have a different number of training estimators
# We can't directly concatenate them because of sklearn's Bagging API
# (see scikit-learn #9720)
self.estimators_ = np.empty(X.shape[-1], dtype=object)
idx = 0
for job_estimators in estimators:
for est in job_estimators:
self.estimators_[idx] = est
idx += 1
return self
def fit_transform(self, X, y, **fit_params):
"""Fit and transform a series of independent estimators to the dataset.
Parameters
----------
X : array, shape (n_samples, nd_features, n_tasks)
The training input samples. For each task, a clone estimator
is fitted independently. The feature dimension can be
multidimensional e.g.
X.shape = (n_samples, n_features_1, n_features_2, n_estimators)
y : array, shape (n_samples,) | (n_samples, n_targets)
The target values.
**fit_params : dict of string -> object
Parameters to pass to the fit method of the estimator.
Returns
-------
y_pred : array, shape (n_samples, n_tasks) | (n_samples, n_tasks, n_targets)
The predicted values for each estimator.
""" # noqa: E501
return self.fit(X, y, **fit_params).transform(X)
@verbose # to use the class value
def _transform(self, X, method):
"""Aux. function to make parallel predictions/transformation."""
self._check_Xy(X)
method = _check_method(self.base_estimator, method)
if X.shape[-1] != len(self.estimators_):
raise ValueError('The number of estimators does not match '
'X.shape[-1]')
# For predictions/transforms the parallelization is across the data and
# not across the estimators to avoid memory load.
mesg = 'Transforming %s' % (self.__class__.__name__,)
parallel, p_func, n_jobs = parallel_func(
_sl_transform, self.n_jobs, verbose=False)
n_jobs = min(n_jobs, X.shape[-1])
X_splits = np.array_split(X, n_jobs, axis=-1)
idx, est_splits = zip(*array_split_idx(self.estimators_, n_jobs))
with ProgressBar(X.shape[-1], mesg=mesg) as pb:
y_pred = parallel(p_func(est, x, method, pb.subset(pb_idx))
for pb_idx, est, x in zip(
idx, est_splits, X_splits))
y_pred = np.concatenate(y_pred, axis=1)
return y_pred
def transform(self, X):
"""Transform each data slice/task with a series of independent estimators.
The number of tasks in X should match the number of tasks/estimators
given at fit time.
Parameters
----------
X : array, shape (n_samples, nd_features, n_tasks)
The input samples. For each data slice/task, the corresponding
estimator makes a transformation of the data, e.g.
``[estimators[ii].transform(X[..., ii]) for ii in range(n_estimators)]``.
The feature dimension can be multidimensional e.g.
X.shape = (n_samples, n_features_1, n_features_2, n_tasks).
Returns
-------
Xt : array, shape (n_samples, n_estimators)
The transformed values generated by each estimator.
""" # noqa: E501
return self._transform(X, 'transform')
def predict(self, X):
"""Predict each data slice/task with a series of independent estimators.
The number of tasks in X should match the number of tasks/estimators
given at fit time.
Parameters
----------
X : array, shape (n_samples, nd_features, n_tasks)
The input samples. For each data slice, the corresponding estimator
makes the sample predictions, e.g.:
``[estimators[ii].predict(X[..., ii]) for ii in range(n_estimators)]``.
The feature dimension can be multidimensional e.g.
X.shape = (n_samples, n_features_1, n_features_2, n_tasks).
Returns
-------
y_pred : array, shape (n_samples, n_estimators) | (n_samples, n_tasks, n_targets)
Predicted values for each estimator/data slice.
""" # noqa: E501
return self._transform(X, 'predict')
def predict_proba(self, X):
"""Predict each data slice with a series of independent estimators.
The number of tasks in X should match the number of tasks/estimators
given at fit time.
Parameters
----------
X : array, shape (n_samples, nd_features, n_tasks)
The input samples. For each data slice, the corresponding estimator
makes the sample probabilistic predictions, e.g.:
``[estimators[ii].predict_proba(X[..., ii]) for ii in range(n_estimators)]``.
The feature dimension can be multidimensional e.g.
X.shape = (n_samples, n_features_1, n_features_2, n_tasks).
Returns
-------
y_pred : array, shape (n_samples, n_tasks, n_classes)
Predicted probabilities for each estimator/data slice/task.
""" # noqa: E501
return self._transform(X, 'predict_proba')
def decision_function(self, X):
"""Estimate distances of each data slice to the hyperplanes.
Parameters
----------
X : array, shape (n_samples, nd_features, n_tasks)
The input samples. For each data slice, the corresponding estimator
outputs the distance to the hyperplane, e.g.:
``[estimators[ii].decision_function(X[..., ii]) for ii in range(n_estimators)]``.
The feature dimension can be multidimensional e.g.
X.shape = (n_samples, n_features_1, n_features_2, n_estimators).
Returns
-------
y_pred : array, shape (n_samples, n_estimators, n_classes * (n_classes-1) // 2)
Predicted distances for each estimator/data slice.
Notes
-----
This requires base_estimator to have a ``decision_function`` method.
""" # noqa: E501
return self._transform(X, 'decision_function')
def _check_Xy(self, X, y=None):
"""Aux. function to check input data."""
if y is not None:
if len(X) != len(y) or len(y) < 1:
raise ValueError('X and y must have the same length.')
if X.ndim < 3:
raise ValueError('X must have at least 3 dimensions.')
def score(self, X, y):
"""Score each estimator on each task.
The number of tasks in X should match the number of tasks/estimators
given at fit time, i.e. we need
``X.shape[-1] == len(self.estimators_)``.
Parameters
----------
X : array, shape (n_samples, nd_features, n_tasks)
The input samples. For each data slice, the corresponding estimator
scores the prediction, e.g.:
``[estimators[ii].score(X[..., ii], y) for ii in range(n_estimators)]``.
The feature dimension can be multidimensional e.g.
X.shape = (n_samples, n_features_1, n_features_2, n_tasks).
y : array, shape (n_samples,) | (n_samples, n_targets)
The target values.
Returns
-------
        score : array, shape (n_tasks,)
Score for each estimator/task.
""" # noqa: E501
check_scoring = _get_check_scoring()
self._check_Xy(X)
if X.shape[-1] != len(self.estimators_):
raise ValueError('The number of estimators does not match '
'X.shape[-1]')
scoring = check_scoring(self.base_estimator, self.scoring)
y = _fix_auc(scoring, y)
# For predictions/transforms the parallelization is across the data and
# not across the estimators to avoid memory load.
parallel, p_func, n_jobs = parallel_func(_sl_score, self.n_jobs)
n_jobs = min(n_jobs, X.shape[-1])
X_splits = np.array_split(X, n_jobs, axis=-1)
est_splits = np.array_split(self.estimators_, n_jobs)
score = parallel(p_func(est, scoring, x, y)
for (est, x) in zip(est_splits, X_splits))
score = np.concatenate(score, axis=0)
return score
@property
def classes_(self):
if not hasattr(self.estimators_[0], 'classes_'):
raise AttributeError('classes_ attribute available only if '
'base_estimator has it, and estimator %s does'
' not' % (self.estimators_[0],))
return self.estimators_[0].classes_
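# A minimal usage sketch (illustrative only, simulated data; not part of the
# module): fit one scikit-learn classifier per time point of an
# (n_epochs, n_channels, n_times) array and score each of them.
#
#     import numpy as np
#     from sklearn.linear_model import LogisticRegression
#     from mne.decoding import SlidingEstimator
#
#     rng = np.random.RandomState(0)
#     X = rng.randn(100, 32, 50)            # epochs x channels x time points
#     y = rng.randint(0, 2, 100)            # binary labels
#     sl = SlidingEstimator(LogisticRegression(), scoring='roc_auc', n_jobs=1)
#     sl.fit(X, y)
#     scores = sl.score(X, y)               # shape (n_times,), one AUC per task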
def _sl_fit(estimator, X, y, pb, **fit_params):
"""Aux. function to fit SlidingEstimator in parallel.
Fit a clone estimator to each slice of data.
Parameters
----------
base_estimator : object
The base estimator to iteratively fit on a subset of the dataset.
X : array, shape (n_samples, nd_features, n_estimators)
The target data. The feature dimension can be multidimensional e.g.
X.shape = (n_samples, n_features_1, n_features_2, n_estimators)
y : array, shape (n_sample, )
The target values.
fit_params : dict | None
Parameters to pass to the fit method of the estimator.
Returns
-------
estimators_ : list of estimators
The fitted estimators.
"""
from sklearn.base import clone
estimators_ = list()
for ii in range(X.shape[-1]):
est = clone(estimator)
est.fit(X[..., ii], y, **fit_params)
estimators_.append(est)
pb.update(ii + 1)
return estimators_
def _sl_transform(estimators, X, method, pb):
"""Aux. function to transform SlidingEstimator in parallel.
Applies transform/predict/decision_function etc for each slice of data.
Parameters
----------
estimators : list of estimators
The fitted estimators.
X : array, shape (n_samples, nd_features, n_estimators)
The target data. The feature dimension can be multidimensional e.g.
X.shape = (n_samples, n_features_1, n_features_2, n_estimators)
method : str
The estimator method to use (e.g. 'predict', 'transform').
Returns
-------
y_pred : array, shape (n_samples, n_estimators, n_classes * (n_classes-1) // 2)
The transformations for each slice of data.
""" # noqa: E501
for ii, est in enumerate(estimators):
transform = getattr(est, method)
_y_pred = transform(X[..., ii])
# Initialize array of predictions on the first transform iteration
if ii == 0:
y_pred = _sl_init_pred(_y_pred, X)
y_pred[:, ii, ...] = _y_pred
pb.update(ii + 1)
return y_pred
def _sl_init_pred(y_pred, X):
"""Aux. function to SlidingEstimator to initialize y_pred."""
n_sample, n_tasks = X.shape[0], X.shape[-1]
y_pred = np.zeros((n_sample, n_tasks) + y_pred.shape[1:], y_pred.dtype)
return y_pred
def _sl_score(estimators, scoring, X, y):
"""Aux. function to score SlidingEstimator in parallel.
Predict and score each slice of data.
Parameters
----------
estimators : list, shape (n_tasks,)
The fitted estimators.
X : array, shape (n_samples, nd_features, n_tasks)
The target data. The feature dimension can be multidimensional e.g.
X.shape = (n_samples, n_features_1, n_features_2, n_tasks)
scoring : callable, str or None
If scoring is None (default), the predictions are internally
generated by estimator.score(). Else, we must first get the
predictions to pass them to ad-hoc scorer.
y : array, shape (n_samples,) | (n_samples, n_targets)
The target values.
Returns
-------
score : array, shape (n_tasks,)
The score for each task / slice of data.
"""
n_tasks = X.shape[-1]
score = np.zeros(n_tasks)
for ii, est in enumerate(estimators):
score[ii] = scoring(est, X[..., ii], y)
return score
def _check_method(estimator, method):
"""Check that an estimator has the method attribute.
If method == 'transform' and estimator does not have 'transform', use
'predict' instead.
"""
if method == 'transform' and not hasattr(estimator, 'transform'):
method = 'predict'
if not hasattr(estimator, method):
        raise ValueError('base_estimator does not have `%s` method.' % method)
return method
@fill_doc
class GeneralizingEstimator(SlidingEstimator):
"""Generalization Light.
Fit a search-light along the last dimension and use them to apply a
systematic cross-tasks generalization.
Parameters
----------
base_estimator : object
The base estimator to iteratively fit on a subset of the dataset.
scoring : callable | str | None
Score function (or loss function) with signature
``score_func(y, y_pred, **kwargs)``.
Note that the predict_method is automatically identified if scoring is
a string (e.g. scoring="roc_auc" calls predict_proba) but is not
automatically set if scoring is a callable (e.g.
scoring=sklearn.metrics.roc_auc_score).
%(n_jobs)s
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
%(verbose)s
"""
def __repr__(self): # noqa: D105
repr_str = super(GeneralizingEstimator, self).__repr__()
if hasattr(self, 'estimators_'):
repr_str = repr_str[:-1]
repr_str += ', fitted with %i estimators>' % len(self.estimators_)
return repr_str
@verbose # use class value
def _transform(self, X, method):
"""Aux. function to make parallel predictions/transformation."""
self._check_Xy(X)
method = _check_method(self.base_estimator, method)
mesg = 'Transforming %s' % (self.__class__.__name__,)
parallel, p_func, n_jobs = parallel_func(
_gl_transform, self.n_jobs, verbose=False)
n_jobs = min(n_jobs, X.shape[-1])
with ProgressBar(X.shape[-1] * len(self.estimators_), mesg=mesg) as pb:
y_pred = parallel(
p_func(self.estimators_, x_split, method, pb.subset(pb_idx))
for pb_idx, x_split in array_split_idx(
X, n_jobs, axis=-1, n_per_split=len(self.estimators_)))
y_pred = np.concatenate(y_pred, axis=2)
return y_pred
def transform(self, X):
"""Transform each data slice with all possible estimators.
Parameters
----------
X : array, shape (n_samples, nd_features, n_slices)
The input samples. For estimator the corresponding data slice is
used to make a transformation. The feature dimension can be
multidimensional e.g.
X.shape = (n_samples, n_features_1, n_features_2, n_estimators).
Returns
-------
Xt : array, shape (n_samples, n_estimators, n_slices)
The transformed values generated by each estimator.
"""
return self._transform(X, 'transform')
def predict(self, X):
"""Predict each data slice with all possible estimators.
Parameters
----------
X : array, shape (n_samples, nd_features, n_slices)
The training input samples. For each data slice, a fitted estimator
predicts each slice of the data independently. The feature
dimension can be multidimensional e.g.
X.shape = (n_samples, n_features_1, n_features_2, n_estimators).
Returns
-------
y_pred : array, shape (n_samples, n_estimators, n_slices) | (n_samples, n_estimators, n_slices, n_targets)
The predicted values for each estimator.
""" # noqa: E501
return self._transform(X, 'predict')
def predict_proba(self, X):
"""Estimate probabilistic estimates of each data slice with all possible estimators.
Parameters
----------
X : array, shape (n_samples, nd_features, n_slices)
The training input samples. For each data slice, a fitted estimator
predicts a slice of the data. The feature dimension can be
multidimensional e.g.
``X.shape = (n_samples, n_features_1, n_features_2, n_estimators)``.
Returns
-------
y_pred : array, shape (n_samples, n_estimators, n_slices, n_classes)
The predicted values for each estimator.
Notes
-----
This requires base_estimator to have a `predict_proba` method.
""" # noqa: E501
return self._transform(X, 'predict_proba')
def decision_function(self, X):
"""Estimate distances of each data slice to all hyperplanes.
Parameters
----------
X : array, shape (n_samples, nd_features, n_slices)
The training input samples. Each estimator outputs the distance to
its hyperplane, e.g.:
``[estimators[ii].decision_function(X[..., ii]) for ii in range(n_estimators)]``.
The feature dimension can be multidimensional e.g.
``X.shape = (n_samples, n_features_1, n_features_2, n_estimators)``.
Returns
-------
y_pred : array, shape (n_samples, n_estimators, n_slices, n_classes * (n_classes-1) // 2)
The predicted values for each estimator.
Notes
-----
This requires base_estimator to have a ``decision_function`` method.
""" # noqa: E501
return self._transform(X, 'decision_function')
@verbose # to use class value
def score(self, X, y):
"""Score each of the estimators on the tested dimensions.
Parameters
----------
X : array, shape (n_samples, nd_features, n_slices)
The input samples. For each data slice, the corresponding estimator
scores the prediction, e.g.:
``[estimators[ii].score(X[..., ii], y) for ii in range(n_slices)]``.
The feature dimension can be multidimensional e.g.
``X.shape = (n_samples, n_features_1, n_features_2, n_estimators)``.
y : array, shape (n_samples,) | (n_samples, n_targets)
The target values.
Returns
-------
        score : array, shape (n_estimators, n_slices)
Score for each estimator / data slice couple.
""" # noqa: E501
check_scoring = _get_check_scoring()
self._check_Xy(X)
# For predictions/transforms the parallelization is across the data and
# not across the estimators to avoid memory load.
mesg = 'Scoring %s' % (self.__class__.__name__,)
parallel, p_func, n_jobs = parallel_func(_gl_score, self.n_jobs,
verbose=False)
n_jobs = min(n_jobs, X.shape[-1])
scoring = check_scoring(self.base_estimator, self.scoring)
y = _fix_auc(scoring, y)
with ProgressBar(X.shape[-1] * len(self.estimators_), mesg=mesg) as pb:
score = parallel(p_func(self.estimators_, scoring, x, y,
pb.subset(pb_idx))
for pb_idx, x in array_split_idx(
X, n_jobs, axis=-1,
n_per_split=len(self.estimators_)))
score = np.concatenate(score, axis=1)
return score
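# A minimal usage sketch (illustrative only; not part of the module):
# GeneralizingEstimator scores every (train time, test time) pair, yielding a
# temporal generalization matrix. Reusing X, y and LogisticRegression from the
# SlidingEstimator sketch above:
#
#     gen = GeneralizingEstimator(LogisticRegression(), scoring='roc_auc')
#     gen.fit(X, y)
#     tg_scores = gen.score(X, y)           # shape (n_times, n_times)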
def _gl_transform(estimators, X, method, pb):
"""Transform the dataset.
This will apply each estimator to all slices of the data.
Parameters
----------
X : array, shape (n_samples, nd_features, n_slices)
The training input samples. For each data slice, a clone estimator
is fitted independently. The feature dimension can be multidimensional
e.g. X.shape = (n_samples, n_features_1, n_features_2, n_estimators)
Returns
-------
Xt : array, shape (n_samples, n_slices)
The transformed values generated by each estimator.
"""
n_sample, n_iter = X.shape[0], X.shape[-1]
for ii, est in enumerate(estimators):
# stack generalized data for faster prediction
X_stack = X.transpose(np.r_[0, X.ndim - 1, range(1, X.ndim - 1)])
X_stack = X_stack.reshape(np.r_[n_sample * n_iter, X_stack.shape[2:]])
transform = getattr(est, method)
_y_pred = transform(X_stack)
# unstack generalizations
if _y_pred.ndim == 2:
_y_pred = np.reshape(_y_pred, [n_sample, n_iter, _y_pred.shape[1]])
else:
shape = np.r_[n_sample, n_iter, _y_pred.shape[1:]].astype(int)
_y_pred = np.reshape(_y_pred, shape)
# Initialize array of predictions on the first transform iteration
if ii == 0:
y_pred = _gl_init_pred(_y_pred, X, len(estimators))
y_pred[:, ii, ...] = _y_pred
pb.update((ii + 1) * n_iter)
return y_pred
def _gl_init_pred(y_pred, X, n_train):
"""Aux. function to GeneralizingEstimator to initialize y_pred."""
n_sample, n_iter = X.shape[0], X.shape[-1]
if y_pred.ndim == 3:
y_pred = np.zeros((n_sample, n_train, n_iter, y_pred.shape[-1]),
y_pred.dtype)
else:
y_pred = np.zeros((n_sample, n_train, n_iter), y_pred.dtype)
return y_pred
def _gl_score(estimators, scoring, X, y, pb):
"""Score GeneralizingEstimator in parallel.
Predict and score each slice of data.
Parameters
----------
estimators : list of estimators
The fitted estimators.
scoring : callable, string or None
If scoring is None (default), the predictions are internally
generated by estimator.score(). Else, we must first get the
predictions to pass them to ad-hoc scorer.
X : array, shape (n_samples, nd_features, n_slices)
The target data. The feature dimension can be multidimensional e.g.
X.shape = (n_samples, n_features_1, n_features_2, n_estimators)
y : array, shape (n_samples,) | (n_samples, n_targets)
The target values.
Returns
-------
score : array, shape (n_estimators, n_slices)
The score for each slice of data.
"""
    # FIXME: The level of parallelization may be a bit high, and might be memory
# consuming. Perhaps need to lower it down to the loop across X slices.
score_shape = [len(estimators), X.shape[-1]]
for jj in range(X.shape[-1]):
for ii, est in enumerate(estimators):
_score = scoring(est, X[..., jj], y)
# Initialize array of predictions on the first score iteration
if (ii == 0) and (jj == 0):
dtype = type(_score)
score = np.zeros(score_shape, dtype)
score[ii, jj, ...] = _score
pb.update(jj * len(estimators) + ii + 1)
return score
def _fix_auc(scoring, y):
from sklearn.preprocessing import LabelEncoder
# This fixes sklearn's inability to compute roc_auc when y not in [0, 1]
# scikit-learn/scikit-learn#6874
if scoring is not None:
score_func = getattr(scoring, '_score_func', None)
kwargs = getattr(scoring, '_kwargs', {})
if (getattr(score_func, '__name__', '') == 'roc_auc_score' and
kwargs.get('multi_class', 'raise') == 'raise'):
if np.ndim(y) != 1 or len(set(y)) != 2:
raise ValueError('roc_auc scoring can only be computed for '
'two-class problems.')
y = LabelEncoder().fit_transform(y)
return y
|
bsd-3-clause
|
tmhm/scikit-learn
|
sklearn/cross_validation.py
|
4
|
62314
|
"""
The :mod:`sklearn.cross_validation` module includes utilities for cross-
validation and performance evaluation.
"""
# Author: Alexandre Gramfort <[email protected]>,
# Gael Varoquaux <[email protected]>,
# Olivier Grisel <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from __future__ import division
import warnings
from itertools import chain, combinations
from math import ceil, floor, factorial
import numbers
import time
from abc import ABCMeta, abstractmethod
import numpy as np
import scipy.sparse as sp
from .base import is_classifier, clone
from .utils import indexable, check_random_state, safe_indexing
from .utils.validation import (_is_arraylike, _num_samples,
check_array, column_or_1d)
from .utils.multiclass import type_of_target
from .externals.joblib import Parallel, delayed, logger
from .externals.six import with_metaclass
from .externals.six.moves import zip
from .metrics.scorer import check_scoring
from .utils.fixes import bincount
__all__ = ['KFold',
'LeaveOneLabelOut',
'LeaveOneOut',
'LeavePLabelOut',
'LeavePOut',
'ShuffleSplit',
'StratifiedKFold',
'StratifiedShuffleSplit',
'PredefinedSplit',
'LabelShuffleSplit',
'check_cv',
'cross_val_score',
'cross_val_predict',
'permutation_test_score',
'train_test_split']
class _PartitionIterator(with_metaclass(ABCMeta)):
"""Base class for CV iterators where train_mask = ~test_mask
Implementations must define `_iter_test_masks` or `_iter_test_indices`.
Parameters
----------
n : int
Total number of elements in dataset.
"""
def __init__(self, n):
if abs(n - int(n)) >= np.finfo('f').eps:
raise ValueError("n must be an integer")
self.n = int(n)
def __iter__(self):
ind = np.arange(self.n)
for test_index in self._iter_test_masks():
train_index = np.logical_not(test_index)
train_index = ind[train_index]
test_index = ind[test_index]
yield train_index, test_index
# Since subclasses must implement either _iter_test_masks or
# _iter_test_indices, neither can be abstract.
def _iter_test_masks(self):
"""Generates boolean masks corresponding to test sets.
By default, delegates to _iter_test_indices()
"""
for test_index in self._iter_test_indices():
test_mask = self._empty_mask()
test_mask[test_index] = True
yield test_mask
def _iter_test_indices(self):
"""Generates integer indices corresponding to test sets."""
raise NotImplementedError
def _empty_mask(self):
return np.zeros(self.n, dtype=np.bool)
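# A hypothetical sketch (not part of scikit-learn) of the minimal contract the
# base class expects: a subclass only needs to implement _iter_test_indices
# (or _iter_test_masks) and inherits the train/test iteration logic.
#
#     class _EveryOtherSample(_PartitionIterator):
#         """Toy CV iterator: even indices form one test set, odd the other."""
#         def _iter_test_indices(self):
#             yield np.arange(0, self.n, 2)
#             yield np.arange(1, self.n, 2)
#
#     # for train_index, test_index in _EveryOtherSample(6): ...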
class LeaveOneOut(_PartitionIterator):
"""Leave-One-Out cross validation iterator.
Provides train/test indices to split data in train test sets. Each
sample is used once as a test set (singleton) while the remaining
samples form the training set.
Note: ``LeaveOneOut(n)`` is equivalent to ``KFold(n, n_folds=n)`` and
``LeavePOut(n, p=1)``.
Due to the high number of test sets (which is the same as the
number of samples) this cross validation method can be very costly.
For large datasets one should favor KFold, StratifiedKFold or
ShuffleSplit.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n : int
Total number of elements in dataset.
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4]])
>>> y = np.array([1, 2])
>>> loo = cross_validation.LeaveOneOut(2)
>>> len(loo)
2
>>> print(loo)
sklearn.cross_validation.LeaveOneOut(n=2)
>>> for train_index, test_index in loo:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
TRAIN: [1] TEST: [0]
[[3 4]] [[1 2]] [2] [1]
TRAIN: [0] TEST: [1]
[[1 2]] [[3 4]] [1] [2]
See also
--------
LeaveOneLabelOut for splitting the data according to explicit,
domain-specific stratification of the dataset.
"""
def _iter_test_indices(self):
return range(self.n)
def __repr__(self):
return '%s.%s(n=%i)' % (
self.__class__.__module__,
self.__class__.__name__,
self.n,
)
def __len__(self):
return self.n
class LeavePOut(_PartitionIterator):
"""Leave-P-Out cross validation iterator
Provides train/test indices to split data in train test sets. This results
in testing on all distinct samples of size p, while the remaining n - p
samples form the training set in each iteration.
Note: ``LeavePOut(n, p)`` is NOT equivalent to ``KFold(n, n_folds=n // p)``
which creates non-overlapping test sets.
Due to the high number of iterations which grows combinatorically with the
number of samples this cross validation method can be very costly. For
large datasets one should favor KFold, StratifiedKFold or ShuffleSplit.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n : int
Total number of elements in dataset.
p : int
Size of the test sets.
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
>>> y = np.array([1, 2, 3, 4])
>>> lpo = cross_validation.LeavePOut(4, 2)
>>> len(lpo)
6
>>> print(lpo)
sklearn.cross_validation.LeavePOut(n=4, p=2)
>>> for train_index, test_index in lpo:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [2 3] TEST: [0 1]
TRAIN: [1 3] TEST: [0 2]
TRAIN: [1 2] TEST: [0 3]
TRAIN: [0 3] TEST: [1 2]
TRAIN: [0 2] TEST: [1 3]
TRAIN: [0 1] TEST: [2 3]
"""
def __init__(self, n, p):
super(LeavePOut, self).__init__(n)
self.p = p
def _iter_test_indices(self):
for comb in combinations(range(self.n), self.p):
yield np.array(comb)
def __repr__(self):
return '%s.%s(n=%i, p=%i)' % (
self.__class__.__module__,
self.__class__.__name__,
self.n,
self.p,
)
def __len__(self):
return int(factorial(self.n) / factorial(self.n - self.p)
/ factorial(self.p))
class _BaseKFold(with_metaclass(ABCMeta, _PartitionIterator)):
"""Base class to validate KFold approaches"""
@abstractmethod
def __init__(self, n, n_folds, shuffle, random_state):
super(_BaseKFold, self).__init__(n)
if abs(n_folds - int(n_folds)) >= np.finfo('f').eps:
raise ValueError("n_folds must be an integer")
self.n_folds = n_folds = int(n_folds)
if n_folds <= 1:
raise ValueError(
"k-fold cross validation requires at least one"
" train / test split by setting n_folds=2 or more,"
" got n_folds={0}.".format(n_folds))
if n_folds > self.n:
raise ValueError(
("Cannot have number of folds n_folds={0} greater"
" than the number of samples: {1}.").format(n_folds, n))
if not isinstance(shuffle, bool):
raise TypeError("shuffle must be True or False;"
" got {0}".format(shuffle))
self.shuffle = shuffle
self.random_state = random_state
class KFold(_BaseKFold):
"""K-Folds cross validation iterator.
Provides train/test indices to split data in train test sets. Split
dataset into k consecutive folds (without shuffling).
    Each fold is then used as a validation set once while the k - 1 remaining
    folds form the training set.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n : int
Total number of elements.
n_folds : int, default=3
Number of folds. Must be at least 2.
shuffle : boolean, optional
Whether to shuffle the data before splitting into batches.
random_state : None, int or RandomState
Pseudo-random number generator state used for random
sampling. If None, use default numpy RNG for shuffling
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([1, 2, 3, 4])
>>> kf = cross_validation.KFold(4, n_folds=2)
>>> len(kf)
2
>>> print(kf) # doctest: +NORMALIZE_WHITESPACE
sklearn.cross_validation.KFold(n=4, n_folds=2, shuffle=False,
random_state=None)
>>> for train_index, test_index in kf:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [2 3] TEST: [0 1]
TRAIN: [0 1] TEST: [2 3]
Notes
-----
The first n % n_folds folds have size n // n_folds + 1, other folds have
size n // n_folds.
See also
--------
StratifiedKFold: take label information into account to avoid building
folds with imbalanced class distributions (for binary or multiclass
classification tasks).
"""
def __init__(self, n, n_folds=3, shuffle=False,
random_state=None):
super(KFold, self).__init__(n, n_folds, shuffle, random_state)
self.idxs = np.arange(n)
if shuffle:
rng = check_random_state(self.random_state)
rng.shuffle(self.idxs)
def _iter_test_indices(self):
n = self.n
n_folds = self.n_folds
fold_sizes = (n // n_folds) * np.ones(n_folds, dtype=np.int)
fold_sizes[:n % n_folds] += 1
current = 0
for fold_size in fold_sizes:
start, stop = current, current + fold_size
yield self.idxs[start:stop]
current = stop
def __repr__(self):
return '%s.%s(n=%i, n_folds=%i, shuffle=%s, random_state=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.n,
self.n_folds,
self.shuffle,
self.random_state,
)
def __len__(self):
return self.n_folds
class StratifiedKFold(_BaseKFold):
"""Stratified K-Folds cross validation iterator
Provides train/test indices to split data in train test sets.
This cross-validation object is a variation of KFold that
returns stratified folds. The folds are made by preserving
the percentage of samples for each class.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
y : array-like, [n_samples]
Samples to split in K folds.
n_folds : int, default=3
Number of folds. Must be at least 2.
shuffle : boolean, optional
Whether to shuffle each stratification of the data before splitting
into batches.
random_state : None, int or RandomState
Pseudo-random number generator state used for random
sampling. If None, use default numpy RNG for shuffling
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> skf = cross_validation.StratifiedKFold(y, n_folds=2)
>>> len(skf)
2
>>> print(skf) # doctest: +NORMALIZE_WHITESPACE
sklearn.cross_validation.StratifiedKFold(labels=[0 0 1 1], n_folds=2,
shuffle=False, random_state=None)
>>> for train_index, test_index in skf:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [1 3] TEST: [0 2]
TRAIN: [0 2] TEST: [1 3]
Notes
-----
    All the folds have size trunc(n_samples / n_folds); the last one contains
    the remaining samples.
"""
def __init__(self, y, n_folds=3, shuffle=False,
random_state=None):
super(StratifiedKFold, self).__init__(
len(y), n_folds, shuffle, random_state)
y = np.asarray(y)
n_samples = y.shape[0]
unique_labels, y_inversed = np.unique(y, return_inverse=True)
label_counts = bincount(y_inversed)
min_labels = np.min(label_counts)
if self.n_folds > min_labels:
warnings.warn(("The least populated class in y has only %d"
" members, which is too few. The minimum"
" number of labels for any class cannot"
" be less than n_folds=%d."
% (min_labels, self.n_folds)), Warning)
# don't want to use the same seed in each label's shuffle
if self.shuffle:
rng = check_random_state(self.random_state)
else:
rng = self.random_state
# pre-assign each sample to a test fold index using individual KFold
# splitting strategies for each label so as to respect the
# balance of labels
per_label_cvs = [
KFold(max(c, self.n_folds), self.n_folds, shuffle=self.shuffle,
random_state=rng) for c in label_counts]
test_folds = np.zeros(n_samples, dtype=np.int)
for test_fold_idx, per_label_splits in enumerate(zip(*per_label_cvs)):
for label, (_, test_split) in zip(unique_labels, per_label_splits):
label_test_folds = test_folds[y == label]
# the test split can be too big because we used
# KFold(max(c, self.n_folds), self.n_folds) instead of
# KFold(c, self.n_folds) to make it possible to not crash even
# if the data is not 100% stratifiable for all the labels
# (we use a warning instead of raising an exception)
# If this is the case, let's trim it:
test_split = test_split[test_split < len(label_test_folds)]
label_test_folds[test_split] = test_fold_idx
test_folds[y == label] = label_test_folds
self.test_folds = test_folds
self.y = y
def _iter_test_masks(self):
for i in range(self.n_folds):
yield self.test_folds == i
def __repr__(self):
return '%s.%s(labels=%s, n_folds=%i, shuffle=%s, random_state=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.y,
self.n_folds,
self.shuffle,
self.random_state,
)
def __len__(self):
return self.n_folds
class LeaveOneLabelOut(_PartitionIterator):
"""Leave-One-Label_Out cross-validation iterator
Provides train/test indices to split data according to a third-party
provided label. This label information can be used to encode arbitrary
domain specific stratifications of the samples as integers.
For instance the labels could be the year of collection of the samples
and thus allow for cross-validation against time-based splits.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
labels : array-like of int with shape (n_samples,)
Arbitrary domain-specific stratification of the data to be used
to draw the splits.
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
>>> y = np.array([1, 2, 1, 2])
>>> labels = np.array([1, 1, 2, 2])
>>> lol = cross_validation.LeaveOneLabelOut(labels)
>>> len(lol)
2
>>> print(lol)
sklearn.cross_validation.LeaveOneLabelOut(labels=[1 1 2 2])
>>> for train_index, test_index in lol:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
TRAIN: [2 3] TEST: [0 1]
[[5 6]
[7 8]] [[1 2]
[3 4]] [1 2] [1 2]
TRAIN: [0 1] TEST: [2 3]
[[1 2]
[3 4]] [[5 6]
[7 8]] [1 2] [1 2]
"""
def __init__(self, labels):
super(LeaveOneLabelOut, self).__init__(len(labels))
# We make a copy of labels to avoid side-effects during iteration
self.labels = np.array(labels, copy=True)
self.unique_labels = np.unique(labels)
self.n_unique_labels = len(self.unique_labels)
def _iter_test_masks(self):
for i in self.unique_labels:
yield self.labels == i
def __repr__(self):
return '%s.%s(labels=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.labels,
)
def __len__(self):
return self.n_unique_labels
class LeavePLabelOut(_PartitionIterator):
"""Leave-P-Label_Out cross-validation iterator
Provides train/test indices to split data according to a third-party
provided label. This label information can be used to encode arbitrary
domain specific stratifications of the samples as integers.
For instance the labels could be the year of collection of the samples
and thus allow for cross-validation against time-based splits.
The difference between LeavePLabelOut and LeaveOneLabelOut is that
the former builds the test sets with all the samples assigned to
``p`` different values of the labels while the latter uses samples
    all assigned the same label.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
labels : array-like of int with shape (n_samples,)
Arbitrary domain-specific stratification of the data to be used
to draw the splits.
p : int
        Number of labels to leave out in the test split.
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4], [5, 6]])
>>> y = np.array([1, 2, 1])
>>> labels = np.array([1, 2, 3])
>>> lpl = cross_validation.LeavePLabelOut(labels, p=2)
>>> len(lpl)
3
>>> print(lpl)
sklearn.cross_validation.LeavePLabelOut(labels=[1 2 3], p=2)
>>> for train_index, test_index in lpl:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
TRAIN: [2] TEST: [0 1]
[[5 6]] [[1 2]
[3 4]] [1] [1 2]
TRAIN: [1] TEST: [0 2]
[[3 4]] [[1 2]
[5 6]] [2] [1 1]
TRAIN: [0] TEST: [1 2]
[[1 2]] [[3 4]
[5 6]] [1] [2 1]
"""
def __init__(self, labels, p):
# We make a copy of labels to avoid side-effects during iteration
super(LeavePLabelOut, self).__init__(len(labels))
self.labels = np.array(labels, copy=True)
self.unique_labels = np.unique(labels)
self.n_unique_labels = len(self.unique_labels)
self.p = p
def _iter_test_masks(self):
comb = combinations(range(self.n_unique_labels), self.p)
for idx in comb:
test_index = self._empty_mask()
idx = np.array(idx)
for l in self.unique_labels[idx]:
test_index[self.labels == l] = True
yield test_index
def __repr__(self):
return '%s.%s(labels=%s, p=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.labels,
self.p,
)
def __len__(self):
return int(factorial(self.n_unique_labels) /
factorial(self.n_unique_labels - self.p) /
factorial(self.p))
class BaseShuffleSplit(with_metaclass(ABCMeta)):
"""Base class for ShuffleSplit and StratifiedShuffleSplit"""
def __init__(self, n, n_iter=10, test_size=0.1, train_size=None,
random_state=None):
self.n = n
self.n_iter = n_iter
self.test_size = test_size
self.train_size = train_size
self.random_state = random_state
self.n_train, self.n_test = _validate_shuffle_split(n, test_size,
train_size)
def __iter__(self):
for train, test in self._iter_indices():
yield train, test
return
@abstractmethod
def _iter_indices(self):
"""Generate (train, test) indices"""
class ShuffleSplit(BaseShuffleSplit):
"""Random permutation cross-validation iterator.
Yields indices to split data into training and test sets.
Note: contrary to other cross-validation strategies, random splits
do not guarantee that all folds will be different, although this is
still very likely for sizeable datasets.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n : int
Total number of elements in the dataset.
n_iter : int (default 10)
Number of re-shuffling & splitting iterations.
test_size : float (default 0.1), int, or None
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split. If
int, represents the absolute number of test samples. If None,
the value is automatically set to the complement of the train size.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
Examples
--------
>>> from sklearn import cross_validation
>>> rs = cross_validation.ShuffleSplit(4, n_iter=3,
... test_size=.25, random_state=0)
>>> len(rs)
3
>>> print(rs)
... # doctest: +ELLIPSIS
ShuffleSplit(4, n_iter=3, test_size=0.25, ...)
>>> for train_index, test_index in rs:
... print("TRAIN:", train_index, "TEST:", test_index)
...
TRAIN: [3 1 0] TEST: [2]
TRAIN: [2 1 3] TEST: [0]
TRAIN: [0 2 1] TEST: [3]
>>> rs = cross_validation.ShuffleSplit(4, n_iter=3,
... train_size=0.5, test_size=.25, random_state=0)
>>> for train_index, test_index in rs:
... print("TRAIN:", train_index, "TEST:", test_index)
...
TRAIN: [3 1] TEST: [2]
TRAIN: [2 1] TEST: [0]
TRAIN: [0 2] TEST: [3]
"""
def _iter_indices(self):
rng = check_random_state(self.random_state)
for i in range(self.n_iter):
# random partition
permutation = rng.permutation(self.n)
ind_test = permutation[:self.n_test]
ind_train = permutation[self.n_test:self.n_test + self.n_train]
yield ind_train, ind_test
def __repr__(self):
return ('%s(%d, n_iter=%d, test_size=%s, '
'random_state=%s)' % (
self.__class__.__name__,
self.n,
self.n_iter,
str(self.test_size),
self.random_state,
))
def __len__(self):
return self.n_iter
def _validate_shuffle_split(n, test_size, train_size):
if test_size is None and train_size is None:
raise ValueError(
'test_size and train_size can not both be None')
if test_size is not None:
if np.asarray(test_size).dtype.kind == 'f':
if test_size >= 1.:
raise ValueError(
'test_size=%f should be smaller '
'than 1.0 or be an integer' % test_size)
elif np.asarray(test_size).dtype.kind == 'i':
if test_size >= n:
raise ValueError(
'test_size=%d should be smaller '
'than the number of samples %d' % (test_size, n))
else:
raise ValueError("Invalid value for test_size: %r" % test_size)
if train_size is not None:
if np.asarray(train_size).dtype.kind == 'f':
if train_size >= 1.:
raise ValueError("train_size=%f should be smaller "
"than 1.0 or be an integer" % train_size)
elif np.asarray(test_size).dtype.kind == 'f' and \
train_size + test_size > 1.:
raise ValueError('The sum of test_size and train_size = %f, '
'should be smaller than 1.0. Reduce '
'test_size and/or train_size.' %
(train_size + test_size))
elif np.asarray(train_size).dtype.kind == 'i':
if train_size >= n:
raise ValueError("train_size=%d should be smaller "
"than the number of samples %d" %
(train_size, n))
else:
raise ValueError("Invalid value for train_size: %r" % train_size)
if np.asarray(test_size).dtype.kind == 'f':
n_test = ceil(test_size * n)
elif np.asarray(test_size).dtype.kind == 'i':
n_test = float(test_size)
if train_size is None:
n_train = n - n_test
else:
if np.asarray(train_size).dtype.kind == 'f':
n_train = floor(train_size * n)
else:
n_train = float(train_size)
if test_size is None:
n_test = n - n_train
if n_train + n_test > n:
raise ValueError('The sum of train_size and test_size = %d, '
'should be smaller than the number of '
'samples %d. Reduce test_size and/or '
'train_size.' % (n_train + n_test, n))
return int(n_train), int(n_test)
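# For example (illustrative only): _validate_shuffle_split(10, test_size=0.25,
# train_size=None) returns (n_train, n_test) == (7, 3), since
# n_test = ceil(0.25 * 10) = 3 and the remaining 7 samples form the train set.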
class StratifiedShuffleSplit(BaseShuffleSplit):
"""Stratified ShuffleSplit cross validation iterator
Provides train/test indices to split data in train test sets.
This cross-validation object is a merge of StratifiedKFold and
ShuffleSplit, which returns stratified randomized folds. The folds
are made by preserving the percentage of samples for each class.
Note: like the ShuffleSplit strategy, stratified random splits
do not guarantee that all folds will be different, although this is
still very likely for sizeable datasets.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
y : array, [n_samples]
Labels of samples.
n_iter : int (default 10)
Number of re-shuffling & splitting iterations.
test_size : float (default 0.1), int, or None
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split. If
int, represents the absolute number of test samples. If None,
the value is automatically set to the complement of the train size.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
Examples
--------
>>> from sklearn.cross_validation import StratifiedShuffleSplit
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> sss = StratifiedShuffleSplit(y, 3, test_size=0.5, random_state=0)
>>> len(sss)
3
>>> print(sss) # doctest: +ELLIPSIS
StratifiedShuffleSplit(labels=[0 0 1 1], n_iter=3, ...)
>>> for train_index, test_index in sss:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [1 2] TEST: [3 0]
TRAIN: [0 2] TEST: [1 3]
TRAIN: [0 2] TEST: [3 1]
"""
def __init__(self, y, n_iter=10, test_size=0.1, train_size=None,
random_state=None):
super(StratifiedShuffleSplit, self).__init__(
len(y), n_iter, test_size, train_size, random_state)
self.y = np.array(y)
self.classes, self.y_indices = np.unique(y, return_inverse=True)
n_cls = self.classes.shape[0]
if np.min(bincount(self.y_indices)) < 2:
raise ValueError("The least populated class in y has only 1"
" member, which is too few. The minimum"
" number of labels for any class cannot"
" be less than 2.")
if self.n_train < n_cls:
raise ValueError('The train_size = %d should be greater or '
'equal to the number of classes = %d' %
(self.n_train, n_cls))
if self.n_test < n_cls:
raise ValueError('The test_size = %d should be greater or '
'equal to the number of classes = %d' %
(self.n_test, n_cls))
def _iter_indices(self):
rng = check_random_state(self.random_state)
cls_count = bincount(self.y_indices)
p_i = cls_count / float(self.n)
n_i = np.round(self.n_train * p_i).astype(int)
t_i = np.minimum(cls_count - n_i,
np.round(self.n_test * p_i).astype(int))
for n in range(self.n_iter):
train = []
test = []
for i, cls in enumerate(self.classes):
permutation = rng.permutation(cls_count[i])
cls_i = np.where((self.y == cls))[0][permutation]
train.extend(cls_i[:n_i[i]])
test.extend(cls_i[n_i[i]:n_i[i] + t_i[i]])
            # Because of rounding issues (as n_train and n_test are not
            # divisors of the number of elements per class), we may end
            # up here with fewer samples in train and test than asked for.
            if len(train) < self.n_train or len(test) < self.n_test:
                # We complete by randomly assigning the missing indices
missing_idx = np.where(bincount(train + test,
minlength=len(self.y)) == 0,
)[0]
missing_idx = rng.permutation(missing_idx)
train.extend(missing_idx[:(self.n_train - len(train))])
test.extend(missing_idx[-(self.n_test - len(test)):])
train = rng.permutation(train)
test = rng.permutation(test)
yield train, test
def __repr__(self):
return ('%s(labels=%s, n_iter=%d, test_size=%s, '
'random_state=%s)' % (
self.__class__.__name__,
self.y,
self.n_iter,
str(self.test_size),
self.random_state,
))
def __len__(self):
return self.n_iter
class PredefinedSplit(_PartitionIterator):
"""Predefined split cross validation iterator
Splits the data into training/test set folds according to a predefined
scheme. Each sample can be assigned to at most one test set fold, as
specified by the user through the ``test_fold`` parameter.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
test_fold : "array-like, shape (n_samples,)
test_fold[i] gives the test set fold of sample i. A value of -1
indicates that the corresponding sample is not part of any test set
folds, but will instead always be put into the training fold.
Examples
--------
>>> from sklearn.cross_validation import PredefinedSplit
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> ps = PredefinedSplit(test_fold=[0, 1, -1, 1])
>>> len(ps)
2
>>> print(ps) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
sklearn.cross_validation.PredefinedSplit(test_fold=[ 0 1 -1 1])
>>> for train_index, test_index in ps:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [1 2 3] TEST: [0]
TRAIN: [0 2] TEST: [1 3]
"""
def __init__(self, test_fold):
super(PredefinedSplit, self).__init__(len(test_fold))
self.test_fold = np.array(test_fold, dtype=np.int)
self.test_fold = column_or_1d(self.test_fold)
self.unique_folds = np.unique(self.test_fold)
self.unique_folds = self.unique_folds[self.unique_folds != -1]
def _iter_test_indices(self):
for f in self.unique_folds:
yield np.where(self.test_fold == f)[0]
def __repr__(self):
return '%s.%s(test_fold=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.test_fold)
def __len__(self):
return len(self.unique_folds)
class LabelShuffleSplit(ShuffleSplit):
'''Shuffle-Labels-Out cross-validation iterator
Provides randomized train/test indices to split data according to a
third-party provided label. This label information can be used to encode
arbitrary domain specific stratifications of the samples as integers.
For instance the labels could be the year of collection of the samples
and thus allow for cross-validation against time-based splits.
The difference between LeavePLabelOut and LabelShuffleSplit is that
the former generates splits using all subsets of size ``p`` unique labels,
whereas LabelShuffleSplit generates a user-determined number of random
test splits, each with a user-determined fraction of unique labels.
For example, a less computationally intensive alternative to
``LeavePLabelOut(labels, p=10)`` would be
``LabelShuffleSplit(labels, test_size=10, n_iter=100)``.
    Note: The parameters ``test_size`` and ``train_size`` refer to labels, not
    to samples as they do in ShuffleSplit.
Parameters
----------
labels : array, [n_samples]
Labels of samples
n_iter : int (default 5)
Number of re-shuffling & splitting iterations.
test_size : float (default 0.2), int, or None
If float, should be between 0.0 and 1.0 and represent the
proportion of the labels to include in the test split. If
int, represents the absolute number of test labels. If None,
the value is automatically set to the complement of the train size.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the labels to include in the train split. If
int, represents the absolute number of train labels. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
'''
def __init__(self, labels, n_iter=5, test_size=0.2, train_size=None,
random_state=None):
classes, label_indices = np.unique(labels, return_inverse=True)
super(LabelShuffleSplit, self).__init__(
len(classes),
n_iter=n_iter,
test_size=test_size,
train_size=train_size,
random_state=random_state)
self.labels = labels
self.classes = classes
self.label_indices = label_indices
def __repr__(self):
return ('%s(labels=%s, n_iter=%d, test_size=%s, '
'random_state=%s)' % (
self.__class__.__name__,
self.labels,
self.n_iter,
str(self.test_size),
self.random_state,
))
def __len__(self):
return self.n_iter
def _iter_indices(self):
for label_train, label_test in super(LabelShuffleSplit,
self)._iter_indices():
# these are the indices of classes in the partition
# invert them into data indices
train = np.flatnonzero(np.in1d(self.label_indices, label_train))
test = np.flatnonzero(np.in1d(self.label_indices, label_test))
yield train, test
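# --- Editorial example (added; not part of the original module) --------------
# A minimal, hedged sketch of iterating LabelShuffleSplit, using the API
# defined above. The helper name and the toy labels are illustrative
# assumptions only.
def _example_label_shuffle_split():
    import numpy as np

    labels = np.array([1, 1, 2, 3])
    # Splits never separate samples that share a label; test_size/train_size
    # count labels, not samples.
    lss = LabelShuffleSplit(labels, n_iter=2, test_size=0.5, random_state=0)
    for train_index, test_index in lss:
        print("TRAIN:", train_index, "TEST:", test_index)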
##############################################################################
def _index_param_value(X, v, indices):
"""Private helper function for parameter value indexing."""
if not _is_arraylike(v) or _num_samples(v) != _num_samples(X):
# pass through: skip indexing
return v
if sp.issparse(v):
v = v.tocsr()
return safe_indexing(v, indices)
def cross_val_predict(estimator, X, y=None, cv=None, n_jobs=1,
verbose=0, fit_params=None, pre_dispatch='2*n_jobs'):
"""Generate cross-validated estimates for each input data point
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit' and 'predict'
The object to use to fit the data.
X : array-like
The data to fit. Can be, for example a list, or an array at least 2d.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
cv : integer or cross-validation generator, optional, default=3
A cross-validation generator to use. If int, determines the number
of folds in StratifiedKFold if estimator is a classifier and the
target y is binary or multiclass, or the number of folds in KFold
otherwise.
Specific cross-validation objects can be passed, see
sklearn.cross_validation module for the list of possible objects.
This generator must include all elements in the test set exactly once.
Otherwise, a ValueError is raised.
n_jobs : integer, optional
The number of CPUs to use to do the computation. -1 means
'all CPUs'.
verbose : integer, optional
The verbosity level.
fit_params : dict, optional
Parameters to pass to the fit method of the estimator.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
Returns
-------
preds : ndarray
This is the result of calling 'predict'
"""
X, y = indexable(X, y)
cv = check_cv(cv, X, y, classifier=is_classifier(estimator))
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
parallel = Parallel(n_jobs=n_jobs, verbose=verbose,
pre_dispatch=pre_dispatch)
preds_blocks = parallel(delayed(_fit_and_predict)(clone(estimator), X, y,
train, test, verbose,
fit_params)
for train, test in cv)
preds = [p for p, _ in preds_blocks]
locs = np.concatenate([loc for _, loc in preds_blocks])
if not _check_is_partition(locs, _num_samples(X)):
raise ValueError('cross_val_predict only works for partitions')
inv_locs = np.empty(len(locs), dtype=int)
inv_locs[locs] = np.arange(len(locs))
# Check for sparse predictions
if sp.issparse(preds[0]):
preds = sp.vstack(preds, format=preds[0].format)
else:
preds = np.concatenate(preds)
return preds[inv_locs]
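# --- Editorial example (added; not part of the original module) --------------
# Hedged sketch of cross_val_predict usage: one out-of-fold prediction per
# sample. The estimator and dataset (LogisticRegression on iris) are
# illustrative assumptions; any estimator with fit/predict works.
def _example_cross_val_predict():
    from sklearn.datasets import load_iris
    from sklearn.linear_model import LogisticRegression

    iris = load_iris()
    clf = LogisticRegression()
    # Each prediction comes from a model that did not see that sample.
    preds = cross_val_predict(clf, iris.data, iris.target, cv=5)
    return preds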
def _fit_and_predict(estimator, X, y, train, test, verbose, fit_params):
"""Fit estimator and predict values for a given dataset split.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit' and 'predict'
The object to use to fit the data.
X : array-like of shape at least 2D
The data to fit.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
train : array-like, shape (n_train_samples,)
Indices of training samples.
test : array-like, shape (n_test_samples,)
Indices of test samples.
verbose : integer
The verbosity level.
fit_params : dict or None
Parameters that will be passed to ``estimator.fit``.
Returns
-------
preds : sequence
Result of calling 'estimator.predict'
test : array-like
This is the value of the test parameter
"""
# Adjust length of sample weights
fit_params = fit_params if fit_params is not None else {}
fit_params = dict([(k, _index_param_value(X, v, train))
for k, v in fit_params.items()])
X_train, y_train = _safe_split(estimator, X, y, train)
X_test, _ = _safe_split(estimator, X, y, test, train)
if y_train is None:
estimator.fit(X_train, **fit_params)
else:
estimator.fit(X_train, y_train, **fit_params)
preds = estimator.predict(X_test)
return preds, test
def _check_is_partition(locs, n):
"""Check whether locs is a reordering of the array np.arange(n)
Parameters
----------
locs : ndarray
integer array to test
n : int
number of expected elements
Returns
-------
is_partition : bool
True iff sorted(locs) is range(n)
"""
if len(locs) != n:
return False
hit = np.zeros(n, bool)
hit[locs] = True
if not np.all(hit):
return False
return True
def cross_val_score(estimator, X, y=None, scoring=None, cv=None, n_jobs=1,
verbose=0, fit_params=None, pre_dispatch='2*n_jobs'):
"""Evaluate a score by cross-validation
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like
The data to fit. Can be, for example a list, or an array at least 2d.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : integer or cross-validation generator, optional, default=3
A cross-validation generator to use. If int, determines the number
of folds in StratifiedKFold if estimator is a classifier and the
target y is binary or multiclass, or the number of folds in KFold
otherwise.
Specific cross-validation objects can be passed, see
sklearn.cross_validation module for the list of possible objects.
n_jobs : integer, optional
The number of CPUs to use to do the computation. -1 means
'all CPUs'.
verbose : integer, optional
The verbosity level.
fit_params : dict, optional
Parameters to pass to the fit method of the estimator.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
Returns
-------
scores : array of float, shape=(len(list(cv)),)
Array of scores of the estimator for each run of the cross validation.
"""
X, y = indexable(X, y)
cv = check_cv(cv, X, y, classifier=is_classifier(estimator))
scorer = check_scoring(estimator, scoring=scoring)
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
parallel = Parallel(n_jobs=n_jobs, verbose=verbose,
pre_dispatch=pre_dispatch)
scores = parallel(delayed(_fit_and_score)(clone(estimator), X, y, scorer,
train, test, verbose, None,
fit_params)
for train, test in cv)
return np.array(scores)[:, 0]
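# --- Editorial example (added; not part of the original module) --------------
# Hedged sketch of cross_val_score: one score per fold. SVC on iris is an
# illustrative assumption; the estimator's default scorer is used when
# ``scoring`` is None.
def _example_cross_val_score():
    from sklearn.datasets import load_iris
    from sklearn.svm import SVC

    iris = load_iris()
    clf = SVC(kernel='linear', C=1)
    scores = cross_val_score(clf, iris.data, iris.target, cv=5)
    # Summarise the five per-fold accuracies.
    return scores.mean(), scores.std()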
class FitFailedWarning(RuntimeWarning):
pass
def _fit_and_score(estimator, X, y, scorer, train, test, verbose,
parameters, fit_params, return_train_score=False,
return_parameters=False, error_score='raise'):
"""Fit estimator and compute scores for a given dataset split.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like of shape at least 2D
The data to fit.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
scorer : callable
A scorer callable object / function with signature
``scorer(estimator, X, y)``.
train : array-like, shape (n_train_samples,)
Indices of training samples.
test : array-like, shape (n_test_samples,)
Indices of test samples.
verbose : integer
The verbosity level.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
parameters : dict or None
Parameters to be set on the estimator.
fit_params : dict or None
Parameters that will be passed to ``estimator.fit``.
return_train_score : boolean, optional, default: False
Compute and return score on training set.
return_parameters : boolean, optional, default: False
Return parameters that has been used for the estimator.
Returns
-------
train_score : float, optional
Score on training set, returned only if `return_train_score` is `True`.
test_score : float
Score on test set.
n_test_samples : int
Number of test samples.
scoring_time : float
Time spent for fitting and scoring in seconds.
parameters : dict or None, optional
The parameters that have been evaluated.
"""
if verbose > 1:
if parameters is None:
msg = "no parameters to be set"
else:
msg = '%s' % (', '.join('%s=%s' % (k, v)
for k, v in parameters.items()))
print("[CV] %s %s" % (msg, (64 - len(msg)) * '.'))
# Adjust length of sample weights
fit_params = fit_params if fit_params is not None else {}
fit_params = dict([(k, _index_param_value(X, v, train))
for k, v in fit_params.items()])
if parameters is not None:
estimator.set_params(**parameters)
start_time = time.time()
X_train, y_train = _safe_split(estimator, X, y, train)
X_test, y_test = _safe_split(estimator, X, y, test, train)
try:
if y_train is None:
estimator.fit(X_train, **fit_params)
else:
estimator.fit(X_train, y_train, **fit_params)
except Exception as e:
if error_score == 'raise':
raise
elif isinstance(error_score, numbers.Number):
test_score = error_score
if return_train_score:
train_score = error_score
warnings.warn("Classifier fit failed. The score on this train-test"
" partition for these parameters will be set to %f. "
"Details: \n%r" % (error_score, e), FitFailedWarning)
else:
raise ValueError("error_score must be the string 'raise' or a"
" numeric value. (Hint: if using 'raise', please"
" make sure that it has been spelled correctly.)"
)
else:
test_score = _score(estimator, X_test, y_test, scorer)
if return_train_score:
train_score = _score(estimator, X_train, y_train, scorer)
scoring_time = time.time() - start_time
if verbose > 2:
msg += ", score=%f" % test_score
if verbose > 1:
end_msg = "%s -%s" % (msg, logger.short_format_time(scoring_time))
print("[CV] %s %s" % ((64 - len(end_msg)) * '.', end_msg))
ret = [train_score] if return_train_score else []
ret.extend([test_score, _num_samples(X_test), scoring_time])
if return_parameters:
ret.append(parameters)
return ret
def _safe_split(estimator, X, y, indices, train_indices=None):
"""Create subset of dataset and properly handle kernels."""
if hasattr(estimator, 'kernel') and callable(estimator.kernel):
# cannot compute the kernel values with custom function
raise ValueError("Cannot use a custom kernel function. "
"Precompute the kernel matrix instead.")
if not hasattr(X, "shape"):
if getattr(estimator, "_pairwise", False):
raise ValueError("Precomputed kernels or affinity matrices have "
"to be passed as arrays or sparse matrices.")
X_subset = [X[idx] for idx in indices]
else:
if getattr(estimator, "_pairwise", False):
# X is a precomputed square kernel matrix
if X.shape[0] != X.shape[1]:
raise ValueError("X should be a square kernel matrix")
if train_indices is None:
X_subset = X[np.ix_(indices, indices)]
else:
X_subset = X[np.ix_(indices, train_indices)]
else:
X_subset = safe_indexing(X, indices)
if y is not None:
y_subset = safe_indexing(y, indices)
else:
y_subset = None
return X_subset, y_subset
def _score(estimator, X_test, y_test, scorer):
"""Compute the score of an estimator on a given test set."""
if y_test is None:
score = scorer(estimator, X_test)
else:
score = scorer(estimator, X_test, y_test)
if not isinstance(score, numbers.Number):
raise ValueError("scoring must return a number, got %s (%s) instead."
% (str(score), type(score)))
return score
def _permutation_test_score(estimator, X, y, cv, scorer):
"""Auxiliary function for permutation_test_score"""
avg_score = []
for train, test in cv:
estimator.fit(X[train], y[train])
avg_score.append(scorer(estimator, X[test], y[test]))
return np.mean(avg_score)
def _shuffle(y, labels, random_state):
"""Return a shuffled copy of y eventually shuffle among same labels."""
if labels is None:
ind = random_state.permutation(len(y))
else:
ind = np.arange(len(labels))
for label in np.unique(labels):
this_mask = (labels == label)
ind[this_mask] = random_state.permutation(ind[this_mask])
return y[ind]
def check_cv(cv, X=None, y=None, classifier=False):
"""Input checker utility for building a CV in a user friendly way.
Parameters
----------
cv : int, a cv generator instance, or None
        The input specifying which cv generator to use. It can be an
        integer, in which case it is the number of folds in a KFold;
        None, in which case 3-fold cross-validation is used; or another
        object, which will then be used as a cv generator.
X : array-like
The data the cross-val object will be applied on.
y : array-like
The target variable for a supervised learning problem.
classifier : boolean optional
Whether the task is a classification task, in which case
stratified KFold will be used.
Returns
-------
checked_cv: a cross-validation generator instance.
The return value is guaranteed to be a cv generator instance, whatever
the input type.
"""
is_sparse = sp.issparse(X)
if cv is None:
cv = 3
if isinstance(cv, numbers.Integral):
if classifier:
if type_of_target(y) in ['binary', 'multiclass']:
cv = StratifiedKFold(y, cv)
else:
cv = KFold(_num_samples(y), cv)
else:
if not is_sparse:
n_samples = len(X)
else:
n_samples = X.shape[0]
cv = KFold(n_samples, cv)
return cv
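# --- Editorial example (added; not part of the original module) --------------
# Hedged sketch of check_cv: an int plus a classification target yields a
# StratifiedKFold, otherwise a plain KFold. The toy data are illustrative.
def _example_check_cv():
    import numpy as np

    X = np.arange(20).reshape(10, 2)
    y = np.array([0, 1] * 5)
    cv = check_cv(5, X, y, classifier=True)   # StratifiedKFold(y, 5)
    return cv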
def permutation_test_score(estimator, X, y, cv=None,
n_permutations=100, n_jobs=1, labels=None,
random_state=0, verbose=0, scoring=None):
"""Evaluate the significance of a cross-validated score with permutations
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like of shape at least 2D
The data to fit.
y : array-like
The target variable to try to predict in the case of
supervised learning.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : integer or cross-validation generator, optional, default=3
A cross-validation generator to use. If int, determines the number
of folds in StratifiedKFold if estimator is a classifier and the
target y is binary or multiclass, or the number of folds in KFold
otherwise.
Specific cross-validation objects can be passed, see
sklearn.cross_validation module for the list of possible objects.
n_permutations : integer, optional
Number of times to permute ``y``.
n_jobs : integer, optional
The number of CPUs to use to do the computation. -1 means
'all CPUs'.
labels : array-like of shape [n_samples] (optional)
Labels constrain the permutation among groups of samples with
a same label.
random_state : RandomState or an int seed (0 by default)
A random number generator instance to define the state of the
random permutations generator.
verbose : integer, optional
The verbosity level.
Returns
-------
score : float
The true score without permuting targets.
permutation_scores : array, shape (n_permutations,)
The scores obtained for each permutations.
pvalue : float
The returned value equals p-value if `scoring` returns bigger
numbers for better scores (e.g., accuracy_score). If `scoring` is
rather a loss function (i.e. when lower is better such as with
`mean_squared_error`) then this is actually the complement of the
p-value: 1 - p-value.
Notes
-----
This function implements Test 1 in:
Ojala and Garriga. Permutation Tests for Studying Classifier
Performance. The Journal of Machine Learning Research (2010)
vol. 11
"""
X, y = indexable(X, y)
cv = check_cv(cv, X, y, classifier=is_classifier(estimator))
scorer = check_scoring(estimator, scoring=scoring)
random_state = check_random_state(random_state)
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
score = _permutation_test_score(clone(estimator), X, y, cv, scorer)
permutation_scores = Parallel(n_jobs=n_jobs, verbose=verbose)(
delayed(_permutation_test_score)(
clone(estimator), X, _shuffle(y, labels, random_state), cv,
scorer)
for _ in range(n_permutations))
permutation_scores = np.array(permutation_scores)
pvalue = (np.sum(permutation_scores >= score) + 1.0) / (n_permutations + 1)
return score, permutation_scores, pvalue
permutation_test_score.__test__ = False # to avoid a pb with nosetests
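# --- Editorial example (added; not part of the original module) --------------
# Hedged sketch of permutation_test_score: the p-value approaches
# 1 / (n_permutations + 1) when the score cannot be reproduced on permuted
# targets. SVC on iris and n_permutations=30 are illustrative assumptions.
def _example_permutation_test_score():
    from sklearn.datasets import load_iris
    from sklearn.svm import SVC

    iris = load_iris()
    clf = SVC(kernel='linear')
    score, permutation_scores, pvalue = permutation_test_score(
        clf, iris.data, iris.target, cv=5, n_permutations=30, random_state=0)
    return score, pvalue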
def train_test_split(*arrays, **options):
"""Split arrays or matrices into random train and test subsets
    Quick utility that wraps input validation and
    ``next(iter(ShuffleSplit(n_samples)))``, applying them to the input
    data in a single call for splitting (and optionally subsampling)
    the data in a one-liner.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
*arrays : sequence of arrays or scipy.sparse matrices with same shape[0]
Python lists or tuples occurring in arrays are converted to 1D numpy
arrays.
test_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split. If
int, represents the absolute number of test samples. If None,
the value is automatically set to the complement of the train size.
If train size is also None, test size is set to 0.25.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
stratify : array-like or None (default is None)
If not None, data is split in a stratified fashion, using this as
the labels array.
Returns
-------
splitting : list of arrays, length=2 * len(arrays)
List containing train-test split of input array.
Examples
--------
>>> import numpy as np
>>> from sklearn.cross_validation import train_test_split
>>> X, y = np.arange(10).reshape((5, 2)), range(5)
>>> X
array([[0, 1],
[2, 3],
[4, 5],
[6, 7],
[8, 9]])
>>> list(y)
[0, 1, 2, 3, 4]
>>> X_train, X_test, y_train, y_test = train_test_split(
... X, y, test_size=0.33, random_state=42)
...
>>> X_train
array([[4, 5],
[0, 1],
[6, 7]])
>>> y_train
[2, 0, 3]
>>> X_test
array([[2, 3],
[8, 9]])
>>> y_test
[1, 4]
"""
n_arrays = len(arrays)
if n_arrays == 0:
raise ValueError("At least one array required as input")
test_size = options.pop('test_size', None)
train_size = options.pop('train_size', None)
random_state = options.pop('random_state', None)
dtype = options.pop('dtype', None)
if dtype is not None:
warnings.warn("dtype option is ignored and will be removed in 0.18.",
DeprecationWarning)
allow_nd = options.pop('allow_nd', None)
allow_lists = options.pop('allow_lists', None)
stratify = options.pop('stratify', None)
if allow_lists is not None:
warnings.warn("The allow_lists option is deprecated and will be "
"assumed True in 0.18 and removed.", DeprecationWarning)
if options:
raise TypeError("Invalid parameters passed: %s" % str(options))
if allow_nd is not None:
warnings.warn("The allow_nd option is deprecated and will be "
"assumed True in 0.18 and removed.", DeprecationWarning)
if allow_lists is False or allow_nd is False:
arrays = [check_array(x, 'csr', allow_nd=allow_nd,
force_all_finite=False, ensure_2d=False)
if x is not None else x
for x in arrays]
if test_size is None and train_size is None:
test_size = 0.25
arrays = indexable(*arrays)
if stratify is not None:
cv = StratifiedShuffleSplit(stratify, test_size=test_size,
train_size=train_size,
random_state=random_state)
else:
n_samples = _num_samples(arrays[0])
cv = ShuffleSplit(n_samples, test_size=test_size,
train_size=train_size,
random_state=random_state)
train, test = next(iter(cv))
return list(chain.from_iterable((safe_indexing(a, train),
safe_indexing(a, test)) for a in arrays))
train_test_split.__test__ = False # to avoid a pb with nosetests
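# --- Editorial example (added; not part of the original module) --------------
# Hedged sketch of the ``stratify`` option of train_test_split: the 8:2 class
# ratio of the toy target is preserved in both halves. Data are illustrative.
def _example_stratified_train_test_split():
    import numpy as np

    X = np.arange(20).reshape(10, 2)
    y = np.array([0] * 8 + [1] * 2)
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=0.5, stratify=y, random_state=0)
    return y_train, y_test   # each half contains four 0s and one 1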
|
bsd-3-clause
|
alexsavio/scikit-learn
|
examples/linear_model/plot_huber_vs_ridge.py
|
127
|
2206
|
"""
=======================================================
HuberRegressor vs Ridge on dataset with strong outliers
=======================================================
Fit Ridge and HuberRegressor on a dataset with outliers.
The example shows that the predictions in ridge are strongly influenced
by the outliers present in the dataset. The Huber regressor is less
influenced by the outliers since the model uses the linear loss for these.
As the parameter epsilon is increased for the Huber regressor, the decision
function approaches that of the ridge.
"""
# Authors: Manoj Kumar [email protected]
# License: BSD 3 clause
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_regression
from sklearn.linear_model import HuberRegressor, Ridge
# Generate toy data.
rng = np.random.RandomState(0)
X, y = make_regression(n_samples=20, n_features=1, random_state=0, noise=4.0,
bias=100.0)
# Add four strong outliers to the dataset.
X_outliers = rng.normal(0, 0.5, size=(4, 1))
y_outliers = rng.normal(0, 2.0, size=4)
X_outliers[:2, :] += X.max() + X.mean() / 4.
X_outliers[2:, :] += X.min() - X.mean() / 4.
y_outliers[:2] += y.min() - y.mean() / 4.
y_outliers[2:] += y.max() + y.mean() / 4.
X = np.vstack((X, X_outliers))
y = np.concatenate((y, y_outliers))
plt.plot(X, y, 'b.')
# Fit the huber regressor over a series of epsilon values.
colors = ['r-', 'b-', 'y-', 'm-']
x = np.linspace(X.min(), X.max(), 7)
epsilon_values = [1.35, 1.5, 1.75, 1.9]
for k, epsilon in enumerate(epsilon_values):
huber = HuberRegressor(fit_intercept=True, alpha=0.0, max_iter=100,
epsilon=epsilon)
huber.fit(X, y)
coef_ = huber.coef_ * x + huber.intercept_
plt.plot(x, coef_, colors[k], label="huber loss, %s" % epsilon)
# Fit a ridge regressor to compare it to huber regressor.
ridge = Ridge(fit_intercept=True, alpha=0.0, random_state=0, normalize=True)
ridge.fit(X, y)
coef_ridge = ridge.coef_
coef_ = ridge.coef_ * x + ridge.intercept_
plt.plot(x, coef_, 'g-', label="ridge regression")
plt.title("Comparison of HuberRegressor vs Ridge")
plt.xlabel("X")
plt.ylabel("y")
plt.legend(loc=0)
plt.show()
|
bsd-3-clause
|
vitaly-krugl/nupic
|
examples/opf/tools/MirrorImageViz/mirrorImageViz.py
|
50
|
7221
|
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
# Author: Surabhi Gupta
import sys
import numpy as np
import matplotlib.pylab as pyl
def analyzeOverlaps(activeCoincsFile, encodingsFile, dataset):
'''Mirror Image Visualization: Shows the encoding space juxtaposed against the
coincidence space. The encoding space is the bottom-up sensory encoding and
the coincidence space depicts the corresponding activation of coincidences in
the SP. Hence, the mirror image visualization is a visual depiction of the
mapping of SP cells to the input representations.
Note:
* The files spBUOut and sensorBUOut are assumed to be in the output format
used for LPF experiment outputs.
* BU outputs for some sample datasets are provided. Specify the name of the
dataset as an option while running this script.
'''
lines = activeCoincsFile.readlines()
inputs = encodingsFile.readlines()
w = len(inputs[0].split(' '))-1
patterns = set([])
encodings = set([])
coincs = [] #The set of all coincidences that have won at least once
reUsedCoincs = []
firstLine = inputs[0].split(' ')
size = int(firstLine.pop(0))
spOutput = np.zeros((len(lines),40))
inputBits = np.zeros((len(lines),w))
print 'Total n:', size
print 'Total number of records in the file:', len(lines), '\n'
print 'w:', w
count = 0
for x in xrange(len(lines)):
inputSpace = [] #Encoded representation for each input
spBUout = [int(z) for z in lines[x].split(' ')]
spBUout.pop(0) #The first element of each row of spBUOut is the size of the SP
temp = set(spBUout)
spOutput[x]=spBUout
input = [int(z) for z in inputs[x].split(' ')]
input.pop(0) #The first element of each row of sensorBUout is the size of the encoding space
tempInput = set(input)
inputBits[x]=input
#Creating the encoding space
for m in xrange(size):
if m in tempInput:
inputSpace.append(m)
else:
inputSpace.append('|') #A non-active bit
repeatedBits = tempInput.intersection(encodings) #Storing the bits that have been previously active
reUsed = temp.intersection(patterns) #Checking if any of the active cells have been previously active
#Dividing the coincidences into two difference categories.
if len(reUsed)==0:
coincs.append((count,temp,repeatedBits,inputSpace, tempInput)) #Pattern no, active cells, repeated bits, encoding (full), encoding (summary)
else:
reUsedCoincs.append((count,temp,repeatedBits,inputSpace, tempInput))
patterns=patterns.union(temp) #Adding the active cells to the set of coincs that have been active at least once
encodings = encodings.union(tempInput)
count +=1
overlap = {}
overlapVal = 0
seen = []
seen = (printOverlaps(coincs, coincs, seen))
print len(seen), 'sets of 40 cells'
seen = printOverlaps(reUsedCoincs, coincs, seen)
Summ=[]
for z in coincs:
c=0
for y in reUsedCoincs:
c += len(z[1].intersection(y[1]))
Summ.append(c)
print 'Sum: ', Summ
for m in xrange(3):
displayLimit = min(51, len(spOutput[m*200:]))
if displayLimit>0:
drawFile(dataset, np.zeros([len(inputBits[:(m+1)*displayLimit]),len(inputBits[:(m+1)*displayLimit])]), inputBits[:(m+1)*displayLimit], spOutput[:(m+1)*displayLimit], w, m+1)
else:
print 'No more records to display'
pyl.show()
def drawFile(dataset, matrix, patterns, cells, w, fnum):
'''The similarity of two patterns in the bit-encoding space is displayed alongside
their similarity in the sp-coinc space.'''
score=0
count = 0
assert len(patterns)==len(cells)
for p in xrange(len(patterns)-1):
matrix[p+1:,p] = [len(set(patterns[p]).intersection(set(q)))*100/w for q in patterns[p+1:]]
matrix[p,p+1:] = [len(set(cells[p]).intersection(set(r)))*5/2 for r in cells[p+1:]]
score += sum(abs(np.array(matrix[p+1:,p])-np.array(matrix[p,p+1:])))
count += len(matrix[p+1:,p])
print 'Score', score/count
fig = pyl.figure(figsize = (10,10), num = fnum)
pyl.matshow(matrix, fignum = fnum)
pyl.colorbar()
pyl.title('Coincidence Space', verticalalignment='top', fontsize=12)
pyl.xlabel('The Mirror Image Visualization for '+dataset, fontsize=17)
pyl.ylabel('Encoding space', fontsize=12)
def printOverlaps(comparedTo, coincs, seen):
""" Compare the results and return True if success, False if failure
Parameters:
--------------------------------------------------------------------
coincs: Which cells are we comparing?
    comparedTo: The set of 40 cells being compared to (they have no overlap with seen)
seen: Which of the cells we are comparing to have already been encountered.
This helps glue together the unique and reused coincs
"""
inputOverlap = 0
cellOverlap = 0
for y in comparedTo:
closestInputs = []
closestCells = []
if len(seen)>0:
inputOverlap = max([len(seen[m][1].intersection(y[4])) for m in xrange(len(seen))])
cellOverlap = max([len(seen[m][0].intersection(y[1])) for m in xrange(len(seen))])
for m in xrange( len(seen) ):
if len(seen[m][1].intersection(y[4]))==inputOverlap:
closestInputs.append(seen[m][2])
if len(seen[m][0].intersection(y[1]))==cellOverlap:
closestCells.append(seen[m][2])
seen.append((y[1], y[4], y[0]))
print 'Pattern',y[0]+1,':',' '.join(str(len(z[1].intersection(y[1]))).rjust(2) for z in coincs),'input overlap:', inputOverlap, ';', len(closestInputs), 'closest encodings:',','.join(str(m+1) for m in closestInputs).ljust(15), \
'cell overlap:', cellOverlap, ';', len(closestCells), 'closest set(s):',','.join(str(m+1) for m in closestCells)
return seen
if __name__=='__main__':
    if len(sys.argv)<2: # no dataset argument was given
print ('Input files required. Read documentation for details.')
else:
dataset = sys.argv[1]
activeCoincsPath = dataset+'/'+dataset+'_spBUOut.txt'
encodingsPath = dataset+'/'+dataset+'_sensorBUOut.txt'
activeCoincsFile=open(activeCoincsPath, 'r')
encodingsFile=open(encodingsPath, 'r')
analyzeOverlaps(activeCoincsFile, encodingsFile, dataset)
|
agpl-3.0
|
abhishekkrthakur/scikit-learn
|
sklearn/cluster/spectral.py
|
1
|
17999
|
# -*- coding: utf-8 -*-
"""Algorithms for spectral clustering"""
# Author: Gael Varoquaux [email protected]
# Brian Cheung
# Wei LI <[email protected]>
# License: BSD 3 clause
import warnings
import numpy as np
from ..base import BaseEstimator, ClusterMixin
from ..utils import check_random_state, as_float_array
from ..utils.validation import check_array
from ..utils.extmath import norm
from ..metrics.pairwise import pairwise_kernels
from ..neighbors import kneighbors_graph
from ..manifold import spectral_embedding
from .k_means_ import k_means
def discretize(vectors, copy=True, max_svd_restarts=30, n_iter_max=20,
random_state=None):
"""Search for a partition matrix (clustering) which is closest to the
eigenvector embedding.
Parameters
----------
vectors : array-like, shape: (n_samples, n_clusters)
The embedding space of the samples.
copy : boolean, optional, default: True
Whether to copy vectors, or perform in-place normalization.
max_svd_restarts : int, optional, default: 30
Maximum number of attempts to restart SVD if convergence fails
    n_iter_max : int, optional, default: 20
Maximum number of iterations to attempt in rotation and partition
matrix search if machine precision convergence is not reached
random_state: int seed, RandomState instance, or None (default)
        A pseudo random number generator used for the initialization
        of the rotation matrix.
Returns
-------
labels : array of integers, shape: n_samples
The labels of the clusters.
References
----------
- Multiclass spectral clustering, 2003
Stella X. Yu, Jianbo Shi
http://www1.icsi.berkeley.edu/~stellayu/publication/doc/2003kwayICCV.pdf
Notes
-----
The eigenvector embedding is used to iteratively search for the
closest discrete partition. First, the eigenvector embedding is
normalized to the space of partition matrices. An optimal discrete
partition matrix closest to this normalized embedding multiplied by
an initial rotation is calculated. Fixing this discrete partition
matrix, an optimal rotation matrix is calculated. These two
calculations are performed until convergence. The discrete partition
matrix is returned as the clustering solution. Used in spectral
clustering, this method tends to be faster and more robust to random
initialization than k-means.
"""
from scipy.sparse import csc_matrix
from scipy.linalg import LinAlgError
random_state = check_random_state(random_state)
vectors = as_float_array(vectors, copy=copy)
eps = np.finfo(float).eps
n_samples, n_components = vectors.shape
# Normalize the eigenvectors to an equal length of a vector of ones.
# Reorient the eigenvectors to point in the negative direction with respect
# to the first element. This may have to do with constraining the
# eigenvectors to lie in a specific quadrant to make the discretization
# search easier.
norm_ones = np.sqrt(n_samples)
for i in range(vectors.shape[1]):
vectors[:, i] = (vectors[:, i] / norm(vectors[:, i])) \
* norm_ones
if vectors[0, i] != 0:
vectors[:, i] = -1 * vectors[:, i] * np.sign(vectors[0, i])
# Normalize the rows of the eigenvectors. Samples should lie on the unit
# hypersphere centered at the origin. This transforms the samples in the
# embedding space to the space of partition matrices.
vectors = vectors / np.sqrt((vectors ** 2).sum(axis=1))[:, np.newaxis]
svd_restarts = 0
has_converged = False
# If there is an exception we try to randomize and rerun SVD again
# do this max_svd_restarts times.
while (svd_restarts < max_svd_restarts) and not has_converged:
# Initialize first column of rotation matrix with a row of the
# eigenvectors
rotation = np.zeros((n_components, n_components))
rotation[:, 0] = vectors[random_state.randint(n_samples), :].T
# To initialize the rest of the rotation matrix, find the rows
# of the eigenvectors that are as orthogonal to each other as
# possible
c = np.zeros(n_samples)
for j in range(1, n_components):
# Accumulate c to ensure row is as orthogonal as possible to
# previous picks as well as current one
c += np.abs(np.dot(vectors, rotation[:, j - 1]))
rotation[:, j] = vectors[c.argmin(), :].T
last_objective_value = 0.0
n_iter = 0
while not has_converged:
n_iter += 1
t_discrete = np.dot(vectors, rotation)
labels = t_discrete.argmax(axis=1)
vectors_discrete = csc_matrix(
(np.ones(len(labels)), (np.arange(0, n_samples), labels)),
shape=(n_samples, n_components))
t_svd = vectors_discrete.T * vectors
try:
U, S, Vh = np.linalg.svd(t_svd)
svd_restarts += 1
except LinAlgError:
print("SVD did not converge, randomizing and trying again")
break
ncut_value = 2.0 * (n_samples - S.sum())
if ((abs(ncut_value - last_objective_value) < eps) or
(n_iter > n_iter_max)):
has_converged = True
else:
# otherwise calculate rotation and continue
last_objective_value = ncut_value
rotation = np.dot(Vh.T, U.T)
if not has_converged:
raise LinAlgError('SVD did not converge')
return labels
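# --- Editorial example (added; not part of the original module) --------------
# Hedged sketch of discretize(): it maps a spectral embedding of shape
# (n_samples, n_clusters) to hard labels. A random matrix stands in for the
# output of spectral_embedding here, purely to illustrate the expected shapes.
def _example_discretize():
    import numpy as np

    rng = np.random.RandomState(0)
    embedding = rng.rand(12, 3)              # 12 samples, 3 "clusters"
    labels = discretize(embedding, random_state=rng)
    return labels                            # one integer in {0, 1, 2} per sample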
def spectral_clustering(affinity, n_clusters=8, n_components=None,
eigen_solver=None, random_state=None, n_init=10,
eigen_tol=0.0, assign_labels='kmeans'):
"""Apply clustering to a projection to the normalized laplacian.
In practice Spectral Clustering is very useful when the structure of
the individual clusters is highly non-convex or more generally when
a measure of the center and spread of the cluster is not a suitable
description of the complete cluster. For instance when clusters are
nested circles on the 2D plan.
If affinity is the adjacency matrix of a graph, this method can be
used to find normalized graph cuts.
Parameters
-----------
affinity : array-like or sparse matrix, shape: (n_samples, n_samples)
The affinity matrix describing the relationship of the samples to
embed. **Must be symmetric**.
Possible examples:
- adjacency matrix of a graph,
- heat kernel of the pairwise distance matrix of the samples,
- symmetric k-nearest neighbours connectivity matrix of the samples.
n_clusters : integer, optional
Number of clusters to extract.
    n_components : integer, optional, default is n_clusters
        Number of eigenvectors to use for the spectral embedding
eigen_solver : {None, 'arpack', 'lobpcg', or 'amg'}
The eigenvalue decomposition strategy to use. AMG requires pyamg
to be installed. It can be faster on very large, sparse problems,
but may also lead to instabilities
random_state : int seed, RandomState instance, or None (default)
A pseudo random number generator used for the initialization
of the lobpcg eigen vectors decomposition when eigen_solver == 'amg'
and by the K-Means initialization.
n_init : int, optional, default: 10
Number of time the k-means algorithm will be run with different
centroid seeds. The final results will be the best output of
n_init consecutive runs in terms of inertia.
eigen_tol : float, optional, default: 0.0
Stopping criterion for eigendecomposition of the Laplacian matrix
when using arpack eigen_solver.
assign_labels : {'kmeans', 'discretize'}, default: 'kmeans'
The strategy to use to assign labels in the embedding
space. There are two ways to assign labels after the laplacian
embedding. k-means can be applied and is a popular choice. But it can
also be sensitive to initialization. Discretization is another
approach which is less sensitive to random initialization. See
the 'Multiclass spectral clustering' paper referenced below for
more details on the discretization approach.
Returns
-------
labels : array of integers, shape: n_samples
The labels of the clusters.
References
----------
- Normalized cuts and image segmentation, 2000
Jianbo Shi, Jitendra Malik
http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.160.2324
- A Tutorial on Spectral Clustering, 2007
Ulrike von Luxburg
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.165.9323
- Multiclass spectral clustering, 2003
Stella X. Yu, Jianbo Shi
http://www1.icsi.berkeley.edu/~stellayu/publication/doc/2003kwayICCV.pdf
Notes
------
    The graph should contain only one connected component; otherwise
    the results make little sense.
This algorithm solves the normalized cut for k=2: it is a
normalized spectral clustering.
"""
if assign_labels not in ('kmeans', 'discretize'):
raise ValueError("The 'assign_labels' parameter should be "
"'kmeans' or 'discretize', but '%s' was given"
% assign_labels)
random_state = check_random_state(random_state)
n_components = n_clusters if n_components is None else n_components
maps = spectral_embedding(affinity, n_components=n_components,
eigen_solver=eigen_solver,
random_state=random_state,
eigen_tol=eigen_tol, drop_first=False)
if assign_labels == 'kmeans':
_, labels, _ = k_means(maps, n_clusters, random_state=random_state,
n_init=n_init)
else:
labels = discretize(maps, random_state=random_state)
return labels
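# --- Editorial example (added; not part of the original module) --------------
# Hedged sketch of the functional API: build a symmetric affinity matrix with
# an RBF kernel and cluster it. make_blobs and gamma=0.1 are illustrative
# assumptions, not a recommended setting.
def _example_spectral_clustering():
    from sklearn.datasets import make_blobs
    from sklearn.metrics.pairwise import rbf_kernel

    X, _ = make_blobs(n_samples=60, centers=3, random_state=0)
    affinity = rbf_kernel(X, gamma=0.1)      # symmetric, non-negative
    labels = spectral_clustering(affinity, n_clusters=3, random_state=0)
    return labels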
class SpectralClustering(BaseEstimator, ClusterMixin):
"""Apply clustering to a projection to the normalized laplacian.
In practice Spectral Clustering is very useful when the structure of
the individual clusters is highly non-convex or more generally when
a measure of the center and spread of the cluster is not a suitable
description of the complete cluster. For instance when clusters are
nested circles on the 2D plan.
If affinity is the adjacency matrix of a graph, this method can be
used to find normalized graph cuts.
    When calling ``fit``, an affinity matrix is constructed using either a
    kernel function such as the Gaussian (aka RBF) kernel of the euclidean
    distance ``d(X, X)``::
np.exp(-gamma * d(X,X) ** 2)
or a k-nearest neighbors connectivity matrix.
Alternatively, using ``precomputed``, a user-provided affinity
matrix can be used.
Parameters
-----------
n_clusters : integer, optional
The dimension of the projection subspace.
affinity : string, array-like or callable, default 'rbf'
If a string, this may be one of 'nearest_neighbors', 'precomputed',
'rbf' or one of the kernels supported by
`sklearn.metrics.pairwise_kernels`.
Only kernels that produce similarity scores (non-negative values that
increase with similarity) should be used. This property is not checked
by the clustering algorithm.
gamma : float
Scaling factor of RBF, polynomial, exponential chi^2 and
sigmoid affinity kernel. Ignored for
``affinity='nearest_neighbors'``.
degree : float, default=3
Degree of the polynomial kernel. Ignored by other kernels.
coef0 : float, default=1
Zero coefficient for polynomial and sigmoid kernels.
Ignored by other kernels.
n_neighbors : integer
Number of neighbors to use when constructing the affinity matrix using
the nearest neighbors method. Ignored for ``affinity='rbf'``.
eigen_solver : {None, 'arpack', 'lobpcg', or 'amg'}
The eigenvalue decomposition strategy to use. AMG requires pyamg
to be installed. It can be faster on very large, sparse problems,
but may also lead to instabilities
random_state : int seed, RandomState instance, or None (default)
A pseudo random number generator used for the initialization
of the lobpcg eigen vectors decomposition when eigen_solver == 'amg'
and by the K-Means initialization.
n_init : int, optional, default: 10
Number of time the k-means algorithm will be run with different
centroid seeds. The final results will be the best output of
n_init consecutive runs in terms of inertia.
eigen_tol : float, optional, default: 0.0
Stopping criterion for eigendecomposition of the Laplacian matrix
when using arpack eigen_solver.
assign_labels : {'kmeans', 'discretize'}, default: 'kmeans'
The strategy to use to assign labels in the embedding
space. There are two ways to assign labels after the laplacian
embedding. k-means can be applied and is a popular choice. But it can
also be sensitive to initialization. Discretization is another approach
which is less sensitive to random initialization.
kernel_params : dictionary of string to any, optional
Parameters (keyword arguments) and values for kernel passed as
callable object. Ignored by other kernels.
Attributes
----------
affinity_matrix_ : array-like, shape (n_samples, n_samples)
        Affinity matrix used for clustering. Available only after calling
``fit``.
labels_ :
Labels of each point
Notes
-----
If you have an affinity matrix, such as a distance matrix,
for which 0 means identical elements, and high values means
very dissimilar elements, it can be transformed in a
similarity matrix that is well suited for the algorithm by
applying the Gaussian (RBF, heat) kernel::
np.exp(- X ** 2 / (2. * delta ** 2))
Another alternative is to take a symmetric version of the k
nearest neighbors connectivity matrix of the points.
If the pyamg package is installed, it is used: this greatly
speeds up computation.
References
----------
- Normalized cuts and image segmentation, 2000
Jianbo Shi, Jitendra Malik
http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.160.2324
- A Tutorial on Spectral Clustering, 2007
Ulrike von Luxburg
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.165.9323
- Multiclass spectral clustering, 2003
Stella X. Yu, Jianbo Shi
http://www1.icsi.berkeley.edu/~stellayu/publication/doc/2003kwayICCV.pdf
"""
def __init__(self, n_clusters=8, eigen_solver=None, random_state=None,
n_init=10, gamma=1., affinity='rbf', n_neighbors=10,
eigen_tol=0.0, assign_labels='kmeans', degree=3, coef0=1,
kernel_params=None):
self.n_clusters = n_clusters
self.eigen_solver = eigen_solver
self.random_state = random_state
self.n_init = n_init
self.gamma = gamma
self.affinity = affinity
self.n_neighbors = n_neighbors
self.eigen_tol = eigen_tol
self.assign_labels = assign_labels
self.degree = degree
self.coef0 = coef0
self.kernel_params = kernel_params
def fit(self, X, y=None):
"""Creates an affinity matrix for X using the selected affinity,
then applies spectral clustering to this affinity matrix.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
OR, if affinity==`precomputed`, a precomputed affinity
matrix of shape (n_samples, n_samples)
"""
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'],
dtype=np.float64)
if X.shape[0] == X.shape[1] and self.affinity != "precomputed":
warnings.warn("The spectral clustering API has changed. ``fit``"
"now constructs an affinity matrix from data. To use"
" a custom affinity matrix, "
"set ``affinity=precomputed``.")
if self.affinity == 'nearest_neighbors':
connectivity = kneighbors_graph(X, n_neighbors=self.n_neighbors)
self.affinity_matrix_ = 0.5 * (connectivity + connectivity.T)
elif self.affinity == 'precomputed':
self.affinity_matrix_ = X
else:
params = self.kernel_params
if params is None:
params = {}
if not callable(self.affinity):
params['gamma'] = self.gamma
params['degree'] = self.degree
params['coef0'] = self.coef0
self.affinity_matrix_ = pairwise_kernels(X, metric=self.affinity,
filter_params=True,
**params)
random_state = check_random_state(self.random_state)
self.labels_ = spectral_clustering(self.affinity_matrix_,
n_clusters=self.n_clusters,
eigen_solver=self.eigen_solver,
random_state=random_state,
n_init=self.n_init,
eigen_tol=self.eigen_tol,
assign_labels=self.assign_labels)
return self
@property
def _pairwise(self):
return self.affinity == "precomputed"
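# --- Editorial example (added; not part of the original module) --------------
# Hedged sketch of the estimator with ``affinity='precomputed'``, using the
# Gaussian (RBF, heat) kernel from the Notes above to turn a distance matrix
# into a similarity matrix. make_circles and delta=0.2 are illustrative
# assumptions; delta is a free bandwidth parameter to tune.
def _example_spectral_clustering_estimator():
    import numpy as np
    from sklearn.datasets import make_circles
    from sklearn.metrics.pairwise import euclidean_distances

    X, _ = make_circles(n_samples=100, factor=.3, noise=.05, random_state=0)
    delta = 0.2
    D = euclidean_distances(X)
    affinity = np.exp(- D ** 2 / (2. * delta ** 2))
    model = SpectralClustering(n_clusters=2, affinity='precomputed',
                               random_state=0)
    return model.fit(affinity).labels_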
|
bsd-3-clause
|
DonBeo/scikit-learn
|
examples/decomposition/plot_kernel_pca.py
|
353
|
2011
|
"""
==========
Kernel PCA
==========
This example shows that Kernel PCA is able to find a projection of the data
that makes data linearly separable.
"""
print(__doc__)
# Authors: Mathieu Blondel
# Andreas Mueller
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA, KernelPCA
from sklearn.datasets import make_circles
np.random.seed(0)
X, y = make_circles(n_samples=400, factor=.3, noise=.05)
kpca = KernelPCA(kernel="rbf", fit_inverse_transform=True, gamma=10)
X_kpca = kpca.fit_transform(X)
X_back = kpca.inverse_transform(X_kpca)
pca = PCA()
X_pca = pca.fit_transform(X)
# Plot results
plt.figure()
plt.subplot(2, 2, 1, aspect='equal')
plt.title("Original space")
reds = y == 0
blues = y == 1
plt.plot(X[reds, 0], X[reds, 1], "ro")
plt.plot(X[blues, 0], X[blues, 1], "bo")
plt.xlabel("$x_1$")
plt.ylabel("$x_2$")
X1, X2 = np.meshgrid(np.linspace(-1.5, 1.5, 50), np.linspace(-1.5, 1.5, 50))
X_grid = np.array([np.ravel(X1), np.ravel(X2)]).T
# projection on the first principal component (in the phi space)
Z_grid = kpca.transform(X_grid)[:, 0].reshape(X1.shape)
plt.contour(X1, X2, Z_grid, colors='grey', linewidths=1, origin='lower')
plt.subplot(2, 2, 2, aspect='equal')
plt.plot(X_pca[reds, 0], X_pca[reds, 1], "ro")
plt.plot(X_pca[blues, 0], X_pca[blues, 1], "bo")
plt.title("Projection by PCA")
plt.xlabel("1st principal component")
plt.ylabel("2nd component")
plt.subplot(2, 2, 3, aspect='equal')
plt.plot(X_kpca[reds, 0], X_kpca[reds, 1], "ro")
plt.plot(X_kpca[blues, 0], X_kpca[blues, 1], "bo")
plt.title("Projection by KPCA")
plt.xlabel("1st principal component in space induced by $\phi$")
plt.ylabel("2nd component")
plt.subplot(2, 2, 4, aspect='equal')
plt.plot(X_back[reds, 0], X_back[reds, 1], "ro")
plt.plot(X_back[blues, 0], X_back[blues, 1], "bo")
plt.title("Original space after inverse transform")
plt.xlabel("$x_1$")
plt.ylabel("$x_2$")
plt.subplots_adjust(0.02, 0.10, 0.98, 0.94, 0.04, 0.35)
plt.show()
|
bsd-3-clause
|
rahulsrma26/code-gems
|
RL/nArmedBandit/eGreedySoftmaxCpp.py
|
1
|
1513
|
import time
from matplotlib import pyplot as plt
from cppLib.nArmedBandit import e_greedy, softmax
def experiment(n_runs, n_machines, n_steps, method='softmax', param=.1):
if method == 'softmax':
return softmax.run(n_runs, n_machines, n_steps, param)
return e_greedy.run(n_runs, n_machines, n_steps, param)
def main():
n_runs = 10000
n_machines = 10
n_steps = 1000
parameters = [('e_greedy', 0), ('e_greedy', .01), ('softmax', .2), ('e_greedy', .1)]
avg_rewards, optimal_actions = [], []
legends = []
start_time = time.time()
for i, (m, p) in enumerate(parameters):
print('\r {0:03.2f}%'.format(100*i/len(parameters)), end='')
r, a = experiment(n_runs, n_machines, n_steps, m, p)
avg_rewards.append(r)
optimal_actions.append(a)
legends.append('{0} param= {1}'.format(m, p))
print("\r{0} seconds.".format(time.time() - start_time))
for avg_reward in avg_rewards:
plt.plot(avg_reward)
plt.legend(legends, loc='upper left')
plt.ylabel('Average Reward')
plt.xlabel('Steps')
plt.title('Machines = {0}, Runs = {1}'.format(n_machines, n_runs))
plt.show()
for optimal_action in optimal_actions:
plt.plot(optimal_action)
plt.gca().set_ylim([0,100])
plt.legend(legends, loc='upper left')
plt.ylabel('% Optimal Action')
plt.xlabel('Steps')
plt.title('Machines = {0}, Runs = {1}'.format(n_machines, n_runs))
plt.show()
if __name__ == '__main__':
main()
|
mit
|
cwandtj/A2P2
|
180419_qCat_SurfAtomExt.py
|
1
|
23421
|
import pandas as pd
import numpy as np
import os
from sys import argv
import copy
import json
plot_angle = 0 # plot histogram of maximum angle between NN atom vectors.
class POSCAR:
theta = 90 # surface threshold in degree
ninfile = ''
lines = ''
line_list = []
compact_line = []
coordi_start = 0
CartPOSCAR = []
real_line = []
surface_index = []
header = []
lat_vec = []
atom_species = []
atom_numbers = []
total_atom = 0
nb_atoms_list = []
xmin = np.inf
xmax = -np.inf
ymin = np.inf
ymax = -np.inf
zmin = np.inf
zmax = -np.inf
xlen = -1
ylen = -1
zlen = -1
x1 = x2 = y1 = y2 = z1 = z2 = np.inf # 33% and 67% positions
maxveclen = 0
f_radius = 0
data = pd.DataFrame(columns=['xcoord', 'ycoord', 'zcoord', 'atom_num',
'mat_org', 'matnum_org', 'surf_flag', 'num_neighbor',
'nb_vector_x', 'nb_vector_y', 'nb_vector_z', 'nb_vector_sum'])
def input_check():
if len(argv) != 2 and len(argv) != 3:
print "\n###\tError!!\t###"
print "#"
print "#\tUSAGE1: > python3 SurfaceExtraction.py vasp_file_name.vasp"
print "#\t ex) python3 SurfaceExtraction.py SiO2.vasp"
print "#\n"
print "#\tUSAGE2: > python3 SurfaceExtraction.py vasp_file_name.vasp surface_threshold_angle_in_degree"
print "# ex) python3 SurfaceExtraction.py SiO2.vasp 60"
exit(1)
if len(argv) == 3:
POSCAR.theta = float(argv[2])
print 'surface threshold angle = ', POSCAR.theta
def readPOSCAR():
POSCAR.surf_th = 360-POSCAR.theta # surface threshold in degree (inside material)
POSCAR.lines = kCms['input']['lines']
POSCAR.line_list = POSCAR.lines.split('\n')
POSCAR.compact_line = [x for x in POSCAR.line_list if x != []]
POSCAR.coordi_start = 0
for line in POSCAR.compact_line:
if len(line.lower()) != 0:
if line.lower()[0] == 'd' or line.lower()[0] == 'c':
coordi_type = line.lower()
POSCAR.coordi_start = POSCAR.compact_line.index(line) + 1
POSCAR.header = POSCAR.compact_line[0:POSCAR.coordi_start]
for i in range(3):
POSCAR.lat_vec.append([float(x) for x in POSCAR.compact_line[2+i].split()])
if POSCAR.coordi_start == 8:
POSCAR.atom_species = POSCAR.compact_line[POSCAR.coordi_start - 3].split()
POSCAR.atom_numbers = [int(x) for x in POSCAR.compact_line[POSCAR.coordi_start - 2].split()]
else:
POSCAR.atom_species = POSCAR.compact_line[POSCAR.coordi_start - 4].split()
POSCAR.atom_numbers = [int(x) for x in POSCAR.compact_line[POSCAR.coordi_start - 3].split()]
POSCAR.total_atom = sum(POSCAR.atom_numbers)
scale_factor = float(POSCAR.compact_line[1])
matnum = 0
for i in range(POSCAR.total_atom):
raw_coords = [float(x) for x in POSCAR.compact_line[POSCAR.coordi_start + i].split()]
x_fact = raw_coords[0] * POSCAR.lat_vec[0][0] + raw_coords[1] * POSCAR.lat_vec[1][0] + raw_coords[2] * POSCAR.lat_vec[2][0]
y_fact = raw_coords[0] * POSCAR.lat_vec[0][1] + raw_coords[1] * POSCAR.lat_vec[1][1] + raw_coords[2] * POSCAR.lat_vec[2][1]
z_fact = raw_coords[0] * POSCAR.lat_vec[0][2] + raw_coords[1] * POSCAR.lat_vec[1][2] + raw_coords[2] * POSCAR.lat_vec[2][2]
if coordi_type[0] == 'd':
coords = [x_fact * scale_factor, y_fact * scale_factor, z_fact * scale_factor]
else:
coords = raw_coords
coords_str = [str(coords[0]), str(coords[1]), str(coords[2])]
POSCAR.real_line.append(' '.join(coords_str))
if coords[0] < POSCAR.xmin:
POSCAR.xmin = coords[0]
if coords[0] > POSCAR.xmax:
POSCAR.xmax = coords[0]
if coords[1] < POSCAR.ymin:
POSCAR.ymin = coords[1]
if coords[1] > POSCAR.ymax:
POSCAR.ymax = coords[1]
if coords[2] < POSCAR.zmin:
POSCAR.zmin = coords[2]
if coords[2] > POSCAR.zmax:
POSCAR.zmax = coords[2]
POSCAR.data.at[i, 'xcoord'] = coords[0]
POSCAR.data.at[i, 'ycoord'] = coords[1]
POSCAR.data.at[i, 'zcoord'] = coords[2]
POSCAR.data.at[i, 'atom_num'] = int(i+1)
if i >= sum(POSCAR.atom_numbers[0:matnum+1]):
matnum += 1
POSCAR.data.at[i, 'mat_org'] = POSCAR.atom_species[matnum]
POSCAR.data.at[i, 'matnum_org'] = POSCAR.atom_numbers[matnum]
POSCAR.data.at[i, 'surf_flag'] = 0
POSCAR.xlen = POSCAR.xmax - POSCAR.xmin + 1 # +1 is to avoid atom overlapping
POSCAR.ylen = POSCAR.ymax - POSCAR.ymin + 1
POSCAR.zlen = POSCAR.zmax - POSCAR.zmin + 1
POSCAR.x1 = POSCAR.xmin + 0.33*POSCAR.xlen
POSCAR.x2 = POSCAR.xmax - 0.33*POSCAR.xlen
POSCAR.y1 = POSCAR.ymin + 0.33*POSCAR.ylen
POSCAR.y2 = POSCAR.ymax - 0.33*POSCAR.ylen
POSCAR.z1 = POSCAR.zmin + 0.33*POSCAR.zlen
POSCAR.z2 = POSCAR.zmax - 0.33*POSCAR.zlen
#print '\n#\tX range= %.2f ~ %.2f,\tx length= %.2f' %(POSCAR.xmin, POSCAR.xmax, POSCAR.xlen)
#print '#\tY range= %.2f ~ %.2f,\ty length= %.2f' %(POSCAR.ymin, POSCAR.ymax, POSCAR.ylen)
#print '#\tZ range= %.2f ~ %.2f,\tz length= %.2f' %(POSCAR.zmin, POSCAR.zmax, POSCAR.zlen)
return POSCAR.data
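# --- Editorial example (added; not part of the original script) --------------
# Hedged sketch of the fractional-to-cartesian conversion that readPOSCAR()
# performs element by element: cartesian = (fractional . lattice_vectors) *
# scale_factor. The lattice vectors and coordinates below are illustrative.
def _example_frac_to_cart():
    import numpy as np

    lat_vec = np.array([[4.0, 0.0, 0.0],
                        [0.0, 4.0, 0.0],
                        [0.0, 0.0, 6.0]])    # rows are the a, b, c vectors
    frac = np.array([0.5, 0.5, 0.25])        # direct (fractional) coordinates
    scale_factor = 1.0
    return np.dot(frac, lat_vec) * scale_factor   # -> array([2.0, 2.0, 1.5])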
def peakfind(X, Y, X_init, Y_final):
peakind = []
pos = X_init
while X[pos] <= Y_final:
kernal = [pos-3, pos-2, pos-1, pos, pos+1, pos+2, pos+3]
if pos-3 < 0:
kernal[0] = pos+3
if pos-2 < 0:
kernal[1] = pos+2
if pos-1 < 0:
kernal[2] = pos+1
y1 = Y[kernal[0]]
y2 = Y[kernal[1]]
y3 = Y[kernal[2]]
y4 = Y[kernal[3]]
y5 = Y[kernal[4]]
y6 = Y[kernal[5]]
y7 = Y[kernal[6]]
y_1 = [y1, y2, y3, y4, y5, y6, y7]
if (y4 == max(y_1)) and (y4 >= 0.2 * Y[0]):
x_2 = np.arange(pos*2-2, pos*2+3)
y_2 = np.zeros(5)
for i in range(5):
y_2[i] = Y[x_2[i]]
if y_2.max() > 0:
peakind.append(pos)
pos += 1
if len(peakind) < 2:
peakind.append(0)
peakind.append(0)
return peakind
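# Usage sketch for peakfind() (hypothetical arrays, shown only to illustrate
# the call made from strFFT() below):
#
#   peaks = peakfind(clipped_frequency, clipped_spectrum, 0, 10)
#   first_peak_freq = clipped_frequency[peaks[1]]
#
# peakfind() walks the spectrum with a 7-point window, keeps indices whose
# amplitude is a local maximum and at least 20% of the DC term Y[0], and pads
# the result with zeros so callers can always read peaks[1].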
def selfEvaluation(POSCAR = POSCAR):
if os.path.isfile('voro_input_single') is True:
os.remove('voro_input_single')
if os.path.isfile('voro_input_single.vol') is True:
os.remove('voro_input_single.vol')
noutfile = 'voro_input_single'
outfile = open(noutfile, 'w')
for i in range(POSCAR.total_atom):
outfile.write(str(i+1)+'\t'+str(POSCAR.data.xcoord.loc[i])
+'\t'+str(POSCAR.data.ycoord.loc[i])
+'\t'+str(POSCAR.data.zcoord.loc[i])+'\n')
outfile.close()
a = str(np.sqrt(POSCAR.lat_vec[0][0]**2 + POSCAR.lat_vec[0][1]**2 + POSCAR.lat_vec[0][2]**2))
b = str(np.sqrt(POSCAR.lat_vec[1][0]**2 + POSCAR.lat_vec[1][1]**2 + POSCAR.lat_vec[1][2]**2))
c = str(np.sqrt(POSCAR.lat_vec[2][0]**2 + POSCAR.lat_vec[2][1]**2 + POSCAR.lat_vec[2][2]**2))
lat_vec_xmin = min(POSCAR.lat_vec[0][0], POSCAR.lat_vec[1][0], POSCAR.lat_vec[2][0])
lat_vec_ymin = min(POSCAR.lat_vec[0][1], POSCAR.lat_vec[1][1], POSCAR.lat_vec[2][1])
lat_vec_zmin = min(POSCAR.lat_vec[0][2], POSCAR.lat_vec[1][2], POSCAR.lat_vec[2][2])
lat_vec_xmax = max(POSCAR.lat_vec[0][0], POSCAR.lat_vec[1][0], POSCAR.lat_vec[2][0])
lat_vec_ymax = max(POSCAR.lat_vec[0][1], POSCAR.lat_vec[1][1], POSCAR.lat_vec[2][1])
lat_vec_zmax = max(POSCAR.lat_vec[0][2], POSCAR.lat_vec[1][2], POSCAR.lat_vec[2][2])
cmd1 = '/vlab_data_c/solvers/voro++ -c "%i %q %n" '
cmd2 = '-o %s %s %s %s %s %s voro_input_single' \
%(min(POSCAR.xmin, lat_vec_xmin), max(POSCAR.xmax, lat_vec_xmax),
min(POSCAR.ymin, lat_vec_ymin), max(POSCAR.ymax, lat_vec_ymax),
min(POSCAR.zmin, lat_vec_zmin), max(POSCAR.zmax, lat_vec_zmax))
cmd = cmd1 + cmd2
os.system(cmd)
voro_single_list = [line.strip() for line in open('voro_input_single.vol')]
data_single = pd.DataFrame(columns=['xcoord', 'ycoord', 'zcoord', 'atom_num', 'nb_atoms_list'])
voro_single_list_len = len(voro_single_list)
for i in range(voro_single_list_len):
x = voro_single_list[i]
x_split = x.split()
data_single.at[i, 'xcoord'] = float(x_split[1])
data_single.at[i, 'ycoord'] = float(x_split[2])
data_single.at[i, 'zcoord'] = float(x_split[3])
data_single.at[i, 'atom_num'] = int(x_split[0])
data_single.at[i, 'nb_atoms_list'] = []
#print data_single.loc[i]
#print x_split[4:]
data_single.nb_atoms_list[i].append([int(j) for j in x_split[4:]])
vector_list = []
for i in range(voro_single_list_len):
self_position = np.array([data_single.xcoord.loc[i], data_single.ycoord.loc[i], data_single.zcoord.loc[i]])
#print 'i=', i
for k in data_single.nb_atoms_list[i][0]:
if (([i+1, k] not in vector_list) or ([k, i+1] not in vector_list)) and k >= 0:
index = int(data_single[data_single['atom_num'] == k].index[0])
nb_vec_x = data_single.xcoord.loc[index]
nb_vec_y = data_single.ycoord.loc[index]
nb_vec_z = data_single.zcoord.loc[index]
nb_vector = np.array([nb_vec_x, nb_vec_y, nb_vec_z]) - self_position
nb_vector_len = np.linalg.norm(nb_vector)
vector_list.append([i+1, k, nb_vector_len])
if nb_vector_len > POSCAR.maxveclen:
POSCAR.maxveclen = nb_vector_len
maxvec_1 = i
maxvec_2 = k
#print 'threshold vector length =', POSCAR.maxveclen
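# Note on the voro++ call above: the custom output format "%i %q %n" makes each
# line of voro_input_single.vol look like (hypothetical values)
#
#   3  1.25 0.00 2.70  1 2 5 7
#   id  x    y    z    neighbour atom ids
#
# which is what the x_split[0], x_split[1:4] and x_split[4:] slices parse.
# The longest neighbour vector found this way is stored as POSCAR.maxveclen and
# later used as the nearest-neighbour distance threshold.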
def strFFT():
### FFT in x, y, z-direction
### x-direction
gridsize = 1e-3
xmin_sc = POSCAR.data.xcoord.min()
xmax_sc = POSCAR.data.xcoord.max()
ymin_sc = POSCAR.data.ycoord.min()
ymax_sc = POSCAR.data.ycoord.max()
zmin_sc = POSCAR.data.zcoord.min()
zmax_sc = POSCAR.data.zcoord.max()
xnum = int((xmax_sc - xmin_sc)/gridsize + 1)
if xnum != 1:
W = np.zeros(xnum)
X = np.arange(xmin_sc, xmax_sc, gridsize)
X = np.append(X, xmax_sc)
for i in range(POSCAR.total_atom*27):
W[int((POSCAR.data.xcoord.loc[i]-xmin_sc)/gridsize)] = 1
spectrum = np.fft.fft(W)
frequency = np.fft.fftfreq(spectrum.size, d=gridsize)
index = np.where(frequency >= 0.)
clipped_spectrum = gridsize * spectrum[index].real
clipped_frequency = frequency[index]
### peak finding
peakind = peakfind(clipped_frequency, clipped_spectrum, 0, 10)
if clipped_frequency[peakind[1]] == 0:
vec_x = np.sqrt(POSCAR.maxveclen/3)
else:
vec_x = 1/clipped_frequency[peakind[1]]
else:
vec_x = np.sqrt(POSCAR.maxveclen/3)
### Y-direction
ynum = int((ymax_sc - ymin_sc)/gridsize + 1)
if ynum != 1:
W = np.zeros(ynum)
Y = np.arange(ymin_sc, ymax_sc, gridsize)
Y = np.append(Y, ymax_sc)
for i in range(POSCAR.total_atom*27):
W[int((POSCAR.data.ycoord.loc[i]-ymin_sc)/gridsize)] = 1
spectrum = np.fft.fft(W)
frequency = np.fft.fftfreq(spectrum.size, d=gridsize)
index = np.where(frequency >= 0.)
clipped_spectrum = gridsize * spectrum[index].real
clipped_frequency = frequency[index]
### peak finding
peakind = peakfind(clipped_frequency, clipped_spectrum, 0, 10)
if clipped_frequency[peakind[1]] == 0:
vec_y = np.sqrt(POSCAR.maxveclen/3)
else:
vec_y = 1/clipped_frequency[peakind[1]]
else:
vec_y = np.sqrt(POSCAR.maxveclen/3)
### Z-direction
znum = int((zmax_sc - zmin_sc)/gridsize + 1)
if znum != 1:
W = np.zeros(znum)
Z = np.arange(zmin_sc, zmax_sc, gridsize)
Z = np.append(Z, zmax_sc)
for i in range(POSCAR.total_atom*27):
W[int((POSCAR.data.zcoord.loc[i]-zmin_sc)/gridsize)] = 1
spectrum = np.fft.fft(W)
frequency = np.fft.fftfreq(spectrum.size, d=gridsize)
index = np.where(frequency >= 0.)
clipped_spectrum = gridsize * spectrum[index].real
clipped_frequency = frequency[index]
### peak finding
peakind = peakfind(clipped_frequency, clipped_spectrum, 0, 10)
if clipped_frequency[peakind[1]] == 0:
vec_z = np.sqrt(POSCAR.maxveclen/3)
else:
vec_z = 1/clipped_frequency[peakind[1]]
else:
vec_z = np.sqrt(POSCAR.maxveclen/3)
POSCAR.f_radius = np.linalg.norm([vec_x, vec_y, vec_z])
if POSCAR.f_radius == 0:
POSCAR.f_radius = POSCAR.maxveclen
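# Sketch of the spacing estimate used in strFFT() (standalone illustration with
# made-up numbers; the real code runs on the supercell coordinates):
#
#   import numpy as np
#   coords = np.array([0.0, 2.5, 5.0, 7.5])     # atoms spaced 2.5 apart
#   grid = 1e-3
#   W = np.zeros(int((coords.max() - coords.min())/grid + 1))
#   W[((coords - coords.min())/grid).astype(int)] = 1
#   spec = np.fft.fft(W)
#   freq = np.fft.fftfreq(spec.size, d=grid)
#
# The first positive-frequency peak sits near 1/2.5, so 1/peak_frequency
# recovers the dominant interatomic spacing along that axis; f_radius is the
# norm of the three per-axis spacings, with sqrt(maxveclen/3) used as a
# fallback when an axis is flat or no peak is found.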
def makeSupercell(POSCAR = POSCAR):
# create 26 dummy cells around the original one, in X, Y, Z directions.
tmpdata = copy.deepcopy(POSCAR.data)
supercell = pd.DataFrame()
shift = [-1, 0, 1]
for i in range(3): # x-direction
for j in range(3): # y-direction
for k in range(3): # z-direction
for m in range(POSCAR.data.xcoord.size):
tmpdata.at[m, 'xcoord'] = \
POSCAR.data.loc[m, 'xcoord'] + \
(POSCAR.lat_vec[0][0] + POSCAR.lat_vec[1][0] + POSCAR.lat_vec[2][0]) * shift[i]
tmpdata.at[m, 'ycoord'] = \
POSCAR.data.loc[m, 'ycoord'] + \
(POSCAR.lat_vec[0][1] + POSCAR.lat_vec[1][1] + POSCAR.lat_vec[2][1]) * shift[j]
tmpdata.at[m, 'zcoord'] = \
POSCAR.data.loc[m, 'zcoord'] + \
(POSCAR.lat_vec[0][2] + POSCAR.lat_vec[1][2] + POSCAR.lat_vec[2][2]) * shift[k]
supercell = supercell.append(tmpdata, ignore_index=True)
POSCAR.data = copy.deepcopy(supercell)
def runVoro(data = POSCAR):
if os.path.isfile('voro_input') is True:
os.remove('voro_input')
if os.path.isfile('voro_input.vol') is True:
os.remove('voro_input.vol')
noutfile = 'voro_input'
outfile = open(noutfile, 'w')
for i in range(data.total_atom * 27):
outfile.write(str(i+1)+'\t'+str(POSCAR.data.xcoord.loc[i])
+'\t'+str(POSCAR.data.ycoord.loc[i])
+'\t'+str(POSCAR.data.zcoord.loc[i])+'\n')
outfile.close()
a = np.sqrt(data.lat_vec[0][0]**2 + data.lat_vec[0][1]**2 + data.lat_vec[0][2]**2)
b = np.sqrt(data.lat_vec[1][0]**2 + data.lat_vec[1][1]**2 + data.lat_vec[1][2]**2)
c = np.sqrt(data.lat_vec[2][0]**2 + data.lat_vec[2][1]**2 + data.lat_vec[2][2]**2)
cmd1 = '/vlab_data_c/solvers/voro++ -c "%i %q %v %n %m" '
cmd2 = '-o -p %s %s %s %s %s %s voro_input' %(str(-1*a), str(2*a), str(-1*b), str(2*b), str(-1*c), str(2*c))
cmd = cmd1 + cmd2
os.system(cmd)
def chk_in_plane(i, self_position, vector_list):
#screening vector by magnitude
vector_in_frad = []
for k in vector_list:
nb_vec_x = POSCAR.data.xcoord.loc[k-1]
nb_vec_y = POSCAR.data.ycoord.loc[k-1]
nb_vec_z = POSCAR.data.zcoord.loc[k-1]
nb_vector = np.array([nb_vec_x, nb_vec_y, nb_vec_z]) - self_position
nb_vector_mag = np.linalg.norm(nb_vector)
if nb_vector_mag <= POSCAR.f_radius:
vector_in_frad.append(nb_vector)
vector_frad_len = len(vector_in_frad)
nb_vec = np.zeros(3*vector_frad_len).reshape(3, vector_frad_len)
count = 0
for k in range(vector_frad_len):
nb_vec[0][count] = vector_in_frad[k][0]
nb_vec[1][count] = vector_in_frad[k][1]
nb_vec[2][count] = vector_in_frad[k][2]
count += 1
if count == 0:
pass
else:
mean_x = np.mean(nb_vec[0, :])
mean_y = np.mean(nb_vec[1, :])
mean_z = np.mean(nb_vec[2, :])
mean_vector = np.array([[mean_x], [mean_y], [mean_z]])
if len(nb_vec[0]) > 1:
cov_mat = np.cov([nb_vec[0, :], nb_vec[1, :], nb_vec[2, :]])
eig_val, eig_vec = np.linalg.eig(cov_mat)
eig_pairs = [(np.abs(eig_val[ii]), eig_vec[:, ii]) for ii in range(len(eig_val))]
eig_pairs.sort(key=lambda x: x[0], reverse=True)
matrix_w = np.hstack((eig_pairs[0][1].reshape(3, 1), eig_pairs[1][1].reshape(3,1), eig_pairs[2][1].reshape(3, 1)))
transformed = matrix_w.T.dot(nb_vec)
transformed_eigvec = matrix_w.T.dot(eig_vec)
polar_min = np.inf
polar_max = -np.inf
for ii in range(vector_frad_len):
r = np.linalg.norm(transformed[:, ii])
polar = (np.arccos(transformed[2][ii]/r) - np.pi/2) * 180/np.pi
if polar > polar_max:
polar_max = polar
if polar < polar_min:
polar_min = polar
polar_delta = polar_max - polar_min
POSCAR.data.at[i, 'polar_delta'] = polar_delta
if polar_delta < POSCAR.theta * 0.5:
POSCAR.data.at[i, 'surf_flag'] = 3
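# The in-plane test above is a hand-rolled PCA: the neighbour vectors inside
# f_radius are rotated into their principal-component frame, and if their polar
# angles span less than theta/2 degrees the neighbourhood is (nearly) coplanar,
# so the atom is flagged as a surface atom (surf_flag = 3). Equivalent idea as
# a hypothetical standalone numpy sketch:
#
#   import numpy as np
#   vecs = np.random.rand(3, 12)                     # 12 neighbour vectors
#   eig_val, eig_vec = np.linalg.eig(np.cov(vecs))
#   order = np.argsort(eig_val)[::-1]
#   proj = eig_vec[:, order].T.dot(vecs)             # principal frame
#   r = np.linalg.norm(proj, axis=0)
#   polar = np.degrees(np.arccos(proj[2] / r) - np.pi/2)
#   is_planar = polar.max() - polar.min() < 30.0     # e.g. theta = 60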
def SurfaceExtraction(data = POSCAR):
voro_list = [line.strip() for line in open('voro_input.vol')]
voro_list_len = len(voro_list)
strFFT()
for i in range(voro_list_len):
x = voro_list[i]
data.nb_atoms_list.append([])
data.nb_atoms_list[i].append([int(j) for j in x.split()[5:-1]])
vector_maxinner = []
for i in range(POSCAR.total_atom*13, POSCAR.total_atom*14):
#print '############ atom ',i+1
vector_array = []
self_position = np.array([data.data.xcoord.loc[i], data.data.ycoord.loc[i], data.data.zcoord.loc[i]])
#self_position /= np.linalg.norm(self_position)
nn_list = [] # nearest neighbor list
#1st nearest neighbor
#print data.nb_atoms_list[i][0]
for k in data.nb_atoms_list[i][0]:
#1st nearest neighbor
nb_vec_x = data.data.xcoord.loc[k-1]
nb_vec_y = data.data.ycoord.loc[k-1]
nb_vec_z = data.data.zcoord.loc[k-1]
nb_vector = np.array([nb_vec_x, nb_vec_y, nb_vec_z]) - self_position
nb_vector_len = np.linalg.norm(nb_vector)
if nb_vector_len <= data.maxveclen:
nn_list.append(k)
nb_vector /= np.linalg.norm(nb_vector)
vector_array.append(nb_vector.tolist())
if nb_vector_len > data.maxveclen * 1.2: #safety factor +20%
POSCAR.data.at[i, 'surf_flag'] = 2
#2nd nearest neighbor
for k in data.nb_atoms_list[i][0]:
for m in data.nb_atoms_list[k-1][0]:
if (m not in nn_list) and (i != m-1):
nb_vec_x = data.data.xcoord.loc[m-1]
nb_vec_y = data.data.ycoord.loc[m-1]
nb_vec_z = data.data.zcoord.loc[m-1]
nb_vector = np.array([nb_vec_x, nb_vec_y, nb_vec_z]) - self_position
nb_vector_len = np.linalg.norm(nb_vector)
if nb_vector_len <= data.maxveclen:
nn_list.append(m)
nb_vector /= np.linalg.norm(nb_vector)
vector_array.append(nb_vector.tolist())
### PCA for in-plane check
chk_in_plane(i, self_position, nn_list)
vector_sum = np.sum(np.array(vector_array), axis=0)
vector_sum_mag = np.linalg.norm(vector_sum)
data.data.at[i, 'num_neighbor'] = len(data.nb_atoms_list[i][0])
data.data.at[i, 'nb_vector_x'] = vector_sum[0]
data.data.at[i, 'nb_vector_y'] = vector_sum[1]
data.data.at[i, 'nb_vector_z'] = vector_sum[2]
data.data.at[i, 'nb_vector_sum'] = vector_sum_mag
for ii in np.arange(0, len(vector_array)):
vector_inner = []
maxinner = -np.inf
mininner = np.inf
for jj in np.arange(0, len(vector_array)):
nb_inner = np.inner(vector_array[ii], vector_array[jj])
if nb_inner >1: nb_inner = 1
if nb_inner <-1: nb_inner = -1
if nb_inner > maxinner:
maxinner = nb_inner
if nb_inner < mininner:
mininner = nb_inner
if nb_inner >= np.cos(POSCAR.surf_th/2 * np.pi/180):
vector_inner.append(1)
else:
vector_inner.append(0)
#vector_maxinner.append(np.arccos(nb_inner)*180/np.pi)
vector_maxinner.append(np.arccos(mininner)*180/np.pi)
if 0 not in vector_inner:
POSCAR.data.at[i, 'surf_flag'] = 1
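# Summary of the flags set above for atoms in the central image of the 3x3x3
# supercell (indices total_atom*13 .. total_atom*14):
#   surf_flag = 1  some neighbour direction has every other 1st/2nd-shell
#                  neighbour direction within surf_th/2 = (360 - theta)/2
#                  degrees of it, i.e. an empty cone of half-angle theta/2
#                  opens on the opposite (vacuum) side
#   surf_flag = 2  a 1st-shell Voronoi neighbour sits farther away than
#                  maxveclen * 1.2
#   surf_flag = 3  the PCA coplanarity test in chk_in_plane() fired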
def writeCSV(input_filename, POSCAR = POSCAR):
input_filename = argv[1]
noutfile = 'Surf_' + input_filename
data_out = POSCAR.data[POSCAR.total_atom*13:POSCAR.total_atom*14]
POSCAR.data.to_csv(noutfile[:-5] + '_supercell.csv', index=False)
data_out.to_csv(noutfile[:-5] + '.csv', index=False)
def writeList(POSCAR = POSCAR):
if os.path.isfile('surfatoms.txt') is True:
os.remove('surfatoms.txt')
noutfile = 'surfatoms.txt'
outfile = open(noutfile, 'w')
count = 0
for i in range(POSCAR.total_atom*13, POSCAR.total_atom*14):
if POSCAR.data.surf_flag.loc[i] > 0:
if count != 0:
outfile.write(',')
outfile.write(str(POSCAR.data.atom_num.loc[i]))
count += 1
outfile.close()
print 'number of surface atoms = ', count
def outSurface():
POSCAR.CartPOSCAR = POSCAR.header + POSCAR.real_line
POSCAR.CartPOSCAR[POSCAR.coordi_start-1] = 'cartesian'
for i in range(POSCAR.total_atom*13, POSCAR.total_atom*14):
if POSCAR.data.surf_flag.loc[i] > 0:
POSCAR.surface_index.append(POSCAR.data.atom_num.loc[i]-1)
output = {}
output['lines'] = '\n'.join(POSCAR.CartPOSCAR)
output['Surface_index'] = POSCAR.surface_index
print json.dumps(output)
def main():
#input_check()
readPOSCAR()
selfEvaluation()
makeSupercell()
runVoro()
SurfaceExtraction()
#writeCSV()
#writeList()
outSurface()
if __name__ == '__main__':
main()
|
mit
|
zycdragonball/tensorflow
|
tensorflow/contrib/timeseries/examples/known_anomaly.py
|
53
|
6786
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Example of using an exogenous feature to ignore a known anomaly."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
from os import path
import numpy as np
import tensorflow as tf
try:
import matplotlib # pylint: disable=g-import-not-at-top
matplotlib.use("TkAgg") # Need Tk for interactive plots.
from matplotlib import pyplot # pylint: disable=g-import-not-at-top
HAS_MATPLOTLIB = True
except ImportError:
# Plotting requires matplotlib, but the unit test running this code may
# execute in an environment without it (i.e. matplotlib is not a build
# dependency). We'd still like to test the TensorFlow-dependent parts of this
# example, namely train_and_predict.
HAS_MATPLOTLIB = False
_MODULE_PATH = path.dirname(__file__)
_DATA_FILE = path.join(_MODULE_PATH, "data/changepoints.csv")
def train_and_evaluate_exogenous(csv_file_name=_DATA_FILE, train_steps=300):
"""Training, evaluating, and predicting on a series with changepoints."""
# Indicate the format of our exogenous feature, in this case a string
# representing a boolean value.
string_feature = tf.contrib.layers.sparse_column_with_keys(
column_name="is_changepoint", keys=["no", "yes"])
# Specify the way this feature is presented to the model, here using a one-hot
# encoding.
one_hot_feature = tf.contrib.layers.one_hot_column(
sparse_id_column=string_feature)
estimator = tf.contrib.timeseries.StructuralEnsembleRegressor(
periodicities=12,
# Extract a smooth period by constraining the number of latent values
# being cycled between.
cycle_num_latent_values=3,
num_features=1,
exogenous_feature_columns=[one_hot_feature],
# Make exogenous updates sparse by setting an update condition. This in
# effect allows missing exogenous features: if the condition evaluates to
# False, no update is performed. Otherwise we sometimes end up with
# "leaky" updates which add unnecessary uncertainty to the model even when
# there is no changepoint.
exogenous_update_condition=
lambda times, features: tf.equal(features["is_changepoint"], "yes"))
reader = tf.contrib.timeseries.CSVReader(
csv_file_name,
# Indicate the format of our CSV file. First we have two standard columns,
# one for times and one for values. The third column is a custom exogenous
# feature indicating whether each timestep is a changepoint. The
# changepoint feature name must match the string_feature column name
# above.
column_names=(tf.contrib.timeseries.TrainEvalFeatures.TIMES,
tf.contrib.timeseries.TrainEvalFeatures.VALUES,
"is_changepoint"),
# Indicate dtypes for our features.
column_dtypes=(tf.int64, tf.float32, tf.string),
# This CSV has a header line; here we just ignore it.
skip_header_lines=1)
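  # The CSV referenced above is expected to look roughly like the following
  # (hypothetical rows shown for illustration; the real data is
  # data/changepoints.csv):
  #
  #   time,<value column>,is_changepoint
  #   0,1.04,no
  #   1,1.12,no
  #   2,3.87,yes
  #
  # i.e. an int64 time column, a float32 value column and a string changepoint
  # flag. The header line is skipped here but is parsed again further down by
  # csv.DictReader, which relies on the "time" and "is_changepoint" names.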
train_input_fn = tf.contrib.timeseries.RandomWindowInputFn(
# Use truncated backpropagation with a window size of 64, batching
# together 4 of these windows (random offsets) per training step. Training
# with exogenous features often requires somewhat larger windows.
reader, batch_size=4, window_size=64)
estimator.train(input_fn=train_input_fn, steps=train_steps)
evaluation_input_fn = tf.contrib.timeseries.WholeDatasetInputFn(reader)
evaluation = estimator.evaluate(input_fn=evaluation_input_fn, steps=1)
# Create an input_fn for prediction, with a simulated changepoint. Since all
# of the anomalies in the training data are explained by the exogenous
# feature, we should get relatively confident predictions before the indicated
# changepoint (since we are telling the model that no changepoint exists at
# those times) and relatively uncertain predictions after.
(predictions,) = tuple(estimator.predict(
input_fn=tf.contrib.timeseries.predict_continuation_input_fn(
evaluation, steps=100,
exogenous_features={
"is_changepoint": [["no"] * 49 + ["yes"] + ["no"] * 50]})))
times = evaluation["times"][0]
observed = evaluation["observed"][0, :, 0]
mean = np.squeeze(np.concatenate(
[evaluation["mean"][0], predictions["mean"]], axis=0))
variance = np.squeeze(np.concatenate(
[evaluation["covariance"][0], predictions["covariance"]], axis=0))
all_times = np.concatenate([times, predictions["times"]], axis=0)
upper_limit = mean + np.sqrt(variance)
lower_limit = mean - np.sqrt(variance)
# Indicate the locations of the changepoints for plotting vertical lines.
anomaly_locations = []
with open(csv_file_name, "r") as csv_file:
csv_reader = csv.DictReader(csv_file)
for row in csv_reader:
if row["is_changepoint"] == "yes":
anomaly_locations.append(int(row["time"]))
anomaly_locations.append(predictions["times"][49])
return (times, observed, all_times, mean, upper_limit, lower_limit,
anomaly_locations)
def make_plot(name, training_times, observed, all_times, mean,
upper_limit, lower_limit, anomaly_locations):
"""Plot the time series and anomalies in a new figure."""
pyplot.figure()
pyplot.plot(training_times, observed, "b", label="training series")
pyplot.plot(all_times, mean, "r", label="forecast")
pyplot.axvline(anomaly_locations[0], linestyle="dotted", label="changepoints")
for anomaly_location in anomaly_locations[1:]:
pyplot.axvline(anomaly_location, linestyle="dotted")
pyplot.fill_between(all_times, lower_limit, upper_limit, color="grey",
alpha="0.2")
pyplot.axvline(training_times[-1], color="k", linestyle="--")
pyplot.xlabel("time")
pyplot.ylabel("observations")
pyplot.legend(loc=0)
pyplot.title(name)
def main(unused_argv):
if not HAS_MATPLOTLIB:
raise ImportError(
"Please install matplotlib to generate a plot from this example.")
make_plot("Ignoring a known anomaly", *train_and_evaluate_exogenous())
pyplot.show()
if __name__ == "__main__":
tf.app.run(main=main)
|
apache-2.0
|
CallaJun/hackprince
|
indico/mpl_toolkits/mplot3d/axes3d.py
|
7
|
92718
|
#!/usr/bin/python
# axes3d.py, original mplot3d version by John Porter
# Created: 23 Sep 2005
# Parts fixed by Reinier Heeres <[email protected]>
# Minor additions by Ben Axelrod <[email protected]>
# Significant updates and revisions by Ben Root <[email protected]>
"""
Module containing Axes3D, an object which can plot 3D objects on a
2D matplotlib figure.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import math
import six
from six.moves import map, xrange, zip, reduce
import warnings
from operator import itemgetter
import matplotlib.axes as maxes
from matplotlib.axes import Axes, rcParams
from matplotlib import cbook
import matplotlib.transforms as mtransforms
from matplotlib.transforms import Bbox
import matplotlib.collections as mcoll
from matplotlib import docstring
import matplotlib.scale as mscale
from matplotlib.tri.triangulation import Triangulation
import numpy as np
from matplotlib.colors import Normalize, colorConverter, LightSource
from . import art3d
from . import proj3d
from . import axis3d
def unit_bbox():
box = Bbox(np.array([[0, 0], [1, 1]]))
return box
class Axes3D(Axes):
"""
3D axes object.
"""
name = '3d'
_shared_z_axes = cbook.Grouper()
def __init__(self, fig, rect=None, *args, **kwargs):
'''
Build an :class:`Axes3D` instance in
:class:`~matplotlib.figure.Figure` *fig* with
*rect=[left, bottom, width, height]* in
:class:`~matplotlib.figure.Figure` coordinates
Optional keyword arguments:
================ =========================================
Keyword Description
================ =========================================
*azim* Azimuthal viewing angle (default -60)
*elev* Elevation viewing angle (default 30)
*zscale* [%(scale)s]
*sharez* Other axes to share z-limits with
================ =========================================
.. versionadded :: 1.2.1
*sharez*
''' % {'scale': ' | '.join([repr(x) for x in mscale.get_scale_names()])}
if rect is None:
rect = [0.0, 0.0, 1.0, 1.0]
self._cids = []
self.initial_azim = kwargs.pop('azim', -60)
self.initial_elev = kwargs.pop('elev', 30)
zscale = kwargs.pop('zscale', None)
sharez = kwargs.pop('sharez', None)
self.xy_viewLim = unit_bbox()
self.zz_viewLim = unit_bbox()
self.xy_dataLim = unit_bbox()
self.zz_dataLim = unit_bbox()
        # inhibit autoscale_view until the axes are defined
# they can't be defined until Axes.__init__ has been called
self.view_init(self.initial_elev, self.initial_azim)
self._ready = 0
self._sharez = sharez
if sharez is not None:
self._shared_z_axes.join(self, sharez)
self._adjustable = 'datalim'
Axes.__init__(self, fig, rect,
frameon=True,
*args, **kwargs)
# Disable drawing of axes by base class
Axes.set_axis_off(self)
# Enable drawing of axes by Axes3D class
self.set_axis_on()
self.M = None
# func used to format z -- fall back on major formatters
self.fmt_zdata = None
if zscale is not None :
self.set_zscale(zscale)
if self.zaxis is not None :
self._zcid = self.zaxis.callbacks.connect('units finalize',
self.relim)
else :
self._zcid = None
self._ready = 1
self.mouse_init()
self.set_top_view()
self.axesPatch.set_linewidth(0)
# Calculate the pseudo-data width and height
pseudo_bbox = self.transLimits.inverted().transform([(0, 0), (1, 1)])
self._pseudo_w, self._pseudo_h = pseudo_bbox[1] - pseudo_bbox[0]
self.figure.add_axes(self)
def set_axis_off(self):
self._axis3don = False
def set_axis_on(self):
self._axis3don = True
def have_units(self):
"""
Return *True* if units are set on the *x*, *y*, or *z* axes
"""
return (self.xaxis.have_units() or self.yaxis.have_units() or
self.zaxis.have_units())
def convert_zunits(self, z):
"""
For artists in an axes, if the zaxis has units support,
convert *z* using zaxis unit type
.. versionadded :: 1.2.1
"""
return self.zaxis.convert_units(z)
def _process_unit_info(self, xdata=None, ydata=None, zdata=None,
kwargs=None):
"""
Look for unit *kwargs* and update the axis instances as necessary
"""
Axes._process_unit_info(self, xdata=xdata, ydata=ydata, kwargs=kwargs)
if self.xaxis is None or self.yaxis is None or self.zaxis is None:
return
if zdata is not None:
# we only need to update if there is nothing set yet.
if not self.zaxis.have_units():
self.zaxis.update_units(xdata)
# process kwargs 2nd since these will override default units
if kwargs is not None:
zunits = kwargs.pop('zunits', self.zaxis.units)
if zunits != self.zaxis.units:
self.zaxis.set_units(zunits)
# If the units being set imply a different converter,
# we need to update.
if zdata is not None:
self.zaxis.update_units(zdata)
def set_top_view(self):
# this happens to be the right view for the viewing coordinates
# moved up and to the left slightly to fit labels and axes
xdwl = (0.95/self.dist)
xdw = (0.9/self.dist)
ydwl = (0.95/self.dist)
ydw = (0.9/self.dist)
# This is purposely using the 2D Axes's set_xlim and set_ylim,
# because we are trying to place our viewing pane.
Axes.set_xlim(self, -xdwl, xdw, auto=None)
Axes.set_ylim(self, -ydwl, ydw, auto=None)
def _init_axis(self):
'''Init 3D axes; overrides creation of regular X/Y axes'''
self.w_xaxis = axis3d.XAxis('x', self.xy_viewLim.intervalx,
self.xy_dataLim.intervalx, self)
self.xaxis = self.w_xaxis
self.w_yaxis = axis3d.YAxis('y', self.xy_viewLim.intervaly,
self.xy_dataLim.intervaly, self)
self.yaxis = self.w_yaxis
self.w_zaxis = axis3d.ZAxis('z', self.zz_viewLim.intervalx,
self.zz_dataLim.intervalx, self)
self.zaxis = self.w_zaxis
for ax in self.xaxis, self.yaxis, self.zaxis:
ax.init3d()
def get_children(self):
return [self.zaxis,] + Axes.get_children(self)
def unit_cube(self, vals=None):
minx, maxx, miny, maxy, minz, maxz = vals or self.get_w_lims()
xs, ys, zs = ([minx, maxx, maxx, minx, minx, maxx, maxx, minx],
[miny, miny, maxy, maxy, miny, miny, maxy, maxy],
[minz, minz, minz, minz, maxz, maxz, maxz, maxz])
return list(zip(xs, ys, zs))
def tunit_cube(self, vals=None, M=None):
if M is None:
M = self.M
xyzs = self.unit_cube(vals)
tcube = proj3d.proj_points(xyzs, M)
return tcube
def tunit_edges(self, vals=None, M=None):
tc = self.tunit_cube(vals, M)
edges = [(tc[0], tc[1]),
(tc[1], tc[2]),
(tc[2], tc[3]),
(tc[3], tc[0]),
(tc[0], tc[4]),
(tc[1], tc[5]),
(tc[2], tc[6]),
(tc[3], tc[7]),
(tc[4], tc[5]),
(tc[5], tc[6]),
(tc[6], tc[7]),
(tc[7], tc[4])]
return edges
def draw(self, renderer):
# draw the background patch
self.axesPatch.draw(renderer)
self._frameon = False
# add the projection matrix to the renderer
self.M = self.get_proj()
renderer.M = self.M
renderer.vvec = self.vvec
renderer.eye = self.eye
renderer.get_axis_position = self.get_axis_position
# Calculate projection of collections and zorder them
zlist = [(col.do_3d_projection(renderer), col) \
for col in self.collections]
zlist.sort(key=itemgetter(0), reverse=True)
for i, (z, col) in enumerate(zlist):
col.zorder = i
# Calculate projection of patches and zorder them
zlist = [(patch.do_3d_projection(renderer), patch) \
for patch in self.patches]
zlist.sort(key=itemgetter(0), reverse=True)
for i, (z, patch) in enumerate(zlist):
patch.zorder = i
if self._axis3don:
axes = (self.xaxis, self.yaxis, self.zaxis)
# Draw panes first
for ax in axes:
ax.draw_pane(renderer)
# Then axes
for ax in axes:
ax.draw(renderer)
# Then rest
Axes.draw(self, renderer)
def get_axis_position(self):
vals = self.get_w_lims()
tc = self.tunit_cube(vals, self.M)
xhigh = tc[1][2] > tc[2][2]
yhigh = tc[3][2] > tc[2][2]
zhigh = tc[0][2] > tc[2][2]
return xhigh, yhigh, zhigh
def update_datalim(self, xys, **kwargs):
pass
def get_autoscale_on(self) :
"""
Get whether autoscaling is applied for all axes on plot commands
.. versionadded :: 1.1.0
This function was added, but not tested. Please report any bugs.
"""
return Axes.get_autoscale_on(self) and self.get_autoscalez_on()
def get_autoscalez_on(self) :
"""
Get whether autoscaling for the z-axis is applied on plot commands
.. versionadded :: 1.1.0
This function was added, but not tested. Please report any bugs.
"""
return self._autoscaleZon
def set_autoscale_on(self, b) :
"""
Set whether autoscaling is applied on plot commands
accepts: [ *True* | *False* ]
.. versionadded :: 1.1.0
This function was added, but not tested. Please report any bugs.
"""
Axes.set_autoscale_on(self, b)
self.set_autoscalez_on(b)
def set_autoscalez_on(self, b) :
"""
Set whether autoscaling for the z-axis is applied on plot commands
accepts: [ *True* | *False* ]
.. versionadded :: 1.1.0
This function was added, but not tested. Please report any bugs.
"""
        self._autoscaleZon = b
def set_zmargin(self, m) :
"""
Set padding of Z data limits prior to autoscaling.
*m* times the data interval will be added to each
end of that interval before it is used in autoscaling.
accepts: float in range 0 to 1
.. versionadded :: 1.1.0
This function was added, but not tested. Please report any bugs.
"""
if m < 0 or m > 1 :
raise ValueError("margin must be in range 0 to 1")
self._zmargin = m
def margins(self, *args, **kw) :
"""
Convenience method to set or retrieve autoscaling margins.
signatures::
margins()
returns xmargin, ymargin, zmargin
::
margins(margin)
margins(xmargin, ymargin, zmargin)
margins(x=xmargin, y=ymargin, z=zmargin)
margins(..., tight=False)
All forms above set the xmargin, ymargin and zmargin
parameters. All keyword parameters are optional. A single argument
specifies xmargin, ymargin and zmargin. The *tight* parameter
is passed to :meth:`autoscale_view`, which is executed after
a margin is changed; the default here is *True*, on the
assumption that when margins are specified, no additional
padding to match tick marks is usually desired. Setting
*tight* to *None* will preserve the previous setting.
Specifying any margin changes only the autoscaling; for example,
if *xmargin* is not None, then *xmargin* times the X data
interval will be added to each end of that interval before
it is used in autoscaling.
.. versionadded :: 1.1.0
This function was added, but not tested. Please report any bugs.
"""
if not args and not kw:
return self._xmargin, self._ymargin, self._zmargin
tight = kw.pop('tight', True)
mx = kw.pop('x', None)
my = kw.pop('y', None)
mz = kw.pop('z', None)
if len(args) == 1:
mx = my = mz = args[0]
elif len(args) == 2:
# Maybe put out a warning because mz is not set?
mx, my = args
elif len(args) == 3:
mx, my, mz = args
else:
raise ValueError("more than three arguments were supplied")
if mx is not None:
self.set_xmargin(mx)
if my is not None:
self.set_ymargin(my)
if mz is not None:
self.set_zmargin(mz)
scalex = (mx is not None)
scaley = (my is not None)
scalez = (mz is not None)
self.autoscale_view(tight=tight, scalex=scalex, scaley=scaley,
scalez=scalez)
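    # Usage sketch for margins() on a hypothetical Axes3D instance `ax`:
    #   ax.margins(0.05)             # 5% padding on x, y and z
    #   ax.margins(x=0.1, z=0.0)     # per-axis margins
    #   xm, ym, zm = ax.margins()    # query the current margins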
def autoscale(self, enable=True, axis='both', tight=None) :
"""
Convenience method for simple axis view autoscaling.
See :meth:`matplotlib.axes.Axes.autoscale` for full explanation.
Note that this function behaves the same, but for all
        three axes. Therefore, 'z' can be passed for *axis*,
and 'both' applies to all three axes.
.. versionadded :: 1.1.0
This function was added, but not tested. Please report any bugs.
"""
if enable is None:
scalex = True
scaley = True
scalez = True
else:
scalex = False
scaley = False
scalez = False
if axis in ['x', 'both']:
self._autoscaleXon = bool(enable)
scalex = self._autoscaleXon
if axis in ['y', 'both']:
self._autoscaleYon = bool(enable)
scaley = self._autoscaleYon
if axis in ['z', 'both']:
self._autoscaleZon = bool(enable)
scalez = self._autoscaleZon
self.autoscale_view(tight=tight, scalex=scalex, scaley=scaley,
scalez=scalez)
def auto_scale_xyz(self, X, Y, Z=None, had_data=None):
x, y, z = list(map(np.asarray, (X, Y, Z)))
try:
x, y = x.flatten(), y.flatten()
if Z is not None:
z = z.flatten()
except AttributeError:
raise
# This updates the bounding boxes as to keep a record as
# to what the minimum sized rectangular volume holds the
# data.
self.xy_dataLim.update_from_data_xy(np.array([x, y]).T, not had_data)
if z is not None:
self.zz_dataLim.update_from_data_xy(np.array([z, z]).T, not had_data)
# Let autoscale_view figure out how to use this data.
self.autoscale_view()
def autoscale_view(self, tight=None, scalex=True, scaley=True,
scalez=True) :
"""
Autoscale the view limits using the data limits.
See :meth:`matplotlib.axes.Axes.autoscale_view` for documentation.
Note that this function applies to the 3D axes, and as such
adds the *scalez* to the function arguments.
.. versionchanged :: 1.1.0
Function signature was changed to better match the 2D version.
*tight* is now explicitly a kwarg and placed first.
.. versionchanged :: 1.2.1
This is now fully functional.
"""
if not self._ready:
return
# This method looks at the rectangular volume (see above)
# of data and decides how to scale the view portal to fit it.
if tight is None:
# if image data only just use the datalim
_tight = self._tight or (len(self.images)>0 and
len(self.lines)==0 and
len(self.patches)==0)
else:
_tight = self._tight = bool(tight)
if scalex and self._autoscaleXon:
xshared = self._shared_x_axes.get_siblings(self)
dl = [ax.dataLim for ax in xshared]
bb = mtransforms.BboxBase.union(dl)
x0, x1 = self.xy_dataLim.intervalx
xlocator = self.xaxis.get_major_locator()
try:
x0, x1 = xlocator.nonsingular(x0, x1)
except AttributeError:
x0, x1 = mtransforms.nonsingular(x0, x1, increasing=False,
expander=0.05)
if self._xmargin > 0:
delta = (x1 - x0) * self._xmargin
x0 -= delta
x1 += delta
if not _tight:
x0, x1 = xlocator.view_limits(x0, x1)
self.set_xbound(x0, x1)
if scaley and self._autoscaleYon:
yshared = self._shared_y_axes.get_siblings(self)
dl = [ax.dataLim for ax in yshared]
bb = mtransforms.BboxBase.union(dl)
y0, y1 = self.xy_dataLim.intervaly
ylocator = self.yaxis.get_major_locator()
try:
y0, y1 = ylocator.nonsingular(y0, y1)
except AttributeError:
y0, y1 = mtransforms.nonsingular(y0, y1, increasing=False,
expander=0.05)
if self._ymargin > 0:
delta = (y1 - y0) * self._ymargin
y0 -= delta
y1 += delta
if not _tight:
y0, y1 = ylocator.view_limits(y0, y1)
self.set_ybound(y0, y1)
if scalez and self._autoscaleZon:
zshared = self._shared_z_axes.get_siblings(self)
dl = [ax.dataLim for ax in zshared]
bb = mtransforms.BboxBase.union(dl)
z0, z1 = self.zz_dataLim.intervalx
zlocator = self.zaxis.get_major_locator()
try:
z0, z1 = zlocator.nonsingular(z0, z1)
except AttributeError:
z0, z1 = mtransforms.nonsingular(z0, z1, increasing=False,
expander=0.05)
if self._zmargin > 0:
delta = (z1 - z0) * self._zmargin
z0 -= delta
z1 += delta
if not _tight:
z0, z1 = zlocator.view_limits(z0, z1)
self.set_zbound(z0, z1)
def get_w_lims(self):
'''Get 3D world limits.'''
minx, maxx = self.get_xlim3d()
miny, maxy = self.get_ylim3d()
minz, maxz = self.get_zlim3d()
return minx, maxx, miny, maxy, minz, maxz
def _determine_lims(self, xmin=None, xmax=None, *args, **kwargs):
if xmax is None and cbook.iterable(xmin):
xmin, xmax = xmin
if xmin == xmax:
xmin -= 0.05
xmax += 0.05
return (xmin, xmax)
def set_xlim3d(self, left=None, right=None, emit=True, auto=False, **kw):
"""
Set 3D x limits.
See :meth:`matplotlib.axes.Axes.set_xlim` for full documentation.
"""
if 'xmin' in kw:
left = kw.pop('xmin')
if 'xmax' in kw:
right = kw.pop('xmax')
if kw:
raise ValueError("unrecognized kwargs: %s" % kw.keys())
if right is None and cbook.iterable(left):
left, right = left
self._process_unit_info(xdata=(left, right))
if left is not None:
left = self.convert_xunits(left)
if right is not None:
right = self.convert_xunits(right)
old_left, old_right = self.get_xlim()
if left is None:
left = old_left
if right is None:
right = old_right
if left == right:
warnings.warn(('Attempting to set identical left==right results\n'
'in singular transformations; automatically expanding.\n'
'left=%s, right=%s') % (left, right))
left, right = mtransforms.nonsingular(left, right, increasing=False)
left, right = self.xaxis.limit_range_for_scale(left, right)
self.xy_viewLim.intervalx = (left, right)
if auto is not None:
self._autoscaleXon = bool(auto)
if emit:
self.callbacks.process('xlim_changed', self)
# Call all of the other x-axes that are shared with this one
for other in self._shared_x_axes.get_siblings(self):
if other is not self:
other.set_xlim(self.xy_viewLim.intervalx,
emit=False, auto=auto)
if (other.figure != self.figure and
other.figure.canvas is not None):
other.figure.canvas.draw_idle()
return left, right
set_xlim = set_xlim3d
def set_ylim3d(self, bottom=None, top=None, emit=True, auto=False, **kw):
"""
Set 3D y limits.
See :meth:`matplotlib.axes.Axes.set_ylim` for full documentation.
"""
if 'ymin' in kw:
bottom = kw.pop('ymin')
if 'ymax' in kw:
top = kw.pop('ymax')
if kw:
raise ValueError("unrecognized kwargs: %s" % kw.keys())
if top is None and cbook.iterable(bottom):
bottom, top = bottom
self._process_unit_info(ydata=(bottom, top))
if bottom is not None:
bottom = self.convert_yunits(bottom)
if top is not None:
top = self.convert_yunits(top)
old_bottom, old_top = self.get_ylim()
if bottom is None:
bottom = old_bottom
if top is None:
top = old_top
if top == bottom:
warnings.warn(('Attempting to set identical bottom==top results\n'
'in singular transformations; automatically expanding.\n'
'bottom=%s, top=%s') % (bottom, top))
bottom, top = mtransforms.nonsingular(bottom, top, increasing=False)
bottom, top = self.yaxis.limit_range_for_scale(bottom, top)
self.xy_viewLim.intervaly = (bottom, top)
if auto is not None:
self._autoscaleYon = bool(auto)
if emit:
self.callbacks.process('ylim_changed', self)
# Call all of the other y-axes that are shared with this one
for other in self._shared_y_axes.get_siblings(self):
if other is not self:
other.set_ylim(self.xy_viewLim.intervaly,
emit=False, auto=auto)
if (other.figure != self.figure and
other.figure.canvas is not None):
other.figure.canvas.draw_idle()
return bottom, top
set_ylim = set_ylim3d
def set_zlim3d(self, bottom=None, top=None, emit=True, auto=False, **kw):
"""
Set 3D z limits.
See :meth:`matplotlib.axes.Axes.set_ylim` for full documentation
"""
if 'zmin' in kw:
bottom = kw.pop('zmin')
if 'zmax' in kw:
top = kw.pop('zmax')
if kw:
raise ValueError("unrecognized kwargs: %s" % kw.keys())
if top is None and cbook.iterable(bottom):
bottom, top = bottom
self._process_unit_info(zdata=(bottom, top))
if bottom is not None:
bottom = self.convert_zunits(bottom)
if top is not None:
top = self.convert_zunits(top)
old_bottom, old_top = self.get_zlim()
if bottom is None:
bottom = old_bottom
if top is None:
top = old_top
if top == bottom:
warnings.warn(('Attempting to set identical bottom==top results\n'
'in singular transformations; automatically expanding.\n'
'bottom=%s, top=%s') % (bottom, top))
bottom, top = mtransforms.nonsingular(bottom, top, increasing=False)
bottom, top = self.zaxis.limit_range_for_scale(bottom, top)
self.zz_viewLim.intervalx = (bottom, top)
if auto is not None:
self._autoscaleZon = bool(auto)
if emit:
self.callbacks.process('zlim_changed', self)
            # Call all of the other z-axes that are shared with this one
for other in self._shared_z_axes.get_siblings(self):
if other is not self:
other.set_zlim(self.zz_viewLim.intervalx,
emit=False, auto=auto)
if (other.figure != self.figure and
other.figure.canvas is not None):
other.figure.canvas.draw_idle()
return bottom, top
set_zlim = set_zlim3d
def get_xlim3d(self):
return self.xy_viewLim.intervalx
get_xlim3d.__doc__ = maxes.Axes.get_xlim.__doc__
get_xlim = get_xlim3d
get_xlim.__doc__ += """
.. versionchanged :: 1.1.0
This function now correctly refers to the 3D x-limits
"""
def get_ylim3d(self):
return self.xy_viewLim.intervaly
get_ylim3d.__doc__ = maxes.Axes.get_ylim.__doc__
get_ylim = get_ylim3d
get_ylim.__doc__ += """
.. versionchanged :: 1.1.0
This function now correctly refers to the 3D y-limits.
"""
def get_zlim3d(self):
'''Get 3D z limits.'''
return self.zz_viewLim.intervalx
get_zlim = get_zlim3d
def get_zscale(self) :
"""
Return the zaxis scale string %s
.. versionadded :: 1.1.0
This function was added, but not tested. Please report any bugs.
""" % (", ".join(mscale.get_scale_names()))
return self.zaxis.get_scale()
# We need to slightly redefine these to pass scalez=False
# to their calls of autoscale_view.
def set_xscale(self, value, **kwargs) :
self.xaxis._set_scale(value, **kwargs)
self.autoscale_view(scaley=False, scalez=False)
self._update_transScale()
set_xscale.__doc__ = maxes.Axes.set_xscale.__doc__ + """
.. versionadded :: 1.1.0
This function was added, but not tested. Please report any bugs.
"""
def set_yscale(self, value, **kwargs) :
self.yaxis._set_scale(value, **kwargs)
self.autoscale_view(scalex=False, scalez=False)
self._update_transScale()
set_yscale.__doc__ = maxes.Axes.set_yscale.__doc__ + """
.. versionadded :: 1.1.0
This function was added, but not tested. Please report any bugs.
"""
@docstring.dedent_interpd
def set_zscale(self, value, **kwargs) :
"""
call signature::
set_zscale(value)
Set the scaling of the z-axis: %(scale)s
ACCEPTS: [%(scale)s]
Different kwargs are accepted, depending on the scale:
%(scale_docs)s
.. note ::
            Currently, Axes3D objects only support linear scales.
Other scales may or may not work, and support for these
is improving with each release.
.. versionadded :: 1.1.0
This function was added, but not tested. Please report any bugs.
"""
self.zaxis._set_scale(value, **kwargs)
self.autoscale_view(scalex=False, scaley=False)
self._update_transScale()
def set_zticks(self, *args, **kwargs):
"""
Set z-axis tick locations.
See :meth:`matplotlib.axes.Axes.set_yticks` for more details.
.. note::
Minor ticks are not supported.
.. versionadded:: 1.1.0
"""
return self.zaxis.set_ticks(*args, **kwargs)
def get_zticks(self, minor=False):
"""
Return the z ticks as a list of locations
See :meth:`matplotlib.axes.Axes.get_yticks` for more details.
.. note::
Minor ticks are not supported.
.. versionadded:: 1.1.0
"""
return self.zaxis.get_ticklocs(minor=minor)
def get_zmajorticklabels(self) :
"""
Get the ztick labels as a list of Text instances
.. versionadded :: 1.1.0
"""
return cbook.silent_list('Text zticklabel',
self.zaxis.get_majorticklabels())
def get_zminorticklabels(self) :
"""
Get the ztick labels as a list of Text instances
.. note::
Minor ticks are not supported. This function was added
only for completeness.
.. versionadded :: 1.1.0
"""
return cbook.silent_list('Text zticklabel',
self.zaxis.get_minorticklabels())
def set_zticklabels(self, *args, **kwargs) :
"""
Set z-axis tick labels.
See :meth:`matplotlib.axes.Axes.set_yticklabels` for more details.
.. note::
Minor ticks are not supported by Axes3D objects.
.. versionadded:: 1.1.0
"""
return self.zaxis.set_ticklabels(*args, **kwargs)
def get_zticklabels(self, minor=False) :
"""
Get ztick labels as a list of Text instances.
See :meth:`matplotlib.axes.Axes.get_yticklabels` for more details.
.. note::
Minor ticks are not supported.
.. versionadded:: 1.1.0
"""
return cbook.silent_list('Text zticklabel',
self.zaxis.get_ticklabels(minor=minor))
def zaxis_date(self, tz=None) :
"""
Sets up z-axis ticks and labels that treat the z data as dates.
*tz* is a timezone string or :class:`tzinfo` instance.
Defaults to rc value.
.. note::
This function is merely provided for completeness.
Axes3D objects do not officially support dates for ticks,
and so this may or may not work as expected.
.. versionadded :: 1.1.0
This function was added, but not tested. Please report any bugs.
"""
self.zaxis.axis_date(tz)
def get_zticklines(self) :
"""
Get ztick lines as a list of Line2D instances.
Note that this function is provided merely for completeness.
These lines are re-calculated as the display changes.
.. versionadded:: 1.1.0
"""
return self.zaxis.get_ticklines()
def clabel(self, *args, **kwargs):
"""
This function is currently not implemented for 3D axes.
Returns *None*.
"""
return None
def view_init(self, elev=None, azim=None):
"""
Set the elevation and azimuth of the axes.
        This can be used to rotate the axes programmatically.
'elev' stores the elevation angle in the z plane.
'azim' stores the azimuth angle in the x,y plane.
if elev or azim are None (default), then the initial value
is used which was specified in the :class:`Axes3D` constructor.
"""
self.dist = 10
if elev is None:
self.elev = self.initial_elev
else:
self.elev = elev
if azim is None:
self.azim = self.initial_azim
else:
self.azim = azim
def get_proj(self):
"""
Create the projection matrix from the current viewing position.
elev stores the elevation angle in the z plane
azim stores the azimuth angle in the x,y plane
dist is the distance of the eye viewing point from the object
point.
"""
relev, razim = np.pi * self.elev/180, np.pi * self.azim/180
xmin, xmax = self.get_xlim3d()
ymin, ymax = self.get_ylim3d()
zmin, zmax = self.get_zlim3d()
# transform to uniform world coordinates 0-1.0,0-1.0,0-1.0
worldM = proj3d.world_transformation(xmin, xmax,
ymin, ymax,
zmin, zmax)
# look into the middle of the new coordinates
R = np.array([0.5, 0.5, 0.5])
xp = R[0] + np.cos(razim) * np.cos(relev) * self.dist
yp = R[1] + np.sin(razim) * np.cos(relev) * self.dist
zp = R[2] + np.sin(relev) * self.dist
E = np.array((xp, yp, zp))
self.eye = E
self.vvec = R - E
self.vvec = self.vvec / proj3d.mod(self.vvec)
if abs(relev) > np.pi/2:
# upside down
V = np.array((0, 0, -1))
else:
V = np.array((0, 0, 1))
zfront, zback = -self.dist, self.dist
viewM = proj3d.view_transformation(E, R, V)
perspM = proj3d.persp_transformation(zfront, zback)
M0 = np.dot(viewM, worldM)
M = np.dot(perspM, M0)
return M
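    # In other words, the matrix returned above is the composition
    #     M = perspM * viewM * worldM
    # where worldM rescales the current data box to the unit cube, viewM looks
    # from the eye position E towards the box centre R, and perspM applies the
    # perspective projection between zfront and zback.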
def mouse_init(self, rotate_btn=1, zoom_btn=3):
"""Initializes mouse button callbacks to enable 3D rotation of
the axes. Also optionally sets the mouse buttons for 3D rotation
and zooming.
============ =======================================================
Argument Description
============ =======================================================
*rotate_btn* The integer or list of integers specifying which mouse
button or buttons to use for 3D rotation of the axes.
Default = 1.
*zoom_btn* The integer or list of integers specifying which mouse
button or buttons to use to zoom the 3D axes.
Default = 3.
============ =======================================================
"""
self.button_pressed = None
canv = self.figure.canvas
if canv is not None:
c1 = canv.mpl_connect('motion_notify_event', self._on_move)
c2 = canv.mpl_connect('button_press_event', self._button_press)
c3 = canv.mpl_connect('button_release_event', self._button_release)
self._cids = [c1, c2, c3]
else:
warnings.warn('Axes3D.figure.canvas is \'None\', mouse rotation disabled. Set canvas then call Axes3D.mouse_init().')
# coerce scalars into array-like, then convert into
# a regular list to avoid comparisons against None
# which breaks in recent versions of numpy.
self._rotate_btn = np.atleast_1d(rotate_btn).tolist()
self._zoom_btn = np.atleast_1d(zoom_btn).tolist()
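    # Usage sketch for mouse_init() on a hypothetical Axes3D instance `ax`:
    #   ax.mouse_init()                               # button 1 rotates, button 3 zooms
    #   ax.mouse_init(rotate_btn=[1, 2], zoom_btn=3)  # either button 1 or 2 rotates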
def can_zoom(self) :
"""
Return *True* if this axes supports the zoom box button functionality.
3D axes objects do not use the zoom box button.
"""
return False
def can_pan(self) :
"""
Return *True* if this axes supports the pan/zoom button functionality.
3D axes objects do not use the pan/zoom button.
"""
return False
def cla(self):
"""
Clear axes
"""
# Disabling mouse interaction might have been needed a long
# time ago, but I can't find a reason for it now - BVR (2012-03)
#self.disable_mouse_rotation()
self.zaxis.cla()
if self._sharez is not None:
self.zaxis.major = self._sharez.zaxis.major
self.zaxis.minor = self._sharez.zaxis.minor
z0, z1 = self._sharez.get_zlim()
self.set_zlim(z0, z1, emit=False, auto=None)
self.zaxis._set_scale(self._sharez.zaxis.get_scale())
else:
self.zaxis._set_scale('linear')
self._autoscaleZon = True
self._zmargin = 0
Axes.cla(self)
self.grid(rcParams['axes3d.grid'])
def disable_mouse_rotation(self):
"""Disable mouse button callbacks.
"""
# Disconnect the various events we set.
for cid in self._cids:
self.figure.canvas.mpl_disconnect(cid)
self._cids = []
def _button_press(self, event):
if event.inaxes == self:
self.button_pressed = event.button
self.sx, self.sy = event.xdata, event.ydata
def _button_release(self, event):
self.button_pressed = None
def format_zdata(self, z):
"""
Return *z* string formatted. This function will use the
:attr:`fmt_zdata` attribute if it is callable, else will fall
back on the zaxis major formatter
"""
try: return self.fmt_zdata(z)
except (AttributeError, TypeError):
func = self.zaxis.get_major_formatter().format_data_short
val = func(z)
return val
def format_coord(self, xd, yd):
"""
Given the 2D view coordinates attempt to guess a 3D coordinate.
Looks for the nearest edge to the point and then assumes that
the point is at the same z location as the nearest point on the edge.
"""
if self.M is None:
return ''
if self.button_pressed in self._rotate_btn:
return 'azimuth=%d deg, elevation=%d deg ' % (self.azim, self.elev)
# ignore xd and yd and display angles instead
p = (xd, yd)
edges = self.tunit_edges()
#lines = [proj3d.line2d(p0,p1) for (p0,p1) in edges]
ldists = [(proj3d.line2d_seg_dist(p0, p1, p), i) for \
i, (p0, p1) in enumerate(edges)]
ldists.sort()
# nearest edge
edgei = ldists[0][1]
p0, p1 = edges[edgei]
# scale the z value to match
x0, y0, z0 = p0
x1, y1, z1 = p1
d0 = np.hypot(x0-xd, y0-yd)
d1 = np.hypot(x1-xd, y1-yd)
dt = d0+d1
z = d1/dt * z0 + d0/dt * z1
x, y, z = proj3d.inv_transform(xd, yd, z, self.M)
xs = self.format_xdata(x)
ys = self.format_ydata(y)
zs = self.format_zdata(z)
return 'x=%s, y=%s, z=%s' % (xs, ys, zs)
def _on_move(self, event):
"""Mouse moving
button-1 rotates by default. Can be set explicitly in mouse_init().
button-3 zooms by default. Can be set explicitly in mouse_init().
"""
if not self.button_pressed:
return
if self.M is None:
return
x, y = event.xdata, event.ydata
# In case the mouse is out of bounds.
if x is None:
return
dx, dy = x - self.sx, y - self.sy
w = self._pseudo_w
h = self._pseudo_h
self.sx, self.sy = x, y
# Rotation
if self.button_pressed in self._rotate_btn:
# rotate viewing point
# get the x and y pixel coords
if dx == 0 and dy == 0:
return
self.elev = art3d.norm_angle(self.elev - (dy/h)*180)
self.azim = art3d.norm_angle(self.azim - (dx/w)*180)
self.get_proj()
self.figure.canvas.draw_idle()
# elif self.button_pressed == 2:
# pan view
# project xv,yv,zv -> xw,yw,zw
# pan
# pass
# Zoom
elif self.button_pressed in self._zoom_btn:
# zoom view
# hmmm..this needs some help from clipping....
minx, maxx, miny, maxy, minz, maxz = self.get_w_lims()
df = 1-((h - dy)/h)
dx = (maxx-minx)*df
dy = (maxy-miny)*df
dz = (maxz-minz)*df
self.set_xlim3d(minx - dx, maxx + dx)
self.set_ylim3d(miny - dy, maxy + dy)
self.set_zlim3d(minz - dz, maxz + dz)
self.get_proj()
self.figure.canvas.draw_idle()
def set_zlabel(self, zlabel, fontdict=None, labelpad=None, **kwargs):
'''
Set zlabel. See doc for :meth:`set_ylabel` for description.
.. note::
Currently, *labelpad* does not have an effect on the labels.
'''
# FIXME: With a rework of axis3d.py, the labelpad should work again
# At that point, remove the above message in the docs.
if labelpad is not None : self.zaxis.labelpad = labelpad
return self.zaxis.set_label_text(zlabel, fontdict, **kwargs)
def get_zlabel(self) :
"""
Get the z-label text string.
.. versionadded :: 1.1.0
This function was added, but not tested. Please report any bugs.
"""
label = self.zaxis.get_label()
return label.get_text()
#### Axes rectangle characteristics
def get_frame_on(self):
"""
Get whether the 3D axes panels are drawn
.. versionadded :: 1.1.0
"""
return self._frameon
def set_frame_on(self, b):
"""
Set whether the 3D axes panels are drawn
ACCEPTS: [ *True* | *False* ]
.. versionadded :: 1.1.0
"""
self._frameon = bool(b)
def get_axisbelow(self):
"""
Get whether axis below is true or not.
For axes3d objects, this will always be *True*
.. versionadded :: 1.1.0
This function was added for completeness.
"""
return True
def set_axisbelow(self, b):
"""
Set whether the axis ticks and gridlines are above or below
most artists
For axes3d objects, this will ignore any settings and just use *True*
ACCEPTS: [ *True* | *False* ]
.. versionadded :: 1.1.0
This function was added for completeness.
"""
self._axisbelow = True
def grid(self, b=True, **kwargs):
'''
Set / unset 3D grid.
.. note::
Currently, this function does not behave the same as
:meth:`matplotlib.axes.Axes.grid`, but it is intended to
eventually support that behavior.
.. versionchanged :: 1.1.0
This function was changed, but not tested. Please report any bugs.
'''
# TODO: Operate on each axes separately
if len(kwargs) :
b = True
self._draw_grid = cbook._string_to_bool(b)
def ticklabel_format(self, **kwargs) :
"""
Convenience method for manipulating the ScalarFormatter
        used by default for linear axes in Axes3D objects.
See :meth:`matplotlib.axes.Axes.ticklabel_format` for full
documentation. Note that this version applies to all three
axes of the Axes3D object. Therefore, the *axis* argument
will also accept a value of 'z' and the value of 'both' will
apply to all three axes.
.. versionadded :: 1.1.0
This function was added, but not tested. Please report any bugs.
"""
style = kwargs.pop('style', '').lower()
scilimits = kwargs.pop('scilimits', None)
useOffset = kwargs.pop('useOffset', None)
axis = kwargs.pop('axis', 'both').lower()
if scilimits is not None:
try:
m, n = scilimits
m+n+1 # check that both are numbers
except (ValueError, TypeError):
raise ValueError("scilimits must be a sequence of 2 integers")
if style[:3] == 'sci':
sb = True
elif style in ['plain', 'comma']:
sb = False
if style == 'plain':
cb = False
else:
cb = True
raise NotImplementedError("comma style remains to be added")
elif style == '':
sb = None
else:
raise ValueError("%s is not a valid style value")
try:
if sb is not None:
                if axis in ['both', 'x']:
self.xaxis.major.formatter.set_scientific(sb)
if axis in ['both', 'y']:
self.yaxis.major.formatter.set_scientific(sb)
if axis in ['both', 'z'] :
self.zaxis.major.formatter.set_scientific(sb)
if scilimits is not None:
if axis in ['both', 'x']:
self.xaxis.major.formatter.set_powerlimits(scilimits)
if axis in ['both', 'y']:
self.yaxis.major.formatter.set_powerlimits(scilimits)
if axis in ['both', 'z']:
self.zaxis.major.formatter.set_powerlimits(scilimits)
if useOffset is not None:
if axis in ['both', 'x']:
self.xaxis.major.formatter.set_useOffset(useOffset)
if axis in ['both', 'y']:
self.yaxis.major.formatter.set_useOffset(useOffset)
if axis in ['both', 'z']:
self.zaxis.major.formatter.set_useOffset(useOffset)
except AttributeError:
raise AttributeError(
"This method only works with the ScalarFormatter.")
def locator_params(self, axis='both', tight=None, **kwargs) :
"""
Convenience method for controlling tick locators.
See :meth:`matplotlib.axes.Axes.locator_params` for full
        documentation. Note that this is for Axes3D objects,
therefore, setting *axis* to 'both' will result in the
parameters being set for all three axes. Also, *axis*
can also take a value of 'z' to apply parameters to the
z axis.
.. versionadded :: 1.1.0
This function was added, but not tested. Please report any bugs.
"""
_x = axis in ['x', 'both']
_y = axis in ['y', 'both']
_z = axis in ['z', 'both']
if _x:
self.xaxis.get_major_locator().set_params(**kwargs)
if _y:
self.yaxis.get_major_locator().set_params(**kwargs)
if _z:
self.zaxis.get_major_locator().set_params(**kwargs)
self.autoscale_view(tight=tight, scalex=_x, scaley=_y, scalez=_z)
def tick_params(self, axis='both', **kwargs) :
"""
Convenience method for changing the appearance of ticks and
tick labels.
See :meth:`matplotlib.axes.Axes.tick_params` for more complete
documentation.
The only difference is that setting *axis* to 'both' will
mean that the settings are applied to all three axes. Also,
the *axis* parameter also accepts a value of 'z', which
would mean to apply to only the z-axis.
Also, because of how Axes3D objects are drawn very differently
from regular 2D axes, some of these settings may have
ambiguous meaning. For simplicity, the 'z' axis will
        accept settings as if it were the 'y' axis.
.. note::
While this function is currently implemented, the core part
of the Axes3D object may ignore some of these settings.
Future releases will fix this. Priority will be given to
those who file bugs.
.. versionadded :: 1.1.0
This function was added, but not tested. Please report any bugs.
"""
Axes.tick_params(self, axis, **kwargs)
if axis in ['z', 'both'] :
zkw = dict(kwargs)
zkw.pop('top', None)
zkw.pop('bottom', None)
zkw.pop('labeltop', None)
zkw.pop('labelbottom', None)
self.zaxis.set_tick_params(**zkw)
### data limits, ticks, tick labels, and formatting
def invert_zaxis(self):
"""
Invert the z-axis.
.. versionadded :: 1.1.0
This function was added, but not tested. Please report any bugs.
"""
bottom, top = self.get_zlim()
self.set_zlim(top, bottom, auto=None)
def zaxis_inverted(self):
'''
Returns True if the z-axis is inverted.
.. versionadded :: 1.1.0
This function was added, but not tested. Please report any bugs.
'''
bottom, top = self.get_zlim()
return top < bottom
def get_zbound(self):
"""
Returns the z-axis numerical bounds where::
lowerBound < upperBound
.. versionadded :: 1.1.0
This function was added, but not tested. Please report any bugs.
"""
bottom, top = self.get_zlim()
if bottom < top:
return bottom, top
else:
return top, bottom
def set_zbound(self, lower=None, upper=None):
"""
Set the lower and upper numerical bounds of the z-axis.
This method will honor axes inversion regardless of parameter order.
It will not change the :attr:`_autoscaleZon` attribute.
.. versionadded :: 1.1.0
This function was added, but not tested. Please report any bugs.
"""
if upper is None and cbook.iterable(lower):
lower,upper = lower
old_lower,old_upper = self.get_zbound()
if lower is None: lower = old_lower
if upper is None: upper = old_upper
if self.zaxis_inverted():
if lower < upper:
self.set_zlim(upper, lower, auto=None)
else:
self.set_zlim(lower, upper, auto=None)
else :
if lower < upper:
self.set_zlim(lower, upper, auto=None)
else :
self.set_zlim(upper, lower, auto=None)
def text(self, x, y, z, s, zdir=None, **kwargs):
'''
Add text to the plot. kwargs will be passed on to Axes.text,
except for the `zdir` keyword, which sets the direction to be
used as the z direction.
'''
text = Axes.text(self, x, y, s, **kwargs)
art3d.text_2d_to_3d(text, z, zdir)
return text
text3D = text
text2D = Axes.text
def plot(self, xs, ys, *args, **kwargs):
'''
Plot 2D or 3D data.
========== ================================================
Argument Description
========== ================================================
*xs*, *ys* X, y coordinates of vertices
*zs* z value(s), either one for all points or one for
each point.
*zdir* Which direction to use as z ('x', 'y' or 'z')
when plotting a 2D set.
========== ================================================
Other arguments are passed on to
:func:`~matplotlib.axes.Axes.plot`
'''
# FIXME: This argument parsing might be better handled
# when we set later versions of python for
# minimum requirements. Currently at 2.4.
# Note that some of the reason for the current difficulty
# is caused by the fact that we want to insert a new
# (semi-optional) positional argument 'Z' right before
# many other traditional positional arguments occur
# such as the color, linestyle and/or marker.
had_data = self.has_data()
zs = kwargs.pop('zs', 0)
zdir = kwargs.pop('zdir', 'z')
argsi = 0
# First argument is array of zs
if len(args) > 0 and cbook.iterable(args[0]) and \
len(xs) == len(args[0]) :
# So, we know that it is an array with
# first dimension the same as xs.
# Next, check to see if the data contained
# therein (if any) is scalar (and not another array).
if len(args[0]) == 0 or cbook.is_scalar(args[0][0]) :
zs = args[argsi]
argsi += 1
# First argument is z value
elif len(args) > 0 and cbook.is_scalar(args[0]):
zs = args[argsi]
argsi += 1
# Match length
if not cbook.iterable(zs):
zs = np.ones(len(xs)) * zs
lines = Axes.plot(self, xs, ys, *args[argsi:], **kwargs)
for line in lines:
art3d.line_2d_to_3d(line, zs=zs, zdir=zdir)
self.auto_scale_xyz(xs, ys, zs, had_data)
return lines
plot3D = plot
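# Usage sketch (illustrative only; not from the original source): plot() takes
# 2D data plus a 'zs' value (scalar or array) and a 'zdir' direction.
#
#     import numpy as np
#     import matplotlib.pyplot as plt
#     fig = plt.figure()
#     ax = fig.add_subplot(111, projection='3d')
#     theta = np.linspace(0, 4 * np.pi, 100)
#     ax.plot(np.cos(theta), np.sin(theta), zs=theta, zdir='z')
#     plt.show()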
def plot_surface(self, X, Y, Z, *args, **kwargs):
'''
Create a surface plot.
By default it will be colored in shades of a solid color,
but it also supports color mapping by supplying the *cmap*
argument.
The `rstride` and `cstride` kwargs set the stride used to
sample the input data to generate the graph. If 1k by 1k
arrays are passed in the default values for the strides will
result in a 100x100 grid being plotted.
============= ================================================
Argument Description
============= ================================================
*X*, *Y*, *Z* Data values as 2D arrays
*rstride* Array row stride (step size), defaults to 10
*cstride* Array column stride (step size), defaults to 10
*color* Color of the surface patches
*cmap* A colormap for the surface patches.
*facecolors* Face colors for the individual patches
*norm* An instance of Normalize to map values to colors
*vmin* Minimum value to map
*vmax* Maximum value to map
*shade* Whether to shade the facecolors
============= ================================================
Other arguments are passed on to
:class:`~mpl_toolkits.mplot3d.art3d.Poly3DCollection`
'''
had_data = self.has_data()
Z = np.atleast_2d(Z)
# TODO: Support masked arrays
X, Y, Z = np.broadcast_arrays(X, Y, Z)
rows, cols = Z.shape
rstride = kwargs.pop('rstride', 10)
cstride = kwargs.pop('cstride', 10)
if 'facecolors' in kwargs:
fcolors = kwargs.pop('facecolors')
else:
color = np.array(colorConverter.to_rgba(kwargs.pop('color', 'b')))
fcolors = None
cmap = kwargs.get('cmap', None)
norm = kwargs.pop('norm', None)
vmin = kwargs.pop('vmin', None)
vmax = kwargs.pop('vmax', None)
linewidth = kwargs.get('linewidth', None)
shade = kwargs.pop('shade', cmap is None)
lightsource = kwargs.pop('lightsource', None)
# Shade the data
if shade and cmap is not None and fcolors is not None:
fcolors = self._shade_colors_lightsource(Z, cmap, lightsource)
polys = []
# Only need these vectors to shade if there is no cmap
if cmap is None and shade :
totpts = int(np.ceil(float(rows - 1) / rstride) *
np.ceil(float(cols - 1) / cstride))
v1 = np.empty((totpts, 3))
v2 = np.empty((totpts, 3))
# This indexes the vertex points
which_pt = 0
#colset contains the data for coloring: either average z or the facecolor
colset = []
for rs in xrange(0, rows-1, rstride):
for cs in xrange(0, cols-1, cstride):
ps = []
for a in (X, Y, Z) :
ztop = a[rs,cs:min(cols, cs+cstride+1)]
zleft = a[rs+1:min(rows, rs+rstride+1),
min(cols-1, cs+cstride)]
zbase = a[min(rows-1, rs+rstride), cs:min(cols, cs+cstride+1):][::-1]
zright = a[rs:min(rows-1, rs+rstride):, cs][::-1]
z = np.concatenate((ztop, zleft, zbase, zright))
ps.append(z)
# The construction leaves the array with duplicate points, which
# are removed here.
ps = list(zip(*ps))
lastp = np.array([])
ps2 = [ps[0]] + [ps[i] for i in xrange(1, len(ps)) if ps[i] != ps[i-1]]
avgzsum = sum(p[2] for p in ps2)
polys.append(ps2)
if fcolors is not None:
colset.append(fcolors[rs][cs])
else:
colset.append(avgzsum / len(ps2))
# Only need vectors to shade if no cmap
if cmap is None and shade:
i1, i2, i3 = 0, int(len(ps2)/3), int(2*len(ps2)/3)
v1[which_pt] = np.array(ps2[i1]) - np.array(ps2[i2])
v2[which_pt] = np.array(ps2[i2]) - np.array(ps2[i3])
which_pt += 1
if cmap is None and shade:
normals = np.cross(v1, v2)
else :
normals = []
polyc = art3d.Poly3DCollection(polys, *args, **kwargs)
if fcolors is not None:
if shade:
colset = self._shade_colors(colset, normals)
polyc.set_facecolors(colset)
polyc.set_edgecolors(colset)
elif cmap:
colset = np.array(colset)
polyc.set_array(colset)
if vmin is not None or vmax is not None:
polyc.set_clim(vmin, vmax)
if norm is not None:
polyc.set_norm(norm)
else:
if shade:
colset = self._shade_colors(color, normals)
else:
colset = color
polyc.set_facecolors(colset)
self.add_collection(polyc)
self.auto_scale_xyz(X, Y, Z, had_data)
return polyc
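# Usage sketch (illustrative only; not from the original source): plot_surface()
# expects 2D X, Y, Z arrays; rstride/cstride control how densely the input grid
# is sampled.
#
#     import numpy as np
#     import matplotlib.pyplot as plt
#     from matplotlib import cm
#     fig = plt.figure()
#     ax = fig.add_subplot(111, projection='3d')
#     x = y = np.linspace(-2, 2, 50)
#     X, Y = np.meshgrid(x, y)
#     Z = np.exp(-(X ** 2 + Y ** 2))
#     ax.plot_surface(X, Y, Z, rstride=1, cstride=1, cmap=cm.coolwarm)
#     plt.show()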
def _generate_normals(self, polygons):
'''
Generate normals for polygons by using the first three points.
This normal might not make sense for polygons with more than
three points that do not lie in a plane.
'''
normals = []
for verts in polygons:
v1 = np.array(verts[0]) - np.array(verts[1])
v2 = np.array(verts[2]) - np.array(verts[0])
normals.append(np.cross(v1, v2))
return normals
def _shade_colors(self, color, normals):
'''
Shade *color* using normal vectors given by *normals*.
*color* can also be an array of the same length as *normals*.
'''
shade = np.array([np.dot(n / proj3d.mod(n), [-1, -1, 0.5])
if proj3d.mod(n) else np.nan
for n in normals])
mask = ~np.isnan(shade)
if len(shade[mask]) > 0:
norm = Normalize(min(shade[mask]), max(shade[mask]))
shade[~mask] = min(shade[mask])
color = colorConverter.to_rgba_array(color)
# shape of color should be (M, 4) (where M is number of faces)
# shape of shade should be (M,)
# colors should have final shape of (M, 4)
alpha = color[:, 3]
colors = (0.5 + norm(shade)[:, np.newaxis] * 0.5) * color
colors[:, 3] = alpha
else:
colors = np.asanyarray(color).copy()
return colors
def _shade_colors_lightsource(self, data, cmap, lightsource):
if lightsource is None:
lightsource = LightSource(azdeg=135, altdeg=55)
return lightsource.shade(data, cmap)
def plot_wireframe(self, X, Y, Z, *args, **kwargs):
'''
Plot a 3D wireframe.
The `rstride` and `cstride` kwargs set the stride used to
sample the input data to generate the graph.
========== ================================================
Argument Description
========== ================================================
*X*, *Y*, Data values as 2D arrays
*Z*
*rstride* Array row stride (step size), defaults to 1
*cstride* Array column stride (step size), defaults to 1
========== ================================================
Keyword arguments are passed on to
:class:`~matplotlib.collections.LineCollection`.
Returns a :class:`~mpl_toolkits.mplot3d.art3d.Line3DCollection`
'''
rstride = kwargs.pop("rstride", 1)
cstride = kwargs.pop("cstride", 1)
had_data = self.has_data()
Z = np.atleast_2d(Z)
# FIXME: Support masked arrays
X, Y, Z = np.broadcast_arrays(X, Y, Z)
rows, cols = Z.shape
# We want two sets of lines, one running along the "rows" of
# Z and another set of lines running along the "columns" of Z.
# This transpose will make it easy to obtain the columns.
tX, tY, tZ = np.transpose(X), np.transpose(Y), np.transpose(Z)
rii = list(xrange(0, rows, rstride))
cii = list(xrange(0, cols, cstride))
# Add the last index only if needed
if rows > 0 and rii[-1] != (rows - 1) :
rii += [rows-1]
if cols > 0 and cii[-1] != (cols - 1) :
cii += [cols-1]
# If the inputs were empty, then just
# reset everything.
if Z.size == 0 :
rii = []
cii = []
xlines = [X[i] for i in rii]
ylines = [Y[i] for i in rii]
zlines = [Z[i] for i in rii]
txlines = [tX[i] for i in cii]
tylines = [tY[i] for i in cii]
tzlines = [tZ[i] for i in cii]
lines = [list(zip(xl, yl, zl)) for xl, yl, zl in \
zip(xlines, ylines, zlines)]
lines += [list(zip(xl, yl, zl)) for xl, yl, zl in \
zip(txlines, tylines, tzlines)]
linec = art3d.Line3DCollection(lines, *args, **kwargs)
self.add_collection(linec)
self.auto_scale_xyz(X, Y, Z, had_data)
return linec
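# Usage sketch (illustrative only; not from the original source): plot_wireframe()
# takes the same 2D X, Y, Z arrays as plot_surface but draws only the sampled
# grid lines.
#
#     import matplotlib.pyplot as plt
#     X, Y, Z = get_test_data(0.05)   # helper defined later in this module
#     fig = plt.figure()
#     ax = fig.add_subplot(111, projection='3d')
#     ax.plot_wireframe(X, Y, Z, rstride=10, cstride=10)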
def plot_trisurf(self, *args, **kwargs):
"""
============= ================================================
Argument Description
============= ================================================
*X*, *Y*, *Z* Data values as 1D arrays
*color* Color of the surface patches
*cmap* A colormap for the surface patches.
*norm* An instance of Normalize to map values to colors
*vmin* Minimum value to map
*vmax* Maximum value to map
*shade* Whether to shade the facecolors
============= ================================================
The (optional) triangulation can be specified in one of two ways;
either::
plot_trisurf(triangulation, ...)
where triangulation is a :class:`~matplotlib.tri.Triangulation`
object, or::
plot_trisurf(X, Y, ...)
plot_trisurf(X, Y, triangles, ...)
plot_trisurf(X, Y, triangles=triangles, ...)
in which case a Triangulation object will be created. See
:class:`~matplotlib.tri.Triangulation` for an explanation of
these possibilities.
The remaining arguments are::
plot_trisurf(..., Z)
where *Z* is the array of values to contour, one per point
in the triangulation.
Other arguments are passed on to
:class:`~mpl_toolkits.mplot3d.art3d.Poly3DCollection`
**Examples:**
.. plot:: mpl_examples/mplot3d/trisurf3d_demo.py
.. plot:: mpl_examples/mplot3d/trisurf3d_demo2.py
.. versionadded:: 1.2.0
This plotting function was added for the v1.2.0 release.
"""
had_data = self.has_data()
# TODO: Support custom face colours
color = np.array(colorConverter.to_rgba(kwargs.pop('color', 'b')))
cmap = kwargs.get('cmap', None)
norm = kwargs.pop('norm', None)
vmin = kwargs.pop('vmin', None)
vmax = kwargs.pop('vmax', None)
linewidth = kwargs.get('linewidth', None)
shade = kwargs.pop('shade', cmap is None)
lightsource = kwargs.pop('lightsource', None)
tri, args, kwargs = Triangulation.get_from_args_and_kwargs(*args, **kwargs)
if 'Z' in kwargs:
z = np.asarray(kwargs.pop('Z'))
else:
z = np.asarray(args[0])
# We do this so Z doesn't get passed as an arg to PolyCollection
args = args[1:]
triangles = tri.get_masked_triangles()
xt = tri.x[triangles][..., np.newaxis]
yt = tri.y[triangles][..., np.newaxis]
zt = z[triangles][..., np.newaxis]
verts = np.concatenate((xt, yt, zt), axis=2)
# Only need these vectors to shade if there is no cmap
if cmap is None and shade:
totpts = len(verts)
v1 = np.empty((totpts, 3))
v2 = np.empty((totpts, 3))
# This indexes the vertex points
which_pt = 0
colset = []
for i in xrange(len(verts)):
avgzsum = verts[i,0,2] + verts[i,1,2] + verts[i,2,2]
colset.append(avgzsum / 3.0)
# Only need vectors to shade if no cmap
if cmap is None and shade:
v1[which_pt] = np.array(verts[i,0]) - np.array(verts[i,1])
v2[which_pt] = np.array(verts[i,1]) - np.array(verts[i,2])
which_pt += 1
if cmap is None and shade:
normals = np.cross(v1, v2)
else:
normals = []
polyc = art3d.Poly3DCollection(verts, *args, **kwargs)
if cmap:
colset = np.array(colset)
polyc.set_array(colset)
if vmin is not None or vmax is not None:
polyc.set_clim(vmin, vmax)
if norm is not None:
polyc.set_norm(norm)
else:
if shade:
colset = self._shade_colors(color, normals)
else:
colset = color
polyc.set_facecolors(colset)
self.add_collection(polyc)
self.auto_scale_xyz(tri.x, tri.y, z, had_data)
return polyc
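# Usage sketch (illustrative only; not from the original source): plot_trisurf()
# takes 1D x, y, z arrays and triangulates them, or accepts an explicit
# Triangulation object.
#
#     import numpy as np
#     import matplotlib.pyplot as plt
#     from matplotlib import cm
#     rng = np.random.RandomState(0)
#     x, y = rng.rand(2, 200)
#     z = np.sin(3 * x) * np.cos(3 * y)
#     fig = plt.figure()
#     ax = fig.add_subplot(111, projection='3d')
#     ax.plot_trisurf(x, y, z, cmap=cm.coolwarm, linewidth=0.2)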
def _3d_extend_contour(self, cset, stride=5):
'''
Extend a contour in 3D by creating polygon walls between
neighboring contour levels.
'''
levels = cset.levels
colls = cset.collections
dz = (levels[1] - levels[0]) / 2
for z, linec in zip(levels, colls):
topverts = art3d.paths_to_3d_segments(linec.get_paths(), z - dz)
botverts = art3d.paths_to_3d_segments(linec.get_paths(), z + dz)
color = linec.get_color()[0]
polyverts = []
normals = []
nsteps = round(len(topverts[0]) / stride)
if nsteps <= 1:
if len(topverts[0]) > 1:
nsteps = 2
else:
continue
stepsize = (len(topverts[0]) - 1) / (nsteps - 1)
for i in range(int(round(nsteps)) - 1):
i1 = int(round(i * stepsize))
i2 = int(round((i + 1) * stepsize))
polyverts.append([topverts[0][i1],
topverts[0][i2],
botverts[0][i2],
botverts[0][i1]])
v1 = np.array(topverts[0][i1]) - np.array(topverts[0][i2])
v2 = np.array(topverts[0][i1]) - np.array(botverts[0][i1])
normals.append(np.cross(v1, v2))
colors = self._shade_colors(color, normals)
colors2 = self._shade_colors(color, normals)
polycol = art3d.Poly3DCollection(polyverts,
facecolors=colors,
edgecolors=colors2)
polycol.set_sort_zpos(z)
self.add_collection3d(polycol)
for col in colls:
self.collections.remove(col)
def add_contour_set(self, cset, extend3d=False, stride=5, zdir='z', offset=None):
zdir = '-' + zdir
if extend3d:
self._3d_extend_contour(cset, stride)
else:
for z, linec in zip(cset.levels, cset.collections):
if offset is not None:
z = offset
art3d.line_collection_2d_to_3d(linec, z, zdir=zdir)
def add_contourf_set(self, cset, zdir='z', offset=None) :
zdir = '-' + zdir
for z, linec in zip(cset.levels, cset.collections) :
if offset is not None :
z = offset
art3d.poly_collection_2d_to_3d(linec, z, zdir=zdir)
linec.set_sort_zpos(z)
def contour(self, X, Y, Z, *args, **kwargs):
'''
Create a 3D contour plot.
========== ================================================
Argument Description
========== ================================================
*X*, *Y*, Data values as numpy.arrays
*Z*
*extend3d* Whether to extend contour in 3D (default: False)
*stride* Stride (step size) for extending contour
*zdir* The direction to use: x, y or z (default)
*offset* If specified plot a projection of the contour
lines on this position in plane normal to zdir
========== ================================================
The positional and other keyword arguments are passed on to
:func:`~matplotlib.axes.Axes.contour`
Returns a :class:`~matplotlib.axes.Axes.contour`
'''
extend3d = kwargs.pop('extend3d', False)
stride = kwargs.pop('stride', 5)
zdir = kwargs.pop('zdir', 'z')
offset = kwargs.pop('offset', None)
had_data = self.has_data()
jX, jY, jZ = art3d.rotate_axes(X, Y, Z, zdir)
cset = Axes.contour(self, jX, jY, jZ, *args, **kwargs)
self.add_contour_set(cset, extend3d, stride, zdir, offset)
self.auto_scale_xyz(X, Y, Z, had_data)
return cset
contour3D = contour
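# Usage sketch (illustrative only; not from the original source): contour()
# draws 3D contour lines; with 'offset' the lines are projected onto a plane
# normal to 'zdir'.
#
#     import matplotlib.pyplot as plt
#     X, Y, Z = get_test_data(0.05)   # helper defined later in this module
#     fig = plt.figure()
#     ax = fig.add_subplot(111, projection='3d')
#     ax.contour(X, Y, Z, zdir='z', offset=Z.min())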
def tricontour(self, *args, **kwargs):
"""
Create a 3D contour plot.
========== ================================================
Argument Description
========== ================================================
*X*, *Y*, Data values as numpy.arrays
*Z*
*extend3d* Whether to extend contour in 3D (default: False)
*stride* Stride (step size) for extending contour
*zdir* The direction to use: x, y or z (default)
*offset* If specified plot a projection of the contour
lines on this position in plane normal to zdir
========== ================================================
Other keyword arguments are passed on to
:func:`~matplotlib.axes.Axes.tricontour`
Returns a :class:`~matplotlib.axes.Axes.contour`
.. versionchanged:: 1.3.0
Added support for custom triangulations
EXPERIMENTAL: This method currently produces incorrect output due to a
longstanding bug in 3D PolyCollection rendering.
"""
extend3d = kwargs.pop('extend3d', False)
stride = kwargs.pop('stride', 5)
zdir = kwargs.pop('zdir', 'z')
offset = kwargs.pop('offset', None)
had_data = self.has_data()
tri, args, kwargs = Triangulation.get_from_args_and_kwargs(
*args, **kwargs)
X = tri.x
Y = tri.y
if 'Z' in kwargs:
Z = kwargs.pop('Z')
else:
Z = args[0]
# We do this so Z doesn't get passed as an arg to Axes.tricontour
args = args[1:]
jX, jY, jZ = art3d.rotate_axes(X, Y, Z, zdir)
tri = Triangulation(jX, jY, tri.triangles, tri.mask)
cset = Axes.tricontour(self, tri, jZ, *args, **kwargs)
self.add_contour_set(cset, extend3d, stride, zdir, offset)
self.auto_scale_xyz(X, Y, Z, had_data)
return cset
def contourf(self, X, Y, Z, *args, **kwargs):
'''
Create a 3D contourf plot.
========== ================================================
Argument Description
========== ================================================
*X*, *Y*, Data values as numpy.arrays
*Z*
*zdir* The direction to use: x, y or z (default)
*offset* If specified plot a projection of the filled contour
on this position in plane normal to zdir
========== ================================================
The positional and keyword arguments are passed on to
:func:`~matplotlib.axes.Axes.contourf`
Returns a :class:`~matplotlib.axes.Axes.contourf`
.. versionchanged :: 1.1.0
The *zdir* and *offset* kwargs were added.
'''
zdir = kwargs.pop('zdir', 'z')
offset = kwargs.pop('offset', None)
had_data = self.has_data()
jX, jY, jZ = art3d.rotate_axes(X, Y, Z, zdir)
cset = Axes.contourf(self, jX, jY, jZ, *args, **kwargs)
self.add_contourf_set(cset, zdir, offset)
self.auto_scale_xyz(X, Y, Z, had_data)
return cset
contourf3D = contourf
def tricontourf(self, *args, **kwargs):
"""
Create a 3D contourf plot.
========== ================================================
Argument Description
========== ================================================
*X*, *Y*, Data values as numpy.arrays
*Z*
*zdir* The direction to use: x, y or z (default)
*offset* If specified plot a projection of the contour
lines on this position in plane normal to zdir
========== ================================================
Other keyword arguments are passed on to
:func:`~matplotlib.axes.Axes.tricontour`
Returns a :class:`~matplotlib.axes.Axes.contour`
.. versionchanged :: 1.3.0
Added support for custom triangulations
EXPERIMENTAL: This method currently produces incorrect output due to a
longstanding bug in 3D PolyCollection rendering.
"""
zdir = kwargs.pop('zdir', 'z')
offset = kwargs.pop('offset', None)
had_data = self.has_data()
tri, args, kwargs = Triangulation.get_from_args_and_kwargs(
*args, **kwargs)
X = tri.x
Y = tri.y
if 'Z' in kwargs:
Z = kwargs.pop('Z')
else:
Z = args[0]
# We do this so Z doesn't get passed as an arg to Axes.tricontourf
args = args[1:]
jX, jY, jZ = art3d.rotate_axes(X, Y, Z, zdir)
tri = Triangulation(jX, jY, tri.triangles, tri.mask)
cset = Axes.tricontourf(self, tri, jZ, *args, **kwargs)
self.add_contourf_set(cset, zdir, offset)
self.auto_scale_xyz(X, Y, Z, had_data)
return cset
def add_collection3d(self, col, zs=0, zdir='z'):
'''
Add a 3D collection object to the plot.
2D collection types are converted to a 3D version by
modifying the object and adding z coordinate information.
Supported are:
- PolyCollection
- LineCollection
- PatchCollection
'''
zvals = np.atleast_1d(zs)
if len(zvals) > 0 :
zsortval = min(zvals)
else :
zsortval = 0 # FIXME: Fairly arbitrary. Is there a better value?
# FIXME: use issubclass() (although, then a 3D collection
# object would also pass.) Maybe have a collection3d
# abstract class to test for and exclude?
if type(col) is mcoll.PolyCollection:
art3d.poly_collection_2d_to_3d(col, zs=zs, zdir=zdir)
col.set_sort_zpos(zsortval)
elif type(col) is mcoll.LineCollection:
art3d.line_collection_2d_to_3d(col, zs=zs, zdir=zdir)
col.set_sort_zpos(zsortval)
elif type(col) is mcoll.PatchCollection:
art3d.patch_collection_2d_to_3d(col, zs=zs, zdir=zdir)
col.set_sort_zpos(zsortval)
Axes.add_collection(self, col)
def scatter(self, xs, ys, zs=0, zdir='z', s=20, c='b', depthshade=True,
*args, **kwargs):
'''
Create a scatter plot.
============ ========================================================
Argument Description
============ ========================================================
*xs*, *ys* Positions of data points.
*zs* Either an array of the same length as *xs* and
*ys* or a single value to place all points in
the same plane. Default is 0.
*zdir* Which direction to use as z ('x', 'y' or 'z')
when plotting a 2D set.
*s* size in points^2. It is a scalar or an array of the
same length as *x* and *y*.
*c* a color. *c* can be a single color format string, or a
sequence of color specifications of length *N*, or a
sequence of *N* numbers to be mapped to colors using the
*cmap* and *norm* specified via kwargs (see below). Note
that *c* should not be a single numeric RGB or RGBA
sequence because that is indistinguishable from an array
of values to be colormapped. *c* can be a 2-D array in
which the rows are RGB or RGBA, however.
*depthshade*
Whether or not to shade the scatter markers to give
the appearance of depth. Default is *True*.
============ ========================================================
Keyword arguments are passed on to
:func:`~matplotlib.axes.Axes.scatter`.
Returns a :class:`~mpl_toolkits.mplot3d.art3d.Patch3DCollection`
'''
had_data = self.has_data()
xs = np.ma.ravel(xs)
ys = np.ma.ravel(ys)
zs = np.ma.ravel(zs)
if xs.size != ys.size:
raise ValueError("Arguments 'xs' and 'ys' must be of same size.")
if xs.size != zs.size:
if zs.size == 1:
zs = np.tile(zs[0], xs.size)
else:
raise ValueError(("Argument 'zs' must be of same size as 'xs' "
"and 'ys' or of size 1."))
s = np.ma.ravel(s) # This doesn't have to match x, y in size.
cstr = cbook.is_string_like(c) or cbook.is_sequence_of_strings(c)
if not cstr:
c = np.asanyarray(c)
if c.size == xs.size:
c = np.ma.ravel(c)
xs, ys, zs, s, c = cbook.delete_masked_points(xs, ys, zs, s, c)
patches = Axes.scatter(self, xs, ys, s=s, c=c, *args, **kwargs)
if not cbook.iterable(zs):
is_2d = True
zs = np.ones(len(xs)) * zs
else:
is_2d = False
art3d.patch_collection_2d_to_3d(patches, zs=zs, zdir=zdir,
depthshade=depthshade)
if self._zmargin < 0.05 and xs.size > 0:
self.set_zmargin(0.05)
#FIXME: why is this necessary?
if not is_2d:
self.auto_scale_xyz(xs, ys, zs, had_data)
return patches
scatter3D = scatter
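# Usage sketch (illustrative only; not from the original source): scatter()
# accepts per-point sizes and colors like the 2D version, plus zs/zdir and the
# depthshade flag documented above.
#
#     import numpy as np
#     import matplotlib.pyplot as plt
#     rng = np.random.RandomState(0)
#     xs, ys, zs = rng.rand(3, 100)
#     fig = plt.figure()
#     ax = fig.add_subplot(111, projection='3d')
#     ax.scatter(xs, ys, zs, c=zs, s=30, depthshade=True)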
def bar(self, left, height, zs=0, zdir='z', *args, **kwargs):
'''
Add 2D bar(s).
========== ================================================
Argument Description
========== ================================================
*left* The x coordinates of the left sides of the bars.
*height* The height of the bars.
*zs* Z coordinate of bars, if one value is specified
they will all be placed at the same z.
*zdir* Which direction to use as z ('x', 'y' or 'z')
when plotting a 2D set.
========== ================================================
Keyword arguments are passed onto :func:`~matplotlib.axes.Axes.bar`.
Returns a :class:`~mpl_toolkits.mplot3d.art3d.Patch3DCollection`
'''
had_data = self.has_data()
patches = Axes.bar(self, left, height, *args, **kwargs)
if not cbook.iterable(zs):
zs = np.ones(len(left)) * zs
verts = []
verts_zs = []
for p, z in zip(patches, zs):
vs = art3d.get_patch_verts(p)
verts += vs.tolist()
verts_zs += [z] * len(vs)
art3d.patch_2d_to_3d(p, z, zdir)
if 'alpha' in kwargs:
p.set_alpha(kwargs['alpha'])
if len(verts) > 0 :
# the following has to be skipped if verts is empty
# NOTE: Bugs could still occur if len(verts) > 0,
# but the "2nd dimension" is empty.
xs, ys = list(zip(*verts))
else :
xs, ys = [], []
xs, ys, verts_zs = art3d.juggle_axes(xs, ys, verts_zs, zdir)
self.auto_scale_xyz(xs, ys, verts_zs, had_data)
return patches
def bar3d(self, x, y, z, dx, dy, dz, color='b',
zsort='average', *args, **kwargs):
'''
Generate a 3D bar, or multiple bars.
When generating multiple bars, x, y, z have to be arrays.
dx, dy, dz can be arrays or scalars.
*color* can be:
- A single color value, to color all bars the same color.
- An array of colors of length N bars, to color each bar
independently.
- An array of colors of length 6, to color the faces of the
bars similarly.
- An array of colors of length 6 * N bars, to color each face
independently.
When coloring the faces of the boxes specifically, this is
the order of the coloring:
1. -Z (bottom of box)
2. +Z (top of box)
3. -Y
4. +Y
5. -X
6. +X
Keyword arguments are passed onto
:func:`~mpl_toolkits.mplot3d.art3d.Poly3DCollection`
'''
had_data = self.has_data()
if not cbook.iterable(x):
x = [x]
if not cbook.iterable(y):
y = [y]
if not cbook.iterable(z):
z = [z]
if not cbook.iterable(dx):
dx = [dx]
if not cbook.iterable(dy):
dy = [dy]
if not cbook.iterable(dz):
dz = [dz]
if len(dx) == 1:
dx = dx * len(x)
if len(dy) == 1:
dy = dy * len(y)
if len(dz) == 1:
dz = dz * len(z)
if len(x) != len(y) or len(x) != len(z):
warnings.warn('x, y, and z must be the same length.')
# FIXME: This is archaic and could be done much better.
minx, miny, minz = 1e20, 1e20, 1e20
maxx, maxy, maxz = -1e20, -1e20, -1e20
polys = []
for xi, yi, zi, dxi, dyi, dzi in zip(x, y, z, dx, dy, dz):
minx = min(xi, minx)
maxx = max(xi + dxi, maxx)
miny = min(yi, miny)
maxy = max(yi + dyi, maxy)
minz = min(zi, minz)
maxz = max(zi + dzi, maxz)
polys.extend([
((xi, yi, zi), (xi + dxi, yi, zi),
(xi + dxi, yi + dyi, zi), (xi, yi + dyi, zi)),
((xi, yi, zi + dzi), (xi + dxi, yi, zi + dzi),
(xi + dxi, yi + dyi, zi + dzi), (xi, yi + dyi, zi + dzi)),
((xi, yi, zi), (xi + dxi, yi, zi),
(xi + dxi, yi, zi + dzi), (xi, yi, zi + dzi)),
((xi, yi + dyi, zi), (xi + dxi, yi + dyi, zi),
(xi + dxi, yi + dyi, zi + dzi), (xi, yi + dyi, zi + dzi)),
((xi, yi, zi), (xi, yi + dyi, zi),
(xi, yi + dyi, zi + dzi), (xi, yi, zi + dzi)),
((xi + dxi, yi, zi), (xi + dxi, yi + dyi, zi),
(xi + dxi, yi + dyi, zi + dzi), (xi + dxi, yi, zi + dzi)),
])
facecolors = []
if color is None:
# no color specified
facecolors = [None] * len(x)
elif len(color) == len(x):
# bar colors specified, need to expand to number of faces
for c in color:
facecolors.extend([c] * 6)
else:
# a single color specified, or face colors specified explicitly
facecolors = list(colorConverter.to_rgba_array(color))
if len(facecolors) < len(x):
facecolors *= (6 * len(x))
normals = self._generate_normals(polys)
sfacecolors = self._shade_colors(facecolors, normals)
col = art3d.Poly3DCollection(polys,
zsort=zsort,
facecolor=sfacecolors,
*args, **kwargs)
self.add_collection(col)
self.auto_scale_xyz((minx, maxx), (miny, maxy), (minz, maxz), had_data)
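# Usage sketch (illustrative only; not from the original source): bar3d()
# places boxes with corner (x, y, z) and extents (dx, dy, dz); the six-entry
# face-color order described in the docstring applies per bar.
#
#     import numpy as np
#     import matplotlib.pyplot as plt
#     xpos = np.arange(4)
#     ypos = np.zeros(4)
#     zpos = np.zeros(4)
#     heights = np.array([1.0, 2.0, 1.5, 3.0])
#     fig = plt.figure()
#     ax = fig.add_subplot(111, projection='3d')
#     ax.bar3d(xpos, ypos, zpos, 0.5, 0.5, heights, color='c')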
def set_title(self, label, fontdict=None, loc='center', **kwargs):
ret = Axes.set_title(self, label, fontdict=fontdict, loc=loc, **kwargs)
(x, y) = self.title.get_position()
self.title.set_y(0.92 * y)
return ret
set_title.__doc__ = maxes.Axes.set_title.__doc__
def quiver(self, *args, **kwargs):
"""
Plot a 3D field of arrows.
call signatures::
quiver(X, Y, Z, U, V, W, **kwargs)
Arguments:
*X*, *Y*, *Z*:
The x, y and z coordinates of the arrow locations
*U*, *V*, *W*:
The direction vector that the arrow is pointing
The arguments can be array-like or scalars, so long as they
can be broadcast together. The arguments can also be
masked arrays. If an element in any of the arguments is masked, then
the corresponding quiver element will not be plotted.
Keyword arguments:
*length*: [1.0 | float]
The length of each quiver, defaults to 1.0; the unit is
the same as the axes
*arrow_length_ratio*: [0.3 | float]
The ratio of the arrow head length to the quiver length,
defaults to 0.3
Any additional keyword arguments are delegated to
:class:`~matplotlib.collections.LineCollection`
"""
def calc_arrow(u, v, w, angle=15):
"""
Calculate the arrow head directions. (u, v, w) should be a unit vector.
"""
# this part figures out the axis of rotation to use
# use unit vector perpendicular to (u,v,w) when |w|=1, by default
x, y, z = 0, 1, 0
# get the norm
norm = math.sqrt(v**2 + u**2)
# normalize it if it is safe
if norm > 0:
# get unit direction vector perpendicular to (u,v,w)
x, y = v/norm, -u/norm
# this function takes an angle, and rotates the (u,v,w)
# angle degrees around (x,y,z)
def rotatefunction(angle):
ra = math.radians(angle)
c = math.cos(ra)
s = math.sin(ra)
# construct the rotation matrix
R = np.matrix([[c+(x**2)*(1-c), x*y*(1-c)-z*s, x*z*(1-c)+y*s],
[y*x*(1-c)+z*s, c+(y**2)*(1-c), y*z*(1-c)-x*s],
[z*x*(1-c)-y*s, z*y*(1-c)+x*s, c+(z**2)*(1-c)]])
# construct the column vector for (u,v,w)
line = np.matrix([[u],[v],[w]])
# use numpy to multiply them to get the rotated vector
rotatedline = R*line
# return the rotated (u,v,w) from the computed matrix
return (rotatedline[0,0], rotatedline[1,0], rotatedline[2,0])
# compute and return the two arrowhead direction unit vectors
return rotatefunction(angle), rotatefunction(-angle)
def point_vector_to_line(point, vector, length):
"""
use a point and vector to generate lines
"""
lines = []
for var in np.linspace(0, length, num=2):
lines.append(list(zip(*(point - var * vector))))
lines = np.array(lines).swapaxes(0, 1)
return lines.tolist()
had_data = self.has_data()
# handle kwargs
# shaft length
length = kwargs.pop('length', 1)
# arrow length ratio to the shaft length
arrow_length_ratio = kwargs.pop('arrow_length_ratio', 0.3)
# handle args
argi = 6
if len(args) < argi:
raise ValueError('Wrong number of arguments. Expected %d got %d' %
(argi, len(args)))
# first 6 arguments are X, Y, Z, U, V, W
input_args = args[:argi]
# if any of the args are scalar, convert into list
input_args = [[k] if isinstance(k, (int, float)) else k
for k in input_args]
# extract the masks, if any
masks = [k.mask for k in input_args if isinstance(k, np.ma.MaskedArray)]
# broadcast to match the shape
bcast = np.broadcast_arrays(*(input_args + masks))
input_args = bcast[:argi]
masks = bcast[argi:]
if masks:
# combine the masks into one
mask = reduce(np.logical_or, masks)
# put mask on and compress
input_args = [np.ma.array(k, mask=mask).compressed()
for k in input_args]
else:
input_args = [k.flatten() for k in input_args]
if any(len(v) == 0 for v in input_args):
# No quivers, so just make an empty collection and return early
linec = art3d.Line3DCollection([], *args[argi:], **kwargs)
self.add_collection(linec)
return linec
# Following assertions must be true before proceeding
# must all be ndarray
assert all(isinstance(k, np.ndarray) for k in input_args)
# must all in same shape
assert len(set([k.shape for k in input_args])) == 1
xs, ys, zs, us, vs, ws = input_args[:argi]
lines = []
# for each arrow
for i in range(xs.shape[0]):
# calculate the arrow body
x = xs[i]
y = ys[i]
z = zs[i]
u = us[i]
v = vs[i]
w = ws[i]
# (u, v, w) will be normalized below; skip degenerate zero vectors.
if u == 0 and v == 0 and w == 0:
# Just don't make a quiver for such a case.
continue
# normalize
norm = math.sqrt(u ** 2 + v ** 2 + w ** 2)
u /= norm
v /= norm
w /= norm
# draw main line
t = np.linspace(0, length, num=20)
lx = x - t * u
ly = y - t * v
lz = z - t * w
line = list(zip(lx, ly, lz))
lines.append(line)
d1, d2 = calc_arrow(u, v, w)
ua1, va1, wa1 = d1[0], d1[1], d1[2]
ua2, va2, wa2 = d2[0], d2[1], d2[2]
# TODO: num should probably get parameterized
t = np.linspace(0, length * arrow_length_ratio, num=20)
la1x = x - t * ua1
la1y = y - t * va1
la1z = z - t * wa1
la2x = x - t * ua2
la2y = y - t * va2
la2z = z - t * wa2
line = list(zip(la1x, la1y, la1z))
lines.append(line)
line = list(zip(la2x, la2y, la2z))
lines.append(line)
linec = art3d.Line3DCollection(lines, *args[argi:], **kwargs)
self.add_collection(linec)
self.auto_scale_xyz(xs, ys, zs, had_data)
return linec
quiver3D = quiver
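# Usage sketch (illustrative only; not from the original source): quiver()
# expects six broadcastable arrays X, Y, Z, U, V, W; 'length' scales the shafts
# and 'arrow_length_ratio' the heads.
#
#     import numpy as np
#     import matplotlib.pyplot as plt
#     x, y, z = np.meshgrid(np.arange(0, 1, 0.2),
#                           np.arange(0, 1, 0.2),
#                           np.arange(0, 1, 0.2))
#     u = np.ones_like(x)
#     v = np.zeros_like(y)
#     w = np.ones_like(z)
#     fig = plt.figure()
#     ax = fig.add_subplot(111, projection='3d')
#     ax.quiver(x, y, z, u, v, w, length=0.1)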
def get_test_data(delta=0.05):
'''
Return a tuple X, Y, Z with a test data set.
'''
from matplotlib.mlab import bivariate_normal
x = y = np.arange(-3.0, 3.0, delta)
X, Y = np.meshgrid(x, y)
Z1 = bivariate_normal(X, Y, 1.0, 1.0, 0.0, 0.0)
Z2 = bivariate_normal(X, Y, 1.5, 0.5, 1, 1)
Z = Z2 - Z1
X = X * 10
Y = Y * 10
Z = Z * 500
return X, Y, Z
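# Usage sketch (illustrative only; not from the original source): get_test_data()
# is a convenience helper for demos, e.g. a translucent surface with a filled
# contour projected onto the floor of the axes.
#
#     import matplotlib.pyplot as plt
#     X, Y, Z = get_test_data(0.1)
#     fig = plt.figure()
#     ax = fig.add_subplot(111, projection='3d')
#     ax.plot_surface(X, Y, Z, rstride=2, cstride=2, alpha=0.3)
#     ax.contourf(X, Y, Z, zdir='z', offset=Z.min())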
########################################################
# Register Axes3D as a 'projection' object available
# for use just like any other axes
########################################################
import matplotlib.projections as proj
proj.projection_registry.register(Axes3D)
|
lgpl-3.0
|
r357/Advanced-Pyconomics
|
1_Dynamic_GE/Dynamic_GE1.py
|
1
|
4085
|
# TWO PERIOD GENERAL EQUILIBRIUM MODEL
# WITH TWO TYPES OF AGENTS IN TERMS OF
# ENDOWMENTS AND PREFERENCES (DISCOUNT RATE)
import numpy as np
import matplotlib.pyplot as plt
# KEY PARAMETERS
# PREFERENCE PARAMETERS
rho1 = np.arange(0.0,0.2,0.02) # discount rate 1
rho2 = np.arange(0.0,0.2,0.02) # discount rate 2
# ENDOWMENTS
# NOTATION: Qab denotes agent a, period b; e.g. Q12 is agent 1's period-2 endowment (see consumption functions below)
Q11 = np.arange(1.0,2.0,0.05)
Q12 = np.arange(1.0,2.0,0.05)
Q21 = np.arange(1.0,2.0,0.05)
Q22 = np.arange(1.0,2.0,0.05)
# np.ix_ builds open-mesh index arrays so the parameter grids broadcast against each other
Q11x,Q12x,Q21x,Q22x,rho1x,rho2x = np.ix_(Q11,Q12,Q21,Q22,rho1,rho2)
# INTEREST RATE - SOLUTION TO GE
Q11x.shape, Q12x.shape, Q21x.shape, Q22x.shape, rho1x.shape, rho2x.shape
r = -1 + ((1+rho1x)/(2+rho1x)*Q12x+(1+rho2x)/(2+rho2x)*Q22x)/(Q11x/(2+rho1x)+Q21x/(2+rho2x))
C11 = (1+rho1x)/(2+rho1x)*(Q11x+Q12x/(1+r))
C12 = (1+r)/(2+rho1x)*(Q11x+Q12x/(1+r))
C21 = (1+rho2x)/(2+rho2x)*(Q21x+Q22x/(1+r))
C22 = (1+r)/(2+rho2x)*(Q21x+Q22x/(1+r))
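# Derivation sketch (illustrative only; assumes log utility for both agents):
# each agent maximizes ln(C1) + ln(C2)/(1+rho) subject to the lifetime budget
# constraint C1 + C2/(1+r) = Q1 + Q2/(1+r). The Euler equation
# C2 = (1+r)/(1+rho) * C1 gives the consumption rules used above, e.g.
#   C11 = (1+rho1)/(2+rho1) * (Q11 + Q12/(1+r)).
# Imposing period-1 market clearing, C11 + C21 = Q11 + Q21, and solving for r
# yields the closed form computed above:
#   1+r = ((1+rho1)/(2+rho1)*Q12 + (1+rho2)/(2+rho2)*Q22)
#         / (Q11/(2+rho1) + Q21/(2+rho2))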
# PLOT 1: VARIATION OF ENDOWMENT of Agent Type 1 in period 1
r_1 = r[:,0,0,0,0,0]
C11_1 = C11[:,0,0,0,0,0]
C12_1 = C12[:,0,0,0,0,0]
C21_1 = C21[:,0,0,0,0,0]
C22_1 = C22[:,0,0,0,0,0]
fig1 = plt.figure(); ax1=fig1.add_subplot(1,1,1)
ax1.plot(Q11,r_1,'k-',color='r',label='Interest rate')
ax1.legend(loc='best')
ax1.set_xlabel('Endowment $Q_{1}^{1}$')
ax1.set_ylabel('Real interest rate')
#fig1.savefig('C:/aaaCourses/AdvMacro/aaaHomeworks/2017/Python/fig1_r_Q11.png') # save the figure to file
fig2 = plt.figure(); ax1=fig2.add_subplot(1,1,1)
ax1.plot(Q11,C11_1,'k-',color='r',label='$C_{1}^{1}$')
ax1.plot(Q11,C12_1,'k--',color='r',label='$C_{2}^{1}$')
ax1.plot(Q11,C21_1,'k-',color='b',label='$C_{1}^{2}$')
ax1.plot(Q11,C22_1,'k--',color='b',label='$C_{2}^{2}$')
ax1.legend(loc='best')
ax1.set_xlabel('Endowment $Q_{1}^{1}$')
ax1.set_ylabel('Consumption ')
#fig2.savefig('C:/aaaCourses/AdvMacro/aaaHomeworks/2017/Python/fig2_C_Q11.png') # save the figure to file
# PLOT 2: VARIATION OF ENDOWMENT of Agent Type 1 in period 2
r_2 = r[0,:,0,0,0,0]
C11_2 = C11[0,:,0,0,0,0]
C12_2 = C12[0,:,0,0,0,0]
C21_2 = C21[0,:,0,0,0,0]
C22_2 = C22[0,:,0,0,0,0]
fig3 = plt.figure(); ax1=fig3.add_subplot(1,1,1)
ax1.plot(Q12,r_2,'k-',color='r',label='Interest rate')
ax1.legend(loc='best')
ax1.set_xlabel('Endowment $Q_{1}^{2}$')
ax1.set_ylabel('Real interest rate')
#fig3.savefig('C:/aaaCourses/AdvMacro/aaaHomeworks/2017/Python/fig3_r_Q12.png') # save the figure to file
fig4 = plt.figure(); ax1=fig4.add_subplot(1,1,1)
ax1.plot(Q12,C11_2,'k-',color='r',label='$C_{1}^{1}$')
ax1.plot(Q12,C12_2,'k--',color='r',label='$C_{2}^{1}$')
ax1.plot(Q12,C21_2,'k-',color='b',label='$C_{1}^{2}$')
ax1.plot(Q12,C22_2,'k--',color='b',label='$C_{2}^{2}$')
ax1.legend(loc='best')
ax1.set_xlabel('Endowment $Q_{1}^{2}$')
ax1.set_ylabel('Consumption ')
#fig4.savefig('C:/aaaCourses/AdvMacro/aaaHomeworks/2017/Python/fig4_C_Q12.png') # save the figure to file
# PLOT 3: VARIATION OF PREFERENCE PARAMETER RHO (discount rate) of Agent Type 1
r_3 = r[0,0,0,0,:,0]
C11_3 = C11[0,0,0,0,:,0]
C12_3 = C12[0,0,0,0,:,0]
C21_3 = C21[0,0,0,0,:,0]
C22_3 = C22[0,0,0,0,:,0]
fig5 = plt.figure(); ax1=fig5.add_subplot(1,1,1)
ax1.plot(rho1,r_3,'k-',color='r',label='Interest rate')
ax1.legend(loc='best')
ax1.set_xlabel(r'Discount rate $\rho_{1}$')
ax1.set_ylabel('Real interest rate')
#fig5.savefig('C:/aaaCourses/AdvMacro/aaaHomeworks/2017/Python/fig5_r_rho1.png') # save the figure to file
fig6 = plt.figure(); ax1=fig6.add_subplot(1,1,1)
ax1.plot(rho1,C11_3,'k-',color='r',label='$C_{1}^{1}$')
ax1.plot(rho1,C12_3,'k--',color='r',label='$C_{2}^{1}$')
ax1.plot(rho1,C21_3,'k-',color='b',label='$C_{1}^{2}$')
ax1.plot(rho1,C22_3,'k--',color='b',label='$C_{2}^{2}$')
ax1.legend(loc='best')
ax1.set_xlabel(r'Discount rate $\rho_{1}$')
ax1.set_ylabel('Consumption ')
#fig6.savefig('C:/aaaCourses/AdvMacro/aaaHomeworks/2017/Python/fig6_C_rho1.png') # save the figure to file
|
gpl-3.0
|
hainm/statsmodels
|
statsmodels/sandbox/regression/kernridgeregress_class.py
|
39
|
7941
|
'''Kernel Ridge Regression for local non-parametric regression'''
import numpy as np
from scipy import spatial as ssp
from numpy.testing import assert_equal
import matplotlib.pylab as plt
def plt_closeall(n=10):
'''close a number of open matplotlib windows'''
for i in range(n): plt.close()
def kernel_rbf(x,y,scale=1, **kwds):
#scale = kwds.get('scale',1)
dist = ssp.minkowski_distance_p(x[:,np.newaxis,:],y[np.newaxis,:,:],2)
return np.exp(-0.5/scale*(dist))
def kernel_euclid(x,y,p=2, **kwds):
return ssp.minkowski_distance(x[:,np.newaxis,:],y[np.newaxis,:,:],p)
class GaussProcess(object):
'''class to perform kernel ridge regression (gaussian process)
Warning: this class is memory intensive; it creates an nobs x nobs distance
matrix and its inverse, where nobs is the number of rows (observations).
See the sparse version for a larger number of observations.
Notes
-----
Todo:
* normalize multidimensional x array on demand, either by var or cov
* add confidence band
* automatic selection or proposal of smoothing parameters
Note: this is different from kernel smoothing regression,
see for example http://en.wikipedia.org/wiki/Kernel_smoother
In this version of the kernel ridge regression, the training points
are fitted exactly.
Needs a fast version for leave-one-out regression, for fitting each
observation on all the other points.
This version could be numerically improved for the calculation for many
different values of the ridge coefficient. see also short summary by
Isabelle Guyon (ETHZ) in a manuscript KernelRidge.pdf
Needs verification and possibly additional statistical results or
summary statistics for interpretation, but this is a problem with
non-parametric, non-linear methods.
Reference
---------
Rasmussen, C.E. and C.K.I. Williams, 2006, Gaussian Processes for Machine
Learning, the MIT Press, www.GaussianProcess.org/gpal, chapter 2
a short summary of the kernel ridge regression is at
http://www.ics.uci.edu/~welling/teaching/KernelsICS273B/Kernel-Ridge.pdf
'''
def __init__(self, x, y=None, kernel=kernel_rbf,
scale=0.5, ridgecoeff = 1e-10, **kwds ):
'''
Parameters
----------
x : 2d array (N,K)
data array of explanatory variables, columns represent variables
rows represent observations
y : 2d array (N,1) (optional)
endogenous variable that should be fitted or predicted
can alternatively be specified as parameter to fit method
kernel : function, default: kernel_rbf
kernel: (x1, x2) -> kernel matrix; a function that takes two
column arrays as parameters and returns the kernel or distance matrix
scale : float (optional)
smoothing parameter for the rbf kernel
ridgecoeff : float (optional)
coefficient that is multiplied with the identity matrix in the
ridge regression
Notes
-----
After initialization, kernel matrix is calculated and if y is given
as parameter then also the linear regression parameter and the
fitted or estimated y values, yest, are calculated. yest is available
as an attribute in this case.
Both scale and the ridge coefficient smooth the fitted curve.
'''
self.x = x
self.kernel = kernel
self.scale = scale
self.ridgecoeff = ridgecoeff
self.distxsample = kernel(x,x,scale=scale)
self.Kinv = np.linalg.inv(self.distxsample +
np.eye(*self.distxsample.shape)*ridgecoeff)
if y is not None:
self.y = y
self.yest = self.fit(y)
def fit(self,y):
'''fit the training explanatory variables to a sample output variable'''
self.parest = np.dot(self.Kinv, y) #self.kernel(y,y,scale=self.scale))
yhat = np.dot(self.distxsample,self.parest)
return yhat
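# Note (sketch, not from the original source): fit() computes the standard
# kernel ridge solution. With kernel matrix K = kernel(x, x) and ridge
# coefficient lam,
#     parest = inverse(K + lam * I) . y,    yhat = K . parest,
# so for small lam the training points are reproduced almost exactly.
# Minimal usage sketch with illustrative data:
#
#     import numpy as np
#     x = np.linspace(0, 5, 50)[:, None]
#     y = np.sin(x)
#     gp = GaussProcess(x, y, kernel=kernel_rbf, scale=0.5, ridgecoeff=1e-6)
#     ynew = gp.predict(np.linspace(0, 5, 200)[:, None])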
## print ds33.shape
## ds33_2 = kernel(x,x[::k,:],scale=scale)
## dsinv = np.linalg.inv(ds33+np.eye(*distxsample.shape)*ridgecoeff)
## B = np.dot(dsinv,y[::k,:])
def predict(self,x):
'''predict new y values for a given array of explanatory variables'''
self.xpredict = x
distxpredict = self.kernel(x, self.x, scale=self.scale)
self.ypredict = np.dot(distxpredict, self.parest)
return self.ypredict
def plot(self, y, plt=plt ):
'''some basic plots'''
#todo return proper graph handles
plt.figure();
plt.plot(self.x,self.y, 'bo-', self.x, self.yest, 'r.-')
plt.title('sample (training) points')
plt.figure()
plt.plot(self.xpredict,y,'bo-',self.xpredict,self.ypredict,'r.-')
plt.title('all points')
def example1():
m,k = 500,4
upper = 6
scale=10
xs1a = np.linspace(1,upper,m)[:,np.newaxis]
xs1 = xs1a*np.ones((1,4)) + 1/(1.0+np.exp(np.random.randn(m,k)))
xs1 /= np.std(xs1[::k,:],0) # normalize scale, could use cov to normalize
y1true = np.sum(np.sin(xs1)+np.sqrt(xs1),1)[:,np.newaxis]
y1 = y1true + 0.250 * np.random.randn(m,1)
stride = 2  # use only some points as training points, e.g. 2 means every 2nd
gp1 = GaussProcess(xs1[::stride,:],y1[::stride,:], kernel=kernel_euclid,
ridgecoeff=1e-10)
yhatr1 = gp1.predict(xs1)
plt.figure()
plt.plot(y1true, y1,'bo',y1true, yhatr1,'r.')
plt.title('euclid kernel: true y versus noisy y and estimated y')
plt.figure()
plt.plot(y1,'bo-',y1true,'go-',yhatr1,'r.-')
plt.title('euclid kernel: true (green), noisy (blue) and estimated (red) '+
'observations')
gp2 = GaussProcess(xs1[::stride,:],y1[::stride,:], kernel=kernel_rbf,
scale=scale, ridgecoeff=1e-1)
yhatr2 = gp2.predict(xs1)
plt.figure()
plt.plot(y1true, y1,'bo',y1true, yhatr2,'r.')
plt.title('rbf kernel: true versus noisy (blue) and estimated (red) observations')
plt.figure()
plt.plot(y1,'bo-',y1true,'go-',yhatr2,'r.-')
plt.title('rbf kernel: true (green), noisy (blue) and estimated (red) '+
'observations')
#gp2.plot(y1)
def example2(m=100, scale=0.01, stride=2):
#m,k = 100,1
upper = 6
xs1 = np.linspace(1,upper,m)[:,np.newaxis]
y1true = np.sum(np.sin(xs1**2),1)[:,np.newaxis]/xs1
y1 = y1true + 0.05*np.random.randn(m,1)
ridgecoeff = 1e-10
#stride = 2  # use only some points as training points, e.g. 2 means every 2nd
gp1 = GaussProcess(xs1[::stride,:],y1[::stride,:], kernel=kernel_euclid,
ridgecoeff=1e-10)
yhatr1 = gp1.predict(xs1)
plt.figure()
plt.plot(y1true, y1,'bo',y1true, yhatr1,'r.')
plt.title('euclid kernel: true versus noisy (blue) and estimated (red) observations')
plt.figure()
plt.plot(y1,'bo-',y1true,'go-',yhatr1,'r.-')
plt.title('euclid kernel: true (green), noisy (blue) and estimated (red) '+
'observations')
gp2 = GaussProcess(xs1[::stride,:],y1[::stride,:], kernel=kernel_rbf,
scale=scale, ridgecoeff=1e-2)
yhatr2 = gp2.predict(xs1)
plt.figure()
plt.plot(y1true, y1,'bo',y1true, yhatr2,'r.')
plt.title('rbf kernel: true versus noisy (blue) and estimated (red) observations')
plt.figure()
plt.plot(y1,'bo-',y1true,'go-',yhatr2,'r.-')
plt.title('rbf kernel: true (green), noisy (blue) and estimated (red) '+
'observations')
#gp2.plot(y1)
if __name__ == '__main__':
example2()
#example2(m=1000, scale=0.01)
#example2(m=100, scale=0.5) # oversmoothing
#example2(m=2000, scale=0.005) # this looks good for rbf, zoom in
#example2(m=200, scale=0.01,stride=4)
example1()
#plt.show()
#plt_closeall() # use this to close the open figure windows
|
bsd-3-clause
|
cmorgan/zipline
|
tests/modelling/test_frameload.py
|
11
|
6900
|
"""
Tests for zipline.data.ffc.frame.DataFrameFFCLoader
"""
from unittest import TestCase
from mock import patch
from numpy import arange
from numpy.testing import assert_array_equal
from pandas import (
DataFrame,
DatetimeIndex,
Int64Index,
)
from zipline.lib.adjustment import (
Float64Add,
Float64Multiply,
Float64Overwrite,
)
from zipline.data.equities import USEquityPricing
from zipline.data.ffc.frame import (
ADD,
DataFrameFFCLoader,
MULTIPLY,
OVERWRITE,
)
from zipline.utils.tradingcalendar import trading_day
class DataFrameFFCLoaderTestCase(TestCase):
def setUp(self):
self.nsids = 5
self.ndates = 20
self.sids = Int64Index(range(self.nsids))
self.dates = DatetimeIndex(
start='2014-01-02',
freq=trading_day,
periods=self.ndates,
)
self.mask = DataFrame(
True,
index=self.dates,
columns=self.sids,
dtype=bool,
)
def tearDown(self):
pass
def test_bad_input(self):
data = arange(100).reshape(self.ndates, self.nsids)
baseline = DataFrame(data, index=self.dates, columns=self.sids)
loader = DataFrameFFCLoader(
USEquityPricing.close,
baseline,
)
with self.assertRaises(ValueError):
# Wrong column.
loader.load_adjusted_array([USEquityPricing.open], self.mask)
with self.assertRaises(ValueError):
# Too many columns.
loader.load_adjusted_array(
[USEquityPricing.open, USEquityPricing.close],
self.mask
)
def test_baseline(self):
data = arange(100).reshape(self.ndates, self.nsids)
baseline = DataFrame(data, index=self.dates, columns=self.sids)
loader = DataFrameFFCLoader(
USEquityPricing.close,
baseline,
)
dates_slice = slice(None, 10, None)
sids_slice = slice(1, 3, None)
[adj_array] = loader.load_adjusted_array(
[USEquityPricing.close],
self.mask.iloc[dates_slice, sids_slice]
)
for idx, window in enumerate(adj_array.traverse(window_length=3)):
expected = baseline.values[dates_slice, sids_slice][idx:idx + 3]
assert_array_equal(window, expected)
def test_adjustments(self):
data = arange(100).reshape(self.ndates, self.nsids)
baseline = DataFrame(data, index=self.dates, columns=self.sids)
# Use the dates from index 10 on and sids 1-3.
dates_slice = slice(10, None, None)
sids_slice = slice(1, 4, None)
# Adjustments that should actually affect the output.
relevant_adjustments = [
{
'sid': 1,
'start_date': None,
'end_date': self.dates[15],
'apply_date': self.dates[16],
'value': 0.5,
'kind': MULTIPLY,
},
{
'sid': 2,
'start_date': self.dates[5],
'end_date': self.dates[15],
'apply_date': self.dates[16],
'value': 1.0,
'kind': ADD,
},
{
'sid': 2,
'start_date': self.dates[15],
'end_date': self.dates[16],
'apply_date': self.dates[17],
'value': 1.0,
'kind': ADD,
},
{
'sid': 3,
'start_date': self.dates[16],
'end_date': self.dates[17],
'apply_date': self.dates[18],
'value': 99.0,
'kind': OVERWRITE,
},
]
# These adjustments shouldn't affect the output.
irrelevant_adjustments = [
{ # Sid Not Requested
'sid': 0,
'start_date': self.dates[16],
'end_date': self.dates[17],
'apply_date': self.dates[18],
'value': -9999.0,
'kind': OVERWRITE,
},
{ # Sid Unknown
'sid': 9999,
'start_date': self.dates[16],
'end_date': self.dates[17],
'apply_date': self.dates[18],
'value': -9999.0,
'kind': OVERWRITE,
},
{ # Date Not Requested
'sid': 2,
'start_date': self.dates[1],
'end_date': self.dates[2],
'apply_date': self.dates[3],
'value': -9999.0,
'kind': OVERWRITE,
},
{ # Date Before Known Data
'sid': 2,
'start_date': self.dates[0] - (2 * trading_day),
'end_date': self.dates[0] - trading_day,
'apply_date': self.dates[0] - trading_day,
'value': -9999.0,
'kind': OVERWRITE,
},
{ # Date After Known Data
'sid': 2,
'start_date': self.dates[-1] + trading_day,
'end_date': self.dates[-1] + (2 * trading_day),
'apply_date': self.dates[-1] + (3 * trading_day),
'value': -9999.0,
'kind': OVERWRITE,
},
]
adjustments = DataFrame(relevant_adjustments + irrelevant_adjustments)
loader = DataFrameFFCLoader(
USEquityPricing.close,
baseline,
adjustments=adjustments,
)
expected_baseline = baseline.iloc[dates_slice, sids_slice]
formatted_adjustments = loader.format_adjustments(
self.dates[dates_slice],
self.sids[sids_slice],
)
expected_formatted_adjustments = {
6: [
Float64Multiply(first_row=0, last_row=5, col=0, value=0.5),
Float64Add(first_row=0, last_row=5, col=1, value=1.0),
],
7: [
Float64Add(first_row=5, last_row=6, col=1, value=1.0),
],
8: [
Float64Overwrite(first_row=6, last_row=7, col=2, value=99.0)
],
}
self.assertEqual(formatted_adjustments, expected_formatted_adjustments)
mask = self.mask.iloc[dates_slice, sids_slice]
with patch('zipline.data.ffc.frame.adjusted_array') as m:
loader.load_adjusted_array(
columns=[USEquityPricing.close],
mask=mask,
)
self.assertEqual(m.call_count, 1)
args, kwargs = m.call_args
assert_array_equal(kwargs['data'], expected_baseline.values)
assert_array_equal(kwargs['mask'], mask.values)
self.assertEqual(kwargs['adjustments'], expected_formatted_adjustments)
|
apache-2.0
|
ssh0/growing-string
|
triangular_lattice/box_counting_analyze.py
|
1
|
5658
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
#
# written by Shotaro Fujimoto
# 2016-11-17
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d.axes3d import Axes3D
import matplotlib.cm as cm
import numpy as np
import glob
# import itertools
# from scipy.optimize import curve_fit
# from scipy.stats import gamma
# result_data_path_base = "./results/data/box_counting/"
# result_data_path_base = "./results/data/box_counting/2016-11-25/"
# fn = [
# # ls ./results/data/box_counting/
# "beta=0.00_161117_140700.npz",
# "beta=1.00_161117_140704.npz",
# "beta=2.00_161117_140709.npz",
# "beta=3.00_161117_140714.npz",
# "beta=4.00_161117_140720.npz",
# "beta=5.00_161117_140725.npz",
# "beta=6.00_161117_140734.npz",
# "beta=7.00_161117_152439.npz",
# "beta=8.00_161117_152444.npz",
# "beta=9.00_161117_152448.npz",
# "beta=10.00_161117_152454.npz",
# ]
# fpath = [result_data_path_base + f for f in fn]
# fpath = sorted(glob.glob('./results/data/box_counting/*.npz'))
# fpath = sorted(glob.glob('./results/data/box_counting/modified/*.npz'))
# fpath = sorted(glob.glob('./results/data/box_counting/2016-11-19/*.npz'))
# fpath = sorted(glob.glob('./results/data/box_counting/2016-11-25/*.npz'))
# fpath = sorted(glob.glob('./results/data/box_counting/2016-11-26/*.npz'))
# fpath = sorted(glob.glob('./results/data/box_counting/2016-11-28/*.npz'))
fpath = sorted(glob.glob('./results/data/box_counting/2016-12-01/*.npz'))
def plot_Ds():
fig, ax = plt.subplots()
D = {}
for i, result_data_path in enumerate(fpath):
data = np.load(result_data_path)
beta = float(data['beta'])
frames = data['frames']
Ds = data['Ds']
alpha = 0.04
T = (1. / alpha) * np.log(np.arange(frames) / 2. + 1.)
# filtered = np.where(Ds < 1.)
# Ds[filtered] = 1.
if beta in D:
D[beta].append(Ds)
else:
D[beta] = [Ds]
betas = sorted(D.keys())
## 1) Plot everything
# for i, beta in enumerate(betas):
# for i, d in D[beta]:
# ax.plot(T, d, '.', color=cm.viridis(float(i) / len(betas)))
## 2) Plot all data for the specified beta
# beta = 2.
# for i, d in enumerate(D[beta]):
# color = cm.viridis(float(i) / len(D[beta]))
# ax.plot(T[5:], d[5:], '.-', label='data {}'.format(i), color=color)
## 3) Plot the mean and error (standard deviation) for the specified beta
# beta = 8.
# D_ave = np.average(np.array(D[beta]), axis=0)
# D_err = np.std(np.array(D[beta]), axis=0)
# ax.errorbar(T, D_ave, yerr=D_err, marker='.', ecolor=[0, 0, 0, 0.2])
## 4) Plot for each value of beta
## 4.0)
D_ave = np.array([np.average(np.array(D[k]), axis=0) for k in betas])
D_err = np.array([np.std(np.array(D[k]), axis=0) for k in betas])
## 4.a) Ignore all data where the fractal dimension is 1 or less
# D_ave = np.ma.array([np.ma.average(
# np.ma.array(np.array(D[k]), mask=np.array(D[k])<1.), axis=0)
# for k in betas])
# D_err = np.ma.array([np.ma.std(
# np.ma.array(np.array(D[k]), mask=np.array(D[k])<1.), axis=0)
# for k in betas])
## 4.b) Plot all data where the fractal dimension is 1 or less as 1
# D_ave, D_err = [], []
# for k in betas:
# d = np.array(D[k])
# d[d < 1] = 1.
# D_ave.append(np.average(d, axis=0))
# D_err.append(np.std(d, axis=0))
for i, (beta, d, d_err) in enumerate(zip(betas, D_ave, D_err)):
color = list(cm.viridis(float(i) / len(betas)))
label = r'$\beta = %2.2f$' % beta
## 4.c,d)
# ax.plot([T[5], T[-1]], [2., 2.], 'k-', lw=1)
## 4.c) Plot only the mean values
# ax.plot(T, d, '.', label=label, color=color)
## 4.d) Plot with error bars
ecolor = color[:-1] + [0.2]
ax.errorbar(T[5:], d[5:], yerr=d_err[5:], marker='.',
label=label, color=color, ecolor=ecolor)
## 4.e) Plot the convergence of the standard deviation
## The standard deviation decreases exponentially
# ax.semilogy(T, d_err, '.', label=label, color=color)
ax.legend(loc='best')
ax.set_title('Fractal dimension')
ax.set_xlabel(r'$T$')
# ax.set_ylim((0., 2.5))
ax.set_ylabel(r'$D(T)$')
# ax.set_ylabel(r'$\sigma(D(T))$')
plt.show()
def plot_Ds_3d():
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
D = {}
for i, result_data_path in enumerate(fpath):
data = np.load(result_data_path)
beta = float(data['beta'])
frames = data['frames']
Ds = data['Ds']
alpha = 0.04
T = (1. / alpha) * np.log(np.arange(frames) / 2. + 1.)
if beta in D:
D[beta].append(Ds)
else:
D[beta] = [Ds]
# betas = np.array(betas)
betas = sorted(D.keys())
D_ave = np.array([np.average(np.array(D[k]), axis=0) for k in betas])
D_err = np.array([np.std(np.array(D[k]), axis=0) for k in betas])
X, Y = np.meshgrid(T, betas)
ax.plot_wireframe(X, Y, D_ave - D_err, cstride=10, rstride=1, color='g', alpha=0.4)
ax.plot_wireframe(X, Y, D_ave + D_err, cstride=10, rstride=1, color='r', alpha=0.4)
ax.plot_wireframe(X, Y, D_ave, cstride=10, rstride=1)
ax.set_title('Fractal dimension')
ax.set_xlabel(r'$T$')
ax.set_ylabel(r'$\beta$')
ax.set_zlabel(r'$D(T)$')
plt.show()
if __name__ == '__main__':
plot_Ds()
# plot_Ds_3d()
|
mit
|
ChanChiChoi/scikit-learn
|
sklearn/decomposition/tests/test_dict_learning.py
|
47
|
8095
|
import numpy as np
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import ignore_warnings
from sklearn.decomposition import DictionaryLearning
from sklearn.decomposition import MiniBatchDictionaryLearning
from sklearn.decomposition import SparseCoder
from sklearn.decomposition import dict_learning_online
from sklearn.decomposition import sparse_encode
rng_global = np.random.RandomState(0)
n_samples, n_features = 10, 8
X = rng_global.randn(n_samples, n_features)
def test_dict_learning_shapes():
n_components = 5
dico = DictionaryLearning(n_components, random_state=0).fit(X)
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_overcomplete():
n_components = 12
dico = DictionaryLearning(n_components, random_state=0).fit(X)
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_reconstruction():
n_components = 12
dico = DictionaryLearning(n_components, transform_algorithm='omp',
transform_alpha=0.001, random_state=0)
code = dico.fit(X).transform(X)
assert_array_almost_equal(np.dot(code, dico.components_), X)
dico.set_params(transform_algorithm='lasso_lars')
code = dico.transform(X)
assert_array_almost_equal(np.dot(code, dico.components_), X, decimal=2)
# used to test lars here too, but there's no guarantee the number of
# nonzero atoms is right.
def test_dict_learning_reconstruction_parallel():
# regression test that parallel reconstruction works with n_jobs=-1
n_components = 12
dico = DictionaryLearning(n_components, transform_algorithm='omp',
transform_alpha=0.001, random_state=0, n_jobs=-1)
code = dico.fit(X).transform(X)
assert_array_almost_equal(np.dot(code, dico.components_), X)
dico.set_params(transform_algorithm='lasso_lars')
code = dico.transform(X)
assert_array_almost_equal(np.dot(code, dico.components_), X, decimal=2)
def test_dict_learning_nonzero_coefs():
n_components = 4
dico = DictionaryLearning(n_components, transform_algorithm='lars',
transform_n_nonzero_coefs=3, random_state=0)
code = dico.fit(X).transform(X[1])
assert_true(len(np.flatnonzero(code)) == 3)
dico.set_params(transform_algorithm='omp')
code = dico.transform(X[1])
assert_equal(len(np.flatnonzero(code)), 3)
def test_dict_learning_unknown_fit_algorithm():
n_components = 5
dico = DictionaryLearning(n_components, fit_algorithm='<unknown>')
assert_raises(ValueError, dico.fit, X)
def test_dict_learning_split():
n_components = 5
dico = DictionaryLearning(n_components, transform_algorithm='threshold',
random_state=0)
code = dico.fit(X).transform(X)
dico.split_sign = True
split_code = dico.transform(X)
assert_array_equal(split_code[:, :n_components] -
split_code[:, n_components:], code)
def test_dict_learning_online_shapes():
rng = np.random.RandomState(0)
n_components = 8
code, dictionary = dict_learning_online(X, n_components=n_components,
alpha=1, random_state=rng)
assert_equal(code.shape, (n_samples, n_components))
assert_equal(dictionary.shape, (n_components, n_features))
assert_equal(np.dot(code, dictionary).shape, X.shape)
def test_dict_learning_online_verbosity():
n_components = 5
# test verbosity
from sklearn.externals.six.moves import cStringIO as StringIO
import sys
old_stdout = sys.stdout
try:
sys.stdout = StringIO()
dico = MiniBatchDictionaryLearning(n_components, n_iter=20, verbose=1,
random_state=0)
dico.fit(X)
dico = MiniBatchDictionaryLearning(n_components, n_iter=20, verbose=2,
random_state=0)
dico.fit(X)
dict_learning_online(X, n_components=n_components, alpha=1, verbose=1,
random_state=0)
dict_learning_online(X, n_components=n_components, alpha=1, verbose=2,
random_state=0)
finally:
sys.stdout = old_stdout
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_online_estimator_shapes():
n_components = 5
dico = MiniBatchDictionaryLearning(n_components, n_iter=20, random_state=0)
dico.fit(X)
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_online_overcomplete():
n_components = 12
dico = MiniBatchDictionaryLearning(n_components, n_iter=20,
random_state=0).fit(X)
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_online_initialization():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features)
dico = MiniBatchDictionaryLearning(n_components, n_iter=0,
dict_init=V, random_state=0).fit(X)
assert_array_equal(dico.components_, V)
def test_dict_learning_online_partial_fit():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
dict1 = MiniBatchDictionaryLearning(n_components, n_iter=10 * len(X),
batch_size=1,
alpha=1, shuffle=False, dict_init=V,
random_state=0).fit(X)
dict2 = MiniBatchDictionaryLearning(n_components, alpha=1,
n_iter=1, dict_init=V,
random_state=0)
for i in range(10):
for sample in X:
dict2.partial_fit(sample)
assert_true(not np.all(sparse_encode(X, dict1.components_, alpha=1) ==
0))
assert_array_almost_equal(dict1.components_, dict2.components_,
decimal=2)
def test_sparse_encode_shapes():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
for algo in ('lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'):
code = sparse_encode(X, V, algorithm=algo)
assert_equal(code.shape, (n_samples, n_components))
def test_sparse_encode_error():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
code = sparse_encode(X, V, alpha=0.001)
assert_true(not np.all(code == 0))
assert_less(np.sqrt(np.sum((np.dot(code, V) - X) ** 2)), 0.1)
def test_sparse_encode_error_default_sparsity():
rng = np.random.RandomState(0)
X = rng.randn(100, 64)
D = rng.randn(2, 64)
code = ignore_warnings(sparse_encode)(X, D, algorithm='omp',
n_nonzero_coefs=None)
assert_equal(code.shape, (100, 2))
def test_unknown_method():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
assert_raises(ValueError, sparse_encode, X, V, algorithm="<unknown>")
def test_sparse_coder_estimator():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
code = SparseCoder(dictionary=V, transform_algorithm='lasso_lars',
transform_alpha=0.001).transform(X)
assert_true(not np.all(code == 0))
assert_less(np.sqrt(np.sum((np.dot(code, V) - X) ** 2)), 0.1)
|
bsd-3-clause
|
kunalj101/scipy2015-blaze-bokeh
|
full_plot.py
|
6
|
6077
|
# -*- coding: utf-8 -*-
from __future__ import print_function
import numpy as np
import netCDF4
import pandas as pd
from bokeh.browserlib import view
from bokeh.plotting import figure, show, vplot, hplot, output_server, cursession
from bokeh.palettes import RdYlBu11, RdBu11
from bokeh.models.widgets import Select, Slider
from bokeh.models.actions import Callback
from bokeh.models import ColumnDataSource
from bokeh.models import Plot, Text
import world_countries_1 as wc
year = 1850
month = 1
years = [str(x) for x in np.arange(1850, 2015, 1)]
months = [str(x) for x in np.arange(1, 13, 1)]
months_str = ['January', 'February', 'March', 'April', 'May', 'June', 'July', 'August', 'September', 'October', 'November', 'December']
month_str = months_str[month-1]
#source = ColumnDataSource(data=dict(image=[]))
data = netCDF4.Dataset('data/Land_and_Ocean_LatLong1.nc')
t = data.variables['temperature']
world_countries = wc.data.copy()
country= pd.DataFrame.from_dict(world_countries, orient='index')
def hex_to_rgb(value):
"""Given a color in hex format, return it in RGB."""
values = value.lstrip('#')
lv = len(values)
rgb = list(int(values[i:i + lv // 3], 16) for i in range(0, lv, lv // 3))
return rgb
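# For illustration (not part of the original script): a 6-digit hex color such
# as '#ff8000' decodes to [255, 128, 0].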
class RGBAColorMapper(object):
"""Maps floating point values to rgb values over a palette"""
def __init__(self, low, high, palette):
self.range = np.linspace(low, high, len(palette))
        # list() keeps the unpacking working under both Python 2 and Python 3
        self.r, self.g, self.b = np.array(list(zip(*[hex_to_rgb(i) for i in palette])))
def color(self, data):
"""Maps your data values to the pallette with linear interpolation"""
red = np.interp(data, self.range, self.r)
blue = np.interp(data, self.range, self.b)
green = np.interp(data, self.range, self.g)
# Style plot to return a grey color when value is 'nan'
red[np.isnan(red)] = 240
blue[np.isnan(blue)] = 240
green[np.isnan(green)] = 240
colors = np.dstack([red.astype(np.uint8),
green.astype(np.uint8),
blue.astype(np.uint8),
np.full_like(data, 255, dtype=np.uint8)])
return colors.view(dtype=np.uint32).reshape(data.shape)
colormap = RGBAColorMapper(-6, 6, RdBu11)
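# Usage sketch (illustrative only, assumed behaviour of the mapper above):
# colormap.color(np.array([[-6.0, 0.0], [3.0, np.nan]])) returns a (2, 2)
# uint32 RGBA view, with the NaN cell mapped to the grey (240, 240, 240) fill.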
def get_slice(t, year, month):
i = (year - 1850)*12 + month - 1
time = data.variables.get('time')
return colormap.color(t[i, :, :])
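# The index arithmetic above assumes a monthly time axis starting at January
# 1850, e.g. get_slice(t, 1891, 1) reads frame (1891 - 1850) * 12 + 0 = 492.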
output_server("earth")
plot = figure(
plot_height=540,
plot_width=1080,
toolbar_location=None,
x_axis_type=None, y_axis_type=None,
x_range=(-180, 180),
y_range=(-89, 89))
image = get_slice(t, 1891, 1)
plot.image_rgba(
image=[image],
x=[-180], y=[-89],
dw=[360], dh=[178], name="im"
)
plot.text(x=10, y=-88, text=[month_str], text_font_size='25pt', text_color='black', name="mo")
plot.text(x=-22, y=-88, text=[str(year)], text_font_size='25pt', text_color='black', name="ye")
plot.patches(xs=country['lons'], ys=country['lats'], fill_color="white", fill_alpha=0,
line_color="black", line_width=0.5)
# Legend
from bokeh.models.glyphs import Text, Rect
from bokeh.models import Plot, Range1d
from bokeh.palettes import RdBu11
from bokeh.plotting import output_notebook, show
xdr = Range1d(0, 100)
ydr = Range1d(0, 600)
legend_plot = Plot(
x_range=xdr,
y_range=ydr,
title="",
plot_width=100,
plot_height=500,
min_border=0,
toolbar_location=None,
outline_line_color="#FFFFFF",
title_text_align='left',
title_text_baseline='top',
)
minimum = Text(x=40, y=-2, text=['-6 ºC'])
legend_plot.add_glyph(minimum)
maximum = Text(x=40, y=460, text=['6 ºC'])
legend_plot.add_glyph(maximum)
palette = RdBu11
width = 40
for i, color in enumerate(palette):
rect = Rect(
x=40, y=(width * (i + 1)),
width=width, height=40,
fill_color=color, line_color='black'
)
legend_plot.add_glyph(rect)
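# Each palette entry above becomes one 40x40 Rect stacked bottom-to-top, so the
# eleven RdBu11 colors form a vertical color bar shown next to the map (see the
# hplot layout below).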
#
import math
import numpy as np
from bokeh.models import DatetimeTickFormatter, LinearAxis, ColumnDataSource, HoverTool
from collections import OrderedDict
df = pd.read_csv('data/Land_Ocean_Monthly_Anomaly_Average.csv', index_col=0)
df['date'] = pd.to_datetime(df['time'])
df['moving_average'] = pd.rolling_mean(df['anomaly'], 12)
df = df.fillna(0)
TOOLS="crosshair,pan,wheel_zoom,box_zoom,reset,hover,previewsave"
source = ColumnDataSource(df)
p = figure(x_axis_type="datetime", width=1000, height=200, tools=TOOLS, toolbar_location=None)
dates = np.array(df['date'], dtype=np.datetime64)
anomaly = np.array(df['anomaly'])
p.line(dates, anomaly, color='lightgrey', legend='anom')
p.line('date', 'moving_average', color='red', legend='avg', source=source, name="mva")
p.grid.grid_line_alpha = 0.2
p.yaxis.axis_label = 'Anomaly(ºC)'
p.xaxis.major_label_orientation = math.pi/4
p.legend.orientation = "bottom_right"
xformatter = DatetimeTickFormatter(formats=dict(months=["%b %Y"], years=["%Y"]))
#xaxis = DatetimeAxis(formatter=xformatter)
p.xaxis[0].formatter = xformatter
hover = p.select(dict(type=HoverTool))
hover.tooltips = OrderedDict([
("anomaly", "@anomaly"),
("time", "@time"),
])
hover.renderers = p.select("mva")
# Layout
layout = hplot(plot, legend_plot)
bottom = vplot(layout, p)
show(bottom)
renderer = plot.select(dict(name="im"))
ds = renderer[0].data_source
month_renderer = plot.select(dict(name="mo"))
month_ds = month_renderer[0].data_source
year_renderer = plot.select(dict(name="ye"))
year_ds = year_renderer[0].data_source
import time
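# Animation loop: each pass writes a new image, month label and year label into
# the server-side data sources and pushes them with cursession().store_objects(),
# so the browser view updates roughly five times per second (time.sleep(0.2)).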
while True:
for year_index in np.arange(1950, 2015, 1):
year_ds.data["text"] = [str(year_index)]
#cursession().store_objects(year_ds)
for month_index in np.arange(1, 13, 1):
month_ds.data["text"] = [months_str[month_index-1]]
image = get_slice(t, year_index, month_index)
#print(image)
ds.data["image"] = [image]
cursession().store_objects(ds, month_ds, year_ds)
#cursession().store_objects(ds, year_ds)
time.sleep(0.2)
|
mit
|
TomAugspurger/pandas
|
pandas/tests/arrays/sparse/test_arithmetics.py
|
1
|
19259
|
import operator
import numpy as np
import pytest
import pandas as pd
import pandas._testing as tm
from pandas.core import ops
from pandas.core.arrays.sparse import SparseArray, SparseDtype
@pytest.fixture(params=["integer", "block"])
def kind(request):
"""kind kwarg to pass to SparseArray/SparseSeries"""
return request.param
@pytest.fixture(params=[True, False])
def mix(request):
# whether to operate op(sparse, dense) instead of op(sparse, sparse)
return request.param
class TestSparseArrayArithmetics:
_base = np.array
_klass = SparseArray
def _assert(self, a, b):
tm.assert_numpy_array_equal(a, b)
def _check_numeric_ops(self, a, b, a_dense, b_dense, mix, op):
with np.errstate(invalid="ignore", divide="ignore"):
if mix:
result = op(a, b_dense).to_dense()
else:
result = op(a, b).to_dense()
if op in [operator.truediv, ops.rtruediv]:
# pandas uses future division
expected = op(a_dense * 1.0, b_dense)
else:
expected = op(a_dense, b_dense)
if op in [operator.floordiv, ops.rfloordiv]:
# Series sets 1//0 to np.inf, which SparseArray does not do (yet)
mask = np.isinf(expected)
if mask.any():
expected[mask] = np.nan
self._assert(result, expected)
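    # The numeric-ops helper above checks that densifying op(sparse, sparse-or-dense)
    # matches applying op to the dense inputs, with inf from floor division by
    # zero mapped to NaN before comparison.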
def _check_bool_result(self, res):
assert isinstance(res, self._klass)
assert isinstance(res.dtype, SparseDtype)
assert res.dtype.subtype == np.bool
assert isinstance(res.fill_value, bool)
def _check_comparison_ops(self, a, b, a_dense, b_dense):
with np.errstate(invalid="ignore"):
            # Unfortunately, trying to wrap the computation of each expected
            # value with np.errstate() is too tedious.
#
# sparse & sparse
self._check_bool_result(a == b)
self._assert((a == b).to_dense(), a_dense == b_dense)
self._check_bool_result(a != b)
self._assert((a != b).to_dense(), a_dense != b_dense)
self._check_bool_result(a >= b)
self._assert((a >= b).to_dense(), a_dense >= b_dense)
self._check_bool_result(a <= b)
self._assert((a <= b).to_dense(), a_dense <= b_dense)
self._check_bool_result(a > b)
self._assert((a > b).to_dense(), a_dense > b_dense)
self._check_bool_result(a < b)
self._assert((a < b).to_dense(), a_dense < b_dense)
# sparse & dense
self._check_bool_result(a == b_dense)
self._assert((a == b_dense).to_dense(), a_dense == b_dense)
self._check_bool_result(a != b_dense)
self._assert((a != b_dense).to_dense(), a_dense != b_dense)
self._check_bool_result(a >= b_dense)
self._assert((a >= b_dense).to_dense(), a_dense >= b_dense)
self._check_bool_result(a <= b_dense)
self._assert((a <= b_dense).to_dense(), a_dense <= b_dense)
self._check_bool_result(a > b_dense)
self._assert((a > b_dense).to_dense(), a_dense > b_dense)
self._check_bool_result(a < b_dense)
self._assert((a < b_dense).to_dense(), a_dense < b_dense)
def _check_logical_ops(self, a, b, a_dense, b_dense):
# sparse & sparse
self._check_bool_result(a & b)
self._assert((a & b).to_dense(), a_dense & b_dense)
self._check_bool_result(a | b)
self._assert((a | b).to_dense(), a_dense | b_dense)
# sparse & dense
self._check_bool_result(a & b_dense)
self._assert((a & b_dense).to_dense(), a_dense & b_dense)
self._check_bool_result(a | b_dense)
self._assert((a | b_dense).to_dense(), a_dense | b_dense)
@pytest.mark.parametrize("scalar", [0, 1, 3])
@pytest.mark.parametrize("fill_value", [None, 0, 2])
def test_float_scalar(
self, kind, mix, all_arithmetic_functions, fill_value, scalar
):
op = all_arithmetic_functions
values = self._base([np.nan, 1, 2, 0, np.nan, 0, 1, 2, 1, np.nan])
a = self._klass(values, kind=kind, fill_value=fill_value)
self._check_numeric_ops(a, scalar, values, scalar, mix, op)
def test_float_scalar_comparison(self, kind):
values = self._base([np.nan, 1, 2, 0, np.nan, 0, 1, 2, 1, np.nan])
a = self._klass(values, kind=kind)
self._check_comparison_ops(a, 1, values, 1)
self._check_comparison_ops(a, 0, values, 0)
self._check_comparison_ops(a, 3, values, 3)
a = self._klass(values, kind=kind, fill_value=0)
self._check_comparison_ops(a, 1, values, 1)
self._check_comparison_ops(a, 0, values, 0)
self._check_comparison_ops(a, 3, values, 3)
a = self._klass(values, kind=kind, fill_value=2)
self._check_comparison_ops(a, 1, values, 1)
self._check_comparison_ops(a, 0, values, 0)
self._check_comparison_ops(a, 3, values, 3)
def test_float_same_index(self, kind, mix, all_arithmetic_functions):
# when sp_index are the same
op = all_arithmetic_functions
values = self._base([np.nan, 1, 2, 0, np.nan, 0, 1, 2, 1, np.nan])
rvalues = self._base([np.nan, 2, 3, 4, np.nan, 0, 1, 3, 2, np.nan])
a = self._klass(values, kind=kind)
b = self._klass(rvalues, kind=kind)
self._check_numeric_ops(a, b, values, rvalues, mix, op)
values = self._base([0.0, 1.0, 2.0, 6.0, 0.0, 0.0, 1.0, 2.0, 1.0, 0.0])
rvalues = self._base([0.0, 2.0, 3.0, 4.0, 0.0, 0.0, 1.0, 3.0, 2.0, 0.0])
a = self._klass(values, kind=kind, fill_value=0)
b = self._klass(rvalues, kind=kind, fill_value=0)
self._check_numeric_ops(a, b, values, rvalues, mix, op)
def test_float_same_index_comparison(self, kind):
# when sp_index are the same
values = self._base([np.nan, 1, 2, 0, np.nan, 0, 1, 2, 1, np.nan])
rvalues = self._base([np.nan, 2, 3, 4, np.nan, 0, 1, 3, 2, np.nan])
a = self._klass(values, kind=kind)
b = self._klass(rvalues, kind=kind)
self._check_comparison_ops(a, b, values, rvalues)
values = self._base([0.0, 1.0, 2.0, 6.0, 0.0, 0.0, 1.0, 2.0, 1.0, 0.0])
rvalues = self._base([0.0, 2.0, 3.0, 4.0, 0.0, 0.0, 1.0, 3.0, 2.0, 0.0])
a = self._klass(values, kind=kind, fill_value=0)
b = self._klass(rvalues, kind=kind, fill_value=0)
self._check_comparison_ops(a, b, values, rvalues)
def test_float_array(self, kind, mix, all_arithmetic_functions):
op = all_arithmetic_functions
values = self._base([np.nan, 1, 2, 0, np.nan, 0, 1, 2, 1, np.nan])
rvalues = self._base([2, np.nan, 2, 3, np.nan, 0, 1, 5, 2, np.nan])
a = self._klass(values, kind=kind)
b = self._klass(rvalues, kind=kind)
self._check_numeric_ops(a, b, values, rvalues, mix, op)
self._check_numeric_ops(a, b * 0, values, rvalues * 0, mix, op)
a = self._klass(values, kind=kind, fill_value=0)
b = self._klass(rvalues, kind=kind)
self._check_numeric_ops(a, b, values, rvalues, mix, op)
a = self._klass(values, kind=kind, fill_value=0)
b = self._klass(rvalues, kind=kind, fill_value=0)
self._check_numeric_ops(a, b, values, rvalues, mix, op)
a = self._klass(values, kind=kind, fill_value=1)
b = self._klass(rvalues, kind=kind, fill_value=2)
self._check_numeric_ops(a, b, values, rvalues, mix, op)
def test_float_array_different_kind(self, mix, all_arithmetic_functions):
op = all_arithmetic_functions
values = self._base([np.nan, 1, 2, 0, np.nan, 0, 1, 2, 1, np.nan])
rvalues = self._base([2, np.nan, 2, 3, np.nan, 0, 1, 5, 2, np.nan])
a = self._klass(values, kind="integer")
b = self._klass(rvalues, kind="block")
self._check_numeric_ops(a, b, values, rvalues, mix, op)
self._check_numeric_ops(a, b * 0, values, rvalues * 0, mix, op)
a = self._klass(values, kind="integer", fill_value=0)
b = self._klass(rvalues, kind="block")
self._check_numeric_ops(a, b, values, rvalues, mix, op)
a = self._klass(values, kind="integer", fill_value=0)
b = self._klass(rvalues, kind="block", fill_value=0)
self._check_numeric_ops(a, b, values, rvalues, mix, op)
a = self._klass(values, kind="integer", fill_value=1)
b = self._klass(rvalues, kind="block", fill_value=2)
self._check_numeric_ops(a, b, values, rvalues, mix, op)
def test_float_array_comparison(self, kind):
values = self._base([np.nan, 1, 2, 0, np.nan, 0, 1, 2, 1, np.nan])
rvalues = self._base([2, np.nan, 2, 3, np.nan, 0, 1, 5, 2, np.nan])
a = self._klass(values, kind=kind)
b = self._klass(rvalues, kind=kind)
self._check_comparison_ops(a, b, values, rvalues)
self._check_comparison_ops(a, b * 0, values, rvalues * 0)
a = self._klass(values, kind=kind, fill_value=0)
b = self._klass(rvalues, kind=kind)
self._check_comparison_ops(a, b, values, rvalues)
a = self._klass(values, kind=kind, fill_value=0)
b = self._klass(rvalues, kind=kind, fill_value=0)
self._check_comparison_ops(a, b, values, rvalues)
a = self._klass(values, kind=kind, fill_value=1)
b = self._klass(rvalues, kind=kind, fill_value=2)
self._check_comparison_ops(a, b, values, rvalues)
def test_int_array(self, kind, mix, all_arithmetic_functions):
op = all_arithmetic_functions
# have to specify dtype explicitly until fixing GH 667
dtype = np.int64
values = self._base([0, 1, 2, 0, 0, 0, 1, 2, 1, 0], dtype=dtype)
rvalues = self._base([2, 0, 2, 3, 0, 0, 1, 5, 2, 0], dtype=dtype)
a = self._klass(values, dtype=dtype, kind=kind)
assert a.dtype == SparseDtype(dtype)
b = self._klass(rvalues, dtype=dtype, kind=kind)
assert b.dtype == SparseDtype(dtype)
self._check_numeric_ops(a, b, values, rvalues, mix, op)
self._check_numeric_ops(a, b * 0, values, rvalues * 0, mix, op)
a = self._klass(values, fill_value=0, dtype=dtype, kind=kind)
assert a.dtype == SparseDtype(dtype)
b = self._klass(rvalues, dtype=dtype, kind=kind)
assert b.dtype == SparseDtype(dtype)
self._check_numeric_ops(a, b, values, rvalues, mix, op)
a = self._klass(values, fill_value=0, dtype=dtype, kind=kind)
assert a.dtype == SparseDtype(dtype)
b = self._klass(rvalues, fill_value=0, dtype=dtype, kind=kind)
assert b.dtype == SparseDtype(dtype)
self._check_numeric_ops(a, b, values, rvalues, mix, op)
a = self._klass(values, fill_value=1, dtype=dtype, kind=kind)
assert a.dtype == SparseDtype(dtype, fill_value=1)
b = self._klass(rvalues, fill_value=2, dtype=dtype, kind=kind)
assert b.dtype == SparseDtype(dtype, fill_value=2)
self._check_numeric_ops(a, b, values, rvalues, mix, op)
def test_int_array_comparison(self, kind):
dtype = "int64"
        # int32 not implemented at the moment
values = self._base([0, 1, 2, 0, 0, 0, 1, 2, 1, 0], dtype=dtype)
rvalues = self._base([2, 0, 2, 3, 0, 0, 1, 5, 2, 0], dtype=dtype)
a = self._klass(values, dtype=dtype, kind=kind)
b = self._klass(rvalues, dtype=dtype, kind=kind)
self._check_comparison_ops(a, b, values, rvalues)
self._check_comparison_ops(a, b * 0, values, rvalues * 0)
a = self._klass(values, dtype=dtype, kind=kind, fill_value=0)
b = self._klass(rvalues, dtype=dtype, kind=kind)
self._check_comparison_ops(a, b, values, rvalues)
a = self._klass(values, dtype=dtype, kind=kind, fill_value=0)
b = self._klass(rvalues, dtype=dtype, kind=kind, fill_value=0)
self._check_comparison_ops(a, b, values, rvalues)
a = self._klass(values, dtype=dtype, kind=kind, fill_value=1)
b = self._klass(rvalues, dtype=dtype, kind=kind, fill_value=2)
self._check_comparison_ops(a, b, values, rvalues)
@pytest.mark.parametrize("fill_value", [True, False, np.nan])
def test_bool_same_index(self, kind, fill_value):
# GH 14000
# when sp_index are the same
values = self._base([True, False, True, True], dtype=np.bool)
rvalues = self._base([True, False, True, True], dtype=np.bool)
a = self._klass(values, kind=kind, dtype=np.bool, fill_value=fill_value)
b = self._klass(rvalues, kind=kind, dtype=np.bool, fill_value=fill_value)
self._check_logical_ops(a, b, values, rvalues)
@pytest.mark.parametrize("fill_value", [True, False, np.nan])
def test_bool_array_logical(self, kind, fill_value):
# GH 14000
# when sp_index are the same
values = self._base([True, False, True, False, True, True], dtype=np.bool)
rvalues = self._base([True, False, False, True, False, True], dtype=np.bool)
a = self._klass(values, kind=kind, dtype=np.bool, fill_value=fill_value)
b = self._klass(rvalues, kind=kind, dtype=np.bool, fill_value=fill_value)
self._check_logical_ops(a, b, values, rvalues)
def test_mixed_array_float_int(self, kind, mix, all_arithmetic_functions):
op = all_arithmetic_functions
rdtype = "int64"
values = self._base([np.nan, 1, 2, 0, np.nan, 0, 1, 2, 1, np.nan])
rvalues = self._base([2, 0, 2, 3, 0, 0, 1, 5, 2, 0], dtype=rdtype)
a = self._klass(values, kind=kind)
b = self._klass(rvalues, kind=kind)
assert b.dtype == SparseDtype(rdtype)
self._check_numeric_ops(a, b, values, rvalues, mix, op)
self._check_numeric_ops(a, b * 0, values, rvalues * 0, mix, op)
a = self._klass(values, kind=kind, fill_value=0)
b = self._klass(rvalues, kind=kind)
assert b.dtype == SparseDtype(rdtype)
self._check_numeric_ops(a, b, values, rvalues, mix, op)
a = self._klass(values, kind=kind, fill_value=0)
b = self._klass(rvalues, kind=kind, fill_value=0)
assert b.dtype == SparseDtype(rdtype)
self._check_numeric_ops(a, b, values, rvalues, mix, op)
a = self._klass(values, kind=kind, fill_value=1)
b = self._klass(rvalues, kind=kind, fill_value=2)
assert b.dtype == SparseDtype(rdtype, fill_value=2)
self._check_numeric_ops(a, b, values, rvalues, mix, op)
def test_mixed_array_comparison(self, kind):
rdtype = "int64"
        # int32 not implemented at the moment
values = self._base([np.nan, 1, 2, 0, np.nan, 0, 1, 2, 1, np.nan])
rvalues = self._base([2, 0, 2, 3, 0, 0, 1, 5, 2, 0], dtype=rdtype)
a = self._klass(values, kind=kind)
b = self._klass(rvalues, kind=kind)
assert b.dtype == SparseDtype(rdtype)
self._check_comparison_ops(a, b, values, rvalues)
self._check_comparison_ops(a, b * 0, values, rvalues * 0)
a = self._klass(values, kind=kind, fill_value=0)
b = self._klass(rvalues, kind=kind)
assert b.dtype == SparseDtype(rdtype)
self._check_comparison_ops(a, b, values, rvalues)
a = self._klass(values, kind=kind, fill_value=0)
b = self._klass(rvalues, kind=kind, fill_value=0)
assert b.dtype == SparseDtype(rdtype)
self._check_comparison_ops(a, b, values, rvalues)
a = self._klass(values, kind=kind, fill_value=1)
b = self._klass(rvalues, kind=kind, fill_value=2)
assert b.dtype == SparseDtype(rdtype, fill_value=2)
self._check_comparison_ops(a, b, values, rvalues)
def test_xor(self):
s = SparseArray([True, True, False, False])
t = SparseArray([True, False, True, False])
result = s ^ t
sp_index = pd.core.arrays.sparse.IntIndex(4, np.array([0, 1, 2], dtype="int32"))
expected = SparseArray([False, True, True], sparse_index=sp_index)
tm.assert_sp_array_equal(result, expected)
@pytest.mark.parametrize("op", [operator.eq, operator.add])
def test_with_list(op):
arr = SparseArray([0, 1], fill_value=0)
result = op(arr, [0, 1])
expected = op(arr, SparseArray([0, 1]))
tm.assert_sp_array_equal(result, expected)
def test_with_dataframe():
# GH#27910
arr = SparseArray([0, 1], fill_value=0)
df = pd.DataFrame([[1, 2], [3, 4]])
result = arr.__add__(df)
assert result is NotImplemented
def test_with_zerodim_ndarray():
# GH#27910
arr = SparseArray([0, 1], fill_value=0)
result = arr * np.array(2)
expected = arr * 2
tm.assert_sp_array_equal(result, expected)
@pytest.mark.parametrize("ufunc", [np.abs, np.exp])
@pytest.mark.parametrize(
"arr", [SparseArray([0, 0, -1, 1]), SparseArray([None, None, -1, 1])]
)
def test_ufuncs(ufunc, arr):
result = ufunc(arr)
fill_value = ufunc(arr.fill_value)
expected = SparseArray(ufunc(np.asarray(arr)), fill_value=fill_value)
tm.assert_sp_array_equal(result, expected)
@pytest.mark.parametrize(
"a, b",
[
(SparseArray([0, 0, 0]), np.array([0, 1, 2])),
(SparseArray([0, 0, 0], fill_value=1), np.array([0, 1, 2])),
(SparseArray([0, 0, 0], fill_value=1), np.array([0, 1, 2])),
(SparseArray([0, 0, 0], fill_value=1), np.array([0, 1, 2])),
(SparseArray([0, 0, 0], fill_value=1), np.array([0, 1, 2])),
],
)
@pytest.mark.parametrize("ufunc", [np.add, np.greater])
def test_binary_ufuncs(ufunc, a, b):
# can't say anything about fill value here.
result = ufunc(a, b)
expected = ufunc(np.asarray(a), np.asarray(b))
assert isinstance(result, SparseArray)
tm.assert_numpy_array_equal(np.asarray(result), expected)
def test_ndarray_inplace():
sparray = SparseArray([0, 2, 0, 0])
ndarray = np.array([0, 1, 2, 3])
ndarray += sparray
expected = np.array([0, 3, 2, 3])
tm.assert_numpy_array_equal(ndarray, expected)
def test_sparray_inplace():
sparray = SparseArray([0, 2, 0, 0])
ndarray = np.array([0, 1, 2, 3])
sparray += ndarray
expected = SparseArray([0, 3, 2, 3], fill_value=0)
tm.assert_sp_array_equal(sparray, expected)
@pytest.mark.parametrize("fill_value", [True, False])
def test_invert(fill_value):
arr = np.array([True, False, False, True])
sparray = SparseArray(arr, fill_value=fill_value)
result = ~sparray
expected = SparseArray(~arr, fill_value=not fill_value)
tm.assert_sp_array_equal(result, expected)
result = ~pd.Series(sparray)
expected = pd.Series(expected)
tm.assert_series_equal(result, expected)
result = ~pd.DataFrame({"A": sparray})
expected = pd.DataFrame({"A": expected})
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("fill_value", [0, np.nan])
@pytest.mark.parametrize("op", [operator.pos, operator.neg])
def test_unary_op(op, fill_value):
arr = np.array([0, 1, np.nan, 2])
sparray = SparseArray(arr, fill_value=fill_value)
result = op(sparray)
expected = SparseArray(op(arr), fill_value=op(fill_value))
tm.assert_sp_array_equal(result, expected)
|
bsd-3-clause
|
bryansim/Python
|
mousetrackerproc/mousetracker_processing.py
|
3
|
1035
|
import pandas as pd
import os
rootdir = 'C:/Users/Bryan/Desktop/MT_corrected'
def self_chips(line):
    # Extract the participant ID (first 7 characters) and the response field
    # (characters 64-66) from a fixed-width line; equivalent to
    # [line[:7], line[64:67]].
    pid = ""
    answer = ""
    for i, char in enumerate(line):
        if i <= 6:
            pid = str(pid) + str(char)
        if i > 63 and i < 67:
            answer = answer + char
    return [pid, answer]
def all_participant_colors(rootdir):
    # Walk the corrected MouseTracker export directory and collect
    # [participant_id, answer] pairs from every line flagged 'Y' in column 23.
    participant_colors = []
    for subdir, dirs, files in os.walk(rootdir):
        for file_name in files:
            with open(os.path.join(subdir, file_name)) as f:
                for i, line in enumerate(f):
                    try:
                        if i > 4 and i < 75 and line[23] == 'Y':
                            participant_colors.append(self_chips(line))
                    except IndexError:
                        pass
    return participant_colors
hopefully_this_works = all_participant_colors(rootdir)
hopefully_this_works = pd.DataFrame(hopefully_this_works)
hopefully_this_works.to_csv("answer.csv", index = False, header = False)
|
gpl-2.0
|
ilyes14/scikit-learn
|
examples/plot_johnson_lindenstrauss_bound.py
|
127
|
7477
|
r"""
=====================================================================
The Johnson-Lindenstrauss bound for embedding with random projections
=====================================================================
The `Johnson-Lindenstrauss lemma`_ states that any high dimensional
dataset can be randomly projected into a lower dimensional Euclidean
space while controlling the distortion in the pairwise distances.
.. _`Johnson-Lindenstrauss lemma`: http://en.wikipedia.org/wiki/Johnson%E2%80%93Lindenstrauss_lemma
Theoretical bounds
==================
The distortion introduced by a random projection `p` is controlled by the
fact that `p` defines an eps-embedding with good probability, as defined
by:
.. math::
(1 - eps) \|u - v\|^2 < \|p(u) - p(v)\|^2 < (1 + eps) \|u - v\|^2
Where u and v are any rows taken from a dataset of shape [n_samples,
n_features] and p is a projection by a random Gaussian N(0, 1) matrix
with shape [n_components, n_features] (or a sparse Achlioptas matrix).
The minimum number of components required to guarantee the eps-embedding
is given by:
.. math::
n\_components >= 4 log(n\_samples) / (eps^2 / 2 - eps^3 / 3)
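As a rough sanity check (taking ``log`` as the natural logarithm, as in
``sklearn.random_projection.johnson_lindenstrauss_min_dim``), for
``n_samples = 500`` and ``eps = 0.1`` the bound evaluates to
``4 * ln(500) / (0.1 ** 2 / 2 - 0.1 ** 3 / 3)``, i.e. roughly 5300 dimensions,
which is consistent with the remarks at the end of this docstring.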
The first plot shows that with an increasing number of samples ``n_samples``,
the minimal number of dimensions ``n_components`` increases logarithmically
in order to guarantee an ``eps``-embedding.
The second plot shows that increasing the admissible distortion ``eps``
drastically reduces the minimal number of dimensions ``n_components`` for a
given number of samples ``n_samples``.
Empirical validation
====================
We validate the above bounds on the digits dataset or on the 20 newsgroups
text documents (TF-IDF word frequencies) dataset:
- for the digits dataset, the 8x8 gray-level pixel data for 500
  handwritten digit images are randomly projected to spaces of various
  larger numbers of dimensions ``n_components``.
- for the 20 newsgroups dataset, some 500 documents with 100k
  features in total are projected using a sparse random matrix to smaller
  Euclidean spaces with various values for the target number of dimensions
  ``n_components``.
The default dataset is the digits dataset. To run the example on the twenty
newsgroups dataset, pass the --twenty-newsgroups command line argument to this
script.
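For example (assuming the script is run from the scikit-learn ``examples``
directory)::
    python plot_johnson_lindenstrauss_bound.py --twenty-newsgroups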
For each value of ``n_components``, we plot:
- 2D distribution of sample pairs with pairwise distances in original
and projected spaces as x and y axis respectively.
- 1D histogram of the ratio of those distances (projected / original).
We can see that for low values of ``n_components`` the distribution is wide,
with many distorted pairs and a skewed distribution (due to the hard
limit of zero ratio on the left, as distances are always positive),
while for larger values of ``n_components`` the distortion is controlled
and the distances are well preserved by the random projection.
Remarks
=======
According to the JL lemma, projecting 500 samples without too much distortion
will require at least several thousand dimensions, irrespective of the
number of features of the original dataset.
Hence using random projections on the digits dataset, which only has 64 features
in the input space, does not make sense: it does not allow for dimensionality
reduction in this case.
On the twenty newsgroups dataset, on the other hand, the dimensionality can be
decreased from 56436 down to 10000 while reasonably preserving pairwise distances.
"""
print(__doc__)
import sys
from time import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.random_projection import johnson_lindenstrauss_min_dim
from sklearn.random_projection import SparseRandomProjection
from sklearn.datasets import fetch_20newsgroups_vectorized
from sklearn.datasets import load_digits
from sklearn.metrics.pairwise import euclidean_distances
# Part 1: plot the theoretical dependency between n_components_min and
# n_samples
# range of admissible distortions
eps_range = np.linspace(0.1, 0.99, 5)
colors = plt.cm.Blues(np.linspace(0.3, 1.0, len(eps_range)))
# range of number of samples (observation) to embed
n_samples_range = np.logspace(1, 9, 9)
plt.figure()
for eps, color in zip(eps_range, colors):
min_n_components = johnson_lindenstrauss_min_dim(n_samples_range, eps=eps)
plt.loglog(n_samples_range, min_n_components, color=color)
plt.legend(["eps = %0.1f" % eps for eps in eps_range], loc="lower right")
plt.xlabel("Number of observations to eps-embed")
plt.ylabel("Minimum number of dimensions")
plt.title("Johnson-Lindenstrauss bounds:\nn_samples vs n_components")
# range of admissible distortions
eps_range = np.linspace(0.01, 0.99, 100)
# range of number of samples (observation) to embed
n_samples_range = np.logspace(2, 6, 5)
colors = plt.cm.Blues(np.linspace(0.3, 1.0, len(n_samples_range)))
plt.figure()
for n_samples, color in zip(n_samples_range, colors):
min_n_components = johnson_lindenstrauss_min_dim(n_samples, eps=eps_range)
plt.semilogy(eps_range, min_n_components, color=color)
plt.legend(["n_samples = %d" % n for n in n_samples_range], loc="upper right")
plt.xlabel("Distortion eps")
plt.ylabel("Minimum number of dimensions")
plt.title("Johnson-Lindenstrauss bounds:\nn_components vs eps")
# Part 2: perform sparse random projection of some digits images which are
# quite low dimensional and dense or documents of the 20 newsgroups dataset
# which is both high dimensional and sparse
if '--twenty-newsgroups' in sys.argv:
# Need an internet connection hence not enabled by default
data = fetch_20newsgroups_vectorized().data[:500]
else:
data = load_digits().data[:500]
n_samples, n_features = data.shape
print("Embedding %d samples with dim %d using various random projections"
% (n_samples, n_features))
n_components_range = np.array([300, 1000, 10000])
dists = euclidean_distances(data, squared=True).ravel()
# select only non-identical samples pairs
nonzero = dists != 0
dists = dists[nonzero]
for n_components in n_components_range:
t0 = time()
rp = SparseRandomProjection(n_components=n_components)
projected_data = rp.fit_transform(data)
print("Projected %d samples from %d to %d in %0.3fs"
% (n_samples, n_features, n_components, time() - t0))
if hasattr(rp, 'components_'):
n_bytes = rp.components_.data.nbytes
n_bytes += rp.components_.indices.nbytes
print("Random matrix with size: %0.3fMB" % (n_bytes / 1e6))
projected_dists = euclidean_distances(
projected_data, squared=True).ravel()[nonzero]
plt.figure()
plt.hexbin(dists, projected_dists, gridsize=100, cmap=plt.cm.PuBu)
plt.xlabel("Pairwise squared distances in original space")
plt.ylabel("Pairwise squared distances in projected space")
plt.title("Pairwise distances distribution for n_components=%d" %
n_components)
cb = plt.colorbar()
cb.set_label('Sample pairs counts')
rates = projected_dists / dists
print("Mean distances rate: %0.2f (%0.2f)"
% (np.mean(rates), np.std(rates)))
plt.figure()
plt.hist(rates, bins=50, normed=True, range=(0., 2.))
plt.xlabel("Squared distances rate: projected / original")
plt.ylabel("Distribution of samples pairs")
plt.title("Histogram of pairwise distance rates for n_components=%d" %
n_components)
# TODO: compute the expected value of eps and add them to the previous plot
# as vertical lines / region
plt.show()
|
bsd-3-clause
|
techn0mad/selcald
|
selcald/match_frequency.py
|
1
|
1186
|
# Run with "ipython -i --matplotlib=qt match_frequency.py"
#
from __future__ import print_function
import numpy as np
# import pandas as pd
from scipy import signal
import matplotlib.pyplot as plt
RATE = 44100
# tone synthesis
def tone(freq, cycles, amp=1, rate=RATE):
    length = cycles * (1.0 / rate)         # duration in seconds
    n_samples = int(round(length * rate))  # number of samples to synthesize
    t = np.linspace(0, length, n_samples)
    if freq == 0:
        data = np.zeros(n_samples)
    else:
        data = np.sin(2 * np.pi * freq * t) * amp
    return data
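# For example, tone(312.0, 2000) yields roughly 2000 samples (~45 ms at 44.1 kHz)
# of a 312 Hz sine wave; the loop below builds its reference and test tones this way.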
fig, ax = plt.subplots(1, 1, sharex=True)
for carrier in [312, 473, 716, 1084, 1479]:
print("carrier = ", carrier)
freqs = range(carrier-20, carrier+22, 2)
sig = tone(carrier * 1.0, 2000) # Reference tone
response = []
for freq in freqs:
sig_tx = tone(freq * 1.0, 2000) # Test tone
resp = np.abs(signal.correlate(sig, sig_tx, mode='same'))
response.append(resp.sum())
ax.semilogy(freqs, response, label='tone = {}'.format(carrier))
ax.set_title('Matched filter response')
# ax.axvline(626.67, ls=':') # Guardband markers
# ax.axvline(695.01, ls=':')
ax.legend(loc='best')
ax.margins(0, 0.1)
fig.set_tight_layout(True)
fig.show()
|
gpl-2.0
|
bamos/densenet.pytorch
|
plot.py
|
1
|
2151
|
#!/usr/bin/env python3
import argparse
import os
import numpy as np
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
plt.style.use('bmh')
def main():
parser = argparse.ArgumentParser()
parser.add_argument('expDir', type=str)
args = parser.parse_args()
trainP = os.path.join(args.expDir, 'train.csv')
trainData = np.loadtxt(trainP, delimiter=',').reshape(-1, 3)
testP = os.path.join(args.expDir, 'test.csv')
testData = np.loadtxt(testP, delimiter=',').reshape(-1, 3)
N = 392*2 # Rolling loss over the past epoch.
trainI, trainLoss, trainErr = np.split(trainData, [1,2], axis=1)
trainI, trainLoss, trainErr = [x.ravel() for x in
(trainI, trainLoss, trainErr)]
trainI_, trainLoss_, trainErr_ = rolling(N, trainI, trainLoss, trainErr)
testI, testLoss, testErr = np.split(testData, [1,2], axis=1)
fig, ax = plt.subplots(1, 1, figsize=(6, 5))
# plt.plot(trainI, trainLoss, label='Train')
plt.plot(trainI_, trainLoss_, label='Train')
plt.plot(testI, testLoss, label='Test')
plt.xlabel('Epoch')
plt.ylabel('Cross-Entropy Loss')
plt.legend()
ax.set_yscale('log')
loss_fname = os.path.join(args.expDir, 'loss.png')
plt.savefig(loss_fname)
print('Created {}'.format(loss_fname))
fig, ax = plt.subplots(1, 1, figsize=(6, 5))
# plt.plot(trainI, trainErr, label='Train')
plt.plot(trainI_, trainErr_, label='Train')
plt.plot(testI, testErr, label='Test')
plt.xlabel('Epoch')
plt.ylabel('Error')
ax.set_yscale('log')
plt.legend()
err_fname = os.path.join(args.expDir, 'error.png')
plt.savefig(err_fname)
print('Created {}'.format(err_fname))
loss_err_fname = os.path.join(args.expDir, 'loss-error.png')
os.system('convert +append {} {} {}'.format(loss_fname, err_fname, loss_err_fname))
print('Created {}'.format(loss_err_fname))
def rolling(N, i, loss, err):
    """Smooth loss and err with an N-point moving average, trimming i to match."""
    i_ = i[N-1:]
    K = np.full(N, 1./N)
    loss_ = np.convolve(loss, K, 'valid')
    err_ = np.convolve(err, K, 'valid')
    return i_, loss_, err_
if __name__ == '__main__':
main()
|
apache-2.0
|