repo_name (stringlengths 6-112) | path (stringlengths 4-204) | copies (stringlengths 1-3) | size (stringlengths 4-6) | content (stringlengths 714-810k) | license (stringclasses 15 values)
---|---|---|---|---|---
nicolov/learning-from-data-homework | python/final.py | 1 | 1566 |
# coding: utf-8
# In[ ]:
from random import uniform
import matplotlib.pyplot as plt
get_ipython().magic(u'matplotlib inline')
import numpy as np
from sklearn import svm
from cvxopt import matrix, solvers
import cvxopt
# cvxopt.solvers.options['show_progress'] = False
# In[ ]:
def ex11():
X = [[1,0], [0,1], [0,-1], [-1,0], [0,2],[0,-2],[-2,0]]
Y = [-1,-1,-1,1,1,1,1]
def transform(x):
return [
x[1]**2 - 2*x[0] - 1,
x[0]**2 - 2*x[1] + 1
]
plt.scatter(*zip(*map(transform, X)), c=['k' if x==1 else 'w' for x in Y])
plt.grid()
# ex11()
# In[ ]:
def ex12():
X = [[1.,0.], [0.,1.], [0.,-1.], [-1.,0.], [0.,2.],[0.,-2.],[-2.,0.]]
Y = [-1.,-1.,-1.,1.,1.,1.,1.]
ssvm = svm.SVC(kernel='poly',
C=1e10,
gamma=1,
degree=2,
coef0=1)
ssvm.fit(X, Y)
return len(ssvm.support_vectors_)
ex12()
# In[ ]:
def rand_point():
return np.array([uniform(-1, 1), uniform(-1, 1)])
def f(P):
return np.sign(P[1] - P[0] + 0.25 * np.sin(np.pi * P[0]))
def dataset_point():
p = rand_point()
return (p, f(p))
def make_dataset(N):
return [dataset_point() for _ in range(N)]
def ex13():
def do_run():
train_set = zip(*make_dataset(100))
ssvm = svm.SVC(kernel='rbf', gamma=1.5)
ssvm.fit(*train_set)
return 1 if ssvm.score(*train_set)==1.0 else 0
return np.mean([do_run() for _ in range(1000)])
ex13()
# In[ ]:
| mit |
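The notebook above imports cvxopt but never calls it. As a hedged illustration of how the support-vector count from ex12 could also be obtained directly, here is a minimal sketch of the hard-margin dual QP solved with cvxopt; the helper name ex12_qp_sketch and the 1e-6 threshold are my assumptions, not part of the original homework.

import numpy as np
from cvxopt import matrix, solvers

def ex12_qp_sketch():
    X = np.array([[1.,0.], [0.,1.], [0.,-1.], [-1.,0.], [0.,2.], [0.,-2.], [-2.,0.]])
    Y = np.array([-1.,-1.,-1.,1.,1.,1.,1.])
    K = (1.0 + X.dot(X.T)) ** 2              # same second-degree polynomial kernel as the SVC call
    n = len(Y)
    P = matrix(np.outer(Y, Y) * K)           # minimize (1/2) a'Pa - 1'a
    q = matrix(-np.ones(n))
    G = matrix(-np.eye(n))                   # enforces a_i >= 0
    h = matrix(np.zeros(n))
    A = matrix(Y.reshape(1, -1))             # enforces sum_i a_i y_i = 0
    b = matrix(0.0)
    alphas = np.ravel(solvers.qp(P, q, G, h, A, b)['x'])
    return int(np.sum(alphas > 1e-6))        # non-zero multipliers mark support vectors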
rashisht1/gradient_coding | src/partial_replication.py | 1 | 11144 | from __future__ import print_function
import sys
import random
from util import *
import os
import numpy as np
import time
from mpi4py import MPI
import scipy.sparse as sps
def partial_replication_logistic_regression(n_procs, n_samples, n_features, input_dir, n_stragglers, n_partitions, is_real_data, params):
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
size = comm.Get_size()
rounds = params[0]
n_workers = n_procs-1
if (n_workers%(n_stragglers+1)):
print("Error: n_workers must be multiple of n_stragglers+1!")
sys.exit(0)
rows_per_worker = n_samples//((n_partitions-n_stragglers)*n_workers) # number of samples per partition
n_groups = n_workers//(n_stragglers+1) # integer division: n_groups sizes the completed_groups array
n_separate = n_partitions-n_stragglers-1
sep_lim = n_separate*rows_per_worker
# Loading the data
if (rank):
if not is_real_data:
y = load_data(input_dir+"label.dat")
X_current = np.zeros([n_partitions*rows_per_worker,n_features])
y_current = np.zeros(n_partitions*rows_per_worker)
for i in range(n_separate):
idx = i+n_separate*(rank-1)
X_current[i*rows_per_worker:(i+1)*rows_per_worker,:] = load_data(input_dir+str(idx+1)+".dat")
y_current[i*rows_per_worker:(i+1)*rows_per_worker] = y[idx*rows_per_worker:(idx+1)*rows_per_worker]
for i in range(n_separate,n_partitions):
a = (rank-1)//(n_stragglers+1) # index of group (integer division keeps idx an integer)
b = i-n_separate # position inside the group
idx = n_separate*n_workers+a*(n_stragglers+1)+b
X_current[i*rows_per_worker:(i+1)*rows_per_worker,:] = load_data(input_dir+str(idx+1)+".dat")
y_current[i*rows_per_worker:(i+1)*rows_per_worker] = y[idx*rows_per_worker:(idx+1)*rows_per_worker]
else:
y = load_data(input_dir + "label.dat")
y_current = np.zeros(n_partitions*rows_per_worker)
for i in range(n_separate):
idx = i+n_separate*(rank-1)
y_current[i*rows_per_worker:(i+1)*rows_per_worker] = y[idx*rows_per_worker:(idx+1)*rows_per_worker]
if i==0:
X_current = load_sparse_csr(input_dir+str(idx+1))
else:
X_temp = load_sparse_csr(input_dir+str(idx+1))
X_current = sps.vstack((X_current,X_temp))
for i in range(n_separate,n_partitions):
a = (rank-1)//(n_stragglers+1) # index of group (integer division keeps idx an integer)
b = i-n_separate # position inside the group
idx = n_separate*n_workers+a*(n_stragglers+1)+b
y_current[i*rows_per_worker:(i+1)*rows_per_worker] = y[idx*rows_per_worker:(idx+1)*rows_per_worker]
X_temp = load_sparse_csr(input_dir+str(idx+1))
X_current = sps.vstack((X_current,X_temp))
X_current = X_current.tocsr()
# Initializing relevant variables
beta=np.zeros(n_features)
if(rank):
predy = X_current.dot(beta)
g_firstpart = -X_current.T.dot(np.divide(y_current,np.exp(np.multiply(predy,y_current))+1))
g_secondpart = -X_current.T.dot(np.divide(y_current,np.exp(np.multiply(predy,y_current))+1))
send_req1 = MPI.Request()
send_req2 = MPI.Request()
recv_reqs = []
else:
print('Stragglers are allowed to be at most %.2f times slower' % (n_partitions*1.0/(n_partitions-n_stragglers-1)))
msgBuffers_firstparts = [np.zeros(n_features) for i in range(n_procs-1)]
msgBuffers_secondparts = [np.zeros(n_features) for i in range(n_procs-1)]
g=np.zeros(n_features)
betaset = np.zeros((rounds, n_features))
timeset = np.zeros(rounds)
worker_timeset=np.zeros((rounds, n_procs-1))
request_set = []
recv_reqs = []
send_set = []
cnt_groups = 0
cnt_firstpart = 0
completed_groups=np.ndarray(n_groups,dtype=bool)
completed_workers = np.ndarray(n_workers,dtype=bool)
completed_firstparts=np.ndarray(n_workers,dtype=bool)
status = MPI.Status()
eta0= params[2] # ----- learning rate
alpha = params[1] # --- coefficient of l2 regularization
utemp = np.zeros(n_features) # for accelerated gradient descent
# Posting all Irecv requests for master and workers
if (rank):
for i in range(rounds):
req = comm.Irecv([beta, MPI.DOUBLE], source=0, tag=i)
recv_reqs.append(req)
else:
for i in range(rounds):
recv_reqs = []
for j in range(1,n_procs):
req1 = comm.Irecv([msgBuffers_firstparts[j-1], MPI.DOUBLE], source=j, tag=2*rounds + i)
recv_reqs.append(req1)
req2 = comm.Irecv([msgBuffers_secondparts[j-1], MPI.DOUBLE], source=j, tag=i)
recv_reqs.append(req2)
request_set.append(recv_reqs)
#######################################################################################################################
comm.Barrier()
if rank==0:
print("---- Starting Partial Replication Iterations for " +str(n_stragglers) + " stragglers ----")
orig_start_time= time.time()
for i in range(rounds):
if rank==0:
if(i%10 == 0):
print("\t >>> At Iteration %d" %(i))
send_set[:] = []
g[:]= 0
cnt_firstpart=0
completed_firstparts[:]=False
completed_groups[:]=False
cnt_groups=0
completed_workers[:]=False
start_time = time.time()
for l in range(1,n_procs):
sreq = comm.Isend([beta, MPI.DOUBLE], dest = l, tag = i)
send_set.append(sreq)
while cnt_groups<n_groups or cnt_firstpart<n_workers:
req_done = MPI.Request.Waitany(request_set[i], status)
src = status.Get_source()
tag= status.Get_tag()
worker_timeset[i,src-1]=time.time()-start_time
request_set[i].pop(req_done)
if tag == i:
completed_workers[src-1] = True
groupid = (src-1)//(n_stragglers+1) # integer division: groupid indexes completed_groups
if not completed_groups[groupid]:
completed_groups[groupid] = True
g += msgBuffers_secondparts[src-1]
cnt_groups += 1
elif tag == 2*rounds + i:
g += msgBuffers_firstparts[src-1]
completed_firstparts[src-1] = True
cnt_firstpart += 1
grad_multiplier = eta0[i]/n_samples
# ---- update step for gradient descent
# np.subtract((1-2*alpha*eta0[i])*beta , grad_multiplier*g, out=beta)
# ---- updates for accelerated gradient descent
theta = 2.0/(i+2.0)
ytemp = (1-theta)*beta + theta*utemp
betatemp = ytemp - grad_multiplier*g - (2*alpha*eta0[i])*beta
utemp = beta + (betatemp-beta)*(1/theta)
beta[:] = betatemp
timeset[i] = time.time() - start_time
betaset[i,:] = beta
ind_set = [l for l in range(1,n_procs) if not completed_workers[l-1]]
for l in ind_set:
worker_timeset[i,l-1]=-1
else:
recv_reqs[i].Wait()
sendTestBuf = send_req1.test()
if not sendTestBuf[0]:
send_req1.Cancel()
sendTestBuf = send_req2.test()
if not sendTestBuf[0]:
send_req2.Cancel()
predy=X_current[0:sep_lim,:].dot(beta)
g_firstpart = X_current[0:sep_lim,:].T.dot(np.divide(y_current[0:sep_lim],np.exp(np.multiply(predy,y_current[0:sep_lim]))+1))
g_firstpart *= -1
send_req1 = comm.Isend([g_firstpart,MPI.DOUBLE], dest=0, tag=2*rounds+i)
predy = X_current[sep_lim:,:].dot(beta)
g_secondpart = X_current[sep_lim:,:].T.dot(np.divide(y_current[sep_lim:],np.exp(np.multiply(predy,y_current[sep_lim:]))+1))
g_secondpart *= -1
send_req2 = comm.Isend([g_secondpart,MPI.DOUBLE], dest=0, tag=i)
######################################################################################################################
comm.Barrier()
if rank==0:
elapsed_time= time.time() - orig_start_time
print ("Total Time Elapsed: %.3f" %(elapsed_time))
# Load all training data
if not is_real_data:
X_train = load_data(input_dir+"1.dat")
for j in range(2,n_procs-1):
X_temp = load_data(input_dir+str(j)+".dat")
X_train = np.vstack((X_train, X_temp))
else:
X_train = load_sparse_csr(input_dir+"1")
for j in range(2,n_procs-1):
X_temp = load_sparse_csr(input_dir+str(j))
X_train = sps.vstack((X_train, X_temp))
y_train = load_data(input_dir+"label.dat")
y_train = y_train[0:X_train.shape[0]]
# Load all testing data
y_test = load_data(input_dir + "label_test.dat")
if not is_real_data:
X_test = load_data(input_dir+"test_data.dat")
else:
X_test = load_sparse_csr(input_dir+"test_data")
n_train = X_train.shape[0]
n_test = X_test.shape[0]
training_loss = np.zeros(rounds)
testing_loss = np.zeros(rounds)
auc_loss = np.zeros(rounds)
from sklearn.metrics import roc_curve, auc
for i in range(rounds):
beta = np.squeeze(betaset[i,:])
predy_train = X_train.dot(beta)
predy_test = X_test.dot(beta)
training_loss[i] = calculate_loss(y_train, predy_train, n_train)
testing_loss[i] = calculate_loss(y_test, predy_test, n_test)
fpr, tpr, thresholds = roc_curve(y_test,predy_test, pos_label=1)
auc_loss[i] = auc(fpr,tpr)
print("Iteration %d: Train Loss = %5.3f, Test Loss = %5.3f, AUC = %5.3f, Total time taken =%5.3f"%(i, training_loss[i], testing_loss[i], auc_loss[i], timeset[i]))
output_dir = input_dir + "results/"
if not os.path.exists(output_dir):
os.makedirs(output_dir)
save_vector(training_loss, output_dir+"partialreplication_%d_%d_training_loss.dat"%(n_stragglers,n_partitions))
save_vector(testing_loss, output_dir+"partialreplication_%d_%d_testing_loss.dat"%(n_stragglers,n_partitions))
save_vector(auc_loss, output_dir+"partialreplication_%d_%d_auc.dat"%(n_stragglers,n_partitions))
save_vector(timeset, output_dir+"partialreplication_%d_%d_timeset.dat"%(n_stragglers,n_partitions))
save_matrix(worker_timeset, output_dir+"partialreplication_%d_%d_worker_timeset.dat"%(n_stragglers,n_partitions))
print(">>> Done")
comm.Barrier() | mit |
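The worker updates above compute gradient pieces of the form -X.T.dot(y / (exp(y * X.dot(beta)) + 1)). A small self-contained check (my own sketch, not part of the repository) confirms that this expression is the gradient of the logistic loss sum_i log(1 + exp(-y_i x_i^T beta)) by comparing it against central finite differences:

import numpy as np

def logistic_loss(beta, X, y):
    return np.sum(np.log1p(np.exp(-y * X.dot(beta))))

def logistic_grad(beta, X, y):
    # same algebraic form as the g_firstpart / g_secondpart expressions above
    return -X.T.dot(y / (np.exp(y * X.dot(beta)) + 1.0))

rng = np.random.RandomState(0)
X = rng.randn(20, 5)
y = np.sign(rng.randn(20))
beta = rng.randn(5)
eps = 1e-6
numeric = np.array([(logistic_loss(beta + eps*e, X, y) - logistic_loss(beta - eps*e, X, y)) / (2*eps)
                    for e in np.eye(5)])
print(np.allclose(numeric, logistic_grad(beta, X, y), atol=1e-5))  # expected: True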
fberanizo/neural_network | mlp/mlp.py | 1 | 4555 | # -*- coding: utf-8 -*-
import numpy, matplotlib.pyplot as plt, time
from sklearn.metrics import mean_squared_error, accuracy_score, roc_auc_score
class MLP(object):
"""Class that implements a multilayer perceptron (MLP)"""
def __init__(self, hidden_layer_size=3, learning_rate=0.2, max_epochs=1000):
self.hidden_layer_size = hidden_layer_size
self.learning_rate = learning_rate
self.max_epochs = max_epochs
self.auc = 0.5  # threshold used to binarize network outputs in predict() and score()
def fit(self, X, y):
"""Trains the network and returns the trained network"""
self.input_layer_size = X.shape[1]
self.output_layer_size = y.shape[1]
remaining_epochs = self.max_epochs
# Initialize weights
self.W1 = numpy.random.rand(1 + self.input_layer_size, self.hidden_layer_size)
self.W2 = numpy.random.rand(1 + self.hidden_layer_size, self.output_layer_size)
epsilon = 0.001
error = 1
self.J = [] # error
# Repeats until error is small enough or max epochs is reached
while error > epsilon and remaining_epochs > 0:
total_error = numpy.array([])
# For each input instance
for self.X, self.y in zip(X, y):
self.X = numpy.array([self.X])
self.y = numpy.array([self.y])
error, gradients = self.single_step(self.X, self.y)
total_error = numpy.append(total_error, error)
dJdW1 = gradients[0]
dJdW2 = gradients[1]
# Calculates new weights
self.W1 = self.W1 - self.learning_rate * dJdW1
self.W2 = self.W2 - self.learning_rate * dJdW2
# Saves error for plot
error = total_error.mean()
self.J.append(error)
# print 'Epoch: ' + str(remaining_epochs)
# print 'Error: ' + str(error)
remaining_epochs -= 1
# After training, we plot error in order to see how it behaves
#plt.plot(self.J[30:])
#plt.grid(1)
#plt.yscale('log')
#plt.ylabel('Cost')
#plt.xlabel('Iterations')
#plt.show()
return self
def predict(self, X):
"""Predicts test values"""
Y = [self.forward(numpy.array([x]))[0] for x in X]
Y = [1 if y > self.auc else 0 for y in Y]
return numpy.array(Y)
def score(self, X, y_true):
"""Calculates accuracy"""
y_pred = [self.forward(numpy.array([x]))[0] for x in X]
auc = roc_auc_score(y_true, y_pred)  # computed for reference; accuracy is returned below
y_pred = numpy.array([1 if y > self.auc else 0 for y in y_pred])
return accuracy_score(y_true.flatten(), y_pred.flatten())
def single_step(self, X, y):
"""Runs single step training method"""
self.Y = self.forward(X)
cost = self.cost(self.Y, y)
gradients = self.backpropagate(X, y)
return cost, gradients
def forward(self, X):
"""Passes input values through network and return output values"""
self.Zin = numpy.dot(X, self.W1[:-1,:])
self.Zin += numpy.dot(numpy.ones((1, 1)), self.W1[-1:,:])
self.Z = self.sigmoid(self.Zin)
self.Z = numpy.nan_to_num(self.Z)
self.Yin = numpy.dot(self.Z, self.W2[:-1,])
self.Yin += numpy.dot(numpy.ones((1, 1)), self.W2[-1:,:])
Y = self.linear(self.Yin)
Y = numpy.nan_to_num(Y)
return Y
def cost(self, Y, y):
"""Calculates network output error"""
return mean_squared_error(Y, y)
def backpropagate(self, X, y):
"""Backpropagates costs through the network"""
delta3 = numpy.multiply(-(y-self.Y), self.linear_derivative(self.Yin))
dJdW2 = numpy.dot(self.Z.T, delta3)
dJdW2 = numpy.append(dJdW2, numpy.dot(numpy.ones((1, 1)), delta3), axis=0)
delta2 = numpy.dot(delta3, self.W2[:-1,:].T)*self.sigmoid_derivative(self.Zin)
dJdW1 = numpy.dot(X.T, delta2)
dJdW1 = numpy.append(dJdW1, numpy.dot(numpy.ones((1, 1)), delta2), axis=0)
return dJdW1, dJdW2
def sigmoid(self, z):
"""Apply sigmoid activation function"""
return 1/(1+numpy.exp(-z))
def sigmoid_derivative(self, z):
"""Derivative of sigmoid function"""
return numpy.exp(-z)/((1+numpy.exp(-z))** 2)
def linear(self, z):
"""Apply linear activation function"""
return z
def linear_derivative(self, z):
"""Derivarive linear function"""
return 1
| bsd-2-clause |
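A minimal usage sketch for the MLP class above, assuming it is importable from this module; the XOR data, the hyperparameters, and the convergence behaviour are illustrative assumptions rather than settings taken from the repository.

import numpy
# from mlp import MLP  # assumed import path for the class defined above

X = numpy.array([[0, 0], [0, 1], [1, 0], [1, 1]])
y = numpy.array([[0], [1], [1], [0]])     # targets shaped (n_samples, n_outputs), as fit() expects
net = MLP(hidden_layer_size=4, learning_rate=0.5, max_epochs=5000)
net.fit(X, y)
print(net.predict(X))                     # ideally [0, 1, 1, 0]; convergence is not guaranteed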
eadgarchen/tensorflow | tensorflow/contrib/learn/python/learn/grid_search_test.py | 137 | 2035 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Grid search tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import random
from tensorflow.contrib.learn.python import learn
from tensorflow.python.platform import test
HAS_SKLEARN = os.environ.get('TENSORFLOW_SKLEARN', False)
if HAS_SKLEARN:
try:
# pylint: disable=g-import-not-at-top
from sklearn import datasets
from sklearn.grid_search import GridSearchCV
from sklearn.metrics import accuracy_score
except ImportError:
HAS_SKLEARN = False
class GridSearchTest(test.TestCase):
"""Grid search tests."""
def testIrisDNN(self):
if HAS_SKLEARN:
random.seed(42)
iris = datasets.load_iris()
feature_columns = learn.infer_real_valued_columns_from_input(iris.data)
classifier = learn.DNNClassifier(
feature_columns=feature_columns,
hidden_units=[10, 20, 10],
n_classes=3)
grid_search = GridSearchCV(
classifier, {'hidden_units': [[5, 5], [10, 10]]},
scoring='accuracy',
fit_params={'steps': [50]})
grid_search.fit(iris.data, iris.target)
score = accuracy_score(iris.target, grid_search.predict(iris.data))
self.assertGreater(score, 0.5, 'Failed with score = {0}'.format(score))
if __name__ == '__main__':
test.main()
| apache-2.0 |
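The test above wires a TensorFlow estimator into scikit-learn's grid search. For comparison, here is a minimal sketch of the same GridSearchCV mechanics with a plain scikit-learn estimator; note that in releases newer than the one this test targets, the import moved from sklearn.grid_search to sklearn.model_selection.

from sklearn import datasets, svm
from sklearn.model_selection import GridSearchCV  # sklearn.grid_search in older releases

iris = datasets.load_iris()
grid = GridSearchCV(svm.SVC(), {'C': [0.1, 1.0, 10.0], 'kernel': ['linear', 'rbf']}, cv=3)
grid.fit(iris.data, iris.target)
print(grid.best_params_, grid.best_score_)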
fabioticconi/scikit-learn | examples/ensemble/plot_random_forest_regression_multioutput.py | 28 | 2642 | """
============================================================
Comparing random forests and the multi-output meta estimator
============================================================
An example to compare multi-output regression with random forest and
the :ref:`multioutput.MultiOutputRegressor <_multiclass>` meta-estimator.
This example illustrates the use of the
:ref:`multioutput.MultiOutputRegressor <_multiclass>` meta-estimator
to perform multi-output regression. A random forest regressor is used,
which supports multi-output regression natively, so the results can be
compared.
The random forest regressor will only ever predict values within the
range of observations or closer to zero for each of the targets. As a
result the predictions are biased towards the centre of the circle.
Using a single underlying feature the model learns both the
x and y coordinate as output.
"""
print(__doc__)
# Author: Tim Head <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import train_test_split
from sklearn.multioutput import MultiOutputRegressor
# Create a random dataset
rng = np.random.RandomState(1)
X = np.sort(200 * rng.rand(600, 1) - 100, axis=0)
y = np.array([np.pi * np.sin(X).ravel(), np.pi * np.cos(X).ravel()]).T
y += (0.5 - rng.rand(*y.shape))
X_train, X_test, y_train, y_test = train_test_split(X, y,
train_size=400,
random_state=4)
max_depth = 30
regr_multirf = MultiOutputRegressor(RandomForestRegressor(max_depth=max_depth,
random_state=0))
regr_multirf.fit(X_train, y_train)
regr_rf = RandomForestRegressor(max_depth=max_depth, random_state=2)
regr_rf.fit(X_train, y_train)
# Predict on new data
y_multirf = regr_multirf.predict(X_test)
y_rf = regr_rf.predict(X_test)
# Plot the results
plt.figure()
s = 50
a = 0.4
plt.scatter(y_test[:, 0], y_test[:, 1],
c="navy", s=s, marker="s", alpha=a, label="Data")
plt.scatter(y_multirf[:, 0], y_multirf[:, 1],
c="cornflowerblue", s=s, alpha=a,
label="Multi RF score=%.2f" % regr_multirf.score(X_test, y_test))
plt.scatter(y_rf[:, 0], y_rf[:, 1],
c="c", s=s, marker="^", alpha=a,
label="RF score=%.2f" % regr_rf.score(X_test, y_test))
plt.xlim([-6, 6])
plt.ylim([-6, 6])
plt.xlabel("target 1")
plt.ylabel("target 2")
plt.title("Comparing random forests and the multi-output meta estimator")
plt.legend()
plt.show()
| bsd-3-clause |
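The example above uses a random forest, which handles multiple outputs natively. A shorter sketch (mine, not part of the gallery) of the case the MultiOutputRegressor wrapper is really needed for: a base estimator without native multi-output support, fitted once per target.

import numpy as np
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.multioutput import MultiOutputRegressor

rng = np.random.RandomState(0)
X = rng.rand(100, 1)
Y = np.hstack([np.sin(6 * X), np.cos(6 * X)])        # two targets driven by one feature
model = MultiOutputRegressor(GradientBoostingRegressor(n_estimators=50))  # one regressor per target
model.fit(X, Y)
print(model.predict(X[:3]))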
nelango/ViralityAnalysis | model/lib/sklearn/utils/tests/test_class_weight.py | 90 | 12846 | import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.datasets import make_blobs
from sklearn.utils.class_weight import compute_class_weight
from sklearn.utils.class_weight import compute_sample_weight
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_warns
def test_compute_class_weight():
# Test (and demo) compute_class_weight.
y = np.asarray([2, 2, 2, 3, 3, 4])
classes = np.unique(y)
cw = assert_warns(DeprecationWarning,
compute_class_weight, "auto", classes, y)
assert_almost_equal(cw.sum(), classes.shape)
assert_true(cw[0] < cw[1] < cw[2])
cw = compute_class_weight("balanced", classes, y)
# total effect of samples is preserved
class_counts = np.bincount(y)[2:]
assert_almost_equal(np.dot(cw, class_counts), y.shape[0])
assert_true(cw[0] < cw[1] < cw[2])
def test_compute_class_weight_not_present():
# Raise error when y does not contain all class labels
classes = np.arange(4)
y = np.asarray([0, 0, 0, 1, 1, 2])
assert_raises(ValueError, compute_class_weight, "auto", classes, y)
assert_raises(ValueError, compute_class_weight, "balanced", classes, y)
def test_compute_class_weight_dict():
classes = np.arange(3)
class_weights = {0: 1.0, 1: 2.0, 2: 3.0}
y = np.asarray([0, 0, 1, 2])
cw = compute_class_weight(class_weights, classes, y)
# When the user specifies class weights, compute_class_weights should just
# return them.
assert_array_almost_equal(np.asarray([1.0, 2.0, 3.0]), cw)
# When a class weight is specified that isn't in classes, a ValueError
# should get raised
msg = 'Class label 4 not present.'
class_weights = {0: 1.0, 1: 2.0, 2: 3.0, 4: 1.5}
assert_raise_message(ValueError, msg, compute_class_weight, class_weights,
classes, y)
msg = 'Class label -1 not present.'
class_weights = {-1: 5.0, 0: 1.0, 1: 2.0, 2: 3.0}
assert_raise_message(ValueError, msg, compute_class_weight, class_weights,
classes, y)
def test_compute_class_weight_invariance():
# Test that results with class_weight="balanced" is invariant wrt
# class imbalance if the number of samples is identical.
# The test uses a balanced two class dataset with 100 datapoints.
# It creates three versions, one where class 1 is duplicated
# resulting in 150 points of class 1 and 50 of class 0,
# one where there are 50 points in class 1 and 150 in class 0,
# and one where there are 100 points of each class (this one is balanced
# again).
# With balancing class weights, all three should give the same model.
X, y = make_blobs(centers=2, random_state=0)
# create dataset where class 1 is duplicated twice
X_1 = np.vstack([X] + [X[y == 1]] * 2)
y_1 = np.hstack([y] + [y[y == 1]] * 2)
# create dataset where class 0 is duplicated twice
X_0 = np.vstack([X] + [X[y == 0]] * 2)
y_0 = np.hstack([y] + [y[y == 0]] * 2)
# duplicate everything
X_ = np.vstack([X] * 2)
y_ = np.hstack([y] * 2)
# results should be identical
logreg1 = LogisticRegression(class_weight="balanced").fit(X_1, y_1)
logreg0 = LogisticRegression(class_weight="balanced").fit(X_0, y_0)
logreg = LogisticRegression(class_weight="balanced").fit(X_, y_)
assert_array_almost_equal(logreg1.coef_, logreg0.coef_)
assert_array_almost_equal(logreg.coef_, logreg0.coef_)
def test_compute_class_weight_auto_negative():
# Test compute_class_weight when labels are negative
# Test with balanced class labels.
classes = np.array([-2, -1, 0])
y = np.asarray([-1, -1, 0, 0, -2, -2])
cw = assert_warns(DeprecationWarning, compute_class_weight, "auto",
classes, y)
assert_almost_equal(cw.sum(), classes.shape)
assert_equal(len(cw), len(classes))
assert_array_almost_equal(cw, np.array([1., 1., 1.]))
cw = compute_class_weight("balanced", classes, y)
assert_equal(len(cw), len(classes))
assert_array_almost_equal(cw, np.array([1., 1., 1.]))
# Test with unbalanced class labels.
y = np.asarray([-1, 0, 0, -2, -2, -2])
cw = assert_warns(DeprecationWarning, compute_class_weight, "auto",
classes, y)
assert_almost_equal(cw.sum(), classes.shape)
assert_equal(len(cw), len(classes))
assert_array_almost_equal(cw, np.array([0.545, 1.636, 0.818]), decimal=3)
cw = compute_class_weight("balanced", classes, y)
assert_equal(len(cw), len(classes))
class_counts = np.bincount(y + 2)
assert_almost_equal(np.dot(cw, class_counts), y.shape[0])
assert_array_almost_equal(cw, [2. / 3, 2., 1.])
def test_compute_class_weight_auto_unordered():
# Test compute_class_weight when classes are unordered
classes = np.array([1, 0, 3])
y = np.asarray([1, 0, 0, 3, 3, 3])
cw = assert_warns(DeprecationWarning, compute_class_weight, "auto",
classes, y)
assert_almost_equal(cw.sum(), classes.shape)
assert_equal(len(cw), len(classes))
assert_array_almost_equal(cw, np.array([1.636, 0.818, 0.545]), decimal=3)
cw = compute_class_weight("balanced", classes, y)
class_counts = np.bincount(y)[classes]
assert_almost_equal(np.dot(cw, class_counts), y.shape[0])
assert_array_almost_equal(cw, [2., 1., 2. / 3])
def test_compute_sample_weight():
# Test (and demo) compute_sample_weight.
# Test with balanced classes
y = np.asarray([1, 1, 1, 2, 2, 2])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
sample_weight = compute_sample_weight("balanced", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with user-defined weights
sample_weight = compute_sample_weight({1: 2, 2: 1}, y)
assert_array_almost_equal(sample_weight, [2., 2., 2., 1., 1., 1.])
# Test with column vector of balanced classes
y = np.asarray([[1], [1], [1], [2], [2], [2]])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
sample_weight = compute_sample_weight("balanced", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with unbalanced classes
y = np.asarray([1, 1, 1, 2, 2, 2, 3])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
expected_auto = np.asarray([.6, .6, .6, .6, .6, .6, 1.8])
assert_array_almost_equal(sample_weight, expected_auto)
sample_weight = compute_sample_weight("balanced", y)
expected_balanced = np.array([0.7777, 0.7777, 0.7777, 0.7777, 0.7777, 0.7777, 2.3333])
assert_array_almost_equal(sample_weight, expected_balanced, decimal=4)
# Test with `None` weights
sample_weight = compute_sample_weight(None, y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 1.])
# Test with multi-output of balanced classes
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
sample_weight = compute_sample_weight("balanced", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with multi-output with user-defined weights
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
sample_weight = compute_sample_weight([{1: 2, 2: 1}, {0: 1, 1: 2}], y)
assert_array_almost_equal(sample_weight, [2., 2., 2., 2., 2., 2.])
# Test with multi-output of unbalanced classes
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1], [3, -1]])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, expected_auto ** 2)
sample_weight = compute_sample_weight("balanced", y)
assert_array_almost_equal(sample_weight, expected_balanced ** 2, decimal=3)
def test_compute_sample_weight_with_subsample():
# Test compute_sample_weight with subsamples specified.
# Test with balanced classes and all samples present
y = np.asarray([1, 1, 1, 2, 2, 2])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
sample_weight = compute_sample_weight("balanced", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with column vector of balanced classes and all samples present
y = np.asarray([[1], [1], [1], [2], [2], [2]])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
sample_weight = compute_sample_weight("balanced", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with a subsample
y = np.asarray([1, 1, 1, 2, 2, 2])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y, range(4))
assert_array_almost_equal(sample_weight, [.5, .5, .5, 1.5, 1.5, 1.5])
sample_weight = compute_sample_weight("balanced", y, range(4))
assert_array_almost_equal(sample_weight, [2. / 3, 2. / 3,
2. / 3, 2., 2., 2.])
# Test with a bootstrap subsample
y = np.asarray([1, 1, 1, 2, 2, 2])
sample_weight = assert_warns(DeprecationWarning, compute_sample_weight,
"auto", y, [0, 1, 1, 2, 2, 3])
expected_auto = np.asarray([1 / 3., 1 / 3., 1 / 3., 5 / 3., 5 / 3., 5 / 3.])
assert_array_almost_equal(sample_weight, expected_auto)
sample_weight = compute_sample_weight("balanced", y, [0, 1, 1, 2, 2, 3])
expected_balanced = np.asarray([0.6, 0.6, 0.6, 3., 3., 3.])
assert_array_almost_equal(sample_weight, expected_balanced)
# Test with a bootstrap subsample for multi-output
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
sample_weight = assert_warns(DeprecationWarning, compute_sample_weight,
"auto", y, [0, 1, 1, 2, 2, 3])
assert_array_almost_equal(sample_weight, expected_auto ** 2)
sample_weight = compute_sample_weight("balanced", y, [0, 1, 1, 2, 2, 3])
assert_array_almost_equal(sample_weight, expected_balanced ** 2)
# Test with a missing class
y = np.asarray([1, 1, 1, 2, 2, 2, 3])
sample_weight = assert_warns(DeprecationWarning, compute_sample_weight,
"auto", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 0.])
sample_weight = compute_sample_weight("balanced", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 0.])
# Test with a missing class for multi-output
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1], [2, 2]])
sample_weight = assert_warns(DeprecationWarning, compute_sample_weight,
"auto", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 0.])
sample_weight = compute_sample_weight("balanced", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 0.])
def test_compute_sample_weight_errors():
# Test compute_sample_weight raises errors expected.
# Invalid preset string
y = np.asarray([1, 1, 1, 2, 2, 2])
y_ = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
assert_raises(ValueError, compute_sample_weight, "ni", y)
assert_raises(ValueError, compute_sample_weight, "ni", y, range(4))
assert_raises(ValueError, compute_sample_weight, "ni", y_)
assert_raises(ValueError, compute_sample_weight, "ni", y_, range(4))
# Not "auto" for subsample
assert_raises(ValueError,
compute_sample_weight, {1: 2, 2: 1}, y, range(4))
# Not a list or preset for multi-output
assert_raises(ValueError, compute_sample_weight, {1: 2, 2: 1}, y_)
# Incorrect length list for multi-output
assert_raises(ValueError, compute_sample_weight, [{1: 2, 2: 1}], y_)
| mit |
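The "balanced" heuristic exercised by the tests above reduces to n_samples / (n_classes * bincount(y)). A minimal sketch verifying that by hand; the positional arguments follow the older API used in these tests, while recent scikit-learn releases require keyword arguments.

import numpy as np
from sklearn.utils.class_weight import compute_class_weight

y = np.array([0, 0, 0, 1, 1, 2])
classes = np.unique(y)
manual = len(y) / (len(classes) * np.bincount(y).astype(float))
print(manual)                                         # [0.667, 1.0, 2.0]
print(compute_class_weight("balanced", classes, y))   # matches the manual computation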
ammarkhann/FinalSeniorCode | lib/python2.7/site-packages/jupyter_core/tests/dotipython_empty/profile_default/ipython_config.py | 24 | 20611 | # Configuration file for ipython.
c = get_config()
#------------------------------------------------------------------------------
# InteractiveShellApp configuration
#------------------------------------------------------------------------------
# A Mixin for applications that start InteractiveShell instances.
#
# Provides configurables for loading extensions and executing files as part of
# configuring a Shell environment.
#
# The following methods should be called by the :meth:`initialize` method of the
# subclass:
#
# - :meth:`init_path`
# - :meth:`init_shell` (to be implemented by the subclass)
# - :meth:`init_gui_pylab`
# - :meth:`init_extensions`
# - :meth:`init_code`
# lines of code to run at IPython startup.
# c.InteractiveShellApp.exec_lines = []
# Should variables loaded at startup (by startup files, exec_lines, etc.) be
# hidden from tools like %who?
# c.InteractiveShellApp.hide_initial_ns = True
# A list of dotted module names of IPython extensions to load.
# c.InteractiveShellApp.extensions = []
# Enable GUI event loop integration with any of ('glut', 'gtk', 'gtk3', 'osx',
# 'pyglet', 'qt', 'qt5', 'tk', 'wx').
# c.InteractiveShellApp.gui = None
# A file to be run
# c.InteractiveShellApp.file_to_run = ''
# Configure matplotlib for interactive use with the default matplotlib backend.
# c.InteractiveShellApp.matplotlib = None
# Reraise exceptions encountered loading IPython extensions?
# c.InteractiveShellApp.reraise_ipython_extension_failures = False
# Run the file referenced by the PYTHONSTARTUP environment variable at IPython
# startup.
# c.InteractiveShellApp.exec_PYTHONSTARTUP = True
# Pre-load matplotlib and numpy for interactive use, selecting a particular
# matplotlib backend and loop integration.
# c.InteractiveShellApp.pylab = None
# Run the module as a script.
# c.InteractiveShellApp.module_to_run = ''
# dotted module name of an IPython extension to load.
# c.InteractiveShellApp.extra_extension = ''
# List of files to run at IPython startup.
# c.InteractiveShellApp.exec_files = []
# If true, IPython will populate the user namespace with numpy, pylab, etc. and
# an ``import *`` is done from numpy and pylab, when using pylab mode.
#
# When False, pylab mode should not import any names into the user namespace.
# c.InteractiveShellApp.pylab_import_all = True
# Execute the given command string.
# c.InteractiveShellApp.code_to_run = ''
#------------------------------------------------------------------------------
# TerminalIPythonApp configuration
#------------------------------------------------------------------------------
# TerminalIPythonApp will inherit config from: BaseIPythonApplication,
# Application, InteractiveShellApp
# Should variables loaded at startup (by startup files, exec_lines, etc.) be
# hidden from tools like %who?
# c.TerminalIPythonApp.hide_initial_ns = True
# A list of dotted module names of IPython extensions to load.
# c.TerminalIPythonApp.extensions = []
# Execute the given command string.
# c.TerminalIPythonApp.code_to_run = ''
# The date format used by logging formatters for %(asctime)s
# c.TerminalIPythonApp.log_datefmt = '%Y-%m-%d %H:%M:%S'
# Reraise exceptions encountered loading IPython extensions?
# c.TerminalIPythonApp.reraise_ipython_extension_failures = False
# Set the log level by value or name.
# c.TerminalIPythonApp.log_level = 30
# Run the file referenced by the PYTHONSTARTUP environment variable at IPython
# startup.
# c.TerminalIPythonApp.exec_PYTHONSTARTUP = True
# Pre-load matplotlib and numpy for interactive use, selecting a particular
# matplotlib backend and loop integration.
# c.TerminalIPythonApp.pylab = None
# Run the module as a script.
# c.TerminalIPythonApp.module_to_run = ''
# Whether to display a banner upon starting IPython.
# c.TerminalIPythonApp.display_banner = True
# dotted module name of an IPython extension to load.
# c.TerminalIPythonApp.extra_extension = ''
# Create a massive crash report when IPython encounters what may be an internal
# error. The default is to append a short message to the usual traceback
# c.TerminalIPythonApp.verbose_crash = False
# Whether to overwrite existing config files when copying
# c.TerminalIPythonApp.overwrite = False
# The IPython profile to use.
# c.TerminalIPythonApp.profile = 'default'
# If a command or file is given via the command-line, e.g. 'ipython foo.py',
# start an interactive shell after executing the file or command.
# c.TerminalIPythonApp.force_interact = False
# List of files to run at IPython startup.
# c.TerminalIPythonApp.exec_files = []
# Start IPython quickly by skipping the loading of config files.
# c.TerminalIPythonApp.quick = False
# The Logging format template
# c.TerminalIPythonApp.log_format = '[%(name)s]%(highlevel)s %(message)s'
# Whether to install the default config files into the profile dir. If a new
# profile is being created, and IPython contains config files for that profile,
# then they will be staged into the new directory. Otherwise, default config
# files will be automatically generated.
# c.TerminalIPythonApp.copy_config_files = False
# Path to an extra config file to load.
#
# If specified, load this config file in addition to any other IPython config.
# c.TerminalIPythonApp.extra_config_file = ''
# lines of code to run at IPython startup.
# c.TerminalIPythonApp.exec_lines = []
# Enable GUI event loop integration with any of ('glut', 'gtk', 'gtk3', 'osx',
# 'pyglet', 'qt', 'qt5', 'tk', 'wx').
# c.TerminalIPythonApp.gui = None
# A file to be run
# c.TerminalIPythonApp.file_to_run = ''
# Configure matplotlib for interactive use with the default matplotlib backend.
# c.TerminalIPythonApp.matplotlib = None
# Suppress warning messages about legacy config files
# c.TerminalIPythonApp.ignore_old_config = False
# The name of the IPython directory. This directory is used for logging
# configuration (through profiles), history storage, etc. The default is usually
# $HOME/.ipython. This option can also be specified through the environment
# variable IPYTHONDIR.
# c.TerminalIPythonApp.ipython_dir = ''
# If true, IPython will populate the user namespace with numpy, pylab, etc. and
# an ``import *`` is done from numpy and pylab, when using pylab mode.
#
# When False, pylab mode should not import any names into the user namespace.
# c.TerminalIPythonApp.pylab_import_all = True
#------------------------------------------------------------------------------
# TerminalInteractiveShell configuration
#------------------------------------------------------------------------------
# TerminalInteractiveShell will inherit config from: InteractiveShell
#
# c.TerminalInteractiveShell.object_info_string_level = 0
#
# c.TerminalInteractiveShell.separate_out = ''
# Automatically call the pdb debugger after every exception.
# c.TerminalInteractiveShell.pdb = False
#
# c.TerminalInteractiveShell.ipython_dir = ''
#
# c.TerminalInteractiveShell.history_length = 10000
#
# c.TerminalInteractiveShell.readline_remove_delims = '-/~'
# auto editing of files with syntax errors.
# c.TerminalInteractiveShell.autoedit_syntax = False
# If True, anything that would be passed to the pager will be displayed as
# regular output instead.
# c.TerminalInteractiveShell.display_page = False
#
# c.TerminalInteractiveShell.debug = False
#
# c.TerminalInteractiveShell.separate_in = '\n'
# Start logging to the default log file in overwrite mode. Use `logappend` to
# specify a log file to **append** logs to.
# c.TerminalInteractiveShell.logstart = False
# Set the size of the output cache. The default is 1000, you can change it
# permanently in your config file. Setting it to 0 completely disables the
# caching system, and the minimum value accepted is 20 (if you provide a value
# less than 20, it is reset to 0 and a warning is issued). This limit is
# defined because otherwise you'll spend more time re-flushing a too small cache
# than working
# c.TerminalInteractiveShell.cache_size = 1000
# Set to confirm when you try to exit IPython with an EOF (Control-D in Unix,
# Control-Z/Enter in Windows). By typing 'exit' or 'quit', you can force a
# direct exit without any confirmation.
# c.TerminalInteractiveShell.confirm_exit = True
# The shell program to be used for paging.
# c.TerminalInteractiveShell.pager = 'less'
#
# c.TerminalInteractiveShell.wildcards_case_sensitive = True
# Deprecated, use PromptManager.justify
# c.TerminalInteractiveShell.prompts_pad_left = True
# The name of the logfile to use.
# c.TerminalInteractiveShell.logfile = ''
# 'all', 'last', 'last_expr' or 'none', specifying which nodes should be run
# interactively (displaying output from expressions).
# c.TerminalInteractiveShell.ast_node_interactivity = 'last_expr'
#
# c.TerminalInteractiveShell.quiet = False
# Save multi-line entries as one entry in readline history
# c.TerminalInteractiveShell.multiline_history = True
# Deprecated, use PromptManager.in_template
# c.TerminalInteractiveShell.prompt_in1 = 'In [\\#]: '
#
# c.TerminalInteractiveShell.readline_use = True
# Enable magic commands to be called without the leading %.
# c.TerminalInteractiveShell.automagic = True
# The part of the banner to be printed before the profile
# c.TerminalInteractiveShell.banner1 = 'Python 3.4.3 |Continuum Analytics, Inc.| (default, Mar 6 2015, 12:07:41) \nType "copyright", "credits" or "license" for more information.\n\nIPython 3.1.0 -- An enhanced Interactive Python.\nAnaconda is brought to you by Continuum Analytics.\nPlease check out: http://continuum.io/thanks and https://binstar.org\n? -> Introduction and overview of IPython\'s features.\n%quickref -> Quick reference.\nhelp -> Python\'s own help system.\nobject? -> Details about \'object\', use \'object??\' for extra details.\n'
# Make IPython automatically call any callable object even if you didn't type
# explicit parentheses. For example, 'str 43' becomes 'str(43)' automatically.
# The value can be '0' to disable the feature, '1' for 'smart' autocall, where
# it is not applied if there are no more arguments on the line, and '2' for
# 'full' autocall, where all callable objects are automatically called (even if
# no arguments are present).
# c.TerminalInteractiveShell.autocall = 0
# Autoindent IPython code entered interactively.
# c.TerminalInteractiveShell.autoindent = True
# Set the color scheme (NoColor, Linux, or LightBG).
# c.TerminalInteractiveShell.colors = 'LightBG'
# Set the editor used by IPython (default to $EDITOR/vi/notepad).
# c.TerminalInteractiveShell.editor = 'mate -w'
# Use colors for displaying information about objects. Because this information
# is passed through a pager (like 'less'), and some pagers get confused with
# color codes, this capability can be turned off.
# c.TerminalInteractiveShell.color_info = True
#
# c.TerminalInteractiveShell.readline_parse_and_bind = ['tab: complete', '"\\C-l": clear-screen', 'set show-all-if-ambiguous on', '"\\C-o": tab-insert', '"\\C-r": reverse-search-history', '"\\C-s": forward-search-history', '"\\C-p": history-search-backward', '"\\C-n": history-search-forward', '"\\e[A": history-search-backward', '"\\e[B": history-search-forward', '"\\C-k": kill-line', '"\\C-u": unix-line-discard']
# Deprecated, use PromptManager.in2_template
# c.TerminalInteractiveShell.prompt_in2 = ' .\\D.: '
#
# c.TerminalInteractiveShell.separate_out2 = ''
# The part of the banner to be printed after the profile
# c.TerminalInteractiveShell.banner2 = ''
# Start logging to the given file in append mode. Use `logfile` to specify a log
# file to **overwrite** logs to.
# c.TerminalInteractiveShell.logappend = ''
# Don't call post-execute functions that have failed in the past.
# c.TerminalInteractiveShell.disable_failing_post_execute = False
# Deprecated, use PromptManager.out_template
# c.TerminalInteractiveShell.prompt_out = 'Out[\\#]: '
# Enable deep (recursive) reloading by default. IPython can use the deep_reload
# module which reloads changes in modules recursively (it replaces the reload()
# function, so you don't need to change anything to use it). deep_reload()
# forces a full reload of modules whose code may have changed, which the default
# reload() function does not. When deep_reload is off, IPython will use the
# normal reload(), but deep_reload will still be available as dreload().
# c.TerminalInteractiveShell.deep_reload = False
#
# c.TerminalInteractiveShell.xmode = 'Context'
# Show rewritten input, e.g. for autocall.
# c.TerminalInteractiveShell.show_rewritten_input = True
# Number of lines of your screen, used to control printing of very long strings.
# Strings longer than this number of lines will be sent through a pager instead
# of directly printed. The default value for this is 0, which means IPython
# will auto-detect your screen size every time it needs to print certain
# potentially long strings (this doesn't change the behavior of the 'print'
# keyword, it's only triggered internally). If for some reason this isn't
# working well (it needs curses support), specify it yourself. Otherwise don't
# change the default.
# c.TerminalInteractiveShell.screen_length = 0
# A list of ast.NodeTransformer subclass instances, which will be applied to
# user input before code is run.
# c.TerminalInteractiveShell.ast_transformers = []
# Enable auto setting the terminal title.
# c.TerminalInteractiveShell.term_title = False
#------------------------------------------------------------------------------
# PromptManager configuration
#------------------------------------------------------------------------------
# This is the primary interface for producing IPython's prompts.
#
# c.PromptManager.color_scheme = 'Linux'
# Continuation prompt.
# c.PromptManager.in2_template = ' .\\D.: '
# Input prompt. '\#' will be transformed to the prompt number
# c.PromptManager.in_template = 'In [\\#]: '
# Output prompt. '\#' will be transformed to the prompt number
# c.PromptManager.out_template = 'Out[\\#]: '
# If True (default), each prompt will be right-aligned with the preceding one.
# c.PromptManager.justify = True
#------------------------------------------------------------------------------
# HistoryManager configuration
#------------------------------------------------------------------------------
# A class to organize all history-related functionality in one place.
# HistoryManager will inherit config from: HistoryAccessor
# Options for configuring the SQLite connection
#
# These options are passed as keyword args to sqlite3.connect when establishing
# database connections.
# c.HistoryManager.connection_options = {}
# Should the history database include output? (default: no)
# c.HistoryManager.db_log_output = False
# enable the SQLite history
#
# set enabled=False to disable the SQLite history, in which case there will be
# no stored history, no SQLite connection, and no background saving thread.
# This may be necessary in some threaded environments where IPython is embedded.
# c.HistoryManager.enabled = True
# Path to file to use for SQLite history database.
#
# By default, IPython will put the history database in the IPython profile
# directory. If you would rather share one history among profiles, you can set
# this value in each, so that they are consistent.
#
# Due to an issue with fcntl, SQLite is known to misbehave on some NFS mounts.
# If you see IPython hanging, try setting this to something on a local disk,
# e.g::
#
# ipython --HistoryManager.hist_file=/tmp/ipython_hist.sqlite
# c.HistoryManager.hist_file = ''
# Write to database every x commands (higher values save disk access & power).
# Values of 1 or less effectively disable caching.
# c.HistoryManager.db_cache_size = 0
#------------------------------------------------------------------------------
# ProfileDir configuration
#------------------------------------------------------------------------------
# An object to manage the profile directory and its resources.
#
# The profile directory is used by all IPython applications, to manage
# configuration, logging and security.
#
# This object knows how to find, create and manage these directories. This
# should be used by any code that wants to handle profiles.
# Set the profile location directly. This overrides the logic used by the
# `profile` option.
# c.ProfileDir.location = ''
#------------------------------------------------------------------------------
# PlainTextFormatter configuration
#------------------------------------------------------------------------------
# The default pretty-printer.
#
# This uses :mod:`IPython.lib.pretty` to compute the format data of the object.
# If the object cannot be pretty printed, :func:`repr` is used. See the
# documentation of :mod:`IPython.lib.pretty` for details on how to write pretty
# printers. Here is a simple example::
#
# def dtype_pprinter(obj, p, cycle):
# if cycle:
# return p.text('dtype(...)')
# if hasattr(obj, 'fields'):
# if obj.fields is None:
# p.text(repr(obj))
# else:
# p.begin_group(7, 'dtype([')
# for i, field in enumerate(obj.descr):
# if i > 0:
# p.text(',')
# p.breakable()
# p.pretty(field)
# p.end_group(7, '])')
# PlainTextFormatter will inherit config from: BaseFormatter
#
# c.PlainTextFormatter.newline = '\n'
#
# c.PlainTextFormatter.max_width = 79
#
# c.PlainTextFormatter.verbose = False
#
# c.PlainTextFormatter.pprint = True
#
# c.PlainTextFormatter.singleton_printers = {}
#
# c.PlainTextFormatter.type_printers = {}
# Truncate large collections (lists, dicts, tuples, sets) to this size.
#
# Set to 0 to disable truncation.
# c.PlainTextFormatter.max_seq_length = 1000
#
# c.PlainTextFormatter.deferred_printers = {}
#
# c.PlainTextFormatter.float_precision = ''
#------------------------------------------------------------------------------
# IPCompleter configuration
#------------------------------------------------------------------------------
# Extension of the completer class with IPython-specific features
# IPCompleter will inherit config from: Completer
# Whether to merge completion results into a single list
#
# If False, only the completion results from the first non-empty completer will
# be returned.
# c.IPCompleter.merge_completions = True
# Activate greedy completion
#
# This will enable completion on elements of lists, results of function calls,
# etc., but can be unsafe because the code is actually evaluated on TAB.
# c.IPCompleter.greedy = False
# Instruct the completer to use __all__ for the completion
#
# Specifically, when completing on ``object.<tab>``.
#
# When True: only those names in obj.__all__ will be included.
#
# When False [default]: the __all__ attribute is ignored
# c.IPCompleter.limit_to__all__ = False
# Instruct the completer to omit private method names
#
# Specifically, when completing on ``object.<tab>``.
#
# When 2 [default]: all names that start with '_' will be excluded.
#
# When 1: all 'magic' names (``__foo__``) will be excluded.
#
# When 0: nothing will be excluded.
# c.IPCompleter.omit__names = 2
#------------------------------------------------------------------------------
# ScriptMagics configuration
#------------------------------------------------------------------------------
# Magics for talking to scripts
#
# This defines a base `%%script` cell magic for running a cell with a program in
# a subprocess, and registers a few top-level magics that call %%script with
# common interpreters.
# Extra script cell magics to define
#
# This generates simple wrappers of `%%script foo` as `%%foo`.
#
# If you want to add script magics that aren't on your path, specify them in
# script_paths
# c.ScriptMagics.script_magics = []
# Dict mapping short 'ruby' names to full paths, such as '/opt/secret/bin/ruby'
#
# Only necessary for items in script_magics where the default path will not find
# the right interpreter.
# c.ScriptMagics.script_paths = {}
#------------------------------------------------------------------------------
# StoreMagics configuration
#------------------------------------------------------------------------------
# Lightweight persistence for python variables.
#
# Provides the %store magic.
# If True, any %store-d variables will be automatically restored when IPython
# starts.
# c.StoreMagics.autorestore = False
| mit |
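Every option in the profile above is commented out, so it changes nothing. As a hedged illustration of what a populated profile might look like, here are a few of the same keys set to concrete values; the specific choices are assumptions, not recommendations.

c = get_config()
c.InteractiveShellApp.exec_lines = ['import numpy as np', 'import matplotlib.pyplot as plt']
c.TerminalInteractiveShell.editor = 'vim'
c.TerminalInteractiveShell.confirm_exit = False
c.TerminalIPythonApp.display_banner = False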
rayidghani/randomlogits | randomlogits.py | 1 | 1368 | from __future__ import division
import numpy as np
from sklearn.ensemble import BaggingClassifier
from sklearn.linear_model import LogisticRegression, Perceptron, SGDClassifier, OrthogonalMatchingPursuit, RandomizedLogisticRegression
import pylab as pl
from itertools import chain
def TrainRandomLogits(X, y, n_logits, n_features):
clf = BaggingClassifier(base_estimator = LogisticRegression(),
n_estimators=n_logits, max_features = n_features)
clf.fit(X, y)
return clf
def GetFeatureImportances(rl):
num_logits = rl.get_params(deep=False)['n_estimators']
array = rl.estimators_features_
total_features = len(set(chain(*array)))
n_features = rl.get_params(deep=False)['max_features']
#initialize feature importance matrix
feature_importance_matrix = np.zeros((num_logits, total_features))
row = 0
for feature in rl.estimators_features_:
for i in range(n_features):
feature_importance_matrix[row][feature[i]] = rl.estimators_[row].coef_[0][i]
row += 1
feature_importance_matrix[feature_importance_matrix==0]=['nan']
mean_feature_importance = np.nanmean(feature_importance_matrix, dtype=np.float64, axis=0)
std_feature_importance = np.nanstd(feature_importance_matrix, dtype=np.float64,axis=0)
return (feature_importance_matrix,mean_feature_importance, std_feature_importance)
| mit |
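A minimal usage sketch for the two functions above, assuming they are importable from randomlogits; the synthetic dataset and the 20-logit / 4-feature configuration are illustrative assumptions.

import numpy as np
from sklearn.datasets import make_classification
# from randomlogits import TrainRandomLogits, GetFeatureImportances

X, y = make_classification(n_samples=200, n_features=10, random_state=0)
rl = TrainRandomLogits(X, y, n_logits=20, n_features=4)
fim, mean_imp, std_imp = GetFeatureImportances(rl)
print(mean_imp)   # average logistic coefficient per feature, over the logits that sampled it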
plissonf/scikit-learn | examples/covariance/plot_outlier_detection.py | 235 | 3891 | """
==========================================
Outlier detection with several methods.
==========================================
When the amount of contamination is known, this example illustrates two
different ways of performing :ref:`outlier_detection`:
- based on a robust estimator of covariance, which is assuming that the
data are Gaussian distributed and performs better than the One-Class SVM
in that case.
- using the One-Class SVM and its ability to capture the shape of the
data set, hence performing better when the data is strongly
non-Gaussian, i.e. with two well-separated clusters;
The ground truth about inliers and outliers is given by the points colors
while the orange-filled area indicates which points are reported as inliers
by each method.
Here, we assume that we know the fraction of outliers in the datasets.
Thus rather than using the 'predict' method of the objects, we set the
threshold on the decision_function to separate out the corresponding
fraction.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.font_manager
from scipy import stats
from sklearn import svm
from sklearn.covariance import EllipticEnvelope
# Example settings
n_samples = 200
outliers_fraction = 0.25
clusters_separation = [0, 1, 2]
# define two outlier detection tools to be compared
classifiers = {
"One-Class SVM": svm.OneClassSVM(nu=0.95 * outliers_fraction + 0.05,
kernel="rbf", gamma=0.1),
"robust covariance estimator": EllipticEnvelope(contamination=.1)}
# Compare given classifiers under given settings
xx, yy = np.meshgrid(np.linspace(-7, 7, 500), np.linspace(-7, 7, 500))
n_inliers = int((1. - outliers_fraction) * n_samples)
n_outliers = int(outliers_fraction * n_samples)
ground_truth = np.ones(n_samples, dtype=int)
ground_truth[-n_outliers:] = 0
# Fit the problem with varying cluster separation
for i, offset in enumerate(clusters_separation):
np.random.seed(42)
# Data generation
X1 = 0.3 * np.random.randn(n_inliers // 2, 2) - offset
X2 = 0.3 * np.random.randn(n_inliers // 2, 2) + offset
X = np.r_[X1, X2]
# Add outliers
X = np.r_[X, np.random.uniform(low=-6, high=6, size=(n_outliers, 2))]
# Fit the model with the One-Class SVM
plt.figure(figsize=(10, 5))
for i, (clf_name, clf) in enumerate(classifiers.items()):
# fit the data and tag outliers
clf.fit(X)
y_pred = clf.decision_function(X).ravel()
threshold = stats.scoreatpercentile(y_pred,
100 * outliers_fraction)
y_pred = y_pred > threshold
n_errors = (y_pred != ground_truth).sum()
# plot the levels lines and the points
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
subplot = plt.subplot(1, 2, i + 1)
subplot.set_title("Outlier detection")
subplot.contourf(xx, yy, Z, levels=np.linspace(Z.min(), threshold, 7),
cmap=plt.cm.Blues_r)
a = subplot.contour(xx, yy, Z, levels=[threshold],
linewidths=2, colors='red')
subplot.contourf(xx, yy, Z, levels=[threshold, Z.max()],
colors='orange')
b = subplot.scatter(X[:-n_outliers, 0], X[:-n_outliers, 1], c='white')
c = subplot.scatter(X[-n_outliers:, 0], X[-n_outliers:, 1], c='black')
subplot.axis('tight')
subplot.legend(
[a.collections[0], b, c],
['learned decision function', 'true inliers', 'true outliers'],
prop=matplotlib.font_manager.FontProperties(size=11))
subplot.set_xlabel("%d. %s (errors: %d)" % (i + 1, clf_name, n_errors))
subplot.set_xlim((-7, 7))
subplot.set_ylim((-7, 7))
plt.subplots_adjust(0.04, 0.1, 0.96, 0.94, 0.1, 0.26)
plt.show()
| bsd-3-clause |
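A stripped-down sketch of one of the two detectors used above, without the plotting machinery; the data sizes and the contamination value are assumptions chosen to mirror the example.

import numpy as np
from sklearn.covariance import EllipticEnvelope

rng = np.random.RandomState(42)
X = np.r_[0.3 * rng.randn(100, 2), rng.uniform(low=-6, high=6, size=(10, 2))]
detector = EllipticEnvelope(contamination=0.1)
detector.fit(X)
labels = detector.predict(X)              # +1 for inliers, -1 for flagged outliers
print(int((labels == -1).sum()))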
Lawrence-Liu/scikit-learn | examples/exercises/plot_cv_digits.py | 232 | 1206 | """
=============================================
Cross-validation on Digits Dataset Exercise
=============================================
A tutorial exercise using Cross-validation with an SVM on the Digits dataset.
This exercise is used in the :ref:`cv_generators_tut` part of the
:ref:`model_selection_tut` section of the :ref:`stat_learn_tut_index`.
"""
print(__doc__)
import numpy as np
from sklearn import cross_validation, datasets, svm
digits = datasets.load_digits()
X = digits.data
y = digits.target
svc = svm.SVC(kernel='linear')
C_s = np.logspace(-10, 0, 10)
scores = list()
scores_std = list()
for C in C_s:
svc.C = C
this_scores = cross_validation.cross_val_score(svc, X, y, n_jobs=1)
scores.append(np.mean(this_scores))
scores_std.append(np.std(this_scores))
# Do the plotting
import matplotlib.pyplot as plt
plt.figure(1, figsize=(4, 3))
plt.clf()
plt.semilogx(C_s, scores)
plt.semilogx(C_s, np.array(scores) + np.array(scores_std), 'b--')
plt.semilogx(C_s, np.array(scores) - np.array(scores_std), 'b--')
locs, labels = plt.yticks()
plt.yticks(locs, list(map(lambda x: "%g" % x, locs)))
plt.ylabel('CV score')
plt.xlabel('Parameter C')
plt.ylim(0, 1.1)
plt.show()
| bsd-3-clause |
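The exercise above relies on the cross_validation module, which was later deprecated in favour of sklearn.model_selection. A minimal equivalent of the scoring loop using the newer import (a sketch, not part of the original exercise):

import numpy as np
from sklearn import datasets, svm
from sklearn.model_selection import cross_val_score

digits = datasets.load_digits()
svc = svm.SVC(kernel='linear')
for C in np.logspace(-10, 0, 10):
    svc.C = C
    print(C, cross_val_score(svc, digits.data, digits.target, n_jobs=1).mean())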
fzalkow/scikit-learn | sklearn/gaussian_process/tests/test_gaussian_process.py | 267 | 6813 | """
Testing for Gaussian Process module (sklearn.gaussian_process)
"""
# Author: Vincent Dubourg <[email protected]>
# Licence: BSD 3 clause
from nose.tools import raises
from nose.tools import assert_true
import numpy as np
from sklearn.gaussian_process import GaussianProcess
from sklearn.gaussian_process import regression_models as regression
from sklearn.gaussian_process import correlation_models as correlation
from sklearn.datasets import make_regression
from sklearn.utils.testing import assert_greater
f = lambda x: x * np.sin(x)
X = np.atleast_2d([1., 3., 5., 6., 7., 8.]).T
X2 = np.atleast_2d([2., 4., 5.5, 6.5, 7.5]).T
y = f(X).ravel()
def test_1d(regr=regression.constant, corr=correlation.squared_exponential,
random_start=10, beta0=None):
# MLE estimation of a one-dimensional Gaussian Process model.
# Check random start optimization.
# Test the interpolating property.
gp = GaussianProcess(regr=regr, corr=corr, beta0=beta0,
theta0=1e-2, thetaL=1e-4, thetaU=1e-1,
random_start=random_start, verbose=False).fit(X, y)
y_pred, MSE = gp.predict(X, eval_MSE=True)
y2_pred, MSE2 = gp.predict(X2, eval_MSE=True)
assert_true(np.allclose(y_pred, y) and np.allclose(MSE, 0.)
and np.allclose(MSE2, 0., atol=10))
def test_2d(regr=regression.constant, corr=correlation.squared_exponential,
random_start=10, beta0=None):
# MLE estimation of a two-dimensional Gaussian Process model accounting for
# anisotropy. Check random start optimization.
# Test the interpolating property.
b, kappa, e = 5., .5, .1
g = lambda x: b - x[:, 1] - kappa * (x[:, 0] - e) ** 2.
X = np.array([[-4.61611719, -6.00099547],
[4.10469096, 5.32782448],
[0.00000000, -0.50000000],
[-6.17289014, -4.6984743],
[1.3109306, -6.93271427],
[-5.03823144, 3.10584743],
[-2.87600388, 6.74310541],
[5.21301203, 4.26386883]])
y = g(X).ravel()
thetaL = [1e-4] * 2
thetaU = [1e-1] * 2
gp = GaussianProcess(regr=regr, corr=corr, beta0=beta0,
theta0=[1e-2] * 2, thetaL=thetaL,
thetaU=thetaU,
random_start=random_start, verbose=False)
gp.fit(X, y)
y_pred, MSE = gp.predict(X, eval_MSE=True)
assert_true(np.allclose(y_pred, y) and np.allclose(MSE, 0.))
eps = np.finfo(gp.theta_.dtype).eps
assert_true(np.all(gp.theta_ >= thetaL - eps)) # Lower bounds of hyperparameters
assert_true(np.all(gp.theta_ <= thetaU + eps)) # Upper bounds of hyperparameters
def test_2d_2d(regr=regression.constant, corr=correlation.squared_exponential,
random_start=10, beta0=None):
# MLE estimation of a two-dimensional Gaussian Process model accounting for
# anisotropy. Check random start optimization.
# Test the GP interpolation for 2D output
b, kappa, e = 5., .5, .1
g = lambda x: b - x[:, 1] - kappa * (x[:, 0] - e) ** 2.
f = lambda x: np.vstack((g(x), g(x))).T
X = np.array([[-4.61611719, -6.00099547],
[4.10469096, 5.32782448],
[0.00000000, -0.50000000],
[-6.17289014, -4.6984743],
[1.3109306, -6.93271427],
[-5.03823144, 3.10584743],
[-2.87600388, 6.74310541],
[5.21301203, 4.26386883]])
y = f(X)
gp = GaussianProcess(regr=regr, corr=corr, beta0=beta0,
theta0=[1e-2] * 2, thetaL=[1e-4] * 2,
thetaU=[1e-1] * 2,
random_start=random_start, verbose=False)
gp.fit(X, y)
y_pred, MSE = gp.predict(X, eval_MSE=True)
assert_true(np.allclose(y_pred, y) and np.allclose(MSE, 0.))
@raises(ValueError)
def test_wrong_number_of_outputs():
gp = GaussianProcess()
gp.fit([[1, 2, 3], [4, 5, 6]], [1, 2, 3])
def test_more_builtin_correlation_models(random_start=1):
# Repeat test_1d and test_2d for several built-in correlation
# models specified as strings.
all_corr = ['absolute_exponential', 'squared_exponential', 'cubic',
'linear']
for corr in all_corr:
test_1d(regr='constant', corr=corr, random_start=random_start)
test_2d(regr='constant', corr=corr, random_start=random_start)
test_2d_2d(regr='constant', corr=corr, random_start=random_start)
def test_ordinary_kriging():
# Repeat test_1d and test_2d with given regression weights (beta0) for
# different regression models (Ordinary Kriging).
test_1d(regr='linear', beta0=[0., 0.5])
test_1d(regr='quadratic', beta0=[0., 0.5, 0.5])
test_2d(regr='linear', beta0=[0., 0.5, 0.5])
test_2d(regr='quadratic', beta0=[0., 0.5, 0.5, 0.5, 0.5, 0.5])
test_2d_2d(regr='linear', beta0=[0., 0.5, 0.5])
test_2d_2d(regr='quadratic', beta0=[0., 0.5, 0.5, 0.5, 0.5, 0.5])
def test_no_normalize():
gp = GaussianProcess(normalize=False).fit(X, y)
y_pred = gp.predict(X)
assert_true(np.allclose(y_pred, y))
def test_random_starts():
# Test that an increasing number of random-starts of GP fitting only
# increases the reduced likelihood function of the optimal theta.
n_samples, n_features = 50, 3
np.random.seed(0)
rng = np.random.RandomState(0)
X = rng.randn(n_samples, n_features) * 2 - 1
y = np.sin(X).sum(axis=1) + np.sin(3 * X).sum(axis=1)
best_likelihood = -np.inf
for random_start in range(1, 5):
gp = GaussianProcess(regr="constant", corr="squared_exponential",
theta0=[1e-0] * n_features,
thetaL=[1e-4] * n_features,
thetaU=[1e+1] * n_features,
random_start=random_start, random_state=0,
verbose=False).fit(X, y)
rlf = gp.reduced_likelihood_function()[0]
assert_greater(rlf, best_likelihood - np.finfo(np.float32).eps)
best_likelihood = rlf
def test_mse_solving():
# test the MSE estimate to be sane.
# non-regression test for ignoring off-diagonals of feature covariance,
# testing with nugget that renders covariance useless, only
# using the mean function, with low effective rank of data
gp = GaussianProcess(corr='absolute_exponential', theta0=1e-4,
thetaL=1e-12, thetaU=1e-2, nugget=1e-2,
optimizer='Welch', regr="linear", random_state=0)
X, y = make_regression(n_informative=3, n_features=60, noise=50,
random_state=0, effective_rank=1)
gp.fit(X, y)
assert_greater(1000, gp.predict(X, eval_MSE=True)[1].mean())
| bsd-3-clause |
chrisburr/scikit-learn | sklearn/metrics/cluster/tests/test_unsupervised.py | 230 | 2823 | import numpy as np
from scipy.sparse import csr_matrix
from sklearn import datasets
from sklearn.metrics.cluster.unsupervised import silhouette_score
from sklearn.metrics import pairwise_distances
from sklearn.utils.testing import assert_false, assert_almost_equal
from sklearn.utils.testing import assert_raises_regexp
def test_silhouette():
# Tests the Silhouette Coefficient.
dataset = datasets.load_iris()
X = dataset.data
y = dataset.target
D = pairwise_distances(X, metric='euclidean')
# Given that the actual labels are used, we can assume that S would be
# positive.
silhouette = silhouette_score(D, y, metric='precomputed')
assert(silhouette > 0)
# Test without calculating D
silhouette_metric = silhouette_score(X, y, metric='euclidean')
assert_almost_equal(silhouette, silhouette_metric)
# Test with sampling
silhouette = silhouette_score(D, y, metric='precomputed',
sample_size=int(X.shape[0] / 2),
random_state=0)
silhouette_metric = silhouette_score(X, y, metric='euclidean',
sample_size=int(X.shape[0] / 2),
random_state=0)
assert(silhouette > 0)
assert(silhouette_metric > 0)
assert_almost_equal(silhouette_metric, silhouette)
# Test with sparse X
X_sparse = csr_matrix(X)
D = pairwise_distances(X_sparse, metric='euclidean')
silhouette = silhouette_score(D, y, metric='precomputed')
assert(silhouette > 0)
def test_no_nan():
# Assert Silhouette Coefficient != nan when there is 1 sample in a class.
# This tests for the condition that caused issue 960.
# Note that there is only one sample in cluster 0. This used to cause the
# silhouette_score to return nan (see bug #960).
labels = np.array([1, 0, 1, 1, 1])
# The distance matrix doesn't actually matter.
D = np.random.RandomState(0).rand(len(labels), len(labels))
silhouette = silhouette_score(D, labels, metric='precomputed')
assert_false(np.isnan(silhouette))
def test_correct_labelsize():
# Assert 1 < n_labels < n_samples
dataset = datasets.load_iris()
X = dataset.data
# n_labels = n_samples
y = np.arange(X.shape[0])
assert_raises_regexp(ValueError,
'Number of labels is %d\. Valid values are 2 '
'to n_samples - 1 \(inclusive\)' % len(np.unique(y)),
silhouette_score, X, y)
# n_labels = 1
y = np.zeros(X.shape[0])
assert_raises_regexp(ValueError,
'Number of labels is %d\. Valid values are 2 '
'to n_samples - 1 \(inclusive\)' % len(np.unique(y)),
silhouette_score, X, y)
| bsd-3-clause |
andim/scipy | scipy/signal/ltisys.py | 3 | 79165 | """
ltisys -- a collection of classes and functions for modeling linear
time invariant systems.
"""
from __future__ import division, print_function, absolute_import
#
# Author: Travis Oliphant 2001
#
# Feb 2010: Warren Weckesser
# Rewrote lsim2 and added impulse2.
# Aug 2013: Juan Luis Cano
# Rewrote abcd_normalize.
# Jan 2015: Irvin Probst irvin DOT probst AT ensta-bretagne DOT fr
# Added pole placement
# Mar 2015: Clancy Rowley
# Rewrote lsim
# May 2015: Felix Berkenkamp
# Split lti class into subclasses
#
import warnings
import numpy as np
#np.linalg.qr fails on some tests with LinAlgError: zgeqrf returns -7
#use scipy's qr until this is solved
from scipy.linalg import qr as s_qr
import numpy
from numpy import (r_, eye, real, atleast_1d, atleast_2d, poly,
squeeze, asarray, product, zeros, array,
dot, transpose, ones, zeros_like, linspace, nan_to_num)
import copy
from scipy import integrate, interpolate, linalg
from scipy._lib.six import xrange
from .filter_design import tf2zpk, zpk2tf, normalize, freqs
__all__ = ['tf2ss', 'ss2tf', 'abcd_normalize', 'zpk2ss', 'ss2zpk', 'lti',
'TransferFunction', 'ZerosPolesGain', 'StateSpace', 'lsim',
'lsim2', 'impulse', 'impulse2', 'step', 'step2', 'bode',
'freqresp', 'place_poles']
def tf2ss(num, den):
r"""Transfer function to state-space representation.
Parameters
----------
num, den : array_like
Sequences representing the numerator and denominator polynomials.
The denominator needs to be at least as long as the numerator.
Returns
-------
A, B, C, D : ndarray
State space representation of the system, in controller canonical
form.
Examples
--------
Convert the transfer function:
.. math:: H(s) = \frac{s^2 + 3s + 3}{s^2 + 2s + 1}
>>> num = [1, 3, 3]
>>> den = [1, 2, 1]
to the state-space representation:
.. math::
\dot{\textbf{x}}(t) =
\begin{bmatrix} -2 & -1 \\ 1 & 0 \end{bmatrix} \textbf{x}(t) +
\begin{bmatrix} 1 \\ 0 \end{bmatrix} \textbf{u}(t) \\
\textbf{y}(t) = \begin{bmatrix} 1 & 2 \end{bmatrix} \textbf{x}(t) +
\begin{bmatrix} 1 \end{bmatrix} \textbf{u}(t)
>>> from scipy.signal import tf2ss
>>> A, B, C, D = tf2ss(num, den)
>>> A
array([[-2., -1.],
[ 1., 0.]])
>>> B
array([[ 1.],
[ 0.]])
>>> C
array([[ 1., 2.]])
>>> D
array([ 1.])
"""
# Controller canonical state-space representation.
# if M+1 = len(num) and K+1 = len(den) then we must have M <= K
# states are found by asserting that X(s) = U(s) / D(s)
# then Y(s) = N(s) * X(s)
#
# A, B, C, and D follow quite naturally.
#
num, den = normalize(num, den) # Strips zeros, checks arrays
nn = len(num.shape)
if nn == 1:
num = asarray([num], num.dtype)
M = num.shape[1]
K = len(den)
if M > K:
msg = "Improper transfer function. `num` is longer than `den`."
raise ValueError(msg)
if M == 0 or K == 0: # Null system
return (array([], float), array([], float), array([], float),
array([], float))
    # pad numerator to have same number of columns as denominator
num = r_['-1', zeros((num.shape[0], K - M), num.dtype), num]
if num.shape[-1] > 0:
D = num[:, 0]
else:
D = array([], float)
if K == 1:
return array([], float), array([], float), array([], float), D
frow = -array([den[1:]])
A = r_[frow, eye(K - 2, K - 1)]
B = eye(K - 1, 1)
C = num[:, 1:] - num[:, 0] * den[1:]
return A, B, C, D
def _none_to_empty_2d(arg):
if arg is None:
return zeros((0, 0))
else:
return arg
def _atleast_2d_or_none(arg):
if arg is not None:
return atleast_2d(arg)
def _shape_or_none(M):
if M is not None:
return M.shape
else:
return (None,) * 2
def _choice_not_none(*args):
for arg in args:
if arg is not None:
return arg
def _restore(M, shape):
if M.shape == (0, 0):
return zeros(shape)
else:
if M.shape != shape:
raise ValueError("The input arrays have incompatible shapes.")
return M
def abcd_normalize(A=None, B=None, C=None, D=None):
"""Check state-space matrices and ensure they are two-dimensional.
If enough information on the system is provided, that is, enough
properly-shaped arrays are passed to the function, the missing ones
are built from this information, ensuring the correct number of
rows and columns. Otherwise a ValueError is raised.
Parameters
----------
A, B, C, D : array_like, optional
State-space matrices. All of them are None (missing) by default.
See `ss2tf` for format.
Returns
-------
A, B, C, D : array
Properly shaped state-space matrices.
Raises
------
ValueError
If not enough information on the system was provided.
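    Examples
    --------
    A minimal usage sketch (the matrices below are illustrative): a missing
    feedthrough matrix ``D`` is inferred from properly shaped ``A``, ``B``
    and ``C``.
    >>> from scipy.signal import abcd_normalize
    >>> A, B, C, D = abcd_normalize([[-1., 0.5], [0., -2.]],
    ...                             [[1.], [0.]], [[1., 0.]])
    >>> D.shape
    (1, 1)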
"""
A, B, C, D = map(_atleast_2d_or_none, (A, B, C, D))
MA, NA = _shape_or_none(A)
MB, NB = _shape_or_none(B)
MC, NC = _shape_or_none(C)
MD, ND = _shape_or_none(D)
p = _choice_not_none(MA, MB, NC)
q = _choice_not_none(NB, ND)
r = _choice_not_none(MC, MD)
if p is None or q is None or r is None:
raise ValueError("Not enough information on the system.")
A, B, C, D = map(_none_to_empty_2d, (A, B, C, D))
A = _restore(A, (p, p))
B = _restore(B, (p, q))
C = _restore(C, (r, p))
D = _restore(D, (r, q))
return A, B, C, D
def ss2tf(A, B, C, D, input=0):
r"""State-space to transfer function.
A, B, C, D defines a linear state-space system with `p` inputs,
`q` outputs, and `n` state variables.
Parameters
----------
A : array_like
State (or system) matrix of shape ``(n, n)``
B : array_like
Input matrix of shape ``(n, p)``
C : array_like
Output matrix of shape ``(q, n)``
D : array_like
Feedthrough (or feedforward) matrix of shape ``(q, p)``
input : int, optional
For multiple-input systems, the index of the input to use.
Returns
-------
num : 2-D ndarray
Numerator(s) of the resulting transfer function(s). `num` has one row
for each of the system's outputs. Each row is a sequence representation
of the numerator polynomial.
den : 1-D ndarray
Denominator of the resulting transfer function(s). `den` is a sequence
representation of the denominator polynomial.
Examples
--------
Convert the state-space representation:
.. math::
\dot{\textbf{x}}(t) =
\begin{bmatrix} -2 & -1 \\ 1 & 0 \end{bmatrix} \textbf{x}(t) +
\begin{bmatrix} 1 \\ 0 \end{bmatrix} \textbf{u}(t) \\
\textbf{y}(t) = \begin{bmatrix} 1 & 2 \end{bmatrix} \textbf{x}(t) +
\begin{bmatrix} 1 \end{bmatrix} \textbf{u}(t)
>>> A = [[-2, -1], [1, 0]]
>>> B = [[1], [0]] # 2-dimensional column vector
>>> C = [[1, 2]] # 2-dimensional row vector
>>> D = 1
to the transfer function:
.. math:: H(s) = \frac{s^2 + 3s + 3}{s^2 + 2s + 1}
>>> from scipy.signal import ss2tf
>>> ss2tf(A, B, C, D)
(array([[1, 3, 3]]), array([ 1., 2., 1.]))
"""
# transfer function is C (sI - A)**(-1) B + D
# Check consistency and make them all rank-2 arrays
A, B, C, D = abcd_normalize(A, B, C, D)
nout, nin = D.shape
if input >= nin:
raise ValueError("System does not have the input specified.")
# make SIMO from possibly MIMO system.
B = B[:, input:input + 1]
D = D[:, input:input + 1]
try:
den = poly(A)
except ValueError:
den = 1
if (product(B.shape, axis=0) == 0) and (product(C.shape, axis=0) == 0):
num = numpy.ravel(D)
if (product(D.shape, axis=0) == 0) and (product(A.shape, axis=0) == 0):
den = []
return num, den
num_states = A.shape[0]
type_test = A[:, 0] + B[:, 0] + C[0, :] + D
num = numpy.zeros((nout, num_states + 1), type_test.dtype)
for k in range(nout):
Ck = atleast_2d(C[k, :])
num[k] = poly(A - dot(B, Ck)) + (D[k] - 1) * den
return num, den
def zpk2ss(z, p, k):
"""Zero-pole-gain representation to state-space representation
Parameters
----------
z, p : sequence
Zeros and poles.
k : float
System gain.
Returns
-------
A, B, C, D : ndarray
State space representation of the system, in controller canonical
form.
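    Examples
    --------
    A minimal usage sketch (illustrative zeros, poles and gain), converting
    ``H(s) = 3 / ((s + 1)(s + 2))`` to state space:
    >>> from scipy.signal import zpk2ss
    >>> A, B, C, D = zpk2ss([], [-1, -2], 3.0)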
"""
return tf2ss(*zpk2tf(z, p, k))
def ss2zpk(A, B, C, D, input=0):
"""State-space representation to zero-pole-gain representation.
A, B, C, D defines a linear state-space system with `p` inputs,
`q` outputs, and `n` state variables.
Parameters
----------
A : array_like
State (or system) matrix of shape ``(n, n)``
B : array_like
Input matrix of shape ``(n, p)``
C : array_like
Output matrix of shape ``(q, n)``
D : array_like
Feedthrough (or feedforward) matrix of shape ``(q, p)``
input : int, optional
For multiple-input systems, the index of the input to use.
Returns
-------
z, p : sequence
Zeros and poles.
k : float
System gain.
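    Examples
    --------
    A minimal usage sketch reusing the illustrative matrices from the
    `ss2tf` example:
    >>> from scipy.signal import ss2zpk
    >>> z, p, k = ss2zpk([[-2, -1], [1, 0]], [[1], [0]], [[1, 2]], 1)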
"""
return tf2zpk(*ss2tf(A, B, C, D, input=input))
class lti(object):
"""
Linear Time Invariant system base class.
Parameters
----------
*system : arguments
The `lti` class can be instantiated with either 2, 3 or 4 arguments.
The following gives the number of arguments and the corresponding
subclass that is created:
* 2: `TransferFunction`: (numerator, denominator)
* 3: `ZerosPolesGain`: (zeros, poles, gain)
* 4: `StateSpace`: (A, B, C, D)
Each argument can be an array or a sequence.
Notes
-----
`lti` instances do not exist directly. Instead, `lti` creates an instance
of one of its subclasses: `StateSpace`, `TransferFunction` or
`ZerosPolesGain`.
Changing the value of properties that are not directly part of the current
system representation (such as the `zeros` of a `StateSpace` system) is
very inefficient and may lead to numerical inaccuracies.
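    Examples
    --------
    A minimal usage sketch (the numeric systems below are illustrative); the
    number of arguments selects the subclass that is created.
    >>> from scipy import signal
    >>> sys_tf = signal.lti([1.0], [1.0, 2.0, 1.0])   # TransferFunction
    >>> sys_zpk = signal.lti([], [-1.0, -2.0], 5.0)   # ZerosPolesGain
    >>> sys_ss = sys_tf.to_ss()                       # StateSpace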
"""
def __new__(cls, *system):
"""Create an instance of the appropriate subclass."""
if cls is lti:
N = len(system)
if N == 2:
return super(lti, cls).__new__(TransferFunction)
elif N == 3:
return super(lti, cls).__new__(ZerosPolesGain)
elif N == 4:
return super(lti, cls).__new__(StateSpace)
else:
raise ValueError('Needs 2, 3 or 4 arguments.')
# __new__ was called from a subclass, let it call its own functions
return super(lti, cls).__new__(cls)
def __init__(self, *system):
"""
Initialize the `lti` baseclass.
The heavy lifting is done by the subclasses.
"""
self.inputs = None
self.outputs = None
@property
def num(self):
"""Numerator of the `TransferFunction` system."""
return self.to_tf().num
@num.setter
def num(self, num):
obj = self.to_tf()
obj.num = num
source_class = type(self)
self._copy(source_class(obj))
@property
def den(self):
"""Denominator of the `TransferFunction` system."""
return self.to_tf().den
@den.setter
def den(self, den):
obj = self.to_tf()
obj.den = den
source_class = type(self)
self._copy(source_class(obj))
@property
def zeros(self):
"""Zeros of the `ZerosPolesGain` system."""
return self.to_zpk().zeros
@zeros.setter
def zeros(self, zeros):
obj = self.to_zpk()
obj.zeros = zeros
source_class = type(self)
self._copy(source_class(obj))
@property
def poles(self):
"""Poles of the `ZerosPolesGain` system."""
return self.to_zpk().poles
@poles.setter
def poles(self, poles):
obj = self.to_zpk()
obj.poles = poles
source_class = type(self)
self._copy(source_class(obj))
@property
def gain(self):
"""Gain of the `ZerosPolesGain` system."""
return self.to_zpk().gain
@gain.setter
def gain(self, gain):
obj = self.to_zpk()
obj.gain = gain
source_class = type(self)
self._copy(source_class(obj))
@property
def A(self):
"""State matrix of the `StateSpace` system."""
return self.to_ss().A
@A.setter
def A(self, A):
obj = self.to_ss()
obj.A = A
source_class = type(self)
self._copy(source_class(obj))
@property
def B(self):
"""Input matrix of the `StateSpace` system."""
return self.to_ss().B
@B.setter
def B(self, B):
obj = self.to_ss()
obj.B = B
source_class = type(self)
self._copy(source_class(obj))
@property
def C(self):
"""Output matrix of the `StateSpace` system."""
return self.to_ss().C
@C.setter
def C(self, C):
obj = self.to_ss()
obj.C = C
source_class = type(self)
self._copy(source_class(obj))
@property
def D(self):
"""Feedthrough matrix of the `StateSpace` system."""
return self.to_ss().D
@D.setter
def D(self, D):
obj = self.to_ss()
obj.D = D
source_class = type(self)
self._copy(source_class(obj))
def impulse(self, X0=None, T=None, N=None):
"""
Return the impulse response of a continuous-time system.
See `scipy.signal.impulse` for details.
"""
return impulse(self, X0=X0, T=T, N=N)
def step(self, X0=None, T=None, N=None):
"""
Return the step response of a continuous-time system.
See `scipy.signal.step` for details.
"""
return step(self, X0=X0, T=T, N=N)
def output(self, U, T, X0=None):
"""
Return the response of a continuous-time system to input `U`.
See `scipy.signal.lsim` for details.
"""
return lsim(self, U, T, X0=X0)
def bode(self, w=None, n=100):
"""
Calculate Bode magnitude and phase data of a continuous-time system.
Returns a 3-tuple containing arrays of frequencies [rad/s], magnitude
[dB] and phase [deg]. See `scipy.signal.bode` for details.
Notes
-----
.. versionadded:: 0.11.0
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> s1 = signal.lti([1], [1, 1])
>>> w, mag, phase = s1.bode()
>>> plt.figure()
>>> plt.semilogx(w, mag) # Bode magnitude plot
>>> plt.figure()
>>> plt.semilogx(w, phase) # Bode phase plot
>>> plt.show()
"""
return bode(self, w=w, n=n)
def freqresp(self, w=None, n=10000):
"""
Calculate the frequency response of a continuous-time system.
Returns a 2-tuple containing arrays of frequencies [rad/s] and
complex magnitude.
See `scipy.signal.freqresp` for details.
"""
return freqresp(self, w=w, n=n)
class TransferFunction(lti):
"""Linear Time Invariant system class in transfer function form.
Represents the system as the transfer function
    :math:`H(s)=\sum_i b[i] s^i / \sum_j a[j] s^j`, where :math:`b` are
    elements of the numerator `num` and :math:`a` are the elements of the
    denominator `den`.
Parameters
----------
*system : arguments
The `TransferFunction` class can be instantiated with 1 or 2 arguments.
The following gives the number of input arguments and their
interpretation:
* 1: `lti` system: (`StateSpace`, `TransferFunction` or
`ZerosPolesGain`)
* 2: array_like: (numerator, denominator)
Notes
-----
Changing the value of properties that are not part of the
`TransferFunction` system representation (such as the `A`, `B`, `C`, `D`
state-space matrices) is very inefficient and may lead to numerical
inaccuracies.
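    Examples
    --------
    A minimal usage sketch (illustrative coefficients) for
    ``H(s) = (s^2 + 3s + 3) / (s^2 + 2s + 1)``:
    >>> from scipy import signal
    >>> sys = signal.TransferFunction([1, 3, 3], [1, 2, 1])
    >>> sys_ss = sys.to_ss()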
"""
def __new__(cls, *system):
"""Handle object conversion if input is an instance of lti."""
if len(system) == 1 and isinstance(system[0], lti):
return system[0].to_tf()
# No special conversion needed
return super(TransferFunction, cls).__new__(cls)
def __init__(self, *system):
"""Initialize the state space LTI system."""
# Conversion of lti instances is handled in __new__
if isinstance(system[0], lti):
return
        super(TransferFunction, self).__init__(*system)
self._num = None
self._den = None
self.num, self.den = normalize(*system)
def __repr__(self):
"""Return representation of the system's transfer function"""
return '{0}(\n{1},\n{2}\n)'.format(
self.__class__.__name__,
repr(self.num),
repr(self.den),
)
@property
def num(self):
"""Numerator of the `TransferFunction` system."""
return self._num
@num.setter
def num(self, num):
self._num = atleast_1d(num)
# Update dimensions
if len(self.num.shape) > 1:
self.outputs, self.inputs = self.num.shape
else:
self.outputs = 1
self.inputs = 1
@property
def den(self):
"""Denominator of the `TransferFunction` system."""
return self._den
@den.setter
def den(self, den):
self._den = atleast_1d(den)
def _copy(self, system):
"""
Copy the parameters of another `TransferFunction` object
Parameters
----------
system : `TransferFunction`
            The `TransferFunction` system that is to be copied
"""
self.num = system.num
self.den = system.den
def to_tf(self):
"""
Return a copy of the current `TransferFunction` system.
Returns
-------
sys : instance of `TransferFunction`
The current system (copy)
"""
return copy.deepcopy(self)
def to_zpk(self):
"""
Convert system representation to `ZerosPolesGain`.
Returns
-------
sys : instance of `ZerosPolesGain`
Zeros, poles, gain representation of the current system
"""
return ZerosPolesGain(*tf2zpk(self.num, self.den))
def to_ss(self):
"""
Convert system representation to `StateSpace`.
Returns
-------
sys : instance of `StateSpace`
State space model of the current system
"""
return StateSpace(*tf2ss(self.num, self.den))
class ZerosPolesGain(lti):
"""
Linear Time Invariant system class in zeros, poles, gain form.
Represents the system as the transfer function
:math:`H(s)=k \prod_i (s - z[i]) / \prod_j (s - p[j])`, where :math:`k` is
the `gain`, :math:`z` are the `zeros` and :math:`p` are the `poles`.
Parameters
----------
*system : arguments
The `ZerosPolesGain` class can be instantiated with 1 or 3 arguments.
The following gives the number of input arguments and their
interpretation:
* 1: `lti` system: (`StateSpace`, `TransferFunction` or
`ZerosPolesGain`)
* 3: array_like: (zeros, poles, gain)
Notes
-----
Changing the value of properties that are not part of the
`ZerosPolesGain` system representation (such as the `A`, `B`, `C`, `D`
state-space matrices) is very inefficient and may lead to numerical
inaccuracies.
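    Examples
    --------
    A minimal usage sketch (illustrative zeros, poles and gain) for
    ``H(s) = 4 (s + 1) / ((s + 2)(s + 3))``:
    >>> from scipy import signal
    >>> sys = signal.ZerosPolesGain([-1], [-2, -3], 4.0)
    >>> sys_tf = sys.to_tf()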
"""
def __new__(cls, *system):
"""Handle object conversion if input is an instance of `lti`"""
if len(system) == 1 and isinstance(system[0], lti):
return system[0].to_zpk()
# No special conversion needed
return super(ZerosPolesGain, cls).__new__(cls)
def __init__(self, *system):
"""Initialize the zeros, poles, gain LTI system."""
# Conversion of lti instances is handled in __new__
if isinstance(system[0], lti):
return
        super(ZerosPolesGain, self).__init__(*system)
self._zeros = None
self._poles = None
self._gain = None
self.zeros, self.poles, self.gain = system
def __repr__(self):
"""Return representation of the `ZerosPolesGain` system"""
return '{0}(\n{1},\n{2},\n{3}\n)'.format(
self.__class__.__name__,
repr(self.zeros),
repr(self.poles),
repr(self.gain),
)
@property
def zeros(self):
"""Zeros of the `ZerosPolesGain` system."""
return self._zeros
@zeros.setter
def zeros(self, zeros):
self._zeros = atleast_1d(zeros)
# Update dimensions
if len(self.zeros.shape) > 1:
self.outputs, self.inputs = self.zeros.shape
else:
self.outputs = 1
self.inputs = 1
@property
def poles(self):
"""Poles of the `ZerosPolesGain` system."""
return self._poles
@poles.setter
def poles(self, poles):
self._poles = atleast_1d(poles)
@property
def gain(self):
"""Gain of the `ZerosPolesGain` system."""
return self._gain
@gain.setter
def gain(self, gain):
self._gain = gain
def _copy(self, system):
"""
Copy the parameters of another `ZerosPolesGain` system.
Parameters
----------
system : instance of `ZerosPolesGain`
The zeros, poles gain system that is to be copied
"""
self.poles = system.poles
self.zeros = system.zeros
self.gain = system.gain
def to_tf(self):
"""
Convert system representation to `TransferFunction`.
Returns
-------
sys : instance of `TransferFunction`
Transfer function of the current system
"""
return TransferFunction(*zpk2tf(self.zeros, self.poles, self.gain))
def to_zpk(self):
"""
        Return a copy of the current `ZerosPolesGain` system.
Returns
-------
sys : instance of `ZerosPolesGain`
The current system (copy)
"""
return copy.deepcopy(self)
def to_ss(self):
"""
Convert system representation to `StateSpace`.
Returns
-------
sys : instance of `StateSpace`
State space model of the current system
"""
return StateSpace(*zpk2ss(self.zeros, self.poles, self.gain))
class StateSpace(lti):
"""
Linear Time Invariant system class in state-space form.
Represents the system as the first order differential equation
    :math:`\dot{x} = A x + B u`, with outputs :math:`y = C x + D u`.
Parameters
----------
*system : arguments
The `StateSpace` class can be instantiated with 1 or 4 arguments.
The following gives the number of input arguments and their
interpretation:
* 1: `lti` system: (`StateSpace`, `TransferFunction` or
`ZerosPolesGain`)
* 4: array_like: (A, B, C, D)
Notes
-----
Changing the value of properties that are not part of the
`StateSpace` system representation (such as `zeros` or `poles`) is very
inefficient and may lead to numerical inaccuracies.
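    Examples
    --------
    A minimal usage sketch (illustrative matrices) for the damped oscillator
    ``x'' + 2 x' + x = u`` with output ``y = x``:
    >>> from scipy import signal
    >>> sys = signal.StateSpace([[0., 1.], [-1., -2.]], [[0.], [1.]],
    ...                         [[1., 0.]], [[0.]])
    >>> sys_tf = sys.to_tf()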
"""
def __new__(cls, *system):
"""Handle object conversion if input is an instance of `lti`"""
if len(system) == 1 and isinstance(system[0], lti):
return system[0].to_ss()
# No special conversion needed
return super(StateSpace, cls).__new__(cls)
def __init__(self, *system):
"""Initialize the state space LTI system."""
# Conversion of lti instances is handled in __new__
if isinstance(system[0], lti):
return
        super(StateSpace, self).__init__(*system)
self._A = None
self._B = None
self._C = None
self._D = None
self.A, self.B, self.C, self.D = abcd_normalize(*system)
def __repr__(self):
"""Return representation of the `StateSpace` system."""
return '{0}(\n{1},\n{2},\n{3},\n{4}\n)'.format(
self.__class__.__name__,
repr(self.A),
repr(self.B),
repr(self.C),
repr(self.D),
)
@property
def A(self):
"""State matrix of the `StateSpace` system."""
return self._A
@A.setter
def A(self, A):
self._A = _atleast_2d_or_none(A)
@property
def B(self):
"""Input matrix of the `StateSpace` system."""
return self._B
@B.setter
def B(self, B):
self._B = _atleast_2d_or_none(B)
self.inputs = self.B.shape[-1]
@property
def C(self):
"""Output matrix of the `StateSpace` system."""
return self._C
@C.setter
def C(self, C):
self._C = _atleast_2d_or_none(C)
self.outputs = self.C.shape[0]
@property
def D(self):
"""Feedthrough matrix of the `StateSpace` system."""
return self._D
@D.setter
def D(self, D):
self._D = _atleast_2d_or_none(D)
def _copy(self, system):
"""
Copy the parameters of another `StateSpace` system.
Parameters
----------
system : instance of `StateSpace`
The state-space system that is to be copied
"""
self.A = system.A
self.B = system.B
self.C = system.C
self.D = system.D
def to_tf(self, **kwargs):
"""
Convert system representation to `TransferFunction`.
Parameters
----------
kwargs : dict, optional
            Additional keywords passed to `ss2tf`
Returns
-------
sys : instance of `TransferFunction`
Transfer function of the current system
"""
return TransferFunction(*ss2tf(self._A, self._B, self._C, self._D,
**kwargs))
def to_zpk(self, **kwargs):
"""
Convert system representation to `ZerosPolesGain`.
Parameters
----------
kwargs : dict, optional
Additional keywords passed to `ss2zpk`
Returns
-------
sys : instance of `ZerosPolesGain`
Zeros, poles, gain representation of the current system
"""
return ZerosPolesGain(*ss2zpk(self._A, self._B, self._C, self._D,
**kwargs))
def to_ss(self):
"""
Return a copy of the current `StateSpace` system.
Returns
-------
sys : instance of `StateSpace`
The current system (copy)
"""
return copy.deepcopy(self)
def lsim2(system, U=None, T=None, X0=None, **kwargs):
"""
Simulate output of a continuous-time linear system, by using
the ODE solver `scipy.integrate.odeint`.
Parameters
----------
system : an instance of the LTI class or a tuple describing the system.
The following gives the number of elements in the tuple and
the interpretation:
* 2: (num, den)
* 3: (zeros, poles, gain)
* 4: (A, B, C, D)
U : array_like (1D or 2D), optional
An input array describing the input at each time T. Linear
interpolation is used between given times. If there are
multiple inputs, then each column of the rank-2 array
represents an input. If U is not given, the input is assumed
to be zero.
T : array_like (1D or 2D), optional
The time steps at which the input is defined and at which the
output is desired. The default is 101 evenly spaced points on
the interval [0,10.0].
X0 : array_like (1D), optional
The initial condition of the state vector. If `X0` is not
given, the initial conditions are assumed to be 0.
kwargs : dict
Additional keyword arguments are passed on to the function
`odeint`. See the notes below for more details.
Returns
-------
T : 1D ndarray
The time values for the output.
yout : ndarray
The response of the system.
xout : ndarray
The time-evolution of the state-vector.
Notes
-----
This function uses `scipy.integrate.odeint` to solve the
system's differential equations. Additional keyword arguments
given to `lsim2` are passed on to `odeint`. See the documentation
for `scipy.integrate.odeint` for the full list of arguments.
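    Examples
    --------
    A minimal usage sketch (illustrative first-order system): step response
    of ``H(s) = 1 / (s + 1)``.
    >>> import numpy as np
    >>> from scipy import signal
    >>> t = np.linspace(0, 5, 101)
    >>> u = np.ones_like(t)
    >>> tout, y, x = signal.lsim2(([1.0], [1.0, 1.0]), U=u, T=t)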
"""
if isinstance(system, lti):
sys = system.to_ss()
else:
sys = lti(*system).to_ss()
if X0 is None:
X0 = zeros(sys.B.shape[0], sys.A.dtype)
if T is None:
# XXX T should really be a required argument, but U was
# changed from a required positional argument to a keyword,
# and T is after U in the argument list. So we either: change
# the API and move T in front of U; check here for T being
# None and raise an exception; or assign a default value to T
# here. This code implements the latter.
T = linspace(0, 10.0, 101)
T = atleast_1d(T)
if len(T.shape) != 1:
raise ValueError("T must be a rank-1 array.")
if U is not None:
U = atleast_1d(U)
if len(U.shape) == 1:
U = U.reshape(-1, 1)
sU = U.shape
if sU[0] != len(T):
raise ValueError("U must have the same number of rows "
"as elements in T.")
if sU[1] != sys.inputs:
raise ValueError("The number of inputs in U (%d) is not "
"compatible with the number of system "
"inputs (%d)" % (sU[1], sys.inputs))
# Create a callable that uses linear interpolation to
# calculate the input at any time.
ufunc = interpolate.interp1d(T, U, kind='linear',
axis=0, bounds_error=False)
def fprime(x, t, sys, ufunc):
"""The vector field of the linear system."""
return dot(sys.A, x) + squeeze(dot(sys.B, nan_to_num(ufunc([t]))))
xout = integrate.odeint(fprime, X0, T, args=(sys, ufunc), **kwargs)
yout = dot(sys.C, transpose(xout)) + dot(sys.D, transpose(U))
else:
def fprime(x, t, sys):
"""The vector field of the linear system."""
return dot(sys.A, x)
xout = integrate.odeint(fprime, X0, T, args=(sys,), **kwargs)
yout = dot(sys.C, transpose(xout))
return T, squeeze(transpose(yout)), xout
def _cast_to_array_dtype(in1, in2):
"""Cast array to dtype of other array, while avoiding ComplexWarning.
Those can be raised when casting complex to real.
"""
if numpy.issubdtype(in2.dtype, numpy.float):
# dtype to cast to is not complex, so use .real
in1 = in1.real.astype(in2.dtype)
else:
in1 = in1.astype(in2.dtype)
return in1
def lsim(system, U, T, X0=None, interp=True):
"""
Simulate output of a continuous-time linear system.
Parameters
----------
system : an instance of the LTI class or a tuple describing the system.
The following gives the number of elements in the tuple and
the interpretation:
* 2: (num, den)
* 3: (zeros, poles, gain)
* 4: (A, B, C, D)
U : array_like
An input array describing the input at each time `T`
(interpolation is assumed between given times). If there are
multiple inputs, then each column of the rank-2 array
represents an input. If U = 0 or None, a zero input is used.
T : array_like
The time steps at which the input is defined and at which the
output is desired. Must be nonnegative, increasing, and equally spaced.
X0 : array_like, optional
The initial conditions on the state vector (zero by default).
interp : bool, optional
Whether to use linear (True, the default) or zero-order-hold (False)
interpolation for the input array.
Returns
-------
T : 1D ndarray
Time values for the output.
yout : 1D ndarray
System response.
xout : ndarray
Time evolution of the state vector.
Examples
--------
Simulate a double integrator y'' = u, with a constant input u = 1
>>> from scipy import signal
>>> system = signal.lti([[0., 1.], [0., 0.]], [[0.], [1.]], [[1., 0.]], 0.)
>>> t = np.linspace(0, 5)
>>> u = np.ones_like(t)
>>> tout, y, x = signal.lsim(system, u, t)
>>> import matplotlib.pyplot as plt
>>> plt.plot(t, y)
"""
if isinstance(system, lti):
sys = system.to_ss()
else:
sys = lti(*system).to_ss()
T = atleast_1d(T)
if len(T.shape) != 1:
raise ValueError("T must be a rank-1 array.")
A, B, C, D = map(np.asarray, (sys.A, sys.B, sys.C, sys.D))
n_states = A.shape[0]
n_inputs = B.shape[1]
n_steps = T.size
if X0 is None:
X0 = zeros(n_states, sys.A.dtype)
xout = zeros((n_steps, n_states), sys.A.dtype)
if T[0] == 0:
xout[0] = X0
elif T[0] > 0:
# step forward to initial time, with zero input
xout[0] = dot(X0, linalg.expm(transpose(A) * T[0]))
else:
raise ValueError("Initial time must be nonnegative")
no_input = (U is None
or (isinstance(U, (int, float)) and U == 0.)
or not np.any(U))
if n_steps == 1:
yout = squeeze(dot(xout, transpose(C)))
if not no_input:
yout += squeeze(dot(U, transpose(D)))
return T, squeeze(yout), squeeze(xout)
dt = T[1] - T[0]
if not np.allclose((T[1:] - T[:-1]) / dt, 1.0):
warnings.warn("Non-uniform timesteps are deprecated. Results may be "
"slow and/or inaccurate.", DeprecationWarning)
return lsim2(system, U, T, X0)
if no_input:
# Zero input: just use matrix exponential
# take transpose because state is a row vector
expAT_dt = linalg.expm(transpose(A) * dt)
for i in xrange(1, n_steps):
xout[i] = dot(xout[i-1], expAT_dt)
yout = squeeze(dot(xout, transpose(C)))
return T, squeeze(yout), squeeze(xout)
# Nonzero input
U = atleast_1d(U)
if U.ndim == 1:
U = U[:, np.newaxis]
if U.shape[0] != n_steps:
raise ValueError("U must have the same number of rows "
"as elements in T.")
if U.shape[1] != n_inputs:
raise ValueError("System does not define that many inputs.")
if not interp:
# Zero-order hold
# Algorithm: to integrate from time 0 to time dt, we solve
# xdot = A x + B u, x(0) = x0
# udot = 0, u(0) = u0.
#
# Solution is
# [ x(dt) ] [ A*dt B*dt ] [ x0 ]
# [ u(dt) ] = exp [ 0 0 ] [ u0 ]
M = np.vstack([np.hstack([A * dt, B * dt]),
np.zeros((n_inputs, n_states + n_inputs))])
# transpose everything because the state and input are row vectors
expMT = linalg.expm(transpose(M))
Ad = expMT[:n_states, :n_states]
Bd = expMT[n_states:, :n_states]
for i in xrange(1, n_steps):
xout[i] = dot(xout[i-1], Ad) + dot(U[i-1], Bd)
else:
# Linear interpolation between steps
# Algorithm: to integrate from time 0 to time dt, with linear
# interpolation between inputs u(0) = u0 and u(dt) = u1, we solve
# xdot = A x + B u, x(0) = x0
# udot = (u1 - u0) / dt, u(0) = u0.
#
# Solution is
# [ x(dt) ] [ A*dt B*dt 0 ] [ x0 ]
# [ u(dt) ] = exp [ 0 0 I ] [ u0 ]
# [u1 - u0] [ 0 0 0 ] [u1 - u0]
M = np.vstack([np.hstack([A * dt, B * dt,
np.zeros((n_states, n_inputs))]),
np.hstack([np.zeros((n_inputs, n_states + n_inputs)),
np.identity(n_inputs)]),
np.zeros((n_inputs, n_states + 2 * n_inputs))])
expMT = linalg.expm(transpose(M))
Ad = expMT[:n_states, :n_states]
Bd1 = expMT[n_states+n_inputs:, :n_states]
Bd0 = expMT[n_states:n_states + n_inputs, :n_states] - Bd1
for i in xrange(1, n_steps):
xout[i] = (dot(xout[i-1], Ad) + dot(U[i-1], Bd0) + dot(U[i], Bd1))
yout = (squeeze(dot(xout, transpose(C))) + squeeze(dot(U, transpose(D))))
return T, squeeze(yout), squeeze(xout)
def _default_response_times(A, n):
"""Compute a reasonable set of time samples for the response time.
This function is used by `impulse`, `impulse2`, `step` and `step2`
to compute the response time when the `T` argument to the function
is None.
Parameters
----------
A : ndarray
The system matrix, which is square.
n : int
The number of time samples to generate.
Returns
-------
t : ndarray
The 1-D array of length `n` of time samples at which the response
is to be computed.
"""
# Create a reasonable time interval.
# TODO: This could use some more work.
# For example, what is expected when the system is unstable?
vals = linalg.eigvals(A)
r = min(abs(real(vals)))
if r == 0.0:
r = 1.0
tc = 1.0 / r
t = linspace(0.0, 7 * tc, n)
return t
def impulse(system, X0=None, T=None, N=None):
"""Impulse response of continuous-time system.
Parameters
----------
system : an instance of the LTI class or a tuple of array_like
describing the system.
The following gives the number of elements in the tuple and
the interpretation:
* 2 (num, den)
* 3 (zeros, poles, gain)
* 4 (A, B, C, D)
X0 : array_like, optional
Initial state-vector. Defaults to zero.
T : array_like, optional
Time points. Computed if not given.
N : int, optional
The number of time points to compute (if `T` is not given).
Returns
-------
T : ndarray
A 1-D array of time points.
yout : ndarray
A 1-D array containing the impulse response of the system (except for
singularities at zero).
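    Examples
    --------
    A minimal usage sketch (illustrative second-order system
    ``H(s) = 1 / (s + 1)^2``):
    >>> from scipy import signal
    >>> t, y = signal.impulse(([1.0], [1.0, 2.0, 1.0]))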
"""
if isinstance(system, lti):
sys = system.to_ss()
else:
sys = lti(*system).to_ss()
if X0 is None:
X = squeeze(sys.B)
else:
X = squeeze(sys.B + X0)
if N is None:
N = 100
if T is None:
T = _default_response_times(sys.A, N)
else:
T = asarray(T)
_, h, _ = lsim(sys, 0., T, X, interp=False)
return T, h
def impulse2(system, X0=None, T=None, N=None, **kwargs):
"""
Impulse response of a single-input, continuous-time linear system.
Parameters
----------
system : an instance of the LTI class or a tuple of array_like
describing the system.
The following gives the number of elements in the tuple and
the interpretation:
* 2 (num, den)
* 3 (zeros, poles, gain)
* 4 (A, B, C, D)
X0 : 1-D array_like, optional
The initial condition of the state vector. Default: 0 (the
zero vector).
T : 1-D array_like, optional
The time steps at which the input is defined and at which the
output is desired. If `T` is not given, the function will
generate a set of time samples automatically.
N : int, optional
Number of time points to compute. Default: 100.
kwargs : various types
Additional keyword arguments are passed on to the function
`scipy.signal.lsim2`, which in turn passes them on to
`scipy.integrate.odeint`; see the latter's documentation for
information about these arguments.
Returns
-------
T : ndarray
The time values for the output.
yout : ndarray
The output response of the system.
See Also
--------
impulse, lsim2, integrate.odeint
Notes
-----
The solution is generated by calling `scipy.signal.lsim2`, which uses
the differential equation solver `scipy.integrate.odeint`.
.. versionadded:: 0.8.0
Examples
--------
    Second order system with a repeated root: x''(t) + 2*x'(t) + x(t) = u(t)
>>> from scipy import signal
>>> system = ([1.0], [1.0, 2.0, 1.0])
>>> t, y = signal.impulse2(system)
>>> import matplotlib.pyplot as plt
>>> plt.plot(t, y)
"""
if isinstance(system, lti):
sys = system.to_ss()
else:
sys = lti(*system).to_ss()
B = sys.B
if B.shape[-1] != 1:
raise ValueError("impulse2() requires a single-input system.")
B = B.squeeze()
if X0 is None:
X0 = zeros_like(B)
if N is None:
N = 100
if T is None:
T = _default_response_times(sys.A, N)
# Move the impulse in the input to the initial conditions, and then
# solve using lsim2().
ic = B + X0
Tr, Yr, Xr = lsim2(sys, T=T, X0=ic, **kwargs)
return Tr, Yr
def step(system, X0=None, T=None, N=None):
"""Step response of continuous-time system.
Parameters
----------
system : an instance of the LTI class or a tuple of array_like
describing the system.
The following gives the number of elements in the tuple and
the interpretation:
* 2 (num, den)
* 3 (zeros, poles, gain)
* 4 (A, B, C, D)
X0 : array_like, optional
Initial state-vector (default is zero).
T : array_like, optional
Time points (computed if not given).
N : int, optional
Number of time points to compute if `T` is not given.
Returns
-------
T : 1D ndarray
Output time points.
yout : 1D ndarray
Step response of system.
See also
--------
scipy.signal.step2
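    Examples
    --------
    A minimal usage sketch (illustrative first-order system
    ``H(s) = 1 / (s + 1)``):
    >>> from scipy import signal
    >>> t, y = signal.step(([1.0], [1.0, 1.0]))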
"""
if isinstance(system, lti):
sys = system.to_ss()
else:
sys = lti(*system).to_ss()
if N is None:
N = 100
if T is None:
T = _default_response_times(sys.A, N)
else:
T = asarray(T)
U = ones(T.shape, sys.A.dtype)
vals = lsim(sys, U, T, X0=X0, interp=False)
return vals[0], vals[1]
def step2(system, X0=None, T=None, N=None, **kwargs):
"""Step response of continuous-time system.
This function is functionally the same as `scipy.signal.step`, but
it uses the function `scipy.signal.lsim2` to compute the step
response.
Parameters
----------
system : an instance of the LTI class or a tuple of array_like
describing the system.
The following gives the number of elements in the tuple and
the interpretation:
* 2 (num, den)
* 3 (zeros, poles, gain)
* 4 (A, B, C, D)
X0 : array_like, optional
Initial state-vector (default is zero).
T : array_like, optional
Time points (computed if not given).
N : int, optional
Number of time points to compute if `T` is not given.
kwargs : various types
        Additional keyword arguments are passed on to the function
`scipy.signal.lsim2`, which in turn passes them on to
`scipy.integrate.odeint`. See the documentation for
`scipy.integrate.odeint` for information about these arguments.
Returns
-------
T : 1D ndarray
Output time points.
yout : 1D ndarray
Step response of system.
See also
--------
scipy.signal.step
Notes
-----
.. versionadded:: 0.8.0
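    Examples
    --------
    A minimal usage sketch (illustrative second-order system
    ``H(s) = 1 / (s + 1)^2``):
    >>> from scipy import signal
    >>> t, y = signal.step2(([1.0], [1.0, 2.0, 1.0]))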
"""
if isinstance(system, lti):
sys = system.to_ss()
else:
sys = lti(*system).to_ss()
if N is None:
N = 100
if T is None:
T = _default_response_times(sys.A, N)
else:
T = asarray(T)
U = ones(T.shape, sys.A.dtype)
vals = lsim2(sys, U, T, X0=X0, **kwargs)
return vals[0], vals[1]
def bode(system, w=None, n=100):
"""
Calculate Bode magnitude and phase data of a continuous-time system.
Parameters
----------
system : an instance of the LTI class or a tuple describing the system.
The following gives the number of elements in the tuple and
the interpretation:
* 2 (num, den)
* 3 (zeros, poles, gain)
* 4 (A, B, C, D)
w : array_like, optional
Array of frequencies (in rad/s). Magnitude and phase data is calculated
for every value in this array. If not given a reasonable set will be
calculated.
n : int, optional
Number of frequency points to compute if `w` is not given. The `n`
frequencies are logarithmically spaced in an interval chosen to
include the influence of the poles and zeros of the system.
Returns
-------
w : 1D ndarray
Frequency array [rad/s]
mag : 1D ndarray
Magnitude array [dB]
phase : 1D ndarray
Phase array [deg]
Notes
-----
.. versionadded:: 0.11.0
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> s1 = signal.lti([1], [1, 1])
>>> w, mag, phase = signal.bode(s1)
>>> plt.figure()
>>> plt.semilogx(w, mag) # Bode magnitude plot
>>> plt.figure()
>>> plt.semilogx(w, phase) # Bode phase plot
>>> plt.show()
"""
w, y = freqresp(system, w=w, n=n)
mag = 20.0 * numpy.log10(abs(y))
phase = numpy.unwrap(numpy.arctan2(y.imag, y.real)) * 180.0 / numpy.pi
return w, mag, phase
def freqresp(system, w=None, n=10000):
"""Calculate the frequency response of a continuous-time system.
Parameters
----------
system : an instance of the LTI class or a tuple describing the system.
The following gives the number of elements in the tuple and
the interpretation:
* 2 (num, den)
* 3 (zeros, poles, gain)
* 4 (A, B, C, D)
w : array_like, optional
Array of frequencies (in rad/s). Magnitude and phase data is
calculated for every value in this array. If not given a reasonable
set will be calculated.
n : int, optional
Number of frequency points to compute if `w` is not given. The `n`
frequencies are logarithmically spaced in an interval chosen to
include the influence of the poles and zeros of the system.
Returns
-------
w : 1D ndarray
Frequency array [rad/s]
H : 1D ndarray
Array of complex magnitude values
Examples
--------
# Generating the Nyquist plot of a transfer function
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> s1 = signal.lti([], [1, 1, 1], [5])
# transfer function: H(s) = 5 / (s-1)^3
>>> w, H = signal.freqresp(s1)
>>> plt.figure()
>>> plt.plot(H.real, H.imag, "b")
>>> plt.plot(H.real, -H.imag, "r")
>>> plt.show()
"""
if isinstance(system, lti):
sys = system.to_tf()
else:
sys = lti(*system).to_tf()
if sys.inputs != 1 or sys.outputs != 1:
raise ValueError("freqresp() requires a SISO (single input, single "
"output) system.")
if w is not None:
worN = w
else:
worN = n
# In the call to freqs(), sys.num.ravel() is used because there are
# cases where sys.num is a 2-D array with a single row.
w, h = freqs(sys.num.ravel(), sys.den, worN=worN)
return w, h
# This class will be used by place_poles to return its results
# see http://code.activestate.com/recipes/52308/
class Bunch:
def __init__(self, **kwds):
self.__dict__.update(kwds)
def _valid_inputs(A, B, poles, method, rtol, maxiter):
"""
    Check the poles come in complex conjugate pairs
Check shapes of A, B and poles are compatible.
Check the method chosen is compatible with provided poles
Return update method to use and ordered poles
"""
poles = np.asarray(poles)
if poles.ndim > 1:
raise ValueError("Poles must be a 1D array like.")
# Will raise ValueError if poles do not come in complex conjugates pairs
poles = _order_complex_poles(poles)
if A.ndim > 2:
raise ValueError("A must be a 2D array/matrix.")
if B.ndim > 2:
raise ValueError("B must be a 2D array/matrix")
if A.shape[0] != A.shape[1]:
raise ValueError("A must be square")
if len(poles) > A.shape[0]:
raise ValueError("maximum number of poles is %d but you asked for %d" %
(A.shape[0], len(poles)))
if len(poles) < A.shape[0]:
raise ValueError("number of poles is %d but you should provide %d" %
(len(poles), A.shape[0]))
r = np.linalg.matrix_rank(B)
for p in poles:
if sum(p == poles) > r:
raise ValueError("at least one of the requested pole is repeated "
"more than rank(B) times")
# Choose update method
update_loop = _YT_loop
if method not in ('KNV0','YT'):
raise ValueError("The method keyword must be one of 'YT' or 'KNV0'")
if method == "KNV0":
update_loop = _KNV0_loop
if not all(np.isreal(poles)):
raise ValueError("Complex poles are not supported by KNV0")
if maxiter < 1:
raise ValueError("maxiter must be at least equal to 1")
# We do not check rtol <= 0 as the user can use a negative rtol to
# force maxiter iterations
if rtol > 1:
raise ValueError("rtol can not be greater than 1")
return update_loop, poles
def _order_complex_poles(poles):
"""
    Check we have complex conjugate pairs and reorder P according to YT, i.e.
real_poles, complex_i, conjugate complex_i, ....
The lexicographic sort on the complex poles is added to help the user to
compare sets of poles.
"""
ordered_poles = np.sort(poles[np.isreal(poles)])
im_poles = []
for p in np.sort(poles[np.imag(poles) < 0]):
if np.conj(p) in poles:
im_poles.extend((p, np.conj(p)))
ordered_poles = np.hstack((ordered_poles, im_poles))
if poles.shape[0] != len(ordered_poles):
raise ValueError("Complex poles must come with their conjugates")
return ordered_poles
def _KNV0(B, ker_pole, transfer_matrix, j, poles):
"""
Algorithm "KNV0" Kautsky et Al. Robust pole
assignment in linear state feedback, Int journal of Control
1985, vol 41 p 1129->1155
http://la.epfl.ch/files/content/sites/la/files/
users/105941/public/KautskyNicholsDooren
"""
    # Remove xj from the basis
transfer_matrix_not_j = np.delete(transfer_matrix, j, axis=1)
# If we QR this matrix in full mode Q=Q0|Q1
    # then Q1 will be a single column orthogonal to
# Q0, that's what we are looking for !
# After merge of gh-4249 great speed improvements could be achieved
# using QR updates instead of full QR in the line below
# To debug with numpy qr uncomment the line below
# Q, R = np.linalg.qr(transfer_matrix_not_j, mode="complete")
Q, R = s_qr(transfer_matrix_not_j, mode="full")
mat_ker_pj = np.dot(ker_pole[j], ker_pole[j].T)
yj = np.dot(mat_ker_pj, Q[:, -1])
# If Q[:, -1] is "almost" orthogonal to ker_pole[j] its
# projection into ker_pole[j] will yield a vector
# close to 0. As we are looking for a vector in ker_pole[j]
# simply stick with transfer_matrix[:, j] (unless someone provides me with
# a better choice ?)
if not np.allclose(yj, 0):
xj = yj/np.linalg.norm(yj)
transfer_matrix[:, j] = xj
# KNV does not support complex poles, using YT technique the two lines
# below seem to work 9 out of 10 times but it is not reliable enough:
# transfer_matrix[:, j]=real(xj)
# transfer_matrix[:, j+1]=imag(xj)
# Add this at the beginning of this function if you wish to test
# complex support:
# if ~np.isreal(P[j]) and (j>=B.shape[0]-1 or P[j]!=np.conj(P[j+1])):
# return
    # Problems arise when imag(xj) => 0; I have no idea how to fix this
def _YT_real(ker_pole, Q, transfer_matrix, i, j):
"""
Applies algorithm from YT section 6.1 page 19 related to real pairs
"""
# step 1 page 19
u = Q[:, -2, np.newaxis]
v = Q[:, -1, np.newaxis]
# step 2 page 19
m = np.dot(np.dot(ker_pole[i].T, np.dot(u, v.T) -
np.dot(v, u.T)), ker_pole[j])
# step 3 page 19
um, sm, vm = np.linalg.svd(m)
    # mu1, mu2: the first two columns of U => the first two rows of U.T
mu1, mu2 = um.T[:2, :, np.newaxis]
    # vm is V.T; with numpy we want the first two rows of V.T
nu1, nu2 = vm[:2, :, np.newaxis]
# what follows is a rough python translation of the formulas
# in section 6.2 page 20 (step 4)
transfer_matrix_j_mo_transfer_matrix_j = np.vstack((
transfer_matrix[:, i, np.newaxis],
transfer_matrix[:, j, np.newaxis]))
if not np.allclose(sm[0], sm[1]):
ker_pole_imo_mu1 = np.dot(ker_pole[i], mu1)
ker_pole_i_nu1 = np.dot(ker_pole[j], nu1)
ker_pole_mu_nu = np.vstack((ker_pole_imo_mu1, ker_pole_i_nu1))
else:
ker_pole_ij = np.vstack((
np.hstack((ker_pole[i],
np.zeros(ker_pole[i].shape))),
np.hstack((np.zeros(ker_pole[j].shape),
ker_pole[j]))
))
mu_nu_matrix = np.vstack(
(np.hstack((mu1, mu2)), np.hstack((nu1, nu2)))
)
ker_pole_mu_nu = np.dot(ker_pole_ij, mu_nu_matrix)
transfer_matrix_ij = np.dot(np.dot(ker_pole_mu_nu, ker_pole_mu_nu.T),
transfer_matrix_j_mo_transfer_matrix_j)
if not np.allclose(transfer_matrix_ij, 0):
transfer_matrix_ij = (np.sqrt(2)*transfer_matrix_ij /
np.linalg.norm(transfer_matrix_ij))
transfer_matrix[:, i] = transfer_matrix_ij[
:transfer_matrix[:, i].shape[0], 0
]
transfer_matrix[:, j] = transfer_matrix_ij[
transfer_matrix[:, i].shape[0]:, 0
]
else:
# As in knv0 if transfer_matrix_j_mo_transfer_matrix_j is orthogonal to
# Vect{ker_pole_mu_nu} assign transfer_matrixi/transfer_matrix_j to
# ker_pole_mu_nu and iterate. As we are looking for a vector in
# Vect{Matker_pole_MU_NU} (see section 6.1 page 19) this might help
# (that's a guess, not a claim !)
transfer_matrix[:, i] = ker_pole_mu_nu[
:transfer_matrix[:, i].shape[0], 0
]
transfer_matrix[:, j] = ker_pole_mu_nu[
transfer_matrix[:, i].shape[0]:, 0
]
def _YT_complex(ker_pole, Q, transfer_matrix, i, j):
"""
Applies algorithm from YT section 6.2 page 20 related to complex pairs
"""
# step 1 page 20
ur = np.sqrt(2)*Q[:, -2, np.newaxis]
ui = np.sqrt(2)*Q[:, -1, np.newaxis]
u = ur + 1j*ui
# step 2 page 20
ker_pole_ij = ker_pole[i]
m = np.dot(np.dot(np.conj(ker_pole_ij.T), np.dot(u, np.conj(u).T) -
np.dot(np.conj(u), u.T)), ker_pole_ij)
# step 3 page 20
e_val, e_vec = np.linalg.eig(m)
    # sort eigenvalues according to their modulus
e_val_idx = np.argsort(np.abs(e_val))
mu1 = e_vec[:, e_val_idx[-1], np.newaxis]
mu2 = e_vec[:, e_val_idx[-2], np.newaxis]
# what follows is a rough python translation of the formulas
# in section 6.2 page 20 (step 4)
# remember transfer_matrix_i has been split as
# transfer_matrix[i]=real(transfer_matrix_i) and
# transfer_matrix[j]=imag(transfer_matrix_i)
transfer_matrix_j_mo_transfer_matrix_j = (
transfer_matrix[:, i, np.newaxis] +
1j*transfer_matrix[:, j, np.newaxis]
)
if not np.allclose(np.abs(e_val[e_val_idx[-1]]),
np.abs(e_val[e_val_idx[-2]])):
ker_pole_mu = np.dot(ker_pole_ij, mu1)
else:
mu1_mu2_matrix = np.hstack((mu1, mu2))
ker_pole_mu = np.dot(ker_pole_ij, mu1_mu2_matrix)
transfer_matrix_i_j = np.dot(np.dot(ker_pole_mu, np.conj(ker_pole_mu.T)),
transfer_matrix_j_mo_transfer_matrix_j)
if not np.allclose(transfer_matrix_i_j, 0):
transfer_matrix_i_j = (transfer_matrix_i_j /
np.linalg.norm(transfer_matrix_i_j))
transfer_matrix[:, i] = np.real(transfer_matrix_i_j[:, 0])
transfer_matrix[:, j] = np.imag(transfer_matrix_i_j[:, 0])
else:
# same idea as in YT_real
transfer_matrix[:, i] = np.real(ker_pole_mu[:, 0])
transfer_matrix[:, j] = np.imag(ker_pole_mu[:, 0])
def _YT_loop(ker_pole, transfer_matrix, poles, B, maxiter, rtol):
"""
Algorithm "YT" Tits, Yang. Globally Convergent
Algorithms for Robust Pole Assignment by State Feedback
http://drum.lib.umd.edu/handle/1903/5598
    The poles P have to be sorted according to section 6.2 page 20
"""
# The IEEE edition of the YT paper gives useful information on the
# optimal update order for the real poles in order to minimize the number
# of times we have to loop over all poles, see page 1442
nb_real = poles[np.isreal(poles)].shape[0]
# hnb => Half Nb Real
hnb = nb_real // 2
    # Stick to the indices in the paper and then subtract one to get numpy
    # array indices; it is a bit easier to link the code to the paper this
    # way, even if it is not very clean. The paper is unclear about what
    # should be done when there is only one real pole => using KNV0 on this
    # real pole seems to work
if nb_real > 0:
#update the biggest real pole with the smallest one
update_order = [[nb_real], [1]]
else:
update_order = [[],[]]
r_comp = np.arange(nb_real+1, len(poles)+1, 2)
# step 1.a
r_p = np.arange(1, hnb+nb_real % 2)
update_order[0].extend(2*r_p)
update_order[1].extend(2*r_p+1)
# step 1.b
update_order[0].extend(r_comp)
update_order[1].extend(r_comp+1)
# step 1.c
r_p = np.arange(1, hnb+1)
update_order[0].extend(2*r_p-1)
update_order[1].extend(2*r_p)
# step 1.d
if hnb == 0 and np.isreal(poles[0]):
update_order[0].append(1)
update_order[1].append(1)
update_order[0].extend(r_comp)
update_order[1].extend(r_comp+1)
# step 2.a
r_j = np.arange(2, hnb+nb_real % 2)
for j in r_j:
for i in range(1, hnb+1):
update_order[0].append(i)
update_order[1].append(i+j)
# step 2.b
if hnb == 0 and np.isreal(poles[0]):
update_order[0].append(1)
update_order[1].append(1)
update_order[0].extend(r_comp)
update_order[1].extend(r_comp+1)
# step 2.c
r_j = np.arange(2, hnb+nb_real % 2)
for j in r_j:
for i in range(hnb+1, nb_real+1):
idx_1 = i+j
if idx_1 > nb_real:
idx_1 = i+j-nb_real
update_order[0].append(i)
update_order[1].append(idx_1)
# step 2.d
if hnb == 0 and np.isreal(poles[0]):
update_order[0].append(1)
update_order[1].append(1)
update_order[0].extend(r_comp)
update_order[1].extend(r_comp+1)
# step 3.a
for i in range(1, hnb+1):
update_order[0].append(i)
update_order[1].append(i+hnb)
# step 3.b
if hnb == 0 and np.isreal(poles[0]):
update_order[0].append(1)
update_order[1].append(1)
update_order[0].extend(r_comp)
update_order[1].extend(r_comp+1)
update_order = np.array(update_order).T-1
stop = False
nb_try = 0
while nb_try < maxiter and not stop:
det_transfer_matrixb = np.abs(np.linalg.det(transfer_matrix))
for i, j in update_order:
if i == j:
assert i == 0, "i!=0 for KNV call in YT"
assert np.isreal(poles[i]), "calling KNV on a complex pole"
_KNV0(B, ker_pole, transfer_matrix, i, poles)
else:
transfer_matrix_not_i_j = np.delete(transfer_matrix, (i, j),
axis=1)
# after merge of gh-4249 great speed improvements could be
# achieved using QR updates instead of full QR in the line below
#to debug with numpy qr uncomment the line below
#Q, _ = np.linalg.qr(transfer_matrix_not_i_j, mode="complete")
Q, _ = s_qr(transfer_matrix_not_i_j, mode="full")
if np.isreal(poles[i]):
assert np.isreal(poles[j]), "mixing real and complex " + \
"in YT_real" + str(poles)
_YT_real(ker_pole, Q, transfer_matrix, i, j)
else:
                    assert ~np.isreal(poles[i]), "mixing real and complex " + \
                        "in YT_complex" + str(poles)
_YT_complex(ker_pole, Q, transfer_matrix, i, j)
det_transfer_matrix = np.max((np.sqrt(np.spacing(1)),
np.abs(np.linalg.det(transfer_matrix))))
cur_rtol = np.abs(
(det_transfer_matrix -
det_transfer_matrixb) /
det_transfer_matrix)
if cur_rtol < rtol and det_transfer_matrix > np.sqrt(np.spacing(1)):
# Convergence test from YT page 21
stop = True
nb_try += 1
return stop, cur_rtol, nb_try
def _KNV0_loop(ker_pole, transfer_matrix, poles, B, maxiter, rtol):
"""
Loop over all poles one by one and apply KNV method 0 algorithm
"""
# This method is useful only because we need to be able to call
# _KNV0 from YT without looping over all poles, otherwise it would
# have been fine to mix _KNV0_loop and _KNV0 in a single function
stop = False
nb_try = 0
while nb_try < maxiter and not stop:
det_transfer_matrixb = np.abs(np.linalg.det(transfer_matrix))
for j in range(B.shape[0]):
_KNV0(B, ker_pole, transfer_matrix, j, poles)
det_transfer_matrix = np.max((np.sqrt(np.spacing(1)),
np.abs(np.linalg.det(transfer_matrix))))
cur_rtol = np.abs((det_transfer_matrix - det_transfer_matrixb) /
det_transfer_matrix)
if cur_rtol < rtol and det_transfer_matrix > np.sqrt(np.spacing(1)):
# Convergence test from YT page 21
stop = True
nb_try += 1
return stop, cur_rtol, nb_try
def place_poles(A, B, poles, method="YT", rtol=1e-3, maxiter=30):
"""
    Compute K such that the eigenvalues of ``A - dot(B, K)`` are `poles`.
    K is the gain matrix such that the plant described by the linear system
    ``AX+BU`` will have its closed-loop poles, i.e. the eigenvalues ``A - B*K``,
    as close as possible to those asked for in `poles`.
SISO, MISO and MIMO systems are supported.
Parameters
----------
A, B : ndarray
State-space representation of linear system ``AX + BU``.
poles : array_like
Desired real poles and/or complex conjugates poles.
Complex poles are only supported with ``method="YT"`` (default).
method: {'YT', 'KNV0'}, optional
Which method to choose to find the gain matrix K. One of:
- 'YT': Yang Tits
- 'KNV0': Kautsky, Nichols, Van Dooren update method 0
See References and Notes for details on the algorithms.
rtol: float, optional
After each iteration the determinant of the eigenvectors of
``A - B*K`` is compared to its previous value, when the relative
error between these two values becomes lower than `rtol` the algorithm
stops. Default is 1e-3.
maxiter: int, optional
Maximum number of iterations to compute the gain matrix.
Default is 30.
Returns
-------
full_state_feedback : Bunch object
full_state_feedback is composed of:
gain_matrix : 1-D ndarray
            The closed loop matrix K such that the eigenvalues of ``A-BK``
are as close as possible to the requested poles.
computed_poles : 1-D ndarray
The poles corresponding to ``A-BK`` sorted as first the real
            poles in increasing order, then the complex conjugates in
lexicographic order.
requested_poles : 1-D ndarray
The poles the algorithm was asked to place sorted as above,
they may differ from what was achieved.
X : 2-D ndarray
            The transfer matrix such that ``X * diag(poles) = (A - B*K)*X``
(see Notes)
rtol : float
The relative tolerance achieved on ``det(X)`` (see Notes).
`rtol` will be NaN if it is possible to solve the system
``diag(poles) = (A - B*K)``, or 0 when the optimization
algorithms can't do anything i.e when ``B.shape[1] == 1``.
nb_iter : int
The number of iterations performed before converging.
`nb_iter` will be NaN if it is possible to solve the system
``diag(poles) = (A - B*K)``, or 0 when the optimization
algorithms can't do anything i.e when ``B.shape[1] == 1``.
Notes
-----
The Tits and Yang (YT), [2]_ paper is an update of the original Kautsky et
al. (KNV) paper [1]_. KNV relies on rank-1 updates to find the transfer
matrix X such that ``X * diag(poles) = (A - B*K)*X``, whereas YT uses
rank-2 updates. This yields on average more robust solutions (see [2]_
pp 21-22), furthermore the YT algorithm supports complex poles whereas KNV
does not in its original version. Only update method 0 proposed by KNV has
been implemented here, hence the name ``'KNV0'``.
KNV extended to complex poles is used in Matlab's ``place`` function, YT is
distributed under a non-free licence by Slicot under the name ``robpole``.
It is unclear and undocumented how KNV0 has been extended to complex poles
(Tits and Yang claim on page 14 of their paper that their method can not be
used to extend KNV to complex poles), therefore only YT supports them in
this implementation.
As the solution to the problem of pole placement is not unique for MIMO
systems, both methods start with a tentative transfer matrix which is
    altered in various ways to increase its determinant. Both methods have been
proven to converge to a stable solution, however depending on the way the
initial transfer matrix is chosen they will converge to different
solutions and therefore there is absolutely no guarantee that using
``'KNV0'`` will yield results similar to Matlab's or any other
implementation of these algorithms.
Using the default method ``'YT'`` should be fine in most cases; ``'KNV0'``
is only provided because it is needed by ``'YT'`` in some specific cases.
Furthermore ``'YT'`` gives on average more robust results than ``'KNV0'``
when ``abs(det(X))`` is used as a robustness indicator.
[2]_ is available as a technical report on the following URL:
http://drum.lib.umd.edu/handle/1903/5598
References
----------
.. [1] J. Kautsky, N.K. Nichols and P. van Dooren, "Robust pole assignment
in linear state feedback", International Journal of Control, Vol. 41
pp. 1129-1155, 1985.
.. [2] A.L. Tits and Y. Yang, "Globally convergent algorithms for robust
        pole assignment by state feedback", IEEE Transactions on Automatic
Control, Vol. 41, pp. 1432-1452, 1996.
Examples
--------
A simple example demonstrating real pole placement using both KNV and YT
algorithms. This is example number 1 from section 4 of the reference KNV
publication ([1]_):
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> A = np.array([[ 1.380, -0.2077, 6.715, -5.676 ],
... [-0.5814, -4.290, 0, 0.6750 ],
... [ 1.067, 4.273, -6.654, 5.893 ],
... [ 0.0480, 4.273, 1.343, -2.104 ]])
>>> B = np.array([[ 0, 5.679 ],
... [ 1.136, 1.136 ],
... [ 0, 0, ],
... [-3.146, 0 ]])
>>> P = np.array([-0.2, -0.5, -5.0566, -8.6659])
Now compute K with KNV method 0, with the default YT method and with the YT
method while forcing 100 iterations of the algorithm and print some results
after each call.
>>> fsf1 = signal.place_poles(A, B, P, method='KNV0')
>>> fsf1.gain_matrix
array([[ 0.20071427, -0.96665799, 0.24066128, -0.10279785],
[ 0.50587268, 0.57779091, 0.51795763, -0.41991442]])
>>> fsf2 = signal.place_poles(A, B, P) # uses YT method
>>> fsf2.computed_poles
array([-8.6659, -5.0566, -0.5 , -0.2 ])
>>> fsf3 = signal.place_poles(A, B, P, rtol=-1, maxiter=100)
>>> fsf3.X
array([[ 0.52072442+0.j, -0.08409372+0.j, -0.56847937+0.j, 0.74823657+0.j],
[-0.04977751+0.j, -0.80872954+0.j, 0.13566234+0.j, -0.29322906+0.j],
[-0.82266932+0.j, -0.19168026+0.j, -0.56348322+0.j, -0.43815060+0.j],
[ 0.22267347+0.j, 0.54967577+0.j, -0.58387806+0.j, -0.40271926+0.j]])
The absolute value of the determinant of X is a good indicator to check the
robustness of the results, both ``'KNV0'`` and ``'YT'`` aim at maximizing
it. Below a comparison of the robustness of the results above:
>>> abs(np.linalg.det(fsf1.X)) < abs(np.linalg.det(fsf2.X))
True
>>> abs(np.linalg.det(fsf2.X)) < abs(np.linalg.det(fsf3.X))
True
Now a simple example for complex poles:
>>> A = np.array([[ 0, 7/3., 0, 0 ],
... [ 0, 0, 0, 7/9. ],
... [ 0, 0, 0, 0 ],
... [ 0, 0, 0, 0 ]])
>>> B = np.array([[ 0, 0 ],
... [ 0, 0 ],
... [ 1, 0 ],
... [ 0, 1 ]])
>>> P = np.array([-3, -1, -2-1j, -2+1j]) / 3.
>>> fsf = signal.place_poles(A, B, P, method='YT')
We can plot the desired and computed poles in the complex plane:
>>> t = np.linspace(0, 2*np.pi, 401)
>>> plt.plot(np.cos(t), np.sin(t), 'k--') # unit circle
>>> plt.plot(fsf.requested_poles.real, fsf.requested_poles.imag,
... 'wo', label='Desired')
>>> plt.plot(fsf.computed_poles.real, fsf.computed_poles.imag, 'bx',
... label='Placed')
>>> plt.grid()
>>> plt.axis('image')
>>> plt.axis([-1.1, 1.1, -1.1, 1.1])
>>> plt.legend(bbox_to_anchor=(1.05, 1), loc=2, numpoints=1)
"""
# Move away all the inputs checking, it only adds noise to the code
update_loop, poles = _valid_inputs(A, B, poles, method, rtol, maxiter)
# The current value of the relative tolerance we achieved
cur_rtol = 0
# The number of iterations needed before converging
nb_iter = 0
# Step A: QR decomposition of B page 1132 KN
# to debug with numpy qr uncomment the line below
# u, z = np.linalg.qr(B, mode="complete")
u, z = s_qr(B, mode="full")
rankB = np.linalg.matrix_rank(B)
u0 = u[:, :rankB]
u1 = u[:, rankB:]
z = z[:rankB, :]
# If we can use the identity matrix as X the solution is obvious
if B.shape[0] == rankB:
# if B is square and full rank there is only one solution
# such as (A+BK)=inv(X)*diag(P)*X with X=eye(A.shape[0])
# i.e K=inv(B)*(diag(P)-A)
# if B has as many lines as its rank (but not square) there are many
# solutions and we can choose one using least squares
# => use lstsq in both cases.
# In both cases the transfer matrix X will be eye(A.shape[0]) and I
# can hardly think of a better one so there is nothing to optimize
#
# for complex poles we use the following trick
#
        # |a -b| has for eigenvalues a+bi and a-bi
        # |b  a|
#
# |a+bi 0| has the obvious eigenvalues a+bi and a-bi
# |0 a-bi|
#
# e.g solving the first one in R gives the solution
# for the second one in C
diag_poles = np.zeros(A.shape)
idx = 0
while idx < poles.shape[0]:
p = poles[idx]
diag_poles[idx, idx] = np.real(p)
if ~np.isreal(p):
diag_poles[idx, idx+1] = -np.imag(p)
diag_poles[idx+1, idx+1] = np.real(p)
diag_poles[idx+1, idx] = np.imag(p)
idx += 1 # skip next one
idx += 1
gain_matrix = np.linalg.lstsq(B, diag_poles-A)[0]
transfer_matrix = np.eye(A.shape[0])
cur_rtol = np.nan
nb_iter = np.nan
else:
        # step A (page 1144 KNV) and beginning of step F: decompose
# dot(U1.T, A-P[i]*I).T and build our set of transfer_matrix vectors
# in the same loop
ker_pole = []
# flag to skip the conjugate of a complex pole
skip_conjugate = False
# select orthonormal base ker_pole for each Pole and vectors for
# transfer_matrix
for j in range(B.shape[0]):
if skip_conjugate:
skip_conjugate = False
continue
pole_space_j = np.dot(u1.T, A-poles[j]*np.eye(B.shape[0])).T
# after QR Q=Q0|Q1
# only Q0 is used to reconstruct the qr'ed (dot Q, R) matrix.
            # Q1 is orthogonal to Q0 and will be multiplied by the zeros in
# R when using mode "complete". In default mode Q1 and the zeros
# in R are not computed
# To debug with numpy qr uncomment the line below
# Q, _ = np.linalg.qr(pole_space_j, mode="complete")
Q, _ = s_qr(pole_space_j, mode="full")
ker_pole_j = Q[:, pole_space_j.shape[1]:]
# We want to select one vector in ker_pole_j to build the transfer
            # matrix, however qr sometimes returns vectors with zeros on the
# same line for each pole and this yields very long convergence
# times.
            # Or, at other times, a set of vectors, one with zero imaginary
            # part and one (or several) with imaginary parts. After trying
            # many ways to select the best possible one (e.g. ditch vectors
            # with zero imaginary part for complex poles) I ended up summing
            # all vectors in ker_pole_j, this solves 100% of the problems and
            # is a valid choice for transfer_matrix.
            # This way for complex poles we are sure to have a non zero
            # imaginary part, and the problem of lines full of zeros in
            # transfer_matrix is solved too: when a vector from ker_pole_j
            # has a zero, the other one(s) (when ker_pole_j.shape[1] > 1)
            # for sure won't have a zero there.
transfer_matrix_j = np.sum(ker_pole_j, axis=1)[:, np.newaxis]
transfer_matrix_j = (transfer_matrix_j /
np.linalg.norm(transfer_matrix_j))
if ~np.isreal(poles[j]): # complex pole
transfer_matrix_j = np.hstack([np.real(transfer_matrix_j),
np.imag(transfer_matrix_j)])
ker_pole.extend([ker_pole_j, ker_pole_j])
# Skip next pole as it is the conjugate
skip_conjugate = True
else: # real pole, nothing to do
ker_pole.append(ker_pole_j)
if j == 0:
transfer_matrix = transfer_matrix_j
else:
transfer_matrix = np.hstack((transfer_matrix, transfer_matrix_j))
if rankB > 1: # otherwise there is nothing we can optimize
stop, cur_rtol, nb_iter = update_loop(ker_pole, transfer_matrix,
poles, B, maxiter, rtol)
if not stop and rtol > 0:
# if rtol<=0 the user has probably done that on purpose,
# don't annoy him
err_msg = (
"Convergence was not reached after maxiter iterations.\n"
"You asked for a relative tolerance of %f we got %f" %
(rtol, cur_rtol)
)
warnings.warn(err_msg)
# reconstruct transfer_matrix to match complex conjugate pairs,
# ie transfer_matrix_j/transfer_matrix_j+1 are
# Re(Complex_pole), Im(Complex_pole) now and will be Re-Im/Re+Im after
transfer_matrix = transfer_matrix.astype(complex)
idx = 0
while idx < poles.shape[0]-1:
if ~np.isreal(poles[idx]):
rel = transfer_matrix[:, idx].copy()
img = transfer_matrix[:, idx+1]
# rel will be an array referencing a column of transfer_matrix
            # if we don't copy() it will change after the next line and
            # the line after will not yield the correct value
transfer_matrix[:, idx] = rel-1j*img
transfer_matrix[:, idx+1] = rel+1j*img
idx += 1 # skip next one
idx += 1
try:
m = np.linalg.solve(transfer_matrix.T, np.dot(np.diag(poles),
transfer_matrix.T)).T
gain_matrix = np.linalg.solve(z, np.dot(u0.T, m-A))
except np.linalg.LinAlgError:
raise ValueError("The poles you've chosen can't be placed. "
"Check the controllability matrix and try "
"another set of poles")
# Beware: Kautsky solves A+BK but the usual form is A-BK
gain_matrix = -gain_matrix
# K still contains complex with ~=0j imaginary parts, get rid of them
gain_matrix = np.real(gain_matrix)
full_state_feedback = Bunch()
full_state_feedback.gain_matrix = gain_matrix
full_state_feedback.computed_poles = _order_complex_poles(
np.linalg.eig(A - np.dot(B, gain_matrix))[0]
)
full_state_feedback.requested_poles = poles
full_state_feedback.X = transfer_matrix
full_state_feedback.rtol = cur_rtol
full_state_feedback.nb_iter = nb_iter
return full_state_feedback
| bsd-3-clause |
evgchz/scikit-learn | sklearn/utils/setup.py | 296 | 2884 | import os
from os.path import join
from sklearn._build_utils import get_blas_info
def configuration(parent_package='', top_path=None):
import numpy
from numpy.distutils.misc_util import Configuration
config = Configuration('utils', parent_package, top_path)
config.add_subpackage('sparsetools')
cblas_libs, blas_info = get_blas_info()
cblas_compile_args = blas_info.pop('extra_compile_args', [])
cblas_includes = [join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])]
libraries = []
if os.name == 'posix':
libraries.append('m')
cblas_libs.append('m')
config.add_extension('sparsefuncs_fast', sources=['sparsefuncs_fast.c'],
libraries=libraries)
config.add_extension('arrayfuncs',
sources=['arrayfuncs.c'],
depends=[join('src', 'cholesky_delete.h')],
libraries=cblas_libs,
include_dirs=cblas_includes,
extra_compile_args=cblas_compile_args,
**blas_info
)
config.add_extension(
'murmurhash',
sources=['murmurhash.c', join('src', 'MurmurHash3.cpp')],
include_dirs=['src'])
config.add_extension('lgamma',
sources=['lgamma.c', join('src', 'gamma.c')],
include_dirs=['src'],
libraries=libraries)
config.add_extension('graph_shortest_path',
sources=['graph_shortest_path.c'],
include_dirs=[numpy.get_include()])
config.add_extension('fast_dict',
sources=['fast_dict.cpp'],
language="c++",
include_dirs=[numpy.get_include()],
libraries=libraries)
config.add_extension('seq_dataset',
sources=['seq_dataset.c'],
include_dirs=[numpy.get_include()])
config.add_extension('weight_vector',
sources=['weight_vector.c'],
include_dirs=cblas_includes,
libraries=cblas_libs,
**blas_info)
config.add_extension("_random",
sources=["_random.c"],
include_dirs=[numpy.get_include()],
libraries=libraries)
config.add_extension("_logistic_sigmoid",
sources=["_logistic_sigmoid.c"],
include_dirs=[numpy.get_include()],
libraries=libraries)
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
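# Usage sketch (an assumption about the typical workflow, not something this
# file documents): these extensions are usually compiled in place with
#   python setup.py build_ext --inplace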
| bsd-3-clause |
amifsud/sot-state-observation | python/test/elasticityCalibration.py | 2 | 2848 | import numpy as np
from numpy import linalg as LA
import matplotlib.pyplot as plt
from scipy import stats
path = 'logs/hrp-2/static-calibration/ds-frontal/robot/2015-05-28/'
#path = 'logs/hrp-2/static-calibration/ss-frontal/simu/2014-05-30/'
#path = 'logs/hrp-2/static-calibration/ds-lateral/simu/2014-06-05/'
robotMass = 58.0
gravity = 9.8
dslateral =False
axis = 'x'
spaceBetweenFeet = 0.19
size = 33
period = 3200
firstTime = 35690
dslateral = False
axis = 'x'
fl = np.genfromtxt (path+"HRP2LAAS-forceLLEG.dat")
fr = np.genfromtxt (path+"HRP2LAAS-forceRLEG.dat")
fr = fr.copy()
fr.resize(fl.shape[0],fr.shape[1])
f = (fl + fr)
m = (fl - fr)
realCom = np.genfromtxt (path+"real-com-sout.dat")
flexThetaU = np.genfromtxt (path+"/com-stabilizedEstimator-flexThetaU.dat")
#size = 81 #ds-frontal/simu/2014-05-30/
samplef = np.array((0.0,)*size)
sampleCom = np.array((0.0,)*size)
sampleTh = np.array((0.0,)*size)
i0 = 5
i1 = 1
i2 = 2
if axis =='y':
i0=4
i1=2
i2=1
firstIndice = firstTime-flexThetaU[0,0]
for i in range(0,size):
index = firstIndice + period*i
samplef[i] = f[index,i0]
if dslateral:
samplef[i] = m[index,3] * spaceBetweenFeet/2
sampleCom[i] = realCom[index,i1] * robotMass * gravity
sampleTh[i] = flexThetaU[index,i2]
slope, intercept, r_value, p_value, std_err = stats.linregress(samplef,sampleTh)
line = slope*samplef+intercept
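# Hedged reading of the fit (assumption: a linear flexibility model
# theta ~ tau / k + offset, which is what the regression above estimates):
# the slope approximates the joint compliance, so a stiffness estimate would be
#   k_est = 1.0 / slope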
#comRef = np.genfromtxt ("/tmp/comref-sout.dat")
#er[:,1:er.shape[1]]=er[:,1:er.shape[1]]-er2[:,1:er.shape[1]]
f1 = plt.figure();ax1 = f1.add_subplot(111)
f2 = plt.figure();ax2 = f2.add_subplot(111)
f3 = plt.figure();ax3 = f3.add_subplot(111)
#f4 = plt.figure();ax4 = f4.add_subplot(111)
#f5 = plt.figure();ax5 = f5.add_subplot(111)
#f6 = plt.figure();ax6 = f6.add_subplot(111)
#f7 = plt.figure();ax7 = f7.add_subplot(111)
#f8 = plt.figure();ax8 = f8.add_subplot(111)
#f9 = plt.figure();ax9 = f9.add_subplot(111)
#f10 = plt.figure();ax10=f10.add_subplot(111)
ax1.plot(samplef , sampleCom , label='tau Vs CoM')
ax2.plot(samplef , sampleTh , label='tau Vs Th')
ax2.plot(samplef , line , label='linearRegression')
ax3.plot(sampleCom , sampleTh, label='CoM Vs Th')
handles, labels = ax1.get_legend_handles_labels()
ax1.legend(handles, labels)
handles, labels = ax2.get_legend_handles_labels()
ax2.legend(handles, labels)
handles, labels = ax3.get_legend_handles_labels()
ax3.legend(handles, labels)
#handles, labels = ax4.get_legend_handles_labels()
#ax4.legend(handles, labels)
#handles, labels = ax5.get_legend_handles_labels()
#ax5.legend(handles, labels)
#handles, labels = ax6.get_legend_handles_labels()
#ax6.legend(handles, labels)
#handles, labels = ax7.get_legend_handles_labels()
#ax7.legend(handles, labels)
#handles, labels = ax10.get_legend_handles_labels()
#ax10.legend(handles, labels)
plt.show()
| lgpl-3.0 |
magic2du/contact_matrix | Contact_maps/DeepLearning/DeepLearningTool/DL_contact_matrix_load2-new10fold_03_12_2015_parallel.py | 2 | 43344 |
# coding: utf-8
# In[5]:
import sys, os
sys.path.append('../../../libs/')
import os.path
import IO_class
from IO_class import FileOperator
from sklearn import cross_validation
import sklearn
import csv
from dateutil import parser
from datetime import timedelta
from sklearn import svm
import numpy as np
import pandas as pd
import pdb
import pickle
import numpy as np
from sklearn.cross_validation import train_test_split
from sklearn.cross_validation import KFold
from sklearn import preprocessing
import sklearn
import scipy.stats as ss
from sklearn.svm import LinearSVC
import random
from DL_libs import *
from itertools import izip #new
import math
from sklearn.svm import SVC
# In[6]:
#filename = 'SUCCESS_log_CrossValidation_load_DL_remoteFisherM1_DL_RE_US_DL_RE_US_1_1_19MAY2014.txt'
#filename = 'listOfDDIsHaveOver2InterfacesHave40-75_Examples_2010_real_selected.txt' #for testing
# set settings for this script
settings = {}
settings['filename'] = 'ddi_examples_40_60_over2top_diff_name_2014.txt'
settings['fisher_mode'] = 'FisherM1ONLY'# settings['fisher_mode'] = 'FisherM1ONLY'
settings['with_auc_score'] = False
settings['reduce_ratio'] = 1
settings['SVM'] = 0
settings['DL'] = 1
settings['SAE_SVM'] = 1
settings['SAE_SVM_COMBO'] = 1
settings['SVM_RBF'] = 1
settings['SAE_SVM_RBF'] = 1
settings['SAE_SVM_RBF_COMBO'] = 1
settings['SVM_POLY'] = 0
settings['DL_S'] = 1
settings['DL_U'] = 0
settings['finetune_lr'] = 1
settings['batch_size'] = 100
settings['pretraining_interations'] = 5001
settings['pretrain_lr'] = 0.001
settings['training_epochs'] = 20001
settings['hidden_layers_sizes'] = [100, 100]
settings['corruption_levels'] = [0, 0]
filename = settings['filename']
file_obj = FileOperator(filename)
ddis = file_obj.readStripLines()
import logging
import time
current_date = time.strftime("%m_%d_%Y")
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
logname = 'log_DL_contact_matrix_load' + current_date + '.log'
handler = logging.FileHandler(logname)
handler.setLevel(logging.DEBUG)
# create a logging format
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
# add the handlers to the logger
logger.addHandler(handler)
logger.info('Input DDI file: ' + filename)
#logger.debug('This message should go to the log file')
for key, value in settings.items():
logger.info(key +': '+ str(value))
# In[6]:
# In[7]:
class DDI_family_base(object):
#def __init__(self, ddi, Vectors_Fishers_aaIndex_raw_folder = '/home/du/Documents/Vectors_Fishers_aaIndex_raw_2014/'):
#def __init__(self, ddi, Vectors_Fishers_aaIndex_raw_folder = '/home/sun/Downloads/contactmatrix/contactmatrixanddeeplearningcode/data_test/'):
def __init__(self, ddi, Vectors_Fishers_aaIndex_raw_folder = '/big/du/Protein_Protein_Interaction_Project/Contact_Matrix_Project/Vectors_Fishers_aaIndex_raw_2014_paper/'):
""" get total number of sequences in a ddi familgy
Attributes:
ddi: string ddi name
Vectors_Fishers_aaIndex_raw_folder: string, folder
total_number_of_sequences: int
raw_data: dict raw_data[2]
LOO_data['FisherM1'][1]
"""
self.ddi = ddi
self.Vectors_Fishers_aaIndex_raw_folder = Vectors_Fishers_aaIndex_raw_folder
self.ddi_folder = self.Vectors_Fishers_aaIndex_raw_folder + ddi + '/'
self.total_number_of_sequences = self.get_total_number_of_sequences()
self.raw_data = {}
self.positve_negative_number = {}
self.equal_size_data = {}
for seq_no in range(1, self.total_number_of_sequences+1):
self.raw_data[seq_no] = self.get_raw_data_for_selected_seq(seq_no)
try:
#positive_file = self.ddi_folder + 'numPos_'+ str(seq_no) + '.txt'
#file_obj = FileOperator(positive_file)
#lines = file_obj.readStripLines()
#import pdb; pdb.set_trace()
count_pos = int(np.sum(self.raw_data[seq_no][:, -1]))
count_neg = self.raw_data[seq_no].shape[0] - count_pos
#self.positve_negative_number[seq_no] = {'numPos': int(float(lines[0]))}
#assert int(float(lines[0])) == count_pos
self.positve_negative_number[seq_no] = {'numPos': count_pos}
#negative_file = self.ddi_folder + 'numNeg_'+ str(seq_no) + '.txt'
#file_obj = FileOperator(negative_file)
#lines = file_obj.readStripLines()
#self.positve_negative_number[seq_no]['numNeg'] = int(float(lines[0]))
self.positve_negative_number[seq_no]['numNeg'] = count_neg
except Exception,e:
print ddi, seq_no
print str(e)
logger.info(ddi + str(seq_no))
logger.info(str(e))
# get data for equal positive and negative
n_pos = self.positve_negative_number[seq_no]['numPos']
n_neg = self.positve_negative_number[seq_no]['numNeg']
index_neg = range(n_pos, n_pos + n_neg)
random.shuffle(index_neg)
index_neg = index_neg[: n_pos]
positive_examples = self.raw_data[seq_no][ : n_pos, :]
negative_examples = self.raw_data[seq_no][index_neg, :]
self.equal_size_data[seq_no] = np.vstack((positive_examples, negative_examples))
def get_LOO_training_and_reduced_traing(self, seq_no, fisher_mode = 'FisherM1ONLY' , reduce_ratio = 4):
""" get the leave one out traing data, reduced traing
Parameters:
seq_no:
fisher_mode: default 'FisherM1ONLY'
Returns:
(train_X_LOO, train_y_LOO),(train_X_reduced, train_y_reduced), (test_X, test_y)
"""
train_X_LOO = np.array([])
train_y_LOO = np.array([])
train_X_reduced = np.array([])
train_y_reduced = np.array([])
total_number_of_sequences = self.total_number_of_sequences
equal_size_data_selected_sequence = self.equal_size_data[seq_no]
#get test data for selected sequence
test_X, test_y = self.select_X_y(equal_size_data_selected_sequence, fisher_mode = fisher_mode)
total_sequences = range(1, total_number_of_sequences+1)
loo_sequences = [i for i in total_sequences if i != seq_no]
number_of_reduced = len(loo_sequences)/reduce_ratio if len(loo_sequences)/reduce_ratio !=0 else 1
random.shuffle(loo_sequences)
reduced_sequences = loo_sequences[:number_of_reduced]
#for loo data
for current_no in loo_sequences:
raw_current_data = self.equal_size_data[current_no]
current_X, current_y = self.select_X_y(raw_current_data, fisher_mode = fisher_mode)
if train_X_LOO.ndim ==1:
train_X_LOO = current_X
else:
train_X_LOO = np.vstack((train_X_LOO, current_X))
train_y_LOO = np.concatenate((train_y_LOO, current_y))
#for reduced data
for current_no in reduced_sequences:
raw_current_data = self.equal_size_data[current_no]
current_X, current_y = self.select_X_y(raw_current_data, fisher_mode = fisher_mode)
if train_X_reduced.ndim ==1:
train_X_reduced = current_X
else:
train_X_reduced = np.vstack((train_X_reduced, current_X))
train_y_reduced = np.concatenate((train_y_reduced, current_y))
return (train_X_LOO, train_y_LOO),(train_X_reduced, train_y_reduced), (test_X, test_y)
#def get_ten_fold_crossvalid_one_subset(self, start_subset, end_subset, fisher_mode = 'FisherM1ONLY' , reduce_ratio = 4):
def get_ten_fold_crossvalid_one_subset(self, train_index, test_index, fisher_mode = 'FisherM1ONLY' , reduce_ratio = 4):
""" get traing data, reduced traing data for 10-fold crossvalidation
Parameters:
start_subset: index of start of the testing data
end_subset: index of end of the testing data
fisher_mode: default 'FisherM1ONLY'
Returns:
(train_X_10fold, train_y_10fold),(train_X_reduced, train_y_reduced), (test_X, test_y)
"""
train_X_10fold = np.array([])
train_y_10fold = np.array([])
train_X_reduced = np.array([])
train_y_reduced = np.array([])
test_X = np.array([])
test_y = np.array([])
total_number_of_sequences = self.total_number_of_sequences
#get test data for selected sequence
#for current_no in range(start_subset, end_subset):
for num in test_index:
current_no = num + 1
raw_current_data = self.equal_size_data[current_no]
current_X, current_y = self.select_X_y(raw_current_data, fisher_mode = fisher_mode)
if test_X.ndim ==1:
test_X = current_X
else:
test_X = np.vstack((test_X, current_X))
test_y = np.concatenate((test_y, current_y))
#total_sequences = range(1, total_number_of_sequences+1)
#ten_fold_sequences = [i for i in total_sequences if not(i in range(start_subset, end_subset))]
#number_of_reduced = len(ten_fold_sequences)/reduce_ratio if len(ten_fold_sequences)/reduce_ratio !=0 else 1
#random.shuffle(ten_fold_sequences)
#reduced_sequences = ten_fold_sequences[:number_of_reduced]
number_of_reduced = len(train_index)/reduce_ratio if len(train_index)/reduce_ratio !=0 else 1
random.shuffle(train_index)
reduced_sequences = train_index[:number_of_reduced]
#for 10-fold cross-validation data
#for current_no in ten_fold_sequences:
for num in train_index:
current_no = num + 1
raw_current_data = self.equal_size_data[current_no]
current_X, current_y = self.select_X_y(raw_current_data, fisher_mode = fisher_mode)
if train_X_10fold.ndim ==1:
train_X_10fold = current_X
else:
train_X_10fold = np.vstack((train_X_10fold, current_X))
train_y_10fold = np.concatenate((train_y_10fold, current_y))
#for reduced data
for num in reduced_sequences:
current_no = num + 1
raw_current_data = self.equal_size_data[current_no]
current_X, current_y = self.select_X_y(raw_current_data, fisher_mode = fisher_mode)
if train_X_reduced.ndim ==1:
train_X_reduced = current_X
else:
train_X_reduced = np.vstack((train_X_reduced, current_X))
train_y_reduced = np.concatenate((train_y_reduced, current_y))
return (train_X_10fold, train_y_10fold),(train_X_reduced, train_y_reduced), (test_X, test_y)
def get_total_number_of_sequences(self):
""" get total number of sequences in a ddi familgy
Parameters:
ddi: string
Vectors_Fishers_aaIndex_raw_folder: string
Returns:
n: int
"""
folder_path = self.Vectors_Fishers_aaIndex_raw_folder + self.ddi + '/'
filename = folder_path +'allPairs.txt'
all_pairs = np.loadtxt(filename)
return len(all_pairs)
def get_raw_data_for_selected_seq(self, seq_no):
""" get raw data for selected seq no in a family
Parameters:
ddi:
seq_no:
Returns:
data: raw data in the sequence file
"""
folder_path = self.Vectors_Fishers_aaIndex_raw_folder + self.ddi + '/'
filename = folder_path + 'F0_20_F1_20_Sliding_17_11_F0_20_F1_20_Sliding_17_11_ouput_'+ str(seq_no) + '.txt'
data = np.loadtxt(filename)
return data
def select_X_y(self, data, fisher_mode = ''):
""" select subset from the raw input data set
Parameters:
data: data from matlab txt file
            fisher_mode: feature subset selection mode ('FisherM1', 'FisherM1ONLY' or 'AAONLY')
Returns:
selected X, y
"""
        y = data[:,-1] # get label
if fisher_mode == 'FisherM1': # fisher m1 plus AA index
a = data[:, 20:227]
b = data[:, 247:454]
X = np.hstack((a,b))
elif fisher_mode == 'FisherM1ONLY':
a = data[:, 20:40]
b = data[:, 247:267]
X = np.hstack((a,b))
elif fisher_mode == 'AAONLY':
a = data[:, 40:227]
b = data[:, 267:454]
X = np.hstack((a,b))
else:
            raise ValueError('unknown fisher_mode: %s' % fisher_mode)
return X, y
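# Minimal usage sketch (the family name below is hypothetical; real names are
# read from settings['filename'] above):
#   family = DDI_family_base('PF00001_PF00002')
#   (X_loo, y_loo), (X_red, y_red), (X_test, y_test) = \
#       family.get_LOO_training_and_reduced_traing(1, fisher_mode='FisherM1ONLY')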
# In[7]:
# In[7]:
# In[8]:
import sklearn.preprocessing
def saveAsCsv(with_auc_score, fname, score_dict, arguments): #new
newfile = False
if os.path.isfile('report_' + fname + '.csv'):
pass
else:
newfile = True
csvfile = open('report_' + fname + '.csv', 'a+')
writer = csv.writer(csvfile)
if newfile == True:
if with_auc_score == False:
writer.writerow(['DDI', 'no.', 'FisherMode', 'method', 'isTest']+ score_dict.keys()) #, 'AUC'])
else:
writer.writerow(['DDI', 'no.', 'FisherMode', 'method', 'isTest'] + score_dict.keys())
for arg in arguments:
writer.writerow([i for i in arg])
csvfile.close()
def LOO_out_performance_for_all(ddis):
for ddi in ddis:
try:
one_ddi_family = LOO_out_performance_for_one_ddi(ddi)
one_ddi_family.get_LOO_perfermance(settings = settings)
except Exception,e:
print str(e)
logger.info("There is a error in this ddi: %s" % ddi)
logger.info(str(e))
class LOO_out_performance_for_one_ddi(object):
""" get the performance of ddi families
Attributes:
ddi: string ddi name
Vectors_Fishers_aaIndex_raw_folder: string, folder
total_number_of_sequences: int
raw_data: dict raw_data[2]
"""
def __init__(self, ddi):
self.ddi_obj = DDI_family_base(ddi)
self.ddi = ddi
def get_LOO_perfermance(self, settings = None):
fisher_mode = settings['fisher_mode']
analysis_scr = []
with_auc_score = settings['with_auc_score']
reduce_ratio = settings['reduce_ratio']
for seq_no in range(1, self.ddi_obj.total_number_of_sequences+1):
print seq_no
logger.info('sequence number: ' + str(seq_no))
if settings['SVM']:
print "SVM"
(train_X_LOO, train_y_LOO),(train_X_reduced, train_y_reduced), (test_X, test_y) = self.ddi_obj.get_LOO_training_and_reduced_traing(seq_no,fisher_mode = fisher_mode, reduce_ratio = reduce_ratio)
standard_scaler = preprocessing.StandardScaler().fit(train_X_reduced)
scaled_train_X = standard_scaler.transform(train_X_reduced)
scaled_test_X = standard_scaler.transform(test_X)
Linear_SVC = LinearSVC(C=1, penalty="l2")
Linear_SVC.fit(scaled_train_X, train_y_reduced)
predicted_test_y = Linear_SVC.predict(scaled_test_X)
isTest = True; #new
analysis_scr.append((self.ddi, seq_no, fisher_mode, 'SVM', isTest) + tuple(performance_score(test_y, predicted_test_y).values())) #new
predicted_train_y = Linear_SVC.predict(scaled_train_X)
isTest = False; #new
analysis_scr.append((self.ddi, seq_no, fisher_mode, 'SVM', isTest) + tuple(performance_score(train_y_reduced, predicted_train_y).values()))
# Deep learning part
min_max_scaler = Preprocessing_Scaler_with_mean_point5()
X_train_pre_validation_minmax = min_max_scaler.fit(train_X_reduced)
X_train_pre_validation_minmax = min_max_scaler.transform(train_X_reduced)
x_test_minmax = min_max_scaler.transform(test_X)
pretraining_X_minmax = min_max_scaler.transform(train_X_LOO)
x_train_minmax, x_validation_minmax, y_train_minmax, y_validation_minmax = train_test_split(X_train_pre_validation_minmax,
train_y_reduced
, test_size=0.4, random_state=42)
finetune_lr = settings['finetune_lr']
batch_size = settings['batch_size']
pretraining_epochs = cal_epochs(settings['pretraining_interations'], x_train_minmax, batch_size = batch_size)
#pretrain_lr=0.001
pretrain_lr = settings['pretrain_lr']
training_epochs = cal_epochs(settings['training_epochs'], x_train_minmax, batch_size = batch_size)
hidden_layers_sizes= settings['hidden_layers_sizes']
corruption_levels = settings['corruption_levels']
if settings['DL']:
print "direct deep learning"
# direct deep learning
sda = trainSda(x_train_minmax, y_train_minmax,
x_validation_minmax, y_validation_minmax ,
x_test_minmax, test_y,
hidden_layers_sizes = hidden_layers_sizes, corruption_levels = corruption_levels, batch_size = batch_size , \
training_epochs = training_epochs, pretraining_epochs = pretraining_epochs,
pretrain_lr = pretrain_lr, finetune_lr=finetune_lr
)
print 'hidden_layers_sizes:', hidden_layers_sizes
print 'corruption_levels:', corruption_levels
training_predicted = sda.predict(x_train_minmax)
y_train = y_train_minmax
isTest = False; #new
analysis_scr.append((self.ddi, seq_no, fisher_mode, 'DL', isTest) + tuple(performance_score(y_train, training_predicted).values()))
test_predicted = sda.predict(x_test_minmax)
y_test = test_y
isTest = True; #new
analysis_scr.append((self.ddi, seq_no, fisher_mode, 'DL', isTest) + tuple(performance_score(y_test, test_predicted).values()))
if 0:
# deep learning using unlabeled data for pretraining
print 'deep learning with unlabel data'
pretraining_epochs_for_reduced = cal_epochs(1500, pretraining_X_minmax, batch_size = batch_size)
sda_unlabel = trainSda(x_train_minmax, y_train_minmax,
x_validation_minmax, y_validation_minmax ,
x_test_minmax, test_y,
pretraining_X_minmax = pretraining_X_minmax,
hidden_layers_sizes = hidden_layers_sizes, corruption_levels = corruption_levels, batch_size = batch_size , \
training_epochs = training_epochs, pretraining_epochs = pretraining_epochs_for_reduced,
pretrain_lr = pretrain_lr, finetune_lr=finetune_lr
)
print 'hidden_layers_sizes:', hidden_layers_sizes
print 'corruption_levels:', corruption_levels
training_predicted = sda_unlabel.predict(x_train_minmax)
y_train = y_train_minmax
isTest = False; #new
analysis_scr.append((self.ddi, seq_no, fisher_mode, 'DL_U', isTest) + tuple(performance_score(y_train, training_predicted, with_auc_score).values()))
test_predicted = sda_unlabel.predict(x_test_minmax)
y_test = test_y
isTest = True; #new
analysis_scr.append((self.ddi, seq_no, fisher_mode, 'DL_U', isTest) + tuple(performance_score(y_test, test_predicted, with_auc_score).values()))
if settings['DL_S']:
# deep learning using split network
print 'deep learning using split network'
                # get the new representation for the A set (first half of the features)
pretraining_epochs = cal_epochs(settings['pretraining_interations'], x_train_minmax, batch_size = batch_size)
hidden_layers_sizes= settings['hidden_layers_sizes']
corruption_levels = settings['corruption_levels']
x = x_train_minmax[:, :x_train_minmax.shape[1]/2]
print "original shape for A", x.shape
a_MAE_A = train_a_MultipleAEs(x, pretraining_epochs=pretraining_epochs, pretrain_lr=pretrain_lr, batch_size=batch_size,
hidden_layers_sizes =hidden_layers_sizes, corruption_levels=corruption_levels)
new_x_train_minmax_A = a_MAE_A.transform(x_train_minmax[:, :x_train_minmax.shape[1]/2])
x = x_train_minmax[:, x_train_minmax.shape[1]/2:]
print "original shape for B", x.shape
a_MAE_B = train_a_MultipleAEs(x, pretraining_epochs=pretraining_epochs, pretrain_lr=pretrain_lr, batch_size=batch_size,
hidden_layers_sizes =hidden_layers_sizes, corruption_levels=corruption_levels)
new_x_train_minmax_B = a_MAE_B.transform(x_train_minmax[:, x_train_minmax.shape[1]/2:])
new_x_test_minmax_A = a_MAE_A.transform(x_test_minmax[:, :x_test_minmax.shape[1]/2])
new_x_test_minmax_B = a_MAE_B.transform(x_test_minmax[:, x_test_minmax.shape[1]/2:])
new_x_validation_minmax_A = a_MAE_A.transform(x_validation_minmax[:, :x_validation_minmax.shape[1]/2])
new_x_validation_minmax_B = a_MAE_B.transform(x_validation_minmax[:, x_validation_minmax.shape[1]/2:])
new_x_train_minmax_whole = np.hstack((new_x_train_minmax_A, new_x_train_minmax_B))
new_x_test_minmax_whole = np.hstack((new_x_test_minmax_A, new_x_test_minmax_B))
new_x_validationt_minmax_whole = np.hstack((new_x_validation_minmax_A, new_x_validation_minmax_B))
finetune_lr = settings['finetune_lr']
batch_size = settings['batch_size']
pretraining_epochs = cal_epochs(settings['pretraining_interations'], x_train_minmax, batch_size = batch_size)
#pretrain_lr=0.001
pretrain_lr = settings['pretrain_lr']
training_epochs = cal_epochs(settings['training_epochs'], x_train_minmax, batch_size = batch_size)
hidden_layers_sizes= settings['hidden_layers_sizes']
corruption_levels = settings['corruption_levels']
sda_transformed = trainSda(new_x_train_minmax_whole, y_train_minmax,
new_x_validationt_minmax_whole, y_validation_minmax ,
new_x_test_minmax_whole, y_test,
hidden_layers_sizes = hidden_layers_sizes, corruption_levels = corruption_levels, batch_size = batch_size , \
training_epochs = training_epochs, pretraining_epochs = pretraining_epochs,
pretrain_lr = pretrain_lr, finetune_lr=finetune_lr
)
print 'hidden_layers_sizes:', hidden_layers_sizes
print 'corruption_levels:', corruption_levels
training_predicted = sda_transformed.predict(new_x_train_minmax_whole)
y_train = y_train_minmax
isTest = False; #new
analysis_scr.append((self.ddi, seq_no, fisher_mode, 'DL_S', isTest) + tuple(performance_score(y_train, training_predicted, with_auc_score).values()))
test_predicted = sda_transformed.predict(new_x_test_minmax_whole)
y_test = test_y
isTest = True; #new
analysis_scr.append((self.ddi, seq_no, fisher_mode, 'DL_S', isTest) + tuple(performance_score(y_test, test_predicted, with_auc_score).values()))
report_name = filename + '_' + '_'.join(map(str, hidden_layers_sizes)) + '_' + str(pretrain_lr) + '_' + str(finetune_lr) + '_' + str(reduce_ratio)+ '_' +str(training_epochs) + '_' + current_date
saveAsCsv(with_auc_score, report_name, performance_score(y_test, test_predicted, with_auc_score), analysis_scr)
# In[9]:
#for 10-fold cross validation
def ten_fold_crossvalid_performance_for_all(ddis):
for ddi in ddis:
try:
process_one_ddi_tenfold(ddi)
except Exception,e:
print str(e)
logger.debug("There is a error in this ddi: %s" % ddi)
logger.info(str(e))
def process_one_ddi_tenfold(ddi):
"""A function to waste CPU cycles"""
logger.info('DDI: %s' % ddi)
try:
one_ddi_family = {}
one_ddi_family[ddi] = Ten_fold_crossvalid_performance_for_one_ddi(ddi)
one_ddi_family[ddi].get_ten_fold_crossvalid_perfermance(settings=settings)
except Exception,e:
print str(e)
logger.debug("There is a error in this ddi: %s" % ddi)
logger.info(str(e))
return None
class Ten_fold_crossvalid_performance_for_one_ddi(object):
""" get the performance of ddi families
Attributes:
ddi: string ddi name
Vectors_Fishers_aaIndex_raw_folder: string, folder
total_number_of_sequences: int
raw_data: dict raw_data[2]
"""
def __init__(self, ddi):
self.ddi_obj = DDI_family_base(ddi)
self.ddi = ddi
def get_ten_fold_crossvalid_perfermance(self, settings = None):
fisher_mode = settings['fisher_mode']
analysis_scr = []
with_auc_score = settings['with_auc_score']
reduce_ratio = settings['reduce_ratio']
#for seq_no in range(1, self.ddi_obj.total_number_of_sequences+1):
#subset_size = math.floor(self.ddi_obj.total_number_of_sequences / 10.0)
kf = KFold(self.ddi_obj.total_number_of_sequences, n_folds = 10, shuffle = True)
#for subset_no in range(1, 11):
for ((train_index, test_index),subset_no) in izip(kf,range(1,11)):
#for train_index, test_index in kf;
print("Subset:", subset_no)
print("Train index: ", train_index)
print("Test index: ", test_index)
#logger.info('subset number: ' + str(subset_no))
(train_X_10fold, train_y_10fold),(train_X_reduced, train_y_reduced), (test_X, test_y) = self.ddi_obj.get_ten_fold_crossvalid_one_subset(train_index, test_index, fisher_mode = fisher_mode, reduce_ratio = reduce_ratio)
standard_scaler = preprocessing.StandardScaler().fit(train_X_reduced)
scaled_train_X = standard_scaler.transform(train_X_reduced)
scaled_test_X = standard_scaler.transform(test_X)
if settings['SVM']:
print "SVM"
Linear_SVC = LinearSVC(C=1, penalty="l2")
Linear_SVC.fit(scaled_train_X, train_y_reduced)
predicted_test_y = Linear_SVC.predict(scaled_test_X)
isTest = True; #new
analysis_scr.append((self.ddi, subset_no, fisher_mode, 'SVM', isTest) + tuple(performance_score(test_y, predicted_test_y).values())) #new
predicted_train_y = Linear_SVC.predict(scaled_train_X)
isTest = False; #new
analysis_scr.append((self.ddi, subset_no, fisher_mode, 'SVM', isTest) + tuple(performance_score(train_y_reduced, predicted_train_y).values()))
if settings['SVM_RBF']:
print "SVM_RBF"
L1_SVC_RBF_Selector = SVC(C=1, gamma=0.01, kernel='rbf').fit(scaled_train_X, train_y_reduced)
predicted_test_y = L1_SVC_RBF_Selector.predict(scaled_test_X)
isTest = True; #new
analysis_scr.append((self.ddi, subset_no, fisher_mode, 'SVM_RBF', isTest) + tuple(performance_score(test_y, predicted_test_y).values())) #new
predicted_train_y = L1_SVC_RBF_Selector.predict(scaled_train_X)
isTest = False; #new
analysis_scr.append((self.ddi, subset_no, fisher_mode, 'SVM_RBF', isTest) + tuple(performance_score(train_y_reduced, predicted_train_y).values()))
if settings['SVM_POLY']:
print "SVM_POLY"
L1_SVC_POLY_Selector = SVC(C=1, kernel='poly').fit(scaled_train_X, train_y_reduced)
predicted_test_y = L1_SVC_POLY_Selector.predict(scaled_test_X)
isTest = True; #new
analysis_scr.append((self.ddi, subset_no, fisher_mode, 'SVM_POLY', isTest) + tuple(performance_score(test_y, predicted_test_y).values())) #new
predicted_train_y = L1_SVC_POLY_Selector.predict(scaled_train_X)
isTest = False; #new
analysis_scr.append((self.ddi, subset_no, fisher_mode, 'SVM_POLY', isTest) + tuple(performance_score(train_y_reduced, predicted_train_y).values()))
# direct deep learning
min_max_scaler = Preprocessing_Scaler_with_mean_point5()
X_train_pre_validation_minmax = min_max_scaler.fit(train_X_reduced)
X_train_pre_validation_minmax = min_max_scaler.transform(train_X_reduced)
x_test_minmax = min_max_scaler.transform(test_X)
x_train_minmax, x_validation_minmax, y_train_minmax, y_validation_minmax = train_test_split(X_train_pre_validation_minmax,
train_y_reduced
, test_size=0.4, random_state=42)
finetune_lr = settings['finetune_lr']
batch_size = settings['batch_size']
pretraining_epochs = cal_epochs(settings['pretraining_interations'], x_train_minmax, batch_size = batch_size)
#pretrain_lr=0.001
pretrain_lr = settings['pretrain_lr']
training_epochs = settings['training_epochs']
hidden_layers_sizes= settings['hidden_layers_sizes']
corruption_levels = settings['corruption_levels']
#### new prepresentation
x = X_train_pre_validation_minmax
a_MAE_A = train_a_MultipleAEs(x, pretraining_epochs=pretraining_epochs, pretrain_lr=pretrain_lr, batch_size=batch_size,
hidden_layers_sizes =hidden_layers_sizes, corruption_levels=corruption_levels)
new_x_train_minmax_A = a_MAE_A.transform(X_train_pre_validation_minmax)
new_x_test_minmax_A = a_MAE_A.transform(x_test_minmax)
standard_scaler = preprocessing.StandardScaler().fit(new_x_train_minmax_A)
new_x_train_scaled = standard_scaler.transform(new_x_train_minmax_A)
new_x_test_scaled = standard_scaler.transform(new_x_test_minmax_A)
new_x_train_combo = np.hstack((scaled_train_X, new_x_train_scaled))
new_x_test_combo = np.hstack((scaled_test_X, new_x_test_scaled))
if settings['SAE_SVM']:
print 'SAE followed by SVM'
Linear_SVC = LinearSVC(C=1, penalty="l2")
Linear_SVC.fit(new_x_train_scaled, train_y_reduced)
predicted_test_y = Linear_SVC.predict(new_x_test_scaled)
isTest = True; #new
analysis_scr.append((self.ddi, subset_no, fisher_mode, 'SAE_SVM', isTest) + tuple(performance_score(test_y, predicted_test_y).values())) #new
predicted_train_y = Linear_SVC.predict(new_x_train_scaled)
isTest = False; #new
analysis_scr.append((self.ddi, subset_no, fisher_mode, 'SAE_SVM', isTest) + tuple(performance_score(train_y_reduced, predicted_train_y).values()))
if settings['SAE_SVM_RBF']:
print 'SAE followed by SVM RBF'
x = X_train_pre_validation_minmax
L1_SVC_RBF_Selector = SVC(C=1, gamma=0.01, kernel='rbf').fit(new_x_train_scaled, train_y_reduced)
predicted_test_y = L1_SVC_RBF_Selector.predict(new_x_test_scaled)
isTest = True; #new
analysis_scr.append((self.ddi, subset_no, fisher_mode, 'SAE_SVM_RBF', isTest) + tuple(performance_score(test_y, predicted_test_y).values())) #new
predicted_train_y = L1_SVC_RBF_Selector.predict(new_x_train_scaled)
isTest = False; #new
analysis_scr.append((self.ddi, subset_no, fisher_mode, 'SAE_SVM_RBF', isTest) + tuple(performance_score(train_y_reduced, predicted_train_y).values()))
if settings['SAE_SVM_COMBO']:
print 'SAE followed by SVM with combo feature'
Linear_SVC = LinearSVC(C=1, penalty="l2")
Linear_SVC.fit(new_x_train_combo, train_y_reduced)
predicted_test_y = Linear_SVC.predict(new_x_test_combo)
isTest = True; #new
analysis_scr.append((self.ddi, subset_no, fisher_mode, 'SAE_SVM_COMBO', isTest) + tuple(performance_score(test_y, predicted_test_y).values())) #new
predicted_train_y = Linear_SVC.predict(new_x_train_combo)
isTest = False; #new
analysis_scr.append((self.ddi, subset_no, fisher_mode, 'SAE_SVM_COMBO', isTest) + tuple(performance_score(train_y_reduced, predicted_train_y).values()))
if settings['SAE_SVM_RBF_COMBO']:
print 'SAE followed by SVM RBF with combo feature'
L1_SVC_RBF_Selector = SVC(C=1, gamma=0.01, kernel='rbf').fit(new_x_train_combo, train_y_reduced)
predicted_test_y = L1_SVC_RBF_Selector.predict(new_x_test_combo)
isTest = True; #new
analysis_scr.append((self.ddi, subset_no, fisher_mode, 'SAE_SVM_RBF_COMBO', isTest) + tuple(performance_score(test_y, predicted_test_y).values())) #new
predicted_train_y = L1_SVC_RBF_Selector.predict(new_x_train_combo)
isTest = False; #new
analysis_scr.append((self.ddi, subset_no, fisher_mode, 'SAE_SVM_RBF_COMBO', isTest) + tuple(performance_score(train_y_reduced, predicted_train_y).values()))
if settings['DL']:
print "direct deep learning"
sda = trainSda(x_train_minmax, y_train_minmax,
x_validation_minmax, y_validation_minmax ,
x_test_minmax, test_y,
hidden_layers_sizes = hidden_layers_sizes, corruption_levels = corruption_levels, batch_size = batch_size , \
training_epochs = training_epochs, pretraining_epochs = pretraining_epochs,
pretrain_lr = pretrain_lr, finetune_lr=finetune_lr
)
print 'hidden_layers_sizes:', hidden_layers_sizes
print 'corruption_levels:', corruption_levels
training_predicted = sda.predict(x_train_minmax)
y_train = y_train_minmax
isTest = False; #new
analysis_scr.append((self.ddi, subset_no, fisher_mode, 'DL', isTest) + tuple(performance_score(y_train, training_predicted).values()))
test_predicted = sda.predict(x_test_minmax)
y_test = test_y
isTest = True; #new
analysis_scr.append((self.ddi, subset_no, fisher_mode, 'DL', isTest) + tuple(performance_score(y_test, test_predicted).values()))
if settings['DL_U']:
# deep learning using unlabeled data for pretraining
print 'deep learning with unlabel data'
pretraining_X_minmax = min_max_scaler.transform(train_X_10fold)
pretraining_epochs = cal_epochs(settings['pretraining_interations'], x_train_minmax, batch_size = batch_size)
sda_unlabel = trainSda(x_train_minmax, y_train_minmax,
x_validation_minmax, y_validation_minmax ,
x_test_minmax, test_y,
pretraining_X_minmax = pretraining_X_minmax,
hidden_layers_sizes = hidden_layers_sizes, corruption_levels = corruption_levels, batch_size = batch_size , \
training_epochs = training_epochs, pretraining_epochs = pretraining_epochs,
pretrain_lr = pretrain_lr, finetune_lr=finetune_lr
)
print 'hidden_layers_sizes:', hidden_layers_sizes
print 'corruption_levels:', corruption_levels
training_predicted = sda_unlabel.predict(x_train_minmax)
y_train = y_train_minmax
isTest = False; #new
analysis_scr.append((self.ddi, subset_no, fisher_mode, 'DL_U', isTest) + tuple(performance_score(y_train, training_predicted, with_auc_score).values()))
test_predicted = sda_unlabel.predict(x_test_minmax)
y_test = test_y
isTest = True; #new
analysis_scr.append((self.ddi, subset_no, fisher_mode, 'DL_U', isTest) + tuple(performance_score(y_test, test_predicted, with_auc_score).values()))
if settings['DL_S']:
# deep learning using split network
y_test = test_y
print 'deep learning using split network'
                # get the new representation for the A set (first half of the features)
pretraining_epochs = cal_epochs(settings['pretraining_interations'], x_train_minmax, batch_size = batch_size)
x = x_train_minmax[:, :x_train_minmax.shape[1]/2]
print "original shape for A", x.shape
a_MAE_A = train_a_MultipleAEs(x, pretraining_epochs=pretraining_epochs, pretrain_lr=pretrain_lr, batch_size=batch_size,
hidden_layers_sizes =hidden_layers_sizes, corruption_levels=corruption_levels)
new_x_train_minmax_A = a_MAE_A.transform(x_train_minmax[:, :x_train_minmax.shape[1]/2])
x = x_train_minmax[:, x_train_minmax.shape[1]/2:]
print "original shape for B", x.shape
a_MAE_B = train_a_MultipleAEs(x, pretraining_epochs=pretraining_epochs, pretrain_lr=pretrain_lr, batch_size=batch_size,
hidden_layers_sizes =hidden_layers_sizes, corruption_levels=corruption_levels)
new_x_train_minmax_B = a_MAE_B.transform(x_train_minmax[:, x_train_minmax.shape[1]/2:])
new_x_test_minmax_A = a_MAE_A.transform(x_test_minmax[:, :x_test_minmax.shape[1]/2])
new_x_test_minmax_B = a_MAE_B.transform(x_test_minmax[:, x_test_minmax.shape[1]/2:])
new_x_validation_minmax_A = a_MAE_A.transform(x_validation_minmax[:, :x_validation_minmax.shape[1]/2])
new_x_validation_minmax_B = a_MAE_B.transform(x_validation_minmax[:, x_validation_minmax.shape[1]/2:])
new_x_train_minmax_whole = np.hstack((new_x_train_minmax_A, new_x_train_minmax_B))
new_x_test_minmax_whole = np.hstack((new_x_test_minmax_A, new_x_test_minmax_B))
new_x_validationt_minmax_whole = np.hstack((new_x_validation_minmax_A, new_x_validation_minmax_B))
sda_transformed = trainSda(new_x_train_minmax_whole, y_train_minmax,
new_x_validationt_minmax_whole, y_validation_minmax ,
new_x_test_minmax_whole, y_test,
hidden_layers_sizes = hidden_layers_sizes, corruption_levels = corruption_levels, batch_size = batch_size , \
training_epochs = training_epochs, pretraining_epochs = pretraining_epochs,
pretrain_lr = pretrain_lr, finetune_lr=finetune_lr
)
print 'hidden_layers_sizes:', hidden_layers_sizes
print 'corruption_levels:', corruption_levels
training_predicted = sda_transformed.predict(new_x_train_minmax_whole)
y_train = y_train_minmax
isTest = False; #new
analysis_scr.append((self.ddi, subset_no, fisher_mode, 'DL_S', isTest) + tuple(performance_score(y_train, training_predicted, with_auc_score).values()))
test_predicted = sda_transformed.predict(new_x_test_minmax_whole)
y_test = test_y
isTest = True; #new
analysis_scr.append((self.ddi, subset_no, fisher_mode, 'DL_S', isTest) + tuple(performance_score(y_test, test_predicted, with_auc_score).values()))
report_name = filename + '_' + '_test10fold_'.join(map(str, hidden_layers_sizes)) + '_' + str(pretrain_lr) + '_' + str(finetune_lr) + '_' + str(reduce_ratio)+ '_' + str(training_epochs) + '_' + current_date
saveAsCsv(with_auc_score, report_name, performance_score(test_y, predicted_test_y, with_auc_score), analysis_scr)
# In[10]:
#LOO_out_performance_for_all(ddis)
#LOO_out_performance_for_all(ddis)
from multiprocessing import Pool
pool = Pool(8)
pool.map(process_one_ddi_tenfold, ddis[:])
pool.close()
pool.join()
# In[25]:
x = list(logger.handlers)
for i in x:
    logger.removeHandler(i)
    i.flush()
    i.close()
| gpl-2.0 |
nlproc/splunkml | bin/mcpredict.py | 1 | 2357 | #!env python
import os
import sys
sys.path.append(
os.path.join(
os.environ.get( "SPLUNK_HOME", "/opt/splunk/6.1.3" ),
"etc/apps/framework/contrib/splunk-sdk-python/1.3.0",
)
)
from collections import Counter, OrderedDict
from math import log
from nltk import tokenize
import execnet
import json
from splunklib.searchcommands import Configuration, Option
from splunklib.searchcommands import dispatch, validators
from remote_commands import OptionRemoteStreamingCommand, ValidateLocalFile
@Configuration(clear_required_fields=False)
class MCPredict(OptionRemoteStreamingCommand):
model = Option(require=True, validate=ValidateLocalFile(mode='r',extension="pkl",subdir='classifiers',nohandle=True))
code = """
import os, sys, itertools, collections, numbers
try:
import cStringIO as StringIO
except:
import StringIO
import numpy as np
import scipy.sparse as sp
from multiclassify import process_records
from gensim.models import LsiModel, TfidfModel, LdaModel
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import LabelEncoder
from sklearn.externals import joblib
if __name__ == "__channelexec__":
args = channel.receive()
records = []
for record in channel:
if not record:
break
records.append(record)
if records:
records = np.array(records)
# Try loading existing model
try:
model = joblib.load(args['model'])
encoder = model['encoder']
est = model['est']
target = model['target']
fields = model['fields']
if model.get('text'):
if model['text'] == 'lsi':
textmodel = LsiModel.load(args['model'].replace(".pkl",".%s" % model['text']))
elif model['text'] == 'tfidf':
textmodel = TfidfModel.load(args['model'].replace(".pkl",".%s" % model['text']))
else:
textmodel = model['text']
except Exception as e:
print >> sys.stderr, "ERROR", e
channel.send({ 'error': "Couldn't find model %s" % args['model']})
else:
X, y_labels, textmodel = process_records(records, fields, target, textmodel=textmodel)
print >> sys.stderr, X.shape
y = est.predict(X)
y_labels = encoder.inverse_transform(y)
for i, record in enumerate(records):
record['%s_predicted' % target] = y_labels.item(i)
channel.send(record)
"""
def __dir__(self):
return ['model']
dispatch(MCPredict, sys.argv, sys.stdin, sys.stdout, __name__)
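# Hypothetical Splunk search usage (assumes the command is registered in
# commands.conf under the name "mcpredict" and that a trained .pkl model exists
# in the app's "classifiers" directory):
#   ... | mcpredict model=<saved_classifier>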
| apache-2.0 |
cloudmesh/ansible-cloudmesh-face | performance/boxplot.py | 1 | 3118 | #! /usr/local/bin/python
#
# ON OSX THERE IS A BUG USING MATPLOTLIB IN VIRTUALENV THUS WE JUST
# USE THE USR LOCAL BASED MATPLOTLIB
#
# DO NOT USE /usr/bin/env python
# ON OSX
"""Usage: boxplot.py --kind=KIND --os=OS --host=HOST
Arguments:
HOST Host name
OS OS name
KIND category or classifier
Options:
-h --help
-o OS
"""
from __future__ import print_function
from docopt import docopt
import glob
from cloudmesh_client.common.hostlist import Parameter
import os.path
import textwrap
import numpy as np
import pandas as pd
from pandas import DataFrame
import matplotlib.pyplot as plt
from pprint import pprint
import warnings;
with warnings.catch_warnings():
warnings.simplefilter("ignore");
import matplotlib.pyplot as plt
if __name__ == '__main__':
arguments = docopt(__doc__)
print(arguments)
kind = arguments['--kind']
hosts = Parameter.expand(arguments["--host"])
oses = Parameter.expand(arguments["--os"])
print(hosts)
print(oses)
#
# CLEAN
#
for host in hosts:
for osystem in oses:
data = {
"os": osystem,
"host": host,
"kind": kind
}
name = "{os}_{kind}_{host}.csv".format(**data)
if os.path.isfile(name):
os.remove(name)
print("delete", name)
for host in hosts:
for osystem in oses:
data = {
"os": osystem,
"host": host,
"kind": kind
}
data["name"] = "{os}_{kind}_{host}".format(**data)
pattern = "{name}_*.csv".format(**data)
files = glob.glob(pattern)
if files != []:
r = ["real,user,sys\n"]
for f in files:
with open(f) as f:
lines = f.readlines()[1:]
r = r + lines
print("Merge Files -> {name}.csv\n".format(**data))
for f in files:
print(' ', f)
print()
with open("{name}.csv".format(**data), 'w') as f:
f.write(''.join(r))
# columns = DataFrame()
values = []
for osystem in oses:
for host in hosts:
data = {
"os": osystem,
"host": host,
"kind": kind
}
data["name"] = "{os}_{kind}_{host}.csv".format(**data)
if os.path.isfile(data["name"]):
d = pd.read_csv("{name}".format(**data))
for v in d.real:
data["value"] = v
values.append([data['value'], "{os}\n{host}".format(**data)])
pprint(values)
df = DataFrame(values)
df.columns = ['Time in s', 'Host']
print(df)
df.boxplot(column='Time in s',
by='Host',
rot=0)
plt.suptitle('Performance Comparison OpenFace: {}'.format(kind))
pdf = "boxplot-{}.png".format(kind)
plt.savefig(pdf)
plt.close()
os.system("open {}".format(pdf))
| apache-2.0 |
albertaparicio/tfg-voice-conversion | seq2seq_pytorch_main.py | 1 | 38363 | # -*- coding: utf-8 -*-
# TODO Add argparser
"""
Translation with a Sequence to Sequence Network and Attention
*************************************************************
**Author**: `Sean Robertson <https://github.com/spro/practical-pytorch>`_
In this project we will be teaching a neural network to translate from
French to English.
::
[KEY: > input, = target, < output]
> il est en train de peindre un tableau .
= he is painting a picture .
< he is painting a picture .
> pourquoi ne pas essayer ce vin delicieux ?
= why not try that delicious wine ?
< why not try that delicious wine ?
> elle n est pas poete mais romanciere .
= she is not a poet but a novelist .
< she not not a poet but a novelist .
> vous etes trop maigre .
= you re too skinny .
< you re all alone .
... to varying degrees of success.
This is made possible by the simple but powerful idea of the `sequence
to sequence network <http://arxiv.org/abs/1409.3215>`__, in which two
recurrent neural networks work together to transform one sequence to
another. An encoder network condenses an input sequence into a vector,
and a decoder network unfolds that vector into a new sequence.
.. figure:: /_static/img/seq-seq-images/seq2seq.png
:alt:
To improve upon this model we'll use an `attention
mechanism <https://arxiv.org/abs/1409.0473>`__, which lets the decoder
learn to focus over a specific range of the input sequence.
**Recommended Reading:**
I assume you have at least installed PyTorch, know Python, and
understand Tensors:
- http://pytorch.org/ For installation instructions
- :doc:`/beginner/deep_learning_60min_blitz` to get started with PyTorch in
general
- :doc:`/beginner/pytorch_with_examples` for a wide and deep overview
- :doc:`/beginner/former_torchies_tutorial` if you are former Lua Torch user
It would also be useful to know about Sequence to Sequence networks and
how they work:
- `Learning Phrase Representations using RNN Encoder-Decoder for
Statistical Machine Translation <http://arxiv.org/abs/1406.1078>`__
- `Sequence to Sequence Learning with Neural
Networks <http://arxiv.org/abs/1409.3215>`__
- `Neural Machine Translation by Jointly Learning to Align and
Translate <https://arxiv.org/abs/1409.0473>`__
- `A Neural Conversational Model <http://arxiv.org/abs/1506.05869>`__
You will also find the previous tutorials on
:doc:`/intermediate/char_rnn_classification_tutorial`
and :doc:`/intermediate/char_rnn_generation_tutorial`
helpful as those concepts are very similar to the Encoder and Decoder
models, respectively.
And for more, read the papers that introduced these topics:
- `Learning Phrase Representations using RNN Encoder-Decoder for
Statistical Machine Translation <http://arxiv.org/abs/1406.1078>`__
- `Sequence to Sequence Learning with Neural
Networks <http://arxiv.org/abs/1409.3215>`__
- `Neural Machine Translation by Jointly Learning to Align and
Translate <https://arxiv.org/abs/1409.0473>`__
- `A Neural Conversational Model <http://arxiv.org/abs/1506.05869>`__
**Requirements**
"""
from __future__ import division, print_function, unicode_literals
import argparse
import glob
import gzip
import os
import random
from sys import version_info
import h5py
import numpy as np
import torch
import torch.nn as nn
from ahoproc_tools import error_metrics
from tfglib.seq2seq_normalize import mask_data
from tfglib.utils import init_logger
from torch import optim
from torch.autograd import Variable
from seq2seq_dataloader import DataLoader
from seq2seq_pytorch_model import AttnDecoderRNN, EncoderRNN
use_cuda = torch.cuda.is_available()
# Conditional imports
if version_info.major > 2:
import pickle
else:
import cPickle as pickle
logger, opts = None, None
if __name__ == '__main__':
# logger.debug('Before parsing args')
parser = argparse.ArgumentParser(
description="Convert voice signal with seq2seq model")
parser.add_argument('--train_data_path', type=str,
default="tcstar_data_trim/training/")
parser.add_argument('--train_out_file', type=str,
default="tcstar_data_trim/seq2seq_train_datatable")
parser.add_argument('--test_data_path', type=str,
default="tcstar_data_trim/test/")
parser.add_argument('--test_out_file', type=str,
default="tcstar_data_trim/seq2seq_test_datatable")
parser.add_argument('--val_fraction', type=float, default=0.25)
parser.add_argument('--save-h5', dest='save_h5', action='store_true',
help='Save dataset to .h5 file')
parser.add_argument('--max_seq_length', type=int, default=500)
parser.add_argument('--params_len', type=int, default=44)
# parser.add_argument('--patience', type=int, default=4,
# help="Patience epochs to do validation, if validation "
# "score is worse than train for patience epochs "
# ", quit training. (Def: 4).")
# parser.add_argument('--enc_rnn_layers', type=int, default=1)
# parser.add_argument('--dec_rnn_layers', type=int, default=1)
parser.add_argument('--hidden_size', type=int, default=256)
# parser.add_argument('--cell_type', type=str, default="lstm")
parser.add_argument('--batch_size', type=int, default=10)
parser.add_argument('--epoch', type=int, default=50)
parser.add_argument('--learning_rate', type=float, default=0.0005)
# parser.add_argument('--dropout', type=float, default=0)
parser.add_argument('--teacher_forcing_ratio', type=float, default=1)
parser.add_argument('--SOS_token', type=int, default=0)
# parser.add_argument('--optimizer', type=str, default="adam")
# parser.add_argument('--clip_norm', type=float, default=5)
# parser.add_argument('--attn_length', type=int, default=500)
# parser.add_argument('--attn_size', type=int, default=256)
# parser.add_argument('--save_every', type=int, default=100)
parser.add_argument('--no-train', dest='do_train',
action='store_false', help='Flag to train or not.')
parser.add_argument('--no-test', dest='do_test',
action='store_false', help='Flag to test or not.')
parser.add_argument('--save_path', type=str, default="training_results")
parser.add_argument('--pred_path', type=str, default="torch_predicted")
# parser.add_argument('--tb_path', type=str, default="")
parser.add_argument('--log', type=str, default="INFO")
parser.add_argument('--load_model', dest='load_model', action='store_true',
help='Load previous model before training')
parser.add_argument('--server', dest='server', action='store_true',
help='Commands to be run or not run if we are running '
'on server')
parser.set_defaults(do_train=True, do_test=True, save_h5=False,
server=False) # ,
# load_model=False)
opts = parser.parse_args()
# Initialize logger
logger_level = opts.log
logger = init_logger(name=__name__, level=opts.log)
logger.debug('Parsed arguments')
if not os.path.exists(os.path.join(opts.save_path, 'torch_train')):
os.makedirs(os.path.join(opts.save_path, 'torch_train'))
# save config
with gzip.open(os.path.join(opts.save_path, 'torch_train', 'config.pkl.gz'),
'wb') as cf:
pickle.dump(opts, cf)
def main(args):
logger.debug('Main')
# If-else for training and testing
if args.do_train:
dl = DataLoader(args, logger_level=args.log,
max_seq_length=args.max_seq_length)
encoder1 = EncoderRNN(args.params_len, args.hidden_size, args.batch_size)
attn_decoder1 = AttnDecoderRNN(args.hidden_size, args.params_len,
batch_size=args.batch_size, n_layers=1,
max_length=args.max_seq_length,
dropout_p=0.1)
if use_cuda:
encoder1 = encoder1.cuda()
attn_decoder1 = attn_decoder1.cuda()
trained_encoder, trained_decoder = train_epochs(dl, encoder1, attn_decoder1)
if args.do_test:
# TODO What do we do for testing?
# pass
dl = DataLoader(args, logger_level=args.log, test=True,
max_seq_length=args.max_seq_length)
if args.load_model:
encoder = EncoderRNN(args.params_len, args.hidden_size, args.batch_size)
decoder = AttnDecoderRNN(args.hidden_size, args.params_len,
batch_size=args.batch_size, n_layers=1,
max_length=args.max_seq_length,
dropout_p=0.1)
if use_cuda:
encoder = encoder.cuda()
decoder = decoder.cuda()
else:
encoder = trained_encoder
decoder = trained_decoder
test(encoder, decoder, dl)
######################################################################
# Loading data files
# ==================
#
# The data for this project is a set of many thousands of English to
# French translation pairs.
#
# `This question on Open Data Stack
# Exchange <http://opendata.stackexchange.com/questions/3888/dataset-of
# -sentences-translated-into-many-languages>`__
# pointed me to the open translation site http://tatoeba.org/ which has
# downloads available at http://tatoeba.org/eng/downloads - and better
# yet, someone did the extra work of splitting language pairs into
# individual text files here: http://www.manythings.org/anki/
#
# The English to French pairs are too big to include in the repo, so
# download to ``data/eng-fra.txt`` before continuing. The file is a tab
# separated list of translation pairs:
#
# ::
#
# I am cold. Je suis froid.
#
# .. Note::
# Download the data from
# `here <https://download.pytorch.org/tutorial/data.zip>`_
# and extract it to the current directory.
######################################################################
# Similar to the character encoding used in the character-level RNN
# tutorials, we will be representing each word in a language as a one-hot
# vector, or giant vector of zeros except for a single one (at the index
# of the word). Compared to the dozens of characters that might exist in a
# language, there are many many more words, so the encoding vector is much
# larger. We will however cheat a bit and trim the data to only use a few
# thousand words per language.
#
# .. figure:: /_static/img/seq-seq-images/word-encoding.png
# :alt:
#
#
######################################################################
# We'll need a unique index per word to use as the inputs and targets of
# the networks later. To keep track of all this we will use a helper class
# called ``Lang`` which has word → index (``word2index``) and index → word
# (``index2word``) dictionaries, as well as a count of each word
# ``word2count`` to use to later replace rare words.
#
# EOS_token = 1
#
#
# class Lang:
# def __init__(self, name):
# self.name = name
# self.word2index = {}
# self.word2count = {}
# self.index2word = {0: "SOS", 1: "EOS"}
# self.n_words = 2 # Count SOS and EOS
#
# def add_sentence(self, sentence):
# for word in sentence.split(' '):
# self.add_word(word)
#
# def add_word(self, word):
# if word not in self.word2index:
# self.word2index[word] = self.n_words
# self.word2count[word] = 1
# self.index2word[self.n_words] = word
# self.n_words += 1
# else:
# self.word2count[word] += 1
#
#
# ######################################################################
# # The files are all in Unicode, to simplify we will turn Unicode
# # characters to ASCII, make everything lowercase, and trim most
# # punctuation.
# #
#
# # Turn a Unicode string to plain ASCII, thanks to
# # http://stackoverflow.com/a/518232/2809427
# def unicode_to_ascii(s):
# return ''.join(
# c for c in unicodedata.normalize('NFD', s)
# if unicodedata.category(c) != 'Mn'
# )
#
#
# # Lowercase, trim, and remove non-letter characters
# def normalize_string(s):
# s = unicode_to_ascii(s.lower().strip())
# s = re.sub(r"([.!?])", r" \1", s)
# s = re.sub(r"[^a-zA-Z.!?]+", r" ", s)
# return s
#
#
# ######################################################################
# # To read the data file we will split the file into lines, and then split
# # lines into pairs. The files are all English → Other Language, so if we
# # want to translate from Other Language → English I added the ``reverse``
# # flag to reverse the pairs.
# #
#
# def read_langs(lang1, lang2, reverse=False):
# print("Reading lines...")
#
# # Read the file and split into lines
# lines = open('data/%s-%s.txt' % (lang1, lang2), encoding='utf-8'). \
# read().strip().split('\n')
#
# # Split every line into pairs and normalize
# pairs = [[normalize_string(s) for s in l.split('\t')] for l in lines]
#
# # Reverse pairs, make Lang instances
# if reverse:
# pairs = [list(reversed(p)) for p in pairs]
# input_lang = Lang(lang2)
# output_lang = Lang(lang1)
# else:
# input_lang = Lang(lang1)
# output_lang = Lang(lang2)
#
# return input_lang, output_lang, pairs
######################################################################
# Since there are a *lot* of example sentences and we want to train
# something quickly, we'll trim the data set to only relatively short and
# simple sentences. Here the maximum length is 10 words (that includes
# ending punctuation) and we're filtering to sentences that translate to
# the form "I am" or "He is" etc. (accounting for apostrophes replaced
# earlier).
#
#
# eng_prefixes = (
# "i am ", "i m ",
# "he is", "he s ",
# "she is", "she s",
# "you are", "you re ",
# "we are", "we re ",
# "they are", "they re "
# )
#
#
# def filter_pair(p):
# return len(p[0].split(' ')) < opts.max_seq_length and \
# len(p[1].split(' ')) < opts.max_seq_length and \
# p[1].startswith(eng_prefixes)
#
#
# def filter_pairs(pairs):
# return [pair for pair in pairs if filter_pair(pair)]
######################################################################
# The full process for preparing the data is:
#
# - Read text file and split into lines, split lines into pairs
# - Normalize text, filter by length and content
# - Make word lists from sentences in pairs
#
#
# def prepare_data(lang1, lang2, reverse=False):
# input_lang, output_lang, pairs = read_langs(lang1, lang2, reverse)
# print("Read %s sentence pairs" % len(pairs))
# pairs = filter_pairs(pairs)
# print("Trimmed to %s sentence pairs" % len(pairs))
# print("Counting words...")
# for pair in pairs:
# input_lang.add_sentence(pair[0])
# output_lang.add_sentence(pair[1])
# print("Counted words:")
# print(input_lang.name, input_lang.n_words)
# print(output_lang.name, output_lang.n_words)
# return input_lang, output_lang, pairs
#
#
# input_lang, output_lang, pairs = prepare_data('eng', 'fra', True)
# print(random.choice(pairs))
######################################################################
# .. note:: There are other forms of attention that work around the length
# limitation by using a relative position approach. Read about "local
# attention" in `Effective Approaches to Attention-based Neural Machine
# Translation <https://arxiv.org/abs/1508.04025>`__.
#
# Training
# ========
#
# Preparing Training Data
# -----------------------
#
# To train, for each pair we will need an input tensor (indexes of the
# words in the input sentence) and target tensor (indexes of the words in
# the target sentence). While creating these vectors we will append the
# EOS token to both sequences.
#
#
# def indexes_from_sentence(lang, sentence):
# return [lang.word2index[word] for word in sentence.split(' ')]
#
#
# def variable_from_sentence(lang, sentence):
# indexes = indexes_from_sentence(lang, sentence)
# indexes.append(EOS_token)
# result = Variable(torch.LongTensor(indexes).view(-1, 1))
# if use_cuda:
# return result.cuda()
# else:
# return result
#
#
# def variables_from_pair(pair):
# input_variable = variable_from_sentence(input_lang, pair[0])
# target_variable = variable_from_sentence(output_lang, pair[1])
# return input_variable, target_variable
######################################################################
# Training the Model
# ------------------
#
# To train we run the input sentence through the encoder, and keep track
# of every output and the latest hidden state. Then the decoder is given
# the ``<SOS>`` token as its first input, and the last hidden state of the
# decoder as its first hidden state.
#
# "Teacher forcing" is the concept of using the real target outputs as
# each next input, instead of using the decoder's guess as the next input.
# Using teacher forcing causes it to converge faster but `when the trained
# network is exploited, it may exhibit
# instability <http://minds.jacobs-university.de/sites/default/files/uploads
# /papers/ESNTutorialRev.pdf>`__.
#
# You can observe outputs of teacher-forced networks that read with
# coherent grammar but wander far from the correct translation -
# intuitively it has learned to represent the output grammar and can "pick
# up" the meaning once the teacher tells it the first few words, but it
# has not properly learned how to create the sentence from the translation
# in the first place.
#
# Because of the freedom PyTorch's autograd gives us, we can randomly
# choose to use teacher forcing or not with a simple if statement. Turn
# ``teacher_forcing_ratio`` up to use more of it.
#
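# For example, with ``teacher_forcing_ratio = 0.5`` roughly half of the
# batches would be fed the ground-truth frames and the other half the
# decoder's own predictions; this script defaults the ratio to 1 (always
# teacher-forced), see the argparse defaults above.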
def train(input_variable, target_variable, encoder, decoder,
encoder_optimizer,
decoder_optimizer, criterion, max_length):
encoder_hidden = encoder.init_hidden()
encoder_optimizer.zero_grad()
decoder_optimizer.zero_grad()
input_length = input_variable.size()[0]
target_length = target_variable.size()[0]
encoder_outputs = Variable(torch.zeros(max_length, encoder.hidden_size))
encoder_outputs = encoder_outputs.cuda() if use_cuda else encoder_outputs
loss = 0
for ei in range(input_length):
encoder_output, encoder_hidden = encoder(
input_variable[ei], encoder_hidden)
encoder_outputs[ei] = encoder_output[0][0]
# decoder_input = Variable(torch.LongTensor([[opts.SOS_token]]))
decoder_input = Variable(torch.zeros(opts.batch_size, opts.params_len))
decoder_input = decoder_input.cuda() if use_cuda else decoder_input
decoder_hidden = encoder_hidden
  use_teacher_forcing = random.random() < opts.teacher_forcing_ratio
if use_teacher_forcing:
# Teacher forcing: Feed the target as the next input
for di in range(target_length):
decoder_output, decoder_hidden, decoder_attention = decoder(
decoder_input, decoder_hidden, encoder_output, encoder_outputs)
loss += criterion(decoder_output, target_variable[di])
decoder_input = target_variable[di] # Teacher forcing
else:
# Without teacher forcing: use its own predictions as the next input
for di in range(target_length):
decoder_output, decoder_hidden, decoder_attention = decoder(
decoder_input, decoder_hidden, encoder_output, encoder_outputs)
topv, topi = decoder_output.data.topk(1)
ni = topi[0][0]
decoder_input = Variable(torch.LongTensor([[ni]]))
decoder_input = decoder_input.cuda() if use_cuda else decoder_input
loss += criterion(decoder_output[0], target_variable[di])
# if ni == EOS_token:
# break
loss.backward()
encoder_optimizer.step()
decoder_optimizer.step()
return loss.data[0] / target_length
######################################################################
# This is a helper function to print time elapsed and estimated time
# remaining given the current time and progress %.
#
import time
import math
def as_minutes(s):
m = math.floor(s / 60)
s -= m * 60
return '%dm %ds' % (m, s)
def time_since(since, percent):
now = time.time()
s = now - since
es = s / percent
rs = es - s
return '%s (ETA: %s)' % (as_minutes(s), as_minutes(rs))
######################################################################
# The whole training process looks like this:
#
# - Start a timer
# - Initialize optimizers and criterion
# - Create set of training pairs
# - Start empty losses array for plotting
#
# Then we call ``train`` many times and occasionally print the progress (%
# of epochs, time so far, estimated time) and average loss.
#
def train_epochs(dataloader, encoder, decoder):
start = time.time()
plot_losses = []
print_loss_total = 0 # Reset every print_every
plot_loss_total = 0 # Reset every plot_every
batch_idx = 0
total_batch_idx = 0
curr_epoch = 0
b_epoch = dataloader.train_batches_per_epoch
# encoder_optimizer = optim.SGD(encoder.parameters(), lr=learning_rate)
# decoder_optimizer = optim.SGD(decoder.parameters(), lr=learning_rate)
encoder_optimizer = optim.Adam(encoder.parameters(), lr=opts.learning_rate)
decoder_optimizer = optim.Adam(decoder.parameters(), lr=opts.learning_rate)
# training_pairs = [variables_from_pair(random.choice(pairs))
# for _ in range(n_epochs)]
# criterion = nn.NLLLoss()
criterion = nn.MSELoss()
# Train on dataset batches
for src_batch, src_batch_seq_len, trg_batch, trg_mask in \
dataloader.next_batch():
if curr_epoch == 0 and batch_idx == 0:
logger.info(
'Batches per epoch: {}'.format(b_epoch))
logger.info(
'Total batches: {}'.format(b_epoch * opts.epoch))
# beg_t = timeit.default_timer()
# for epoch in range(1, n_epochs + 1):
# training_pair = training_pairs[epoch - 1]
# input_variable = training_pair[0]
# target_variable = training_pair[1]
# Transpose data to be shaped (max_seq_length, num_sequences, params_len)
input_variable = Variable(
torch.from_numpy(src_batch[:, :, 0:44]).float()
).transpose(1, 0).contiguous()
target_variable = Variable(
torch.from_numpy(trg_batch).float()
).transpose(1, 0).contiguous()
input_variable = input_variable.cuda() if use_cuda else input_variable
target_variable = target_variable.cuda() if use_cuda else target_variable
loss = train(input_variable, target_variable, encoder, decoder,
encoder_optimizer, decoder_optimizer, criterion,
opts.max_seq_length)
print_loss_total += loss
# plot_loss_total += loss
print_loss_avg = print_loss_total / (total_batch_idx + 1)
plot_losses.append(print_loss_avg)
logger.info(
'Batch {:2.0f}/{:2.0f} - Epoch {:2.0f}/{:2.0f} ({:3.2%}) - Loss={'
':.8f} - Time: {'
'!s}'.format(
batch_idx,
b_epoch,
curr_epoch + 1,
opts.epoch,
((batch_idx % b_epoch) + 1) / b_epoch,
print_loss_avg,
time_since(start,
(total_batch_idx + 1) / (b_epoch * opts.epoch))))
if batch_idx >= b_epoch:
curr_epoch += 1
batch_idx = 0
print_loss_total = 0
# Save model
# Instructions for saving and loading a model:
# http://pytorch.org/docs/notes/serialization.html
# with gzip.open(
enc_file = os.path.join(opts.save_path, 'torch_train',
'encoder_{}.pkl'.format(
curr_epoch)) # , 'wb') as enc:
torch.save(encoder.state_dict(), enc_file)
# with gzip.open(
dec_file = os.path.join(opts.save_path, 'torch_train',
'decoder_{}.pkl'.format(
curr_epoch)) # , 'wb') as dec:
torch.save(decoder.state_dict(), dec_file)
# TODO Validation?
batch_idx += 1
total_batch_idx += 1
if curr_epoch >= opts.epoch:
logger.info('Finished epochs -> BREAK')
break
if not opts.server:
show_plot(plot_losses)
else:
save_path = os.path.join(opts.save_path, 'torch_train', 'graphs')
if not os.path.exists(save_path):
os.makedirs(save_path)
np.savetxt(os.path.join(save_path, 'train_losses' + '.csv'), plot_losses)
return encoder, decoder
######################################################################
# Plotting results
# ----------------
#
# Plotting is done with matplotlib, using the array of loss values
# ``plot_losses`` saved while training.
#
if not opts.server:
import matplotlib
matplotlib.use('TKagg')
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
def show_plot(points, filename='train_loss'):
if not os.path.exists(
os.path.join(opts.save_path, 'torch_train', 'graphs')):
os.makedirs(os.path.join(opts.save_path, 'torch_train', 'graphs'))
plt.figure()
# fig, ax = plt.subplots()
# this locator puts ticks at regular intervals
# loc = ticker.MultipleLocator(base=0.2)
# ax.yaxis.set_major_locator(loc)
plt.plot(points)
plt.grid(b=True)
plt.savefig(
os.path.join(opts.save_path, 'torch_train', 'graphs',
filename + '.eps'),
bbox_inches='tight')
######################################################################
# Evaluation
# ==========
#
# Evaluation is mostly the same as training, but there are no targets so
# we simply feed the decoder's predictions back to itself for each step.
# Every time it predicts a word we add it to the output string, and if it
# predicts the EOS token we stop there. We also store the decoder's
# attention outputs for display later.
#
def test(encoder, decoder, dl):
if opts.load_model:
# Get filenames of last epoch files
enc_file = sorted(glob.glob(
os.path.join(opts.save_path, 'torch_train', 'encoder*.pkl')))[-1]
dec_file = sorted(glob.glob(
os.path.join(opts.save_path, 'torch_train', 'decoder*.pkl')))[-1]
# Open model files and load
# with gzip.open(enc_file, 'r') as enc:
# enc_f = pickle.load(enc)
encoder.load_state_dict(torch.load(enc_file))
#
# with gzip.open(dec_file, 'wb') as dec:
decoder.load_state_dict(torch.load(dec_file))
batch_idx = 0
n_batch = 0
attentions = []
for (src_batch_padded, src_batch_seq_len, trg_batch, trg_mask) in dl.next_batch(
test=True):
src_batch = []
# Take the last `seq_len` timesteps of each sequence to remove padding
for i in range(src_batch_padded.shape[0]):
src_batch.append(src_batch_padded[i,-src_batch_seq_len[i]:,:])
# TODO Get filename from datatable
f_name = format(n_batch + 1,
'0' + str(max(5, len(str(dl.src_test_data.shape[0])))))
input_variable = Variable(
torch.from_numpy(src_batch[:, :, 0:44]).float()
).transpose(1, 0).contiguous()
input_variable = input_variable.cuda() if use_cuda else input_variable
input_length = input_variable.size()[0]
encoder_hidden = encoder.init_hidden()
encoder_outputs = Variable(
torch.zeros(opts.max_seq_length, encoder.hidden_size))
encoder_outputs = encoder_outputs.cuda() if use_cuda else encoder_outputs
for ei in range(input_length):
encoder_output, encoder_hidden = encoder(input_variable[ei],
encoder_hidden)
encoder_outputs[ei] = encoder_outputs[ei] + encoder_output[0][0]
# decoder_input = Variable(torch.LongTensor([[SOS_token]])) # SOS
decoder_input = Variable(torch.zeros(opts.batch_size, opts.params_len))
decoder_input = decoder_input.cuda() if use_cuda else decoder_input
decoder_hidden = encoder_hidden
decoded_frames = []
decoder_attentions = torch.zeros(opts.max_seq_length, opts.batch_size,
opts.max_seq_length)
for di in range(opts.max_seq_length):
decoder_output, decoder_hidden, decoder_attention = decoder(
decoder_input, decoder_hidden, encoder_output, encoder_outputs)
decoder_attentions[di] = decoder_attention.data
# topv, topi = decoder_output.data.topk(1)
# ni = topi[0][0]
# if ni == EOS_token:
# decoded_frames.append('<EOS>')
# break
# else:
decoded_frames.append(decoder_output.data.cpu().numpy())
# decoder_input = Variable(torch.LongTensor([[ni]]))
decoder_input = decoder_output
decoder_input = decoder_input.cuda() if use_cuda else decoder_input
# Decode output frames
predictions = np.array(decoded_frames).transpose((1, 0, 2))
attentions.append(decoder_attentions[:di + 1].numpy().transpose((1, 0, 2)))
# TODO Decode speech data and display attentions
# Save original U/V flags to save them to file
raw_uv_flags = predictions[:, :, 42]
# Unscale target and predicted parameters
for i in range(predictions.shape[0]):
src_spk_index = int(src_batch[i, 0, 44])
trg_spk_index = int(src_batch[i, 0, 45])
# Prepare filename
# Get speakers names
src_spk_name = dl.s2s_datatable.src_speakers[src_spk_index]
trg_spk_name = dl.s2s_datatable.trg_speakers[trg_spk_index]
# Make sure the save directory exists
tf_pred_path = os.path.join(opts.test_data_path, opts.pred_path)
if not os.path.exists(
os.path.join(tf_pred_path, src_spk_name + '-' + trg_spk_name)):
os.makedirs(
os.path.join(tf_pred_path, src_spk_name + '-' + trg_spk_name))
# # TODO Get filename from datatable
# f_name = format(i + 1, '0' + str(
# max(5, len(str(dl.src_test_data.shape[0])))))
with h5py.File(
os.path.join(tf_pred_path, src_spk_name + '-' + trg_spk_name,
f_name + '_' + str(i) + '.h5'), 'w') as file:
file.create_dataset('predictions', data=predictions[i],
compression="gzip",
compression_opts=9)
file.create_dataset('target', data=trg_batch[i],
compression="gzip",
compression_opts=9)
file.create_dataset('mask', data=trg_mask[i],
compression="gzip",
compression_opts=9)
trg_spk_max = dl.train_trg_speakers_max[trg_spk_index, :]
trg_spk_min = dl.train_trg_speakers_min[trg_spk_index, :]
trg_batch[i, :, 0:42] = (trg_batch[i, :, 0:42] * (
trg_spk_max - trg_spk_min)) + trg_spk_min
predictions[i, :, 0:42] = (predictions[i, :, 0:42] * (
          trg_spk_max - trg_spk_min)) + trg_spk_min
# Round U/V flags
predictions[i, :, 42] = np.round(predictions[i, :, 42])
# Remove padding in prediction and target parameters
masked_trg = mask_data(trg_batch[i], trg_mask[i])
trg_batch[i] = np.ma.filled(masked_trg, fill_value=0.0)
unmasked_trg = np.ma.compress_rows(masked_trg)
masked_pred = mask_data(predictions[i], trg_mask[i])
predictions[i] = np.ma.filled(masked_pred, fill_value=0.0)
unmasked_prd = np.ma.compress_rows(masked_pred)
# # Apply U/V flag to lf0 and mvf params
# unmasked_prd[:, 40][unmasked_prd[:, 42] == 0] = -1e10
# unmasked_prd[:, 41][unmasked_prd[:, 42] == 0] = 1000
# Apply ground truth flags to prediction
unmasked_prd[:, 40][unmasked_trg[:, 42] == 0] = -1e10
unmasked_prd[:, 41][unmasked_trg[:, 42] == 0] = 1000
file.create_dataset('unmasked_prd', data=unmasked_prd,
compression="gzip",
compression_opts=9)
file.create_dataset('unmasked_trg', data=unmasked_trg,
compression="gzip",
compression_opts=9)
file.create_dataset('trg_max', data=trg_spk_max,
compression="gzip",
compression_opts=9)
file.create_dataset('trg_min', data=trg_spk_min,
compression="gzip",
compression_opts=9)
file.close()
# Save predictions to files
np.savetxt(
os.path.join(tf_pred_path, src_spk_name + '-' + trg_spk_name,
f_name + '_' + str(i) + '.vf.dat'),
unmasked_prd[:, 41]
)
np.savetxt(
os.path.join(tf_pred_path, src_spk_name + '-' + trg_spk_name,
f_name + '_' + str(i) + '.lf0.dat'),
unmasked_prd[:, 40]
)
np.savetxt(
os.path.join(tf_pred_path, src_spk_name + '-' + trg_spk_name,
f_name + '_' + str(i) + '.mcp.dat'),
unmasked_prd[:, 0:40],
delimiter='\t'
)
np.savetxt(
os.path.join(tf_pred_path, src_spk_name + '-' + trg_spk_name,
f_name + '_' + str(i) + '.uv.dat'),
raw_uv_flags[i, :]
)
# Display metrics
print('Num - {}'.format(n_batch))
print('MCD = {} dB'.format(
error_metrics.MCD(unmasked_trg[:, 0:40].reshape(-1, 40),
unmasked_prd[:, 0:40].reshape(-1, 40))))
acc, _, _, _ = error_metrics.AFPR(unmasked_trg[:, 42].reshape(-1, 1),
unmasked_prd[:, 42].reshape(-1, 1))
print('U/V accuracy = {}'.format(acc))
pitch_rmse = error_metrics.RMSE(
np.exp(unmasked_trg[:, 40].reshape(-1, 1)),
np.exp(unmasked_prd[:, 40].reshape(-1, 1)))
print('Pitch RMSE = {}'.format(pitch_rmse))
# Increase batch index
if batch_idx >= dl.test_batches_per_epoch:
break
batch_idx += 1
n_batch += 1
# Dump attentions to pickle file
logger.info('Saving attentions to pickle file')
with gzip.open(
os.path.join(opts.save_path, 'torch_train', 'attentions.pkl.gz'),
'wb') as att_file:
pickle.dump(attentions, att_file)
######################################################################
# We can evaluate random sentences from the training set and print out the
# input, target, and output to make some subjective quality judgements:
#
# def evaluate_randomly(encoder, decoder, n=10):
# for i in range(n):
# pair = random.choice(pairs)
# print('>', pair[0])
# print('=', pair[1])
# output_words, attentions = evaluate(encoder, decoder, pair[0])
# output_sentence = ' '.join(output_words)
# print('<', output_sentence)
# print('')
######################################################################
# Training and Evaluating
# =======================
#
# With all these helper functions in place (it looks like extra work, but
# it's easier to run multiple experiments easier) we can actually
# initialize a network and start training.
#
# Remember that the input sentences were heavily filtered. For this small
# dataset we can use relatively small networks of 256 hidden nodes and a
# single GRU layer. After about 40 minutes on a MacBook CPU we'll get some
# reasonable results.
#
# .. Note::
# If you run this notebook you can train, interrupt the kernel,
# evaluate, and continue training later. Comment out the lines where the
# encoder and decoder are initialized and run ``trainEpochs`` again.
#
######################################################################
#
# evaluate_randomly(encoder1, attn_decoder1)
######################################################################
# Visualizing Attention
# ---------------------
#
# A useful property of the attention mechanism is its highly interpretable
# outputs. Because it is used to weight specific encoder outputs of the
# input sequence, we can imagine looking where the network is focused most
# at each time step.
#
# You could simply run ``plt.matshow(attentions)`` to see attention output
# displayed as a matrix, with the columns being input steps and rows being
# output steps:
#
#
# output_words, attentions = evaluate(
# encoder1, attn_decoder1, "je suis trop froid .")
# plt.matshow(attentions.numpy())
######################################################################
# For a better viewing experience we will do the extra work of adding axes
# and labels:
#
def show_attention():
# Load attentions
logger.info('Loading attentions to pickle file')
with gzip.open(
os.path.join(opts.save_path, 'torch_train', 'attentions.pkl.gz'),
'r') as att_file:
attentions = pickle.load(att_file)
# Set up figure with colorbar
fig = plt.figure()
ax = fig.add_subplot(111)
cax = ax.matshow(attentions.numpy(), cmap='bone')
fig.colorbar(cax)
# # Set up axes
# ax.set_xticklabels([''] + input_sentence.split(' ') +
# ['<EOS>'], rotation=90)
# ax.set_yticklabels([''] + output_words)
#
# # Show label at every tick
# ax.xaxis.set_major_locator(ticker.MultipleLocator(1))
# ax.yaxis.set_major_locator(ticker.MultipleLocator(1))
plt.show()
# def evaluate_and_show_attention(input_sentence):
# output_words, attentions = evaluate(
# encoder1, attn_decoder1, input_sentence)
# print('input =', input_sentence)
# print('output =', ' '.join(output_words))
# show_attention(input_sentence, output_words, attentions)
#
#
# evaluate_and_show_attention("elle a cinq ans de moins que moi .")
#
# evaluate_and_show_attention("elle est trop petit .")
#
# evaluate_and_show_attention("je ne crains pas de mourir .")
#
# evaluate_and_show_attention("c est un jeune directeur plein de talent .")
######################################################################
# Exercises
# =========
#
# - Try with a different dataset
#
# - Another language pair
# - Human → Machine (e.g. IOT commands)
# - Chat → Response
# - Question → Answer
#
# - Replace the embedding pre-trained word embeddings such as word2vec or
# GloVe
# - Try with more layers, more hidden units, and more sentences. Compare
# the training time and results.
# - If you use a translation file where pairs have two of the same phrase
# (``I am test \t I am test``), you can use this as an autoencoder. Try
# this:
#
# - Train as an autoencoder
# - Save only the Encoder network
# - Train a new Decoder for translation from there
#
if __name__ == '__main__':
logger.debug('Before calling main')
main(opts)
| gpl-3.0 |
sanketloke/scikit-learn | examples/neural_networks/plot_mlp_training_curves.py | 56 | 3596 | """
========================================================
Compare Stochastic learning strategies for MLPClassifier
========================================================
This example visualizes some training loss curves for different stochastic
learning strategies, including SGD and Adam. Because of time-constraints, we
use several small datasets, for which L-BFGS might be more suitable. The
general trend shown in these examples seems to carry over to larger datasets,
however.
"""
print(__doc__)
import matplotlib.pyplot as plt
from sklearn.neural_network import MLPClassifier
from sklearn.preprocessing import MinMaxScaler
from sklearn import datasets
# different learning rate schedules and momentum parameters
params = [{'algorithm': 'sgd', 'learning_rate': 'constant', 'momentum': 0,
'learning_rate_init': 0.2},
{'algorithm': 'sgd', 'learning_rate': 'constant', 'momentum': .9,
'nesterovs_momentum': False, 'learning_rate_init': 0.2},
{'algorithm': 'sgd', 'learning_rate': 'constant', 'momentum': .9,
'nesterovs_momentum': True, 'learning_rate_init': 0.2},
{'algorithm': 'sgd', 'learning_rate': 'invscaling', 'momentum': 0,
'learning_rate_init': 0.2},
{'algorithm': 'sgd', 'learning_rate': 'invscaling', 'momentum': .9,
'nesterovs_momentum': True, 'learning_rate_init': 0.2},
{'algorithm': 'sgd', 'learning_rate': 'invscaling', 'momentum': .9,
'nesterovs_momentum': False, 'learning_rate_init': 0.2},
{'algorithm': 'adam'}]
labels = ["constant learning-rate", "constant with momentum",
"constant with Nesterov's momentum",
"inv-scaling learning-rate", "inv-scaling with momentum",
"inv-scaling with Nesterov's momentum", "adam"]
plot_args = [{'c': 'red', 'linestyle': '-'},
{'c': 'green', 'linestyle': '-'},
{'c': 'blue', 'linestyle': '-'},
{'c': 'red', 'linestyle': '--'},
{'c': 'green', 'linestyle': '--'},
{'c': 'blue', 'linestyle': '--'},
{'c': 'black', 'linestyle': '-'}]
def plot_on_dataset(X, y, ax, name):
# for each dataset, plot learning for each learning strategy
print("\nlearning on dataset %s" % name)
ax.set_title(name)
X = MinMaxScaler().fit_transform(X)
mlps = []
if name == "digits":
# digits is larger but converges fairly quickly
max_iter = 15
else:
max_iter = 400
for label, param in zip(labels, params):
print("training: %s" % label)
mlp = MLPClassifier(verbose=0, random_state=0,
max_iter=max_iter, **param)
mlp.fit(X, y)
mlps.append(mlp)
print("Training set score: %f" % mlp.score(X, y))
print("Training set loss: %f" % mlp.loss_)
for mlp, label, args in zip(mlps, labels, plot_args):
ax.plot(mlp.loss_curve_, label=label, **args)
fig, axes = plt.subplots(2, 2, figsize=(15, 10))
# load / generate some toy datasets
iris = datasets.load_iris()
digits = datasets.load_digits()
data_sets = [(iris.data, iris.target),
(digits.data, digits.target),
datasets.make_circles(noise=0.2, factor=0.5, random_state=1),
datasets.make_moons(noise=0.3, random_state=0)]
for ax, data, name in zip(axes.ravel(), data_sets, ['iris', 'digits',
'circles', 'moons']):
plot_on_dataset(*data, ax=ax, name=name)
fig.legend(ax.get_lines(), labels=labels, ncol=3, loc="upper center")
plt.show()
| bsd-3-clause |
IshankGulati/scikit-learn | examples/feature_selection/plot_f_test_vs_mi.py | 75 | 1647 | """
===========================================
Comparison of F-test and mutual information
===========================================
This example illustrates the differences between univariate F-test statistics
and mutual information.
We consider 3 features x_1, x_2, x_3 distributed uniformly over [0, 1], the
target depends on them as follows:
y = x_1 + sin(6 * pi * x_2) + 0.1 * N(0, 1), that is, the third feature is completely irrelevant.
The code below plots the dependency of y against individual x_i and normalized
values of univariate F-test statistics and mutual information.
As F-test captures only linear dependency, it rates x_1 as the most
discriminative feature. On the other hand, mutual information can capture any
kind of dependency between variables and it rates x_2 as the most
discriminative feature, which probably agrees better with our intuitive
perception for this example. Both methods correctly mark x_3 as irrelevant.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.feature_selection import f_regression, mutual_info_regression
np.random.seed(0)
X = np.random.rand(1000, 3)
y = X[:, 0] + np.sin(6 * np.pi * X[:, 1]) + 0.1 * np.random.randn(1000)
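# only x_1 (linearly) and x_2 (through the sine term) drive y; x_3 is pure
# noise, so both scores computed below should rank it last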
f_test, _ = f_regression(X, y)
f_test /= np.max(f_test)
mi = mutual_info_regression(X, y)
mi /= np.max(mi)
plt.figure(figsize=(15, 5))
for i in range(3):
plt.subplot(1, 3, i + 1)
plt.scatter(X[:, i], y)
plt.xlabel("$x_{}$".format(i + 1), fontsize=14)
if i == 0:
plt.ylabel("$y$", fontsize=14)
plt.title("F-test={:.2f}, MI={:.2f}".format(f_test[i], mi[i]),
fontsize=16)
plt.show()
| bsd-3-clause |
DamiPayne/Feature-Agglomeration-Clustering | AGG Cluster.py | 1 | 2209 | import collections
from nltk import word_tokenize
from nltk.corpus import stopwords
from nltk.stem import PorterStemmer
from sklearn.cluster import FeatureAgglomeration
from sklearn.feature_extraction.text import TfidfVectorizer
from pprint import pprint
import csv
import pandas
def word_tokenizer(text):
#tokenizes and stems the text
tokens = word_tokenize(text)
stemmer = PorterStemmer()
tokens = [stemmer.stem(t) for t in tokens if t not in stopwords.words('english')]
return tokens
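# Example (approximate output; exact stems depend on the NLTK version):
#   word_tokenizer("Fruit trees provide fruits") -> ['fruit', 'tree', 'provid', 'fruit']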
def cluster_sentences(sentences, nb_of_clusters=5):
tfidf_vectorizer = TfidfVectorizer(tokenizer=word_tokenizer,
stop_words=stopwords.words('english'),
max_df=0.9,
min_df=0.05,
lowercase=True)
#builds a tf-idf matrix for the sentences
tfidf_matrix_1 = tfidf_vectorizer.fit_transform(sentences)
tfidf_matrix = tfidf_matrix_1.todense()
    # FeatureAgglomeration merges the tf-idf features into nb_of_clusters groups
    agglo = FeatureAgglomeration(n_clusters=nb_of_clusters)
    agglo.fit(tfidf_matrix)
    clusters = collections.defaultdict(list)
    for i, label in enumerate(agglo.labels_):
clusters[label].append(i)
return dict(clusters)
import csv
with open(r"PATH" ) as f: #add the path to the CSV file
reader = csv.reader(f)
Pre_sentence = list(reader)
flatten = lambda l: [item for sublist in l for item in sublist]
sentences = flatten(Pre_sentence)
with open(r'Path') as g: #enables comparison to pre-labeled data-set
reader_cat = csv.reader(g)
Pre_Cat = list(reader_cat)
Cats = flatten(Pre_Cat)
if __name__ == "__main__":
#Example data set, uncomment to use
# sentences = ["Nature is beautiful","I like green apples",
# "We should protect the trees","Fruit trees provide fruits",
# "Green apples are tasty","My name is Dami"]
nclusters = 19
clusters = cluster_sentences(sentences, nclusters)
for cluster in range(nclusters):
print ("Grouped Engagements ",cluster,":")
for i,sentence in enumerate(clusters[cluster]):
print ("\tEngagement ", Cats[sentence],": ", sentences[sentence])
| mit |
B3AU/waveTree | examples/decomposition/plot_kernel_pca.py | 8 | 1970 | """
==========
Kernel PCA
==========
This example shows that Kernel PCA is able to find a projection of the data
that makes data linearly separable.
"""
print(__doc__)
# Authors: Mathieu Blondel
# Andreas Mueller
# License: BSD 3 clause
import numpy as np
import pylab as pl
from sklearn.decomposition import PCA, KernelPCA
from sklearn.datasets import make_circles
np.random.seed(0)
X, y = make_circles(n_samples=400, factor=.3, noise=.05)
kpca = KernelPCA(kernel="rbf", fit_inverse_transform=True, gamma=10)
X_kpca = kpca.fit_transform(X)
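# map the kernel-PCA projection back to the original input space; this is
# possible only because fit_inverse_transform=True was passed above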
X_back = kpca.inverse_transform(X_kpca)
pca = PCA()
X_pca = pca.fit_transform(X)
# Plot results
pl.figure()
pl.subplot(2, 2, 1, aspect='equal')
pl.title("Original space")
reds = y == 0
blues = y == 1
pl.plot(X[reds, 0], X[reds, 1], "ro")
pl.plot(X[blues, 0], X[blues, 1], "bo")
pl.xlabel("$x_1$")
pl.ylabel("$x_2$")
X1, X2 = np.meshgrid(np.linspace(-1.5, 1.5, 50), np.linspace(-1.5, 1.5, 50))
X_grid = np.array([np.ravel(X1), np.ravel(X2)]).T
# projection on the first principal component (in the phi space)
Z_grid = kpca.transform(X_grid)[:, 0].reshape(X1.shape)
pl.contour(X1, X2, Z_grid, colors='grey', linewidths=1, origin='lower')
pl.subplot(2, 2, 2, aspect='equal')
pl.plot(X_pca[reds, 0], X_pca[reds, 1], "ro")
pl.plot(X_pca[blues, 0], X_pca[blues, 1], "bo")
pl.title("Projection by PCA")
pl.xlabel("1st principal component")
pl.ylabel("2nd component")
pl.subplot(2, 2, 3, aspect='equal')
pl.plot(X_kpca[reds, 0], X_kpca[reds, 1], "ro")
pl.plot(X_kpca[blues, 0], X_kpca[blues, 1], "bo")
pl.title("Projection by KPCA")
pl.xlabel("1st principal component in space induced by $\phi$")
pl.ylabel("2nd component")
pl.subplot(2, 2, 4, aspect='equal')
pl.plot(X_back[reds, 0], X_back[reds, 1], "ro")
pl.plot(X_back[blues, 0], X_back[blues, 1], "bo")
pl.title("Original space after inverse transform")
pl.xlabel("$x_1$")
pl.ylabel("$x_2$")
pl.subplots_adjust(0.02, 0.10, 0.98, 0.94, 0.04, 0.35)
pl.show()
| bsd-3-clause |
jerli/sympy | sympy/physics/quantum/state.py | 58 | 29186 | """Dirac notation for states."""
from __future__ import print_function, division
from sympy import (cacheit, conjugate, Expr, Function, integrate, oo, sqrt,
Tuple)
from sympy.core.compatibility import u, range
from sympy.printing.pretty.stringpict import stringPict
from sympy.physics.quantum.qexpr import QExpr, dispatch_method
__all__ = [
'KetBase',
'BraBase',
'StateBase',
'State',
'Ket',
'Bra',
'TimeDepState',
'TimeDepBra',
'TimeDepKet',
'Wavefunction'
]
#-----------------------------------------------------------------------------
# States, bras and kets.
#-----------------------------------------------------------------------------
# ASCII brackets
_lbracket = "<"
_rbracket = ">"
_straight_bracket = "|"
# Unicode brackets
# MATHEMATICAL ANGLE BRACKETS
_lbracket_ucode = u("\N{MATHEMATICAL LEFT ANGLE BRACKET}")
_rbracket_ucode = u("\N{MATHEMATICAL RIGHT ANGLE BRACKET}")
# LIGHT VERTICAL BAR
_straight_bracket_ucode = u("\N{LIGHT VERTICAL BAR}")
# Other options for unicode printing of <, > and | for Dirac notation.
# LEFT-POINTING ANGLE BRACKET
# _lbracket = u"\u2329"
# _rbracket = u"\u232A"
# LEFT ANGLE BRACKET
# _lbracket = u"\u3008"
# _rbracket = u"\u3009"
# VERTICAL LINE
# _straight_bracket = u"\u007C"
class StateBase(QExpr):
"""Abstract base class for general abstract states in quantum mechanics.
All other state classes defined will need to inherit from this class. It
carries the basic structure for all other states such as dual, _eval_adjoint
and label.
This is an abstract base class and you should not instantiate it directly,
instead use State.
"""
@classmethod
def _operators_to_state(self, ops, **options):
""" Returns the eigenstate instance for the passed operators.
This method should be overridden in subclasses. It will handle being
passed either an Operator instance or set of Operator instances. It
should return the corresponding state INSTANCE or simply raise a
NotImplementedError. See cartesian.py for an example.
"""
raise NotImplementedError("Cannot map operators to states in this class. Method not implemented!")
def _state_to_operators(self, op_classes, **options):
""" Returns the operators which this state instance is an eigenstate
of.
This method should be overridden in subclasses. It will be called on
state instances and be passed the operator classes that we wish to make
into instances. The state instance will then transform the classes
appropriately, or raise a NotImplementedError if it cannot return
        operator instances. See cartesian.py for examples.
"""
raise NotImplementedError(
"Cannot map this state to operators. Method not implemented!")
@property
def operators(self):
"""Return the operator(s) that this state is an eigenstate of"""
from .operatorset import state_to_operators # import internally to avoid circular import errors
return state_to_operators(self)
def _enumerate_state(self, num_states, **options):
raise NotImplementedError("Cannot enumerate this state!")
def _represent_default_basis(self, **options):
return self._represent(basis=self.operators)
#-------------------------------------------------------------------------
# Dagger/dual
#-------------------------------------------------------------------------
@property
def dual(self):
"""Return the dual state of this one."""
return self.dual_class()._new_rawargs(self.hilbert_space, *self.args)
@classmethod
def dual_class(self):
"""Return the class used to construt the dual."""
raise NotImplementedError(
'dual_class must be implemented in a subclass'
)
def _eval_adjoint(self):
"""Compute the dagger of this state using the dual."""
return self.dual
#-------------------------------------------------------------------------
# Printing
#-------------------------------------------------------------------------
def _pretty_brackets(self, height, use_unicode=True):
# Return pretty printed brackets for the state
# Ideally, this could be done by pform.parens but it does not support the angled < and >
# Setup for unicode vs ascii
if use_unicode:
lbracket, rbracket = self.lbracket_ucode, self.rbracket_ucode
slash, bslash, vert = u('\N{BOX DRAWINGS LIGHT DIAGONAL UPPER RIGHT TO LOWER LEFT}'), \
u('\N{BOX DRAWINGS LIGHT DIAGONAL UPPER LEFT TO LOWER RIGHT}'), \
u('\N{BOX DRAWINGS LIGHT VERTICAL}')
else:
lbracket, rbracket = self.lbracket, self.rbracket
slash, bslash, vert = '/', '\\', '|'
# If height is 1, just return brackets
if height == 1:
return stringPict(lbracket), stringPict(rbracket)
# Make height even
height += (height % 2)
brackets = []
for bracket in lbracket, rbracket:
# Create left bracket
if bracket in set([_lbracket, _lbracket_ucode]):
bracket_args = [ ' ' * (height//2 - i - 1) +
slash for i in range(height // 2)]
bracket_args.extend(
[ ' ' * i + bslash for i in range(height // 2)])
# Create right bracket
elif bracket in set([_rbracket, _rbracket_ucode]):
bracket_args = [ ' ' * i + bslash for i in range(height // 2)]
bracket_args.extend([ ' ' * (
height//2 - i - 1) + slash for i in range(height // 2)])
# Create straight bracket
elif bracket in set([_straight_bracket, _straight_bracket_ucode]):
bracket_args = [vert for i in range(height)]
else:
raise ValueError(bracket)
brackets.append(
stringPict('\n'.join(bracket_args), baseline=height//2))
return brackets
def _sympystr(self, printer, *args):
contents = self._print_contents(printer, *args)
return '%s%s%s' % (self.lbracket, contents, self.rbracket)
def _pretty(self, printer, *args):
from sympy.printing.pretty.stringpict import prettyForm
# Get brackets
pform = self._print_contents_pretty(printer, *args)
lbracket, rbracket = self._pretty_brackets(
pform.height(), printer._use_unicode)
# Put together state
pform = prettyForm(*pform.left(lbracket))
pform = prettyForm(*pform.right(rbracket))
return pform
def _latex(self, printer, *args):
contents = self._print_contents_latex(printer, *args)
# The extra {} brackets are needed to get matplotlib's latex
        # renderer to render this properly.
return '{%s%s%s}' % (self.lbracket_latex, contents, self.rbracket_latex)
class KetBase(StateBase):
"""Base class for Kets.
This class defines the dual property and the brackets for printing. This is
an abstract base class and you should not instantiate it directly, instead
use Ket.
"""
lbracket = _straight_bracket
rbracket = _rbracket
lbracket_ucode = _straight_bracket_ucode
rbracket_ucode = _rbracket_ucode
lbracket_latex = r'\left|'
rbracket_latex = r'\right\rangle '
@classmethod
def default_args(self):
return ("psi",)
@classmethod
def dual_class(self):
return BraBase
def __mul__(self, other):
"""KetBase*other"""
from sympy.physics.quantum.operator import OuterProduct
if isinstance(other, BraBase):
return OuterProduct(self, other)
else:
return Expr.__mul__(self, other)
def __rmul__(self, other):
"""other*KetBase"""
from sympy.physics.quantum.innerproduct import InnerProduct
if isinstance(other, BraBase):
return InnerProduct(other, self)
else:
return Expr.__rmul__(self, other)
#-------------------------------------------------------------------------
# _eval_* methods
#-------------------------------------------------------------------------
def _eval_innerproduct(self, bra, **hints):
"""Evaluate the inner product betweeen this ket and a bra.
This is called to compute <bra|ket>, where the ket is ``self``.
This method will dispatch to sub-methods having the format::
``def _eval_innerproduct_BraClass(self, **hints):``
Subclasses should define these methods (one for each BraClass) to
teach the ket how to take inner products with bras.
"""
return dispatch_method(self, '_eval_innerproduct', bra, **hints)
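    # For example (illustrative only), a hypothetical subclass MyKet could define
    #
    #     def _eval_innerproduct_MyBra(self, bra, **hints):
    #         return KroneckerDelta(self.label[0], bra.label[0])
    #
    # and InnerProduct(MyBra(n), MyKet(m)).doit() would then dispatch to it.
    # See sympy.physics.quantum.cartesian for real implementations.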
def _apply_operator(self, op, **options):
"""Apply an Operator to this Ket.
This method will dispatch to methods having the format::
``def _apply_operator_OperatorName(op, **options):``
Subclasses should define these methods (one for each OperatorName) to
teach the Ket how operators act on it.
Parameters
==========
op : Operator
The Operator that is acting on the Ket.
options : dict
A dict of key/value pairs that control how the operator is applied
to the Ket.
"""
return dispatch_method(self, '_apply_operator', op, **options)
class BraBase(StateBase):
"""Base class for Bras.
This class defines the dual property and the brackets for printing. This
is an abstract base class and you should not instantiate it directly,
instead use Bra.
"""
lbracket = _lbracket
rbracket = _straight_bracket
lbracket_ucode = _lbracket_ucode
rbracket_ucode = _straight_bracket_ucode
lbracket_latex = r'\left\langle '
rbracket_latex = r'\right|'
@classmethod
def _operators_to_state(self, ops, **options):
state = self.dual_class().operators_to_state(ops, **options)
return state.dual
def _state_to_operators(self, op_classes, **options):
return self.dual._state_to_operators(op_classes, **options)
def _enumerate_state(self, num_states, **options):
dual_states = self.dual._enumerate_state(num_states, **options)
return [x.dual for x in dual_states]
@classmethod
def default_args(self):
return self.dual_class().default_args()
@classmethod
def dual_class(self):
return KetBase
def __mul__(self, other):
"""BraBase*other"""
from sympy.physics.quantum.innerproduct import InnerProduct
if isinstance(other, KetBase):
return InnerProduct(self, other)
else:
return Expr.__mul__(self, other)
def __rmul__(self, other):
"""other*BraBase"""
from sympy.physics.quantum.operator import OuterProduct
if isinstance(other, KetBase):
return OuterProduct(other, self)
else:
return Expr.__rmul__(self, other)
def _represent(self, **options):
"""A default represent that uses the Ket's version."""
from sympy.physics.quantum.dagger import Dagger
return Dagger(self.dual._represent(**options))
class State(StateBase):
"""General abstract quantum state used as a base class for Ket and Bra."""
pass
class Ket(State, KetBase):
"""A general time-independent Ket in quantum mechanics.
Inherits from State and KetBase. This class should be used as the base
class for all physical, time-independent Kets in a system. This class
and its subclasses will be the main classes that users will use for
expressing Kets in Dirac notation [1]_.
Parameters
==========
args : tuple
The list of numbers or parameters that uniquely specify the
ket. This will usually be its symbol or its quantum numbers. For
time-dependent state, this will include the time.
Examples
========
    Create a simple Ket and look at its properties::
>>> from sympy.physics.quantum import Ket, Bra
>>> from sympy import symbols, I
>>> k = Ket('psi')
>>> k
|psi>
>>> k.hilbert_space
H
>>> k.is_commutative
False
>>> k.label
(psi,)
    Kets know about their associated bra::
>>> k.dual
<psi|
>>> k.dual_class()
<class 'sympy.physics.quantum.state.Bra'>
Take a linear combination of two kets::
>>> k0 = Ket(0)
>>> k1 = Ket(1)
>>> 2*I*k0 - 4*k1
2*I*|0> - 4*|1>
Compound labels are passed as tuples::
>>> n, m = symbols('n,m')
>>> k = Ket(n,m)
>>> k
|nm>
References
==========
.. [1] http://en.wikipedia.org/wiki/Bra-ket_notation
"""
@classmethod
def dual_class(self):
return Bra
class Bra(State, BraBase):
"""A general time-independent Bra in quantum mechanics.
Inherits from State and BraBase. A Bra is the dual of a Ket [1]_. This
class and its subclasses will be the main classes that users will use for
expressing Bras in Dirac notation.
Parameters
==========
args : tuple
The list of numbers or parameters that uniquely specify the
ket. This will usually be its symbol or its quantum numbers. For
time-dependent state, this will include the time.
Examples
========
Create a simple Bra and look at its properties::
>>> from sympy.physics.quantum import Ket, Bra
>>> from sympy import symbols, I
>>> b = Bra('psi')
>>> b
<psi|
>>> b.hilbert_space
H
>>> b.is_commutative
False
    Bras know about their dual Kets::
>>> b.dual
|psi>
>>> b.dual_class()
<class 'sympy.physics.quantum.state.Ket'>
Like Kets, Bras can have compound labels and be manipulated in a similar
manner::
>>> n, m = symbols('n,m')
>>> b = Bra(n,m) - I*Bra(m,n)
>>> b
-I*<mn| + <nm|
Symbols in a Bra can be substituted using ``.subs``::
>>> b.subs(n,m)
<mm| - I*<mm|
References
==========
.. [1] http://en.wikipedia.org/wiki/Bra-ket_notation
"""
@classmethod
def dual_class(self):
return Ket
#-----------------------------------------------------------------------------
# Time dependent states, bras and kets.
#-----------------------------------------------------------------------------
class TimeDepState(StateBase):
"""Base class for a general time-dependent quantum state.
This class is used as a base class for any time-dependent state. The main
difference between this class and the time-independent state is that this
class takes a second argument that is the time in addition to the usual
label argument.
Parameters
==========
args : tuple
The list of numbers or parameters that uniquely specify the ket. This
will usually be its symbol or its quantum numbers. For time-dependent
state, this will include the time as the final argument.
"""
#-------------------------------------------------------------------------
# Initialization
#-------------------------------------------------------------------------
@classmethod
def default_args(self):
return ("psi", "t")
#-------------------------------------------------------------------------
# Properties
#-------------------------------------------------------------------------
@property
def label(self):
"""The label of the state."""
return self.args[:-1]
@property
def time(self):
"""The time of the state."""
return self.args[-1]
#-------------------------------------------------------------------------
# Printing
#-------------------------------------------------------------------------
def _print_time(self, printer, *args):
return printer._print(self.time, *args)
_print_time_repr = _print_time
_print_time_latex = _print_time
def _print_time_pretty(self, printer, *args):
pform = printer._print(self.time, *args)
return pform
def _print_contents(self, printer, *args):
label = self._print_label(printer, *args)
time = self._print_time(printer, *args)
return '%s;%s' % (label, time)
def _print_label_repr(self, printer, *args):
label = self._print_sequence(self.label, ',', printer, *args)
time = self._print_time_repr(printer, *args)
return '%s,%s' % (label, time)
def _print_contents_pretty(self, printer, *args):
label = self._print_label_pretty(printer, *args)
time = self._print_time_pretty(printer, *args)
return printer._print_seq((label, time), delimiter=';')
def _print_contents_latex(self, printer, *args):
label = self._print_sequence(
self.label, self._label_separator, printer, *args)
time = self._print_time_latex(printer, *args)
return '%s;%s' % (label, time)
class TimeDepKet(TimeDepState, KetBase):
"""General time-dependent Ket in quantum mechanics.
This inherits from ``TimeDepState`` and ``KetBase`` and is the main class
that should be used for Kets that vary with time. Its dual is a
``TimeDepBra``.
Parameters
==========
args : tuple
The list of numbers or parameters that uniquely specify the ket. This
will usually be its symbol or its quantum numbers. For time-dependent
state, this will include the time as the final argument.
Examples
========
Create a TimeDepKet and look at its attributes::
>>> from sympy.physics.quantum import TimeDepKet
>>> k = TimeDepKet('psi', 't')
>>> k
|psi;t>
>>> k.time
t
>>> k.label
(psi,)
>>> k.hilbert_space
H
TimeDepKets know about their dual bra::
>>> k.dual
<psi;t|
>>> k.dual_class()
<class 'sympy.physics.quantum.state.TimeDepBra'>
"""
@classmethod
def dual_class(self):
return TimeDepBra
class TimeDepBra(TimeDepState, BraBase):
"""General time-dependent Bra in quantum mechanics.
This inherits from TimeDepState and BraBase and is the main class that
    should be used for Bras that vary with time. Its dual is a TimeDepKet.
Parameters
==========
args : tuple
The list of numbers or parameters that uniquely specify the ket. This
will usually be its symbol or its quantum numbers. For time-dependent
state, this will include the time as the final argument.
Examples
========
>>> from sympy.physics.quantum import TimeDepBra
>>> from sympy import symbols, I
>>> b = TimeDepBra('psi', 't')
>>> b
<psi;t|
>>> b.time
t
>>> b.label
(psi,)
>>> b.hilbert_space
H
>>> b.dual
|psi;t>
"""
@classmethod
def dual_class(self):
return TimeDepKet
class Wavefunction(Function):
"""Class for representations in continuous bases
This class takes an expression and coordinates in its constructor. It can
be used to easily calculate normalizations and probabilities.
Parameters
==========
expr : Expr
The expression representing the functional form of the w.f.
coords : Symbol or tuple
The coordinates to be integrated over, and their bounds
Examples
========
Particle in a box, specifying bounds in the more primitive way of using
Piecewise:
>>> from sympy import Symbol, Piecewise, pi, N
>>> from sympy.functions import sqrt, sin
>>> from sympy.physics.quantum.state import Wavefunction
>>> x = Symbol('x', real=True)
>>> n = 1
>>> L = 1
>>> g = Piecewise((0, x < 0), (0, x > L), (sqrt(2//L)*sin(n*pi*x/L), True))
>>> f = Wavefunction(g, x)
>>> f.norm
1
>>> f.is_normalized
True
>>> p = f.prob()
>>> p(0)
0
>>> p(L)
0
>>> p(0.5)
2
>>> p(0.85*L)
2*sin(0.85*pi)**2
>>> N(p(0.85*L))
0.412214747707527
Additionally, you can specify the bounds of the function and the indices in
a more compact way:
>>> from sympy import symbols, pi, diff
>>> from sympy.functions import sqrt, sin
>>> from sympy.physics.quantum.state import Wavefunction
>>> x, L = symbols('x,L', positive=True)
>>> n = symbols('n', integer=True, positive=True)
>>> g = sqrt(2/L)*sin(n*pi*x/L)
>>> f = Wavefunction(g, (x, 0, L))
>>> f.norm
1
>>> f(L+1)
0
>>> f(L-1)
sqrt(2)*sin(pi*n*(L - 1)/L)/sqrt(L)
>>> f(-1)
0
>>> f(0.85)
sqrt(2)*sin(0.85*pi*n/L)/sqrt(L)
>>> f(0.85, n=1, L=1)
sqrt(2)*sin(0.85*pi)
>>> f.is_commutative
False
All arguments are automatically sympified, so you can define the variables
as strings rather than symbols:
>>> expr = x**2
>>> f = Wavefunction(expr, 'x')
>>> type(f.variables[0])
<class 'sympy.core.symbol.Symbol'>
Derivatives of Wavefunctions will return Wavefunctions:
>>> diff(f, x)
Wavefunction(2*x, x)
"""
#Any passed tuples for coordinates and their bounds need to be
#converted to Tuples before Function's constructor is called, to
#avoid errors from calling is_Float in the constructor
def __new__(cls, *args, **options):
new_args = [None for i in args]
ct = 0
for arg in args:
if isinstance(arg, tuple):
new_args[ct] = Tuple(*arg)
else:
new_args[ct] = arg
ct += 1
return super(Function, cls).__new__(cls, *new_args, **options)
def __call__(self, *args, **options):
var = self.variables
if len(args) != len(var):
raise NotImplementedError(
"Incorrect number of arguments to function!")
ct = 0
#If the passed value is outside the specified bounds, return 0
for v in var:
lower, upper = self.limits[v]
#Do the comparison to limits only if the passed symbol is actually
#a symbol present in the limits;
#Had problems with a comparison of x > L
if isinstance(args[ct], Expr) and \
not (lower in args[ct].free_symbols
or upper in args[ct].free_symbols):
continue
if (args[ct] < lower) == True or (args[ct] > upper) == True:
return 0
ct += 1
expr = self.expr
#Allows user to make a call like f(2, 4, m=1, n=1)
for symbol in list(expr.free_symbols):
if str(symbol) in options.keys():
val = options[str(symbol)]
expr = expr.subs(symbol, val)
return expr.subs(zip(var, args))
def _eval_derivative(self, symbol):
expr = self.expr
deriv = expr._eval_derivative(symbol)
return Wavefunction(deriv, *self.args[1:])
def _eval_conjugate(self):
return Wavefunction(conjugate(self.expr), *self.args[1:])
def _eval_transpose(self):
return self
@property
def free_symbols(self):
return self.expr.free_symbols
@property
def is_commutative(self):
"""
Override Function's is_commutative so that order is preserved in
represented expressions
"""
return False
@classmethod
def eval(self, *args):
return None
@property
def variables(self):
"""
Return the coordinates which the wavefunction depends on
Examples
========
>>> from sympy.physics.quantum.state import Wavefunction
>>> from sympy import symbols
>>> x,y = symbols('x,y')
>>> f = Wavefunction(x*y, x, y)
>>> f.variables
(x, y)
>>> g = Wavefunction(x*y, x)
>>> g.variables
(x,)
"""
var = [g[0] if isinstance(g, Tuple) else g for g in self._args[1:]]
return tuple(var)
@property
def limits(self):
"""
        Return the limits of the coordinates which the w.f. depends on. If no
limits are specified, defaults to ``(-oo, oo)``.
Examples
========
>>> from sympy.physics.quantum.state import Wavefunction
>>> from sympy import symbols
>>> x, y = symbols('x, y')
>>> f = Wavefunction(x**2, (x, 0, 1))
>>> f.limits
{x: (0, 1)}
>>> f = Wavefunction(x**2, x)
>>> f.limits
{x: (-oo, oo)}
>>> f = Wavefunction(x**2 + y**2, x, (y, -1, 2))
>>> f.limits
{x: (-oo, oo), y: (-1, 2)}
"""
limits = [(g[1], g[2]) if isinstance(g, Tuple) else (-oo, oo)
for g in self._args[1:]]
return dict(zip(self.variables, tuple(limits)))
@property
def expr(self):
"""
Return the expression which is the functional form of the Wavefunction
Examples
========
>>> from sympy.physics.quantum.state import Wavefunction
>>> from sympy import symbols
>>> x, y = symbols('x, y')
>>> f = Wavefunction(x**2, x)
>>> f.expr
x**2
"""
return self._args[0]
@property
def is_normalized(self):
"""
Returns true if the Wavefunction is properly normalized
Examples
========
>>> from sympy import symbols, pi
>>> from sympy.functions import sqrt, sin
>>> from sympy.physics.quantum.state import Wavefunction
>>> x, L = symbols('x,L', positive=True)
>>> n = symbols('n', integer=True, positive=True)
>>> g = sqrt(2/L)*sin(n*pi*x/L)
>>> f = Wavefunction(g, (x, 0, L))
>>> f.is_normalized
True
"""
return (self.norm == 1.0)
@property
@cacheit
def norm(self):
"""
Return the normalization of the specified functional form.
This function integrates over the coordinates of the Wavefunction, with
the bounds specified.
Examples
========
>>> from sympy import symbols, pi
>>> from sympy.functions import sqrt, sin
>>> from sympy.physics.quantum.state import Wavefunction
>>> x, L = symbols('x,L', positive=True)
>>> n = symbols('n', integer=True, positive=True)
>>> g = sqrt(2/L)*sin(n*pi*x/L)
>>> f = Wavefunction(g, (x, 0, L))
>>> f.norm
1
>>> g = sin(n*pi*x/L)
>>> f = Wavefunction(g, (x, 0, L))
>>> f.norm
sqrt(2)*sqrt(L)/2
"""
exp = self.expr*conjugate(self.expr)
var = self.variables
limits = self.limits
for v in var:
curr_limits = limits[v]
exp = integrate(exp, (v, curr_limits[0], curr_limits[1]))
return sqrt(exp)
def normalize(self):
"""
Return a normalized version of the Wavefunction
Examples
========
>>> from sympy import symbols, pi
>>> from sympy.functions import sqrt, sin
>>> from sympy.physics.quantum.state import Wavefunction
>>> x = symbols('x', real=True)
>>> L = symbols('L', positive=True)
>>> n = symbols('n', integer=True, positive=True)
>>> g = sin(n*pi*x/L)
>>> f = Wavefunction(g, (x, 0, L))
>>> f.normalize()
Wavefunction(sqrt(2)*sin(pi*n*x/L)/sqrt(L), (x, 0, L))
"""
const = self.norm
if const == oo:
raise NotImplementedError("The function is not normalizable!")
else:
return Wavefunction((const)**(-1)*self.expr, *self.args[1:])
def prob(self):
"""
        Return the squared magnitude of the w.f., `|\psi(x)|^2`
Examples
========
>>> from sympy import symbols, pi
>>> from sympy.functions import sqrt, sin
>>> from sympy.physics.quantum.state import Wavefunction
>>> x, L = symbols('x,L', real=True)
>>> n = symbols('n', integer=True)
>>> g = sin(n*pi*x/L)
>>> f = Wavefunction(g, (x, 0, L))
>>> f.prob()
Wavefunction(sin(pi*n*x/L)**2, x)
"""
return Wavefunction(self.expr*conjugate(self.expr), *self.variables)
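# Editor's note: the function below is an illustrative sketch added to this
# excerpt; it is not part of the upstream sympy module. It strings together
# Wavefunction, normalize() and prob() for the particle-in-a-box example used
# in the docstrings above; the helper name and the chosen quantum number are
# assumptions made only for the demonstration.
def _example_particle_in_a_box():
    from sympy import symbols, pi, sin

    x, L = symbols('x, L', positive=True)
    n = symbols('n', integer=True, positive=True)

    # Unnormalized wavefunction on the interval (0, L), then normalize it.
    psi = Wavefunction(sin(n*pi*x/L), (x, 0, L)).normalize()

    # Probability density |psi(x)|**2; evaluating it at the centre of the box
    # for the ground state (n=1) gives 2/L.
    density = psi.prob()
    return density(L/2, n=1)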
| bsd-3-clause |
vivekmishra1991/scikit-learn | sklearn/metrics/cluster/unsupervised.py | 230 | 8281 | """ Unsupervised evaluation metrics. """
# Authors: Robert Layton <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from ...utils import check_random_state
from ..pairwise import pairwise_distances
def silhouette_score(X, labels, metric='euclidean', sample_size=None,
random_state=None, **kwds):
"""Compute the mean Silhouette Coefficient of all samples.
The Silhouette Coefficient is calculated using the mean intra-cluster
distance (``a``) and the mean nearest-cluster distance (``b``) for each
sample. The Silhouette Coefficient for a sample is ``(b - a) / max(a,
b)``. To clarify, ``b`` is the distance between a sample and the nearest
cluster that the sample is not a part of.
    Note that the Silhouette Coefficient is only defined if the number of
    labels satisfies 2 <= n_labels <= n_samples - 1.
This function returns the mean Silhouette Coefficient over all samples.
To obtain the values for each sample, use :func:`silhouette_samples`.
The best value is 1 and the worst value is -1. Values near 0 indicate
overlapping clusters. Negative values generally indicate that a sample has
been assigned to the wrong cluster, as a different cluster is more similar.
Read more in the :ref:`User Guide <silhouette_coefficient>`.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise distances between samples, or a feature array.
labels : array, shape = [n_samples]
Predicted labels for each sample.
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by :func:`metrics.pairwise.pairwise_distances
<sklearn.metrics.pairwise.pairwise_distances>`. If X is the distance
array itself, use ``metric="precomputed"``.
sample_size : int or None
The size of the sample to use when computing the Silhouette Coefficient
on a random subset of the data.
If ``sample_size is None``, no sampling is used.
random_state : integer or numpy.RandomState, optional
The generator used to randomly select a subset of samples if
``sample_size is not None``. If an integer is given, it fixes the seed.
Defaults to the global numpy random number generator.
`**kwds` : optional keyword parameters
Any further parameters are passed directly to the distance function.
If using a scipy.spatial.distance metric, the parameters are still
metric dependent. See the scipy docs for usage examples.
Returns
-------
silhouette : float
Mean Silhouette Coefficient for all samples.
References
----------
.. [1] `Peter J. Rousseeuw (1987). "Silhouettes: a Graphical Aid to the
Interpretation and Validation of Cluster Analysis". Computational
and Applied Mathematics 20: 53-65.
<http://www.sciencedirect.com/science/article/pii/0377042787901257>`_
.. [2] `Wikipedia entry on the Silhouette Coefficient
<http://en.wikipedia.org/wiki/Silhouette_(clustering)>`_
"""
n_labels = len(np.unique(labels))
n_samples = X.shape[0]
if not 1 < n_labels < n_samples:
raise ValueError("Number of labels is %d. Valid values are 2 "
"to n_samples - 1 (inclusive)" % n_labels)
if sample_size is not None:
random_state = check_random_state(random_state)
indices = random_state.permutation(X.shape[0])[:sample_size]
if metric == "precomputed":
X, labels = X[indices].T[indices].T, labels[indices]
else:
X, labels = X[indices], labels[indices]
return np.mean(silhouette_samples(X, labels, metric=metric, **kwds))
def silhouette_samples(X, labels, metric='euclidean', **kwds):
"""Compute the Silhouette Coefficient for each sample.
The Silhouette Coefficient is a measure of how well samples are clustered
with samples that are similar to themselves. Clustering models with a high
Silhouette Coefficient are said to be dense, where samples in the same
cluster are similar to each other, and well separated, where samples in
different clusters are not very similar to each other.
The Silhouette Coefficient is calculated using the mean intra-cluster
distance (``a``) and the mean nearest-cluster distance (``b``) for each
sample. The Silhouette Coefficient for a sample is ``(b - a) / max(a,
b)``.
    Note that the Silhouette Coefficient is only defined if the number of
    labels satisfies 2 <= n_labels <= n_samples - 1.
This function returns the Silhouette Coefficient for each sample.
The best value is 1 and the worst value is -1. Values near 0 indicate
overlapping clusters.
Read more in the :ref:`User Guide <silhouette_coefficient>`.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise distances between samples, or a feature array.
labels : array, shape = [n_samples]
label values for each sample
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by :func:`sklearn.metrics.pairwise.pairwise_distances`. If X is
the distance array itself, use "precomputed" as the metric.
`**kwds` : optional keyword parameters
Any further parameters are passed directly to the distance function.
If using a ``scipy.spatial.distance`` metric, the parameters are still
metric dependent. See the scipy docs for usage examples.
Returns
-------
silhouette : array, shape = [n_samples]
        Silhouette Coefficient for each sample.
References
----------
.. [1] `Peter J. Rousseeuw (1987). "Silhouettes: a Graphical Aid to the
Interpretation and Validation of Cluster Analysis". Computational
and Applied Mathematics 20: 53-65.
<http://www.sciencedirect.com/science/article/pii/0377042787901257>`_
.. [2] `Wikipedia entry on the Silhouette Coefficient
<http://en.wikipedia.org/wiki/Silhouette_(clustering)>`_
"""
distances = pairwise_distances(X, metric=metric, **kwds)
n = labels.shape[0]
A = np.array([_intra_cluster_distance(distances[i], labels, i)
for i in range(n)])
B = np.array([_nearest_cluster_distance(distances[i], labels, i)
for i in range(n)])
sil_samples = (B - A) / np.maximum(A, B)
return sil_samples
def _intra_cluster_distance(distances_row, labels, i):
"""Calculate the mean intra-cluster distance for sample i.
Parameters
----------
distances_row : array, shape = [n_samples]
Pairwise distance matrix between sample i and each sample.
labels : array, shape = [n_samples]
label values for each sample
i : int
Sample index being calculated. It is excluded from calculation and
used to determine the current label
Returns
-------
a : float
Mean intra-cluster distance for sample i
"""
mask = labels == labels[i]
mask[i] = False
if not np.any(mask):
# cluster of size 1
return 0
a = np.mean(distances_row[mask])
return a
def _nearest_cluster_distance(distances_row, labels, i):
"""Calculate the mean nearest-cluster distance for sample i.
Parameters
----------
distances_row : array, shape = [n_samples]
Pairwise distance matrix between sample i and each sample.
labels : array, shape = [n_samples]
label values for each sample
i : int
Sample index being calculated. It is used to determine the current
label.
Returns
-------
b : float
Mean nearest-cluster distance for sample i
"""
label = labels[i]
b = np.min([np.mean(distances_row[labels == cur_label])
for cur_label in set(labels) if not cur_label == label])
return b
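# Editor's note: illustrative usage sketch appended to this excerpt; it is not
# part of the upstream scikit-learn module. The make_blobs/KMeans imports and
# the parameter values are assumptions chosen only to show a typical call.
def _example_silhouette_usage():
    from sklearn.cluster import KMeans
    from sklearn.datasets import make_blobs
    from sklearn.metrics import silhouette_score, silhouette_samples

    X, _ = make_blobs(n_samples=300, centers=4, random_state=0)
    labels = KMeans(n_clusters=4, random_state=0).fit_predict(X)

    # Mean score near 1 -> dense, well-separated clusters; near 0 -> overlap;
    # negative -> samples likely assigned to the wrong cluster.
    mean_score = silhouette_score(X, labels, metric='euclidean')
    per_sample = silhouette_samples(X, labels, metric='euclidean')
    return mean_score, per_sample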
| bsd-3-clause |
satishgoda/bokeh | bokeh/mplexporter/exporter.py | 11 | 12080 | """
Matplotlib Exporter
===================
This submodule contains tools for crawling a matplotlib figure and exporting
relevant pieces to a renderer.
"""
from __future__ import absolute_import
import warnings
import io
from . import utils
import matplotlib
from matplotlib import transforms
class Exporter(object):
"""Matplotlib Exporter
Parameters
----------
renderer : Renderer object
The renderer object called by the exporter to create a figure
visualization. See mplexporter.Renderer for information on the
methods which should be defined within the renderer.
close_mpl : bool
If True (default), close the matplotlib figure as it is rendered. This
is useful for when the exporter is used within the notebook, or with
an interactive matplotlib backend.
"""
def __init__(self, renderer, close_mpl=True):
self.close_mpl = close_mpl
self.renderer = renderer
def run(self, fig):
"""
Run the exporter on the given figure
        Parameters
        ----------
fig : matplotlib.Figure instance
The figure to export
"""
# Calling savefig executes the draw() command, putting elements
# in the correct place.
fig.savefig(io.BytesIO(), format='png', dpi=fig.dpi)
if self.close_mpl:
import matplotlib.pyplot as plt
plt.close(fig)
self.crawl_fig(fig)
@staticmethod
def process_transform(transform, ax=None, data=None, return_trans=False,
force_trans=None):
"""Process the transform and convert data to figure or data coordinates
Parameters
----------
transform : matplotlib Transform object
The transform applied to the data
ax : matplotlib Axes object (optional)
The axes the data is associated with
data : ndarray (optional)
The array of data to be transformed.
return_trans : bool (optional)
If true, return the final transform of the data
force_trans : matplotlib.transform instance (optional)
If supplied, first force the data to this transform
Returns
-------
code : string
Code is either "data", "axes", "figure", or "display", indicating
the type of coordinates output.
transform : matplotlib transform
the transform used to map input data to output data.
Returned only if return_trans is True
new_data : ndarray
Data transformed to match the given coordinate code.
Returned only if data is specified
"""
if isinstance(transform, transforms.BlendedGenericTransform):
warnings.warn("Blended transforms not yet supported. "
"Zoom behavior may not work as expected.")
if force_trans is not None:
if data is not None:
data = (transform - force_trans).transform(data)
transform = force_trans
code = "display"
if ax is not None:
for (c, trans) in [("data", ax.transData),
("axes", ax.transAxes),
("figure", ax.figure.transFigure),
("display", transforms.IdentityTransform())]:
if transform.contains_branch(trans):
code, transform = (c, transform - trans)
break
if data is not None:
if return_trans:
return code, transform.transform(data), transform
else:
return code, transform.transform(data)
else:
if return_trans:
return code, transform
else:
return code
def crawl_fig(self, fig):
"""Crawl the figure and process all axes"""
with self.renderer.draw_figure(fig=fig,
props=utils.get_figure_properties(fig)):
for ax in fig.axes:
self.crawl_ax(ax)
def crawl_ax(self, ax):
"""Crawl the axes and process all elements within"""
with self.renderer.draw_axes(ax=ax,
props=utils.get_axes_properties(ax)):
for line in ax.lines:
self.draw_line(ax, line)
for text in ax.texts:
self.draw_text(ax, text)
for (text, ttp) in zip([ax.xaxis.label, ax.yaxis.label, ax.title],
["xlabel", "ylabel", "title"]):
if(hasattr(text, 'get_text') and text.get_text()):
self.draw_text(ax, text, force_trans=ax.transAxes,
text_type=ttp)
for artist in ax.artists:
# TODO: process other artists
if isinstance(artist, matplotlib.text.Text):
self.draw_text(ax, artist)
for patch in ax.patches:
self.draw_patch(ax, patch)
for collection in ax.collections:
self.draw_collection(ax, collection)
for image in ax.images:
self.draw_image(ax, image)
legend = ax.get_legend()
if legend is not None:
props = utils.get_legend_properties(ax, legend)
with self.renderer.draw_legend(legend=legend, props=props):
if props['visible']:
self.crawl_legend(ax, legend)
def crawl_legend(self, ax, legend):
"""
Recursively look through objects in legend children
"""
legendElements = list(utils.iter_all_children(legend._legend_box,
skipContainers=True))
legendElements.append(legend.legendPatch)
for child in legendElements:
# force a large zorder so it appears on top
child.set_zorder(1E6 + child.get_zorder())
try:
# What kind of object...
if isinstance(child, matplotlib.patches.Patch):
self.draw_patch(ax, child, force_trans=ax.transAxes)
elif isinstance(child, matplotlib.text.Text):
if not (child is legend.get_children()[-1]
and child.get_text() == 'None'):
self.draw_text(ax, child, force_trans=ax.transAxes)
elif isinstance(child, matplotlib.lines.Line2D):
self.draw_line(ax, child, force_trans=ax.transAxes)
else:
                    warnings.warn("Legend element %s not implemented" % child)
except NotImplementedError:
                warnings.warn("Legend element %s not implemented" % child)
def draw_line(self, ax, line, force_trans=None):
"""Process a matplotlib line and call renderer.draw_line"""
coordinates, data = self.process_transform(line.get_transform(),
ax, line.get_xydata(),
force_trans=force_trans)
linestyle = utils.get_line_style(line)
if linestyle['dasharray'] in ['None', 'none', None]:
linestyle = None
markerstyle = utils.get_marker_style(line)
if (markerstyle['marker'] in ['None', 'none', None]
or markerstyle['markerpath'][0].size == 0):
markerstyle = None
label = line.get_label()
if markerstyle or linestyle:
self.renderer.draw_marked_line(data=data, coordinates=coordinates,
linestyle=linestyle,
markerstyle=markerstyle,
label=label,
mplobj=line)
def draw_text(self, ax, text, force_trans=None, text_type=None):
"""Process a matplotlib text object and call renderer.draw_text"""
content = text.get_text()
if content:
transform = text.get_transform()
position = text.get_position()
coords, position = self.process_transform(transform, ax,
position,
force_trans=force_trans)
style = utils.get_text_style(text)
self.renderer.draw_text(text=content, position=position,
coordinates=coords,
text_type=text_type,
style=style, mplobj=text)
def draw_patch(self, ax, patch, force_trans=None):
"""Process a matplotlib patch object and call renderer.draw_path"""
vertices, pathcodes = utils.SVG_path(patch.get_path())
transform = patch.get_transform()
coordinates, vertices = self.process_transform(transform,
ax, vertices,
force_trans=force_trans)
linestyle = utils.get_path_style(patch, fill=patch.get_fill())
self.renderer.draw_path(data=vertices,
coordinates=coordinates,
pathcodes=pathcodes,
style=linestyle,
mplobj=patch)
def draw_collection(self, ax, collection,
force_pathtrans=None,
force_offsettrans=None):
"""Process a matplotlib collection and call renderer.draw_collection"""
(transform, transOffset,
offsets, paths) = collection._prepare_points()
offset_coords, offsets = self.process_transform(
transOffset, ax, offsets, force_trans=force_offsettrans)
processed_paths = [utils.SVG_path(path) for path in paths]
path_coords, tr = self.process_transform(
transform, ax, return_trans=True, force_trans=force_pathtrans)
processed_paths = [(tr.transform(path[0]), path[1])
for path in processed_paths]
path_transforms = collection.get_transforms()
try:
# matplotlib 1.3: path_transforms are transform objects.
# Convert them to numpy arrays.
path_transforms = [t.get_matrix() for t in path_transforms]
except AttributeError:
# matplotlib 1.4: path transforms are already numpy arrays.
pass
styles = {'linewidth': collection.get_linewidths(),
'facecolor': collection.get_facecolors(),
'edgecolor': collection.get_edgecolors(),
'alpha': collection._alpha,
'zorder': collection.get_zorder()}
offset_dict = {"data": "before",
"screen": "after"}
offset_order = offset_dict[collection.get_offset_position()]
self.renderer.draw_path_collection(paths=processed_paths,
path_coordinates=path_coords,
path_transforms=path_transforms,
offsets=offsets,
offset_coordinates=offset_coords,
offset_order=offset_order,
styles=styles,
mplobj=collection)
def draw_image(self, ax, image):
"""Process a matplotlib image object and call renderer.draw_image"""
self.renderer.draw_image(imdata=utils.image_to_base64(image),
extent=image.get_extent(),
coordinates="data",
style={"alpha": image.get_alpha(),
"zorder": image.get_zorder()},
mplobj=image)
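# Editor's note: illustrative sketch appended to this excerpt; it is not part
# of the upstream module. `renderer` stands for any object implementing the
# draw_figure/draw_axes/draw_line/... hooks that Exporter calls above; no
# concrete renderer class is assumed here.
def _example_export_figure(renderer):
    import matplotlib.pyplot as plt

    fig, ax = plt.subplots()
    ax.plot([0, 1, 2], [0, 1, 4], marker='o', label='squares')
    ax.set_title('demo figure')

    # Walk the figure and emit draw_* calls on the supplied renderer.
    Exporter(renderer, close_mpl=True).run(fig)
    return renderer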
| bsd-3-clause |
gregcaporaso/scikit-bio | skbio/io/format/ordination.py | 7 | 14424 | r"""
Ordination results format (:mod:`skbio.io.format.ordination`)
=============================================================
.. currentmodule:: skbio.io.format.ordination
The ordination results file format (``ordination``) stores the results of an
ordination method in a human-readable, text-based format. The format supports
storing the results of various ordination methods available in scikit-bio,
including (but not necessarily limited to) PCoA, CA, RDA, and CCA.
Format Support
--------------
**Has Sniffer: Yes**
+------+------+---------------------------------------------------------------+
|Reader|Writer| Object Class |
+======+======+===============================================================+
|Yes |Yes |:mod:`skbio.stats.ordination.OrdinationResults` |
+------+------+---------------------------------------------------------------+
Format Specification
--------------------
The format is text-based, consisting of six attributes that describe the
ordination results:
- ``Eigvals``: 1-D
- ``Proportion explained``: 1-D
- ``Species``: 2-D
- ``Site``: 2-D
- ``Biplot``: 2-D
- ``Site constraints``: 2-D
The attributes in the file *must* be in this order.
Each attribute is defined in its own section of the file, where sections are
separated by a blank (or whitespace-only) line. Each attribute begins with a
header line, which contains the attribute's name (as listed above), followed by
a tab character, followed by one or more tab-separated dimensions (integers)
that describe the shape of the attribute's data.
The attribute's data follows its header line, and is stored in tab-separated
format. ``Species`` stores species IDs and ``Site``/``Site constraints`` store
site IDs as the first column, followed by the 2-D data array.
An example of this file format might look like::
Eigvals<tab>4
0.36<tab>0.18<tab>0.07<tab>0.08
Proportion explained<tab>4
0.46<tab>0.23<tab>0.10<tab>0.10
Species<tab>9<tab>4
Species0<tab>0.11<tab>0.28<tab>-0.20<tab>-0.00
Species1<tab>0.14<tab>0.30<tab>0.39<tab>-0.14
Species2<tab>-1.01<tab>0.09<tab>-0.19<tab>-0.10
Species3<tab>-1.03<tab>0.10<tab>0.22<tab>0.22
Species4<tab>1.05<tab>0.53<tab>-0.43<tab>0.22
Species5<tab>0.99<tab>0.57<tab>0.67<tab>-0.38
Species6<tab>0.25<tab>-0.17<tab>-0.20<tab>0.43
Species7<tab>0.14<tab>-0.85<tab>-0.01<tab>0.05
Species8<tab>0.41<tab>-0.70<tab>0.21<tab>-0.69
Site<tab>10<tab>4
Site0<tab>0.71<tab>-3.08<tab>0.21<tab>-1.24
Site1<tab>0.58<tab>-3.00<tab>-0.94<tab>2.69
Site2<tab>0.76<tab>-3.15<tab>2.13<tab>-3.11
Site3<tab>1.11<tab>1.07<tab>-1.87<tab>0.66
Site4<tab>-0.97<tab>-0.06<tab>-0.69<tab>-0.61
Site5<tab>1.04<tab>0.45<tab>-0.63<tab>0.28
Site6<tab>-0.95<tab>-0.08<tab>0.13<tab>-0.42
Site7<tab>0.94<tab>-0.10<tab>0.52<tab>-0.00
Site8<tab>-1.14<tab>0.49<tab>0.47<tab>1.17
Site9<tab>1.03<tab>1.03<tab>2.74<tab>-1.28
Biplot<tab>3<tab>3
-0.16<tab>0.63<tab>0.76
-0.99<tab>0.06<tab>-0.04
0.18<tab>-0.97<tab>0.03
Site constraints<tab>10<tab>4
Site0<tab>0.69<tab>-3.08<tab>-0.32<tab>-1.24
Site1<tab>0.66<tab>-3.06<tab>0.23<tab>2.69
Site2<tab>0.63<tab>-3.04<tab>0.78<tab>-3.11
Site3<tab>1.10<tab>0.50<tab>-1.55<tab>0.66
Site4<tab>-0.97<tab>0.06<tab>-1.12<tab>-0.61
Site5<tab>1.05<tab>0.53<tab>-0.43<tab>0.28
Site6<tab>-1.02<tab>0.10<tab>-0.00<tab>-0.42
Site7<tab>0.99<tab>0.57<tab>0.67<tab>-0.00
Site8<tab>-1.08<tab>0.13<tab>1.11<tab>1.17
Site9<tab>0.94<tab>0.61<tab>1.79<tab>-1.28
If a given result attribute is not present (e.g. ``Biplot``), it should still
be defined and declare its dimensions as 0. For example::
Biplot<tab>0<tab>0
All attributes are optional except for ``Eigvals``.
Examples
--------
Assume we have the following tab-delimited text file storing the
ordination results in ``ordination`` format::
Eigvals<tab>4
0.36<tab>0.18<tab>0.07<tab>0.08
Proportion explained<tab>4
0.46<tab>0.23<tab>0.10<tab>0.10
Species<tab>9<tab>4
Species0<tab>0.11<tab>0.28<tab>-0.20<tab>-0.00
Species1<tab>0.14<tab>0.30<tab>0.39<tab>-0.14
Species2<tab>-1.01<tab>0.09<tab>-0.19<tab>-0.10
Species3<tab>-1.03<tab>0.10<tab>0.22<tab>0.22
Species4<tab>1.05<tab>0.53<tab>-0.43<tab>0.22
Species5<tab>0.99<tab>0.57<tab>0.67<tab>-0.38
Species6<tab>0.25<tab>-0.17<tab>-0.20<tab>0.43
Species7<tab>0.14<tab>-0.85<tab>-0.01<tab>0.05
Species8<tab>0.41<tab>-0.70<tab>0.21<tab>-0.69
Site<tab>10<tab>4
Site0<tab>0.71<tab>-3.08<tab>0.21<tab>-1.24
Site1<tab>0.58<tab>-3.00<tab>-0.94<tab>2.69
Site2<tab>0.76<tab>-3.15<tab>2.13<tab>-3.11
Site3<tab>1.11<tab>1.07<tab>-1.87<tab>0.66
Site4<tab>-0.97<tab>-0.06<tab>-0.69<tab>-0.61
Site5<tab>1.04<tab>0.45<tab>-0.63<tab>0.28
Site6<tab>-0.95<tab>-0.08<tab>0.13<tab>-0.42
Site7<tab>0.94<tab>-0.10<tab>0.52<tab>-0.00
Site8<tab>-1.14<tab>0.49<tab>0.47<tab>1.17
Site9<tab>1.03<tab>1.03<tab>2.74<tab>-1.28
Biplot<tab>0<tab>0
Site constraints<tab>0<tab>0
Load the ordination results from the file:
>>> from io import StringIO
>>> from skbio import OrdinationResults
>>> or_f = StringIO(
... "Eigvals\t4\n"
... "0.36\t0.18\t0.07\t0.08\n"
... "\n"
... "Proportion explained\t4\n"
... "0.46\t0.23\t0.10\t0.10\n"
... "\n"
... "Species\t9\t4\n"
... "Species0\t0.11\t0.28\t-0.20\t-0.00\n"
... "Species1\t0.14\t0.30\t0.39\t-0.14\n"
... "Species2\t-1.01\t0.09\t-0.19\t-0.10\n"
... "Species3\t-1.03\t0.10\t0.22\t0.22\n"
... "Species4\t1.05\t0.53\t-0.43\t0.22\n"
... "Species5\t0.99\t0.57\t0.67\t-0.38\n"
... "Species6\t0.25\t-0.17\t-0.20\t0.43\n"
... "Species7\t0.14\t-0.85\t-0.01\t0.05\n"
... "Species8\t0.41\t-0.70\t0.21\t-0.69\n"
... "\n"
... "Site\t10\t4\n"
... "Site0\t0.71\t-3.08\t0.21\t-1.24\n"
... "Site1\t0.58\t-3.00\t-0.94\t2.69\n"
... "Site2\t0.76\t-3.15\t2.13\t-3.11\n"
... "Site3\t1.11\t1.07\t-1.87\t0.66\n"
... "Site4\t-0.97\t-0.06\t-0.69\t-0.61\n"
... "Site5\t1.04\t0.45\t-0.63\t0.28\n"
... "Site6\t-0.95\t-0.08\t0.13\t-0.42\n"
... "Site7\t0.94\t-0.10\t0.52\t-0.00\n"
... "Site8\t-1.14\t0.49\t0.47\t1.17\n"
... "Site9\t1.03\t1.03\t2.74\t-1.28\n"
... "\n"
... "Biplot\t0\t0\n"
... "\n"
... "Site constraints\t0\t0\n")
>>> ord_res = OrdinationResults.read(or_f)
"""
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
import numpy as np
import pandas as pd
from skbio.stats.ordination import OrdinationResults
from skbio.io import create_format, OrdinationFormatError
ordination = create_format('ordination')
@ordination.sniffer()
def _ordination_sniffer(fh):
# Smells an ordination file if *all* of the following lines are present
# *from the beginning* of the file:
# - eigvals header (minimally parsed)
# - another line (contents ignored)
# - a whitespace-only line
# - proportion explained header (minimally parsed)
try:
_parse_header(fh, 'Eigvals', 1)
next_line = next(fh, None)
if next_line is not None:
_check_empty_line(fh)
_parse_header(fh, 'Proportion explained', 1)
return True, {}
except OrdinationFormatError:
pass
return False, {}
@ordination.reader(OrdinationResults)
def _ordination_to_ordination_results(fh):
eigvals = _parse_vector_section(fh, 'Eigvals')
if eigvals is None:
raise OrdinationFormatError("At least one eigval must be present.")
_check_empty_line(fh)
prop_expl = _parse_vector_section(fh, 'Proportion explained')
_check_length_against_eigvals(prop_expl, eigvals,
'proportion explained values')
_check_empty_line(fh)
species = _parse_array_section(fh, 'Species')
_check_length_against_eigvals(species, eigvals,
'coordinates per species')
_check_empty_line(fh)
site = _parse_array_section(fh, 'Site')
_check_length_against_eigvals(site, eigvals,
'coordinates per site')
_check_empty_line(fh)
# biplot does not have ids to parse (the other arrays do)
biplot = _parse_array_section(fh, 'Biplot', has_ids=False)
_check_empty_line(fh)
cons = _parse_array_section(fh, 'Site constraints')
if cons is not None and site is not None:
if not np.array_equal(cons.index, site.index):
raise OrdinationFormatError(
"Site constraints ids and site ids must be equal: %s != %s" %
(cons.index, site.index))
return OrdinationResults(
short_method_name='', long_method_name='', eigvals=eigvals,
features=species, samples=site, biplot_scores=biplot,
sample_constraints=cons, proportion_explained=prop_expl)
def _parse_header(fh, header_id, num_dimensions):
line = next(fh, None)
if line is None:
raise OrdinationFormatError(
"Reached end of file while looking for %s header." % header_id)
header = line.strip().split('\t')
# +1 for the header ID
if len(header) != num_dimensions + 1 or header[0] != header_id:
raise OrdinationFormatError("%s header not found." % header_id)
return header
def _check_empty_line(fh):
"""Check that the next line in `fh` is empty or whitespace-only."""
line = next(fh, None)
if line is None:
raise OrdinationFormatError(
"Reached end of file while looking for blank line separating "
"sections.")
if line.strip():
raise OrdinationFormatError("Expected an empty line.")
def _check_length_against_eigvals(data, eigvals, label):
if data is not None:
num_vals = data.shape[-1]
num_eigvals = eigvals.shape[-1]
if num_vals != num_eigvals:
raise OrdinationFormatError(
"There should be as many %s as eigvals: %d != %d" %
(label, num_vals, num_eigvals))
def _parse_vector_section(fh, header_id):
header = _parse_header(fh, header_id, 1)
# Parse how many values we are waiting for
num_vals = int(header[1])
if num_vals == 0:
# The ordination method didn't generate the vector, so set it to None
vals = None
else:
# Parse the line with the vector values
line = next(fh, None)
if line is None:
raise OrdinationFormatError(
"Reached end of file while looking for line containing values "
"for %s section." % header_id)
vals = pd.Series(np.asarray(line.strip().split('\t'),
dtype=np.float64))
if len(vals) != num_vals:
raise OrdinationFormatError(
"Expected %d values in %s section, but found %d." %
(num_vals, header_id, len(vals)))
return vals
def _parse_array_section(fh, header_id, has_ids=True):
"""Parse an array section of `fh` identified by `header_id`."""
# Parse the array header
header = _parse_header(fh, header_id, 2)
# Parse the dimensions of the array
rows = int(header[1])
cols = int(header[2])
ids = None
if rows == 0 and cols == 0:
# The ordination method didn't generate the array data for 'header', so
# set it to None
data = None
elif rows == 0 or cols == 0:
# Both dimensions should be 0 or none of them are zero
raise OrdinationFormatError("One dimension of %s is 0: %d x %d" %
(header_id, rows, cols))
else:
# Parse the data
data = np.empty((rows, cols), dtype=np.float64)
if has_ids:
ids = []
for i in range(rows):
# Parse the next row of data
line = next(fh, None)
if line is None:
raise OrdinationFormatError(
"Reached end of file while looking for row %d in %s "
"section." % (i + 1, header_id))
vals = line.strip().split('\t')
if has_ids:
ids.append(vals[0])
vals = vals[1:]
if len(vals) != cols:
raise OrdinationFormatError(
"Expected %d values, but found %d in row %d." %
(cols, len(vals), i + 1))
data[i, :] = np.asarray(vals, dtype=np.float64)
data = pd.DataFrame(data, index=ids)
return data
@ordination.writer(OrdinationResults)
def _ordination_results_to_ordination(obj, fh):
_write_vector_section(fh, 'Eigvals', obj.eigvals)
_write_vector_section(fh, 'Proportion explained', obj.proportion_explained)
_write_array_section(fh, 'Species', obj.features)
_write_array_section(fh, 'Site', obj.samples)
_write_array_section(fh, 'Biplot', obj.biplot_scores, has_ids=False)
_write_array_section(fh, 'Site constraints', obj.sample_constraints,
include_section_separator=False)
def _write_vector_section(fh, header_id, vector):
if vector is None:
shape = 0
else:
shape = vector.shape[0]
fh.write("%s\t%d\n" % (header_id, shape))
if vector is not None:
fh.write(_format_vector(vector.values))
fh.write("\n")
def _write_array_section(fh, header_id, data, has_ids=True,
include_section_separator=True):
# write section header
if data is None:
shape = (0, 0)
else:
shape = data.shape
fh.write("%s\t%d\t%d\n" % (header_id, shape[0], shape[1]))
# write section data
if data is not None:
if not has_ids:
for vals in data.values:
fh.write(_format_vector(vals))
else:
for id_, vals in zip(data.index, data.values):
fh.write(_format_vector(vals, id_))
if include_section_separator:
fh.write("\n")
def _format_vector(vector, id_=None):
formatted_vector = '\t'.join(np.asarray(vector, dtype=np.str))
if id_ is None:
return "%s\n" % formatted_vector
else:
return "%s\t%s\n" % (id_, formatted_vector)
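# Editor's note: illustrative sketch appended to this excerpt; it is not part
# of the upstream scikit-bio module. It assumes the skbio I/O registry's
# ``read``/``write`` interface shown in the module docstring, and simply
# round-trips an existing OrdinationResults object through the 'ordination'
# format.
def _example_roundtrip(ord_res):
    import io

    buf = io.StringIO()
    ord_res.write(buf, format='ordination')   # serialize with the writer above
    buf.seek(0)
    return OrdinationResults.read(buf, format='ordination')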
| bsd-3-clause |
voxlol/scikit-learn | sklearn/datasets/svmlight_format.py | 114 | 15826 | """This module implements a loader and dumper for the svmlight format
This format is a text-based format, with one sample per line. It does
not store zero-valued features, hence it is suitable for sparse datasets.
The first element of each line can be used to store a target variable to
predict.
This format is used as the default format for both svmlight and the
libsvm command line programs.
"""
# Authors: Mathieu Blondel <[email protected]>
# Lars Buitinck <[email protected]>
# Olivier Grisel <[email protected]>
# License: BSD 3 clause
from contextlib import closing
import io
import os.path
import numpy as np
import scipy.sparse as sp
from ._svmlight_format import _load_svmlight_file
from .. import __version__
from ..externals import six
from ..externals.six import u, b
from ..externals.six.moves import range, zip
from ..utils import check_array
from ..utils.fixes import frombuffer_empty
def load_svmlight_file(f, n_features=None, dtype=np.float64,
multilabel=False, zero_based="auto", query_id=False):
"""Load datasets in the svmlight / libsvm format into sparse CSR matrix
This format is a text-based format, with one sample per line. It does
    not store zero-valued features, hence it is suitable for sparse datasets.
The first element of each line can be used to store a target variable
to predict.
This format is used as the default format for both svmlight and the
libsvm command line programs.
    Parsing a text-based source can be expensive. When working
repeatedly on the same dataset, it is recommended to wrap this
loader with joblib.Memory.cache to store a memmapped backup of the
CSR results of the first call and benefit from the near instantaneous
loading of memmapped structures for the subsequent calls.
In case the file contains a pairwise preference constraint (known
as "qid" in the svmlight format) these are ignored unless the
query_id parameter is set to True. These pairwise preference
constraints can be used to constraint the combination of samples
when using pairwise loss functions (as is the case in some
learning to rank problems) so that only pairs with the same
query_id value are considered.
This implementation is written in Cython and is reasonably fast.
However, a faster API-compatible loader is also available at:
https://github.com/mblondel/svmlight-loader
Parameters
----------
f : {str, file-like, int}
(Path to) a file to load. If a path ends in ".gz" or ".bz2", it will
be uncompressed on the fly. If an integer is passed, it is assumed to
be a file descriptor. A file-like or file descriptor will not be closed
by this function. A file-like object must be opened in binary mode.
n_features : int or None
The number of features to use. If None, it will be inferred. This
argument is useful to load several files that are subsets of a
bigger sliced dataset: each subset might not have examples of
every feature, hence the inferred shape might vary from one
slice to another.
multilabel : boolean, optional, default False
Samples may have several labels each (see
http://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multilabel.html)
zero_based : boolean or "auto", optional, default "auto"
Whether column indices in f are zero-based (True) or one-based
(False). If column indices are one-based, they are transformed to
zero-based to match Python/NumPy conventions.
If set to "auto", a heuristic check is applied to determine this from
the file contents. Both kinds of files occur "in the wild", but they
are unfortunately not self-identifying. Using "auto" or True should
always be safe.
query_id : boolean, default False
If True, will return the query_id array for each file.
dtype : numpy data type, default np.float64
Data type of dataset to be loaded. This will be the data type of the
output numpy arrays ``X`` and ``y``.
Returns
-------
X: scipy.sparse matrix of shape (n_samples, n_features)
    y: ndarray of shape (n_samples,), or, in the multilabel case, a list of
tuples of length n_samples.
query_id: array of shape (n_samples,)
query_id for each sample. Only returned when query_id is set to
True.
See also
--------
load_svmlight_files: similar function for loading multiple files in this
format, enforcing the same number of features/columns on all of them.
Examples
--------
To use joblib.Memory to cache the svmlight file::
from sklearn.externals.joblib import Memory
from sklearn.datasets import load_svmlight_file
mem = Memory("./mycache")
@mem.cache
def get_data():
data = load_svmlight_file("mysvmlightfile")
return data[0], data[1]
X, y = get_data()
"""
return tuple(load_svmlight_files([f], n_features, dtype, multilabel,
zero_based, query_id))
def _gen_open(f):
if isinstance(f, int): # file descriptor
return io.open(f, "rb", closefd=False)
elif not isinstance(f, six.string_types):
raise TypeError("expected {str, int, file-like}, got %s" % type(f))
_, ext = os.path.splitext(f)
if ext == ".gz":
import gzip
return gzip.open(f, "rb")
elif ext == ".bz2":
from bz2 import BZ2File
return BZ2File(f, "rb")
else:
return open(f, "rb")
def _open_and_load(f, dtype, multilabel, zero_based, query_id):
if hasattr(f, "read"):
actual_dtype, data, ind, indptr, labels, query = \
_load_svmlight_file(f, dtype, multilabel, zero_based, query_id)
# XXX remove closing when Python 2.7+/3.1+ required
else:
with closing(_gen_open(f)) as f:
actual_dtype, data, ind, indptr, labels, query = \
_load_svmlight_file(f, dtype, multilabel, zero_based, query_id)
# convert from array.array, give data the right dtype
if not multilabel:
labels = frombuffer_empty(labels, np.float64)
data = frombuffer_empty(data, actual_dtype)
indices = frombuffer_empty(ind, np.intc)
indptr = np.frombuffer(indptr, dtype=np.intc) # never empty
query = frombuffer_empty(query, np.intc)
data = np.asarray(data, dtype=dtype) # no-op for float{32,64}
return data, indices, indptr, labels, query
def load_svmlight_files(files, n_features=None, dtype=np.float64,
multilabel=False, zero_based="auto", query_id=False):
"""Load dataset from multiple files in SVMlight format
This function is equivalent to mapping load_svmlight_file over a list of
files, except that the results are concatenated into a single, flat list
    and the sample vectors are constrained to all have the same number of
features.
In case the file contains a pairwise preference constraint (known
as "qid" in the svmlight format) these are ignored unless the
query_id parameter is set to True. These pairwise preference
constraints can be used to constraint the combination of samples
when using pairwise loss functions (as is the case in some
learning to rank problems) so that only pairs with the same
query_id value are considered.
Parameters
----------
files : iterable over {str, file-like, int}
(Paths of) files to load. If a path ends in ".gz" or ".bz2", it will
be uncompressed on the fly. If an integer is passed, it is assumed to
be a file descriptor. File-likes and file descriptors will not be
closed by this function. File-like objects must be opened in binary
mode.
n_features: int or None
The number of features to use. If None, it will be inferred from the
maximum column index occurring in any of the files.
This can be set to a higher value than the actual number of features
in any of the input files, but setting it to a lower value will cause
an exception to be raised.
multilabel: boolean, optional
Samples may have several labels each (see
http://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multilabel.html)
zero_based: boolean or "auto", optional
Whether column indices in f are zero-based (True) or one-based
(False). If column indices are one-based, they are transformed to
zero-based to match Python/NumPy conventions.
If set to "auto", a heuristic check is applied to determine this from
the file contents. Both kinds of files occur "in the wild", but they
are unfortunately not self-identifying. Using "auto" or True should
always be safe.
query_id: boolean, defaults to False
If True, will return the query_id array for each file.
dtype : numpy data type, default np.float64
Data type of dataset to be loaded. This will be the data type of the
output numpy arrays ``X`` and ``y``.
Returns
-------
[X1, y1, ..., Xn, yn]
where each (Xi, yi) pair is the result from load_svmlight_file(files[i]).
If query_id is set to True, this will return instead [X1, y1, q1,
..., Xn, yn, qn] where (Xi, yi, qi) is the result from
load_svmlight_file(files[i])
Notes
-----
When fitting a model to a matrix X_train and evaluating it against a
matrix X_test, it is essential that X_train and X_test have the same
number of features (X_train.shape[1] == X_test.shape[1]). This may not
be the case if you load the files individually with load_svmlight_file.
See also
--------
load_svmlight_file
"""
r = [_open_and_load(f, dtype, multilabel, bool(zero_based), bool(query_id))
for f in files]
if (zero_based is False
or zero_based == "auto" and all(np.min(tmp[1]) > 0 for tmp in r)):
for ind in r:
indices = ind[1]
indices -= 1
n_f = max(ind[1].max() for ind in r) + 1
if n_features is None:
n_features = n_f
elif n_features < n_f:
raise ValueError("n_features was set to {},"
" but input file contains {} features"
.format(n_features, n_f))
result = []
for data, indices, indptr, y, query_values in r:
shape = (indptr.shape[0] - 1, n_features)
X = sp.csr_matrix((data, indices, indptr), shape)
X.sort_indices()
result += X, y
if query_id:
result.append(query_values)
return result
def _dump_svmlight(X, y, f, multilabel, one_based, comment, query_id):
is_sp = int(hasattr(X, "tocsr"))
if X.dtype.kind == 'i':
value_pattern = u("%d:%d")
else:
value_pattern = u("%d:%.16g")
if y.dtype.kind == 'i':
label_pattern = u("%d")
else:
label_pattern = u("%.16g")
line_pattern = u("%s")
if query_id is not None:
line_pattern += u(" qid:%d")
line_pattern += u(" %s\n")
if comment:
f.write(b("# Generated by dump_svmlight_file from scikit-learn %s\n"
% __version__))
f.write(b("# Column indices are %s-based\n"
% ["zero", "one"][one_based]))
f.write(b("#\n"))
f.writelines(b("# %s\n" % line) for line in comment.splitlines())
for i in range(X.shape[0]):
if is_sp:
span = slice(X.indptr[i], X.indptr[i + 1])
row = zip(X.indices[span], X.data[span])
else:
nz = X[i] != 0
row = zip(np.where(nz)[0], X[i, nz])
s = " ".join(value_pattern % (j + one_based, x) for j, x in row)
if multilabel:
nz_labels = np.where(y[i] != 0)[0]
labels_str = ",".join(label_pattern % j for j in nz_labels)
else:
labels_str = label_pattern % y[i]
if query_id is not None:
feat = (labels_str, query_id[i], s)
else:
feat = (labels_str, s)
f.write((line_pattern % feat).encode('ascii'))
def dump_svmlight_file(X, y, f, zero_based=True, comment=None, query_id=None,
multilabel=False):
"""Dump the dataset in svmlight / libsvm file format.
This format is a text-based format, with one sample per line. It does
    not store zero-valued features, hence it is suitable for sparse datasets.
The first element of each line can be used to store a target variable
to predict.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target values.
f : string or file-like in binary mode
If string, specifies the path that will contain the data.
If file-like, data will be written to f. f should be opened in binary
mode.
zero_based : boolean, optional
Whether column indices should be written zero-based (True) or one-based
(False).
comment : string, optional
Comment to insert at the top of the file. This should be either a
Unicode string, which will be encoded as UTF-8, or an ASCII byte
string.
If a comment is given, then it will be preceded by one that identifies
the file as having been dumped by scikit-learn. Note that not all
tools grok comments in SVMlight files.
query_id : array-like, shape = [n_samples]
Array containing pairwise preference constraints (qid in svmlight
format).
multilabel: boolean, optional
Samples may have several labels each (see
http://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multilabel.html)
"""
if comment is not None:
# Convert comment string to list of lines in UTF-8.
# If a byte string is passed, then check whether it's ASCII;
# if a user wants to get fancy, they'll have to decode themselves.
# Avoid mention of str and unicode types for Python 3.x compat.
if isinstance(comment, bytes):
comment.decode("ascii") # just for the exception
else:
comment = comment.encode("utf-8")
if six.b("\0") in comment:
raise ValueError("comment string contains NUL byte")
y = np.asarray(y)
if y.ndim != 1 and not multilabel:
raise ValueError("expected y of shape (n_samples,), got %r"
% (y.shape,))
Xval = check_array(X, accept_sparse='csr')
if Xval.shape[0] != y.shape[0]:
raise ValueError("X.shape[0] and y.shape[0] should be the same, got"
" %r and %r instead." % (Xval.shape[0], y.shape[0]))
# We had some issues with CSR matrices with unsorted indices (e.g. #1501),
# so sort them here, but first make sure we don't modify the user's X.
# TODO We can do this cheaper; sorted_indices copies the whole matrix.
if Xval is X and hasattr(Xval, "sorted_indices"):
X = Xval.sorted_indices()
else:
X = Xval
if hasattr(X, "sort_indices"):
X.sort_indices()
if query_id is not None:
query_id = np.asarray(query_id)
if query_id.shape[0] != y.shape[0]:
raise ValueError("expected query_id of shape (n_samples,), got %r"
% (query_id.shape,))
one_based = not zero_based
if hasattr(f, "write"):
_dump_svmlight(X, y, f, multilabel, one_based, comment, query_id)
else:
with open(f, "wb") as f:
_dump_svmlight(X, y, f, multilabel, one_based, comment, query_id)
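# Editor's note: illustrative sketch appended to this excerpt; it is not part
# of the upstream scikit-learn module. It round-trips a tiny dense matrix
# through the svmlight format using the two public functions defined above;
# the sizes and values are arbitrary.
def _example_svmlight_roundtrip():
    import io

    X = np.array([[0.0, 1.5], [2.0, 0.0]])
    y = np.array([0, 1])

    buf = io.BytesIO()                      # svmlight I/O is byte-oriented
    dump_svmlight_file(X, y, buf, zero_based=True)
    buf.seek(0)

    X2, y2 = load_svmlight_file(buf, n_features=2, zero_based=True)
    return X2.toarray(), y2                 # X2 comes back as a CSR matrix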
| bsd-3-clause |
kaiserroll14/301finalproject | main/pandas/stats/tests/test_ols.py | 9 | 31424 | """
Unit test suite for OLS and PanelOLS classes
"""
# pylint: disable-msg=W0212
from __future__ import division
from datetime import datetime
from pandas import compat
from distutils.version import LooseVersion
import nose
import numpy as np
from numpy.testing.decorators import slow
from pandas import date_range, bdate_range
from pandas.core.panel import Panel
from pandas import DataFrame, Index, Series, notnull, datetools
from pandas.stats.api import ols
from pandas.stats.ols import _filter_data
from pandas.stats.plm import NonPooledPanelOLS, PanelOLS
from pandas.util.testing import (assert_almost_equal, assert_series_equal,
assert_frame_equal, assertRaisesRegexp)
import pandas.util.testing as tm
import pandas.compat as compat
from .common import BaseTest
_have_statsmodels = True
try:
import statsmodels.api as sm
except ImportError:
try:
import scikits.statsmodels.api as sm
except ImportError:
_have_statsmodels = False
def _check_repr(obj):
repr(obj)
str(obj)
def _compare_ols_results(model1, model2):
tm.assertIsInstance(model1, type(model2))
if hasattr(model1, '_window_type'):
_compare_moving_ols(model1, model2)
else:
_compare_fullsample_ols(model1, model2)
def _compare_fullsample_ols(model1, model2):
assert_series_equal(model1.beta, model2.beta)
def _compare_moving_ols(model1, model2):
assert_frame_equal(model1.beta, model2.beta)
class TestOLS(BaseTest):
_multiprocess_can_split_ = True
# TODO: Add tests for OLS y predict
# TODO: Right now we just check for consistency between full-sample and
# rolling/expanding results of the panel OLS. We should also cross-check
# with trusted implementations of panel OLS (e.g. R).
# TODO: Add tests for non pooled OLS.
@classmethod
def setUpClass(cls):
super(TestOLS, cls).setUpClass()
try:
import matplotlib as mpl
mpl.use('Agg', warn=False)
except ImportError:
pass
if not _have_statsmodels:
raise nose.SkipTest("no statsmodels")
def testOLSWithDatasets_ccard(self):
self.checkDataSet(sm.datasets.ccard.load(), skip_moving=True)
self.checkDataSet(sm.datasets.cpunish.load(), skip_moving=True)
self.checkDataSet(sm.datasets.longley.load(), skip_moving=True)
self.checkDataSet(sm.datasets.stackloss.load(), skip_moving=True)
@slow
def testOLSWithDatasets_copper(self):
self.checkDataSet(sm.datasets.copper.load())
@slow
def testOLSWithDatasets_scotland(self):
self.checkDataSet(sm.datasets.scotland.load())
# degenerate case fails on some platforms
# self.checkDataSet(datasets.ccard.load(), 39, 49) # one col in X all
# 0s
def testWLS(self):
# WLS centered SS changed (fixed) in 0.5.0
sm_version = sm.version.version
if sm_version < LooseVersion('0.5.0'):
raise nose.SkipTest("WLS centered SS not fixed in statsmodels"
" version {0}".format(sm_version))
X = DataFrame(np.random.randn(30, 4), columns=['A', 'B', 'C', 'D'])
Y = Series(np.random.randn(30))
weights = X.std(1)
self._check_wls(X, Y, weights)
weights.ix[[5, 15]] = np.nan
Y[[2, 21]] = np.nan
self._check_wls(X, Y, weights)
def _check_wls(self, x, y, weights):
result = ols(y=y, x=x, weights=1 / weights)
combined = x.copy()
combined['__y__'] = y
combined['__weights__'] = weights
combined = combined.dropna()
endog = combined.pop('__y__').values
aweights = combined.pop('__weights__').values
exog = sm.add_constant(combined.values, prepend=False)
sm_result = sm.WLS(endog, exog, weights=1 / aweights).fit()
assert_almost_equal(sm_result.params, result._beta_raw)
assert_almost_equal(sm_result.resid, result._resid_raw)
self.checkMovingOLS('rolling', x, y, weights=weights)
self.checkMovingOLS('expanding', x, y, weights=weights)
def checkDataSet(self, dataset, start=None, end=None, skip_moving=False):
exog = dataset.exog[start: end]
endog = dataset.endog[start: end]
x = DataFrame(exog, index=np.arange(exog.shape[0]),
columns=np.arange(exog.shape[1]))
y = Series(endog, index=np.arange(len(endog)))
self.checkOLS(exog, endog, x, y)
if not skip_moving:
self.checkMovingOLS('rolling', x, y)
self.checkMovingOLS('rolling', x, y, nw_lags=0)
self.checkMovingOLS('expanding', x, y, nw_lags=0)
self.checkMovingOLS('rolling', x, y, nw_lags=1)
self.checkMovingOLS('expanding', x, y, nw_lags=1)
self.checkMovingOLS('expanding', x, y, nw_lags=1, nw_overlap=True)
def checkOLS(self, exog, endog, x, y):
reference = sm.OLS(endog, sm.add_constant(exog, prepend=False)).fit()
result = ols(y=y, x=x)
# check that sparse version is the same
sparse_result = ols(y=y.to_sparse(), x=x.to_sparse())
_compare_ols_results(result, sparse_result)
assert_almost_equal(reference.params, result._beta_raw)
assert_almost_equal(reference.df_model, result._df_model_raw)
assert_almost_equal(reference.df_resid, result._df_resid_raw)
assert_almost_equal(reference.fvalue, result._f_stat_raw[0])
assert_almost_equal(reference.pvalues, result._p_value_raw)
assert_almost_equal(reference.rsquared, result._r2_raw)
assert_almost_equal(reference.rsquared_adj, result._r2_adj_raw)
assert_almost_equal(reference.resid, result._resid_raw)
assert_almost_equal(reference.bse, result._std_err_raw)
assert_almost_equal(reference.tvalues, result._t_stat_raw)
assert_almost_equal(reference.cov_params(), result._var_beta_raw)
assert_almost_equal(reference.fittedvalues, result._y_fitted_raw)
_check_non_raw_results(result)
def checkMovingOLS(self, window_type, x, y, weights=None, **kwds):
window = sm.tools.tools.rank(x.values) * 2
moving = ols(y=y, x=x, weights=weights, window_type=window_type,
window=window, **kwds)
# check that sparse version is the same
sparse_moving = ols(y=y.to_sparse(), x=x.to_sparse(),
weights=weights,
window_type=window_type,
window=window, **kwds)
_compare_ols_results(moving, sparse_moving)
index = moving._index
for n, i in enumerate(moving._valid_indices):
if window_type == 'rolling' and i >= window:
prior_date = index[i - window + 1]
else:
prior_date = index[0]
date = index[i]
x_iter = {}
for k, v in compat.iteritems(x):
x_iter[k] = v.truncate(before=prior_date, after=date)
y_iter = y.truncate(before=prior_date, after=date)
static = ols(y=y_iter, x=x_iter, weights=weights, **kwds)
self.compare(static, moving, event_index=i,
result_index=n)
_check_non_raw_results(moving)
FIELDS = ['beta', 'df', 'df_model', 'df_resid', 'f_stat', 'p_value',
'r2', 'r2_adj', 'rmse', 'std_err', 't_stat',
'var_beta']
def compare(self, static, moving, event_index=None,
result_index=None):
index = moving._index
# Check resid if we have a time index specified
if event_index is not None:
ref = static._resid_raw[-1]
label = index[event_index]
res = moving.resid[label]
assert_almost_equal(ref, res)
ref = static._y_fitted_raw[-1]
res = moving.y_fitted[label]
assert_almost_equal(ref, res)
# Check y_fitted
for field in self.FIELDS:
attr = '_%s_raw' % field
ref = getattr(static, attr)
res = getattr(moving, attr)
if result_index is not None:
res = res[result_index]
assert_almost_equal(ref, res)
def test_ols_object_dtype(self):
df = DataFrame(np.random.randn(20, 2), dtype=object)
model = ols(y=df[0], x=df[1])
summary = repr(model)
class TestOLSMisc(tm.TestCase):
_multiprocess_can_split_ = True
'''
For test coverage with faux data
'''
@classmethod
def setUpClass(cls):
super(TestOLSMisc, cls).setUpClass()
if not _have_statsmodels:
raise nose.SkipTest("no statsmodels")
def test_f_test(self):
x = tm.makeTimeDataFrame()
y = x.pop('A')
model = ols(y=y, x=x)
hyp = '1*B+1*C+1*D=0'
result = model.f_test(hyp)
hyp = ['1*B=0',
'1*C=0',
'1*D=0']
result = model.f_test(hyp)
assert_almost_equal(result['f-stat'], model.f_stat['f-stat'])
self.assertRaises(Exception, model.f_test, '1*A=0')
def test_r2_no_intercept(self):
y = tm.makeTimeSeries()
x = tm.makeTimeDataFrame()
x_with = x.copy()
x_with['intercept'] = 1.
model1 = ols(y=y, x=x)
model2 = ols(y=y, x=x_with, intercept=False)
assert_series_equal(model1.beta, model2.beta)
# TODO: can we infer whether the intercept is there...
self.assertNotEqual(model1.r2, model2.r2)
# rolling
model1 = ols(y=y, x=x, window=20)
model2 = ols(y=y, x=x_with, window=20, intercept=False)
assert_frame_equal(model1.beta, model2.beta)
self.assertTrue((model1.r2 != model2.r2).all())
def test_summary_many_terms(self):
x = DataFrame(np.random.randn(100, 20))
y = np.random.randn(100)
model = ols(y=y, x=x)
model.summary
def test_y_predict(self):
y = tm.makeTimeSeries()
x = tm.makeTimeDataFrame()
model1 = ols(y=y, x=x)
assert_series_equal(model1.y_predict, model1.y_fitted)
assert_almost_equal(model1._y_predict_raw, model1._y_fitted_raw)
def test_predict(self):
y = tm.makeTimeSeries()
x = tm.makeTimeDataFrame()
model1 = ols(y=y, x=x)
assert_series_equal(model1.predict(), model1.y_predict)
assert_series_equal(model1.predict(x=x), model1.y_predict)
assert_series_equal(model1.predict(beta=model1.beta), model1.y_predict)
exog = x.copy()
exog['intercept'] = 1.
rs = Series(np.dot(exog.values, model1.beta.values), x.index)
assert_series_equal(model1.y_predict, rs)
x2 = x.reindex(columns=x.columns[::-1])
assert_series_equal(model1.predict(x=x2), model1.y_predict)
x3 = x2 + 10
pred3 = model1.predict(x=x3)
x3['intercept'] = 1.
x3 = x3.reindex(columns=model1.beta.index)
expected = Series(np.dot(x3.values, model1.beta.values), x3.index)
assert_series_equal(expected, pred3)
beta = Series(0., model1.beta.index)
pred4 = model1.predict(beta=beta)
assert_series_equal(Series(0., pred4.index), pred4)
def test_predict_longer_exog(self):
exogenous = {"1998": "4760", "1999": "5904", "2000": "4504",
"2001": "9808", "2002": "4241", "2003": "4086",
"2004": "4687", "2005": "7686", "2006": "3740",
"2007": "3075", "2008": "3753", "2009": "4679",
"2010": "5468", "2011": "7154", "2012": "4292",
"2013": "4283", "2014": "4595", "2015": "9194",
"2016": "4221", "2017": "4520"}
endogenous = {"1998": "691", "1999": "1580", "2000": "80",
"2001": "1450", "2002": "555", "2003": "956",
"2004": "877", "2005": "614", "2006": "468",
"2007": "191"}
endog = Series(endogenous)
exog = Series(exogenous)
model = ols(y=endog, x=exog)
pred = model.y_predict
self.assertTrue(pred.index.equals(exog.index))
def test_longpanel_series_combo(self):
wp = tm.makePanel()
lp = wp.to_frame()
y = lp.pop('ItemA')
model = ols(y=y, x=lp, entity_effects=True, window=20)
self.assertTrue(notnull(model.beta.values).all())
tm.assertIsInstance(model, PanelOLS)
model.summary
def test_series_rhs(self):
y = tm.makeTimeSeries()
x = tm.makeTimeSeries()
model = ols(y=y, x=x)
expected = ols(y=y, x={'x': x})
assert_series_equal(model.beta, expected.beta)
# GH 5233/5250
assert_series_equal(model.y_predict, model.predict(x=x))
def test_various_attributes(self):
# just make sure everything "works". test correctness elsewhere
x = DataFrame(np.random.randn(100, 5))
y = np.random.randn(100)
model = ols(y=y, x=x, window=20)
series_attrs = ['rank', 'df', 'forecast_mean', 'forecast_vol']
for attr in series_attrs:
value = getattr(model, attr)
tm.assertIsInstance(value, Series)
# works
model._results
def test_catch_regressor_overlap(self):
df1 = tm.makeTimeDataFrame().ix[:, ['A', 'B']]
df2 = tm.makeTimeDataFrame().ix[:, ['B', 'C', 'D']]
y = tm.makeTimeSeries()
data = {'foo': df1, 'bar': df2}
self.assertRaises(Exception, ols, y=y, x=data)
def test_plm_ctor(self):
y = tm.makeTimeDataFrame()
x = {'a': tm.makeTimeDataFrame(),
'b': tm.makeTimeDataFrame()}
model = ols(y=y, x=x, intercept=False)
model.summary
model = ols(y=y, x=Panel(x))
model.summary
def test_plm_attrs(self):
y = tm.makeTimeDataFrame()
x = {'a': tm.makeTimeDataFrame(),
'b': tm.makeTimeDataFrame()}
rmodel = ols(y=y, x=x, window=10)
model = ols(y=y, x=x)
model.resid
rmodel.resid
def test_plm_lagged_y_predict(self):
y = tm.makeTimeDataFrame()
x = {'a': tm.makeTimeDataFrame(),
'b': tm.makeTimeDataFrame()}
model = ols(y=y, x=x, window=10)
result = model.lagged_y_predict(2)
def test_plm_f_test(self):
y = tm.makeTimeDataFrame()
x = {'a': tm.makeTimeDataFrame(),
'b': tm.makeTimeDataFrame()}
model = ols(y=y, x=x)
hyp = '1*a+1*b=0'
result = model.f_test(hyp)
hyp = ['1*a=0',
'1*b=0']
result = model.f_test(hyp)
assert_almost_equal(result['f-stat'], model.f_stat['f-stat'])
def test_plm_exclude_dummy_corner(self):
y = tm.makeTimeDataFrame()
x = {'a': tm.makeTimeDataFrame(),
'b': tm.makeTimeDataFrame()}
model = ols(
y=y, x=x, entity_effects=True, dropped_dummies={'entity': 'D'})
model.summary
self.assertRaises(Exception, ols, y=y, x=x, entity_effects=True,
dropped_dummies={'entity': 'E'})
def test_columns_tuples_summary(self):
# #1837
X = DataFrame(np.random.randn(10, 2), columns=[('a', 'b'), ('c', 'd')])
Y = Series(np.random.randn(10))
# it works!
model = ols(y=Y, x=X)
model.summary
class TestPanelOLS(BaseTest):
_multiprocess_can_split_ = True
FIELDS = ['beta', 'df', 'df_model', 'df_resid', 'f_stat',
'p_value', 'r2', 'r2_adj', 'rmse', 'std_err',
't_stat', 'var_beta']
_other_fields = ['resid', 'y_fitted']
def testFiltering(self):
result = ols(y=self.panel_y2, x=self.panel_x2)
x = result._x
index = x.index.get_level_values(0)
index = Index(sorted(set(index)))
exp_index = Index([datetime(2000, 1, 1), datetime(2000, 1, 3)])
self.assertTrue(exp_index.equals(index))
index = x.index.get_level_values(1)
index = Index(sorted(set(index)))
exp_index = Index(['A', 'B'])
self.assertTrue(exp_index.equals(index))
x = result._x_filtered
index = x.index.get_level_values(0)
index = Index(sorted(set(index)))
exp_index = Index([datetime(2000, 1, 1),
datetime(2000, 1, 3),
datetime(2000, 1, 4)])
self.assertTrue(exp_index.equals(index))
assert_almost_equal(result._y.values.flat, [1, 4, 5])
exp_x = [[6, 14, 1],
[9, 17, 1],
[30, 48, 1]]
assert_almost_equal(exp_x, result._x.values)
exp_x_filtered = [[6, 14, 1],
[9, 17, 1],
[30, 48, 1],
[11, 20, 1],
[12, 21, 1]]
assert_almost_equal(exp_x_filtered, result._x_filtered.values)
self.assertTrue(result._x_filtered.index.levels[0].equals(
result.y_fitted.index))
def test_wls_panel(self):
y = tm.makeTimeDataFrame()
x = Panel({'x1': tm.makeTimeDataFrame(),
'x2': tm.makeTimeDataFrame()})
y.ix[[1, 7], 'A'] = np.nan
y.ix[[6, 15], 'B'] = np.nan
y.ix[[3, 20], 'C'] = np.nan
y.ix[[5, 11], 'D'] = np.nan
stack_y = y.stack()
stack_x = DataFrame(dict((k, v.stack())
for k, v in compat.iteritems(x)))
weights = x.std('items')
stack_weights = weights.stack()
stack_y.index = stack_y.index._tuple_index
stack_x.index = stack_x.index._tuple_index
stack_weights.index = stack_weights.index._tuple_index
result = ols(y=y, x=x, weights=1 / weights)
expected = ols(y=stack_y, x=stack_x, weights=1 / stack_weights)
assert_almost_equal(result.beta, expected.beta)
for attr in ['resid', 'y_fitted']:
rvals = getattr(result, attr).stack().values
evals = getattr(expected, attr).values
assert_almost_equal(rvals, evals)
def testWithTimeEffects(self):
result = ols(y=self.panel_y2, x=self.panel_x2, time_effects=True)
assert_almost_equal(result._y_trans.values.flat, [0, -0.5, 0.5])
exp_x = [[0, 0], [-10.5, -15.5], [10.5, 15.5]]
assert_almost_equal(result._x_trans.values, exp_x)
# _check_non_raw_results(result)
def testWithEntityEffects(self):
result = ols(y=self.panel_y2, x=self.panel_x2, entity_effects=True)
assert_almost_equal(result._y.values.flat, [1, 4, 5])
exp_x = DataFrame([[0., 6., 14., 1.], [0, 9, 17, 1], [1, 30, 48, 1]],
index=result._x.index, columns=['FE_B', 'x1', 'x2',
'intercept'],
dtype=float)
tm.assert_frame_equal(result._x, exp_x.ix[:, result._x.columns])
# _check_non_raw_results(result)
def testWithEntityEffectsAndDroppedDummies(self):
result = ols(y=self.panel_y2, x=self.panel_x2, entity_effects=True,
dropped_dummies={'entity': 'B'})
assert_almost_equal(result._y.values.flat, [1, 4, 5])
exp_x = DataFrame([[1., 6., 14., 1.], [1, 9, 17, 1], [0, 30, 48, 1]],
index=result._x.index, columns=['FE_A', 'x1', 'x2',
'intercept'],
dtype=float)
tm.assert_frame_equal(result._x, exp_x.ix[:, result._x.columns])
# _check_non_raw_results(result)
def testWithXEffects(self):
result = ols(y=self.panel_y2, x=self.panel_x2, x_effects=['x1'])
assert_almost_equal(result._y.values.flat, [1, 4, 5])
res = result._x
exp_x = DataFrame([[0., 0., 14., 1.], [0, 1, 17, 1], [1, 0, 48, 1]],
columns=['x1_30', 'x1_9', 'x2', 'intercept'],
index=res.index, dtype=float)
assert_frame_equal(res, exp_x.reindex(columns=res.columns))
def testWithXEffectsAndDroppedDummies(self):
result = ols(y=self.panel_y2, x=self.panel_x2, x_effects=['x1'],
dropped_dummies={'x1': 30})
res = result._x
assert_almost_equal(result._y.values.flat, [1, 4, 5])
exp_x = DataFrame([[1., 0., 14., 1.], [0, 1, 17, 1], [0, 0, 48, 1]],
columns=['x1_6', 'x1_9', 'x2', 'intercept'],
index=res.index, dtype=float)
assert_frame_equal(res, exp_x.reindex(columns=res.columns))
def testWithXEffectsAndConversion(self):
result = ols(y=self.panel_y3, x=self.panel_x3, x_effects=['x1', 'x2'])
assert_almost_equal(result._y.values.flat, [1, 2, 3, 4])
exp_x = [[0, 0, 0, 1, 1], [1, 0, 0, 0, 1], [0, 1, 1, 0, 1],
[0, 0, 0, 1, 1]]
assert_almost_equal(result._x.values, exp_x)
exp_index = Index(['x1_B', 'x1_C', 'x2_baz', 'x2_foo', 'intercept'])
self.assertTrue(exp_index.equals(result._x.columns))
# _check_non_raw_results(result)
def testWithXEffectsAndConversionAndDroppedDummies(self):
result = ols(y=self.panel_y3, x=self.panel_x3, x_effects=['x1', 'x2'],
dropped_dummies={'x2': 'foo'})
assert_almost_equal(result._y.values.flat, [1, 2, 3, 4])
exp_x = [[0, 0, 0, 0, 1], [1, 0, 1, 0, 1], [0, 1, 0, 1, 1],
[0, 0, 0, 0, 1]]
assert_almost_equal(result._x.values, exp_x)
exp_index = Index(['x1_B', 'x1_C', 'x2_bar', 'x2_baz', 'intercept'])
self.assertTrue(exp_index.equals(result._x.columns))
# _check_non_raw_results(result)
def testForSeries(self):
self.checkForSeries(self.series_panel_x, self.series_panel_y,
self.series_x, self.series_y)
self.checkForSeries(self.series_panel_x, self.series_panel_y,
self.series_x, self.series_y, nw_lags=0)
self.checkForSeries(self.series_panel_x, self.series_panel_y,
self.series_x, self.series_y, nw_lags=1,
nw_overlap=True)
def testRolling(self):
self.checkMovingOLS(self.panel_x, self.panel_y)
def testRollingWithFixedEffects(self):
self.checkMovingOLS(self.panel_x, self.panel_y,
entity_effects=True)
self.checkMovingOLS(self.panel_x, self.panel_y, intercept=False,
entity_effects=True)
def testRollingWithTimeEffects(self):
self.checkMovingOLS(self.panel_x, self.panel_y,
time_effects=True)
def testRollingWithNeweyWest(self):
self.checkMovingOLS(self.panel_x, self.panel_y,
nw_lags=1)
def testRollingWithEntityCluster(self):
self.checkMovingOLS(self.panel_x, self.panel_y,
cluster='entity')
def testUnknownClusterRaisesValueError(self):
assertRaisesRegexp(ValueError, "Unrecognized cluster.*ridiculous",
self.checkMovingOLS, self.panel_x, self.panel_y,
cluster='ridiculous')
def testRollingWithTimeEffectsAndEntityCluster(self):
self.checkMovingOLS(self.panel_x, self.panel_y,
time_effects=True, cluster='entity')
def testRollingWithTimeCluster(self):
self.checkMovingOLS(self.panel_x, self.panel_y,
cluster='time')
def testRollingWithNeweyWestAndEntityCluster(self):
self.assertRaises(ValueError, self.checkMovingOLS,
self.panel_x, self.panel_y,
nw_lags=1, cluster='entity')
def testRollingWithNeweyWestAndTimeEffectsAndEntityCluster(self):
self.assertRaises(ValueError,
self.checkMovingOLS, self.panel_x, self.panel_y,
nw_lags=1, cluster='entity',
time_effects=True)
def testExpanding(self):
self.checkMovingOLS(
self.panel_x, self.panel_y, window_type='expanding')
def testNonPooled(self):
self.checkNonPooled(y=self.panel_y, x=self.panel_x)
self.checkNonPooled(y=self.panel_y, x=self.panel_x,
window_type='rolling', window=25, min_periods=10)
def testUnknownWindowType(self):
assertRaisesRegexp(ValueError, "window.*ridiculous",
self.checkNonPooled, y=self.panel_y, x=self.panel_x,
window_type='ridiculous', window=25, min_periods=10)
def checkNonPooled(self, x, y, **kwds):
# For now, just check that it doesn't crash
result = ols(y=y, x=x, pool=False, **kwds)
_check_repr(result)
for attr in NonPooledPanelOLS.ATTRIBUTES:
_check_repr(getattr(result, attr))
def checkMovingOLS(self, x, y, window_type='rolling', **kwds):
window = 25 # must be larger than rank of x
moving = ols(y=y, x=x, window_type=window_type,
window=window, **kwds)
index = moving._index
for n, i in enumerate(moving._valid_indices):
if window_type == 'rolling' and i >= window:
prior_date = index[i - window + 1]
else:
prior_date = index[0]
date = index[i]
x_iter = {}
for k, v in compat.iteritems(x):
x_iter[k] = v.truncate(before=prior_date, after=date)
y_iter = y.truncate(before=prior_date, after=date)
static = ols(y=y_iter, x=x_iter, **kwds)
self.compare(static, moving, event_index=i,
result_index=n)
_check_non_raw_results(moving)
def checkForSeries(self, x, y, series_x, series_y, **kwds):
# Consistency check with simple OLS.
result = ols(y=y, x=x, **kwds)
reference = ols(y=series_y, x=series_x, **kwds)
self.compare(reference, result)
def compare(self, static, moving, event_index=None,
result_index=None):
# Check resid if we have a time index specified
if event_index is not None:
staticSlice = _period_slice(static, -1)
movingSlice = _period_slice(moving, event_index)
ref = static._resid_raw[staticSlice]
res = moving._resid_raw[movingSlice]
assert_almost_equal(ref, res)
ref = static._y_fitted_raw[staticSlice]
res = moving._y_fitted_raw[movingSlice]
assert_almost_equal(ref, res)
# Check y_fitted
for field in self.FIELDS:
attr = '_%s_raw' % field
ref = getattr(static, attr)
res = getattr(moving, attr)
if result_index is not None:
res = res[result_index]
assert_almost_equal(ref, res)
def test_auto_rolling_window_type(self):
data = tm.makeTimeDataFrame()
y = data.pop('A')
window_model = ols(y=y, x=data, window=20, min_periods=10)
rolling_model = ols(y=y, x=data, window=20, min_periods=10,
window_type='rolling')
assert_frame_equal(window_model.beta, rolling_model.beta)
def test_group_agg(self):
from pandas.stats.plm import _group_agg
values = np.ones((10, 2)) * np.arange(10).reshape((10, 1))
bounds = np.arange(5) * 2
f = lambda x: x.mean(axis=0)
agged = _group_agg(values, bounds, f)
assert(agged[1][0] == 2.5)
assert(agged[2][0] == 4.5)
# test a function that doesn't aggregate
f2 = lambda x: np.zeros((2, 2))
self.assertRaises(Exception, _group_agg, values, bounds, f2)
def _check_non_raw_results(model):
_check_repr(model)
_check_repr(model.resid)
_check_repr(model.summary_as_matrix)
_check_repr(model.y_fitted)
_check_repr(model.y_predict)
def _period_slice(panelModel, i):
index = panelModel._x_trans.index
period = index.levels[0][i]
L, R = index.get_major_bounds(period, period)
return slice(L, R)
class TestOLSFilter(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
date_index = date_range(datetime(2009, 12, 11), periods=3,
freq=datetools.bday)
ts = Series([3, 1, 4], index=date_index)
self.TS1 = ts
date_index = date_range(datetime(2009, 12, 11), periods=5,
freq=datetools.bday)
ts = Series([1, 5, 9, 2, 6], index=date_index)
self.TS2 = ts
date_index = date_range(datetime(2009, 12, 11), periods=3,
freq=datetools.bday)
ts = Series([5, np.nan, 3], index=date_index)
self.TS3 = ts
date_index = date_range(datetime(2009, 12, 11), periods=5,
freq=datetools.bday)
ts = Series([np.nan, 5, 8, 9, 7], index=date_index)
self.TS4 = ts
data = {'x1': self.TS2, 'x2': self.TS4}
self.DF1 = DataFrame(data=data)
data = {'x1': self.TS2, 'x2': self.TS4}
self.DICT1 = data
def testFilterWithSeriesRHS(self):
(lhs, rhs, weights, rhs_pre,
index, valid) = _filter_data(self.TS1, {'x1': self.TS2}, None)
self.tsAssertEqual(self.TS1, lhs)
self.tsAssertEqual(self.TS2[:3], rhs['x1'])
self.tsAssertEqual(self.TS2, rhs_pre['x1'])
def testFilterWithSeriesRHS2(self):
(lhs, rhs, weights, rhs_pre,
index, valid) = _filter_data(self.TS2, {'x1': self.TS1}, None)
self.tsAssertEqual(self.TS2[:3], lhs)
self.tsAssertEqual(self.TS1, rhs['x1'])
self.tsAssertEqual(self.TS1, rhs_pre['x1'])
def testFilterWithSeriesRHS3(self):
(lhs, rhs, weights, rhs_pre,
index, valid) = _filter_data(self.TS3, {'x1': self.TS4}, None)
exp_lhs = self.TS3[2:3]
exp_rhs = self.TS4[2:3]
exp_rhs_pre = self.TS4[1:]
self.tsAssertEqual(exp_lhs, lhs)
self.tsAssertEqual(exp_rhs, rhs['x1'])
self.tsAssertEqual(exp_rhs_pre, rhs_pre['x1'])
def testFilterWithDataFrameRHS(self):
(lhs, rhs, weights, rhs_pre,
index, valid) = _filter_data(self.TS1, self.DF1, None)
exp_lhs = self.TS1[1:]
exp_rhs1 = self.TS2[1:3]
exp_rhs2 = self.TS4[1:3]
self.tsAssertEqual(exp_lhs, lhs)
self.tsAssertEqual(exp_rhs1, rhs['x1'])
self.tsAssertEqual(exp_rhs2, rhs['x2'])
def testFilterWithDictRHS(self):
(lhs, rhs, weights, rhs_pre,
index, valid) = _filter_data(self.TS1, self.DICT1, None)
exp_lhs = self.TS1[1:]
exp_rhs1 = self.TS2[1:3]
exp_rhs2 = self.TS4[1:3]
self.tsAssertEqual(exp_lhs, lhs)
self.tsAssertEqual(exp_rhs1, rhs['x1'])
self.tsAssertEqual(exp_rhs2, rhs['x2'])
def tsAssertEqual(self, ts1, ts2):
self.assert_numpy_array_equal(ts1, ts2)
if __name__ == '__main__':
import nose
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
| gpl-3.0 |
petr-devaikin/dancee | helpers/optimize_features.py | 1 | 1194 | # Perform dictionary learning for extracted features
# Input: features_extracted.json
# Output: features_extracted_opt.json
import sklearn.decomposition as decomp
import json
with open('features_extracted.json') as features_json:
fragments = json.load(features_json)
feature_list = [feature for feature in fragments['1']['features']]
for histo in ['histo5_part', 'histo10_part', 'histo20_part', 'histo40_part', 'histo5', 'histo10', 'histo20', 'histo40']:
feature_dicts = {}
for feature in feature_list:
data = []
for fragment in fragments:
data.append(fragments[fragment]['features'][feature][histo])
print 'Learning for ', feature, histo
feature_dicts[feature] = decomp.DictionaryLearning()
feature_dicts[feature].fit(data)
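# note: DictionaryLearning with its default settings learns one atom per input
# dimension of this feature/histogram pair; transform() below then replaces each
# histogram with its sparse code over the learned atoms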
#break
print 'Transforming'
for fragment in fragments:
for feature in feature_list:
transformed_histo = feature_dicts[feature].transform([fragments[fragment]['features'][feature][histo]])[0]
fragments[fragment]['features'][feature][histo] = transformed_histo.tolist()
#break
print 'Writing the results'
with open('features_extracted_opt.json', 'w') as features_file:
json.dump(fragments, features_file)
| gpl-3.0 |
brain-research/mirage-rl-qprop | sandbox/rocky/tf/exploration_strategies/ou_strategy.py | 1 | 2170 | from rllab.misc.overrides import overrides
from rllab.misc.ext import AttrDict
from rllab.core.serializable import Serializable
from sandbox.rocky.tf.spaces.box import Box
from rllab.exploration_strategies.base import ExplorationStrategy
import numpy as np
import numpy.random as nr
class OUStrategy(ExplorationStrategy, Serializable):
"""
This strategy implements the Ornstein-Uhlenbeck process, which adds
time-correlated noise to the actions taken by the deterministic policy.
The OU process satisfies the following stochastic differential equation:
dxt = theta*(mu - xt)*dt + sigma*dWt
where Wt denotes the Wiener process
"""
def __init__(self, env_spec, mu=0, theta=0.15, sigma=0.3, **kwargs):
assert isinstance(env_spec.action_space, Box)
assert len(env_spec.action_space.shape) == 1
Serializable.quick_init(self, locals())
self.mu = mu
self.theta = theta
self.sigma = sigma
self.action_space = env_spec.action_space
self.state = np.ones(self.action_space.flat_dim) * self.mu
self.reset()
def __getstate__(self):
d = Serializable.__getstate__(self)
d["state"] = self.state
return d
def __setstate__(self, d):
Serializable.__setstate__(self, d)
self.state = d["state"]
@overrides
def reset(self):
self.state = np.ones(self.action_space.flat_dim) * self.mu
def evolve_state(self):
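# Euler-Maruyama discretisation of the OU SDE documented above, with an
# implicit step size of dt = 1:  x <- x + theta*(mu - x) + sigma*N(0, I)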
x = self.state
dx = self.theta * (self.mu - x) + self.sigma * nr.randn(len(x))
self.state = x + dx
return self.state
@overrides
def get_action(self, t, observation, policy, **kwargs):
action, _ = policy.get_action(observation)
ou_state = self.evolve_state()
return np.clip(action + ou_state, self.action_space.low, self.action_space.high)
if __name__ == "__main__":
ou = OUStrategy(env_spec=AttrDict(action_space=Box(low=-1, high=1, shape=(1,))), mu=0, theta=0.15, sigma=0.3)
states = []
for i in range(1000):
states.append(ou.evolve_state()[0])
import matplotlib.pyplot as plt
plt.plot(states)
plt.show()
| mit |
njpayne/euclid | python/regressors.py | 1 | 7350 | import numpy as np
import time
import pylab
import os
import math
import pydot
import matplotlib.pyplot as plt
from sklearn import tree, neighbors, svm, ensemble, linear_model, svm
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.learning_curve import learning_curve
from sklearn.externals.six import StringIO
from sklearn.cross_validation import ShuffleSplit, cross_val_predict, cross_val_score
from sklearn.grid_search import GridSearchCV
from plot_learning_curve import plot_learning_curve, plot_validation_curve, plot_learning_curve_iter, plot_adaclassifier
data_location = "../Data" # read data from os.path.join(data_location, <filename>)
results_location = "Results" # save results text/graph to os.path.join(results_location, <filename>)
def run_decision_tree(training_features, training_labels, test_features, test_labels, passed_parameters = None, headings = None, title = ""):
"""
Regresses the data using sklearn's decision tree
Does not natively support pruning so max_depth is being used
Parameters
----------
training_features/training_labels: feature matrix and target values used to fit the regressor
test_features/test_labels: feature matrix and target values used to evaluate the fitted regressor
headings: optional numpy array of feature names, used to label the exported tree graph
title: suffix used when saving the tree visualisation to disk
Returns
-------
prediction: predicted values for the test data
accuracy: R^2 score of the tuned regressor on the test data
"""
estimator = tree.DecisionTreeRegressor()
#set up parameters for the classifier
if(passed_parameters == None):
parameters = {'max_depth': None}
else:
parameters = passed_parameters
#create cross validation iterator
cv = ShuffleSplit(training_features.shape[0], n_iter=5, test_size=0.2, random_state=0)
#set up tuning algorithm
regressor = GridSearchCV(estimator=estimator, cv=cv, param_grid=parameters)
#fit the classifier
regressor.fit(training_features, training_labels)
test_prediction = regressor.predict(test_features)
test_accuracy = regressor.score(test_features, test_labels)
#show the best result
estimator = tree.DecisionTreeRegressor(max_depth = regressor.best_estimator_.max_depth)
estimator.fit(training_features, training_labels)
#save the visualization of the decision tree only use the top 5 levels for now
if headings is not None and headings.shape[0] == training_features.shape[1]:
tree_data = StringIO()
tree.export_graphviz(estimator, out_file=tree_data, max_depth=regressor.best_estimator_.max_depth, feature_names=headings)
graph = pydot.graph_from_dot_data(tree_data.getvalue())
graph.write_pdf(os.path.join(results_location, "Decision Tree Model_%s.pdf" % title))
time_2 = time.time()
## Plot the results
#plt.figure()
#plt.scatter(X, y, c="k", label="data")
#plt.plot(X_test, y_1, c="g", label="max_depth=2", linewidth=2)
#plt.plot(X_test, y_2, c="r", label="max_depth=5", linewidth=2)
#plt.xlabel("data")
#plt.ylabel("target")
#plt.title("Decision Tree Regression")
#plt.legend()
#plt.show()
return test_prediction, test_accuracy
def run_boosting(training_features, training_labels, test_features, test_labels, passed_parameters = None):
"""
Regresses the data using sklearn's AdaBoost with a decision tree base estimator
Decision trees do not natively support pruning, so max_depth is tuned instead
Parameters
----------
training_features/training_labels: feature matrix and target values used to fit the regressor
test_features/test_labels: feature matrix and target values used to evaluate the fitted regressor
passed_parameters: optional parameter grid for GridSearchCV (defaults cover max_depth, n_estimators and learning_rate)
Returns
-------
prediction: predicted values for the test data
accuracy: R^2 score of the tuned regressor on the test data
"""
time_1 = time.time()
#set up underlying decision tree classifier
base_regressor = tree.DecisionTreeRegressor()
#set up the boosting method
estimator = ensemble.AdaBoostRegressor(base_estimator = base_regressor)
#set up parameters for the regressor (fall back to a default grid if none were passed in)
if passed_parameters is None:
passed_parameters = {'base_estimator__max_depth': range(1, 5), 'n_estimators' : range(10, 200, 50), 'learning_rate' : [1] }
#create cross validation iterator
cv = ShuffleSplit(training_features.shape[0], n_iter=5, test_size=0.2, random_state=0)
#set up tuning algorithm
regressor = GridSearchCV(estimator=estimator, cv=cv, param_grid=passed_parameters)
#fit the classifier
regressor.fit(training_features, training_labels)
#get the prediction and accuracy of the test set
test_prediction = regressor.predict(test_features)
test_accuracy = regressor.score(test_features, test_labels)
return test_prediction, test_accuracy
def run_random_forest(training_features, training_labels, test_features, test_labels, passed_parameters = None, ):
estimator = ensemble.RandomForestRegressor(random_state=0, n_estimators=25)
#set up parameters for the classifier
if(passed_parameters == None):
parameters = {'max_depth': None}
else:
parameters = passed_parameters
#create cross validation iterator
cv = ShuffleSplit(training_features.shape[0], n_iter=5, test_size=0.2, random_state=0)
#set up tuning algorithm
regressor = GridSearchCV(estimator=estimator, cv=cv, param_grid=parameters)
#fit the classifier
regressor.fit(training_features, training_labels)
test_prediction = regressor.predict(test_features)
test_accuracy = regressor.score(test_features, test_labels)
time_2 = time.time()
return test_prediction, test_accuracy
def run_linear_regression(training_features, training_labels, test_features, test_labels, passed_parameters = None, headings = ["Linear"]):
#set up linear regressor
estimator = linear_model.LinearRegression(fit_intercept = True)
estimator.fit(training_features, training_labels)
prediction = estimator.predict(X = test_features)
score = estimator.score(X = test_features, y = test_labels)
if(training_features.shape[1] == 1):
fig, ax = plt.subplots()
ax.scatter(training_labels, prediction)
ax.plot([training_labels.min(), training_labels.max()], [training_labels.min(), training_labels.max()], 'k--', lw=4)
ax.set_xlabel('Measured')
ax.set_ylabel('Predicted')
pylab.savefig(os.path.join(results_location, "Linear - " + headings[-1] + '.png'))
return prediction, score
def run_support_vector_regressor(training_features, training_labels, test_features, test_labels, passed_parameters = None):
estimator = svm.SVR()
#set up parameters for the classifier
if(passed_parameters == None):
parameters = {'kernel': ['linear']}
else:
parameters = passed_parameters
#create cross validation iterator
cv = ShuffleSplit(training_features.shape[0], n_iter=5, test_size=0.2, random_state=0)
#set up tuning algorithm
regressor = GridSearchCV(estimator=estimator, cv=cv, param_grid=parameters)
#fit the classifier
regressor.fit(training_features, training_labels)
test_prediction = regressor.predict(test_features)
test_accuracy = regressor.score(test_features, test_labels)
time_2 = time.time()
return test_prediction, test_accuracy
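# Example usage (illustrative only -- the variable names X_train, y_train, X_test,
# y_test and feature_names below are hypothetical; they assume the caller has already
# produced a train/test split, e.g. with sklearn.cross_validation.train_test_split,
# and that feature_names is a numpy array of column names):
#
#   prediction, score = run_linear_regression(X_train, y_train, X_test, y_test)
#   print("Linear regression R^2 on the held-out set: %0.3f" % score)
#
#   prediction, score = run_decision_tree(X_train, y_train, X_test, y_test,
#                                         passed_parameters={'max_depth': range(1, 11)},
#                                         headings=feature_names, title="depth_sweep")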
| gpl-2.0 |
hainm/scikit-learn | doc/tutorial/text_analytics/skeletons/exercise_01_language_train_model.py | 254 | 2005 | """Build a language detector model
The goal of this exercise is to train a linear classifier on text features
that represent sequences of up to 3 consecutive characters so as to be
recognize natural languages by using the frequencies of short character
sequences as 'fingerprints'.
"""
# Author: Olivier Grisel <[email protected]>
# License: Simplified BSD
import sys
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import Perceptron
from sklearn.pipeline import Pipeline
from sklearn.datasets import load_files
from sklearn.cross_validation import train_test_split
from sklearn import metrics
# The training data folder must be passed as first argument
languages_data_folder = sys.argv[1]
dataset = load_files(languages_data_folder)
# Split the dataset in training and test set:
docs_train, docs_test, y_train, y_test = train_test_split(
dataset.data, dataset.target, test_size=0.5)
# TASK: Build a vectorizer that splits strings into sequences of 1 to 3
# characters instead of word tokens
# TASK: Build a vectorizer / classifier pipeline using the previous analyzer
# the pipeline instance should be stored in a variable named clf
# TASK: Fit the pipeline on the training set
# TASK: Predict the outcome on the testing set in a variable named y_predicted
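# --- One possible completion of the TASKs above (an illustrative sketch, not
# --- necessarily the canonical solution file): character n-grams of length 1 to 3
# --- fed into a Perceptron; the exact parameter choices here are assumptions.
vectorizer = TfidfVectorizer(ngram_range=(1, 3), analyzer='char', use_idf=False)
clf = Pipeline([('vec', vectorizer), ('clf', Perceptron())])
clf.fit(docs_train, y_train)
y_predicted = clf.predict(docs_test)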
# Print the classification report
print(metrics.classification_report(y_test, y_predicted,
target_names=dataset.target_names))
# Plot the confusion matrix
cm = metrics.confusion_matrix(y_test, y_predicted)
print(cm)
#import pylab as pl
#pl.matshow(cm, cmap=pl.cm.jet)
#pl.show()
# Predict the result on some short new sentences:
sentences = [
u'This is a language detection test.',
u'Ceci est un test de d\xe9tection de la langue.',
u'Dies ist ein Test, um die Sprache zu erkennen.',
]
predicted = clf.predict(sentences)
for s, p in zip(sentences, predicted):
print(u'The language of "%s" is "%s"' % (s, dataset.target_names[p]))
| bsd-3-clause |
bayespy/bayespy | bayespy/demos/pca.py | 5 | 4657 | ################################################################################
# Copyright (C) 2011-2014 Jaakko Luttinen
#
# This file is licensed under the MIT License.
################################################################################
import numpy as np
import matplotlib.pyplot as plt
import bayespy.plot as myplt
from bayespy.utils import misc
from bayespy.utils import random
from bayespy import nodes
from bayespy.inference.vmp.vmp import VB
from bayespy.inference.vmp import transformations
import bayespy.plot as bpplt
def model(M, N, D):
# Construct the PCA model with ARD
# ARD
alpha = nodes.Gamma(1e-2,
1e-2,
plates=(D,),
name='alpha')
# Loadings
W = nodes.GaussianARD(0,
alpha,
shape=(D,),
plates=(M,1),
name='W')
# States
X = nodes.GaussianARD(0,
1,
shape=(D,),
plates=(1,N),
name='X')
# PCA
F = nodes.SumMultiply('i,i', W, X,
name='F')
# Noise
tau = nodes.Gamma(1e-2, 1e-2,
name='tau')
# Noisy observations
Y = nodes.GaussianARD(F, tau,
name='Y')
# Initialize some nodes randomly
X.initialize_from_random()
W.initialize_from_random()
return VB(Y, F, W, X, tau, alpha)
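# Note on the model above: observations are modelled as Y ~ N(F, 1/tau) with
# F = W X^T, and the ARD precision alpha on the columns of W lets inference
# switch off superfluous latent dimensions automatically when D > D_y.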
@bpplt.interactive
def run(M=10, N=100, D_y=3, D=5, seed=42, rotate=False, maxiter=1000, debug=False, plot=True):
if seed is not None:
np.random.seed(seed)
# Generate data
w = np.random.normal(0, 1, size=(M,1,D_y))
x = np.random.normal(0, 1, size=(1,N,D_y))
f = misc.sum_product(w, x, axes_to_sum=[-1])
y = f + np.random.normal(0, 0.1, size=(M,N))
# Construct model
Q = model(M, N, D)
# Data with missing values
mask = random.mask(M, N, p=0.5) # randomly missing
y[~mask] = np.nan
Q['Y'].observe(y, mask=mask)
# Run inference algorithm
if rotate:
# Use rotations to speed up learning
rotW = transformations.RotateGaussianARD(Q['W'], Q['alpha'])
rotX = transformations.RotateGaussianARD(Q['X'])
R = transformations.RotationOptimizer(rotW, rotX, D)
if debug:
Q.callback = lambda : R.rotate(check_bound=True,
check_gradient=True)
else:
Q.callback = R.rotate
# Use standard VB-EM alone
Q.update(repeat=maxiter)
# Plot results
if plot:
plt.figure()
bpplt.timeseries_normal(Q['F'], scale=2)
bpplt.timeseries(f, color='g', linestyle='-')
bpplt.timeseries(y, color='r', linestyle='None', marker='+')
if __name__ == '__main__':
import sys, getopt, os
try:
opts, args = getopt.getopt(sys.argv[1:],
"",
["m=",
"n=",
"d=",
"k=",
"seed=",
"maxiter=",
"debug",
"rotate"])
except getopt.GetoptError:
print('python demo_pca.py <options>')
print('--m=<INT> Dimensionality of data vectors')
print('--n=<INT> Number of data vectors')
print('--d=<INT> Dimensionality of the latent vectors in the model')
print('--k=<INT> Dimensionality of the true latent vectors')
print('--rotate Apply speed-up rotations')
print('--maxiter=<INT> Maximum number of VB iterations')
print('--seed=<INT> Seed (integer) for the random number generator')
print('--debug Check that the rotations are implemented correctly')
sys.exit(2)
kwargs = {}
for opt, arg in opts:
if opt == "--rotate":
kwargs["rotate"] = True
elif opt == "--maxiter":
kwargs["maxiter"] = int(arg)
elif opt == "--debug":
kwargs["debug"] = True
elif opt == "--seed":
kwargs["seed"] = int(arg)
elif opt in ("--m",):
kwargs["M"] = int(arg)
elif opt in ("--n",):
kwargs["N"] = int(arg)
elif opt in ("--d",):
kwargs["D"] = int(arg)
elif opt in ("--k",):
kwargs["D_y"] = int(arg)
run(**kwargs)
plt.show()
| mit |
jkarnows/scikit-learn | examples/manifold/plot_mds.py | 261 | 2616 | """
=========================
Multi-dimensional scaling
=========================
An illustration of the metric and non-metric MDS on generated noisy data.
The reconstructed points using the metric MDS and non metric MDS are slightly
shifted to avoid overlapping.
"""
# Author: Nelle Varoquaux <[email protected]>
# Licence: BSD
print(__doc__)
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.collections import LineCollection
from sklearn import manifold
from sklearn.metrics import euclidean_distances
from sklearn.decomposition import PCA
n_samples = 20
seed = np.random.RandomState(seed=3)
X_true = seed.randint(0, 20, 2 * n_samples).astype(np.float)
X_true = X_true.reshape((n_samples, 2))
# Center the data
X_true -= X_true.mean()
similarities = euclidean_distances(X_true)
# Add noise to the similarities
noise = np.random.rand(n_samples, n_samples)
noise = noise + noise.T
noise[np.arange(noise.shape[0]), np.arange(noise.shape[0])] = 0
similarities += noise
mds = manifold.MDS(n_components=2, max_iter=3000, eps=1e-9, random_state=seed,
dissimilarity="precomputed", n_jobs=1)
pos = mds.fit(similarities).embedding_
nmds = manifold.MDS(n_components=2, metric=False, max_iter=3000, eps=1e-12,
dissimilarity="precomputed", random_state=seed, n_jobs=1,
n_init=1)
npos = nmds.fit_transform(similarities, init=pos)
# Rescale the data
pos *= np.sqrt((X_true ** 2).sum()) / np.sqrt((pos ** 2).sum())
npos *= np.sqrt((X_true ** 2).sum()) / np.sqrt((npos ** 2).sum())
# Rotate the data
clf = PCA(n_components=2)
X_true = clf.fit_transform(X_true)
pos = clf.fit_transform(pos)
npos = clf.fit_transform(npos)
fig = plt.figure(1)
ax = plt.axes([0., 0., 1., 1.])
plt.scatter(X_true[:, 0], X_true[:, 1], c='r', s=20)
plt.scatter(pos[:, 0], pos[:, 1], s=20, c='g')
plt.scatter(npos[:, 0], npos[:, 1], s=20, c='b')
plt.legend(('True position', 'MDS', 'NMDS'), loc='best')
similarities = similarities.max() / similarities * 100
similarities[np.isinf(similarities)] = 0
# Plot the edges
start_idx, end_idx = np.where(pos)
#a sequence of (*line0*, *line1*, *line2*), where::
# linen = (x0, y0), (x1, y1), ... (xm, ym)
segments = [[X_true[i, :], X_true[j, :]]
for i in range(len(pos)) for j in range(len(pos))]
values = np.abs(similarities)
lc = LineCollection(segments,
zorder=0, cmap=plt.cm.hot_r,
norm=plt.Normalize(0, values.max()))
lc.set_array(similarities.flatten())
lc.set_linewidths(0.5 * np.ones(len(segments)))
ax.add_collection(lc)
plt.show()
| bsd-3-clause |
JeanKossaifi/scikit-learn | examples/svm/plot_oneclass.py | 249 | 2302 | """
==========================================
One-class SVM with non-linear kernel (RBF)
==========================================
An example using a one-class SVM for novelty detection.
:ref:`One-class SVM <svm_outlier_detection>` is an unsupervised
algorithm that learns a decision function for novelty detection:
classifying new data as similar or different to the training set.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.font_manager
from sklearn import svm
xx, yy = np.meshgrid(np.linspace(-5, 5, 500), np.linspace(-5, 5, 500))
# Generate train data
X = 0.3 * np.random.randn(100, 2)
X_train = np.r_[X + 2, X - 2]
# Generate some regular novel observations
X = 0.3 * np.random.randn(20, 2)
X_test = np.r_[X + 2, X - 2]
# Generate some abnormal novel observations
X_outliers = np.random.uniform(low=-4, high=4, size=(20, 2))
# fit the model
clf = svm.OneClassSVM(nu=0.1, kernel="rbf", gamma=0.1)
clf.fit(X_train)
y_pred_train = clf.predict(X_train)
y_pred_test = clf.predict(X_test)
y_pred_outliers = clf.predict(X_outliers)
n_error_train = y_pred_train[y_pred_train == -1].size
n_error_test = y_pred_test[y_pred_test == -1].size
n_error_outliers = y_pred_outliers[y_pred_outliers == 1].size
# plot the line, the points, and the nearest vectors to the plane
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
plt.title("Novelty Detection")
plt.contourf(xx, yy, Z, levels=np.linspace(Z.min(), 0, 7), cmap=plt.cm.Blues_r)
a = plt.contour(xx, yy, Z, levels=[0], linewidths=2, colors='red')
plt.contourf(xx, yy, Z, levels=[0, Z.max()], colors='orange')
b1 = plt.scatter(X_train[:, 0], X_train[:, 1], c='white')
b2 = plt.scatter(X_test[:, 0], X_test[:, 1], c='green')
c = plt.scatter(X_outliers[:, 0], X_outliers[:, 1], c='red')
plt.axis('tight')
plt.xlim((-5, 5))
plt.ylim((-5, 5))
plt.legend([a.collections[0], b1, b2, c],
["learned frontier", "training observations",
"new regular observations", "new abnormal observations"],
loc="upper left",
prop=matplotlib.font_manager.FontProperties(size=11))
plt.xlabel(
"error train: %d/200 ; errors novel regular: %d/40 ; "
"errors novel abnormal: %d/40"
% (n_error_train, n_error_test, n_error_outliers))
plt.show()
| bsd-3-clause |
hsiaoyi0504/scikit-learn | examples/manifold/plot_compare_methods.py | 259 | 4031 | """
=========================================
Comparison of Manifold Learning methods
=========================================
An illustration of dimensionality reduction on the S-curve dataset
with various manifold learning methods.
For a discussion and comparison of these algorithms, see the
:ref:`manifold module page <manifold>`
For a similar example, where the methods are applied to a
sphere dataset, see :ref:`example_manifold_plot_manifold_sphere.py`
Note that the purpose of the MDS is to find a low-dimensional
representation of the data (here 2D) in which the distances respect well
the distances in the original high-dimensional space, unlike other
manifold-learning algorithms, it does not seeks an isotropic
representation of the data in the low-dimensional space.
"""
# Author: Jake Vanderplas -- <[email protected]>
print(__doc__)
from time import time
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.ticker import NullFormatter
from sklearn import manifold, datasets
# Next line to silence pyflakes. This import is needed.
Axes3D
n_points = 1000
X, color = datasets.samples_generator.make_s_curve(n_points, random_state=0)
n_neighbors = 10
n_components = 2
fig = plt.figure(figsize=(15, 8))
plt.suptitle("Manifold Learning with %i points, %i neighbors"
% (1000, n_neighbors), fontsize=14)
try:
# compatibility matplotlib < 1.0
ax = fig.add_subplot(251, projection='3d')
ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=color, cmap=plt.cm.Spectral)
ax.view_init(4, -72)
except:
ax = fig.add_subplot(251, projection='3d')
plt.scatter(X[:, 0], X[:, 2], c=color, cmap=plt.cm.Spectral)
methods = ['standard', 'ltsa', 'hessian', 'modified']
labels = ['LLE', 'LTSA', 'Hessian LLE', 'Modified LLE']
for i, method in enumerate(methods):
t0 = time()
Y = manifold.LocallyLinearEmbedding(n_neighbors, n_components,
eigen_solver='auto',
method=method).fit_transform(X)
t1 = time()
print("%s: %.2g sec" % (methods[i], t1 - t0))
ax = fig.add_subplot(252 + i)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("%s (%.2g sec)" % (labels[i], t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
t0 = time()
Y = manifold.Isomap(n_neighbors, n_components).fit_transform(X)
t1 = time()
print("Isomap: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(257)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("Isomap (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
t0 = time()
mds = manifold.MDS(n_components, max_iter=100, n_init=1)
Y = mds.fit_transform(X)
t1 = time()
print("MDS: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(258)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("MDS (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
t0 = time()
se = manifold.SpectralEmbedding(n_components=n_components,
n_neighbors=n_neighbors)
Y = se.fit_transform(X)
t1 = time()
print("SpectralEmbedding: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(259)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("SpectralEmbedding (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
t0 = time()
tsne = manifold.TSNE(n_components=n_components, init='pca', random_state=0)
Y = tsne.fit_transform(X)
t1 = time()
print("t-SNE: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(2, 5, 10)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("t-SNE (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
plt.show()
| bsd-3-clause |
FluidityStokes/fluidity | examples/hokkaido-nansei-oki_tsunami/raw_data/plotinputwave.py | 1 | 2529 | #!/usr/bin/env python3
from fluidity_tools import stat_parser
import matplotlib.pyplot as plt
from matplotlib.pyplot import figure, show
import getopt
import sys
import csv
def usage():
print("plotinputwave.py -b starttime -e endtime --save=basename")
def get_inputelevation(t):
InputWaveReader = csv.reader(open('InputWave.csv', 'r'), delimiter='\t')
data=[]
for (time, height) in InputWaveReader:
data.append((float(time), float(height)))
for i in range(1,len(data)):
if data[i][0]<t:
continue
t1=data[max(0,i-1)][0]
t2=data[i][0]
h1=data[max(0,i-1)][1]
h2=data[i][1]
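# linearly interpolate the water level between the two samples
# (t1, h1) and (t2, h2) that bracket the requested time t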
return h1*(t-t2)/(t1-t2)+h2*(t-t1)/(t2-t1)
print("Warning: t is outside the available data. Using last available water height...")
return data[-1][1]
def main(argv=None):
dt=0.05 # use same timestep than in csv file
try:
opts, args = getopt.getopt(sys.argv[1:], "ht:e:b:", ['save=', 'help'])
except getopt.GetoptError:
print("Getopterror :(")
usage()
sys.exit(2)
subtitle=''
subtitle_pure=''
endtime=22.5
starttime=0.0
save=False
for opt, arg in opts:
if opt == '--save':
save=True
savename=arg
elif opt=='-h' or opt=='--help':
usage()
sys.exit(2)
elif opt=='-t':
subtitle=', '+arg
subtitle_pure=arg
elif opt=='-b':
starttime=float(arg)
elif opt=='-e':
endtime=float(arg)
print("Generating plot")
print('Using dt=', dt)
starttimestep=int(max(0,starttime/dt))
endtimestep=int(endtime/dt)
print('starttimestep=', starttimestep)
print('endtimestep=', endtimestep)
# fill in measurement data
input_elevation=[]
time=[]
for i in range(starttimestep, endtimestep):
time.append(i*dt)
elev=get_inputelevation(time[-1])
input_elevation.append(elev*100.0) # in cm
plt.ion() # switch in interactive mode
fig1= figure()
subplt1 = fig1.add_subplot(111, xlabel='Time [s]', ylabel='Water level [cm]')
subplt1.plot(time, input_elevation) # plot gauge1 detector data
if not save:
plt.draw()
input("Press Enter to exit")
else:
plt.savefig(savename+'.pdf', facecolor='white', edgecolor='black', dpi=100)
print('Saved to '+savename+'.pdf')
# for i in range(timesteps):
# gauge1.append(s["water"]["FreeSurface"]["gauge1"])
if __name__ == "__main__":
main()
| lgpl-2.1 |
mikecroucher/GPy | GPy/plotting/abstract_plotting_library.py | 6 | 13998 | #===============================================================================
# Copyright (c) 2015, Max Zwiessele
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of GPy.plotting.abstract_plotting_library nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#===============================================================================
#===============================================================================
# Make sure that the necessary files and functions are
# defined in the plotting library:
class AbstractPlottingLibrary(object):
def __init__(self):
"""
Set the defaults dictionary in the _defaults variable:
E.g. for matplotlib we define a file defaults.py and
set the dictionary of it here:
from . import defaults
_defaults = defaults.__dict__
"""
self._defaults = {}
self.__defaults = None
@property
def defaults(self):
#===============================================================================
if self.__defaults is None:
from collections import defaultdict
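# wrap the defaults in a defaultdict subclass that also supports attribute-style
# access, so backends can write defaults.some_plot as well as defaults['some_plot']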
class defaultdict(defaultdict):
def __getattr__(self, *args, **kwargs):
return defaultdict.__getitem__(self, *args, **kwargs)
self.__defaults = defaultdict(dict, self._defaults)
return self.__defaults
#===============================================================================
def figure(self, nrows, ncols, **kwargs):
"""
Get a new figure with nrows and ncolumns subplots.
Does not initialize the canvases yet.
There is individual kwargs for the individual plotting libraries to use.
"""
raise NotImplementedError("Implement all plot functions in AbstractPlottingLibrary in order to use your own plotting library")
def new_canvas(self, figure=None, col=1, row=1, projection='2d', xlabel=None, ylabel=None, zlabel=None, title=None, xlim=None, ylim=None, zlim=None, **kwargs):
"""
Return a canvas, kwargupdate for your plotting library.
if figure is not None, create a canvas in the figure
at subplot position (col, row).
This method does two things, it creates an empty canvas
and updates the kwargs (deletes the unnecessary kwargs)
for further usage in normal plotting.
the kwargs are plotting library specific kwargs!
:param {'2d'|'3d'} projection: The projection to use.
E.g. in matplotlib this means it deletes references to ax, as
plotting is done on the axis itself and is not a kwarg.
:param xlabel: the label to put on the xaxis
:param ylabel: the label to put on the yaxis
:param zlabel: the label to put on the zaxis (if plotting in 3d)
:param title: the title of the plot
:param legend: if True, plot a legend, if int make legend rows in the legend
:param (float, float) xlim: the limits for the xaxis
:param (float, float) ylim: the limits for the yaxis
:param (float, float) zlim: the limits for the zaxis (if plotting in 3d)
"""
raise NotImplementedError("Implement all plot functions in AbstractPlottingLibrary in order to use your own plotting library")
def add_to_canvas(self, canvas, plots, legend=True, title=None, **kwargs):
"""
Add plots is a dictionary with the plots as the
items or a list of plots as items to canvas.
The kwargs are plotting library specific kwargs!
E.g. in matplotlib this does not have to do anything to add stuff, but
we set the legend and title.
!This function returns the updated canvas!
:param title: the title of the plot
:param legend: whether to plot a legend or not
"""
raise NotImplementedError("Implement all plot functions in AbstractPlottingLibrary in order to use your own plotting library")
def show_canvas(self, canvas, **kwargs):
"""
Draw/Plot the canvas given.
"""
raise NotImplementedError
def plot(self, canvas, X, Y, Z=None, color=None, label=None, **kwargs):
"""
Make a line plot from for Y on X (Y = f(X)) on the canvas.
If Z is not None, plot in 3d!
the kwargs are plotting library specific kwargs!
"""
raise NotImplementedError("Implement all plot functions in AbstractPlottingLibrary in order to use your own plotting library")
def plot_axis_lines(self, ax, X, color=None, label=None, **kwargs):
"""
Plot lines at the bottom (lower boundary of yaxis) of the axis at input location X.
If X is two dimensional, plot in 3d and connect the axis lines to the bottom of the Z axis.
the kwargs are plotting library specific kwargs!
"""
raise NotImplementedError("Implement all plot functions in AbstractPlottingLibrary in order to use your own plotting library")
def surface(self, canvas, X, Y, Z, color=None, label=None, **kwargs):
"""
Plot a surface for 3d plotting for the inputs (X, Y, Z).
the kwargs are plotting library specific kwargs!
"""
raise NotImplementedError("Implement all plot functions in AbstractPlottingLibrary in order to use your own plotting library")
def scatter(self, canvas, X, Y, Z=None, color=None, vmin=None, vmax=None, label=None, **kwargs):
"""
Make a scatter plot between X and Y on the canvas given.
the kwargs are plotting library specific kwargs!
:param canvas: the plotting librarys specific canvas to plot on.
:param array-like X: the inputs to plot.
:param array-like Y: the outputs to plot.
:param array-like Z: the Z level to plot (if plotting 3d).
:param array-like c: the colorlevel for each point.
:param float vmin: minimum colorscale
:param float vmax: maximum colorscale
:param kwargs: the specific kwargs for your plotting library
"""
raise NotImplementedError("Implement all plot functions in AbstractPlottingLibrary in order to use your own plotting library")
def barplot(self, canvas, x, height, width=0.8, bottom=0, color=None, label=None, **kwargs):
"""
Plot vertical bar plot centered at x with height
and width of bars. The y level is at bottom.
the kwargs are plotting library specific kwargs!
:param array-like x: the center points of the bars
:param array-like height: the height of the bars
:param array-like width: the width of the bars
:param array-like bottom: the start y level of the bars
:param kwargs: kwargs for the specific library you are using.
"""
raise NotImplementedError("Implement all plot functions in AbstractPlottingLibrary in order to use your own plotting library")
def xerrorbar(self, canvas, X, Y, error, color=None, label=None, **kwargs):
"""
Make an errorbar along the xaxis for points at (X,Y) on the canvas.
if error is two dimensional, the lower error is error[:,0] and
the upper error is error[:,1]
the kwargs are plotting library specific kwargs!
"""
raise NotImplementedError("Implement all plot functions in AbstractPlottingLibrary in order to use your own plotting library")
def yerrorbar(self, canvas, X, Y, error, color=None, label=None, **kwargs):
"""
Make errorbars along the yaxis on the canvas given.
if error is two dimensional, the lower error is error[0, :] and
the upper error is error[1, :]
the kwargs are plotting library specific kwargs!
"""
raise NotImplementedError("Implement all plot functions in AbstractPlottingLibrary in order to use your own plotting library")
def imshow(self, canvas, X, extent=None, label=None, vmin=None, vmax=None, **kwargs):
"""
Show the image stored in X on the canvas.
The origin of the image show is (0,0), such that X[0,0] gets plotted at [0,0] of the image!
the kwargs are plotting library specific kwargs!
"""
raise NotImplementedError("Implement all plot functions in AbstractPlottingLibrary in order to use your own plotting library")
def imshow_interact(self, canvas, plot_function, extent=None, label=None, vmin=None, vmax=None, **kwargs):
"""
This function is optional!
Create an imshow controller to stream
the image returned by the plot_function. There is an imshow controller written for
matplotlib, which updates the imshow on changes in the axis.
The origin of the image show is (0,0), such that X[0,0] gets plotted at [0,0] of the image!
the kwargs are plotting library specific kwargs!
"""
raise NotImplementedError("Implement all plot functions in AbstractPlottingLibrary in order to use your own plotting library")
def annotation_heatmap(self, canvas, X, annotation, extent, label=None, **kwargs):
"""
Plot an annotation heatmap. That is like an imshow, but
put the text of the annotation inside the cells of the heatmap (centered).
:param canvas: the canvas to plot on
:param array-like annotation: the annotation labels for the heatmap
:param [horizontal_min,horizontal_max,vertical_min,vertical_max] extent: the extent of where to place the heatmap
:param str label: the label for the heatmap
:return: a list of both the heatmap and annotation plots [heatmap, annotation], or the interactive update object (alone)
"""
raise NotImplementedError("Implement all plot functions in AbstractPlottingLibrary in order to use your own plotting library")
def annotation_heatmap_interact(self, canvas, plot_function, extent, label=None, resolution=15, **kwargs):
"""
If plot_function is not None, return an interactively updated
heatmap, which redraws on axis events, so that one can zoom in
and out and the heatmap gets updated. See the matplotlib implementation
in matplot_dep.controllers.
plot_function returns a pair (X, annotation) to plot when called with
a new input X (the grid currently visible on the plot).
:param canvas: the canvas to plot on
:param plot_function: the function which generates new (X, annotation) data for given input locations X
:param [horizontal_min,horizontal_max,vertical_min,vertical_max] extent: the extent of where to place the heatmap
:param str label: the label for the heatmap
:param int resolution: the resolution of the interactive plot redraw - this is only needed when giving a plot_function
:return: a list of both the heatmap and annotation plots [heatmap, annotation], or the interactive update object (alone)
"""
raise NotImplementedError("Implement all plot functions in AbstractPlottingLibrary in order to use your own plotting library")
def contour(self, canvas, X, Y, C, Z=None, color=None, label=None, **kwargs):
"""
Make a contour plot at (X, Y) with heights/colors stored in C on the canvas.
if Z is not None: make 3d contour plot at (X, Y, Z) with heights/colors stored in C on the canvas.
the kwargs are plotting library specific kwargs!
"""
raise NotImplementedError("Implement all plot functions in AbstractPlottingLibrary in order to use your own plotting library")
def fill_between(self, canvas, X, lower, upper, color=None, label=None, **kwargs):
"""
Fill along the xaxis between lower and upper.
the kwargs are plotting library specific kwargs!
"""
raise NotImplementedError("Implement all plot functions in AbstractPlottingLibrary in order to use your own plotting library")
def fill_gradient(self, canvas, X, percentiles, color=None, label=None, **kwargs):
"""
Plot a gradient (in alpha values) for the given percentiles.
the kwargs are plotting library specific kwargs!
"""
print("fill_gradient not implemented in this backend.")
| bsd-3-clause |
yanlend/scikit-learn | sklearn/linear_model/tests/test_ransac.py | 216 | 13290 | import numpy as np
from numpy.testing import assert_equal, assert_raises
from numpy.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises_regexp
from scipy import sparse
from sklearn.utils.testing import assert_less
from sklearn.linear_model import LinearRegression, RANSACRegressor
from sklearn.linear_model.ransac import _dynamic_max_trials
# Generate coordinates of line
X = np.arange(-200, 200)
y = 0.2 * X + 20
data = np.column_stack([X, y])
# Add some faulty data
outliers = np.array((10, 30, 200))
data[outliers[0], :] = (1000, 1000)
data[outliers[1], :] = (-1000, -1000)
data[outliers[2], :] = (-100, -50)
X = data[:, 0][:, np.newaxis]
y = data[:, 1]
def test_ransac_inliers_outliers():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
# Estimate parameters of corrupted data
ransac_estimator.fit(X, y)
# Ground truth / reference inlier mask
ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
).astype(np.bool_)
ref_inlier_mask[outliers] = False
assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
def test_ransac_is_data_valid():
def is_data_valid(X, y):
assert_equal(X.shape[0], 2)
assert_equal(y.shape[0], 2)
return False
X = np.random.rand(10, 2)
y = np.random.rand(10, 1)
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5,
is_data_valid=is_data_valid,
random_state=0)
assert_raises(ValueError, ransac_estimator.fit, X, y)
def test_ransac_is_model_valid():
def is_model_valid(estimator, X, y):
assert_equal(X.shape[0], 2)
assert_equal(y.shape[0], 2)
return False
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5,
is_model_valid=is_model_valid,
random_state=0)
assert_raises(ValueError, ransac_estimator.fit, X, y)
def test_ransac_max_trials():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, max_trials=0,
random_state=0)
assert_raises(ValueError, ransac_estimator.fit, X, y)
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, max_trials=11,
random_state=0)
assert getattr(ransac_estimator, 'n_trials_', None) is None
ransac_estimator.fit(X, y)
assert_equal(ransac_estimator.n_trials_, 2)
def test_ransac_stop_n_inliers():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, stop_n_inliers=2,
random_state=0)
ransac_estimator.fit(X, y)
assert_equal(ransac_estimator.n_trials_, 1)
def test_ransac_stop_score():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, stop_score=0,
random_state=0)
ransac_estimator.fit(X, y)
assert_equal(ransac_estimator.n_trials_, 1)
def test_ransac_score():
X = np.arange(100)[:, None]
y = np.zeros((100, ))
y[0] = 1
y[1] = 100
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=0.5, random_state=0)
ransac_estimator.fit(X, y)
assert_equal(ransac_estimator.score(X[2:], y[2:]), 1)
assert_less(ransac_estimator.score(X[:2], y[:2]), 1)
def test_ransac_predict():
X = np.arange(100)[:, None]
y = np.zeros((100, ))
y[0] = 1
y[1] = 100
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=0.5, random_state=0)
ransac_estimator.fit(X, y)
assert_equal(ransac_estimator.predict(X), np.zeros(100))
def test_ransac_resid_thresh_no_inliers():
# When residual_threshold=0.0 there are no inliers and a
# ValueError with a message should be raised
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=0.0, random_state=0)
assert_raises_regexp(ValueError,
"No inliers.*residual_threshold.*0\.0",
ransac_estimator.fit, X, y)
def test_ransac_sparse_coo():
X_sparse = sparse.coo_matrix(X)
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
ransac_estimator.fit(X_sparse, y)
ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
).astype(np.bool_)
ref_inlier_mask[outliers] = False
assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
def test_ransac_sparse_csr():
X_sparse = sparse.csr_matrix(X)
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
ransac_estimator.fit(X_sparse, y)
ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
).astype(np.bool_)
ref_inlier_mask[outliers] = False
assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
def test_ransac_sparse_csc():
X_sparse = sparse.csc_matrix(X)
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
ransac_estimator.fit(X_sparse, y)
ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
).astype(np.bool_)
ref_inlier_mask[outliers] = False
assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
def test_ransac_none_estimator():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
ransac_none_estimator = RANSACRegressor(None, 2, 5, random_state=0)
ransac_estimator.fit(X, y)
ransac_none_estimator.fit(X, y)
assert_array_almost_equal(ransac_estimator.predict(X),
ransac_none_estimator.predict(X))
def test_ransac_min_n_samples():
base_estimator = LinearRegression()
ransac_estimator1 = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
ransac_estimator2 = RANSACRegressor(base_estimator,
min_samples=2. / X.shape[0],
residual_threshold=5, random_state=0)
ransac_estimator3 = RANSACRegressor(base_estimator, min_samples=-1,
residual_threshold=5, random_state=0)
ransac_estimator4 = RANSACRegressor(base_estimator, min_samples=5.2,
residual_threshold=5, random_state=0)
ransac_estimator5 = RANSACRegressor(base_estimator, min_samples=2.0,
residual_threshold=5, random_state=0)
ransac_estimator6 = RANSACRegressor(base_estimator,
residual_threshold=5, random_state=0)
ransac_estimator7 = RANSACRegressor(base_estimator,
min_samples=X.shape[0] + 1,
residual_threshold=5, random_state=0)
ransac_estimator1.fit(X, y)
ransac_estimator2.fit(X, y)
ransac_estimator5.fit(X, y)
ransac_estimator6.fit(X, y)
assert_array_almost_equal(ransac_estimator1.predict(X),
ransac_estimator2.predict(X))
assert_array_almost_equal(ransac_estimator1.predict(X),
ransac_estimator5.predict(X))
assert_array_almost_equal(ransac_estimator1.predict(X),
ransac_estimator6.predict(X))
assert_raises(ValueError, ransac_estimator3.fit, X, y)
assert_raises(ValueError, ransac_estimator4.fit, X, y)
assert_raises(ValueError, ransac_estimator7.fit, X, y)
def test_ransac_multi_dimensional_targets():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
# 3-D target values
yyy = np.column_stack([y, y, y])
# Estimate parameters of corrupted data
ransac_estimator.fit(X, yyy)
# Ground truth / reference inlier mask
ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
).astype(np.bool_)
ref_inlier_mask[outliers] = False
assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
def test_ransac_residual_metric():
residual_metric1 = lambda dy: np.sum(np.abs(dy), axis=1)
residual_metric2 = lambda dy: np.sum(dy ** 2, axis=1)
yyy = np.column_stack([y, y, y])
base_estimator = LinearRegression()
ransac_estimator0 = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
ransac_estimator1 = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0,
residual_metric=residual_metric1)
ransac_estimator2 = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0,
residual_metric=residual_metric2)
# multi-dimensional
ransac_estimator0.fit(X, yyy)
ransac_estimator1.fit(X, yyy)
ransac_estimator2.fit(X, yyy)
assert_array_almost_equal(ransac_estimator0.predict(X),
ransac_estimator1.predict(X))
assert_array_almost_equal(ransac_estimator0.predict(X),
ransac_estimator2.predict(X))
# one-dimensional
ransac_estimator0.fit(X, y)
ransac_estimator2.fit(X, y)
assert_array_almost_equal(ransac_estimator0.predict(X),
ransac_estimator2.predict(X))
def test_ransac_default_residual_threshold():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
random_state=0)
# Estimate parameters of corrupted data
ransac_estimator.fit(X, y)
# Ground truth / reference inlier mask
ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
).astype(np.bool_)
ref_inlier_mask[outliers] = False
assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
def test_ransac_dynamic_max_trials():
# Numbers hand-calculated and confirmed on page 119 (Table 4.3) in
# Hartley, R.~I. and Zisserman, A., 2004,
# Multiple View Geometry in Computer Vision, Second Edition,
# Cambridge University Press, ISBN: 0521540518
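# (Added note) These counts follow from N = ceil(log(1 - p) / log(1 - w**n)),
# where p is the stop probability, w the inlier ratio and n = min_samples;
# e.g. w=0.95, n=2, p=0.99 gives log(0.01)/log(1 - 0.95**2) ~= 1.98 -> N = 2.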
# e = 0%, min_samples = X
assert_equal(_dynamic_max_trials(100, 100, 2, 0.99), 1)
# e = 5%, min_samples = 2
assert_equal(_dynamic_max_trials(95, 100, 2, 0.99), 2)
# e = 10%, min_samples = 2
assert_equal(_dynamic_max_trials(90, 100, 2, 0.99), 3)
# e = 30%, min_samples = 2
assert_equal(_dynamic_max_trials(70, 100, 2, 0.99), 7)
# e = 50%, min_samples = 2
assert_equal(_dynamic_max_trials(50, 100, 2, 0.99), 17)
# e = 5%, min_samples = 8
assert_equal(_dynamic_max_trials(95, 100, 8, 0.99), 5)
# e = 10%, min_samples = 8
assert_equal(_dynamic_max_trials(90, 100, 8, 0.99), 9)
# e = 30%, min_samples = 8
assert_equal(_dynamic_max_trials(70, 100, 8, 0.99), 78)
# e = 50%, min_samples = 8
assert_equal(_dynamic_max_trials(50, 100, 8, 0.99), 1177)
# e = 0%, min_samples = 10
assert_equal(_dynamic_max_trials(1, 100, 10, 0), 0)
assert_equal(_dynamic_max_trials(1, 100, 10, 1), float('inf'))
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
stop_probability=-0.1)
assert_raises(ValueError, ransac_estimator.fit, X, y)
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
stop_probability=1.1)
assert_raises(ValueError, ransac_estimator.fit, X, y)
| bsd-3-clause |
mrcslws/htmresearch | projects/sequence_prediction/continuous_sequence/plot.py | 7 | 5369 | #!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import argparse
from matplotlib import pyplot as plt
plt.ion()
# from suite import Suite
from run_lstm_suite import Suite
from htmresearch.support.sequence_learning_utils import *
import pandas as pd
import numpy as np
def plotMovingAverage(data, window, label=None):
movingData = movingAverage(data, min(len(data), window))
style = 'ro' if len(data) < window else ''
plt.plot(range(len(movingData)), movingData, style, label=label)
class ExperimentResult(object):
def __init__(self, experiment_name):
self.name = experiment_name
self.loadExperiment(experiment_name)
self.computeError()
def loadExperiment(self, experiment):
suite = Suite()
suite.parse_opt()
suite.parse_cfg()
experiment_dir = experiment.split('/')[1]
params = suite.items_to_params(suite.cfgparser.items(experiment_dir))
self.params = params
predictions = suite.get_history(experiment, 0, 'predictions')
truth = suite.get_history(experiment, 0, 'truth')
computeAfter = params['iterations']-len(predictions)
temp = np.zeros((computeAfter, ))
temp[:] = np.nan
self.iteration = suite.get_history(experiment, 0, 'iteration')
self.train = suite.get_history(experiment, 0, 'train')
self.truth = np.array(truth, dtype=np.float)
self.truth = np.concatenate((temp, self.truth))
if params['output_encoding'] == 'likelihood':
from nupic.encoders.scalar import ScalarEncoder as NupicScalarEncoder
self.outputEncoder = NupicScalarEncoder(w=1, minval=0, maxval=40000, n=22, forced=True)
# predictions_np = np.zeros((len(predictions), self.outputEncoder.n))
predictions_np = np.zeros((len(predictions)+computeAfter, self.outputEncoder.n))
for i in xrange(len(predictions)):
if predictions[i] is not None:
predictions_np[i+computeAfter, :] = np.array(predictions[i])
self.predictions = predictions_np
else:
self.predictions = np.array(predictions, dtype=np.float)
self.predictions = np.concatenate((temp, self.predictions))
def computeError(self):
if self.params['output_encoding'] == 'likelihood':
self.errorType = 'negLL'
self.error = computeLikelihood(self.predictions, self.truth, self.outputEncoder)
elif self.params['output_encoding'] is None:
self.errorType = 'square_deviation'
self.error = computeSquareDeviation(self.predictions, self.truth)
startAt = max(self.params['compute_after'], self.params['train_at_iteration'])
self.error[:startAt] = np.nan
def plotLSTMresult(experiment, window, xaxis=None, label=None):
expResult = ExperimentResult(experiment)
if xaxis is not None:
x = xaxis
else:
x = range(0, len(expResult.error))
error = plotAccuracy((expResult.error, x),
expResult.truth,
# train=expResult.train,
window=window,
label=label,
params=expResult.params,
errorType=expResult.errorType)
return (error, expResult)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('experiments', metavar='/path/to/experiment /path/...', nargs='+', type=str)
parser.add_argument('-w', '--window', type=int, default=480)
parser.add_argument('-f', '--full', action='store_true')
args = parser.parse_args()
from pylab import rcParams
rcParams.update({'figure.autolayout': True})
rcParams.update({'figure.facecolor': 'white'})
rcParams.update({'ytick.labelsize': 8})
rcParams.update({'figure.figsize': (12, 6)})
experiments = args.experiments
for experiment in experiments:
experiment_name = experiment.split('/')[-2]
expResult = ExperimentResult(experiment)
# use datetime as x-axis
filePath = './data/' + expResult.params['dataset'] + '.csv'
data = pd.read_csv(filePath, header=0, skiprows=[1, 2], names=['datetime', 'value', 'timeofday', 'dayofweek'])
x = pd.to_datetime(data['datetime'])
plotAccuracy((expResult.error, x),
expResult.truth,
train=expResult.train,
window=args.window,
label=experiment_name,
params=expResult.params,
errorType=expResult.errorType)
if len(experiments) > 1:
plt.legend()
plt.show()
| agpl-3.0 |
lizardsystem/flooding | flooding_lib/migrations/0001_initial.py | 1 | 47169 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.contrib.gis.db.models.fields
import django.db.models.deletion
from django.conf import settings
import flooding_lib.models
import django_extensions.db.fields
import django_extensions.db.fields.json
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('approvaltool', '0001_initial'),
('contenttypes', '0001_initial'),
('lizard_worker', '0001_initial'),
('sharedproject', '0001_initial'),
('auth', '0001_initial'),
('pyramids', '0001_initial'),
('flooding_presentation', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Animation',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('frames', models.IntegerField(default=0)),
('cols', models.IntegerField(default=0)),
('rows', models.IntegerField(default=0)),
('maxvalue', models.FloatField(null=True, blank=True)),
('geotransform', django_extensions.db.fields.json.JSONField()),
('basedir', models.TextField()),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Attachment',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('object_id', models.PositiveIntegerField()),
('name', models.CharField(max_length=200)),
('remarks', models.TextField(null=True, blank=True)),
('file', models.FileField(null=True, upload_to=flooding_lib.models.get_attachment_path, blank=True)),
('uploaded_by', models.CharField(max_length=200)),
('uploaded_date', models.DateTimeField(null=True, blank=True)),
('content_type', models.ForeignKey(to='contenttypes.ContentType')),
],
options={
'db_table': 'flooding_attachment',
'verbose_name': 'Attachment',
'verbose_name_plural': 'Attachments',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Breach',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=200)),
('remarks', models.TextField(blank=True)),
('active', models.BooleanField(default=True)),
('levelnormfrequency', models.FloatField()),
('canalbottomlevel', models.FloatField(null=True, blank=True)),
('groundlevel', models.FloatField()),
('defrucritical', models.FloatField()),
('defbaselevel', models.FloatField(null=True, blank=True)),
('decheight', models.FloatField(null=True, blank=True)),
('decheightbaselevel', models.FloatField(null=True, blank=True)),
('internalnode', django.contrib.gis.db.models.fields.PointField(srid=4326, verbose_name=b'internal node')),
('externalnode', django.contrib.gis.db.models.fields.PointField(srid=4326, verbose_name=b'external node')),
('geom', django.contrib.gis.db.models.fields.PointField(srid=4326, verbose_name=b'node itself')),
('code', models.CharField(max_length=20, null=True)),
('administrator', models.IntegerField(help_text=b'Breach administrator', null=True, blank=True)),
('fl_rk_adm_jud', models.IntegerField(help_text=b'Flood risk - administrator judgment (section part)', null=True, blank=True)),
('fl_rk_dpv_ref_part', models.IntegerField(help_text=b'Flood risk - DPV reference (section part)', null=True, blank=True)),
('fl_rk_dpv_ref_sect', models.IntegerField(help_text=b'Flood risk - DPV reference (dike section)', null=True, blank=True)),
('fl_rk_nrm', models.IntegerField(help_text=b'Flood risk - Norm', null=True, blank=True)),
],
options={
'ordering': ['name'],
'db_table': 'flooding_breach',
'verbose_name': 'Breach',
'verbose_name_plural': 'Breaches',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='BreachSobekModel',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('sobekid', models.CharField(max_length=200)),
('breach', models.ForeignKey(to='flooding_lib.Breach')),
],
options={
'db_table': 'flooding_breachsobekmodel',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Colormap',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('matplotlib_name', models.CharField(unique=True, max_length=20)),
('description', models.CharField(unique=True, max_length=50)),
],
options={
'ordering': ('description',),
},
bases=(models.Model,),
),
migrations.CreateModel(
name='CutoffLocation',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=200)),
('bottomlevel', models.FloatField()),
('width', models.FloatField()),
('deftclose', models.FloatField(null=True, blank=True)),
('type', models.IntegerField(choices=[(1, 'lock'), (2, 'culvert'), (3, 'weir'), (4, 'bridge'), (5, 'undefined'), (6, 'generic_internal')])),
('geom', django.contrib.gis.db.models.fields.PointField(srid=4326, verbose_name=b'node itself')),
('code', models.CharField(max_length=15, null=True)),
],
options={
'db_table': 'flooding_cutofflocation',
'verbose_name': 'Cutoff location',
'verbose_name_plural': 'Cutoff locations',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='CutoffLocationSet',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=200)),
('cutofflocations', models.ManyToManyField(to='flooding_lib.CutoffLocation')),
],
options={
'db_table': 'flooding_cutofflocationset',
'verbose_name': 'Cutoff location set',
'verbose_name_plural': 'Cutoff location sets',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='CutoffLocationSobekModelSetting',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('sobekid', models.CharField(max_length=200)),
('cutofflocation', models.ForeignKey(to='flooding_lib.CutoffLocation')),
],
options={
'db_table': 'flooding_cutofflocationsobekmodelsetting',
'verbose_name': 'Cutoff location sobek model setting',
'verbose_name_plural': 'Cutoff location sobek model settings',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Dike',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=200)),
],
options={
'db_table': 'flooding_dike',
'verbose_name': 'Dike',
'verbose_name_plural': 'Dikes',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='EmbankmentUnit',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('unit_id', models.CharField(max_length=20)),
('type', models.IntegerField(choices=[(0, 'existing'), (1, 'new')])),
('original_height', models.FloatField()),
('geometry', django.contrib.gis.db.models.fields.LineStringField(srid=4326)),
],
options={
'db_table': 'flooding_embankment_unit',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='ExternalWater',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=200)),
('type', models.IntegerField(choices=[(1, 'sea'), (2, 'lake'), (3, 'canal'), (4, 'internal_lake'), (5, 'internal_canal'), (6, 'river'), (7, 'unknown'), (8, 'lower_river')])),
('liztype', models.IntegerField(blank=True, null=True, choices=[(1, 'sea'), (2, b'estuarium'), (3, b'groot meer (incl. afgesloten zeearm)'), (4, b'grote rivier'), (5, b'scheepvaartkanaal'), (6, b'binnenmeer'), (7, b'regionale beek'), (8, b'regionale revier'), (9, b'boezemwater'), (10, b'polderwater')])),
('area', models.IntegerField(null=True, blank=True)),
('deftstorm', models.FloatField(null=True, blank=True)),
('deftpeak', models.FloatField(null=True, blank=True)),
('deftsim', models.FloatField()),
('minlevel', models.FloatField(default=-10)),
('maxlevel', models.FloatField(default=15)),
('code', models.CharField(max_length=15, null=True)),
('cutofflocations', models.ManyToManyField(to='flooding_lib.CutoffLocation', blank=True)),
],
options={
'db_table': 'flooding_externalwater',
'verbose_name': 'External water',
'verbose_name_plural': 'External waters',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='ExtraInfoField',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(unique=True, max_length=200)),
('use_in_scenario_overview', models.BooleanField(default=False)),
('header', models.IntegerField(default=20, choices=[(1, 'scenario'), (2, 'location'), (4, 'model'), (5, 'other'), (6, 'files'), (10, 'general'), (20, 'metadata'), (30, 'breaches'), (40, 'externalwater'), (70, 'none')])),
('position', models.IntegerField(default=0)),
],
options={
'db_table': 'flooding_extrainfofield',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='ExtraScenarioInfo',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('value', models.CharField(max_length=100)),
('extrainfofield', models.ForeignKey(to='flooding_lib.ExtraInfoField')),
],
options={
'db_table': 'flooding_extrascenarioinfo',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Map',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=200)),
('remarks', models.TextField(null=True, blank=True)),
('active', models.BooleanField(default=True)),
('index', models.IntegerField(default=100)),
('url', models.CharField(max_length=200)),
('layers', models.CharField(max_length=200)),
('transparent', models.NullBooleanField(default=None)),
('tiled', models.NullBooleanField(default=None)),
('srs', models.CharField(default=b'EPSG:900913', max_length=50)),
('visible', models.BooleanField(default=False)),
],
options={
'db_table': 'flooding_map',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Measure',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=100)),
('reference_adjustment', models.IntegerField(default=0, choices=[(0, 'unkown'), (1, 'existing level'), (2, 'new level')])),
('adjustment', models.FloatField(default=0)),
],
options={
'db_table': 'flooding_measure',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Program',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=200)),
],
options={
'db_table': 'flooding_program',
'verbose_name': 'Program',
'verbose_name_plural': 'Programs',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Project',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('friendlyname', models.CharField(max_length=200)),
('name', models.CharField(max_length=200)),
('color_mapping_name', models.CharField(max_length=256, null=True, blank=True)),
('code', models.CharField(max_length=20, null=True)),
('approval_object_type', models.ForeignKey(default=flooding_lib.models.get_default_approval_type, to='approvaltool.ApprovalObjectType', null=True)),
('owner', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
],
options={
'ordering': ('friendlyname', 'name', 'owner'),
'db_table': 'flooding_project',
'verbose_name': 'Project',
'verbose_name_plural': 'Projects',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='ProjectColormap',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('colormap', models.ForeignKey(to='flooding_lib.Colormap')),
('presentationtype', models.ForeignKey(to='flooding_presentation.PresentationType')),
('project', models.ForeignKey(to='flooding_lib.Project')),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='ProjectGroupPermission',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('permission', models.IntegerField(choices=[(1, 'view_scenario'), (2, 'add_scenario_new_simulation'), (7, 'add_scenario_import'), (3, 'edit_scenario'), (4, 'approve_scenario'), (5, 'delete_scenario'), (6, 'edit_scenario_simple')])),
('group', models.ForeignKey(to='auth.Group')),
('project', models.ForeignKey(to='flooding_lib.Project')),
],
options={
'db_table': 'flooding_projectgrouppermission',
'verbose_name': 'Project group permission',
'verbose_name_plural': 'Project group permissions',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Raster',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('uuid', django_extensions.db.fields.UUIDField(unique=True, editable=False, name=b'uuid', blank=True)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Region',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=200)),
('longname', models.CharField(max_length=200)),
('active', models.BooleanField(default=True)),
('normfrequency', models.IntegerField(null=True, blank=True)),
('geom', django.contrib.gis.db.models.fields.MultiPolygonField(srid=4326, verbose_name=b'Region Border')),
('path', models.CharField(max_length=200)),
('code', models.CharField(max_length=20, null=True)),
('dijkringnr', models.IntegerField(null=True, blank=True)),
('cutofflocations', models.ManyToManyField(to='flooding_lib.CutoffLocation', blank=True)),
('maps', models.ManyToManyField(to='flooding_lib.Map', blank=True)),
],
options={
'db_table': 'flooding_region',
'verbose_name': 'Region',
'verbose_name_plural': 'Regions',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='RegionSet',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=200)),
('parent', models.ForeignKey(related_name=b'children_set', blank=True, to='flooding_lib.RegionSet', null=True)),
('regions', models.ManyToManyField(to='flooding_lib.Region', blank=True)),
],
options={
'db_table': 'flooding_regionset',
'verbose_name': 'Region set',
'verbose_name_plural': 'Region sets',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Result',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('resultloc', models.CharField(max_length=200)),
('deltat', models.FloatField(null=True, blank=True)),
('resultpngloc', models.CharField(max_length=200, null=True, blank=True)),
('startnr', models.IntegerField(null=True, blank=True)),
('firstnr', models.IntegerField(null=True, blank=True)),
('lastnr', models.IntegerField(null=True, blank=True)),
('unit', models.CharField(max_length=10, null=True, blank=True)),
('value', models.FloatField(null=True, blank=True)),
('bbox', django.contrib.gis.db.models.fields.MultiPolygonField(srid=4326, null=True, verbose_name=b'Result Border', blank=True)),
('animation', models.ForeignKey(on_delete=django.db.models.deletion.SET_NULL, blank=True, to='flooding_lib.Animation', null=True)),
('raster', models.ForeignKey(on_delete=django.db.models.deletion.SET_NULL, blank=True, to='flooding_lib.Raster', null=True)),
],
options={
'db_table': 'flooding_result',
'verbose_name': 'Result',
'verbose_name_plural': 'Results',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='ResultType',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=50)),
('shortname_dutch', models.CharField(max_length=20, null=True, blank=True)),
('overlaytype', models.CharField(max_length=20, null=True, blank=True)),
('unit', models.CharField(max_length=15, null=True, blank=True)),
('color_mapping_name', models.CharField(max_length=256, null=True, blank=True)),
('content_names_re', models.CharField(max_length=256, null=True, blank=True)),
('use_to_compute_arrival_times', models.BooleanField(default=False, help_text=b'Dit is een animatie die geschikt is om er aankomsttijden mee te berekenen')),
],
options={
'db_table': 'flooding_resulttype',
'verbose_name': 'Result type',
'verbose_name_plural': 'Result types',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='ResultType_PresentationType',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('remarks', models.CharField(max_length=100)),
('presentationtype', models.ForeignKey(to='flooding_presentation.PresentationType')),
('resulttype', models.ForeignKey(to='flooding_lib.ResultType')),
],
options={
'db_table': 'flooding_resulttype_presentationtype',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Scenario',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=200, verbose_name='name')),
('remarks', models.TextField(default=None, null=True, verbose_name='remarks', blank=True)),
('tsim', models.FloatField()),
('calcpriority', models.IntegerField(default=20, choices=[(20, 'low'), (30, 'medium'), (40, 'high')])),
('status_cache', models.IntegerField(default=None, null=True, choices=[(10, 'deleted'), (20, 'approved'), (30, 'disapproved'), (40, 'calculated'), (50, 'error'), (60, 'waiting'), (70, 'none'), (80, 'archived')])),
('migrated', models.NullBooleanField()),
('code', models.CharField(max_length=15, null=True)),
('project_id', models.IntegerField(null=True)),
('has_sobek_presentation', models.NullBooleanField()),
('result_base_path', models.TextField(help_text=b'If left blank, the path is retrieved through scenario.breaches[0].region.path', null=True, blank=True)),
('config_3di', models.CharField(max_length=50, null=True, blank=True)),
('archived', models.BooleanField(default=False, verbose_name='Archived')),
('archived_at', models.DateTimeField(null=True, verbose_name='Archived at', blank=True)),
('archived_by', models.ForeignKey(related_name=b'archived_by_user', verbose_name='Archived by', blank=True, to=settings.AUTH_USER_MODEL, null=True)),
],
options={
'ordering': ('name', 'owner'),
'db_table': 'flooding_scenario',
'verbose_name': 'Scenario',
'verbose_name_plural': 'Scenarios',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Scenario_PresentationLayer',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('presentationlayer', models.ForeignKey(to='flooding_presentation.PresentationLayer')),
('scenario', models.ForeignKey(to='flooding_lib.Scenario')),
],
options={
'db_table': 'flooding_scenario_presentationlayer',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='ScenarioBreach',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('widthbrinit', models.FloatField()),
('methstartbreach', models.IntegerField(choices=[(1, 'at top'), (2, 'at moment x'), (3, 'at crossing level x'), (4, 'unknown/error at import')])),
('tstartbreach', models.FloatField()),
('hstartbreach', models.FloatField()),
('brdischcoef', models.FloatField()),
('brf1', models.FloatField()),
('brf2', models.FloatField()),
('bottomlevelbreach', models.FloatField()),
('initialcrest', models.FloatField(null=True, blank=True)),
('ucritical', models.FloatField()),
('pitdepth', models.FloatField()),
('tmaxdepth', models.FloatField()),
('extwmaxlevel', models.FloatField()),
('extwbaselevel', models.FloatField(default=None, null=True, blank=True)),
('extwrepeattime', models.IntegerField(default=None, null=True, blank=True)),
('tstorm', models.FloatField(default=None, null=True, blank=True)),
('tpeak', models.FloatField(default=None, null=True, blank=True)),
('tdeltaphase', models.FloatField(default=None, null=True, blank=True)),
('manualwaterlevelinput', models.BooleanField(default=False)),
('code', models.CharField(max_length=15, null=True, blank=True)),
('breach', models.ForeignKey(to='flooding_lib.Breach')),
('scenario', models.ForeignKey(to='flooding_lib.Scenario')),
],
options={
'db_table': 'flooding_scenariobreach',
'verbose_name': 'Scenario breach',
'verbose_name_plural': 'Scenario breaches',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='ScenarioCutoffLocation',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('action', models.IntegerField(default=1, null=True, blank=True)),
('tclose', models.FloatField()),
('cutofflocation', models.ForeignKey(to='flooding_lib.CutoffLocation')),
('scenario', models.ForeignKey(to='flooding_lib.Scenario')),
],
options={
'db_table': 'flooding_scenariocutofflocation',
'verbose_name': 'Scenario cutoff location',
'verbose_name_plural': 'Scenario cutoff locations',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='ScenarioProject',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('is_main_project', models.BooleanField(default=False)),
('approved', models.NullBooleanField()),
('approvalobject', models.ForeignKey(default=None, blank=True, to='approvaltool.ApprovalObject', null=True)),
('project', models.ForeignKey(to='flooding_lib.Project')),
('scenario', models.ForeignKey(to='flooding_lib.Scenario')),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='ScenarioShareOffer',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('new_project', models.ForeignKey(to='flooding_lib.Project')),
('scenario', models.ForeignKey(to='flooding_lib.Scenario')),
('shared_by', models.ForeignKey(to=settings.AUTH_USER_MODEL, null=True)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='SobekModel',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('sobekmodeltype', models.IntegerField(choices=[(1, 'canal'), (2, 'inundation')])),
('active', models.BooleanField(default=True)),
('project_fileloc', models.CharField(help_text=b'In case of 3Di, point to model zipfile.', max_length=200)),
('model_case', models.IntegerField()),
('model_version', models.CharField(max_length=20)),
('model_srid', models.IntegerField()),
('model_varname', models.CharField(help_text=b'In case of 3Di, .mdu filename in zip.', max_length=40, null=True, blank=True)),
('model_vardescription', models.CharField(max_length=200, null=True, blank=True)),
('remarks', models.TextField(null=True)),
('embankment_damage_shape', models.CharField(max_length=200, null=True, blank=True)),
('code', models.CharField(max_length=15, null=True, blank=True)),
('keep_initial_level', models.BooleanField(default=False)),
],
options={
'db_table': 'flooding_sobekmodel',
'verbose_name': 'Sobek model',
'verbose_name_plural': 'Sobek models',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='SobekVersion',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=200)),
('fileloc_startfile', models.CharField(max_length=200)),
],
options={
'db_table': 'flooding_sobekversion',
'verbose_name': 'Sobek version',
'verbose_name_plural': 'Sobek versions',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Strategy',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=100)),
('visible_for_loading', models.BooleanField(default=False)),
('save_date', models.DateTimeField(null=True, blank=True)),
('region', models.ForeignKey(blank=True, to='flooding_lib.Region', null=True)),
('user', models.ForeignKey(blank=True, to=settings.AUTH_USER_MODEL, null=True)),
],
options={
'db_table': 'flooding_strategy',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Task',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('remarks', models.TextField(blank=True)),
('creatorlog', models.CharField(max_length=40)),
('tstart', models.DateTimeField()),
('tfinished', models.DateTimeField(null=True, blank=True)),
('errorlog', models.TextField(null=True, blank=True)),
('successful', models.NullBooleanField()),
('scenario', models.ForeignKey(to='flooding_lib.Scenario')),
],
options={
'get_latest_by': 'tstart',
'verbose_name': 'Task',
'verbose_name_plural': 'Tasks',
'db_table': 'flooding_task',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='TaskExecutor',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=200)),
('ipaddress', models.IPAddressField()),
('port', models.IntegerField()),
('active', models.BooleanField(default=True)),
('revision', models.CharField(max_length=20)),
('seq', models.IntegerField(default=1)),
],
options={
'db_table': 'flooding_taskexecutor',
'verbose_name': 'Task executor',
'verbose_name_plural': 'Task executors',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='TaskType',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=200)),
],
options={
'db_table': 'flooding_tasktype',
'verbose_name': 'Task type',
'verbose_name_plural': 'Task types',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='ThreediCalculation',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('status', models.IntegerField(default=1, choices=[(1, b'created'), (2, b'netcdf created'), (3, b'images created, finished.')])),
('scenario', models.ForeignKey(to='flooding_lib.Scenario')),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='ThreediModel',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=80)),
('scenario_zip_filename', models.TextField(help_text=b'full path start with / or folder from Settings.SOURCE_DIR, must contain mdu file')),
('mdu_filename', models.TextField(help_text=b'base filename of mdu file')),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='UserPermission',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('permission', models.IntegerField(choices=[(1, 'view_scenario'), (2, 'add_scenario_new_simulation'), (7, 'add_scenario_import'), (3, 'edit_scenario'), (4, 'approve_scenario'), (5, 'delete_scenario'), (6, 'edit_scenario_simple')])),
('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
],
options={
'db_table': 'flooding_userpermission',
'verbose_name': 'User permission',
'verbose_name_plural': 'User permissions',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Waterlevel',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('time', models.FloatField()),
('value', models.FloatField()),
],
options={
'db_table': 'flooding_waterlevel',
'verbose_name': 'Waterlevel',
'verbose_name_plural': 'Waterlevels',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='WaterlevelSet',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=200)),
('type', models.IntegerField(choices=[(1, 'undefined'), (2, 'tide'), (3, 'breach')])),
('remarks', models.TextField(null=True, blank=True)),
('code', models.CharField(max_length=20, null=True)),
],
options={
'db_table': 'flooding_waterlevelset',
'verbose_name': 'Waterlevel set',
'verbose_name_plural': 'Waterlevel sets',
},
bases=(models.Model,),
),
migrations.AddField(
model_name='waterlevel',
name='waterlevelset',
field=models.ForeignKey(to='flooding_lib.WaterlevelSet'),
preserve_default=True,
),
migrations.AlterUniqueTogether(
name='waterlevel',
unique_together=set([('waterlevelset', 'time')]),
),
migrations.AlterUniqueTogether(
name='userpermission',
unique_together=set([('user', 'permission')]),
),
migrations.AddField(
model_name='threedicalculation',
name='threedi_model',
field=models.ForeignKey(to='flooding_lib.ThreediModel'),
preserve_default=True,
),
migrations.AddField(
model_name='taskexecutor',
name='tasktypes',
field=models.ManyToManyField(to='flooding_lib.TaskType', null=True, blank=True),
preserve_default=True,
),
migrations.AlterUniqueTogether(
name='taskexecutor',
unique_together=set([('ipaddress', 'port'), ('name', 'seq')]),
),
migrations.AddField(
model_name='task',
name='tasktype',
field=models.ForeignKey(to='flooding_lib.TaskType'),
preserve_default=True,
),
migrations.AddField(
model_name='sobekmodel',
name='sobekversion',
field=models.ForeignKey(to='flooding_lib.SobekVersion'),
preserve_default=True,
),
migrations.AlterUniqueTogether(
name='scenarioshareoffer',
unique_together=set([('scenario', 'new_project')]),
),
migrations.AlterUniqueTogether(
name='scenariocutofflocation',
unique_together=set([('scenario', 'cutofflocation')]),
),
migrations.AddField(
model_name='scenariobreach',
name='sobekmodel_externalwater',
field=models.ForeignKey(blank=True, to='flooding_lib.SobekModel', null=True),
preserve_default=True,
),
migrations.AddField(
model_name='scenariobreach',
name='tide',
field=models.ForeignKey(related_name=b'tide', default=None, blank=True, to='flooding_lib.WaterlevelSet', null=True),
preserve_default=True,
),
migrations.AddField(
model_name='scenariobreach',
name='waterlevelset',
field=models.ForeignKey(to='flooding_lib.WaterlevelSet'),
preserve_default=True,
),
migrations.AlterUniqueTogether(
name='scenariobreach',
unique_together=set([('scenario', 'breach')]),
),
migrations.AddField(
model_name='scenario',
name='breaches',
field=models.ManyToManyField(to='flooding_lib.Breach', through='flooding_lib.ScenarioBreach'),
preserve_default=True,
),
migrations.AddField(
model_name='scenario',
name='cutofflocations',
field=models.ManyToManyField(to='flooding_lib.CutoffLocation', through='flooding_lib.ScenarioCutoffLocation', blank=True),
preserve_default=True,
),
migrations.AddField(
model_name='scenario',
name='owner',
field=models.ForeignKey(to=settings.AUTH_USER_MODEL),
preserve_default=True,
),
migrations.AddField(
model_name='scenario',
name='presentationlayer',
field=models.ManyToManyField(to='flooding_presentation.PresentationLayer', through='flooding_lib.Scenario_PresentationLayer'),
preserve_default=True,
),
migrations.AddField(
model_name='scenario',
name='projects',
field=models.ManyToManyField(related_name=b'scenarios', through='flooding_lib.ScenarioProject', to='flooding_lib.Project'),
preserve_default=True,
),
migrations.AddField(
model_name='scenario',
name='ror_province',
field=models.ForeignKey(blank=True, to='sharedproject.Province', null=True),
preserve_default=True,
),
migrations.AddField(
model_name='scenario',
name='sobekmodel_inundation',
field=models.ForeignKey(to='flooding_lib.SobekModel', null=True),
preserve_default=True,
),
migrations.AddField(
model_name='scenario',
name='strategy',
field=models.ForeignKey(default=None, blank=True, to='flooding_lib.Strategy', null=True),
preserve_default=True,
),
migrations.AddField(
model_name='scenario',
name='workflow_template',
field=models.ForeignKey(db_column=b'workflow_template', to='lizard_worker.WorkflowTemplate', null=True),
preserve_default=True,
),
migrations.AddField(
model_name='resulttype',
name='presentationtype',
field=models.ManyToManyField(to='flooding_presentation.PresentationType', through='flooding_lib.ResultType_PresentationType'),
preserve_default=True,
),
migrations.AddField(
model_name='resulttype',
name='program',
field=models.ForeignKey(to='flooding_lib.Program'),
preserve_default=True,
),
migrations.AddField(
model_name='result',
name='resulttype',
field=models.ForeignKey(to='flooding_lib.ResultType'),
preserve_default=True,
),
migrations.AddField(
model_name='result',
name='scenario',
field=models.ForeignKey(to='flooding_lib.Scenario'),
preserve_default=True,
),
migrations.AlterUniqueTogether(
name='result',
unique_together=set([('scenario', 'resulttype')]),
),
migrations.AddField(
model_name='region',
name='sobekmodels',
field=models.ManyToManyField(to='flooding_lib.SobekModel', blank=True),
preserve_default=True,
),
migrations.AlterUniqueTogether(
name='projectgrouppermission',
unique_together=set([('group', 'project', 'permission')]),
),
migrations.AddField(
model_name='project',
name='regions',
field=models.ManyToManyField(to='flooding_lib.Region', blank=True),
preserve_default=True,
),
migrations.AddField(
model_name='project',
name='regionsets',
field=models.ManyToManyField(to='flooding_lib.RegionSet', blank=True),
preserve_default=True,
),
migrations.AddField(
model_name='measure',
name='strategy',
field=models.ManyToManyField(to='flooding_lib.Strategy'),
preserve_default=True,
),
migrations.AddField(
model_name='extrascenarioinfo',
name='scenario',
field=models.ForeignKey(to='flooding_lib.Scenario'),
preserve_default=True,
),
migrations.AlterUniqueTogether(
name='extrascenarioinfo',
unique_together=set([('extrainfofield', 'scenario')]),
),
migrations.AddField(
model_name='externalwater',
name='sobekmodels',
field=models.ManyToManyField(to='flooding_lib.SobekModel', blank=True),
preserve_default=True,
),
migrations.AddField(
model_name='embankmentunit',
name='measure',
field=models.ManyToManyField(to='flooding_lib.Measure'),
preserve_default=True,
),
migrations.AddField(
model_name='embankmentunit',
name='region',
field=models.ForeignKey(to='flooding_lib.Region'),
preserve_default=True,
),
migrations.AddField(
model_name='cutofflocationsobekmodelsetting',
name='sobekmodel',
field=models.ForeignKey(to='flooding_lib.SobekModel'),
preserve_default=True,
),
migrations.AddField(
model_name='cutofflocation',
name='sobekmodels',
field=models.ManyToManyField(to='flooding_lib.SobekModel', through='flooding_lib.CutoffLocationSobekModelSetting'),
preserve_default=True,
),
migrations.AddField(
model_name='breachsobekmodel',
name='sobekmodel',
field=models.ForeignKey(to='flooding_lib.SobekModel'),
preserve_default=True,
),
migrations.AlterUniqueTogether(
name='breachsobekmodel',
unique_together=set([('sobekmodel', 'breach')]),
),
migrations.AddField(
model_name='breach',
name='defaulttide',
field=models.ForeignKey(to='flooding_lib.WaterlevelSet'),
preserve_default=True,
),
migrations.AddField(
model_name='breach',
name='dike',
field=models.ForeignKey(to='flooding_lib.Dike'),
preserve_default=True,
),
migrations.AddField(
model_name='breach',
name='externalwater',
field=models.ForeignKey(to='flooding_lib.ExternalWater'),
preserve_default=True,
),
migrations.AddField(
model_name='breach',
name='region',
field=models.ForeignKey(to='flooding_lib.Region'),
preserve_default=True,
),
migrations.AddField(
model_name='breach',
name='sobekmodels',
field=models.ManyToManyField(to='flooding_lib.SobekModel', through='flooding_lib.BreachSobekModel'),
preserve_default=True,
),
]
| gpl-3.0 |
hrjn/scikit-learn | examples/cluster/plot_face_ward_segmentation.py | 71 | 2460 | """
=========================================================================
A demo of structured Ward hierarchical clustering on a raccoon face image
=========================================================================
Compute the segmentation of a 2D image with Ward hierarchical
clustering. The clustering is spatially constrained in order
for each segmented region to be in one piece.
"""
# Author : Vincent Michel, 2010
# Alexandre Gramfort, 2011
# License: BSD 3 clause
print(__doc__)
import time as time
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
from sklearn.feature_extraction.image import grid_to_graph
from sklearn.cluster import AgglomerativeClustering
from sklearn.utils.testing import SkipTest
from sklearn.utils.fixes import sp_version
if sp_version < (0, 12):
raise SkipTest("Skipping because SciPy version earlier than 0.12.0 and "
"thus does not include the scipy.misc.face() image.")
###############################################################################
# Generate data
try:
face = sp.face(gray=True)
except AttributeError:
# Newer versions of scipy have face in misc
from scipy import misc
face = misc.face(gray=True)
# Resize it to 10% of the original size to speed up the processing
face = sp.misc.imresize(face, 0.10) / 255.
X = np.reshape(face, (-1, 1))
###############################################################################
# Define the structure A of the data. Pixels connected to their neighbors.
connectivity = grid_to_graph(*face.shape)
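# Note (added): grid_to_graph builds a sparse adjacency matrix of shape
# (n_pixels, n_pixels) linking each pixel to its grid neighbours; passing it
# as `connectivity` restricts the Ward merges to spatially adjacent pixels.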
###############################################################################
# Compute clustering
print("Compute structured hierarchical clustering...")
st = time.time()
n_clusters = 15 # number of regions
ward = AgglomerativeClustering(n_clusters=n_clusters, linkage='ward',
connectivity=connectivity)
ward.fit(X)
label = np.reshape(ward.labels_, face.shape)
print("Elapsed time: ", time.time() - st)
print("Number of pixels: ", label.size)
print("Number of clusters: ", np.unique(label).size)
###############################################################################
# Plot the results on an image
plt.figure(figsize=(5, 5))
plt.imshow(face, cmap=plt.cm.gray)
for l in range(n_clusters):
plt.contour(label == l, contours=1,
colors=[plt.cm.spectral(l / float(n_clusters)), ])
plt.xticks(())
plt.yticks(())
plt.show()
| bsd-3-clause |
CChengz/dot.r | workspace/fits/.metadata/.plugins/org.eclipse.wst.server.core/tmp0/wtpwebapps/amil/WEB-INF/files/map_outputs/20150209000301oYqb/map_gen.py | 11 | 6786 | #!/usr/bin/env python
"""
Created on Feb 4, 2015
@author: Cheng Zeng, University of Aberdeen
"""
import os.path
import matplotlib as mpl
mpl.use('Agg')
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.mlab import griddata
from matplotlib.colors import from_levels_and_colors
import mplleaflet
def save(fig=None, path='_map.html', template='base.html' ,**kwargs):
fullpath = os.path.abspath(path)
with open(fullpath, 'w') as f:
mplleaflet.save_html(fig, fileobj=f, template =template, **kwargs)
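# Note (added): mplleaflet.save_html renders the matplotlib figure as an
# interactive Leaflet web map using the given HTML template; the
# 'base_*.html' templates referenced below are project-specific files.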
file_path = os.path.dirname(os.path.realpath(__file__))
data_x = np.loadtxt(file_path+'/data/StartLng.txt')
data_y = np.loadtxt(file_path+'/data/StartLat.txt')
data_z = np.loadtxt(file_path+'/data/PT time.txt')
numcols, numrows = 100, 100
xi = np.linspace(data_x.min(), data_x.max(), numcols)
yi = np.linspace(data_y.min(), data_y.max(), numrows)
xi, yi = np.meshgrid(xi, yi)
#-- Interpolating at the points in xi, yi
x, y, z = data_x, data_y, data_z
zi = griddata(x, y, z, xi, yi)
fig = plt.figure()
bands_time = [0, 5, 10, 15, 20, 25, 30, 40, 50, 60, 75 , 90, 105, 120 , 150, 1000]
bands_cost = [0, 0.5, 1, 1.5, 2.0, 2.5, 3.0, 4.0, 5.0, 6.0, 7.5, 9.0, 10.5, 12.0, 15.0, 100.0]
bands_gen_cost = [0, 1.5, 3, 4.5, 6.0, 7.5, 9.0, 12.0, 15.0, 18.0, 22.5, 27.0, 31.5, 36.0, 45.0, 100.0] #
my_rgbs =['#800026', '#800026', '#bd0026', '#e31a1c', '#fc4e2a', '#fd8d3c', '#feb24c', '#fed976',
'#ffeda0', '#ffffcc', '#d0d1e6', '#a6bddb', '#74a9cf', '#3690c0', '#0570b0', '#034e7b']
##################################
# generate PT time contour map ###
##################################
cmap_time, norm_time = from_levels_and_colors(bands_time, my_rgbs, extend='min')
m = plt.contourf(xi, yi, zi, latlon=True, levels= bands_time, cmap = cmap_time, norm=norm_time)
# set the path to generate PT time map
mapfile = file_path + '/map/PT_time.html'
# convert to leaflet map
save(fig = fig, path=mapfile, template = 'base_time.html', tiles='mapbox bright')
# fig.colorbar(m)
# plt.show()
##################################
# generate PT cost contour map ###
##################################
data_z = np.loadtxt(file_path+'/data/PT cost.txt')
x, y, z = data_x, data_y, data_z
zi = griddata(x, y, z, xi, yi)
fig = plt.figure()
cmap_cost, norm_cost = from_levels_and_colors(bands_cost, my_rgbs, extend='min')
m = plt.contourf(xi, yi, zi, latlon=True, levels= bands_cost, cmap = cmap_cost, norm=norm_cost)
# set the path to generate the PT cost map
mapfile = file_path + '/map/PT_cost.html'
# convert to leaflet map
save(fig = fig, path=mapfile, template = 'base_cost.html', tiles='mapbox bright')
# fig.colorbar(m)
# plt.show()
######################################
# generate PT gen_cost contour map ###
######################################
data_z = np.loadtxt(file_path+'/data/PT gen_cost.txt')
x, y, z = data_x, data_y, data_z
zi = griddata(x, y, z, xi, yi)
fig = plt.figure()
cmap_gen_cost, norm_gen_cost = from_levels_and_colors(bands_gen_cost, my_rgbs, extend='min')
m = plt.contourf(xi, yi, zi, latlon=True, levels= bands_gen_cost, cmap = cmap_gen_cost, norm=norm_gen_cost)
# set the path to generate the PT gen_cost map
mapfile = file_path + '/map/PT_gen_cost.html'
# convert to leaflet map
save(fig = fig, path=mapfile, template = 'base_gen_cost.html', tiles='mapbox bright')
# fig.colorbar(m)
# plt.show()
####################################
# generate Car time contour map ###
####################################
data_z = np.loadtxt(file_path+'/data/Car time.txt')
x, y, z = data_x, data_y, data_z
zi = griddata(x, y, z, xi, yi)
fig = plt.figure()
m = plt.contourf(xi, yi, zi, latlon=True, levels= bands_time, cmap = cmap_time, norm=norm_time)
# set the path to generate the Car time map
mapfile = file_path + '/map/Car_time.html'
# convert to leaflet map
save(fig = fig, path=mapfile, template = 'base_time.html', tiles='mapbox bright')
# fig.colorbar(m)
# plt.show()
####################################
# generate Car cost contour map ###
####################################
data_z = np.loadtxt(file_path+'/data/Car cost.txt')
x, y, z = data_x, data_y, data_z
zi = griddata(x, y, z, xi, yi)
fig = plt.figure()
m = plt.contourf(xi, yi, zi, latlon=True, levels= bands_cost, cmap = cmap_cost, norm=norm_cost)
# set the path to generate the Car cost map
mapfile = file_path + '/map/Car_cost.html'
# convert to leaflet map
save(fig = fig, path=mapfile, template = 'base_cost.html', tiles='mapbox bright')
# fig.colorbar(m)
# plt.show()
#######################################
# generate Car gen_cost contour map ###
#######################################
data_z = np.loadtxt(file_path+'/data/Car gen_cost.txt')
x, y, z = data_x, data_y, data_z
zi = griddata(x, y, z, xi, yi)
fig = plt.figure()
m = plt.contourf(xi, yi, zi, latlon=True, levels= bands_gen_cost, cmap = cmap_gen_cost, norm=norm_gen_cost)
# set the path to generate the Car gen_cost map
mapfile = file_path + '/map/Car_gen_cost.html'
# convert to leaflet map
save(fig = fig, path=mapfile, template = 'base_gen_cost.html', tiles='mapbox bright')
# fig.colorbar(m)
# plt.show()
#######################################
# generate Carpool time contour map ###
#######################################
data_z = np.loadtxt(file_path+'/data/Carpool time.txt')
x, y, z = data_x, data_y, data_z
zi = griddata(x, y, z, xi, yi)
fig = plt.figure()
m = plt.contourf(xi, yi, zi, latlon=True, levels= bands_time, cmap = cmap_time, norm=norm_time)
# set the path to generate the Carpool time map
mapfile = file_path + '/map/Carpool_time.html'
# convert to leaflet map
save(fig = fig, path=mapfile, template = 'base_time.html', tiles='mapbox bright')
# fig.colorbar(m)
# plt.show()
#######################################
# generate Carpool cost contour map ###
#######################################
data_z = np.loadtxt(file_path+'/data/Carpool cost.txt')
x, y, z = data_x, data_y, data_z
zi = griddata(x, y, z, xi, yi)
fig = plt.figure()
m = plt.contourf(xi, yi, zi, latlon=True, levels= bands_cost, cmap = cmap_cost, norm=norm_cost)
# set the path to generate the Carpool cost map
mapfile = file_path + '/map/Carpool_cost.html'
save(fig = fig, path=mapfile, template = 'base_cost.html', tiles='mapbox bright')
# fig.colorbar(m)
# plt.show()
###########################################
# generate Carpool gen_cost contour map ###
###########################################
data_z = np.loadtxt(file_path+'/data/Carpool gen_cost.txt')
x, y, z = data_x, data_y, data_z
zi = griddata(x, y, z, xi, yi)
fig = plt.figure()
m = plt.contourf(xi, yi, zi, latlon=True, levels= bands_gen_cost, cmap = cmap_gen_cost, norm=norm_gen_cost)
# set the path to generate the Carpool gen_cost map
mapfile = file_path + '/map/Carpool_gen_cost.html'
save(fig = fig, path=mapfile, template = 'base_gen_cost.html', tiles='mapbox bright')
# fig.colorbar(m)
# plt.show()
| apache-2.0 |
dhomeier/astropy | astropy/table/__init__.py | 8 | 3387 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from astropy import config as _config
from .column import Column, MaskedColumn, StringTruncateWarning, ColumnInfo
__all__ = ['BST', 'Column', 'ColumnGroups', 'ColumnInfo', 'Conf',
'JSViewer', 'MaskedColumn', 'NdarrayMixin', 'QTable', 'Row',
'SCEngine', 'SerializedColumn', 'SortedArray', 'StringTruncateWarning',
'Table', 'TableAttribute', 'TableColumns', 'TableFormatter',
'TableGroups', 'TableMergeError', 'TableReplaceWarning', 'conf',
'connect', 'hstack', 'join', 'registry', 'represent_mixins_as_columns',
'setdiff', 'unique', 'vstack', 'dstack', 'conf', 'join_skycoord',
'join_distance', 'PprintIncludeExclude']
class Conf(_config.ConfigNamespace): # noqa
"""
Configuration parameters for `astropy.table`.
"""
auto_colname = _config.ConfigItem(
'col{0}',
'The template that determines the name of a column if it cannot be '
'determined. Uses new-style (format method) string formatting.',
aliases=['astropy.table.column.auto_colname'])
default_notebook_table_class = _config.ConfigItem(
'table-striped table-bordered table-condensed',
'The table class to be used in Jupyter notebooks when displaying '
        'tables (and not overridden). See <https://getbootstrap.com/css/#tables> '
'for a list of useful bootstrap classes.')
replace_warnings = _config.ConfigItem(
[],
'List of conditions for issuing a warning when replacing a table '
"column using setitem, e.g. t['a'] = value. Allowed options are "
"'always', 'slice', 'refcount', 'attributes'.",
'list')
replace_inplace = _config.ConfigItem(
False,
'Always use in-place update of a table column when using setitem, '
"e.g. t['a'] = value. This overrides the default behavior of "
"replacing the column entirely with the new value when possible. "
"This configuration option will be deprecated and then removed in "
"subsequent major releases.")
conf = Conf() # noqa
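# Illustrative usage sketch (not part of this module; relies on the standard
# astropy ConfigNamespace behaviour):
#     from astropy.table import conf
#     conf.replace_inplace = True                   # persistent override
#     with conf.set_temp('replace_inplace', True):  # temporary override
#         ...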
from . import connect # noqa: E402
from .groups import TableGroups, ColumnGroups # noqa: E402
from .table import (Table, QTable, TableColumns, Row, TableFormatter,
NdarrayMixin, TableReplaceWarning, TableAttribute,
PprintIncludeExclude) # noqa: E402
from .operations import (join, setdiff, hstack, dstack, vstack, unique, # noqa: E402
TableMergeError, join_skycoord, join_distance) # noqa: E402
from .bst import BST # noqa: E402
from .sorted_array import SortedArray # noqa: E402
from .soco import SCEngine # noqa: E402
from .serialize import SerializedColumn, represent_mixins_as_columns # noqa: E402
# Finally import the formats for the read and write method but delay building
# the documentation until all are loaded. (#5275)
from astropy.io import registry # noqa: E402
with registry.delay_doc_updates(Table):
# Import routines that connect readers/writers to astropy.table
from .jsviewer import JSViewer
import astropy.io.ascii.connect
import astropy.io.fits.connect
import astropy.io.misc.connect
import astropy.io.votable.connect
import astropy.io.misc.asdf.connect
import astropy.io.misc.pandas.connect # noqa: F401
| bsd-3-clause |
ttm/mass | src/aux/espectros_oboe.py | 1 | 2771 | #-*- coding: utf-8 -*-
# http://matplotlib.sourceforge.net/examples/api/legend_demo.html
#
import pylab as p, numpy as n
from scipy.io import wavfile as w
p.figure(figsize=(10.,5.))
p.subplots_adjust(left=0.14,bottom=0.17,right=0.99,top=0.96, hspace=0.4)
# a whole oboe note; 150529 samples
# oboe=a.wavread("22686__acclivity__oboe-a-440.wav")[0]
oboe = w.read("../../scripts/22686__acclivity__oboe-a-440.wav")[1].astype("float64")
oboe=oboe[:,0]+oboe[:,1] # stereo to mono
oboe=((oboe-oboe.min())/(oboe.max()-oboe.min()))*2-1 # normalizing
# one period of the note (cut in Audacity by looking for zero crossings)
# the zero is not repeated, hence the [:-1]; this yields 98 samples
# poboe=a.wavread("22686__acclivity__oboe-a-440_periodo.wav")[0][:-1]
poboe = w.read("../../scripts/22686__acclivity__oboe-a-440_periodo.wav")[1].astype("float64")
poboe=((poboe-poboe.min())/(poboe.max()-poboe.min()))*2-1 # normalizing
# building a signal of the same length:
poboe2=n.array(list(poboe)*1510) # 147980
#poboe2=n.array(list(poboe)*1536) # len(oboe)/len(poboe) does not quite match because we picked a relatively low frequency == long period
#poboe2=poboe[n.arange(oboe.shape[0])%poboe.shape[0]] # Figure out why this does not work
o_s=n.fft.fft(oboe)
o_a=n.abs(o_s)
p_s=n.fft.fft(poboe2)
p_a=n.abs(p_s)
i=n.arange(poboe2.shape[0])
foo=(p_a>50).nonzero()
p.plot(i[foo],p_a[foo]*.35*1.3,"o",label=u"sampled period")
ii=list(i[foo])
i=n.arange(oboe.shape[0])
p.plot(i,o_a*1.3,label=u"full note")
p.legend(loc="upper right",prop={'size':22})
p.yticks((0,11000),(0,'11k'), fontsize="26")
ticks=[r"$f%i$" % (i,) for i in range(len(ii))]
p.xticks([0] + ii[1:][:14],[0] + ticks[:14] , fontsize="22")
p.xlim(0,22000,)
p.ylim(-300,11000)
# p.ylim(-300,1100000000*.5)
p.ylabel(r'$\sqrt{a^2+b^2}$ $\rightarrow$', fontsize="24")
p.xlabel(u'frequency' + r'$\rightarrow$', fontsize="24")
p.savefig("../figures/oboeNaturalSampledSpectrum_.png")
p.show()
#s_=senoide[indices%T]
#s_s=n.fft.fft(s_)
#s_a=n.abs(s_s)
#d_=dente[indices%T]
#d_s=n.fft.fft(d_)
#d_a=n.abs(d_s)
#t_=triangular[indices%T]
#t_s=n.fft.fft(t_)
#t_a=n.abs(t_s)
#q_=quadrada[indices%T]
#q_s=n.fft.fft(q_)
#q_a=n.abs(q_s)
#i=indices
#foo=(s_a>50).nonzero()
#p.plot(i[foo],s_a[foo],"o", label=u"sine")
#foo=(d_a>50).nonzero()
#p.plot(i[foo],d_a[foo],"*", label=r"sawtooth")
#foo=(t_a>50).nonzero()
#p.plot(i[foo],t_a[foo],"^", label=r"triangular")
##p.plot(i[foo],t_a[foo],"^", label=r"triangular")
#foo=(q_a>50).nonzero()
#p.plot(i[foo],q_a[foo],"s", label="square")
#p.xlim(0,T2*.51)
#p.ylim(-300,20000)
#p.yticks((0,20000),(0,"20k"))
#p.xticks((0,15000),(0,"15k"))
#p.legend(loc="upper right")
#p.ylabel(r'absolute value $\rightarrow$')
#p.xlabel(r'spectrum component $\rightarrow$')
#p.show()
| gpl-3.0 |
durox/dotfiles | link/.pypackages/myutils/style/core.py | 1 | 4998 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
"""
Core functions and attributes for the matplotlib style library:
``use``
Select style sheet to override the current matplotlib settings.
``context``
Context manager to use a style sheet temporarily.
``available``
List available style sheets.
``library``
A dictionary of style names and matplotlib settings.
"""
import os
import re
import contextlib
import matplotlib as mpl
from matplotlib import cbook
from matplotlib import rc_params_from_file
__all__ = ['use', 'context', 'available', 'library', 'reload_library']
_here = os.path.abspath(os.path.dirname(__file__))
BASE_LIBRARY_PATH = os.path.join(_here, 'stylelib')
# Users may want multiple library paths, so store a list of paths.
USER_LIBRARY_PATHS = [os.path.join(mpl._get_configdir(), 'stylelib')]
STYLE_EXTENSION = 'mplstyle'
STYLE_FILE_PATTERN = re.compile(r'([\S]+)\.%s$' % STYLE_EXTENSION)
def is_style_file(filename):
"""Return True if the filename looks like a style file."""
return STYLE_FILE_PATTERN.match(filename) is not None
def use(name):
"""Use matplotlib style settings from a known style sheet or from a file.
Parameters
----------
name : str or list of str
Name of style or path/URL to a style file. For a list of available
style names, see `style.available`. If given a list, each style is
applied from first to last in the list.
"""
if cbook.is_string_like(name):
name = [name]
for style in name:
if style in library:
mpl.rcParams.update(library[style])
else:
try:
rc = rc_params_from_file(style)#, use_default_template=False)
mpl.rcParams.update(rc)
except:
msg = ("'%s' not found in the style library and input is "
"not a valid URL or path. See `style.available` for "
"list of available styles.")
raise ValueError(msg % style)
@contextlib.contextmanager
def context(name, after_reset=False):
"""Context manager for using style settings temporarily.
Parameters
----------
name : str or list of str
Name of style or path/URL to a style file. For a list of available
style names, see `style.available`. If given a list, each style is
applied from first to last in the list.
after_reset : bool
If True, apply style after resetting settings to their defaults;
otherwise, apply style on top of the current settings.
"""
initial_settings = mpl.rcParams.copy()
if after_reset:
mpl.rcdefaults()
use(name)
yield
mpl.rcParams.update(initial_settings)
def load_base_library():
"""Load style library defined in this package."""
library = dict()
library.update(read_style_directory(BASE_LIBRARY_PATH))
return library
def iter_user_libraries():
for stylelib_path in USER_LIBRARY_PATHS:
stylelib_path = os.path.expanduser(stylelib_path)
if os.path.exists(stylelib_path) and os.path.isdir(stylelib_path):
yield stylelib_path
def update_user_library(library):
"""Update style library with user-defined rc files"""
for stylelib_path in iter_user_libraries():
styles = read_style_directory(stylelib_path)
update_nested_dict(library, styles)
return library
def iter_style_files(style_dir):
"""Yield file path and name of styles in the given directory."""
for path in os.listdir(style_dir):
filename = os.path.basename(path)
if is_style_file(filename):
match = STYLE_FILE_PATTERN.match(filename)
path = os.path.abspath(os.path.join(style_dir, path))
yield path, match.groups()[0]
def read_style_directory(style_dir):
"""Return dictionary of styles defined in `style_dir`."""
styles = dict()
for path, name in iter_style_files(style_dir):
styles[name] = rc_params_from_file(path)#, use_default_template=False)
return styles
def update_nested_dict(main_dict, new_dict):
"""Update nested dict (only level of nesting) with new values.
Unlike dict.update, this assumes that the values of the parent dict are
dicts (or dict-like), so you shouldn't replace the nested dict if it
already exists. Instead you should update the sub-dict.
"""
# update named styles specified by user
for name, rc_dict in six.iteritems(new_dict):
if name in main_dict:
main_dict[name].update(rc_dict)
else:
main_dict[name] = rc_dict
return main_dict
# Load style library
# ==================
_base_library = load_base_library()
library = None
available = []
def reload_library():
"""Reload style library."""
global library, available
library = update_user_library(_base_library)
available[:] = library.keys()
reload_library()
| mit |
dhalleine/tensorflow | tensorflow/contrib/learn/python/learn/estimators/rnn.py | 1 | 10124 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Recurrent Neural Network estimators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.learn.python.learn import models
from tensorflow.contrib.learn.python.learn.estimators import _sklearn
from tensorflow.contrib.learn.python.learn.estimators.base import TensorFlowEstimator
def null_input_op_fn(x):
"""This function does no transformation on the inputs, used as default."""
return x
class TensorFlowRNNClassifier(TensorFlowEstimator, _sklearn.ClassifierMixin):
"""TensorFlow RNN Classifier model."""
def __init__(self,
rnn_size,
n_classes,
cell_type='gru',
num_layers=1,
input_op_fn=null_input_op_fn,
initial_state=None,
bidirectional=False,
sequence_length=None,
attn_length=None,
attn_size=None,
attn_vec_size=None,
batch_size=32,
steps=50,
optimizer='Adagrad',
learning_rate=0.1,
class_weight=None,
clip_gradients=5.0,
continue_training=False,
config=None,
verbose=1):
"""Initializes a TensorFlowRNNClassifier instance.
Args:
rnn_size: The size for rnn cell, e.g. size of your word embeddings.
cell_type: The type of rnn cell, including rnn, gru, and lstm.
num_layers: The number of layers of the rnn model.
input_op_fn: Function that will transform the input tensor, such as
creating word embeddings, byte list, etc. This takes
an argument x for input and returns transformed x.
bidirectional: boolean, Whether this is a bidirectional rnn.
sequence_length: If sequence_length is provided, dynamic calculation
is performed. This saves computational time when unrolling past max
sequence length.
initial_state: An initial state for the RNN. This must be a tensor of
appropriate type and shape [batch_size x cell.state_size].
attn_length: integer, the size of attention vector attached to rnn cells.
attn_size: integer, the size of an attention window attached to rnn cells.
attn_vec_size: integer, the number of convolutional features calculated on
attention state and the size of the hidden layer built from base cell state.
n_classes: Number of classes in the target.
batch_size: Mini batch size.
steps: Number of steps to run over data.
optimizer: Optimizer name (or class), for example "SGD", "Adam",
"Adagrad".
learning_rate: If this is constant float value, no decay function is
used. Instead, a customized decay function can be passed that accepts
global_step as parameter and returns a Tensor.
e.g. exponential decay function:
def exp_decay(global_step):
return tf.train.exponential_decay(
learning_rate=0.1, global_step,
decay_steps=2, decay_rate=0.001)
class_weight: None or list of n_classes floats. Weight associated with
classes for loss computation. If not given, all classes are
supposed to have weight one.
continue_training: when continue_training is True, once initialized
        model will be continually trained on every call of fit.
config: RunConfig object that controls the configurations of the session,
e.g. num_cores, gpu_memory_fraction, etc.
"""
self.rnn_size = rnn_size
self.cell_type = cell_type
self.input_op_fn = input_op_fn
self.bidirectional = bidirectional
self.num_layers = num_layers
self.sequence_length = sequence_length
self.initial_state = initial_state
self.attn_length = attn_length
self.attn_size = attn_size
self.attn_vec_size = attn_vec_size
super(TensorFlowRNNClassifier, self).__init__(
model_fn=self._model_fn,
n_classes=n_classes,
batch_size=batch_size,
steps=steps,
optimizer=optimizer,
learning_rate=learning_rate,
class_weight=class_weight,
clip_gradients=clip_gradients,
continue_training=continue_training,
config=config,
verbose=verbose)
def _model_fn(self, x, y):
return models.get_rnn_model(self.rnn_size, self.cell_type, self.num_layers,
self.input_op_fn, self.bidirectional,
models.logistic_regression,
self.sequence_length, self.initial_state,
self.attn_length, self.attn_size,
self.attn_vec_size)(x, y)
@property
def bias_(self):
"""Returns bias of the rnn layer."""
return self.get_variable_value('logistic_regression/bias')
@property
def weights_(self):
"""Returns weights of the rnn layer."""
return self.get_variable_value('logistic_regression/weights')
class TensorFlowRNNRegressor(TensorFlowEstimator, _sklearn.RegressorMixin):
"""TensorFlow RNN Regressor model."""
def __init__(self,
rnn_size,
cell_type='gru',
num_layers=1,
input_op_fn=null_input_op_fn,
initial_state=None,
bidirectional=False,
sequence_length=None,
attn_length=None,
attn_size=None,
attn_vec_size=None,
n_classes=0,
batch_size=32,
steps=50,
optimizer='Adagrad',
learning_rate=0.1,
clip_gradients=5.0,
continue_training=False,
config=None,
verbose=1):
"""Initializes a TensorFlowRNNRegressor instance.
Args:
rnn_size: The size for rnn cell, e.g. size of your word embeddings.
cell_type: The type of rnn cell, including rnn, gru, and lstm.
num_layers: The number of layers of the rnn model.
input_op_fn: Function that will transform the input tensor, such as
creating word embeddings, byte list, etc. This takes
an argument x for input and returns transformed x.
bidirectional: boolean, Whether this is a bidirectional rnn.
sequence_length: If sequence_length is provided, dynamic calculation
is performed. This saves computational time when unrolling past max
sequence length.
attn_length: integer, the size of attention vector attached to rnn cells.
attn_size: integer, the size of an attention window attached to rnn cells.
attn_vec_size: integer, the number of convolutional features calculated on
attention state and the size of the hidden layer built from base cell state.
initial_state: An initial state for the RNN. This must be a tensor of
appropriate type and shape [batch_size x cell.state_size].
batch_size: Mini batch size.
steps: Number of steps to run over data.
optimizer: Optimizer name (or class), for example "SGD", "Adam",
"Adagrad".
learning_rate: If this is constant float value, no decay function is
used. Instead, a customized decay function can be passed that accepts
global_step as parameter and returns a Tensor.
e.g. exponential decay function:
def exp_decay(global_step):
return tf.train.exponential_decay(
learning_rate=0.1, global_step,
decay_steps=2, decay_rate=0.001)
continue_training: when continue_training is True, once initialized
        model will be continually trained on every call of fit.
config: RunConfig object that controls the configurations of the
session, e.g. num_cores, gpu_memory_fraction, etc.
verbose: Controls the verbosity, possible values:
0: the algorithm and debug information is muted.
1: trainer prints the progress.
2: log device placement is printed.
"""
self.rnn_size = rnn_size
self.cell_type = cell_type
self.input_op_fn = input_op_fn
self.bidirectional = bidirectional
self.num_layers = num_layers
self.sequence_length = sequence_length
self.initial_state = initial_state
self.attn_length = attn_length
self.attn_size = attn_size
self.attn_vec_size = attn_vec_size
super(TensorFlowRNNRegressor, self).__init__(
model_fn=self._model_fn,
n_classes=n_classes,
batch_size=batch_size,
steps=steps,
optimizer=optimizer,
learning_rate=learning_rate,
clip_gradients=clip_gradients,
continue_training=continue_training,
config=config,
verbose=verbose)
def _model_fn(self, x, y):
return models.get_rnn_model(self.rnn_size, self.cell_type, self.num_layers,
self.input_op_fn, self.bidirectional,
models.linear_regression, self.sequence_length,
self.initial_state, self.attn_length,
self.attn_size, self.attn_vec_size)(x, y)
@property
def bias_(self):
"""Returns bias of the rnn layer."""
return self.get_variable_value('linear_regression/bias')
@property
def weights_(self):
"""Returns weights of the rnn layer."""
return self.get_variable_value('linear_regression/weights')
| apache-2.0 |
IQSS/miniverse | dv_apps/metrics/stats_util_datasets_bins.py | 1 | 6779 | """
Counts of files per Dataset using the latest DatasetVersion.
Answers the question: How many datasets have "x" number of files?
For example, if the "bin_size" is set to 20, results will show
the number of datasets with 0 to 19 files, the number of datasets
with 20 to 39 files, etc.
"""
import pandas as pd
import json
from collections import OrderedDict
from django.db.models import F
from django.db import models
from dv_apps.datasets.models import Dataset, DatasetVersion
from dv_apps.datafiles.models import FileMetadata
from dv_apps.utils.msg_util import msgt, msg
from dv_apps.metrics.stats_util_base import StatsMakerBase
from dv_apps.metrics.stats_result import StatsResult
class StatsMakerDatasetBins(StatsMakerBase):
"""Answers the question: How many datasets have "x" number of files?"""
def __init__(self, **kwargs):
"""Process kwargs via StatsMakerBase"""
super(StatsMakerDatasetBins, self).__init__(**kwargs)
    def get_bin_list(self, step=10, low_num=0, high_num=100):
        """Return bin edges [low_num, low_num + step, ...] not exceeding
        high_num."""
        assert high_num > low_num, "high_num must be greater than low_num"
        assert low_num >= 0, "low_num must be at least 0. Cannot be negative"
        assert step > 0, "step must be greater than 0"
        assert high_num > step, "step must be lower than high_num"
l = []
next_num = low_num
while next_num <= high_num:
l.append(next_num)
next_num += step
return l
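        # Illustrative behaviour (comment added for clarity, not in the
        # original source): the edges stop at the last multiple of `step`
        # that does not exceed high_num, e.g.
        #     get_bin_list(step=10, low_num=0, high_num=30) -> [0, 10, 20, 30]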
def get_dataset_version_ids(self, **extra_filters):
"""For the binning, we only want the latest dataset versions"""
filter_params = dict()
# Add extra filters from kwargs, e.g. published
#
if extra_filters:
for k, v in extra_filters.items():
filter_params[k] = v
dataset_id_filter = {} # no filter unless published/unpublished
if len(filter_params) > 0:
# -----------------------------
# Retrieve Dataset ids published/unpublished
# -----------------------------
dataset_ids = Dataset.objects.select_related('dvobject'\
).filter(**filter_params\
).values_list('dvobject__id', flat=True)
# ok, reduce by ids...
dataset_id_filter = dict(dataset__in=dataset_ids)
# -----------------------------
# Get latest DatasetVersion ids
# -----------------------------
id_info_list = DatasetVersion.objects.filter(**dataset_id_filter\
).values('id', 'dataset_id', 'versionnumber', 'minorversionnumber'\
).order_by('dataset_id', '-id', '-versionnumber', '-minorversionnumber')
# -----------------------------
# Iterate through and get the DatasetVersion id
# of the latest version
# -----------------------------
latest_dsv_ids = []
last_dataset_id = None
for idx, info in enumerate(id_info_list):
if idx == 0 or info['dataset_id'] != last_dataset_id:
latest_dsv_ids.append(info['id'])
last_dataset_id = info['dataset_id']
return latest_dsv_ids
def get_file_counts_per_dataset_latest_versions_published(self):
return self.get_file_counts_per_dataset_latest_versions(\
**self.get_is_published_filter_param())
def get_file_counts_per_dataset_latest_versions_unpublished(self):
return self.get_file_counts_per_dataset_latest_versions(\
**self.get_is_NOT_published_filter_param())
def get_file_counts_per_dataset_latest_versions(self, **extra_filters):
"""
Get binning stats for the number of files in each Dataset.
For the counts, only use the LATEST DatasetVersion
"""
# Get the correct DatasetVersion ids as a filter parameter
#
latest_dsv_ids = self.get_dataset_version_ids(**extra_filters)
filter_params = dict(datasetversion__id__in=latest_dsv_ids)
# Make query
#
ds_version_counts = FileMetadata.objects.filter(**filter_params\
).annotate(dsv_id=F('datasetversion__id'),\
).values('dsv_id',\
).annotate(cnt=models.Count('datafile__id')\
).values('dsv_id', 'cnt'\
).order_by('-cnt')
# Convert to Dataframe
#
df = pd.DataFrame(list(ds_version_counts), columns = ['dsv_id', 'cnt'])
# Get the list of bins
#
        high_num = df['cnt'].max() + self.bin_size
bins = self.get_bin_list(step=self.bin_size, low_num=0, high_num=high_num+self.bin_size)
# Add a new column, assigning each file count to a bin
#
df['bin_label'] = pd.cut(df['cnt'], bins)
# Count the occurrence of each bin
#
bin_count_series = pd.value_counts(df['bin_label'])
# Make the Series into a new DataFrame
#
df_bins = pd.DataFrame(dict(bin=bin_count_series.index,\
count=bin_count_series.values))
# Add a sort key
# (0, 20] -> 0
# (20, 30] -> 20
# etc
df_bins['sort_key'] = df_bins['bin'].apply(lambda x: int(x[1:-1].split(',')[0]))
df_bins['bin_start_inclusive'] = df_bins['sort_key']
df_bins['bin_end'] = df_bins['bin'].apply(lambda x: int(x[1:-1].split(',')[1]))
# Add a formatted string
# (0, 20] -> 0 to 20
# (20, 30] -> 20 to 30
# etc
df_bins['bin_str'] = df_bins['bin'].apply(lambda x: x[1:-1].replace(', ', ' to '))
# Sort the bins
#
df_bins = df_bins.sort('sort_key')
#msgt(df_bins)
# If appropriate, skip empty bins, e.g. remove 0 counts
#
if self.skip_empty_bins:
df_bins = df_bins.query('count != 0')
#msg(df_bins)
# Return as python dict
# # bit expensive but want orderedDict
formatted_records_json = df_bins.to_json(orient='records')
formatted_records = json.loads(formatted_records_json, object_pairs_hook=OrderedDict)
data_dict = OrderedDict()
data_dict['record_count'] = len(formatted_records)
data_dict['records'] = formatted_records
return StatsResult.build_success_result(data_dict)
"""
# bins changing as more files added
bins = self.get_bin_list(step=20, low_num=0, high_num=199)
bins += self.get_bin_list(step=100, low_num=200, high_num=999)
bins += self.get_bin_list(step=1000, low_num=1000, high_num=df['cnt'].max()+1000)
#bins = self.get_bin_list(step=step_num, low_num=0, high_num=df['cnt'].max()+step_num)
"""
| mit |
2baOrNot2ba/dreamBeam | dreambeam/rime/jones.py | 1 | 17908 | """
This module provides a Jones matrix framework for radio interferometric
measurement equations.
"""
import copy
import numpy.ma as ma
import numpy as np
import matplotlib.pyplot as plt
from casacore.measures import measures
from casacore.quanta import quantity
from .conversion_utils import sph2crt, crt2sph, convertBasis, \
getSph2CartTransf, getSph2CartTransfArr, \
IAU_pol_basis, shiftmat2back, IAUtoC09, \
sphmeshgrid, dc_hrz2vrt
class Jones(object):
"""This is the base class for Jones algebra. It contains the Jones matrix
itself and a basis w.r.t. which the Jones matrix is given.
The basis is such that:
::
self.jonesbasis = array([[r_hat], [phi_hat], [theta_hat]]).
"""
_ecef_frame = 'ITRF'
_eci_frame = 'J2000'
_topo_frame = 'STN'
def __init__(self):
pass
def op(self, jonesobjright):
"""Operate this Jones on to the Jones passed in the argument."""
self.jonesr = jonesobjright.getValue()
self.jonesrbasis_from = jonesobjright.get_basis()
self.refframe_r = jonesobjright.get_refframe()
self.iaucmp = jonesobjright.iaucmp
self.computeJonesRes()
return self
def getValue(self):
"""Return value of the Jones matrix"""
return self.jones
def get_basis(self):
"""Return basis of the Jones matrix"""
return self.jonesbasis
def get_refframe(self):
"""Return the reference frame of the Jones matrix."""
return self.refframe
def computeJonesRes(self):
pass
def sph2lud3_basis(self, jonesbasis_sph, alignment=None):
"""Convert sph basis to Ludwig3 frame with an optional rotation
alignment."""
# The jonesbasis for the antennas is taken to be the Ludwig3 def.
# with r,u,v basis expressed wrt the station frame
r_refframe = jonesbasis_sph[..., 0]
if alignment is not None:
r = np.tensordot(r_refframe, alignment, axes=([-1, 1]))
else:
r = r_refframe
(az, el) = crt2sph(r.T)
lugwig3rot = np.zeros((3, 3, len(az)))
lugwig3rot[0, 0, :] = 1.
lugwig3rot[1:, 1:, :] = np.array([[np.cos(az), np.sin(az)],
[-np.sin(az), np.cos(az)]])
lugwig3rot = np.moveaxis(lugwig3rot, -1, 0)
jonesbasis_lud3 = np.matmul(jonesbasis_sph, lugwig3rot)
# ang_u = np.rad2deg(
# np.arctan2(jonesbasis_lud3[:,1,1], jonesbasis_lud3[:,0,1]))
# print ang_u
return jonesbasis_lud3
def convert2iaucmp(self):
if not self.iaucmp:
self.jones = np.matmul(self.jones, IAUtoC09[1:, 1:])
self.iaucmp = True
class JonesChain(object):
jonesproducts = []
def __init__(self):
self.jonesproducts = []
class PJones(Jones):
"""This is a P-Jones or parallactic Jones. This has a temporal dependence
given by the epoch of observation."""
def __init__(self, obsTimespy, ITRF2stnrot, do_parallactic_rot=True):
super(PJones, self).__init__()
obsTimes_lst = []
for obsTimepy in obsTimespy:
obsTimes_lst.append(quantity(obsTimepy.isoformat()).get_value())
obsTimes_me = quantity(obsTimes_lst, 'd')
self.obsTimes = obsTimes_me.get_value()
self.obsTimeUnit = obsTimes_me.get_unit()
self.ITRF2stnrot = ITRF2stnrot
self.do_parallactic_rot = do_parallactic_rot
def computeJonesRes(self):
if type(self.obsTimes) is float:
self.computeJonesRes_overfield()
else:
self.computeJonesRes_overtime()
def computeJonesRes_overtime(self):
"""Compute and apply the P-Jones matrix. The structure is:
::
jones[time, sphcomp, skycomp] =
Pjones[time, sphcomp, comp]*jonesr[comp, skycomp]
The P-Jones matrix is computed as follows: consider a direction
vector d. Let jonesrbasis be the column concatenation of the 3
spherical basis vectors corresponding to d in the J2000 reference
frame, so
::
jonesrbasis = [[r_J2000],[phi_J2000],[theta_J2000]].T
where `r_J2000` is along the direction d and theta, phi are the
remaining two spherical basis vectors. Let `jonesbasis` be the basis
vectors corresponding to the same direction d but in the STN reference
frame, so
::
jonesbasis = [[r_STN],[phi_STN],[theta_STN]].T
where `r_STN` is along the direction d and theta, phi are the remaining
two spherical basis vectors in the spherical system associated with the
STN.
The algorithm takes `r_J2000` from component 0 of the `jonesrbasis` and
converts it to STN (i.e. finds `r_STN`) using casacore measures module,
along with the other 2 J2000 basis vectors. These converted vectors are
called `jonesrbasis_to`. With `r_STN`, it also computes the
corresponding `jonesbasis`. A vector in the cartesian J2000 ref sys
converted to STN must be equal to the same vector expressed in the
cartesian STN ref sys via a conversion from spherical, so
::
jonesbasis * V_STN^sph = jonesrbasis_to * V_J2000^sph
which implies that we can convert directly from spherical J2000 to the
spherical STN like this
::
V_STN^sph = (jonesbasis.H * jonesrbasis_to) * V_J2000^sph
where the matrix in parentheses is the P-Jones matrix.
The P-Jones matrix is then applied to the operand Jones matrix.
"""
nrOfTimes = len(self.obsTimes)
pjones = np.zeros((nrOfTimes, 2, 2))
me = measures()
me.doframe(measures().position(self._ecef_frame, '0m', '0m', '0m'))
self.jonesbasis = np.zeros((nrOfTimes, 3, 3))
if self.refframe_r == self._eci_frame:
convert2irf = self._ecef_frame
jonesrbasis_from = self.jonesrbasis_from
jr_refframe = self.refframe_r
else:
convert2irf = self._eci_frame
jonesrbasis_from = np.matmul(self.ITRF2stnrot.T,
self.jonesrbasis_from)
jr_refframe = self._ecef_frame
for ti in range(0, nrOfTimes):
# Set current time in reference frame
timEpoch = me.epoch('UTC', quantity(self.obsTimes[ti],
self.obsTimeUnit))
me.doframe(timEpoch)
jonesrbasis_to = np.asmatrix(convertBasis(me, jonesrbasis_from,
jr_refframe,
convert2irf))
if convert2irf == self._ecef_frame:
jonesrbasis_to = np.matmul(self.ITRF2stnrot, jonesrbasis_to)
jonesbasisMat = getSph2CartTransf(jonesrbasis_to[:, 0])
if self.do_parallactic_rot:
pjones[ti, :, :] = jonesbasisMat[:, 1:].H \
* jonesrbasis_to[:, 1:]
else:
pjones[ti, :, :] = np.asmatrix(np.identity(2))
self.jonesbasis[ti, :, :] = jonesbasisMat
if convert2irf == self._ecef_frame:
self.refframe = 'STN' # Final Ref frame is station
else:
self.refframe = self._eci_frame
self.jones = np.matmul(pjones, self.jonesr)
self.thisjones = pjones
def computeJonesRes_overfield(self):
"""Compute the PJones over field of directions for one frequency.
"""
pjones = np.zeros(self.jonesrbasis_from.shape[0:-2]+(2, 2))
me = measures()
me.doframe(measures().position(self._ecef_frame, '0m', '0m', '0m'))
self.jonesbasis = np.zeros(self.jonesrbasis_from.shape)
if self.refframe_r == self._eci_frame:
convert2irf = self._ecef_frame
jonesrbasis_from = self.jonesrbasis_from
jr_refframe = self.refframe_r
else:
convert2irf = self._eci_frame
jonesrbasis_from = np.matmul(self.ITRF2stnrot.T,
self.jonesrbasis_from)
jr_refframe = self._ecef_frame
timEpoch = me.epoch('UTC', quantity(self.obsTimes, self.obsTimeUnit))
me.doframe(timEpoch)
for idxi in range(self.jonesrbasis_from.shape[0]):
for idxj in range(self.jonesrbasis_from.shape[1]):
jonesrbasis_to = np.asmatrix(convertBasis(
me,
jonesrbasis_from[idxi, idxj, :, :],
jr_refframe, convert2irf))
if convert2irf == self._ecef_frame:
jonesrbasis_to = np.matmul(self.ITRF2stnrot,
jonesrbasis_to)
jonesbasisMat = getSph2CartTransf(jonesrbasis_to[..., 0])
pjones[idxi, idxj, :, :] = jonesbasisMat[:, 1:].H \
* jonesrbasis_to[:, 1:]
self.jonesbasis[idxi, idxj, :, :] = jonesbasisMat
if convert2irf == self._ecef_frame:
self.refframe = 'STN' # Final Ref frame is station
else:
self.refframe = self._eci_frame
self.jones = np.matmul(pjones, self.jonesr)
self.thisjones = pjones
class DualPolFieldPointSrc(Jones):
"""This is a mock Jones point source. It does not model a real source. It's
purpose is for testing. It can be seen as a source that first transmits in
one polarization and then in another, then 2 transmissions given in the 2
columns.
It may have a spectral dimension. The src_dir should be a tuple with
(az, el, ref)."""
def __init__(self, src_dir, dualPolField=np.identity(2), iaucmp=True):
(src_az, src_el, src_ref) = src_dir
dualPolField3d = np.asmatrix(np.identity(3))
dualPolField3d[1:, 1:] = np.asmatrix(dualPolField)
if iaucmp:
jones = np.matmul(IAUtoC09, dualPolField3d)[1:, 1:]
self.iaucmp = True
else:
jones = dualPolField3d[1:, 1:]
self.iaucmp = False
self.jones = np.asarray(jones)
self.jonesbasis = np.asarray(IAU_pol_basis(src_az, src_el))
self.refframe = src_ref
class DualPolFieldRegion(Jones):
"""This is a Jones unit flux density field."""
def __init__(self, refframe='J2000', dualPolField=np.identity(2),
iaucmp=True, lmgrid=None):
if not lmgrid:
azimsh, elemsh = sphmeshgrid()
lmn = sph2crt(azimsh, elemsh)
else:
nn = dc_hrz2vrt(*lmgrid)
lmn = np.array(lmgrid+(nn,))
azimsh, elemsh = crt2sph(lmn)
self.azmsh = azimsh
self.elmsh = elemsh
dualPolField3d = np.asmatrix(np.identity(3))
dualPolField3d[1:, 1:] = np.asmatrix(dualPolField)
if iaucmp:
jones = np.matmul(IAUtoC09, dualPolField3d)[1:, 1:]
self.iaucmp = True
else:
jones = dualPolField3d[1:, 1:]
self.iaucmp = False
self.jones = np.broadcast_to(jones,
elemsh.shape+dualPolField.shape)
self.jonesbasis = shiftmat2back(getSph2CartTransfArr(lmn))
self.refframe = refframe
class EJones(Jones):
"""This is the antenna or feed Jones. It is given by a set of complex gain
patterns for each frequency and polarization channel."""
def __init__(self, dualPolElem, position, stnRot, freqSel=None):
self.position = position
self.stnRot = stnRot
self.dualPolElem = dualPolElem
if freqSel is None:
self.freqChan = self.dualPolElem.getfreqs()
else:
self.freqChan = freqSel
self.refframe = 'STN'
def computeJonesRes(self):
"""Compute the Jones that results from applying the E-Jones to the
right.
The structure of the `jonesrbasis` is ``[timeIdx, sphIdx, skycompIdx]``.
"""
idxshape = self.jonesrbasis_from.shape[0:-2]
jonesrbasis = np.reshape(self.jonesrbasis_from, (-1, 3, 3))
(az_from, el_from) = crt2sph(jonesrbasis[..., 0].squeeze().T)
theta_phi_view = (np.pi/2-el_from.flatten(), az_from.flatten())
ejones = self.dualPolElem.getJonesAlong(self.freqChan, theta_phi_view)
# AntPat has column order theta_hat, phi_hat
# while C09 has phi_hat, vartheta_hat (where vartheta_hat=-theta_hat).
# So flip order and change sign of theta_hat.
ejones = ejones[..., ::-1]
ejones[..., 1] = -ejones[..., 1]
self.jonesbasis = self.jonesrbasis_from # Basis does not change
# This is the actual MEq multiplication:
if ejones.ndim > 3:
frqdimsz = (ejones.shape[0],)
else:
frqdimsz = ()
self.jones = np.reshape(
np.matmul(ejones, np.reshape(self.jonesr, (-1, 2, 2))),
frqdimsz+idxshape+(2, 2)
)
self.thisjones = np.reshape(ejones, frqdimsz+idxshape+(2, 2))
def getPosRot(self, position):
"""Compute the nominal transformation from the geodetic position to
ITRF. (Not implemented yet)"""
return np.identity(2)
class DualPolFieldSink(Jones):
def computeJonesRes(self):
self.jones = self.jonesr
self.refframe = self.refframe_r
def inverse(jonesobj):
"""Return a Jones object that is the inverse of `jonesobj`."""
inv_jones = copy.copy(jonesobj)
jmat = jonesobj.getValue()
inv_jones.jones = np.linalg.inv(jmat)
# Swap basis between left and right:
inv_jones.jonesbasis = jonesobj.jonesrbasis_from
inv_jones.jonesrbasis_from = jonesobj.jonesbasis
jframe = jonesobj.get_refframe()
jrframe = jonesobj.refframe_r
inv_jones.refframe = jrframe
inv_jones.refframe_r = jframe
return inv_jones
def fix_imaginary_directions(jonesobj, fill=np.identity(2)):
"""Replace jones matrices with imaginary directions in a Jones object.
When specifying 2D Cartesian direction cosines, it is possible that the
corresponding direction is not physical, e.g. when l,m = 1,1. In such cases,
the Jones radius basis will have an imaginary vertical component. This function
    will find such 'directions' and replace the corresponding Jones matrix with the
fill matrix specified by the `fill` argument.
"""
idxs = np.where(np.imag(jonesobj.jonesbasis[..., 0, 2]))
jonesobj.jones[idxs[0], idxs[1], ...] = fill
def plotJonesField(jonesfld, jbasis, refframe, rep='abs-Jones',
mask_belowhorizon=True):
"""Plot a Jones field."""
def belowhorizon(z):
"""Return masked z values that are below the horizon.
Below the horizon means either than z is negative or
the z has a nonzero imaginary part.
"""
imagz_ma = ma.getmaskarray(ma.masked_not_equal(z.imag, 0.))
negz_ma = ma.getmaskarray(ma.masked_less(z, .0))
belowhrz = ma.mask_or(imagz_ma, negz_ma)
return belowhrz
if rep == 'abs-Jones':
restitle = 'Beam Jones on sky'
res00 = np.abs(jonesfld[:, :, 0, 0])
res00 = ma.masked_invalid(res00)
res00lbl = r'|J_{p\phi}|'
res01 = np.abs(jonesfld[:, :, 0, 1])
res01 = ma.masked_invalid(res01)
res01lbl = r'|J_{p\theta}|'
res10 = np.abs(jonesfld[:, :, 1, 0])
res10 = ma.masked_invalid(res10)
res10lbl = r'|J_{q\phi}|'
res11 = np.abs(jonesfld[:, :, 1, 1])
res11 = ma.masked_invalid(res11)
res11lbl = r'|J_{q\theta}|'
elif rep == 'Stokes':
corrmat = np.matmul(jonesfld, np.swapaxes(jonesfld.conj(), -2, -1))
S0 = np.real(corrmat[..., 0, 0]+corrmat[..., 1, 1])
SQ = np.real(corrmat[..., 0, 0]-corrmat[..., 1, 1])
SU = np.real(corrmat[..., 0, 1]+corrmat[..., 1, 0])
SV = np.imag(corrmat[..., 0, 1]-corrmat[..., 1, 0])
restitle = 'Antenna Stokes on sky'
res00 = S0
res00lbl = 'I'
res01 = SQ/S0
res01lbl = 'q'
res10 = SU/S0
res10lbl = 'u'
res11 = SV/S0
res11lbl = 'v'
else:
raise Exception("Unknown Jones representation {}.".format(rep))
if refframe == 'STN':
# Directions in Cartesian station crds
x = jbasis[..., 0, 0]
y = jbasis[..., 1, 0]
z = jbasis[..., 2, 0]
if mask_belowhorizon:
belowhrz = belowhorizon(z)
res00 = ma.MaskedArray(res00, mask=belowhrz)
res01 = ma.MaskedArray(res01, mask=belowhrz)
res10 = ma.MaskedArray(res10, mask=belowhrz)
res11 = ma.MaskedArray(res11, mask=belowhrz)
xlabel = 'STN X'
ylabel = 'STN Y'
elif refframe == 'J2000':
r = np.moveaxis(np.squeeze(jbasis[..., :, 0]), -1, 0)
az, el = crt2sph(r, branchcut_neg_x=False)
x = np.rad2deg(az)
y = np.rad2deg(el)
xlabel = 'RA'
ylabel = 'DEC'
fig = plt.figure()
fig.suptitle(restitle)
ax = plt.subplot(221, polar=False)
plt.pcolormesh(x, y, res00) # , vmin=0., vmax=2.0)
plt.colorbar()
ax.set_title(res00lbl)
plt.ylabel(ylabel)
ax = plt.subplot(222, polar=False)
plt.pcolormesh(x, y, res01)
plt.colorbar()
ax.set_title(res01lbl)
ax = plt.subplot(223, polar=False)
plt.pcolormesh(x, y, res10)
plt.colorbar()
ax.set_title(res10lbl)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
ax = plt.subplot(224, polar=False)
plt.pcolormesh(x, y, res11, vmin=np.nanmin(res11), vmax=np.nanmax(res11))
plt.colorbar()
ax.set_title(res11lbl)
plt.xlabel(xlabel)
plt.show()
| isc |
Myasuka/scikit-learn | benchmarks/bench_sparsify.py | 323 | 3372 | """
Benchmark SGD prediction time with dense/sparse coefficients.
Invoke with
-----------
$ kernprof.py -l sparsity_benchmark.py
$ python -m line_profiler sparsity_benchmark.py.lprof
Typical output
--------------
input data sparsity: 0.050000
true coef sparsity: 0.000100
test data sparsity: 0.027400
model sparsity: 0.000024
r^2 on test data (dense model) : 0.233651
r^2 on test data (sparse model) : 0.233651
Wrote profile results to sparsity_benchmark.py.lprof
Timer unit: 1e-06 s
File: sparsity_benchmark.py
Function: benchmark_dense_predict at line 51
Total time: 0.532979 s
Line # Hits Time Per Hit % Time Line Contents
==============================================================
51 @profile
52 def benchmark_dense_predict():
53 301 640 2.1 0.1 for _ in range(300):
54 300 532339 1774.5 99.9 clf.predict(X_test)
File: sparsity_benchmark.py
Function: benchmark_sparse_predict at line 56
Total time: 0.39274 s
Line # Hits Time Per Hit % Time Line Contents
==============================================================
56 @profile
57 def benchmark_sparse_predict():
58 1 10854 10854.0 2.8 X_test_sparse = csr_matrix(X_test)
59 301 477 1.6 0.1 for _ in range(300):
60 300 381409 1271.4 97.1 clf.predict(X_test_sparse)
"""
from scipy.sparse.csr import csr_matrix
import numpy as np
from sklearn.linear_model.stochastic_gradient import SGDRegressor
from sklearn.metrics import r2_score
np.random.seed(42)
def sparsity_ratio(X):
return np.count_nonzero(X) / float(n_samples * n_features)
n_samples, n_features = 5000, 300
X = np.random.randn(n_samples, n_features)
inds = np.arange(n_samples)
np.random.shuffle(inds)
X[inds[int(n_features / 1.2):]] = 0 # sparsify input
print("input data sparsity: %f" % sparsity_ratio(X))
coef = 3 * np.random.randn(n_features)
inds = np.arange(n_features)
np.random.shuffle(inds)
coef[inds[n_features // 2:]] = 0  # sparsify coef
print("true coef sparsity: %f" % sparsity_ratio(coef))
y = np.dot(X, coef)
# add noise
y += 0.01 * np.random.normal(size=(n_samples,))
# Split data in train set and test set
n_samples = X.shape[0]
X_train, y_train = X[:n_samples // 2], y[:n_samples // 2]
X_test, y_test = X[n_samples // 2:], y[n_samples // 2:]
print("test data sparsity: %f" % sparsity_ratio(X_test))
###############################################################################
clf = SGDRegressor(penalty='l1', alpha=.2, fit_intercept=True, n_iter=2000)
clf.fit(X_train, y_train)
print("model sparsity: %f" % sparsity_ratio(clf.coef_))
def benchmark_dense_predict():
for _ in range(300):
clf.predict(X_test)
def benchmark_sparse_predict():
X_test_sparse = csr_matrix(X_test)
for _ in range(300):
clf.predict(X_test_sparse)
def score(y_test, y_pred, case):
r2 = r2_score(y_test, y_pred)
print("r^2 on test data (%s) : %f" % (case, r2))
score(y_test, clf.predict(X_test), 'dense model')
benchmark_dense_predict()
clf.sparsify()
score(y_test, clf.predict(X_test), 'sparse model')
benchmark_sparse_predict()
| bsd-3-clause |
cython-testbed/pandas | pandas/core/series.py | 1 | 142695 | """
Data structure for 1-dimensional cross-sectional and time series data
"""
from __future__ import division
# pylint: disable=E1101,E1103
# pylint: disable=W0703,W0622,W0613,W0201
import warnings
from textwrap import dedent
import numpy as np
import numpy.ma as ma
from pandas.core.accessor import CachedAccessor
from pandas.core.arrays import ExtensionArray
from pandas.core.dtypes.common import (
is_categorical_dtype,
is_string_like,
is_bool,
is_integer, is_integer_dtype,
is_float_dtype,
is_extension_type,
is_extension_array_dtype,
is_datetimelike,
is_datetime64tz_dtype,
is_timedelta64_dtype,
is_object_dtype,
is_list_like,
is_hashable,
is_iterator,
is_dict_like,
is_scalar,
_is_unorderable_exception,
ensure_platform_int,
pandas_dtype)
from pandas.core.dtypes.generic import (
ABCSparseArray, ABCDataFrame, ABCIndexClass,
ABCSeries, ABCSparseSeries)
from pandas.core.dtypes.cast import (
maybe_upcast, infer_dtype_from_scalar,
maybe_convert_platform,
maybe_cast_to_datetime, maybe_castable,
construct_1d_arraylike_from_scalar,
construct_1d_ndarray_preserving_na,
construct_1d_object_array_from_listlike,
maybe_cast_to_integer_array)
from pandas.core.dtypes.missing import (
isna,
notna,
remove_na_arraylike,
na_value_for_dtype)
from pandas.core.index import (Index, MultiIndex, InvalidIndexError,
Float64Index, ensure_index)
from pandas.core.indexing import check_bool_indexer, maybe_convert_indices
from pandas.core import generic, base
from pandas.core.internals import SingleBlockManager
from pandas.core.arrays.categorical import Categorical, CategoricalAccessor
from pandas.core.indexes.accessors import CombinedDatetimelikeProperties
from pandas.core.indexes.datetimes import DatetimeIndex
from pandas.core.indexes.timedeltas import TimedeltaIndex
from pandas.core.indexes.period import PeriodIndex
from pandas import compat
from pandas.io.formats.terminal import get_terminal_size
from pandas.compat import (
zip, u, OrderedDict, StringIO, range, get_range_parameters, PY36)
from pandas.compat.numpy import function as nv
import pandas.core.ops as ops
import pandas.core.algorithms as algorithms
import pandas.core.common as com
import pandas.core.nanops as nanops
import pandas.core.indexes.base as ibase
import pandas.io.formats.format as fmt
from pandas.util._decorators import Appender, deprecate, Substitution
from pandas.util._validators import validate_bool_kwarg
from pandas._libs import index as libindex, tslibs, lib, iNaT
from pandas.core.config import get_option
from pandas.core.strings import StringMethods
from pandas.core.tools.datetimes import to_datetime
import pandas.plotting._core as gfx
__all__ = ['Series']
_shared_doc_kwargs = dict(
axes='index', klass='Series', axes_single_arg="{0 or 'index'}",
axis="""axis : {0 or 'index'}
Parameter needed for compatibility with DataFrame.""",
inplace="""inplace : boolean, default False
If True, performs operation inplace and returns None.""",
unique='np.ndarray', duplicated='Series',
optional_by='', optional_mapper='', optional_labels='', optional_axis='',
versionadded_to_excel='\n .. versionadded:: 0.20.0\n')
# see gh-16971
def remove_na(arr):
"""Remove null values from array like structure.
.. deprecated:: 0.21.0
Use s[s.notnull()] instead.
"""
warnings.warn("remove_na is deprecated and is a private "
"function. Do not use.", FutureWarning, stacklevel=2)
return remove_na_arraylike(arr)
def _coerce_method(converter):
""" install the scalar coercion methods """
def wrapper(self):
if len(self) == 1:
return converter(self.iloc[0])
raise TypeError("cannot convert the series to "
"{0}".format(str(converter)))
return wrapper
# ----------------------------------------------------------------------
# Series class
class Series(base.IndexOpsMixin, generic.NDFrame):
"""
One-dimensional ndarray with axis labels (including time series).
Labels need not be unique but must be a hashable type. The object
supports both integer- and label-based indexing and provides a host of
methods for performing operations involving the index. Statistical
methods from ndarray have been overridden to automatically exclude
missing data (currently represented as NaN).
Operations between Series (+, -, /, *, **) align values based on their
associated index values-- they need not be the same length. The result
index will be the sorted union of the two indexes.
Parameters
----------
data : array-like, Iterable, dict, or scalar value
Contains data stored in Series
.. versionchanged :: 0.23.0
If data is a dict, argument order is maintained for Python 3.6
and later.
index : array-like or Index (1d)
Values must be hashable and have the same length as `data`.
Non-unique index values are allowed. Will default to
RangeIndex (0, 1, 2, ..., n) if not provided. If both a dict and index
sequence are used, the index will override the keys found in the
dict.
dtype : numpy.dtype or None
If None, dtype will be inferred
copy : boolean, default False
Copy input data
"""
_metadata = ['name']
_accessors = {'dt', 'cat', 'str'}
_deprecations = generic.NDFrame._deprecations | frozenset(
['asobject', 'sortlevel', 'reshape', 'get_value', 'set_value',
'from_csv', 'valid'])
# Override cache_readonly bc Series is mutable
hasnans = property(base.IndexOpsMixin.hasnans.func,
doc=base.IndexOpsMixin.hasnans.__doc__)
def __init__(self, data=None, index=None, dtype=None, name=None,
copy=False, fastpath=False):
# we are called internally, so short-circuit
if fastpath:
# data is an ndarray, index is defined
if not isinstance(data, SingleBlockManager):
data = SingleBlockManager(data, index, fastpath=True)
if copy:
data = data.copy()
if index is None:
index = data.index
else:
if index is not None:
index = ensure_index(index)
if data is None:
data = {}
if dtype is not None:
dtype = self._validate_dtype(dtype)
if isinstance(data, MultiIndex):
raise NotImplementedError("initializing a Series from a "
"MultiIndex is not supported")
elif isinstance(data, Index):
if name is None:
name = data.name
if dtype is not None:
# astype copies
data = data.astype(dtype)
else:
# need to copy to avoid aliasing issues
data = data._values.copy()
copy = False
elif isinstance(data, np.ndarray):
pass
elif isinstance(data, (ABCSeries, ABCSparseSeries)):
if name is None:
name = data.name
if index is None:
index = data.index
else:
data = data.reindex(index, copy=copy)
data = data._data
elif isinstance(data, dict):
data, index = self._init_dict(data, index, dtype)
dtype = None
copy = False
elif isinstance(data, SingleBlockManager):
if index is None:
index = data.index
elif not data.index.equals(index) or copy:
# GH#19275 SingleBlockManager input should only be called
# internally
raise AssertionError('Cannot pass both SingleBlockManager '
'`data` argument and a different '
'`index` argument. `copy` must '
'be False.')
elif is_extension_array_dtype(data):
pass
elif isinstance(data, (set, frozenset)):
raise TypeError("{0!r} type is unordered"
"".format(data.__class__.__name__))
# If data is Iterable but not list-like, consume into list.
elif (isinstance(data, compat.Iterable)
and not isinstance(data, compat.Sized)):
data = list(data)
else:
# handle sparse passed here (and force conversion)
if isinstance(data, ABCSparseArray):
data = data.to_dense()
if index is None:
if not is_list_like(data):
data = [data]
index = ibase.default_index(len(data))
elif is_list_like(data):
# a scalar numpy array is list-like but doesn't
# have a proper length
try:
if len(index) != len(data):
raise ValueError(
'Length of passed values is {val}, '
'index implies {ind}'
.format(val=len(data), ind=len(index)))
except TypeError:
pass
# create/copy the manager
if isinstance(data, SingleBlockManager):
if dtype is not None:
data = data.astype(dtype=dtype, errors='ignore',
copy=copy)
elif copy:
data = data.copy()
else:
data = _sanitize_array(data, index, dtype, copy,
raise_cast_failure=True)
data = SingleBlockManager(data, index, fastpath=True)
generic.NDFrame.__init__(self, data, fastpath=True)
self.name = name
self._set_axis(0, index, fastpath=True)
def _init_dict(self, data, index=None, dtype=None):
"""
Derive the "_data" and "index" attributes of a new Series from a
dictionary input.
Parameters
----------
data : dict or dict-like
Data used to populate the new Series
index : Index or index-like, default None
index for the new Series: if None, use dict keys
dtype : dtype, default None
dtype for the new Series: if None, infer from data
Returns
-------
_data : BlockManager for the new Series
index : index for the new Series
"""
# Looking for NaN in dict doesn't work ({np.nan : 1}[float('nan')]
# raises KeyError), so we iterate the entire dict, and align
if data:
keys, values = zip(*compat.iteritems(data))
values = list(values)
elif index is not None:
# fastpath for Series(data=None). Just use broadcasting a scalar
# instead of reindexing.
values = na_value_for_dtype(dtype)
keys = index
else:
keys, values = [], []
# Input is now list-like, so rely on "standard" construction:
s = Series(values, index=keys, dtype=dtype)
# Now we just make sure the order is respected, if any
if data and index is not None:
s = s.reindex(index, copy=False)
elif not PY36 and not isinstance(data, OrderedDict) and data:
# Need the `and data` to avoid sorting Series(None, index=[...])
# since that isn't really dict-like
try:
s = s.sort_index()
except TypeError:
pass
return s._data, s.index
@classmethod
def from_array(cls, arr, index=None, name=None, dtype=None, copy=False,
fastpath=False):
"""Construct Series from array.
.. deprecated :: 0.23.0
Use pd.Series(..) constructor instead.
"""
warnings.warn("'from_array' is deprecated and will be removed in a "
"future version. Please use the pd.Series(..) "
"constructor instead.", FutureWarning, stacklevel=2)
if isinstance(arr, ABCSparseArray):
from pandas.core.sparse.series import SparseSeries
cls = SparseSeries
return cls(arr, index=index, name=name, dtype=dtype,
copy=copy, fastpath=fastpath)
@property
def _constructor(self):
return Series
@property
def _constructor_expanddim(self):
from pandas.core.frame import DataFrame
return DataFrame
# types
@property
def _can_hold_na(self):
return self._data._can_hold_na
_index = None
def _set_axis(self, axis, labels, fastpath=False):
""" override generic, we want to set the _typ here """
if not fastpath:
labels = ensure_index(labels)
is_all_dates = labels.is_all_dates
if is_all_dates:
if not isinstance(labels,
(DatetimeIndex, PeriodIndex, TimedeltaIndex)):
try:
labels = DatetimeIndex(labels)
# need to set here because we changed the index
if fastpath:
self._data.set_axis(axis, labels)
except (tslibs.OutOfBoundsDatetime, ValueError):
# labels may exceeds datetime bounds,
# or not be a DatetimeIndex
pass
self._set_subtyp(is_all_dates)
object.__setattr__(self, '_index', labels)
if not fastpath:
self._data.set_axis(axis, labels)
def _set_subtyp(self, is_all_dates):
if is_all_dates:
object.__setattr__(self, '_subtyp', 'time_series')
else:
object.__setattr__(self, '_subtyp', 'series')
def _update_inplace(self, result, **kwargs):
# we want to call the generic version and not the IndexOpsMixin
return generic.NDFrame._update_inplace(self, result, **kwargs)
@property
def name(self):
return self._name
@name.setter
def name(self, value):
if value is not None and not is_hashable(value):
raise TypeError('Series.name must be a hashable type')
object.__setattr__(self, '_name', value)
# ndarray compatibility
@property
def dtype(self):
""" return the dtype object of the underlying data """
return self._data.dtype
@property
def dtypes(self):
""" return the dtype object of the underlying data """
return self._data.dtype
@property
def ftype(self):
""" return if the data is sparse|dense """
return self._data.ftype
@property
def ftypes(self):
""" return if the data is sparse|dense """
return self._data.ftype
@property
def values(self):
"""
Return Series as ndarray or ndarray-like
depending on the dtype
Returns
-------
arr : numpy.ndarray or ndarray-like
Examples
--------
>>> pd.Series([1, 2, 3]).values
array([1, 2, 3])
>>> pd.Series(list('aabc')).values
array(['a', 'a', 'b', 'c'], dtype=object)
>>> pd.Series(list('aabc')).astype('category').values
[a, a, b, c]
Categories (3, object): [a, b, c]
Timezone aware datetime data is converted to UTC:
>>> pd.Series(pd.date_range('20130101', periods=3,
... tz='US/Eastern')).values
array(['2013-01-01T05:00:00.000000000',
'2013-01-02T05:00:00.000000000',
'2013-01-03T05:00:00.000000000'], dtype='datetime64[ns]')
"""
return self._data.external_values()
@property
def _values(self):
""" return the internal repr of this data """
return self._data.internal_values()
def _formatting_values(self):
"""Return the values that can be formatted (used by SeriesFormatter
and DataFrameFormatter)
"""
return self._data.formatting_values()
def get_values(self):
""" same as values (but handles sparseness conversions); is a view """
return self._data.get_values()
@property
def asobject(self):
"""Return object Series which contains boxed values.
.. deprecated :: 0.23.0
Use ``astype(object)`` instead.
*this is an internal non-public method*
"""
warnings.warn("'asobject' is deprecated. Use 'astype(object)'"
" instead", FutureWarning, stacklevel=2)
return self.astype(object).values
# ops
def ravel(self, order='C'):
"""
Return the flattened underlying data as an ndarray
See also
--------
numpy.ndarray.ravel
"""
return self._values.ravel(order=order)
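    # Editor's sketch (not part of the pandas source): ravel simply defers to
    # the underlying ndarray, so the result is a NumPy array (the repr may also
    # show a dtype on some platforms).
    #
    #     >>> pd.Series([1, 2, 3]).ravel()
    #     array([1, 2, 3])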
def compress(self, condition, *args, **kwargs):
"""
Return selected slices of an array along given axis as a Series
.. deprecated:: 0.24.0
See also
--------
numpy.ndarray.compress
"""
msg = ("Series.compress(condition) is deprecated. "
"Use 'Series[condition]' or "
"'np.asarray(series).compress(condition)' instead.")
warnings.warn(msg, FutureWarning, stacklevel=2)
nv.validate_compress(args, kwargs)
return self[condition]
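    # Editor's sketch (not part of the pandas source): the replacement that the
    # deprecation message above recommends -- plain boolean indexing.
    #
    #     >>> s = pd.Series([1, 2, 3, 4])
    #     >>> s[s > 2]                      # instead of s.compress(s > 2)
    #     2    3
    #     3    4
    #     dtype: int64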
def nonzero(self):
"""
Return the *integer* indices of the elements that are non-zero
This method is equivalent to calling `numpy.nonzero` on the
series data. For compatibility with NumPy, the return value is
the same (a tuple with an array of indices for each dimension),
but it will always be a one-item tuple because series only have
one dimension.
Examples
--------
>>> s = pd.Series([0, 3, 0, 4])
>>> s.nonzero()
(array([1, 3]),)
>>> s.iloc[s.nonzero()[0]]
1 3
3 4
dtype: int64
>>> s = pd.Series([0, 3, 0, 4], index=['a', 'b', 'c', 'd'])
# same return although index of s is different
>>> s.nonzero()
(array([1, 3]),)
>>> s.iloc[s.nonzero()[0]]
b 3
d 4
dtype: int64
See Also
--------
numpy.nonzero
"""
return self._values.nonzero()
def put(self, *args, **kwargs):
"""
Applies the `put` method to its `values` attribute
if it has one.
See also
--------
numpy.ndarray.put
"""
self._values.put(*args, **kwargs)
def __len__(self):
"""
return the length of the Series
"""
return len(self._data)
def view(self, dtype=None):
"""
Create a new view of the Series.
This function will return a new Series with a view of the same
underlying values in memory, optionally reinterpreted with a new data
type. The new data type must preserve the same size in bytes as to not
cause index misalignment.
Parameters
----------
dtype : data type
Data type object or one of their string representations.
Returns
-------
Series
A new Series object as a view of the same data in memory.
See Also
--------
numpy.ndarray.view : Equivalent numpy function to create a new view of
the same data in memory.
Notes
-----
Series are instantiated with ``dtype=float64`` by default. While
``numpy.ndarray.view()`` will return a view with the same data type as
the original array, ``Series.view()`` (without specified dtype)
will try using ``float64`` and may fail if the original data type size
in bytes is not the same.
Examples
--------
>>> s = pd.Series([-2, -1, 0, 1, 2], dtype='int8')
>>> s
0 -2
1 -1
2 0
3 1
4 2
dtype: int8
The 8 bit signed integer representation of `-1` is `0b11111111`, but
the same bytes represent 255 if read as an 8 bit unsigned integer:
>>> us = s.view('uint8')
>>> us
0 254
1 255
2 0
3 1
4 2
dtype: uint8
The views share the same underlying values:
>>> us[0] = 128
>>> s
0 -128
1 -1
2 0
3 1
4 2
dtype: int8
"""
return self._constructor(self._values.view(dtype),
index=self.index).__finalize__(self)
def __array__(self, result=None):
"""
the array interface, return my values
"""
return self.get_values()
def __array_wrap__(self, result, context=None):
"""
Gets called after a ufunc
"""
return self._constructor(result, index=self.index,
copy=False).__finalize__(self)
def __array_prepare__(self, result, context=None):
"""
Gets called prior to a ufunc
"""
# nice error message for non-ufunc types
if (context is not None and
not isinstance(self._values, (np.ndarray, ABCSparseArray))):
obj = context[1][0]
raise TypeError("{obj} with dtype {dtype} cannot perform "
"the numpy op {op}".format(
obj=type(obj).__name__,
dtype=getattr(obj, 'dtype', None),
op=context[0].__name__))
return result
# complex
@property
def real(self):
return self.values.real
@real.setter
def real(self, v):
self.values.real = v
@property
def imag(self):
return self.values.imag
@imag.setter
def imag(self, v):
self.values.imag = v
# coercion
__float__ = _coerce_method(float)
__long__ = _coerce_method(int)
__int__ = _coerce_method(int)
def _unpickle_series_compat(self, state):
if isinstance(state, dict):
self._data = state['_data']
self.name = state['name']
self.index = self._data.index
elif isinstance(state, tuple):
# < 0.12 series pickle
nd_state, own_state = state
# recreate the ndarray
data = np.empty(nd_state[1], dtype=nd_state[2])
np.ndarray.__setstate__(data, nd_state)
# backwards compat
index, name = own_state[0], None
if len(own_state) > 1:
name = own_state[1]
# recreate
self._data = SingleBlockManager(data, index, fastpath=True)
self._index = index
self.name = name
else:
raise Exception("cannot unpickle legacy formats -> [%s]" % state)
# indexers
@property
def axes(self):
"""Return a list of the row axis labels"""
return [self.index]
def _ixs(self, i, axis=0):
"""
Return the i-th value or values in the Series by location
Parameters
----------
i : int, slice, or sequence of integers
Returns
-------
value : scalar (int) or Series (slice, sequence)
"""
try:
# dispatch to the values if we need
values = self._values
if isinstance(values, np.ndarray):
return libindex.get_value_at(values, i)
else:
return values[i]
except IndexError:
raise
except Exception:
if isinstance(i, slice):
indexer = self.index._convert_slice_indexer(i, kind='iloc')
return self._get_values(indexer)
else:
label = self.index[i]
if isinstance(label, Index):
return self.take(i, axis=axis, convert=True)
else:
return libindex.get_value_at(self, i)
@property
def _is_mixed_type(self):
return False
def _slice(self, slobj, axis=0, kind=None):
slobj = self.index._convert_slice_indexer(slobj,
kind=kind or 'getitem')
return self._get_values(slobj)
def __getitem__(self, key):
key = com.apply_if_callable(key, self)
try:
result = self.index.get_value(self, key)
if not is_scalar(result):
if is_list_like(result) and not isinstance(result, Series):
# we need to box if loc of the key isn't scalar here
# otherwise have inline ndarray/lists
try:
if not is_scalar(self.index.get_loc(key)):
result = self._constructor(
result, index=[key] * len(result),
dtype=self.dtype).__finalize__(self)
except KeyError:
pass
return result
except InvalidIndexError:
pass
except (KeyError, ValueError):
if isinstance(key, tuple) and isinstance(self.index, MultiIndex):
# kludge
pass
elif key is Ellipsis:
return self
elif com.is_bool_indexer(key):
pass
else:
# we can try to coerce the indexer (or this will raise)
new_key = self.index._convert_scalar_indexer(key,
kind='getitem')
if type(new_key) != type(key):
return self.__getitem__(new_key)
raise
except Exception:
raise
if is_iterator(key):
key = list(key)
if com.is_bool_indexer(key):
key = check_bool_indexer(self.index, key)
return self._get_with(key)
def _get_with(self, key):
# other: fancy integer or otherwise
if isinstance(key, slice):
indexer = self.index._convert_slice_indexer(key, kind='getitem')
return self._get_values(indexer)
elif isinstance(key, ABCDataFrame):
raise TypeError('Indexing a Series with DataFrame is not '
'supported, use the appropriate DataFrame column')
elif isinstance(key, tuple):
try:
return self._get_values_tuple(key)
except Exception:
if len(key) == 1:
key = key[0]
if isinstance(key, slice):
return self._get_values(key)
raise
# pragma: no cover
if not isinstance(key, (list, np.ndarray, Series, Index)):
key = list(key)
if isinstance(key, Index):
key_type = key.inferred_type
else:
key_type = lib.infer_dtype(key)
if key_type == 'integer':
if self.index.is_integer() or self.index.is_floating():
return self.loc[key]
else:
return self._get_values(key)
elif key_type == 'boolean':
return self._get_values(key)
try:
# handle the dup indexing case (GH 4246)
if isinstance(key, (list, tuple)):
return self.loc[key]
return self.reindex(key)
except Exception:
# [slice(0, 5, None)] will break if you convert to ndarray,
# e.g. as requested by np.median
# hack
if isinstance(key[0], slice):
return self._get_values(key)
raise
def _get_values_tuple(self, key):
# mpl hackaround
if com._any_none(*key):
return self._get_values(key)
if not isinstance(self.index, MultiIndex):
raise ValueError('Can only tuple-index with a MultiIndex')
# If key is contained, would have returned by now
indexer, new_index = self.index.get_loc_level(key)
return self._constructor(self._values[indexer],
index=new_index).__finalize__(self)
def _get_values(self, indexer):
try:
return self._constructor(self._data.get_slice(indexer),
fastpath=True).__finalize__(self)
except Exception:
return self._values[indexer]
def __setitem__(self, key, value):
key = com.apply_if_callable(key, self)
def setitem(key, value):
try:
self._set_with_engine(key, value)
return
except com.SettingWithCopyError:
raise
except (KeyError, ValueError):
values = self._values
if (is_integer(key) and
not self.index.inferred_type == 'integer'):
values[key] = value
return
elif key is Ellipsis:
self[:] = value
return
elif com.is_bool_indexer(key):
pass
elif is_timedelta64_dtype(self.dtype):
# reassign a null value to iNaT
if isna(value):
value = iNaT
try:
self.index._engine.set_value(self._values, key,
value)
return
except TypeError:
pass
self.loc[key] = value
return
except TypeError as e:
if (isinstance(key, tuple) and
not isinstance(self.index, MultiIndex)):
raise ValueError("Can only tuple-index with a MultiIndex")
# python 3 type errors should be raised
if _is_unorderable_exception(e):
raise IndexError(key)
if com.is_bool_indexer(key):
key = check_bool_indexer(self.index, key)
try:
self._where(~key, value, inplace=True)
return
except InvalidIndexError:
pass
self._set_with(key, value)
# do the setitem
cacher_needs_updating = self._check_is_chained_assignment_possible()
setitem(key, value)
if cacher_needs_updating:
self._maybe_update_cacher()
def _set_with_engine(self, key, value):
values = self._values
try:
self.index._engine.set_value(values, key, value)
return
except KeyError:
values[self.index.get_loc(key)] = value
return
def _set_with(self, key, value):
# other: fancy integer or otherwise
if isinstance(key, slice):
indexer = self.index._convert_slice_indexer(key, kind='getitem')
return self._set_values(indexer, value)
else:
if isinstance(key, tuple):
try:
self._set_values(key, value)
except Exception:
pass
            if not isinstance(key, (list, Series, np.ndarray)):
try:
key = list(key)
except Exception:
key = [key]
if isinstance(key, Index):
key_type = key.inferred_type
else:
key_type = lib.infer_dtype(key)
if key_type == 'integer':
if self.index.inferred_type == 'integer':
self._set_labels(key, value)
else:
return self._set_values(key, value)
elif key_type == 'boolean':
self._set_values(key.astype(np.bool_), value)
else:
self._set_labels(key, value)
def _set_labels(self, key, value):
if isinstance(key, Index):
key = key.values
else:
key = com.asarray_tuplesafe(key)
indexer = self.index.get_indexer(key)
mask = indexer == -1
if mask.any():
raise ValueError('%s not contained in the index' % str(key[mask]))
self._set_values(indexer, value)
def _set_values(self, key, value):
if isinstance(key, Series):
key = key._values
self._data = self._data.setitem(indexer=key, value=value)
self._maybe_update_cacher()
def repeat(self, repeats, *args, **kwargs):
"""
        Repeat elements of a Series. Refer to `numpy.ndarray.repeat`
for more information about the `repeats` argument.
See also
--------
numpy.ndarray.repeat
"""
nv.validate_repeat(args, kwargs)
new_index = self.index.repeat(repeats)
new_values = self._values.repeat(repeats)
return self._constructor(new_values,
index=new_index).__finalize__(self)
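    # Editor's sketch (not part of the pandas source): both the values and the
    # index are repeated element-wise.
    #
    #     >>> s = pd.Series([1, 2], index=['a', 'b'])
    #     >>> s.repeat(2)
    #     a    1
    #     a    1
    #     b    2
    #     b    2
    #     dtype: int64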
def get_value(self, label, takeable=False):
"""Quickly retrieve single value at passed index label
.. deprecated:: 0.21.0
Please use .at[] or .iat[] accessors.
Parameters
----------
label : object
takeable : interpret the index as indexers, default False
Returns
-------
value : scalar value
"""
warnings.warn("get_value is deprecated and will be removed "
"in a future release. Please use "
".at[] or .iat[] accessors instead", FutureWarning,
stacklevel=2)
return self._get_value(label, takeable=takeable)
def _get_value(self, label, takeable=False):
if takeable is True:
return com.maybe_box_datetimelike(self._values[label])
return self.index.get_value(self._values, label)
_get_value.__doc__ = get_value.__doc__
def set_value(self, label, value, takeable=False):
"""Quickly set single value at passed label. If label is not contained,
a new object is created with the label placed at the end of the result
index.
.. deprecated:: 0.21.0
Please use .at[] or .iat[] accessors.
Parameters
----------
label : object
Partial indexing with MultiIndex not allowed
value : object
Scalar value
takeable : interpret the index as indexers, default False
Returns
-------
series : Series
If label is contained, will be reference to calling Series,
otherwise a new object
"""
warnings.warn("set_value is deprecated and will be removed "
"in a future release. Please use "
".at[] or .iat[] accessors instead", FutureWarning,
stacklevel=2)
return self._set_value(label, value, takeable=takeable)
def _set_value(self, label, value, takeable=False):
try:
if takeable:
self._values[label] = value
else:
self.index._engine.set_value(self._values, label, value)
except KeyError:
# set using a non-recursive method
self.loc[label] = value
return self
_set_value.__doc__ = set_value.__doc__
def reset_index(self, level=None, drop=False, name=None, inplace=False):
"""
Generate a new DataFrame or Series with the index reset.
This is useful when the index needs to be treated as a column, or
when the index is meaningless and needs to be reset to the default
before another operation.
Parameters
----------
level : int, str, tuple, or list, default optional
For a Series with a MultiIndex, only remove the specified levels
from the index. Removes all levels by default.
drop : bool, default False
Just reset the index, without inserting it as a column in
the new DataFrame.
name : object, optional
The name to use for the column containing the original Series
values. Uses ``self.name`` by default. This argument is ignored
when `drop` is True.
inplace : bool, default False
Modify the Series in place (do not create a new object).
Returns
-------
Series or DataFrame
When `drop` is False (the default), a DataFrame is returned.
The newly created columns will come first in the DataFrame,
followed by the original Series values.
When `drop` is True, a `Series` is returned.
In either case, if ``inplace=True``, no value is returned.
See Also
--------
DataFrame.reset_index: Analogous function for DataFrame.
Examples
--------
>>> s = pd.Series([1, 2, 3, 4], name='foo',
... index=pd.Index(['a', 'b', 'c', 'd'], name='idx'))
Generate a DataFrame with default index.
>>> s.reset_index()
idx foo
0 a 1
1 b 2
2 c 3
3 d 4
        To specify the name of the new column, use `name`.
>>> s.reset_index(name='values')
idx values
0 a 1
1 b 2
2 c 3
3 d 4
        To generate a new Series with the default index, set `drop` to True.
>>> s.reset_index(drop=True)
0 1
1 2
2 3
3 4
Name: foo, dtype: int64
        To update the Series in place, without generating a new one,
        set `inplace` to True. Note that it also requires ``drop=True``.
>>> s.reset_index(inplace=True, drop=True)
>>> s
0 1
1 2
2 3
3 4
Name: foo, dtype: int64
The `level` parameter is interesting for Series with a multi-level
index.
>>> arrays = [np.array(['bar', 'bar', 'baz', 'baz']),
... np.array(['one', 'two', 'one', 'two'])]
>>> s2 = pd.Series(
... range(4), name='foo',
... index=pd.MultiIndex.from_arrays(arrays,
... names=['a', 'b']))
To remove a specific level from the Index, use `level`.
>>> s2.reset_index(level='a')
a foo
b
one bar 0
two bar 1
one baz 2
two baz 3
If `level` is not set, all levels are removed from the Index.
>>> s2.reset_index()
a b foo
0 bar one 0
1 bar two 1
2 baz one 2
3 baz two 3
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if drop:
new_index = ibase.default_index(len(self))
if level is not None:
if not isinstance(level, (tuple, list)):
level = [level]
level = [self.index._get_level_number(lev) for lev in level]
if len(level) < self.index.nlevels:
new_index = self.index.droplevel(level)
if inplace:
self.index = new_index
# set name if it was passed, otherwise, keep the previous name
self.name = name or self.name
else:
return self._constructor(self._values.copy(),
index=new_index).__finalize__(self)
elif inplace:
raise TypeError('Cannot reset_index inplace on a Series '
'to create a DataFrame')
else:
df = self.to_frame(name)
return df.reset_index(level=level, drop=drop)
def __unicode__(self):
"""
        Return a string representation for a particular Series
        Invoked by unicode(series) in py2 only. Yields a Unicode string in
        both py2/py3.
"""
buf = StringIO(u(""))
width, height = get_terminal_size()
max_rows = (height if get_option("display.max_rows") == 0 else
get_option("display.max_rows"))
show_dimensions = get_option("display.show_dimensions")
self.to_string(buf=buf, name=self.name, dtype=self.dtype,
max_rows=max_rows, length=show_dimensions)
result = buf.getvalue()
return result
def to_string(self, buf=None, na_rep='NaN', float_format=None, header=True,
index=True, length=False, dtype=False, name=False,
max_rows=None):
"""
Render a string representation of the Series
Parameters
----------
buf : StringIO-like, optional
buffer to write to
na_rep : string, optional
string representation of NAN to use, default 'NaN'
float_format : one-parameter function, optional
            formatter function to apply to columns' elements if they are
            floats, default None
        header : boolean, default True
Add the Series header (index name)
index : bool, optional
Add index (row) labels, default True
length : boolean, default False
Add the Series length
dtype : boolean, default False
Add the Series dtype
name : boolean, default False
Add the Series name if not None
max_rows : int, optional
Maximum number of rows to show before truncating. If None, show
all.
Returns
-------
formatted : string (if not buffer passed)
"""
formatter = fmt.SeriesFormatter(self, name=name, length=length,
header=header, index=index,
dtype=dtype, na_rep=na_rep,
float_format=float_format,
max_rows=max_rows)
result = formatter.to_string()
# catch contract violations
if not isinstance(result, compat.text_type):
raise AssertionError("result must be of type unicode, type"
" of result is {0!r}"
"".format(result.__class__.__name__))
if buf is None:
return result
else:
try:
buf.write(result)
except AttributeError:
with open(buf, 'w') as f:
f.write(result)
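    # Editor's sketch (not part of the pandas source): with no ``buf`` the
    # rendered text is returned; exact formatting varies between versions, so
    # only the return type is asserted here.
    #
    #     >>> s = pd.Series([1, 2, 3])
    #     >>> text = s.to_string(length=True)
    #     >>> isinstance(text, str)          # Python 3
    #     True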
def iteritems(self):
"""
Lazily iterate over (index, value) tuples
"""
return zip(iter(self.index), iter(self))
items = iteritems
# ----------------------------------------------------------------------
# Misc public methods
def keys(self):
"""Alias for index"""
return self.index
def to_dict(self, into=dict):
"""
Convert Series to {label -> value} dict or dict-like object.
Parameters
----------
into : class, default dict
The collections.Mapping subclass to use as the return
object. Can be the actual class or an empty
instance of the mapping type you want. If you want a
collections.defaultdict, you must pass it initialized.
.. versionadded:: 0.21.0
Returns
-------
value_dict : collections.Mapping
Examples
--------
>>> s = pd.Series([1, 2, 3, 4])
>>> s.to_dict()
{0: 1, 1: 2, 2: 3, 3: 4}
>>> from collections import OrderedDict, defaultdict
>>> s.to_dict(OrderedDict)
OrderedDict([(0, 1), (1, 2), (2, 3), (3, 4)])
>>> dd = defaultdict(list)
>>> s.to_dict(dd)
defaultdict(<type 'list'>, {0: 1, 1: 2, 2: 3, 3: 4})
"""
# GH16122
into_c = com.standardize_mapping(into)
return into_c(compat.iteritems(self))
def to_frame(self, name=None):
"""
Convert Series to DataFrame
Parameters
----------
name : object, default None
The passed name should substitute for the series name (if it has
one).
Returns
-------
data_frame : DataFrame
"""
if name is None:
df = self._constructor_expanddim(self)
else:
df = self._constructor_expanddim({name: self})
return df
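    # Editor's sketch (not part of the pandas source): the Series name (or the
    # ``name`` argument) becomes the single column label.
    #
    #     >>> s = pd.Series([1, 2, 3], name='vals')
    #     >>> list(s.to_frame().columns)
    #     ['vals']
    #     >>> list(s.to_frame('renamed').columns)
    #     ['renamed']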
def to_sparse(self, kind='block', fill_value=None):
"""
Convert Series to SparseSeries
Parameters
----------
kind : {'block', 'integer'}
fill_value : float, defaults to NaN (missing)
Returns
-------
sp : SparseSeries
"""
# TODO: deprecate
from pandas.core.sparse.series import SparseSeries
from pandas.core.sparse.array import SparseArray
values = SparseArray(self, kind=kind, fill_value=fill_value)
return SparseSeries(
values, index=self.index, name=self.name
).__finalize__(self)
def _set_name(self, name, inplace=False):
"""
Set the Series name.
Parameters
----------
name : str
inplace : bool
whether to modify `self` directly or return a copy
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
ser = self if inplace else self.copy()
ser.name = name
return ser
# ----------------------------------------------------------------------
# Statistics, overridden ndarray methods
# TODO: integrate bottleneck
def count(self, level=None):
"""
Return number of non-NA/null observations in the Series
Parameters
----------
level : int or level name, default None
If the axis is a MultiIndex (hierarchical), count along a
particular level, collapsing into a smaller Series
Returns
-------
nobs : int or Series (if level specified)
"""
if level is None:
return notna(com.values_from_object(self)).sum()
if isinstance(level, compat.string_types):
level = self.index._get_level_number(level)
lev = self.index.levels[level]
lab = np.array(self.index.labels[level], subok=False, copy=True)
mask = lab == -1
if mask.any():
lab[mask] = cnt = len(lev)
lev = lev.insert(cnt, lev._na_value)
obs = lab[notna(self.values)]
out = np.bincount(obs, minlength=len(lev) or None)
return self._constructor(out, index=lev,
dtype='int64').__finalize__(self)
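    # Editor's sketch (not part of the pandas source): without ``level`` this
    # is simply the number of non-missing entries.
    #
    #     >>> import numpy as np
    #     >>> pd.Series([1.0, np.nan, 3.0]).count()
    #     2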
def mode(self, dropna=True):
"""Return the mode(s) of the dataset.
Always returns Series even if only one value is returned.
Parameters
----------
dropna : boolean, default True
Don't consider counts of NaN/NaT.
.. versionadded:: 0.24.0
Returns
-------
modes : Series (sorted)
"""
# TODO: Add option for bins like value_counts()
return algorithms.mode(self, dropna=dropna)
def unique(self):
"""
Return unique values of Series object.
Uniques are returned in order of appearance. Hash table-based unique,
therefore does NOT sort.
Returns
-------
ndarray or Categorical
The unique values returned as a NumPy array. In case of categorical
data type, returned as a Categorical.
See Also
--------
pandas.unique : top-level unique method for any 1-d array-like object.
Index.unique : return Index with unique values from an Index object.
Examples
--------
>>> pd.Series([2, 1, 3, 3], name='A').unique()
array([2, 1, 3])
>>> pd.Series([pd.Timestamp('2016-01-01') for _ in range(3)]).unique()
array(['2016-01-01T00:00:00.000000000'], dtype='datetime64[ns]')
>>> pd.Series([pd.Timestamp('2016-01-01', tz='US/Eastern')
... for _ in range(3)]).unique()
array([Timestamp('2016-01-01 00:00:00-0500', tz='US/Eastern')],
dtype=object)
An unordered Categorical will return categories in the order of
appearance.
>>> pd.Series(pd.Categorical(list('baabc'))).unique()
[b, a, c]
Categories (3, object): [b, a, c]
An ordered Categorical preserves the category ordering.
>>> pd.Series(pd.Categorical(list('baabc'), categories=list('abc'),
... ordered=True)).unique()
[b, a, c]
Categories (3, object): [a < b < c]
"""
result = super(Series, self).unique()
if is_datetime64tz_dtype(self.dtype):
# we are special casing datetime64tz_dtype
# to return an object array of tz-aware Timestamps
# TODO: it must return DatetimeArray with tz in pandas 2.0
result = result.astype(object).values
return result
def drop_duplicates(self, keep='first', inplace=False):
"""
Return Series with duplicate values removed.
Parameters
----------
keep : {'first', 'last', ``False``}, default 'first'
- 'first' : Drop duplicates except for the first occurrence.
- 'last' : Drop duplicates except for the last occurrence.
- ``False`` : Drop all duplicates.
inplace : boolean, default ``False``
If ``True``, performs operation inplace and returns None.
Returns
-------
deduplicated : Series
See Also
--------
Index.drop_duplicates : equivalent method on Index
DataFrame.drop_duplicates : equivalent method on DataFrame
Series.duplicated : related method on Series, indicating duplicate
Series values.
Examples
--------
        Generate a Series with duplicated entries.
>>> s = pd.Series(['lama', 'cow', 'lama', 'beetle', 'lama', 'hippo'],
... name='animal')
>>> s
0 lama
1 cow
2 lama
3 beetle
4 lama
5 hippo
Name: animal, dtype: object
With the 'keep' parameter, the selection behaviour of duplicated values
can be changed. The value 'first' keeps the first occurrence for each
set of duplicated entries. The default value of keep is 'first'.
>>> s.drop_duplicates()
0 lama
1 cow
3 beetle
5 hippo
Name: animal, dtype: object
The value 'last' for parameter 'keep' keeps the last occurrence for
each set of duplicated entries.
>>> s.drop_duplicates(keep='last')
1 cow
3 beetle
4 lama
5 hippo
Name: animal, dtype: object
The value ``False`` for parameter 'keep' discards all sets of
duplicated entries. Setting the value of 'inplace' to ``True`` performs
the operation inplace and returns ``None``.
>>> s.drop_duplicates(keep=False, inplace=True)
>>> s
1 cow
3 beetle
5 hippo
Name: animal, dtype: object
"""
return super(Series, self).drop_duplicates(keep=keep, inplace=inplace)
def duplicated(self, keep='first'):
"""
Indicate duplicate Series values.
Duplicated values are indicated as ``True`` values in the resulting
Series. Either all duplicates, all except the first or all except the
last occurrence of duplicates can be indicated.
Parameters
----------
keep : {'first', 'last', False}, default 'first'
- 'first' : Mark duplicates as ``True`` except for the first
occurrence.
- 'last' : Mark duplicates as ``True`` except for the last
occurrence.
- ``False`` : Mark all duplicates as ``True``.
Examples
--------
        By default, for each set of duplicated values, the first occurrence
        is set to False and all others to True:
>>> animals = pd.Series(['lama', 'cow', 'lama', 'beetle', 'lama'])
>>> animals.duplicated()
0 False
1 False
2 True
3 False
4 True
dtype: bool
which is equivalent to
>>> animals.duplicated(keep='first')
0 False
1 False
2 True
3 False
4 True
dtype: bool
        By using 'last', the last occurrence of each set of duplicated values
        is set to False and all others to True:
>>> animals.duplicated(keep='last')
0 True
1 False
2 True
3 False
4 False
dtype: bool
        By setting keep to ``False``, all duplicates are True:
>>> animals.duplicated(keep=False)
0 True
1 False
2 True
3 False
4 True
dtype: bool
Returns
-------
pandas.core.series.Series
See Also
--------
pandas.Index.duplicated : Equivalent method on pandas.Index
pandas.DataFrame.duplicated : Equivalent method on pandas.DataFrame
pandas.Series.drop_duplicates : Remove duplicate values from Series
"""
return super(Series, self).duplicated(keep=keep)
def idxmin(self, axis=0, skipna=True, *args, **kwargs):
"""
Return the row label of the minimum value.
If multiple values equal the minimum, the first row label with that
value is returned.
Parameters
----------
skipna : boolean, default True
Exclude NA/null values. If the entire Series is NA, the result
will be NA.
axis : int, default 0
For compatibility with DataFrame.idxmin. Redundant for application
on Series.
*args, **kwargs
Additional keywords have no effect but might be accepted
for compatibility with NumPy.
Returns
-------
idxmin : Index of minimum of values.
Raises
------
ValueError
If the Series is empty.
Notes
-----
This method is the Series version of ``ndarray.argmin``. This method
returns the label of the minimum, while ``ndarray.argmin`` returns
the position. To get the position, use ``series.values.argmin()``.
See Also
--------
numpy.argmin : Return indices of the minimum values
along the given axis.
DataFrame.idxmin : Return index of first occurrence of minimum
over requested axis.
Series.idxmax : Return index *label* of the first occurrence
of maximum of values.
Examples
--------
>>> s = pd.Series(data=[1, None, 4, 1],
... index=['A' ,'B' ,'C' ,'D'])
>>> s
A 1.0
B NaN
C 4.0
D 1.0
dtype: float64
>>> s.idxmin()
'A'
If `skipna` is False and there is an NA value in the data,
the function returns ``nan``.
>>> s.idxmin(skipna=False)
nan
"""
skipna = nv.validate_argmin_with_skipna(skipna, args, kwargs)
i = nanops.nanargmin(com.values_from_object(self), skipna=skipna)
if i == -1:
return np.nan
return self.index[i]
def idxmax(self, axis=0, skipna=True, *args, **kwargs):
"""
Return the row label of the maximum value.
If multiple values equal the maximum, the first row label with that
value is returned.
Parameters
----------
skipna : boolean, default True
Exclude NA/null values. If the entire Series is NA, the result
will be NA.
axis : int, default 0
For compatibility with DataFrame.idxmax. Redundant for application
on Series.
*args, **kwargs
Additional keywords have no effect but might be accepted
for compatibility with NumPy.
Returns
-------
idxmax : Index of maximum of values.
Raises
------
ValueError
If the Series is empty.
Notes
-----
This method is the Series version of ``ndarray.argmax``. This method
returns the label of the maximum, while ``ndarray.argmax`` returns
the position. To get the position, use ``series.values.argmax()``.
See Also
--------
numpy.argmax : Return indices of the maximum values
along the given axis.
DataFrame.idxmax : Return index of first occurrence of maximum
over requested axis.
Series.idxmin : Return index *label* of the first occurrence
of minimum of values.
Examples
--------
>>> s = pd.Series(data=[1, None, 4, 3, 4],
... index=['A', 'B', 'C', 'D', 'E'])
>>> s
A 1.0
B NaN
C 4.0
D 3.0
E 4.0
dtype: float64
>>> s.idxmax()
'C'
If `skipna` is False and there is an NA value in the data,
the function returns ``nan``.
>>> s.idxmax(skipna=False)
nan
"""
skipna = nv.validate_argmax_with_skipna(skipna, args, kwargs)
i = nanops.nanargmax(com.values_from_object(self), skipna=skipna)
if i == -1:
return np.nan
return self.index[i]
# ndarray compat
argmin = deprecate(
'argmin', idxmin, '0.21.0',
msg=dedent("""\
The current behaviour of 'Series.argmin' is deprecated, use 'idxmin'
instead.
The behavior of 'argmin' will be corrected to return the positional
minimum in the future. For now, use 'series.values.argmin' or
'np.argmin(np.array(values))' to get the position of the minimum
row.""")
)
argmax = deprecate(
'argmax', idxmax, '0.21.0',
msg=dedent("""\
The current behaviour of 'Series.argmax' is deprecated, use 'idxmax'
instead.
The behavior of 'argmax' will be corrected to return the positional
maximum in the future. For now, use 'series.values.argmax' or
'np.argmax(np.array(values))' to get the position of the maximum
row.""")
)
def round(self, decimals=0, *args, **kwargs):
"""
Round each value in a Series to the given number of decimals.
Parameters
----------
decimals : int
Number of decimal places to round to (default: 0).
If decimals is negative, it specifies the number of
positions to the left of the decimal point.
Returns
-------
Series object
See Also
--------
numpy.around
DataFrame.round
"""
nv.validate_round(args, kwargs)
result = com.values_from_object(self).round(decimals)
result = self._constructor(result, index=self.index).__finalize__(self)
return result
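    # Editor's sketch (not part of the pandas source): rounding is applied
    # element-wise via NumPy and the index is preserved.
    #
    #     >>> pd.Series([0.123, 1.456, 2.789]).round(1)
    #     0    0.1
    #     1    1.5
    #     2    2.8
    #     dtype: float64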
def quantile(self, q=0.5, interpolation='linear'):
"""
Return value at the given quantile.
Parameters
----------
q : float or array-like, default 0.5 (50% quantile)
0 <= q <= 1, the quantile(s) to compute
interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
.. versionadded:: 0.18.0
This optional parameter specifies the interpolation method to use,
when the desired quantile lies between two data points `i` and `j`:
* linear: `i + (j - i) * fraction`, where `fraction` is the
fractional part of the index surrounded by `i` and `j`.
* lower: `i`.
* higher: `j`.
* nearest: `i` or `j` whichever is nearest.
* midpoint: (`i` + `j`) / 2.
Returns
-------
quantile : float or Series
if ``q`` is an array, a Series will be returned where the
index is ``q`` and the values are the quantiles.
Examples
--------
>>> s = pd.Series([1, 2, 3, 4])
>>> s.quantile(.5)
2.5
>>> s.quantile([.25, .5, .75])
0.25 1.75
0.50 2.50
0.75 3.25
dtype: float64
See Also
--------
pandas.core.window.Rolling.quantile
numpy.percentile
"""
self._check_percentile(q)
result = self._data.quantile(qs=q, interpolation=interpolation)
if is_list_like(q):
return self._constructor(result,
index=Float64Index(q),
name=self.name)
else:
# scalar
return result
def corr(self, other, method='pearson', min_periods=None):
"""
Compute correlation with `other` Series, excluding missing values
Parameters
----------
other : Series
method : {'pearson', 'kendall', 'spearman'} or callable
* pearson : standard correlation coefficient
* kendall : Kendall Tau correlation coefficient
* spearman : Spearman rank correlation
* callable: callable with input two 1d ndarray
and returning a float
.. versionadded:: 0.24.0
min_periods : int, optional
Minimum number of observations needed to have a valid result
Returns
-------
correlation : float
Examples
--------
>>> import numpy as np
>>> histogram_intersection = lambda a, b: np.minimum(a, b
... ).sum().round(decimals=1)
>>> s1 = pd.Series([.2, .0, .6, .2])
>>> s2 = pd.Series([.3, .6, .0, .1])
>>> s1.corr(s2, method=histogram_intersection)
0.3
"""
this, other = self.align(other, join='inner', copy=False)
if len(this) == 0:
return np.nan
if method in ['pearson', 'spearman', 'kendall'] or callable(method):
return nanops.nancorr(this.values, other.values, method=method,
min_periods=min_periods)
raise ValueError("method must be either 'pearson', "
"'spearman', or 'kendall', '{method}' "
"was supplied".format(method=method))
def cov(self, other, min_periods=None):
"""
Compute covariance with Series, excluding missing values
Parameters
----------
other : Series
min_periods : int, optional
Minimum number of observations needed to have a valid result
Returns
-------
covariance : float
Normalized by N-1 (unbiased estimator).
"""
this, other = self.align(other, join='inner', copy=False)
if len(this) == 0:
return np.nan
return nanops.nancov(this.values, other.values,
min_periods=min_periods)
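    # Editor's sketch (not part of the pandas source): the two Series are
    # inner-aligned on their indexes before the (N-1)-normalized covariance is
    # computed.
    #
    #     >>> s1 = pd.Series([1.0, 2.0, 3.0])
    #     >>> s2 = pd.Series([2.0, 4.0, 6.0])
    #     >>> s1.cov(s2)
    #     2.0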
def diff(self, periods=1):
"""
First discrete difference of element.
Calculates the difference of a Series element compared with another
element in the Series (default is element in previous row).
Parameters
----------
periods : int, default 1
Periods to shift for calculating difference, accepts negative
values.
Returns
-------
diffed : Series
See Also
--------
Series.pct_change: Percent change over given number of periods.
Series.shift: Shift index by desired number of periods with an
optional time freq.
DataFrame.diff: First discrete difference of object
Examples
--------
Difference with previous row
>>> s = pd.Series([1, 1, 2, 3, 5, 8])
>>> s.diff()
0 NaN
1 0.0
2 1.0
3 1.0
4 2.0
5 3.0
dtype: float64
Difference with 3rd previous row
>>> s.diff(periods=3)
0 NaN
1 NaN
2 NaN
3 2.0
4 4.0
5 6.0
dtype: float64
Difference with following row
>>> s.diff(periods=-1)
0 0.0
1 -1.0
2 -1.0
3 -2.0
4 -3.0
5 NaN
dtype: float64
"""
result = algorithms.diff(com.values_from_object(self), periods)
return self._constructor(result, index=self.index).__finalize__(self)
def autocorr(self, lag=1):
"""
Compute the lag-N autocorrelation.
This method computes the Pearson correlation between
the Series and its shifted self.
Parameters
----------
lag : int, default 1
Number of lags to apply before performing autocorrelation.
Returns
-------
float
The Pearson correlation between self and self.shift(lag).
See Also
--------
Series.corr : Compute the correlation between two Series.
Series.shift : Shift index by desired number of periods.
DataFrame.corr : Compute pairwise correlation of columns.
DataFrame.corrwith : Compute pairwise correlation between rows or
columns of two DataFrame objects.
Notes
-----
        If the Pearson correlation is not well defined, return 'NaN'.
Examples
--------
>>> s = pd.Series([0.25, 0.5, 0.2, -0.05])
>>> s.autocorr() # doctest: +ELLIPSIS
0.10355...
>>> s.autocorr(lag=2) # doctest: +ELLIPSIS
-0.99999...
If the Pearson correlation is not well defined, then 'NaN' is returned.
>>> s = pd.Series([1, 0, 0, 0])
>>> s.autocorr()
nan
"""
return self.corr(self.shift(lag))
def dot(self, other):
"""
Matrix multiplication with DataFrame or inner-product with Series
objects. Can also be called using `self @ other` in Python >= 3.5.
Parameters
----------
other : Series or DataFrame
Returns
-------
dot_product : scalar or Series
"""
from pandas.core.frame import DataFrame
if isinstance(other, (Series, DataFrame)):
common = self.index.union(other.index)
if (len(common) > len(self.index) or
len(common) > len(other.index)):
raise ValueError('matrices are not aligned')
left = self.reindex(index=common, copy=False)
right = other.reindex(index=common, copy=False)
lvals = left.values
rvals = right.values
else:
lvals = self.values
rvals = np.asarray(other)
if lvals.shape[0] != rvals.shape[0]:
raise Exception('Dot product shape mismatch, %s vs %s' %
(lvals.shape, rvals.shape))
if isinstance(other, DataFrame):
return self._constructor(np.dot(lvals, rvals),
index=other.columns).__finalize__(self)
elif isinstance(other, Series):
return np.dot(lvals, rvals)
elif isinstance(rvals, np.ndarray):
return np.dot(lvals, rvals)
else: # pragma: no cover
raise TypeError('unsupported type: %s' % type(other))
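    # Editor's sketch (not part of the pandas source): Series @ Series gives a
    # scalar, Series @ DataFrame gives a Series indexed by the frame's columns.
    #
    #     >>> s = pd.Series([1, 2, 3])
    #     >>> s.dot(pd.Series([4, 5, 6]))
    #     32
    #     >>> s.dot(pd.DataFrame({'a': [1, 0, 0], 'b': [0, 1, 0]}))
    #     a    1
    #     b    2
    #     dtype: int64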
def __matmul__(self, other):
""" Matrix multiplication using binary `@` operator in Python>=3.5 """
return self.dot(other)
def __rmatmul__(self, other):
""" Matrix multiplication using binary `@` operator in Python>=3.5 """
return self.dot(np.transpose(other))
@Substitution(klass='Series')
@Appender(base._shared_docs['searchsorted'])
def searchsorted(self, value, side='left', sorter=None):
if sorter is not None:
sorter = ensure_platform_int(sorter)
return self._values.searchsorted(Series(value)._values,
side=side, sorter=sorter)
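    # Editor's sketch (not part of the pandas source): the Series is assumed to
    # be sorted (or a ``sorter`` supplied); in this pandas version the result is
    # an array even for a scalar ``value``.
    #
    #     >>> s = pd.Series([1, 2, 3])
    #     >>> s.searchsorted(2)
    #     array([1])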
# -------------------------------------------------------------------
# Combination
def append(self, to_append, ignore_index=False, verify_integrity=False):
"""
Concatenate two or more Series.
Parameters
----------
to_append : Series or list/tuple of Series
ignore_index : boolean, default False
If True, do not use the index labels.
.. versionadded:: 0.19.0
verify_integrity : boolean, default False
If True, raise Exception on creating index with duplicates
Notes
-----
Iteratively appending to a Series can be more computationally intensive
than a single concatenate. A better solution is to append values to a
list and then concatenate the list with the original Series all at
once.
See also
--------
pandas.concat : General function to concatenate DataFrame, Series
or Panel objects
Returns
-------
appended : Series
Examples
--------
>>> s1 = pd.Series([1, 2, 3])
>>> s2 = pd.Series([4, 5, 6])
>>> s3 = pd.Series([4, 5, 6], index=[3,4,5])
>>> s1.append(s2)
0 1
1 2
2 3
0 4
1 5
2 6
dtype: int64
>>> s1.append(s3)
0 1
1 2
2 3
3 4
4 5
5 6
dtype: int64
With `ignore_index` set to True:
>>> s1.append(s2, ignore_index=True)
0 1
1 2
2 3
3 4
4 5
5 6
dtype: int64
With `verify_integrity` set to True:
>>> s1.append(s2, verify_integrity=True)
Traceback (most recent call last):
...
ValueError: Indexes have overlapping values: [0, 1, 2]
"""
from pandas.core.reshape.concat import concat
if isinstance(to_append, (list, tuple)):
to_concat = [self] + to_append
else:
to_concat = [self, to_append]
return concat(to_concat, ignore_index=ignore_index,
verify_integrity=verify_integrity)
def _binop(self, other, func, level=None, fill_value=None):
"""
Perform generic binary operation with optional fill value
Parameters
----------
other : Series
func : binary operator
fill_value : float or object
Value to substitute for NA/null values. If both Series are NA in a
location, the result will be NA regardless of the passed fill value
level : int or level name, default None
Broadcast across a level, matching Index values on the
passed MultiIndex level
Returns
-------
combined : Series
"""
if not isinstance(other, Series):
raise AssertionError('Other operand must be Series')
new_index = self.index
this = self
if not self.index.equals(other.index):
this, other = self.align(other, level=level, join='outer',
copy=False)
new_index = this.index
this_vals, other_vals = ops.fill_binop(this.values, other.values,
fill_value)
with np.errstate(all='ignore'):
result = func(this_vals, other_vals)
name = ops.get_op_result_name(self, other)
result = self._constructor(result, index=new_index, name=name)
result = result.__finalize__(self)
if name is None:
# When name is None, __finalize__ overwrites current name
result.name = None
return result
def combine(self, other, func, fill_value=None):
"""
Perform elementwise binary operation on two Series using given function
with optional fill value when an index is missing from one Series or
the other
Parameters
----------
other : Series or scalar value
func : function
            Function that takes two scalars as inputs and returns a scalar
fill_value : scalar value
The default specifies to use the appropriate NaN value for
the underlying dtype of the Series
Returns
-------
result : Series
Examples
--------
>>> s1 = pd.Series([1, 2])
>>> s2 = pd.Series([0, 3])
>>> s1.combine(s2, lambda x1, x2: x1 if x1 < x2 else x2)
0 0
1 2
dtype: int64
See Also
--------
Series.combine_first : Combine Series values, choosing the calling
Series's values first
"""
if fill_value is None:
fill_value = na_value_for_dtype(self.dtype, compat=False)
if isinstance(other, Series):
# If other is a Series, result is based on union of Series,
# so do this element by element
new_index = self.index.union(other.index)
new_name = ops.get_op_result_name(self, other)
new_values = []
for idx in new_index:
lv = self.get(idx, fill_value)
rv = other.get(idx, fill_value)
with np.errstate(all='ignore'):
new_values.append(func(lv, rv))
else:
# Assume that other is a scalar, so apply the function for
# each element in the Series
new_index = self.index
with np.errstate(all='ignore'):
new_values = [func(lv, other) for lv in self._values]
new_name = self.name
if is_categorical_dtype(self.values):
pass
elif is_extension_array_dtype(self.values):
# The function can return something of any type, so check
# if the type is compatible with the calling EA.
try:
new_values = self._values._from_sequence(new_values)
except Exception:
# https://github.com/pandas-dev/pandas/issues/22850
# pandas has no control over what 3rd-party ExtensionArrays
# do in _values_from_sequence. We still want ops to work
# though, so we catch any regular Exception.
pass
return self._constructor(new_values, index=new_index, name=new_name)
def combine_first(self, other):
"""
Combine Series values, choosing the calling Series's values
first. Result index will be the union of the two indexes
Parameters
----------
other : Series
Returns
-------
combined : Series
Examples
--------
>>> s1 = pd.Series([1, np.nan])
>>> s2 = pd.Series([3, 4])
>>> s1.combine_first(s2)
0 1.0
1 4.0
dtype: float64
See Also
--------
Series.combine : Perform elementwise operation on two Series
using a given function
"""
new_index = self.index.union(other.index)
this = self.reindex(new_index, copy=False)
other = other.reindex(new_index, copy=False)
if is_datetimelike(this) and not is_datetimelike(other):
other = to_datetime(other)
return this.where(notna(this), other)
def update(self, other):
"""
Modify Series in place using non-NA values from passed
Series. Aligns on index
Parameters
----------
other : Series
Examples
--------
>>> s = pd.Series([1, 2, 3])
>>> s.update(pd.Series([4, 5, 6]))
>>> s
0 4
1 5
2 6
dtype: int64
>>> s = pd.Series(['a', 'b', 'c'])
>>> s.update(pd.Series(['d', 'e'], index=[0, 2]))
>>> s
0 d
1 b
2 e
dtype: object
>>> s = pd.Series([1, 2, 3])
>>> s.update(pd.Series([4, 5, 6, 7, 8]))
>>> s
0 4
1 5
2 6
dtype: int64
If ``other`` contains NaNs the corresponding values are not updated
in the original Series.
>>> s = pd.Series([1, 2, 3])
>>> s.update(pd.Series([4, np.nan, 6]))
>>> s
0 4
1 2
2 6
dtype: int64
"""
other = other.reindex_like(self)
mask = notna(other)
self._data = self._data.putmask(mask=mask, new=other, inplace=True)
self._maybe_update_cacher()
# ----------------------------------------------------------------------
# Reindexing, sorting
def sort_values(self, axis=0, ascending=True, inplace=False,
kind='quicksort', na_position='last'):
"""
Sort by the values.
Sort a Series in ascending or descending order by some
criterion.
Parameters
----------
axis : {0 or 'index'}, default 0
Axis to direct sorting. The value 'index' is accepted for
compatibility with DataFrame.sort_values.
ascending : bool, default True
If True, sort values in ascending order, otherwise descending.
inplace : bool, default False
If True, perform operation in-place.
        kind : {'quicksort', 'mergesort', 'heapsort'}, default 'quicksort'
Choice of sorting algorithm. See also :func:`numpy.sort` for more
information. 'mergesort' is the only stable algorithm.
na_position : {'first' or 'last'}, default 'last'
Argument 'first' puts NaNs at the beginning, 'last' puts NaNs at
the end.
Returns
-------
Series
Series ordered by values.
See Also
--------
Series.sort_index : Sort by the Series indices.
DataFrame.sort_values : Sort DataFrame by the values along either axis.
DataFrame.sort_index : Sort DataFrame by indices.
Examples
--------
>>> s = pd.Series([np.nan, 1, 3, 10, 5])
>>> s
0 NaN
1 1.0
2 3.0
3 10.0
4 5.0
dtype: float64
        Sort values in ascending order (default behaviour)
>>> s.sort_values(ascending=True)
1 1.0
2 3.0
4 5.0
3 10.0
0 NaN
dtype: float64
        Sort values in descending order
>>> s.sort_values(ascending=False)
3 10.0
4 5.0
2 3.0
1 1.0
0 NaN
dtype: float64
Sort values inplace
>>> s.sort_values(ascending=False, inplace=True)
>>> s
3 10.0
4 5.0
2 3.0
1 1.0
0 NaN
dtype: float64
Sort values putting NAs first
>>> s.sort_values(na_position='first')
0 NaN
1 1.0
2 3.0
4 5.0
3 10.0
dtype: float64
Sort a series of strings
>>> s = pd.Series(['z', 'b', 'd', 'a', 'c'])
>>> s
0 z
1 b
2 d
3 a
4 c
dtype: object
>>> s.sort_values()
3 a
1 b
4 c
2 d
0 z
dtype: object
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
# Validate the axis parameter
self._get_axis_number(axis)
# GH 5856/5853
if inplace and self._is_cached:
raise ValueError("This Series is a view of some other array, to "
"sort in-place you must create a copy")
def _try_kind_sort(arr):
# easier to ask forgiveness than permission
try:
# if kind==mergesort, it can fail for object dtype
return arr.argsort(kind=kind)
except TypeError:
# stable sort not available for object dtype
# uses the argsort default quicksort
return arr.argsort(kind='quicksort')
arr = self._values
sortedIdx = np.empty(len(self), dtype=np.int32)
bad = isna(arr)
good = ~bad
idx = ibase.default_index(len(self))
argsorted = _try_kind_sort(arr[good])
if is_list_like(ascending):
if len(ascending) != 1:
raise ValueError('Length of ascending (%d) must be 1 '
'for Series' % (len(ascending)))
ascending = ascending[0]
if not is_bool(ascending):
raise ValueError('ascending must be boolean')
if not ascending:
argsorted = argsorted[::-1]
if na_position == 'last':
n = good.sum()
sortedIdx[:n] = idx[good][argsorted]
sortedIdx[n:] = idx[bad]
elif na_position == 'first':
n = bad.sum()
sortedIdx[n:] = idx[good][argsorted]
sortedIdx[:n] = idx[bad]
else:
raise ValueError('invalid na_position: {!r}'.format(na_position))
result = self._constructor(arr[sortedIdx], index=self.index[sortedIdx])
if inplace:
self._update_inplace(result)
else:
return result.__finalize__(self)
def sort_index(self, axis=0, level=None, ascending=True, inplace=False,
kind='quicksort', na_position='last', sort_remaining=True):
"""
Sort Series by index labels.
Returns a new Series sorted by label if `inplace` argument is
``False``, otherwise updates the original series and returns None.
Parameters
----------
axis : int, default 0
Axis to direct sorting. This can only be 0 for Series.
level : int, optional
If not None, sort on values in specified index level(s).
        ascending : bool, default True
Sort ascending vs. descending.
inplace : bool, default False
If True, perform operation in-place.
kind : {'quicksort', 'mergesort', 'heapsort'}, default 'quicksort'
Choice of sorting algorithm. See also :func:`numpy.sort` for more
information. 'mergesort' is the only stable algorithm. For
DataFrames, this option is only applied when sorting on a single
column or label.
na_position : {'first', 'last'}, default 'last'
If 'first' puts NaNs at the beginning, 'last' puts NaNs at the end.
Not implemented for MultiIndex.
sort_remaining : bool, default True
If true and sorting by level and index is multilevel, sort by other
levels too (in order) after sorting by specified level.
Returns
-------
pandas.Series
The original Series sorted by the labels
See Also
--------
DataFrame.sort_index: Sort DataFrame by the index
DataFrame.sort_values: Sort DataFrame by the value
Series.sort_values : Sort Series by the value
Examples
--------
>>> s = pd.Series(['a', 'b', 'c', 'd'], index=[3, 2, 1, 4])
>>> s.sort_index()
1 c
2 b
3 a
4 d
dtype: object
Sort Descending
>>> s.sort_index(ascending=False)
4 d
3 a
2 b
1 c
dtype: object
Sort Inplace
>>> s.sort_index(inplace=True)
>>> s
1 c
2 b
3 a
4 d
dtype: object
By default NaNs are put at the end, but use `na_position` to place
them at the beginning
>>> s = pd.Series(['a', 'b', 'c', 'd'], index=[3, 2, 1, np.nan])
>>> s.sort_index(na_position='first')
NaN d
1.0 c
2.0 b
3.0 a
dtype: object
Specify index level to sort
>>> arrays = [np.array(['qux', 'qux', 'foo', 'foo',
... 'baz', 'baz', 'bar', 'bar']),
... np.array(['two', 'one', 'two', 'one',
... 'two', 'one', 'two', 'one'])]
>>> s = pd.Series([1, 2, 3, 4, 5, 6, 7, 8], index=arrays)
>>> s.sort_index(level=1)
bar one 8
baz one 6
foo one 4
qux one 2
bar two 7
baz two 5
foo two 3
qux two 1
dtype: int64
        Does not sort by remaining levels when sorting by a level
>>> s.sort_index(level=1, sort_remaining=False)
qux one 2
foo one 4
baz one 6
bar one 8
qux two 1
foo two 3
baz two 5
bar two 7
dtype: int64
"""
# TODO: this can be combined with DataFrame.sort_index impl as
# almost identical
inplace = validate_bool_kwarg(inplace, 'inplace')
# Validate the axis parameter
self._get_axis_number(axis)
index = self.index
if level is not None:
new_index, indexer = index.sortlevel(level, ascending=ascending,
sort_remaining=sort_remaining)
elif isinstance(index, MultiIndex):
from pandas.core.sorting import lexsort_indexer
labels = index._sort_levels_monotonic()
indexer = lexsort_indexer(labels._get_labels_for_sorting(),
orders=ascending,
na_position=na_position)
else:
from pandas.core.sorting import nargsort
# Check monotonic-ness before sort an index
# GH11080
if ((ascending and index.is_monotonic_increasing) or
(not ascending and index.is_monotonic_decreasing)):
if inplace:
return
else:
return self.copy()
indexer = nargsort(index, kind=kind, ascending=ascending,
na_position=na_position)
indexer = ensure_platform_int(indexer)
new_index = index.take(indexer)
new_index = new_index._sort_levels_monotonic()
new_values = self._values.take(indexer)
result = self._constructor(new_values, index=new_index)
if inplace:
self._update_inplace(result)
else:
return result.__finalize__(self)
def argsort(self, axis=0, kind='quicksort', order=None):
"""
        Overrides ndarray.argsort. Argsorts the values, omitting NA/null values,
and places the result in the same locations as the non-NA values
Parameters
----------
axis : int (can only be zero)
kind : {'mergesort', 'quicksort', 'heapsort'}, default 'quicksort'
Choice of sorting algorithm. See np.sort for more
information. 'mergesort' is the only stable algorithm
order : ignored
Returns
-------
        argsorted : Series, with -1 indicating where NaN values are present
See also
--------
numpy.ndarray.argsort
"""
values = self._values
mask = isna(values)
if mask.any():
result = Series(-1, index=self.index, name=self.name,
dtype='int64')
notmask = ~mask
result[notmask] = np.argsort(values[notmask], kind=kind)
return self._constructor(result,
index=self.index).__finalize__(self)
else:
return self._constructor(
np.argsort(values, kind=kind), index=self.index,
dtype='int64').__finalize__(self)
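    # Editor's sketch (not part of the pandas source): positions holding NA
    # values come back as -1 rather than being dropped.
    #
    #     >>> import numpy as np
    #     >>> pd.Series([3.0, np.nan, 1.0]).argsort()
    #     0    1
    #     1   -1
    #     2    0
    #     dtype: int64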
def nlargest(self, n=5, keep='first'):
"""
Return the largest `n` elements.
Parameters
----------
n : int, default 5
Return this many descending sorted values.
keep : {'first', 'last', 'all'}, default 'first'
When there are duplicate values that cannot all fit in a
Series of `n` elements:
- ``first`` : take the first occurrences based on the index order
- ``last`` : take the last occurrences based on the index order
- ``all`` : keep all occurrences. This can result in a Series of
size larger than `n`.
Returns
-------
Series
The `n` largest values in the Series, sorted in decreasing order.
Notes
-----
Faster than ``.sort_values(ascending=False).head(n)`` for small `n`
relative to the size of the ``Series`` object.
See Also
--------
Series.nsmallest: Get the `n` smallest elements.
Series.sort_values: Sort Series by values.
Series.head: Return the first `n` rows.
Examples
--------
>>> countries_population = {"Italy": 59000000, "France": 65000000,
... "Malta": 434000, "Maldives": 434000,
... "Brunei": 434000, "Iceland": 337000,
... "Nauru": 11300, "Tuvalu": 11300,
... "Anguilla": 11300, "Monserat": 5200}
>>> s = pd.Series(countries_population)
>>> s
Italy 59000000
France 65000000
Malta 434000
Maldives 434000
Brunei 434000
Iceland 337000
Nauru 11300
Tuvalu 11300
Anguilla 11300
Monserat 5200
dtype: int64
The `n` largest elements where ``n=5`` by default.
>>> s.nlargest()
France 65000000
Italy 59000000
Malta 434000
Maldives 434000
Brunei 434000
dtype: int64
The `n` largest elements where ``n=3``. Default `keep` value is 'first'
so Malta will be kept.
>>> s.nlargest(3)
France 65000000
Italy 59000000
Malta 434000
dtype: int64
The `n` largest elements where ``n=3`` and keeping the last duplicates.
Brunei will be kept since it is the last with value 434000 based on
the index order.
>>> s.nlargest(3, keep='last')
France 65000000
Italy 59000000
Brunei 434000
dtype: int64
The `n` largest elements where ``n=3`` with all duplicates kept. Note
that the returned Series has five elements due to the three duplicates.
>>> s.nlargest(3, keep='all')
France 65000000
Italy 59000000
Malta 434000
Maldives 434000
Brunei 434000
dtype: int64
"""
return algorithms.SelectNSeries(self, n=n, keep=keep).nlargest()
def nsmallest(self, n=5, keep='first'):
"""
Return the smallest `n` elements.
Parameters
----------
n : int, default 5
Return this many ascending sorted values.
keep : {'first', 'last', 'all'}, default 'first'
When there are duplicate values that cannot all fit in a
Series of `n` elements:
- ``first`` : take the first occurrences based on the index order
- ``last`` : take the last occurrences based on the index order
- ``all`` : keep all occurrences. This can result in a Series of
size larger than `n`.
Returns
-------
Series
The `n` smallest values in the Series, sorted in increasing order.
Notes
-----
Faster than ``.sort_values().head(n)`` for small `n` relative to
the size of the ``Series`` object.
See Also
--------
Series.nlargest: Get the `n` largest elements.
Series.sort_values: Sort Series by values.
Series.head: Return the first `n` rows.
Examples
--------
>>> countries_population = {"Italy": 59000000, "France": 65000000,
... "Brunei": 434000, "Malta": 434000,
... "Maldives": 434000, "Iceland": 337000,
... "Nauru": 11300, "Tuvalu": 11300,
... "Anguilla": 11300, "Monserat": 5200}
>>> s = pd.Series(countries_population)
>>> s
Italy 59000000
France 65000000
Brunei 434000
Malta 434000
Maldives 434000
Iceland 337000
Nauru 11300
Tuvalu 11300
Anguilla 11300
Monserat 5200
dtype: int64
The `n` largest elements where ``n=5`` by default.
>>> s.nsmallest()
Monserat 5200
Nauru 11300
Tuvalu 11300
Anguilla 11300
Iceland 337000
dtype: int64
The `n` smallest elements where ``n=3``. Default `keep` value is
'first' so Nauru and Tuvalu will be kept.
>>> s.nsmallest(3)
Monserat 5200
Nauru 11300
Tuvalu 11300
dtype: int64
The `n` smallest elements where ``n=3`` and keeping the last
duplicates. Anguilla and Tuvalu will be kept since they are the last
with value 11300 based on the index order.
>>> s.nsmallest(3, keep='last')
Monserat 5200
Anguilla 11300
Tuvalu 11300
dtype: int64
The `n` smallest elements where ``n=3`` with all duplicates kept. Note
that the returned Series has four elements due to the three duplicates.
>>> s.nsmallest(3, keep='all')
Monserat 5200
Nauru 11300
Tuvalu 11300
Anguilla 11300
dtype: int64
"""
return algorithms.SelectNSeries(self, n=n, keep=keep).nsmallest()
def sortlevel(self, level=0, ascending=True, sort_remaining=True):
"""Sort Series with MultiIndex by chosen level. Data will be
lexicographically sorted by the chosen level followed by the other
levels (in order),
.. deprecated:: 0.20.0
Use :meth:`Series.sort_index`
Parameters
----------
level : int or level name, default None
ascending : bool, default True
Returns
-------
sorted : Series
See Also
--------
Series.sort_index(level=...)
"""
warnings.warn("sortlevel is deprecated, use sort_index(level=...)",
FutureWarning, stacklevel=2)
return self.sort_index(level=level, ascending=ascending,
sort_remaining=sort_remaining)
def swaplevel(self, i=-2, j=-1, copy=True):
"""
Swap levels i and j in a MultiIndex
Parameters
----------
i, j : int, string (can be mixed)
Level of index to be swapped. Can pass level name as string.
Returns
-------
swapped : Series
.. versionchanged:: 0.18.1
The indexes ``i`` and ``j`` are now optional, and default to
the two innermost levels of the index.
"""
new_index = self.index.swaplevel(i, j)
return self._constructor(self._values, index=new_index,
copy=copy).__finalize__(self)
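# Illustrative sketch (hypothetical data, not part of the original source):
# with the default i=-2, j=-1 the two innermost index levels are exchanged,
# e.g.
#
#     midx = pd.MultiIndex.from_product([['a', 'b'], [1, 2]])
#     s = pd.Series(range(4), index=midx)
#     s.swaplevel()
#     # 1  a    0
#     # 2  a    1
#     # 1  b    2
#     # 2  b    3
#     # dtype: int64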
def reorder_levels(self, order):
"""
Rearrange index levels using input order. May not drop or duplicate
levels
Parameters
----------
order : list of int representing new level order.
(reference level by number or key)
Returns
-------
type of caller (new object)
"""
if not isinstance(self.index, MultiIndex): # pragma: no cover
raise Exception('Can only reorder levels on a hierarchical axis.')
result = self.copy()
result.index = result.index.reorder_levels(order)
return result
def unstack(self, level=-1, fill_value=None):
"""
Unstack, a.k.a. pivot, Series with MultiIndex to produce DataFrame.
The level involved will automatically get sorted.
Parameters
----------
level : int, string, or list of these, default last level
Level(s) to unstack, can pass level name
fill_value : replace NaN with this value if the unstack produces
missing values
.. versionadded:: 0.18.0
Examples
--------
>>> s = pd.Series([1, 2, 3, 4],
... index=pd.MultiIndex.from_product([['one', 'two'], ['a', 'b']]))
>>> s
one a 1
b 2
two a 3
b 4
dtype: int64
>>> s.unstack(level=-1)
a b
one 1 2
two 3 4
>>> s.unstack(level=0)
one two
a 1 3
b 2 4
Returns
-------
unstacked : DataFrame
"""
from pandas.core.reshape.reshape import unstack
return unstack(self, level, fill_value)
# ----------------------------------------------------------------------
# function application
def map(self, arg, na_action=None):
"""
Map values of Series according to input correspondence.
Used for substituting each value in a Series with another value,
that may be derived from a function, a ``dict`` or
a :class:`Series`.
Parameters
----------
arg : function, dict, or Series
Mapping correspondence.
na_action : {None, 'ignore'}, default None
If 'ignore', propagate NaN values, without passing them to the
mapping correspondence.
Returns
-------
Series
Same index as caller.
See Also
--------
Series.apply : For applying more complex functions on a Series.
DataFrame.apply : Apply a function row-/column-wise.
DataFrame.applymap : Apply a function elementwise on a whole DataFrame.
Notes
-----
When ``arg`` is a dictionary, values in Series that are not in the
dictionary (as keys) are converted to ``NaN``. However, if the
dictionary is a ``dict`` subclass that defines ``__missing__`` (i.e.
provides a method for default values), then this default is used
rather than ``NaN``.
Examples
--------
>>> s = pd.Series(['cat', 'dog', np.nan, 'rabbit'])
>>> s
0 cat
1 dog
2 NaN
3 rabbit
dtype: object
``map`` accepts a ``dict`` or a ``Series``. Values that are not found
in the ``dict`` are converted to ``NaN``, unless the dict has a default
value (e.g. ``defaultdict``):
>>> s.map({'cat': 'kitten', 'dog': 'puppy'})
0 kitten
1 puppy
2 NaN
3 NaN
dtype: object
It also accepts a function:
>>> s.map('I am a {}'.format)
0 I am a cat
1 I am a dog
2 I am a nan
3 I am a rabbit
dtype: object
To avoid applying the function to missing values (and keep them as
``NaN``) ``na_action='ignore'`` can be used:
>>> s.map('I am a {}'.format, na_action='ignore')
0 I am a cat
1 I am a dog
2 NaN
3 I am a rabbit
dtype: object
"""
new_values = super(Series, self)._map_values(
arg, na_action=na_action)
return self._constructor(new_values,
index=self.index).__finalize__(self)
def _gotitem(self, key, ndim, subset=None):
"""
sub-classes to define
return a sliced object
Parameters
----------
key : string / list of selections
ndim : 1,2
requested ndim of result
subset : object, default None
subset to act on
"""
return self
_agg_doc = dedent("""
Examples
--------
>>> s = pd.Series([1, 2, 3, 4])
>>> s
0 1
1 2
2 3
3 4
dtype: int64
>>> s.agg('min')
1
>>> s.agg(['min', 'max'])
min 1
max 4
dtype: int64
See also
--------
pandas.Series.apply : Invoke function on a Series.
pandas.Series.transform : Transform function producing
a Series with like indexes.
""")
@Appender(_agg_doc)
@Appender(generic._shared_docs['aggregate'] % dict(
versionadded='.. versionadded:: 0.20.0',
**_shared_doc_kwargs))
def aggregate(self, func, axis=0, *args, **kwargs):
# Validate the axis parameter
self._get_axis_number(axis)
result, how = self._aggregate(func, *args, **kwargs)
if result is None:
# we can be called from an inner function which
# passes this meta-data
kwargs.pop('_axis', None)
kwargs.pop('_level', None)
# try a regular apply, this evaluates lambdas
# row-by-row; however if the lambda expects a Series
# expression, e.g.: lambda x: x-x.quantile(0.25)
# this will fail, so we can try a vectorized evaluation
# we cannot FIRST try the vectorized evaluation, because
# then .agg and .apply would have different semantics if the
# operation is actually defined on the Series, e.g. str
try:
result = self.apply(func, *args, **kwargs)
except (ValueError, AttributeError, TypeError):
result = func(self, *args, **kwargs)
return result
agg = aggregate
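# Illustrative sketch of the fallback above (hypothetical example, not part
# of the original source): a lambda that only works on the whole Series
# raises inside the element-wise apply, so aggregate retries func(self):
#
#     s = pd.Series([1, 2, 3, 4])
#     s.agg(lambda x: x - x.quantile(0.25))
#     # 0   -0.75
#     # 1    0.25
#     # 2    1.25
#     # 3    2.25
#     # dtype: float64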
@Appender(generic._shared_docs['transform'] % _shared_doc_kwargs)
def transform(self, func, axis=0, *args, **kwargs):
# Validate the axis parameter
self._get_axis_number(axis)
return super(Series, self).transform(func, *args, **kwargs)
def apply(self, func, convert_dtype=True, args=(), **kwds):
"""
Invoke function on values of Series. Can be ufunc (a NumPy function
that applies to the entire Series) or a Python function that only works
on single values
Parameters
----------
func : function
convert_dtype : boolean, default True
Try to find better dtype for elementwise function results. If
False, leave as dtype=object
args : tuple
Positional arguments to pass to function in addition to the value
Additional keyword arguments will be passed as keywords to the function
Returns
-------
y : Series or DataFrame if func returns a Series
See also
--------
Series.map: For element-wise operations
Series.agg: only perform aggregating type operations
Series.transform: only perform transforming type operations
Examples
--------
Create a series with typical summer temperatures for each city.
>>> series = pd.Series([20, 21, 12], index=['London',
... 'New York','Helsinki'])
>>> series
London 20
New York 21
Helsinki 12
dtype: int64
Square the values by defining a function and passing it as an
argument to ``apply()``.
>>> def square(x):
... return x**2
>>> series.apply(square)
London 400
New York 441
Helsinki 144
dtype: int64
Square the values by passing an anonymous function as an
argument to ``apply()``.
>>> series.apply(lambda x: x**2)
London 400
New York 441
Helsinki 144
dtype: int64
Define a custom function that needs additional positional
arguments and pass these additional arguments using the
``args`` keyword.
>>> def subtract_custom_value(x, custom_value):
... return x-custom_value
>>> series.apply(subtract_custom_value, args=(5,))
London 15
New York 16
Helsinki 7
dtype: int64
Define a custom function that takes keyword arguments
and pass these arguments to ``apply``.
>>> def add_custom_values(x, **kwargs):
... for month in kwargs:
... x+=kwargs[month]
... return x
>>> series.apply(add_custom_values, june=30, july=20, august=25)
London 95
New York 96
Helsinki 87
dtype: int64
Use a function from the Numpy library.
>>> series.apply(np.log)
London 2.995732
New York 3.044522
Helsinki 2.484907
dtype: float64
"""
if len(self) == 0:
return self._constructor(dtype=self.dtype,
index=self.index).__finalize__(self)
# dispatch to agg
if isinstance(func, (list, dict)):
return self.aggregate(func, *args, **kwds)
# if we are a string, try to dispatch
if isinstance(func, compat.string_types):
return self._try_aggregate_string_function(func, *args, **kwds)
# handle ufuncs and lambdas
if kwds or args and not isinstance(func, np.ufunc):
def f(x):
return func(x, *args, **kwds)
else:
f = func
with np.errstate(all='ignore'):
if isinstance(f, np.ufunc):
return f(self)
# row-wise access
if is_extension_type(self.dtype):
mapped = self._values.map(f)
else:
values = self.astype(object).values
mapped = lib.map_infer(values, f, convert=convert_dtype)
if len(mapped) and isinstance(mapped[0], Series):
from pandas.core.frame import DataFrame
return DataFrame(mapped.tolist(), index=self.index)
else:
return self._constructor(mapped,
index=self.index).__finalize__(self)
def _reduce(self, op, name, axis=0, skipna=True, numeric_only=None,
filter_type=None, **kwds):
"""
perform a reduction operation
if we have an ndarray as a value, then simply perform the operation,
otherwise delegate to the object
"""
delegate = self._values
if axis is not None:
self._get_axis_number(axis)
# dispatch to ExtensionArray interface
if isinstance(delegate, ExtensionArray):
return delegate._reduce(name, skipna=skipna, **kwds)
# dispatch to numpy arrays
elif isinstance(delegate, np.ndarray):
if numeric_only:
raise NotImplementedError('Series.{0} does not implement '
'numeric_only.'.format(name))
with np.errstate(all='ignore'):
return op(delegate, skipna=skipna, **kwds)
# TODO(EA) dispatch to Index
# remove once all internals extension types are
# moved to ExtensionArrays
return delegate._reduce(op=op, name=name, axis=axis, skipna=skipna,
numeric_only=numeric_only,
filter_type=filter_type, **kwds)
def _reindex_indexer(self, new_index, indexer, copy):
if indexer is None:
if copy:
return self.copy()
return self
new_values = algorithms.take_1d(self._values, indexer,
allow_fill=True, fill_value=None)
return self._constructor(new_values, index=new_index)
def _needs_reindex_multi(self, axes, method, level):
""" check if we do need a multi reindex; this is for compat with
higher dims
"""
return False
@Appender(generic._shared_docs['align'] % _shared_doc_kwargs)
def align(self, other, join='outer', axis=None, level=None, copy=True,
fill_value=None, method=None, limit=None, fill_axis=0,
broadcast_axis=None):
return super(Series, self).align(other, join=join, axis=axis,
level=level, copy=copy,
fill_value=fill_value, method=method,
limit=limit, fill_axis=fill_axis,
broadcast_axis=broadcast_axis)
def rename(self, index=None, **kwargs):
"""Alter Series index labels or name
Function / dict values must be unique (1-to-1). Labels not contained in
a dict / Series will be left as-is. Extra labels listed don't throw an
error.
Alternatively, change ``Series.name`` with a scalar value.
See the :ref:`user guide <basics.rename>` for more.
Parameters
----------
index : scalar, hashable sequence, dict-like or function, optional
dict-like or functions are transformations to apply to
the index.
Scalar or hashable sequence-like will alter the ``Series.name``
attribute.
copy : boolean, default True
Also copy underlying data
inplace : boolean, default False
Whether to return a new Series. If True then value of copy is
ignored.
level : int or level name, default None
In case of a MultiIndex, only rename labels in the specified
level.
Returns
-------
renamed : Series (new object)
See Also
--------
pandas.Series.rename_axis
Examples
--------
>>> s = pd.Series([1, 2, 3])
>>> s
0 1
1 2
2 3
dtype: int64
>>> s.rename("my_name") # scalar, changes Series.name
0 1
1 2
2 3
Name: my_name, dtype: int64
>>> s.rename(lambda x: x ** 2) # function, changes labels
0 1
1 2
4 3
dtype: int64
>>> s.rename({1: 3, 2: 5}) # mapping, changes labels
0 1
3 2
5 3
dtype: int64
"""
kwargs['inplace'] = validate_bool_kwarg(kwargs.get('inplace', False),
'inplace')
non_mapping = is_scalar(index) or (is_list_like(index) and
not is_dict_like(index))
if non_mapping:
return self._set_name(index, inplace=kwargs.get('inplace'))
return super(Series, self).rename(index=index, **kwargs)
@Substitution(**_shared_doc_kwargs)
@Appender(generic.NDFrame.reindex.__doc__)
def reindex(self, index=None, **kwargs):
return super(Series, self).reindex(index=index, **kwargs)
def drop(self, labels=None, axis=0, index=None, columns=None,
level=None, inplace=False, errors='raise'):
"""
Return Series with specified index labels removed.
Remove elements of a Series based on specifying the index labels.
When using a multi-index, labels on different levels can be removed
by specifying the level.
Parameters
----------
labels : single label or list-like
Index labels to drop.
axis : 0, default 0
Redundant for application on Series.
index, columns : None
Redundant for application on Series, but index can be used instead
of labels.
.. versionadded:: 0.21.0
level : int or level name, optional
For MultiIndex, level for which the labels will be removed.
inplace : bool, default False
If True, do operation inplace and return None.
errors : {'ignore', 'raise'}, default 'raise'
If 'ignore', suppress error and only existing labels are dropped.
Returns
-------
dropped : pandas.Series
See Also
--------
Series.reindex : Return only specified index labels of Series.
Series.dropna : Return series without null values.
Series.drop_duplicates : Return Series with duplicate values removed.
DataFrame.drop : Drop specified labels from rows or columns.
Raises
------
KeyError
If none of the labels are found in the index.
Examples
--------
>>> s = pd.Series(data=np.arange(3), index=['A','B','C'])
>>> s
A 0
B 1
C 2
dtype: int64
Drop labels B and C
>>> s.drop(labels=['B','C'])
A 0
dtype: int64
Drop 2nd level label in MultiIndex Series
>>> midx = pd.MultiIndex(levels=[['lama', 'cow', 'falcon'],
... ['speed', 'weight', 'length']],
... labels=[[0, 0, 0, 1, 1, 1, 2, 2, 2],
... [0, 1, 2, 0, 1, 2, 0, 1, 2]])
>>> s = pd.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1, 0.3],
... index=midx)
>>> s
lama speed 45.0
weight 200.0
length 1.2
cow speed 30.0
weight 250.0
length 1.5
falcon speed 320.0
weight 1.0
length 0.3
dtype: float64
>>> s.drop(labels='weight', level=1)
lama speed 45.0
length 1.2
cow speed 30.0
length 1.5
falcon speed 320.0
length 0.3
dtype: float64
"""
return super(Series, self).drop(labels=labels, axis=axis, index=index,
columns=columns, level=level,
inplace=inplace, errors=errors)
@Substitution(**_shared_doc_kwargs)
@Appender(generic.NDFrame.fillna.__doc__)
def fillna(self, value=None, method=None, axis=None, inplace=False,
limit=None, downcast=None, **kwargs):
return super(Series, self).fillna(value=value, method=method,
axis=axis, inplace=inplace,
limit=limit, downcast=downcast,
**kwargs)
@Appender(generic._shared_docs['replace'] % _shared_doc_kwargs)
def replace(self, to_replace=None, value=None, inplace=False, limit=None,
regex=False, method='pad'):
return super(Series, self).replace(to_replace=to_replace, value=value,
inplace=inplace, limit=limit,
regex=regex, method=method)
@Appender(generic._shared_docs['shift'] % _shared_doc_kwargs)
def shift(self, periods=1, freq=None, axis=0):
return super(Series, self).shift(periods=periods, freq=freq, axis=axis)
def reindex_axis(self, labels, axis=0, **kwargs):
"""Conform Series to new index with optional filling logic.
.. deprecated:: 0.21.0
Use ``Series.reindex`` instead.
"""
# for compatibility with higher dims
if axis != 0:
raise ValueError("cannot reindex series on non-zero axis!")
msg = ("'.reindex_axis' is deprecated and will be removed in a future "
"version. Use '.reindex' instead.")
warnings.warn(msg, FutureWarning, stacklevel=2)
return self.reindex(index=labels, **kwargs)
def memory_usage(self, index=True, deep=False):
"""
Return the memory usage of the Series.
The memory usage can optionally include the contribution of
the index and of elements of `object` dtype.
Parameters
----------
index : bool, default True
Specifies whether to include the memory usage of the Series index.
deep : bool, default False
If True, introspect the data deeply by interrogating
`object` dtypes for system-level memory consumption, and include
it in the returned value.
Returns
-------
int
Bytes of memory consumed.
See Also
--------
numpy.ndarray.nbytes : Total bytes consumed by the elements of the
array.
DataFrame.memory_usage : Bytes consumed by a DataFrame.
Examples
--------
>>> s = pd.Series(range(3))
>>> s.memory_usage()
104
Not including the index gives the size of the rest of the data, which
is necessarily smaller:
>>> s.memory_usage(index=False)
24
The memory footprint of `object` values is ignored by default:
>>> s = pd.Series(["a", "b"])
>>> s.values
array(['a', 'b'], dtype=object)
>>> s.memory_usage()
96
>>> s.memory_usage(deep=True)
212
"""
v = super(Series, self).memory_usage(deep=deep)
if index:
v += self.index.memory_usage(deep=deep)
return v
@Appender(generic.NDFrame._take.__doc__)
def _take(self, indices, axis=0, is_copy=False):
indices = ensure_platform_int(indices)
new_index = self.index.take(indices)
if is_categorical_dtype(self):
# https://github.com/pandas-dev/pandas/issues/20664
# TODO: remove when the default Categorical.take behavior changes
indices = maybe_convert_indices(indices, len(self._get_axis(axis)))
kwargs = {'allow_fill': False}
else:
kwargs = {}
new_values = self._values.take(indices, **kwargs)
result = (self._constructor(new_values, index=new_index,
fastpath=True).__finalize__(self))
# Maybe set copy if we didn't actually change the index.
if is_copy:
if not result._get_axis(axis).equals(self._get_axis(axis)):
result._set_is_copy(self)
return result
def isin(self, values):
"""
Check whether `values` are contained in Series.
Return a boolean Series showing whether each element in the Series
matches an element in the passed sequence of `values` exactly.
Parameters
----------
values : set or list-like
The sequence of values to test. Passing in a single string will
raise a ``TypeError``. Instead, turn a single string into a
list of one element.
.. versionadded:: 0.18.1
Support for values as a set.
Returns
-------
isin : Series (bool dtype)
Raises
------
TypeError
* If `values` is a string
See Also
--------
pandas.DataFrame.isin : equivalent method on DataFrame
Examples
--------
>>> s = pd.Series(['lama', 'cow', 'lama', 'beetle', 'lama',
... 'hippo'], name='animal')
>>> s.isin(['cow', 'lama'])
0 True
1 True
2 True
3 False
4 True
5 False
Name: animal, dtype: bool
Passing a single string as ``s.isin('lama')`` will raise an error. Use
a list of one element instead:
>>> s.isin(['lama'])
0 True
1 False
2 True
3 False
4 True
5 False
Name: animal, dtype: bool
"""
result = algorithms.isin(self, values)
return self._constructor(result, index=self.index).__finalize__(self)
def between(self, left, right, inclusive=True):
"""
Return boolean Series equivalent to left <= series <= right.
This function returns a boolean vector containing `True` wherever the
corresponding Series element is between the boundary values `left` and
`right`. NA values are treated as `False`.
Parameters
----------
left : scalar
Left boundary.
right : scalar
Right boundary.
inclusive : bool, default True
Include boundaries.
Returns
-------
Series
Each element will be a boolean.
Notes
-----
This function is equivalent to ``(left <= ser) & (ser <= right)``
See Also
--------
pandas.Series.gt : Greater than of series and other
pandas.Series.lt : Less than of series and other
Examples
--------
>>> s = pd.Series([2, 0, 4, 8, np.nan])
Boundary values are included by default:
>>> s.between(1, 4)
0 True
1 False
2 True
3 False
4 False
dtype: bool
With `inclusive` set to ``False`` boundary values are excluded:
>>> s.between(1, 4, inclusive=False)
0 True
1 False
2 False
3 False
4 False
dtype: bool
`left` and `right` can be any scalar value:
>>> s = pd.Series(['Alice', 'Bob', 'Carol', 'Eve'])
>>> s.between('Anna', 'Daniel')
0 False
1 True
2 True
3 False
dtype: bool
"""
if inclusive:
lmask = self >= left
rmask = self <= right
else:
lmask = self > left
rmask = self < right
return lmask & rmask
@classmethod
def from_csv(cls, path, sep=',', parse_dates=True, header=None,
index_col=0, encoding=None, infer_datetime_format=False):
"""Read CSV file.
.. deprecated:: 0.21.0
Use :func:`pandas.read_csv` instead.
It is preferable to use the more powerful :func:`pandas.read_csv`
for most general purposes, but ``from_csv`` makes for an easy
roundtrip to and from a file (the exact counterpart of
``to_csv``), especially with a time Series.
This method only differs from :func:`pandas.read_csv` in some defaults:
- `index_col` is ``0`` instead of ``None`` (take first column as index
by default)
- `header` is ``None`` instead of ``0`` (the first row is not used as
the column names)
- `parse_dates` is ``True`` instead of ``False`` (try parsing the index
as datetime by default)
With :func:`pandas.read_csv`, the option ``squeeze=True`` can be used
to return a Series like ``from_csv``.
Parameters
----------
path : string file path or file handle / StringIO
sep : string, default ','
Field delimiter
parse_dates : boolean, default True
Parse dates. Different default from read_table
header : int, default None
Row to use as header (skip prior rows)
index_col : int or sequence, default 0
Column to use for index. If a sequence is given, a MultiIndex
is used. Different default from read_table
encoding : string, optional
a string representing the encoding to use if the contents are
non-ascii, for python versions prior to 3
infer_datetime_format: boolean, default False
If True and `parse_dates` is True for a column, try to infer the
datetime format based on the first datetime string. If the format
can be inferred, there often will be a large parsing speed-up.
See also
--------
pandas.read_csv
Returns
-------
y : Series
"""
# We're calling `DataFrame.from_csv` in the implementation,
# which will propagate a warning regarding `from_csv` deprecation.
from pandas.core.frame import DataFrame
df = DataFrame.from_csv(path, header=header, index_col=index_col,
sep=sep, parse_dates=parse_dates,
encoding=encoding,
infer_datetime_format=infer_datetime_format)
result = df.iloc[:, 0]
if header is None:
result.index.name = result.name = None
return result
@Appender(generic.NDFrame.to_csv.__doc__)
def to_csv(self, *args, **kwargs):
names = ["path_or_buf", "sep", "na_rep", "float_format", "columns",
"header", "index", "index_label", "mode", "encoding",
"compression", "quoting", "quotechar", "line_terminator",
"chunksize", "tupleize_cols", "date_format", "doublequote",
"escapechar", "decimal"]
old_names = ["path_or_buf", "index", "sep", "na_rep", "float_format",
"header", "index_label", "mode", "encoding",
"compression", "date_format", "decimal"]
if "path" in kwargs:
warnings.warn("The signature of `Series.to_csv` was aligned "
"to that of `DataFrame.to_csv`, and argument "
"'path' will be renamed to 'path_or_buf'.",
FutureWarning, stacklevel=2)
kwargs["path_or_buf"] = kwargs.pop("path")
if len(args) > 1:
# Either "index" (old signature) or "sep" (new signature) is being
# passed as second argument (while the first is the same)
maybe_sep = args[1]
if not (is_string_like(maybe_sep) and len(maybe_sep) == 1):
# old signature
warnings.warn("The signature of `Series.to_csv` was aligned "
"to that of `DataFrame.to_csv`. Note that the "
"order of arguments changed, and the new one "
"has 'sep' in first place, for which \"{}\" is "
"not a valid value. The old order will cease to "
"be supported in a future version. Please refer "
"to the documentation for `DataFrame.to_csv` "
"when updating your function "
"calls.".format(maybe_sep),
FutureWarning, stacklevel=2)
names = old_names
pos_args = dict(zip(names[:len(args)], args))
for key in pos_args:
if key in kwargs:
raise ValueError("Argument given by name ('{}') and position "
"({})".format(key, names.index(key)))
kwargs[key] = pos_args[key]
if kwargs.get("header", None) is None:
warnings.warn("The signature of `Series.to_csv` was aligned "
"to that of `DataFrame.to_csv`, and argument "
"'header' will change its default value from False "
"to True: please pass an explicit value to suppress "
"this warning.", FutureWarning,
stacklevel=2)
kwargs["header"] = False # Backwards compatibility.
return self.to_frame().to_csv(**kwargs)
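# Illustrative sketch (hypothetical calls, not part of the original source):
# a second positional argument that is not a single-character string is
# interpreted as the old (path, index, ...) order and triggers the
# FutureWarning above, while the keyword form follows the new signature:
#
#     s = pd.Series([1, 2, 3])
#     s.to_csv("out.csv", True)       # old order: second argument is index
#     s.to_csv("out.csv", sep=";")    # new order: pass sep by keyword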
@Appender(generic._shared_docs['to_excel'] % _shared_doc_kwargs)
def to_excel(self, excel_writer, sheet_name='Sheet1', na_rep='',
float_format=None, columns=None, header=True, index=True,
index_label=None, startrow=0, startcol=0, engine=None,
merge_cells=True, encoding=None, inf_rep='inf', verbose=True):
df = self.to_frame()
df.to_excel(excel_writer=excel_writer, sheet_name=sheet_name,
na_rep=na_rep, float_format=float_format, columns=columns,
header=header, index=index, index_label=index_label,
startrow=startrow, startcol=startcol, engine=engine,
merge_cells=merge_cells, encoding=encoding,
inf_rep=inf_rep, verbose=verbose)
@Appender(generic._shared_docs['isna'] % _shared_doc_kwargs)
def isna(self):
return super(Series, self).isna()
@Appender(generic._shared_docs['isna'] % _shared_doc_kwargs)
def isnull(self):
return super(Series, self).isnull()
@Appender(generic._shared_docs['notna'] % _shared_doc_kwargs)
def notna(self):
return super(Series, self).notna()
@Appender(generic._shared_docs['notna'] % _shared_doc_kwargs)
def notnull(self):
return super(Series, self).notnull()
def dropna(self, axis=0, inplace=False, **kwargs):
"""
Return a new Series with missing values removed.
See the :ref:`User Guide <missing_data>` for more on which values are
considered missing, and how to work with missing data.
Parameters
----------
axis : {0 or 'index'}, default 0
There is only one axis to drop values from.
inplace : bool, default False
If True, do operation inplace and return None.
**kwargs
Not in use.
Returns
-------
Series
Series with NA entries dropped from it.
See Also
--------
Series.isna: Indicate missing values.
Series.notna : Indicate existing (non-missing) values.
Series.fillna : Replace missing values.
DataFrame.dropna : Drop rows or columns which contain NA values.
Index.dropna : Drop missing indices.
Examples
--------
>>> ser = pd.Series([1., 2., np.nan])
>>> ser
0 1.0
1 2.0
2 NaN
dtype: float64
Drop NA values from a Series.
>>> ser.dropna()
0 1.0
1 2.0
dtype: float64
Keep the Series with valid entries in the same variable.
>>> ser.dropna(inplace=True)
>>> ser
0 1.0
1 2.0
dtype: float64
Empty strings are not considered NA values. ``None`` is considered an
NA value.
>>> ser = pd.Series([np.NaN, 2, pd.NaT, '', None, 'I stay'])
>>> ser
0 NaN
1 2
2 NaT
3
4 None
5 I stay
dtype: object
>>> ser.dropna()
1 2
3
5 I stay
dtype: object
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
kwargs.pop('how', None)
if kwargs:
raise TypeError('dropna() got an unexpected keyword '
'argument "{0}"'.format(list(kwargs.keys())[0]))
# Validate the axis parameter
self._get_axis_number(axis or 0)
if self._can_hold_na:
result = remove_na_arraylike(self)
if inplace:
self._update_inplace(result)
else:
return result
else:
if inplace:
# do nothing
pass
else:
return self.copy()
def valid(self, inplace=False, **kwargs):
"""Return Series without null values.
.. deprecated:: 0.23.0
Use :meth:`Series.dropna` instead.
"""
warnings.warn("Method .valid will be removed in a future version. "
"Use .dropna instead.", FutureWarning, stacklevel=2)
return self.dropna(inplace=inplace, **kwargs)
# ----------------------------------------------------------------------
# Time series-oriented methods
def to_timestamp(self, freq=None, how='start', copy=True):
"""
Cast to datetimeindex of timestamps, at *beginning* of period
Parameters
----------
freq : string, default frequency of PeriodIndex
Desired frequency
how : {'s', 'e', 'start', 'end'}
Convention for converting period to timestamp; start of period
vs. end
Returns
-------
ts : Series with DatetimeIndex
"""
new_values = self._values
if copy:
new_values = new_values.copy()
new_index = self.index.to_timestamp(freq=freq, how=how)
return self._constructor(new_values,
index=new_index).__finalize__(self)
def to_period(self, freq=None, copy=True):
"""
Convert Series from DatetimeIndex to PeriodIndex with desired
frequency (inferred from index if not passed)
Parameters
----------
freq : string, default
Returns
-------
ts : Series with PeriodIndex
"""
new_values = self._values
if copy:
new_values = new_values.copy()
new_index = self.index.to_period(freq=freq)
return self._constructor(new_values,
index=new_index).__finalize__(self)
# ----------------------------------------------------------------------
# Accessor Methods
# ----------------------------------------------------------------------
str = CachedAccessor("str", StringMethods)
dt = CachedAccessor("dt", CombinedDatetimelikeProperties)
cat = CachedAccessor("cat", CategoricalAccessor)
plot = CachedAccessor("plot", gfx.SeriesPlotMethods)
# ----------------------------------------------------------------------
# Add plotting methods to Series
hist = gfx.hist_series
Series._setup_axes(['index'], info_axis=0, stat_axis=0, aliases={'rows': 0},
docs={'index': 'The index (axis labels) of the Series.'})
Series._add_numeric_operations()
Series._add_series_only_operations()
Series._add_series_or_dataframe_operations()
# Add arithmetic!
ops.add_flex_arithmetic_methods(Series)
ops.add_special_arithmetic_methods(Series)
# -----------------------------------------------------------------------------
# Supplementary functions
def _sanitize_index(data, index, copy=False):
""" sanitize an index type to return an ndarray of the underlying, pass
thru a non-Index
"""
if index is None:
return data
if len(data) != len(index):
raise ValueError('Length of values does not match length of index')
if isinstance(data, ABCIndexClass) and not copy:
pass
elif isinstance(data, (PeriodIndex, DatetimeIndex)):
data = data._values
if copy:
data = data.copy()
elif isinstance(data, np.ndarray):
# coerce datetimelike types
if data.dtype.kind in ['M', 'm']:
data = _sanitize_array(data, index, copy=copy)
return data
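# Illustrative sketch (hypothetical call, not part of the original source):
# _sanitize_array below broadcasts a scalar against the supplied index, e.g.
#
#     _sanitize_array(5, index=pd.RangeIndex(3))
#     # array([5, 5, 5])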
def _sanitize_array(data, index, dtype=None, copy=False,
raise_cast_failure=False):
""" sanitize input data to an ndarray, copy if specified, coerce to the
dtype if specified
"""
if dtype is not None:
dtype = pandas_dtype(dtype)
if isinstance(data, ma.MaskedArray):
mask = ma.getmaskarray(data)
if mask.any():
data, fill_value = maybe_upcast(data, copy=True)
data[mask] = fill_value
else:
data = data.copy()
def _try_cast(arr, take_fast_path):
# perf shortcut as this is the most common case
if take_fast_path:
if maybe_castable(arr) and not copy and dtype is None:
return arr
try:
# gh-15832: Check if we are requesting a numeric dtype and
# that we can convert the data to the requested dtype.
if is_integer_dtype(dtype):
subarr = maybe_cast_to_integer_array(arr, dtype)
subarr = maybe_cast_to_datetime(arr, dtype)
# Take care in creating object arrays (but iterators are not
# supported):
if is_object_dtype(dtype) and (is_list_like(subarr) and
not (is_iterator(subarr) or
isinstance(subarr, np.ndarray))):
subarr = construct_1d_object_array_from_listlike(subarr)
elif not is_extension_type(subarr):
subarr = construct_1d_ndarray_preserving_na(subarr, dtype,
copy=copy)
except (ValueError, TypeError):
if is_categorical_dtype(dtype):
# We *do* allow casting to categorical, since we know
# that Categorical is the only array type for 'category'.
subarr = Categorical(arr, dtype.categories,
ordered=dtype.ordered)
elif is_extension_array_dtype(dtype):
# create an extension array from its dtype
array_type = dtype.construct_array_type()._from_sequence
subarr = array_type(arr, dtype=dtype, copy=copy)
elif dtype is not None and raise_cast_failure:
raise
else:
subarr = np.array(arr, dtype=object, copy=copy)
return subarr
# GH #846
if isinstance(data, (np.ndarray, Index, Series)):
if dtype is not None:
subarr = np.array(data, copy=False)
# possibility of nan -> garbage
if is_float_dtype(data.dtype) and is_integer_dtype(dtype):
if not isna(data).any():
subarr = _try_cast(data, True)
elif copy:
subarr = data.copy()
else:
subarr = _try_cast(data, True)
elif isinstance(data, Index):
# don't coerce Index types
# e.g. indexes can have different conversions (so don't fast path
# them)
# GH 6140
subarr = _sanitize_index(data, index, copy=copy)
else:
# we will try to copy by-definition here
subarr = _try_cast(data, True)
elif isinstance(data, ExtensionArray):
subarr = data
if dtype is not None and not data.dtype.is_dtype(dtype):
subarr = data.astype(dtype)
if copy:
subarr = data.copy()
return subarr
elif isinstance(data, (list, tuple)) and len(data) > 0:
if dtype is not None:
try:
subarr = _try_cast(data, False)
except Exception:
if raise_cast_failure: # pragma: no cover
raise
subarr = np.array(data, dtype=object, copy=copy)
subarr = lib.maybe_convert_objects(subarr)
else:
subarr = maybe_convert_platform(data)
subarr = maybe_cast_to_datetime(subarr, dtype)
elif isinstance(data, range):
# GH 16804
start, stop, step = get_range_parameters(data)
arr = np.arange(start, stop, step, dtype='int64')
subarr = _try_cast(arr, False)
else:
subarr = _try_cast(data, False)
# scalar like, GH
if getattr(subarr, 'ndim', 0) == 0:
if isinstance(data, list): # pragma: no cover
subarr = np.array(data, dtype=object)
elif index is not None:
value = data
# figure out the dtype from the value (upcast if necessary)
if dtype is None:
dtype, value = infer_dtype_from_scalar(value)
else:
# need to possibly convert the value here
value = maybe_cast_to_datetime(value, dtype)
subarr = construct_1d_arraylike_from_scalar(
value, len(index), dtype)
else:
return subarr.item()
# the result that we want
elif subarr.ndim == 1:
if index is not None:
# a 1-element ndarray
if len(subarr) != len(index) and len(subarr) == 1:
subarr = construct_1d_arraylike_from_scalar(
subarr[0], len(index), subarr.dtype)
elif subarr.ndim > 1:
if isinstance(data, np.ndarray):
raise Exception('Data must be 1-dimensional')
else:
subarr = com.asarray_tuplesafe(data, dtype=dtype)
# This is to prevent mixed-type Series getting all casted to
# NumPy string type, e.g. NaN --> '-1#IND'.
if issubclass(subarr.dtype.type, compat.string_types):
# GH 16605
# If not empty convert the data to dtype
# GH 19853: If data is a scalar, subarr has already the result
if not is_scalar(data):
if not np.all(isna(data)):
data = np.array(data, dtype=dtype, copy=False)
subarr = np.array(data, dtype=object, copy=copy)
return subarr
| bsd-3-clause |
phbradley/tcr-dist | setup.py | 1 | 6314 | ##
## Run this script in the main repository directory by typing
##
## python setup.py
##
## If you are getting errors, re-run the script saving the output
## and contact [email protected] for help trouble-shooting.
##
##
from os import popen, system, chdir, mkdir
from os.path import exists, isdir, isfile
from sys import stderr,exit,platform
msg = """
This script will download a set of compatible BLAST executables, parameter and
database files used by the tcr-dist pipeline, and some TCR datasets for
testing and analysis. It should be run in the main tcr-dist/ directory.
Altogether, it will end up taking about 500 Megabytes of space.
(To reduce this a bit you can delete the .tgz files in external/ after
it completes successfully.)
Do you want to proceed? [Y/n] """
ans = raw_input(msg)
if ans and ans not in 'Yy':
print 'Setup aborted.'
exit()
old_directories = ['db','external','datasets','testing_ref']
found_old_directory = False
for d in old_directories:
if exists(d):
found_old_directory = True
if found_old_directory:
msg = """
It looks like you have some old directories from a previous setup.
I need to remove db/ external/ datasets/ and testing_ref/
Is that OK? [Y/n] """
ans = raw_input(msg)
if ans and ans not in 'Yy':
print 'Setup aborted.'
exit()
for d in old_directories:
if exists(d):
cmd = 'rm -rf '+d
print cmd
system(cmd)
# I don't know how reliable this is:
mac_osx = ( platform.lower() == "darwin" )
if mac_osx:
print 'Detected mac_osx operating system -- if not, hardcode mac_osx=False in setup.py'
def download_web_file( address ):
newfile = address.split('/')[-1]
if exists(newfile):
print 'download_web_file: {} already exists, delete it to re-download'.format(newfile)
return
## try with wget
cmd = 'wget '+address
print cmd
system(cmd)
if not exists( newfile ):
print 'wget failed, trying curl'
cmd = 'curl -L {} -o {}'.format(address,newfile)
print cmd
system(cmd)
if not exists( newfile ):
print '[ERROR] unable to download (tried wget and curl) the link '+address
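## Illustrative usage sketch (hypothetical URL, not part of the original
## script): the helper saves the file under its basename in the current
## working directory, e.g.
##
## download_web_file( 'https://example.org/files/some_archive.tar.gz' )
## # creates ./some_archive.tar.gz if wget or curl succeeds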
## check for python modules
try:
import numpy
except:
print '[ERROR] failed to import numpy'
exit(1)
try:
import scipy
except:
print '[ERROR] failed to import scipy'
exit(1)
try:
import matplotlib
except:
print '[ERROR] failed to import matplotlib'
exit(1)
try:
import sklearn
except:
print """
=============================================================================
=============================================================================
[ERROR]
[ERROR] Failed to import the python module sklearn (scikit-learn)
[ERROR] Some analyses (kernelPCA plots, adjusted_mutual_information) will fail
[ERROR] Take a look at http://scikit-learn.org/stable/install.html
[ERROR]
=============================================================================
=============================================================================
"""
#exit() ## not exiting since most stuff will probably still work...
## setup the
print 'Making the ./external/ directory'
external_dir = 'external/'
if not isdir( external_dir ):
mkdir( external_dir )
chdir( external_dir )
## download blast
blastdir = './blast-2.2.16'
if not isdir( blastdir ):
if mac_osx:
# need to host this elsewhere while updating to newer version!
#address = 'ftp://ftp.ncbi.nlm.nih.gov/blast/executables/legacy.NOTSUPPORTED/2.2.16/blast-2.2.16-universal-macosx.tar.gz'
address = 'https://www.dropbox.com/s/x3e8qs9pk5w6szq/blast-2.2.16-universal-macosx.tar.gz'
else:
# ack-- need to update to a newer version of blast! temp fix move to dropbox
#address = 'ftp://ftp.ncbi.nlm.nih.gov/blast/executables/legacy.NOTSUPPORTED/2.2.16/blast-2.2.16-x64-linux.tar.gz'
address = 'https://www.dropbox.com/s/gurbwgcys6xcttm/blast-2.2.16-x64-linux.tar.gz'
tarfile = address.split('/')[-1]
if not exists( tarfile ):
print 'Downloading a rather old BLAST tool'
download_web_file( address )
if not exists( tarfile ):
print '[ERROR] download BLAST failed!'
exit(1)
cmd = 'tar -xzf '+tarfile
print cmd
system(cmd)
## download other db files
# switching to dropbox as the default since some users networks don't like the port 7007 address
address = 'https://www.dropbox.com/s/kivfp27gbz2m2st/tcrdist_extras_v2.tgz'
backup_address = 'http://xfiles.fhcrc.org:7007/bradley_p/pub/tcrdist_extras_v2.tgz'
tarfile = address.split('/')[-1]
assert tarfile == backup_address.split('/')[-1]
if not exists( tarfile ):
print 'Downloading database files'
download_web_file( address )
if not exists( tarfile ):
print '[ERROR] download database files failed, trying a backup location'
download_web_file( backup_address )
if not exists( tarfile ):
print '[ERROR] download database files failed'
exit(1)
## md5sum check
lines = popen('md5sum '+tarfile).readlines()
if lines and len(lines[0].split()) == 2:
# rhino1 tcr-dist$ md5sum tcrdist_extras_v2.tgz
# 2705f3a79152cd0382aa6c5d4a81ad0b tcrdist_extras_v2.tgz
checksum = lines[0].split()[0]
expected_checksum = '2705f3a79152cd0382aa6c5d4a81ad0b'
if checksum == expected_checksum:
print "\n[SUCCESS] md5sum checksum for tarfile matches expected, phew!\n"
else:
print "[ERROR] OH NO! md5sum checksum for tarfile does not match: actual={} expected={}"\
.format( checksum, expected_checksum )
else:
print '[WARNING] md5sum command failed or gave unparseable output, unable to check the tarfile...'
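## Sketch of a pure-python checksum as an alternative to calling the external
## md5sum binary (an illustration/assumption, not part of the original
## script):
##
## import hashlib
## def md5_of_file( fname, chunk_size=2**20 ):
##     h = hashlib.md5()
##     with open( fname, 'rb' ) as f:
##         for chunk in iter( lambda: f.read(chunk_size), b'' ):
##             h.update( chunk )
##     return h.hexdigest()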
download_dir = tarfile[:-4]
if not isdir( download_dir ):
cmd = 'tar -xzf '+tarfile
print cmd
system(cmd)
if not isdir( download_dir ):
print '[ERROR] tar failed or the database download was corrupted!'
exit(1)
cmd = 'mv {}/external/* .'.format(download_dir)
print cmd
system(cmd)
cmd = 'mv {}/db ../'.format(download_dir)
print cmd
system(cmd)
cmd = 'mv {}/datasets ../'.format(download_dir)
print cmd
system(cmd)
cmd = 'mv {}/testing_ref ../'.format(download_dir)
print cmd
system(cmd)
| mit |
LiaoPan/scikit-learn | sklearn/decomposition/truncated_svd.py | 199 | 7744 | """Truncated SVD for sparse matrices, aka latent semantic analysis (LSA).
"""
# Author: Lars Buitinck <[email protected]>
# Olivier Grisel <[email protected]>
# Michael Becker <[email protected]>
# License: 3-clause BSD.
import numpy as np
import scipy.sparse as sp
try:
from scipy.sparse.linalg import svds
except ImportError:
from ..utils.arpack import svds
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_array, as_float_array, check_random_state
from ..utils.extmath import randomized_svd, safe_sparse_dot, svd_flip
from ..utils.sparsefuncs import mean_variance_axis
__all__ = ["TruncatedSVD"]
class TruncatedSVD(BaseEstimator, TransformerMixin):
"""Dimensionality reduction using truncated SVD (aka LSA).
This transformer performs linear dimensionality reduction by means of
truncated singular value decomposition (SVD). It is very similar to PCA,
but operates on sample vectors directly, instead of on a covariance matrix.
This means it can work with scipy.sparse matrices efficiently.
In particular, truncated SVD works on term count/tf-idf matrices as
returned by the vectorizers in sklearn.feature_extraction.text. In that
context, it is known as latent semantic analysis (LSA).
This estimator supports two algorithms: a fast randomized SVD solver, and
a "naive" algorithm that uses ARPACK as an eigensolver on (X * X.T) or
(X.T * X), whichever is more efficient.
Read more in the :ref:`User Guide <LSA>`.
Parameters
----------
n_components : int, default = 2
Desired dimensionality of output data.
Must be strictly less than the number of features.
The default value is useful for visualisation. For LSA, a value of
100 is recommended.
algorithm : string, default = "randomized"
SVD solver to use. Either "arpack" for the ARPACK wrapper in SciPy
(scipy.sparse.linalg.svds), or "randomized" for the randomized
algorithm due to Halko (2009).
n_iter : int, optional
Number of iterations for randomized SVD solver. Not used by ARPACK.
random_state : int or RandomState, optional
(Seed for) pseudo-random number generator. If not given, the
numpy.random singleton is used.
tol : float, optional
Tolerance for ARPACK. 0 means machine precision. Ignored by randomized
SVD solver.
Attributes
----------
components_ : array, shape (n_components, n_features)
explained_variance_ratio_ : array, [n_components]
Percentage of variance explained by each of the selected components.
explained_variance_ : array, [n_components]
The variance of the training samples transformed by a projection to
each component.
Examples
--------
>>> from sklearn.decomposition import TruncatedSVD
>>> from sklearn.random_projection import sparse_random_matrix
>>> X = sparse_random_matrix(100, 100, density=0.01, random_state=42)
>>> svd = TruncatedSVD(n_components=5, random_state=42)
>>> svd.fit(X) # doctest: +NORMALIZE_WHITESPACE
TruncatedSVD(algorithm='randomized', n_components=5, n_iter=5,
random_state=42, tol=0.0)
>>> print(svd.explained_variance_ratio_) # doctest: +ELLIPSIS
[ 0.07825... 0.05528... 0.05445... 0.04997... 0.04134...]
>>> print(svd.explained_variance_ratio_.sum()) # doctest: +ELLIPSIS
0.27930...
See also
--------
PCA
RandomizedPCA
References
----------
Finding structure with randomness: Stochastic algorithms for constructing
approximate matrix decompositions
Halko, et al., 2009 (arXiv:0909.4061) http://arxiv.org/pdf/0909.4061
Notes
-----
SVD suffers from a problem called "sign indeterminacy", which means the
sign of the ``components_`` and the output from transform depend on the
algorithm and random state. To work around this, fit instances of this
class to data once, then keep the instance around to do transformations.
"""
def __init__(self, n_components=2, algorithm="randomized", n_iter=5,
random_state=None, tol=0.):
self.algorithm = algorithm
self.n_components = n_components
self.n_iter = n_iter
self.random_state = random_state
self.tol = tol
def fit(self, X, y=None):
"""Fit LSI model on training data X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
Returns
-------
self : object
Returns the transformer object.
"""
self.fit_transform(X)
return self
def fit_transform(self, X, y=None):
"""Fit LSI model to X and perform dimensionality reduction on X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
Returns
-------
X_new : array, shape (n_samples, n_components)
Reduced version of X. This will always be a dense array.
"""
X = as_float_array(X, copy=False)
random_state = check_random_state(self.random_state)
# If sparse and not csr or csc, convert to csr
if sp.issparse(X) and X.getformat() not in ["csr", "csc"]:
X = X.tocsr()
if self.algorithm == "arpack":
U, Sigma, VT = svds(X, k=self.n_components, tol=self.tol)
# svds doesn't abide by scipy.linalg.svd/randomized_svd
# conventions, so reverse its outputs.
Sigma = Sigma[::-1]
U, VT = svd_flip(U[:, ::-1], VT[::-1])
elif self.algorithm == "randomized":
k = self.n_components
n_features = X.shape[1]
if k >= n_features:
raise ValueError("n_components must be < n_features;"
" got %d >= %d" % (k, n_features))
U, Sigma, VT = randomized_svd(X, self.n_components,
n_iter=self.n_iter,
random_state=random_state)
else:
raise ValueError("unknown algorithm %r" % self.algorithm)
self.components_ = VT
# Calculate explained variance & explained variance ratio
X_transformed = np.dot(U, np.diag(Sigma))
self.explained_variance_ = exp_var = np.var(X_transformed, axis=0)
if sp.issparse(X):
_, full_var = mean_variance_axis(X, axis=0)
full_var = full_var.sum()
else:
full_var = np.var(X, axis=0).sum()
self.explained_variance_ratio_ = exp_var / full_var
return X_transformed
def transform(self, X):
"""Perform dimensionality reduction on X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
New data.
Returns
-------
X_new : array, shape (n_samples, n_components)
Reduced version of X. This will always be a dense array.
"""
X = check_array(X, accept_sparse='csr')
return safe_sparse_dot(X, self.components_.T)
def inverse_transform(self, X):
"""Transform X back to its original space.
Returns an array X_original whose transform would be X.
Parameters
----------
X : array-like, shape (n_samples, n_components)
New data.
Returns
-------
X_original : array, shape (n_samples, n_features)
Note that this is always a dense array.
"""
X = check_array(X)
return np.dot(X, self.components_)
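# Illustrative usage sketch (hypothetical data, not part of the original
# module): fit_transform followed by inverse_transform yields a low-rank
# reconstruction in the original feature space.
#
#     import numpy as np
#     from sklearn.decomposition import TruncatedSVD
#     X = np.random.RandomState(0).rand(20, 10)
#     svd = TruncatedSVD(n_components=3, random_state=0)
#     X_reduced = svd.fit_transform(X)             # shape (20, 3)
#     X_approx = svd.inverse_transform(X_reduced)  # shape (20, 10)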
| bsd-3-clause |
MPIBGC-TEE/CompartmentalSystems | src/CompartmentalSystems/smooth_reservoir_model.py | 1 | 32031 | """Module for symbolical treatment of smooth reservoir models.
This module handles the symbolic treatment of compartmental/reservoir/pool
models.
It does not deal with numerical computations and model simulations,
but rather defines the underlying structure of the respective model.
All fluxes or matrix entries are supposed to be SymPy expressions.
*Smooth* means that no ``Piecewise`` or ``DiracDelta`` functions should be
involved in the model description.
Counting of compartment/pool/reservoir numbers starts at zero and the
total number of pools is :math:`d`.
"""
import multiprocessing
import matplotlib.patches as mpatches
import matplotlib.pyplot as plt
import numpy as np
import networkx as nx
from copy import copy, deepcopy
from string import Template
from functools import reduce
from sympy import (zeros, Matrix, simplify, diag, eye, gcd, latex, Symbol,
flatten, Function, solve, limit, oo, ask, Q, assuming, sympify)
from sympy.printing import pprint
from . import helpers_reservoir as hr
from .cs_plotter import CSPlotter
from typing import TypeVar
class Error(Exception):
"""Generic error occurring in this module."""
pass
class SmoothReservoirModel(object):
"""General class of smooth reservoir models.
Attributes:
state_vector (SymPy dx1-matrix): The model's state vector
:math:`x`.
Its entries are SymPy symbols.
state_variables (list of str):
Names of the variables in the state vector.
Its entries are of type ``str``.
time_symbol (SymPy symbol): The model's time symbol.
input_fluxes (dict): The model's external input fluxes.
``{key1: flux1, key2: flux2}`` with ``key`` the pool number and
``flux`` a SymPy expression for the influx.
output_fluxes (dict): The model's external output fluxes.
``{key1: flux1, key2: flux2}`` with ``key`` the pool number and
``flux`` a SymPy expression for the outflux.
internal_fluxes (dict): The model's internal_fluxes.
``{key1: flux1, key2: flux2}`` with ``key = (pool_from, pool_to)``
and *flux* a SymPy expression for the flux.
"""
@classmethod
def from_state_variable_indexed_fluxes(cls,state_vector, time_symbol,
input_fluxes, output_fluxes, internal_fluxes)->"SmoothReservoirModel":
"""Return an instance of SmoothReservoirModel.
Args:
state_vector (SymPy dx1-matrix): The model's state vector
:math:`x`.
Its entries are SymPy symbols.
time_symbol (SymPy symbol): The model's time symbol.
input_fluxes (dict): The model's external input fluxes.
``{key1: flux1, key2: flux2}`` with ``key`` the symbol of the target pool. (as used in the state vector)
and ``flux`` a SymPy expression for the influx.
output_fluxes (dict): The model's external output fluxes.
``{key1: flux1, key2: flux2}`` with ``key`` the symbol of the source pool (as used in the state vector)
and ``flux`` a SymPy expression for the outflux.
internal_fluxes (dict): The model's internal_fluxes.
``{key1: flux1, key2: flux2}`` with
``key = (source pool symbol, target pool symbol)`` and ``flux`` a SymPy expression
for the flux.
Returns:
:class:`SmoothReservoirModel`
"""
# transform to integer indexed dicts
int_input = hr.to_int_keys_1(input_fluxes, state_vector)
int_output = hr.to_int_keys_1(output_fluxes, state_vector)
int_internal = hr.to_int_keys_2(internal_fluxes, state_vector)
# call normal init
return cls(state_vector, time_symbol, int_input, int_output, int_internal)
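# Illustrative sketch (hypothetical two-pool model, not part of the original
# source): the same model can be specified with symbol-keyed fluxes here or
# with integer-keyed fluxes in __init__ below.
#
#     from sympy import symbols, Matrix
#     x_1, x_2, k_1, k_2, t = symbols('x_1 x_2 k_1 k_2 t')
#     srm = SmoothReservoirModel.from_state_variable_indexed_fluxes(
#         Matrix([x_1, x_2]), t,
#         input_fluxes={x_1: 1},
#         output_fluxes={x_2: k_2*x_2},
#         internal_fluxes={(x_1, x_2): k_1*x_1},
#     )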
def __init__(self, state_vector, time_symbol,
input_fluxes={}, output_fluxes={}, internal_fluxes={}):
"""Initialize an instance of SmoothReservoirModel.
Args:
state_vector (SymPy dx1-matrix): The model's state vector
:math:`x`.
Its entries are SymPy symbols.
time_symbol (SymPy symbol): The model's time symbol.
input_fluxes (dict): The model's external input fluxes.
``{key1: flux1, key2: flux2}`` with ``key`` the pool number
and ``flux`` a SymPy expression for the influx.
output_fluxes (dict): The model's external output fluxes.
``{key1: flux1, key2: flux2}`` with ``key`` the pool number
and ``flux`` a SymPy expression for the outflux.
internal_fluxes (dict): The model's internal_fluxes.
``{key1: flux1, key2: flux2}`` with
``key = (pool_from, pool_to)`` and ``flux`` a SymPy expression
for the flux.
Returns:
:class:`SmoothReservoirModel`
"""
self.state_vector = state_vector
self.state_variables = [sv.name for sv in state_vector]
self.time_symbol=time_symbol
self.input_fluxes=input_fluxes
self.output_fluxes=output_fluxes
self.internal_fluxes=internal_fluxes
def is_state_dependent(self,expr):
efss=expr.free_symbols
svs=set([e for e in self.state_vector])
inter=efss.intersection(svs)
return not(len(inter)==0)
@property
def is_linear(self):
"""Returns True if we can make SURE that the model is linear by checking that the jacobian is not state dependent.
Note that external numerical functions of state variables are represented as sympy.Function f(x_1,x_2,...,t)
Sympy will consider the derivative of math:`df/dx_i` with respect to state variable math:`x_i` as function math:`g(x_1, x_2,...)` too, since it can not exclude this possibility if we know f only numerically.
In consequence this method will return False even if the numerical implementation of f IS linear in math:`x_1,x_2,...` .
To avoid this situation you can just reformulate linear external functions math:`f(x_1,x_2,...,t)` as linear combinations
of state independent external functions math:`f(x_1,x_2,...,t)=g_1(t)x_1+g_2(t)x_2+...` so that sympy can detect the linearity.
Returns:
bool: 'True', 'False'
"""
return not(self.is_state_dependent(self.jacobian))
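# Illustrative note (an assumption, not part of the original source): for the
# two-pool sketch above, d/dt (x_1, x_2) = (1 - k_1*x_1, k_1*x_1 - k_2*x_2),
# so the Jacobian [[-k_1, 0], [k_1, -k_2]] contains no state variables and
# is_linear returns True; an output flux like k_2*x_1*x_2 would instead make
# the Jacobian state dependent and is_linear False.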
def _output_flux_type(self, pool_from):
"""Return the type of an external output flux.
Args:
pool_from (int): The number of the pool from which the flux starts.
Returns:
str: 'linear', 'nonlinear', 'no state dependence'
Raises:
Error: If unknown flux type is encountered.
"""
sv = self.state_vector[pool_from]
flux = self.output_fluxes[pool_from]
if gcd(sv, flux) == 1:
return 'no state dependence'
# now test for dependence on further state variables,
# which would lead to nonlinearity
if (gcd(sv, flux) == sv) or gcd(sv, flux) == 1.0*sv:
flux /= sv
free_symbols = flux.free_symbols
for sv in list(self.state_vector):
if sv in free_symbols:
return 'nonlinear'
return 'linear'
else:
# probably this can never happen
raise(Error('Unknown internal flux type'))
# the following two functions are used by the 'figure' method to determine
# the color of the respective arrow
def _internal_flux_type(self, pool_from, pool_to):
"""Return the type of an internal flux.
Args:
pool_from (int): The number of the pool from which the flux starts.
pool_to (int): The number of the pool to which the flux goes.
Returns:
str: 'linear', 'nonlinear', 'no state dependence'
Raises:
Error: If unknown flux type is encountered.
"""
sv = self.state_vector[pool_from]
flux = self.internal_fluxes[(pool_from, pool_to)]
if hr.has_pw(flux):
#print("Piecewise")
#print(latex(flux))
return "nonlinear"
if gcd(sv, flux) == 1:
return 'no state dependence'
# now test for dependence on further state variables,
# which would lead to nonlinearity
if (gcd(sv, flux) == sv) or gcd(sv, flux) == 1.0*sv:
flux /= sv
free_symbols = flux.free_symbols
for sv in list(self.state_vector):
if sv in free_symbols:
return 'nonlinear'
return 'linear'
else:
# probably this can never happen
raise(Error('Unknown internal flux type'))
def _input_flux_type(self, pool_to):
"""Return the type of an external input flux.
Args:
pool_to (int): The number of the pool to which the flux contributes.
Returns:
str: 'linear', 'nonlinear', 'no state dependence'
Raises:
Error: If unknown flux type is encountered.
"""
sv = self.state_vector[pool_to]
# we compute the derivative of the appropriate row of the input vector w.r.t. all the state variables
# (This is a row of the jacobian)
u_i=Matrix([self.external_inputs[pool_to]])
s_v=Matrix(self.state_vector)
J_i=hr.jacobian(u_i,s_v)
# an input that does not depend on state variables has a zero derivative with respect
# to all state variables
if all([ j_ij==0 for j_ij in J_i]):
return 'no state dependence'
# an input that depends on state variables in a linear way
# has a constant derivatives with respect to all state variables
# (the derivative has no state variables in its free symbols)
J_ifss=J_i.free_symbols
svs=set([e for e in self.state_vector])
inter=J_ifss.intersection(svs)
if len(inter)==0:
return 'linear'
else:
return 'nonlinear'
@property
def no_input_model(self):
return SmoothReservoirModel(
self.state_vector,
self.time_symbol,
{},# no input fluxes
self.output_fluxes,
self.internal_fluxes
)
@property
def function_expressions(self):
""" Returns the superset of the free symbols of the flux expressions.
"""
flux_list=self.all_fluxes()
fun_sets=[ fun_set
# the sympify in the next line is only necessary for
# fluxexpressions that are integers (which have no atoms method)
for fun_set in map(lambda
flux:sympify(flux).atoms(Function),flux_list)]
if len(fun_sets)==0:
res=set()
else:
res=reduce( lambda A,B: A.union(B),fun_sets)
return res
@property
def free_symbols(self):
""" Returns the superset of the free symbols of the flux expressions including the state variables.
"""
flux_exprs=self.all_fluxes()
free_sym_sets=[ sym_set
# the sympification in the next line is only necessary for
# fluxexpressions that are numbers
# It does no harm on expressions
for sym_set in map(lambda sym:sympify(sym).free_symbols,flux_exprs)]
if len(free_sym_sets)==0:
res=set()
else:
res=reduce( lambda A,B: A.union(B),free_sym_sets)
return res
def subs(self,parameter_dict):
""" Returns a new instance of class: `SmoothReservoirModel` with all parameters in the parameter_dict replaced
by their values by calling subs on all the flux expressions.
Args:
parameter_dict: A dictionary with the structure {parameter_symbol:parameter_value,....}
"""
return SmoothReservoirModel(
self.state_vector,
self.time_symbol,
{k:fl.subs(parameter_dict) for k,fl in self.input_fluxes.items()},
{k:fl.subs(parameter_dict) for k,fl in self.output_fluxes.items()},
{k:fl.subs(parameter_dict) for k,fl in self.internal_fluxes.items()}
)
def __str__(self):
""" This method is called implicitly by print and gives an returns a string that gives an overview over the fluxes
"""
s = "Object of class "+str(self.__class__)
indent=2
s += "\n Input fluxes:\n"
s += hr.flux_dict_string(self.input_fluxes, indent)
s += "\n Internal fluxes:\n"
s += hr.flux_dict_string(self.internal_fluxes, indent)
s += "\n Output fluxes:\n"
s += hr.flux_dict_string(self.output_fluxes, indent)
return s
def all_fluxes(self):
# since input and output fluxes are indexed by integers they could
# overload each other in a common dictionary
# to avoid this we create a list
return [v for v in self.input_fluxes.values()] + [v for v in self.output_fluxes.values()] + [v for v in self.internal_fluxes.values()]
@property
def jacobian(self):
state_vec=Matrix(self.state_vector)
vec=Matrix(self.F)
return hr.jacobian(vec,state_vec)
@property
def is_compartmental(self):
""" Returns checks that all fluxes are nonnegative
at the time of implementation this functionality sympy did not support
relations in predicates yet.
So while the following works:
with assuming(Q.positive(x) & Q.positive(y)):
print(ask(Q.positive(2*x+y)
it is not possible yet to get a meaningful answer to:
with assuming(Q.is_true(x>0) & Q.is_true(y>0)):
print(ask(Q.positive(2*x+y)
We therefore cannot implement more elaborate assumptions like k_1-(a_12+a_32)>=0
but still can assume all the state_variables and the time_symbol to be nonnegative
Therefore we can check the compartmental_property best after all paramater value have been substituted.
At the moment the function throws an exception if this is not the case.
"""
#check if all free symbols have been removed
allowed_symbs= set( [sym for sym in self.state_vector])
if hasattr(self,"time_symbol"):
allowed_symbs.add(self.time_symbol)
if not(allowed_symbs.issuperset(self.free_symbols)):
raise Exception(
Template("Sympy can not check the parameters without assumptions. Try to substitute all variables except the state variables and the time symbol. Use the subs methot of the class {c}").subs(c=self__class__)
)
def f(expr):
res= ask(Q.nonnegative(expr))
if res is None:
raise Exception(
Template("""Sympy can not (yet) check the parameters even with correct assumptions,\
since relations (<,>) are not implemented yet.
It gave up for the following expression: ${e}."""
).substitute(e=expr)
)
return res
# making a list of predicated stating that all state variables are nonnegative
predList=[Q.nonnegative(sym) for sym in self.state_vector]
if hasattr(self,"time_symbol"):
predList+=[Q.nonnegative(self.time_symbol)]
with assuming(*predList):
# under this assumption eveluate all fluxes
all_fluxes_nonnegative=all(map(f,self.all_fluxes()))
return all_fluxes_nonnegative
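    # (sketch) typical use: substitute numeric values for all parameters first,
    # e.g. srm.subs({k_1: 0.1, u_1: 1}).is_compartmental, where k_1 and u_1
    # stand for the model's parameter symbols (illustrative names only).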
# alternative constructor based on the formulation f=u+Bx
@classmethod
def from_B_u(cls, state_vector, time_symbol, B, u)->'SmoothReservoirModel':
"""Construct and return a :class:`SmoothReservoirModel` instance from
:math:`\\dot{x}=B\\,x+u`
Args:
state_vector (SymPy dx1-matrix): The model's state vector
:math:`x`.
Its entries are SymPy symbols.
time_symbol (SymPy symbol): The model's time symbol.
B (SymPy dxd-matrix): The model's compartmental matrix.
u (SymPy dx1-matrix): The model's external input vector.
Returns:
:class:`SmoothReservoirModel`
"""
# if not(u):
# # fixme mm:
# # make sure that ReservoirModels standard constructor can handle an
# # empty dict and produce the empty matrix only if necessary later
# u=zeros(x.rows,1)
# fixme mm:
# we do not seem to have a check that makes sure
# that the argument B is compartmental
# maybe the fixme belongs rather to the SmoothModelRun class since
# we perhaps need parameters
input_fluxes = hr.in_fluxes_by_index(state_vector, u)
output_fluxes = hr.out_fluxes_by_index(state_vector, B)
internal_fluxes = hr.internal_fluxes_by_index(state_vector, B)
# call the standard constructor
srm = SmoothReservoirModel(state_vector, time_symbol,
input_fluxes, output_fluxes, internal_fluxes)
return srm
@property
def state_variable_set(self):
return set(self.state_vector)
@property
def F(self):
"""SymPy dx1-matrix: The right hand side of the differential equation
:math:`\\dot{x}=B\\,x+u`."""
v = (self.external_inputs + self.internal_inputs
- self.internal_outputs - self.external_outputs)
#for i in range(len(v)):
# v[i] = simplify(v[i])
return v
@property
def external_inputs(self):
"""SymPy dx1-matrix: Return the vector of external inputs."""
u = zeros(self.nr_pools, 1)
for k, val in self.input_fluxes.items():
u[k] = val
return u
@property
def external_outputs(self):
"""SymPy dx1-matrix: Return the vector of external outputs."""
o = zeros(self.nr_pools, 1)
for k, val in self.output_fluxes.items():
o[k] = val
return o
@property
def internal_inputs(self):
"""SymPy dx1-matrix: Return the vector of internal inputs."""
n = self.nr_pools
u_int = zeros(n, 1)
for ln in range(n):
# find all entries in the fluxes dict that have the target key==ln
expr = 0
for k, val in self.internal_fluxes.items():
if k[1] == ln: #the second part of the tupel is the recipient
expr += val
u_int[ln] = expr
return u_int
@property
def internal_outputs(self):
"""SymPy dx1-matrix: Return the vector of internal outputs."""
n = self.nr_pools
o_int = zeros(n, 1)
for ln in range(n):
# find all entries in the fluxes dict that have the target key==ln
expr = 0
for k, val in self.internal_fluxes.items():
if k[0] == ln:# the first part of the tupel is the donator
expr += val
o_int[ln] = expr
return o_int
@property
def nr_pools(self):
"""int: Return the number of pools involved in the model."""
return(len(self.state_variables))
def port_controlled_Hamiltonian_representation(self):
"""tuple: :math:`J, R, N, x, u` from
:math:`\\dot{x} = [J(x)-R(x)] \\frac{\\partial}{\\partial x}H+u`.
with :math:`H=\\sum_i x_i \\implies \\frac{\\partial}{\\partial x}H =(1,1,...,1)`
Returns:
tuple:
- J (skew symmetric SymPy dxd-matrix) of internal fluxbalances:
:math:`J_{i,j}=r_{j,i}-r_{i,j}`
- Q (SymPy dxd-matrix): Diagonal matrix describing the dissipation
rates (outfluxes).
- x (SymPy dx1-matrix): The model's state vector.
- u (SymPy dx1-matrix): The model's external input vector.
"""
nr_pools = self.nr_pools
inputs = self.input_fluxes
outputs = self.output_fluxes
internal_fluxes = self.internal_fluxes
C = self.state_vector
# convert inputs
u = self.external_inputs
# calculate decomposition operators
decomp_fluxes = []
for pool in range(nr_pools):
if pool in outputs.keys():
decomp_flux = outputs[pool]
else:
decomp_flux = 0
decomp_fluxes.append(simplify(decomp_flux))
Q = diag(*decomp_fluxes)
# calculate the skewsymmetric matrix J
J = zeros(nr_pools)
for (i,j), flux in internal_fluxes.items():
J[j,i] +=flux
J[i,j] -=flux
return (J, Q, C, u)
def xi_T_N_u_representation(self, factor_out_xi=True):
"""tuple: :math:`\\xi, T, N, x, u` from
:math:`\\dot{x} = \\xi\\,T\\,N\\,x+u`.
Args:
factor_out_xi (bool): If true, xi is extracted from the matrix,
otherwise :math:`xi=1` will be returned.
(Defaults to ``True``.)
Returns:
tuple:
- xi (SymPy number): Environmental coefficient.
- T (SymPy dxd-matrix): Internal fluxes. Main diagonal contains
``-1`` entries.
- N (SymPy dxd-matrix): Diagonal matrix containing the decomposition
rates.
- x (SymPy dx1-matrix): The model's state vector.
- u (SymPy dx1-matrix): The model's external input vector.
"""
nr_pools = self.nr_pools
inputs = self.input_fluxes
outputs = self.output_fluxes
internal_fluxes = self.internal_fluxes
C = self.state_vector
# convert inputs
u = self.external_inputs
R = hr.release_operator_1(
outputs,
internal_fluxes,
C
)
# calculate transition operator
T = hr.transfer_operator_3(
internal_fluxes,
R,
C
)
# try to extract xi from N and T
if factor_out_xi:
xi = hr.factor_out_from_matrix(R)
N = R/xi
# Note mm 02/17/2021
# since T has -1 on the main diagonal
# the gcd will be always one so the
# factor_out_from_matrix(T) is not
# necessarry.
        else:
            xi = 1
            N = R
return (xi, T, N, C, u)
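    # (sketch) consistency check: the product xi * T * N reproduces the matrix
    # returned by `compartmental_matrix` below, i.e.
    # simplify(xi * T * N - srm.compartmental_matrix) is the zero matrix.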
@property
def compartmental_matrix(self):
"""SymPy Matrix: :math:`B` from
:math:`\\dot{x} = B\\,x+u`.
Returns:
SymPy dxd-matrix: :math:`B = \\xi\\,T\\,N`
"""
# we could also use the more expensive
# xi, T, N, C, u = self.xi_T_N_u_representation(factor_out_xi=False))
return hr.compartmental_matrix_1(
self.output_fluxes,
self.internal_fluxes,
self.state_vector
)
def age_moment_system(self, max_order):
"""Return the age moment system of the model.
Args:
max_order (int): The maximum order up to which the age moment
system is created (1 for the mean).
Returns:
tuple:
- extended_state (SymPy d*(max_order+1)x1-matrix): The extended
state vector of the age moment system.
- extended_rhs (SymPy d*(max_order+1)x1-matrix): The extended right
hand side of the age moment ODE.
"""
u = self.external_inputs
#X = Matrix(self.state_variables)
X = self.state_vector
B = self.compartmental_matrix
n = self.nr_pools
extended_state = list(X)
former_additional_states = [1]*n
extended_rhs = list(self.F)
for k in range(1, max_order+1):
additional_states = [Symbol(str(x)+'_moment_'+str(k)) for x in X]
g = [k*former_additional_states[i]
+(sum([(additional_states[j]-additional_states[i])
*B[i,j]*X[j] for j in range(n)])
-additional_states[i]*u[i])/X[i] for i in range(n)]
former_additional_states = additional_states
extended_state.append(additional_states)
extended_rhs.append(g)
extended_state = Matrix(flatten(extended_state))
extended_rhs = Matrix(flatten(extended_rhs))
return (extended_state, extended_rhs)
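    # (sketch) for a d-pool model and max_order=1 the extended state has 2*d
    # entries: the pool contents followed by their first age moments (mean ages);
    # extended_rhs stacks the original right hand side F with the moment ODEs.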
def plot_pools_and_fluxes(self, ax, mutation_scale = 50, fontsize = 24, thumbnail = False, legend=True, color_fluxes=True):
ax.set_axis_off()
arrowstyle = "simple"
visible_pool_names = True
if color_fluxes:
pipe_colors = {
'linear': 'blue',
'nonlinear': 'green',
'no state dependence': 'red'
}
else:
pipe_colors = {
'linear': 'blue',
'nonlinear': 'blue',
'no state dependence': 'blue'
}
if thumbnail:
arrowstyle = "-"
visible_pool_names = False
csp = CSPlotter(
self.state_vector,
{
k: self._input_flux_type(k) for k in self.input_fluxes
if self.input_fluxes[k] != 0
},
{
k: self._output_flux_type(k) for k in self.output_fluxes
if self.output_fluxes[k] != 0
},
{
k: self._internal_flux_type(*k) for k in self.internal_fluxes
if self.internal_fluxes[k] != 0
},
pipe_colors,
visible_pool_names = visible_pool_names,
arrowstyle = arrowstyle,
fontsize = fontsize
)
csp.plot_pools_and_fluxes(ax)
if legend:
csp.legend(ax)
def figure(self, figure_size = (7,7), logo = False, thumbnail = False):
"""Return a figure representing the reservoir model.
Args:
figure_size (2-tuple, optional): Width and height of the figure.
Defaults to (7,7).
logo (bool, optional): If True, figure_size set to (3,3), no legend,
smaller font size. Defaults to False.
thumbnail (bool, optional): If True, produce a very small version,
no legend. Defaults to False.
Returns:
Matplotlib figure: Figure representing the reservoir model.
"""
fontsize = 24
mutation_scale = 50
#mutation_scale=20
arrowstyle = "simple"
fontsize = 24
legend = True
if thumbnail:
mutation_scale = 10
legend = False
arrowstyle = "-"
figure_size = (0.7,0.7)
if logo:
mutation_scale = 15
legend = False
fontsize = 16
figure_size = (3,3)
fig = plt.figure(figsize=figure_size, dpi=300)
if legend:
#ax = fig.add_axes([0,0,1,0.9])
ax = fig.add_axes([0,0,0.8,0.8])
else:
#ax = fig.add_axes([0,0,1,1])
ax = fig.add_subplot(1,1,1)
self.plot_pools_and_fluxes(ax, mutation_scale = mutation_scale, fontsize = fontsize, thumbnail = thumbnail, legend = legend)
return fig
def nxgraphs(self):
return hr.nxgraphs(
self.state_vector,
            self.input_fluxes,
            self.internal_fluxes,
            self.output_fluxes,
)
##### 14C methods #####
# def to_14C_only(self, decay_symbol_name, Fa_expr_name):
# """Construct and return a :class:`SmoothReservoirModel` instance that
# models the 14C component of the original model.
#
# Args:
# decay_symbol_name (str): The name of the 14C decay rate symbol.
# Fa_expr_name(str): The name of the symbol to be used for the
# atmospheric C14 fraction function.
# Returns:
# :class:`SmoothReservoirModel`
# """
## state_vector_14C = Matrix(
## self.nr_pools,
## 1,
## [Symbol(sv.name+'_14C') for sv in self.state_vector]
## )
# state_vector_14C = self.state_vector
#
# decay_symbol = Symbol(decay_symbol_name)
# B_14C = copy(self.compartmental_matrix) - decay_symbol*eye(self.nr_pools)
# u = self.external_inputs
# Fa_expr = Function(Fa_expr_name)(self.time_symbol)
# u_14C = Matrix(self.nr_pools, 1, [expr*Fa_expr for expr in u])
#
# srm_14C = SmoothReservoirModel.from_B_u(
# state_vector_14C,
# self.time_symbol,
# B_14C,
# u_14C
# )
#
# return srm_14C
#
# def to_14C_explicit(self, decay_symbol_name, Fa_expr_name):
# """Construct and return a :class:`SmoothReservoirModel` instance that
# models the 14C component additional to the original model.
#
# Args:
# decay_symbol_name (str): The name of the 14C decay rate symbol.
# Fa_expr_name(str): The name of the symbol to be used for the
# atmospheric C14 fraction function.
# Returns:
# :class:`SmoothReservoirModel`
# """
# state_vector = self.state_vector
# B, u = self.compartmental_matrix, self.external_inputs
# srm_14C = self.to_14C_only(decay_symbol_name, Fa_expr_name)
# state_vector_14C = srm_14C.state_vector
# B_C14 = srm_14C.compartmental_matrix
# u_14C = srm_14C.external_inputs
#
# nr_pools = self.nr_pools
#
# state_vector_total = Matrix(nr_pools*2, 1, [1]*(nr_pools*2))
# state_vector_total[:nr_pools,0] = state_vector
# state_vector_total[nr_pools:,0] = state_vector_14C
#
# B_total = eye(nr_pools*2)
# B_total[:nr_pools, :nr_pools] = B
# B_total[nr_pools:, nr_pools:] = B_C14
#
# u_total = Matrix(nr_pools*2, 1, [1]*(nr_pools*2))
# u_total[:nr_pools,0] = u
# u_total[nr_pools:,0] = u_14C
#
# srm_total = SmoothReservoirModel.from_B_u(
# state_vector_total,
# self.time_symbol,
# B_total,
# u_total)
#
# return srm_total
def steady_states(self, par_set = None):
if par_set is None:
#compute steady state formulas
par_set = {}
# try to calculate the steady states for ten seconds
# after ten seconds stop it
q = multiprocessing.Queue()
def calc_steady_states(q):
ss = solve(self.F.subs(par_set), self.state_vector, dict=True)
q.put(ss)
p = multiprocessing.Process(target=calc_steady_states, args=(q,))
p.start()
p.join(10)
if p.is_alive():
p.terminate()
p.join()
steady_states = []
else:
steady_states = q.get()
formal_steady_states = []
for ss in steady_states:
result = []
ss_dict = {}
for sv_symbol in self.state_vector:
if sv_symbol in ss.keys():
ss[sv_symbol] = simplify(ss[sv_symbol])
else:
ss[sv_symbol] = sv_symbol
ss_expr = ss[sv_symbol]
if self.time_symbol in ss_expr.free_symbols:
# take limit of time to infinity if steady state still depends on time
ss_expr = limit(ss_expr, self.time_symbol, oo)
ss_dict[sv_symbol.name] = ss_expr
formal_steady_states.append(ss_dict)
return formal_steady_states
##### functions for internal use only #####
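# A minimal usage sketch, assuming a two-pool linear model; the symbol names
# (x_1, x_2, t, k_1, k_2, u_1) and the chosen matrices are illustrative only
# and not part of the library API.
if __name__ == "__main__":
    from sympy import symbols, Matrix
    x_1, x_2, t, k_1, k_2, u_1 = symbols("x_1 x_2 t k_1 k_2 u_1")
    sv = Matrix([x_1, x_2])
    # pool 1 loses material at rate k_1; half of that loss feeds pool 2
    B = Matrix([[-k_1, 0],
                [k_1 / 2, -k_2]])
    u = Matrix([u_1, 0])
    srm = SmoothReservoirModel.from_B_u(sv, t, B, u)
    print(srm)                       # overview of input/internal/output fluxes
    print(srm.compartmental_matrix)  # reproduces B
    print(srm.external_inputs)       # reproduces u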
| mit |
tapomay/libgenetic | src_python/roc/roc.py | 1 | 1923 | import math
import matplotlib.pyplot as plt
from texttable import Texttable
def load():
with open('positives') as f:
positives = f.read()
positives = positives.split()
positives = [float(v) for v in positives]
with open('negatives') as f:
negatives = f.read()
negatives = negatives.split()
negatives = [float(v) for v in negatives]
return (positives, negatives)
def roc_compute(positives, negatives):
minThresh = math.floor(min(positives + negatives))
maxThresh = math.ceil(max(positives + negatives))
thresh = minThresh
fpr_X = []
tpr_Y = []
vals = []
vals.append(["Thresh", "TP", "FP", "TN", "FN", "FPR", "TPR"])
while thresh < maxThresh:
tp = len([v for v in positives if v >= thresh])
fn = len([v for v in positives if v < thresh])
        # count a score >= thresh as a positive prediction for both classes,
        # consistent with the tp/fn counts above
        tn = len([v for v in negatives if v < thresh])
        fp = len([v for v in negatives if v >= thresh])
fpr = fp / float(fp + tn) * 100 if (fp+tn) != 0 else -1
tpr = tp / float(tp + fn) * 100 if (tp+fn) != 0 else -1
if fpr !=-1 and tpr !=-1:
stat = [thresh, tp, fp, tn, fn, fpr, tpr]
print("thresh: %f, tp:%d, fp:%d, tn:%d, fn:%d, fpr:%f, tpr: %f" % (thresh, tp, fp, tn, fn, fpr, tpr))
fpr_X.append(fpr)
tpr_Y.append(tpr)
            # ROC curve: FPR on the x-axis vs TPR on the y-axis
vals.append(stat)
thresh += 0.01
return (fpr_X, tpr_Y, vals)
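# A rough AUC estimate (sketch) from the ROC points produced by roc_compute
# above, using the trapezoidal rule; fpr/tpr are in percent, hence the final
# normalisation. The helper name roc_auc is illustrative.
def roc_auc(fpr_X, tpr_Y):
    pts = sorted(zip(fpr_X, tpr_Y))
    auc = 0.0
    for (x0, y0), (x1, y1) in zip(pts[:-1], pts[1:]):
        auc += (x1 - x0) * (y0 + y1) / 2.0
    return auc / (100.0 * 100.0)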
def main():
(positives, negatives) = load()
print("POS: %s" % positives)
print("NEG: %s" % negatives)
(fpr_X, tpr_Y, vals) = roc_compute(positives, negatives)
t = Texttable()
for v in vals:
t.add_row(v)
print(t.draw())
# plot
plt.plot(fpr_X, tpr_Y)
plt.ylabel('TPR = TP/(TP + FN)')
plt.xlabel('FPR = FP/(FP+TN)')
plt.axis([0, 100, 0, 100])
plt.show()
if __name__ == "__main__":
main()
| apache-2.0 |
agutieda/QuantEcon.py | examples/optgrowth_v0.py | 7 | 2024 | """
Filename: optgrowth_v0.py
Authors: John Stachurski and Thomas Sargent
A first pass at solving the optimal growth problem via value function
iteration. A more general version is provided in optgrowth.py.
"""
from __future__ import division # Omit for Python 3.x
import matplotlib.pyplot as plt
import numpy as np
from numpy import log
from scipy.optimize import fminbound
from scipy import interp
# Primitives and grid
alpha = 0.65
beta = 0.95
grid_max = 2
grid_size = 150
grid = np.linspace(1e-6, grid_max, grid_size)
# Exact solution
ab = alpha * beta
c1 = (log(1 - ab) + log(ab) * ab / (1 - ab)) / (1 - beta)
c2 = alpha / (1 - ab)
def v_star(k):
return c1 + c2 * log(k)
def bellman_operator(w):
"""
The approximate Bellman operator, which computes and returns the updated
value function Tw on the grid points.
* w is a flat NumPy array with len(w) = len(grid)
The vector w represents the value of the input function on the grid
points.
"""
# === Apply linear interpolation to w === #
Aw = lambda x: interp(x, grid, w)
# === set Tw[i] equal to max_c { log(c) + beta w(f(k_i) - c)} === #
Tw = np.empty(grid_size)
for i, k in enumerate(grid):
objective = lambda c: - log(c) - beta * Aw(k**alpha - c)
c_star = fminbound(objective, 1e-6, k**alpha)
Tw[i] = - objective(c_star)
return Tw
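# A small helper (sketch): v_star above is the exact solution, so the sup-norm
# error of an iterate can be tracked; under the contraction property it should
# shrink by roughly a factor of beta per application of the Bellman operator.
def sup_norm_error(w):
    "Maximum absolute deviation of w from the exact value function on the grid."
    return np.max(np.abs(w - v_star(grid)))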
# === If file is run directly, not imported, produce figure === #
if __name__ == '__main__':
w = 5 * log(grid) - 25 # An initial condition -- fairly arbitrary
n = 35
fig, ax = plt.subplots()
ax.set_ylim(-40, -20)
ax.set_xlim(np.min(grid), np.max(grid))
lb = 'initial condition'
ax.plot(grid, w, color=plt.cm.jet(0), lw=2, alpha=0.6, label=lb)
for i in range(n):
w = bellman_operator(w)
ax.plot(grid, w, color=plt.cm.jet(i / n), lw=2, alpha=0.6)
lb = 'true value function'
ax.plot(grid, v_star(grid), 'k-', lw=2, alpha=0.8, label=lb)
ax.legend(loc='upper left')
plt.show()
| bsd-3-clause |
wzbozon/statsmodels | statsmodels/examples/ex_multivar_kde.py | 34 | 1504 |
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
from mpl_toolkits.mplot3d import axes3d
import statsmodels.api as sm
"""
This example illustrates the nonparametric estimation of a
bivariate bi-modal distribution that is a mixture of two normal
distributions.
author: George Panterov
"""
if __name__ == '__main__':
np.random.seed(123456)
# generate the data
nobs = 500
BW = 'cv_ml'
mu1 = [3, 4]
mu2 = [6, 1]
cov1 = np.asarray([[1, 0.7], [0.7, 1]])
cov2 = np.asarray([[1, -0.7], [-0.7, 1]])
ix = np.random.uniform(size=nobs) > 0.5
V = np.random.multivariate_normal(mu1, cov1, size=nobs)
V[ix, :] = np.random.multivariate_normal(mu2, cov2, size=nobs)[ix, :]
x = V[:, 0]
y = V[:, 1]
dens = sm.nonparametric.KDEMultivariate(data=[x, y], var_type='cc', bw=BW,
defaults=sm.nonparametric.EstimatorSettings(efficient=True))
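    # (sketch) the bandwidths chosen by the 'cv_ml' cross-validation can be
    # inspected on the fitted estimator; one value per variable is expected.
    print("estimated bandwidths:", dens.bw)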
supportx = np.linspace(min(x), max(x), 60)
supporty = np.linspace(min(y), max(y), 60)
X, Y = np.meshgrid(supportx, supporty)
edat = np.column_stack([X.ravel(), Y.ravel()])
Z = dens.pdf(edat).reshape(X.shape)
# plot
fig = plt.figure(1)
ax = fig.gca(projection='3d')
surf = ax.plot_surface(X, Y, Z, rstride=1, cstride=1, cmap=cm.jet,
linewidth=0, antialiased=False)
fig.colorbar(surf, shrink=0.5, aspect=5)
plt.figure(2)
plt.imshow(Z)
plt.show()
| bsd-3-clause |
russel1237/scikit-learn | examples/tree/plot_tree_regression_multioutput.py | 206 | 1800 | """
===================================================================
Multi-output Decision Tree Regression
===================================================================
An example to illustrate multi-output regression with decision tree.
The :ref:`decision trees <tree>`
is used to predict simultaneously the noisy x and y observations of a circle
given a single underlying feature. As a result, it learns local linear
regressions approximating the circle.
We can see that if the maximum depth of the tree (controlled by the
`max_depth` parameter) is set too high, the decision trees learn too fine
details of the training data and learn from the noise, i.e. they overfit.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.tree import DecisionTreeRegressor
# Create a random dataset
rng = np.random.RandomState(1)
X = np.sort(200 * rng.rand(100, 1) - 100, axis=0)
y = np.array([np.pi * np.sin(X).ravel(), np.pi * np.cos(X).ravel()]).T
y[::5, :] += (0.5 - rng.rand(20, 2))
# Fit regression model
regr_1 = DecisionTreeRegressor(max_depth=2)
regr_2 = DecisionTreeRegressor(max_depth=5)
regr_3 = DecisionTreeRegressor(max_depth=8)
regr_1.fit(X, y)
regr_2.fit(X, y)
regr_3.fit(X, y)
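# (sketch) training R^2 scores make the overfitting described in the docstring
# concrete: deeper trees track the noisy training targets more closely.
for name, regr in (("max_depth=2", regr_1), ("max_depth=5", regr_2),
                   ("max_depth=8", regr_3)):
    print("%s training R^2: %.3f" % (name, regr.score(X, y)))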
# Predict
X_test = np.arange(-100.0, 100.0, 0.01)[:, np.newaxis]
y_1 = regr_1.predict(X_test)
y_2 = regr_2.predict(X_test)
y_3 = regr_3.predict(X_test)
# Plot the results
plt.figure()
plt.scatter(y[:, 0], y[:, 1], c="k", label="data")
plt.scatter(y_1[:, 0], y_1[:, 1], c="g", label="max_depth=2")
plt.scatter(y_2[:, 0], y_2[:, 1], c="r", label="max_depth=5")
plt.scatter(y_3[:, 0], y_3[:, 1], c="b", label="max_depth=8")
plt.xlim([-6, 6])
plt.ylim([-6, 6])
plt.xlabel("data")
plt.ylabel("target")
plt.title("Multi-output Decision Tree Regression")
plt.legend()
plt.show()
| bsd-3-clause |
sowe9385/qiime | qiime/make_otu_heatmap.py | 15 | 7171 | from __future__ import division
__author__ = "Dan Knights"
__copyright__ = "Copyright 2011, The QIIME project"
__credits__ = ["Dan Knights", "Greg Caporaso", "Jai Ram Rideout"]
__license__ = "GPL"
__version__ = "1.9.1-dev"
__maintainer__ = "Dan Knights"
__email__ = "[email protected]"
import numpy as np
import matplotlib
matplotlib.use('Agg', warn=False)
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
from scipy.cluster.hierarchy import linkage
from skbio.tree import TreeNode
from skbio.diversity.beta import pw_distances
from qiime.parse import parse_newick, PhyloNode
from qiime.filter import filter_samples_from_otu_table
def get_overlapping_samples(map_rows, otu_table):
"""Extracts only samples contained in otu table and mapping file.
Returns: new_map_rows, new_otu_table
"""
map_sample_ids = zip(*map_rows)[0]
shared_ids = set(map_sample_ids) & set(otu_table.ids())
otu_table = filter_samples_from_otu_table(otu_table, shared_ids, -np.inf,
np.inf)
new_map = []
for sam_id in map_sample_ids:
if sam_id in shared_ids:
ix = map_sample_ids.index(sam_id)
new_map.append(map_rows[ix])
return new_map, otu_table
def extract_metadata_column(sample_ids, metadata, category):
"""Extracts values from the given metadata column"""
col_ix = metadata[1].index(category)
map_sample_ids = zip(*metadata[0])[0]
category_labels = []
for i, sample_id in enumerate(sample_ids):
if sample_id in map_sample_ids:
row_ix = map_sample_ids.index(sample_id)
entry = metadata[0][row_ix][col_ix]
category_labels.append(entry)
return category_labels
def get_order_from_categories(otu_table, category_labels):
"""Groups samples by category values; clusters within each group"""
category_labels = np.array(category_labels)
sample_order = []
for label in np.unique(category_labels):
label_ix = category_labels == label
selected = [s for (i, s) in zip(label_ix, otu_table.ids()) if i]
sub_otu_table = filter_samples_from_otu_table(otu_table, selected,
-np.inf, np.inf)
data = np.asarray(list(sub_otu_table.iter_data(axis='observation')))
label_ix_ix = get_clusters(data, axis='column')
sample_order += list(np.nonzero(label_ix)[0][np.array(label_ix_ix)])
return np.array(sample_order)
def get_order_from_tree(ids, tree_text):
"""Returns the indices that would sort ids by tree tip order"""
tree = parse_newick(tree_text, PhyloNode)
ordered_ids = []
for tip in tree.iterTips():
if tip.Name in ids:
ordered_ids.append(tip.Name)
return names_to_indices(ids, ordered_ids)
def make_otu_labels(otu_ids, lineages, n_levels=1):
"""Returns 'pretty' OTU labels: 'Lineage substring (OTU ID)'
Lineage substring includes the last n_levels lineage levels
"""
if len(lineages[0]) > 0:
otu_labels = []
for i, lineage in enumerate(lineages):
if n_levels > len(lineage):
otu_label = '%s (%s)' % (';'.join(lineage), otu_ids[i])
else:
otu_label = '%s (%s)' \
% (';'.join(lineage[-n_levels:]), otu_ids[i])
otu_labels.append(otu_label)
otu_labels = [lab.replace('"', '') for lab in otu_labels]
else:
otu_labels = otu_ids
return otu_labels
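# (sketch) e.g. make_otu_labels(['otu1'], [['k__Bacteria', 'p__Firmicutes']])
# returns ['p__Firmicutes (otu1)'] with the default n_levels=1; the example
# IDs and lineage strings are illustrative.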
def names_to_indices(names, ordered_names):
"""Returns the indices that would sort 'names' like 'ordered_names'
"""
indices = []
names_list = list(names)
for ordered_name in ordered_names:
if ordered_name in names_list:
indices.append(names_list.index(ordered_name))
return np.array(indices)
def get_log_transform(otu_table):
"""Returns log10 of the data"""
if otu_table.nnz == 0:
raise ValueError('All values in the OTU table are zero!')
# take log of all values
def h(s_v, s_id, s_md):
return np.log10(s_v)
return otu_table.transform(h, axis='sample', inplace=False)
def get_clusters(x_original, axis='row'):
"""Performs UPGMA clustering using euclidean distances"""
x = x_original.copy()
if axis == 'column':
x = x.T
nr = x.shape[0]
row_dissims = pw_distances(x, ids=map(str, range(nr)), metric='euclidean')
# do upgma - rows
# Average in SciPy's cluster.hierarchy.linkage is UPGMA
linkage_matrix = linkage(row_dissims.condensed_form(), method='average')
tree = TreeNode.from_linkage_matrix(linkage_matrix, row_dissims.ids)
return [int(tip.name) for tip in tree.tips()]
def get_fontsize(numrows):
"""Returns the fontsize needed to make text fit within each row.
"""
thresholds = [25, 50, 75, 100, 125]
sizes = [5, 4, 3, 2, 1.5, 1]
i = 0
while numrows > thresholds[i]:
i += 1
if i == len(thresholds):
break
return sizes[i]
def plot_heatmap(otu_table, row_labels, col_labels, filename, imagetype='pdf',
width=5, height=5, dpi=None, textborder=.25,
color_scheme='YlGn'):
"""Create a heatmap plot, save as a pdf by default.
'width', 'height' are in inches
'textborder' is the fraction of the figure allocated for the
tick labels on the x and y axes
color_scheme: choices can be found at
http://matplotlib.org/examples/color/colormaps_reference.html
"""
nrow = otu_table.length(axis='observation')
ncol = otu_table.length(axis='sample')
# determine appropriate font sizes for tick labels
row_fontsize = get_fontsize(nrow)
col_fontsize = get_fontsize(ncol)
# create figure and plot heatmap
fig, ax = plt.subplots(figsize=(width, height))
data = list(otu_table.iter_data(axis='observation'))
im = plt.imshow(np.fliplr(data), interpolation='nearest', aspect='auto',
cmap=color_scheme)
# imshow is offset by .5 for some reason
plt.xlim(-.5, ncol - .5)
plt.ylim(-.5, nrow - .5)
# add ticklabels to axes
plt.xticks(np.arange(ncol), col_labels[::-1], fontsize=col_fontsize,
rotation=90)
plt.yticks(np.arange(nrow), row_labels, fontsize=row_fontsize)
# turn off tick marks
ax.xaxis.set_ticks_position('none')
ax.yaxis.set_ticks_position('none')
# add space for tick labels
fig.subplots_adjust(left=textborder, bottom=textborder)
# create colorbar (legend) in its own axes so that tight_layout will
# respect both the heatmap and colorbar when it makes room for everything.
# code based on example in:
# http://matplotlib.org/users/tight_layout_guide.html
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", "5%", pad="3%")
cb = plt.colorbar(im, cax=cax)
# set colorbar tick labels to a reasonable value (normal is large)
for t in cb.ax.get_yticklabels():
t.set_fontsize(5)
plt.tight_layout()
fig.savefig(filename, format=imagetype, dpi=dpi)
| gpl-2.0 |
frank-tancf/scikit-learn | sklearn/discriminant_analysis.py | 22 | 28485 | """
Linear Discriminant Analysis and Quadratic Discriminant Analysis
"""
# Authors: Clemens Brunner
# Martin Billinger
# Matthieu Perrot
# Mathieu Blondel
# License: BSD 3-Clause
from __future__ import print_function
import warnings
import numpy as np
from scipy import linalg
from .externals.six import string_types
from .externals.six.moves import xrange
from .base import BaseEstimator, TransformerMixin, ClassifierMixin
from .linear_model.base import LinearClassifierMixin
from .covariance import ledoit_wolf, empirical_covariance, shrunk_covariance
from .utils.multiclass import unique_labels
from .utils import check_array, check_X_y
from .utils.validation import check_is_fitted
from .utils.fixes import bincount
from .utils.multiclass import check_classification_targets
from .preprocessing import StandardScaler
__all__ = ['LinearDiscriminantAnalysis', 'QuadraticDiscriminantAnalysis']
def _cov(X, shrinkage=None):
"""Estimate covariance matrix (using optional shrinkage).
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None or 'empirical': no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage parameter.
Returns
-------
s : array, shape (n_features, n_features)
Estimated covariance matrix.
"""
shrinkage = "empirical" if shrinkage is None else shrinkage
if isinstance(shrinkage, string_types):
if shrinkage == 'auto':
sc = StandardScaler() # standardize features
X = sc.fit_transform(X)
s = ledoit_wolf(X)[0]
s = sc.scale_[:, np.newaxis] * s * sc.scale_[np.newaxis, :] # rescale
elif shrinkage == 'empirical':
s = empirical_covariance(X)
else:
raise ValueError('unknown shrinkage parameter')
elif isinstance(shrinkage, float) or isinstance(shrinkage, int):
if shrinkage < 0 or shrinkage > 1:
raise ValueError('shrinkage parameter must be between 0 and 1')
s = shrunk_covariance(empirical_covariance(X), shrinkage)
else:
raise TypeError('shrinkage must be of string or int type')
return s
def _class_means(X, y):
"""Compute class means.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
Returns
-------
means : array-like, shape (n_features,)
Class means.
"""
means = []
classes = np.unique(y)
for group in classes:
Xg = X[y == group, :]
means.append(Xg.mean(0))
return np.asarray(means)
def _class_cov(X, y, priors=None, shrinkage=None):
"""Compute class covariance matrix.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
priors : array-like, shape (n_classes,)
Class priors.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None: no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage parameter.
Returns
-------
cov : array-like, shape (n_features, n_features)
Class covariance matrix.
"""
classes = np.unique(y)
covs = []
for group in classes:
Xg = X[y == group, :]
covs.append(np.atleast_2d(_cov(Xg, shrinkage)))
return np.average(covs, axis=0, weights=priors)
class LinearDiscriminantAnalysis(BaseEstimator, LinearClassifierMixin,
TransformerMixin):
"""Linear Discriminant Analysis
A classifier with a linear decision boundary, generated by fitting class
conditional densities to the data and using Bayes' rule.
The model fits a Gaussian density to each class, assuming that all classes
share the same covariance matrix.
The fitted model can also be used to reduce the dimensionality of the input
by projecting it to the most discriminative directions.
.. versionadded:: 0.17
*LinearDiscriminantAnalysis*.
.. versionchanged:: 0.17
        Deprecated :class:`lda.LDA` has been moved to *LinearDiscriminantAnalysis*.
Parameters
----------
solver : string, optional
Solver to use, possible values:
- 'svd': Singular value decomposition (default).
Does not compute the covariance matrix, therefore this solver is
recommended for data with a large number of features.
- 'lsqr': Least squares solution, can be combined with shrinkage.
- 'eigen': Eigenvalue decomposition, can be combined with shrinkage.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None: no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage parameter.
Note that shrinkage works only with 'lsqr' and 'eigen' solvers.
priors : array, optional, shape (n_classes,)
Class priors.
n_components : int, optional
Number of components (< n_classes - 1) for dimensionality reduction.
store_covariance : bool, optional
Additionally compute class covariance matrix (default False).
.. versionadded:: 0.17
tol : float, optional
Threshold used for rank estimation in SVD solver.
.. versionadded:: 0.17
Attributes
----------
coef_ : array, shape (n_features,) or (n_classes, n_features)
Weight vector(s).
intercept_ : array, shape (n_features,)
Intercept term.
covariance_ : array-like, shape (n_features, n_features)
Covariance matrix (shared by all classes).
explained_variance_ratio_ : array, shape (n_components,)
Percentage of variance explained by each of the selected components.
If ``n_components`` is not set then all components are stored and the
sum of explained variances is equal to 1.0. Only available when eigen
or svd solver is used.
means_ : array-like, shape (n_classes, n_features)
Class means.
priors_ : array-like, shape (n_classes,)
Class priors (sum to 1).
scalings_ : array-like, shape (rank, n_classes - 1)
Scaling of the features in the space spanned by the class centroids.
xbar_ : array-like, shape (n_features,)
Overall mean.
classes_ : array-like, shape (n_classes,)
Unique class labels.
See also
--------
sklearn.discriminant_analysis.QuadraticDiscriminantAnalysis: Quadratic
Discriminant Analysis
Notes
-----
The default solver is 'svd'. It can perform both classification and
transform, and it does not rely on the calculation of the covariance
matrix. This can be an advantage in situations where the number of features
is large. However, the 'svd' solver cannot be used with shrinkage.
The 'lsqr' solver is an efficient algorithm that only works for
classification. It supports shrinkage.
The 'eigen' solver is based on the optimization of the between class
scatter to within class scatter ratio. It can be used for both
classification and transform, and it supports shrinkage. However, the
'eigen' solver needs to compute the covariance matrix, so it might not be
suitable for situations with a high number of features.
Examples
--------
>>> import numpy as np
>>> from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> y = np.array([1, 1, 1, 2, 2, 2])
>>> clf = LinearDiscriminantAnalysis()
>>> clf.fit(X, y)
LinearDiscriminantAnalysis(n_components=None, priors=None, shrinkage=None,
solver='svd', store_covariance=False, tol=0.0001)
>>> print(clf.predict([[-0.8, -1]]))
[1]
"""
def __init__(self, solver='svd', shrinkage=None, priors=None,
n_components=None, store_covariance=False, tol=1e-4):
self.solver = solver
self.shrinkage = shrinkage
self.priors = priors
self.n_components = n_components
self.store_covariance = store_covariance # used only in svd solver
self.tol = tol # used only in svd solver
def _solve_lsqr(self, X, y, shrinkage):
"""Least squares solver.
The least squares solver computes a straightforward solution of the
optimal decision rule based directly on the discriminant functions. It
can only be used for classification (with optional shrinkage), because
estimation of eigenvectors is not performed. Therefore, dimensionality
reduction with the transform is not supported.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_classes)
Target values.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None: no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage parameter.
Notes
-----
This solver is based on [1]_, section 2.6.2, pp. 39-41.
References
----------
.. [1] R. O. Duda, P. E. Hart, D. G. Stork. Pattern Classification
(Second Edition). John Wiley & Sons, Inc., New York, 2001. ISBN
0-471-05669-3.
"""
self.means_ = _class_means(X, y)
self.covariance_ = _class_cov(X, y, self.priors_, shrinkage)
self.coef_ = linalg.lstsq(self.covariance_, self.means_.T)[0].T
self.intercept_ = (-0.5 * np.diag(np.dot(self.means_, self.coef_.T))
+ np.log(self.priors_))
def _solve_eigen(self, X, y, shrinkage):
"""Eigenvalue solver.
The eigenvalue solver computes the optimal solution of the Rayleigh
coefficient (basically the ratio of between class scatter to within
class scatter). This solver supports both classification and
dimensionality reduction (with optional shrinkage).
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None: no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage constant.
Notes
-----
This solver is based on [1]_, section 3.8.3, pp. 121-124.
References
----------
.. [1] R. O. Duda, P. E. Hart, D. G. Stork. Pattern Classification
(Second Edition). John Wiley & Sons, Inc., New York, 2001. ISBN
0-471-05669-3.
"""
self.means_ = _class_means(X, y)
self.covariance_ = _class_cov(X, y, self.priors_, shrinkage)
Sw = self.covariance_ # within scatter
St = _cov(X, shrinkage) # total scatter
Sb = St - Sw # between scatter
evals, evecs = linalg.eigh(Sb, Sw)
self.explained_variance_ratio_ = np.sort(evals / np.sum(evals))[::-1]
evecs = evecs[:, np.argsort(evals)[::-1]] # sort eigenvectors
# evecs /= np.linalg.norm(evecs, axis=0) # doesn't work with numpy 1.6
evecs /= np.apply_along_axis(np.linalg.norm, 0, evecs)
self.scalings_ = evecs
self.coef_ = np.dot(self.means_, evecs).dot(evecs.T)
self.intercept_ = (-0.5 * np.diag(np.dot(self.means_, self.coef_.T))
+ np.log(self.priors_))
def _solve_svd(self, X, y):
"""SVD solver.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
"""
n_samples, n_features = X.shape
n_classes = len(self.classes_)
self.means_ = _class_means(X, y)
if self.store_covariance:
self.covariance_ = _class_cov(X, y, self.priors_)
Xc = []
for idx, group in enumerate(self.classes_):
Xg = X[y == group, :]
Xc.append(Xg - self.means_[idx])
self.xbar_ = np.dot(self.priors_, self.means_)
Xc = np.concatenate(Xc, axis=0)
# 1) within (univariate) scaling by with classes std-dev
std = Xc.std(axis=0)
# avoid division by zero in normalization
std[std == 0] = 1.
fac = 1. / (n_samples - n_classes)
# 2) Within variance scaling
X = np.sqrt(fac) * (Xc / std)
# SVD of centered (within)scaled data
U, S, V = linalg.svd(X, full_matrices=False)
rank = np.sum(S > self.tol)
if rank < n_features:
warnings.warn("Variables are collinear.")
# Scaling of within covariance is: V' 1/S
scalings = (V[:rank] / std).T / S[:rank]
# 3) Between variance scaling
# Scale weighted centers
X = np.dot(((np.sqrt((n_samples * self.priors_) * fac)) *
(self.means_ - self.xbar_).T).T, scalings)
# Centers are living in a space with n_classes-1 dim (maximum)
# Use SVD to find projection in the space spanned by the
# (n_classes) centers
_, S, V = linalg.svd(X, full_matrices=0)
self.explained_variance_ratio_ = S[:self.n_components] / S.sum()
rank = np.sum(S > self.tol * S[0])
self.scalings_ = np.dot(scalings, V.T[:, :rank])
coef = np.dot(self.means_ - self.xbar_, self.scalings_)
self.intercept_ = (-0.5 * np.sum(coef ** 2, axis=1)
+ np.log(self.priors_))
self.coef_ = np.dot(coef, self.scalings_.T)
self.intercept_ -= np.dot(self.xbar_, self.coef_.T)
def fit(self, X, y, store_covariance=None, tol=None):
"""Fit LinearDiscriminantAnalysis model according to the given
training data and parameters.
.. versionchanged:: 0.17
Deprecated *store_covariance* have been moved to main constructor.
.. versionchanged:: 0.17
Deprecated *tol* have been moved to main constructor.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array, shape (n_samples,)
Target values.
"""
if store_covariance:
warnings.warn("The parameter 'store_covariance' is deprecated as "
"of version 0.17 and will be removed in 0.19. The "
"parameter is no longer necessary because the value "
"is set via the estimator initialisation or "
"set_params method.", DeprecationWarning)
self.store_covariance = store_covariance
if tol:
warnings.warn("The parameter 'tol' is deprecated as of version "
"0.17 and will be removed in 0.19. The parameter is "
"no longer necessary because the value is set via "
"the estimator initialisation or set_params method.",
DeprecationWarning)
self.tol = tol
X, y = check_X_y(X, y, ensure_min_samples=2, estimator=self)
self.classes_ = unique_labels(y)
if self.priors is None: # estimate priors from sample
_, y_t = np.unique(y, return_inverse=True) # non-negative ints
self.priors_ = bincount(y_t) / float(len(y))
else:
self.priors_ = np.asarray(self.priors)
if (self.priors_ < 0).any():
raise ValueError("priors must be non-negative")
if self.priors_.sum() != 1:
warnings.warn("The priors do not sum to 1. Renormalizing",
UserWarning)
self.priors_ = self.priors_ / self.priors_.sum()
if self.solver == 'svd':
if self.shrinkage is not None:
raise NotImplementedError('shrinkage not supported')
self._solve_svd(X, y)
elif self.solver == 'lsqr':
self._solve_lsqr(X, y, shrinkage=self.shrinkage)
elif self.solver == 'eigen':
self._solve_eigen(X, y, shrinkage=self.shrinkage)
else:
raise ValueError("unknown solver {} (valid solvers are 'svd', "
"'lsqr', and 'eigen').".format(self.solver))
if self.classes_.size == 2: # treat binary case as a special case
self.coef_ = np.array(self.coef_[1, :] - self.coef_[0, :], ndmin=2)
self.intercept_ = np.array(self.intercept_[1] - self.intercept_[0],
ndmin=1)
return self
def transform(self, X):
"""Project data to maximize class separation.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
Returns
-------
X_new : array, shape (n_samples, n_components)
Transformed data.
"""
if self.solver == 'lsqr':
raise NotImplementedError("transform not implemented for 'lsqr' "
"solver (use 'svd' or 'eigen').")
check_is_fitted(self, ['xbar_', 'scalings_'], all_or_any=any)
X = check_array(X)
if self.solver == 'svd':
X_new = np.dot(X - self.xbar_, self.scalings_)
elif self.solver == 'eigen':
X_new = np.dot(X, self.scalings_)
n_components = X.shape[1] if self.n_components is None \
else self.n_components
return X_new[:, :n_components]
def predict_proba(self, X):
"""Estimate probability.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
Returns
-------
C : array, shape (n_samples, n_classes)
Estimated probabilities.
"""
prob = self.decision_function(X)
prob *= -1
np.exp(prob, prob)
prob += 1
np.reciprocal(prob, prob)
if len(self.classes_) == 2: # binary case
return np.column_stack([1 - prob, prob])
else:
# OvR normalization, like LibLinear's predict_probability
prob /= prob.sum(axis=1).reshape((prob.shape[0], -1))
return prob
def predict_log_proba(self, X):
"""Estimate log probability.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
Returns
-------
C : array, shape (n_samples, n_classes)
Estimated log probabilities.
"""
return np.log(self.predict_proba(X))
class QuadraticDiscriminantAnalysis(BaseEstimator, ClassifierMixin):
"""
Quadratic Discriminant Analysis
A classifier with a quadratic decision boundary, generated
by fitting class conditional densities to the data
and using Bayes' rule.
The model fits a Gaussian density to each class.
.. versionadded:: 0.17
*QuadraticDiscriminantAnalysis*
.. versionchanged:: 0.17
        Deprecated :class:`qda.QDA` has been moved to *QuadraticDiscriminantAnalysis*.
Parameters
----------
priors : array, optional, shape = [n_classes]
Priors on classes
reg_param : float, optional
Regularizes the covariance estimate as
``(1-reg_param)*Sigma + reg_param*np.eye(n_features)``
Attributes
----------
covariances_ : list of array-like, shape = [n_features, n_features]
Covariance matrices of each class.
means_ : array-like, shape = [n_classes, n_features]
Class means.
priors_ : array-like, shape = [n_classes]
Class priors (sum to 1).
rotations_ : list of arrays
For each class k an array of shape [n_features, n_k], with
``n_k = min(n_features, number of elements in class k)``
It is the rotation of the Gaussian distribution, i.e. its
principal axis.
scalings_ : list of arrays
For each class k an array of shape [n_k]. It contains the scaling
of the Gaussian distributions along its principal axes, i.e. the
variance in the rotated coordinate system.
store_covariances : boolean
If True the covariance matrices are computed and stored in the
`self.covariances_` attribute.
.. versionadded:: 0.17
tol : float, optional, default 1.0e-4
Threshold used for rank estimation.
.. versionadded:: 0.17
Examples
--------
>>> from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> y = np.array([1, 1, 1, 2, 2, 2])
>>> clf = QuadraticDiscriminantAnalysis()
>>> clf.fit(X, y)
... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
QuadraticDiscriminantAnalysis(priors=None, reg_param=0.0,
store_covariances=False, tol=0.0001)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
sklearn.discriminant_analysis.LinearDiscriminantAnalysis: Linear
Discriminant Analysis
"""
def __init__(self, priors=None, reg_param=0., store_covariances=False,
tol=1.0e-4):
self.priors = np.asarray(priors) if priors is not None else None
self.reg_param = reg_param
self.store_covariances = store_covariances
self.tol = tol
def fit(self, X, y, store_covariances=None, tol=None):
"""Fit the model according to the given training data and parameters.
.. versionchanged:: 0.17
Deprecated *store_covariance* have been moved to main constructor.
.. versionchanged:: 0.17
Deprecated *tol* have been moved to main constructor.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vector, where n_samples in the number of samples and
n_features is the number of features.
y : array, shape = [n_samples]
Target values (integers)
"""
if store_covariances:
warnings.warn("The parameter 'store_covariances' is deprecated as "
"of version 0.17 and will be removed in 0.19. The "
"parameter is no longer necessary because the value "
"is set via the estimator initialisation or "
"set_params method.", DeprecationWarning)
self.store_covariances = store_covariances
if tol:
warnings.warn("The parameter 'tol' is deprecated as of version "
"0.17 and will be removed in 0.19. The parameter is "
"no longer necessary because the value is set via "
"the estimator initialisation or set_params method.",
DeprecationWarning)
self.tol = tol
X, y = check_X_y(X, y)
check_classification_targets(y)
self.classes_, y = np.unique(y, return_inverse=True)
n_samples, n_features = X.shape
n_classes = len(self.classes_)
if n_classes < 2:
raise ValueError('y has less than 2 classes')
if self.priors is None:
self.priors_ = bincount(y) / float(n_samples)
else:
self.priors_ = self.priors
cov = None
if self.store_covariances:
cov = []
means = []
scalings = []
rotations = []
for ind in xrange(n_classes):
Xg = X[y == ind, :]
meang = Xg.mean(0)
means.append(meang)
if len(Xg) == 1:
raise ValueError('y has only 1 sample in class %s, covariance '
'is ill defined.' % str(self.classes_[ind]))
Xgc = Xg - meang
# Xgc = U * S * V.T
U, S, Vt = np.linalg.svd(Xgc, full_matrices=False)
rank = np.sum(S > self.tol)
if rank < n_features:
warnings.warn("Variables are collinear")
S2 = (S ** 2) / (len(Xg) - 1)
S2 = ((1 - self.reg_param) * S2) + self.reg_param
if self.store_covariances:
# cov = V * (S^2 / (n-1)) * V.T
cov.append(np.dot(S2 * Vt.T, Vt))
scalings.append(S2)
rotations.append(Vt.T)
if self.store_covariances:
self.covariances_ = cov
self.means_ = np.asarray(means)
self.scalings_ = scalings
self.rotations_ = rotations
return self
def _decision_function(self, X):
check_is_fitted(self, 'classes_')
X = check_array(X)
norm2 = []
for i in range(len(self.classes_)):
R = self.rotations_[i]
S = self.scalings_[i]
Xm = X - self.means_[i]
X2 = np.dot(Xm, R * (S ** (-0.5)))
norm2.append(np.sum(X2 ** 2, 1))
norm2 = np.array(norm2).T # shape = [len(X), n_classes]
u = np.asarray([np.sum(np.log(s)) for s in self.scalings_])
return (-0.5 * (norm2 + u) + np.log(self.priors_))
def decision_function(self, X):
"""Apply decision function to an array of samples.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Array of samples (test vectors).
Returns
-------
C : array, shape = [n_samples, n_classes] or [n_samples,]
Decision function values related to each class, per sample.
In the two-class case, the shape is [n_samples,], giving the
log likelihood ratio of the positive class.
"""
dec_func = self._decision_function(X)
# handle special case of two classes
if len(self.classes_) == 2:
return dec_func[:, 1] - dec_func[:, 0]
return dec_func
def predict(self, X):
"""Perform classification on an array of test vectors X.
The predicted class C for each sample in X is returned.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = [n_samples]
"""
d = self._decision_function(X)
y_pred = self.classes_.take(d.argmax(1))
return y_pred
def predict_proba(self, X):
"""Return posterior probabilities of classification.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Array of samples/test vectors.
Returns
-------
C : array, shape = [n_samples, n_classes]
Posterior probabilities of classification per class.
"""
values = self._decision_function(X)
# compute the likelihood of the underlying gaussian models
# up to a multiplicative constant.
likelihood = np.exp(values - values.max(axis=1)[:, np.newaxis])
# compute posterior probabilities
return likelihood / likelihood.sum(axis=1)[:, np.newaxis]
def predict_log_proba(self, X):
"""Return posterior probabilities of classification.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Array of samples/test vectors.
Returns
-------
C : array, shape = [n_samples, n_classes]
Posterior log-probabilities of classification per class.
"""
# XXX : can do better to avoid precision overflows
probas_ = self.predict_proba(X)
return np.log(probas_)
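# A minimal sketch exercising both estimators on the toy data from the
# docstrings above; the expected prediction for [-0.8, -1] is class 1.
if __name__ == "__main__":
    X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
    y = np.array([1, 1, 1, 2, 2, 2])
    lda = LinearDiscriminantAnalysis(solver="svd").fit(X, y)
    qda = QuadraticDiscriminantAnalysis().fit(X, y)
    print(lda.predict([[-0.8, -1]]))
    print(qda.predict([[-0.8, -1]]))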
| bsd-3-clause |
kashif/scikit-learn | sklearn/mixture/gmm.py | 19 | 30655 | """
Gaussian Mixture Models.
This implementation corresponds to frequentist (non-Bayesian) formulation
of Gaussian Mixture Models.
"""
# Author: Ron Weiss <[email protected]>
# Fabian Pedregosa <[email protected]>
# Bertrand Thirion <[email protected]>
import warnings
import numpy as np
from scipy import linalg
from time import time
from ..base import BaseEstimator
from ..utils import check_random_state, check_array
from ..utils.extmath import logsumexp
from ..utils.validation import check_is_fitted
from .. import cluster
from sklearn.externals.six.moves import zip
EPS = np.finfo(float).eps
def log_multivariate_normal_density(X, means, covars, covariance_type='diag'):
"""Compute the log probability under a multivariate Gaussian distribution.
Parameters
----------
X : array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row corresponds to a
single data point.
means : array_like, shape (n_components, n_features)
List of n_features-dimensional mean vectors for n_components Gaussians.
Each row corresponds to a single mean vector.
covars : array_like
List of n_components covariance parameters for each Gaussian. The shape
depends on `covariance_type`:
(n_components, n_features) if 'spherical',
(n_features, n_features) if 'tied',
(n_components, n_features) if 'diag',
(n_components, n_features, n_features) if 'full'
covariance_type : string
Type of the covariance parameters. Must be one of
'spherical', 'tied', 'diag', 'full'. Defaults to 'diag'.
Returns
-------
lpr : array_like, shape (n_samples, n_components)
Array containing the log probabilities of each data point in
X under each of the n_components multivariate Gaussian distributions.
"""
log_multivariate_normal_density_dict = {
'spherical': _log_multivariate_normal_density_spherical,
'tied': _log_multivariate_normal_density_tied,
'diag': _log_multivariate_normal_density_diag,
'full': _log_multivariate_normal_density_full}
return log_multivariate_normal_density_dict[covariance_type](
X, means, covars)
def sample_gaussian(mean, covar, covariance_type='diag', n_samples=1,
random_state=None):
"""Generate random samples from a Gaussian distribution.
Parameters
----------
mean : array_like, shape (n_features,)
Mean of the distribution.
covar : array_like, optional
Covariance of the distribution. The shape depends on `covariance_type`:
scalar if 'spherical',
(n_features) if 'diag',
(n_features, n_features) if 'tied', or 'full'
covariance_type : string, optional
Type of the covariance parameters. Must be one of
'spherical', 'tied', 'diag', 'full'. Defaults to 'diag'.
n_samples : int, optional
Number of samples to generate. Defaults to 1.
Returns
-------
X : array, shape (n_features, n_samples)
Randomly generated sample
"""
rng = check_random_state(random_state)
n_dim = len(mean)
rand = rng.randn(n_dim, n_samples)
if n_samples == 1:
rand.shape = (n_dim,)
if covariance_type == 'spherical':
rand *= np.sqrt(covar)
elif covariance_type == 'diag':
rand = np.dot(np.diag(np.sqrt(covar)), rand)
else:
s, U = linalg.eigh(covar)
s.clip(0, out=s) # get rid of tiny negatives
np.sqrt(s, out=s)
U *= s
rand = np.dot(U, rand)
return (rand.T + mean).T
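# --- Illustrative sketch (added for clarity; not part of the original module) ---
# A hedged example of drawing samples from a single Gaussian with a full
# covariance matrix; the numbers are arbitrary and only show the call.
def _demo_sample_gaussian():
    mean = np.array([0.0, 1.0])
    covar = np.array([[1.0, 0.3],
                      [0.3, 2.0]])
    # Returns an array of shape (n_features, n_samples).
    return sample_gaussian(mean, covar, covariance_type='full', n_samples=5,
                           random_state=0)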
class GMM(BaseEstimator):
"""Gaussian Mixture Model
Representation of a Gaussian mixture model probability distribution.
This class allows for easy evaluation of, sampling from, and
maximum-likelihood estimation of the parameters of a GMM distribution.
Initializes parameters such that every mixture component has zero
mean and identity covariance.
Read more in the :ref:`User Guide <gmm>`.
Parameters
----------
n_components : int, optional
Number of mixture components. Defaults to 1.
covariance_type : string, optional
String describing the type of covariance parameters to
use. Must be one of 'spherical', 'tied', 'diag', 'full'.
Defaults to 'diag'.
random_state: RandomState or an int seed (None by default)
A random number generator instance
min_covar : float, optional
Floor on the diagonal of the covariance matrix to prevent
overfitting. Defaults to 1e-3.
tol : float, optional
Convergence threshold. EM iterations will stop when average
gain in log-likelihood is below this threshold. Defaults to 1e-3.
n_iter : int, optional
Number of EM iterations to perform.
n_init : int, optional
        Number of initializations to perform. The best result is kept.
params : string, optional
Controls which parameters are updated in the training
process. Can contain any combination of 'w' for weights,
'm' for means, and 'c' for covars. Defaults to 'wmc'.
init_params : string, optional
Controls which parameters are updated in the initialization
process. Can contain any combination of 'w' for weights,
'm' for means, and 'c' for covars. Defaults to 'wmc'.
verbose : int, default: 0
Enable verbose output. If 1 then it always prints the current
initialization and iteration step. If greater than 1 then
it prints additionally the change and time needed for each step.
Attributes
----------
weights_ : array, shape (`n_components`,)
This attribute stores the mixing weights for each mixture component.
means_ : array, shape (`n_components`, `n_features`)
Mean parameters for each mixture component.
covars_ : array
Covariance parameters for each mixture component. The shape
depends on `covariance_type`::
(n_components, n_features) if 'spherical',
(n_features, n_features) if 'tied',
(n_components, n_features) if 'diag',
(n_components, n_features, n_features) if 'full'
converged_ : bool
True when convergence was reached in fit(), False otherwise.
See Also
--------
DPGMM : Infinite gaussian mixture model, using the dirichlet
process, fit with a variational algorithm
VBGMM : Finite gaussian mixture model fit with a variational
algorithm, better for situations where there might be too little
data to get a good estimate of the covariance matrix.
Examples
--------
>>> import numpy as np
>>> from sklearn import mixture
>>> np.random.seed(1)
>>> g = mixture.GMM(n_components=2)
>>> # Generate random observations with two modes centered on 0
>>> # and 10 to use for training.
>>> obs = np.concatenate((np.random.randn(100, 1),
... 10 + np.random.randn(300, 1)))
>>> g.fit(obs) # doctest: +NORMALIZE_WHITESPACE
GMM(covariance_type='diag', init_params='wmc', min_covar=0.001,
n_components=2, n_init=1, n_iter=100, params='wmc',
random_state=None, tol=0.001, verbose=0)
>>> np.round(g.weights_, 2)
array([ 0.75, 0.25])
>>> np.round(g.means_, 2)
array([[ 10.05],
[ 0.06]])
>>> np.round(g.covars_, 2) #doctest: +SKIP
array([[[ 1.02]],
[[ 0.96]]])
>>> g.predict([[0], [2], [9], [10]]) #doctest: +ELLIPSIS
array([1, 1, 0, 0]...)
>>> np.round(g.score([[0], [2], [9], [10]]), 2)
array([-2.19, -4.58, -1.75, -1.21])
>>> # Refit the model on new data (initial parameters remain the
>>> # same), this time with an even split between the two modes.
>>> g.fit(20 * [[0]] + 20 * [[10]]) # doctest: +NORMALIZE_WHITESPACE
GMM(covariance_type='diag', init_params='wmc', min_covar=0.001,
n_components=2, n_init=1, n_iter=100, params='wmc',
random_state=None, tol=0.001, verbose=0)
>>> np.round(g.weights_, 2)
array([ 0.5, 0.5])
"""
def __init__(self, n_components=1, covariance_type='diag',
random_state=None, tol=1e-3, min_covar=1e-3,
n_iter=100, n_init=1, params='wmc', init_params='wmc',
verbose=0):
self.n_components = n_components
self.covariance_type = covariance_type
self.tol = tol
self.min_covar = min_covar
self.random_state = random_state
self.n_iter = n_iter
self.n_init = n_init
self.params = params
self.init_params = init_params
self.verbose = verbose
if covariance_type not in ['spherical', 'tied', 'diag', 'full']:
raise ValueError('Invalid value for covariance_type: %s' %
covariance_type)
if n_init < 1:
raise ValueError('GMM estimation requires at least one run')
self.weights_ = np.ones(self.n_components) / self.n_components
# flag to indicate exit status of fit() method: converged (True) or
# n_iter reached (False)
self.converged_ = False
def _get_covars(self):
"""Covariance parameters for each mixture component.
        The shape depends on ``covariance_type``::
(n_states, n_features) if 'spherical',
(n_features, n_features) if 'tied',
(n_states, n_features) if 'diag',
(n_states, n_features, n_features) if 'full'
"""
if self.covariance_type == 'full':
return self.covars_
elif self.covariance_type == 'diag':
return [np.diag(cov) for cov in self.covars_]
elif self.covariance_type == 'tied':
return [self.covars_] * self.n_components
elif self.covariance_type == 'spherical':
return [np.diag(cov) for cov in self.covars_]
def _set_covars(self, covars):
"""Provide values for covariance"""
covars = np.asarray(covars)
_validate_covars(covars, self.covariance_type, self.n_components)
self.covars_ = covars
def score_samples(self, X):
"""Return the per-sample likelihood of the data under the model.
Compute the log probability of X under the model and
return the posterior distribution (responsibilities) of each
mixture component for each element of X.
Parameters
----------
X: array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
logprob : array_like, shape (n_samples,)
Log probabilities of each data point in X.
responsibilities : array_like, shape (n_samples, n_components)
Posterior probabilities of each mixture component for each
observation
"""
check_is_fitted(self, 'means_')
X = check_array(X)
if X.ndim == 1:
X = X[:, np.newaxis]
if X.size == 0:
return np.array([]), np.empty((0, self.n_components))
if X.shape[1] != self.means_.shape[1]:
raise ValueError('The shape of X is not compatible with self')
lpr = (log_multivariate_normal_density(X, self.means_, self.covars_,
self.covariance_type) +
np.log(self.weights_))
logprob = logsumexp(lpr, axis=1)
responsibilities = np.exp(lpr - logprob[:, np.newaxis])
return logprob, responsibilities
def score(self, X, y=None):
"""Compute the log probability under the model.
Parameters
----------
X : array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
logprob : array_like, shape (n_samples,)
Log probabilities of each data point in X
"""
logprob, _ = self.score_samples(X)
return logprob
def predict(self, X):
"""Predict label for data.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = (n_samples,) component memberships
"""
logprob, responsibilities = self.score_samples(X)
return responsibilities.argmax(axis=1)
def predict_proba(self, X):
"""Predict posterior probability of data under each Gaussian
in the model.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
responsibilities : array-like, shape = (n_samples, n_components)
Returns the probability of the sample for each Gaussian
(state) in the model.
"""
logprob, responsibilities = self.score_samples(X)
return responsibilities
def sample(self, n_samples=1, random_state=None):
"""Generate random samples from the model.
Parameters
----------
n_samples : int, optional
Number of samples to generate. Defaults to 1.
Returns
-------
X : array_like, shape (n_samples, n_features)
List of samples
"""
check_is_fitted(self, 'means_')
if random_state is None:
random_state = self.random_state
random_state = check_random_state(random_state)
weight_cdf = np.cumsum(self.weights_)
X = np.empty((n_samples, self.means_.shape[1]))
rand = random_state.rand(n_samples)
# decide which component to use for each sample
comps = weight_cdf.searchsorted(rand)
# for each component, generate all needed samples
for comp in range(self.n_components):
# occurrences of current component in X
comp_in_X = (comp == comps)
# number of those occurrences
num_comp_in_X = comp_in_X.sum()
if num_comp_in_X > 0:
if self.covariance_type == 'tied':
cv = self.covars_
elif self.covariance_type == 'spherical':
cv = self.covars_[comp][0]
else:
cv = self.covars_[comp]
X[comp_in_X] = sample_gaussian(
self.means_[comp], cv, self.covariance_type,
num_comp_in_X, random_state=random_state).T
return X
def fit_predict(self, X, y=None):
"""Fit and then predict labels for data.
Warning: due to the final maximization step in the EM algorithm,
with low iterations the prediction may not be 100% accurate
.. versionadded:: 0.17
*fit_predict* method in Gaussian Mixture Model.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = (n_samples,) component memberships
"""
return self._fit(X, y).argmax(axis=1)
def _fit(self, X, y=None, do_prediction=False):
"""Estimate model parameters with the EM algorithm.
        An initialization step is performed before entering the
expectation-maximization (EM) algorithm. If you want to avoid
this step, set the keyword argument init_params to the empty
string '' when creating the GMM object. Likewise, if you would
like just to do an initialization, set n_iter=0.
Parameters
----------
X : array_like, shape (n, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
responsibilities : array, shape (n_samples, n_components)
Posterior probabilities of each mixture component for each
observation.
"""
# initialization step
X = check_array(X, dtype=np.float64, ensure_min_samples=2,
estimator=self)
if X.shape[0] < self.n_components:
raise ValueError(
'GMM estimation with %s components, but got only %s samples' %
(self.n_components, X.shape[0]))
max_log_prob = -np.infty
if self.verbose > 0:
print('Expectation-maximization algorithm started.')
for init in range(self.n_init):
if self.verbose > 0:
print('Initialization ' + str(init + 1))
start_init_time = time()
if 'm' in self.init_params or not hasattr(self, 'means_'):
self.means_ = cluster.KMeans(
n_clusters=self.n_components,
random_state=self.random_state).fit(X).cluster_centers_
if self.verbose > 1:
print('\tMeans have been initialized.')
if 'w' in self.init_params or not hasattr(self, 'weights_'):
self.weights_ = np.tile(1.0 / self.n_components,
self.n_components)
if self.verbose > 1:
print('\tWeights have been initialized.')
if 'c' in self.init_params or not hasattr(self, 'covars_'):
cv = np.cov(X.T) + self.min_covar * np.eye(X.shape[1])
if not cv.shape:
cv.shape = (1, 1)
self.covars_ = \
distribute_covar_matrix_to_match_covariance_type(
cv, self.covariance_type, self.n_components)
if self.verbose > 1:
print('\tCovariance matrices have been initialized.')
# EM algorithms
current_log_likelihood = None
# reset self.converged_ to False
self.converged_ = False
for i in range(self.n_iter):
if self.verbose > 0:
print('\tEM iteration ' + str(i + 1))
start_iter_time = time()
prev_log_likelihood = current_log_likelihood
# Expectation step
log_likelihoods, responsibilities = self.score_samples(X)
current_log_likelihood = log_likelihoods.mean()
# Check for convergence.
if prev_log_likelihood is not None:
change = abs(current_log_likelihood - prev_log_likelihood)
if self.verbose > 1:
print('\t\tChange: ' + str(change))
if change < self.tol:
self.converged_ = True
if self.verbose > 0:
print('\t\tEM algorithm converged.')
break
# Maximization step
self._do_mstep(X, responsibilities, self.params,
self.min_covar)
if self.verbose > 1:
print('\t\tEM iteration ' + str(i + 1) + ' took {0:.5f}s'.format(
time() - start_iter_time))
# if the results are better, keep it
if self.n_iter:
if current_log_likelihood > max_log_prob:
max_log_prob = current_log_likelihood
best_params = {'weights': self.weights_,
'means': self.means_,
'covars': self.covars_}
if self.verbose > 1:
print('\tBetter parameters were found.')
if self.verbose > 1:
print('\tInitialization ' + str(init + 1) + ' took {0:.5f}s'.format(
time() - start_init_time))
# check the existence of an init param that was not subject to
# likelihood computation issue.
if np.isneginf(max_log_prob) and self.n_iter:
raise RuntimeError(
"EM algorithm was never able to compute a valid likelihood " +
"given initial parameters. Try different init parameters " +
"(or increasing n_init) or check for degenerate data.")
if self.n_iter:
self.covars_ = best_params['covars']
self.means_ = best_params['means']
self.weights_ = best_params['weights']
else: # self.n_iter == 0 occurs when using GMM within HMM
# Need to make sure that there are responsibilities to output
# Output zeros because it was just a quick initialization
responsibilities = np.zeros((X.shape[0], self.n_components))
return responsibilities
def fit(self, X, y=None):
"""Estimate model parameters with the EM algorithm.
        An initialization step is performed before entering the
expectation-maximization (EM) algorithm. If you want to avoid
this step, set the keyword argument init_params to the empty
string '' when creating the GMM object. Likewise, if you would
like just to do an initialization, set n_iter=0.
Parameters
----------
X : array_like, shape (n, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
self
"""
self._fit(X, y)
return self
def _do_mstep(self, X, responsibilities, params, min_covar=0):
""" Perform the Mstep of the EM algorithm and return the class weights
"""
weights = responsibilities.sum(axis=0)
weighted_X_sum = np.dot(responsibilities.T, X)
inverse_weights = 1.0 / (weights[:, np.newaxis] + 10 * EPS)
if 'w' in params:
self.weights_ = (weights / (weights.sum() + 10 * EPS) + EPS)
if 'm' in params:
self.means_ = weighted_X_sum * inverse_weights
if 'c' in params:
covar_mstep_func = _covar_mstep_funcs[self.covariance_type]
self.covars_ = covar_mstep_func(
self, X, responsibilities, weighted_X_sum, inverse_weights,
min_covar)
return weights
def _n_parameters(self):
"""Return the number of free parameters in the model."""
ndim = self.means_.shape[1]
if self.covariance_type == 'full':
cov_params = self.n_components * ndim * (ndim + 1) / 2.
elif self.covariance_type == 'diag':
cov_params = self.n_components * ndim
elif self.covariance_type == 'tied':
cov_params = ndim * (ndim + 1) / 2.
elif self.covariance_type == 'spherical':
cov_params = self.n_components
mean_params = ndim * self.n_components
return int(cov_params + mean_params + self.n_components - 1)
def bic(self, X):
"""Bayesian information criterion for the current model fit
and the proposed data
Parameters
----------
X : array of shape(n_samples, n_dimensions)
Returns
-------
bic: float (the lower the better)
"""
return (-2 * self.score(X).sum() +
self._n_parameters() * np.log(X.shape[0]))
def aic(self, X):
"""Akaike information criterion for the current model fit
and the proposed data
Parameters
----------
X : array of shape(n_samples, n_dimensions)
Returns
-------
aic: float (the lower the better)
"""
return - 2 * self.score(X).sum() + 2 * self._n_parameters()
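# --- Illustrative sketch (added for clarity; not part of the original class) ---
# A hedged example of using bic() to pick the number of mixture components on
# synthetic data; in practice X would be your own (n_samples, n_features) array.
def _demo_select_n_components_with_bic():
    rng = np.random.RandomState(0)
    X = np.vstack([rng.randn(100, 2), 5.0 + rng.randn(100, 2)])
    bics = []
    for k in range(1, 5):
        g = GMM(n_components=k, covariance_type='full', random_state=0)
        g.fit(X)
        bics.append(g.bic(X))
    # The component count with the lowest BIC is preferred.
    return int(np.argmin(bics)) + 1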
#########################################################################
# some helper routines
#########################################################################
def _log_multivariate_normal_density_diag(X, means, covars):
"""Compute Gaussian log-density at X for a diagonal model"""
n_samples, n_dim = X.shape
lpr = -0.5 * (n_dim * np.log(2 * np.pi) + np.sum(np.log(covars), 1)
+ np.sum((means ** 2) / covars, 1)
- 2 * np.dot(X, (means / covars).T)
+ np.dot(X ** 2, (1.0 / covars).T))
return lpr
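# --- Illustrative sketch (added for clarity; not part of the original module) ---
# The vectorised expression above expands the diagonal quadratic form
#     (x - mu)^T diag(c)^-1 (x - mu) = sum(mu**2 / c) - 2 * dot(x, mu / c) + sum(x**2 / c),
# which is what the three dot products compute. A hedged sanity check against a
# direct per-sample / per-component loop:
def _demo_check_diag_log_density():
    rng = np.random.RandomState(0)
    X = rng.randn(3, 2)
    means = rng.randn(2, 2)
    covars = rng.rand(2, 2) + 0.5
    fast = _log_multivariate_normal_density_diag(X, means, covars)
    slow = np.empty_like(fast)
    for i in range(X.shape[0]):
        for c in range(means.shape[0]):
            d = X[i] - means[c]
            slow[i, c] = -0.5 * (X.shape[1] * np.log(2 * np.pi)
                                 + np.sum(np.log(covars[c]))
                                 + np.sum(d ** 2 / covars[c]))
    return np.allclose(fast, slow)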
def _log_multivariate_normal_density_spherical(X, means, covars):
"""Compute Gaussian log-density at X for a spherical model"""
cv = covars.copy()
if covars.ndim == 1:
cv = cv[:, np.newaxis]
if covars.shape[1] == 1:
cv = np.tile(cv, (1, X.shape[-1]))
return _log_multivariate_normal_density_diag(X, means, cv)
def _log_multivariate_normal_density_tied(X, means, covars):
"""Compute Gaussian log-density at X for a tied model"""
cv = np.tile(covars, (means.shape[0], 1, 1))
return _log_multivariate_normal_density_full(X, means, cv)
def _log_multivariate_normal_density_full(X, means, covars, min_covar=1.e-7):
"""Log probability for full covariance matrices."""
n_samples, n_dim = X.shape
nmix = len(means)
log_prob = np.empty((n_samples, nmix))
for c, (mu, cv) in enumerate(zip(means, covars)):
try:
cv_chol = linalg.cholesky(cv, lower=True)
except linalg.LinAlgError:
# The model is most probably stuck in a component with too
# few observations, we need to reinitialize this components
try:
cv_chol = linalg.cholesky(cv + min_covar * np.eye(n_dim),
lower=True)
except linalg.LinAlgError:
raise ValueError("'covars' must be symmetric, "
"positive-definite")
cv_log_det = 2 * np.sum(np.log(np.diagonal(cv_chol)))
cv_sol = linalg.solve_triangular(cv_chol, (X - mu).T, lower=True).T
log_prob[:, c] = - .5 * (np.sum(cv_sol ** 2, axis=1) +
n_dim * np.log(2 * np.pi) + cv_log_det)
return log_prob
def _validate_covars(covars, covariance_type, n_components):
"""Do basic checks on matrix covariance sizes and values
"""
from scipy import linalg
if covariance_type == 'spherical':
if len(covars) != n_components:
raise ValueError("'spherical' covars have length n_components")
elif np.any(covars <= 0):
raise ValueError("'spherical' covars must be non-negative")
elif covariance_type == 'tied':
if covars.shape[0] != covars.shape[1]:
raise ValueError("'tied' covars must have shape (n_dim, n_dim)")
elif (not np.allclose(covars, covars.T)
or np.any(linalg.eigvalsh(covars) <= 0)):
raise ValueError("'tied' covars must be symmetric, "
"positive-definite")
elif covariance_type == 'diag':
if len(covars.shape) != 2:
raise ValueError("'diag' covars must have shape "
"(n_components, n_dim)")
elif np.any(covars <= 0):
raise ValueError("'diag' covars must be non-negative")
elif covariance_type == 'full':
if len(covars.shape) != 3:
raise ValueError("'full' covars must have shape "
"(n_components, n_dim, n_dim)")
elif covars.shape[1] != covars.shape[2]:
raise ValueError("'full' covars must have shape "
"(n_components, n_dim, n_dim)")
for n, cv in enumerate(covars):
if (not np.allclose(cv, cv.T)
or np.any(linalg.eigvalsh(cv) <= 0)):
raise ValueError("component %d of 'full' covars must be "
"symmetric, positive-definite" % n)
else:
raise ValueError("covariance_type must be one of " +
"'spherical', 'tied', 'diag', 'full'")
def distribute_covar_matrix_to_match_covariance_type(
tied_cv, covariance_type, n_components):
"""Create all the covariance matrices from a given template"""
if covariance_type == 'spherical':
cv = np.tile(tied_cv.mean() * np.ones(tied_cv.shape[1]),
(n_components, 1))
elif covariance_type == 'tied':
cv = tied_cv
elif covariance_type == 'diag':
cv = np.tile(np.diag(tied_cv), (n_components, 1))
elif covariance_type == 'full':
cv = np.tile(tied_cv, (n_components, 1, 1))
else:
raise ValueError("covariance_type must be one of " +
"'spherical', 'tied', 'diag', 'full'")
return cv
def _covar_mstep_diag(gmm, X, responsibilities, weighted_X_sum, norm,
min_covar):
"""Performing the covariance M step for diagonal cases"""
avg_X2 = np.dot(responsibilities.T, X * X) * norm
avg_means2 = gmm.means_ ** 2
avg_X_means = gmm.means_ * weighted_X_sum * norm
return avg_X2 - 2 * avg_X_means + avg_means2 + min_covar
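# Note (added for clarity; not part of the original module): the diagonal
# M-step above uses the identity E_r[(x - mu)**2] = E_r[x**2] - 2*mu*E_r[x] + mu**2,
# where E_r[.] is the responsibility-weighted average for each component;
# min_covar keeps the resulting variances bounded away from zero.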
def _covar_mstep_spherical(*args):
"""Performing the covariance M step for spherical cases"""
cv = _covar_mstep_diag(*args)
return np.tile(cv.mean(axis=1)[:, np.newaxis], (1, cv.shape[1]))
def _covar_mstep_full(gmm, X, responsibilities, weighted_X_sum, norm,
min_covar):
"""Performing the covariance M step for full cases"""
# Eq. 12 from K. Murphy, "Fitting a Conditional Linear Gaussian
# Distribution"
n_features = X.shape[1]
cv = np.empty((gmm.n_components, n_features, n_features))
for c in range(gmm.n_components):
post = responsibilities[:, c]
mu = gmm.means_[c]
diff = X - mu
with np.errstate(under='ignore'):
# Underflow Errors in doing post * X.T are not important
avg_cv = np.dot(post * diff.T, diff) / (post.sum() + 10 * EPS)
cv[c] = avg_cv + min_covar * np.eye(n_features)
return cv
def _covar_mstep_tied(gmm, X, responsibilities, weighted_X_sum, norm,
min_covar):
# Eq. 15 from K. Murphy, "Fitting a Conditional Linear Gaussian
# Distribution"
avg_X2 = np.dot(X.T, X)
avg_means2 = np.dot(gmm.means_.T, weighted_X_sum)
out = avg_X2 - avg_means2
out *= 1. / X.shape[0]
out.flat[::len(out) + 1] += min_covar
return out
_covar_mstep_funcs = {'spherical': _covar_mstep_spherical,
'diag': _covar_mstep_diag,
'tied': _covar_mstep_tied,
'full': _covar_mstep_full,
}
| bsd-3-clause |
lin-credible/scikit-learn | sklearn/utils/multiclass.py | 92 | 13986 | # Author: Arnaud Joly, Joel Nothman, Hamzeh Alsalhi
#
# License: BSD 3 clause
"""
Multi-class / multi-label utility function
==========================================
"""
from __future__ import division
from collections import Sequence
from itertools import chain
import warnings
from scipy.sparse import issparse
from scipy.sparse.base import spmatrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
import numpy as np
from ..externals.six import string_types
from .validation import check_array
from ..utils.fixes import bincount
def _unique_multiclass(y):
if hasattr(y, '__array__'):
return np.unique(np.asarray(y))
else:
return set(y)
def _unique_sequence_of_sequence(y):
if hasattr(y, '__array__'):
y = np.asarray(y)
return set(chain.from_iterable(y))
def _unique_indicator(y):
return np.arange(check_array(y, ['csr', 'csc', 'coo']).shape[1])
_FN_UNIQUE_LABELS = {
'binary': _unique_multiclass,
'multiclass': _unique_multiclass,
'multilabel-sequences': _unique_sequence_of_sequence,
'multilabel-indicator': _unique_indicator,
}
def unique_labels(*ys):
"""Extract an ordered array of unique labels
We don't allow:
- mix of multilabel and multiclass (single label) targets
- mix of label indicator matrix and anything else,
          (because there are no explicit labels)
- mix of label indicator matrices of different sizes
- mix of string and integer labels
At the moment, we also don't allow "multiclass-multioutput" input type.
Parameters
----------
*ys : array-likes,
Returns
-------
out : numpy array of shape [n_unique_labels]
An ordered array of unique labels.
Examples
--------
>>> from sklearn.utils.multiclass import unique_labels
>>> unique_labels([3, 5, 5, 5, 7, 7])
array([3, 5, 7])
>>> unique_labels([1, 2, 3, 4], [2, 2, 3, 4])
array([1, 2, 3, 4])
>>> unique_labels([1, 2, 10], [5, 11])
array([ 1, 2, 5, 10, 11])
"""
if not ys:
raise ValueError('No argument has been passed.')
# Check that we don't mix label format
ys_types = set(type_of_target(x) for x in ys)
if ys_types == set(["binary", "multiclass"]):
ys_types = set(["multiclass"])
if len(ys_types) > 1:
raise ValueError("Mix type of y not allowed, got types %s" % ys_types)
label_type = ys_types.pop()
# Check consistency for the indicator format
if (label_type == "multilabel-indicator" and
len(set(check_array(y, ['csr', 'csc', 'coo']).shape[1] for y in ys)) > 1):
raise ValueError("Multi-label binary indicator input with "
"different numbers of labels")
# Get the unique set of labels
_unique_labels = _FN_UNIQUE_LABELS.get(label_type, None)
if not _unique_labels:
raise ValueError("Unknown label type: %r" % ys)
ys_labels = set(chain.from_iterable(_unique_labels(y) for y in ys))
# Check that we don't mix string type with number type
if (len(set(isinstance(label, string_types) for label in ys_labels)) > 1):
raise ValueError("Mix of label input types (string and number)")
return np.array(sorted(ys_labels))
def _is_integral_float(y):
return y.dtype.kind == 'f' and np.all(y.astype(int) == y)
def is_label_indicator_matrix(y):
""" Check if ``y`` is in the label indicator matrix format (multilabel).
Parameters
----------
y : numpy array of shape [n_samples] or sequence of sequences
Target values. In the multilabel case the nested sequences can
have variable lengths.
Returns
-------
out : bool,
Return ``True``, if ``y`` is in a label indicator matrix format,
else ``False``.
Examples
--------
>>> import numpy as np
>>> from sklearn.utils.multiclass import is_label_indicator_matrix
>>> is_label_indicator_matrix([0, 1, 0, 1])
False
>>> is_label_indicator_matrix([[1], [0, 2], []])
False
>>> is_label_indicator_matrix(np.array([[1, 0], [0, 0]]))
True
>>> is_label_indicator_matrix(np.array([[1], [0], [0]]))
False
>>> is_label_indicator_matrix(np.array([[1, 0, 0]]))
True
"""
if hasattr(y, '__array__'):
y = np.asarray(y)
if not (hasattr(y, "shape") and y.ndim == 2 and y.shape[1] > 1):
return False
if issparse(y):
if isinstance(y, (dok_matrix, lil_matrix)):
y = y.tocsr()
return (len(y.data) == 0 or np.ptp(y.data) == 0 and
(y.dtype.kind in 'biu' or # bool, int, uint
_is_integral_float(np.unique(y.data))))
else:
labels = np.unique(y)
return len(labels) < 3 and (y.dtype.kind in 'biu' or # bool, int, uint
_is_integral_float(labels))
def is_sequence_of_sequences(y):
""" Check if ``y`` is in the sequence of sequences format (multilabel).
This format is DEPRECATED.
Parameters
----------
y : sequence or array.
Returns
-------
out : bool,
Return ``True``, if ``y`` is a sequence of sequences else ``False``.
"""
# the explicit check for ndarray is for forward compatibility; future
# versions of Numpy might want to register ndarray as a Sequence
try:
if hasattr(y, '__array__'):
y = np.asarray(y)
out = (not hasattr(y[0], '__array__') and isinstance(y[0], Sequence)
and not isinstance(y[0], string_types))
except (IndexError, TypeError):
return False
if out:
warnings.warn('Direct support for sequence of sequences multilabel '
'representation will be unavailable from version 0.17. '
'Use sklearn.preprocessing.MultiLabelBinarizer to '
'convert to a label indicator representation.',
DeprecationWarning)
return out
def is_multilabel(y):
""" Check if ``y`` is in a multilabel format.
Parameters
----------
y : numpy array of shape [n_samples] or sequence of sequences
Target values. In the multilabel case the nested sequences can
have variable lengths.
Returns
-------
out : bool,
Return ``True``, if ``y`` is in a multilabel format, else ```False``.
Examples
--------
>>> import numpy as np
>>> from sklearn.utils.multiclass import is_multilabel
>>> is_multilabel([0, 1, 0, 1])
False
>>> is_multilabel(np.array([[1, 0], [0, 0]]))
True
>>> is_multilabel(np.array([[1], [0], [0]]))
False
>>> is_multilabel(np.array([[1, 0, 0]]))
True
"""
return is_label_indicator_matrix(y) or is_sequence_of_sequences(y)
def type_of_target(y):
"""Determine the type of data indicated by target `y`
Parameters
----------
y : array-like
Returns
-------
target_type : string
One of:
* 'continuous': `y` is an array-like of floats that are not all
integers, and is 1d or a column vector.
* 'continuous-multioutput': `y` is a 2d array of floats that are
not all integers, and both dimensions are of size > 1.
* 'binary': `y` contains <= 2 discrete values and is 1d or a column
vector.
* 'multiclass': `y` contains more than two discrete values, is not a
sequence of sequences, and is 1d or a column vector.
* 'multiclass-multioutput': `y` is a 2d array that contains more
than two discrete values, is not a sequence of sequences, and both
dimensions are of size > 1.
* 'multilabel-sequences': `y` is a sequence of sequences, a 1d
array-like of objects that are sequences of labels.
* 'multilabel-indicator': `y` is a label indicator matrix, an array
of two dimensions with at least two columns, and at most 2 unique
values.
* 'unknown': `y` is array-like but none of the above, such as a 3d
array, or an array of non-sequence objects.
Examples
--------
>>> import numpy as np
>>> type_of_target([0.1, 0.6])
'continuous'
>>> type_of_target([1, -1, -1, 1])
'binary'
>>> type_of_target(['a', 'b', 'a'])
'binary'
>>> type_of_target([1, 0, 2])
'multiclass'
>>> type_of_target(['a', 'b', 'c'])
'multiclass'
>>> type_of_target(np.array([[1, 2], [3, 1]]))
'multiclass-multioutput'
>>> type_of_target(np.array([[1.5, 2.0], [3.0, 1.6]]))
'continuous-multioutput'
>>> type_of_target(np.array([[0, 1], [1, 1]]))
'multilabel-indicator'
"""
valid = ((isinstance(y, (Sequence, spmatrix)) or hasattr(y, '__array__'))
and not isinstance(y, string_types))
if not valid:
raise ValueError('Expected array-like (array or non-string sequence), '
'got %r' % y)
if is_sequence_of_sequences(y):
return 'multilabel-sequences'
elif is_label_indicator_matrix(y):
return 'multilabel-indicator'
try:
y = np.asarray(y)
except ValueError:
# known to fail in numpy 1.3 for array of arrays
return 'unknown'
if y.ndim > 2 or (y.dtype == object and len(y) and
not isinstance(y.flat[0], string_types)):
return 'unknown'
if y.ndim == 2 and y.shape[1] == 0:
return 'unknown'
elif y.ndim == 2 and y.shape[1] > 1:
suffix = '-multioutput'
else:
# column vector or 1d
suffix = ''
# check float and contains non-integer float values:
if y.dtype.kind == 'f' and np.any(y != y.astype(int)):
return 'continuous' + suffix
if len(np.unique(y)) <= 2:
assert not suffix, "2d binary array-like should be multilabel"
return 'binary'
else:
return 'multiclass' + suffix
def _check_partial_fit_first_call(clf, classes=None):
"""Private helper function for factorizing common classes param logic
Estimators that implement the ``partial_fit`` API need to be provided with
the list of possible classes at the first call to partial_fit.
Subsequent calls to partial_fit should check that ``classes`` is still
consistent with a previous value of ``clf.classes_`` when provided.
This function returns True if it detects that this was the first call to
``partial_fit`` on ``clf``. In that case the ``classes_`` attribute is also
set on ``clf``.
"""
if getattr(clf, 'classes_', None) is None and classes is None:
raise ValueError("classes must be passed on the first call "
"to partial_fit.")
elif classes is not None:
if getattr(clf, 'classes_', None) is not None:
if not np.all(clf.classes_ == unique_labels(classes)):
raise ValueError(
"`classes=%r` is not the same as on last call "
"to partial_fit, was: %r" % (classes, clf.classes_))
else:
# This is the first call to partial_fit
clf.classes_ = unique_labels(classes)
return True
# classes is None and clf.classes_ has already previously been set:
# nothing to do
return False
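# --- Illustrative sketch (added for clarity; not part of the original module) ---
# A hedged example of the calling pattern: the first call with `classes` sets
# clf.classes_ and returns True, later consistent calls return False. The
# `_Dummy` class below is a stand-in for an estimator implementing partial_fit.
def _demo_check_partial_fit_first_call():
    class _Dummy(object):
        pass
    clf = _Dummy()
    first = _check_partial_fit_first_call(clf, classes=[0, 1, 2])   # True
    later = _check_partial_fit_first_call(clf, classes=[0, 1, 2])   # False
    return first, later, clf.classes_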
def class_distribution(y, sample_weight=None):
"""Compute class priors from multioutput-multiclass target data
Parameters
----------
y : array like or sparse matrix of size (n_samples, n_outputs)
The labels for each example.
sample_weight : array-like of shape = (n_samples,), optional
Sample weights.
Returns
-------
classes : list of size n_outputs of arrays of size (n_classes,)
List of classes for each column.
    n_classes : list of integers of size n_outputs
Number of classes in each column
class_prior : list of size n_outputs of arrays of size (n_classes,)
Class distribution of each column.
"""
classes = []
n_classes = []
class_prior = []
n_samples, n_outputs = y.shape
if issparse(y):
y = y.tocsc()
y_nnz = np.diff(y.indptr)
for k in range(n_outputs):
col_nonzero = y.indices[y.indptr[k]:y.indptr[k + 1]]
# separate sample weights for zero and non-zero elements
if sample_weight is not None:
nz_samp_weight = np.asarray(sample_weight)[col_nonzero]
zeros_samp_weight_sum = (np.sum(sample_weight) -
np.sum(nz_samp_weight))
else:
nz_samp_weight = None
zeros_samp_weight_sum = y.shape[0] - y_nnz[k]
classes_k, y_k = np.unique(y.data[y.indptr[k]:y.indptr[k + 1]],
return_inverse=True)
class_prior_k = bincount(y_k, weights=nz_samp_weight)
            # An explicit zero was found, combine its weight with the weight
# of the implicit zeros
if 0 in classes_k:
class_prior_k[classes_k == 0] += zeros_samp_weight_sum
            # If there is an implicit zero and it is not in classes and
# class_prior, make an entry for it
if 0 not in classes_k and y_nnz[k] < y.shape[0]:
classes_k = np.insert(classes_k, 0, 0)
class_prior_k = np.insert(class_prior_k, 0,
zeros_samp_weight_sum)
classes.append(classes_k)
n_classes.append(classes_k.shape[0])
class_prior.append(class_prior_k / class_prior_k.sum())
else:
for k in range(n_outputs):
classes_k, y_k = np.unique(y[:, k], return_inverse=True)
classes.append(classes_k)
n_classes.append(classes_k.shape[0])
class_prior_k = bincount(y_k, weights=sample_weight)
class_prior.append(class_prior_k / class_prior_k.sum())
return (classes, n_classes, class_prior)
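# --- Illustrative sketch (added for clarity; not part of the original module) ---
# A hedged example of class_distribution on a small dense multioutput target;
# the numbers are arbitrary.
def _demo_class_distribution():
    y = np.array([[0, 1],
                  [0, 2],
                  [1, 2],
                  [1, 2]])
    classes, n_classes, class_prior = class_distribution(y)
    # classes     -> [array([0, 1]), array([1, 2])]
    # n_classes   -> [2, 2]
    # class_prior -> [array([0.5, 0.5]), array([0.25, 0.75])]
    return classes, n_classes, class_prior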
| bsd-3-clause |
aabadie/scikit-learn | examples/calibration/plot_calibration_multiclass.py | 95 | 6971 | """
==================================================
Probability Calibration for 3-class classification
==================================================
This example illustrates how sigmoid calibration changes predicted
probabilities for a 3-class classification problem. Illustrated is the
standard 2-simplex, where the three corners correspond to the three classes.
Arrows point from the probability vectors predicted by an uncalibrated
classifier to the probability vectors predicted by the same classifier after
sigmoid calibration on a hold-out validation set. Colors indicate the true
class of an instance (red: class 1, green: class 2, blue: class 3).
The base classifier is a random forest classifier with 25 base estimators
(trees). If this classifier is trained on all 800 training datapoints, it is
overly confident in its predictions and thus incurs a large log-loss.
Calibrating an identical classifier, which was trained on 600 datapoints, with
method='sigmoid' on the remaining 200 datapoints reduces the confidence of the
predictions, i.e., moves the probability vectors from the edges of the simplex
towards the center. This calibration results in a lower log-loss. Note that an
alternative would have been to increase the number of base estimators which
would have resulted in a similar decrease in log-loss.
"""
print(__doc__)
# Author: Jan Hendrik Metzen <[email protected]>
# License: BSD Style.
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import make_blobs
from sklearn.ensemble import RandomForestClassifier
from sklearn.calibration import CalibratedClassifierCV
from sklearn.metrics import log_loss
np.random.seed(0)
# Generate data
X, y = make_blobs(n_samples=1000, n_features=2, random_state=42,
cluster_std=5.0)
X_train, y_train = X[:600], y[:600]
X_valid, y_valid = X[600:800], y[600:800]
X_train_valid, y_train_valid = X[:800], y[:800]
X_test, y_test = X[800:], y[800:]
# Train uncalibrated random forest classifier on whole train and validation
# data and evaluate on test data
clf = RandomForestClassifier(n_estimators=25)
clf.fit(X_train_valid, y_train_valid)
clf_probs = clf.predict_proba(X_test)
score = log_loss(y_test, clf_probs)
# Train random forest classifier, calibrate on validation data and evaluate
# on test data
clf = RandomForestClassifier(n_estimators=25)
clf.fit(X_train, y_train)
clf_probs = clf.predict_proba(X_test)
sig_clf = CalibratedClassifierCV(clf, method="sigmoid", cv="prefit")
sig_clf.fit(X_valid, y_valid)
sig_clf_probs = sig_clf.predict_proba(X_test)
sig_score = log_loss(y_test, sig_clf_probs)
# Plot changes in predicted probabilities via arrows
plt.figure(0)
colors = ["r", "g", "b"]
for i in range(clf_probs.shape[0]):
plt.arrow(clf_probs[i, 0], clf_probs[i, 1],
sig_clf_probs[i, 0] - clf_probs[i, 0],
sig_clf_probs[i, 1] - clf_probs[i, 1],
color=colors[y_test[i]], head_width=1e-2)
# Plot perfect predictions
plt.plot([1.0], [0.0], 'ro', ms=20, label="Class 1")
plt.plot([0.0], [1.0], 'go', ms=20, label="Class 2")
plt.plot([0.0], [0.0], 'bo', ms=20, label="Class 3")
# Plot boundaries of unit simplex
plt.plot([0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0], 'k', label="Simplex")
# Annotate points on the simplex
plt.annotate(r'($\frac{1}{3}$, $\frac{1}{3}$, $\frac{1}{3}$)',
xy=(1.0/3, 1.0/3), xytext=(1.0/3, .23), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
plt.plot([1.0/3], [1.0/3], 'ko', ms=5)
plt.annotate(r'($\frac{1}{2}$, $0$, $\frac{1}{2}$)',
xy=(.5, .0), xytext=(.5, .1), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
plt.annotate(r'($0$, $\frac{1}{2}$, $\frac{1}{2}$)',
xy=(.0, .5), xytext=(.1, .5), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
plt.annotate(r'($\frac{1}{2}$, $\frac{1}{2}$, $0$)',
xy=(.5, .5), xytext=(.6, .6), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
plt.annotate(r'($0$, $0$, $1$)',
xy=(0, 0), xytext=(.1, .1), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
plt.annotate(r'($1$, $0$, $0$)',
xy=(1, 0), xytext=(1, .1), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
plt.annotate(r'($0$, $1$, $0$)',
xy=(0, 1), xytext=(.1, 1), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
# Add grid
plt.grid("off")
for x in [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]:
plt.plot([0, x], [x, 0], 'k', alpha=0.2)
plt.plot([0, 0 + (1-x)/2], [x, x + (1-x)/2], 'k', alpha=0.2)
plt.plot([x, x + (1-x)/2], [0, 0 + (1-x)/2], 'k', alpha=0.2)
plt.title("Change of predicted probabilities after sigmoid calibration")
plt.xlabel("Probability class 1")
plt.ylabel("Probability class 2")
plt.xlim(-0.05, 1.05)
plt.ylim(-0.05, 1.05)
plt.legend(loc="best")
print("Log-loss of")
print(" * uncalibrated classifier trained on 800 datapoints: %.3f "
% score)
print(" * classifier trained on 600 datapoints and calibrated on "
"200 datapoint: %.3f" % sig_score)
# Illustrate calibrator
plt.figure(1)
# generate grid over 2-simplex
p1d = np.linspace(0, 1, 20)
p0, p1 = np.meshgrid(p1d, p1d)
p2 = 1 - p0 - p1
p = np.c_[p0.ravel(), p1.ravel(), p2.ravel()]
p = p[p[:, 2] >= 0]
calibrated_classifier = sig_clf.calibrated_classifiers_[0]
prediction = np.vstack([calibrator.predict(this_p)
for calibrator, this_p in
zip(calibrated_classifier.calibrators_, p.T)]).T
prediction /= prediction.sum(axis=1)[:, None]
# Plot modifications of calibrator
for i in range(prediction.shape[0]):
plt.arrow(p[i, 0], p[i, 1],
prediction[i, 0] - p[i, 0], prediction[i, 1] - p[i, 1],
head_width=1e-2, color=colors[np.argmax(p[i])])
# Plot boundaries of unit simplex
plt.plot([0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0], 'k', label="Simplex")
plt.grid("off")
for x in [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]:
plt.plot([0, x], [x, 0], 'k', alpha=0.2)
plt.plot([0, 0 + (1-x)/2], [x, x + (1-x)/2], 'k', alpha=0.2)
plt.plot([x, x + (1-x)/2], [0, 0 + (1-x)/2], 'k', alpha=0.2)
plt.title("Illustration of sigmoid calibrator")
plt.xlabel("Probability class 1")
plt.ylabel("Probability class 2")
plt.xlim(-0.05, 1.05)
plt.ylim(-0.05, 1.05)
plt.show()
| bsd-3-clause |
sppalkia/weld | weld-python/tests/grizzly/core/test_frame.py | 2 | 3167 | """
Test basic DataFrame functionality.
"""
import pandas as pd
import pytest
import weld.grizzly as gr
def get_frames(cls, strings):
"""
Returns two DataFrames for testing binary operators.
The DataFrames have columns of overlapping/different names, types, etc.
"""
df1 = pd.DataFrame({
'name': ['Bob', 'Sally', 'Kunal', 'Deepak', 'James', 'Pratiksha'],
'lastName': ['Kahn', 'Lopez', 'Smith', 'Narayanan', 'Thomas', 'Thaker'],
'age': [20, 30, 35, 20, 50, 35],
'score': [20.0, 30.0, 35.0, 50.0, 35.0, 25.0]
})
df2 = pd.DataFrame({
'firstName': ['Bob', 'Sally', 'Kunal', 'Deepak', 'James', 'Pratiksha'],
'lastName': ['Kahn', 'Lopez', 'smith', 'narayanan', 'Thomas', 'thaker'],
'age': [25, 30, 45, 20, 60, 35],
'scores': [20.0, 30.0, 35.0, 50.0, 35.0, 25.0]
})
if not strings:
df1 = df1.drop(['name', 'lastName'], axis=1)
df2 = df2.drop(['firstName', 'lastName'], axis=1)
return (cls(df1), cls(df2))
def _test_binop(pd_op, gr_op, strings=True):
"""
Test a binary operator.
Binary operators align on column name. For columns that don't exist in both
    DataFrames, the column is filled with NaN (for non-comparison operations)
    or False (for comparison operations).
If the RHS is a Series, the Series should be added to all columns.
"""
df1, df2 = get_frames(pd.DataFrame, strings)
gdf1, gdf2 = get_frames(gr.GrizzlyDataFrame, strings)
expect = pd_op(df1, df2)
result = gr_op(gdf1, gdf2).to_pandas()
assert expect.equals(result)
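# Note (added for clarity; not part of the original test module): a minimal
# pandas-only illustration of the alignment rule described in _test_binop,
# using made-up frames:
#
#     a = pd.DataFrame({'x': [1, 2], 'y': [3, 4]})
#     b = pd.DataFrame({'x': [10, 20], 'z': [5, 6]})
#     a.add(b)   # 'x' is summed; 'y' and 'z' come back as all-NaN columns
#     a.eq(b)    # 'x' compared elementwise; 'y' and 'z' come back all-False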
def test_evaluation():
# Test to make sure that evaluating a DataFrame once caches the result/
# doesn't cause another evaluation.
df1 = gr.GrizzlyDataFrame({
'age': [20, 30, 35, 20, 50, 35],
'score': [20.0, 30.0, 35.0, 50.0, 35.0, 25.0]
})
df2 = gr.GrizzlyDataFrame({
'age': [20, 30, 35, 20, 50, 35],
'scores': [20.0, 30.0, 35.0, 50.0, 35.0, 25.0]
})
df3 = (df1 + df2) * df2 + df1 / df2
assert not df3.is_value
df3.evaluate()
assert df3.is_value
weld_value = df3.weld_value
df3.evaluate()
# The same weld_value should be returned.
assert weld_value is df3.weld_value
def test_add():
_test_binop(pd.DataFrame.add, gr.GrizzlyDataFrame.add, strings=False)
def test_sub():
_test_binop(pd.DataFrame.sub, gr.GrizzlyDataFrame.sub, strings=False)
def test_mul():
_test_binop(pd.DataFrame.mul, gr.GrizzlyDataFrame.mul, strings=False)
def test_div():
_test_binop(pd.DataFrame.div, gr.GrizzlyDataFrame.div, strings=False)
def test_eq():
_test_binop(pd.DataFrame.eq, gr.GrizzlyDataFrame.eq, strings=True)
def test_ne():
_test_binop(pd.DataFrame.ne, gr.GrizzlyDataFrame.ne, strings=True)
def test_le():
_test_binop(pd.DataFrame.le, gr.GrizzlyDataFrame.le, strings=False)
def test_lt():
_test_binop(pd.DataFrame.lt, gr.GrizzlyDataFrame.lt, strings=False)
def test_ge():
_test_binop(pd.DataFrame.ge, gr.GrizzlyDataFrame.ge, strings=False)
def test_gt():
_test_binop(pd.DataFrame.gt, gr.GrizzlyDataFrame.gt, strings=False)
| bsd-3-clause |
tomevans/gps | gps/spgp_routines.py | 1 | 29754 | import sys, os, pdb, time
import numpy as np
import scipy.linalg
import matplotlib
import matplotlib.pyplot as plt
PERTURB = 1e-4  # 1e-3
def random_draw( gp_obj, xmesh=None, emesh=None, conditioned=True, perturb=PERTURB, ndraws=5, \
plot_draws=True, mesh_dim=0, lw=3 ):
"""
SUMMARY
Draws one or more random realisations from the gp and (optionally) plots them,
along with the mean function (black dashed line) and 1- and 2-sigma uncertainty
regions (shaded grey regions).
CALLING
draws = random_draw( gp_obj, xmesh=None, emesh=None, conditioned=True, perturb=PERTURB, \
ndraws=5, plot_draws=True, mesh_dim=0, lw=3 )
INPUTS
'xmesh' [KxD array] - input locations for the random draw points; if set to
None (default), a fine grid spanning the xtrain range will be used.
'emesh' [float] - white noise value for the random draw points; if set to
None (default) or zero, then this will be set to the value of the perturb
variable for numerical stability.
'conditioned' [bool] - if set to True (default), the GP will be trained on the
training data stored in the object; otherwise, it will be drawn from the
unconditioned prior.
'perturb' [float] - small perturbation to be added to the covariance diagonal for
numerical stability if the white noise errors are set to None/zero.
'ndraws' [integer] - the number of random draws to be made.
'plot_draws' [bool] - if set to True, the random draws will be plotted.
'mesh_dim' [integer] - for cases where D>1 (i.e. multidimensional input), a single
input dimension must be specified for the mesh to span; the other input
variables will be held fixed to the corresponding median values in the training
data set.
'lw' [integer] - thickness of plot lines.
OUTPUT
'draws' [list] - a list containing the separate random draws from the GP.
"""
xtrain = gp_obj.xtrain
dtrain = gp_obj.dtrain
etrain = gp_obj.etrain
n = np.shape( xtrain )[0]
d = np.shape( xtrain )[1]
    if xmesh is None:
nmesh = 1000
xmesh_i = np.r_[ xtrain[:,mesh_dim].min() : xtrain[:,mesh_dim].max() : 1j*nmesh ]
xmesh = np.zeros( [ nmesh, d ] )
for i in range( d ):
if i!=mesh_dim:
xmesh[:,i] = np.median( xtrain[:,i] )
else:
xmesh[:,i] = xmesh_i
else:
nmesh = np.shape( xmesh )[0]
if conditioned==True:
print( '\nDrawing from GP posterior (i.e. after being trained on data set)' )
title_str = 'posterior (i.e. trained)'
else:
print( '\nDrawing from GP prior (i.e. not trained on any data set)' )
title_str = 'prior (i.e. untrained)'
mu, cov = meancov( gp_obj, xnew=xmesh, enew=emesh, conditioned=conditioned, perturb=perturb )
sig = np.sqrt( np.diag( cov ).flatten() )
mu = mu.flatten()
sig = sig.flatten()
xmesh_i = xmesh[:,mesh_dim].flatten()
if plot_draws==True:
fig = plt.figure()
ax = fig.add_axes( [ 0.05, 0.05, 0.9, 0.9 ] )
zorder0 = 0
ax.fill_between( xmesh_i, mu-2*sig, mu+2*sig, color=[ 0.8, 0.8, 0.8 ], zorder=zorder0 )
zorder0 = 1
ax.fill_between( xmesh_i, mu-1*sig, mu+1*sig, color=[ 0.6, 0.6, 0.6 ], zorder=zorder0 )
zorder0 = 2
ax.plot( xmesh_i, mu, ls='--', c='g', lw=2, zorder=zorder0 )
ax.set_title('%i random GP draws - %s' % ( ndraws, title_str ) )
# Draw random samples from the GP:
colormap = matplotlib.cm.cool
colormap = plt.cm.ScalarMappable( cmap=colormap )
colormap.set_clim( vmin=0, vmax=1 )
line_colors = np.r_[ 0.05 : 0.95 : 1j*ndraws ]
ax.set_xlim( [ xmesh_i.min(), xmesh_i.max() ] )
draws = []
for i in range( ndraws ):
print( ' drawing %i of %i on a mesh of %i points' % ( i+1, ndraws, nmesh ) )
# The following step can be a computation bottleneck if there are too
# many points on the mesh:
draw = np.random.multivariate_normal( mu, cov )
draws += [ draw ]
if plot_draws==True:
color = colormap.to_rgba( line_colors[i] )
zorder0 = 3
            ax.plot( xmesh_i, draw, ls='-', c=color, lw=lw, zorder=zorder0 )
if ( plot_draws==True )*( conditioned==True ):
dtrain = dtrain.flatten()
zorder0 = 4
xtrain_i = xtrain[:,mesh_dim].flatten()
if n<1000:
marktype = 'o'
elif n<2000:
marktype = '.'
else:
marktype = ','
if ( np.all( etrain==0 ) )+( np.all( etrain==None ) )+( n>=2000 ):
ax.plot( xtrain_i, dtrain, marktype, mec='k', mfc='k', zorder=zorder0 )
else:
errs = etrain + np.zeros( n )
ax.errorbar( xtrain_i, dtrain, yerr=errs, fmt=marktype, mec='k', mfc='k', ecolor='k', \
capsize=0, elinewidth=2, barsabove=True, zorder=zorder0 )
return draws
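# Note (added for clarity; not part of the original module): the gp_obj passed
# into the routines in this module only needs to expose the attributes read
# above -- mfunc/mpars, cfunc/cpars, xtrain, dtrain, etrain and xinduc -- and
# the covariance function must accept cfunc( x, None, **cpars ) as a request
# for the diagonal (per-point prior variances). A hedged end-to-end example
# object is sketched after predictive() below.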
def meancov( gp_obj, xnew=None, enew=None, conditioned=True, perturb=PERTURB ):
"""
SUMMARY
Returns the mean and full covariance of a gp at the locations of xnew, with
random errors enew. If conditioned==True, the gp will be conditioned on the
training data stored in the gp_obj. If etrain==None or etrain==0 (stored within
gp_obj), a perturbation term of magnitude perturb will be added to the diagonal
entries of the training covariance matrix before it is inverted for numerical
stability.
CALLING:
mu, cov = meancov( gp_obj, xnew=None, enew=None, conditioned=True, perturb=PERTURB )
INPUTS
'gp_obj' [gp class object], containing:
'mfunc', 'cfunc' [functions] - mean and covariance functions.
'mpars', 'cpars' [dictionaries] - mean and covariance function parameters.
'xtrain' [NxD array] - training data input locations.
'dtrain' [Nx1 array] - training data values.
'etrain' [float] - white noise value for the training data points.
'xnew' [PxD array] - input locations for the mean and covariance to be evaluated at;
if set to None (default), the values for xtrain will be used.
'enew' [float] - white noise value to be incorporated into the covariance diagonal;
if set to None (default) or zero, it will be set to the value of the perturb
variable for numerical stability.
'conditioned' [bool] - if set to True (default), the gp will be trained on the
training data stored in the object.
'perturb' [float] - small perturbation to be added to the covariance diagonal for
numerical stability if the white noise errors are set to None/zero.
OUTPUT
'mu' [Px1 array] - gp mean function values.
'cov' [PxP array] - gp covariance values.
"""
# Unpack the variables stored in the GP object:
mfunc = gp_obj.mfunc
mpars = gp_obj.mpars
cfunc = gp_obj.cfunc
cpars = gp_obj.cpars
xtrain = gp_obj.xtrain
xinduc = gp_obj.xinduc
dtrain = gp_obj.dtrain
etrain = gp_obj.etrain
n = np.shape( xtrain )[0]
m = np.shape( xinduc )[0]
    if xnew is None:
xnew = xtrain
conditioned = False
p = np.shape( xnew )[0]
# Ensure that etrain is formatted as an array
# and any zero entries replaced with jitter:
if np.ndim( etrain )==0:
if ( etrain==None )+( etrain==0 ):
etrain = perturb*np.ones( n )
else:
ixs = ( etrain==None )
etrain[ixs] = perturb
ixs = ( etrain==0 )
etrain[ixs] = perturb
# Do the same for enew:
if np.ndim( enew )==0:
if ( enew==None ):
enew = np.zeros( p )
else:
ixs = ( enew==None )
enew[ixs] = perturb
ixs = ( enew==0 )
enew[ixs] = perturb
if mfunc==None:
mfunc = zero_mfunc
if mpars==None:
mpars = {}
if cpars==None:
cpars = {}
# Calculate the unconditioned mean and covariance values
# at the new input locations:
mnew = mfunc( xnew, **mpars )
Km = cfunc( xinduc, xinduc, **cpars ) + ( perturb**2. ) * np.eye( m )
Kmp = cfunc( xinduc, xnew, **cpars )
Kmn = cfunc( xinduc, xtrain, **cpars )
knn = cfunc( xtrain, None, **cpars ).flatten()
kpp = cfunc( xnew, None, **cpars ).flatten()
Lm = np.linalg.cholesky( Km )
# The following lines calculate the pxp low-rank projection matrix:
# Qp = (Kmp^T)*(Km^-1)*(Kmp)
Vmp = scipy.linalg.lu_solve( scipy.linalg.lu_factor( Lm ), Kmp )
Qp = np.array( np.matrix( Vmp ).T * Vmp )
qpp = np.diag( Qp )
Deltap = np.diag( kpp - qpp )
sig2Ip = ( enew**2. ) * np.eye( p )
# If we are using the unconditioned GP, we are finished:
if conditioned==False:
mu = np.array( mnew.flatten() )
cov = np.array( Qp + Deltap + sig2Ip )
# If we want to use the conditioned GP, we still have work to do:
else:
mtrain = mfunc( xtrain, **mpars )
resids = dtrain.flatten() - mtrain.flatten()
# The following lines calculate the diagonal of the nxn Gamma matrix,
# as given by Eq C.1. To do this, we make use of the Cholesky identity
# given by Eq B.8. Note that:
# sig2*Gamma = Deltan + sig2*I
# where Deltan is the NxN diagonal matrix used in Eq 2.12.
Lm = np.linalg.cholesky( Km )
Vmn = scipy.linalg.lu_solve( scipy.linalg.lu_factor( Lm ), Kmn )
gnn = 1. + ( knn.flatten() - np.sum( Vmn**2., axis=0 ).flatten() ) / ( etrain**2. )
# To make things more concise, we will divide the rows of the Vmn and
# resids arrays by the square root of the corresponding entries on the
# Gamma matrix diagonal.
# Vmn --> Vmn * (Gamma^-0.5)
# resids --> (Gamma^-0.5) * resids
Vmn = np.matrix( Vmn / np.tile( np.sqrt( gnn ).flatten(), [ m, 1 ] ) )
resids = resids.flatten() / np.sqrt( gnn.flatten() )
resids = np.matrix( np.reshape( resids, [ n, 1 ] ) )
Vmn_resids = np.array( Vmn * resids )
# Now we need to calculate the term involving B^-1 in Eq 2.12, which
# we do using two Cholesky decompositions:
W = np.array( np.linalg.cholesky( ( enew**2. ) * np.eye( m ) + np.array( Vmn*Vmn.T ) ) )
Y = scipy.linalg.lu_solve( scipy.linalg.lu_factor( W ), Vmn_resids )
H = np.linalg.lstsq( Lm, Kmp )[0]
J = scipy.linalg.lu_solve( scipy.linalg.lu_factor( W ), H )
# Finally, we use Eqs 2.9 and 2.12 to calculate the predictive mean and
# covariance matrix of the GP:
mu = np.array( mnew.flatten() + np.array( np.matrix( J ).T * np.matrix( Y ) ).flatten() )
KmpTBinvKmp = ( enew**2. ) * np.array( np.matrix( J ).T * np.matrix( J ) )
cov = np.array( Deltap + sig2Ip + KmpTBinvKmp )
mu = np.reshape( mu, [ p, 1 ] )
cov = np.reshape( cov, [ p, p ] )
return mu, cov
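# Note (added for clarity; not part of the original module): in the
# homoscedastic case the conditioned branch above corresponds to the standard
# SPGP/FITC predictive equations (Snelson & Ghahramani 2006),
#     mu_p  = m(xnew) + (Kmp^T)*(B^-1)*Kmn*(Lambda^-1)*( dtrain - m(xtrain) )
#     Cov_p = diag( kpp - qpp ) + (enew^2)*I + (Kmp^T)*(B^-1)*Kmp
# with qab denoting the low-rank terms (Kma^T)*(Km^-1)*Kmb,
# Lambda = diag( knn - qnn ) + (etrain^2)*I and B = Km + Kmn*(Lambda^-1)*(Kmn^T);
# the Cholesky factors Lm and W are used so that B^-1 is never formed explicitly.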
def predictive( gp_obj, xnew=None, enew=None, conditioned=True, perturb=PERTURB ):
"""
SUMMARY
Returns the predictive mean and standard deviation of a gp. If conditioned==True,
the gp will be conditioned on the training data stored in the gp_obj. If
etrain==None or etrain==0 (stored within gp_obj), a perturbation term of magnitude
perturb will be added to the diagonal entries of the training covariance matrix
before it is inverted for numerical stability. This routine is very similar to
meancov, except that it only calculates the diagonal entries of the conditioned
gp's covariance matrix to save time.
CALLING:
mu, sig = predictive( gp_obj, xnew=None, enew=None, conditioned=True, perturb=PERTURB )
INPUTS:
'gp_obj' [gp class object], containing:
'mfunc', 'cfunc' [functions] - mean and covariance functions.
'mpars', 'cpars' [dictionaries] - mean and covariance function parameters.
'xtrain' [NxD array] - training data input locations.
'dtrain' [Nx1 array] - training data values.
'etrain' [float] - white noise value for the training data points.
'xnew' [PxD array] - input locations for the mean and covariance to be evaluated at;
if set to None (default), the values for xtrain will be used.
'enew' [float] - white noise value to be incorporated into the covariance diagonal;
if set to None (default) or zero, it will be set to the value of the perturb
variable for numerical stability.
'conditioned' [bool] - if set to True (default), the gp will be trained on the
training data stored in the object.
'perturb' [float] - small perturbation to be added to the covariance diagonal for
numerical stability if the white noise errors are set to None/zero.
OUTPUT:
'mu' [Px1 array] - gp mean function values.
'sig' [Px1 array] - 1-sigma marginalised uncertainties, i.e. the square roots of
the entries along the diagonal of the full covariance matrix.
"""
# Unpack the variables stored in the GP object:
mfunc = gp_obj.mfunc
mpars = gp_obj.mpars
cfunc = gp_obj.cfunc
cpars = gp_obj.cpars
xtrain = gp_obj.xtrain
xinduc = gp_obj.xinduc
dtrain = gp_obj.dtrain
etrain = gp_obj.etrain
n = np.shape( xtrain )[0]
m = np.shape( xinduc )[0]
    if mfunc is None:
        mfunc = zero_mfunc
    if mpars is None:
        mpars = {}
    if cpars is None:
        cpars = {}
    if xnew is None:
        xnew = xtrain
        conditioned = False
    p = np.shape( xnew )[0]
# Ensure that etrain is formatted as an array
# and any zero entries replaced with jitter:
if np.ndim( etrain )==0:
if ( etrain==None )+( etrain==0 ):
etrain = perturb*np.ones( n )
else:
ixs = ( etrain==None )
etrain[ixs] = perturb
ixs = ( etrain==0 )
etrain[ixs] = perturb
# Do the same for enew:
if np.ndim( enew )==0:
if ( enew==None ):
enew = np.zeros( p )
else:
ixs = ( enew==None )
enew[ixs] = perturb
ixs = ( enew==0 )
enew[ixs] = perturb
# Calculate the unconditioned mean and covariance values
# at the new input locations:
mnew = mfunc( xnew, **mpars )
kpp = cfunc( xnew, None, **cpars ).flatten()
# If we are using the unconditioned GP, we are finished:
if conditioned==False:
mu = mnew.flatten()
sig = np.sqrt( kpp.flatten() + ( enew**2. ) )
# If we want to use the conditioned GP, we still have work to do:
else:
mtrain = mfunc( xtrain, **mpars )
Km = cfunc( xinduc, xinduc, **cpars ) + ( perturb**2. ) * np.eye( m )
Kmn = cfunc( xinduc, xtrain, **cpars )
Kmp = cfunc( xinduc, xnew, **cpars )
knn = cfunc( xtrain, None, **cpars ).flatten()
resids = dtrain.flatten() - mtrain.flatten()
# The following lines calculate the diagonal of the NxN Gamma matrix,
# as given by Eq C.1. To do this, we make use of the Cholesky identity
# given by Eq B.8. Note that:
# sig2*Gamma = Delta + sig2*I
# where Delta is the diagonal matrix used in Eq 2.12.
Lm = np.linalg.cholesky( Km )
Vmn = scipy.linalg.lu_solve( scipy.linalg.lu_factor( Lm ), Kmn )
# Diagonal of QN:
Qnn_diag = np.sum( Vmn**2., axis=0 ).flatten()
# Diagonal of the D=sig2*Gamma matrix:
D_diag = knn - Qnn_diag + etrain**2.
# To make things more concise, we will divide the rows of the Vmn and
# resids arrays by the square root of the corresponding entries on the
# Gamma matrix diagonal.
# Vmn --> Vmn * (Gamma^-0.5)
# resids --> (Gamma^-0.5) * resids
Vmn = np.matrix( Vmn / np.tile( np.sqrt( D_diag ).flatten(), [ m, 1 ] ) )
resids = resids.flatten() / np.sqrt( D_diag.flatten() )
resids = np.matrix( np.reshape( resids, [ n, 1 ] ) )
Vmn_resids = np.array( Vmn * resids )
# Now we need to calculate the terms involving B^-1 in Eq 2.12, which
# we do using two Cholesky decompositions:
W = np.array( np.linalg.cholesky( np.eye( m ) + np.array( Vmn*Vmn.T ) ) )
Y = scipy.linalg.lu_solve( scipy.linalg.lu_factor( W ), Vmn_resids )
H = np.linalg.lstsq( Lm, Kmp )[0]
J = scipy.linalg.lu_solve( scipy.linalg.lu_factor( W ), H )
# Finally, we use Eq 2.12 to calculate the predictive mean and standard
# deviation of the GP:
mu = mnew.flatten() + np.array( np.matrix( J ).T * np.matrix( Y ) ).flatten()
sig = np.sqrt( kpp.flatten() + ( enew**2. ) \
- np.sum( H**2., axis=0 ).flatten() \
+ np.sum( J**2., axis=0 ).flatten() )
# Note that:
# np.sum( H**2., axis=0 ) = diagonal of (H^T)*H
# np.sum( J**2., axis=0 ) = diagonal of (J^T)*J
mu = np.reshape( mu, [ p, 1 ] )
sig = np.reshape( sig, [ p, 1 ] )
return mu, sig
def logp_builtin( gp_obj, perturb=None ):
"""
Uses the contents of the gp object to calculate its log likelihood. The
logp() routine is actually used to perform the calculation. Note that
the latter can be called directly if for some reason it is preferable to
do the precomputations separately outside the routine.
"""
xtrain = gp_obj.xtrain
dtrain = gp_obj.dtrain
etrain = gp_obj.etrain
xinduc = gp_obj.xinduc
mfunc = gp_obj.mfunc
mpars = gp_obj.mpars
cfunc = gp_obj.cfunc
cpars = gp_obj.cpars
n = np.shape( dtrain )[0]
m = np.shape( xinduc )[0]
    if mpars is None:
        mpars = {}
    if cpars is None:
        cpars = {}
# Ensure that etrain is formatted as an array
# and any zero entries replaced with jitter:
if np.ndim( etrain )==0:
if ( etrain==None )+( etrain==0 ):
etrain = perturb*np.ones( n )
else:
ixs = ( etrain==None )
etrain[ixs] = perturb
ixs = ( etrain==0 )
etrain[ixs] = perturb
    if mfunc is None:
        mfunc = zero_mfunc
mu = mfunc( xtrain, **mpars )
resids = dtrain.flatten() - mu.flatten()
resids = np.reshape( resids, [ n, 1 ] )
    if xinduc is None:
print( 'Must specify inducing inputs (xinduc)' )
pdb.set_trace()
Km = cfunc( xinduc, xinduc, **cpars )
Kmn = cfunc( xinduc, xtrain, **cpars )
knn = cfunc( xtrain, None, **cpars )
loglikelihood = logp( resids, Km, Kmn, knn, etrain, perturb=perturb )
return loglikelihood
def logp( resids=None, Km=None, Kmn=None, knn=None, sigw=None, perturb=PERTURB ):
"""
SUMMARY
Evaluates the log likelihood of residuals that are assumed to be generated by a
gp with a specified covariance. The mean and covariance are passed directly into
the function as inputs, to allow flexibility in how they are actually computed.
This can be useful when repeated evaluations of logp are required (eg. likelihood
maximisation or MCMC), as it may be possible to optimise how these precomputations
are done outside the function.
The loglikelihood is calculated according to:
loglikelihood = -0.5*n*np.log( 2*np.pi ) - 0.5*L1 - 0.5*L2
where 'n' is the number of data points and:
L1 = logdet[ (Kmm^-1)*( Kmm+Kmn*(W^-1)*(Kmn^T) ) ] - logdet(W)
L2 = norm[ V*r ]^2 - norm[ (U^-1)*Kmn*(W^-1)*r ]^2
W = diag[ Knn - (Kmn^T)*(Km^-1)*Kmn ] + (sigw^2)*I
V*(V^T) = W
U*(U^T) = (Kmn^T)*(Km^-1)*Kmn + W
    CALLING
      loglikelihood = logp( resids=resids, Km=Km, Kmn=Kmn, knn=knn, sigw=sigw, perturb=PERTURB )
    INPUTS
      'resids' [Nx1 array] - residuals between the training data and the gp mean function.
      'Km' [MxM array] - the covariance matrix between the inducing inputs.
      'Kmn' [MxN array] - the cross-covariance matrix between the inducing inputs and the
          training inputs.
      'knn' [Nx1 array] - the diagonal entries of the covariance matrix between the training
          inputs.
      'sigw' [Nx1 array or float] - white noise value to be incorporated into the covariance diagonal;
          if set to None or zero, it will be set to the value of the perturb variable
          for numerical stability.
      'perturb' [float] - small perturbation to be added to the covariance diagonal for
          numerical stability if the white noise errors are set to None/zero.
OUTPUT
'loglikelihood' [float] - the gp log likelihood.
"""
    # Unpack and prepare:
    n = np.shape( Kmn )[1] # number of data points
    m = np.shape( Kmn )[0] # number of inducing variables
    # Convert sigw to an array and replace any zero entries with jitter
    # (n must already be defined for the scalar case above):
    if np.ndim( sigw )==0:
        if ( sigw==None )+( sigw==0 ):
            sigw = perturb*np.ones( n )
    else:
        ixs = ( sigw==None )
        sigw[ixs] = perturb
        ixs = ( sigw==0 )
        sigw[ixs] = perturb
Km = np.matrix( Km + ( perturb**2. ) * np.eye( m ) )
Kmn = np.matrix( Kmn )
knn = ( knn + perturb**2. ).flatten()
r = np.reshape( resids, [ n, 1 ] )
Sig2_diag = sigw**2.
# Calculate the diagonal entries of the Qnn matrix, where:
# Qnn = (Kmn^T)*(Kmm^-1)*Kmn
H = np.linalg.cholesky( Km )
V = np.array( scipy.linalg.lu_solve( scipy.linalg.lu_factor( H ), Kmn ) )
Qnn_diag = np.sum( V**2., axis=0 )
# Generate an array holding the diagonal entries of the D matrix, where:
# D = Qnn + diag[ Knn - Qnn ]
D_diag = ( knn - Qnn_diag + Sig2_diag ).flatten()
# Convert V to V*(D^-0.5) and compute V*(D^-1)*V:
V = np.matrix( V/np.tile( np.sqrt( D_diag ), [ m, 1 ] ) )
VVT = V*V.T
# Convert r to (D^-0.5)*r and compute (r^T)*(D^-1)*r:
r = np.matrix( np.reshape( r.flatten()/np.sqrt( D_diag ), [ n, 1 ] ) )
# To obtain L1, compute:
# L1 = 0.5*logdet(B) + 0.5*logdet(D)
# where:
# B*(B^T) = I + V*(V^T)
# = I + (H^-1)*Kmn*(D^-1)*(Kmn^T)*(H^-T)
# = (H^-1)*[ Kmm + Kmn*(D^-1)*(Kmn^T) ]*(H^-T)
# = (H^-1)*[ Kmm + Kmn*(D^-1)*(Kmn^T) ]*(Km^-1)*H
# det[ (H^-1)*[ Kmm + Kmn*(D^-1)*(Kmn^T) ]*(Km^-1)*H ] = prod[ diag(B)^2 ]
# (this is a standard result of the Cholesky decomposition)
# --> logdet[ ( Kmm + Kmn*(D^-1)*(Kmn^T) )*(Km^-1) ] = 2*sum[ diag(B) ]
# (using standard results det[ X*Y ]=det[X]*det[Y] and det[X^-1]=1/det[X])
B = np.linalg.cholesky( np.matrix( np.eye( m ) ) + VVT )
logdetB = 2*np.sum( np.log( np.diag( B ) ) )
logdetD = np.sum( np.log( D_diag ) )
L1 = 0.5*( logdetB + logdetD )
# To obtain L2, compute:
# L2 = 0.5*(r^T)*r - 0.5*(Y^T)*Y
# where:
# (Y^T)*Y = (r^T)*(D^-0.5)*(Z^T)*Z*(D^0.5)*r
# Z = (B^-1)*V*(D^-0.5)
# = (B^-1)*(H^-1)*Kmn*(D^-0.5)
# = (B^-1)*(H^-1)*Kmn*(D^-0.5)
# Z^T = (D^-0.5)*(Kmn^T)*(H^-T)*(B^-T)
# so that:
# (Y^T)*Y = (r^T)*(D^-1)*(Kmn^T)*(H^-T)*(B^-T)*(B^-1)*(H^-1)*Kmn*(D^-1)*r
# = norm[ H*B*Kmn*(D^-1)*r ]^2
# as it can be verified that:
# (H*B)*[(H*B)^T] = Kmm + Kmn*(D^-1)*(Kmn^T)
# so that:
# (H^-T)*(B^-T)*(B^-1)*(H^-1) = (Kmm + Kmn*(D^-1)*(Kmn^T))^-1
rTr = float( r.T*r )
Z = np.matrix( scipy.linalg.lu_solve( scipy.linalg.lu_factor( B ), V ) )
Y = Z*r
YTY = float( Y.T*Y )
L2 = 0.5*( rTr - YTY )
L3 = 0.5*n*np.log( 2*np.pi )
return -float( L1 + L2 + L3 )
def prep_fixedcov( gp_obj, perturb=PERTURB ):
"""
Prepares a dictionary containing variables that remain unchanged in calculating
the log likelihood when the covariance parameters are fixed. The usage of this
routine is along the lines of:
>> resids = data - model
>> kwpars = gp.prep_fixedcov()
>> logp = gp.logp_fixedcov( resids=resids, kwpars=kwpars )
"""
# Unpack the variables stored in the GP object:
mfunc = gp_obj.mfunc
mpars = gp_obj.mpars
cfunc = gp_obj.cfunc
cpars = gp_obj.cpars
xtrain = gp_obj.xtrain
xinduc = gp_obj.xinduc
dtrain = gp_obj.dtrain
sigw = gp_obj.etrain
Kmn = cfunc( xinduc, xtrain, **cpars )
n = np.shape( Kmn )[1] # number of data points
m = np.shape( Kmn )[0] # number of inducing variables
Km = cfunc( xinduc, xinduc, **cpars ) + ( perturb**2. ) * np.eye( m )
knn = cfunc( xtrain, None, **cpars ).flatten()
knn = ( knn + perturb**2. ).flatten()
# Convert sigw to an array and replace any zero
# entries with jitter:
if np.ndim( sigw )==0:
if ( sigw==None )+( sigw==0 ):
sigw = perturb*np.ones( n )
else:
ixs = ( sigw==None )
sigw[ixs] = perturb
ixs = ( sigw==0 )
sigw[ixs] = perturb
Sig2_diag = sigw**2.
# Calculate the diagonal entries of the Qnn matrix, where:
# Qnn = (Kmn^T)*(Kmm^-1)*Kmn
H = np.linalg.cholesky( Km )
V = np.array( scipy.linalg.lu_solve( scipy.linalg.lu_factor( H ), Kmn ) )
Qnn_diag = np.sum( V**2., axis=0 )
# Generate an array holding the diagonal entries of the D matrix, where:
# D = Qnn + diag[ Knn - Qnn ]
D_diag = ( knn - Qnn_diag + Sig2_diag ).flatten()
# CHECK THIS IS DOING THE RIGHT THING:
# Convert V to V*(D^-0.5) and compute V*(D^-1)*V:
V = np.matrix( V/np.tile( np.sqrt( D_diag ), [ m, 1 ] ) )
VVT = V*V.T
# To obtain L1, compute:
# L1 = 0.5*logdet(B) + 0.5*logdet(D)
# where:
# B*(B^T) = I + V*(V^T)
# = I + (H^-1)*Kmn*(D^-1)*(Kmn^T)*(H^-T)
# = (H^-1)*[ Kmm + Kmn*(D^-1)*(Kmn^T) ]*(H^-T)
# = (H^-1)*[ Kmm + Kmn*(D^-1)*(Kmn^T) ]*(Km^-1)*H
# det[ (H^-1)*[ Kmm + Kmn*(D^-1)*(Kmn^T) ]*(Km^-1)*H ] = prod[ diag(B)^2 ]
# (the above is a standard result of the Cholesky decomposition)
# --> logdet[ ( Kmm + Kmn*(D^-1)*(Kmn^T) )*(Km^-1) ] = 2*sum[ diag(B) ]
# (using standard results det[ X*Y ]=det[X]*det[Y] and det[X^-1]=1/det[X])
B = np.linalg.cholesky( np.matrix( np.eye( m ) ) + VVT )
logdetB = 2*np.sum( np.log( np.diag( B ) ) )
logdetD = np.sum( np.log( D_diag ) )
L1 = 0.5*( logdetB + logdetD )
Z = np.matrix( scipy.linalg.lu_solve( scipy.linalg.lu_factor( B ), V ) )
L3 = 0.5*n*np.log( 2*np.pi )
sqrt_D_diag = np.reshape( np.sqrt( D_diag ), [ n, 1 ] )
kwpars = { 'L1':L1, 'L3':L3, 'Z':Z, 'sqrt_D_diag':sqrt_D_diag }
return kwpars
def logp_fixedcov( resids=None, kwpars=None ):
"""
    Calculates the log likelihood using a specific dictionary of arguments that
are generated using the prep_fixedcov() routine. This routine is used to
avoid re-calculating the components of the log likelihood that remain
unchanged if the covariance parameters are fixed, which can potentially
save time for things like type-II maximum likelihood. The usage of this
routine is along the lines of:
>> resids = data - model
>> kwpars = gp.prep_fixedcov()
>> logp = gp.logp_fixedcov( resids=resids, kwpars=kwpars )
"""
L1 = kwpars['L1']
L3 = kwpars['L3']
Z = kwpars['Z']
sqrt_D_diag = kwpars['sqrt_D_diag']
r = np.matrix( resids/sqrt_D_diag )
# rTr should be rT*(D^(-1))*r
rTr = float( r.T*r )
Y = Z*r
YTY = float( Y.T*Y )
L2 = 0.5*( rTr - YTY )
return -float( L1 + L2 + L3 )
def prep_fixedcov_OLD( gp_obj, perturb=PERTURB ):
"""
Prepares a dictionary containing variables that remain unchanged in calculating
the log likelihood when the covariance parameters are fixed. The usage of this
routine is along the lines of:
>> resids = data - model
>> kwpars = gp.prep_fixedcov()
>> logp = gp.logp_fixedcov( resids=resids, kwpars=kwpars )
"""
    # Compute the covariance blocks first, so that n is available for the
    # white noise handling below:
    Km = gp_obj.cfunc( gp_obj.xinduc, gp_obj.xinduc, **gp_obj.cpars )
    Kmn = gp_obj.cfunc( gp_obj.xinduc, gp_obj.xtrain, **gp_obj.cpars )
    knn = gp_obj.cfunc( gp_obj.xtrain, None, **gp_obj.cpars )
    n = np.shape( Kmn )[1]
    m = np.shape( Kmn )[0]
    # Ensure that etrain is formatted as an array
    # and any zero entries replaced with jitter:
    etrain = gp_obj.etrain
    if np.ndim( etrain )==0:
        if ( etrain==None )+( etrain==0 ):
            etrain = perturb*np.ones( n )
    else:
        ixs = ( etrain==None )
        etrain[ixs] = perturb
        ixs = ( etrain==0 )
        etrain[ixs] = perturb
Km = np.matrix( Km + ( perturb**2. ) * np.eye( m ) )
Kmn = np.matrix( Kmn )
knn = np.matrix( knn + perturb**2. )
L = np.linalg.cholesky( Km )
Vmn = np.matrix( scipy.linalg.lu_solve( scipy.linalg.lu_factor( L ), Kmn ) )
gnn = 1. + ( knn.flatten() - np.sum( np.power( Vmn, 2. ), axis=0 ) ) / ( etrain**2. )
gnn = np.reshape( gnn, [ n, 1 ] )
Vmn = Vmn / np.tile( np.sqrt( gnn ).T, [ m, 1 ] )
VmnVmnT = Vmn * Vmn.T
W = np.linalg.cholesky( np.matrix( ( etrain**2. ) * np.eye( m ) ) + VmnVmnT )
Z = scipy.linalg.lu_solve( scipy.linalg.lu_factor( W ), Vmn )
Z = np.matrix( Z )
L1 = 0.5 * ( 2 * np.sum( np.log( np.diag( W ) ) ) + np.sum( np.log( gnn ) ) \
+ ( n-m ) * np.log( gp_obj.etrain**2. ) )
L3 = 0.5*n*np.log( 2*np.pi )
kwpars = { 'L1':L1, 'L3':L3, 'gnn':gnn, 'Z':Z, 'sigw':etrain }
return kwpars
def zero_mfunc( x, **kwargs ):
"""
A simple zero mean function, used whenever mfunc==None in
any of the above routines. It takes an [NxD] array as input
and returns an [Nx1] array of zeros.
"""
n = np.shape( x )[0]
return np.zeros( [ n, 1 ] )
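# ---------------------------------------------------------------------------
# Editor's note: the block below is an illustrative usage sketch, not part of
# the original module. It exercises logp() on toy data with a squared
# exponential covariance; the helper _sqexp_cfunc() and its parameter names
# (amp, scale) are assumptions made for this demo only.
if __name__ == '__main__':

    def _sqexp_cfunc( x, y, amp=1.0, scale=0.3 ):
        # Return the full covariance matrix, or only its diagonal as an Nx1
        # array when y is None, matching the cfunc convention of this module:
        if y is None:
            return amp**2. * np.ones( [ np.shape( x )[0], 1 ] )
        d2 = ( x[:,None,0] - y[None,:,0] )**2.
        return amp**2. * np.exp( -0.5 * d2 / ( scale**2. ) )

    _xtrain = np.linspace( 0., 1., 40 )[:,None]
    _xinduc = np.linspace( 0., 1., 8 )[:,None]
    _dtrain = np.sin( 2 * np.pi * _xtrain ) + 0.1 * np.random.randn( 40, 1 )
    _resids = _dtrain - zero_mfunc( _xtrain )
    _Km = _sqexp_cfunc( _xinduc, _xinduc )
    _Kmn = _sqexp_cfunc( _xinduc, _xtrain )
    _knn = _sqexp_cfunc( _xtrain, None )
    # Log likelihood of the toy residuals under the sparse gp approximation:
    print( logp( resids=_resids, Km=_Km, Kmn=_Kmn, knn=_knn,
                 sigw=0.1 * np.ones( 40 ), perturb=1e-4 ) )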
| gpl-2.0 |
vibhorag/scikit-learn | sklearn/manifold/t_sne.py | 48 | 20644 | # Author: Alexander Fabisch -- <[email protected]>
# License: BSD 3 clause (C) 2014
# This is the standard t-SNE implementation. There are faster modifications of
# the algorithm:
# * Barnes-Hut-SNE: reduces the complexity of the gradient computation from
# N^2 to N log N (http://arxiv.org/abs/1301.3342)
# * Fast Optimization for t-SNE:
# http://cseweb.ucsd.edu/~lvdmaaten/workshops/nips2010/papers/vandermaaten.pdf
import numpy as np
from scipy import linalg
from scipy.spatial.distance import pdist
from scipy.spatial.distance import squareform
from ..base import BaseEstimator
from ..utils import check_array
from ..utils import check_random_state
from ..utils.extmath import _ravel
from ..decomposition import RandomizedPCA
from ..metrics.pairwise import pairwise_distances
from . import _utils
MACHINE_EPSILON = np.finfo(np.double).eps
def _joint_probabilities(distances, desired_perplexity, verbose):
"""Compute joint probabilities p_ij from distances.
Parameters
----------
distances : array, shape (n_samples * (n_samples-1) / 2,)
Distances of samples are stored as condensed matrices, i.e.
we omit the diagonal and duplicate entries and store everything
in a one-dimensional array.
desired_perplexity : float
Desired perplexity of the joint probability distributions.
verbose : int
Verbosity level.
Returns
-------
P : array, shape (n_samples * (n_samples-1) / 2,)
Condensed joint probability matrix.
"""
# Compute conditional probabilities such that they approximately match
# the desired perplexity
conditional_P = _utils._binary_search_perplexity(
distances, desired_perplexity, verbose)
P = conditional_P + conditional_P.T
sum_P = np.maximum(np.sum(P), MACHINE_EPSILON)
P = np.maximum(squareform(P) / sum_P, MACHINE_EPSILON)
return P
def _kl_divergence(params, P, alpha, n_samples, n_components):
"""t-SNE objective function: KL divergence of p_ijs and q_ijs.
Parameters
----------
params : array, shape (n_params,)
Unraveled embedding.
P : array, shape (n_samples * (n_samples-1) / 2,)
Condensed joint probability matrix.
alpha : float
Degrees of freedom of the Student's-t distribution.
n_samples : int
Number of samples.
n_components : int
Dimension of the embedded space.
Returns
-------
kl_divergence : float
Kullback-Leibler divergence of p_ij and q_ij.
grad : array, shape (n_params,)
Unraveled gradient of the Kullback-Leibler divergence with respect to
the embedding.
"""
X_embedded = params.reshape(n_samples, n_components)
# Q is a heavy-tailed distribution: Student's t-distribution
n = pdist(X_embedded, "sqeuclidean")
n += 1.
n /= alpha
n **= (alpha + 1.0) / -2.0
Q = np.maximum(n / (2.0 * np.sum(n)), MACHINE_EPSILON)
# Optimization trick below: np.dot(x, y) is faster than
# np.sum(x * y) because it calls BLAS
# Objective: C (Kullback-Leibler divergence of P and Q)
kl_divergence = 2.0 * np.dot(P, np.log(P / Q))
# Gradient: dC/dY
grad = np.ndarray((n_samples, n_components))
PQd = squareform((P - Q) * n)
for i in range(n_samples):
np.dot(_ravel(PQd[i]), X_embedded[i] - X_embedded, out=grad[i])
grad = grad.ravel()
c = 2.0 * (alpha + 1.0) / alpha
grad *= c
return kl_divergence, grad
def _gradient_descent(objective, p0, it, n_iter, n_iter_without_progress=30,
momentum=0.5, learning_rate=1000.0, min_gain=0.01,
min_grad_norm=1e-7, min_error_diff=1e-7, verbose=0,
args=None):
"""Batch gradient descent with momentum and individual gains.
Parameters
----------
objective : function or callable
Should return a tuple of cost and gradient for a given parameter
vector.
p0 : array-like, shape (n_params,)
Initial parameter vector.
it : int
Current number of iterations (this function will be called more than
once during the optimization).
n_iter : int
Maximum number of gradient descent iterations.
n_iter_without_progress : int, optional (default: 30)
Maximum number of iterations without progress before we abort the
optimization.
momentum : float, within (0.0, 1.0), optional (default: 0.5)
The momentum generates a weight for previous gradients that decays
exponentially.
learning_rate : float, optional (default: 1000.0)
The learning rate should be extremely high for t-SNE! Values in the
range [100.0, 1000.0] are common.
min_gain : float, optional (default: 0.01)
Minimum individual gain for each parameter.
min_grad_norm : float, optional (default: 1e-7)
If the gradient norm is below this threshold, the optimization will
be aborted.
min_error_diff : float, optional (default: 1e-7)
If the absolute difference of two successive cost function values
is below this threshold, the optimization will be aborted.
verbose : int, optional (default: 0)
Verbosity level.
args : sequence
Arguments to pass to objective function.
Returns
-------
p : array, shape (n_params,)
Optimum parameters.
error : float
Optimum.
i : int
Last iteration.
"""
if args is None:
args = []
p = p0.copy().ravel()
update = np.zeros_like(p)
gains = np.ones_like(p)
error = np.finfo(np.float).max
best_error = np.finfo(np.float).max
best_iter = 0
for i in range(it, n_iter):
new_error, grad = objective(p, *args)
error_diff = np.abs(new_error - error)
error = new_error
grad_norm = linalg.norm(grad)
if error < best_error:
best_error = error
best_iter = i
elif i - best_iter > n_iter_without_progress:
if verbose >= 2:
print("[t-SNE] Iteration %d: did not make any progress "
"during the last %d episodes. Finished."
% (i + 1, n_iter_without_progress))
break
if min_grad_norm >= grad_norm:
if verbose >= 2:
print("[t-SNE] Iteration %d: gradient norm %f. Finished."
% (i + 1, grad_norm))
break
if min_error_diff >= error_diff:
if verbose >= 2:
print("[t-SNE] Iteration %d: error difference %f. Finished."
% (i + 1, error_diff))
break
inc = update * grad >= 0.0
dec = np.invert(inc)
gains[inc] += 0.05
gains[dec] *= 0.95
        np.clip(gains, min_gain, np.inf, out=gains)
grad *= gains
update = momentum * update - learning_rate * grad
p += update
if verbose >= 2 and (i + 1) % 10 == 0:
print("[t-SNE] Iteration %d: error = %.7f, gradient norm = %.7f"
% (i + 1, error, grad_norm))
return p, error, i
def trustworthiness(X, X_embedded, n_neighbors=5, precomputed=False):
"""Expresses to what extent the local structure is retained.
The trustworthiness is within [0, 1]. It is defined as
.. math::
        T(k) = 1 - \frac{2}{nk (2n - 3k - 1)} \sum^n_{i=1}
            \sum_{j \in U^{(k)}_i} (r(i, j) - k)
where :math:`r(i, j)` is the rank of the embedded datapoint j
according to the pairwise distances between the embedded datapoints,
:math:`U^{(k)}_i` is the set of points that are in the k nearest
neighbors in the embedded space but not in the original space.
* "Neighborhood Preservation in Nonlinear Projection Methods: An
Experimental Study"
J. Venna, S. Kaski
* "Learning a Parametric Embedding by Preserving Local Structure"
L.J.P. van der Maaten
Parameters
----------
X : array, shape (n_samples, n_features) or (n_samples, n_samples)
If the metric is 'precomputed' X must be a square distance
matrix. Otherwise it contains a sample per row.
X_embedded : array, shape (n_samples, n_components)
Embedding of the training data in low-dimensional space.
n_neighbors : int, optional (default: 5)
Number of neighbors k that will be considered.
precomputed : bool, optional (default: False)
Set this flag if X is a precomputed square distance matrix.
Returns
-------
trustworthiness : float
Trustworthiness of the low-dimensional embedding.
"""
if precomputed:
dist_X = X
else:
dist_X = pairwise_distances(X, squared=True)
dist_X_embedded = pairwise_distances(X_embedded, squared=True)
ind_X = np.argsort(dist_X, axis=1)
ind_X_embedded = np.argsort(dist_X_embedded, axis=1)[:, 1:n_neighbors + 1]
n_samples = X.shape[0]
t = 0.0
ranks = np.zeros(n_neighbors)
for i in range(n_samples):
for j in range(n_neighbors):
ranks[j] = np.where(ind_X[i] == ind_X_embedded[i, j])[0][0]
ranks -= n_neighbors
t += np.sum(ranks[ranks > 0])
t = 1.0 - t * (2.0 / (n_samples * n_neighbors *
(2.0 * n_samples - 3.0 * n_neighbors - 1.0)))
return t
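# Editor's note: a brief, hedged usage sketch for trustworthiness() above
# (illustration only, not part of the original module). The random data is
# arbitrary and the import path simply reflects this module's location:
#
#   >>> import numpy as np
#   >>> from sklearn.manifold import TSNE
#   >>> from sklearn.manifold.t_sne import trustworthiness
#   >>> X = np.random.RandomState(0).rand(50, 10)
#   >>> X_embedded = TSNE(n_components=2, random_state=0).fit_transform(X)
#   >>> t = trustworthiness(X, X_embedded, n_neighbors=5)  # score in [0, 1]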
class TSNE(BaseEstimator):
"""t-distributed Stochastic Neighbor Embedding.
t-SNE [1] is a tool to visualize high-dimensional data. It converts
similarities between data points to joint probabilities and tries
to minimize the Kullback-Leibler divergence between the joint
probabilities of the low-dimensional embedding and the
high-dimensional data. t-SNE has a cost function that is not convex,
i.e. with different initializations we can get different results.
It is highly recommended to use another dimensionality reduction
method (e.g. PCA for dense data or TruncatedSVD for sparse data)
to reduce the number of dimensions to a reasonable amount (e.g. 50)
if the number of features is very high. This will suppress some
noise and speed up the computation of pairwise distances between
samples. For more tips see Laurens van der Maaten's FAQ [2].
Read more in the :ref:`User Guide <t_sne>`.
Parameters
----------
n_components : int, optional (default: 2)
Dimension of the embedded space.
perplexity : float, optional (default: 30)
The perplexity is related to the number of nearest neighbors that
is used in other manifold learning algorithms. Larger datasets
        usually require a larger perplexity. Consider selecting a value
between 5 and 50. The choice is not extremely critical since t-SNE
is quite insensitive to this parameter.
early_exaggeration : float, optional (default: 4.0)
Controls how tight natural clusters in the original space are in
the embedded space and how much space will be between them. For
larger values, the space between natural clusters will be larger
in the embedded space. Again, the choice of this parameter is not
very critical. If the cost function increases during initial
optimization, the early exaggeration factor or the learning rate
might be too high.
learning_rate : float, optional (default: 1000)
The learning rate can be a critical parameter. It should be
between 100 and 1000. If the cost function increases during initial
optimization, the early exaggeration factor or the learning rate
might be too high. If the cost function gets stuck in a bad local
minimum increasing the learning rate helps sometimes.
n_iter : int, optional (default: 1000)
Maximum number of iterations for the optimization. Should be at
least 200.
n_iter_without_progress : int, optional (default: 30)
Maximum number of iterations without progress before we abort the
optimization.
min_grad_norm : float, optional (default: 1E-7)
If the gradient norm is below this threshold, the optimization will
be aborted.
metric : string or callable, optional
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by scipy.spatial.distance.pdist for its metric parameter, or
a metric listed in pairwise.PAIRWISE_DISTANCE_FUNCTIONS.
If metric is "precomputed", X is assumed to be a distance matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them. The default is "euclidean" which is
interpreted as squared euclidean distance.
init : string, optional (default: "random")
Initialization of embedding. Possible options are 'random' and 'pca'.
PCA initialization cannot be used with precomputed distances and is
usually more globally stable than random initialization.
verbose : int, optional (default: 0)
Verbosity level.
random_state : int or RandomState instance or None (default)
Pseudo Random Number generator seed control. If None, use the
numpy.random singleton. Note that different initializations
might result in different local minima of the cost function.
Attributes
----------
embedding_ : array-like, shape (n_samples, n_components)
Stores the embedding vectors.
training_data_ : array-like, shape (n_samples, n_features)
Stores the training data.
Examples
--------
>>> import numpy as np
>>> from sklearn.manifold import TSNE
>>> X = np.array([[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 1]])
>>> model = TSNE(n_components=2, random_state=0)
>>> model.fit_transform(X) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
array([[ 887.28..., 238.61...],
[ -714.79..., 3243.34...],
[ 957.30..., -2505.78...],
           [-1130.28...,  -974.78...]])
References
----------
[1] van der Maaten, L.J.P.; Hinton, G.E. Visualizing High-Dimensional Data
Using t-SNE. Journal of Machine Learning Research 9:2579-2605, 2008.
[2] van der Maaten, L.J.P. t-Distributed Stochastic Neighbor Embedding
http://homepage.tudelft.nl/19j49/t-SNE.html
"""
def __init__(self, n_components=2, perplexity=30.0,
early_exaggeration=4.0, learning_rate=1000.0, n_iter=1000,
n_iter_without_progress=30, min_grad_norm=1e-7,
metric="euclidean", init="random", verbose=0,
random_state=None):
if init not in ["pca", "random"]:
raise ValueError("'init' must be either 'pca' or 'random'")
self.n_components = n_components
self.perplexity = perplexity
self.early_exaggeration = early_exaggeration
self.learning_rate = learning_rate
self.n_iter = n_iter
self.n_iter_without_progress = n_iter_without_progress
self.min_grad_norm = min_grad_norm
self.metric = metric
self.init = init
self.verbose = verbose
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model using X as training data.
Parameters
----------
X : array, shape (n_samples, n_features) or (n_samples, n_samples)
If the metric is 'precomputed' X must be a square distance
matrix. Otherwise it contains a sample per row.
"""
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'], dtype=np.float64)
random_state = check_random_state(self.random_state)
if self.early_exaggeration < 1.0:
raise ValueError("early_exaggeration must be at least 1, but is "
"%f" % self.early_exaggeration)
if self.n_iter < 200:
raise ValueError("n_iter should be at least 200")
if self.metric == "precomputed":
if self.init == 'pca':
raise ValueError("The parameter init=\"pca\" cannot be used "
"with metric=\"precomputed\".")
if X.shape[0] != X.shape[1]:
raise ValueError("X should be a square distance matrix")
distances = X
else:
if self.verbose:
print("[t-SNE] Computing pairwise distances...")
if self.metric == "euclidean":
distances = pairwise_distances(X, metric=self.metric, squared=True)
else:
distances = pairwise_distances(X, metric=self.metric)
# Degrees of freedom of the Student's t-distribution. The suggestion
# alpha = n_components - 1 comes from "Learning a Parametric Embedding
# by Preserving Local Structure" Laurens van der Maaten, 2009.
alpha = max(self.n_components - 1.0, 1)
n_samples = X.shape[0]
self.training_data_ = X
P = _joint_probabilities(distances, self.perplexity, self.verbose)
if self.init == 'pca':
pca = RandomizedPCA(n_components=self.n_components,
random_state=random_state)
X_embedded = pca.fit_transform(X)
elif self.init == 'random':
X_embedded = None
else:
raise ValueError("Unsupported initialization scheme: %s"
% self.init)
self.embedding_ = self._tsne(P, alpha, n_samples, random_state,
X_embedded=X_embedded)
return self
def _tsne(self, P, alpha, n_samples, random_state, X_embedded=None):
"""Runs t-SNE."""
        # t-SNE minimizes the Kullback-Leibler divergence of the Gaussians P
# and the Student's t-distributions Q. The optimization algorithm that
# we use is batch gradient descent with three stages:
# * early exaggeration with momentum 0.5
# * early exaggeration with momentum 0.8
# * final optimization with momentum 0.8
# The embedding is initialized with iid samples from Gaussians with
# standard deviation 1e-4.
if X_embedded is None:
# Initialize embedding randomly
X_embedded = 1e-4 * random_state.randn(n_samples,
self.n_components)
params = X_embedded.ravel()
# Early exaggeration
P *= self.early_exaggeration
params, error, it = _gradient_descent(
_kl_divergence, params, it=0, n_iter=50, momentum=0.5,
min_grad_norm=0.0, min_error_diff=0.0,
learning_rate=self.learning_rate, verbose=self.verbose,
args=[P, alpha, n_samples, self.n_components])
params, error, it = _gradient_descent(
_kl_divergence, params, it=it + 1, n_iter=100, momentum=0.8,
min_grad_norm=0.0, min_error_diff=0.0,
learning_rate=self.learning_rate, verbose=self.verbose,
args=[P, alpha, n_samples, self.n_components])
if self.verbose:
print("[t-SNE] Error after %d iterations with early "
"exaggeration: %f" % (it + 1, error))
# Final optimization
P /= self.early_exaggeration
params, error, it = _gradient_descent(
_kl_divergence, params, it=it + 1, n_iter=self.n_iter,
min_grad_norm=self.min_grad_norm,
n_iter_without_progress=self.n_iter_without_progress,
momentum=0.8, learning_rate=self.learning_rate,
verbose=self.verbose, args=[P, alpha, n_samples,
self.n_components])
if self.verbose:
print("[t-SNE] Error after %d iterations: %f" % (it + 1, error))
X_embedded = params.reshape(n_samples, self.n_components)
return X_embedded
def fit_transform(self, X, y=None):
"""Transform X to the embedded space.
Parameters
----------
X : array, shape (n_samples, n_features) or (n_samples, n_samples)
If the metric is 'precomputed' X must be a square distance
matrix. Otherwise it contains a sample per row.
Returns
-------
X_new : array, shape (n_samples, n_components)
Embedding of the training data in low-dimensional space.
"""
self.fit(X)
return self.embedding_
| bsd-3-clause |
wbbeyourself/cn-deep-learning | image-classification/helper.py | 155 | 5631 | import pickle
import numpy as np
import matplotlib.pyplot as plt
from sklearn.preprocessing import LabelBinarizer
def _load_label_names():
"""
Load the label names from file
"""
return ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
def load_cfar10_batch(cifar10_dataset_folder_path, batch_id):
"""
Load a batch of the dataset
"""
with open(cifar10_dataset_folder_path + '/data_batch_' + str(batch_id), mode='rb') as file:
batch = pickle.load(file, encoding='latin1')
features = batch['data'].reshape((len(batch['data']), 3, 32, 32)).transpose(0, 2, 3, 1)
labels = batch['labels']
return features, labels
def display_stats(cifar10_dataset_folder_path, batch_id, sample_id):
"""
    Display Stats of the dataset
"""
batch_ids = list(range(1, 6))
if batch_id not in batch_ids:
print('Batch Id out of Range. Possible Batch Ids: {}'.format(batch_ids))
return None
features, labels = load_cfar10_batch(cifar10_dataset_folder_path, batch_id)
if not (0 <= sample_id < len(features)):
print('{} samples in batch {}. {} is out of range.'.format(len(features), batch_id, sample_id))
return None
print('\nStats of batch {}:'.format(batch_id))
print('Samples: {}'.format(len(features)))
print('Label Counts: {}'.format(dict(zip(*np.unique(labels, return_counts=True)))))
print('First 20 Labels: {}'.format(labels[:20]))
sample_image = features[sample_id]
sample_label = labels[sample_id]
label_names = _load_label_names()
print('\nExample of Image {}:'.format(sample_id))
print('Image - Min Value: {} Max Value: {}'.format(sample_image.min(), sample_image.max()))
print('Image - Shape: {}'.format(sample_image.shape))
print('Label - Label Id: {} Name: {}'.format(sample_label, label_names[sample_label]))
plt.axis('off')
plt.imshow(sample_image)
def _preprocess_and_save(normalize, one_hot_encode, features, labels, filename):
"""
Preprocess data and save it to file
"""
features = normalize(features)
labels = one_hot_encode(labels)
pickle.dump((features, labels), open(filename, 'wb'))
def preprocess_and_save_data(cifar10_dataset_folder_path, normalize, one_hot_encode):
"""
Preprocess Training and Validation Data
"""
n_batches = 5
valid_features = []
valid_labels = []
for batch_i in range(1, n_batches + 1):
features, labels = load_cfar10_batch(cifar10_dataset_folder_path, batch_i)
validation_count = int(len(features) * 0.1)
        # Preprocess and save a batch of training data
_preprocess_and_save(
normalize,
one_hot_encode,
features[:-validation_count],
labels[:-validation_count],
'preprocess_batch_' + str(batch_i) + '.p')
# Use a portion of training batch for validation
valid_features.extend(features[-validation_count:])
valid_labels.extend(labels[-validation_count:])
# Preprocess and Save all validation data
_preprocess_and_save(
normalize,
one_hot_encode,
np.array(valid_features),
np.array(valid_labels),
'preprocess_validation.p')
with open(cifar10_dataset_folder_path + '/test_batch', mode='rb') as file:
batch = pickle.load(file, encoding='latin1')
# load the test data
test_features = batch['data'].reshape((len(batch['data']), 3, 32, 32)).transpose(0, 2, 3, 1)
test_labels = batch['labels']
# Preprocess and Save all test data
_preprocess_and_save(
normalize,
one_hot_encode,
np.array(test_features),
np.array(test_labels),
'preprocess_test.p')
def batch_features_labels(features, labels, batch_size):
"""
Split features and labels into batches
"""
for start in range(0, len(features), batch_size):
end = min(start + batch_size, len(features))
yield features[start:end], labels[start:end]
def load_preprocess_training_batch(batch_id, batch_size):
"""
Load the Preprocessed Training data and return them in batches of <batch_size> or less
"""
filename = 'preprocess_batch_' + str(batch_id) + '.p'
features, labels = pickle.load(open(filename, mode='rb'))
# Return the training data in batches of size <batch_size> or less
return batch_features_labels(features, labels, batch_size)
def display_image_predictions(features, labels, predictions):
n_classes = 10
label_names = _load_label_names()
label_binarizer = LabelBinarizer()
label_binarizer.fit(range(n_classes))
label_ids = label_binarizer.inverse_transform(np.array(labels))
fig, axies = plt.subplots(nrows=4, ncols=2)
fig.tight_layout()
fig.suptitle('Softmax Predictions', fontsize=20, y=1.1)
n_predictions = 3
margin = 0.05
ind = np.arange(n_predictions)
width = (1. - 2. * margin) / n_predictions
for image_i, (feature, label_id, pred_indicies, pred_values) in enumerate(zip(features, label_ids, predictions.indices, predictions.values)):
pred_names = [label_names[pred_i] for pred_i in pred_indicies]
correct_name = label_names[label_id]
axies[image_i][0].imshow(feature)
axies[image_i][0].set_title(correct_name)
axies[image_i][0].set_axis_off()
axies[image_i][1].barh(ind + margin, pred_values[::-1], width)
axies[image_i][1].set_yticks(ind + margin)
axies[image_i][1].set_yticklabels(pred_names[::-1])
axies[image_i][1].set_xticks([0, 0.5, 1.0])
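# Editor's note: an illustrative, hedged sketch (not part of the original
# helper module) showing how batch_features_labels() above is typically
# consumed; the arrays below are dummy data with CIFAR-10-like shapes.
if __name__ == '__main__':
    _features = np.zeros((10, 32, 32, 3))
    _labels = np.zeros((10, 10))
    for batch_x, batch_y in batch_features_labels(_features, _labels, batch_size=4):
        # Yields batches of 4, 4 and 2 samples for the 10 dummy examples:
        print(batch_x.shape, batch_y.shape)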
| mit |
rpalovics/Alpenglow | python/test_alpenglow/experiments/test_BatchFactorExperiment.py | 2 | 14659 | import alpenglow as prs
import alpenglow.Getter as rs
import alpenglow.experiments
import pandas as pd
import math
import pytest
import sys
from alpenglow.evaluation import DcgScore
import alpenglow.cpp
compiler = alpenglow.cpp.__compiler
stdlib = alpenglow.cpp.__stdlib
class TestBatchFactorExperiment:
def test_batchFactorExperiment(self):
data = pd.read_csv(
"python/test_alpenglow/test_data_4",
sep=' ',
header=None,
names=['time', 'user', 'item', 'id', 'score', 'eval']
)
sbExperiment = alpenglow.experiments.BatchFactorExperiment(
top_k=100,
negative_rate=3,
seed=254938879,
period_length=1000
)
rankings = sbExperiment.run(data, verbose=True, exclude_known=True)
assert rankings.top_k == 100
desired_ranks = [101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 42.0, 1.0, 10.0, 101.0, 101.0, 20.0, 101.0, 101.0, 101.0, 18.0, 87.0, 101.0, 92.0, 101.0, 101.0, 48.0, 3.0, 101.0, 77.0, 25.0, 75.0, 3.0, 80.0, 10.0, 101.0, 101.0, 101.0, 101.0, 89.0, 101.0, 66.0, 101.0, 6.0, 101.0, 52.0, 83.0, 101.0, 101.0, 56.0, 24.0, 26.0, 38.0, 101.0, 101.0, 16.0, 58.0, 15.0, 31.0, 101.0, 26.0, 101.0, 76.0, 72.0, 12.0, 7.0, 50.0, 24.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 32.0, 101.0, 101.0, 101.0, 52.0, 95.0, 3.0, 101.0, 98.0, 94.0, 101.0, 22.0, 101.0, 101.0, 25.0, 101.0, 3.0, 83.0, 2.0, 101.0, 25.0, 9.0, 27.0, 101.0, 37.0, 12.0, 101.0, 101.0, 64.0, 101.0, 101.0, 101.0, 101.0, 26.0, 50.0, 5.0, 101.0, 101.0, 66.0, 101.0, 45.0, 11.0, 101.0, 7.0, 101.0, 34.0, 101.0, 1.0, 98.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 4.0, 101.0, 10.0, 10.0, 101.0, 101.0, 101.0, 101.0, 31.0, 6.0, 101.0, 101.0, 101.0, 7.0, 54.0, 12.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 6.0, 101.0, 101.0, 45.0, 101.0, 101.0, 22.0, 101.0, 101.0, 45.0, 101.0, 101.0, 89.0, 101.0, 101.0, 101.0, 30.0, 3.0, 20.0, 101.0, 3.0, 10.0, 101.0, 16.0, 101.0, 101.0, 101.0, 25.0, 94.0, 16.0, 101.0, 101.0, 101.0, 4.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 31.0, 101.0, 101.0, 53.0, 3.0, 101.0, 2.0, 2.0, 101.0, 43.0, 101.0, 26.0, 36.0, 101.0, 101.0, 5.0, 5.0, 101.0, 21.0, 3.0, 3.0, 5.0, 37.0, 47.0, 101.0, 101.0, 35.0, 12.0, 101.0, 23.0, 101.0, 28.0, 101.0, 7.0, 82.0, 26.0, 101.0, 101.0, 20.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 2.0, 10.0, 80.0, 72.0, 26.0, 101.0, 38.0, 48.0, 10.0, 101.0, 65.0, 101.0, 4.0, 101.0, 12.0, 101.0, 50.0, 101.0, 101.0, 101.0, 101.0, 101.0, 3.0, 7.0, 101.0, 10.0, 14.0, 78.0, 97.0, 6.0, 1.0, 8.0, 15.0, 29.0, 3.0, 101.0, 96.0, 46.0, 32.0, 101.0, 12.0, 58.0, 101.0, 101.0, 53.0, 52.0, 4.0, 25.0, 70.0, 99.0, 14.0, 25.0, 38.0, 3.0, 1.0, 101.0, 1.0, 101.0, 101.0, 101.0, 101.0, 101.0, 90.0, 1.0, 101.0, 101.0, 101.0, 4.0, 101.0, 100.0, 44.0, 22.0, 56.0, 42.0, 101.0, 65.0, 100.0, 35.0, 7.0, 101.0, 101.0, 101.0, 88.0, 101.0, 2.0, 58.0, 101.0, 33.0, 8.0, 1.0, 37.0, 93.0, 101.0, 3.0, 36.0, 17.0, 20.0, 62.0, 1.0, 35.0, 101.0, 4.0, 77.0, 17.0, 101.0, 101.0, 1.0, 101.0, 101.0, 28.0, 101.0, 101.0, 19.0, 101.0, 37.0, 18.0, 
101.0, 1.0, 101.0, 101.0, 25.0, 35.0, 101.0, 19.0, 47.0, 42.0, 21.0, 101.0, 101.0, 88.0, 101.0, 9.0, 4.0, 101.0, 4.0, 101.0, 3.0, 87.0, 16.0, 14.0, 101.0, 101.0, 46.0, 13.0, 44.0, 101.0, 31.0, 101.0, 101.0, 101.0, 1.0, 101.0, 57.0, 101.0, 85.0, 1.0, 91.0, 101.0, 101.0, 101.0, 101.0, 42.0, 33.0, 101.0, 101.0, 54.0, 16.0, 28.0, 101.0, 15.0, 15.0, 56.0, 101.0, 101.0, 101.0, 9.0, 101.0, 3.0, 101.0, 12.0, 9.0, 16.0, 5.0, 11.0, 2.0, 16.0, 101.0, 1.0, 101.0, 14.0, 66.0, 25.0, 28.0, 24.0, 5.0, 101.0, 93.0, 101.0, 18.0, 101.0, 101.0, 50.0, 10.0, 32.0, 30.0, 101.0, 18.0, 101.0, 17.0, 4.0, 8.0, 40.0, 36.0, 101.0, 38.0, 101.0, 2.0, 24.0, 8.0, 101.0, 101.0, 5.0, 101.0, 101.0, 8.0, 101.0, 6.0, 27.0, 101.0, 101.0, 90.0, 16.0, 84.0, 12.0, 101.0, 101.0, 23.0, 26.0, 101.0, 2.0, 101.0, 34.0, 43.0, 37.0, 17.0, 35.0, 2.0, 9.0, 5.0, 6.0, 28.0, 101.0, 29.0, 11.0, 5.0, 86.0, 8.0, 28.0, 7.0, 31.0, 11.0, 1.0, 12.0, 101.0, 30.0, 31.0, 101.0, 14.0, 101.0, 76.0, 5.0, 101.0, 13.0, 101.0, 43.0, 2.0, 2.0, 22.0, 47.0, 93.0, 48.0, 101.0, 11.0, 101.0, 101.0, 1.0, 7.0, 101.0, 4.0, 82.0, 17.0, 101.0, 22.0, 35.0, 35.0, 17.0, 101.0, 6.0, 101.0, 101.0, 29.0, 24.0, 101.0, 91.0, 101.0, 2.0, 2.0, 3.0, 101.0, 22.0, 21.0, 15.0, 8.0, 12.0, 19.0, 25.0, 17.0, 19.0, 96.0, 101.0, 21.0, 11.0, 46.0, 4.0, 1.0, 101.0, 101.0, 49.0, 17.0, 13.0, 1.0, 43.0, 101.0, 2.0, 1.0, 94.0, 56.0, 6.0, 40.0, 2.0, 101.0, 101.0, 22.0, 4.0, 28.0, 1.0, 101.0, 13.0, 30.0, 101.0, 101.0, 1.0, 39.0, 23.0, 12.0, 17.0, 7.0, 101.0, 5.0, 22.0, 2.0, 24.0, 101.0, 101.0, 76.0, 35.0, 46.0, 101.0, 16.0, 7.0, 68.0, 101.0, 31.0, 4.0, 6.0, 16.0, 9.0, 101.0, 101.0, 27.0, 9.0, 3.0, 1.0, 7.0, 29.0, 16.0, 3.0, 101.0, 10.0, 3.0, 101.0, 1.0, 2.0, 35.0, 101.0, 1.0, 36.0, 40.0, 2.0, 25.0, 1.0, 101.0, 101.0, 101.0, 101.0, 101.0, 18.0, 17.0, 1.0, 101.0, 9.0, 25.0, 13.0, 12.0, 101.0, 3.0, 101.0, 25.0, 101.0, 46.0, 62.0, 101.0, 101.0, 5.0, 5.0, 6.0, 14.0, 101.0, 101.0, 28.0, 11.0, 41.0, 24.0, 3.0, 68.0, 7.0, 9.0, 101.0, 101.0, 98.0, 101.0, 101.0, 101.0, 11.0, 14.0, 31.0, 32.0, 22.0, 101.0, 2.0, 20.0, 23.0, 101.0, 23.0, 20.0, 1.0, 9.0, 31.0, 16.0, 11.0, 4.0, 34.0, 6.0, 101.0, 101.0, 37.0, 4.0, 15.0, 1.0, 101.0, 12.0, 15.0, 5.0, 101.0, 24.0, 5.0, 5.0, 31.0, 100.0, 38.0, 101.0, 11.0, 8.0, 28.0, 101.0, 34.0, 101.0, 101.0, 16.0, 11.0, 22.0, 13.0, 20.0, 101.0, 12.0, 101.0, 101.0, 101.0, 101.0, 101.0, 20.0, 23.0, 11.0, 101.0, 42.0, 3.0, 101.0, 12.0, 101.0, 16.0, 2.0, 9.0, 9.0, 101.0, 101.0, 101.0, 40.0, 101.0, 101.0, 16.0, 101.0, 21.0, 101.0, 10.0, 12.0, 1.0, 4.0, 5.0, 35.0, 1.0, 101.0, 97.0, 5.0, 21.0, 9.0, 101.0, 101.0, 28.0, 32.0, 101.0, 16.0, 10.0, 27.0, 2.0, 44.0, 101.0, 27.0, 5.0, 29.0, 101.0, 22.0, 39.0, 12.0, 2.0, 58.0, 1.0, 10.0, 37.0, 12.0, 101.0, 2.0, 6.0, 10.0, 92.0, 23.0, 2.0, 101.0, 1.0, 1.0, 62.0, 101.0, 16.0, 22.0, 26.0, 41.0, 101.0, 101.0, 101.0]
if(compiler == "gcc" and stdlib == "libstdc++"):
assert list(rankings["rank"].fillna(101)) == desired_ranks
assert DcgScore(rankings).mean() == pytest.approx(0.15820316053460715, abs=5*1e-3)
def test_batchFactorExperiment_timeframe(self):
data = pd.read_csv(
"python/test_alpenglow/test_data_4",
sep=' ',
header=None,
names=['time', 'user', 'item', 'id', 'score', 'eval']
)
sbExperiment = alpenglow.experiments.BatchFactorExperiment(
top_k=100,
negative_rate=3,
seed=254938879,
period_length=1000,
timeframe_length=2000
)
rankings = sbExperiment.run(data, verbose=True, exclude_known=True)
assert rankings.top_k == 100
desired_ranks=[101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 42.0, 1.0, 10.0, 101.0, 101.0, 20.0, 101.0, 101.0, 101.0, 18.0, 87.0, 101.0, 92.0, 101.0, 101.0, 48.0, 3.0, 101.0, 77.0, 25.0, 75.0, 3.0, 80.0, 10.0, 101.0, 101.0, 101.0, 101.0, 89.0, 101.0, 66.0, 101.0, 6.0, 101.0, 52.0, 83.0, 101.0, 101.0, 56.0, 24.0, 26.0, 38.0, 101.0, 101.0, 16.0, 58.0, 15.0, 31.0, 101.0, 26.0, 101.0, 76.0, 72.0, 12.0, 7.0, 50.0, 24.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 32.0, 101.0, 101.0, 101.0, 52.0, 95.0, 3.0, 101.0, 98.0, 94.0, 101.0, 22.0, 101.0, 101.0, 25.0, 101.0, 3.0, 83.0, 2.0, 101.0, 25.0, 9.0, 27.0, 101.0, 37.0, 12.0, 101.0, 101.0, 64.0, 101.0, 101.0, 101.0, 101.0, 26.0, 50.0, 5.0, 101.0, 101.0, 66.0, 101.0, 45.0, 11.0, 101.0, 7.0, 101.0, 34.0, 101.0, 1.0, 98.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 4.0, 101.0, 10.0, 10.0, 101.0, 101.0, 101.0, 101.0, 31.0, 6.0, 101.0, 101.0, 101.0, 7.0, 54.0, 12.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 6.0, 101.0, 101.0, 45.0, 101.0, 101.0, 22.0, 101.0, 101.0, 45.0, 101.0, 101.0, 89.0, 101.0, 101.0, 101.0, 30.0, 3.0, 20.0, 101.0, 3.0, 10.0, 101.0, 16.0, 101.0, 101.0, 101.0, 25.0, 94.0, 16.0, 101.0, 101.0, 101.0, 4.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 31.0, 101.0, 101.0, 53.0, 3.0, 101.0, 2.0, 2.0, 101.0, 43.0, 101.0, 26.0, 36.0, 101.0, 101.0, 5.0, 5.0, 101.0, 21.0, 3.0, 3.0, 5.0, 37.0, 47.0, 101.0, 101.0, 35.0, 12.0, 101.0, 23.0, 101.0, 28.0, 101.0, 7.0, 82.0, 26.0, 101.0, 101.0, 20.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 2.0, 10.0, 80.0, 72.0, 26.0, 101.0, 38.0, 48.0, 10.0, 101.0, 65.0, 101.0, 4.0, 101.0, 12.0, 101.0, 50.0, 101.0, 101.0, 101.0, 101.0, 101.0, 3.0, 7.0, 101.0, 10.0, 14.0, 78.0, 97.0, 6.0, 1.0, 8.0, 15.0, 29.0, 3.0, 101.0, 96.0, 46.0, 32.0, 101.0, 12.0, 58.0, 101.0, 101.0, 53.0, 52.0, 4.0, 25.0, 70.0, 99.0, 14.0, 25.0, 38.0, 3.0, 1.0, 101.0, 1.0, 101.0, 101.0, 101.0, 101.0, 101.0, 90.0, 1.0, 101.0, 101.0, 101.0, 4.0, 101.0, 100.0, 44.0, 22.0, 56.0, 42.0, 101.0, 65.0, 100.0, 35.0, 7.0, 101.0, 101.0, 101.0, 88.0, 101.0, 2.0, 58.0, 101.0, 33.0, 8.0, 1.0, 37.0, 93.0, 101.0, 3.0, 36.0, 17.0, 20.0, 62.0, 1.0, 35.0, 101.0, 4.0, 77.0, 17.0, 101.0, 101.0, 1.0, 101.0, 101.0, 28.0, 101.0, 101.0, 19.0, 101.0, 37.0, 18.0, 101.0, 
1.0, 101.0, 101.0, 25.0, 35.0, 101.0, 19.0, 47.0, 42.0, 21.0, 101.0, 101.0, 88.0, 101.0, 9.0, 4.0, 101.0, 4.0, 101.0, 3.0, 87.0, 16.0, 14.0, 101.0, 101.0, 46.0, 13.0, 44.0, 101.0, 31.0, 101.0, 101.0, 101.0, 1.0, 101.0, 57.0, 101.0, 85.0, 1.0, 91.0, 101.0, 101.0, 101.0, 101.0, 42.0, 22.0, 101.0, 101.0, 55.0, 9.0, 15.0, 101.0, 16.0, 63.0, 44.0, 101.0, 101.0, 64.0, 10.0, 101.0, 4.0, 101.0, 11.0, 2.0, 94.0, 43.0, 19.0, 13.0, 11.0, 101.0, 1.0, 101.0, 20.0, 39.0, 51.0, 53.0, 13.0, 5.0, 101.0, 46.0, 101.0, 73.0, 101.0, 101.0, 101.0, 75.0, 36.0, 17.0, 101.0, 101.0, 101.0, 101.0, 1.0, 4.0, 101.0, 72.0, 35.0, 101.0, 101.0, 6.0, 15.0, 3.0, 101.0, 101.0, 24.0, 101.0, 101.0, 37.0, 101.0, 32.0, 83.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 25.0, 13.0, 101.0, 6.0, 101.0, 63.0, 34.0, 23.0, 96.0, 25.0, 3.0, 83.0, 13.0, 2.0, 17.0, 101.0, 25.0, 31.0, 1.0, 96.0, 2.0, 72.0, 4.0, 101.0, 2.0, 4.0, 94.0, 101.0, 101.0, 95.0, 101.0, 37.0, 101.0, 83.0, 2.0, 101.0, 12.0, 101.0, 101.0, 2.0, 2.0, 58.0, 101.0, 42.0, 23.0, 101.0, 23.0, 101.0, 101.0, 1.0, 13.0, 101.0, 7.0, 101.0, 25.0, 101.0, 16.0, 94.0, 67.0, 21.0, 101.0, 6.0, 101.0, 101.0, 17.0, 26.0, 101.0, 101.0, 101.0, 6.0, 14.0, 10.0, 101.0, 22.0, 101.0, 9.0, 54.0, 79.0, 72.0, 36.0, 79.0, 101.0, 101.0, 26.0, 101.0, 22.0, 57.0, 7.0, 4.0, 101.0, 101.0, 24.0, 23.0, 33.0, 2.0, 101.0, 101.0, 3.0, 4.0, 32.0, 29.0, 54.0, 25.0, 9.0, 101.0, 101.0, 12.0, 11.0, 16.0, 1.0, 101.0, 86.0, 13.0, 101.0, 101.0, 6.0, 49.0, 13.0, 92.0, 19.0, 10.0, 101.0, 2.0, 35.0, 3.0, 25.0, 101.0, 85.0, 101.0, 75.0, 84.0, 101.0, 29.0, 5.0, 101.0, 46.0, 14.0, 10.0, 43.0, 28.0, 10.0, 101.0, 101.0, 89.0, 20.0, 4.0, 7.0, 18.0, 43.0, 28.0, 46.0, 101.0, 7.0, 3.0, 101.0, 1.0, 11.0, 20.0, 101.0, 3.0, 99.0, 101.0, 9.0, 23.0, 4.0, 101.0, 101.0, 101.0, 101.0, 101.0, 8.0, 42.0, 43.0, 101.0, 5.0, 16.0, 17.0, 12.0, 101.0, 51.0, 58.0, 65.0, 101.0, 101.0, 101.0, 101.0, 101.0, 14.0, 14.0, 25.0, 26.0, 101.0, 101.0, 69.0, 18.0, 79.0, 38.0, 9.0, 101.0, 28.0, 9.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 8.0, 30.0, 32.0, 2.0, 35.0, 101.0, 9.0, 2.0, 29.0, 101.0, 19.0, 14.0, 6.0, 22.0, 21.0, 12.0, 2.0, 4.0, 101.0, 5.0, 101.0, 101.0, 82.0, 97.0, 17.0, 4.0, 101.0, 57.0, 13.0, 12.0, 101.0, 10.0, 16.0, 9.0, 13.0, 101.0, 68.0, 101.0, 12.0, 17.0, 33.0, 101.0, 81.0, 101.0, 101.0, 12.0, 16.0, 47.0, 19.0, 85.0, 101.0, 2.0, 101.0, 101.0, 101.0, 101.0, 101.0, 33.0, 33.0, 15.0, 101.0, 101.0, 12.0, 101.0, 8.0, 101.0, 30.0, 1.0, 11.0, 22.0, 87.0, 101.0, 101.0, 101.0, 101.0, 101.0, 18.0, 101.0, 38.0, 101.0, 15.0, 20.0, 3.0, 1.0, 1.0, 97.0, 1.0, 101.0, 34.0, 7.0, 62.0, 5.0, 101.0, 101.0, 60.0, 101.0, 101.0, 2.0, 12.0, 59.0, 9.0, 101.0, 101.0, 30.0, 8.0, 38.0, 96.0, 90.0, 76.0, 25.0, 8.0, 101.0, 5.0, 5.0, 101.0, 33.0, 101.0, 8.0, 33.0, 18.0, 101.0, 5.0, 6.0, 101.0, 6.0, 1.0, 100.0, 101.0, 1.0, 17.0, 48.0, 41.0, 101.0, 101.0, 101.0]
if(compiler == "gcc" and stdlib == "libstdc++"):
assert list(rankings["rank"].fillna(101)) == desired_ranks
assert DcgScore(rankings).mean() == pytest.approx(0.14411975824368886, abs=5*1e-3)
| apache-2.0 |
toobaz/pandas | pandas/tests/extension/base/missing.py | 2 | 4249 | import numpy as np
import pandas as pd
import pandas.util.testing as tm
from .base import BaseExtensionTests
class BaseMissingTests(BaseExtensionTests):
def test_isna(self, data_missing):
expected = np.array([True, False])
result = pd.isna(data_missing)
tm.assert_numpy_array_equal(result, expected)
result = pd.Series(data_missing).isna()
expected = pd.Series(expected)
self.assert_series_equal(result, expected)
# GH 21189
result = pd.Series(data_missing).drop([0, 1]).isna()
expected = pd.Series([], dtype=bool)
self.assert_series_equal(result, expected)
def test_dropna_array(self, data_missing):
result = data_missing.dropna()
expected = data_missing[[1]]
self.assert_extension_array_equal(result, expected)
def test_dropna_series(self, data_missing):
ser = pd.Series(data_missing)
result = ser.dropna()
expected = ser.iloc[[1]]
self.assert_series_equal(result, expected)
def test_dropna_frame(self, data_missing):
df = pd.DataFrame({"A": data_missing})
# defaults
result = df.dropna()
expected = df.iloc[[1]]
self.assert_frame_equal(result, expected)
# axis = 1
result = df.dropna(axis="columns")
expected = pd.DataFrame(index=[0, 1])
self.assert_frame_equal(result, expected)
# multiple
df = pd.DataFrame({"A": data_missing, "B": [1, np.nan]})
result = df.dropna()
expected = df.iloc[:0]
self.assert_frame_equal(result, expected)
def test_fillna_scalar(self, data_missing):
valid = data_missing[1]
result = data_missing.fillna(valid)
expected = data_missing.fillna(valid)
self.assert_extension_array_equal(result, expected)
def test_fillna_limit_pad(self, data_missing):
arr = data_missing.take([1, 0, 0, 0, 1])
result = pd.Series(arr).fillna(method="ffill", limit=2)
expected = pd.Series(data_missing.take([1, 1, 1, 0, 1]))
self.assert_series_equal(result, expected)
def test_fillna_limit_backfill(self, data_missing):
arr = data_missing.take([1, 0, 0, 0, 1])
result = pd.Series(arr).fillna(method="backfill", limit=2)
expected = pd.Series(data_missing.take([1, 0, 1, 1, 1]))
self.assert_series_equal(result, expected)
def test_fillna_series(self, data_missing):
fill_value = data_missing[1]
ser = pd.Series(data_missing)
result = ser.fillna(fill_value)
expected = pd.Series(
data_missing._from_sequence(
[fill_value, fill_value], dtype=data_missing.dtype
)
)
self.assert_series_equal(result, expected)
# Fill with a series
result = ser.fillna(expected)
self.assert_series_equal(result, expected)
# Fill with a series not affecting the missing values
result = ser.fillna(ser)
self.assert_series_equal(result, ser)
def test_fillna_series_method(self, data_missing, fillna_method):
fill_value = data_missing[1]
if fillna_method == "ffill":
data_missing = data_missing[::-1]
result = pd.Series(data_missing).fillna(method=fillna_method)
expected = pd.Series(
data_missing._from_sequence(
[fill_value, fill_value], dtype=data_missing.dtype
)
)
self.assert_series_equal(result, expected)
def test_fillna_frame(self, data_missing):
fill_value = data_missing[1]
result = pd.DataFrame({"A": data_missing, "B": [1, 2]}).fillna(fill_value)
expected = pd.DataFrame(
{
"A": data_missing._from_sequence(
[fill_value, fill_value], dtype=data_missing.dtype
),
"B": [1, 2],
}
)
self.assert_frame_equal(result, expected)
def test_fillna_fill_other(self, data):
result = pd.DataFrame({"A": data, "B": [np.nan] * len(data)}).fillna({"B": 0.0})
expected = pd.DataFrame({"A": data, "B": [0.0] * len(result)})
self.assert_frame_equal(result, expected)
| bsd-3-clause |
madjelan/scikit-learn | doc/datasets/mldata_fixture.py | 367 | 1183 | """Fixture module to skip the datasets loading when offline
Mock urllib2 access to mldata.org and create a temporary data folder.
"""
from os import makedirs
from os.path import join
import numpy as np
import tempfile
import shutil
from sklearn import datasets
from sklearn.utils.testing import install_mldata_mock
from sklearn.utils.testing import uninstall_mldata_mock
def globs(globs):
# Create a temporary folder for the data fetcher
global custom_data_home
custom_data_home = tempfile.mkdtemp()
makedirs(join(custom_data_home, 'mldata'))
globs['custom_data_home'] = custom_data_home
return globs
def setup_module():
# setup mock urllib2 module to avoid downloading from mldata.org
install_mldata_mock({
'mnist-original': {
'data': np.empty((70000, 784)),
'label': np.repeat(np.arange(10, dtype='d'), 7000),
},
'iris': {
'data': np.empty((150, 4)),
},
'datasets-uci-iris': {
'double0': np.empty((150, 4)),
'class': np.empty((150,)),
},
})
def teardown_module():
uninstall_mldata_mock()
shutil.rmtree(custom_data_home)
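# Editor's note: a hedged sketch of how the mocked fetcher is exercised once
# this fixture has run (illustration only, not part of the original fixture);
# the dataset name maps onto the 'mnist-original' entry mocked in setup_module:
#
#   >>> from sklearn.datasets import fetch_mldata
#   >>> mnist = fetch_mldata('MNIST original', data_home=custom_data_home)
#   >>> mnist.data.shape
#   (70000, 784)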
| bsd-3-clause |
lthurlow/Network-Grapher | proj/external/matplotlib-1.2.1/build/lib.linux-i686-2.7/matplotlib/collections.py | 2 | 50685 | """
Classes for the efficient drawing of large collections of objects that
share most properties, e.g. a large number of line segments or
polygons.
The classes are not meant to be as flexible as their single element
counterparts (e.g. you may not be able to select all line styles) but
they are meant to be fast for common use cases (e.g. a large set of solid
line segments)
"""
from __future__ import print_function
import warnings
import numpy as np
import numpy.ma as ma
import matplotlib as mpl
import matplotlib.cbook as cbook
import matplotlib.colors as mcolors
import matplotlib.cm as cm
from matplotlib import docstring
import matplotlib.transforms as transforms
import matplotlib.artist as artist
from matplotlib.artist import allow_rasterization
import matplotlib.backend_bases as backend_bases
import matplotlib.path as mpath
import matplotlib.mlab as mlab
class Collection(artist.Artist, cm.ScalarMappable):
"""
Base class for Collections. Must be subclassed to be usable.
All properties in a collection must be sequences or scalars;
if scalars, they will be converted to sequences. The
property of the ith element of the collection is::
prop[i % len(props)]
Keyword arguments and default values:
* *edgecolors*: None
* *facecolors*: None
* *linewidths*: None
* *antialiaseds*: None
* *offsets*: None
* *transOffset*: transforms.IdentityTransform()
* *offset_position*: 'screen' (default) or 'data'
* *norm*: None (optional for
:class:`matplotlib.cm.ScalarMappable`)
* *cmap*: None (optional for
:class:`matplotlib.cm.ScalarMappable`)
* *hatch*: None
*offsets* and *transOffset* are used to translate the patch after
rendering (default no offsets). If offset_position is 'screen'
(default) the offset is applied after the master transform has
been applied, that is, the offsets are in screen coordinates. If
offset_position is 'data', the offset is applied before the master
transform, i.e., the offsets are in data coordinates.
If any of *edgecolors*, *facecolors*, *linewidths*, *antialiaseds*
are None, they default to their :data:`matplotlib.rcParams` patch
setting, in sequence form.
The use of :class:`~matplotlib.cm.ScalarMappable` is optional. If
the :class:`~matplotlib.cm.ScalarMappable` matrix _A is not None
(ie a call to set_array has been made), at draw time a call to
scalar mappable will be made to set the face colors.
"""
_offsets = np.array([], np.float_)
# _offsets must be a Nx2 array!
_offsets.shape = (0, 2)
_transOffset = transforms.IdentityTransform()
_transforms = []
zorder = 1
def __init__(self,
edgecolors=None,
facecolors=None,
linewidths=None,
linestyles='solid',
antialiaseds = None,
offsets = None,
transOffset = None,
norm = None, # optional for ScalarMappable
cmap = None, # ditto
pickradius = 5.0,
hatch=None,
urls = None,
offset_position='screen',
**kwargs
):
"""
Create a Collection
%(Collection)s
"""
artist.Artist.__init__(self)
cm.ScalarMappable.__init__(self, norm, cmap)
self.set_edgecolor(edgecolors)
self.set_facecolor(facecolors)
self.set_linewidth(linewidths)
self.set_linestyle(linestyles)
self.set_antialiased(antialiaseds)
self.set_pickradius(pickradius)
self.set_urls(urls)
self.set_hatch(hatch)
self.set_offset_position(offset_position)
self._uniform_offsets = None
self._offsets = np.array([], np.float_)
# Force _offsets to be Nx2
self._offsets.shape = (0, 2)
if offsets is not None:
offsets = np.asanyarray(offsets)
offsets.shape = (-1, 2) # Make it Nx2
if transOffset is not None:
self._offsets = offsets
self._transOffset = transOffset
else:
self._uniform_offsets = offsets
self.update(kwargs)
self._paths = None
@staticmethod
def _get_value(val):
try:
return (float(val), )
except TypeError:
if cbook.iterable(val) and len(val):
try:
float(val[0])
except (TypeError, ValueError):
pass # raise below
else:
return val
raise TypeError('val must be a float or nonzero sequence of floats')
@staticmethod
def _get_bool(val):
if not cbook.iterable(val):
val = (val,)
try:
bool(val[0])
except (TypeError, IndexError):
raise TypeError('val must be a bool or nonzero sequence of them')
return val
def get_paths(self):
return self._paths
def set_paths(self):
raise NotImplementedError
def get_transforms(self):
return self._transforms
def get_offset_transform(self):
t = self._transOffset
if (not isinstance(t, transforms.Transform)
and hasattr(t, '_as_mpl_transform')):
t = t._as_mpl_transform(self.axes)
return t
def get_datalim(self, transData):
transform = self.get_transform()
transOffset = self.get_offset_transform()
offsets = self._offsets
paths = self.get_paths()
if not transform.is_affine:
paths = [transform.transform_path_non_affine(p) for p in paths]
transform = transform.get_affine()
if not transOffset.is_affine:
offsets = transOffset.transform_non_affine(offsets)
transOffset = transOffset.get_affine()
offsets = np.asanyarray(offsets, np.float_)
if np.ma.isMaskedArray(offsets):
offsets = offsets.filled(np.nan)
# get_path_collection_extents handles nan but not masked arrays
offsets.shape = (-1, 2) # Make it Nx2
result = mpath.get_path_collection_extents(
transform.frozen(), paths, self.get_transforms(),
offsets, transOffset.frozen())
result = result.inverse_transformed(transData)
return result
def get_window_extent(self, renderer):
bbox = self.get_datalim(transforms.IdentityTransform())
#TODO:check to ensure that this does not fail for
#cases other than scatter plot legend
return bbox
def _prepare_points(self):
"""Point prep for drawing and hit testing"""
transform = self.get_transform()
transOffset = self.get_offset_transform()
offsets = self._offsets
paths = self.get_paths()
if self.have_units():
paths = []
for path in self.get_paths():
vertices = path.vertices
xs, ys = vertices[:, 0], vertices[:, 1]
xs = self.convert_xunits(xs)
ys = self.convert_yunits(ys)
paths.append(mpath.Path(zip(xs, ys), path.codes))
if offsets.size > 0:
xs = self.convert_xunits(offsets[:,0])
ys = self.convert_yunits(offsets[:,1])
offsets = zip(xs, ys)
offsets = np.asanyarray(offsets, np.float_)
offsets.shape = (-1, 2) # Make it Nx2
if not transform.is_affine:
paths = [transform.transform_path_non_affine(path) for path in paths]
transform = transform.get_affine()
if not transOffset.is_affine :
offsets = transOffset.transform_non_affine(offsets)
# This might have changed an ndarray into a masked array.
transOffset = transOffset.get_affine()
if np.ma.isMaskedArray(offsets):
offsets = offsets.filled(np.nan)
# Changing from a masked array to nan-filled ndarray
# is probably most efficient at this point.
return transform, transOffset, offsets, paths
@allow_rasterization
def draw(self, renderer):
if not self.get_visible(): return
renderer.open_group(self.__class__.__name__, self.get_gid())
self.update_scalarmappable()
transform, transOffset, offsets, paths = self._prepare_points()
gc = renderer.new_gc()
self._set_gc_clip(gc)
gc.set_snap(self.get_snap())
if self._hatch:
gc.set_hatch(self._hatch)
renderer.draw_path_collection(
gc, transform.frozen(), paths, self.get_transforms(),
offsets, transOffset, self.get_facecolor(), self.get_edgecolor(),
self._linewidths, self._linestyles, self._antialiaseds, self._urls,
self._offset_position)
gc.restore()
renderer.close_group(self.__class__.__name__)
def set_pickradius(self, pr):
self._pickradius = pr
def get_pickradius(self):
return self._pickradius
def contains(self, mouseevent):
"""
Test whether the mouse event occurred in the collection.
Returns True | False, ``dict(ind=itemlist)``, where every
item in itemlist contains the event.
"""
if callable(self._contains):
return self._contains(self,mouseevent)
if not self.get_visible():
return False, {}
if self._picker is True: # the Boolean constant, not just nonzero or 1
pickradius = self._pickradius
else:
try:
pickradius = float(self._picker)
except TypeError:
# This should not happen if "contains" is called via
# pick, the normal route; the check is here in case
# it is called through some unanticipated route.
warnings.warn(
"Collection picker %s could not be converted to float"
% self._picker)
pickradius = self._pickradius
transform, transOffset, offsets, paths = self._prepare_points()
ind = mpath.point_in_path_collection(
mouseevent.x, mouseevent.y, pickradius,
transform.frozen(), paths, self.get_transforms(),
offsets, transOffset, pickradius <= 0)
return len(ind)>0, dict(ind=ind)
def set_urls(self, urls):
if urls is None:
self._urls = [None,]
else:
self._urls = urls
def get_urls(self): return self._urls
def set_hatch(self, hatch):
"""
Set the hatching pattern
*hatch* can be one of::
/ - diagonal hatching
\ - back diagonal
| - vertical
- - horizontal
+ - crossed
x - crossed diagonal
o - small circle
O - large circle
. - dots
* - stars
Letters can be combined, in which case all the specified
hatchings are done. If same letter repeats, it increases the
density of hatching of that pattern.
Hatching is supported in the PostScript, PDF, SVG and Agg
backends only.
Unlike other properties such as linewidth and colors, hatching
can only be specified for the collection as a whole, not separately
for each member.
ACCEPTS: [ '/' | '\\\\' | '|' | '-' | '+' | 'x' | 'o' | 'O' | '.' | '*' ]
"""
self._hatch = hatch
def get_hatch(self):
'Return the current hatching pattern'
return self._hatch
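    # A minimal, hedged sketch of the hatch patterns documented in set_hatch
    # above (illustrative only, kept as a comment; assumes an existing Axes
    # `ax`).  Hatching applies to the collection as a whole:
    #
    #     from matplotlib.collections import PolyCollection
    #     squares = [[(i, 0), (i, 1), (i + 0.8, 1), (i + 0.8, 0)]
    #                for i in range(3)]
    #     col = PolyCollection(squares, facecolors='none', edgecolors='black')
    #     col.set_hatch('//')     # repeated letter -> denser diagonal hatch
    #     ax.add_collection(col)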
def set_offsets(self, offsets):
"""
Set the offsets for the collection. *offsets* can be a scalar
or a sequence.
ACCEPTS: float or sequence of floats
"""
offsets = np.asanyarray(offsets, np.float_)
offsets.shape = (-1, 2) # Make it Nx2
#This decision is based on how they are initialized above
if self._uniform_offsets is None:
self._offsets = offsets
else:
self._uniform_offsets = offsets
def get_offsets(self):
"""
Return the offsets for the collection.
"""
#This decision is based on how they are initialized above in __init__()
if self._uniform_offsets is None:
return self._offsets
else:
return self._uniform_offsets
def set_offset_position(self, offset_position):
"""
Set how offsets are applied. If *offset_position* is 'screen'
(default) the offset is applied after the master transform has
been applied, that is, the offsets are in screen coordinates.
If offset_position is 'data', the offset is applied before the
master transform, i.e., the offsets are in data coordinates.
"""
if offset_position not in ('screen', 'data'):
raise ValueError("offset_position must be 'screen' or 'data'")
self._offset_position = offset_position
def get_offset_position(self):
"""
Returns how offsets are applied for the collection. If
*offset_position* is 'screen', the offset is applied after the
master transform has been applied, that is, the offsets are in
screen coordinates. If offset_position is 'data', the offset
is applied before the master transform, i.e., the offsets are
in data coordinates.
"""
return self._offset_position
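    # A minimal, hedged sketch of 'screen' vs 'data' offsets described in
    # set_offset_position above (illustrative only, kept as a comment;
    # assumes an existing Axes `ax`):
    #
    #     from matplotlib.collections import CircleCollection
    #     col = CircleCollection([40],                    # area in points^2
    #                            offsets=[(1, 1), (2, 2)],
    #                            transOffset=ax.transData,
    #                            offset_position='data')  # offsets in data units
    #     ax.add_collection(col)
    #     # With offset_position='screen' (the default) the same offsets are
    #     # interpreted in screen coordinates, after the master transform.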
def set_linewidth(self, lw):
"""
Set the linewidth(s) for the collection. *lw* can be a scalar
or a sequence; if it is a sequence the patches will cycle
through the sequence
ACCEPTS: float or sequence of floats
"""
if lw is None: lw = mpl.rcParams['patch.linewidth']
self._linewidths = self._get_value(lw)
def set_linewidths(self, lw):
"""alias for set_linewidth"""
return self.set_linewidth(lw)
def set_lw(self, lw):
"""alias for set_linewidth"""
return self.set_linewidth(lw)
def set_linestyle(self, ls):
"""
Set the linestyle(s) for the collection.
ACCEPTS: ['solid' | 'dashed', 'dashdot', 'dotted' |
(offset, on-off-dash-seq) ]
"""
try:
dashd = backend_bases.GraphicsContextBase.dashd
if cbook.is_string_like(ls):
if ls in dashd:
dashes = [dashd[ls]]
elif ls in cbook.ls_mapper:
dashes = [dashd[cbook.ls_mapper[ls]]]
else:
raise ValueError()
elif cbook.iterable(ls):
try:
dashes = []
for x in ls:
if cbook.is_string_like(x):
if x in dashd:
dashes.append(dashd[x])
elif x in cbook.ls_mapper:
dashes.append(dashd[cbook.ls_mapper[x]])
else:
raise ValueError()
elif cbook.iterable(x) and len(x) == 2:
dashes.append(x)
else:
raise ValueError()
except ValueError:
if len(ls)==2:
dashes = ls
else:
raise ValueError()
else:
raise ValueError()
except ValueError:
raise ValueError('Do not know how to convert %s to dashes'%ls)
self._linestyles = dashes
def set_linestyles(self, ls):
"""alias for set_linestyle"""
return self.set_linestyle(ls)
def set_dashes(self, ls):
"""alias for set_linestyle"""
return self.set_linestyle(ls)
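    # A minimal, hedged sketch of the forms accepted by set_linestyle above
    # (illustrative only, kept as a comment; assumes `lc` is an existing
    # LineCollection):
    #
    #     lc.set_linestyle('dashed')           # a named style
    #     lc.set_linestyle([(0, (6, 2))])      # 6 pt on, 2 pt off, offset 0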
def set_antialiased(self, aa):
"""
Set the antialiasing state for rendering.
ACCEPTS: Boolean or sequence of booleans
"""
if aa is None:
aa = mpl.rcParams['patch.antialiased']
self._antialiaseds = self._get_bool(aa)
def set_antialiaseds(self, aa):
"""alias for set_antialiased"""
return self.set_antialiased(aa)
def set_color(self, c):
"""
Set both the edgecolor and the facecolor.
ACCEPTS: matplotlib color arg or sequence of rgba tuples
.. seealso::
:meth:`set_facecolor`, :meth:`set_edgecolor`
For setting the edge or face color individually.
"""
self.set_facecolor(c)
self.set_edgecolor(c)
def set_facecolor(self, c):
"""
Set the facecolor(s) of the collection. *c* can be a
matplotlib color arg (all patches have same color), or a
sequence of rgba tuples; if it is a sequence the patches will
cycle through the sequence.
If *c* is 'none', the patch will not be filled.
ACCEPTS: matplotlib color arg or sequence of rgba tuples
"""
self._is_filled = True
try:
if c.lower() == 'none':
self._is_filled = False
except AttributeError:
pass
if c is None: c = mpl.rcParams['patch.facecolor']
self._facecolors_original = c
self._facecolors = mcolors.colorConverter.to_rgba_array(c, self._alpha)
def set_facecolors(self, c):
"""alias for set_facecolor"""
return self.set_facecolor(c)
def get_facecolor(self):
return self._facecolors
get_facecolors = get_facecolor
def get_edgecolor(self):
if self._edgecolors == 'face':
return self.get_facecolors()
else:
return self._edgecolors
get_edgecolors = get_edgecolor
def set_edgecolor(self, c):
"""
Set the edgecolor(s) of the collection. *c* can be a
matplotlib color arg (all patches have same color), or a
sequence of rgba tuples; if it is a sequence the patches will
cycle through the sequence.
If *c* is 'face', the edge color will always be the same as
the face color. If it is 'none', the patch boundary will not
be drawn.
ACCEPTS: matplotlib color arg or sequence of rgba tuples
"""
self._is_stroked = True
try:
if c.lower() == 'none':
self._is_stroked = False
except AttributeError:
pass
try:
if c.lower() == 'face':
self._edgecolors = 'face'
self._edgecolors_original = 'face'
return
except AttributeError:
pass
if c is None:
c = mpl.rcParams['patch.edgecolor']
self._edgecolors_original = c
self._edgecolors = mcolors.colorConverter.to_rgba_array(c, self._alpha)
def set_edgecolors(self, c):
"""alias for set_edgecolor"""
return self.set_edgecolor(c)
def set_alpha(self, alpha):
"""
Set the alpha tranparencies of the collection. *alpha* must be
a float or *None*.
ACCEPTS: float or None
"""
if alpha is not None:
try:
float(alpha)
except TypeError:
raise TypeError('alpha must be a float or None')
artist.Artist.set_alpha(self, alpha)
try:
self._facecolors = mcolors.colorConverter.to_rgba_array(
self._facecolors_original, self._alpha)
except (AttributeError, TypeError, IndexError):
pass
try:
if self._edgecolors_original != 'face':
self._edgecolors = mcolors.colorConverter.to_rgba_array(
self._edgecolors_original, self._alpha)
except (AttributeError, TypeError, IndexError):
pass
def get_linewidths(self):
return self._linewidths
get_linewidth = get_linewidths
def get_linestyles(self):
return self._linestyles
get_dashes = get_linestyle = get_linestyles
def update_scalarmappable(self):
"""
If the scalar mappable array is not none, update colors
from scalar data
"""
if self._A is None:
return
if self._A.ndim > 1:
raise ValueError('Collections can only map rank 1 arrays')
if not self.check_update("array"):
return
if self._is_filled:
self._facecolors = self.to_rgba(self._A, self._alpha)
elif self._is_stroked:
self._edgecolors = self.to_rgba(self._A, self._alpha)
def update_from(self, other):
'copy properties from other to self'
artist.Artist.update_from(self, other)
self._antialiaseds = other._antialiaseds
self._edgecolors_original = other._edgecolors_original
self._edgecolors = other._edgecolors
self._facecolors_original = other._facecolors_original
self._facecolors = other._facecolors
self._linewidths = other._linewidths
self._linestyles = other._linestyles
self._pickradius = other._pickradius
self._hatch = other._hatch
# update_from for scalarmappable
self._A = other._A
self.norm = other.norm
self.cmap = other.cmap
# self.update_dict = other.update_dict # do we need to copy this? -JJL
# these are not available for the object inspector until after the
# class is built so we define an initial set here for the init
# function and they will be overridden after object defn
docstring.interpd.update(Collection = """\
Valid Collection keyword arguments:
* *edgecolors*: None
* *facecolors*: None
* *linewidths*: None
* *antialiaseds*: None
* *offsets*: None
* *transOffset*: transforms.IdentityTransform()
* *norm*: None (optional for
:class:`matplotlib.cm.ScalarMappable`)
* *cmap*: None (optional for
:class:`matplotlib.cm.ScalarMappable`)
*offsets* and *transOffset* are used to translate the patch after
rendering (default no offsets)
If any of *edgecolors*, *facecolors*, *linewidths*, *antialiaseds*
are None, they default to their :data:`matplotlib.rcParams` patch
setting, in sequence form.
""")
class PathCollection(Collection):
"""
This is the most basic :class:`Collection` subclass.
"""
@docstring.dedent_interpd
def __init__(self, paths, sizes=None, **kwargs):
"""
*paths* is a sequence of :class:`matplotlib.path.Path`
instances.
%(Collection)s
"""
Collection.__init__(self, **kwargs)
self.set_paths(paths)
self._sizes = sizes
def set_paths(self, paths):
self._paths = paths
def get_paths(self):
return self._paths
def get_sizes(self):
return self._sizes
@allow_rasterization
def draw(self, renderer):
if self._sizes is not None:
self._transforms = [
transforms.Affine2D().scale(
(np.sqrt(x) * self.figure.dpi / 72.0))
for x in self._sizes]
return Collection.draw(self, renderer)
class PolyCollection(Collection):
@docstring.dedent_interpd
def __init__(self, verts, sizes = None, closed = True, **kwargs):
"""
*verts* is a sequence of ( *verts0*, *verts1*, ...) where
*verts_i* is a sequence of *xy* tuples of vertices, or an
equivalent :mod:`numpy` array of shape (*nv*, 2).
*sizes* is *None* (default) or a sequence of floats that
scale the corresponding *verts_i*. The scaling is applied
before the Artist master transform; if the latter is an identity
transform, then the overall scaling is such that if
*verts_i* specify a unit square, then *sizes_i* is the area
of that square in points^2.
If len(*sizes*) < *nv*, the additional values will be
taken cyclically from the array.
*closed*, when *True*, will explicitly close the polygon.
%(Collection)s
"""
Collection.__init__(self,**kwargs)
self._sizes = sizes
self.set_verts(verts, closed)
def set_verts(self, verts, closed=True):
'''This allows one to delay initialization of the vertices.'''
if np.ma.isMaskedArray(verts):
verts = verts.astype(np.float_).filled(np.nan)
# This is much faster than having Path do it one at a time.
if closed:
self._paths = []
for xy in verts:
if len(xy):
if np.ma.isMaskedArray(xy):
xy = np.ma.concatenate([xy, np.zeros((1,2))])
else:
xy = np.asarray(xy)
xy = np.concatenate([xy, np.zeros((1,2))])
codes = np.empty(xy.shape[0], dtype=mpath.Path.code_type)
codes[:] = mpath.Path.LINETO
codes[0] = mpath.Path.MOVETO
codes[-1] = mpath.Path.CLOSEPOLY
self._paths.append(mpath.Path(xy, codes))
else:
self._paths.append(mpath.Path(xy))
else:
self._paths = [mpath.Path(xy) for xy in verts]
set_paths = set_verts
@allow_rasterization
def draw(self, renderer):
if self._sizes is not None:
self._transforms = [
transforms.Affine2D().scale(
(np.sqrt(x) * self.figure.dpi / 72.0))
for x in self._sizes]
return Collection.draw(self, renderer)
class BrokenBarHCollection(PolyCollection):
"""
A collection of horizontal bars spanning *yrange* with a sequence of
*xranges*.
"""
@docstring.dedent_interpd
def __init__(self, xranges, yrange, **kwargs):
"""
*xranges*
sequence of (*xmin*, *xwidth*)
*yrange*
*ymin*, *ywidth*
%(Collection)s
"""
ymin, ywidth = yrange
ymax = ymin + ywidth
verts = [ [(xmin, ymin), (xmin, ymax), (xmin+xwidth, ymax), (xmin+xwidth, ymin), (xmin, ymin)] for xmin, xwidth in xranges]
PolyCollection.__init__(self, verts, **kwargs)
@staticmethod
def span_where(x, ymin, ymax, where, **kwargs):
"""
Create a BrokenBarHCollection to plot horizontal bars from
over the regions in *x* where *where* is True. The bars range
on the y-axis from *ymin* to *ymax*
A :class:`BrokenBarHCollection` is returned. *kwargs* are
passed on to the collection.
"""
xranges = []
for ind0, ind1 in mlab.contiguous_regions(where):
xslice = x[ind0:ind1]
if not len(xslice):
continue
xranges.append((xslice[0], xslice[-1]-xslice[0]))
collection = BrokenBarHCollection(xranges, [ymin, ymax-ymin], **kwargs)
return collection
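    # A minimal, hedged sketch of span_where above: shade a y-band wherever a
    # condition on x holds (illustrative only, kept as a comment; assumes an
    # existing Axes `ax`):
    #
    #     import numpy as np
    #     x = np.linspace(0, 4 * np.pi, 200)
    #     y = np.sin(x)
    #     bars = BrokenBarHCollection.span_where(
    #         x, ymin=-1, ymax=1, where=y > 0, facecolor='green', alpha=0.3)
    #     ax.add_collection(bars)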
class RegularPolyCollection(Collection):
"""Draw a collection of regular polygons with *numsides*."""
_path_generator = mpath.Path.unit_regular_polygon
@docstring.dedent_interpd
def __init__(self,
numsides,
rotation = 0 ,
sizes = (1,),
**kwargs):
"""
*numsides*
the number of sides of the polygon
*rotation*
the rotation of the polygon in radians
*sizes*
gives the area of the circle circumscribing the
regular polygon in points^2
%(Collection)s
Example: see :file:`examples/dynamic_collection.py` for
complete example::
offsets = np.random.rand(20,2)
facecolors = [cm.jet(x) for x in np.random.rand(20)]
black = (0,0,0,1)
collection = RegularPolyCollection(
numsides=5, # a pentagon
rotation=0, sizes=(50,),
facecolors = facecolors,
edgecolors = (black,),
linewidths = (1,),
offsets = offsets,
transOffset = ax.transData,
)
"""
Collection.__init__(self,**kwargs)
self._sizes = sizes
self._numsides = numsides
self._paths = [self._path_generator(numsides)]
self._rotation = rotation
self.set_transform(transforms.IdentityTransform())
@allow_rasterization
def draw(self, renderer):
self._transforms = [
transforms.Affine2D().rotate(-self._rotation).scale(
(np.sqrt(x) * self.figure.dpi / 72.0) / np.sqrt(np.pi))
for x in self._sizes]
return Collection.draw(self, renderer)
def get_numsides(self):
return self._numsides
def get_rotation(self):
return self._rotation
def get_sizes(self):
return self._sizes
class StarPolygonCollection(RegularPolyCollection):
"""
Draw a collection of regular stars with *numsides* points."""
_path_generator = mpath.Path.unit_regular_star
class AsteriskPolygonCollection(RegularPolyCollection):
"""
Draw a collection of regular asterisks with *numsides* points."""
_path_generator = mpath.Path.unit_regular_asterisk
class LineCollection(Collection):
"""
All parameters must be sequences or scalars; if scalars, they will
be converted to sequences. The property of the ith line
segment is::
prop[i % len(props)]
i.e., the properties cycle if the ``len`` of props is less than the
number of segments.
"""
zorder = 2
def __init__(self, segments, # Can be None.
linewidths = None,
colors = None,
antialiaseds = None,
linestyles = 'solid',
offsets = None,
transOffset = None,
norm = None,
cmap = None,
pickradius = 5,
**kwargs
):
"""
*segments*
a sequence of (*line0*, *line1*, *line2*), where::
linen = (x0, y0), (x1, y1), ... (xm, ym)
or the equivalent numpy array with two columns. Each line
can be a different length.
*colors*
must be a sequence of RGBA tuples (eg arbitrary color
strings, etc, not allowed).
*antialiaseds*
must be a sequence of ones or zeros
*linestyles* [ 'solid' | 'dashed' | 'dashdot' | 'dotted' ]
a string or dash tuple. The dash tuple is::
(offset, onoffseq),
where *onoffseq* is an even length tuple of on and off ink
in points.
If *linewidths*, *colors*, or *antialiaseds* is None, they
default to their rcParams setting, in sequence form.
If *offsets* and *transOffset* are not None, then
*offsets* are transformed by *transOffset* and applied after
the segments have been transformed to display coordinates.
If *offsets* is not None but *transOffset* is None, then the
*offsets* are added to the segments before any transformation.
In this case, a single offset can be specified as::
offsets=(xo,yo)
and this value will be added cumulatively to each successive
segment, so as to produce a set of successively offset curves.
*norm*
None (optional for :class:`matplotlib.cm.ScalarMappable`)
*cmap*
None (optional for :class:`matplotlib.cm.ScalarMappable`)
*pickradius* is the tolerance for mouse clicks picking a line.
The default is 5 pt.
The use of :class:`~matplotlib.cm.ScalarMappable` is optional.
If the :class:`~matplotlib.cm.ScalarMappable` array
:attr:`~matplotlib.cm.ScalarMappable._A` is not None (ie a call to
:meth:`~matplotlib.cm.ScalarMappable.set_array` has been made), at
draw time a call to scalar mappable will be made to set the colors.
"""
if colors is None: colors = mpl.rcParams['lines.color']
if linewidths is None: linewidths = (mpl.rcParams['lines.linewidth'],)
if antialiaseds is None: antialiaseds = (mpl.rcParams['lines.antialiased'],)
self.set_linestyles(linestyles)
colors = mcolors.colorConverter.to_rgba_array(colors)
Collection.__init__(
self,
edgecolors=colors,
facecolors='none',
linewidths=linewidths,
linestyles=linestyles,
antialiaseds=antialiaseds,
offsets=offsets,
transOffset=transOffset,
norm=norm,
cmap=cmap,
pickradius=pickradius,
**kwargs)
self.set_segments(segments)
def set_segments(self, segments):
if segments is None: return
_segments = []
for seg in segments:
if not np.ma.isMaskedArray(seg):
seg = np.asarray(seg, np.float_)
_segments.append(seg)
if self._uniform_offsets is not None:
_segments = self._add_offsets(_segments)
self._paths = [mpath.Path(seg) for seg in _segments]
set_verts = set_segments # for compatibility with PolyCollection
set_paths = set_segments
def _add_offsets(self, segs):
offsets = self._uniform_offsets
Nsegs = len(segs)
Noffs = offsets.shape[0]
if Noffs == 1:
for i in range(Nsegs):
segs[i] = segs[i] + i * offsets
else:
for i in range(Nsegs):
io = i%Noffs
segs[i] = segs[i] + offsets[io:io+1]
return segs
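    # A minimal, hedged sketch of the cumulative-offset behaviour handled by
    # _add_offsets above: with a single (xo, yo) offset and no transOffset,
    # segment i is shifted by i * (xo, yo) (illustrative only, kept as a
    # comment; assumes an existing Axes `ax`):
    #
    #     import numpy as np
    #     x = np.linspace(0, 2 * np.pi, 100)
    #     segs = [np.column_stack([x, np.sin(x)])] * 5
    #     lc = LineCollection(segs, offsets=(0, 0.5))  # each curve 0.5 higher
    #     ax.add_collection(lc)
    #     ax.autoscale_view()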
def set_color(self, c):
"""
Set the color(s) of the line collection. *c* can be a
matplotlib color arg (all patches have same color), or a
        sequence of rgba tuples; if it is a sequence the patches will
cycle through the sequence.
ACCEPTS: matplotlib color arg or sequence of rgba tuples
"""
self.set_edgecolor(c)
def color(self, c):
"""
Set the color(s) of the line collection. *c* can be a
matplotlib color arg (all patches have same color), or a
        sequence of rgba tuples; if it is a sequence the patches will
cycle through the sequence
ACCEPTS: matplotlib color arg or sequence of rgba tuples
"""
warnings.warn('LineCollection.color deprecated; use set_color instead')
return self.set_color(c)
def get_color(self):
return self._edgecolors
get_colors = get_color # for compatibility with old versions
class CircleCollection(Collection):
"""
A collection of circles, drawn using splines.
"""
@docstring.dedent_interpd
def __init__(self, sizes, **kwargs):
"""
*sizes*
Gives the area of the circle in points^2
%(Collection)s
"""
Collection.__init__(self,**kwargs)
self._sizes = sizes
self.set_transform(transforms.IdentityTransform())
self._paths = [mpath.Path.unit_circle()]
def get_sizes(self):
"return sizes of circles"
return self._sizes
@allow_rasterization
def draw(self, renderer):
# sizes is the area of the circle circumscribing the polygon
# in points^2
self._transforms = [
transforms.Affine2D().scale(
(np.sqrt(x) * self.figure.dpi / 72.0) / np.sqrt(np.pi))
for x in self._sizes]
return Collection.draw(self, renderer)
class EllipseCollection(Collection):
"""
A collection of ellipses, drawn using splines.
"""
@docstring.dedent_interpd
def __init__(self, widths, heights, angles, units='points', **kwargs):
"""
*widths*: sequence
lengths of first axes (e.g., major axis lengths)
*heights*: sequence
lengths of second axes
*angles*: sequence
angles of first axes, degrees CCW from the X-axis
*units*: ['points' | 'inches' | 'dots' | 'width' | 'height'
| 'x' | 'y' | 'xy']
units in which majors and minors are given; 'width' and
'height' refer to the dimensions of the axes, while 'x'
and 'y' refer to the *offsets* data units. 'xy' differs
from all others in that the angle as plotted varies with
the aspect ratio, and equals the specified angle only when
the aspect ratio is unity. Hence it behaves the same as
the :class:`~matplotlib.patches.Ellipse` with
axes.transData as its transform.
Additional kwargs inherited from the base :class:`Collection`:
%(Collection)s
"""
Collection.__init__(self,**kwargs)
self._widths = 0.5 * np.asarray(widths).ravel()
self._heights = 0.5 * np.asarray(heights).ravel()
self._angles = np.asarray(angles).ravel() *(np.pi/180.0)
self._units = units
self.set_transform(transforms.IdentityTransform())
self._transforms = []
self._paths = [mpath.Path.unit_circle()]
def _set_transforms(self):
"""
Calculate transforms immediately before drawing.
"""
self._transforms = []
ax = self.axes
fig = self.figure
if self._units == 'xy':
sc = 1
elif self._units == 'x':
sc = ax.bbox.width / ax.viewLim.width
elif self._units == 'y':
sc = ax.bbox.height / ax.viewLim.height
elif self._units == 'inches':
sc = fig.dpi
elif self._units == 'points':
sc = fig.dpi / 72.0
elif self._units == 'width':
sc = ax.bbox.width
elif self._units == 'height':
sc = ax.bbox.height
elif self._units == 'dots':
sc = 1.0
else:
raise ValueError('unrecognized units: %s' % self._units)
_affine = transforms.Affine2D
for x, y, a in zip(self._widths, self._heights, self._angles):
trans = _affine().scale(x * sc, y * sc).rotate(a)
self._transforms.append(trans)
if self._units == 'xy':
m = ax.transData.get_affine().get_matrix().copy()
m[:2, 2:] = 0
self.set_transform(_affine(m))
@allow_rasterization
def draw(self, renderer):
self._set_transforms()
Collection.draw(self, renderer)
class PatchCollection(Collection):
"""
A generic collection of patches.
This makes it easier to assign a color map to a heterogeneous
collection of patches.
This also may improve plotting speed, since PatchCollection will
draw faster than a large number of patches.
"""
def __init__(self, patches, match_original=False, **kwargs):
"""
*patches*
a sequence of Patch objects. This list may include
a heterogeneous assortment of different patch types.
*match_original*
If True, use the colors and linewidths of the original
patches. If False, new colors may be assigned by
providing the standard collection arguments, facecolor,
edgecolor, linewidths, norm or cmap.
If any of *edgecolors*, *facecolors*, *linewidths*,
*antialiaseds* are None, they default to their
:data:`matplotlib.rcParams` patch setting, in sequence form.
The use of :class:`~matplotlib.cm.ScalarMappable` is optional.
If the :class:`~matplotlib.cm.ScalarMappable` matrix _A is not
None (ie a call to set_array has been made), at draw time a
call to scalar mappable will be made to set the face colors.
"""
if match_original:
def determine_facecolor(patch):
if patch.get_fill():
return patch.get_facecolor()
return [0, 0, 0, 0]
facecolors = [determine_facecolor(p) for p in patches]
edgecolors = [p.get_edgecolor() for p in patches]
linewidths = [p.get_linewidth() for p in patches]
linestyles = [p.get_linestyle() for p in patches]
antialiaseds = [p.get_antialiased() for p in patches]
Collection.__init__(
self,
edgecolors=edgecolors,
facecolors=facecolors,
linewidths=linewidths,
linestyles=linestyles,
antialiaseds = antialiaseds)
else:
Collection.__init__(self, **kwargs)
self.set_paths(patches)
def set_paths(self, patches):
paths = [p.get_transform().transform_path(p.get_path())
for p in patches]
self._paths = paths
class TriMesh(Collection):
"""
Class for the efficient drawing of a triangular mesh using
Gouraud shading.
A triangular mesh is a :class:`~matplotlib.tri.Triangulation`
object.
"""
def __init__(self, triangulation, **kwargs):
Collection.__init__(self, **kwargs)
        self._triangulation = triangulation
self._shading = 'gouraud'
self._is_filled = True
self._bbox = transforms.Bbox.unit()
# Unfortunately this requires a copy, unless Triangulation
# was rewritten.
xy = np.hstack((triangulation.x.reshape(-1,1),
triangulation.y.reshape(-1,1)))
self._bbox.update_from_data_xy(xy)
def get_paths(self):
if self._paths is None:
self.set_paths()
return self._paths
def set_paths(self):
self._paths = self.convert_mesh_to_paths(self._triangulation)
@staticmethod
def convert_mesh_to_paths(tri):
"""
Converts a given mesh into a sequence of
:class:`matplotlib.path.Path` objects for easier rendering by
backends that do not directly support meshes.
This function is primarily of use to backend implementers.
"""
Path = mpath.Path
triangles = tri.get_masked_triangles()
verts = np.concatenate((tri.x[triangles][...,np.newaxis],
tri.y[triangles][...,np.newaxis]), axis=2)
return [Path(x) for x in verts]
@allow_rasterization
def draw(self, renderer):
if not self.get_visible(): return
renderer.open_group(self.__class__.__name__)
transform = self.get_transform()
# Get a list of triangles and the color at each vertex.
tri = self._triangulation
triangles = tri.get_masked_triangles()
verts = np.concatenate((tri.x[triangles][...,np.newaxis],
tri.y[triangles][...,np.newaxis]), axis=2)
self.update_scalarmappable()
        colors = self._facecolors[triangles]
gc = renderer.new_gc()
self._set_gc_clip(gc)
gc.set_linewidth(self.get_linewidth()[0])
renderer.draw_gouraud_triangles(gc, verts, colors, transform.frozen())
gc.restore()
renderer.close_group(self.__class__.__name__)
class QuadMesh(Collection):
"""
Class for the efficient drawing of a quadrilateral mesh.
A quadrilateral mesh consists of a grid of vertices. The
dimensions of this array are (*meshWidth* + 1, *meshHeight* +
1). Each vertex in the mesh has a different set of "mesh
coordinates" representing its position in the topology of the
mesh. For any values (*m*, *n*) such that 0 <= *m* <= *meshWidth*
and 0 <= *n* <= *meshHeight*, the vertices at mesh coordinates
(*m*, *n*), (*m*, *n* + 1), (*m* + 1, *n* + 1), and (*m* + 1, *n*)
form one of the quadrilaterals in the mesh. There are thus
(*meshWidth* * *meshHeight*) quadrilaterals in the mesh. The mesh
need not be regular and the polygons need not be convex.
    A quadrilateral mesh is represented by a (((*meshWidth* + 1) *
    (*meshHeight* + 1)) x 2) numpy array *coordinates*, where each row is
the *x* and *y* coordinates of one of the vertices. To define the
function that maps from a data point to its corresponding color,
use the :meth:`set_cmap` method. Each of these arrays is indexed in
row-major order by the mesh coordinates of the vertex (or the mesh
coordinates of the lower left vertex, in the case of the
colors).
For example, the first entry in *coordinates* is the
coordinates of the vertex at mesh coordinates (0, 0), then the one
at (0, 1), then at (0, 2) .. (0, meshWidth), (1, 0), (1, 1), and
so on.
*shading* may be 'flat', or 'gouraud'
"""
def __init__(self, meshWidth, meshHeight, coordinates,
antialiased=True, shading='flat', **kwargs):
Collection.__init__(self, **kwargs)
self._meshWidth = meshWidth
self._meshHeight = meshHeight
self._coordinates = coordinates
self._antialiased = antialiased
self._shading = shading
self._bbox = transforms.Bbox.unit()
self._bbox.update_from_data_xy(coordinates.reshape(
((meshWidth + 1) * (meshHeight + 1), 2)))
# By converting to floats now, we can avoid that on every draw.
self._coordinates = self._coordinates.reshape((meshHeight + 1, meshWidth + 1, 2))
self._coordinates = np.array(self._coordinates, np.float_)
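    # A minimal, hedged sketch of the vertex layout consumed by __init__
    # above: a meshWidth x meshHeight grid of quads needs
    # (meshWidth + 1) * (meshHeight + 1) vertices, one (x, y) row each
    # (illustrative only, kept as a comment; assumes an existing Axes `ax`):
    #
    #     import numpy as np
    #     mw, mh = 3, 2
    #     xx, yy = np.meshgrid(np.arange(mw + 1), np.arange(mh + 1))
    #     coords = np.dstack([xx, yy]).reshape(-1, 2)   # ((mw+1)*(mh+1), 2)
    #     mesh = QuadMesh(mw, mh, coords)
    #     mesh.set_array(np.random.rand(mw * mh))       # one value per quad
    #     ax.add_collection(mesh)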
def get_paths(self):
if self._paths is None:
self.set_paths()
return self._paths
def set_paths(self):
self._paths = self.convert_mesh_to_paths(
self._meshWidth, self._meshHeight, self._coordinates)
@staticmethod
def convert_mesh_to_paths(meshWidth, meshHeight, coordinates):
"""
Converts a given mesh into a sequence of
:class:`matplotlib.path.Path` objects for easier rendering by
backends that do not directly support quadmeshes.
This function is primarily of use to backend implementers.
"""
Path = mpath.Path
if ma.isMaskedArray(coordinates):
c = coordinates.data
else:
c = coordinates
points = np.concatenate((
c[0:-1, 0:-1],
c[0:-1, 1: ],
c[1: , 1: ],
c[1: , 0:-1],
c[0:-1, 0:-1]
), axis=2)
points = points.reshape((meshWidth * meshHeight, 5, 2))
return [Path(x) for x in points]
def convert_mesh_to_triangles(self, meshWidth, meshHeight, coordinates):
"""
Converts a given mesh into a sequence of triangles, each point
with its own color. This is useful for experiments using
        `draw_gouraud_triangle`.
"""
Path = mpath.Path
if ma.isMaskedArray(coordinates):
p = coordinates.data
else:
p = coordinates
p_a = p[0:-1, 0:-1]
p_b = p[0:-1, 1: ]
p_c = p[1: , 1: ]
p_d = p[1: , 0:-1]
p_center = (p_a + p_b + p_c + p_d) / 4.0
triangles = np.concatenate((
p_a, p_b, p_center,
p_b, p_c, p_center,
p_c, p_d, p_center,
p_d, p_a, p_center,
), axis=2)
triangles = triangles.reshape((meshWidth * meshHeight * 4, 3, 2))
c = self.get_facecolor().reshape((meshHeight + 1, meshWidth + 1, 4))
c_a = c[0:-1, 0:-1]
c_b = c[0:-1, 1: ]
c_c = c[1: , 1: ]
c_d = c[1: , 0:-1]
c_center = (c_a + c_b + c_c + c_d) / 4.0
colors = np.concatenate((
c_a, c_b, c_center,
c_b, c_c, c_center,
c_c, c_d, c_center,
c_d, c_a, c_center,
), axis=2)
colors = colors.reshape((meshWidth * meshHeight * 4, 3, 4))
return triangles, colors
def get_datalim(self, transData):
return self._bbox
@allow_rasterization
def draw(self, renderer):
if not self.get_visible(): return
renderer.open_group(self.__class__.__name__, self.get_gid())
transform = self.get_transform()
transOffset = self.get_offset_transform()
offsets = self._offsets
if self.have_units():
if len(self._offsets):
                xs = self.convert_xunits(self._offsets[:, 0])
                ys = self.convert_yunits(self._offsets[:, 1])
offsets = zip(xs, ys)
offsets = np.asarray(offsets, np.float_)
offsets.shape = (-1, 2) # Make it Nx2
self.update_scalarmappable()
if not transform.is_affine:
coordinates = self._coordinates.reshape(
(self._coordinates.shape[0] *
self._coordinates.shape[1],
2))
coordinates = transform.transform(coordinates)
coordinates = coordinates.reshape(self._coordinates.shape)
transform = transforms.IdentityTransform()
else:
coordinates = self._coordinates
if not transOffset.is_affine:
offsets = transOffset.transform_non_affine(offsets)
transOffset = transOffset.get_affine()
gc = renderer.new_gc()
self._set_gc_clip(gc)
gc.set_linewidth(self.get_linewidth()[0])
if self._shading == 'gouraud':
triangles, colors = self.convert_mesh_to_triangles(
self._meshWidth, self._meshHeight, coordinates)
renderer.draw_gouraud_triangles(gc, triangles, colors, transform.frozen())
else:
renderer.draw_quad_mesh(
gc, transform.frozen(), self._meshWidth, self._meshHeight,
coordinates, offsets, transOffset, self.get_facecolor(),
self._antialiased, self.get_edgecolors())
gc.restore()
renderer.close_group(self.__class__.__name__)
patchstr = artist.kwdoc(Collection)
for k in ('QuadMesh', 'TriMesh', 'PolyCollection', 'BrokenBarHCollection',
'RegularPolyCollection', 'PathCollection',
'StarPolygonCollection', 'PatchCollection',
'CircleCollection', 'Collection',):
docstring.interpd.update({k:patchstr})
docstring.interpd.update(LineCollection = artist.kwdoc(LineCollection))
| mit |
AlirezaShahabi/zipline | zipline/assets/assets.py | 6 | 34618 | # Copyright 2015 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABCMeta
from numbers import Integral
import numpy as np
import sqlite3
from sqlite3 import Row
import warnings
from logbook import Logger
import pandas as pd
from pandas.tseries.tools import normalize_date
from six import with_metaclass, string_types
from zipline.errors import (
ConsumeAssetMetaDataError,
InvalidAssetType,
MultipleSymbolsFound,
RootSymbolNotFound,
SidAssignmentError,
SidNotFound,
SymbolNotFound,
MapAssetIdentifierIndexError,
)
from zipline.assets._assets import (
Asset, Equity, Future
)
log = Logger('assets.py')
# Expected fields for an Asset's metadata
ASSET_FIELDS = [
'sid',
'asset_type',
'symbol',
'root_symbol',
'asset_name',
'start_date',
'end_date',
'first_traded',
'exchange',
'notice_date',
'expiration_date',
'contract_multiplier',
# The following fields are for compatibility with other systems
'file_name', # Used as symbol
'company_name', # Used as asset_name
'start_date_nano', # Used as start_date
'end_date_nano', # Used as end_date
]
# Expected fields for an Asset's metadata
ASSET_TABLE_FIELDS = [
'sid',
'symbol',
'asset_name',
'start_date',
'end_date',
'first_traded',
'exchange',
]
# Expected fields for an Asset's metadata
FUTURE_TABLE_FIELDS = ASSET_TABLE_FIELDS + [
'root_symbol',
'notice_date',
'expiration_date',
'contract_multiplier',
]
EQUITY_TABLE_FIELDS = ASSET_TABLE_FIELDS
# Create the query once from the fields, so that the join is not done
# repeatedly.
FUTURE_BY_SID_QUERY = 'select {0} from futures where sid=?'.format(
", ".join(FUTURE_TABLE_FIELDS))
EQUITY_BY_SID_QUERY = 'select {0} from equities where sid=?'.format(
", ".join(EQUITY_TABLE_FIELDS))
class AssetFinder(object):
def __init__(self,
metadata=None,
allow_sid_assignment=True,
fuzzy_char=None,
db_path=':memory:',
create_table=True):
self.fuzzy_char = fuzzy_char
# This flag controls if the AssetFinder is allowed to generate its own
# sids. If False, metadata that does not contain a sid will raise an
# exception when building assets.
self.allow_sid_assignment = allow_sid_assignment
if allow_sid_assignment:
self.end_date_to_assign = normalize_date(
pd.Timestamp('now', tz='UTC'))
self.conn = sqlite3.connect(db_path)
self.conn.text_factory = str
self.cursor = self.conn.cursor()
# The AssetFinder also holds a nested-dict of all metadata for
# reference when building Assets
self.metadata_cache = {}
# Create table and read in metadata.
# Should we use flags like 'r', 'w', instead?
# What we need to support is:
# - A 'throwaway' mode where the metadata is read each run.
# - A 'write' mode where the data is written to the provided db_path
        # - A 'read' mode where the asset finder uses a preexisting db.
if create_table:
self.create_db_tables()
if metadata is not None:
self.consume_metadata(metadata)
        # Cache for lookup of assets by sid; the objects in the asset lookup may
# be shared with the results from equity and future lookup caches.
#
# The top level cache exists to minimize lookups on the asset type
# routing.
#
# The caches are read through, i.e. accessing an asset through
# retrieve_asset, _retrieve_equity etc. will populate the cache on
# first retrieval.
self._asset_cache = {}
self._equity_cache = {}
self._future_cache = {}
self._asset_type_cache = {}
# Populated on first call to `lifetimes`.
self._asset_lifetimes = None
def create_db_tables(self):
c = self.conn.cursor()
c.execute("""
CREATE TABLE equities(
sid integer,
symbol text,
asset_name text,
start_date integer,
end_date integer,
first_traded integer,
exchange text,
fuzzy text
)""")
c.execute('CREATE INDEX equities_sid on equities(sid)')
c.execute('CREATE INDEX equities_symbol on equities(symbol)')
c.execute('CREATE INDEX equities_fuzzy on equities(fuzzy)')
c.execute("""
CREATE TABLE futures(
sid integer,
symbol text,
asset_name text,
start_date integer,
end_date integer,
first_traded integer,
exchange text,
root_symbol text,
notice_date integer,
expiration_date integer,
contract_multiplier real
)""")
c.execute('CREATE INDEX futures_sid on futures(sid)')
        c.execute('CREATE INDEX futures_root_symbol on futures(root_symbol)')
c.execute("""
CREATE TABLE asset_router
(sid integer,
asset_type text)
""")
c.execute('CREATE INDEX asset_router_sid on asset_router(sid)')
self.conn.commit()
def asset_type_by_sid(self, sid):
try:
return self._asset_type_cache[sid]
except KeyError:
pass
c = self.conn.cursor()
# Python 3 compatibility required forcing to int for sid = 0.
t = (int(sid),)
query = 'select asset_type from asset_router where sid=:sid'
c.execute(query, t)
data = c.fetchone()
if data is None:
return
asset_type = data[0]
self._asset_type_cache[sid] = asset_type
return asset_type
def retrieve_asset(self, sid, default_none=False):
if isinstance(sid, Asset):
return sid
try:
asset = self._asset_cache[sid]
except KeyError:
asset_type = self.asset_type_by_sid(sid)
if asset_type == 'equity':
asset = self._retrieve_equity(sid)
elif asset_type == 'future':
asset = self._retrieve_futures_contract(sid)
else:
asset = None
self._asset_cache[sid] = asset
if asset is not None:
return asset
elif default_none:
return None
else:
raise SidNotFound(sid=sid)
def _retrieve_equity(self, sid):
try:
return self._equity_cache[sid]
except KeyError:
pass
c = self.conn.cursor()
c.row_factory = Row
t = (int(sid),)
c.execute(EQUITY_BY_SID_QUERY, t)
data = dict(c.fetchone())
if data:
if data['start_date']:
data['start_date'] = pd.Timestamp(data['start_date'], tz='UTC')
if data['end_date']:
data['end_date'] = pd.Timestamp(data['end_date'], tz='UTC')
if data['first_traded']:
data['first_traded'] = pd.Timestamp(
data['first_traded'], tz='UTC')
equity = Equity(**data)
else:
equity = None
self._equity_cache[sid] = equity
return equity
def _retrieve_futures_contract(self, sid):
try:
return self._future_cache[sid]
except KeyError:
pass
c = self.conn.cursor()
t = (int(sid),)
c.row_factory = Row
c.execute(FUTURE_BY_SID_QUERY, t)
data = dict(c.fetchone())
if data:
if data['start_date']:
data['start_date'] = pd.Timestamp(data['start_date'], tz='UTC')
if data['end_date']:
data['end_date'] = pd.Timestamp(data['end_date'], tz='UTC')
if data['first_traded']:
data['first_traded'] = pd.Timestamp(
data['first_traded'], tz='UTC')
if data['notice_date']:
data['notice_date'] = pd.Timestamp(
data['notice_date'], tz='UTC')
if data['expiration_date']:
data['expiration_date'] = pd.Timestamp(
data['expiration_date'], tz='UTC')
future = Future(**data)
else:
future = None
self._future_cache[sid] = future
return future
def lookup_symbol_resolve_multiple(self, symbol, as_of_date=None):
"""
Return matching Asset of name symbol in database.
If multiple Assets are found and as_of_date is not set,
raises MultipleSymbolsFound.
        If no Asset was active at as_of_date, raises SymbolNotFound.
"""
if as_of_date is not None:
as_of_date = pd.Timestamp(normalize_date(as_of_date))
c = self.conn.cursor()
if as_of_date:
# If one SID exists for symbol, return that symbol
t = (symbol, as_of_date.value, as_of_date.value)
query = ("select sid from equities "
"where symbol=? "
"and start_date<=? "
"and end_date>=?")
c.execute(query, t)
candidates = c.fetchall()
if len(candidates) == 1:
return self._retrieve_equity(candidates[0][0])
# If no SID exists for symbol, return SID with the
# highest-but-not-over end_date
if len(candidates) == 0:
t = (symbol, as_of_date.value)
query = ("select sid from equities "
"where symbol=? "
"and start_date<=? "
"order by end_date desc "
"limit 1")
c.execute(query, t)
data = c.fetchone()
if data:
return self._retrieve_equity(data[0])
# If multiple SIDs exist for symbol, return latest start_date with
# end_date as a tie-breaker
if len(candidates) > 1:
t = (symbol, as_of_date.value)
query = ("select sid from equities "
"where symbol=? " +
"and start_date<=? " +
"order by start_date desc, end_date desc " +
"limit 1")
c.execute(query, t)
data = c.fetchone()
if data:
return self._retrieve_equity(data[0])
raise SymbolNotFound(symbol=symbol)
else:
t = (symbol,)
query = ("select sid from equities where symbol=?")
c.execute(query, t)
data = c.fetchall()
if len(data) == 1:
return self._retrieve_equity(data[0][0])
elif not data:
raise SymbolNotFound(symbol=symbol)
else:
options = []
for row in data:
sid = row[0]
asset = self._retrieve_equity(sid)
options.append(asset)
raise MultipleSymbolsFound(symbol=symbol,
options=options)
def lookup_symbol(self, symbol, as_of_date, fuzzy=False):
"""
If a fuzzy string is provided, then we try various symbols based on
the provided symbol. This is to facilitate mapping from a broker's
symbol to ours in cases where mapping to the broker's symbol loses
information. For example, if we have CMCS_A, but a broker has CMCSA,
when the broker provides CMCSA, it can also provide fuzzy='_',
so we can find a match by inserting an underscore.
"""
symbol = symbol.upper()
as_of_date = normalize_date(as_of_date)
if not fuzzy:
try:
return self.lookup_symbol_resolve_multiple(symbol, as_of_date)
except SymbolNotFound:
return None
else:
c = self.conn.cursor()
fuzzy = symbol.replace(self.fuzzy_char, '')
t = (fuzzy, as_of_date.value, as_of_date.value)
query = ("select sid from equities "
"where fuzzy=? " +
"and start_date<=? " +
"and end_date>=?")
c.execute(query, t)
candidates = c.fetchall()
# If one SID exists for symbol, return that symbol
if len(candidates) == 1:
return self._retrieve_equity(candidates[0][0])
# If multiple SIDs exist for symbol, return latest start_date with
# end_date as a tie-breaker
if len(candidates) > 1:
t = (symbol, as_of_date.value)
                query = ("select sid from equities "
                         "where symbol=? " +
                         "and start_date<=? " +
                         "order by start_date desc, end_date desc " +
                         "limit 1")
c.execute(query, t)
data = c.fetchone()
if data:
return self._retrieve_equity(data[0])
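    # A minimal, hedged sketch of the fuzzy matching described above: a
    # finder built with fuzzy_char='_' can resolve a broker-style 'CMCSA'
    # against an internally stored 'CMCS_A' (illustrative only, kept as a
    # comment; the metadata below is hypothetical):
    #
    #     import pandas as pd
    #     finder = AssetFinder(
    #         metadata={0: {'symbol': 'CMCS_A', 'asset_type': 'equity'}},
    #         fuzzy_char='_')
    #     asset = finder.lookup_symbol('CMCSA',
    #                                  as_of_date=pd.Timestamp('2015-01-02'),
    #                                  fuzzy=True)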
def lookup_future_chain(self, root_symbol, as_of_date, knowledge_date):
""" Return the futures chain for a given root symbol.
Parameters
----------
root_symbol : str
Root symbol of the desired future.
as_of_date : pd.Timestamp or pd.NaT
Date at which the chain determination is rooted. I.e. the
existing contract whose notice date is first after this
date is the primary contract, etc. If NaT is given, the
chain is unbounded, and all contracts for this root symbol
are returned.
knowledge_date : pd.Timestamp or pd.NaT
Date for determining which contracts exist for inclusion in
this chain. Contracts exist only if they have a start_date
on or before this date. If NaT is given and as_of_date is
is not NaT, the value of as_of_date is used for
knowledge_date.
Returns
-------
list
A list of Future objects, the chain for the given
parameters.
Raises
------
RootSymbolNotFound
Raised when a future chain could not be found for the given
root symbol.
"""
c = self.conn.cursor()
if as_of_date is pd.NaT:
# If the as_of_date is NaT, get all contracts for this
# root symbol.
t = {'root_symbol': root_symbol}
c.execute("""
select sid from futures
where root_symbol=:root_symbol
order by notice_date asc
""", t)
else:
if knowledge_date is pd.NaT:
# If knowledge_date is NaT, default to using as_of_date
t = {'root_symbol': root_symbol,
'as_of_date': as_of_date.value,
'knowledge_date': as_of_date.value}
else:
t = {'root_symbol': root_symbol,
'as_of_date': as_of_date.value,
'knowledge_date': knowledge_date.value}
c.execute("""
select sid from futures
where root_symbol=:root_symbol
and :as_of_date < notice_date
and start_date <= :knowledge_date
order by notice_date asc
""", t)
sids = [r[0] for r in c.fetchall()]
if not sids:
# Check if root symbol exists.
c.execute("""
select count(sid) from futures where root_symbol=:root_symbol
""", t)
count = c.fetchone()[0]
if count == 0:
raise RootSymbolNotFound(root_symbol=root_symbol)
else:
# If symbol exists, return empty future chain.
return []
return [self._retrieve_futures_contract(sid) for sid in sids]
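    # A minimal, hedged sketch of the chain lookup above, using hypothetical
    # contract metadata (illustrative only, kept as a comment).  Contracts
    # whose notice_date falls after as_of_date come back in ascending
    # notice_date order, so element 0 is the primary contract:
    #
    #     import pandas as pd
    #     metadata = {
    #         1: {'symbol': 'CLF15', 'root_symbol': 'CL',
    #             'asset_type': 'future', 'contract_multiplier': 1000,
    #             'notice_date': pd.Timestamp('2014-12-22', tz='UTC')},
    #         2: {'symbol': 'CLG15', 'root_symbol': 'CL',
    #             'asset_type': 'future', 'contract_multiplier': 1000,
    #             'notice_date': pd.Timestamp('2015-01-20', tz='UTC')},
    #     }
    #     finder = AssetFinder(metadata=metadata)
    #     chain = finder.lookup_future_chain(
    #         'CL',
    #         as_of_date=pd.Timestamp('2014-12-01', tz='UTC'),
    #         knowledge_date=pd.Timestamp('2014-12-01', tz='UTC'))
    #     # chain[0] -> CLF15 (primary), chain[1] -> CLG15 (secondary)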
@property
def sids(self):
c = self.conn.cursor()
query = 'select sid from asset_router'
c.execute(query)
return [r[0] for r in c.fetchall()]
def _lookup_generic_scalar(self,
asset_convertible,
as_of_date,
matches,
missing):
"""
Convert asset_convertible to an asset.
On success, append to matches.
On failure, append to missing.
"""
try:
if isinstance(asset_convertible, Asset):
matches.append(asset_convertible)
elif isinstance(asset_convertible, Integral):
result = self.retrieve_asset(int(asset_convertible))
if result is None:
raise SymbolNotFound(symbol=asset_convertible)
matches.append(result)
elif isinstance(asset_convertible, string_types):
# Throws SymbolNotFound on failure to match.
matches.append(
self.lookup_symbol_resolve_multiple(
asset_convertible,
as_of_date,
)
)
else:
raise NotAssetConvertible(
"Input was %s, not AssetConvertible."
% asset_convertible
)
except SymbolNotFound:
missing.append(asset_convertible)
return None
def lookup_generic(self,
asset_convertible_or_iterable,
as_of_date):
"""
Convert a AssetConvertible or iterable of AssetConvertibles into
a list of Asset objects.
This method exists primarily as a convenience for implementing
user-facing APIs that can handle multiple kinds of input. It should
not be used for internal code where we already know the expected types
of our inputs.
Returns a pair of objects, the first of which is the result of the
conversion, and the second of which is a list containing any values
that couldn't be resolved.
"""
matches = []
missing = []
# Interpret input as scalar.
if isinstance(asset_convertible_or_iterable, AssetConvertible):
self._lookup_generic_scalar(
asset_convertible=asset_convertible_or_iterable,
as_of_date=as_of_date,
matches=matches,
missing=missing,
)
try:
return matches[0], missing
except IndexError:
if hasattr(asset_convertible_or_iterable, '__int__'):
raise SidNotFound(sid=asset_convertible_or_iterable)
else:
raise SymbolNotFound(symbol=asset_convertible_or_iterable)
# Interpret input as iterable.
try:
iterator = iter(asset_convertible_or_iterable)
except TypeError:
                raise NotAssetConvertible(
                    "Input was not an AssetConvertible "
                    "or iterable of AssetConvertible."
                )
for obj in iterator:
self._lookup_generic_scalar(obj, as_of_date, matches, missing)
return matches, missing
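    # A minimal, hedged sketch of lookup_generic above: mixed sids and
    # symbols resolve to Asset objects, and anything unresolvable is
    # reported in `missing` instead of raising (illustrative only, kept as a
    # comment; assumes `finder` already holds an equity with sid 0 and
    # symbol 'AAPL'):
    #
    #     import pandas as pd
    #     matches, missing = finder.lookup_generic(
    #         [0, 'AAPL', 'NO_SUCH_SYMBOL'],
    #         as_of_date=pd.Timestamp('2015-01-02'))
    #     # matches -> [Equity(0), Equity(0)]; missing -> ['NO_SUCH_SYMBOL']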
def map_identifier_index_to_sids(self, index, as_of_date):
"""
This method is for use in sanitizing a user's DataFrame or Panel
inputs.
Takes the given index of identifiers, checks their types, builds assets
if necessary, and returns a list of the sids that correspond to the
input index.
Parameters
        ----------
index : Iterable
An iterable containing ints, strings, or Assets
as_of_date : pandas.Timestamp
A date to be used to resolve any dual-mapped symbols
Returns
        -------
List
A list of integer sids corresponding to the input index
"""
# This method assumes that the type of the objects in the index is
# consistent and can, therefore, be taken from the first identifier
first_identifier = index[0]
# Ensure that input is AssetConvertible (integer, string, or Asset)
if not isinstance(first_identifier, AssetConvertible):
raise MapAssetIdentifierIndexError(obj=first_identifier)
# If sids are provided, no mapping is necessary
if isinstance(first_identifier, Integral):
return index
# If symbols or Assets are provided, construction and mapping is
# necessary
self.consume_identifiers(index)
# Look up all Assets for mapping
matches = []
missing = []
for identifier in index:
self._lookup_generic_scalar(identifier, as_of_date,
matches, missing)
# Handle missing assets
if len(missing) > 0:
            warnings.warn("Missing assets for identifiers: " + str(missing))
# Return a list of the sids of the found assets
return [asset.sid for asset in matches]
def _insert_metadata(self, identifier, **kwargs):
"""
Inserts the given metadata kwargs to the entry for the given
identifier. Matching fields in the existing entry will be overwritten.
:param identifier: The identifier for which to insert metadata
:param kwargs: The keyed metadata to insert
"""
if identifier in self.metadata_cache:
# Multiple pass insertion no longer supported.
# This could and probably should raise an Exception, but is
# currently just a short-circuit for compatibility with existing
# testing structure in the test_algorithm module which creates
# multiple sources which all insert redundant metadata.
return
entry = {}
for key, value in kwargs.items():
# Do not accept invalid fields
if key not in ASSET_FIELDS:
continue
# Do not accept Nones
if value is None:
continue
# Do not accept empty strings
if value == '':
continue
# Do not accept nans from dataframes
if isinstance(value, float) and np.isnan(value):
continue
entry[key] = value
# Check if the sid is declared
try:
entry['sid']
except KeyError:
# If the identifier is not a sid, assign one
if hasattr(identifier, '__int__'):
entry['sid'] = identifier.__int__()
else:
if self.allow_sid_assignment:
# Assign the sid the value of its insertion order.
# This assumes that we are assigning values to all assets.
entry['sid'] = len(self.metadata_cache)
else:
raise SidAssignmentError(identifier=identifier)
# If the file_name is in the kwargs, it will be used as the symbol
try:
entry['symbol'] = entry.pop('file_name')
except KeyError:
pass
# If the identifier coming in was a string and there is no defined
# symbol yet, set the symbol to the incoming identifier
try:
entry['symbol']
pass
except KeyError:
if isinstance(identifier, string_types):
entry['symbol'] = identifier
# If the company_name is in the kwargs, it may be the asset_name
try:
company_name = entry.pop('company_name')
try:
entry['asset_name']
except KeyError:
entry['asset_name'] = company_name
except KeyError:
pass
# If dates are given as nanos, pop them
try:
entry['start_date'] = entry.pop('start_date_nano')
except KeyError:
pass
try:
entry['end_date'] = entry.pop('end_date_nano')
except KeyError:
pass
try:
entry['notice_date'] = entry.pop('notice_date_nano')
except KeyError:
pass
try:
entry['expiration_date'] = entry.pop('expiration_date_nano')
except KeyError:
pass
# Process dates to Timestamps
try:
entry['start_date'] = pd.Timestamp(entry['start_date'], tz='UTC')
except KeyError:
# Set a default start_date of the EPOCH, so that all date queries
# work when a start date is not provided.
entry['start_date'] = pd.Timestamp(0, tz='UTC')
try:
# Set a default end_date of 'now', so that all date queries
# work when a end date is not provided.
entry['end_date'] = pd.Timestamp(entry['end_date'], tz='UTC')
except KeyError:
entry['end_date'] = self.end_date_to_assign
try:
entry['notice_date'] = pd.Timestamp(entry['notice_date'],
tz='UTC')
except KeyError:
pass
try:
entry['expiration_date'] = pd.Timestamp(entry['expiration_date'],
tz='UTC')
except KeyError:
pass
# Build an Asset of the appropriate type, default to Equity
asset_type = entry.pop('asset_type', 'equity')
if asset_type.lower() == 'equity':
try:
fuzzy = entry['symbol'].replace(self.fuzzy_char, '') \
if self.fuzzy_char else None
except KeyError:
fuzzy = None
asset = Equity(**entry)
c = self.conn.cursor()
t = (asset.sid,
asset.symbol,
asset.asset_name,
asset.start_date.value if asset.start_date else None,
asset.end_date.value if asset.end_date else None,
asset.first_traded.value if asset.first_traded else None,
asset.exchange,
fuzzy)
c.execute("""INSERT INTO equities(
sid,
symbol,
asset_name,
start_date,
end_date,
first_traded,
exchange,
fuzzy)
VALUES(?, ?, ?, ?, ?, ?, ?, ?)""", t)
t = (asset.sid,
'equity')
c.execute("""INSERT INTO asset_router(sid, asset_type)
VALUES(?, ?)""", t)
elif asset_type.lower() == 'future':
asset = Future(**entry)
c = self.conn.cursor()
t = (asset.sid,
asset.symbol,
asset.asset_name,
asset.start_date.value if asset.start_date else None,
asset.end_date.value if asset.end_date else None,
asset.first_traded.value if asset.first_traded else None,
asset.exchange,
asset.root_symbol,
asset.notice_date.value if asset.notice_date else None,
asset.expiration_date.value
if asset.expiration_date else None,
asset.contract_multiplier)
c.execute("""INSERT INTO futures(
sid,
symbol,
asset_name,
start_date,
end_date,
first_traded,
exchange,
root_symbol,
notice_date,
expiration_date,
contract_multiplier)
VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)""", t)
t = (asset.sid,
'future')
c.execute("""INSERT INTO asset_router(sid, asset_type)
VALUES(?, ?)""", t)
else:
raise InvalidAssetType(asset_type=asset_type)
self.metadata_cache[identifier] = entry
def consume_identifiers(self, identifiers):
"""
Consumes the given identifiers in to the metadata cache of this
AssetFinder.
"""
for identifier in identifiers:
# Handle case where full Assets are passed in
# For example, in the creation of a DataFrameSource, the source's
# 'sid' args may be full Assets
if isinstance(identifier, Asset):
sid = identifier.sid
metadata = identifier.to_dict()
metadata['asset_type'] = identifier.__class__.__name__
self.insert_metadata(identifier=sid, **metadata)
else:
self.insert_metadata(identifier)
def consume_metadata(self, metadata):
"""
Consumes the provided metadata in to the metadata cache. The
existing values in the cache will be overwritten when there
is a conflict.
:param metadata: The metadata to be consumed
"""
# Handle dicts
if isinstance(metadata, dict):
self._insert_metadata_dict(metadata)
# Handle DataFrames
elif isinstance(metadata, pd.DataFrame):
self._insert_metadata_dataframe(metadata)
# Handle readables
elif hasattr(metadata, 'read'):
self._insert_metadata_readable(metadata)
else:
raise ConsumeAssetMetaDataError(obj=metadata)
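# Illustrative sketch (added for clarity, not part of the original source):
# the three metadata forms consume_metadata() accepts. `finder` stands for a
# hypothetical AssetFinder instance and the field values are made up.
#
#   finder.consume_metadata({24: {'symbol': 'AAPL', 'asset_name': 'Apple'}})
#   finder.consume_metadata(pd.DataFrame({'symbol': ['MSFT']}, index=[42]))
#
#   class RowSource(object):           # any object whose read() yields rows
#       def read(self):
#           return [{'sid': 7, 'symbol': 'TEST'}]
#   finder.consume_metadata(RowSource())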
def clear_metadata(self):
"""
Used for testing.
"""
self.metadata_cache = {}
self.conn = sqlite3.connect(':memory:')
self.create_db_tables()
def insert_metadata(self, identifier, **kwargs):
self._insert_metadata(identifier, **kwargs)
self.conn.commit()
def _insert_metadata_dataframe(self, dataframe):
for identifier, row in dataframe.iterrows():
self._insert_metadata(identifier, **row)
self.conn.commit()
def _insert_metadata_dict(self, dict):
for identifier, entry in dict.items():
self._insert_metadata(identifier, **entry)
self.conn.commit()
def _insert_metadata_readable(self, readable):
for row in readable.read():
# Parse out the row of the readable object
metadata_dict = {}
for field in ASSET_FIELDS:
try:
row_value = row[field]
# Avoid passing placeholders
if row_value and (row_value != 'None'):
metadata_dict[field] = row[field]
except KeyError:
continue
except IndexError:
continue
# Locate the identifier, fail if not found
if 'sid' in metadata_dict:
identifier = metadata_dict['sid']
elif 'symbol' in metadata_dict:
identifier = metadata_dict['symbol']
else:
raise ConsumeAssetMetaDataError(obj=row)
self._insert_metadata(identifier, **metadata_dict)
self.conn.commit()
def _compute_asset_lifetimes(self):
"""
Compute and cache a recarray of asset lifetimes.
FUTURE OPTIMIZATION: We're looping over a big array, which means this
probably should be in C/Cython.
"""
with self.conn as transaction:
results = transaction.execute(
'SELECT sid, start_date, end_date from equities'
).fetchall()
lifetimes = np.recarray(
shape=(len(results),),
dtype=[('sid', 'i8'), ('start', 'i8'), ('end', 'i8')],
)
# TODO: This is **WAY** slower than it could be because we have to
# check for None everywhere. If we represented "no start date" as
# 0, and "no end date" as MAX_INT in our metadata, this would be
# significantly faster.
NO_START = 0
NO_END = np.iinfo(int).max
for idx, (sid, start, end) in enumerate(results):
lifetimes[idx] = (
sid,
start if start is not None else NO_START,
end if end is not None else NO_END,
)
return lifetimes
def lifetimes(self, dates):
"""
Compute a DataFrame representing asset lifetimes for the specified date
range.
Parameters
----------
dates : pd.DatetimeIndex
The dates for which to compute lifetimes.
Returns
-------
lifetimes : pd.DataFrame
A frame of dtype bool with `dates` as index and an Int64Index of
assets as columns. The value at `lifetimes.loc[date, asset]` will
be True iff `asset` existed on `date`.
See Also
--------
numpy.putmask
"""
# This is a less than ideal place to do this, because if someone adds
# assets to the finder after we've touched lifetimes we won't have
# those new assets available. Mutability is not my favorite
# programming feature.
if self._asset_lifetimes is None:
self._asset_lifetimes = self._compute_asset_lifetimes()
lifetimes = self._asset_lifetimes
raw_dates = dates.asi8[:, None]
mask = (lifetimes.start <= raw_dates) & (raw_dates <= lifetimes.end)
return pd.DataFrame(mask, index=dates, columns=lifetimes.sid)
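# Illustrative sketch (added for clarity, not part of the original source):
# querying lifetimes for a date range. `finder` stands for a hypothetical,
# already-populated AssetFinder.
#
#   dates = pd.date_range('2014-01-02', '2014-01-06', tz='UTC')
#   alive = finder.lifetimes(dates)
#   # boolean frame: rows are dates, columns are sids; for example, the
#   # sids that existed on the first requested date:
#   alive.columns[alive.iloc[0]]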
class AssetConvertible(with_metaclass(ABCMeta)):
"""
ABC for types that are convertible to integer-representations of
Assets.
Includes Asset, six.string_types, and Integral
"""
pass
AssetConvertible.register(Integral)
AssetConvertible.register(Asset)
# Use six.string_types for Python2/3 compatibility
for _type in string_types:
AssetConvertible.register(_type)
class NotAssetConvertible(ValueError):
pass
| apache-2.0 |
almudenasanz/novelty-detection-in-hri | helper files/print_ske.py | 2 | 6623 | import matplotlib.pyplot as plt
import user_data_loader as udl
import pandas as pd
# draw a vector
# retrieved from: http://stackoverflow.com/a/11156353/630598
from matplotlib.patches import FancyArrowPatch
from mpl_toolkits.mplot3d import proj3d
class Arrow3D(FancyArrowPatch):
def __init__(self, xs, ys, zs, *args, **kwargs):
FancyArrowPatch.__init__(self, (0,0), (0,0), *args, **kwargs)
self._verts3d = xs, ys, zs
def draw(self, renderer):
xs3d, ys3d, zs3d = self._verts3d
xs, ys, zs = proj3d.proj_transform(xs3d, ys3d, zs3d, renderer.M)
self.set_positions((xs[0],ys[0]),(xs[1],ys[1]))
FancyArrowPatch.draw(self, renderer)
# Specifying all the links between joints:
def get_links(data):
''' Returns a dict with all the links of the skeleton.
Arguments:
data: a pd.DataFrame indexed by joint names, with x, y, z columns'''
return { 'head_neck': list(zip(data.T['head'].values, data.T['neck'].values)),
'neck_lshoulder': zip(data.T['neck'].values, data.T['left_shoulder'].values),
'lshoulder_lelbow': zip(data.T['left_shoulder'].values, data.T['left_elbow'].values),
'lelbow_lhand': zip(data.T['left_elbow'].values, data.T['left_hand'].values),
'neck_rshoulder': zip(data.T['neck'].values, data.T['right_shoulder'].values),
'rshoulder_relbow': zip(data.T['right_shoulder'].values, data.T['right_elbow'].values),
'relbow_rhand': zip(data.T['right_elbow'].values, data.T['right_hand'].values),
'lshoulder_torso': zip(data.T['left_shoulder'].values, data.T['torso'].values),
'rshoulder_torso': zip(data.T['right_shoulder'].values, data.T['torso'].values),
'torso_lhip': zip(data.T['torso'].values, data.T['left_hip'].values),
'lhip_rhip': zip(data.T['left_hip'].values, data.T['right_hip'].values),
'lhip_lknee': zip(data.T['left_hip'].values, data.T['left_knee'].values),
'lknee_lfoot': zip(data.T['left_knee'].values, data.T['left_foot'].values),
'torso_rhip': zip(data.T['torso'].values, data.T['right_hip'].values),
'rhip_rknee': zip(data.T['right_hip'].values, data.T['right_knee'].values),
'rknee_rfoot': zip(data.T['right_knee'].values, data.T['right_foot'].values)
}
def plot_skeleton(axis, datapoints, links=False, **kwargs):
''' Plots a skeleton in 3D'''
axis.scatter(datapoints.x, datapoints.y, datapoints.z, **kwargs)
if links:
joint_links = get_links(datapoints)
for jl in joint_links: # adding joint links to the plot:
arrow = Arrow3D(joint_links[jl][0], joint_links[jl][1], joint_links[jl][2], lw=1, arrowstyle="-", **kwargs)
axis.add_artist(arrow)
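# Illustrative sketch (added for clarity, not part of the original file):
# plotting a single skeleton frame. `frame` stands for a DataFrame indexed
# by joint names with x/y/z columns, such as the ones built by
# to_xyz/df_to_xyz below.
#
#   fig = plt.figure()
#   ax = fig.add_subplot(111, projection='3d')
#   plot_skeleton(ax, frame, links=True, color='red')
#   plt.show()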
def to_xyz(series, colors=None):
''' converts series with index = head_pos_x, head_pos_y, etc...
to a dataframe with index=joints and columns = x, y, z '''
def_colors = {'STAND_POINTING_RIGHT':'red', 'STAND_POINTING_FORWARD':'green', 'STAND_POINTING_LEFT':'blue'}
c = colors if colors else def_colors
xyz = pd.DataFrame(index=udl.joints, columns=['x','y','z', 'color'])
x = series[udl.ind_pos_x]
y = series[udl.ind_pos_y]
z = series[udl.ind_pos_z]
for d in (x,y,z): # renaming index so it is the same as xyz
d.index = udl.joints
xyz.x, xyz.y, xyz.z = x, y, z
xyz.color = c[series[-1]]
return xyz
def irow_to_xyz(irow, **kwargs):
''' Helper function to pass the pd.iterrows tuple to the to_xyz function '''
return to_xyz(irow[1], **kwargs)
def df_to_xyz(df):
''' converts a a pd.Dataframe with user data to a '3D-plottable' dataframe '''
return pd.concat(map(irow_to_xyz, df.iterrows()))
def normalize_user(user):
'''
returns a normalized user
'''
uf = udl.load_user_file('data/exp03-user'+str(user).zfill(2)+'.arff')
multiind_first, multiind_second = udl.make_multiindex(udl.joints, udl.attribs)
uf.columns = pd.MultiIndex.from_arrays([list(multiind_first), list(multiind_second)], names=['joint', 'attrib'])
orig_torso, df_normalized = udl.normalize_joints(uf, 'torso')
uf.update(df_normalized)
uf.torso = uf.torso - uf.torso
uf.columns = udl.index
return uf
def print_users(users, ax):
'''
Returns an ax plotted with all the users from a [[number_user, pose]...] list
'''
normalized_users = []
for u in users:
if u[1] =='STAND_POINTING_RIGHT': u[1] = 'red'
if u[1] =='STAND_POINTING_LEFT': u[1] = 'blue'
if u[1] =='STAND_POINTING_FORWARD' : u[1] = 'green'
normalized_users.append([normalize_user(u[0]), u[1]])
for uf in normalized_users:
xyz_03u01 = df_to_xyz(uf[0])
#clouds = xyz_03u01.groupby('color') # Uncomment to plot the user clouds as well
# Plot skeleton joints and links
means = uf[0].groupby('pose').mean()
means.insert(len(means.columns), 'pose', means.index )
# Prepare means to be printed:
m_groups = [to_xyz(means.ix[i]) for i,ind in enumerate(means.index)]
for m in m_groups:
col = m['color'][0] # Just need the 1st one
if col == uf[1]:
plot_skeleton(ax, m, links=True, color=col)
ske = m
plot_skeleton(ax, ske, links=True, color='black') # Plot the last user with black dots to differentiate it
'''
Uncomment to plot the user clouds as well
for c, values in clouds:
if c == uf[1]:
ax.scatter(values.x, values.y, values.z, color=c, alpha=0.2, marker='o')
for c, values in clouds:
if c == uf[1]:
ax.scatter(values.x, values.y, values.z, color='black', alpha=0.2, marker='x')
'''
ax.view_init(-90,90)
return ax
def print_user(i, pose):
if pose =='STAND_POINTING_RIGHT': pose = 'red'
if pose =='STAND_POINTING_LEFT': pose = 'blue'
if pose =='STAND_POINTING_FORWARD' : pose ='green'
uf = normalize_user(i)
xyz_03u01 = df_to_xyz(uf)
clouds = xyz_03u01.groupby('color')
means = uf.groupby('pose').mean()
means.insert(len(means.columns), 'pose', means.index )
# Prepare means to be printed:
m_groups = [to_xyz(means.ix[i]) for i,ind in enumerate(means.index)]
# Plot skeleton joints and links
ax = plt.axes(projection='3d')
for c, values in clouds:
if c == pose:
ax.scatter(values.x, values.y, values.z, color=c, alpha=0.2, marker='o')
for m in m_groups:
col = m['color'][0] # Just need the 1st one
if col == pose:
plot_skeleton(ax, m, links=True, color=pose)
ax.view_init(-90,90)
#plt.savefig('/Users/almudenasanz/Downloads/skeleton.pdf', format='pdf') | gpl-3.0 |
JoeLaMartina/aima-python | submissions/Ottenlips/myNN.py | 10 | 4316 | from sklearn import datasets
from sklearn.neural_network import MLPClassifier
import traceback
from submissions.Ottenlips import billionaires
class DataFrame:
data = []
feature_names = []
target = []
target_names = []
bill = DataFrame()
list_of_billionaire = billionaires.get_billionaires()
def billtarget(billions):
if billions<3.0:
return 1
else:
return 0
for billionaires in list_of_billionaire:
# print(billionaires['wealth']['type'])
# print(billionaires)
bill.target.append(billtarget(billionaires['wealth']['worth in billions']))
# bill.target.append(billionaires['wealth']['how']['inherited'])
bill.data.append([
float(billionaires['demographics']['age']),
float(billionaires['location']['gdp']),
float(billionaires['rank']),
])
bill.feature_names = [
'age',
'gdp of origin country',
'rank',
]
bill.target_names = [
'very rich',
'less rich',
]
'''
Make a custom classifier.
'''
mlpc = MLPClassifier(
hidden_layer_sizes = (10,),
# activation = 'relu',
solver='sgd',
#alpha = 0.0001,
# batch_size='auto',
learning_rate = 'adaptive',
# power_t = 0.5,
max_iter = 100, # 200,
# shuffle = True,
# random_state = None,
# tol = 1e-4,
# verbose = True,
# warm_start = False,
# momentum = 0.9,
# nesterovs_momentum = True,
# early_stopping = False,
# validation_fraction = 0.1,
# beta_1 = 0.9,
# beta_2 = 0.999,
# epsilon = 1e-8,
)
mlpcTwo = MLPClassifier(
hidden_layer_sizes = (1000,),
# activation = 'relu',
solver='sgd',
#alpha = 0.0001,
# batch_size='auto',
learning_rate = 'adaptive',
# power_t = 0.5,
max_iter = 1000, # 200,
shuffle = True,
# random_state = None,
# tol = 1e-4,
# verbose = True,
# warm_start = False,
# momentum = 0.9,
# nesterovs_momentum = True,
# early_stopping = False,
# validation_fraction = 0.1,
# beta_1 = 0.9,
# beta_2 = 0.999,
# epsilon = 1e-8,
)
billScaled = DataFrame()
def setupScales(grid):
global min, max
min = list(grid[0])
max = list(grid[0])
for row in range(1, len(grid)):
for col in range(len(grid[row])):
cell = grid[row][col]
if cell < min[col]:
min[col] = cell
if cell > max[col]:
max[col] = cell
def scaleGrid(grid):
newGrid = []
for row in range(len(grid)):
newRow = []
for col in range(len(grid[row])):
try:
cell = grid[row][col]
scaled = (cell - min[col]) \
/ (max[col] - min[col])
newRow.append(scaled)
except:
pass
newGrid.append(newRow)
return newGrid
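# Worked note (added for clarity, not in the original): setupScales records
# the per-column minima and maxima of the grid it is given, and scaleGrid
# then applies min-max normalization, (cell - min) / (max - min), mapping
# each column into [0, 1]. For example:
#
#   setupScales([[10, 100], [20, 300]])
#   scaleGrid([[15, 200]])   # -> [[0.5, 0.5]]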
setupScales(bill.data)
billScaled.data = scaleGrid(bill.data)
billScaled.feature_names = bill.feature_names
billScaled.target = bill.target
billScaled.target_names = bill.target_names
Examples = {
'BillMLPC': {
'frame': bill,
'mlpc': mlpc,
},
'BillMLPCTwo': {
'frame': bill,
'mlpc': mlpcTwo,
},
'BillScaled':{
'frame':billScaled,
},
'Bill': {'frame':bill},
}
#
# billTwo = DataFrame()
#
#
#
# for billionaires in list_of_billionaire:
# # print(billionaires['wealth']['type'])
# #print(billionaires)
# billTwo.target.append(float(billionaires['wealth']['worth in billions']))
# # bill.target.append(billionaires['wealth']['how']['inherited'])
# billTwo.data.append([
# float(billionaires['location']['gdp']),
# float(billionaires['rank']),
# float(billionaires['demographics']['age']),
# ])
#
#
#
#
# billTwo.feature_names = [
# 'gdp of origin country',
# 'rank',
# 'age',
# ]
#
# billTwo.target_names = [
# 'worth',
# ] | mit |
boomsbloom/dtm-fmri | DTM/for_gensim/lib/python2.7/site-packages/matplotlib/backends/backend_gtk.py | 4 | 37325 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
import os, sys, warnings
def fn_name(): return sys._getframe(1).f_code.co_name
if six.PY3:
warnings.warn(
"The gtk* backends have not been tested with Python 3.x",
ImportWarning)
try:
import gobject
import gtk; gdk = gtk.gdk
import pango
except ImportError:
raise ImportError("Gtk* backend requires pygtk to be installed.")
pygtk_version_required = (2,4,0)
if gtk.pygtk_version < pygtk_version_required:
raise ImportError ("PyGTK %d.%d.%d is installed\n"
"PyGTK %d.%d.%d or later is required"
% (gtk.pygtk_version + pygtk_version_required))
del pygtk_version_required
_new_tooltip_api = (gtk.pygtk_version[1] >= 12)
import matplotlib
from matplotlib._pylab_helpers import Gcf
from matplotlib.backend_bases import RendererBase, GraphicsContextBase, \
FigureManagerBase, FigureCanvasBase, NavigationToolbar2, cursors, TimerBase
from matplotlib.backend_bases import ShowBase
from matplotlib.backends.backend_gdk import RendererGDK, FigureCanvasGDK
from matplotlib.cbook import is_string_like, is_writable_file_like
from matplotlib.colors import colorConverter
from matplotlib.figure import Figure
from matplotlib.widgets import SubplotTool
from matplotlib import lines
from matplotlib import markers
from matplotlib import cbook
from matplotlib import verbose
from matplotlib import rcParams
backend_version = "%d.%d.%d" % gtk.pygtk_version
_debug = False
#_debug = True
# the true dots per inch on the screen; should be display dependent
# see http://groups.google.com/groups?q=screen+dpi+x11&hl=en&lr=&ie=UTF-8&oe=UTF-8&safe=off&selm=7077.26e81ad5%40swift.cs.tcd.ie&rnum=5 for some info about screen dpi
PIXELS_PER_INCH = 96
# Hide the benign warning that it can't stat a file that doesn't exist
warnings.filterwarnings('ignore', '.*Unable to retrieve the file info for.*', gtk.Warning)
cursord = {
cursors.MOVE : gdk.Cursor(gdk.FLEUR),
cursors.HAND : gdk.Cursor(gdk.HAND2),
cursors.POINTER : gdk.Cursor(gdk.LEFT_PTR),
cursors.SELECT_REGION : gdk.Cursor(gdk.TCROSS),
}
# ref gtk+/gtk/gtkwidget.h
def GTK_WIDGET_DRAWABLE(w):
flags = w.flags()
return flags & gtk.VISIBLE != 0 and flags & gtk.MAPPED != 0
def draw_if_interactive():
"""
Is called after every pylab drawing command
"""
if matplotlib.is_interactive():
figManager = Gcf.get_active()
if figManager is not None:
figManager.canvas.draw_idle()
class Show(ShowBase):
def mainloop(self):
if gtk.main_level() == 0:
gtk.main()
show = Show()
def new_figure_manager(num, *args, **kwargs):
"""
Create a new figure manager instance
"""
FigureClass = kwargs.pop('FigureClass', Figure)
thisFig = FigureClass(*args, **kwargs)
return new_figure_manager_given_figure(num, thisFig)
def new_figure_manager_given_figure(num, figure):
"""
Create a new figure manager instance for the given figure.
"""
canvas = FigureCanvasGTK(figure)
manager = FigureManagerGTK(canvas, num)
return manager
class TimerGTK(TimerBase):
'''
Subclass of :class:`backend_bases.TimerBase` that uses GTK for timer events.
Attributes:
* interval: The time between timer events in milliseconds. Default
is 1000 ms.
* single_shot: Boolean flag indicating whether this timer should
operate as single shot (run once and then stop). Defaults to False.
* callbacks: Stores list of (func, args) tuples that will be called
upon timer events. This list can be manipulated directly, or the
functions add_callback and remove_callback can be used.
'''
def _timer_start(self):
# Need to stop it, otherwise we potentially leak a timer id that will
# never be stopped.
self._timer_stop()
self._timer = gobject.timeout_add(self._interval, self._on_timer)
def _timer_stop(self):
if self._timer is not None:
gobject.source_remove(self._timer)
self._timer = None
def _timer_set_interval(self):
# Only stop and restart it if the timer has already been started
if self._timer is not None:
self._timer_stop()
self._timer_start()
def _on_timer(self):
TimerBase._on_timer(self)
# Gtk timeout_add() requires that the callback returns True if it
# is to be called again.
if len(self.callbacks) > 0 and not self._single:
return True
else:
self._timer = None
return False
class FigureCanvasGTK (gtk.DrawingArea, FigureCanvasBase):
keyvald = {65507 : 'control',
65505 : 'shift',
65513 : 'alt',
65508 : 'control',
65506 : 'shift',
65514 : 'alt',
65361 : 'left',
65362 : 'up',
65363 : 'right',
65364 : 'down',
65307 : 'escape',
65470 : 'f1',
65471 : 'f2',
65472 : 'f3',
65473 : 'f4',
65474 : 'f5',
65475 : 'f6',
65476 : 'f7',
65477 : 'f8',
65478 : 'f9',
65479 : 'f10',
65480 : 'f11',
65481 : 'f12',
65300 : 'scroll_lock',
65299 : 'break',
65288 : 'backspace',
65293 : 'enter',
65379 : 'insert',
65535 : 'delete',
65360 : 'home',
65367 : 'end',
65365 : 'pageup',
65366 : 'pagedown',
65438 : '0',
65436 : '1',
65433 : '2',
65435 : '3',
65430 : '4',
65437 : '5',
65432 : '6',
65429 : '7',
65431 : '8',
65434 : '9',
65451 : '+',
65453 : '-',
65450 : '*',
65455 : '/',
65439 : 'dec',
65421 : 'enter',
65511 : 'super',
65512 : 'super',
65406 : 'alt',
65289 : 'tab',
}
# Setting this as a static constant prevents
# this resulting expression from leaking
event_mask = (gdk.BUTTON_PRESS_MASK |
gdk.BUTTON_RELEASE_MASK |
gdk.EXPOSURE_MASK |
gdk.KEY_PRESS_MASK |
gdk.KEY_RELEASE_MASK |
gdk.ENTER_NOTIFY_MASK |
gdk.LEAVE_NOTIFY_MASK |
gdk.POINTER_MOTION_MASK |
gdk.POINTER_MOTION_HINT_MASK)
def __init__(self, figure):
if _debug: print('FigureCanvasGTK.%s' % fn_name())
FigureCanvasBase.__init__(self, figure)
gtk.DrawingArea.__init__(self)
self._idle_draw_id = 0
self._need_redraw = True
self._pixmap_width = -1
self._pixmap_height = -1
self._lastCursor = None
self.connect('scroll_event', self.scroll_event)
self.connect('button_press_event', self.button_press_event)
self.connect('button_release_event', self.button_release_event)
self.connect('configure_event', self.configure_event)
self.connect('expose_event', self.expose_event)
self.connect('key_press_event', self.key_press_event)
self.connect('key_release_event', self.key_release_event)
self.connect('motion_notify_event', self.motion_notify_event)
self.connect('leave_notify_event', self.leave_notify_event)
self.connect('enter_notify_event', self.enter_notify_event)
self.set_events(self.__class__.event_mask)
self.set_double_buffered(False)
self.set_flags(gtk.CAN_FOCUS)
self._renderer_init()
self.last_downclick = {}
def destroy(self):
#gtk.DrawingArea.destroy(self)
self.close_event()
if self._idle_draw_id != 0:
gobject.source_remove(self._idle_draw_id)
def scroll_event(self, widget, event):
if _debug: print('FigureCanvasGTK.%s' % fn_name())
x = event.x
# flipy so y=0 is bottom of canvas
y = self.allocation.height - event.y
if event.direction==gdk.SCROLL_UP:
step = 1
else:
step = -1
FigureCanvasBase.scroll_event(self, x, y, step, guiEvent=event)
return False # finish event propagation?
def button_press_event(self, widget, event):
if _debug: print('FigureCanvasGTK.%s' % fn_name())
x = event.x
# flipy so y=0 is bottom of canvas
y = self.allocation.height - event.y
dblclick = (event.type == gdk._2BUTTON_PRESS)
if not dblclick:
# GTK is the only backend that generates a DOWN-UP-DOWN-DBLCLICK-UP event
# sequence for a double click. All other backends have a DOWN-UP-DBLCLICK-UP
# sequence. In order to provide consistency to matplotlib users, we will
# eat the extra DOWN event in the case that we detect it is part of a double
# click.
# first, get the double click time in milliseconds.
current_time = event.get_time()
last_time = self.last_downclick.get(event.button,0)
dblclick_time = gtk.settings_get_for_screen(gdk.screen_get_default()).get_property('gtk-double-click-time')
delta_time = current_time-last_time
if delta_time < dblclick_time:
del self.last_downclick[event.button] # we do not want to eat more than one event.
return False # eat.
self.last_downclick[event.button] = current_time
FigureCanvasBase.button_press_event(self, x, y, event.button, dblclick=dblclick, guiEvent=event)
return False # finish event propagation?
def button_release_event(self, widget, event):
if _debug: print('FigureCanvasGTK.%s' % fn_name())
x = event.x
# flipy so y=0 is bottom of canvas
y = self.allocation.height - event.y
FigureCanvasBase.button_release_event(self, x, y, event.button, guiEvent=event)
return False # finish event propagation?
def key_press_event(self, widget, event):
if _debug: print('FigureCanvasGTK.%s' % fn_name())
key = self._get_key(event)
if _debug: print("hit", key)
FigureCanvasBase.key_press_event(self, key, guiEvent=event)
return False # finish event propagation?
def key_release_event(self, widget, event):
if _debug: print('FigureCanvasGTK.%s' % fn_name())
key = self._get_key(event)
if _debug: print("release", key)
FigureCanvasBase.key_release_event(self, key, guiEvent=event)
return False # finish event propagation?
def motion_notify_event(self, widget, event):
if _debug: print('FigureCanvasGTK.%s' % fn_name())
if event.is_hint:
x, y, state = event.window.get_pointer()
else:
x, y, state = event.x, event.y, event.state
# flipy so y=0 is bottom of canvas
y = self.allocation.height - y
FigureCanvasBase.motion_notify_event(self, x, y, guiEvent=event)
return False # finish event propagation?
def leave_notify_event(self, widget, event):
FigureCanvasBase.leave_notify_event(self, event)
def enter_notify_event(self, widget, event):
x, y, state = event.window.get_pointer()
FigureCanvasBase.enter_notify_event(self, event, xy=(x, y))
def _get_key(self, event):
if event.keyval in self.keyvald:
key = self.keyvald[event.keyval]
elif event.keyval < 256:
key = chr(event.keyval)
else:
key = None
for key_mask, prefix in (
[gdk.MOD4_MASK, 'super'],
[gdk.MOD1_MASK, 'alt'],
[gdk.CONTROL_MASK, 'ctrl'], ):
if event.state & key_mask:
key = '{0}+{1}'.format(prefix, key)
return key
def configure_event(self, widget, event):
if _debug: print('FigureCanvasGTK.%s' % fn_name())
if widget.window is None:
return
w, h = event.width, event.height
if w < 3 or h < 3:
return # empty fig
# resize the figure (in inches)
dpi = self.figure.dpi
self.figure.set_size_inches (w/dpi, h/dpi)
self._need_redraw = True
return False # finish event propagation?
def draw(self):
# Note: FigureCanvasBase.draw() is inconveniently named as it clashes
# with the deprecated gtk.Widget.draw()
self._need_redraw = True
if GTK_WIDGET_DRAWABLE(self):
self.queue_draw()
# do a synchronous draw (its less efficient than an async draw,
# but is required if/when animation is used)
self.window.process_updates (False)
def draw_idle(self):
if self._idle_draw_id != 0:
return
def idle_draw(*args):
try:
self.draw()
finally:
self._idle_draw_id = 0
return False
self._idle_draw_id = gobject.idle_add(idle_draw)
def _renderer_init(self):
"""Override by GTK backends to select a different renderer
Renderer should provide the methods:
set_pixmap ()
set_width_height ()
that are used by
_render_figure() / _pixmap_prepare()
"""
self._renderer = RendererGDK (self, self.figure.dpi)
def _pixmap_prepare(self, width, height):
"""
Make sure _._pixmap is at least width, height,
create new pixmap if necessary
"""
if _debug: print('FigureCanvasGTK.%s' % fn_name())
create_pixmap = False
if width > self._pixmap_width:
# increase the pixmap in 10%+ (rather than 1 pixel) steps
self._pixmap_width = max (int (self._pixmap_width * 1.1),
width)
create_pixmap = True
if height > self._pixmap_height:
self._pixmap_height = max (int (self._pixmap_height * 1.1),
height)
create_pixmap = True
if create_pixmap:
self._pixmap = gdk.Pixmap (self.window, self._pixmap_width,
self._pixmap_height)
self._renderer.set_pixmap (self._pixmap)
def _render_figure(self, pixmap, width, height):
"""used by GTK and GTKcairo. GTKAgg overrides
"""
self._renderer.set_width_height (width, height)
self.figure.draw (self._renderer)
def expose_event(self, widget, event):
"""Expose_event for all GTK backends. Should not be overridden.
"""
if _debug: print('FigureCanvasGTK.%s' % fn_name())
if GTK_WIDGET_DRAWABLE(self):
if self._need_redraw:
x, y, w, h = self.allocation
self._pixmap_prepare (w, h)
self._render_figure(self._pixmap, w, h)
self._need_redraw = False
x, y, w, h = event.area
self.window.draw_drawable (self.style.fg_gc[self.state],
self._pixmap, x, y, x, y, w, h)
return False # finish event propagation?
filetypes = FigureCanvasBase.filetypes.copy()
filetypes['jpg'] = 'JPEG'
filetypes['jpeg'] = 'JPEG'
filetypes['png'] = 'Portable Network Graphics'
def print_jpeg(self, filename, *args, **kwargs):
return self._print_image(filename, 'jpeg')
print_jpg = print_jpeg
def print_png(self, filename, *args, **kwargs):
return self._print_image(filename, 'png')
def _print_image(self, filename, format, *args, **kwargs):
if self.flags() & gtk.REALIZED == 0:
# realize() is needed for self.window (used for the pixmap) and has a
# side effect of altering figure width,height (via configure-event?)
gtk.DrawingArea.realize(self)
width, height = self.get_width_height()
pixmap = gdk.Pixmap (self.window, width, height)
self._renderer.set_pixmap (pixmap)
self._render_figure(pixmap, width, height)
# jpg colors don't match the display very well, png colors match
# better
pixbuf = gdk.Pixbuf(gdk.COLORSPACE_RGB, 0, 8, width, height)
pixbuf.get_from_drawable(pixmap, pixmap.get_colormap(),
0, 0, 0, 0, width, height)
# set the default quality, if we are writing a JPEG.
# http://www.pygtk.org/docs/pygtk/class-gdkpixbuf.html#method-gdkpixbuf--save
options = cbook.restrict_dict(kwargs, ['quality'])
if format in ['jpg','jpeg']:
if 'quality' not in options:
options['quality'] = rcParams['savefig.jpeg_quality']
options['quality'] = str(options['quality'])
if is_string_like(filename):
try:
pixbuf.save(filename, format, options=options)
except gobject.GError as exc:
error_msg_gtk('Save figure failure:\n%s' % (exc,), parent=self)
elif is_writable_file_like(filename):
if hasattr(pixbuf, 'save_to_callback'):
def save_callback(buf, data=None):
data.write(buf)
try:
pixbuf.save_to_callback(save_callback, format, user_data=filename, options=options)
except gobject.GError as exc:
error_msg_gtk('Save figure failure:\n%s' % (exc,), parent=self)
else:
raise ValueError("Saving to a Python file-like object is only supported by PyGTK >= 2.8")
else:
raise ValueError("filename must be a path or a file-like object")
def new_timer(self, *args, **kwargs):
"""
Creates a new backend-specific subclass of :class:`backend_bases.Timer`.
This is useful for getting periodic events through the backend's native
event loop. Implemented only for backends with GUIs.
optional arguments:
*interval*
Timer interval in milliseconds
*callbacks*
Sequence of (func, args, kwargs) where func(*args, **kwargs) will
be executed by the timer every *interval*.
"""
return TimerGTK(*args, **kwargs)
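# Illustrative sketch (added for clarity, not part of the original source):
# driving a periodic callback with the GTK timer. `canvas` and `update_plot`
# are hypothetical user objects.
#
#   timer = canvas.new_timer(interval=500)   # milliseconds
#   timer.add_callback(update_plot)
#   timer.start()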
def flush_events(self):
gtk.gdk.threads_enter()
while gtk.events_pending():
gtk.main_iteration(True)
gtk.gdk.flush()
gtk.gdk.threads_leave()
def start_event_loop(self,timeout):
FigureCanvasBase.start_event_loop_default(self,timeout)
start_event_loop.__doc__=FigureCanvasBase.start_event_loop_default.__doc__
def stop_event_loop(self):
FigureCanvasBase.stop_event_loop_default(self)
stop_event_loop.__doc__=FigureCanvasBase.stop_event_loop_default.__doc__
class FigureManagerGTK(FigureManagerBase):
"""
Public attributes
canvas : The FigureCanvas instance
num : The Figure number
toolbar : The gtk.Toolbar (gtk only)
vbox : The gtk.VBox containing the canvas and toolbar (gtk only)
window : The gtk.Window (gtk only)
"""
def __init__(self, canvas, num):
if _debug: print('FigureManagerGTK.%s' % fn_name())
FigureManagerBase.__init__(self, canvas, num)
self.window = gtk.Window()
self.set_window_title("Figure %d" % num)
if (window_icon):
try:
self.window.set_icon_from_file(window_icon)
except:
# some versions of gtk throw a glib.GError but not
# all, so I am not sure how to catch it. I am unhappy
# doing a blanket catch here, but am not sure what a
# better way is - JDH
verbose.report('Could not load matplotlib icon: %s' % sys.exc_info()[1])
self.vbox = gtk.VBox()
self.window.add(self.vbox)
self.vbox.show()
self.canvas.show()
self.vbox.pack_start(self.canvas, True, True)
self.toolbar = self._get_toolbar(canvas)
# calculate size for window
w = int (self.canvas.figure.bbox.width)
h = int (self.canvas.figure.bbox.height)
if self.toolbar is not None:
self.toolbar.show()
self.vbox.pack_end(self.toolbar, False, False)
tb_w, tb_h = self.toolbar.size_request()
h += tb_h
self.window.set_default_size (w, h)
def destroy(*args):
Gcf.destroy(num)
self.window.connect("destroy", destroy)
self.window.connect("delete_event", destroy)
if matplotlib.is_interactive():
self.window.show()
self.canvas.draw_idle()
def notify_axes_change(fig):
'this will be called whenever the current axes is changed'
if self.toolbar is not None: self.toolbar.update()
self.canvas.figure.add_axobserver(notify_axes_change)
self.canvas.grab_focus()
def destroy(self, *args):
if _debug: print('FigureManagerGTK.%s' % fn_name())
if hasattr(self, 'toolbar') and self.toolbar is not None:
self.toolbar.destroy()
if hasattr(self, 'vbox'):
self.vbox.destroy()
if hasattr(self, 'window'):
self.window.destroy()
if hasattr(self, 'canvas'):
self.canvas.destroy()
self.__dict__.clear() #Is this needed? Other backends don't have it.
if Gcf.get_num_fig_managers()==0 and \
not matplotlib.is_interactive() and \
gtk.main_level() >= 1:
gtk.main_quit()
def show(self):
# show the figure window
self.window.show()
def full_screen_toggle(self):
self._full_screen_flag = not self._full_screen_flag
if self._full_screen_flag:
self.window.fullscreen()
else:
self.window.unfullscreen()
_full_screen_flag = False
def _get_toolbar(self, canvas):
# must be inited after the window, drawingArea and figure
# attrs are set
if rcParams['toolbar'] == 'toolbar2':
toolbar = NavigationToolbar2GTK (canvas, self.window)
else:
toolbar = None
return toolbar
def get_window_title(self):
return self.window.get_title()
def set_window_title(self, title):
self.window.set_title(title)
def resize(self, width, height):
'set the canvas size in pixels'
#_, _, cw, ch = self.canvas.allocation
#_, _, ww, wh = self.window.allocation
#self.window.resize (width-cw+ww, height-ch+wh)
self.window.resize(width, height)
class NavigationToolbar2GTK(NavigationToolbar2, gtk.Toolbar):
def __init__(self, canvas, window):
self.win = window
gtk.Toolbar.__init__(self)
NavigationToolbar2.__init__(self, canvas)
def set_message(self, s):
self.message.set_label(s)
def set_cursor(self, cursor):
self.canvas.window.set_cursor(cursord[cursor])
def release(self, event):
try: del self._pixmapBack
except AttributeError: pass
def dynamic_update(self):
# legacy method; new method is canvas.draw_idle
self.canvas.draw_idle()
def draw_rubberband(self, event, x0, y0, x1, y1):
'adapted from http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/189744'
drawable = self.canvas.window
if drawable is None:
return
gc = drawable.new_gc()
height = self.canvas.figure.bbox.height
y1 = height - y1
y0 = height - y0
w = abs(x1 - x0)
h = abs(y1 - y0)
rect = [int(val)for val in (min(x0,x1), min(y0, y1), w, h)]
try:
lastrect, pixmapBack = self._pixmapBack
except AttributeError:
#snap image back
if event.inaxes is None:
return
ax = event.inaxes
l,b,w,h = [int(val) for val in ax.bbox.bounds]
b = int(height)-(b+h)
axrect = l,b,w,h
self._pixmapBack = axrect, gtk.gdk.Pixmap(drawable, w, h)
self._pixmapBack[1].draw_drawable(gc, drawable, l, b, 0, 0, w, h)
else:
drawable.draw_drawable(gc, pixmapBack, 0, 0, *lastrect)
drawable.draw_rectangle(gc, False, *rect)
def _init_toolbar(self):
self.set_style(gtk.TOOLBAR_ICONS)
self._init_toolbar2_4()
def _init_toolbar2_4(self):
basedir = os.path.join(rcParams['datapath'],'images')
if not _new_tooltip_api:
self.tooltips = gtk.Tooltips()
for text, tooltip_text, image_file, callback in self.toolitems:
if text is None:
self.insert( gtk.SeparatorToolItem(), -1 )
continue
fname = os.path.join(basedir, image_file + '.png')
image = gtk.Image()
image.set_from_file(fname)
tbutton = gtk.ToolButton(image, text)
self.insert(tbutton, -1)
tbutton.connect('clicked', getattr(self, callback))
if _new_tooltip_api:
tbutton.set_tooltip_text(tooltip_text)
else:
tbutton.set_tooltip(self.tooltips, tooltip_text, 'Private')
toolitem = gtk.SeparatorToolItem()
self.insert(toolitem, -1)
# set_draw() not making separator invisible,
# bug #143692 fixed Jun 06 2004, will be in GTK+ 2.6
toolitem.set_draw(False)
toolitem.set_expand(True)
toolitem = gtk.ToolItem()
self.insert(toolitem, -1)
self.message = gtk.Label()
toolitem.add(self.message)
self.show_all()
def get_filechooser(self):
fc = FileChooserDialog(
title='Save the figure',
parent=self.win,
path=os.path.expanduser(rcParams.get('savefig.directory', '')),
filetypes=self.canvas.get_supported_filetypes(),
default_filetype=self.canvas.get_default_filetype())
fc.set_current_name(self.canvas.get_default_filename())
return fc
def save_figure(self, *args):
chooser = self.get_filechooser()
fname, format = chooser.get_filename_from_user()
chooser.destroy()
if fname:
startpath = os.path.expanduser(rcParams.get('savefig.directory', ''))
if startpath == '':
# explicitly missing key or empty str signals to use cwd
rcParams['savefig.directory'] = startpath
else:
# save dir for next time
rcParams['savefig.directory'] = os.path.dirname(six.text_type(fname))
try:
self.canvas.print_figure(fname, format=format)
except Exception as e:
error_msg_gtk(str(e), parent=self)
def configure_subplots(self, button):
toolfig = Figure(figsize=(6,3))
canvas = self._get_canvas(toolfig)
toolfig.subplots_adjust(top=0.9)
tool = SubplotTool(self.canvas.figure, toolfig)
w = int (toolfig.bbox.width)
h = int (toolfig.bbox.height)
window = gtk.Window()
if (window_icon):
try: window.set_icon_from_file(window_icon)
except:
# we presumably already logged a message on the
# failure of the main plot, don't keep reporting
pass
window.set_title("Subplot Configuration Tool")
window.set_default_size(w, h)
vbox = gtk.VBox()
window.add(vbox)
vbox.show()
canvas.show()
vbox.pack_start(canvas, True, True)
window.show()
def _get_canvas(self, fig):
return FigureCanvasGTK(fig)
class FileChooserDialog(gtk.FileChooserDialog):
"""GTK+ 2.4 file selector which presents the user with a menu
of supported image formats
"""
def __init__ (self,
title = 'Save file',
parent = None,
action = gtk.FILE_CHOOSER_ACTION_SAVE,
buttons = (gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,
gtk.STOCK_SAVE, gtk.RESPONSE_OK),
path = None,
filetypes = [],
default_filetype = None
):
super(FileChooserDialog, self).__init__ (title, parent, action,
buttons)
super(FileChooserDialog, self).set_do_overwrite_confirmation(True)
self.set_default_response (gtk.RESPONSE_OK)
if not path: path = os.getcwd() + os.sep
# create an extra widget to list supported image formats
self.set_current_folder (path)
self.set_current_name ('image.' + default_filetype)
hbox = gtk.HBox (spacing=10)
hbox.pack_start (gtk.Label ("File Format:"), expand=False)
liststore = gtk.ListStore(gobject.TYPE_STRING)
cbox = gtk.ComboBox(liststore)
cell = gtk.CellRendererText()
cbox.pack_start(cell, True)
cbox.add_attribute(cell, 'text', 0)
hbox.pack_start (cbox)
self.filetypes = filetypes
self.sorted_filetypes = list(six.iteritems(filetypes))
self.sorted_filetypes.sort()
default = 0
for i, (ext, name) in enumerate(self.sorted_filetypes):
cbox.append_text ("%s (*.%s)" % (name, ext))
if ext == default_filetype:
default = i
cbox.set_active(default)
self.ext = default_filetype
def cb_cbox_changed (cbox, data=None):
"""File extension changed"""
head, filename = os.path.split(self.get_filename())
root, ext = os.path.splitext(filename)
ext = ext[1:]
new_ext = self.sorted_filetypes[cbox.get_active()][0]
self.ext = new_ext
if ext in self.filetypes:
filename = root + '.' + new_ext
elif ext == '':
filename = filename.rstrip('.') + '.' + new_ext
self.set_current_name (filename)
cbox.connect ("changed", cb_cbox_changed)
hbox.show_all()
self.set_extra_widget(hbox)
def get_filename_from_user (self):
while True:
filename = None
if self.run() != int(gtk.RESPONSE_OK):
break
filename = self.get_filename()
break
return filename, self.ext
class DialogLineprops(object):
"""
A GUI dialog for controlling lineprops
"""
signals = (
'on_combobox_lineprops_changed',
'on_combobox_linestyle_changed',
'on_combobox_marker_changed',
'on_colorbutton_linestyle_color_set',
'on_colorbutton_markerface_color_set',
'on_dialog_lineprops_okbutton_clicked',
'on_dialog_lineprops_cancelbutton_clicked',
)
linestyles = [ls for ls in lines.Line2D.lineStyles if ls.strip()]
linestyled = dict([ (s,i) for i,s in enumerate(linestyles)])
markers = [m for m in markers.MarkerStyle.markers if cbook.is_string_like(m)]
markerd = dict([(s,i) for i,s in enumerate(markers)])
def __init__(self, lines):
import gtk.glade
datadir = matplotlib.get_data_path()
gladefile = os.path.join(datadir, 'lineprops.glade')
if not os.path.exists(gladefile):
raise IOError('Could not find gladefile lineprops.glade in %s'%datadir)
self._inited = False
self._updateson = True # suppress updates when setting widgets manually
self.wtree = gtk.glade.XML(gladefile, 'dialog_lineprops')
self.wtree.signal_autoconnect(dict([(s, getattr(self, s)) for s in self.signals]))
self.dlg = self.wtree.get_widget('dialog_lineprops')
self.lines = lines
cbox = self.wtree.get_widget('combobox_lineprops')
cbox.set_active(0)
self.cbox_lineprops = cbox
cbox = self.wtree.get_widget('combobox_linestyles')
for ls in self.linestyles:
cbox.append_text(ls)
cbox.set_active(0)
self.cbox_linestyles = cbox
cbox = self.wtree.get_widget('combobox_markers')
for m in self.markers:
cbox.append_text(m)
cbox.set_active(0)
self.cbox_markers = cbox
self._lastcnt = 0
self._inited = True
def show(self):
'populate the combo box'
self._updateson = False
# flush the old
cbox = self.cbox_lineprops
for i in range(self._lastcnt-1,-1,-1):
cbox.remove_text(i)
# add the new
for line in self.lines:
cbox.append_text(line.get_label())
cbox.set_active(0)
self._updateson = True
self._lastcnt = len(self.lines)
self.dlg.show()
def get_active_line(self):
'get the active line'
ind = self.cbox_lineprops.get_active()
line = self.lines[ind]
return line
def get_active_linestyle(self):
'get the active linestyle'
ind = self.cbox_linestyles.get_active()
ls = self.linestyles[ind]
return ls
def get_active_marker(self):
'get the active marker'
ind = self.cbox_markers.get_active()
m = self.markers[ind]
return m
def _update(self):
'update the active line props from the widgets'
if not self._inited or not self._updateson: return
line = self.get_active_line()
ls = self.get_active_linestyle()
marker = self.get_active_marker()
line.set_linestyle(ls)
line.set_marker(marker)
button = self.wtree.get_widget('colorbutton_linestyle')
color = button.get_color()
r, g, b = [val/65535. for val in (color.red, color.green, color.blue)]
line.set_color((r,g,b))
button = self.wtree.get_widget('colorbutton_markerface')
color = button.get_color()
r, g, b = [val/65535. for val in (color.red, color.green, color.blue)]
line.set_markerfacecolor((r,g,b))
line.figure.canvas.draw()
def on_combobox_lineprops_changed(self, item):
'update the widgets from the active line'
if not self._inited: return
self._updateson = False
line = self.get_active_line()
ls = line.get_linestyle()
if ls is None: ls = 'None'
self.cbox_linestyles.set_active(self.linestyled[ls])
marker = line.get_marker()
if marker is None: marker = 'None'
self.cbox_markers.set_active(self.markerd[marker])
r,g,b = colorConverter.to_rgb(line.get_color())
color = gtk.gdk.Color(*[int(val*65535) for val in (r,g,b)])
button = self.wtree.get_widget('colorbutton_linestyle')
button.set_color(color)
r,g,b = colorConverter.to_rgb(line.get_markerfacecolor())
color = gtk.gdk.Color(*[int(val*65535) for val in (r,g,b)])
button = self.wtree.get_widget('colorbutton_markerface')
button.set_color(color)
self._updateson = True
def on_combobox_linestyle_changed(self, item):
self._update()
def on_combobox_marker_changed(self, item):
self._update()
def on_colorbutton_linestyle_color_set(self, button):
self._update()
def on_colorbutton_markerface_color_set(self, button):
'called colorbutton marker clicked'
self._update()
def on_dialog_lineprops_okbutton_clicked(self, button):
self._update()
self.dlg.hide()
def on_dialog_lineprops_cancelbutton_clicked(self, button):
self.dlg.hide()
# set icon used when windows are minimized
# Unfortunately, the SVG renderer (rsvg) leaks memory under earlier
# versions of pygtk, so we have to use a PNG file instead.
try:
if gtk.pygtk_version < (2, 8, 0) or sys.platform == 'win32':
icon_filename = 'matplotlib.png'
else:
icon_filename = 'matplotlib.svg'
window_icon = os.path.join(rcParams['datapath'], 'images', icon_filename)
except:
window_icon = None
verbose.report('Could not load matplotlib icon: %s' % sys.exc_info()[1])
def error_msg_gtk(msg, parent=None):
if parent is not None: # find the toplevel gtk.Window
parent = parent.get_toplevel()
if parent.flags() & gtk.TOPLEVEL == 0:
parent = None
if not is_string_like(msg):
msg = ','.join(map(str,msg))
dialog = gtk.MessageDialog(
parent = parent,
type = gtk.MESSAGE_ERROR,
buttons = gtk.BUTTONS_OK,
message_format = msg)
dialog.run()
dialog.destroy()
FigureCanvas = FigureCanvasGTK
FigureManager = FigureManagerGTK
| mit |
kempa-liehr/DSVMs | flink-vm/roles/apache_flink/files/examples/local_mae_no_parallel.py | 1 | 3134 | from flink.plan.Environment import get_environment
from flink.plan.Constants import WriteMode
from flink.functions.MapFunction import MapFunction
from flink.functions.JoinFunction import JoinFunction
from sklearn import cross_validation, linear_model
import numpy as np
import random
def add_noise(data, seed=1, sigma=0.3):
random.seed(seed)
return [random.gauss(x, sigma) for x in data]
def fit_estimator(data):
"""
Train an estimator on the data and return it
data : DataProvider
"""
X_raw = data.X_train[:,1]
X = np.array(add_noise(X_raw)).reshape(-1, 1)
y = data.y_train[:,1]
est = linear_model.LinearRegression()
est.fit(X, y)
return est
class AnalyticalFunction(object):
def __init__(self, a=5, b=2):
self.a = a
self.b = b
def value(self, x):
return self.a * x + self.b
class DataProvider:
"""
Provide access to our training and test sets
"""
def __init__(self, size=100, test_size=0.1):
fn = AnalyticalFunction()
X_raw = range(50,size+50)
X_data = np.array(X_raw).reshape(-1, 1)
y_data = np.array([fn.value(x) for x in X_raw])
# we need to add an index column
# it enables us to later join predictions and actual values
index = np.array(range(size)).reshape(-1, 1)
self.X = np.c_[index, X_data]
self.y = np.c_[index, y_data]
# split up the data set in train and test sets
self.X_train, self.X_test, self.y_train, self.y_test = cross_validation.train_test_split(self.X, self.y, test_size=test_size, random_state=1)
class AbsoluteErrorJoin(JoinFunction):
"""
Rich function that calculates the absolute error on join
"""
def join(self, a, b):
return abs(a[1] - b[1])
class MapEstimator(MapFunction):
"""
Rich function, needs to be initialised with an estimator
"""
def __init__(self, est):
super(MapEstimator, self).__init__()
self.est = est
def map(self, row):
i, x = row
x = np.array(x).reshape(1, -1)
return (i, self.est.predict(x).tolist()[0])
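# Illustrative note (added for clarity): every record in this script is an
# (index, value) tuple, so joining predictions and actuals on field 0 lines
# up test rows and AbsoluteErrorJoin emits |prediction - actual| per sample,
# e.g. prediction (3, 252.1) joined with actual (3, 252.0) -> 0.1.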
if __name__ == "__main__":
env = get_environment()
env.set_parallelism(1)
# create our data set
prov = DataProvider(size=10000)
# fit an estimator
est = fit_estimator(prov)
# get the X_test set as list and unpack it
# then map our Estimator to get predictions
data_pred = env.from_elements(*prov.X_test.tolist()) \
.map(MapEstimator(est))
# get the actual values for the test set
data_actual = env.from_elements(*prov.y_test.tolist())
# join predictions and actual results on index column
# calculate the mean absolute error by
# 1. calculating absolute error while joining
# 2. summing all absolute errors with reduce
# 3. dividing the sum by the test set size
result = data_pred.join(data_actual).where(0).equal_to(0).using(AbsoluteErrorJoin()) \
.reduce(lambda a, b: a + b) \
.map(lambda x: x / len(prov.y_test.tolist()))
# write the result without parallelism
result.write_text('result.txt', write_mode=WriteMode.OVERWRITE) \
.set_parallelism(1)
env.execute(local=True)
| apache-2.0 |
ndingwall/scikit-learn | sklearn/feature_selection/_variance_threshold.py | 10 | 3395 | # Author: Lars Buitinck
# License: 3-clause BSD
import numpy as np
from ..base import BaseEstimator
from ._base import SelectorMixin
from ..utils.sparsefuncs import mean_variance_axis, min_max_axis
from ..utils.validation import check_is_fitted
class VarianceThreshold(SelectorMixin, BaseEstimator):
"""Feature selector that removes all low-variance features.
This feature selection algorithm looks only at the features (X), not the
desired outputs (y), and can thus be used for unsupervised learning.
Read more in the :ref:`User Guide <variance_threshold>`.
Parameters
----------
threshold : float, default=0
Features with a training-set variance lower than this threshold will
be removed. The default is to keep all features with non-zero variance,
i.e. remove the features that have the same value in all samples.
Attributes
----------
variances_ : array, shape (n_features,)
Variances of individual features.
Notes
-----
Allows NaN in the input.
Raises ValueError if no feature in X meets the variance threshold.
Examples
--------
The following dataset has integer features, two of which are the same
in every sample. These are removed with the default setting for threshold::
>>> X = [[0, 2, 0, 3], [0, 1, 4, 3], [0, 1, 1, 3]]
>>> selector = VarianceThreshold()
>>> selector.fit_transform(X)
array([[2, 0],
[1, 4],
[1, 1]])
"""
def __init__(self, threshold=0.):
self.threshold = threshold
def fit(self, X, y=None):
"""Learn empirical variances from X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Sample vectors from which to compute variances.
y : any, default=None
Ignored. This parameter exists only for compatibility with
sklearn.pipeline.Pipeline.
Returns
-------
self
"""
X = self._validate_data(X, accept_sparse=('csr', 'csc'),
dtype=np.float64,
force_all_finite='allow-nan')
if hasattr(X, "toarray"): # sparse matrix
_, self.variances_ = mean_variance_axis(X, axis=0)
if self.threshold == 0:
mins, maxes = min_max_axis(X, axis=0)
peak_to_peaks = maxes - mins
else:
self.variances_ = np.nanvar(X, axis=0)
if self.threshold == 0:
peak_to_peaks = np.ptp(X, axis=0)
if self.threshold == 0:
# Use peak-to-peak to avoid numeric precision issues
# for constant features
compare_arr = np.array([self.variances_, peak_to_peaks])
self.variances_ = np.nanmin(compare_arr, axis=0)
if np.all(~np.isfinite(self.variances_) |
(self.variances_ <= self.threshold)):
msg = "No feature in X meets the variance threshold {0:.5f}"
if X.shape[0] == 1:
msg += " (X contains only one sample)"
raise ValueError(msg.format(self.threshold))
return self
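# Worked note (added for clarity, not in the upstream source): with the
# default threshold of 0, a constant column such as [0.1, 0.1, 0.1] may end
# up with a tiny nonzero floating-point variance, while its peak-to-peak
# (max - min) is exactly 0, so taking the element-wise minimum of the two
# reliably removes constant features. For example:
#
#   VarianceThreshold().fit_transform([[0.1, 1.0], [0.1, 2.0], [0.1, 3.0]])
#   # keeps only the second column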
def _get_support_mask(self):
check_is_fitted(self)
return self.variances_ > self.threshold
def _more_tags(self):
return {'allow_nan': True}
| bsd-3-clause |
charris/numpy | numpy/core/code_generators/ufunc_docstrings.py | 7 | 106598 | """
Docstrings for generated ufuncs
The syntax is designed to look like the function add_newdoc is being
called from numpy.lib, but in this file add_newdoc puts the docstrings
in a dictionary. This dictionary is used in
numpy/core/code_generators/generate_umath.py to generate the docstrings
for the ufuncs in numpy.core at the C level when the ufuncs are created
at compile time.
"""
import textwrap
docdict = {}
def get(name):
return docdict.get(name)
# common parameter text to all ufuncs
subst = {
'PARAMS': textwrap.dedent("""
out : ndarray, None, or tuple of ndarray and None, optional
A location into which the result is stored. If provided, it must have
a shape that the inputs broadcast to. If not provided or None,
a freshly-allocated array is returned. A tuple (possible only as a
keyword argument) must have length equal to the number of outputs.
where : array_like, optional
This condition is broadcast over the input. At locations where the
condition is True, the `out` array will be set to the ufunc result.
Elsewhere, the `out` array will retain its original value.
Note that if an uninitialized `out` array is created via the default
``out=None``, locations within it where the condition is False will
remain uninitialized.
**kwargs
For other keyword-only arguments, see the
:ref:`ufunc docs <ufuncs.kwargs>`.
""").strip(),
'BROADCASTABLE_2': ("If ``x1.shape != x2.shape``, they must be "
"broadcastable to a common\n shape (which becomes "
"the shape of the output)."),
'OUT_SCALAR_1': "This is a scalar if `x` is a scalar.",
'OUT_SCALAR_2': "This is a scalar if both `x1` and `x2` are scalars.",
}
def add_newdoc(place, name, doc):
doc = textwrap.dedent(doc).strip()
skip = (
# gufuncs do not use the OUT_SCALAR replacement strings
'matmul',
# clip has 3 inputs, which is not handled by this
'clip',
)
if name[0] != '_' and name not in skip:
if '\nx :' in doc:
assert '$OUT_SCALAR_1' in doc, "in {}".format(name)
elif '\nx2 :' in doc or '\nx1, x2 :' in doc:
assert '$OUT_SCALAR_2' in doc, "in {}".format(name)
else:
assert False, "Could not detect number of inputs in {}".format(name)
for k, v in subst.items():
doc = doc.replace('$' + k, v)
docdict['.'.join((place, name))] = doc
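# Illustrative note (added for clarity): add_newdoc only populates `docdict`,
# keyed by '<place>.<name>', after expanding the $-placeholders defined in
# `subst` above. For example:
#
#   get('numpy.core.umath.add')   # expanded docstring for np.add, or None
#   # '$OUT_SCALAR_2' in the stored text has already been replaced by
#   # "This is a scalar if both `x1` and `x2` are scalars."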
add_newdoc('numpy.core.umath', 'absolute',
"""
Calculate the absolute value element-wise.
``np.abs`` is a shorthand for this function.
Parameters
----------
x : array_like
Input array.
$PARAMS
Returns
-------
absolute : ndarray
An ndarray containing the absolute value of
each element in `x`. For complex input, ``a + ib``, the
absolute value is :math:`\\sqrt{ a^2 + b^2 }`.
$OUT_SCALAR_1
Examples
--------
>>> x = np.array([-1.2, 1.2])
>>> np.absolute(x)
array([ 1.2, 1.2])
>>> np.absolute(1.2 + 1j)
1.5620499351813308
Plot the function over ``[-10, 10]``:
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(start=-10, stop=10, num=101)
>>> plt.plot(x, np.absolute(x))
>>> plt.show()
Plot the function over the complex plane:
>>> xx = x + 1j * x[:, np.newaxis]
>>> plt.imshow(np.abs(xx), extent=[-10, 10, -10, 10], cmap='gray')
>>> plt.show()
The `abs` function can be used as a shorthand for ``np.absolute`` on
ndarrays.
>>> x = np.array([-1.2, 1.2])
>>> abs(x)
array([1.2, 1.2])
""")
add_newdoc('numpy.core.umath', 'add',
"""
Add arguments element-wise.
Parameters
----------
x1, x2 : array_like
The arrays to be added.
$BROADCASTABLE_2
$PARAMS
Returns
-------
add : ndarray or scalar
The sum of `x1` and `x2`, element-wise.
$OUT_SCALAR_2
Notes
-----
Equivalent to `x1` + `x2` in terms of array broadcasting.
Examples
--------
>>> np.add(1.0, 4.0)
5.0
>>> x1 = np.arange(9.0).reshape((3, 3))
>>> x2 = np.arange(3.0)
>>> np.add(x1, x2)
array([[ 0., 2., 4.],
[ 3., 5., 7.],
[ 6., 8., 10.]])
The ``+`` operator can be used as a shorthand for ``np.add`` on ndarrays.
>>> x1 = np.arange(9.0).reshape((3, 3))
>>> x2 = np.arange(3.0)
>>> x1 + x2
array([[ 0., 2., 4.],
[ 3., 5., 7.],
[ 6., 8., 10.]])
""")
add_newdoc('numpy.core.umath', 'arccos',
"""
Trigonometric inverse cosine, element-wise.
The inverse of `cos` so that, if ``y = cos(x)``, then ``x = arccos(y)``.
Parameters
----------
x : array_like
`x`-coordinate on the unit circle.
For real arguments, the domain is [-1, 1].
$PARAMS
Returns
-------
angle : ndarray
The angle of the ray intersecting the unit circle at the given
`x`-coordinate in radians [0, pi].
$OUT_SCALAR_1
See Also
--------
cos, arctan, arcsin, emath.arccos
Notes
-----
`arccos` is a multivalued function: for each `x` there are infinitely
many numbers `z` such that ``cos(z) = x``. The convention is to return
the angle `z` whose real part lies in `[0, pi]`.
For real-valued input data types, `arccos` always returns real output.
For each value that cannot be expressed as a real number or infinity,
it yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `arccos` is a complex analytic function that
has branch cuts ``[-inf, -1]`` and `[1, inf]` and is continuous from
above on the former and from below on the latter.
The inverse `cos` is also known as `acos` or cos^-1.
References
----------
M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 79. http://www.math.sfu.ca/~cbm/aands/
Examples
--------
We expect the arccos of 1 to be 0, and of -1 to be pi:
>>> np.arccos([1, -1])
array([ 0. , 3.14159265])
Plot arccos:
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-1, 1, num=100)
>>> plt.plot(x, np.arccos(x))
>>> plt.axis('tight')
>>> plt.show()
""")
add_newdoc('numpy.core.umath', 'arccosh',
"""
Inverse hyperbolic cosine, element-wise.
Parameters
----------
x : array_like
Input array.
$PARAMS
Returns
-------
arccosh : ndarray
Array of the same shape as `x`.
$OUT_SCALAR_1
See Also
--------
cosh, arcsinh, sinh, arctanh, tanh
Notes
-----
`arccosh` is a multivalued function: for each `x` there are infinitely
many numbers `z` such that `cosh(z) = x`. The convention is to return the
`z` whose imaginary part lies in ``[-pi, pi]`` and the real part in
``[0, inf]``.
For real-valued input data types, `arccosh` always returns real output.
For each value that cannot be expressed as a real number or infinity, it
yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `arccosh` is a complex analytical function that
has a branch cut `[-inf, 1]` and is continuous from above on it.
References
----------
.. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 86. http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Inverse hyperbolic function",
https://en.wikipedia.org/wiki/Arccosh
Examples
--------
>>> np.arccosh([np.e, 10.0])
array([ 1.65745445, 2.99322285])
>>> np.arccosh(1)
0.0
""")
add_newdoc('numpy.core.umath', 'arcsin',
"""
Inverse sine, element-wise.
Parameters
----------
x : array_like
`y`-coordinate on the unit circle.
$PARAMS
Returns
-------
angle : ndarray
The inverse sine of each element in `x`, in radians and in the
closed interval ``[-pi/2, pi/2]``.
$OUT_SCALAR_1
See Also
--------
sin, cos, arccos, tan, arctan, arctan2, emath.arcsin
Notes
-----
`arcsin` is a multivalued function: for each `x` there are infinitely
many numbers `z` such that :math:`sin(z) = x`. The convention is to
return the angle `z` whose real part lies in [-pi/2, pi/2].
For real-valued input data types, *arcsin* always returns real output.
For each value that cannot be expressed as a real number or infinity,
it yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `arcsin` is a complex analytic function that
has, by convention, the branch cuts [-inf, -1] and [1, inf] and is
continuous from above on the former and from below on the latter.
The inverse sine is also known as `asin` or sin^{-1}.
References
----------
Abramowitz, M. and Stegun, I. A., *Handbook of Mathematical Functions*,
10th printing, New York: Dover, 1964, pp. 79ff.
http://www.math.sfu.ca/~cbm/aands/
Examples
--------
>>> np.arcsin(1) # pi/2
1.5707963267948966
>>> np.arcsin(-1) # -pi/2
-1.5707963267948966
>>> np.arcsin(0)
0.0
""")
add_newdoc('numpy.core.umath', 'arcsinh',
"""
Inverse hyperbolic sine element-wise.
Parameters
----------
x : array_like
Input array.
$PARAMS
Returns
-------
out : ndarray or scalar
Array of the same shape as `x`.
$OUT_SCALAR_1
Notes
-----
`arcsinh` is a multivalued function: for each `x` there are infinitely
many numbers `z` such that `sinh(z) = x`. The convention is to return the
`z` whose imaginary part lies in `[-pi/2, pi/2]`.
For real-valued input data types, `arcsinh` always returns real output.
For each value that cannot be expressed as a real number or infinity, it
returns ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `arcsinh` is a complex analytical function that
has branch cuts `[1j, infj]` and `[-1j, -infj]` and is continuous from
the right on the former and from the left on the latter.
The inverse hyperbolic sine is also known as `asinh` or ``sinh^-1``.
References
----------
.. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 86. http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Inverse hyperbolic function",
https://en.wikipedia.org/wiki/Arcsinh
Examples
--------
>>> np.arcsinh(np.array([np.e, 10.0]))
array([ 1.72538256, 2.99822295])
""")
add_newdoc('numpy.core.umath', 'arctan',
"""
Trigonometric inverse tangent, element-wise.
The inverse of tan, so that if ``y = tan(x)`` then ``x = arctan(y)``.
Parameters
----------
x : array_like
$PARAMS
Returns
-------
out : ndarray or scalar
Out has the same shape as `x`. Its real part is in
``[-pi/2, pi/2]`` (``arctan(+/-inf)`` returns ``+/-pi/2``).
$OUT_SCALAR_1
See Also
--------
arctan2 : The "four quadrant" arctan of the angle formed by (`x`, `y`)
and the positive `x`-axis.
angle : Argument of complex values.
Notes
-----
`arctan` is a multi-valued function: for each `x` there are infinitely
many numbers `z` such that tan(`z`) = `x`. The convention is to return
the angle `z` whose real part lies in [-pi/2, pi/2].
For real-valued input data types, `arctan` always returns real output.
For each value that cannot be expressed as a real number or infinity,
it yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `arctan` is a complex analytic function that
has [``1j, infj``] and [``-1j, -infj``] as branch cuts, and is continuous
from the left on the former and from the right on the latter.
The inverse tangent is also known as `atan` or tan^{-1}.
References
----------
Abramowitz, M. and Stegun, I. A., *Handbook of Mathematical Functions*,
10th printing, New York: Dover, 1964, pp. 79.
http://www.math.sfu.ca/~cbm/aands/
Examples
--------
We expect the arctan of 0 to be 0, and of 1 to be pi/4:
>>> np.arctan([0, 1])
array([ 0. , 0.78539816])
>>> np.pi/4
0.78539816339744828
Plot arctan:
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-10, 10)
>>> plt.plot(x, np.arctan(x))
>>> plt.axis('tight')
>>> plt.show()
""")
add_newdoc('numpy.core.umath', 'arctan2',
"""
Element-wise arc tangent of ``x1/x2`` choosing the quadrant correctly.
The quadrant (i.e., branch) is chosen so that ``arctan2(x1, x2)`` is
the signed angle in radians between the ray ending at the origin and
passing through the point (1,0), and the ray ending at the origin and
passing through the point (`x2`, `x1`). (Note the role reversal: the
"`y`-coordinate" is the first function parameter, the "`x`-coordinate"
is the second.) By IEEE convention, this function is defined for
`x2` = +/-0 and for either or both of `x1` and `x2` = +/-inf (see
Notes for specific values).
This function is not defined for complex-valued arguments; for the
so-called argument of complex values, use `angle`.
Parameters
----------
x1 : array_like, real-valued
`y`-coordinates.
x2 : array_like, real-valued
`x`-coordinates.
$BROADCASTABLE_2
$PARAMS
Returns
-------
angle : ndarray
Array of angles in radians, in the range ``[-pi, pi]``.
$OUT_SCALAR_2
See Also
--------
arctan, tan, angle
Notes
-----
*arctan2* is identical to the `atan2` function of the underlying
C library. The following special values are defined in the C
standard: [1]_
====== ====== ================
`x1` `x2` `arctan2(x1,x2)`
====== ====== ================
+/- 0 +0 +/- 0
+/- 0 -0 +/- pi
> 0 +/-inf +0 / +pi
< 0 +/-inf -0 / -pi
+/-inf +inf +/- (pi/4)
+/-inf -inf +/- (3*pi/4)
====== ====== ================
Note that +0 and -0 are distinct floating point numbers, as are +inf
and -inf.
References
----------
.. [1] ISO/IEC standard 9899:1999, "Programming language C."
Examples
--------
Consider four points in different quadrants:
>>> x = np.array([-1, +1, +1, -1])
>>> y = np.array([-1, -1, +1, +1])
>>> np.arctan2(y, x) * 180 / np.pi
array([-135., -45., 45., 135.])
Note the order of the parameters. `arctan2` is defined also when `x2` = 0
and at several other special points, obtaining values in
the range ``[-pi, pi]``:
>>> np.arctan2([1., -1.], [0., 0.])
array([ 1.57079633, -1.57079633])
>>> np.arctan2([0., 0., np.inf], [+0., -0., np.inf])
array([ 0. , 3.14159265, 0.78539816])
""")
add_newdoc('numpy.core.umath', '_arg',
"""
DO NOT USE, ONLY FOR TESTING
""")
add_newdoc('numpy.core.umath', 'arctanh',
"""
Inverse hyperbolic tangent element-wise.
Parameters
----------
x : array_like
Input array.
$PARAMS
Returns
-------
out : ndarray or scalar
Array of the same shape as `x`.
$OUT_SCALAR_1
See Also
--------
emath.arctanh
Notes
-----
`arctanh` is a multivalued function: for each `x` there are infinitely
many numbers `z` such that ``tanh(z) = x``. The convention is to return
the `z` whose imaginary part lies in `[-pi/2, pi/2]`.
For real-valued input data types, `arctanh` always returns real output.
For each value that cannot be expressed as a real number or infinity,
it yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `arctanh` is a complex analytical function
that has branch cuts `[-1, -inf]` and `[1, inf]` and is continuous from
above on the former and from below on the latter.
The inverse hyperbolic tangent is also known as `atanh` or ``tanh^-1``.
References
----------
.. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 86. http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Inverse hyperbolic function",
https://en.wikipedia.org/wiki/Arctanh
Examples
--------
>>> np.arctanh([0, -0.5])
array([ 0. , -0.54930614])
""")
add_newdoc('numpy.core.umath', 'bitwise_and',
"""
Compute the bit-wise AND of two arrays element-wise.
Computes the bit-wise AND of the underlying binary representation of
the integers in the input arrays. This ufunc implements the C/Python
operator ``&``.
Parameters
----------
x1, x2 : array_like
Only integer and boolean types are handled.
$BROADCASTABLE_2
$PARAMS
Returns
-------
out : ndarray or scalar
Result.
$OUT_SCALAR_2
See Also
--------
logical_and
bitwise_or
bitwise_xor
binary_repr :
Return the binary representation of the input number as a string.
Examples
--------
The number 13 is represented by ``00001101``. Likewise, 17 is
represented by ``00010001``. The bit-wise AND of 13 and 17 is
therefore ``00001101 & 00010001 = 00000001``, or 1:
>>> np.bitwise_and(13, 17)
1
>>> np.bitwise_and(14, 13)
12
>>> np.binary_repr(12)
'1100'
>>> np.bitwise_and([14,3], 13)
array([12, 1])
>>> np.bitwise_and([11,7], [4,25])
array([0, 1])
>>> np.bitwise_and(np.array([2,5,255]), np.array([3,14,16]))
array([ 2, 4, 16])
>>> np.bitwise_and([True, True], [False, True])
array([False, True])
The ``&`` operator can be used as a shorthand for ``np.bitwise_and`` on
ndarrays.
>>> x1 = np.array([2, 5, 255])
>>> x2 = np.array([3, 14, 16])
>>> x1 & x2
array([ 2, 4, 16])
""")
add_newdoc('numpy.core.umath', 'bitwise_or',
"""
Compute the bit-wise OR of two arrays element-wise.
Computes the bit-wise OR of the underlying binary representation of
the integers in the input arrays. This ufunc implements the C/Python
operator ``|``.
Parameters
----------
x1, x2 : array_like
Only integer and boolean types are handled.
$BROADCASTABLE_2
$PARAMS
Returns
-------
out : ndarray or scalar
Result.
$OUT_SCALAR_2
See Also
--------
logical_or
bitwise_and
bitwise_xor
binary_repr :
Return the binary representation of the input number as a string.
Examples
--------
The number 13 has the binary representation ``00001101``. Likewise,
16 is represented by ``00010000``. The bit-wise OR of 13 and 16 is
then ``00011101``, or 29:
>>> np.bitwise_or(13, 16)
29
>>> np.binary_repr(29)
'11101'
>>> np.bitwise_or(32, 2)
34
>>> np.bitwise_or([33, 4], 1)
array([33, 5])
>>> np.bitwise_or([33, 4], [1, 2])
array([33, 6])
>>> np.bitwise_or(np.array([2, 5, 255]), np.array([4, 4, 4]))
array([ 6, 5, 255])
>>> np.array([2, 5, 255]) | np.array([4, 4, 4])
array([ 6, 5, 255])
>>> np.bitwise_or(np.array([2, 5, 255, 2147483647], dtype=np.int32),
... np.array([4, 4, 4, 2147483647], dtype=np.int32))
array([ 6, 5, 255, 2147483647])
>>> np.bitwise_or([True, True], [False, True])
array([ True, True])
The ``|`` operator can be used as a shorthand for ``np.bitwise_or`` on
ndarrays.
>>> x1 = np.array([2, 5, 255])
>>> x2 = np.array([4, 4, 4])
>>> x1 | x2
array([ 6, 5, 255])
""")
add_newdoc('numpy.core.umath', 'bitwise_xor',
"""
Compute the bit-wise XOR of two arrays element-wise.
Computes the bit-wise XOR of the underlying binary representation of
the integers in the input arrays. This ufunc implements the C/Python
operator ``^``.
Parameters
----------
x1, x2 : array_like
Only integer and boolean types are handled.
$BROADCASTABLE_2
$PARAMS
Returns
-------
out : ndarray or scalar
Result.
$OUT_SCALAR_2
See Also
--------
logical_xor
bitwise_and
bitwise_or
binary_repr :
Return the binary representation of the input number as a string.
Examples
--------
The number 13 is represented by ``00001101``. Likewise, 17 is
represented by ``00010001``. The bit-wise XOR of 13 and 17 is
therefore ``00011100``, or 28:
>>> np.bitwise_xor(13, 17)
28
>>> np.binary_repr(28)
'11100'
>>> np.bitwise_xor(31, 5)
26
>>> np.bitwise_xor([31,3], 5)
array([26, 6])
>>> np.bitwise_xor([31,3], [5,6])
array([26, 5])
>>> np.bitwise_xor([True, True], [False, True])
array([ True, False])
The ``^`` operator can be used as a shorthand for ``np.bitwise_xor`` on
ndarrays.
>>> x1 = np.array([True, True])
>>> x2 = np.array([False, True])
>>> x1 ^ x2
array([ True, False])
""")
add_newdoc('numpy.core.umath', 'ceil',
"""
Return the ceiling of the input, element-wise.
The ceil of the scalar `x` is the smallest integer `i`, such that
``i >= x``. It is often denoted as :math:`\\lceil x \\rceil`.
Parameters
----------
x : array_like
Input data.
$PARAMS
Returns
-------
y : ndarray or scalar
The ceiling of each element in `x`, with `float` dtype.
$OUT_SCALAR_1
See Also
--------
floor, trunc, rint, fix
Examples
--------
>>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
>>> np.ceil(a)
array([-1., -1., -0., 1., 2., 2., 2.])
""")
add_newdoc('numpy.core.umath', 'trunc',
"""
Return the truncated value of the input, element-wise.
The truncated value of the scalar `x` is the nearest integer `i` which
is closer to zero than `x` is. In short, the fractional part of the
signed number `x` is discarded.
Parameters
----------
x : array_like
Input data.
$PARAMS
Returns
-------
y : ndarray or scalar
The truncated value of each element in `x`.
$OUT_SCALAR_1
See Also
--------
ceil, floor, rint, fix
Notes
-----
.. versionadded:: 1.3.0
Examples
--------
>>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
>>> np.trunc(a)
array([-1., -1., -0., 0., 1., 1., 2.])
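For negative inputs, truncation rounds toward zero while `floor` rounds
down; a small illustrative check (printed to keep the doctest stable across
NumPy versions):
>>> print(np.trunc(-1.5), np.floor(-1.5))
-1.0 -2.0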
""")
add_newdoc('numpy.core.umath', 'conjugate',
"""
Return the complex conjugate, element-wise.
The complex conjugate of a complex number is obtained by changing the
sign of its imaginary part.
Parameters
----------
x : array_like
Input value.
$PARAMS
Returns
-------
y : ndarray
The complex conjugate of `x`, with the same dtype as `x`.
$OUT_SCALAR_1
Notes
-----
`conj` is an alias for `conjugate`:
>>> np.conj is np.conjugate
True
Examples
--------
>>> np.conjugate(1+2j)
(1-2j)
>>> x = np.eye(2) + 1j * np.eye(2)
>>> np.conjugate(x)
array([[ 1.-1.j, 0.-0.j],
[ 0.-0.j, 1.-1.j]])
""")
add_newdoc('numpy.core.umath', 'cos',
"""
Cosine element-wise.
Parameters
----------
x : array_like
Input array in radians.
$PARAMS
Returns
-------
y : ndarray
The corresponding cosine values.
$OUT_SCALAR_1
Notes
-----
If `out` is provided, the function writes the result into it,
and returns a reference to `out`. (See Examples)
References
----------
M. Abramowitz and I. A. Stegun, Handbook of Mathematical Functions.
New York, NY: Dover, 1972.
Examples
--------
>>> np.cos(np.array([0, np.pi/2, np.pi]))
array([ 1.00000000e+00, 6.12303177e-17, -1.00000000e+00])
>>>
>>> # Example of providing the optional output parameter
>>> out1 = np.array([0], dtype='d')
>>> out2 = np.cos([0.1], out1)
>>> out2 is out1
True
>>>
>>> # Example of ValueError due to provision of shape mis-matched `out`
>>> np.cos(np.zeros((3,3)),np.zeros((2,2)))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ValueError: operands could not be broadcast together with shapes (3,3) (2,2)
""")
add_newdoc('numpy.core.umath', 'cosh',
"""
Hyperbolic cosine, element-wise.
Equivalent to ``1/2 * (np.exp(x) + np.exp(-x))`` and ``np.cos(1j*x)``.
Parameters
----------
x : array_like
Input array.
$PARAMS
Returns
-------
out : ndarray or scalar
Output array of same shape as `x`.
$OUT_SCALAR_1
Examples
--------
>>> np.cosh(0)
1.0
The hyperbolic cosine describes the shape of a hanging cable:
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-4, 4, 1000)
>>> plt.plot(x, np.cosh(x))
>>> plt.show()
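A numerical check of the identity stated above (illustrative; ``np.allclose``
absorbs floating-point rounding):
>>> x = np.linspace(-2, 2, 5)
>>> np.allclose(np.cosh(x), 0.5 * (np.exp(x) + np.exp(-x)))
True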
""")
add_newdoc('numpy.core.umath', 'degrees',
"""
Convert angles from radians to degrees.
Parameters
----------
x : array_like
Input array in radians.
$PARAMS
Returns
-------
y : ndarray of floats
The corresponding degree values; if `out` was supplied this is a
reference to it.
$OUT_SCALAR_1
See Also
--------
rad2deg : equivalent function
Examples
--------
Convert a radian array to degrees
>>> rad = np.arange(12.)*np.pi/6
>>> np.degrees(rad)
array([ 0., 30., 60., 90., 120., 150., 180., 210., 240.,
270., 300., 330.])
>>> out = np.zeros((rad.shape))
>>> r = np.degrees(rad, out)
>>> np.all(r == out)
True
""")
add_newdoc('numpy.core.umath', 'rad2deg',
"""
Convert angles from radians to degrees.
Parameters
----------
x : array_like
Angle in radians.
$PARAMS
Returns
-------
y : ndarray
The corresponding angle in degrees.
$OUT_SCALAR_1
See Also
--------
deg2rad : Convert angles from degrees to radians.
unwrap : Remove large jumps in angle by wrapping.
Notes
-----
.. versionadded:: 1.3.0
rad2deg(x) is ``180 * x / pi``.
Examples
--------
>>> np.rad2deg(np.pi/2)
90.0
""")
add_newdoc('numpy.core.umath', 'heaviside',
"""
Compute the Heaviside step function.
The Heaviside step function is defined as::
heaviside(x1, x2) = 0   if x1 < 0
                    x2  if x1 == 0
                    1   if x1 > 0
where `x2` is often taken to be 0.5, but 0 and 1 are also sometimes used.
Parameters
----------
x1 : array_like
Input values.
x2 : array_like
The value of the function when x1 is 0.
$BROADCASTABLE_2
$PARAMS
Returns
-------
out : ndarray or scalar
The output array, element-wise Heaviside step function of `x1`.
$OUT_SCALAR_2
Notes
-----
.. versionadded:: 1.13.0
References
----------
.. Wikipedia, "Heaviside step function",
https://en.wikipedia.org/wiki/Heaviside_step_function
Examples
--------
>>> np.heaviside([-1.5, 0, 2.0], 0.5)
array([ 0. , 0.5, 1. ])
>>> np.heaviside([-1.5, 0, 2.0], 1)
array([ 0., 1., 1.])
""")
add_newdoc('numpy.core.umath', 'divide',
"""
Divide arguments element-wise.
Parameters
----------
x1 : array_like
Dividend array.
x2 : array_like
Divisor array.
$BROADCASTABLE_2
$PARAMS
Returns
-------
y : ndarray or scalar
The quotient ``x1/x2``, element-wise.
$OUT_SCALAR_2
See Also
--------
seterr : Set whether to raise or warn on overflow, underflow and
division by zero.
Notes
-----
Equivalent to ``x1`` / ``x2`` in terms of array-broadcasting.
Behavior on division by zero can be changed using ``seterr``.
In Python 2, when both ``x1`` and ``x2`` are of an integer type,
``divide`` will behave like ``floor_divide``. In Python 3, it behaves
like ``true_divide``.
Examples
--------
>>> np.divide(2.0, 4.0)
0.5
>>> x1 = np.arange(9.0).reshape((3, 3))
>>> x2 = np.arange(3.0)
>>> np.divide(x1, x2)
array([[ NaN, 1. , 1. ],
[ Inf, 4. , 2.5],
[ Inf, 7. , 4. ]])
Note the behavior with integer types (Python 2 only):
>>> np.divide(2, 4)
0
>>> np.divide(2, 4.)
0.5
Division by zero always yields zero in integer arithmetic (again,
Python 2 only), and does not raise an exception or a warning:
>>> np.divide(np.array([0, 1], dtype=int), np.array([0, 0], dtype=int))
array([0, 0])
Division by zero can, however, be caught using ``seterr``:
>>> old_err_state = np.seterr(divide='raise')
>>> np.divide(1, 0)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
FloatingPointError: divide by zero encountered in divide
>>> ignored_states = np.seterr(**old_err_state)
>>> np.divide(1, 0)
0
The ``/`` operator can be used as a shorthand for ``np.divide`` on
ndarrays.
>>> x1 = np.arange(9.0).reshape((3, 3))
>>> x2 = 2 * np.ones(3)
>>> x1 / x2
array([[0. , 0.5, 1. ],
[1.5, 2. , 2.5],
[3. , 3.5, 4. ]])
""")
add_newdoc('numpy.core.umath', 'equal',
"""
Return (x1 == x2) element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays.
$BROADCASTABLE_2
$PARAMS
Returns
-------
out : ndarray or scalar
Output array, element-wise comparison of `x1` and `x2`.
Typically of type bool, unless ``dtype=object`` is passed.
$OUT_SCALAR_2
See Also
--------
not_equal, greater_equal, less_equal, greater, less
Examples
--------
>>> np.equal([0, 1, 3], np.arange(3))
array([ True, True, False])
What is compared are values, not types. So an int (1) and an array of
length one can evaluate as True:
>>> np.equal(1, np.ones(1))
array([ True])
The ``==`` operator can be used as a shorthand for ``np.equal`` on
ndarrays.
>>> a = np.array([2, 4, 6])
>>> b = np.array([2, 4, 2])
>>> a == b
array([ True, True, False])
""")
add_newdoc('numpy.core.umath', 'exp',
"""
Calculate the exponential of all elements in the input array.
Parameters
----------
x : array_like
Input values.
$PARAMS
Returns
-------
out : ndarray or scalar
Output array, element-wise exponential of `x`.
$OUT_SCALAR_1
See Also
--------
expm1 : Calculate ``exp(x) - 1`` for all elements in the array.
exp2 : Calculate ``2**x`` for all elements in the array.
Notes
-----
The irrational number ``e`` is also known as Euler's number. It is
approximately 2.718281, and is the base of the natural logarithm,
``ln`` (this means that, if :math:`x = \\ln y = \\log_e y`,
then :math:`e^x = y`). For real input, ``exp(x)`` is always positive.
For complex arguments, ``x = a + ib``, we can write
:math:`e^x = e^a e^{ib}`. The first term, :math:`e^a`, is already
known (it is the real argument, described above). The second term,
:math:`e^{ib}`, is :math:`\\cos b + i \\sin b`, a function with
magnitude 1 and a periodic phase.
References
----------
.. [1] Wikipedia, "Exponential function",
https://en.wikipedia.org/wiki/Exponential_function
.. [2] M. Abramowitz and I. A. Stegun, "Handbook of Mathematical Functions
with Formulas, Graphs, and Mathematical Tables," Dover, 1964, p. 69,
http://www.math.sfu.ca/~cbm/aands/page_69.htm
Examples
--------
Plot the magnitude and phase of ``exp(x)`` in the complex plane:
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-2*np.pi, 2*np.pi, 100)
>>> xx = x + 1j * x[:, np.newaxis] # a + ib over complex plane
>>> out = np.exp(xx)
>>> plt.subplot(121)
>>> plt.imshow(np.abs(out),
... extent=[-2*np.pi, 2*np.pi, -2*np.pi, 2*np.pi], cmap='gray')
>>> plt.title('Magnitude of exp(x)')
>>> plt.subplot(122)
>>> plt.imshow(np.angle(out),
... extent=[-2*np.pi, 2*np.pi, -2*np.pi, 2*np.pi], cmap='hsv')
>>> plt.title('Phase (angle) of exp(x)')
>>> plt.show()
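As a small numerical check of the complex decomposition described in the
Notes, Euler's identity :math:`e^{i\\pi} = -1` holds to rounding error
(illustrative):
>>> np.allclose(np.exp(1j * np.pi), -1)
True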
""")
add_newdoc('numpy.core.umath', 'exp2',
"""
Calculate `2**p` for all `p` in the input array.
Parameters
----------
x : array_like
Input values.
$PARAMS
Returns
-------
out : ndarray or scalar
Element-wise 2 to the power `x`.
$OUT_SCALAR_1
See Also
--------
power
Notes
-----
.. versionadded:: 1.3.0
Examples
--------
>>> np.exp2([2, 3])
array([ 4., 8.])
""")
add_newdoc('numpy.core.umath', 'expm1',
"""
Calculate ``exp(x) - 1`` for all elements in the array.
Parameters
----------
x : array_like
Input values.
$PARAMS
Returns
-------
out : ndarray or scalar
Element-wise exponential minus one: ``out = exp(x) - 1``.
$OUT_SCALAR_1
See Also
--------
log1p : ``log(1 + x)``, the inverse of expm1.
Notes
-----
This function provides greater precision than ``exp(x) - 1``
for small values of ``x``.
Examples
--------
The true value of ``exp(1e-10) - 1`` is ``1.00000000005e-10`` to
about 32 significant digits. This example shows the superiority of
expm1 in this case.
>>> np.expm1(1e-10)
1.00000000005e-10
>>> np.exp(1e-10) - 1
1.000000082740371e-10
""")
add_newdoc('numpy.core.umath', 'fabs',
"""
Compute the absolute values element-wise.
This function returns the absolute values (positive magnitude) of the
data in `x`. Complex values are not handled, use `absolute` to find the
absolute values of complex data.
Parameters
----------
x : array_like
The array of numbers for which the absolute values are required. If
`x` is a scalar, the result `y` will also be a scalar.
$PARAMS
Returns
-------
y : ndarray or scalar
The absolute values of `x`, the returned values are always floats.
$OUT_SCALAR_1
See Also
--------
absolute : Absolute values including `complex` types.
Examples
--------
>>> np.fabs(-1)
1.0
>>> np.fabs([-1.2, 1.2])
array([ 1.2, 1.2])
""")
add_newdoc('numpy.core.umath', 'floor',
"""
Return the floor of the input, element-wise.
The floor of the scalar `x` is the largest integer `i`, such that
`i <= x`. It is often denoted as :math:`\\lfloor x \\rfloor`.
Parameters
----------
x : array_like
Input data.
$PARAMS
Returns
-------
y : ndarray or scalar
The floor of each element in `x`.
$OUT_SCALAR_1
See Also
--------
ceil, trunc, rint, fix
Notes
-----
Some spreadsheet programs calculate the "floor-towards-zero", where
``floor(-2.5) == -2``. NumPy instead uses the definition of
`floor` where `floor(-2.5) == -3`. The "floor-towards-zero"
function is called ``fix`` in NumPy.
Examples
--------
>>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
>>> np.floor(a)
array([-2., -2., -1., 0., 1., 1., 2.])
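The round-towards-zero behaviour of ``np.fix`` mentioned above can be seen
directly (printed to keep the doctest stable across NumPy versions):
>>> print(np.floor(-2.5), np.fix(-2.5))
-3.0 -2.0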
""")
add_newdoc('numpy.core.umath', 'floor_divide',
"""
Return the largest integer smaller or equal to the division of the inputs.
It is equivalent to the Python ``//`` operator and pairs with the
Python ``%`` (`remainder`) function, so that ``a = a % b + b * (a // b)``
up to roundoff.
Parameters
----------
x1 : array_like
Numerator.
x2 : array_like
Denominator.
$BROADCASTABLE_2
$PARAMS
Returns
-------
y : ndarray
y = floor(`x1`/`x2`)
$OUT_SCALAR_2
See Also
--------
remainder : Remainder complementary to floor_divide.
divmod : Simultaneous floor division and remainder.
divide : Standard division.
floor : Round a number to the nearest integer toward minus infinity.
ceil : Round a number to the nearest integer toward infinity.
Examples
--------
>>> np.floor_divide(7,3)
2
>>> np.floor_divide([1., 2., 3., 4.], 2.5)
array([ 0., 0., 1., 1.])
The ``//`` operator can be used as a shorthand for ``np.floor_divide``
on ndarrays.
>>> x1 = np.array([1., 2., 3., 4.])
>>> x1 // 2.5
array([0., 0., 1., 1.])
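The pairing with ``%`` noted above can be checked directly (printed for a
version-stable doctest):
>>> print(np.remainder(7, 3) + 3 * np.floor_divide(7, 3))
7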
""")
add_newdoc('numpy.core.umath', 'fmod',
"""
Return the element-wise remainder of division.
This is the NumPy implementation of the C library function fmod; the
remainder has the same sign as the dividend `x1`. It is equivalent to
the Matlab(TM) ``rem`` function and should not be confused with the
Python modulus operator ``x1 % x2``.
Parameters
----------
x1 : array_like
Dividend.
x2 : array_like
Divisor.
$BROADCASTABLE_2
$PARAMS
Returns
-------
y : array_like
The remainder of the division of `x1` by `x2`.
$OUT_SCALAR_2
See Also
--------
remainder : Equivalent to the Python ``%`` operator.
divide
Notes
-----
The result of the modulo operation for negative dividend and divisors
is bound by conventions. For `fmod`, the sign of result is the sign of
the dividend, while for `remainder` the sign of the result is the sign
of the divisor. The `fmod` function is equivalent to the Matlab(TM)
``rem`` function.
Examples
--------
>>> np.fmod([-3, -2, -1, 1, 2, 3], 2)
array([-1, 0, -1, 1, 0, 1])
>>> np.remainder([-3, -2, -1, 1, 2, 3], 2)
array([1, 0, 1, 1, 0, 1])
>>> np.fmod([5, 3], [2, 2.])
array([ 1., 1.])
>>> a = np.arange(-3, 3).reshape(3, 2)
>>> a
array([[-3, -2],
[-1, 0],
[ 1, 2]])
>>> np.fmod(a, [2,2])
array([[-1, 0],
[-1, 0],
[ 1, 0]])
""")
add_newdoc('numpy.core.umath', 'greater',
"""
Return the truth value of (x1 > x2) element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays.
$BROADCASTABLE_2
$PARAMS
Returns
-------
out : ndarray or scalar
Output array, element-wise comparison of `x1` and `x2`.
Typically of type bool, unless ``dtype=object`` is passed.
$OUT_SCALAR_2
See Also
--------
greater_equal, less, less_equal, equal, not_equal
Examples
--------
>>> np.greater([4,2],[2,2])
array([ True, False])
The ``>`` operator can be used as a shorthand for ``np.greater`` on
ndarrays.
>>> a = np.array([4, 2])
>>> b = np.array([2, 2])
>>> a > b
array([ True, False])
""")
add_newdoc('numpy.core.umath', 'greater_equal',
"""
Return the truth value of (x1 >= x2) element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays.
$BROADCASTABLE_2
$PARAMS
Returns
-------
out : bool or ndarray of bool
Output array, element-wise comparison of `x1` and `x2`.
Typically of type bool, unless ``dtype=object`` is passed.
$OUT_SCALAR_2
See Also
--------
greater, less, less_equal, equal, not_equal
Examples
--------
>>> np.greater_equal([4, 2, 1], [2, 2, 2])
array([ True, True, False])
The ``>=`` operator can be used as a shorthand for ``np.greater_equal``
on ndarrays.
>>> a = np.array([4, 2, 1])
>>> b = np.array([2, 2, 2])
>>> a >= b
array([ True, True, False])
""")
add_newdoc('numpy.core.umath', 'hypot',
"""
Given the "legs" of a right triangle, return its hypotenuse.
Equivalent to ``sqrt(x1**2 + x2**2)``, element-wise. If `x1` or
`x2` is scalar_like (i.e., unambiguously cast-able to a scalar type),
it is broadcast for use with each element of the other argument.
(See Examples)
Parameters
----------
x1, x2 : array_like
Leg of the triangle(s).
$BROADCASTABLE_2
$PARAMS
Returns
-------
z : ndarray
The hypotenuse of the triangle(s).
$OUT_SCALAR_2
Examples
--------
>>> np.hypot(3*np.ones((3, 3)), 4*np.ones((3, 3)))
array([[ 5., 5., 5.],
[ 5., 5., 5.],
[ 5., 5., 5.]])
Example showing broadcast of scalar_like argument:
>>> np.hypot(3*np.ones((3, 3)), [4])
array([[ 5., 5., 5.],
[ 5., 5., 5.],
[ 5., 5., 5.]])
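The classic 3-4-5 right triangle, as a scalar example (printed for a
version-stable doctest):
>>> print(np.hypot(3.0, 4.0))
5.0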
""")
add_newdoc('numpy.core.umath', 'invert',
"""
Compute bit-wise inversion, or bit-wise NOT, element-wise.
Computes the bit-wise NOT of the underlying binary representation of
the integers in the input arrays. This ufunc implements the C/Python
operator ``~``.
For signed integer inputs, the two's complement is returned. In a
two's-complement system negative numbers are represented by the two's
complement of the absolute value. This is the most common method of
representing signed integers on computers [1]_. An N-bit
two's-complement system can represent every integer in the range
:math:`-2^{N-1}` to :math:`+2^{N-1}-1`.
Parameters
----------
x : array_like
Only integer and boolean types are handled.
$PARAMS
Returns
-------
out : ndarray or scalar
Result.
$OUT_SCALAR_1
See Also
--------
bitwise_and, bitwise_or, bitwise_xor
logical_not
binary_repr :
Return the binary representation of the input number as a string.
Notes
-----
`bitwise_not` is an alias for `invert`:
>>> np.bitwise_not is np.invert
True
References
----------
.. [1] Wikipedia, "Two's complement",
https://en.wikipedia.org/wiki/Two's_complement
Examples
--------
We've seen that 13 is represented by ``00001101``.
The invert or bit-wise NOT of 13 is then:
>>> x = np.invert(np.array(13, dtype=np.uint8))
>>> x
242
>>> np.binary_repr(x, width=8)
'11110010'
The result depends on the bit-width:
>>> x = np.invert(np.array(13, dtype=np.uint16))
>>> x
65522
>>> np.binary_repr(x, width=16)
'1111111111110010'
When using signed integer types the result is the two's complement of
the result for the unsigned type:
>>> np.invert(np.array([13], dtype=np.int8))
array([-14], dtype=int8)
>>> np.binary_repr(-14, width=8)
'11110010'
Booleans are accepted as well:
>>> np.invert(np.array([True, False]))
array([False, True])
The ``~`` operator can be used as a shorthand for ``np.invert`` on
ndarrays.
>>> x1 = np.array([True, False])
>>> ~x1
array([False, True])
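For signed integers, bit-wise NOT follows the two's-complement identity
``~x == -(x + 1)`` (illustrative check, printed for a stable doctest):
>>> print(np.invert(np.int8(5)), -(5 + 1))
-6 -6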
""")
add_newdoc('numpy.core.umath', 'isfinite',
"""
Test element-wise for finiteness (not infinity or not Not a Number).
The result is returned as a boolean array.
Parameters
----------
x : array_like
Input values.
$PARAMS
Returns
-------
y : ndarray, bool
True where ``x`` is not positive infinity, negative infinity,
or NaN; false otherwise.
$OUT_SCALAR_1
See Also
--------
isinf, isneginf, isposinf, isnan
Notes
-----
Not a Number, positive infinity and negative infinity are considered
to be non-finite.
NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic
(IEEE 754). This means that Not a Number is not equivalent to infinity,
and that positive infinity is not equivalent to negative infinity (but
infinity is equivalent to positive infinity). Errors result if the
second argument is also supplied when `x` is a scalar input, or if the
first and second arguments have different shapes.
Examples
--------
>>> np.isfinite(1)
True
>>> np.isfinite(0)
True
>>> np.isfinite(np.nan)
False
>>> np.isfinite(np.inf)
False
>>> np.isfinite(np.NINF)
False
>>> np.isfinite([np.log(-1.),1.,np.log(0)])
array([False, True, False])
>>> x = np.array([-np.inf, 0., np.inf])
>>> y = np.array([2, 2, 2])
>>> np.isfinite(x, y)
array([0, 1, 0])
>>> y
array([0, 1, 0])
""")
add_newdoc('numpy.core.umath', 'isinf',
"""
Test element-wise for positive or negative infinity.
Returns a boolean array of the same shape as `x`, True where ``x ==
+/-inf``, otherwise False.
Parameters
----------
x : array_like
Input values
$PARAMS
Returns
-------
y : bool (scalar) or boolean ndarray
True where ``x`` is positive or negative infinity, false otherwise.
$OUT_SCALAR_1
See Also
--------
isneginf, isposinf, isnan, isfinite
Notes
-----
NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic
(IEEE 754).
Errors result if the second argument is supplied when the first
argument is a scalar, or if the first and second arguments have
different shapes.
Examples
--------
>>> np.isinf(np.inf)
True
>>> np.isinf(np.nan)
False
>>> np.isinf(np.NINF)
True
>>> np.isinf([np.inf, -np.inf, 1.0, np.nan])
array([ True, True, False, False])
>>> x = np.array([-np.inf, 0., np.inf])
>>> y = np.array([2, 2, 2])
>>> np.isinf(x, y)
array([1, 0, 1])
>>> y
array([1, 0, 1])
""")
add_newdoc('numpy.core.umath', 'isnan',
"""
Test element-wise for NaN and return result as a boolean array.
Parameters
----------
x : array_like
Input array.
$PARAMS
Returns
-------
y : ndarray or bool
True where ``x`` is NaN, false otherwise.
$OUT_SCALAR_1
See Also
--------
isinf, isneginf, isposinf, isfinite, isnat
Notes
-----
NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic
(IEEE 754). This means that Not a Number is not equivalent to infinity.
Examples
--------
>>> np.isnan(np.nan)
True
>>> np.isnan(np.inf)
False
>>> np.isnan([np.log(-1.),1.,np.log(0)])
array([ True, False, False])
""")
add_newdoc('numpy.core.umath', 'isnat',
"""
Test element-wise for NaT (not a time) and return result as a boolean array.
.. versionadded:: 1.13.0
Parameters
----------
x : array_like
Input array with datetime or timedelta data type.
$PARAMS
Returns
-------
y : ndarray or bool
True where ``x`` is NaT, false otherwise.
$OUT_SCALAR_1
See Also
--------
isnan, isinf, isneginf, isposinf, isfinite
Examples
--------
>>> np.isnat(np.datetime64("NaT"))
True
>>> np.isnat(np.datetime64("2016-01-01"))
False
>>> np.isnat(np.array(["NaT", "2016-01-01"], dtype="datetime64[ns]"))
array([ True, False])
""")
add_newdoc('numpy.core.umath', 'left_shift',
"""
Shift the bits of an integer to the left.
Bits are shifted to the left by appending `x2` 0s at the right of `x1`.
Since the internal representation of numbers is in binary format, this
operation is equivalent to multiplying `x1` by ``2**x2``.
Parameters
----------
x1 : array_like of integer type
Input values.
x2 : array_like of integer type
Number of zeros to append to `x1`. Has to be non-negative.
$BROADCASTABLE_2
$PARAMS
Returns
-------
out : array of integer type
Return `x1` with bits shifted `x2` times to the left.
$OUT_SCALAR_2
See Also
--------
right_shift : Shift the bits of an integer to the right.
binary_repr : Return the binary representation of the input number
as a string.
Examples
--------
>>> np.binary_repr(5)
'101'
>>> np.left_shift(5, 2)
20
>>> np.binary_repr(20)
'10100'
>>> np.left_shift(5, [1,2,3])
array([10, 20, 40])
Note that the dtype of the second argument may change the dtype of the
result and can lead to unexpected results in some cases (see
:ref:`Casting Rules <ufuncs.casting>`):
>>> a = np.left_shift(np.uint8(255), 1) # Expect 254
>>> print(a, type(a)) # Unexpected result due to upcasting
510 <class 'numpy.int64'>
>>> b = np.left_shift(np.uint8(255), np.uint8(1))
>>> print(b, type(b))
254 <class 'numpy.uint8'>
The ``<<`` operator can be used as a shorthand for ``np.left_shift`` on
ndarrays.
>>> x1 = 5
>>> x2 = np.array([1, 2, 3])
>>> x1 << x2
array([10, 20, 40])
""")
add_newdoc('numpy.core.umath', 'less',
"""
Return the truth value of (x1 < x2) element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays.
$BROADCASTABLE_2
$PARAMS
Returns
-------
out : ndarray or scalar
Output array, element-wise comparison of `x1` and `x2`.
Typically of type bool, unless ``dtype=object`` is passed.
$OUT_SCALAR_2
See Also
--------
greater, less_equal, greater_equal, equal, not_equal
Examples
--------
>>> np.less([1, 2], [2, 2])
array([ True, False])
The ``<`` operator can be used as a shorthand for ``np.less`` on ndarrays.
>>> a = np.array([1, 2])
>>> b = np.array([2, 2])
>>> a < b
array([ True, False])
""")
add_newdoc('numpy.core.umath', 'less_equal',
"""
Return the truth value of (x1 <= x2) element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays.
$BROADCASTABLE_2
$PARAMS
Returns
-------
out : ndarray or scalar
Output array, element-wise comparison of `x1` and `x2`.
Typically of type bool, unless ``dtype=object`` is passed.
$OUT_SCALAR_2
See Also
--------
greater, less, greater_equal, equal, not_equal
Examples
--------
>>> np.less_equal([4, 2, 1], [2, 2, 2])
array([False, True, True])
The ``<=`` operator can be used as a shorthand for ``np.less_equal`` on
ndarrays.
>>> a = np.array([4, 2, 1])
>>> b = np.array([2, 2, 2])
>>> a <= b
array([False, True, True])
""")
add_newdoc('numpy.core.umath', 'log',
"""
Natural logarithm, element-wise.
The natural logarithm `log` is the inverse of the exponential function,
so that `log(exp(x)) = x`. The natural logarithm is logarithm in base
`e`.
Parameters
----------
x : array_like
Input value.
$PARAMS
Returns
-------
y : ndarray
The natural logarithm of `x`, element-wise.
$OUT_SCALAR_1
See Also
--------
log10, log2, log1p, emath.log
Notes
-----
Logarithm is a multivalued function: for each `x` there is an infinite
number of `z` such that `exp(z) = x`. The convention is to return the
`z` whose imaginary part lies in `[-pi, pi]`.
For real-valued input data types, `log` always returns real output. For
each value that cannot be expressed as a real number or infinity, it
yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `log` is a complex analytical function that
has a branch cut `[-inf, 0]` and is continuous from above on it. `log`
handles the floating-point negative zero as an infinitesimal negative
number, conforming to the C99 standard.
References
----------
.. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 67. http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Logarithm". https://en.wikipedia.org/wiki/Logarithm
Examples
--------
>>> np.log([1, np.e, np.e**2, 0])
array([ 0., 1., 2., -Inf])
""")
add_newdoc('numpy.core.umath', 'log10',
"""
Return the base 10 logarithm of the input array, element-wise.
Parameters
----------
x : array_like
Input values.
$PARAMS
Returns
-------
y : ndarray
The logarithm to the base 10 of `x`, element-wise. NaNs are
returned where x is negative.
$OUT_SCALAR_1
See Also
--------
emath.log10
Notes
-----
Logarithm is a multivalued function: for each `x` there is an infinite
number of `z` such that `10**z = x`. The convention is to return the
`z` whose imaginary part lies in `[-pi, pi]`.
For real-valued input data types, `log10` always returns real output.
For each value that cannot be expressed as a real number or infinity,
it yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `log10` is a complex analytical function that
has a branch cut `[-inf, 0]` and is continuous from above on it.
`log10` handles the floating-point negative zero as an infinitesimal
negative number, conforming to the C99 standard.
References
----------
.. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 67. http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Logarithm". https://en.wikipedia.org/wiki/Logarithm
Examples
--------
>>> np.log10([1e-15, -3.])
array([-15., nan])
""")
add_newdoc('numpy.core.umath', 'log2',
"""
Base-2 logarithm of `x`.
Parameters
----------
x : array_like
Input values.
$PARAMS
Returns
-------
y : ndarray
Base-2 logarithm of `x`.
$OUT_SCALAR_1
See Also
--------
log, log10, log1p, emath.log2
Notes
-----
.. versionadded:: 1.3.0
Logarithm is a multivalued function: for each `x` there is an infinite
number of `z` such that `2**z = x`. The convention is to return the `z`
whose imaginary part lies in `[-pi, pi]`.
For real-valued input data types, `log2` always returns real output.
For each value that cannot be expressed as a real number or infinity,
it yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `log2` is a complex analytical function that
has a branch cut `[-inf, 0]` and is continuous from above on it. `log2`
handles the floating-point negative zero as an infinitesimal negative
number, conforming to the C99 standard.
Examples
--------
>>> x = np.array([0, 1, 2, 2**4])
>>> np.log2(x)
array([-Inf, 0., 1., 4.])
>>> xi = np.array([0+1.j, 1, 2+0.j, 4.j])
>>> np.log2(xi)
array([ 0.+2.26618007j, 0.+0.j , 1.+0.j , 2.+2.26618007j])
""")
add_newdoc('numpy.core.umath', 'logaddexp',
"""
Logarithm of the sum of exponentiations of the inputs.
Calculates ``log(exp(x1) + exp(x2))``. This function is useful in
statistics where the calculated probabilities of events may be so small
as to exceed the range of normal floating point numbers. In such cases
the logarithm of the calculated probability is stored. This function
allows adding probabilities stored in such a fashion.
Parameters
----------
x1, x2 : array_like
Input values.
$BROADCASTABLE_2
$PARAMS
Returns
-------
result : ndarray
Logarithm of ``exp(x1) + exp(x2)``.
$OUT_SCALAR_2
See Also
--------
logaddexp2: Logarithm of the sum of exponentiations of inputs in base 2.
Notes
-----
.. versionadded:: 1.3.0
Examples
--------
>>> prob1 = np.log(1e-50)
>>> prob2 = np.log(2.5e-50)
>>> prob12 = np.logaddexp(prob1, prob2)
>>> prob12
-113.87649168120691
>>> np.exp(prob12)
3.5000000000000057e-50
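For values whose exponentials underflow to zero, the log-domain sum is still
well defined (illustrative; ``np.allclose`` absorbs rounding):
>>> float(np.exp(-1000.) + np.exp(-1000.))   # the direct sum underflows
0.0
>>> np.allclose(np.logaddexp(-1000., -1000.), -1000. + np.log(2))
True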
""")
add_newdoc('numpy.core.umath', 'logaddexp2',
"""
Logarithm of the sum of exponentiations of the inputs in base-2.
Calculates ``log2(2**x1 + 2**x2)``. This function is useful in machine
learning when the calculated probabilities of events may be so small as
to exceed the range of normal floating point numbers. In such cases
the base-2 logarithm of the calculated probability can be used instead.
This function allows adding probabilities stored in such a fashion.
Parameters
----------
x1, x2 : array_like
Input values.
$BROADCASTABLE_2
$PARAMS
Returns
-------
result : ndarray
Base-2 logarithm of ``2**x1 + 2**x2``.
$OUT_SCALAR_2
See Also
--------
logaddexp: Logarithm of the sum of exponentiations of the inputs.
Notes
-----
.. versionadded:: 1.3.0
Examples
--------
>>> prob1 = np.log2(1e-50)
>>> prob2 = np.log2(2.5e-50)
>>> prob12 = np.logaddexp2(prob1, prob2)
>>> prob1, prob2, prob12
(-166.09640474436813, -164.77447664948076, -164.28904982231052)
>>> 2**prob12
3.4999999999999914e-50
""")
add_newdoc('numpy.core.umath', 'log1p',
"""
Return the natural logarithm of one plus the input array, element-wise.
Calculates ``log(1 + x)``.
Parameters
----------
x : array_like
Input values.
$PARAMS
Returns
-------
y : ndarray
Natural logarithm of `1 + x`, element-wise.
$OUT_SCALAR_1
See Also
--------
expm1 : ``exp(x) - 1``, the inverse of `log1p`.
Notes
-----
For real-valued input, `log1p` is accurate also for `x` so small
that `1 + x == 1` in floating-point accuracy.
Logarithm is a multivalued function: for each `x` there is an infinite
number of `z` such that `exp(z) = 1 + x`. The convention is to return
the `z` whose imaginary part lies in `[-pi, pi]`.
For real-valued input data types, `log1p` always returns real output.
For each value that cannot be expressed as a real number or infinity,
it yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `log1p` is a complex analytical function that
has a branch cut `[-inf, -1]` and is continuous from above on it.
`log1p` handles the floating-point negative zero as an infinitesimal
negative number, conforming to the C99 standard.
References
----------
.. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 67. http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Logarithm". https://en.wikipedia.org/wiki/Logarithm
Examples
--------
>>> np.log1p(1e-99)
1e-99
>>> np.log(1 + 1e-99)
0.0
""")
add_newdoc('numpy.core.umath', 'logical_and',
"""
Compute the truth value of x1 AND x2 element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays.
$BROADCASTABLE_2
$PARAMS
Returns
-------
y : ndarray or bool
Boolean result of the logical AND operation applied to the elements
of `x1` and `x2`; the shape is determined by broadcasting.
$OUT_SCALAR_2
See Also
--------
logical_or, logical_not, logical_xor
bitwise_and
Examples
--------
>>> np.logical_and(True, False)
False
>>> np.logical_and([True, False], [False, False])
array([False, False])
>>> x = np.arange(5)
>>> np.logical_and(x>1, x<4)
array([False, False, True, True, False])
The ``&`` operator can be used as a shorthand for ``np.logical_and`` on
boolean ndarrays.
>>> a = np.array([True, False])
>>> b = np.array([False, False])
>>> a & b
array([False, False])
""")
add_newdoc('numpy.core.umath', 'logical_not',
"""
Compute the truth value of NOT x element-wise.
Parameters
----------
x : array_like
Logical NOT is applied to the elements of `x`.
$PARAMS
Returns
-------
y : bool or ndarray of bool
Boolean result with the same shape as `x` of the NOT operation
on elements of `x`.
$OUT_SCALAR_1
See Also
--------
logical_and, logical_or, logical_xor
Examples
--------
>>> np.logical_not(3)
False
>>> np.logical_not([True, False, 0, 1])
array([False, True, True, False])
>>> x = np.arange(5)
>>> np.logical_not(x<3)
array([False, False, False, True, True])
""")
add_newdoc('numpy.core.umath', 'logical_or',
"""
Compute the truth value of x1 OR x2 element-wise.
Parameters
----------
x1, x2 : array_like
Logical OR is applied to the elements of `x1` and `x2`.
$BROADCASTABLE_2
$PARAMS
Returns
-------
y : ndarray or bool
Boolean result of the logical OR operation applied to the elements
of `x1` and `x2`; the shape is determined by broadcasting.
$OUT_SCALAR_2
See Also
--------
logical_and, logical_not, logical_xor
bitwise_or
Examples
--------
>>> np.logical_or(True, False)
True
>>> np.logical_or([True, False], [False, False])
array([ True, False])
>>> x = np.arange(5)
>>> np.logical_or(x < 1, x > 3)
array([ True, False, False, False, True])
The ``|`` operator can be used as a shorthand for ``np.logical_or`` on
boolean ndarrays.
>>> a = np.array([True, False])
>>> b = np.array([False, False])
>>> a | b
array([ True, False])
""")
add_newdoc('numpy.core.umath', 'logical_xor',
"""
Compute the truth value of x1 XOR x2, element-wise.
Parameters
----------
x1, x2 : array_like
Logical XOR is applied to the elements of `x1` and `x2`.
$BROADCASTABLE_2
$PARAMS
Returns
-------
y : bool or ndarray of bool
Boolean result of the logical XOR operation applied to the elements
of `x1` and `x2`; the shape is determined by broadcasting.
$OUT_SCALAR_2
See Also
--------
logical_and, logical_or, logical_not, bitwise_xor
Examples
--------
>>> np.logical_xor(True, False)
True
>>> np.logical_xor([True, True, False, False], [True, False, True, False])
array([False, True, True, False])
>>> x = np.arange(5)
>>> np.logical_xor(x < 1, x > 3)
array([ True, False, False, False, True])
Simple example showing support of broadcasting
>>> np.logical_xor(0, np.eye(2))
array([[ True, False],
[False, True]])
""")
add_newdoc('numpy.core.umath', 'maximum',
"""
Element-wise maximum of array elements.
Compare two arrays and returns a new array containing the element-wise
maxima. If one of the elements being compared is a NaN, then that
element is returned. If both elements are NaNs then the first is
returned. The latter distinction is important for complex NaNs, which
are defined as at least one of the real or imaginary parts being a NaN.
The net effect is that NaNs are propagated.
Parameters
----------
x1, x2 : array_like
The arrays holding the elements to be compared.
$BROADCASTABLE_2
$PARAMS
Returns
-------
y : ndarray or scalar
The maximum of `x1` and `x2`, element-wise.
$OUT_SCALAR_2
See Also
--------
minimum :
Element-wise minimum of two arrays, propagates NaNs.
fmax :
Element-wise maximum of two arrays, ignores NaNs.
amax :
The maximum value of an array along a given axis, propagates NaNs.
nanmax :
The maximum value of an array along a given axis, ignores NaNs.
fmin, amin, nanmin
Notes
-----
The maximum is equivalent to ``np.where(x1 >= x2, x1, x2)`` when
neither x1 nor x2 are NaNs, but it is faster and does proper
broadcasting.
Examples
--------
>>> np.maximum([2, 3, 4], [1, 5, 2])
array([2, 5, 4])
>>> np.maximum(np.eye(2), [0.5, 2]) # broadcasting
array([[ 1. , 2. ],
[ 0.5, 2. ]])
>>> np.maximum([np.nan, 0, np.nan], [0, np.nan, np.nan])
array([nan, nan, nan])
>>> np.maximum(np.Inf, 1)
inf
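The equivalence with ``np.where`` stated in the Notes can be verified
directly (illustrative):
>>> x1 = np.array([2, 3, 4])
>>> x2 = np.array([1, 5, 2])
>>> np.array_equal(np.maximum(x1, x2), np.where(x1 >= x2, x1, x2))
True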
""")
add_newdoc('numpy.core.umath', 'minimum',
"""
Element-wise minimum of array elements.
Compare two arrays and returns a new array containing the element-wise
minima. If one of the elements being compared is a NaN, then that
element is returned. If both elements are NaNs then the first is
returned. The latter distinction is important for complex NaNs, which
are defined as at least one of the real or imaginary parts being a NaN.
The net effect is that NaNs are propagated.
Parameters
----------
x1, x2 : array_like
The arrays holding the elements to be compared.
$BROADCASTABLE_2
$PARAMS
Returns
-------
y : ndarray or scalar
The minimum of `x1` and `x2`, element-wise.
$OUT_SCALAR_2
See Also
--------
maximum :
Element-wise maximum of two arrays, propagates NaNs.
fmin :
Element-wise minimum of two arrays, ignores NaNs.
amin :
The minimum value of an array along a given axis, propagates NaNs.
nanmin :
The minimum value of an array along a given axis, ignores NaNs.
fmax, amax, nanmax
Notes
-----
The minimum is equivalent to ``np.where(x1 <= x2, x1, x2)`` when
neither x1 nor x2 are NaNs, but it is faster and does proper
broadcasting.
Examples
--------
>>> np.minimum([2, 3, 4], [1, 5, 2])
array([1, 3, 2])
>>> np.minimum(np.eye(2), [0.5, 2]) # broadcasting
array([[ 0.5, 0. ],
[ 0. , 1. ]])
>>> np.minimum([np.nan, 0, np.nan],[0, np.nan, np.nan])
array([nan, nan, nan])
>>> np.minimum(-np.Inf, 1)
-inf
""")
add_newdoc('numpy.core.umath', 'fmax',
"""
Element-wise maximum of array elements.
Compare two arrays and returns a new array containing the element-wise
maxima. If one of the elements being compared is a NaN, then the
non-nan element is returned. If both elements are NaNs then the first
is returned. The latter distinction is important for complex NaNs,
which are defined as at least one of the real or imaginary parts being
a NaN. The net effect is that NaNs are ignored when possible.
Parameters
----------
x1, x2 : array_like
The arrays holding the elements to be compared.
$BROADCASTABLE_2
$PARAMS
Returns
-------
y : ndarray or scalar
The maximum of `x1` and `x2`, element-wise.
$OUT_SCALAR_2
See Also
--------
fmin :
Element-wise minimum of two arrays, ignores NaNs.
maximum :
Element-wise maximum of two arrays, propagates NaNs.
amax :
The maximum value of an array along a given axis, propagates NaNs.
nanmax :
The maximum value of an array along a given axis, ignores NaNs.
minimum, amin, nanmin
Notes
-----
.. versionadded:: 1.3.0
The fmax is equivalent to ``np.where(x1 >= x2, x1, x2)`` when neither
x1 nor x2 are NaNs, but it is faster and does proper broadcasting.
Examples
--------
>>> np.fmax([2, 3, 4], [1, 5, 2])
array([2, 5, 4])
>>> np.fmax(np.eye(2), [0.5, 2])
array([[ 1. , 2. ],
[ 0.5, 2. ]])
>>> np.fmax([np.nan, 0, np.nan],[0, np.nan, np.nan])
array([ 0., 0., nan])
""")
add_newdoc('numpy.core.umath', 'fmin',
"""
Element-wise minimum of array elements.
Compare two arrays and returns a new array containing the element-wise
minima. If one of the elements being compared is a NaN, then the
non-nan element is returned. If both elements are NaNs then the first
is returned. The latter distinction is important for complex NaNs,
which are defined as at least one of the real or imaginary parts being
a NaN. The net effect is that NaNs are ignored when possible.
Parameters
----------
x1, x2 : array_like
The arrays holding the elements to be compared.
$BROADCASTABLE_2
$PARAMS
Returns
-------
y : ndarray or scalar
The minimum of `x1` and `x2`, element-wise.
$OUT_SCALAR_2
See Also
--------
fmax :
Element-wise maximum of two arrays, ignores NaNs.
minimum :
Element-wise minimum of two arrays, propagates NaNs.
amin :
The minimum value of an array along a given axis, propagates NaNs.
nanmin :
The minimum value of an array along a given axis, ignores NaNs.
maximum, amax, nanmax
Notes
-----
.. versionadded:: 1.3.0
The fmin is equivalent to ``np.where(x1 <= x2, x1, x2)`` when neither
x1 nor x2 are NaNs, but it is faster and does proper broadcasting.
Examples
--------
>>> np.fmin([2, 3, 4], [1, 5, 2])
array([1, 3, 2])
>>> np.fmin(np.eye(2), [0.5, 2])
array([[ 0.5, 0. ],
[ 0. , 1. ]])
>>> np.fmin([np.nan, 0, np.nan],[0, np.nan, np.nan])
array([ 0., 0., nan])
""")
add_newdoc('numpy.core.umath', 'clip',
"""
Clip (limit) the values in an array.
Given an interval, values outside the interval are clipped to
the interval edges. For example, if an interval of ``[0, 1]``
is specified, values smaller than 0 become 0, and values larger
than 1 become 1.
Equivalent to but faster than ``np.minimum(np.maximum(a, a_min), a_max)``.
Parameters
----------
a : array_like
Array containing elements to clip.
a_min : array_like
Minimum value.
a_max : array_like
Maximum value.
out : ndarray, optional
The results will be placed in this array. It may be the input
array for in-place clipping. `out` must be of the right shape
to hold the output. Its type is preserved.
$PARAMS
See Also
--------
numpy.clip :
Wrapper that makes the ``a_min`` and ``a_max`` arguments optional,
dispatching to one of `~numpy.core.umath.clip`,
`~numpy.core.umath.minimum`, and `~numpy.core.umath.maximum`.
Returns
-------
clipped_array : ndarray
An array with the elements of `a`, but where values
< `a_min` are replaced with `a_min`, and those > `a_max`
with `a_max`.
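Examples
--------
A minimal illustration through the public ``np.clip`` wrapper listed above
(integer output keeps the doctest stable across NumPy versions):
>>> np.clip(np.arange(10), 1, 8)
array([1, 1, 2, 3, 4, 5, 6, 7, 8, 8])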
""")
add_newdoc('numpy.core.umath', 'matmul',
"""
Matrix product of two arrays.
Parameters
----------
x1, x2 : array_like
Input arrays, scalars not allowed.
out : ndarray, optional
A location into which the result is stored. If provided, it must have
a shape that matches the signature `(n,k),(k,m)->(n,m)`. If not
provided or None, a freshly-allocated array is returned.
**kwargs
For other keyword-only arguments, see the
:ref:`ufunc docs <ufuncs.kwargs>`.
.. versionadded:: 1.16
Now handles ufunc kwargs
Returns
-------
y : ndarray
The matrix product of the inputs.
This is a scalar only when both x1, x2 are 1-d vectors.
Raises
------
ValueError
If the last dimension of `x1` is not the same size as
the second-to-last dimension of `x2`.
If a scalar value is passed in.
See Also
--------
vdot : Complex-conjugating dot product.
tensordot : Sum products over arbitrary axes.
einsum : Einstein summation convention.
dot : alternative matrix product with different broadcasting rules.
Notes
-----
The behavior depends on the arguments in the following way.
- If both arguments are 2-D they are multiplied like conventional
matrices.
- If either argument is N-D, N > 2, it is treated as a stack of
matrices residing in the last two indexes and broadcast accordingly.
- If the first argument is 1-D, it is promoted to a matrix by
prepending a 1 to its dimensions. After matrix multiplication
the prepended 1 is removed.
- If the second argument is 1-D, it is promoted to a matrix by
appending a 1 to its dimensions. After matrix multiplication
the appended 1 is removed.
``matmul`` differs from ``dot`` in two important ways:
- Multiplication by scalars is not allowed, use ``*`` instead.
- Stacks of matrices are broadcast together as if the matrices
were elements, respecting the signature ``(n,k),(k,m)->(n,m)``:
>>> a = np.ones([9, 5, 7, 4])
>>> c = np.ones([9, 5, 4, 3])
>>> np.dot(a, c).shape
(9, 5, 7, 9, 5, 3)
>>> np.matmul(a, c).shape
(9, 5, 7, 3)
>>> # n is 7, k is 4, m is 3
The matmul function implements the semantics of the ``@`` operator introduced
in Python 3.5 following :pep:`465`.
Examples
--------
For 2-D arrays it is the matrix product:
>>> a = np.array([[1, 0],
... [0, 1]])
>>> b = np.array([[4, 1],
... [2, 2]])
>>> np.matmul(a, b)
array([[4, 1],
[2, 2]])
For 2-D mixed with 1-D, the result is the usual matrix-vector product.
>>> a = np.array([[1, 0],
... [0, 1]])
>>> b = np.array([1, 2])
>>> np.matmul(a, b)
array([1, 2])
>>> np.matmul(b, a)
array([1, 2])
Broadcasting is conventional for stacks of arrays
>>> a = np.arange(2 * 2 * 4).reshape((2, 2, 4))
>>> b = np.arange(2 * 2 * 4).reshape((2, 4, 2))
>>> np.matmul(a,b).shape
(2, 2, 2)
>>> np.matmul(a, b)[0, 1, 1]
98
>>> sum(a[0, 1, :] * b[0 , :, 1])
98
Vector, vector returns the scalar inner product, but neither argument
is complex-conjugated:
>>> np.matmul([2j, 3j], [2j, 3j])
(-13+0j)
Scalar multiplication raises an error.
>>> np.matmul([1,2], 3)
Traceback (most recent call last):
...
ValueError: matmul: Input operand 1 does not have enough dimensions ...
The ``@`` operator can be used as a shorthand for ``np.matmul`` on
ndarrays.
>>> x1 = np.array([2j, 3j])
>>> x2 = np.array([2j, 3j])
>>> x1 @ x2
(-13+0j)
.. versionadded:: 1.10.0
""")
add_newdoc('numpy.core.umath', 'modf',
"""
Return the fractional and integral parts of an array, element-wise.
The fractional and integral parts are negative if the given number is
negative.
Parameters
----------
x : array_like
Input array.
$PARAMS
Returns
-------
y1 : ndarray
Fractional part of `x`.
$OUT_SCALAR_1
y2 : ndarray
Integral part of `x`.
$OUT_SCALAR_1
Notes
-----
For integer input the return values are floats.
See Also
--------
divmod : ``divmod(x, 1)`` is equivalent to ``modf`` with the return values
switched, except it always has a positive remainder.
Examples
--------
>>> np.modf([0, 3.5])
(array([ 0. , 0.5]), array([ 0., 3.]))
>>> np.modf(-0.5)
(-0.5, -0.0)
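The sign convention for negative inputs, unpacked and printed for a
version-stable doctest:
>>> frac, whole = np.modf(-3.5)
>>> print(frac, whole)
-0.5 -3.0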
""")
add_newdoc('numpy.core.umath', 'multiply',
"""
Multiply arguments element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays to be multiplied.
$BROADCASTABLE_2
$PARAMS
Returns
-------
y : ndarray
The product of `x1` and `x2`, element-wise.
$OUT_SCALAR_2
Notes
-----
Equivalent to `x1` * `x2` in terms of array broadcasting.
Examples
--------
>>> np.multiply(2.0, 4.0)
8.0
>>> x1 = np.arange(9.0).reshape((3, 3))
>>> x2 = np.arange(3.0)
>>> np.multiply(x1, x2)
array([[ 0., 1., 4.],
[ 0., 4., 10.],
[ 0., 7., 16.]])
The ``*`` operator can be used as a shorthand for ``np.multiply`` on
ndarrays.
>>> x1 = np.arange(9.0).reshape((3, 3))
>>> x2 = np.arange(3.0)
>>> x1 * x2
array([[ 0., 1., 4.],
[ 0., 4., 10.],
[ 0., 7., 16.]])
""")
add_newdoc('numpy.core.umath', 'negative',
"""
Numerical negative, element-wise.
Parameters
----------
x : array_like or scalar
Input array.
$PARAMS
Returns
-------
y : ndarray or scalar
Returned array or scalar: `y = -x`.
$OUT_SCALAR_1
Examples
--------
>>> np.negative([1.,-1.])
array([-1., 1.])
The unary ``-`` operator can be used as a shorthand for ``np.negative`` on
ndarrays.
>>> x1 = np.array(([1., -1.]))
>>> -x1
array([-1., 1.])
""")
add_newdoc('numpy.core.umath', 'positive',
"""
Numerical positive, element-wise.
.. versionadded:: 1.13.0
Parameters
----------
x : array_like or scalar
Input array.
Returns
-------
y : ndarray or scalar
Returned array or scalar: `y = +x`.
$OUT_SCALAR_1
Notes
-----
Equivalent to `x.copy()`, but only defined for types that support
arithmetic.
Examples
--------
>>> x1 = np.array(([1., -1.]))
>>> np.positive(x1)
array([ 1., -1.])
The unary ``+`` operator can be used as a shorthand for ``np.positive`` on
ndarrays.
>>> x1 = np.array(([1., -1.]))
>>> +x1
array([ 1., -1.])
""")
add_newdoc('numpy.core.umath', 'not_equal',
"""
Return (x1 != x2) element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays.
$BROADCASTABLE_2
$PARAMS
Returns
-------
out : ndarray or scalar
Output array, element-wise comparison of `x1` and `x2`.
Typically of type bool, unless ``dtype=object`` is passed.
$OUT_SCALAR_2
See Also
--------
equal, greater, greater_equal, less, less_equal
Examples
--------
>>> np.not_equal([1.,2.], [1., 3.])
array([False, True])
>>> np.not_equal([1, 2], [[1, 3],[1, 4]])
array([[False, True],
[False, True]])
The ``!=`` operator can be used as a shorthand for ``np.not_equal`` on
ndarrays.
>>> a = np.array([1., 2.])
>>> b = np.array([1., 3.])
>>> a != b
array([False, True])
""")
add_newdoc('numpy.core.umath', '_ones_like',
"""
This function used to be the numpy.ones_like, but now a specific
function for that has been written for consistency with the other
*_like functions. It is only used internally in a limited fashion now.
See Also
--------
ones_like
""")
add_newdoc('numpy.core.umath', 'power',
"""
First array elements raised to powers from second array, element-wise.
Raise each base in `x1` to the positionally-corresponding power in
`x2`. `x1` and `x2` must be broadcastable to the same shape. Note that an
integer type raised to a negative integer power will raise a ValueError.
Parameters
----------
x1 : array_like
The bases.
x2 : array_like
The exponents.
$BROADCASTABLE_2
$PARAMS
Returns
-------
y : ndarray
The bases in `x1` raised to the exponents in `x2`.
$OUT_SCALAR_2
See Also
--------
float_power : power function that promotes integers to float
Examples
--------
Cube each element in an array.
>>> x1 = np.arange(6)
>>> x1
    array([0, 1, 2, 3, 4, 5])
>>> np.power(x1, 3)
array([ 0, 1, 8, 27, 64, 125])
Raise the bases to different exponents.
>>> x2 = [1.0, 2.0, 3.0, 3.0, 2.0, 1.0]
>>> np.power(x1, x2)
array([ 0., 1., 8., 27., 16., 5.])
The effect of broadcasting.
>>> x2 = np.array([[1, 2, 3, 3, 2, 1], [1, 2, 3, 3, 2, 1]])
>>> x2
array([[1, 2, 3, 3, 2, 1],
[1, 2, 3, 3, 2, 1]])
>>> np.power(x1, x2)
array([[ 0, 1, 8, 27, 16, 5],
[ 0, 1, 8, 27, 16, 5]])
The ``**`` operator can be used as a shorthand for ``np.power`` on
ndarrays.
>>> x2 = np.array([1, 2, 3, 3, 2, 1])
>>> x1 = np.arange(6)
>>> x1 ** x2
array([ 0, 1, 8, 27, 16, 5])
""")
add_newdoc('numpy.core.umath', 'float_power',
"""
First array elements raised to powers from second array, element-wise.
Raise each base in `x1` to the positionally-corresponding power in `x2`.
`x1` and `x2` must be broadcastable to the same shape. This differs from
the power function in that integers, float16, and float32 are promoted to
floats with a minimum precision of float64 so that the result is always
inexact. The intent is that the function will return a usable result for
negative powers and seldom overflow for positive powers.
.. versionadded:: 1.12.0
Parameters
----------
x1 : array_like
The bases.
x2 : array_like
The exponents.
$BROADCASTABLE_2
$PARAMS
Returns
-------
y : ndarray
The bases in `x1` raised to the exponents in `x2`.
$OUT_SCALAR_2
See Also
--------
power : power function that preserves type
Examples
--------
Cube each element in a list.
    >>> x1 = list(range(6))
>>> x1
[0, 1, 2, 3, 4, 5]
>>> np.float_power(x1, 3)
array([ 0., 1., 8., 27., 64., 125.])
Raise the bases to different exponents.
>>> x2 = [1.0, 2.0, 3.0, 3.0, 2.0, 1.0]
>>> np.float_power(x1, x2)
array([ 0., 1., 8., 27., 16., 5.])
The effect of broadcasting.
>>> x2 = np.array([[1, 2, 3, 3, 2, 1], [1, 2, 3, 3, 2, 1]])
>>> x2
array([[1, 2, 3, 3, 2, 1],
[1, 2, 3, 3, 2, 1]])
>>> np.float_power(x1, x2)
array([[ 0., 1., 8., 27., 16., 5.],
[ 0., 1., 8., 27., 16., 5.]])
""")
add_newdoc('numpy.core.umath', 'radians',
"""
Convert angles from degrees to radians.
Parameters
----------
x : array_like
Input array in degrees.
$PARAMS
Returns
-------
y : ndarray
The corresponding radian values.
$OUT_SCALAR_1
See Also
--------
deg2rad : equivalent function
Examples
--------
Convert a degree array to radians
>>> deg = np.arange(12.) * 30.
>>> np.radians(deg)
array([ 0. , 0.52359878, 1.04719755, 1.57079633, 2.0943951 ,
2.61799388, 3.14159265, 3.66519143, 4.1887902 , 4.71238898,
5.23598776, 5.75958653])
>>> out = np.zeros((deg.shape))
>>> ret = np.radians(deg, out)
>>> ret is out
True
""")
add_newdoc('numpy.core.umath', 'deg2rad',
"""
Convert angles from degrees to radians.
Parameters
----------
x : array_like
Angles in degrees.
$PARAMS
Returns
-------
y : ndarray
The corresponding angle in radians.
$OUT_SCALAR_1
See Also
--------
rad2deg : Convert angles from radians to degrees.
unwrap : Remove large jumps in angle by wrapping.
Notes
-----
.. versionadded:: 1.3.0
``deg2rad(x)`` is ``x * pi / 180``.
Examples
--------
>>> np.deg2rad(180)
3.1415926535897931
""")
add_newdoc('numpy.core.umath', 'reciprocal',
"""
Return the reciprocal of the argument, element-wise.
Calculates ``1/x``.
Parameters
----------
x : array_like
Input array.
$PARAMS
Returns
-------
y : ndarray
Return array.
$OUT_SCALAR_1
Notes
-----
.. note::
This function is not designed to work with integers.
For integer arguments with absolute value larger than 1 the result is
always zero because of the way Python handles integer division. For
integer zero the result is an overflow.
Examples
--------
>>> np.reciprocal(2.)
0.5
>>> np.reciprocal([1, 2., 3.33])
array([ 1. , 0.5 , 0.3003003])
""")
add_newdoc('numpy.core.umath', 'remainder',
"""
Return element-wise remainder of division.
Computes the remainder complementary to the `floor_divide` function. It is
    equivalent to the Python modulus operator ``x1 % x2`` and has the same sign
as the divisor `x2`. The MATLAB function equivalent to ``np.remainder``
is ``mod``.
.. warning::
This should not be confused with:
        * Python 3.7's `math.remainder` and C's ``remainder``, which
          compute the IEEE remainder, which is the complement to
          ``round(x1 / x2)``.
        * The MATLAB ``rem`` function and/or the C ``%`` operator, which is
          the complement to ``int(x1 / x2)``.
Parameters
----------
x1 : array_like
Dividend array.
x2 : array_like
Divisor array.
$BROADCASTABLE_2
$PARAMS
Returns
-------
y : ndarray
The element-wise remainder of the quotient ``floor_divide(x1, x2)``.
$OUT_SCALAR_2
See Also
--------
floor_divide : Equivalent of Python ``//`` operator.
divmod : Simultaneous floor division and remainder.
fmod : Equivalent of the MATLAB ``rem`` function.
divide, floor
Notes
-----
Returns 0 when `x2` is 0 and both `x1` and `x2` are (arrays of)
integers.
``mod`` is an alias of ``remainder``.
Examples
--------
>>> np.remainder([4, 7], [2, 3])
array([0, 1])
>>> np.remainder(np.arange(7), 5)
array([0, 1, 2, 3, 4, 0, 1])
The ``%`` operator can be used as a shorthand for ``np.remainder`` on
ndarrays.
>>> x1 = np.arange(7)
>>> x1 % 5
array([0, 1, 2, 3, 4, 0, 1])
""")
add_newdoc('numpy.core.umath', 'divmod',
"""
Return element-wise quotient and remainder simultaneously.
.. versionadded:: 1.13.0
``np.divmod(x, y)`` is equivalent to ``(x // y, x % y)``, but faster
because it avoids redundant work. It is used to implement the Python
built-in function ``divmod`` on NumPy arrays.
Parameters
----------
x1 : array_like
Dividend array.
x2 : array_like
Divisor array.
$BROADCASTABLE_2
$PARAMS
Returns
-------
out1 : ndarray
Element-wise quotient resulting from floor division.
$OUT_SCALAR_2
out2 : ndarray
Element-wise remainder from floor division.
$OUT_SCALAR_2
See Also
--------
floor_divide : Equivalent to Python's ``//`` operator.
remainder : Equivalent to Python's ``%`` operator.
modf : Equivalent to ``divmod(x, 1)`` for positive ``x`` with the return
values switched.
Examples
--------
>>> np.divmod(np.arange(5), 3)
(array([0, 0, 0, 1, 1]), array([0, 1, 2, 0, 1]))
The `divmod` function can be used as a shorthand for ``np.divmod`` on
ndarrays.
>>> x = np.arange(5)
>>> divmod(x, 3)
(array([0, 0, 0, 1, 1]), array([0, 1, 2, 0, 1]))
""")
add_newdoc('numpy.core.umath', 'right_shift',
"""
Shift the bits of an integer to the right.
    Bits are shifted to the right by `x2` positions. Because the internal
representation of numbers is in binary format, this operation is
equivalent to dividing `x1` by ``2**x2``.
Parameters
----------
x1 : array_like, int
Input values.
x2 : array_like, int
Number of bits to remove at the right of `x1`.
$BROADCASTABLE_2
$PARAMS
Returns
-------
out : ndarray, int
Return `x1` with bits shifted `x2` times to the right.
$OUT_SCALAR_2
See Also
--------
left_shift : Shift the bits of an integer to the left.
binary_repr : Return the binary representation of the input number
as a string.
Examples
--------
>>> np.binary_repr(10)
'1010'
>>> np.right_shift(10, 1)
5
>>> np.binary_repr(5)
'101'
>>> np.right_shift(10, [1,2,3])
array([5, 2, 1])
The ``>>`` operator can be used as a shorthand for ``np.right_shift`` on
ndarrays.
>>> x1 = 10
>>> x2 = np.array([1,2,3])
>>> x1 >> x2
array([5, 2, 1])
""")
add_newdoc('numpy.core.umath', 'rint',
"""
Round elements of the array to the nearest integer.
Parameters
----------
x : array_like
Input array.
$PARAMS
Returns
-------
out : ndarray or scalar
Output array is same shape and type as `x`.
$OUT_SCALAR_1
See Also
--------
fix, ceil, floor, trunc
Notes
-----
For values exactly halfway between rounded decimal values, NumPy
rounds to the nearest even value. Thus 1.5 and 2.5 round to 2.0,
-0.5 and 0.5 round to 0.0, etc.
Examples
--------
>>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
>>> np.rint(a)
array([-2., -2., -0., 0., 2., 2., 2.])
""")
add_newdoc('numpy.core.umath', 'sign',
"""
Returns an element-wise indication of the sign of a number.
The `sign` function returns ``-1 if x < 0, 0 if x==0, 1 if x > 0``. nan
is returned for nan inputs.
For complex inputs, the `sign` function returns
``sign(x.real) + 0j if x.real != 0 else sign(x.imag) + 0j``.
complex(nan, 0) is returned for complex nan inputs.
Parameters
----------
x : array_like
Input values.
$PARAMS
Returns
-------
y : ndarray
The sign of `x`.
$OUT_SCALAR_1
Notes
-----
There is more than one definition of sign in common use for complex
numbers. The definition used here is equivalent to :math:`x/\\sqrt{x*x}`
which is different from a common alternative, :math:`x/|x|`.
Examples
--------
>>> np.sign([-5., 4.5])
array([-1., 1.])
>>> np.sign(0)
0
>>> np.sign(5-2j)
(1+0j)
""")
add_newdoc('numpy.core.umath', 'signbit',
"""
Returns element-wise True where signbit is set (less than zero).
Parameters
----------
x : array_like
The input value(s).
$PARAMS
Returns
-------
result : ndarray of bool
Output array, or reference to `out` if that was supplied.
$OUT_SCALAR_1
Examples
--------
>>> np.signbit(-1.2)
True
>>> np.signbit(np.array([1, -2.3, 2.1]))
array([False, True, False])
""")
add_newdoc('numpy.core.umath', 'copysign',
"""
Change the sign of x1 to that of x2, element-wise.
If `x2` is a scalar, its sign will be copied to all elements of `x1`.
Parameters
----------
x1 : array_like
Values to change the sign of.
x2 : array_like
The sign of `x2` is copied to `x1`.
$BROADCASTABLE_2
$PARAMS
Returns
-------
out : ndarray or scalar
The values of `x1` with the sign of `x2`.
$OUT_SCALAR_2
Examples
--------
>>> np.copysign(1.3, -1)
-1.3
>>> 1/np.copysign(0, 1)
inf
>>> 1/np.copysign(0, -1)
-inf
>>> np.copysign([-1, 0, 1], -1.1)
array([-1., -0., -1.])
>>> np.copysign([-1, 0, 1], np.arange(3)-1)
array([-1., 0., 1.])
""")
add_newdoc('numpy.core.umath', 'nextafter',
"""
Return the next floating-point value after x1 towards x2, element-wise.
Parameters
----------
x1 : array_like
Values to find the next representable value of.
x2 : array_like
The direction where to look for the next representable value of `x1`.
$BROADCASTABLE_2
$PARAMS
Returns
-------
out : ndarray or scalar
The next representable values of `x1` in the direction of `x2`.
$OUT_SCALAR_2
Examples
--------
>>> eps = np.finfo(np.float64).eps
>>> np.nextafter(1, 2) == eps + 1
True
>>> np.nextafter([1, 2], [2, 1]) == [eps + 1, 2 - eps]
array([ True, True])
""")
add_newdoc('numpy.core.umath', 'spacing',
"""
Return the distance between x and the nearest adjacent number.
Parameters
----------
x : array_like
Values to find the spacing of.
$PARAMS
Returns
-------
out : ndarray or scalar
The spacing of values of `x`.
$OUT_SCALAR_1
Notes
-----
It can be considered as a generalization of EPS:
``spacing(np.float64(1)) == np.finfo(np.float64).eps``, and there
should not be any representable number between ``x + spacing(x)`` and
x for any finite x.
Spacing of +- inf and NaN is NaN.
Examples
--------
>>> np.spacing(1) == np.finfo(np.float64).eps
True
""")
add_newdoc('numpy.core.umath', 'sin',
"""
Trigonometric sine, element-wise.
Parameters
----------
x : array_like
Angle, in radians (:math:`2 \\pi` rad equals 360 degrees).
$PARAMS
Returns
-------
y : array_like
The sine of each element of x.
$OUT_SCALAR_1
See Also
--------
arcsin, sinh, cos
Notes
-----
The sine is one of the fundamental functions of trigonometry (the
mathematical study of triangles). Consider a circle of radius 1
centered on the origin. A ray comes in from the :math:`+x` axis, makes
an angle at the origin (measured counter-clockwise from that axis), and
departs from the origin. The :math:`y` coordinate of the outgoing
ray's intersection with the unit circle is the sine of that angle. It
ranges from -1 for :math:`x=3\\pi / 2` to +1 for :math:`\\pi / 2.` The
function has zeroes where the angle is a multiple of :math:`\\pi`.
Sines of angles between :math:`\\pi` and :math:`2\\pi` are negative.
The numerous properties of the sine and related functions are included
in any standard trigonometry text.
Examples
--------
Print sine of one angle:
>>> np.sin(np.pi/2.)
1.0
Print sines of an array of angles given in degrees:
>>> np.sin(np.array((0., 30., 45., 60., 90.)) * np.pi / 180. )
array([ 0. , 0.5 , 0.70710678, 0.8660254 , 1. ])
Plot the sine function:
>>> import matplotlib.pylab as plt
>>> x = np.linspace(-np.pi, np.pi, 201)
>>> plt.plot(x, np.sin(x))
>>> plt.xlabel('Angle [rad]')
>>> plt.ylabel('sin(x)')
>>> plt.axis('tight')
>>> plt.show()
""")
add_newdoc('numpy.core.umath', 'sinh',
"""
Hyperbolic sine, element-wise.
Equivalent to ``1/2 * (np.exp(x) - np.exp(-x))`` or
``-1j * np.sin(1j*x)``.
Parameters
----------
x : array_like
Input array.
$PARAMS
Returns
-------
y : ndarray
The corresponding hyperbolic sine values.
$OUT_SCALAR_1
Notes
-----
If `out` is provided, the function writes the result into it,
and returns a reference to `out`. (See Examples)
References
----------
M. Abramowitz and I. A. Stegun, Handbook of Mathematical Functions.
New York, NY: Dover, 1972, pg. 83.
Examples
--------
>>> np.sinh(0)
0.0
>>> np.sinh(np.pi*1j/2)
1j
>>> np.sinh(np.pi*1j) # (exact value is 0)
1.2246063538223773e-016j
>>> # Discrepancy due to vagaries of floating point arithmetic.
>>> # Example of providing the optional output parameter
>>> out1 = np.array([0], dtype='d')
>>> out2 = np.sinh([0.1], out1)
>>> out2 is out1
True
>>> # Example of ValueError due to provision of shape mis-matched `out`
>>> np.sinh(np.zeros((3,3)),np.zeros((2,2)))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ValueError: operands could not be broadcast together with shapes (3,3) (2,2)
""")
add_newdoc('numpy.core.umath', 'sqrt',
"""
Return the non-negative square-root of an array, element-wise.
Parameters
----------
x : array_like
The values whose square-roots are required.
$PARAMS
Returns
-------
y : ndarray
An array of the same shape as `x`, containing the positive
square-root of each element in `x`. If any element in `x` is
complex, a complex array is returned (and the square-roots of
negative reals are calculated). If all of the elements in `x`
are real, so is `y`, with negative elements returning ``nan``.
If `out` was provided, `y` is a reference to it.
$OUT_SCALAR_1
See Also
--------
lib.scimath.sqrt
A version which returns complex numbers when given negative reals.
Notes
-----
*sqrt* has--consistent with common convention--as its branch cut the
real "interval" [`-inf`, 0), and is continuous from above on it.
A branch cut is a curve in the complex plane across which a given
complex function fails to be continuous.
Examples
--------
>>> np.sqrt([1,4,9])
array([ 1., 2., 3.])
>>> np.sqrt([4, -1, -3+4J])
array([ 2.+0.j, 0.+1.j, 1.+2.j])
>>> np.sqrt([4, -1, np.inf])
array([ 2., nan, inf])
""")
add_newdoc('numpy.core.umath', 'cbrt',
"""
Return the cube-root of an array, element-wise.
.. versionadded:: 1.10.0
Parameters
----------
x : array_like
The values whose cube-roots are required.
$PARAMS
Returns
-------
y : ndarray
        An array of the same shape as `x`, containing the
        cube-root of each element in `x`.
If `out` was provided, `y` is a reference to it.
$OUT_SCALAR_1
Examples
--------
>>> np.cbrt([1,8,27])
array([ 1., 2., 3.])
""")
add_newdoc('numpy.core.umath', 'square',
"""
Return the element-wise square of the input.
Parameters
----------
x : array_like
Input data.
$PARAMS
Returns
-------
out : ndarray or scalar
Element-wise `x*x`, of the same shape and dtype as `x`.
$OUT_SCALAR_1
See Also
--------
numpy.linalg.matrix_power
sqrt
power
Examples
--------
>>> np.square([-1j, 1])
array([-1.-0.j, 1.+0.j])
""")
add_newdoc('numpy.core.umath', 'subtract',
"""
Subtract arguments, element-wise.
Parameters
----------
x1, x2 : array_like
The arrays to be subtracted from each other.
$BROADCASTABLE_2
$PARAMS
Returns
-------
y : ndarray
The difference of `x1` and `x2`, element-wise.
$OUT_SCALAR_2
Notes
-----
Equivalent to ``x1 - x2`` in terms of array broadcasting.
Examples
--------
>>> np.subtract(1.0, 4.0)
-3.0
>>> x1 = np.arange(9.0).reshape((3, 3))
>>> x2 = np.arange(3.0)
>>> np.subtract(x1, x2)
array([[ 0., 0., 0.],
[ 3., 3., 3.],
[ 6., 6., 6.]])
The ``-`` operator can be used as a shorthand for ``np.subtract`` on
ndarrays.
>>> x1 = np.arange(9.0).reshape((3, 3))
>>> x2 = np.arange(3.0)
>>> x1 - x2
array([[0., 0., 0.],
[3., 3., 3.],
[6., 6., 6.]])
""")
add_newdoc('numpy.core.umath', 'tan',
"""
Compute tangent element-wise.
Equivalent to ``np.sin(x)/np.cos(x)`` element-wise.
Parameters
----------
x : array_like
Input array.
$PARAMS
Returns
-------
y : ndarray
The corresponding tangent values.
$OUT_SCALAR_1
Notes
-----
If `out` is provided, the function writes the result into it,
and returns a reference to `out`. (See Examples)
References
----------
M. Abramowitz and I. A. Stegun, Handbook of Mathematical Functions.
New York, NY: Dover, 1972.
Examples
--------
>>> from math import pi
>>> np.tan(np.array([-pi,pi/2,pi]))
array([ 1.22460635e-16, 1.63317787e+16, -1.22460635e-16])
>>>
>>> # Example of providing the optional output parameter illustrating
>>> # that what is returned is a reference to said parameter
>>> out1 = np.array([0], dtype='d')
    >>> out2 = np.tan([0.1], out1)
>>> out2 is out1
True
>>>
>>> # Example of ValueError due to provision of shape mis-matched `out`
    >>> np.tan(np.zeros((3,3)), np.zeros((2,2)))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ValueError: operands could not be broadcast together with shapes (3,3) (2,2)
""")
add_newdoc('numpy.core.umath', 'tanh',
"""
Compute hyperbolic tangent element-wise.
Equivalent to ``np.sinh(x)/np.cosh(x)`` or ``-1j * np.tan(1j*x)``.
Parameters
----------
x : array_like
Input array.
$PARAMS
Returns
-------
y : ndarray
The corresponding hyperbolic tangent values.
$OUT_SCALAR_1
Notes
-----
If `out` is provided, the function writes the result into it,
and returns a reference to `out`. (See Examples)
References
----------
.. [1] M. Abramowitz and I. A. Stegun, Handbook of Mathematical Functions.
New York, NY: Dover, 1972, pg. 83.
http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Hyperbolic function",
https://en.wikipedia.org/wiki/Hyperbolic_function
Examples
--------
>>> np.tanh((0, np.pi*1j, np.pi*1j/2))
array([ 0. +0.00000000e+00j, 0. -1.22460635e-16j, 0. +1.63317787e+16j])
>>> # Example of providing the optional output parameter illustrating
>>> # that what is returned is a reference to said parameter
>>> out1 = np.array([0], dtype='d')
>>> out2 = np.tanh([0.1], out1)
>>> out2 is out1
True
>>> # Example of ValueError due to provision of shape mis-matched `out`
>>> np.tanh(np.zeros((3,3)),np.zeros((2,2)))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ValueError: operands could not be broadcast together with shapes (3,3) (2,2)
""")
add_newdoc('numpy.core.umath', 'true_divide',
"""
Returns a true division of the inputs, element-wise.
Instead of the Python traditional 'floor division', this returns a true
division. True division adjusts the output type to present the best
answer, regardless of input types.
Parameters
----------
x1 : array_like
Dividend array.
x2 : array_like
Divisor array.
$BROADCASTABLE_2
$PARAMS
Returns
-------
out : ndarray or scalar
$OUT_SCALAR_2
Notes
-----
In Python, ``//`` is the floor division operator and ``/`` the
true division operator. The ``true_divide(x1, x2)`` function is
equivalent to true division in Python.
Examples
--------
>>> x = np.arange(5)
>>> np.true_divide(x, 4)
array([ 0. , 0.25, 0.5 , 0.75, 1. ])
>>> x/4
array([ 0. , 0.25, 0.5 , 0.75, 1. ])
>>> x//4
array([0, 0, 0, 0, 1])
The ``/`` operator can be used as a shorthand for ``np.true_divide`` on
ndarrays.
>>> x = np.arange(5)
>>> x / 4
array([0. , 0.25, 0.5 , 0.75, 1. ])
""")
add_newdoc('numpy.core.umath', 'frexp',
"""
Decompose the elements of x into mantissa and twos exponent.
    Returns (`mantissa`, `exponent`), where ``x = mantissa * 2**exponent``.
    The mantissa lies in the open interval (-1, 1), while the twos
exponent is a signed integer.
Parameters
----------
x : array_like
Array of numbers to be decomposed.
out1 : ndarray, optional
Output array for the mantissa. Must have the same shape as `x`.
out2 : ndarray, optional
Output array for the exponent. Must have the same shape as `x`.
$PARAMS
Returns
-------
mantissa : ndarray
Floating values between -1 and 1.
$OUT_SCALAR_1
exponent : ndarray
Integer exponents of 2.
$OUT_SCALAR_1
See Also
--------
ldexp : Compute ``y = x1 * 2**x2``, the inverse of `frexp`.
Notes
-----
Complex dtypes are not supported, they will raise a TypeError.
Examples
--------
>>> x = np.arange(9)
>>> y1, y2 = np.frexp(x)
>>> y1
array([ 0. , 0.5 , 0.5 , 0.75 , 0.5 , 0.625, 0.75 , 0.875,
0.5 ])
>>> y2
array([0, 1, 2, 2, 3, 3, 3, 3, 4])
>>> y1 * 2**y2
array([ 0., 1., 2., 3., 4., 5., 6., 7., 8.])
""")
add_newdoc('numpy.core.umath', 'ldexp',
"""
Returns x1 * 2**x2, element-wise.
The mantissas `x1` and twos exponents `x2` are used to construct
floating point numbers ``x1 * 2**x2``.
Parameters
----------
x1 : array_like
Array of multipliers.
x2 : array_like, int
Array of twos exponents.
$BROADCASTABLE_2
$PARAMS
Returns
-------
y : ndarray or scalar
The result of ``x1 * 2**x2``.
$OUT_SCALAR_2
See Also
--------
frexp : Return (y1, y2) from ``x = y1 * 2**y2``, inverse to `ldexp`.
Notes
-----
Complex dtypes are not supported, they will raise a TypeError.
`ldexp` is useful as the inverse of `frexp`, if used by itself it is
more clear to simply use the expression ``x1 * 2**x2``.
Examples
--------
>>> np.ldexp(5, np.arange(4))
array([ 5., 10., 20., 40.], dtype=float16)
>>> x = np.arange(6)
>>> np.ldexp(*np.frexp(x))
array([ 0., 1., 2., 3., 4., 5.])
""")
add_newdoc('numpy.core.umath', 'gcd',
"""
Returns the greatest common divisor of ``|x1|`` and ``|x2|``
Parameters
----------
x1, x2 : array_like, int
Arrays of values.
$BROADCASTABLE_2
Returns
-------
y : ndarray or scalar
The greatest common divisor of the absolute value of the inputs
$OUT_SCALAR_2
See Also
--------
lcm : The lowest common multiple
Examples
--------
>>> np.gcd(12, 20)
4
>>> np.gcd.reduce([15, 25, 35])
5
>>> np.gcd(np.arange(6), 20)
array([20, 1, 2, 1, 4, 5])
""")
add_newdoc('numpy.core.umath', 'lcm',
"""
Returns the lowest common multiple of ``|x1|`` and ``|x2|``
Parameters
----------
x1, x2 : array_like, int
Arrays of values.
$BROADCASTABLE_2
Returns
-------
y : ndarray or scalar
The lowest common multiple of the absolute value of the inputs
$OUT_SCALAR_2
See Also
--------
gcd : The greatest common divisor
Examples
--------
>>> np.lcm(12, 20)
60
>>> np.lcm.reduce([3, 12, 20])
60
>>> np.lcm.reduce([40, 12, 20])
120
>>> np.lcm(np.arange(6), 20)
array([ 0, 20, 20, 60, 20, 20])
""")
| bsd-3-clause |
DonBeo/scikit-learn | examples/ensemble/plot_forest_iris.py | 335 | 6271 | """
====================================================================
Plot the decision surfaces of ensembles of trees on the iris dataset
====================================================================
Plot the decision surfaces of forests of randomized trees trained on pairs of
features of the iris dataset.
This plot compares the decision surfaces learned by a decision tree classifier
(first column), by a random forest classifier (second column), by an extra-
trees classifier (third column) and by an AdaBoost classifier (fourth column).
In the first row, the classifiers are built using the sepal width and the sepal
length features only, on the second row using the petal length and sepal length
only, and on the third row using the petal width and the petal length only.
In descending order of quality, when trained (outside of this example) on all
4 features using 30 estimators and scored using 10 fold cross validation, we see::
ExtraTreesClassifier() # 0.95 score
RandomForestClassifier() # 0.94 score
AdaBoost(DecisionTree(max_depth=3)) # 0.94 score
DecisionTree(max_depth=None) # 0.94 score
Increasing `max_depth` for AdaBoost lowers the standard deviation of the scores (but
the average score does not improve).
See the console's output for further details about each model.
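The scores quoted above can be reproduced, up to randomness, with a sketch
along the following lines (the ``cross_val_score`` import path is an
assumption for this scikit-learn version; newer releases expose it from
``sklearn.model_selection``)::
    from sklearn.cross_validation import cross_val_score
    from sklearn.datasets import load_iris
    from sklearn.ensemble import ExtraTreesClassifier
    iris = load_iris()
    scores = cross_val_score(ExtraTreesClassifier(n_estimators=30),
                             iris.data, iris.target, cv=10)
    print(scores.mean())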
In this example you might try to:
1) vary the ``max_depth`` for the ``DecisionTreeClassifier`` and
``AdaBoostClassifier``, perhaps try ``max_depth=3`` for the
``DecisionTreeClassifier`` or ``max_depth=None`` for ``AdaBoostClassifier``
2) vary ``n_estimators``
It is worth noting that RandomForests and ExtraTrees can be fitted in parallel
on many cores as each tree is built independently of the others. AdaBoost's
samples are built sequentially and so do not use multiple cores.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import clone
from sklearn.datasets import load_iris
from sklearn.ensemble import (RandomForestClassifier, ExtraTreesClassifier,
AdaBoostClassifier)
from sklearn.externals.six.moves import xrange
from sklearn.tree import DecisionTreeClassifier
# Parameters
n_classes = 3
n_estimators = 30
plot_colors = "ryb"
cmap = plt.cm.RdYlBu
plot_step = 0.02 # fine step width for decision surface contours
plot_step_coarser = 0.5 # step widths for coarse classifier guesses
RANDOM_SEED = 13 # fix the seed on each iteration
# Load data
iris = load_iris()
plot_idx = 1
models = [DecisionTreeClassifier(max_depth=None),
RandomForestClassifier(n_estimators=n_estimators),
ExtraTreesClassifier(n_estimators=n_estimators),
AdaBoostClassifier(DecisionTreeClassifier(max_depth=3),
n_estimators=n_estimators)]
for pair in ([0, 1], [0, 2], [2, 3]):
for model in models:
# We only take the two corresponding features
X = iris.data[:, pair]
y = iris.target
# Shuffle
idx = np.arange(X.shape[0])
np.random.seed(RANDOM_SEED)
np.random.shuffle(idx)
X = X[idx]
y = y[idx]
# Standardize
mean = X.mean(axis=0)
std = X.std(axis=0)
X = (X - mean) / std
# Train
clf = clone(model)
clf = model.fit(X, y)
scores = clf.score(X, y)
# Create a title for each column and the console by using str() and
# slicing away useless parts of the string
model_title = str(type(model)).split(".")[-1][:-2][:-len("Classifier")]
model_details = model_title
if hasattr(model, "estimators_"):
model_details += " with {} estimators".format(len(model.estimators_))
        print(model_details + " with features", pair, "has a score of", scores)
plt.subplot(3, 4, plot_idx)
if plot_idx <= len(models):
# Add a title at the top of each column
plt.title(model_title)
# Now plot the decision boundary using a fine mesh as input to a
# filled contour plot
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),
np.arange(y_min, y_max, plot_step))
# Plot either a single DecisionTreeClassifier or alpha blend the
# decision surfaces of the ensemble of classifiers
if isinstance(model, DecisionTreeClassifier):
Z = model.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, cmap=cmap)
else:
# Choose alpha blend level with respect to the number of estimators
# that are in use (noting that AdaBoost can use fewer estimators
# than its maximum if it achieves a good enough fit early on)
estimator_alpha = 1.0 / len(model.estimators_)
for tree in model.estimators_:
Z = tree.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, alpha=estimator_alpha, cmap=cmap)
# Build a coarser grid to plot a set of ensemble classifications
# to show how these are different to what we see in the decision
        # surfaces. These points are regularly spaced and do not have a black outline
xx_coarser, yy_coarser = np.meshgrid(np.arange(x_min, x_max, plot_step_coarser),
np.arange(y_min, y_max, plot_step_coarser))
Z_points_coarser = model.predict(np.c_[xx_coarser.ravel(), yy_coarser.ravel()]).reshape(xx_coarser.shape)
cs_points = plt.scatter(xx_coarser, yy_coarser, s=15, c=Z_points_coarser, cmap=cmap, edgecolors="none")
# Plot the training points, these are clustered together and have a
# black outline
for i, c in zip(xrange(n_classes), plot_colors):
idx = np.where(y == i)
plt.scatter(X[idx, 0], X[idx, 1], c=c, label=iris.target_names[i],
cmap=cmap)
plot_idx += 1 # move on to the next plot in sequence
plt.suptitle("Classifiers on feature subsets of the Iris dataset")
plt.axis("tight")
plt.show()
| bsd-3-clause |
ljchang/nltools | nltools/external/srm.py | 1 | 26959 | #!/usr/bin/env python
# coding: latin-1
""" Shared Response Model (SRM)
===========================
The implementations are based on the following publications:
Chen, P. H. C., Chen, J., Yeshurun, Y., Hasson, U., Haxby, J., & Ramadge,
P. J. (2015). A reduced-dimension fMRI shared response model. In Advances
in Neural Information Processing Systems (pp. 460-468).
Anderson, M. J., Capota, M., Turek, J. S., Zhu, X., Willke, T. L., Wang,
Y., & Norman, K. A. (2016, December). Enabling factor analysis on
thousand-subject neuroimaging datasets. In Big Data (Big Data),
2016 IEEE International Conference on (pp. 1151-1160). IEEE.
Copyright 2016 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Authors: Po-Hsuan Chen (Princeton Neuroscience Institute) and Javier Turek
# (Intel Labs), 2015
from __future__ import division
import logging
import numpy as np
import scipy
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.utils import assert_all_finite
from sklearn.exceptions import NotFittedError
import sys
__all__ = [
"SRM", "DetSRM"
]
logger = logging.getLogger(__name__)
def _init_w_transforms(data, features, random_states):
"""Initialize the mappings (Wi) for the SRM with random orthogonal matrices.
Parameters
----------
data : list of 2D arrays, element i has shape=[voxels_i, samples]
Each element in the list contains the fMRI data of one subject.
    features : int
        The number of features in the model.
    random_states : list of `RandomState`
        One `RandomState` instance per subject.
Returns
-------
w : list of array, element i has shape=[voxels_i, features]
The initialized orthogonal transforms (mappings) :math:`W_i` for each
subject.
voxels : list of int
A list with the number of voxels per subject.
Note
----
This function assumes that the numpy random number generator was
initialized.
Not thread safe.
"""
w = []
subjects = len(data)
voxels = np.empty(subjects, dtype=int)
# Set Wi to a random orthogonal voxels by features matrix
for subject in range(subjects):
if data[subject] is not None:
voxels[subject] = data[subject].shape[0]
rnd_matrix = random_states[subject].random_sample((voxels[subject], features))
q, r = np.linalg.qr(rnd_matrix)
w.append(q)
else:
voxels[subject] = 0
w.append(None)
return w, voxels
class SRM(BaseEstimator, TransformerMixin):
"""Probabilistic Shared Response Model (SRM)
Given multi-subject data, factorize it as a shared response S among all
subjects and an orthogonal transform W per subject:
.. math:: X_i \\approx W_i S, \\forall i=1 \\dots N
Parameters
----------
n_iter : int, default: 10
Number of iterations to run the algorithm.
features : int, default: 50
Number of features to compute.
rand_seed : int, default: 0
Seed for initializing the random number generator.
Attributes
----------
w_ : list of array, element i has shape=[voxels_i, features]
The orthogonal transforms (mappings) for each subject.
s_ : array, shape=[features, samples]
The shared response.
sigma_s_ : array, shape=[features, features]
The covariance of the shared response Normal distribution.
mu_ : list of array, element i has shape=[voxels_i]
The voxel means over the samples for each subject.
rho2_ : array, shape=[subjects]
The estimated noise variance :math:`\\rho_i^2` for each subject
random_state_: `RandomState`
Random number generator initialized using rand_seed
Note
----
The number of voxels may be different between subjects. However, the
number of samples must be the same across subjects.
The probabilistic Shared Response Model is approximated using the
Expectation Maximization (EM) algorithm proposed in [Chen2015]_. The
implementation follows the optimizations published in [Anderson2016]_.
This is a single node version.
The run-time complexity is :math:`O(I (V T K + V K^2 + K^3))` and the
memory complexity is :math:`O(V T)` with I - the number of iterations,
V - the sum of voxels from all subjects, T - the number of samples, and
K - the number of features (typically, :math:`V \\gg T \\gg K`).
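    Examples
    --------
    A minimal usage sketch on synthetic data (the shapes, ``features`` and
    ``n_iter`` below are arbitrary illustrative choices, and the import path
    is assumed from this module's location in the package)::
        import numpy as np
        from nltools.external.srm import SRM
        train = [np.random.randn(100, 40) for _ in range(3)]  # 3 subjects
        srm = SRM(n_iter=5, features=10)
        srm = srm.fit(train)           # learns w_ (per subject) and s_
        shared = srm.transform(train)  # list of [features, samples] arrays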
"""
def __init__(self, n_iter=10, features=50, rand_seed=0):
self.n_iter = n_iter
self.features = features
self.rand_seed = rand_seed
return
def fit(self, X, y=None):
"""Compute the probabilistic Shared Response Model
Parameters
----------
X : list of 2D arrays, element i has shape=[voxels_i, samples]
Each element in the list contains the fMRI data of one subject.
y : not used
"""
logger.info('Starting Probabilistic SRM')
# Check the number of subjects
if len(X) <= 1:
raise ValueError("There are not enough subjects "
"({0:d}) to train the model.".format(len(X)))
# Check for input data sizes
if X[0].shape[1] < self.features:
raise ValueError(
"There are not enough samples to train the model with "
"{0:d} features.".format(self.features))
# Check if all subjects have same number of TRs
number_trs = X[0].shape[1]
number_subjects = len(X)
for subject in range(number_subjects):
if X[subject] is not None:
assert_all_finite(X[subject])
if X[subject].shape[1] != number_trs:
raise ValueError("Different number of samples between subjects"
".")
# Run SRM
self.sigma_s_, self.w_, self.mu_, self.rho2_, self.s_ = self._srm(X)
return self
def transform(self, X, y=None):
"""Use the model to transform matrix to Shared Response space
Parameters
----------
X : list of 2D arrays, element i has shape=[voxels_i, samples_i]
Each element in the list contains the fMRI data of one subject
note that number of voxels and samples can vary across subjects
y : not used (as it is unsupervised learning)
Returns
-------
s : list of 2D arrays, element i has shape=[features_i, samples_i]
Shared responses from input data (X)
"""
# Check if the model exist
if hasattr(self, 'w_') is False:
raise NotFittedError("The model fit has not been run yet.")
# Check the number of subjects
if len(X) != len(self.w_):
raise ValueError("The number of subjects does not match the one"
" in the model.")
s = [None] * len(X)
for subject in range(len(X)):
if X[subject] is not None:
s[subject] = self.w_[subject].T.dot(X[subject])
return s
def _init_structures(self, data, subjects):
"""Initializes data structures for SRM and preprocess the data.
Parameters
----------
data : list of 2D arrays, element i has shape=[voxels_i, samples]
Each element in the list contains the fMRI data of one subject.
subjects : int
The total number of subjects in `data`.
Returns
-------
x : list of array, element i has shape=[voxels_i, samples]
Demeaned data for each subject.
mu : list of array, element i has shape=[voxels_i]
Voxel means over samples, per subject.
rho2 : array, shape=[subjects]
Noise variance :math:`\\rho^2` per subject.
trace_xtx : array, shape=[subjects]
The squared Frobenius norm of the demeaned data in `x`.
"""
x = []
mu = []
rho2 = np.zeros(subjects)
trace_xtx = np.zeros(subjects)
for subject in range(subjects):
rho2[subject] = 1
if data[subject] is not None:
mu.append(np.mean(data[subject], 1))
trace_xtx[subject] = np.sum(data[subject] ** 2)
x.append(data[subject] - mu[subject][:, np.newaxis])
else:
mu.append(None)
trace_xtx[subject] = 0
x.append(None)
return x, mu, rho2, trace_xtx
def _likelihood(self, chol_sigma_s_rhos, log_det_psi, chol_sigma_s,
trace_xt_invsigma2_x, inv_sigma_s_rhos, wt_invpsi_x,
samples):
"""Calculate the log-likelihood function
Parameters
----------
chol_sigma_s_rhos : array, shape=[features, features]
Cholesky factorization of the matrix (Sigma_S + sum_i(1/rho_i^2)
* I)
log_det_psi : float
Determinant of diagonal matrix Psi (containing the rho_i^2 value
voxels_i times).
chol_sigma_s : array, shape=[features, features]
Cholesky factorization of the matrix Sigma_S
trace_xt_invsigma2_x : float
Trace of :math:`\\sum_i (||X_i||_F^2/\\rho_i^2)`
inv_sigma_s_rhos : array, shape=[features, features]
Inverse of :math:`(\\Sigma_S + \\sum_i(1/\\rho_i^2) * I)`
wt_invpsi_x : array, shape=[features, samples]
samples : int
The total number of samples in the data.
Returns
-------
loglikehood : float
The log-likelihood value.
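        Notes
        -----
        Up to the additive constant noted in the code below, the value
        assembled from the arguments is
        .. math:: -\\frac{T}{2}\\left(\\log|\\Sigma_s^{-1} + \\rho_0 I|
            + \\log|\\Psi| + \\log|\\Sigma_s|\\right)
            - \\frac{1}{2}\\sum_i ||X_i||_F^2/\\rho_i^2
            + \\frac{1}{2}\\operatorname{tr}\\left(B^T(\\Sigma_s^{-1}
            + \\rho_0 I)^{-1}B\\right)
        where :math:`T` is `samples`, :math:`B` is `wt_invpsi_x`
        (:math:`\\sum_i W_i^T X_i / \\rho_i^2`), :math:`\\Psi` is the
        block-diagonal noise covariance whose log-determinant is
        `log_det_psi`, and :math:`\\rho_0 = \\sum_i 1/\\rho_i^2`.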
"""
log_det = (np.log(np.diag(chol_sigma_s_rhos) ** 2).sum() + log_det_psi
+ np.log(np.diag(chol_sigma_s) ** 2).sum())
loglikehood = -0.5 * samples * log_det - 0.5 * trace_xt_invsigma2_x
loglikehood += 0.5 * np.trace(
wt_invpsi_x.T.dot(inv_sigma_s_rhos).dot(wt_invpsi_x))
# + const --> -0.5*nTR*nvoxel*subjects*math.log(2*math.pi)
return loglikehood
@staticmethod
def _update_transform_subject(Xi, S):
"""Updates the mappings `W_i` for one subject.
Parameters
----------
Xi : array, shape=[voxels, timepoints]
The fMRI data :math:`X_i` for aligning the subject.
S : array, shape=[features, timepoints]
The shared response.
Returns
-------
Wi : array, shape=[voxels, features]
The orthogonal transform (mapping) :math:`W_i` for the subject.
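        Notes
        -----
        This is the closed-form solution of the orthogonal Procrustes problem
        .. math:: W_i = \\arg\\min_{W : W^T W = I} ||X_i - W S||_F^2,
        obtained from the SVD :math:`X_i S^T = U \\Lambda V^T` as
        :math:`W_i = U V^T`.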
"""
A = Xi.dot(S.T)
# Solve the Procrustes problem
U, _, V = np.linalg.svd(A, full_matrices=False)
return U.dot(V)
def transform_subject(self, X):
"""Transform a new subject using the existing model.
        The subject is assumed to have received equivalent stimulation.
Parameters
----------
X : 2D array, shape=[voxels, timepoints]
The fMRI data of the new subject.
Returns
-------
w : 2D array, shape=[voxels, features]
Orthogonal mapping `W_{new}` for new subject
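        Notes
        -----
        A sketch of the intended use, assuming a model ``srm`` that has
        already been fitted (shapes are illustrative only)::
            new_subj = np.random.randn(80, 40)       # voxel count may differ
            w_new = srm.transform_subject(new_subj)  # TRs must match srm.s_
            s_new = w_new.T.dot(new_subj)            # project to shared space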
"""
# Check if the model exist
if hasattr(self, 'w_') is False:
raise NotFittedError("The model fit has not been run yet.")
# Check the number of TRs in the subject
if X.shape[1] != self.s_.shape[1]:
            raise ValueError("The number of timepoints (TRs) does not match "
                             "the one in the model.")
w = self._update_transform_subject(X, self.s_)
return w
def _srm(self, data):
"""Expectation-Maximization algorithm for fitting the probabilistic SRM.
Parameters
----------
data : list of 2D arrays, element i has shape=[voxels_i, samples]
Each element in the list contains the fMRI data of one subject.
Returns
-------
sigma_s : array, shape=[features, features]
The covariance :math:`\\Sigma_s` of the shared response Normal
distribution.
w : list of array, element i has shape=[voxels_i, features]
The orthogonal transforms (mappings) :math:`W_i` for each subject.
mu : list of array, element i has shape=[voxels_i]
The voxel means :math:`\\mu_i` over the samples for each subject.
rho2 : array, shape=[subjects]
The estimated noise variance :math:`\\rho_i^2` for each subject
s : array, shape=[features, samples]
The shared response.
"""
samples = min([d.shape[1] for d in data if d is not None],
default=sys.maxsize)
subjects = len(data)
self.random_state_ = np.random.RandomState(self.rand_seed)
        random_states = [
            np.random.RandomState(self.random_state_.randint(2 ** 32))
            for i in range(len(data))]
# Initialization step: initialize the outputs with initial values,
# voxels with the number of voxels in each subject, and trace_xtx with
# the ||X_i||_F^2 of each subject.
w, voxels = _init_w_transforms(data, self.features, random_states)
x, mu, rho2, trace_xtx = self._init_structures(data, subjects)
shared_response = np.zeros((self.features, samples))
sigma_s = np.identity(self.features)
        # Main loop of the algorithm
for iteration in range(self.n_iter):
logger.info('Iteration %d' % (iteration + 1))
# E-step:
# Sum the inverted the rho2 elements for computing W^T * Psi^-1 * W
rho0 = (1 / rho2).sum()
# Invert Sigma_s using Cholesky factorization
(chol_sigma_s, lower_sigma_s) = scipy.linalg.cho_factor(
sigma_s, check_finite=False)
inv_sigma_s = scipy.linalg.cho_solve(
(chol_sigma_s, lower_sigma_s), np.identity(self.features),
check_finite=False)
# Invert (Sigma_s + rho_0 * I) using Cholesky factorization
sigma_s_rhos = inv_sigma_s + np.identity(self.features) * rho0
chol_sigma_s_rhos, lower_sigma_s_rhos = \
scipy.linalg.cho_factor(sigma_s_rhos,
check_finite=False)
inv_sigma_s_rhos = scipy.linalg.cho_solve(
(chol_sigma_s_rhos, lower_sigma_s_rhos),
np.identity(self.features), check_finite=False)
# Compute the sum of W_i^T * rho_i^-2 * X_i, and the sum of traces
# of X_i^T * rho_i^-2 * X_i
wt_invpsi_x = np.zeros((self.features, samples))
trace_xt_invsigma2_x = 0.0
for subject in range(subjects):
if data[subject] is not None:
wt_invpsi_x += (w[subject].T.dot(x[subject])) / rho2[subject]
trace_xt_invsigma2_x += trace_xtx[subject] / rho2[subject]
log_det_psi = np.sum(np.log(rho2) * voxels)
# Update the shared response
shared_response = sigma_s.dot(
np.identity(self.features) - rho0 * inv_sigma_s_rhos).dot(
wt_invpsi_x)
# M-step
# Update Sigma_s and compute its trace
sigma_s = (inv_sigma_s_rhos
+ shared_response.dot(shared_response.T) / samples)
trace_sigma_s = samples * np.trace(sigma_s)
# Update each subject's mapping transform W_i and error variance
# rho_i^2
for subject in range(subjects):
if x[subject] is not None:
a_subject = x[subject].dot(shared_response.T)
perturbation = np.zeros(a_subject.shape)
np.fill_diagonal(perturbation, 0.001)
u_subject, s_subject, v_subject = np.linalg.svd(
a_subject + perturbation, full_matrices=False)
w[subject] = u_subject.dot(v_subject)
rho2[subject] = trace_xtx[subject]
rho2[subject] += -2 * np.sum(w[subject] * a_subject).sum()
rho2[subject] += trace_sigma_s
rho2[subject] /= samples * voxels[subject]
else:
rho2[subject] = 0
if logger.isEnabledFor(logging.INFO):
# Calculate and log the current log-likelihood for checking
# convergence
loglike = self._likelihood(
chol_sigma_s_rhos, log_det_psi, chol_sigma_s,
trace_xt_invsigma2_x, inv_sigma_s_rhos, wt_invpsi_x,
samples)
logger.info('Objective function %f' % loglike)
return sigma_s, w, mu, rho2, shared_response
class DetSRM(BaseEstimator, TransformerMixin):
"""Deterministic Shared Response Model (DetSRM)
Given multi-subject data, factorize it as a shared response S among all
subjects and an orthogonal transform W per subject:
.. math:: X_i \\approx W_i S, \\forall i=1 \\dots N
Parameters
----------
n_iter : int, default: 10
Number of iterations to run the algorithm.
features : int, default: 50
Number of features to compute.
rand_seed : int, default: 0
Seed for initializing the random number generator.
Attributes
----------
w_ : list of array, element i has shape=[voxels_i, features]
The orthogonal transforms (mappings) for each subject.
s_ : array, shape=[features, samples]
The shared response.
random_state_: `RandomState`
Random number generator initialized using rand_seed
Note
----
The number of voxels may be different between subjects. However, the
number of samples must be the same across subjects.
The Deterministic Shared Response Model is approximated using the
Block Coordinate Descent (BCD) algorithm proposed in [Chen2015]_.
This is a single node version.
The run-time complexity is :math:`O(I (V T K + V K^2))` and the memory
complexity is :math:`O(V T)` with I - the number of iterations, V - the
sum of voxels from all subjects, T - the number of samples, K - the
number of features (typically, :math:`V \\gg T \\gg K`), and N - the
number of subjects.
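    Examples
    --------
    The interface mirrors :class:`SRM`; a minimal sketch on synthetic data
    (arbitrary shapes, for illustration only; import path assumed from this
    module's location)::
        import numpy as np
        from nltools.external.srm import DetSRM
        train = [np.random.randn(100, 40) for _ in range(3)]
        model = DetSRM(n_iter=5, features=10)
        model = model.fit(train)
        shared = model.transform(train)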
"""
def __init__(self, n_iter=10, features=50, rand_seed=0):
self.n_iter = n_iter
self.features = features
self.rand_seed = rand_seed
return
def fit(self, X, y=None):
"""Compute the Deterministic Shared Response Model
Parameters
----------
X : list of 2D arrays, element i has shape=[voxels_i, samples]
Each element in the list contains the fMRI data of one subject.
y : not used
"""
logger.info('Starting Deterministic SRM')
# Check the number of subjects
if len(X) <= 1:
raise ValueError("There are not enough subjects "
"({0:d}) to train the model.".format(len(X)))
# Check for input data sizes
if X[0].shape[1] < self.features:
raise ValueError(
"There are not enough samples to train the model with "
"{0:d} features.".format(self.features))
# Check if all subjects have same number of TRs
number_trs = X[0].shape[1]
number_subjects = len(X)
for subject in range(number_subjects):
assert_all_finite(X[subject])
if X[subject].shape[1] != number_trs:
raise ValueError("Different number of samples between subjects"
".")
# Run SRM
self.w_, self.s_ = self._srm(X)
return self
def transform(self, X, y=None):
"""Use the model to transform data to the Shared Response subspace
Parameters
----------
X : list of 2D arrays, element i has shape=[voxels_i, samples_i]
Each element in the list contains the fMRI data of one subject.
y : not used
Returns
-------
s : list of 2D arrays, element i has shape=[features_i, samples_i]
Shared responses from input data (X)
"""
# Check if the model exist
if hasattr(self, 'w_') is False:
raise NotFittedError("The model fit has not been run yet.")
# Check the number of subjects
if len(X) != len(self.w_):
raise ValueError("The number of subjects does not match the one"
" in the model.")
s = [None] * len(X)
for subject in range(len(X)):
s[subject] = self.w_[subject].T.dot(X[subject])
return s
def _objective_function(self, data, w, s):
"""Calculate the objective function
Parameters
----------
data : list of 2D arrays, element i has shape=[voxels_i, samples]
Each element in the list contains the fMRI data of one subject.
w : list of 2D arrays, element i has shape=[voxels_i, features]
The orthogonal transforms (mappings) :math:`W_i` for each subject.
s : array, shape=[features, samples]
The shared response
Returns
-------
objective : float
The objective function value.
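        Notes
        -----
        The value computed below is
        .. math:: \\frac{1}{2T}\\sum_{i=1}^{N} ||X_i - W_i S||_F^2
        where :math:`T` is the number of samples, ``data[0].shape[1]``.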
"""
subjects = len(data)
objective = 0.0
for m in range(subjects):
objective += \
np.linalg.norm(data[m] - w[m].dot(s), 'fro')**2
return objective * 0.5 / data[0].shape[1]
def _compute_shared_response(self, data, w):
""" Compute the shared response S
Parameters
----------
data : list of 2D arrays, element i has shape=[voxels_i, samples]
Each element in the list contains the fMRI data of one subject.
w : list of 2D arrays, element i has shape=[voxels_i, features]
The orthogonal transforms (mappings) :math:`W_i` for each subject.
Returns
-------
s : array, shape=[features, samples]
The shared response for the subjects data with the mappings in w.
"""
s = np.zeros((w[0].shape[1], data[0].shape[1]))
for m in range(len(w)):
s = s + w[m].T.dot(data[m])
s /= len(w)
return s
@staticmethod
def _update_transform_subject(Xi, S):
"""Updates the mappings `W_i` for one subject.
Parameters
----------
Xi : array, shape=[voxels, timepoints]
The fMRI data :math:`X_i` for aligning the subject.
S : array, shape=[features, timepoints]
The shared response.
Returns
-------
Wi : array, shape=[voxels, features]
The orthogonal transform (mapping) :math:`W_i` for the subject.
"""
A = Xi.dot(S.T)
# Solve the Procrustes problem
U, _, V = np.linalg.svd(A, full_matrices=False)
return U.dot(V)
def transform_subject(self, X):
"""Transform a new subject using the existing model.
        The subject is assumed to have received equivalent stimulation.
Parameters
----------
X : 2D array, shape=[voxels, timepoints]
The fMRI data of the new subject.
Returns
-------
w : 2D array, shape=[voxels, features]
Orthogonal mapping `W_{new}` for new subject
"""
# Check if the model exist
if hasattr(self, 'w_') is False:
raise NotFittedError("The model fit has not been run yet.")
# Check the number of TRs in the subject
if X.shape[1] != self.s_.shape[1]:
            raise ValueError("The number of timepoints (TRs) does not match "
                             "the one in the model.")
w = self._update_transform_subject(X, self.s_)
return w
def _srm(self, data):
"""Expectation-Maximization algorithm for fitting the probabilistic SRM.
Parameters
----------
data : list of 2D arrays, element i has shape=[voxels_i, samples]
Each element in the list contains the fMRI data of one subject.
Returns
-------
w : list of array, element i has shape=[voxels_i, features]
The orthogonal transforms (mappings) :math:`W_i` for each subject.
s : array, shape=[features, samples]
The shared response.
"""
subjects = len(data)
self.random_state_ = np.random.RandomState(self.rand_seed)
random_states = [
np.random.RandomState(self.random_state_.randint(2 ** 32))
for i in range(len(data))]
# Initialization step: initialize the outputs with initial values,
# voxels with the number of voxels in each subject.
w, _ = _init_w_transforms(data, self.features, random_states)
shared_response = self._compute_shared_response(data, w)
if logger.isEnabledFor(logging.INFO):
# Calculate the current objective function value
objective = self._objective_function(data, w, shared_response)
logger.info('Objective function %f' % objective)
# Main loop of the algorithm
for iteration in range(self.n_iter):
logger.info('Iteration %d' % (iteration + 1))
# Update each subject's mapping transform W_i:
for subject in range(subjects):
a_subject = data[subject].dot(shared_response.T)
perturbation = np.zeros(a_subject.shape)
np.fill_diagonal(perturbation, 0.001)
u_subject, _, v_subject = np.linalg.svd(
a_subject + perturbation, full_matrices=False)
w[subject] = u_subject.dot(v_subject)
# Update the shared response:
shared_response = self._compute_shared_response(data, w)
if logger.isEnabledFor(logging.INFO):
# Calculate the current objective function value
objective = self._objective_function(data, w, shared_response)
logger.info('Objective function %f' % objective)
return w, shared_response
| mit |
alexeyum/scikit-learn | examples/cluster/plot_segmentation_toy.py | 91 | 3522 | """
===========================================
Spectral clustering for image segmentation
===========================================
In this example, an image with connected circles is generated and
spectral clustering is used to separate the circles.
In these settings, the :ref:`spectral_clustering` approach solves the problem
known as 'normalized graph cuts': the image is seen as a graph of
connected voxels, and the spectral clustering algorithm amounts to
choosing graph cuts defining regions while minimizing the ratio of the
gradient along the cut, and the volume of the region.
As the algorithm tries to balance the volume (i.e. balance the region
sizes), if we take circles with different sizes, the segmentation fails.
In addition, as there is no useful information in the intensity of the image,
or its gradient, we choose to perform the spectral clustering on a graph
that is only weakly informed by the gradient. This is close to performing
a Voronoi partition of the graph.
In addition, we use the mask of the objects to restrict the graph to the
outline of the objects. In this example, we are interested in
separating the objects one from the other, and not from the background.
"""
print(__doc__)
# Authors: Emmanuelle Gouillart <[email protected]>
# Gael Varoquaux <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.feature_extraction import image
from sklearn.cluster import spectral_clustering
###############################################################################
l = 100
x, y = np.indices((l, l))
center1 = (28, 24)
center2 = (40, 50)
center3 = (67, 58)
center4 = (24, 70)
radius1, radius2, radius3, radius4 = 16, 14, 15, 14
circle1 = (x - center1[0]) ** 2 + (y - center1[1]) ** 2 < radius1 ** 2
circle2 = (x - center2[0]) ** 2 + (y - center2[1]) ** 2 < radius2 ** 2
circle3 = (x - center3[0]) ** 2 + (y - center3[1]) ** 2 < radius3 ** 2
circle4 = (x - center4[0]) ** 2 + (y - center4[1]) ** 2 < radius4 ** 2
###############################################################################
# 4 circles
img = circle1 + circle2 + circle3 + circle4
# We use a mask that limits to the foreground: the problem that we are
# interested in here is not separating the objects from the background,
# but separating them one from the other.
mask = img.astype(bool)
img = img.astype(float)
img += 1 + 0.2 * np.random.randn(*img.shape)
# Convert the image into a graph with the value of the gradient on the
# edges.
graph = image.img_to_graph(img, mask=mask)
# Take a decreasing function of the gradient: we make it only weakly
# dependent on the gradient, so the segmentation is close to a Voronoi
graph.data = np.exp(-graph.data / graph.data.std())
# Force the solver to be arpack, since amg is numerically
# unstable on this example
labels = spectral_clustering(graph, n_clusters=4, eigen_solver='arpack')
label_im = -np.ones(mask.shape)
label_im[mask] = labels
plt.matshow(img)
plt.matshow(label_im)
###############################################################################
# 2 circles
img = circle1 + circle2
mask = img.astype(bool)
img = img.astype(float)
img += 1 + 0.2 * np.random.randn(*img.shape)
graph = image.img_to_graph(img, mask=mask)
graph.data = np.exp(-graph.data / graph.data.std())
labels = spectral_clustering(graph, n_clusters=2, eigen_solver='arpack')
label_im = -np.ones(mask.shape)
label_im[mask] = labels
plt.matshow(img)
plt.matshow(label_im)
plt.show()
| bsd-3-clause |
Eric89GXL/scikit-learn | examples/tree/plot_iris.py | 8 | 2161 | """
================================================================
Plot the decision surface of a decision tree on the iris dataset
================================================================
Plot the decision surface of a decision tree trained on pairs
of features of the iris dataset.
See :ref:`decision tree <tree>` for more information on the estimator.
For each pair of iris features, the decision tree learns decision
boundaries made of combinations of simple thresholding rules inferred from
the training samples.
"""
print(__doc__)
import numpy as np
import pylab as pl
from sklearn.datasets import load_iris
from sklearn.tree import DecisionTreeClassifier
# Parameters
n_classes = 3
plot_colors = "bry"
plot_step = 0.02
# Load data
iris = load_iris()
for pairidx, pair in enumerate([[0, 1], [0, 2], [0, 3],
[1, 2], [1, 3], [2, 3]]):
# We only take the two corresponding features
X = iris.data[:, pair]
y = iris.target
# Shuffle
idx = np.arange(X.shape[0])
np.random.seed(13)
np.random.shuffle(idx)
X = X[idx]
y = y[idx]
# Standardize
mean = X.mean(axis=0)
std = X.std(axis=0)
X = (X - mean) / std
# Train
clf = DecisionTreeClassifier().fit(X, y)
# Plot the decision boundary
pl.subplot(2, 3, pairidx + 1)
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),
np.arange(y_min, y_max, plot_step))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
cs = pl.contourf(xx, yy, Z, cmap=pl.cm.Paired)
pl.xlabel(iris.feature_names[pair[0]])
pl.ylabel(iris.feature_names[pair[1]])
pl.axis("tight")
# Plot the training points
for i, color in zip(range(n_classes), plot_colors):
idx = np.where(y == i)
pl.scatter(X[idx, 0], X[idx, 1], c=color, label=iris.target_names[i],
cmap=pl.cm.Paired)
pl.axis("tight")
pl.suptitle("Decision surface of a decision tree using paired features")
pl.legend()
pl.show()
| bsd-3-clause |
howardcheung/data-preprocessing-helper | src/gui_adv_main.py | 1 | 36011 | #!/usr/bin/python3
"""
This script file contains methods to define the graphical user interface
of the tool with basic setting for beginners and advanced setting for
flexibility
Author: Howard Cheung ([email protected])
Date: 2017/08/29
License of the source code: MIT license
"""
# import python internal modules
from calendar import monthrange
from datetime import datetime
from math import isnan
from ntpath import split
from os.path import isfile, dirname
from pathlib import Path
from traceback import format_exc
from webbrowser import open as webbrowseropen
# import third party modules
from pandas import ExcelFile
import wx
from wx import adv
# import user-defined modules
from data_read import read_data
from format_data import convert_df
# define global variables
DESCRIPTION = \
"""Data Preprocessing Helper is a software made to
help people to preprocess their data that contain ugly
features such as time-of-change values, blank values, etc
to nicely presented data with no blank values
"""
LICENSE = \
"""GNU GENERAL PUBLIC LICENSE Version 3
Data Preprocessing Helper
Copyright (C) 2017 Howard Cheung
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
For licenses of modules involved in the development of the software,
please visit <https://github.com/howardcheung/data-preprocessing-helper/>
"""
# classes for tabs
# Template from https://wiki.wxpython.org/Simple%20wx.Notebook%20Example
class BasicTab(wx.Panel):
"""
The first tab
"""
def __init__(self, parent, frame):
"""
        This is the initialization function for the tab.
Inputs:
==========
parent: wx.Notebook
parent object
        frame: wx.Frame
the parent frame object
"""
super(BasicTab, self).__init__(parent)
# define layer size
begin_depth = 40
layer_diff = 60 # fewer settings and hence more space
first_blk = 20
sec_blk = 250
third_blk = 525
# title
# position: (from top to bottom, from left to right)
# wx.StaticText(self, label=u''.join([
# u'Basic settings'
# ]), pos=(first_blk, begin_depth))
# Inputs to the data file path
layer_depth = begin_depth+layer_diff*0
text = wx.StaticText(
self, label=u'Data file path:',
pos=(first_blk, layer_depth+2)
)
# require additional object for textbox
# with default path
frame.dfpath = wx.TextCtrl(
self, value=u'../dat/time_of_change.csv',
pos=(sec_blk, layer_depth), size=(250, 20)
)
# load worksheet name choices for files with xls/ xlsx extension
# for self.sheetname ComboBox
frame.dfpath.Bind(wx.EVT_TEXT, frame.ChangeForXlsFile)
button = wx.Button(
self, label=u'Browse...', pos=(third_blk, layer_depth)
)
button.Bind(wx.EVT_BUTTON, frame.OnOpen)
layer_depth += layer_diff
# ask for existence of header as a checkbox
text = wx.StaticText(
self, label=u'Existence of a header row:',
pos=(first_blk, layer_depth+2)
)
frame.header = wx.CheckBox(self, pos=(sec_blk, layer_depth))
frame.header.SetValue(True)
# for the positioning of the header numeric function
wx.StaticText(
self, label=u''.join([
'Number of rows to be skipped\nabove the header row:'
]), pos=(third_blk-150, layer_depth), size=(150, 30)
)
frame.header_no = wx.SpinCtrl(
            self, value='0', min=0, max=100000,
pos=(third_blk+20, layer_depth),
size=(50, 20)
)
# add the dynamic information of the checkbox
frame.header.Bind(wx.EVT_CHECKBOX, frame.HeaderInput)
layer_depth += layer_diff
# Inputs to the directory to save the plots
text = wx.StaticText(
self, label=u'Path to save file:', pos=(first_blk, layer_depth+2)
)
# require additional object for textbox
# with default path
frame.newdfpath = wx.TextCtrl(
self, value=u'./example.csv',
pos=(sec_blk, layer_depth), size=(250, 20)
)
frame.newdfpath.Bind(wx.EVT_TEXT, frame.ChangeForXlsFileOutput)
button = wx.Button(
self, label=u'Browse...', pos=(third_blk, layer_depth)
)
button.Bind(wx.EVT_BUTTON, frame.SaveOpen)
layer_depth += layer_diff
# add start time input
text = wx.StaticText(self, label=u''.join([
u'Start time in the new data file:'
u'\n(inaccurate for too much extension)'
]), pos=(first_blk, layer_depth+2))
# create spin control for the date and time
frame.start_yr = wx.SpinCtrl(
self, value='2017', min=1, max=4000,
pos=(sec_blk, layer_depth), size=(55, 20)
)
text = wx.StaticText(self, label=u''.join([
u'Year'
]), pos=(sec_blk, layer_depth+20))
# reset the last day of the month if needed
frame.start_yr.Bind(wx.EVT_COMBOBOX, frame.ChangeStartDayLimit)
frame.start_mon = wx.ComboBox(
self, pos=(sec_blk+70, layer_depth),
choices=[str(ind) for ind in range(1, 13)], size=(50, 20)
)
frame.start_mon.SetValue('1')
# reset the last day of the month if needed
frame.start_mon.Bind(wx.EVT_COMBOBOX, frame.ChangeStartDayLimit)
frame.start_mon.SetEditable(False)
text = wx.StaticText(self, label=u''.join([
u'Month'
]), pos=(sec_blk+70, layer_depth+20))
frame.start_day = wx.ComboBox(
self, pos=(sec_blk+70*2, layer_depth),
choices=[str(ind) for ind in range(1, 32)], size=(50, 20)
)
frame.start_day.SetValue('1')
frame.start_day.SetEditable(False)
text = wx.StaticText(self, label=u''.join([
u'Day'
]), pos=(sec_blk+70*2, layer_depth+20))
frame.start_hr = wx.ComboBox(
self, pos=(sec_blk+70*3, layer_depth),
choices=['%02i' % ind for ind in range(24)], size=(50, 20)
)
frame.start_hr.SetValue('00')
frame.start_hr.SetEditable(False)
text = wx.StaticText(self, label=u''.join([
u'Hour'
]), pos=(sec_blk+70*3, layer_depth+20))
frame.start_min = wx.ComboBox(
self, pos=(sec_blk+70*4, layer_depth),
choices=['%02i' % ind for ind in range(60)], size=(50, 20)
)
frame.start_min.SetValue('00')
frame.start_min.SetEditable(False)
text = wx.StaticText(self, label=u''.join([
u'Minutes'
]), pos=(sec_blk+70*4, layer_depth+20))
frame.use_starttime = wx.CheckBox(
self, pos=(sec_blk+70*5, layer_depth+2)
)
frame.use_starttime.SetValue(True)
wx.StaticText(
self, label=u'Use file\nstarting time',
pos=(sec_blk+70*5, layer_depth+20),
size=(75, 30)
) # set size to (75,30) to push the right border for space in panel
layer_depth += (layer_diff+20)
# add ending time input
text = wx.StaticText(self, label=u''.join([
u'Ending time in the new data file:',
u'\n(inaccurate for too much extension)'
]), pos=(first_blk, layer_depth+2))
# create spin control for the date and time
frame.end_yr = wx.SpinCtrl(
self, value='2017', min=1, max=4000,
pos=(sec_blk, layer_depth), size=(55, 20)
)
text = wx.StaticText(self, label=u''.join([
u'Year'
]), pos=(sec_blk, layer_depth+20))
# reset the last day of the month if needed
frame.end_yr.Bind(wx.EVT_COMBOBOX, frame.ChangeEndDayLimit)
frame.end_mon = wx.ComboBox(
self, pos=(sec_blk+70, layer_depth),
choices=[str(ind) for ind in range(1, 13)], size=(50, 20)
)
frame.end_mon.SetValue('12')
# reset the last day of the month if needed
frame.end_mon.Bind(wx.EVT_COMBOBOX, frame.ChangeEndDayLimit)
frame.end_mon.SetEditable(False)
text = wx.StaticText(self, label=u''.join([
u'Month'
]), pos=(sec_blk+70, layer_depth+20))
frame.end_day = wx.ComboBox(
self, pos=(sec_blk+70*2, layer_depth),
choices=[str(ind) for ind in range(1, 32)], size=(50, 20)
)
frame.end_day.SetValue('31')
frame.end_day.SetEditable(False)
text = wx.StaticText(self, label=u''.join([
u'Day'
]), pos=(sec_blk+70*2, layer_depth+20))
frame.end_hr = wx.ComboBox(
self, pos=(sec_blk+70*3, layer_depth),
choices=['%02i' % ind for ind in range(24)], size=(50, 20)
)
frame.end_hr.SetValue('23')
frame.end_hr.SetEditable(False)
text = wx.StaticText(self, label=u''.join([
u'Hour'
]), pos=(sec_blk+70*3, layer_depth+20))
frame.end_min = wx.ComboBox(
self, pos=(sec_blk+70*4, layer_depth),
choices=['%02i' % ind for ind in range(60)], size=(50, 20)
)
frame.end_min.SetValue('59')
frame.end_min.SetEditable(False)
text = wx.StaticText(self, label=u''.join([
u'Minutes'
]), pos=(sec_blk+70*4, layer_depth+20))
frame.no_endtime = wx.CheckBox(self, pos=(sec_blk+70*5, layer_depth+2))
frame.no_endtime.SetValue(True)
wx.StaticText(
self, label=u'Autogen\nending time',
pos=(sec_blk+70*5, layer_depth+20)
)
layer_depth += (layer_diff+20)
# add fixed interval input
text = wx.StaticText(
self, label=u'New time interval in the output file:',
pos=(first_blk, layer_depth+2)
)
frame.time_int = wx.SpinCtrl(
self, value='10', min=1, max=31*24*60, # max: approx. 1 month
pos=(sec_blk, layer_depth), size=(50, 20)
)
text = wx.StaticText(
self, label=u'minutes',
pos=(sec_blk+50+10, layer_depth+2)
)
layer_depth += layer_diff
class AdvancedTab(wx.Panel):
"""
The second tab
"""
def __init__(self, parent, frame):
"""
        This is the initialization function for the tab.
Inputs:
==========
        parent: wx.Notebook
parent object
        frame: wx.Frame
the parent frame object
"""
super(AdvancedTab, self).__init__(parent)
# define layer size
begin_depth = 40
layer_diff = 40
first_blk = 20
sec_blk = 250
third_blk = 525
# title
# position: (from top to bottom, from left to right)
# wx.StaticText(self, label=u''.join([
# u'Advanced settings'
# ]), pos=(first_blk, begin_depth))
# option to select sheet, if any, and choose if all sheets
# should be loaded
layer_depth = begin_depth+layer_diff*0
wx.StaticText(self, label=u''.join([
u'For xls/xlsx input files only:'
]), pos=(first_blk, layer_depth))
layer_depth = layer_depth+layer_diff*0.75
wx.StaticText(self, label=u''.join([
u'Choose a worksheet to load\n',
u'for xls/xlsx input file:'
]), pos=(first_blk, layer_depth-5))
frame.sheetname = wx.ComboBox(
self, pos=(sec_blk, layer_depth), size=(100, 30)
)
frame.sheetname.Enable(False)
wx.StaticText(self, label=u''.join([
u'Load all worksheets with the\n',
u'same config for xls/xlsx input file:'
]), pos=(third_blk-150, layer_depth-5))
frame.loadallsheets = wx.CheckBox(
self, pos=(third_blk+55, layer_depth)
)
frame.loadallsheets.SetValue(False)
if 'xls' not in get_ext(frame.dfpath.GetValue()):
frame.loadallsheets.Enable(False)
# check if anything needs to be changed after
# checking/unchecking the box
frame.loadallsheets.Bind(wx.EVT_CHECKBOX, frame.LoadAllSheets)
layer_depth += layer_diff
# Separator of output file
# can be any string, but provide the choices
layer_depth += layer_diff/2.0
wx.StaticText(self, label=u''.join([
u'For csv output files only:'
]), pos=(first_blk, layer_depth))
layer_depth = layer_depth+layer_diff/2.0
wx.StaticText(
self, label=u''.join([
u'Separator of the output file:'
]), pos=(first_blk, layer_depth)
)
# do not use unicode here
frame.output_sep = wx.ComboBox(
self, value=',', choices=[';', ','],
pos=(sec_blk, layer_depth), size=(50, 20)
)
layer_depth += layer_diff
# Inputs to the format time string
layer_depth += layer_diff/2.0
wx.StaticText(self, label=u''.join([
u'Output/Input time string in the first column of the data file:'
]), pos=(first_blk, layer_depth))
layer_depth = layer_depth+layer_diff/2.0
text = wx.StaticText(self, label=u'\n'.join([
u'Format of time string', u'in the output file',
u'(invalid for xls/xlsx file output):'
]), pos=(first_blk, layer_depth+2))
# # require additional object for textbox
frame.outputtimestring = wx.TextCtrl(
self, value=u'%Y/%m/%d %H:%M:%S',
pos=(sec_blk, layer_depth), size=(250, 20)
)
frame.outputtimestring.Enable(
get_ext(frame.dfpath.GetValue()) == 'csv'
)
# a button for instructions
button = wx.Button(
self,
label=u'Instructions to enter the format of the time string',
pos=(sec_blk, layer_depth+layer_diff/2.0)
)
button.Bind(wx.EVT_BUTTON, frame.TimeInstruct)
layer_depth += (layer_diff+layer_diff/2.0)
# other output format
wx.StaticText(self, label=u'\n'.join([
u'or output the time as values of ',
]), pos=(first_blk, layer_depth+2))
frame.numtimeoutput = wx.ComboBox(self, value='None', choices=[
'None', 'seconds', 'minutes', 'hours', 'days'
], pos=(sec_blk, layer_depth), size=(70, 20))
frame.numtimeoutput.SetEditable(False)
frame.numtimeoutput.Bind(wx.EVT_COMBOBOX, frame.ChangeOptionForNum)
wx.StaticText(self, label=u'\n'.join([
u'from the user-defined start time',
]), pos=(sec_blk+80, layer_depth+2))
layer_depth += (layer_diff)
# Inputs to the format time string
text = wx.StaticText(self, label=u'\n'.join([
u'Format of time string in the first',
u'column of the input file:'
]), pos=(first_blk, layer_depth+2))
# require additional object for textbox
frame.timestring = wx.TextCtrl(
self, value=u'%m/%d/%y %I:%M:%S %p CST',
pos=(sec_blk, layer_depth), size=(250, 20)
)
# Computer to auto-detect the format instead
frame.autotimeinputformat = wx.CheckBox(
self, pos=(sec_blk+70*4, layer_depth+2)
)
frame.autotimeinputformat.SetValue(True)
wx.StaticText(
self, label=u'Auto-detecting format\nof input time string',
pos=(sec_blk+70*4, layer_depth+20)
)
frame.autotimeinputformat.Bind(
wx.EVT_CHECKBOX, frame.GreyOutInputTimeString
)
frame.timestring.Enable(False) # disable the option
# a button for instructions
text = wx.StaticText(self, label=u''.join([
u'Same instructions as the format in output file'
]), pos=(sec_blk, layer_depth+20))
layer_depth += (layer_diff+20)
# Relationship between time series data points
layer_depth += layer_diff/2.0
wx.StaticText(self, label=u''.join([
u'Assumptions on data structure in the input file:'
]), pos=(first_blk, layer_depth))
layer_depth = layer_depth+layer_diff/2.0
wx.StaticText(
self, label=u'Assumption between data points:',
pos=(first_blk, layer_depth+2)
)
frame.func_choice = wx.ComboBox(
self, value=u'Step Function',
choices=[
u'Step Function',
u'Continuous variable (inter- and extrapolation)'
], pos=(sec_blk, layer_depth), size=(225, 20)
)
frame.func_choice.SetSelection(0)
frame.func_choice.SetEditable(False)
layer_depth += layer_diff
# Assumptions on extrapolation
wx.StaticText(
self, label=u''.join([
u'Assumptions for data points\n',
u'earlier than the existing data:'
]), pos=(first_blk, layer_depth+2)
)
frame.early_pts = wx.ComboBox(
self, value=u'Use the minimum value in the trend',
choices=[
u'Use the minimum value in the trend',
u'Use the first value in the trend',
u'Use blanks'
], pos=(sec_blk, layer_depth), size=(225, 20)
)
frame.early_pts.SetSelection(2)
frame.early_pts.SetEditable(False)
layer_depth += layer_diff
class MainFrame(wx.Frame):
"""
Frame holding the tabs
"""
def __init__(self, parent, title):
"""
        This is the initialization function for the GUI.
Inputs:
==========
parent: wx.Frame
parent object
title: str
title of the window
"""
super(MainFrame, self).__init__(
parent, title=title, size=(720, 770),
style=wx.DEFAULT_FRAME_STYLE ^ wx.RESIZE_BORDER
) # size of the application window
# Here we create a panel and a notebook on the panel
p = wx.Panel(self)
# create menubar for About information
menubar = wx.MenuBar()
helpm = wx.Menu()
abti = wx.MenuItem(helpm, wx.ID_ABOUT, '&About', 'Program Information')
self.Bind(wx.EVT_MENU, self.AboutDialog, abti)
helpm.Append(abti)
menubar.Append(helpm, '&Help')
self.SetMenuBar(menubar)
nb = wx.Notebook(p)
# create the page windows as children of the notebook
# pass the frame into it to make the frame event functions available
page1 = BasicTab(nb, frame=self)
page2 = AdvancedTab(nb, frame=self)
# add the pages to the notebook with the label to show on the tab
nb.AddPage(page1, "Basic settings")
nb.AddPage(page2, "Advanced settings")
# create button
button_ok = wx.Button(p, label=u'Preprocess', size=(100, 30))
button_ok.Bind(wx.EVT_BUTTON, self.Analyzer)
# finally, put the notebook in a sizer for the panel to manage
# the layout
sizer = wx.BoxSizer(wx.VERTICAL)
sizer.Add(nb, 15, 0, border=10) # 25 for space under the advanced tab
sizer.Add(button_ok, 1, wx.ALIGN_RIGHT | wx.RIGHT | wx.TOP | wx.BOTTOM, border=5)
sizer.SetSizeHints(self)
self.SetSizerAndFit(sizer)
self.Centre()
# define all event functions here
def AboutDialog(self, evt):
"""
Function to show the dialog box for About Information
"""
info = adv.AboutDialogInfo()
info.SetName('Data Preprocessing Helper')
info.SetVersion('0.3.8')
info.SetDescription(DESCRIPTION)
info.SetCopyright('(C) Copyright 2017 Howard Cheung')
info.SetWebSite(
'https://github.com/howardcheung/data-preprocessing-helper/'
)
info.SetLicence(LICENSE)
info.AddDeveloper('Howard Cheung [howard.at (at) gmail.com]')
adv.AboutBox(info)
def OnClose(self, evt):
"""
Function to close the main window
"""
self.Close(True)
evt.Skip()
def OnOpen(self, evt):
"""
Function to open a file
Reference:
https://wxpython.org/Phoenix/docs/html/wx.FileDialog.html
"""
        # ask the user which file to open
openFileDialog = wx.FileDialog(
self, 'Open file', '', '',
''.join([
'csv files (*.csv)|*.csv;|',
'xls files (*.xls)|*.xls;|',
'xlsx files (*.xlsx)|*.xlsx'
]), wx.FD_OPEN | wx.FD_FILE_MUST_EXIST
)
if openFileDialog.ShowModal() == wx.ID_CANCEL:
return False # the user changed idea...
# proceed loading the file chosen by the user
# this can be done with e.g. wxPython input streams:
filepath = openFileDialog.GetPath()
self.dfpath.SetValue(filepath)
# check if file exists
if not isfile(filepath):
wx.LogError('Cannot open file "%s".' % openFileDialog.GetPath())
return False
# load worksheet name choices for files with xls/ xlsx extension
# for self.sheetname ComboBox
self.ChangeForXlsFile(evt)
def ChangeForXlsFile(self, evt):
"""
Change options if the input file is an excel file
"""
# load worksheet name choices for files with xls/ xlsx extension
# for self.sheetname ComboBox
filepath = self.dfpath.GetValue()
ext = get_ext(filepath)
if ext == 'xls' or ext == 'xlsx':
self.loadallsheets.Enable(True)
if not self.loadallsheets.GetValue():
self.sheetname.Enable(True)
try: # the file may not exist
with ExcelFile(filepath) as xlsx:
sheetnames = xlsx.sheet_names
self.sheetname.SetItems(sheetnames)
self.sheetname.SetValue(sheetnames[0])
except FileNotFoundError:
pass
else:
self.loadallsheets.Enable(False)
self.loadallsheets.SetValue(False) # reset loading all worksheets
self.sheetname.Enable(False)
def HeaderInput(self, evt):
"""
        Function to allow input of file header information if the
existence of a header is confirmed
"""
if evt.IsChecked():
self.header_no.Enable(True)
else:
self.header_no.Enable(False)
def LoadAllSheets(self, evt):
"""
        Enable or disable the worksheet selection depending on whether
        the 'load all worksheets' option is checked
"""
if self.loadallsheets.GetValue():
self.sheetname.Enable(False)
else:
self.sheetname.Enable(True)
def SaveOpen(self, evt):
"""
Function to open a file
Reference:
https://wxpython.org/Phoenix/docs/html/wx.DirDialog.html
"""
        # ask the user where to save the file
openFileDialog = wx.FileDialog(
self, 'Open file', '', '',
''.join([
'csv files (*.csv)|*.csv;|',
'xls files (*.xls)|*.xls;|',
'xlsx files (*.xlsx)|*.xlsx'
]), wx.FD_SAVE
)
if openFileDialog.ShowModal() == wx.ID_CANCEL:
return False # the user changed idea...
# proceed saving the file chosen by the user
# this can be done with e.g. wxPython input streams:
filepath = openFileDialog.GetPath()
self.newdfpath.SetValue(filepath)
# change GUI as needed
self.ChangeForXlsFileOutput(evt)
def ChangeForXlsFileOutput(self, evt):
"""
Change options if the output file is an excel file
"""
filepath = self.newdfpath.GetValue()
ext = get_ext(filepath)
if ext == 'xls' or ext == 'xlsx':
# no longer need output time string setting
self.outputtimestring.Enable(False)
else:
if self.numtimeoutput.GetValue() == 'None':
self.outputtimestring.Enable(True)
def TimeInstruct(self, evt):
"""
Function to open instructions for time string
"""
webbrowseropen(
u''.join([
u'https://docs.python.org/3.5/library/datetime.html',
u'#strftime-and-strptime-behavior'
])
)
def ChangeOptionForNum(self, evt):
"""
Change options for number values
"""
if self.numtimeoutput.GetValue() != 'None':
self.outputtimestring.Enable(False)
else:
self.ChangeForXlsFileOutput(evt)
def GreyOutInputTimeString(self, evt):
"""
Grey out input time string when the auto-detection option is
selected
"""
if self.autotimeinputformat.GetValue():
self.timestring.Enable(False)
else:
self.timestring.Enable(True)
def ChangeStartDayLimit(self, evt):
"""
Function to change the limit of the starting day selection
based on the selection of the year and the month
"""
# find the number of days based on the current configuration
lastday = monthrange(
int(self.start_yr.GetValue()), int(self.start_mon.GetValue())
)[1]
# check the current selection. Set it to the last day of the month
# if the current selection exceed the new last day
if int(self.start_day.GetValue()) > lastday:
self.start_day.SetValue(str(lastday))
# add days to fit monthrange
while self.start_day.GetCount() < lastday:
self.start_day.Append(str(self.start_day.GetCount()+1))
# remove days to fit monthrange
while self.start_day.GetCount() > lastday:
self.start_day.Delete(self.start_day.GetCount()-1)
evt.Skip()
def ChangeEndDayLimit(self, evt):
"""
        Function to change the limit of the ending day selection
based on the selection of the year and the month
"""
# find the number of days based on the current configuration
lastday = monthrange(
int(self.end_yr.GetValue()), int(self.end_mon.GetValue())
)[1]
# check the current selection. Set it to the last day of the month
# if the current selection exceed the new last day
if int(self.end_day.GetValue()) > lastday:
self.end_day.SetValue(str(lastday))
# add days to fit monthrange
while self.end_day.GetCount() < lastday:
self.end_day.Append(str(self.end_day.GetCount()+1))
# remove days to fit monthrange
while self.end_day.GetCount() > lastday:
self.end_day.Delete(self.end_day.GetCount()-1)
evt.Skip()
def Analyzer(self, evt):
"""
Function to initiate the main analysis.
"""
# check all required inputs
# check the existence of the folder
if not isfile(self.dfpath.GetValue()):
wx.MessageBox(
u'Cannot open the data file!', u'Error',
wx.OK | wx.ICON_INFORMATION
)
return
# check the existence of the saving path
if not Path(dirname(self.newdfpath.GetValue())).exists():
box = wx.MessageDialog(
self, u'Saving directory does not exist!', u'Error',
wx.OK | wx.ICON_INFORMATION
)
box.Fit()
box.ShowModal()
return
# check file type
ext = get_ext(self.newdfpath.GetValue())
if not (ext == 'csv' or ext == 'xls' or ext == 'xlsx'):
box = wx.MessageDialog(
self, u'Output file type not supported!', u'Error',
wx.OK | wx.ICON_INFORMATION
)
box.Fit()
box.ShowModal()
return
# check the time
start_time = datetime(
int(self.start_yr.GetValue()), int(self.start_mon.GetValue()),
int(self.start_day.GetValue()), int(self.start_hr.GetValue()),
int(self.start_min.GetValue())
)
end_time = datetime(
int(self.end_yr.GetValue()), int(self.end_mon.GetValue()),
int(self.end_day.GetValue()), int(self.end_hr.GetValue()),
int(self.end_min.GetValue())
)
if not self.no_endtime.GetValue() and start_time > end_time:
wx.MessageBox(
u'Starting time later than ending time!', u'Error',
wx.OK | wx.ICON_INFORMATION
)
return
# Run the analyzer
# output any error to a message box if needed
try:
header_exist = self.header.GetValue()
datadfs = read_data(
self.dfpath.GetValue(),
header=(self.header_no.GetValue() if header_exist else None),
time_format=self.timestring.GetValue(),
sheetnames=(
[] if self.loadallsheets.GetValue() else (
None
if get_ext(self.dfpath.GetValue()) == 'csv'
else [self.sheetname.GetValue()]
)
), dateautodetect=self.autotimeinputformat.GetValue()
)
# return error if load all sheet option is selected for csv file
# output
if get_ext(self.newdfpath.GetValue()) == 'csv' and \
self.loadallsheets.GetValue() and \
len(datadfs) > 1:
wx.MessageBox(
u'\n'.join([
u'Cannot output multiple worksheets to a csv file!',
u'Please output it as a xls or xlsx file!'
]), u'Error',
wx.OK | wx.ICON_INFORMATION
)
return
# show warning for columns that contain no valid data
for sheet_name in datadfs:
datadf = datadfs[sheet_name]
for col in datadf.columns:
if all([
isinstance(x, str) or isnan(x)
for x in datadf.loc[:, col]
]):
dlg = MessageDlg(''.join([
'Column ', col, ' in ', sheet_name,
' does not contain any valid values.',
' Closing in 2s......'
]), u'Warning')
wx.CallLater(2000, dlg.Destroy)
dlg.ShowModal()
convert_df(
datadfs, (None if self.use_starttime.GetValue() else start_time),
(None if self.no_endtime.GetValue() else end_time),
interval=int(self.time_int.GetValue())*60,
step=(True if self.func_choice.GetSelection() == 0 else False),
ini_val=self.early_pts.GetSelection()+1,
output_file=self.newdfpath.GetValue(),
sep=self.output_sep.GetValue(),
output_timestring=self.outputtimestring.GetValue(),
outputtimevalue=self.numtimeoutput.GetValue()
)
# function to be called upon finishing processing
wx.CallLater(0, self.ShowMessage)
evt.Skip()
except PermissionError: # file writing error
dlg = MessageDlg(''.join([
'Unable to write to the file "', self.newdfpath.GetValue(),
'"\n\n',
'Please close the file/ stop using the file and press '
'the Preprocess button again.\n\n'
]), u'File writing error')
dlg.ShowModal()
except BaseException:
# box = wx.MessageDialog(
# self, format_exc(), u'Error', wx.OK | wx.ICON_INFORMATION
# )
chgdep = ErrorReportingDialog(None)
chgdep.ShowModal()
chgdep.Destroy()
return
def ShowMessage(self):
"""
Function to show message about the completion of the analysis
"""
wx.MessageBox(
u'Processing Completed', u'Status', wx.OK | wx.ICON_INFORMATION
)
class MessageDlg(wx.Dialog):
"""
    Auto-closing message dialog
from https://stackoverflow.com/questions/6012380/wxmessagebox-with-an-auto-close-timer-in-wxpython
"""
def __init__(self, message, title):
"""
        Initializing a new dialog box that can be closed automatically
"""
wx.Dialog.__init__(self, None, -1, title, size=(400, 120))
self.CenterOnScreen(wx.BOTH)
ok = wx.Button(self, wx.ID_OK, "OK")
ok.SetDefault()
text = wx.StaticText(self, -1, message)
vbox = wx.BoxSizer(wx.VERTICAL)
vbox.Add(text, 1, wx.ALIGN_CENTER | wx.TOP, 10)
vbox.Add(ok, 1, wx.ALIGN_CENTER | wx.BOTTOM, 10)
self.SetSizer(vbox)
class ErrorReportingDialog(wx.Dialog):
"""
    Error dialog box
from http://zetcode.com/wxpython/dialogs/
"""
def __init__(self, *args, **kw):
"""
Initializing the dialog box
"""
super(ErrorReportingDialog, self).__init__(*args, **kw)
self.InitUI()
self.SetSize((500, 400))
self.SetTitle(u'Error')
def InitUI(self):
"""
Interface of the error dialog box
"""
pnl = wx.Panel(self)
vbox = wx.BoxSizer(wx.VERTICAL)
sb = wx.StaticBox(pnl, label=u''.join([
u'Process failed. Please report your situation with the following error messages:'
]))
sbs = wx.StaticBoxSizer(sb, orient=wx.VERTICAL)
sbs.Add(wx.TextCtrl(
pnl, value=format_exc(), size=(475, 400),
style=wx.TE_READONLY | wx.TE_MULTILINE
))
pnl.SetSizer(sbs)
hbox2 = wx.BoxSizer(wx.HORIZONTAL)
closeButton = wx.Button(self, label='Close')
hbox2.Add(closeButton, flag=wx.LEFT, border=5)
vbox.Add(pnl, proportion=1, flag=wx.ALL | wx.EXPAND, border=5)
vbox.Add(hbox2, flag=wx.ALIGN_CENTER | wx.TOP | wx.BOTTOM, border=10)
self.SetSizer(vbox)
closeButton.Bind(wx.EVT_BUTTON, self.OnClose)
def OnClose(self, e):
"""
Close the error dialog box
"""
self.Destroy()
# define functions
def gui_main():
"""
Main function to intiate the GUI
"""
app = wx.App()
MainFrame(None, title=u'Data Preprocessing Helper').Show()
app.MainLoop()
def get_ext(filepath: str) -> str:
"""
Return the extension of a file given a file path
Inputs:
==========
filepath: str
string character for a filepath
"""
return split(filepath)[1].split('.')[-1]
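# Illustrative behaviour of get_ext (example inputs, not from the original file):
#   get_ext('C:/data/example.xlsx') -> 'xlsx'
#   get_ext('./example.csv') -> 'csv'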
if __name__ == "__main__":
gui_main()
| gpl-3.0 |
h2educ/scikit-learn | sklearn/utils/multiclass.py | 45 | 12390 |
# Author: Arnaud Joly, Joel Nothman, Hamzeh Alsalhi
#
# License: BSD 3 clause
"""
Multi-class / multi-label utility function
==========================================
"""
from __future__ import division
from collections import Sequence
from itertools import chain
from scipy.sparse import issparse
from scipy.sparse.base import spmatrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
import numpy as np
from ..externals.six import string_types
from .validation import check_array
from ..utils.fixes import bincount
from ..utils.fixes import array_equal
def _unique_multiclass(y):
if hasattr(y, '__array__'):
return np.unique(np.asarray(y))
else:
return set(y)
def _unique_indicator(y):
return np.arange(check_array(y, ['csr', 'csc', 'coo']).shape[1])
_FN_UNIQUE_LABELS = {
'binary': _unique_multiclass,
'multiclass': _unique_multiclass,
'multilabel-indicator': _unique_indicator,
}
def unique_labels(*ys):
"""Extract an ordered array of unique labels
We don't allow:
- mix of multilabel and multiclass (single label) targets
- mix of label indicator matrix and anything else,
          (because there are no explicit labels)
- mix of label indicator matrices of different sizes
- mix of string and integer labels
At the moment, we also don't allow "multiclass-multioutput" input type.
Parameters
----------
*ys : array-likes,
Returns
-------
out : numpy array of shape [n_unique_labels]
An ordered array of unique labels.
Examples
--------
>>> from sklearn.utils.multiclass import unique_labels
>>> unique_labels([3, 5, 5, 5, 7, 7])
array([3, 5, 7])
>>> unique_labels([1, 2, 3, 4], [2, 2, 3, 4])
array([1, 2, 3, 4])
>>> unique_labels([1, 2, 10], [5, 11])
array([ 1, 2, 5, 10, 11])
"""
if not ys:
raise ValueError('No argument has been passed.')
# Check that we don't mix label format
ys_types = set(type_of_target(x) for x in ys)
if ys_types == set(["binary", "multiclass"]):
ys_types = set(["multiclass"])
if len(ys_types) > 1:
raise ValueError("Mix type of y not allowed, got types %s" % ys_types)
label_type = ys_types.pop()
# Check consistency for the indicator format
if (label_type == "multilabel-indicator" and
len(set(check_array(y, ['csr', 'csc', 'coo']).shape[1]
for y in ys)) > 1):
raise ValueError("Multi-label binary indicator input with "
"different numbers of labels")
# Get the unique set of labels
_unique_labels = _FN_UNIQUE_LABELS.get(label_type, None)
if not _unique_labels:
raise ValueError("Unknown label type: %s" % repr(ys))
ys_labels = set(chain.from_iterable(_unique_labels(y) for y in ys))
# Check that we don't mix string type with number type
if (len(set(isinstance(label, string_types) for label in ys_labels)) > 1):
raise ValueError("Mix of label input types (string and number)")
return np.array(sorted(ys_labels))
def _is_integral_float(y):
return y.dtype.kind == 'f' and np.all(y.astype(int) == y)
def is_multilabel(y):
""" Check if ``y`` is in a multilabel format.
Parameters
----------
y : numpy array of shape [n_samples]
Target values.
Returns
-------
out : bool,
Return ``True``, if ``y`` is in a multilabel format, else ```False``.
Examples
--------
>>> import numpy as np
>>> from sklearn.utils.multiclass import is_multilabel
>>> is_multilabel([0, 1, 0, 1])
False
>>> is_multilabel([[1], [0, 2], []])
False
>>> is_multilabel(np.array([[1, 0], [0, 0]]))
True
>>> is_multilabel(np.array([[1], [0], [0]]))
False
>>> is_multilabel(np.array([[1, 0, 0]]))
True
"""
if hasattr(y, '__array__'):
y = np.asarray(y)
if not (hasattr(y, "shape") and y.ndim == 2 and y.shape[1] > 1):
return False
if issparse(y):
if isinstance(y, (dok_matrix, lil_matrix)):
y = y.tocsr()
return (len(y.data) == 0 or np.unique(y.data).size == 1 and
(y.dtype.kind in 'biu' or # bool, int, uint
_is_integral_float(np.unique(y.data))))
else:
labels = np.unique(y)
return len(labels) < 3 and (y.dtype.kind in 'biu' or # bool, int, uint
_is_integral_float(labels))
def type_of_target(y):
"""Determine the type of data indicated by target `y`
Parameters
----------
y : array-like
Returns
-------
target_type : string
One of:
* 'continuous': `y` is an array-like of floats that are not all
integers, and is 1d or a column vector.
* 'continuous-multioutput': `y` is a 2d array of floats that are
not all integers, and both dimensions are of size > 1.
* 'binary': `y` contains <= 2 discrete values and is 1d or a column
vector.
* 'multiclass': `y` contains more than two discrete values, is not a
sequence of sequences, and is 1d or a column vector.
* 'multiclass-multioutput': `y` is a 2d array that contains more
than two discrete values, is not a sequence of sequences, and both
dimensions are of size > 1.
* 'multilabel-indicator': `y` is a label indicator matrix, an array
of two dimensions with at least two columns, and at most 2 unique
values.
* 'unknown': `y` is array-like but none of the above, such as a 3d
array, sequence of sequences, or an array of non-sequence objects.
Examples
--------
>>> import numpy as np
>>> type_of_target([0.1, 0.6])
'continuous'
>>> type_of_target([1, -1, -1, 1])
'binary'
>>> type_of_target(['a', 'b', 'a'])
'binary'
>>> type_of_target([1.0, 2.0])
'binary'
>>> type_of_target([1, 0, 2])
'multiclass'
>>> type_of_target([1.0, 0.0, 3.0])
'multiclass'
>>> type_of_target(['a', 'b', 'c'])
'multiclass'
>>> type_of_target(np.array([[1, 2], [3, 1]]))
'multiclass-multioutput'
>>> type_of_target([[1, 2]])
'multiclass-multioutput'
>>> type_of_target(np.array([[1.5, 2.0], [3.0, 1.6]]))
'continuous-multioutput'
>>> type_of_target(np.array([[0, 1], [1, 1]]))
'multilabel-indicator'
"""
valid = ((isinstance(y, (Sequence, spmatrix)) or hasattr(y, '__array__'))
and not isinstance(y, string_types))
if not valid:
raise ValueError('Expected array-like (array or non-string sequence), '
'got %r' % y)
if is_multilabel(y):
return 'multilabel-indicator'
try:
y = np.asarray(y)
except ValueError:
# Known to fail in numpy 1.3 for array of arrays
return 'unknown'
# The old sequence of sequences format
try:
if (not hasattr(y[0], '__array__') and isinstance(y[0], Sequence)
and not isinstance(y[0], string_types)):
raise ValueError('You appear to be using a legacy multi-label data'
' representation. Sequence of sequences are no'
' longer supported; use a binary array or sparse'
' matrix instead.')
except IndexError:
pass
# Invalid inputs
if y.ndim > 2 or (y.dtype == object and len(y) and
not isinstance(y.flat[0], string_types)):
return 'unknown' # [[[1, 2]]] or [obj_1] and not ["label_1"]
if y.ndim == 2 and y.shape[1] == 0:
return 'unknown' # [[]]
if y.ndim == 2 and y.shape[1] > 1:
suffix = "-multioutput" # [[1, 2], [1, 2]]
else:
suffix = "" # [1, 2, 3] or [[1], [2], [3]]
# check float and contains non-integer float values
if y.dtype.kind == 'f' and np.any(y != y.astype(int)):
# [.1, .2, 3] or [[.1, .2, 3]] or [[1., .2]] and not [1., 2., 3.]
return 'continuous' + suffix
if (len(np.unique(y)) > 2) or (y.ndim >= 2 and len(y[0]) > 1):
return 'multiclass' + suffix # [1, 2, 3] or [[1., 2., 3]] or [[1, 2]]
else:
return 'binary' # [1, 2] or [["a"], ["b"]]
def _check_partial_fit_first_call(clf, classes=None):
"""Private helper function for factorizing common classes param logic
Estimators that implement the ``partial_fit`` API need to be provided with
the list of possible classes at the first call to partial_fit.
Subsequent calls to partial_fit should check that ``classes`` is still
consistent with a previous value of ``clf.classes_`` when provided.
This function returns True if it detects that this was the first call to
``partial_fit`` on ``clf``. In that case the ``classes_`` attribute is also
set on ``clf``.
"""
if getattr(clf, 'classes_', None) is None and classes is None:
raise ValueError("classes must be passed on the first call "
"to partial_fit.")
elif classes is not None:
if getattr(clf, 'classes_', None) is not None:
if not array_equal(clf.classes_, unique_labels(classes)):
raise ValueError(
"`classes=%r` is not the same as on last call "
"to partial_fit, was: %r" % (classes, clf.classes_))
else:
# This is the first call to partial_fit
clf.classes_ = unique_labels(classes)
return True
# classes is None and clf.classes_ has already previously been set:
# nothing to do
return False
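# Illustrative usage (sketch only, not part of this module): an estimator's
# partial_fit would typically call this helper as follows, where
# `_allocate_state` is a hypothetical initialization method:
#
#     def partial_fit(self, X, y, classes=None):
#         if _check_partial_fit_first_call(self, classes):
#             # first call: self.classes_ has just been set from `classes`
#             self._allocate_state(n_classes=len(self.classes_))
#         ...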
def class_distribution(y, sample_weight=None):
"""Compute class priors from multioutput-multiclass target data
Parameters
----------
y : array like or sparse matrix of size (n_samples, n_outputs)
The labels for each example.
sample_weight : array-like of shape = (n_samples,), optional
Sample weights.
Returns
-------
classes : list of size n_outputs of arrays of size (n_classes,)
List of classes for each column.
    n_classes : list of integers of size n_outputs
Number of classes in each column
class_prior : list of size n_outputs of arrays of size (n_classes,)
Class distribution of each column.
"""
classes = []
n_classes = []
class_prior = []
n_samples, n_outputs = y.shape
if issparse(y):
y = y.tocsc()
y_nnz = np.diff(y.indptr)
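        # For a CSC matrix, indptr[k+1] - indptr[k] is the number of explicitly
        # stored (non-zero) entries in column k, so y_nnz[k] counts the samples
        # with an explicitly stored label for output k.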
for k in range(n_outputs):
col_nonzero = y.indices[y.indptr[k]:y.indptr[k + 1]]
# separate sample weights for zero and non-zero elements
if sample_weight is not None:
nz_samp_weight = np.asarray(sample_weight)[col_nonzero]
zeros_samp_weight_sum = (np.sum(sample_weight) -
np.sum(nz_samp_weight))
else:
nz_samp_weight = None
zeros_samp_weight_sum = y.shape[0] - y_nnz[k]
classes_k, y_k = np.unique(y.data[y.indptr[k]:y.indptr[k + 1]],
return_inverse=True)
class_prior_k = bincount(y_k, weights=nz_samp_weight)
            # An explicit zero was found, combine its weight with the weight
# of the implicit zeros
if 0 in classes_k:
class_prior_k[classes_k == 0] += zeros_samp_weight_sum
            # If there is an implicit zero and it is not in classes and
# class_prior, make an entry for it
if 0 not in classes_k and y_nnz[k] < y.shape[0]:
classes_k = np.insert(classes_k, 0, 0)
class_prior_k = np.insert(class_prior_k, 0,
zeros_samp_weight_sum)
classes.append(classes_k)
n_classes.append(classes_k.shape[0])
class_prior.append(class_prior_k / class_prior_k.sum())
else:
for k in range(n_outputs):
classes_k, y_k = np.unique(y[:, k], return_inverse=True)
classes.append(classes_k)
n_classes.append(classes_k.shape[0])
class_prior_k = bincount(y_k, weights=sample_weight)
class_prior.append(class_prior_k / class_prior_k.sum())
return (classes, n_classes, class_prior)
| bsd-3-clause |
dougnd/matplotlib2tikz | test/testfunctions/text_overlay.py | 1 | 2562 | # -*- coding: utf-8 -*-
#
desc = 'Regular plot with overlay text'
# phash = '770b23744b93c68d'
phash = '370b233649d3f64c'
def plot():
from matplotlib import pyplot as pp
import numpy as np
fig = pp.figure()
xxx = np.linspace(0, 5)
yyy = xxx**2
pp.text(1, 5, 'test1', size=50, rotation=30.,
ha='center', va='bottom', color='r', style='italic',
weight='light',
bbox=dict(boxstyle='round, pad=0.2',
ec=(1., 0.5, 0.5),
fc=(1., 0.8, 0.8),
ls='dashdot'
)
)
pp.text(3, 6, 'test2', size=50, rotation=-30.,
ha='center', va='center', color='b', weight='bold',
bbox=dict(boxstyle='square',
ec=(1., 0.5, 0.5),
fc=(1., 0.8, 0.8),
)
)
pp.text(4, 8, 'test3', size=20, rotation=90.0,
ha='center', va='center', color='b', weight='demi',
bbox=dict(
boxstyle='rarrow',
ls='dashed',
ec=(1., 0.5, 0.5),
fc=(1., 0.8, 0.8),
)
)
pp.text(4, 16, 'test4', size=20, rotation=90.0,
ha='center', va='center', color='b', weight='heavy',
bbox=dict(
boxstyle='larrow',
ls='dotted',
ec=(1., 0.5, 0.5),
fc=(1., 0.8, 0.8),
)
)
pp.text(2, 18, 'test5', size=20,
ha='center', va='center', color='b',
bbox=dict(
boxstyle='darrow',
ec=(1., 0.5, 0.5),
fc=(1., 0.8, 0.8),
)
)
pp.text(1, 20, 'test6', size=20,
ha='center', va='center', color='b',
bbox=dict(
boxstyle='circle',
ec=(1., 0.5, 0.5),
fc=(1., 0.8, 0.8),
)
)
pp.text(3, 23, 'test7', size=20,
ha='center', va='center', color='b',
bbox=dict(
boxstyle='roundtooth',
ec=(1., 0.5, 0.5),
fc=(1., 0.8, 0.8),
)
)
pp.text(3, 20, 'test8', size=20,
ha='center', va='center', color='b',
bbox=dict(
boxstyle='sawtooth',
ec=(1., 0.5, 0.5),
fc=(1., 0.8, 0.8),
)
)
pp.plot(xxx, yyy, label='a graph')
pp.legend()
return fig
| mit |
Fireblend/scikit-learn | examples/plot_kernel_ridge_regression.py | 230 | 6222 | """
=============================================
Comparison of kernel ridge regression and SVR
=============================================
Both kernel ridge regression (KRR) and SVR learn a non-linear function by
employing the kernel trick, i.e., they learn a linear function in the space
induced by the respective kernel which corresponds to a non-linear function in
the original space. They differ in the loss functions (ridge versus
epsilon-insensitive loss). In contrast to SVR, fitting a KRR can be done in
closed-form and is typically faster for medium-sized datasets. On the other
hand, the learned model is non-sparse and thus slower than SVR at
prediction-time.
This example illustrates both methods on an artificial dataset, which
consists of a sinusoidal target function and strong noise added to every fifth
datapoint. The first figure compares the learned model of KRR and SVR when both
complexity/regularization and bandwidth of the RBF kernel are optimized using
grid-search. The learned functions are very similar; however, fitting KRR is
approx. seven times faster than fitting SVR (both with grid-search). However,
prediction of 100000 target values is more than three times faster with SVR
since it has learned a sparse model using only approx. 1/3 of the 100 training
datapoints as support vectors.
The next figure compares the time for fitting and prediction of KRR and SVR for
different sizes of the training set. Fitting KRR is faster than SVR for medium-
sized training sets (less than 1000 samples); however, for larger training sets
SVR scales better. With regard to prediction time, SVR is faster than
KRR for all sizes of the training set because of the learned sparse
solution. Note that the degree of sparsity and thus the prediction time depends
on the parameters epsilon and C of the SVR.
"""
# Authors: Jan Hendrik Metzen <[email protected]>
# License: BSD 3 clause
from __future__ import division
import time
import numpy as np
from sklearn.svm import SVR
from sklearn.grid_search import GridSearchCV
from sklearn.learning_curve import learning_curve
from sklearn.kernel_ridge import KernelRidge
import matplotlib.pyplot as plt
rng = np.random.RandomState(0)
#############################################################################
# Generate sample data
X = 5 * rng.rand(10000, 1)
y = np.sin(X).ravel()
# Add noise to targets
y[::5] += 3 * (0.5 - rng.rand(X.shape[0] // 5))
X_plot = np.linspace(0, 5, 100000)[:, None]
#############################################################################
# Fit regression model
train_size = 100
svr = GridSearchCV(SVR(kernel='rbf', gamma=0.1), cv=5,
param_grid={"C": [1e0, 1e1, 1e2, 1e3],
"gamma": np.logspace(-2, 2, 5)})
kr = GridSearchCV(KernelRidge(kernel='rbf', gamma=0.1), cv=5,
param_grid={"alpha": [1e0, 0.1, 1e-2, 1e-3],
"gamma": np.logspace(-2, 2, 5)})
t0 = time.time()
svr.fit(X[:train_size], y[:train_size])
svr_fit = time.time() - t0
print("SVR complexity and bandwidth selected and model fitted in %.3f s"
% svr_fit)
t0 = time.time()
kr.fit(X[:train_size], y[:train_size])
kr_fit = time.time() - t0
print("KRR complexity and bandwidth selected and model fitted in %.3f s"
% kr_fit)
sv_ratio = svr.best_estimator_.support_.shape[0] / train_size
print("Support vector ratio: %.3f" % sv_ratio)
t0 = time.time()
y_svr = svr.predict(X_plot)
svr_predict = time.time() - t0
print("SVR prediction for %d inputs in %.3f s"
% (X_plot.shape[0], svr_predict))
t0 = time.time()
y_kr = kr.predict(X_plot)
kr_predict = time.time() - t0
print("KRR prediction for %d inputs in %.3f s"
% (X_plot.shape[0], kr_predict))
#############################################################################
# look at the results
sv_ind = svr.best_estimator_.support_
plt.scatter(X[sv_ind], y[sv_ind], c='r', s=50, label='SVR support vectors')
plt.scatter(X[:100], y[:100], c='k', label='data')
# (plt.hold has been removed from recent Matplotlib; repeated plot calls overlay by default)
plt.plot(X_plot, y_svr, c='r',
label='SVR (fit: %.3fs, predict: %.3fs)' % (svr_fit, svr_predict))
plt.plot(X_plot, y_kr, c='g',
label='KRR (fit: %.3fs, predict: %.3fs)' % (kr_fit, kr_predict))
plt.xlabel('data')
plt.ylabel('target')
plt.title('SVR versus Kernel Ridge')
plt.legend()
# Visualize training and prediction time
plt.figure()
# Generate sample data
X = 5 * rng.rand(10000, 1)
y = np.sin(X).ravel()
y[::5] += 3 * (0.5 - rng.rand(X.shape[0] // 5))
sizes = np.logspace(1, 4, 7)
for name, estimator in {"KRR": KernelRidge(kernel='rbf', alpha=0.1,
gamma=10),
"SVR": SVR(kernel='rbf', C=1e1, gamma=10)}.items():
train_time = []
test_time = []
for train_test_size in sizes:
t0 = time.time()
estimator.fit(X[:train_test_size], y[:train_test_size])
train_time.append(time.time() - t0)
t0 = time.time()
estimator.predict(X_plot[:1000])
test_time.append(time.time() - t0)
plt.plot(sizes, train_time, 'o-', color="r" if name == "SVR" else "g",
label="%s (train)" % name)
plt.plot(sizes, test_time, 'o--', color="r" if name == "SVR" else "g",
label="%s (test)" % name)
plt.xscale("log")
plt.yscale("log")
plt.xlabel("Train size")
plt.ylabel("Time (seconds)")
plt.title('Execution Time')
plt.legend(loc="best")
# Visualize learning curves
plt.figure()
svr = SVR(kernel='rbf', C=1e1, gamma=0.1)
kr = KernelRidge(kernel='rbf', alpha=0.1, gamma=0.1)
train_sizes, train_scores_svr, test_scores_svr = \
learning_curve(svr, X[:100], y[:100], train_sizes=np.linspace(0.1, 1, 10),
scoring="mean_squared_error", cv=10)
train_sizes_abs, train_scores_kr, test_scores_kr = \
learning_curve(kr, X[:100], y[:100], train_sizes=np.linspace(0.1, 1, 10),
scoring="mean_squared_error", cv=10)
plt.plot(train_sizes, test_scores_svr.mean(1), 'o-', color="r",
label="SVR")
plt.plot(train_sizes, test_scores_kr.mean(1), 'o-', color="g",
label="KRR")
plt.xlabel("Train size")
plt.ylabel("Mean Squared Error")
plt.title('Learning curves')
plt.legend(loc="best")
plt.show()
| bsd-3-clause |
arthur-gouveia/DAT210x | Module3/assignment3.py | 1 | 1172 | import pandas as pd
import matplotlib.pyplot as plt
import matplotlib
from mpl_toolkits.mplot3d import Axes3D
# Look pretty...
matplotlib.style.use('ggplot')
#
# TODO: Load up the Seeds Dataset into a Dataframe
# It's located at 'Datasets/wheat.data'
#
df = pd.read_csv('Datasets/wheat.data', index_col=0)
fig = plt.figure()
#
# TODO: Create a new 3D subplot using fig. Then use the
# subplot to graph a 3D scatter plot using the area,
# perimeter and asymmetry features. Be sure to use the
# optional display parameter c='red', and also label your
# axes
#
ax = fig.add_subplot(111, projection='3d')
ax.set_xlabel('area')
ax.set_ylabel('perimeter')
ax.set_zlabel('asymmetry')
ax.scatter(df.area, df.perimeter, df.asymmetry, c='red')
#
# TODO: Create a new 3D subplot using fig. Then use the
# subplot to graph a 3D scatter plot using the width,
# groove and length features. Be sure to use the
# optional display parameter c='green', and also label your
# axes
#
fig2 = plt.figure()
ax = fig2.add_subplot(111, projection='3d')
ax.set_xlabel('width')
ax.set_ylabel('groove')
ax.set_zlabel('length')
ax.scatter(df.width, df.groove, df.length, c='green')
plt.show()
| mit |
blab/nextstrain-augur | builds/dengue/dengue_titers.py | 1 | 9995 | from builtins import range
def tree_additivity_symmetry(titer_model):
'''
The titer model makes two major assumptions:
1 - Once we correct for virus avidity and serum potency, titers are roughly symmetric
2 - Antigenic relationships evolve in a tree-like fashion
We can check the validity of these assumptions by plotting:
1 - titer symmetry (T(serum i,virus j) - vice versa)
2 - For random quartets of antigenic distances, how do the relative distances between the tips relate to one another?
If they're tree-like, we expect the largest - second largest distance to be roughly exponentially distributed.
How does this compare to quartets of values pulled randomly from a normal distribution?
Code adapted from https://github.com/blab/nextflu/blob/pnas-hi-titers/augur/src/diagnostic_figures.py#L314
'''
import numpy as np
from matplotlib import pyplot as plt
reciprocal_measurements = []
reciprocal_measurements_titers = []
for (testvir, serum) in titer_model.titers.titers_normalized:
tmp_recip = [v for v in titer_model.titers.titers_normalized if serum[0]==v[0] and testvir==v[1][0]]
for v in tmp_recip:
val_fwd = titer_model.titers.titers_normalized[(testvir,serum)]
val_bwd = titer_model.titers.titers_normalized[v]
date_fwd = titer_model.node_lookup[testvir].attr['num_date']
date_bwd = titer_model.node_lookup[serum[0]].attr['num_date']
diff_uncorrected = val_fwd - val_bwd
diff_corrected = (val_fwd - titer_model.serum_potency[serum] - titer_model.virus_effect[testvir])\
-(val_bwd - titer_model.serum_potency[v[1]] - titer_model.virus_effect[serum[0]])
val_bwd = titer_model.titers.titers_normalized[v]
reciprocal_measurements.append([testvir, serum, diff_uncorrected, diff_corrected, np.sign(date_fwd-date_bwd)])
reciprocal_measurements_titers.append([testvir, serum, val_fwd, val_bwd,
(val_fwd - titer_model.serum_potency[serum] - titer_model.virus_effect[testvir]),
(val_bwd - titer_model.serum_potency[v[1]] - titer_model.virus_effect[serum[0]]),
])
plt.figure(figsize=(9,6))
ax = plt.subplot(121)
# multiple the difference by the +/- one to polarize all comparisons by date
vals= [x[2]*x[-1] for x in reciprocal_measurements]
plt.hist(vals, alpha=0.7, label=r"Raw $T_{ij}-T_{ji}$", normed=True)
print("raw reciprocal titers: ", str(np.round(np.mean(vals),3))+'+/-'+str(np.round(np.std(vals),3)))
vals= [x[3]*x[-1] for x in reciprocal_measurements]
plt.hist(vals, alpha=0.7, label=r"Normalized $T_{ij}-T_{ji}$", normed=True)
print("normalized reciprocal titers: ", str(np.round(np.mean(vals),3))+'+/-'+str(np.round(np.std(vals),3)))
plt.xlabel('Titer asymmetry', fontsize=12)
ax.tick_params(axis='both', labelsize=12)
plt.legend(fontsize=12, handlelength=0.8)
plt.tight_layout()
#### Analyze all cliques #######################################################
all_reciprocal = list(set([v[1] for v in reciprocal_measurements_titers]))
import networkx as nx
from random import sample
G = nx.Graph()
G.add_nodes_from(all_reciprocal)
for vi,v in enumerate(all_reciprocal):
for w in all_reciprocal[:vi]:
if ((v[0], w) in titer_model.titers.titers_normalized) and ((w[0], v) in titer_model.titers.titers_normalized):
G.add_edge(v,w)
C = nx.find_cliques(G)
def symm_distance(v,w):
res = titer_model.titers.titers_normalized[(v[0], w)] - titer_model.virus_effect[v[0]] - titer_model.serum_potency[w]
res += titer_model.titers.titers_normalized[(w[0], v)] - titer_model.virus_effect[w[0]] - titer_model.serum_potency[v]
return res*0.5
additivity_test = {'test':[], 'control':[]}
n_quartets = 1000
for clique in C:
if len(clique)>8:
for i in range(n_quartets):
Q = sample(clique, 4)
dists = []
for (a,b) in [((0,1), (2,3)),((0,2), (1,3)), ((0,3), (1,2))]:
dists.append(symm_distance(Q[a[0]], Q[a[1]])+symm_distance(Q[b[0]], Q[b[1]]))
dists.sort(reverse=True)
additivity_test['test'].append(dists[0]-dists[1])
dists = []
for di in range(3):
a,b,c,d = sample(clique,4)
dists.append(symm_distance(a, b)+symm_distance(c,d))
dists.sort(reverse=True)
additivity_test['control'].append(dists[0]-dists[1])
ax=plt.subplot(122)
plt.hist(additivity_test['control'], alpha=0.7,normed=True, bins = np.linspace(0,3,18),
label = 'Control, mean='+str(np.round(np.mean(additivity_test['control']),2)))
plt.hist(additivity_test['test'], alpha=0.7,normed=True, bins = np.linspace(0,3,18),
label = 'Quartet, mean='+str(np.round(np.mean(additivity_test['test']),2)))
ax.tick_params(axis='both', labelsize=12)
plt.xlabel(r'$\Delta$ top two distance sums', fontsize = 12)
plt.legend(fontsize=12, handlelength=0.8)
plt.tight_layout()
plt.savefig('./processed/titer_asymmetry.png')
def titer_model(process, sanofi_strain = None, **kwargs):
'''
estimate a titer tree model using titers in titer_fname.
'''
from base.titer_model import TreeModel, SubstitutionModel
## TREE MODEL
process.titer_tree = TreeModel(process.tree.tree, process.titers, **kwargs)
if 'cross_validate' in kwargs:
assert kwargs['training_fraction'] < 1.0
process.cross_validation = process.titer_tree.cross_validate(n=kwargs['cross_validate'], **kwargs)
else:
process.titer_tree.prepare(**kwargs) # make training set, find subtree with titer measurements, and make_treegraph
process.titer_tree.train(**kwargs) # pick longest branch on path between each (test, ref) pair, assign titer drops to this branch
# then calculate a cumulative antigenic evolution score for each node
if kwargs['training_fraction'] != 1.0:
process.titer_tree.validate(kwargs)
# add attributes for the estimated branch-specific titer drop values (dTiter)
# and cumulative (from root) titer drop values (cTiter) to each branch
for n in process.tree.tree.find_clades():
n.attr['cTiter'] = n.cTiter
n.attr['dTiter'] = n.dTiter
if sanofi_strain: # calculate antigenic distance from vaccine strain for each serotype-specific build
# find the vaccine strain in the tree
sanofi_tip = [i for i in process.tree.tree.find_clades() if i.name==sanofi_strain][0]
# sum up dTiter on all the branches on the path between the vaccine strain and each other strain
for tip in process.tree.tree.find_clades():
if tip == sanofi_tip:
tip.attr['dTiter_sanofi']=0.00
else:
trace = process.tree.tree.trace(tip, sanofi_tip)
trace_dTiter = sum([i.dTiter for i in trace])
tip.attr['dTiter_sanofi']= round(trace_dTiter, 2)
# export for auspice visualization
process.config["auspice"]["color_options"]["dTiter_sanofi"] = {
"menuItem": "antigenic dist. from vaccine", "type": "continuous", "legendTitle": "log2 titer distance from sanofi vaccine strain",
"key": "vaccine_dTiter", "vmin": "0.0", "vmax": "2.0"}
else: # export cumulative distance for auspice vis. of the all-serotype build
process.config["auspice"]["color_options"]["cTiter"] = {
"menuItem": "antigenic dist. from root", "type": "continuous", "legendTitle": "log2 titer distance from root",
"key": "cTiter", "vmin": "0.0", "vmax": "2.0"}
if kwargs['plot_symmetry'] == True:
tree_additivity_symmetry(process.titer_tree)
def titer_export(process):
from base.io_util import write_json
from itertools import chain
import pandas as pd
prefix = process.config["output"]["auspice"]+'/'+process.info["prefix"]+'_'
if hasattr(process, 'titer_tree'):
# export the raw titers
data = process.titer_tree.compile_titers()
write_json(data, prefix+'titers.json', indent=1)
# export the tree model
tree_model = {'potency':process.titer_tree.compile_potencies(),
'avidity':process.titer_tree.compile_virus_effects(),
'dTiter':{n.clade:n.dTiter for n in process.tree.tree.find_clades() if n.dTiter>1e-6}}
write_json(tree_model, prefix+'tree_model.json')
# export model performance on test set
if hasattr(process.titer_tree, 'cross_validation'):
predicted_values = list(chain.from_iterable([iteration.pop('values') for iteration in process.titer_tree.cross_validation ])) # flatten to one list of (actual, predicted) tuples
predicted_values = pd.DataFrame(predicted_values, columns=['actual', 'predicted']) # cast to df so we can easily write to csv
model_performance = pd.DataFrame(process.titer_tree.cross_validation) # list of dictionaries -> df
predicted_values.to_csv(prefix+'predicted_titers.csv', index=False)
model_performance.to_csv(prefix+'titer_model_performance.csv', index=False)
        elif hasattr(process.titer_tree, 'validation'):
predicted_values = pd.DataFrame(process.titer_tree.validation.pop('values'), columns=['actual', 'predicted'])
model_performance = pd.DataFrame(process.titer_tree.validation)
predicted_values.to_csv(prefix+'predicted_titers.csv', index=False)
model_performance.to_csv(prefix+'titer_model_performance.csv', index=False)
else:
print('Tree model not yet trained')
| agpl-3.0 |
vidartf/hyperspy | hyperspy/drawing/_markers/vertical_line_segment.py | 1 | 3293 | # -*- coding: utf-8 -*-
# Copyright 2007-2016 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
import matplotlib.pyplot as plt
from hyperspy.drawing.marker import MarkerBase
class VerticalLineSegment(MarkerBase):
"""Vertical line segment marker that can be added to the signal figure
Parameters
    ----------
x: array or float
The position of line segment in x.
If float, the marker is fixed.
If array, the marker will be updated when navigating. The array should
        have the same dimensions in the navigation axes.
    y1: array or float
        The position of the start of the line segment in y.
        See the x argument.
    y2: array or float
        The position of the end of the line segment in y.
        See the x argument.
kwargs:
        Keyword arguments of axvline valid properties (i.e. recognized by
mpl.plot).
Example
-------
>>> import numpy as np
>>> im = hs.signals.Signal2D(np.zeros((100, 100)))
>>> m = hs.plot.markers.vertical_line_segment(
    ...     x=20, y1=30, y2=70, linewidth=4, color='red', linestyle='dotted')
>>> im.add_marker(m)
"""
def __init__(self, x, y1, y2, **kwargs):
MarkerBase.__init__(self)
lp = {'color': 'black', 'linewidth': 1}
self.marker_properties = lp
self.set_data(x1=x, y1=y1, y2=y2)
self.set_marker_properties(**kwargs)
def update(self):
if self.auto_update is False:
return
self._update_segment()
def plot(self):
if self.ax is None:
            raise AttributeError(
                "To use this method the marker needs to first be added to a " +
"figure using `s._plot.signal_plot.add_marker(m)` or " +
"`s._plot.navigator_plot.add_marker(m)`")
self.marker = self.ax.vlines(0, 0, 1, **self.marker_properties)
self._update_segment()
self.marker.set_animated(True)
try:
self.ax.hspy_fig._draw_animated()
except:
pass
def _update_segment(self):
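        # Both endpoints of the segment share the x position stored under 'x1';
        # when y1 or y2 is None the corresponding endpoint falls back to the
        # current y-limit of the axes, so the segment spans the full plot height.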
segments = self.marker.get_segments()
segments[0][0, 0] = self.get_data_position('x1')
segments[0][1, 0] = segments[0][0, 0]
if self.get_data_position('y1') is None:
segments[0][0, 1] = plt.getp(self.marker.axes, 'ylim')[0]
else:
segments[0][0, 1] = self.get_data_position('y1')
if self.get_data_position('y2') is None:
segments[0][1, 1] = plt.getp(self.marker.axes, 'ylim')[1]
else:
segments[0][1, 1] = self.get_data_position('y2')
self.marker.set_segments(segments)
| gpl-3.0 |
gfyoung/pandas | pandas/tests/frame/methods/test_update.py | 4 | 4259 | import numpy as np
import pytest
import pandas as pd
from pandas import DataFrame, Series, date_range
import pandas._testing as tm
class TestDataFrameUpdate:
def test_update_nan(self):
# #15593 #15617
        # test 1: a NaN in `other` must not overwrite an existing value
df1 = DataFrame({"A": [1.0, 2, 3], "B": date_range("2000", periods=3)})
df2 = DataFrame({"A": [None, 2, 3]})
expected = df1.copy()
df1.update(df2, overwrite=False)
tm.assert_frame_equal(df1, expected)
        # test 2: a NaN in the caller is filled from `other`, even with overwrite=False
df1 = DataFrame({"A": [1.0, None, 3], "B": date_range("2000", periods=3)})
df2 = DataFrame({"A": [None, 2, 3]})
expected = DataFrame({"A": [1.0, 2, 3], "B": date_range("2000", periods=3)})
df1.update(df2, overwrite=False)
tm.assert_frame_equal(df1, expected)
def test_update(self):
df = DataFrame(
[[1.5, np.nan, 3.0], [1.5, np.nan, 3.0], [1.5, np.nan, 3], [1.5, np.nan, 3]]
)
other = DataFrame([[3.6, 2.0, np.nan], [np.nan, np.nan, 7]], index=[1, 3])
df.update(other)
expected = DataFrame(
[[1.5, np.nan, 3], [3.6, 2, 3], [1.5, np.nan, 3], [1.5, np.nan, 7.0]]
)
tm.assert_frame_equal(df, expected)
def test_update_dtypes(self):
# gh 3016
df = DataFrame(
[[1.0, 2.0, False, True], [4.0, 5.0, True, False]],
columns=["A", "B", "bool1", "bool2"],
)
other = DataFrame([[45, 45]], index=[0], columns=["A", "B"])
df.update(other)
expected = DataFrame(
[[45.0, 45.0, False, True], [4.0, 5.0, True, False]],
columns=["A", "B", "bool1", "bool2"],
)
tm.assert_frame_equal(df, expected)
def test_update_nooverwrite(self):
df = DataFrame(
[[1.5, np.nan, 3.0], [1.5, np.nan, 3.0], [1.5, np.nan, 3], [1.5, np.nan, 3]]
)
other = DataFrame([[3.6, 2.0, np.nan], [np.nan, np.nan, 7]], index=[1, 3])
df.update(other, overwrite=False)
expected = DataFrame(
[[1.5, np.nan, 3], [1.5, 2, 3], [1.5, np.nan, 3], [1.5, np.nan, 3.0]]
)
tm.assert_frame_equal(df, expected)
def test_update_filtered(self):
df = DataFrame(
[[1.5, np.nan, 3.0], [1.5, np.nan, 3.0], [1.5, np.nan, 3], [1.5, np.nan, 3]]
)
other = DataFrame([[3.6, 2.0, np.nan], [np.nan, np.nan, 7]], index=[1, 3])
df.update(other, filter_func=lambda x: x > 2)
expected = DataFrame(
[[1.5, np.nan, 3], [1.5, np.nan, 3], [1.5, np.nan, 3], [1.5, np.nan, 7.0]]
)
tm.assert_frame_equal(df, expected)
@pytest.mark.parametrize(
"bad_kwarg, exception, msg",
[
# errors must be 'ignore' or 'raise'
({"errors": "something"}, ValueError, "The parameter errors must.*"),
({"join": "inner"}, NotImplementedError, "Only left join is supported"),
],
)
def test_update_raise_bad_parameter(self, bad_kwarg, exception, msg):
df = DataFrame([[1.5, 1, 3.0]])
with pytest.raises(exception, match=msg):
df.update(df, **bad_kwarg)
def test_update_raise_on_overlap(self):
df = DataFrame(
[[1.5, 1, 3.0], [1.5, np.nan, 3.0], [1.5, np.nan, 3], [1.5, np.nan, 3]]
)
other = DataFrame([[2.0, np.nan], [np.nan, 7]], index=[1, 3], columns=[1, 2])
with pytest.raises(ValueError, match="Data overlaps"):
df.update(other, errors="raise")
def test_update_from_non_df(self):
d = {"a": Series([1, 2, 3, 4]), "b": Series([5, 6, 7, 8])}
df = DataFrame(d)
d["a"] = Series([5, 6, 7, 8])
df.update(d)
expected = DataFrame(d)
tm.assert_frame_equal(df, expected)
d = {"a": [1, 2, 3, 4], "b": [5, 6, 7, 8]}
df = DataFrame(d)
d["a"] = [5, 6, 7, 8]
df.update(d)
expected = DataFrame(d)
tm.assert_frame_equal(df, expected)
def test_update_datetime_tz(self):
# GH 25807
result = DataFrame([pd.Timestamp("2019", tz="UTC")])
result.update(result)
expected = DataFrame([pd.Timestamp("2019", tz="UTC")])
tm.assert_frame_equal(result, expected)
| bsd-3-clause |
sinkap/trappy | tests/test_stats.py | 1 | 11936 | # Copyright 2015-2016 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
from trappy.stats.Topology import Topology
from trappy.stats.Trigger import Trigger
from trappy.stats.Aggregator import MultiTriggerAggregator
import collections
import trappy
from trappy.base import Base
import pandas as pd
from pandas.util.testing import assert_series_equal
class TestTopology(unittest.TestCase):
def test_add_to_level(self):
"""Test level creation"""
level_groups = [[1, 2], [0, 3, 4, 5]]
level = "test_level"
topology = Topology()
topology.add_to_level(level, level_groups)
check_groups = topology.get_level(level)
self.assertTrue(topology.has_level(level))
self.assertEqual(level_groups, check_groups)
def test_flatten(self):
"""Test Topology: flatten"""
level_groups = [[1, 2], [0, 3, 4, 5]]
level = "test_level"
topology = Topology()
topology.add_to_level(level, level_groups)
flattened = [0, 1, 2, 3, 4, 5]
self.assertEqual(flattened, topology.flatten())
def test_cpu_topology_construction(self):
"""Test CPU Topology Construction"""
cluster_0 = [0, 3, 4, 5]
cluster_1 = [1, 2]
clusters = [cluster_0, cluster_1]
topology = Topology(clusters=clusters)
# Check cluster level creation
cluster_groups = [[0, 3, 4, 5], [1, 2]]
self.assertTrue(topology.has_level("cluster"))
self.assertEqual(cluster_groups, topology.get_level("cluster"))
# Check cpu level creation
cpu_groups = [[0], [1], [2], [3], [4], [5]]
self.assertTrue(topology.has_level("cpu"))
self.assertEqual(cpu_groups, topology.get_level("cpu"))
# Check "all" level
all_groups = [[0, 1, 2, 3, 4, 5]]
self.assertEqual(all_groups, topology.get_level("all"))
def test_level_span(self):
"""TestTopology: level_span"""
level_groups = [[1, 2], [0, 3, 4, 5]]
level = "test_level"
topology = Topology()
topology.add_to_level(level, level_groups)
self.assertEqual(topology.level_span(level), 2)
def test_group_index(self):
"""TestTopology: get_index"""
level_groups = [[1, 2], [0, 3, 4, 5]]
level = "test_level"
topology = Topology()
topology.add_to_level(level, level_groups)
self.assertEqual(topology.get_index(level, [1, 2]), 0)
self.assertEqual(topology.get_index(level, [0, 3, 4, 5]), 1)
class BaseTestStats(unittest.TestCase):
def setUp(self):
trace = trappy.BareTrace()
data = {
"identifier": [
0,
0,
0,
1,
1,
1,
],
"result": [
"fire",
"blank",
"fire",
"blank",
"fire",
"blank",
],
}
index = pd.Series([0.1, 0.2, 0.3, 0.4, 0.5, 0.6], name="Time")
data_frame = pd.DataFrame(data, index=index)
trace.add_parsed_event("aim_and_fire", data_frame)
self._trace = trace
self.topology = Topology(clusters=[[0], [1]])
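        # With this fixture, identifier 0 "fires" at t=0.1 and t=0.3, while
        # identifier 1 "fires" only at t=0.5; the expected values in the tests
        # below follow from these timestamps.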
class TestTrigger(BaseTestStats):
def test_trigger_generation(self):
"""TestTrigger: generate"""
filters = {
"result": "fire"
}
event_class = self._trace.aim_and_fire
value = 1
pivot = "identifier"
trigger = Trigger(self._trace,
event_class,
filters,
value,
pivot)
expected = pd.Series([1, 1], index=pd.Index([0.1, 0.3], name="Time"))
assert_series_equal(expected, trigger.generate(0))
expected = pd.Series([1], index=pd.Index([0.5], name="Time"))
assert_series_equal(expected, trigger.generate(1))
def test_trigger_with_func(self):
"""Trigger works with a function or lambda as filter"""
def my_filter(val):
return val.startswith("fi")
trigger = Trigger(self._trace, self._trace.aim_and_fire,
filters={"result": my_filter}, value=1,
pivot="identifier")
expected = pd.Series([1], index=pd.Index([0.5], name="Time"))
assert_series_equal(expected, trigger.generate(1))
my_filters = {"result": lambda x: x.startswith("bl")}
trigger = Trigger(self._trace, self._trace.aim_and_fire,
filters=my_filters, value=1, pivot="identifier")
expected = pd.Series([1, 1], index=pd.Index([0.4, 0.6], name="Time"))
assert_series_equal(expected, trigger.generate(1))
def test_trigger_with_callable_class(self):
"""Trigger works with a callable class as filter"""
class my_filter(object):
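            # Stateful filter: each call reports whether the *previous* value it
            # saw equalled val_out, then stores the current value for the next call.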
def __init__(self, val_out):
self.prev_val = 0
self.val_out = val_out
def __call__(self, val):
ret = self.prev_val == self.val_out
self.prev_val = val
return ret
trigger = Trigger(self._trace, self._trace.aim_and_fire,
filters={"identifier": my_filter(1)}, value=1,
pivot="result")
expected = pd.Series([1], index=pd.Index([0.6], name="Time"))
assert_series_equal(expected, trigger.generate("blank"))
def test_filter_prev_values(self):
"""Trigger works with a filter that depends on previous values of the same pivot"""
# We generate an example in which we want a trigger whenever the
# identifier is no longer 1 for blank
class my_filter(object):
def __init__(self, val_out):
self.prev_val = 0
self.val_out = val_out
def __call__(self, val):
ret = self.prev_val == self.val_out
self.prev_val = val
return ret
trace = trappy.BareTrace()
data = collections.OrderedDict([
(0.1, ["blank", 1]),
(0.2, ["fire", 1]),
(0.3, ["blank", 0]), # value is no longer 1, trigger
(0.4, ["blank", 1]),
(0.5, ["fire", 0]), # This should NOT trigger
(0.6, ["blank", 0]), # value is no longer 1 for blank, trigger
])
data_frame = pd.DataFrame.from_dict(data, orient="index", )
data_frame.columns = ["result", "identifier"]
trace.add_parsed_event("aim_and_fire", data_frame)
trigger = Trigger(trace, trace.aim_and_fire,
filters={"identifier": my_filter(1)}, value=-1,
pivot="result")
expected = pd.Series([-1, -1], index=[0.3, 0.6])
assert_series_equal(expected, trigger.generate("blank"))
class TestAggregator(BaseTestStats):
def test_scalar_aggfunc_single_trigger(self):
"""TestAggregator: 1 trigger scalar aggfunc"""
def aggfunc(series):
return series.sum()
filters = {
"result": "fire"
}
event_class = self._trace.aim_and_fire
value = 1
pivot = "identifier"
trigger = Trigger(self._trace,
event_class,
filters,
value,
pivot)
aggregator = MultiTriggerAggregator([trigger],
self.topology,
aggfunc=aggfunc)
# There are three "fire" in total
# The all level in topology looks like
# [[0, 1]]
result = aggregator.aggregate(level="all")
self.assertEqual(result, [3.0])
# There are two "fire" on the first node group and a
# a single "fire" on the second node group at the cluster
# level which looks like
# [[0], [1]]
result = aggregator.aggregate(level="cluster")
self.assertEqual(result, [2.0, 1.0])
def test_vector_aggfunc_single_trigger(self):
"""TestAggregator: 1 trigger vector aggfunc"""
def aggfunc(series):
return series.cumsum()
filters = {
"result": "fire"
}
event_class = self._trace.aim_and_fire
value = 1
pivot = "identifier"
trigger = Trigger(self._trace, event_class, filters, value, pivot)
aggregator = MultiTriggerAggregator([trigger],
self.topology,
aggfunc=aggfunc)
# There are three "fire" in total
# The all level in topology looks like
# [[0, 1]]
result = aggregator.aggregate(level="all")
expected_result = pd.Series([1.0, 1.0, 2.0, 2.0, 3.0, 3.0],
index=pd.Index([0.1, 0.2, 0.3, 0.4, 0.5, 0.6])
)
assert_series_equal(result[0], expected_result)
def test_vector_aggfunc_multiple_trigger(self):
"""TestAggregator: multi trigger vector aggfunc"""
def aggfunc(series):
return series.cumsum()
filters = {
"result": "fire"
}
event_class = self._trace.aim_and_fire
value = 1
pivot = "identifier"
trigger_fire = Trigger(self._trace,
event_class,
filters,
value,
pivot)
filters = {
"result": "blank"
}
value = -1
trigger_blank = Trigger(self._trace, event_class, filters, value,
pivot)
aggregator = MultiTriggerAggregator([trigger_fire, trigger_blank],
self.topology,
aggfunc=aggfunc)
# There are three "fire" in total
# The all level in topology looks like
# [[0, 1]]
result = aggregator.aggregate(level="all")
expected_result = pd.Series([1.0, 0.0, 1.0, 0.0, 1.0, 0.0],
index=pd.Index([0.1, 0.2, 0.3, 0.4, 0.5, 0.6])
)
assert_series_equal(result[0], expected_result)
def test_default_aggfunc_multiple_trigger(self):
"""MultiTriggerAggregator with the default aggfunc"""
trigger_fire = Trigger(self._trace, self._trace.aim_and_fire,
filters={"result": "fire"},
pivot="identifier", value=1)
trigger_blank = Trigger(self._trace, self._trace.aim_and_fire,
filters={"result": "blank"},
pivot="identifier", value=2)
aggregator = MultiTriggerAggregator([trigger_fire, trigger_blank],
self.topology)
results = aggregator.aggregate(level="cpu")
expected_results = [
pd.Series([1., 2., 1., 0., 0., 0.],
index=[0.1, 0.2, 0.3, 0.4, 0.5, 0.6]),
pd.Series([0., 0., 0., 2., 1., 2.],
index=[0.1, 0.2, 0.3, 0.4, 0.5, 0.6]),
]
self.assertEquals(len(results), len(expected_results))
for result, expected in zip(results, expected_results):
assert_series_equal(result, expected)
| apache-2.0 |
kiae-grid/panda-bigmon-core | core/common/models.py | 1 | 138277 | # Create your models here.
# This is an auto-generated Django model module.
# You'll have to do the following manually to clean this up:
# * Rearrange models' order
# * Make sure each model has one field with primary_key=True
# Feel free to rename the models, but don't rename db_table values or field names.
#
# Also note: You'll have to insert the output of 'django-admin.py sqlcustom [appname]'
# into your database.
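# (One way to apply that output, assuming the database client accepts piped
#  input and that the app label really is "common" -- both guesses here -- is
#  something like: django-admin.py sqlcustom common | python manage.py dbshell)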
from __future__ import unicode_literals
from ..pandajob.columns_config import COLUMNS, ORDER_COLUMNS, COL_TITLES, FILTERS
from django.db import models
models.options.DEFAULT_NAMES += ('allColumns', 'orderColumns', \
'primaryColumns', 'secondaryColumns', \
'columnTitles', 'filterFields',)
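# Extending DEFAULT_NAMES above registers the extra Meta options (column lists,
# titles, filter fields) so Django will accept them on model Meta classes.
# A minimal, purely illustrative sketch of how a model elsewhere in bigmon might
# use them (the attribute values below are assumptions, not taken from this file):
#
#     class SomeJobModel(models.Model):
#         pandaid = models.BigIntegerField(db_column='PANDAID', primary_key=True)
#
#         class Meta:
#             db_table = u'somejobtable'
#             allColumns = COLUMNS
#             orderColumns = ORDER_COLUMNS
#             columnTitles = COL_TITLES
#             filterFields = FILTERS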
class Cache(models.Model):
type = models.CharField(db_column='TYPE', max_length=250)
value = models.CharField(db_column='VALUE', max_length=250)
qurl = models.CharField(db_column='QURL', max_length=250)
modtime = models.DateTimeField(db_column='MODTIME')
usetime = models.DateTimeField(db_column='USETIME')
updmin = models.IntegerField(null=True, db_column='UPDMIN', blank=True)
data = models.TextField(db_column='DATA', blank=True)
class Meta:
db_table = u'cache'
class Certificates(models.Model):
id = models.IntegerField(primary_key=True, db_column='ID')
cert = models.CharField(max_length=12000, db_column='CERT')
class Meta:
db_table = u'certificates'
class Classlist(models.Model):
class_field = models.CharField(max_length=90, db_column='CLASS', primary_key=True) # Field renamed because it was a Python reserved word.
name = models.CharField(max_length=180, db_column='NAME', primary_key=True)
rights = models.CharField(max_length=90, db_column='RIGHTS')
priority = models.IntegerField(null=True, db_column='PRIORITY', blank=True)
quota1 = models.BigIntegerField(null=True, db_column='QUOTA1', blank=True)
quota7 = models.BigIntegerField(null=True, db_column='QUOTA7', blank=True)
quota30 = models.BigIntegerField(null=True, db_column='QUOTA30', blank=True)
class Meta:
db_table = u'classlist'
unique_together = ('class_field', 'name')
class Cloudconfig(models.Model):
name = models.CharField(max_length=60, primary_key=True, db_column='NAME')
description = models.CharField(max_length=150, db_column='DESCRIPTION')
tier1 = models.CharField(max_length=60, db_column='TIER1')
tier1se = models.CharField(max_length=1200, db_column='TIER1SE')
relocation = models.CharField(max_length=30, db_column='RELOCATION', blank=True)
weight = models.IntegerField(db_column='WEIGHT')
server = models.CharField(max_length=300, db_column='SERVER')
status = models.CharField(max_length=60, db_column='STATUS')
transtimelo = models.IntegerField(db_column='TRANSTIMELO')
transtimehi = models.IntegerField(db_column='TRANSTIMEHI')
waittime = models.IntegerField(db_column='WAITTIME')
comment_field = models.CharField(max_length=600, db_column='COMMENT_', blank=True) # Field renamed because it was a Python reserved word.
space = models.IntegerField(db_column='SPACE')
moduser = models.CharField(max_length=90, db_column='MODUSER', blank=True)
modtime = models.DateTimeField(db_column='MODTIME')
validation = models.CharField(max_length=60, db_column='VALIDATION', blank=True)
mcshare = models.IntegerField(db_column='MCSHARE')
countries = models.CharField(max_length=240, db_column='COUNTRIES', blank=True)
fasttrack = models.CharField(max_length=60, db_column='FASTTRACK', blank=True)
nprestage = models.BigIntegerField(db_column='NPRESTAGE')
pilotowners = models.CharField(max_length=900, db_column='PILOTOWNERS', blank=True)
dn = models.CharField(max_length=300, db_column='DN', blank=True)
email = models.CharField(max_length=180, db_column='EMAIL', blank=True)
fairshare = models.CharField(max_length=384, db_column='FAIRSHARE', blank=True)
class Meta:
db_table = u'cloudconfig'
class Cloudspace(models.Model):
cloud = models.CharField(max_length=60, db_column='CLOUD', primary_key=True)
store = models.CharField(max_length=150, db_column='STORE', primary_key=True)
space = models.IntegerField(db_column='SPACE')
freespace = models.IntegerField(db_column='FREESPACE')
moduser = models.CharField(max_length=90, db_column='MODUSER')
modtime = models.DateTimeField(db_column='MODTIME')
class Meta:
db_table = u'cloudspace'
unique_together = ('cloud', 'store')
class Cloudtasks(models.Model):
id = models.IntegerField(primary_key=True, db_column='ID')
taskname = models.CharField(max_length=384, db_column='TASKNAME', blank=True)
taskid = models.IntegerField(null=True, db_column='TASKID', blank=True)
cloud = models.CharField(max_length=60, db_column='CLOUD', blank=True)
status = models.CharField(max_length=60, db_column='STATUS', blank=True)
tmod = models.DateTimeField(db_column='TMOD')
tenter = models.DateTimeField(db_column='TENTER')
class Meta:
db_table = u'cloudtasks'
class Datasets(models.Model):
vuid = models.CharField(max_length=120, db_column='VUID', primary_key=True)
name = models.CharField(max_length=765, db_column='NAME')
version = models.CharField(max_length=30, db_column='VERSION', blank=True)
type = models.CharField(max_length=60, db_column='TYPE')
status = models.CharField(max_length=30, db_column='STATUS', blank=True)
numberfiles = models.IntegerField(null=True, db_column='NUMBERFILES', blank=True)
currentfiles = models.IntegerField(null=True, db_column='CURRENTFILES', blank=True)
creationdate = models.DateTimeField(null=True, db_column='CREATIONDATE', blank=True)
modificationdate = models.DateTimeField(db_column='MODIFICATIONDATE', primary_key=True)
moverid = models.BigIntegerField(db_column='MOVERID')
transferstatus = models.IntegerField(db_column='TRANSFERSTATUS')
subtype = models.CharField(max_length=15, db_column='SUBTYPE', blank=True)
class Meta:
db_table = u'datasets'
unique_together = ('vuid', 'modificationdate')
class DeftDataset(models.Model):
dataset_id = models.CharField(db_column='DATASET_ID', primary_key=True, max_length=255)
dataset_meta = models.BigIntegerField(db_column='DATASET_META', blank=True, null=True)
dataset_state = models.CharField(db_column='DATASET_STATE', max_length=16, blank=True)
dataset_source = models.BigIntegerField(db_column='DATASET_SOURCE', blank=True, null=True)
dataset_target = models.BigIntegerField(db_column='DATASET_TARGET', blank=True, null=True)
dataset_comment = models.CharField(db_column='DATASET_COMMENT', max_length=128, blank=True)
class Meta:
managed = False
db_table = 'deft_dataset'
class DeftMeta(models.Model):
meta_id = models.BigIntegerField(primary_key=True, db_column='META_ID')
meta_state = models.CharField(max_length=48, db_column='META_STATE', blank=True)
meta_comment = models.CharField(max_length=384, db_column='META_COMMENT', blank=True)
meta_req_ts = models.DateTimeField(null=True, db_column='META_REQ_TS', blank=True)
meta_upd_ts = models.DateTimeField(null=True, db_column='META_UPD_TS', blank=True)
meta_requestor = models.CharField(max_length=48, db_column='META_REQUESTOR', blank=True)
meta_manager = models.CharField(max_length=48, db_column='META_MANAGER', blank=True)
meta_vo = models.CharField(max_length=48, db_column='META_VO', blank=True)
class Meta:
db_table = u'deft_meta'
class DeftTask(models.Model):
task_id = models.BigIntegerField(primary_key=True, db_column='TASK_ID')
task_meta = models.BigIntegerField(null=True, db_column='TASK_META', blank=True)
task_state = models.CharField(max_length=48, db_column='TASK_STATE', blank=True)
task_param = models.TextField(db_column='TASK_PARAM', blank=True)
task_tag = models.CharField(max_length=48, db_column='TASK_TAG', blank=True)
task_comment = models.CharField(max_length=384, db_column='TASK_COMMENT', blank=True)
task_vo = models.CharField(max_length=48, db_column='TASK_VO', blank=True)
task_transpath = models.CharField(max_length=384, db_column='TASK_TRANSPATH', blank=True)
class Meta:
db_table = u'deft_task'
class Dslist(models.Model):
id = models.IntegerField(primary_key=True, db_column='ID')
duid = models.CharField(max_length=120, db_column='DUID', blank=True)
name = models.CharField(max_length=600, db_column='NAME')
ugid = models.IntegerField(null=True, db_column='UGID', blank=True)
priority = models.IntegerField(null=True, db_column='PRIORITY', blank=True)
status = models.CharField(max_length=30, db_column='STATUS', blank=True)
lastuse = models.DateTimeField(db_column='LASTUSE')
pinstate = models.CharField(max_length=30, db_column='PINSTATE', blank=True)
pintime = models.DateTimeField(db_column='PINTIME')
lifetime = models.DateTimeField(db_column='LIFETIME')
site = models.CharField(max_length=180, db_column='SITE', blank=True)
par1 = models.CharField(max_length=90, db_column='PAR1', blank=True)
par2 = models.CharField(max_length=90, db_column='PAR2', blank=True)
par3 = models.CharField(max_length=90, db_column='PAR3', blank=True)
par4 = models.CharField(max_length=90, db_column='PAR4', blank=True)
par5 = models.CharField(max_length=90, db_column='PAR5', blank=True)
par6 = models.CharField(max_length=90, db_column='PAR6', blank=True)
class Meta:
db_table = u'dslist'
class Etask(models.Model):
taskid = models.IntegerField(primary_key=True, db_column='TASKID')
creationtime = models.DateTimeField(db_column='CREATIONTIME')
modificationtime = models.DateTimeField(db_column='MODIFICATIONTIME')
taskname = models.CharField(max_length=768, db_column='TASKNAME', blank=True)
status = models.CharField(max_length=384, db_column='STATUS', blank=True)
username = models.CharField(max_length=768, db_column='USERNAME', blank=True)
usergroup = models.CharField(max_length=96, db_column='USERGROUP', blank=True)
userrole = models.CharField(max_length=96, db_column='USERROLE', blank=True)
actualpars = models.CharField(max_length=6000, db_column='ACTUALPARS', blank=True)
cpucount = models.IntegerField(db_column='CPUCOUNT', blank=True)
cpuunit = models.CharField(max_length=96, db_column='CPUUNIT', blank=True)
diskcount = models.IntegerField(db_column='DISKCOUNT', blank=True)
diskunit = models.CharField(max_length=96, db_column='DISKUNIT', blank=True)
ramcount = models.IntegerField(db_column='RAMCOUNT', blank=True)
ramunit = models.CharField(max_length=96, db_column='RAMUNIT', blank=True)
outip = models.CharField(max_length=9, db_column='OUTIP', blank=True)
tasktype = models.CharField(max_length=96, db_column='TASKTYPE', blank=True)
grid = models.CharField(max_length=96, db_column='GRID', blank=True)
transfk = models.IntegerField(db_column='TRANSFK', blank=True)
transuses = models.CharField(max_length=768, db_column='TRANSUSES', blank=True)
transhome = models.CharField(max_length=768, db_column='TRANSHOME', blank=True)
transpath = models.CharField(max_length=768, db_column='TRANSPATH', blank=True)
transformalpars = models.CharField(max_length=768, db_column='TRANSFORMALPARS', blank=True)
tier = models.CharField(max_length=36, db_column='TIER', blank=True)
ndone = models.IntegerField(db_column='NDONE', blank=True)
ntotal = models.IntegerField(db_column='NTOTAL', blank=True)
nevents = models.BigIntegerField(db_column='NEVENTS', blank=True)
relpriority = models.CharField(max_length=30, db_column='RELPRIORITY', blank=True)
expevtperjob = models.BigIntegerField(db_column='EXPEVTPERJOB', blank=True)
tasktransinfo = models.CharField(max_length=1536, db_column='TASKTRANSINFO', blank=True)
extid1 = models.BigIntegerField(db_column='EXTID1', blank=True)
reqid = models.BigIntegerField(db_column='REQID', blank=True)
expntotal = models.BigIntegerField(db_column='EXPNTOTAL', blank=True)
cmtconfig = models.CharField(max_length=768, db_column='CMTCONFIG', blank=True)
site = models.CharField(max_length=384, db_column='SITE', blank=True)
tasktype2 = models.CharField(max_length=192, db_column='TASKTYPE2', blank=True)
taskpriority = models.IntegerField(db_column='TASKPRIORITY', blank=True)
partid = models.CharField(max_length=192, db_column='PARTID', blank=True)
taskpars = models.CharField(max_length=3072, db_column='TASKPARS', blank=True)
fillstatus = models.CharField(max_length=192, db_column='FILLSTATUS', blank=True)
rw = models.BigIntegerField(db_column='RW', blank=True)
jobsremaining = models.BigIntegerField(db_column='JOBSREMAINING', blank=True)
cpuperjob = models.IntegerField(db_column='CPUPERJOB', blank=True)
class Meta:
db_table = u'etask'
class Filestable4(models.Model):
row_id = models.BigIntegerField(db_column='ROW_ID', primary_key=True)
pandaid = models.BigIntegerField(db_column='PANDAID')
modificationtime = models.DateTimeField(db_column='MODIFICATIONTIME', primary_key=True)
guid = models.CharField(max_length=192, db_column='GUID', blank=True)
lfn = models.CharField(max_length=768, db_column='LFN', blank=True)
type = models.CharField(max_length=60, db_column='TYPE', blank=True)
dataset = models.CharField(max_length=765, db_column='DATASET', blank=True)
status = models.CharField(max_length=192, db_column='STATUS', blank=True)
proddblock = models.CharField(max_length=765, db_column='PRODDBLOCK', blank=True)
proddblocktoken = models.CharField(max_length=750, db_column='PRODDBLOCKTOKEN', blank=True)
dispatchdblock = models.CharField(max_length=765, db_column='DISPATCHDBLOCK', blank=True)
dispatchdblocktoken = models.CharField(max_length=750, db_column='DISPATCHDBLOCKTOKEN', blank=True)
destinationdblock = models.CharField(max_length=765, db_column='DESTINATIONDBLOCK', blank=True)
destinationdblocktoken = models.CharField(max_length=750, db_column='DESTINATIONDBLOCKTOKEN', blank=True)
destinationse = models.CharField(max_length=750, db_column='DESTINATIONSE', blank=True)
fsize = models.BigIntegerField(db_column='FSIZE')
md5sum = models.CharField(max_length=108, db_column='MD5SUM', blank=True)
checksum = models.CharField(max_length=108, db_column='CHECKSUM', blank=True)
scope = models.CharField(max_length=90, db_column='SCOPE', blank=True)
jeditaskid = models.BigIntegerField(null=True, db_column='JEDITASKID', blank=True)
datasetid = models.BigIntegerField(null=True, db_column='DATASETID', blank=True)
fileid = models.BigIntegerField(null=True, db_column='FILEID', blank=True)
attemptnr = models.IntegerField(null=True, db_column='ATTEMPTNR', blank=True)
class Meta:
db_table = u'filestable4'
unique_together = ('row_id', 'modificationtime')
class FilestableArch(models.Model):
row_id = models.BigIntegerField(db_column='ROW_ID', primary_key=True)
pandaid = models.BigIntegerField(db_column='PANDAID')
modificationtime = models.DateTimeField(db_column='MODIFICATIONTIME', primary_key=True)
creationtime = models.DateTimeField(db_column='CREATIONTIME')
guid = models.CharField(max_length=64, db_column='GUID', blank=True)
lfn = models.CharField(max_length=256, db_column='LFN', blank=True)
type = models.CharField(max_length=20, db_column='TYPE', blank=True)
dataset = models.CharField(max_length=255, db_column='DATASET', blank=True)
status = models.CharField(max_length=64, db_column='STATUS', blank=True)
proddblock = models.CharField(max_length=255, db_column='PRODDBLOCK', blank=True)
proddblocktoken = models.CharField(max_length=250, db_column='PRODDBLOCKTOKEN', blank=True)
dispatchdblock = models.CharField(max_length=265, db_column='DISPATCHDBLOCK', blank=True)
dispatchdblocktoken = models.CharField(max_length=250, db_column='DISPATCHDBLOCKTOKEN', blank=True)
destinationdblock = models.CharField(max_length=265, db_column='DESTINATIONDBLOCK', blank=True)
destinationdblocktoken = models.CharField(max_length=250, db_column='DESTINATIONDBLOCKTOKEN', blank=True)
destinationse = models.CharField(max_length=250, db_column='DESTINATIONSE', blank=True)
fsize = models.BigIntegerField(db_column='FSIZE')
md5sum = models.CharField(max_length=40, db_column='MD5SUM', blank=True)
checksum = models.CharField(max_length=40, db_column='CHECKSUM', blank=True)
scope = models.CharField(max_length=30, db_column='SCOPE', blank=True)
jeditaskid = models.BigIntegerField(null=True, db_column='JEDITASKID', blank=True)
datasetid = models.BigIntegerField(null=True, db_column='DATASETID', blank=True)
fileid = models.BigIntegerField(null=True, db_column='FILEID', blank=True)
attemptnr = models.IntegerField(null=True, db_column='ATTEMPTNR', blank=True)
class Meta:
db_table = u'filestable_arch'
unique_together = ('row_id', 'modificationtime')
class Groups(models.Model):
id = models.IntegerField(primary_key=True, db_column='ID')
name = models.CharField(max_length=180, db_column='NAME')
description = models.CharField(max_length=360, db_column='DESCRIPTION')
url = models.CharField(max_length=300, db_column='URL', blank=True)
classa = models.CharField(max_length=90, db_column='CLASSA', blank=True)
classp = models.CharField(max_length=90, db_column='CLASSP', blank=True)
classxp = models.CharField(max_length=90, db_column='CLASSXP', blank=True)
njobs1 = models.IntegerField(null=True, db_column='NJOBS1', blank=True)
njobs7 = models.IntegerField(null=True, db_column='NJOBS7', blank=True)
njobs30 = models.IntegerField(null=True, db_column='NJOBS30', blank=True)
cpua1 = models.BigIntegerField(null=True, db_column='CPUA1', blank=True)
cpua7 = models.BigIntegerField(null=True, db_column='CPUA7', blank=True)
cpua30 = models.BigIntegerField(null=True, db_column='CPUA30', blank=True)
cpup1 = models.BigIntegerField(null=True, db_column='CPUP1', blank=True)
cpup7 = models.BigIntegerField(null=True, db_column='CPUP7', blank=True)
cpup30 = models.BigIntegerField(null=True, db_column='CPUP30', blank=True)
cpuxp1 = models.BigIntegerField(null=True, db_column='CPUXP1', blank=True)
cpuxp7 = models.BigIntegerField(null=True, db_column='CPUXP7', blank=True)
cpuxp30 = models.BigIntegerField(null=True, db_column='CPUXP30', blank=True)
allcpua1 = models.BigIntegerField(null=True, db_column='ALLCPUA1', blank=True)
allcpua7 = models.BigIntegerField(null=True, db_column='ALLCPUA7', blank=True)
allcpua30 = models.BigIntegerField(null=True, db_column='ALLCPUA30', blank=True)
allcpup1 = models.BigIntegerField(null=True, db_column='ALLCPUP1', blank=True)
allcpup7 = models.BigIntegerField(null=True, db_column='ALLCPUP7', blank=True)
allcpup30 = models.BigIntegerField(null=True, db_column='ALLCPUP30', blank=True)
allcpuxp1 = models.BigIntegerField(null=True, db_column='ALLCPUXP1', blank=True)
allcpuxp7 = models.BigIntegerField(null=True, db_column='ALLCPUXP7', blank=True)
allcpuxp30 = models.BigIntegerField(null=True, db_column='ALLCPUXP30', blank=True)
quotaa1 = models.BigIntegerField(null=True, db_column='QUOTAA1', blank=True)
quotaa7 = models.BigIntegerField(null=True, db_column='QUOTAA7', blank=True)
quotaa30 = models.BigIntegerField(null=True, db_column='QUOTAA30', blank=True)
quotap1 = models.BigIntegerField(null=True, db_column='QUOTAP1', blank=True)
quotap7 = models.BigIntegerField(null=True, db_column='QUOTAP7', blank=True)
quotap30 = models.BigIntegerField(null=True, db_column='QUOTAP30', blank=True)
quotaxp1 = models.BigIntegerField(null=True, db_column='QUOTAXP1', blank=True)
quotaxp7 = models.BigIntegerField(null=True, db_column='QUOTAXP7', blank=True)
quotaxp30 = models.BigIntegerField(null=True, db_column='QUOTAXP30', blank=True)
allquotaa1 = models.BigIntegerField(null=True, db_column='ALLQUOTAA1', blank=True)
allquotaa7 = models.BigIntegerField(null=True, db_column='ALLQUOTAA7', blank=True)
allquotaa30 = models.BigIntegerField(null=True, db_column='ALLQUOTAA30', blank=True)
allquotap1 = models.BigIntegerField(null=True, db_column='ALLQUOTAP1', blank=True)
allquotap7 = models.BigIntegerField(null=True, db_column='ALLQUOTAP7', blank=True)
allquotap30 = models.BigIntegerField(null=True, db_column='ALLQUOTAP30', blank=True)
allquotaxp1 = models.BigIntegerField(null=True, db_column='ALLQUOTAXP1', blank=True)
allquotaxp7 = models.BigIntegerField(null=True, db_column='ALLQUOTAXP7', blank=True)
allquotaxp30 = models.BigIntegerField(null=True, db_column='ALLQUOTAXP30', blank=True)
space1 = models.IntegerField(null=True, db_column='SPACE1', blank=True)
space7 = models.IntegerField(null=True, db_column='SPACE7', blank=True)
space30 = models.IntegerField(null=True, db_column='SPACE30', blank=True)
class Meta:
db_table = u'groups'
class History(models.Model):
id = models.IntegerField(primary_key=True, db_column='ID')
entrytime = models.DateTimeField(db_column='ENTRYTIME')
starttime = models.DateTimeField(db_column='STARTTIME')
endtime = models.DateTimeField(db_column='ENDTIME')
cpu = models.BigIntegerField(null=True, db_column='CPU', blank=True)
cpuxp = models.BigIntegerField(null=True, db_column='CPUXP', blank=True)
space = models.IntegerField(null=True, db_column='SPACE', blank=True)
class Meta:
db_table = u'history'
class Incidents(models.Model):
at_time = models.DateTimeField(primary_key=True, db_column='AT_TIME')
typekey = models.CharField(max_length=60, db_column='TYPEKEY', blank=True)
description = models.CharField(max_length=600, db_column='DESCRIPTION', blank=True)
class Meta:
db_table = u'incidents'
class InfomodelsSitestatus(models.Model):
id = models.BigIntegerField(primary_key=True, db_column='ID')
sitename = models.CharField(max_length=180, db_column='SITENAME', blank=True)
active = models.IntegerField(null=True, db_column='ACTIVE', blank=True)
class Meta:
db_table = u'infomodels_sitestatus'
class Installedsw(models.Model):
siteid = models.CharField(max_length=180, db_column='SITEID', primary_key=True)
cloud = models.CharField(max_length=30, db_column='CLOUD', blank=True)
release = models.CharField(max_length=30, db_column='RELEASE', primary_key=True)
cache = models.CharField(max_length=120, db_column='CACHE', primary_key=True)
validation = models.CharField(max_length=30, db_column='VALIDATION', blank=True)
cmtconfig = models.CharField(max_length=120, db_column='CMTCONFIG', primary_key=True)
class Meta:
db_table = u'installedsw'
unique_together = ('siteid', 'release', 'cache', 'cmtconfig')
class Jdllist(models.Model):
name = models.CharField(max_length=180, primary_key=True, db_column='NAME')
host = models.CharField(max_length=180, db_column='HOST', blank=True)
system = models.CharField(max_length=60, db_column='SYSTEM')
jdl = models.CharField(max_length=12000, db_column='JDL', blank=True)
class Meta:
db_table = u'jdllist'
class JediAuxStatusMintaskid(models.Model):
status = models.CharField(max_length=192, primary_key=True, db_column='STATUS')
min_jeditaskid = models.BigIntegerField(db_column='MIN_JEDITASKID')
class Meta:
db_table = u'jedi_aux_status_mintaskid'
class JediDatasetContents(models.Model):
jeditaskid = models.BigIntegerField(db_column='JEDITASKID', primary_key=True)
datasetid = models.BigIntegerField(db_column='DATASETID', primary_key=True)
fileid = models.BigIntegerField(db_column='FILEID', primary_key=True)
creationdate = models.DateTimeField(db_column='CREATIONDATE')
lastattempttime = models.DateTimeField(null=True, db_column='LASTATTEMPTTIME', blank=True)
lfn = models.CharField(max_length=768, db_column='LFN')
guid = models.CharField(max_length=192, db_column='GUID', blank=True)
type = models.CharField(max_length=60, db_column='TYPE')
status = models.CharField(max_length=192, db_column='STATUS')
fsize = models.BigIntegerField(null=True, db_column='FSIZE', blank=True)
checksum = models.CharField(max_length=108, db_column='CHECKSUM', blank=True)
scope = models.CharField(max_length=90, db_column='SCOPE', blank=True)
attemptnr = models.IntegerField(null=True, db_column='ATTEMPTNR', blank=True)
maxattempt = models.IntegerField(null=True, db_column='MAXATTEMPT', blank=True)
nevents = models.IntegerField(null=True, db_column='NEVENTS', blank=True)
keeptrack = models.IntegerField(null=True, db_column='KEEPTRACK', blank=True)
startevent = models.IntegerField(null=True, db_column='STARTEVENT', blank=True)
endevent = models.IntegerField(null=True, db_column='ENDEVENT', blank=True)
firstevent = models.IntegerField(null=True, db_column='FIRSTEVENT', blank=True)
boundaryid = models.BigIntegerField(null=True, db_column='BOUNDARYID', blank=True)
pandaid = models.BigIntegerField(db_column='PANDAID', blank=True)
class Meta:
db_table = u'jedi_dataset_contents'
unique_together = ('jeditaskid', 'datasetid', 'fileid')
class JediDatasets(models.Model):
jeditaskid = models.BigIntegerField(db_column='JEDITASKID', primary_key=True)
datasetid = models.BigIntegerField(db_column='DATASETID', primary_key=True)
datasetname = models.CharField(max_length=765, db_column='DATASETNAME')
type = models.CharField(max_length=60, db_column='TYPE')
creationtime = models.DateTimeField(db_column='CREATIONTIME')
modificationtime = models.DateTimeField(db_column='MODIFICATIONTIME')
vo = models.CharField(max_length=48, db_column='VO', blank=True)
cloud = models.CharField(max_length=30, db_column='CLOUD', blank=True)
site = models.CharField(max_length=180, db_column='SITE', blank=True)
masterid = models.BigIntegerField(null=True, db_column='MASTERID', blank=True)
provenanceid = models.BigIntegerField(null=True, db_column='PROVENANCEID', blank=True)
containername = models.CharField(max_length=396, db_column='CONTAINERNAME', blank=True)
status = models.CharField(max_length=60, db_column='STATUS', blank=True)
state = models.CharField(max_length=60, db_column='STATE', blank=True)
statechecktime = models.DateTimeField(null=True, db_column='STATECHECKTIME', blank=True)
statecheckexpiration = models.DateTimeField(null=True, db_column='STATECHECKEXPIRATION', blank=True)
frozentime = models.DateTimeField(null=True, db_column='FROZENTIME', blank=True)
nfiles = models.IntegerField(null=True, db_column='NFILES', blank=True)
nfilestobeused = models.IntegerField(null=True, db_column='NFILESTOBEUSED', blank=True)
nfilesused = models.IntegerField(null=True, db_column='NFILESUSED', blank=True)
nevents = models.BigIntegerField(null=True, db_column='NEVENTS', blank=True)
neventstobeused = models.BigIntegerField(null=True, db_column='NEVENTSTOBEUSED', blank=True)
neventsused = models.BigIntegerField(null=True, db_column='NEVENTSUSED', blank=True)
lockedby = models.CharField(max_length=120, db_column='LOCKEDBY', blank=True)
lockedtime = models.DateTimeField(null=True, db_column='LOCKEDTIME', blank=True)
nfilesfinished = models.IntegerField(null=True, db_column='NFILESFINISHED', blank=True)
nfilesfailed = models.IntegerField(null=True, db_column='NFILESFAILED', blank=True)
attributes = models.CharField(max_length=300, db_column='ATTRIBUTES', blank=True)
streamname = models.CharField(max_length=60, db_column='STREAMNAME', blank=True)
storagetoken = models.CharField(max_length=180, db_column='STORAGETOKEN', blank=True)
destination = models.CharField(max_length=180, db_column='DESTINATION', blank=True)
nfilesonhold = models.IntegerField(null=True, db_column='NFILESONHOLD', blank=True)
templateid = models.BigIntegerField(db_column='TEMPLATEID', blank=True)
class Meta:
db_table = u'jedi_datasets'
unique_together = ('jeditaskid', 'datasetid')
class JediEvents(models.Model):
jeditaskid = models.BigIntegerField(db_column='JEDITASKID', primary_key=True)
pandaid = models.BigIntegerField(db_column='PANDAID', primary_key=True)
fileid = models.BigIntegerField(db_column='FILEID', primary_key=True)
job_processid = models.IntegerField(db_column='JOB_PROCESSID', primary_key=True)
def_min_eventid = models.IntegerField(null=True, db_column='DEF_MIN_EVENTID', blank=True)
def_max_eventid = models.IntegerField(null=True, db_column='DEF_MAX_EVENTID', blank=True)
processed_upto_eventid = models.IntegerField(null=True, db_column='PROCESSED_UPTO_EVENTID', blank=True)
datasetid = models.BigIntegerField(db_column='DATASETID', blank=True)
status = models.IntegerField(db_column='STATUS', blank=True)
attemptnr = models.IntegerField(db_column='ATTEMPTNR', blank=True)
class Meta:
db_table = u'jedi_events'
unique_together = ('jeditaskid', 'pandaid', 'fileid', 'job_processid')
class JediJobparamsTemplate(models.Model):
jeditaskid = models.BigIntegerField(primary_key=True, db_column='JEDITASKID')
jobparamstemplate = models.TextField(db_column='JOBPARAMSTEMPLATE', blank=True)
class Meta:
db_table = u'jedi_jobparams_template'
class JediJobRetryHistory(models.Model):
jeditaskid = models.BigIntegerField(db_column='JEDITASKID', primary_key=True)
oldpandaid = models.BigIntegerField(db_column='OLDPANDAID', primary_key=True)
newpandaid = models.BigIntegerField(db_column='NEWPANDAID', primary_key=True)
ins_utc_tstamp = models.BigIntegerField(db_column='INS_UTC_TSTAMP', blank=True)
relationtype = models.CharField(max_length=48, db_column='RELATIONTYPE')
class Meta:
db_table = u'jedi_job_retry_history'
unique_together = ('jeditaskid', 'oldpandaid', 'newpandaid')
class JediOutputTemplate(models.Model):
jeditaskid = models.BigIntegerField(db_column='JEDITASKID', primary_key=True)
datasetid = models.BigIntegerField(db_column='DATASETID', primary_key=True)
outtempid = models.BigIntegerField(db_column='OUTTEMPID', primary_key=True)
filenametemplate = models.CharField(max_length=768, db_column='FILENAMETEMPLATE')
maxserialnr = models.IntegerField(null=True, db_column='MAXSERIALNR', blank=True)
serialnr = models.IntegerField(null=True, db_column='SERIALNR', blank=True)
sourcename = models.CharField(max_length=768, db_column='SOURCENAME', blank=True)
streamname = models.CharField(max_length=60, db_column='STREAMNAME', blank=True)
outtype = models.CharField(max_length=60, db_column='OUTTYPE', blank=True)
class Meta:
db_table = u'jedi_output_template'
unique_together = ('jeditaskid', 'datasetid', 'outtempid')
class JediTaskparams(models.Model):
jeditaskid = models.BigIntegerField(primary_key=True, db_column='JEDITASKID')
taskparams = models.TextField(db_column='TASKPARAMS', blank=True)
class Meta:
db_table = u'jedi_taskparams'
class JediTasks(models.Model):
jeditaskid = models.BigIntegerField(primary_key=True, db_column='JEDITASKID')
taskname = models.CharField(max_length=384, db_column='TASKNAME', blank=True)
status = models.CharField(max_length=192, db_column='STATUS')
username = models.CharField(max_length=384, db_column='USERNAME')
creationdate = models.DateTimeField(db_column='CREATIONDATE')
modificationtime = models.DateTimeField(db_column='MODIFICATIONTIME')
reqid = models.IntegerField(null=True, db_column='REQID', blank=True)
oldstatus = models.CharField(max_length=192, db_column='OLDSTATUS', blank=True)
cloud = models.CharField(max_length=30, db_column='CLOUD', blank=True)
site = models.CharField(max_length=180, db_column='SITE', blank=True)
starttime = models.DateTimeField(null=True, db_column='STARTTIME', blank=True)
endtime = models.DateTimeField(null=True, db_column='ENDTIME', blank=True)
frozentime = models.DateTimeField(null=True, db_column='FROZENTIME', blank=True)
prodsourcelabel = models.CharField(max_length=60, db_column='PRODSOURCELABEL', blank=True)
workinggroup = models.CharField(max_length=96, db_column='WORKINGGROUP', blank=True)
vo = models.CharField(max_length=48, db_column='VO', blank=True)
corecount = models.IntegerField(null=True, db_column='CORECOUNT', blank=True)
tasktype = models.CharField(max_length=192, db_column='TASKTYPE', blank=True)
processingtype = models.CharField(max_length=192, db_column='PROCESSINGTYPE', blank=True)
taskpriority = models.IntegerField(null=True, db_column='TASKPRIORITY', blank=True)
currentpriority = models.IntegerField(null=True, db_column='CURRENTPRIORITY', blank=True)
architecture = models.CharField(max_length=768, db_column='ARCHITECTURE', blank=True)
transuses = models.CharField(max_length=192, db_column='TRANSUSES', blank=True)
transhome = models.CharField(max_length=384, db_column='TRANSHOME', blank=True)
transpath = models.CharField(max_length=384, db_column='TRANSPATH', blank=True)
lockedby = models.CharField(max_length=120, db_column='LOCKEDBY', blank=True)
lockedtime = models.DateTimeField(null=True, db_column='LOCKEDTIME', blank=True)
termcondition = models.CharField(max_length=300, db_column='TERMCONDITION', blank=True)
splitrule = models.CharField(max_length=300, db_column='SPLITRULE', blank=True)
walltime = models.IntegerField(null=True, db_column='WALLTIME', blank=True)
walltimeunit = models.CharField(max_length=96, db_column='WALLTIMEUNIT', blank=True)
outdiskcount = models.IntegerField(null=True, db_column='OUTDISKCOUNT', blank=True)
outdiskunit = models.CharField(max_length=96, db_column='OUTDISKUNIT', blank=True)
workdiskcount = models.IntegerField(null=True, db_column='WORKDISKCOUNT', blank=True)
workdiskunit = models.CharField(max_length=96, db_column='WORKDISKUNIT', blank=True)
ramcount = models.IntegerField(null=True, db_column='RAMCOUNT', blank=True)
ramunit = models.CharField(max_length=96, db_column='RAMUNIT', blank=True)
iointensity = models.IntegerField(null=True, db_column='IOINTENSITY', blank=True)
iointensityunit = models.CharField(max_length=96, db_column='IOINTENSITYUNIT', blank=True)
workqueue_id = models.IntegerField(null=True, db_column='WORKQUEUE_ID', blank=True)
progress = models.IntegerField(null=True, db_column='PROGRESS', blank=True)
failurerate = models.IntegerField(null=True, db_column='FAILURERATE', blank=True)
errordialog = models.CharField(max_length=765, db_column='ERRORDIALOG', blank=True)
countrygroup = models.CharField(max_length=20, db_column='COUNTRYGROUP', blank=True)
parent_tid = models.BigIntegerField(db_column='PARENT_TID', blank=True)
eventservice = models.IntegerField(null=True, db_column='EVENTSERVICE', blank=True)
ticketid = models.CharField(max_length=50, db_column='TICKETID', blank=True)
ticketsystemtype = models.CharField(max_length=16, db_column='TICKETSYSTEMTYPE', blank=True)
statechangetime = models.DateTimeField(null=True, db_column='STATECHANGETIME', blank=True)
superstatus = models.CharField(max_length=64, db_column='SUPERSTATUS', blank=True)
campaign = models.CharField(max_length=72, db_column='CAMPAIGN', blank=True)
class Meta:
db_table = u'jedi_tasks'
class JediWorkQueue(models.Model):
queue_id = models.IntegerField(primary_key=True, db_column='QUEUE_ID')
queue_name = models.CharField(max_length=16, db_column='QUEUE_NAME')
queue_type = models.CharField(max_length=16, db_column='QUEUE_TYPE')
vo = models.CharField(max_length=16, db_column='VO')
status = models.CharField(max_length=64, db_column='STATUS', blank=True)
partitionid = models.IntegerField(null=True, db_column='PARTITIONID', blank=True)
stretchable = models.IntegerField(null=True, db_column='STRETCHABLE', blank=True)
queue_share = models.IntegerField(null=True, db_column='QUEUE_SHARE', blank=True)
queue_order = models.IntegerField(null=True, db_column='QUEUE_ORDER', blank=True)
criteria = models.CharField(max_length=256, db_column='CRITERIA', blank=True)
variables = models.CharField(max_length=256, db_column='VARIABLES', blank=True)
class Meta:
db_table = u'jedi_work_queue'
class Jobclass(models.Model):
id = models.IntegerField(primary_key=True, db_column='ID')
name = models.CharField(max_length=90, db_column='NAME')
description = models.CharField(max_length=90, db_column='DESCRIPTION')
rights = models.CharField(max_length=90, db_column='RIGHTS', blank=True)
priority = models.IntegerField(null=True, db_column='PRIORITY', blank=True)
quota1 = models.BigIntegerField(null=True, db_column='QUOTA1', blank=True)
quota7 = models.BigIntegerField(null=True, db_column='QUOTA7', blank=True)
quota30 = models.BigIntegerField(null=True, db_column='QUOTA30', blank=True)
class Meta:
db_table = u'jobclass'
class Jobparamstable(models.Model):
pandaid = models.BigIntegerField(db_column='PANDAID', primary_key=True)
modificationtime = models.DateTimeField(db_column='MODIFICATIONTIME', primary_key=True)
jobparameters = models.TextField(db_column='JOBPARAMETERS', blank=True)
class Meta:
db_table = u'jobparamstable'
unique_together = ('pandaid', 'modificationtime')
class JobparamstableArch(models.Model):
pandaid = models.BigIntegerField(db_column='PANDAID')
modificationtime = models.DateTimeField(db_column='MODIFICATIONTIME')
jobparameters = models.TextField(db_column='JOBPARAMETERS', blank=True)
class Meta:
db_table = u'jobparamstable_arch'
class JobsStatuslog(models.Model):
pandaid = models.BigIntegerField(db_column='PANDAID')
modificationtime = models.DateTimeField(db_column='MODIFICATIONTIME')
jobstatus = models.CharField(max_length=45, db_column='JOBSTATUS')
prodsourcelabel = models.CharField(max_length=60, db_column='PRODSOURCELABEL', blank=True)
cloud = models.CharField(max_length=150, db_column='CLOUD', blank=True)
computingsite = models.CharField(max_length=384, db_column='COMPUTINGSITE', blank=True)
modificationhost = models.CharField(max_length=384, db_column='MODIFICATIONHOST', blank=True)
class Meta:
db_table = u'jobs_statuslog'
class Jobsarchived4WnlistStats(models.Model):
modificationtime = models.DateTimeField(primary_key=True, db_column='MODIFICATIONTIME')
computingsite = models.CharField(max_length=384, db_column='COMPUTINGSITE', blank=True)
modificationhost = models.CharField(max_length=384, db_column='MODIFICATIONHOST', blank=True)
jobstatus = models.CharField(max_length=45, db_column='JOBSTATUS')
transexitcode = models.CharField(max_length=384, db_column='TRANSEXITCODE', blank=True)
prodsourcelabel = models.CharField(max_length=60, db_column='PRODSOURCELABEL', blank=True)
num_of_jobs = models.IntegerField(null=True, db_column='NUM_OF_JOBS', blank=True)
max_modificationtime = models.DateTimeField(null=True, db_column='MAX_MODIFICATIONTIME', blank=True)
cur_date = models.DateTimeField(null=True, db_column='CUR_DATE', blank=True)
class Meta:
db_table = u'jobsarchived4_wnlist_stats'
class Jobsdebug(models.Model):
pandaid = models.BigIntegerField(primary_key=True, db_column='PANDAID')
stdout = models.CharField(max_length=6144, db_column='STDOUT', blank=True)
class Meta:
db_table = u'jobsdebug'
class Logstable(models.Model):
pandaid = models.IntegerField(primary_key=True, db_column='PANDAID')
log1 = models.TextField(db_column='LOG1')
log2 = models.TextField(db_column='LOG2')
log3 = models.TextField(db_column='LOG3')
log4 = models.TextField(db_column='LOG4')
class Meta:
db_table = u'logstable'
class Members(models.Model):
uname = models.CharField(max_length=90, db_column='UNAME', primary_key=True)
gname = models.CharField(max_length=90, db_column='GNAME', primary_key=True)
rights = models.CharField(max_length=90, db_column='RIGHTS', blank=True)
since = models.DateTimeField(db_column='SINCE')
class Meta:
db_table = u'members'
unique_together = ('uname', 'gname')
class Metatable(models.Model):
pandaid = models.BigIntegerField(db_column='PANDAID', primary_key=True)
modificationtime = models.DateTimeField(db_column='MODIFICATIONTIME', primary_key=True)
metadata = models.TextField(db_column='METADATA', blank=True)
class Meta:
db_table = u'metatable'
unique_together = ('pandaid', 'modificationtime')
class MetatableArch(models.Model):
pandaid = models.BigIntegerField(db_column='PANDAID', primary_key=True)
modificationtime = models.DateTimeField(db_column='MODIFICATIONTIME')
metadata = models.TextField(db_column='METADATA', blank=True)
class Meta:
db_table = u'metatable_arch'
class MvJobsactive4Stats(models.Model):
id = models.BigIntegerField(primary_key=True, db_column='ID')
cur_date = models.DateTimeField(db_column='CUR_DATE')
cloud = models.CharField(max_length=150, db_column='CLOUD', blank=True)
computingsite = models.CharField(max_length=384, db_column='COMPUTINGSITE', blank=True)
countrygroup = models.CharField(max_length=60, db_column='COUNTRYGROUP', blank=True)
workinggroup = models.CharField(max_length=60, db_column='WORKINGGROUP', blank=True)
relocationflag = models.IntegerField(null=True, db_column='RELOCATIONFLAG', blank=True)
jobstatus = models.CharField(max_length=45, db_column='JOBSTATUS')
processingtype = models.CharField(max_length=192, db_column='PROCESSINGTYPE', blank=True)
prodsourcelabel = models.CharField(max_length=60, db_column='PRODSOURCELABEL', blank=True)
currentpriority = models.IntegerField(null=True, db_column='CURRENTPRIORITY', blank=True)
num_of_jobs = models.IntegerField(null=True, db_column='NUM_OF_JOBS', blank=True)
vo = models.CharField(max_length=48, db_column='VO', blank=True)
workqueue_id = models.IntegerField(null=True, db_column='WORKQUEUE_ID', blank=True)
class Meta:
db_table = u'mv_jobsactive4_stats'
class OldSubcounter(models.Model):
subid = models.BigIntegerField(primary_key=True, db_column='SUBID')
class Meta:
db_table = u'old_subcounter'
class Pandaconfig(models.Model):
name = models.CharField(max_length=180, primary_key=True, db_column='NAME')
controller = models.CharField(max_length=60, db_column='CONTROLLER')
pathena = models.CharField(max_length=60, db_column='PATHENA', blank=True)
class Meta:
db_table = u'pandaconfig'
class PandaidsDeleted(models.Model):
pandaid = models.BigIntegerField(primary_key=True, db_column='PANDAID')
tstamp_datadel = models.DateTimeField(null=True, db_column='TSTAMP_DATADEL', blank=True)
class Meta:
db_table = u'pandaids_deleted'
class PandaidsModiftime(models.Model):
pandaid = models.BigIntegerField(db_column='PANDAID', primary_key=True)
modiftime = models.DateTimeField(db_column='MODIFTIME', primary_key=True)
class Meta:
db_table = u'pandaids_modiftime'
unique_together = ('pandaid', 'modiftime')
class Pandalog(models.Model):
bintime = models.DateTimeField(db_column='BINTIME', primary_key=True)
name = models.CharField(max_length=90, db_column='NAME', blank=True)
module = models.CharField(max_length=90, db_column='MODULE', blank=True)
loguser = models.CharField(max_length=240, db_column='LOGUSER', blank=True)
type = models.CharField(max_length=60, db_column='TYPE', blank=True)
pid = models.BigIntegerField(db_column='PID')
loglevel = models.IntegerField(db_column='LOGLEVEL')
levelname = models.CharField(max_length=90, db_column='LEVELNAME', blank=True)
time = models.CharField(max_length=90, db_column='TIME', blank=True)
filename = models.CharField(max_length=300, db_column='FILENAME', blank=True)
line = models.IntegerField(db_column='LINE')
message = models.CharField(max_length=12000, db_column='MESSAGE', blank=True)
class Meta:
db_table = u'pandalog'
class Passwords(models.Model):
id = models.IntegerField(primary_key=True, db_column='ID')
pass_field = models.CharField(max_length=180, db_column='PASS') # Field renamed because it was a Python reserved word.
class Meta:
db_table = u'passwords'
class Pilotqueue(models.Model):
jobid = models.CharField(db_column='JOBID', max_length=100, primary_key=True)
tpid = models.CharField(max_length=180, db_column='TPID')
url = models.CharField(max_length=600, db_column='URL', blank=True)
nickname = models.CharField(max_length=180, db_column='NICKNAME', primary_key=True)
system = models.CharField(max_length=60, db_column='SYSTEM')
user_field = models.CharField(max_length=180, db_column='USER_') # Field renamed because it was a Python reserved word.
host = models.CharField(max_length=180, db_column='HOST')
submithost = models.CharField(max_length=180, db_column='SUBMITHOST')
queueid = models.CharField(max_length=180, db_column='QUEUEID')
type = models.CharField(max_length=60, db_column='TYPE')
pandaid = models.IntegerField(null=True, db_column='PANDAID', blank=True)
tcheck = models.DateTimeField(db_column='TCHECK')
state = models.CharField(max_length=90, db_column='STATE')
tstate = models.DateTimeField(db_column='TSTATE')
tenter = models.DateTimeField(db_column='TENTER')
tsubmit = models.DateTimeField(db_column='TSUBMIT')
taccept = models.DateTimeField(db_column='TACCEPT')
tschedule = models.DateTimeField(db_column='TSCHEDULE')
tstart = models.DateTimeField(db_column='TSTART')
tend = models.DateTimeField(db_column='TEND')
tdone = models.DateTimeField(db_column='TDONE')
tretrieve = models.DateTimeField(db_column='TRETRIEVE')
status = models.CharField(max_length=60, db_column='STATUS')
errcode = models.IntegerField(db_column='ERRCODE')
errinfo = models.CharField(max_length=450, db_column='ERRINFO')
message = models.CharField(max_length=12000, db_column='MESSAGE', blank=True)
schedd_name = models.CharField(max_length=180, db_column='SCHEDD_NAME')
workernode = models.CharField(max_length=180, db_column='WORKERNODE')
class Meta:
db_table = u'pilotqueue'
unique_together = ('jobid', 'nickname')
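# Essentially the same columns as Pilotqueue, but mapped to 'pilotqueue_bnl' and keyed
# on TPID alone.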
class PilotqueueBnl(models.Model):
jobid = models.CharField(max_length=300, db_column='JOBID')
tpid = models.CharField(max_length=180, primary_key=True, db_column='TPID')
url = models.CharField(max_length=600, db_column='URL')
nickname = models.CharField(max_length=180, db_column='NICKNAME')
system = models.CharField(max_length=60, db_column='SYSTEM')
    user_field = models.CharField(max_length=180, db_column='USER_') # Field renamed: a Django field name cannot end with an underscore.
host = models.CharField(max_length=180, db_column='HOST')
submithost = models.CharField(max_length=180, db_column='SUBMITHOST')
schedd_name = models.CharField(max_length=180, db_column='SCHEDD_NAME')
queueid = models.CharField(max_length=180, db_column='QUEUEID')
type = models.CharField(max_length=60, db_column='TYPE')
pandaid = models.IntegerField(null=True, db_column='PANDAID', blank=True)
tcheck = models.DateTimeField(db_column='TCHECK')
state = models.CharField(max_length=90, db_column='STATE')
tstate = models.DateTimeField(db_column='TSTATE')
tenter = models.DateTimeField(db_column='TENTER')
tsubmit = models.DateTimeField(db_column='TSUBMIT')
taccept = models.DateTimeField(db_column='TACCEPT')
tschedule = models.DateTimeField(db_column='TSCHEDULE')
tstart = models.DateTimeField(db_column='TSTART')
tend = models.DateTimeField(db_column='TEND')
tdone = models.DateTimeField(db_column='TDONE')
tretrieve = models.DateTimeField(db_column='TRETRIEVE')
status = models.CharField(max_length=60, db_column='STATUS')
errcode = models.IntegerField(db_column='ERRCODE')
errinfo = models.CharField(max_length=450, db_column='ERRINFO')
message = models.CharField(max_length=12000, db_column='MESSAGE', blank=True)
workernode = models.CharField(max_length=180, db_column='WORKERNODE')
class Meta:
db_table = u'pilotqueue_bnl'
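# Tokens tied to a scheduler host/user/id, with a usage counter and created/expires timestamps.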
class Pilottoken(models.Model):
token = models.CharField(max_length=192, primary_key=True, db_column='TOKEN')
schedulerhost = models.CharField(max_length=300, db_column='SCHEDULERHOST', blank=True)
scheduleruser = models.CharField(max_length=450, db_column='SCHEDULERUSER', blank=True)
usages = models.IntegerField(db_column='USAGES')
created = models.DateTimeField(db_column='CREATED')
expires = models.DateTimeField(db_column='EXPIRES')
schedulerid = models.CharField(max_length=240, db_column='SCHEDULERID', blank=True)
class Meta:
db_table = u'pilottoken'
class Pilottype(models.Model):
name = models.CharField(max_length=180, primary_key=True, db_column='NAME')
script = models.CharField(max_length=180, db_column='SCRIPT')
url = models.CharField(max_length=450, db_column='URL')
system = models.CharField(max_length=180, db_column='SYSTEM')
class Meta:
db_table = u'pilottype'
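# The pool_* models below map the collection catalog tables: a lock table, two wide data
# tables whose payload sits in numbered VAR_1..VAR_263 columns, the collection list itself,
# and a per-variable description table.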
class PoolCollLock(models.Model):
id = models.CharField(max_length=150, primary_key=True, db_column='ID')
collection = models.CharField(max_length=1500, db_column='COLLECTION', blank=True)
client_info = models.CharField(max_length=1500, db_column='CLIENT_INFO', blank=True)
locktype = models.CharField(max_length=60, db_column='LOCKTYPE', blank=True)
timestamp = models.DateTimeField(null=True, db_column='TIMESTAMP', blank=True)
class Meta:
db_table = u'pool_coll_lock'
class PoolCollectionData(models.Model):
id = models.DecimalField(decimal_places=0, primary_key=True, db_column='ID', max_digits=11)
oid_1 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='OID_1', blank=True)
oid_2 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='OID_2', blank=True)
var_1_oid_1 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_1_OID_1', blank=True)
var_1_oid_2 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_1_OID_2', blank=True)
var_2_oid_1 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_2_OID_1', blank=True)
var_2_oid_2 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_2_OID_2', blank=True)
var_3 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_3', blank=True)
var_4 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_4', blank=True)
var_5 = models.FloatField(null=True, db_column='VAR_5', blank=True)
var_6 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_6', blank=True)
var_7 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_7', blank=True)
var_8 = models.FloatField(null=True, db_column='VAR_8', blank=True)
var_9 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_9', blank=True)
var_10 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_10', blank=True)
var_11 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_11', blank=True)
var_12 = models.FloatField(null=True, db_column='VAR_12', blank=True)
var_13 = models.FloatField(null=True, db_column='VAR_13', blank=True)
var_14 = models.FloatField(null=True, db_column='VAR_14', blank=True)
var_15 = models.DecimalField(decimal_places=0, null=True, max_digits=2, db_column='VAR_15', blank=True)
var_16 = models.DecimalField(decimal_places=0, null=True, max_digits=2, db_column='VAR_16', blank=True)
var_17 = models.DecimalField(decimal_places=0, null=True, max_digits=2, db_column='VAR_17', blank=True)
var_18 = models.DecimalField(decimal_places=0, null=True, max_digits=2, db_column='VAR_18', blank=True)
var_19 = models.FloatField(null=True, db_column='VAR_19', blank=True)
var_20 = models.FloatField(null=True, db_column='VAR_20', blank=True)
var_21 = models.FloatField(null=True, db_column='VAR_21', blank=True)
var_22 = models.FloatField(null=True, db_column='VAR_22', blank=True)
var_23 = models.FloatField(null=True, db_column='VAR_23', blank=True)
var_24 = models.FloatField(null=True, db_column='VAR_24', blank=True)
var_25 = models.FloatField(null=True, db_column='VAR_25', blank=True)
var_26 = models.FloatField(null=True, db_column='VAR_26', blank=True)
var_27 = models.FloatField(null=True, db_column='VAR_27', blank=True)
var_28 = models.FloatField(null=True, db_column='VAR_28', blank=True)
var_29 = models.FloatField(null=True, db_column='VAR_29', blank=True)
var_30 = models.FloatField(null=True, db_column='VAR_30', blank=True)
var_31 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_31', blank=True)
var_32 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_32', blank=True)
var_33 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_33', blank=True)
var_34 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_34', blank=True)
var_35 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_35', blank=True)
var_36 = models.FloatField(null=True, db_column='VAR_36', blank=True)
var_37 = models.FloatField(null=True, db_column='VAR_37', blank=True)
var_38 = models.FloatField(null=True, db_column='VAR_38', blank=True)
var_39 = models.FloatField(null=True, db_column='VAR_39', blank=True)
var_40 = models.FloatField(null=True, db_column='VAR_40', blank=True)
var_41 = models.FloatField(null=True, db_column='VAR_41', blank=True)
var_42 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_42', blank=True)
var_43 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_43', blank=True)
var_44 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_44', blank=True)
var_45 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_45', blank=True)
var_46 = models.FloatField(null=True, db_column='VAR_46', blank=True)
var_47 = models.FloatField(null=True, db_column='VAR_47', blank=True)
var_48 = models.FloatField(null=True, db_column='VAR_48', blank=True)
var_49 = models.FloatField(null=True, db_column='VAR_49', blank=True)
var_50 = models.FloatField(null=True, db_column='VAR_50', blank=True)
var_51 = models.FloatField(null=True, db_column='VAR_51', blank=True)
var_52 = models.FloatField(null=True, db_column='VAR_52', blank=True)
var_53 = models.FloatField(null=True, db_column='VAR_53', blank=True)
var_54 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_54', blank=True)
var_55 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_55', blank=True)
var_56 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_56', blank=True)
var_57 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_57', blank=True)
var_58 = models.FloatField(null=True, db_column='VAR_58', blank=True)
var_59 = models.FloatField(null=True, db_column='VAR_59', blank=True)
var_60 = models.FloatField(null=True, db_column='VAR_60', blank=True)
var_61 = models.FloatField(null=True, db_column='VAR_61', blank=True)
var_62 = models.FloatField(null=True, db_column='VAR_62', blank=True)
var_63 = models.FloatField(null=True, db_column='VAR_63', blank=True)
var_64 = models.FloatField(null=True, db_column='VAR_64', blank=True)
var_65 = models.FloatField(null=True, db_column='VAR_65', blank=True)
var_66 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_66', blank=True)
var_67 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_67', blank=True)
var_68 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_68', blank=True)
var_69 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_69', blank=True)
var_70 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_70', blank=True)
var_71 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_71', blank=True)
var_72 = models.FloatField(null=True, db_column='VAR_72', blank=True)
var_73 = models.FloatField(null=True, db_column='VAR_73', blank=True)
var_74 = models.FloatField(null=True, db_column='VAR_74', blank=True)
var_75 = models.FloatField(null=True, db_column='VAR_75', blank=True)
var_76 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_76', blank=True)
var_77 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_77', blank=True)
var_78 = models.FloatField(null=True, db_column='VAR_78', blank=True)
var_79 = models.FloatField(null=True, db_column='VAR_79', blank=True)
var_80 = models.FloatField(null=True, db_column='VAR_80', blank=True)
var_81 = models.FloatField(null=True, db_column='VAR_81', blank=True)
var_82 = models.FloatField(null=True, db_column='VAR_82', blank=True)
var_83 = models.FloatField(null=True, db_column='VAR_83', blank=True)
var_84 = models.FloatField(null=True, db_column='VAR_84', blank=True)
var_85 = models.FloatField(null=True, db_column='VAR_85', blank=True)
var_86 = models.FloatField(null=True, db_column='VAR_86', blank=True)
var_87 = models.FloatField(null=True, db_column='VAR_87', blank=True)
var_88 = models.FloatField(null=True, db_column='VAR_88', blank=True)
var_89 = models.FloatField(null=True, db_column='VAR_89', blank=True)
var_90 = models.FloatField(null=True, db_column='VAR_90', blank=True)
var_91 = models.FloatField(null=True, db_column='VAR_91', blank=True)
var_92 = models.FloatField(null=True, db_column='VAR_92', blank=True)
var_93 = models.FloatField(null=True, db_column='VAR_93', blank=True)
var_94 = models.FloatField(null=True, db_column='VAR_94', blank=True)
var_95 = models.FloatField(null=True, db_column='VAR_95', blank=True)
var_96 = models.FloatField(null=True, db_column='VAR_96', blank=True)
var_97 = models.FloatField(null=True, db_column='VAR_97', blank=True)
var_98 = models.FloatField(null=True, db_column='VAR_98', blank=True)
var_99 = models.FloatField(null=True, db_column='VAR_99', blank=True)
var_100 = models.FloatField(null=True, db_column='VAR_100', blank=True)
var_101 = models.FloatField(null=True, db_column='VAR_101', blank=True)
var_102 = models.FloatField(null=True, db_column='VAR_102', blank=True)
var_103 = models.FloatField(null=True, db_column='VAR_103', blank=True)
var_104 = models.FloatField(null=True, db_column='VAR_104', blank=True)
var_105 = models.FloatField(null=True, db_column='VAR_105', blank=True)
var_106 = models.FloatField(null=True, db_column='VAR_106', blank=True)
var_107 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_107', blank=True)
var_108 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_108', blank=True)
var_109 = models.FloatField(null=True, db_column='VAR_109', blank=True)
var_110 = models.FloatField(null=True, db_column='VAR_110', blank=True)
var_111 = models.FloatField(null=True, db_column='VAR_111', blank=True)
var_112 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_112', blank=True)
var_113 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_113', blank=True)
var_114 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_114', blank=True)
var_115 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_115', blank=True)
var_116 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_116', blank=True)
var_117 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_117', blank=True)
var_118 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_118', blank=True)
var_119 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_119', blank=True)
var_120 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_120', blank=True)
var_121 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_121', blank=True)
var_122 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_122', blank=True)
var_123 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_123', blank=True)
var_124 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_124', blank=True)
var_125 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_125', blank=True)
var_126 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_126', blank=True)
var_127 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_127', blank=True)
var_128 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_128', blank=True)
var_129 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_129', blank=True)
var_130 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_130', blank=True)
var_131 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_131', blank=True)
var_132 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_132', blank=True)
var_133 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_133', blank=True)
var_134 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_134', blank=True)
var_135 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_135', blank=True)
var_136 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_136', blank=True)
var_137 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_137', blank=True)
var_138 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_138', blank=True)
var_139 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_139', blank=True)
var_140 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_140', blank=True)
var_141 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_141', blank=True)
var_142 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_142', blank=True)
var_143 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_143', blank=True)
var_144 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_144', blank=True)
var_145 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_145', blank=True)
var_146 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_146', blank=True)
var_147 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_147', blank=True)
var_148 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_148', blank=True)
var_149 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_149', blank=True)
var_150 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_150', blank=True)
var_151 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_151', blank=True)
var_152 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_152', blank=True)
var_153 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_153', blank=True)
var_154 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_154', blank=True)
var_155 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_155', blank=True)
var_156 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_156', blank=True)
var_157 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_157', blank=True)
var_158 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_158', blank=True)
var_159 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_159', blank=True)
var_160 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_160', blank=True)
var_161 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_161', blank=True)
var_162 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_162', blank=True)
var_163 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_163', blank=True)
var_164 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_164', blank=True)
var_165 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_165', blank=True)
var_166 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_166', blank=True)
var_167 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_167', blank=True)
var_168 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_168', blank=True)
var_169 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_169', blank=True)
var_170 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_170', blank=True)
var_171 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_171', blank=True)
var_172 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_172', blank=True)
var_173 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_173', blank=True)
var_174 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_174', blank=True)
var_175 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_175', blank=True)
var_176 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_176', blank=True)
var_177 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_177', blank=True)
var_178 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_178', blank=True)
var_179 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_179', blank=True)
var_180 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_180', blank=True)
var_181 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_181', blank=True)
var_182 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_182', blank=True)
var_183 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_183', blank=True)
var_184 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_184', blank=True)
var_185 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_185', blank=True)
var_186 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_186', blank=True)
var_187 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_187', blank=True)
var_188 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_188', blank=True)
var_189 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_189', blank=True)
var_190 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_190', blank=True)
var_191 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_191', blank=True)
var_192 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_192', blank=True)
var_193 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_193', blank=True)
var_194 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_194', blank=True)
var_195 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_195', blank=True)
var_196 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_196', blank=True)
var_197 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_197', blank=True)
var_198 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_198', blank=True)
var_199 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_199', blank=True)
var_200 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_200', blank=True)
var_201 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_201', blank=True)
var_202 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_202', blank=True)
var_203 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_203', blank=True)
var_204 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_204', blank=True)
var_205 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_205', blank=True)
var_206 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_206', blank=True)
var_207 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_207', blank=True)
var_208 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_208', blank=True)
var_209 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_209', blank=True)
var_210 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_210', blank=True)
var_211 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_211', blank=True)
var_212 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_212', blank=True)
var_213 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_213', blank=True)
var_214 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_214', blank=True)
var_215 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_215', blank=True)
var_216 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_216', blank=True)
var_217 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_217', blank=True)
var_218 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_218', blank=True)
var_219 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_219', blank=True)
var_220 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_220', blank=True)
var_221 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_221', blank=True)
var_222 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_222', blank=True)
var_223 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_223', blank=True)
var_224 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_224', blank=True)
var_225 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_225', blank=True)
var_226 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_226', blank=True)
var_227 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_227', blank=True)
var_228 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_228', blank=True)
var_229 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_229', blank=True)
var_230 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_230', blank=True)
var_231 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_231', blank=True)
var_232 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_232', blank=True)
var_233 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_233', blank=True)
var_234 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_234', blank=True)
var_235 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_235', blank=True)
var_236 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_236', blank=True)
var_237 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_237', blank=True)
var_238 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_238', blank=True)
var_239 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_239', blank=True)
var_240 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_240', blank=True)
var_241 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_241', blank=True)
var_242 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_242', blank=True)
var_243 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_243', blank=True)
var_244 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_244', blank=True)
var_245 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_245', blank=True)
var_246 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_246', blank=True)
var_247 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_247', blank=True)
var_248 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_248', blank=True)
var_249 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_249', blank=True)
var_250 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_250', blank=True)
var_251 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_251', blank=True)
var_252 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_252', blank=True)
var_253 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_253', blank=True)
var_254 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_254', blank=True)
var_255 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_255', blank=True)
var_256 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_256', blank=True)
var_257 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_257', blank=True)
var_258 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_258', blank=True)
var_259 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_259', blank=True)
var_260 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_260', blank=True)
var_261 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_261', blank=True)
var_262 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_262', blank=True)
var_263 = models.FloatField(null=True, db_column='VAR_263', blank=True)
class Meta:
db_table = u'pool_collection_data'
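# Identical layout to PoolCollectionData above, mapped to the second data table
# 'pool_collection_data_1'.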
class PoolCollectionData1(models.Model):
id = models.DecimalField(decimal_places=0, primary_key=True, db_column='ID', max_digits=11)
oid_1 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='OID_1', blank=True)
oid_2 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='OID_2', blank=True)
var_1_oid_1 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_1_OID_1', blank=True)
var_1_oid_2 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_1_OID_2', blank=True)
var_2_oid_1 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_2_OID_1', blank=True)
var_2_oid_2 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_2_OID_2', blank=True)
var_3 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_3', blank=True)
var_4 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_4', blank=True)
var_5 = models.FloatField(null=True, db_column='VAR_5', blank=True)
var_6 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_6', blank=True)
var_7 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_7', blank=True)
var_8 = models.FloatField(null=True, db_column='VAR_8', blank=True)
var_9 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_9', blank=True)
var_10 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_10', blank=True)
var_11 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_11', blank=True)
var_12 = models.FloatField(null=True, db_column='VAR_12', blank=True)
var_13 = models.FloatField(null=True, db_column='VAR_13', blank=True)
var_14 = models.FloatField(null=True, db_column='VAR_14', blank=True)
var_15 = models.DecimalField(decimal_places=0, null=True, max_digits=2, db_column='VAR_15', blank=True)
var_16 = models.DecimalField(decimal_places=0, null=True, max_digits=2, db_column='VAR_16', blank=True)
var_17 = models.DecimalField(decimal_places=0, null=True, max_digits=2, db_column='VAR_17', blank=True)
var_18 = models.DecimalField(decimal_places=0, null=True, max_digits=2, db_column='VAR_18', blank=True)
var_19 = models.FloatField(null=True, db_column='VAR_19', blank=True)
var_20 = models.FloatField(null=True, db_column='VAR_20', blank=True)
var_21 = models.FloatField(null=True, db_column='VAR_21', blank=True)
var_22 = models.FloatField(null=True, db_column='VAR_22', blank=True)
var_23 = models.FloatField(null=True, db_column='VAR_23', blank=True)
var_24 = models.FloatField(null=True, db_column='VAR_24', blank=True)
var_25 = models.FloatField(null=True, db_column='VAR_25', blank=True)
var_26 = models.FloatField(null=True, db_column='VAR_26', blank=True)
var_27 = models.FloatField(null=True, db_column='VAR_27', blank=True)
var_28 = models.FloatField(null=True, db_column='VAR_28', blank=True)
var_29 = models.FloatField(null=True, db_column='VAR_29', blank=True)
var_30 = models.FloatField(null=True, db_column='VAR_30', blank=True)
var_31 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_31', blank=True)
var_32 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_32', blank=True)
var_33 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_33', blank=True)
var_34 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_34', blank=True)
var_35 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_35', blank=True)
var_36 = models.FloatField(null=True, db_column='VAR_36', blank=True)
var_37 = models.FloatField(null=True, db_column='VAR_37', blank=True)
var_38 = models.FloatField(null=True, db_column='VAR_38', blank=True)
var_39 = models.FloatField(null=True, db_column='VAR_39', blank=True)
var_40 = models.FloatField(null=True, db_column='VAR_40', blank=True)
var_41 = models.FloatField(null=True, db_column='VAR_41', blank=True)
var_42 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_42', blank=True)
var_43 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_43', blank=True)
var_44 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_44', blank=True)
var_45 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_45', blank=True)
var_46 = models.FloatField(null=True, db_column='VAR_46', blank=True)
var_47 = models.FloatField(null=True, db_column='VAR_47', blank=True)
var_48 = models.FloatField(null=True, db_column='VAR_48', blank=True)
var_49 = models.FloatField(null=True, db_column='VAR_49', blank=True)
var_50 = models.FloatField(null=True, db_column='VAR_50', blank=True)
var_51 = models.FloatField(null=True, db_column='VAR_51', blank=True)
var_52 = models.FloatField(null=True, db_column='VAR_52', blank=True)
var_53 = models.FloatField(null=True, db_column='VAR_53', blank=True)
var_54 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_54', blank=True)
var_55 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_55', blank=True)
var_56 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_56', blank=True)
var_57 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_57', blank=True)
var_58 = models.FloatField(null=True, db_column='VAR_58', blank=True)
var_59 = models.FloatField(null=True, db_column='VAR_59', blank=True)
var_60 = models.FloatField(null=True, db_column='VAR_60', blank=True)
var_61 = models.FloatField(null=True, db_column='VAR_61', blank=True)
var_62 = models.FloatField(null=True, db_column='VAR_62', blank=True)
var_63 = models.FloatField(null=True, db_column='VAR_63', blank=True)
var_64 = models.FloatField(null=True, db_column='VAR_64', blank=True)
var_65 = models.FloatField(null=True, db_column='VAR_65', blank=True)
var_66 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_66', blank=True)
var_67 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_67', blank=True)
var_68 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_68', blank=True)
var_69 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_69', blank=True)
var_70 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_70', blank=True)
var_71 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_71', blank=True)
var_72 = models.FloatField(null=True, db_column='VAR_72', blank=True)
var_73 = models.FloatField(null=True, db_column='VAR_73', blank=True)
var_74 = models.FloatField(null=True, db_column='VAR_74', blank=True)
var_75 = models.FloatField(null=True, db_column='VAR_75', blank=True)
var_76 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_76', blank=True)
var_77 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_77', blank=True)
var_78 = models.FloatField(null=True, db_column='VAR_78', blank=True)
var_79 = models.FloatField(null=True, db_column='VAR_79', blank=True)
var_80 = models.FloatField(null=True, db_column='VAR_80', blank=True)
var_81 = models.FloatField(null=True, db_column='VAR_81', blank=True)
var_82 = models.FloatField(null=True, db_column='VAR_82', blank=True)
var_83 = models.FloatField(null=True, db_column='VAR_83', blank=True)
var_84 = models.FloatField(null=True, db_column='VAR_84', blank=True)
var_85 = models.FloatField(null=True, db_column='VAR_85', blank=True)
var_86 = models.FloatField(null=True, db_column='VAR_86', blank=True)
var_87 = models.FloatField(null=True, db_column='VAR_87', blank=True)
var_88 = models.FloatField(null=True, db_column='VAR_88', blank=True)
var_89 = models.FloatField(null=True, db_column='VAR_89', blank=True)
var_90 = models.FloatField(null=True, db_column='VAR_90', blank=True)
var_91 = models.FloatField(null=True, db_column='VAR_91', blank=True)
var_92 = models.FloatField(null=True, db_column='VAR_92', blank=True)
var_93 = models.FloatField(null=True, db_column='VAR_93', blank=True)
var_94 = models.FloatField(null=True, db_column='VAR_94', blank=True)
var_95 = models.FloatField(null=True, db_column='VAR_95', blank=True)
var_96 = models.FloatField(null=True, db_column='VAR_96', blank=True)
var_97 = models.FloatField(null=True, db_column='VAR_97', blank=True)
var_98 = models.FloatField(null=True, db_column='VAR_98', blank=True)
var_99 = models.FloatField(null=True, db_column='VAR_99', blank=True)
var_100 = models.FloatField(null=True, db_column='VAR_100', blank=True)
var_101 = models.FloatField(null=True, db_column='VAR_101', blank=True)
var_102 = models.FloatField(null=True, db_column='VAR_102', blank=True)
var_103 = models.FloatField(null=True, db_column='VAR_103', blank=True)
var_104 = models.FloatField(null=True, db_column='VAR_104', blank=True)
var_105 = models.FloatField(null=True, db_column='VAR_105', blank=True)
var_106 = models.FloatField(null=True, db_column='VAR_106', blank=True)
var_107 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_107', blank=True)
var_108 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_108', blank=True)
var_109 = models.FloatField(null=True, db_column='VAR_109', blank=True)
var_110 = models.FloatField(null=True, db_column='VAR_110', blank=True)
var_111 = models.FloatField(null=True, db_column='VAR_111', blank=True)
var_112 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_112', blank=True)
var_113 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_113', blank=True)
var_114 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_114', blank=True)
var_115 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_115', blank=True)
var_116 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_116', blank=True)
var_117 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_117', blank=True)
var_118 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_118', blank=True)
var_119 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_119', blank=True)
var_120 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_120', blank=True)
var_121 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_121', blank=True)
var_122 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_122', blank=True)
var_123 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_123', blank=True)
var_124 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_124', blank=True)
var_125 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_125', blank=True)
var_126 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_126', blank=True)
var_127 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_127', blank=True)
var_128 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_128', blank=True)
var_129 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_129', blank=True)
var_130 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_130', blank=True)
var_131 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_131', blank=True)
var_132 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_132', blank=True)
var_133 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_133', blank=True)
var_134 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_134', blank=True)
var_135 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_135', blank=True)
var_136 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_136', blank=True)
var_137 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_137', blank=True)
var_138 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_138', blank=True)
var_139 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_139', blank=True)
var_140 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_140', blank=True)
var_141 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_141', blank=True)
var_142 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_142', blank=True)
var_143 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_143', blank=True)
var_144 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_144', blank=True)
var_145 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_145', blank=True)
var_146 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_146', blank=True)
var_147 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_147', blank=True)
var_148 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_148', blank=True)
var_149 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_149', blank=True)
var_150 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_150', blank=True)
var_151 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_151', blank=True)
var_152 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_152', blank=True)
var_153 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_153', blank=True)
var_154 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_154', blank=True)
var_155 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_155', blank=True)
var_156 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_156', blank=True)
var_157 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_157', blank=True)
var_158 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_158', blank=True)
var_159 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_159', blank=True)
var_160 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_160', blank=True)
var_161 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_161', blank=True)
var_162 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_162', blank=True)
var_163 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_163', blank=True)
var_164 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_164', blank=True)
var_165 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_165', blank=True)
var_166 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_166', blank=True)
var_167 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_167', blank=True)
var_168 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_168', blank=True)
var_169 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_169', blank=True)
var_170 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_170', blank=True)
var_171 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_171', blank=True)
var_172 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_172', blank=True)
var_173 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_173', blank=True)
var_174 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_174', blank=True)
var_175 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_175', blank=True)
var_176 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_176', blank=True)
var_177 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_177', blank=True)
var_178 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_178', blank=True)
var_179 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_179', blank=True)
var_180 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_180', blank=True)
var_181 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_181', blank=True)
var_182 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_182', blank=True)
var_183 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_183', blank=True)
var_184 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_184', blank=True)
var_185 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_185', blank=True)
var_186 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_186', blank=True)
var_187 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_187', blank=True)
var_188 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_188', blank=True)
var_189 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_189', blank=True)
var_190 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_190', blank=True)
var_191 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_191', blank=True)
var_192 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_192', blank=True)
var_193 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_193', blank=True)
var_194 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_194', blank=True)
var_195 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_195', blank=True)
var_196 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_196', blank=True)
var_197 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_197', blank=True)
var_198 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_198', blank=True)
var_199 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_199', blank=True)
var_200 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_200', blank=True)
var_201 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_201', blank=True)
var_202 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_202', blank=True)
var_203 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_203', blank=True)
var_204 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_204', blank=True)
var_205 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_205', blank=True)
var_206 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_206', blank=True)
var_207 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_207', blank=True)
var_208 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_208', blank=True)
var_209 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_209', blank=True)
var_210 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_210', blank=True)
var_211 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_211', blank=True)
var_212 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_212', blank=True)
var_213 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_213', blank=True)
var_214 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_214', blank=True)
var_215 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_215', blank=True)
var_216 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_216', blank=True)
var_217 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_217', blank=True)
var_218 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_218', blank=True)
var_219 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_219', blank=True)
var_220 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_220', blank=True)
var_221 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_221', blank=True)
var_222 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_222', blank=True)
var_223 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_223', blank=True)
var_224 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_224', blank=True)
var_225 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_225', blank=True)
var_226 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_226', blank=True)
var_227 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_227', blank=True)
var_228 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_228', blank=True)
var_229 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_229', blank=True)
var_230 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_230', blank=True)
var_231 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_231', blank=True)
var_232 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_232', blank=True)
var_233 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_233', blank=True)
var_234 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_234', blank=True)
var_235 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_235', blank=True)
var_236 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_236', blank=True)
var_237 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_237', blank=True)
var_238 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_238', blank=True)
var_239 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_239', blank=True)
var_240 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_240', blank=True)
var_241 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_241', blank=True)
var_242 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_242', blank=True)
var_243 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_243', blank=True)
var_244 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_244', blank=True)
var_245 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_245', blank=True)
var_246 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_246', blank=True)
var_247 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_247', blank=True)
var_248 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_248', blank=True)
var_249 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_249', blank=True)
var_250 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_250', blank=True)
var_251 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_251', blank=True)
var_252 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_252', blank=True)
var_253 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_253', blank=True)
var_254 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_254', blank=True)
var_255 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_255', blank=True)
var_256 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_256', blank=True)
var_257 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_257', blank=True)
var_258 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_258', blank=True)
var_259 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_259', blank=True)
var_260 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_260', blank=True)
var_261 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_261', blank=True)
var_262 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_262', blank=True)
var_263 = models.FloatField(null=True, db_column='VAR_263', blank=True)
class Meta:
db_table = u'pool_collection_data_1'
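# Catalog of collections (data/links table names, record counts, child collection and
# foreign key names), followed by the per-variable schema description for each collection.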
class PoolCollections(models.Model):
collection_name = models.CharField(db_column='COLLECTION_NAME', primary_key=True, max_length=255)
data_table_name = models.CharField(max_length=1200, db_column='DATA_TABLE_NAME', blank=True)
links_table_name = models.CharField(max_length=1200, db_column='LINKS_TABLE_NAME', blank=True)
records_written = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='RECORDS_WRITTEN', blank=True)
records_deleted = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='RECORDS_DELETED', blank=True)
child_collection_name = models.CharField(max_length=1200, db_column='CHILD_COLLECTION_NAME', blank=True)
foreign_key_name = models.CharField(max_length=1200, db_column='FOREIGN_KEY_NAME', blank=True)
class Meta:
db_table = u'pool_collections'
class PoolCollectionsDesc(models.Model):
collection_name = models.CharField(max_length=255, primary_key=True, db_column='COLLECTION_NAME')
variable_name = models.CharField(max_length=1200, db_column='VARIABLE_NAME', blank=True)
variable_type = models.CharField(max_length=1200, db_column='VARIABLE_TYPE', blank=True)
variable_maximum_size = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VARIABLE_MAXIMUM_SIZE', blank=True)
variable_size_is_fixed = models.CharField(max_length=15, db_column='VARIABLE_SIZE_IS_FIXED', blank=True)
variable_position = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VARIABLE_POSITION', blank=True)
variable_annotation = models.CharField(max_length=12000, db_column='VARIABLE_ANNOTATION', blank=True)
class Meta:
db_table = u'pool_collections_desc'
class ProdsysComm(models.Model):
comm_task = models.BigIntegerField(primary_key=True, db_column='COMM_TASK')
comm_meta = models.BigIntegerField(null=True, db_column='COMM_META', blank=True)
comm_owner = models.CharField(max_length=48, db_column='COMM_OWNER', blank=True)
comm_cmd = models.CharField(max_length=768, db_column='COMM_CMD', blank=True)
comm_ts = models.BigIntegerField(null=True, db_column='COMM_TS', blank=True)
class Meta:
db_table = u'prodsys_comm'
class Productiondatasets(models.Model):
name = models.CharField(max_length=255, primary_key=True, db_column='NAME')
version = models.IntegerField(null=True, db_column='VERSION', blank=True)
vuid = models.CharField(max_length=120, db_column='VUID')
files = models.IntegerField(null=True, db_column='FILES', blank=True)
gb = models.IntegerField(null=True, db_column='GB', blank=True)
events = models.IntegerField(null=True, db_column='EVENTS', blank=True)
site = models.CharField(max_length=30, db_column='SITE', blank=True)
sw_release = models.CharField(max_length=60, db_column='SW_RELEASE', blank=True)
geometry = models.CharField(max_length=60, db_column='GEOMETRY', blank=True)
jobid = models.IntegerField(null=True, db_column='JOBID', blank=True)
pandaid = models.IntegerField(null=True, db_column='PANDAID', blank=True)
prodtime = models.DateTimeField(null=True, db_column='PRODTIME', blank=True)
timestamp = models.IntegerField(null=True, db_column='TIMESTAMP', blank=True)
class Meta:
db_table = u'productiondatasets'
class Proxykey(models.Model):
id = models.IntegerField(primary_key=True, db_column='ID')
dn = models.CharField(max_length=300, db_column='DN')
credname = models.CharField(max_length=120, db_column='CREDNAME')
created = models.DateTimeField(db_column='CREATED')
expires = models.DateTimeField(db_column='EXPIRES')
origin = models.CharField(max_length=240, db_column='ORIGIN')
myproxy = models.CharField(max_length=240, db_column='MYPROXY')
class Meta:
db_table = u'proxykey'
class Redirect(models.Model):
service = models.CharField(db_column='SERVICE', max_length=30)
type = models.CharField(db_column='TYPE', max_length=30)
site = models.CharField(db_column='SITE', max_length=30)
description = models.CharField(db_column='DESCRIPTION', max_length=120)
url = models.CharField(db_column='URL', primary_key=True, max_length=250)
testurl = models.CharField(db_column='TESTURL', max_length=250, blank=True)
response = models.CharField(db_column='RESPONSE', max_length=30)
aliveresponse = models.CharField(db_column='ALIVERESPONSE', max_length=30)
responsetime = models.IntegerField(db_column='RESPONSETIME', blank=True, null=True)
rank = models.IntegerField(db_column='RANK', blank=True, null=True)
performance = models.IntegerField(db_column='PERFORMANCE', blank=True, null=True)
status = models.CharField(db_column='STATUS', max_length=30)
log = models.CharField(db_column='LOG', max_length=250, blank=True)
statustime = models.DateTimeField(db_column='STATUSTIME')
usetime = models.DateTimeField(db_column='USETIME')
class Meta:
db_table = u'redirect'
class Savedpages(models.Model):
name = models.CharField(max_length=90, db_column='NAME', primary_key=True)
flag = models.CharField(max_length=60, db_column='FLAG', primary_key=True)
hours = models.IntegerField(db_column='HOURS', primary_key=True)
html = models.TextField(db_column='HTML')
lastmod = models.DateTimeField(null=True, db_column='LASTMOD', blank=True)
interval = models.IntegerField(null=True, db_column='INTERVAL', blank=True)
class Meta:
db_table = u'savedpages'
unique_together = ('name', 'flag', 'hours')
class Servicelist(models.Model):
id = models.IntegerField(primary_key=True, db_column='ID')
name = models.CharField(max_length=180, db_column='NAME')
host = models.CharField(max_length=300, db_column='HOST', blank=True)
pid = models.IntegerField(null=True, db_column='PID', blank=True)
userid = models.CharField(max_length=120, db_column='USERID', blank=True)
type = models.CharField(max_length=90, db_column='TYPE', blank=True)
grp = models.CharField(max_length=60, db_column='GRP', blank=True)
description = models.CharField(max_length=600, db_column='DESCRIPTION', blank=True)
url = models.CharField(max_length=600, db_column='URL', blank=True)
testurl = models.CharField(max_length=600, db_column='TESTURL', blank=True)
response = models.CharField(max_length=600, db_column='RESPONSE', blank=True)
tresponse = models.IntegerField(null=True, db_column='TRESPONSE', blank=True)
tstart = models.DateTimeField(db_column='TSTART')
tstop = models.DateTimeField(db_column='TSTOP')
tcheck = models.DateTimeField(db_column='TCHECK')
cyclesec = models.IntegerField(null=True, db_column='CYCLESEC', blank=True)
status = models.CharField(max_length=60, db_column='STATUS')
lastmod = models.DateTimeField(db_column='LASTMOD')
config = models.CharField(max_length=600, db_column='CONFIG', blank=True)
message = models.CharField(max_length=12000, db_column='MESSAGE', blank=True)
restartcmd = models.CharField(max_length=12000, db_column='RESTARTCMD', blank=True)
doaction = models.CharField(max_length=12000, db_column='DOACTION', blank=True)
class Meta:
db_table = u'servicelist'
class Siteaccess(models.Model):
id = models.BigIntegerField(primary_key=True, db_column='ID')
dn = models.CharField(max_length=300, db_column='DN', blank=True)
pandasite = models.CharField(max_length=300, db_column='PANDASITE', blank=True)
poffset = models.BigIntegerField(db_column='POFFSET')
rights = models.CharField(max_length=90, db_column='RIGHTS', blank=True)
status = models.CharField(max_length=60, db_column='STATUS', blank=True)
workinggroups = models.CharField(max_length=300, db_column='WORKINGGROUPS', blank=True)
created = models.DateTimeField(null=True, db_column='CREATED', blank=True)
class Meta:
db_table = u'siteaccess'
class Sitedata(models.Model):
site = models.CharField(max_length=90, db_column='SITE', primary_key=True)
flag = models.CharField(max_length=60, db_column='FLAG', primary_key=True)
hours = models.IntegerField(db_column='HOURS', primary_key=True)
nwn = models.IntegerField(null=True, db_column='NWN', blank=True)
memmin = models.IntegerField(null=True, db_column='MEMMIN', blank=True)
memmax = models.IntegerField(null=True, db_column='MEMMAX', blank=True)
si2000min = models.IntegerField(null=True, db_column='SI2000MIN', blank=True)
si2000max = models.IntegerField(null=True, db_column='SI2000MAX', blank=True)
os = models.CharField(max_length=90, db_column='OS', blank=True)
space = models.CharField(max_length=90, db_column='SPACE', blank=True)
minjobs = models.IntegerField(null=True, db_column='MINJOBS', blank=True)
maxjobs = models.IntegerField(null=True, db_column='MAXJOBS', blank=True)
laststart = models.DateTimeField(null=True, db_column='LASTSTART', blank=True)
lastend = models.DateTimeField(null=True, db_column='LASTEND', blank=True)
lastfail = models.DateTimeField(null=True, db_column='LASTFAIL', blank=True)
lastpilot = models.DateTimeField(null=True, db_column='LASTPILOT', blank=True)
lastpid = models.IntegerField(null=True, db_column='LASTPID', blank=True)
nstart = models.IntegerField(db_column='NSTART')
finished = models.IntegerField(db_column='FINISHED')
failed = models.IntegerField(db_column='FAILED')
defined = models.IntegerField(db_column='DEFINED')
assigned = models.IntegerField(db_column='ASSIGNED')
waiting = models.IntegerField(db_column='WAITING')
activated = models.IntegerField(db_column='ACTIVATED')
holding = models.IntegerField(db_column='HOLDING')
running = models.IntegerField(db_column='RUNNING')
transferring = models.IntegerField(db_column='TRANSFERRING')
getjob = models.IntegerField(db_column='GETJOB')
updatejob = models.IntegerField(db_column='UPDATEJOB')
lastmod = models.DateTimeField(db_column='LASTMOD')
ncpu = models.IntegerField(null=True, db_column='NCPU', blank=True)
nslot = models.IntegerField(null=True, db_column='NSLOT', blank=True)
class Meta:
db_table = u'sitedata'
unique_together = ('site', 'flag', 'hours')
class Siteddm(models.Model):
name = models.CharField(max_length=180, primary_key=True, db_column='NAME')
incmd = models.CharField(max_length=180, db_column='INCMD')
inpath = models.CharField(max_length=600, db_column='INPATH', blank=True)
inopts = models.CharField(max_length=180, db_column='INOPTS', blank=True)
outcmd = models.CharField(max_length=180, db_column='OUTCMD')
outopts = models.CharField(max_length=180, db_column='OUTOPTS', blank=True)
outpath = models.CharField(max_length=600, db_column='OUTPATH')
class Meta:
db_table = u'siteddm'
class Sitehistory(models.Model):
site = models.CharField(max_length=90, db_column='SITE', primary_key=True)
flag = models.CharField(max_length=60, db_column='FLAG', primary_key=True)
time = models.DateTimeField(db_column='TIME', primary_key=True)
hours = models.IntegerField(db_column='HOURS', primary_key=True)
nwn = models.IntegerField(null=True, db_column='NWN', blank=True)
memmin = models.IntegerField(null=True, db_column='MEMMIN', blank=True)
memmax = models.IntegerField(null=True, db_column='MEMMAX', blank=True)
si2000min = models.IntegerField(null=True, db_column='SI2000MIN', blank=True)
si2000max = models.IntegerField(null=True, db_column='SI2000MAX', blank=True)
si2000a = models.IntegerField(null=True, db_column='SI2000A', blank=True)
si2000p = models.IntegerField(null=True, db_column='SI2000P', blank=True)
walla = models.IntegerField(null=True, db_column='WALLA', blank=True)
wallp = models.IntegerField(null=True, db_column='WALLP', blank=True)
os = models.CharField(max_length=90, db_column='OS')
space = models.CharField(max_length=90, db_column='SPACE')
minjobs = models.IntegerField(null=True, db_column='MINJOBS', blank=True)
maxjobs = models.IntegerField(null=True, db_column='MAXJOBS', blank=True)
laststart = models.DateTimeField(null=True, db_column='LASTSTART', blank=True)
lastend = models.DateTimeField(null=True, db_column='LASTEND', blank=True)
lastfail = models.DateTimeField(null=True, db_column='LASTFAIL', blank=True)
lastpilot = models.DateTimeField(null=True, db_column='LASTPILOT', blank=True)
lastpid = models.IntegerField(null=True, db_column='LASTPID', blank=True)
nstart = models.IntegerField(db_column='NSTART')
finished = models.IntegerField(db_column='FINISHED')
failed = models.IntegerField(db_column='FAILED')
defined = models.IntegerField(db_column='DEFINED')
assigned = models.IntegerField(db_column='ASSIGNED')
waiting = models.IntegerField(db_column='WAITING')
activated = models.IntegerField(db_column='ACTIVATED')
running = models.IntegerField(db_column='RUNNING')
getjob = models.IntegerField(db_column='GETJOB')
updatejob = models.IntegerField(db_column='UPDATEJOB')
subtot = models.IntegerField(db_column='SUBTOT')
subdef = models.IntegerField(db_column='SUBDEF')
subdone = models.IntegerField(db_column='SUBDONE')
filemods = models.IntegerField(db_column='FILEMODS')
ncpu = models.IntegerField(null=True, db_column='NCPU', blank=True)
nslot = models.IntegerField(null=True, db_column='NSLOT', blank=True)
class Meta:
db_table = u'sitehistory'
unique_together = ('site', 'time', 'flag', 'hours')
class Sitesinfo(models.Model):
name = models.CharField(db_column='NAME', primary_key=True, max_length=120)
nick = models.CharField(db_column='NICK', max_length=20)
contact = models.CharField(db_column='CONTACT', max_length=30, blank=True)
email = models.CharField(db_column='EMAIL', max_length=30, blank=True)
status = models.CharField(db_column='STATUS', max_length=12, blank=True)
lrc = models.CharField(db_column='LRC', max_length=120, blank=True)
gridcat = models.IntegerField(db_column='GRIDCAT', blank=True, null=True)
monalisa = models.CharField(db_column='MONALISA', max_length=20, blank=True)
computingsite = models.CharField(db_column='COMPUTINGSITE', max_length=20, blank=True)
mainsite = models.CharField(db_column='MAINSITE', max_length=20, blank=True)
home = models.CharField(db_column='HOME', max_length=120, blank=True)
ganglia = models.CharField(db_column='GANGLIA', max_length=120, blank=True)
goc = models.CharField(db_column='GOC', max_length=20, blank=True)
gocconfig = models.IntegerField(db_column='GOCCONFIG', blank=True, null=True)
prodsys = models.CharField(db_column='PRODSYS', max_length=20, blank=True)
dq2svc = models.CharField(db_column='DQ2SVC', max_length=20, blank=True)
usage = models.CharField(db_column='USAGE', max_length=40, blank=True)
updtime = models.IntegerField(db_column='UPDTIME', blank=True, null=True)
ndatasets = models.IntegerField(db_column='NDATASETS', blank=True, null=True)
nfiles = models.IntegerField(db_column='NFILES', blank=True, null=True)
timestamp = models.IntegerField(db_column='TIMESTAMP', blank=True, null=True)
class Meta:
db_table = u'sitesinfo'
class Sitestats(models.Model):
cloud = models.CharField(max_length=30, primary_key=True, db_column='CLOUD')
site = models.CharField(max_length=180, db_column='SITE', blank=True)
at_time = models.DateTimeField(null=True, db_column='AT_TIME', blank=True)
twidth = models.IntegerField(null=True, db_column='TWIDTH', blank=True)
tjob = models.IntegerField(null=True, db_column='TJOB', blank=True)
tgetjob = models.IntegerField(null=True, db_column='TGETJOB', blank=True)
tstagein = models.IntegerField(null=True, db_column='TSTAGEIN', blank=True)
trun = models.IntegerField(null=True, db_column='TRUN', blank=True)
tstageout = models.IntegerField(null=True, db_column='TSTAGEOUT', blank=True)
twait = models.IntegerField(null=True, db_column='TWAIT', blank=True)
nusers = models.IntegerField(null=True, db_column='NUSERS', blank=True)
nwn = models.IntegerField(null=True, db_column='NWN', blank=True)
njobs = models.IntegerField(null=True, db_column='NJOBS', blank=True)
nfinished = models.IntegerField(null=True, db_column='NFINISHED', blank=True)
nfailed = models.IntegerField(null=True, db_column='NFAILED', blank=True)
nfailapp = models.IntegerField(null=True, db_column='NFAILAPP', blank=True)
nfailsys = models.IntegerField(null=True, db_column='NFAILSYS', blank=True)
nfaildat = models.IntegerField(null=True, db_column='NFAILDAT', blank=True)
ntimeout = models.IntegerField(null=True, db_column='NTIMEOUT', blank=True)
efficiency = models.IntegerField(null=True, db_column='EFFICIENCY', blank=True)
siteutil = models.IntegerField(null=True, db_column='SITEUTIL', blank=True)
jobtype = models.CharField(max_length=90, db_column='JOBTYPE', blank=True)
proctype = models.CharField(max_length=270, db_column='PROCTYPE', blank=True)
username = models.CharField(max_length=270, db_column='USERNAME', blank=True)
ngetjob = models.IntegerField(null=True, db_column='NGETJOB', blank=True)
nupdatejob = models.IntegerField(null=True, db_column='NUPDATEJOB', blank=True)
release = models.CharField(max_length=270, db_column='RELEASE', blank=True)
nevents = models.BigIntegerField(null=True, db_column='NEVENTS', blank=True)
spectype = models.CharField(max_length=270, db_column='SPECTYPE', blank=True)
tsetup = models.IntegerField(null=True, db_column='TSETUP', blank=True)
class Meta:
db_table = u'sitestats'
class Submithosts(models.Model):
name = models.CharField(max_length=180, db_column='NAME')
nickname = models.CharField(max_length=60, db_column='NICKNAME')
host = models.CharField(max_length=180, primary_key=True, db_column='HOST')
system = models.CharField(max_length=180, db_column='SYSTEM')
rundir = models.CharField(max_length=600, db_column='RUNDIR')
runurl = models.CharField(max_length=600, db_column='RUNURL')
jdltxt = models.CharField(max_length=12000, db_column='JDLTXT', blank=True)
pilotqueue = models.CharField(max_length=60, db_column='PILOTQUEUE', blank=True)
outurl = models.CharField(max_length=600, db_column='OUTURL', blank=True)
class Meta:
db_table = u'submithosts'
class Sysconfig(models.Model):
name = models.CharField(max_length=180, db_column='NAME', primary_key=True)
system = models.CharField(max_length=60, db_column='SYSTEM', primary_key=True)
config = models.CharField(max_length=12000, db_column='CONFIG', blank=True)
class Meta:
db_table = u'sysconfig'
unique_together = ('name', 'system')
class TM4RegionsReplication(models.Model):
tier2 = models.CharField(max_length=150, primary_key=True, db_column='TIER2')
cloud = models.CharField(max_length=90, db_column='CLOUD')
percentage = models.FloatField(null=True, db_column='PERCENTAGE', blank=True)
tier1 = models.CharField(max_length=150, db_column='TIER1')
nsubs = models.IntegerField(null=True, db_column='NSUBS', blank=True)
subsoption = models.CharField(max_length=960, db_column='SUBSOPTION', blank=True)
status = models.CharField(max_length=36, db_column='STATUS', blank=True)
timestamp = models.IntegerField(null=True, db_column='TIMESTAMP', blank=True)
stream_pattern = models.CharField(max_length=96, db_column='STREAM_PATTERN', blank=True)
nreplicas = models.IntegerField(null=True, db_column='NREPLICAS', blank=True)
nsubs_aod = models.IntegerField(null=True, db_column='NSUBS_AOD', blank=True)
nsubs_dpd = models.IntegerField(null=True, db_column='NSUBS_DPD', blank=True)
upd_flag = models.CharField(max_length=12, db_column='UPD_FLAG', blank=True)
esd = models.IntegerField(null=True, db_column='ESD', blank=True)
esd_subsoption = models.CharField(max_length=960, db_column='ESD_SUBSOPTION', blank=True)
desd = models.IntegerField(null=True, db_column='DESD', blank=True)
desd_subsoption = models.CharField(max_length=960, db_column='DESD_SUBSOPTION', blank=True)
prim_flag = models.IntegerField(null=True, db_column='PRIM_FLAG', blank=True)
t2group = models.BigIntegerField(null=True, db_column='T2GROUP', blank=True)
class Meta:
db_table = u't_m4regions_replication'
class TTier2Groups(models.Model):
name = models.CharField(max_length=36, primary_key=True, db_column='NAME')
gid = models.BigIntegerField(null=True, db_column='GID', blank=True)
ntup_share = models.BigIntegerField(null=True, db_column='NTUP_SHARE', blank=True)
timestmap = models.BigIntegerField(null=True, db_column='TIMESTMAP', blank=True)
class Meta:
db_table = u't_tier2_groups'
class Tablepart4Copying(models.Model):
table_name = models.CharField(max_length=90, db_column='TABLE_NAME', primary_key=True)
partition_name = models.CharField(max_length=90, db_column='PARTITION_NAME', primary_key=True)
copied_to_arch = models.CharField(max_length=30, db_column='COPIED_TO_ARCH')
copying_done_on = models.DateTimeField(null=True, db_column='COPYING_DONE_ON', blank=True)
deleted_on = models.DateTimeField(null=True, db_column='DELETED_ON', blank=True)
data_verif_passed = models.CharField(max_length=9, db_column='DATA_VERIF_PASSED', blank=True)
data_verified_on = models.DateTimeField(null=True, db_column='DATA_VERIFIED_ON', blank=True)
class Meta:
db_table = u'tablepart4copying'
unique_together = ('table_name', 'partition_name')
class Taginfo(models.Model):
tag = models.CharField(max_length=90, primary_key=True, db_column='TAG')
description = models.CharField(max_length=300, db_column='DESCRIPTION')
nqueues = models.IntegerField(db_column='NQUEUES')
queues = models.CharField(max_length=12000, db_column='QUEUES', blank=True)
class Meta:
db_table = u'taginfo'
class Tags(models.Model):
id = models.IntegerField(primary_key=True, db_column='ID')
name = models.CharField(max_length=60, db_column='NAME')
description = models.CharField(max_length=180, db_column='DESCRIPTION')
ugid = models.IntegerField(null=True, db_column='UGID', blank=True)
type = models.CharField(max_length=30, db_column='TYPE')
itemid = models.IntegerField(null=True, db_column='ITEMID', blank=True)
created = models.DateTimeField(db_column='CREATED')
class Meta:
db_table = u'tags'
class Transfercosts(models.Model):
sourcesite = models.CharField(db_column='SOURCESITE', max_length=256)
destsite = models.CharField(db_column='DESTSITE', max_length=256)
type = models.CharField(db_column='TYPE', max_length=256)
status = models.CharField(db_column='STATUS', max_length=64, blank=True)
last_update = models.DateTimeField(db_column='LAST_UPDATE', blank=True, null=True)
cost = models.BigIntegerField(db_column='COST')
max_cost = models.BigIntegerField(db_column='MAX_COST', blank=True, null=True)
min_cost = models.BigIntegerField(db_column='MIN_COST', blank=True, null=True)
class Meta:
db_table = u'transfercosts'
class TransfercostsHistory(models.Model):
sourcesite = models.CharField(db_column='SOURCESITE', primary_key=True, max_length=255)
destsite = models.CharField(max_length=768, db_column='DESTSITE')
type = models.CharField(max_length=768, db_column='TYPE', blank=True)
status = models.CharField(max_length=192, db_column='STATUS', blank=True)
last_update = models.DateTimeField(null=True, db_column='LAST_UPDATE', blank=True)
cost = models.BigIntegerField(db_column='COST')
max_cost = models.BigIntegerField(null=True, db_column='MAX_COST', blank=True)
min_cost = models.BigIntegerField(null=True, db_column='MIN_COST', blank=True)
class Meta:
db_table = u'transfercosts_history'
class TriggersDebug(models.Model):
when = models.DateTimeField(primary_key=True, db_column='WHEN')
what = models.CharField(max_length=300, db_column='WHAT', blank=True)
value = models.CharField(max_length=600, db_column='VALUE', blank=True)
class Meta:
db_table = u'triggers_debug'
class Usagereport(models.Model):
entry = models.IntegerField(primary_key=True, db_column='ENTRY')
flag = models.CharField(max_length=60, db_column='FLAG')
hours = models.IntegerField(null=True, db_column='HOURS', blank=True)
tstart = models.DateTimeField(null=True, db_column='TSTART', blank=True)
tend = models.DateTimeField(null=True, db_column='TEND', blank=True)
tinsert = models.DateTimeField(db_column='TINSERT')
site = models.CharField(max_length=90, db_column='SITE')
nwn = models.IntegerField(null=True, db_column='NWN', blank=True)
class Meta:
db_table = u'usagereport'
class Usercacheusage(models.Model):
username = models.CharField(max_length=384, db_column='USERNAME')
filename = models.CharField(db_column='FILENAME', max_length=255, primary_key=True)
hostname = models.CharField(max_length=192, db_column='HOSTNAME', primary_key=True)
creationtime = models.DateTimeField(db_column='CREATIONTIME', primary_key=True)
modificationtime = models.DateTimeField(null=True, db_column='MODIFICATIONTIME', blank=True)
filesize = models.BigIntegerField(null=True, db_column='FILESIZE', blank=True)
checksum = models.CharField(max_length=108, db_column='CHECKSUM', blank=True)
aliasname = models.CharField(max_length=768, db_column='ALIASNAME', blank=True)
class Meta:
db_table = u'usercacheusage'
unique_together = ('filename', 'hostname', 'creationtime')
class Users(models.Model):
id = models.IntegerField(primary_key=True, db_column='ID')
name = models.CharField(max_length=180, db_column='NAME')
dn = models.CharField(max_length=450, db_column='DN', blank=True)
email = models.CharField(max_length=180, db_column='EMAIL', blank=True)
url = models.CharField(max_length=300, db_column='URL', blank=True)
location = models.CharField(max_length=180, db_column='LOCATION', blank=True)
classa = models.CharField(max_length=90, db_column='CLASSA', blank=True)
classp = models.CharField(max_length=90, db_column='CLASSP', blank=True)
classxp = models.CharField(max_length=90, db_column='CLASSXP', blank=True)
sitepref = models.CharField(max_length=180, db_column='SITEPREF', blank=True)
gridpref = models.CharField(max_length=60, db_column='GRIDPREF', blank=True)
queuepref = models.CharField(max_length=180, db_column='QUEUEPREF', blank=True)
scriptcache = models.CharField(max_length=300, db_column='SCRIPTCACHE', blank=True)
types = models.CharField(max_length=180, db_column='TYPES', blank=True)
sites = models.CharField(max_length=750, db_column='SITES', blank=True)
njobsa = models.IntegerField(null=True, db_column='NJOBSA', blank=True)
njobsp = models.IntegerField(null=True, db_column='NJOBSP', blank=True)
njobs1 = models.IntegerField(null=True, db_column='NJOBS1', blank=True)
njobs7 = models.IntegerField(null=True, db_column='NJOBS7', blank=True)
njobs30 = models.IntegerField(null=True, db_column='NJOBS30', blank=True)
cpua1 = models.BigIntegerField(null=True, db_column='CPUA1', blank=True)
cpua7 = models.BigIntegerField(null=True, db_column='CPUA7', blank=True)
cpua30 = models.BigIntegerField(null=True, db_column='CPUA30', blank=True)
cpup1 = models.BigIntegerField(null=True, db_column='CPUP1', blank=True)
cpup7 = models.BigIntegerField(null=True, db_column='CPUP7', blank=True)
cpup30 = models.BigIntegerField(null=True, db_column='CPUP30', blank=True)
cpuxp1 = models.BigIntegerField(null=True, db_column='CPUXP1', blank=True)
cpuxp7 = models.BigIntegerField(null=True, db_column='CPUXP7', blank=True)
cpuxp30 = models.BigIntegerField(null=True, db_column='CPUXP30', blank=True)
quotaa1 = models.BigIntegerField(null=True, db_column='QUOTAA1', blank=True)
quotaa7 = models.BigIntegerField(null=True, db_column='QUOTAA7', blank=True)
quotaa30 = models.BigIntegerField(null=True, db_column='QUOTAA30', blank=True)
quotap1 = models.BigIntegerField(null=True, db_column='QUOTAP1', blank=True)
quotap7 = models.BigIntegerField(null=True, db_column='QUOTAP7', blank=True)
quotap30 = models.BigIntegerField(null=True, db_column='QUOTAP30', blank=True)
quotaxp1 = models.BigIntegerField(null=True, db_column='QUOTAXP1', blank=True)
quotaxp7 = models.BigIntegerField(null=True, db_column='QUOTAXP7', blank=True)
quotaxp30 = models.BigIntegerField(null=True, db_column='QUOTAXP30', blank=True)
space1 = models.IntegerField(null=True, db_column='SPACE1', blank=True)
space7 = models.IntegerField(null=True, db_column='SPACE7', blank=True)
space30 = models.IntegerField(null=True, db_column='SPACE30', blank=True)
lastmod = models.DateTimeField(db_column='LASTMOD')
firstjob = models.DateTimeField(db_column='FIRSTJOB')
latestjob = models.DateTimeField(db_column='LATESTJOB')
pagecache = models.TextField(db_column='PAGECACHE', blank=True)
cachetime = models.DateTimeField(db_column='CACHETIME')
ncurrent = models.IntegerField(db_column='NCURRENT')
jobid = models.IntegerField(db_column='JOBID')
status = models.CharField(max_length=60, db_column='STATUS', blank=True)
vo = models.CharField(max_length=60, db_column='VO', blank=True)
class Meta:
db_table = u'users'
##FIXME: reenable this after proper dbproxies are introduced!### db_table = u'"ATLAS_PANDAMETA"."USERS"'
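    # The attributes below are list-view/UI configuration for the monitoring
    # pages. They are assumed to be resolved from COLUMNS, ORDER_COLUMNS,
    # COL_TITLES and FILTERS mappings defined or imported elsewhere in this
    # application (assumption; those definitions are not shown in this file).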
allColumns = COLUMNS['ActiveUsers-all']
primaryColumns = [ 'name']
secondaryColumns = []
orderColumns = ORDER_COLUMNS['ActiveUsers-all']
columnTitles = COL_TITLES['ActiveUsers-all']
filterFields = FILTERS['ActiveUsers-all']
def __str__(self):
return 'User: ' + str(self.name) + '[' + str(self.status) + ']'
class Userstats(models.Model):
name = models.CharField(max_length=180, db_column='NAME', primary_key=True)
label = models.CharField(max_length=60, db_column='LABEL', blank=True)
yr = models.IntegerField(db_column='YR', primary_key=True)
mo = models.IntegerField(db_column='MO', primary_key=True)
jobs = models.BigIntegerField(null=True, db_column='JOBS', blank=True)
idlo = models.BigIntegerField(null=True, db_column='IDLO', blank=True)
idhi = models.BigIntegerField(null=True, db_column='IDHI', blank=True)
info = models.CharField(max_length=300, db_column='INFO', blank=True)
class Meta:
db_table = u'userstats'
unique_together = ('name', 'yr', 'mo')
class Usersubs(models.Model):
datasetname = models.CharField(max_length=255, db_column='DATASETNAME', primary_key=True)
site = models.CharField(max_length=192, db_column='SITE', primary_key=True)
creationdate = models.DateTimeField(null=True, db_column='CREATIONDATE', blank=True)
modificationdate = models.DateTimeField(null=True, db_column='MODIFICATIONDATE', blank=True)
nused = models.IntegerField(null=True, db_column='NUSED', blank=True)
state = models.CharField(max_length=90, db_column='STATE', blank=True)
class Meta:
db_table = u'usersubs'
unique_together = ('datasetname', 'site')
class VoToSite(models.Model):
site_name = models.CharField(max_length=96, db_column='SITE_NAME', primary_key=True)
queue = models.CharField(max_length=192, db_column='QUEUE', primary_key=True)
vo_name = models.CharField(max_length=96, db_column='VO_NAME', primary_key=True)
class Meta:
db_table = u'vo_to_site'
unique_together = ('site_name', 'queue', 'vo_name')
class Vorspassfail(models.Model):
site_name = models.CharField(max_length=96, primary_key=True, db_column='SITE_NAME')
passfail = models.CharField(max_length=12, db_column='PASSFAIL')
last_checked = models.DateTimeField(null=True, db_column='LAST_CHECKED', blank=True)
class Meta:
db_table = u'vorspassfail'
class Wndata(models.Model):
site = models.CharField(max_length=90, db_column='SITE', primary_key=True)
wn = models.CharField(max_length=150, db_column='WN', primary_key=True)
flag = models.CharField(max_length=60, db_column='FLAG', primary_key=True)
hours = models.IntegerField(db_column='HOURS', primary_key=True)
mem = models.IntegerField(null=True, db_column='MEM', blank=True)
si2000 = models.IntegerField(null=True, db_column='SI2000', blank=True)
os = models.CharField(max_length=90, db_column='OS', blank=True)
space = models.CharField(max_length=90, db_column='SPACE', blank=True)
maxjobs = models.IntegerField(null=True, db_column='MAXJOBS', blank=True)
laststart = models.DateTimeField(null=True, db_column='LASTSTART', blank=True)
lastend = models.DateTimeField(null=True, db_column='LASTEND', blank=True)
lastfail = models.DateTimeField(null=True, db_column='LASTFAIL', blank=True)
lastpilot = models.DateTimeField(null=True, db_column='LASTPILOT', blank=True)
lastpid = models.IntegerField(null=True, db_column='LASTPID', blank=True)
nstart = models.IntegerField(db_column='NSTART')
finished = models.IntegerField(db_column='FINISHED')
failed = models.IntegerField(db_column='FAILED')
holding = models.IntegerField(db_column='HOLDING')
running = models.IntegerField(db_column='RUNNING')
transferring = models.IntegerField(db_column='TRANSFERRING')
getjob = models.IntegerField(db_column='GETJOB')
updatejob = models.IntegerField(db_column='UPDATEJOB')
lastmod = models.DateTimeField(db_column='LASTMOD')
ncpu = models.IntegerField(null=True, db_column='NCPU', blank=True)
ncpucurrent = models.IntegerField(null=True, db_column='NCPUCURRENT', blank=True)
nslot = models.IntegerField(null=True, db_column='NSLOT', blank=True)
nslotcurrent = models.IntegerField(null=True, db_column='NSLOTCURRENT', blank=True)
class Meta:
db_table = u'wndata'
unique_together = ('site', 'wn', 'flag', 'hours')
| apache-2.0 |
rrohan/scikit-learn | sklearn/linear_model/tests/test_coordinate_descent.py | 114 | 25281 | # Authors: Olivier Grisel <[email protected]>
# Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
from sys import version_info
import numpy as np
from scipy import interpolate, sparse
from copy import deepcopy
from sklearn.datasets import load_boston
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import TempMemmap
from sklearn.linear_model.coordinate_descent import Lasso, \
LassoCV, ElasticNet, ElasticNetCV, MultiTaskLasso, MultiTaskElasticNet, \
MultiTaskElasticNetCV, MultiTaskLassoCV, lasso_path, enet_path
from sklearn.linear_model import LassoLarsCV, lars_path
from sklearn.utils import check_array
def check_warnings():
if version_info < (2, 6):
raise SkipTest("Testing for warnings is not supported in versions \
older than Python 2.6")
def test_lasso_zero():
# Check that the lasso can handle zero data without crashing
X = [[0], [0], [0]]
y = [0, 0, 0]
clf = Lasso(alpha=0.1).fit(X, y)
pred = clf.predict([[1], [2], [3]])
assert_array_almost_equal(clf.coef_, [0])
assert_array_almost_equal(pred, [0, 0, 0])
assert_almost_equal(clf.dual_gap_, 0)
def test_lasso_toy():
# Test Lasso on a toy example for various values of alpha.
    # When validating this against glmnet, note that glmnet divides the
    # objective by the number of observations (nobs).
X = [[-1], [0], [1]]
Y = [-1, 0, 1] # just a straight line
T = [[2], [3], [4]] # test sample
clf = Lasso(alpha=1e-8)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(pred, [2, 3, 4])
assert_almost_equal(clf.dual_gap_, 0)
clf = Lasso(alpha=0.1)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [.85])
assert_array_almost_equal(pred, [1.7, 2.55, 3.4])
assert_almost_equal(clf.dual_gap_, 0)
clf = Lasso(alpha=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [.25])
assert_array_almost_equal(pred, [0.5, 0.75, 1.])
assert_almost_equal(clf.dual_gap_, 0)
clf = Lasso(alpha=1)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [.0])
assert_array_almost_equal(pred, [0, 0, 0])
assert_almost_equal(clf.dual_gap_, 0)
def test_enet_toy():
# Test ElasticNet for various parameters of alpha and l1_ratio.
    # Actually, the parameter alpha = 0 should not be allowed. However,
    # we test it as a border case.
# ElasticNet is tested with and without precomputed Gram matrix
X = np.array([[-1.], [0.], [1.]])
Y = [-1, 0, 1] # just a straight line
T = [[2.], [3.], [4.]] # test sample
# this should be the same as lasso
clf = ElasticNet(alpha=1e-8, l1_ratio=1.0)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(pred, [2, 3, 4])
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.3, max_iter=100,
precompute=False)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf.set_params(max_iter=100, precompute=True)
clf.fit(X, Y) # with Gram
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf.set_params(max_iter=100, precompute=np.dot(X.T, X))
clf.fit(X, Y) # with Gram
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.45454], 3)
assert_array_almost_equal(pred, [0.9090, 1.3636, 1.8181], 3)
assert_almost_equal(clf.dual_gap_, 0)
def build_dataset(n_samples=50, n_features=200, n_informative_features=10,
n_targets=1):
"""
build an ill-posed linear regression problem with many noisy features and
comparatively few samples
"""
random_state = np.random.RandomState(0)
if n_targets > 1:
w = random_state.randn(n_features, n_targets)
else:
w = random_state.randn(n_features)
w[n_informative_features:] = 0.0
X = random_state.randn(n_samples, n_features)
y = np.dot(X, w)
X_test = random_state.randn(n_samples, n_features)
y_test = np.dot(X_test, w)
return X, y, X_test, y_test
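# Illustrative usage of the helper above (hypothetical values; kept as a
# comment so nothing extra runs at import time):
#
#     X, y, X_test, y_test = build_dataset(n_samples=30, n_features=100)
#     clf = Lasso(alpha=0.1).fit(X, y)
#     print(clf.score(X_test, y_test))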
def test_lasso_cv():
X, y, X_test, y_test = build_dataset()
max_iter = 150
clf = LassoCV(n_alphas=10, eps=1e-3, max_iter=max_iter).fit(X, y)
assert_almost_equal(clf.alpha_, 0.056, 2)
clf = LassoCV(n_alphas=10, eps=1e-3, max_iter=max_iter, precompute=True)
clf.fit(X, y)
assert_almost_equal(clf.alpha_, 0.056, 2)
# Check that the lars and the coordinate descent implementation
# select a similar alpha
lars = LassoLarsCV(normalize=False, max_iter=30).fit(X, y)
    # for this we check that the selected alphas are at most one grid
    # position apart in clf.alphas_
assert_true(np.abs(
np.searchsorted(clf.alphas_[::-1], lars.alpha_)
- np.searchsorted(clf.alphas_[::-1], clf.alpha_)) <= 1)
# check that they also give a similar MSE
mse_lars = interpolate.interp1d(lars.cv_alphas_, lars.cv_mse_path_.T)
np.testing.assert_approx_equal(mse_lars(clf.alphas_[5]).mean(),
clf.mse_path_[5].mean(), significant=2)
# test set
assert_greater(clf.score(X_test, y_test), 0.99)
def test_lasso_cv_positive_constraint():
X, y, X_test, y_test = build_dataset()
max_iter = 500
# Ensure the unconstrained fit has a negative coefficient
clf_unconstrained = LassoCV(n_alphas=3, eps=1e-1, max_iter=max_iter, cv=2,
n_jobs=1)
clf_unconstrained.fit(X, y)
assert_true(min(clf_unconstrained.coef_) < 0)
# On same data, constrained fit has non-negative coefficients
clf_constrained = LassoCV(n_alphas=3, eps=1e-1, max_iter=max_iter,
positive=True, cv=2, n_jobs=1)
clf_constrained.fit(X, y)
assert_true(min(clf_constrained.coef_) >= 0)
def test_lasso_path_return_models_vs_new_return_gives_same_coefficients():
# Test that lasso_path with lars_path style output gives the
# same result
# Some toy data
X = np.array([[1, 2, 3.1], [2.3, 5.4, 4.3]]).T
y = np.array([1, 2, 3.1])
alphas = [5., 1., .5]
# Use lars_path and lasso_path(new output) with 1D linear interpolation
    # to compute the same path
alphas_lars, _, coef_path_lars = lars_path(X, y, method='lasso')
coef_path_cont_lars = interpolate.interp1d(alphas_lars[::-1],
coef_path_lars[:, ::-1])
alphas_lasso2, coef_path_lasso2, _ = lasso_path(X, y, alphas=alphas,
return_models=False)
coef_path_cont_lasso = interpolate.interp1d(alphas_lasso2[::-1],
coef_path_lasso2[:, ::-1])
assert_array_almost_equal(
coef_path_cont_lasso(alphas), coef_path_cont_lars(alphas),
decimal=1)
def test_enet_path():
# We use a large number of samples and of informative features so that
# the l1_ratio selected is more toward ridge than lasso
X, y, X_test, y_test = build_dataset(n_samples=200, n_features=100,
n_informative_features=100)
max_iter = 150
# Here we have a small number of iterations, and thus the
# ElasticNet might not converge. This is to speed up tests
clf = ElasticNetCV(alphas=[0.01, 0.05, 0.1], eps=2e-3,
l1_ratio=[0.5, 0.7], cv=3,
max_iter=max_iter)
ignore_warnings(clf.fit)(X, y)
    # In well-conditioned settings, we should have selected our
    # smallest penalty
assert_almost_equal(clf.alpha_, min(clf.alphas_))
    # Non-sparse ground truth: we should have selected an elastic-net
# that is closer to ridge than to lasso
assert_equal(clf.l1_ratio_, min(clf.l1_ratio))
clf = ElasticNetCV(alphas=[0.01, 0.05, 0.1], eps=2e-3,
l1_ratio=[0.5, 0.7], cv=3,
max_iter=max_iter, precompute=True)
ignore_warnings(clf.fit)(X, y)
    # In well-conditioned settings, we should have selected our
    # smallest penalty
assert_almost_equal(clf.alpha_, min(clf.alphas_))
    # Non-sparse ground truth: we should have selected an elastic-net
# that is closer to ridge than to lasso
assert_equal(clf.l1_ratio_, min(clf.l1_ratio))
# We are in well-conditioned settings with low noise: we should
# have a good test-set performance
assert_greater(clf.score(X_test, y_test), 0.99)
# Multi-output/target case
X, y, X_test, y_test = build_dataset(n_features=10, n_targets=3)
clf = MultiTaskElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7],
cv=3, max_iter=max_iter)
ignore_warnings(clf.fit)(X, y)
# We are in well-conditioned settings with low noise: we should
# have a good test-set performance
assert_greater(clf.score(X_test, y_test), 0.99)
assert_equal(clf.coef_.shape, (3, 10))
# Mono-output should have same cross-validated alpha_ and l1_ratio_
# in both cases.
X, y, _, _ = build_dataset(n_features=10)
clf1 = ElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7])
clf1.fit(X, y)
clf2 = MultiTaskElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7])
clf2.fit(X, y[:, np.newaxis])
assert_almost_equal(clf1.l1_ratio_, clf2.l1_ratio_)
assert_almost_equal(clf1.alpha_, clf2.alpha_)
def test_path_parameters():
X, y, _, _ = build_dataset()
max_iter = 100
clf = ElasticNetCV(n_alphas=50, eps=1e-3, max_iter=max_iter,
l1_ratio=0.5, tol=1e-3)
clf.fit(X, y) # new params
assert_almost_equal(0.5, clf.l1_ratio)
assert_equal(50, clf.n_alphas)
assert_equal(50, len(clf.alphas_))
def test_warm_start():
X, y, _, _ = build_dataset()
clf = ElasticNet(alpha=0.1, max_iter=5, warm_start=True)
ignore_warnings(clf.fit)(X, y)
ignore_warnings(clf.fit)(X, y) # do a second round with 5 iterations
clf2 = ElasticNet(alpha=0.1, max_iter=10)
ignore_warnings(clf2.fit)(X, y)
assert_array_almost_equal(clf2.coef_, clf.coef_)
def test_lasso_alpha_warning():
X = [[-1], [0], [1]]
Y = [-1, 0, 1] # just a straight line
clf = Lasso(alpha=0)
assert_warns(UserWarning, clf.fit, X, Y)
def test_lasso_positive_constraint():
X = [[-1], [0], [1]]
y = [1, 0, -1] # just a straight line with negative slope
lasso = Lasso(alpha=0.1, max_iter=1000, positive=True)
lasso.fit(X, y)
assert_true(min(lasso.coef_) >= 0)
lasso = Lasso(alpha=0.1, max_iter=1000, precompute=True, positive=True)
lasso.fit(X, y)
assert_true(min(lasso.coef_) >= 0)
def test_enet_positive_constraint():
X = [[-1], [0], [1]]
y = [1, 0, -1] # just a straight line with negative slope
enet = ElasticNet(alpha=0.1, max_iter=1000, positive=True)
enet.fit(X, y)
assert_true(min(enet.coef_) >= 0)
def test_enet_cv_positive_constraint():
X, y, X_test, y_test = build_dataset()
max_iter = 500
# Ensure the unconstrained fit has a negative coefficient
enetcv_unconstrained = ElasticNetCV(n_alphas=3, eps=1e-1,
max_iter=max_iter,
cv=2, n_jobs=1)
enetcv_unconstrained.fit(X, y)
assert_true(min(enetcv_unconstrained.coef_) < 0)
# On same data, constrained fit has non-negative coefficients
enetcv_constrained = ElasticNetCV(n_alphas=3, eps=1e-1, max_iter=max_iter,
cv=2, positive=True, n_jobs=1)
enetcv_constrained.fit(X, y)
assert_true(min(enetcv_constrained.coef_) >= 0)
def test_uniform_targets():
enet = ElasticNetCV(fit_intercept=True, n_alphas=3)
m_enet = MultiTaskElasticNetCV(fit_intercept=True, n_alphas=3)
lasso = LassoCV(fit_intercept=True, n_alphas=3)
m_lasso = MultiTaskLassoCV(fit_intercept=True, n_alphas=3)
models_single_task = (enet, lasso)
models_multi_task = (m_enet, m_lasso)
rng = np.random.RandomState(0)
X_train = rng.random_sample(size=(10, 3))
X_test = rng.random_sample(size=(10, 3))
y1 = np.empty(10)
y2 = np.empty((10, 2))
for model in models_single_task:
for y_values in (0, 5):
y1.fill(y_values)
assert_array_equal(model.fit(X_train, y1).predict(X_test), y1)
assert_array_equal(model.alphas_, [np.finfo(float).resolution]*3)
for model in models_multi_task:
for y_values in (0, 5):
y2[:, 0].fill(y_values)
y2[:, 1].fill(2 * y_values)
assert_array_equal(model.fit(X_train, y2).predict(X_test), y2)
assert_array_equal(model.alphas_, [np.finfo(float).resolution]*3)
def test_multi_task_lasso_and_enet():
X, y, X_test, y_test = build_dataset()
Y = np.c_[y, y]
# Y_test = np.c_[y_test, y_test]
clf = MultiTaskLasso(alpha=1, tol=1e-8).fit(X, Y)
assert_true(0 < clf.dual_gap_ < 1e-5)
assert_array_almost_equal(clf.coef_[0], clf.coef_[1])
clf = MultiTaskElasticNet(alpha=1, tol=1e-8).fit(X, Y)
assert_true(0 < clf.dual_gap_ < 1e-5)
assert_array_almost_equal(clf.coef_[0], clf.coef_[1])
def test_lasso_readonly_data():
X = np.array([[-1], [0], [1]])
Y = np.array([-1, 0, 1]) # just a straight line
T = np.array([[2], [3], [4]]) # test sample
with TempMemmap((X, Y)) as (X, Y):
clf = Lasso(alpha=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [.25])
assert_array_almost_equal(pred, [0.5, 0.75, 1.])
assert_almost_equal(clf.dual_gap_, 0)
def test_multi_task_lasso_readonly_data():
X, y, X_test, y_test = build_dataset()
Y = np.c_[y, y]
with TempMemmap((X, Y)) as (X, Y):
Y = np.c_[y, y]
clf = MultiTaskLasso(alpha=1, tol=1e-8).fit(X, Y)
assert_true(0 < clf.dual_gap_ < 1e-5)
assert_array_almost_equal(clf.coef_[0], clf.coef_[1])
def test_enet_multitarget():
n_targets = 3
X, y, _, _ = build_dataset(n_samples=10, n_features=8,
n_informative_features=10, n_targets=n_targets)
estimator = ElasticNet(alpha=0.01, fit_intercept=True)
estimator.fit(X, y)
coef, intercept, dual_gap = (estimator.coef_, estimator.intercept_,
estimator.dual_gap_)
for k in range(n_targets):
estimator.fit(X, y[:, k])
assert_array_almost_equal(coef[k, :], estimator.coef_)
assert_array_almost_equal(intercept[k], estimator.intercept_)
assert_array_almost_equal(dual_gap[k], estimator.dual_gap_)
def test_multioutput_enetcv_error():
X = np.random.randn(10, 2)
y = np.random.randn(10, 2)
clf = ElasticNetCV()
assert_raises(ValueError, clf.fit, X, y)
def test_multitask_enet_and_lasso_cv():
X, y, _, _ = build_dataset(n_features=100, n_targets=3)
clf = MultiTaskElasticNetCV().fit(X, y)
assert_almost_equal(clf.alpha_, 0.00556, 3)
clf = MultiTaskLassoCV().fit(X, y)
assert_almost_equal(clf.alpha_, 0.00278, 3)
X, y, _, _ = build_dataset(n_targets=3)
clf = MultiTaskElasticNetCV(n_alphas=50, eps=1e-3, max_iter=100,
l1_ratio=[0.3, 0.5], tol=1e-3)
clf.fit(X, y)
assert_equal(0.5, clf.l1_ratio_)
assert_equal((3, X.shape[1]), clf.coef_.shape)
assert_equal((3, ), clf.intercept_.shape)
assert_equal((2, 50, 3), clf.mse_path_.shape)
assert_equal((2, 50), clf.alphas_.shape)
X, y, _, _ = build_dataset(n_targets=3)
clf = MultiTaskLassoCV(n_alphas=50, eps=1e-3, max_iter=100, tol=1e-3)
clf.fit(X, y)
assert_equal((3, X.shape[1]), clf.coef_.shape)
assert_equal((3, ), clf.intercept_.shape)
assert_equal((50, 3), clf.mse_path_.shape)
assert_equal(50, len(clf.alphas_))
def test_1d_multioutput_enet_and_multitask_enet_cv():
X, y, _, _ = build_dataset(n_features=10)
y = y[:, np.newaxis]
clf = ElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7])
clf.fit(X, y[:, 0])
clf1 = MultiTaskElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7])
clf1.fit(X, y)
assert_almost_equal(clf.l1_ratio_, clf1.l1_ratio_)
assert_almost_equal(clf.alpha_, clf1.alpha_)
assert_almost_equal(clf.coef_, clf1.coef_[0])
assert_almost_equal(clf.intercept_, clf1.intercept_[0])
def test_1d_multioutput_lasso_and_multitask_lasso_cv():
X, y, _, _ = build_dataset(n_features=10)
y = y[:, np.newaxis]
clf = LassoCV(n_alphas=5, eps=2e-3)
clf.fit(X, y[:, 0])
clf1 = MultiTaskLassoCV(n_alphas=5, eps=2e-3)
clf1.fit(X, y)
assert_almost_equal(clf.alpha_, clf1.alpha_)
assert_almost_equal(clf.coef_, clf1.coef_[0])
assert_almost_equal(clf.intercept_, clf1.intercept_[0])
def test_sparse_input_dtype_enet_and_lassocv():
X, y, _, _ = build_dataset(n_features=10)
clf = ElasticNetCV(n_alphas=5)
clf.fit(sparse.csr_matrix(X), y)
clf1 = ElasticNetCV(n_alphas=5)
clf1.fit(sparse.csr_matrix(X, dtype=np.float32), y)
assert_almost_equal(clf.alpha_, clf1.alpha_, decimal=6)
assert_almost_equal(clf.coef_, clf1.coef_, decimal=6)
clf = LassoCV(n_alphas=5)
clf.fit(sparse.csr_matrix(X), y)
clf1 = LassoCV(n_alphas=5)
clf1.fit(sparse.csr_matrix(X, dtype=np.float32), y)
assert_almost_equal(clf.alpha_, clf1.alpha_, decimal=6)
assert_almost_equal(clf.coef_, clf1.coef_, decimal=6)
def test_precompute_invalid_argument():
X, y, _, _ = build_dataset()
for clf in [ElasticNetCV(precompute="invalid"),
LassoCV(precompute="invalid")]:
assert_raises(ValueError, clf.fit, X, y)
def test_warm_start_convergence():
X, y, _, _ = build_dataset()
model = ElasticNet(alpha=1e-3, tol=1e-3).fit(X, y)
n_iter_reference = model.n_iter_
# This dataset is not trivial enough for the model to converge in one pass.
assert_greater(n_iter_reference, 2)
# Check that n_iter_ is invariant to multiple calls to fit
# when warm_start=False, all else being equal.
model.fit(X, y)
n_iter_cold_start = model.n_iter_
assert_equal(n_iter_cold_start, n_iter_reference)
# Fit the same model again, using a warm start: the optimizer just performs
# a single pass before checking that it has already converged
model.set_params(warm_start=True)
model.fit(X, y)
n_iter_warm_start = model.n_iter_
assert_equal(n_iter_warm_start, 1)
def test_warm_start_convergence_with_regularizer_decrement():
boston = load_boston()
X, y = boston.data, boston.target
# Train a model to converge on a lightly regularized problem
final_alpha = 1e-5
low_reg_model = ElasticNet(alpha=final_alpha).fit(X, y)
# Fitting a new model on a more regularized version of the same problem.
    # Fitting with high regularization is easier, so it should converge
    # faster in general.
high_reg_model = ElasticNet(alpha=final_alpha * 10).fit(X, y)
assert_greater(low_reg_model.n_iter_, high_reg_model.n_iter_)
# Fit the solution to the original, less regularized version of the
# problem but from the solution of the highly regularized variant of
# the problem as a better starting point. This should also converge
# faster than the original model that starts from zero.
warm_low_reg_model = deepcopy(high_reg_model)
warm_low_reg_model.set_params(warm_start=True, alpha=final_alpha)
warm_low_reg_model.fit(X, y)
assert_greater(low_reg_model.n_iter_, warm_low_reg_model.n_iter_)
def test_random_descent():
# Test that both random and cyclic selection give the same results.
# Ensure that the test models fully converge and check a wide
# range of conditions.
# This uses the coordinate descent algo using the gram trick.
X, y, _, _ = build_dataset(n_samples=50, n_features=20)
clf_cyclic = ElasticNet(selection='cyclic', tol=1e-8)
clf_cyclic.fit(X, y)
clf_random = ElasticNet(selection='random', tol=1e-8, random_state=42)
clf_random.fit(X, y)
assert_array_almost_equal(clf_cyclic.coef_, clf_random.coef_)
assert_almost_equal(clf_cyclic.intercept_, clf_random.intercept_)
# This uses the descent algo without the gram trick
clf_cyclic = ElasticNet(selection='cyclic', tol=1e-8)
clf_cyclic.fit(X.T, y[:20])
clf_random = ElasticNet(selection='random', tol=1e-8, random_state=42)
clf_random.fit(X.T, y[:20])
assert_array_almost_equal(clf_cyclic.coef_, clf_random.coef_)
assert_almost_equal(clf_cyclic.intercept_, clf_random.intercept_)
# Sparse Case
clf_cyclic = ElasticNet(selection='cyclic', tol=1e-8)
clf_cyclic.fit(sparse.csr_matrix(X), y)
clf_random = ElasticNet(selection='random', tol=1e-8, random_state=42)
clf_random.fit(sparse.csr_matrix(X), y)
assert_array_almost_equal(clf_cyclic.coef_, clf_random.coef_)
assert_almost_equal(clf_cyclic.intercept_, clf_random.intercept_)
# Multioutput case.
new_y = np.hstack((y[:, np.newaxis], y[:, np.newaxis]))
clf_cyclic = MultiTaskElasticNet(selection='cyclic', tol=1e-8)
clf_cyclic.fit(X, new_y)
clf_random = MultiTaskElasticNet(selection='random', tol=1e-8,
random_state=42)
clf_random.fit(X, new_y)
assert_array_almost_equal(clf_cyclic.coef_, clf_random.coef_)
assert_almost_equal(clf_cyclic.intercept_, clf_random.intercept_)
# Raise error when selection is not in cyclic or random.
clf_random = ElasticNet(selection='invalid')
assert_raises(ValueError, clf_random.fit, X, y)
def test_deprecation_precompute_enet():
# Test that setting precompute="auto" gives a Deprecation Warning.
X, y, _, _ = build_dataset(n_samples=20, n_features=10)
clf = ElasticNet(precompute="auto")
assert_warns(DeprecationWarning, clf.fit, X, y)
clf = Lasso(precompute="auto")
assert_warns(DeprecationWarning, clf.fit, X, y)
def test_enet_path_positive():
# Test that the coefs returned by positive=True in enet_path are positive
X, y, _, _ = build_dataset(n_samples=50, n_features=50)
for path in [enet_path, lasso_path]:
pos_path_coef = path(X, y, positive=True)[1]
assert_true(np.all(pos_path_coef >= 0))
def test_sparse_dense_descent_paths():
    # Test that dense and sparse input give the same results for descent paths.
X, y, _, _ = build_dataset(n_samples=50, n_features=20)
csr = sparse.csr_matrix(X)
for path in [enet_path, lasso_path]:
_, coefs, _ = path(X, y, fit_intercept=False)
_, sparse_coefs, _ = path(csr, y, fit_intercept=False)
assert_array_almost_equal(coefs, sparse_coefs)
def test_check_input_false():
X, y, _, _ = build_dataset(n_samples=20, n_features=10)
X = check_array(X, order='F', dtype='float64')
    y = check_array(y, order='F', dtype='float64', ensure_2d=False)
clf = ElasticNet(selection='cyclic', tol=1e-8)
# Check that no error is raised if data is provided in the right format
clf.fit(X, y, check_input=False)
X = check_array(X, order='F', dtype='float32')
clf.fit(X, y, check_input=True)
# Check that an error is raised if data is provided in the wrong format,
# because of check bypassing
assert_raises(ValueError, clf.fit, X, y, check_input=False)
    # With no input checking, providing X in C order should result in
    # incorrect computation
X = check_array(X, order='C', dtype='float64')
clf.fit(X, y, check_input=False)
coef_false = clf.coef_
clf.fit(X, y, check_input=True)
coef_true = clf.coef_
assert_raises(AssertionError, assert_array_almost_equal,
coef_true, coef_false)
def test_overridden_gram_matrix():
X, y, _, _ = build_dataset(n_samples=20, n_features=10)
Gram = X.T.dot(X)
clf = ElasticNet(selection='cyclic', tol=1e-8, precompute=Gram,
fit_intercept=True)
assert_warns_message(UserWarning,
"Gram matrix was provided but X was centered"
" to fit intercept, "
"or X was normalized : recomputing Gram matrix.",
clf.fit, X, y)
| bsd-3-clause |
samzhang111/scikit-learn | setup.py | 76 | 9370 | #! /usr/bin/env python
#
# Copyright (C) 2007-2009 Cournapeau David <[email protected]>
# 2010 Fabian Pedregosa <[email protected]>
# License: 3-clause BSD
descr = """A set of python modules for machine learning and data mining"""
import sys
import os
import shutil
from distutils.command.clean import clean as Clean
from pkg_resources import parse_version
if sys.version_info[0] < 3:
import __builtin__ as builtins
else:
import builtins
# This is a bit (!) hackish: we are setting a global variable so that the main
# sklearn __init__ can detect if it is being loaded by the setup routine, to
# avoid attempting to load components that aren't built yet:
# the numpy distutils extensions that are used by scikit-learn to recursively
# build the compiled extensions in sub-packages are based on the Python import
# machinery.
builtins.__SKLEARN_SETUP__ = True
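# A minimal sketch of how the package __init__ can detect this flag (an
# illustration only, not a verbatim copy of sklearn/__init__.py):
#
#     try:
#         __SKLEARN_SETUP__
#     except NameError:
#         __SKLEARN_SETUP__ = False
#     if __SKLEARN_SETUP__:
#         sys.stderr.write('Partial import of sklearn during the build process.\n')
#     else:
#         pass  # import the compiled submodules as usual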
DISTNAME = 'scikit-learn'
DESCRIPTION = 'A set of python modules for machine learning and data mining'
with open('README.rst') as f:
LONG_DESCRIPTION = f.read()
MAINTAINER = 'Andreas Mueller'
MAINTAINER_EMAIL = '[email protected]'
URL = 'http://scikit-learn.org'
LICENSE = 'new BSD'
DOWNLOAD_URL = 'http://sourceforge.net/projects/scikit-learn/files/'
# We can actually import a restricted version of sklearn that
# does not need the compiled code
import sklearn
VERSION = sklearn.__version__
# Optional setuptools features
# We need to import setuptools early, if we want setuptools features,
# as it monkey-patches the 'setup' function
# For some commands, use setuptools
SETUPTOOLS_COMMANDS = set([
'develop', 'release', 'bdist_egg', 'bdist_rpm',
'bdist_wininst', 'install_egg_info', 'build_sphinx',
'egg_info', 'easy_install', 'upload', 'bdist_wheel',
'--single-version-externally-managed',
])
if SETUPTOOLS_COMMANDS.intersection(sys.argv):
import setuptools
extra_setuptools_args = dict(
zip_safe=False, # the package can run out of an .egg file
include_package_data=True,
)
else:
extra_setuptools_args = dict()
# Custom clean command to remove build artifacts
class CleanCommand(Clean):
description = "Remove build artifacts from the source tree"
def run(self):
Clean.run(self)
if os.path.exists('build'):
shutil.rmtree('build')
for dirpath, dirnames, filenames in os.walk('sklearn'):
for filename in filenames:
if (filename.endswith('.so') or filename.endswith('.pyd')
or filename.endswith('.dll')
or filename.endswith('.pyc')):
os.unlink(os.path.join(dirpath, filename))
for dirname in dirnames:
if dirname == '__pycache__':
shutil.rmtree(os.path.join(dirpath, dirname))
cmdclass = {'clean': CleanCommand}
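# With the mapping above, the custom command is available as, e.g.:
#     python setup.py clean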
# Optional wheelhouse-uploader features
# To automate release of binary packages for scikit-learn we need a tool
# to download the packages generated by travis and appveyor workers (with
# version number matching the current release) and upload them all at once
# to PyPI at release time.
# The URLs of the artifact repositories are configured in the setup.cfg file.
WHEELHOUSE_UPLOADER_COMMANDS = set(['fetch_artifacts', 'upload_all'])
if WHEELHOUSE_UPLOADER_COMMANDS.intersection(sys.argv):
import wheelhouse_uploader.cmd
cmdclass.update(vars(wheelhouse_uploader.cmd))
def configuration(parent_package='', top_path=None):
if os.path.exists('MANIFEST'):
os.remove('MANIFEST')
from numpy.distutils.misc_util import Configuration
config = Configuration(None, parent_package, top_path)
# Avoid non-useful msg:
# "Ignoring attempt to set 'name' (from ... "
config.set_options(ignore_setup_xxx_py=True,
assume_default_configuration=True,
delegate_options_to_subpackages=True,
quiet=True)
config.add_subpackage('sklearn')
return config
scipy_min_version = '0.9'
numpy_min_version = '1.6.1'
def get_scipy_status():
"""
Returns a dictionary containing a boolean specifying whether SciPy
is up-to-date, along with the version string (empty string if
not installed).
"""
scipy_status = {}
try:
import scipy
scipy_version = scipy.__version__
scipy_status['up_to_date'] = parse_version(
scipy_version) >= parse_version(scipy_min_version)
scipy_status['version'] = scipy_version
except ImportError:
scipy_status['up_to_date'] = False
scipy_status['version'] = ""
return scipy_status
def get_numpy_status():
"""
Returns a dictionary containing a boolean specifying whether NumPy
is up-to-date, along with the version string (empty string if
not installed).
"""
numpy_status = {}
try:
import numpy
numpy_version = numpy.__version__
numpy_status['up_to_date'] = parse_version(
numpy_version) >= parse_version(numpy_min_version)
numpy_status['version'] = numpy_version
except ImportError:
numpy_status['up_to_date'] = False
numpy_status['version'] = ""
return numpy_status
def setup_package():
metadata = dict(name=DISTNAME,
maintainer=MAINTAINER,
maintainer_email=MAINTAINER_EMAIL,
description=DESCRIPTION,
license=LICENSE,
url=URL,
version=VERSION,
download_url=DOWNLOAD_URL,
long_description=LONG_DESCRIPTION,
classifiers=['Intended Audience :: Science/Research',
'Intended Audience :: Developers',
'License :: OSI Approved',
'Programming Language :: C',
'Programming Language :: Python',
'Topic :: Software Development',
'Topic :: Scientific/Engineering',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'Operating System :: Unix',
'Operating System :: MacOS',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
cmdclass=cmdclass,
**extra_setuptools_args)
if (len(sys.argv) >= 2
and ('--help' in sys.argv[1:] or sys.argv[1]
in ('--help-commands', 'egg_info', '--version', 'clean'))):
# For these actions, NumPy is not required.
#
        # They must succeed without NumPy, for example when pip is used to
        # install scikit-learn while NumPy is not yet present on the system.
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
metadata['version'] = VERSION
else:
numpy_status = get_numpy_status()
numpy_req_str = "scikit-learn requires NumPy >= {0}.\n".format(
numpy_min_version)
scipy_status = get_scipy_status()
scipy_req_str = "scikit-learn requires SciPy >= {0}.\n".format(
scipy_min_version)
instructions = ("Installation instructions are available on the "
"scikit-learn website: "
"http://scikit-learn.org/stable/install.html\n")
if numpy_status['up_to_date'] is False:
if numpy_status['version']:
raise ImportError("Your installation of Numerical Python "
"(NumPy) {0} is out-of-date.\n{1}{2}"
.format(numpy_status['version'],
numpy_req_str, instructions))
else:
raise ImportError("Numerical Python (NumPy) is not "
"installed.\n{0}{1}"
.format(numpy_req_str, instructions))
if scipy_status['up_to_date'] is False:
if scipy_status['version']:
raise ImportError("Your installation of Scientific Python "
"(SciPy) {0} is out-of-date.\n{1}{2}"
.format(scipy_status['version'],
scipy_req_str, instructions))
else:
raise ImportError("Scientific Python (SciPy) is not "
"installed.\n{0}{1}"
.format(scipy_req_str, instructions))
from numpy.distutils.core import setup
metadata['configuration'] = configuration
setup(**metadata)
if __name__ == "__main__":
setup_package()
| bsd-3-clause |
UltronAI/Deep-Learning | Pattern-Recognition/hw2-Feature-Selection/skfeature/function/similarity_based/SPEC.py | 1 | 3628 | import numpy.matlib
import numpy as np
from scipy.sparse import *
from sklearn.metrics.pairwise import rbf_kernel
from numpy import linalg as LA
def spec(X, **kwargs):
"""
This function implements the SPEC feature selection
Input
-----
X: {numpy array}, shape (n_samples, n_features)
input data
kwargs: {dictionary}
style: {int}
style == -1, the first feature ranking function, use all eigenvalues
style == 0, the second feature ranking function, use all except the 1st eigenvalue
style >= 2, the third feature ranking function, use the first k except 1st eigenvalue
W: {sparse matrix}, shape (n_samples, n_samples}
input affinity matrix
Output
------
w_fea: {numpy array}, shape (n_features,)
SPEC feature score for each feature
Reference
---------
Zhao, Zheng and Liu, Huan. "Spectral Feature Selection for Supervised and Unsupervised Learning." ICML 2007.
"""
if 'style' not in kwargs:
kwargs['style'] = 0
if 'W' not in kwargs:
kwargs['W'] = rbf_kernel(X, gamma=1)
style = kwargs['style']
W = kwargs['W']
if type(W) is numpy.ndarray:
W = csc_matrix(W)
n_samples, n_features = X.shape
# build the degree matrix
X_sum = np.array(W.sum(axis=1))
D = np.zeros((n_samples, n_samples))
for i in range(n_samples):
D[i, i] = X_sum[i]
# build the laplacian matrix
L = D - W
d1 = np.power(np.array(W.sum(axis=1)), -0.5)
d1[np.isinf(d1)] = 0
d2 = np.power(np.array(W.sum(axis=1)), 0.5)
v = np.dot(np.diag(d2[:, 0]), np.ones(n_samples))
v = v/LA.norm(v)
# build the normalized laplacian matrix
L_hat = (np.matlib.repmat(d1, 1, n_samples)) * np.array(L) * np.matlib.repmat(np.transpose(d1), n_samples, 1)
# calculate and construct spectral information
s, U = np.linalg.eigh(L_hat)
s = np.flipud(s)
U = np.fliplr(U)
# begin to select features
w_fea = np.ones(n_features)*1000
for i in range(n_features):
f = X[:, i]
F_hat = np.dot(np.diag(d2[:, 0]), f)
l = LA.norm(F_hat)
if l < 100*np.spacing(1):
w_fea[i] = 1000
continue
else:
F_hat = F_hat/l
a = np.array(np.dot(np.transpose(F_hat), U))
a = np.multiply(a, a)
a = np.transpose(a)
# use f'Lf formulation
if style == -1:
w_fea[i] = np.sum(a * s)
# using all eigenvalues except the 1st
elif style == 0:
a1 = a[0:n_samples-1]
w_fea[i] = np.sum(a1 * s[0:n_samples-1])/(1-np.power(np.dot(np.transpose(F_hat), v), 2))
# use first k except the 1st
else:
a1 = a[n_samples-style:n_samples-1]
w_fea[i] = np.sum(a1 * (2-s[n_samples-style: n_samples-1]))
if style != -1 and style != 0:
w_fea[w_fea == 1000] = -1000
return w_fea
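# Usage sketch (added for illustration; the random data below is a hypothetical
# stand-in, not part of the original module):
#
#   >>> import numpy as np
#   >>> X = np.random.rand(50, 10)                 # 50 samples, 10 features
#   >>> scores = spec(X, style=0)                  # an RBF affinity is built by default
#   >>> ranked = feature_ranking(scores, style=0)  # feature indices, most important first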
def feature_ranking(score, **kwargs):
if 'style' not in kwargs:
kwargs['style'] = 0
style = kwargs['style']
# if style = -1 or 0, ranking features in descending order, the higher the score, the more important the feature is
if style == -1 or style == 0:
idx = np.argsort(score, 0)
return idx[::-1]
# if style != -1 and 0, ranking features in ascending order, the lower the score, the more important the feature is
elif style != -1 and style != 0:
idx = np.argsort(score, 0)
        return idx
| mit |
adexin/Python-Machine-Learning-Samples | Logistic_regression/Ecommerce_logpredict/logistic_predict.py | 1 | 1439 | import numpy as np
import pandas as pd
def get_data():
df = pd.read_csv('ecommerce_data.csv')
data = df.as_matrix()
# Input data
X = data[:, :-1]
# Output data
Y = data[:, -1]
# Normalize data (X - mu)/sigma
X1data = X[:, 1]
X1mean = X[:, 1].mean() # mu
X1stdev = X[:, 1].std() # sigma
X[:, 1] = (X1data - X1mean) / X1stdev
X2data = X[:, 2]
X2mean = X[:, 2].mean() # mu
X2stdev = X[:, 2].std() # sigma
X[:, 2] = (X2data - X2mean) / X2stdev
N, D = X.shape
X2 = np.zeros((N, D+3))
# copy all data and exclude time column, prepare for one-hot encoding
X2[:, 0:(D - 1)] = X[:, 0:(D - 1)]
for n in range(N):
# data in column is 0, 1, 2, 3
# 0 - [1,0,0,0]
# 1 - [0,1,0,0]
# 2 - [0,0,1,0]
# 3 - [0,0,0,1]
t = int(X[n, (D - 1)]) # Get value from column
X2[n, t+D-1] = 1 # Set value into one-hot encoding column
return X2, Y
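# Note (added remark): assuming the categorical time-of-day column only takes the
# values 0-3, the encoding loop above is equivalent to indexing an identity matrix:
#   X2[:, D-1:] = np.eye(4)[X[:, D-1].astype(np.int32)]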
def get_binary_data():
X, Y = get_data()
X2 = X[Y <= 1]
Y2 = Y[Y <= 1]
return X2, Y2
X, Y = get_binary_data()
D = X.shape[1]
W = np.random.randn(D)
b = 0
def sigmoid(a):
    return 1 / (1 + np.exp(-a))
def forward(X, W, b):
return sigmoid(X.dot(W) + b)
P_Y_given_X = forward(X, W, b)
predictions = np.round(P_Y_given_X)
def classification_rate(Y, P):
return np.mean(Y == P)
print("Score: ", classification_rate(Y, predictions))
| mit |
ldirer/scikit-learn | sklearn/decomposition/__init__.py | 66 | 1433 | """
The :mod:`sklearn.decomposition` module includes matrix decomposition
algorithms, including among others PCA, NMF or ICA. Most of the algorithms of
this module can be regarded as dimensionality reduction techniques.
"""
from .nmf import NMF, non_negative_factorization
from .pca import PCA, RandomizedPCA
from .incremental_pca import IncrementalPCA
from .kernel_pca import KernelPCA
from .sparse_pca import SparsePCA, MiniBatchSparsePCA
from .truncated_svd import TruncatedSVD
from .fastica_ import FastICA, fastica
from .dict_learning import (dict_learning, dict_learning_online, sparse_encode,
DictionaryLearning, MiniBatchDictionaryLearning,
SparseCoder)
from .factor_analysis import FactorAnalysis
from ..utils.extmath import randomized_svd
from .online_lda import LatentDirichletAllocation
__all__ = ['DictionaryLearning',
'FastICA',
'IncrementalPCA',
'KernelPCA',
'MiniBatchDictionaryLearning',
'MiniBatchSparsePCA',
'NMF',
'PCA',
'RandomizedPCA',
'SparseCoder',
'SparsePCA',
'dict_learning',
'dict_learning_online',
'fastica',
'non_negative_factorization',
'randomized_svd',
'sparse_encode',
'FactorAnalysis',
'TruncatedSVD',
'LatentDirichletAllocation']
| bsd-3-clause |
KasperPRasmussen/bokeh | examples/howto/interactive_bubble/gapminder.py | 6 | 4182 | import pandas as pd
from jinja2 import Template
from bokeh.util.browser import view
from bokeh.models import (
ColumnDataSource, Plot, Circle, Range1d,
LinearAxis, HoverTool, Text,
SingleIntervalTicker, CustomJS, Slider
)
from bokeh.palettes import Spectral6
from bokeh.plotting import vplot
from bokeh.resources import JSResources
from bokeh.embed import file_html
from data import process_data
fertility_df, life_expectancy_df, population_df_size, regions_df, years, regions = process_data()
sources = {}
region_color = regions_df['region_color']
region_color.name = 'region_color'
for year in years:
fertility = fertility_df[year]
fertility.name = 'fertility'
life = life_expectancy_df[year]
life.name = 'life'
population = population_df_size[year]
population.name = 'population'
new_df = pd.concat([fertility, life, population, region_color], axis=1)
sources['_' + str(year)] = ColumnDataSource(new_df)
dictionary_of_sources = dict(zip([x for x in years], ['_%s' % x for x in years]))
js_source_array = str(dictionary_of_sources).replace("'", "")
xdr = Range1d(1, 9)
ydr = Range1d(20, 100)
plot = Plot(
x_range=xdr,
y_range=ydr,
title="",
plot_width=800,
plot_height=400,
outline_line_color=None,
toolbar_location=None,
)
AXIS_FORMATS = dict(
minor_tick_in=None,
minor_tick_out=None,
major_tick_in=None,
major_label_text_font_size="10pt",
major_label_text_font_style="normal",
axis_label_text_font_size="10pt",
axis_line_color='#AAAAAA',
major_tick_line_color='#AAAAAA',
major_label_text_color='#666666',
major_tick_line_cap="round",
axis_line_cap="round",
axis_line_width=1,
major_tick_line_width=1,
)
xaxis = LinearAxis(ticker=SingleIntervalTicker(interval=1), axis_label="Children per woman (total fertility)", **AXIS_FORMATS)
yaxis = LinearAxis(ticker=SingleIntervalTicker(interval=20), axis_label="Life expectancy at birth (years)", **AXIS_FORMATS)
plot.add_layout(xaxis, 'below')
plot.add_layout(yaxis, 'left')
# ### Add the background year text
# We add this first so it is below all the other glyphs
text_source = ColumnDataSource({'year': ['%s' % years[0]]})
text = Text(x=2, y=35, text='year', text_font_size='150pt', text_color='#EEEEEE')
plot.add_glyph(text_source, text)
# Add the circle
renderer_source = sources['_%s' % years[0]]
circle_glyph = Circle(
x='fertility', y='life', size='population',
fill_color='region_color', fill_alpha=0.8,
line_color='#7c7e71', line_width=0.5, line_alpha=0.5)
circle_renderer = plot.add_glyph(renderer_source, circle_glyph)
# Add the hover (only against the circle and not other plot elements)
tooltips = "@index"
plot.add_tools(HoverTool(tooltips=tooltips, renderers=[circle_renderer]))
# Add the legend
text_x = 7
text_y = 95
for i, region in enumerate(regions):
plot.add_glyph(Text(x=text_x, y=text_y, text=[region], text_font_size='10pt', text_color='#666666'))
plot.add_glyph(Circle(x=text_x - 0.1, y=text_y + 2, fill_color=Spectral6[i], size=10, line_color=None, fill_alpha=0.8))
text_y = text_y - 5
# Add the slider
code = """
var year = slider.get('value'),
sources = %s,
new_source_data = sources[year].get('data');
renderer_source.set('data', new_source_data);
text_source.set('data', {'year': [String(year)]});
""" % js_source_array
callback = CustomJS(args=sources, code=code)
slider = Slider(start=years[0], end=years[-1], value=1, step=1, title="Year", callback=callback, name='testy')
callback.args["renderer_source"] = renderer_source
callback.args["slider"] = slider
callback.args["text_source"] = text_source
# Stick the plot and the slider together
layout = vplot(plot, slider)
# Open our custom template
with open('gapminder_template.jinja', 'r') as f:
template = Template(f.read())
# Use inline resources, render the html and open
js_resources = JSResources(mode='inline')
title = "Bokeh - Gapminder Bubble Plot"
html = file_html(layout, resources=(js_resources, None), title=title, template=template)
output_file = 'gapminder.html'
with open(output_file, 'w') as f:
f.write(html)
view(output_file)
| bsd-3-clause |
tknapen/reward_np_analysis | workflows/motion_correction.py | 1 | 16736 | from spynoza.nodes import EPI_file_selector
def _extend_motion_parameters(moco_par_file, tr, sg_args = {'window_length': 120, 'deriv':0, 'polyorder':3, 'mode':'nearest'}):
import os.path as op
import numpy as np
from sklearn import decomposition
from scipy.signal import savgol_filter
ext_out_file = moco_par_file[:-7] + 'ext_moco_pars.par'
new_out_file = moco_par_file[:-7] + 'new_moco_pars.par'
sg_args['window_length'] = int(sg_args['window_length'] / tr)
# Window must be odd-shaped
if sg_args['window_length'] % 2 == 0:
sg_args['window_length'] += 1
moco_pars = np.loadtxt(moco_par_file)
moco_pars = moco_pars - savgol_filter(moco_pars, axis = 0, **sg_args)
dt_moco_pars = np.diff(np.vstack((np.ones((1,6)), moco_pars)), axis = 0)
ddt_moco_pars = np.diff(np.vstack((np.ones((1,6)), dt_moco_pars)), axis = 0)
ext_moco_pars = np.hstack((moco_pars, dt_moco_pars, ddt_moco_pars))
    # expand with squared derivative terms, run PCA, and keep 18 components (the size of the extended set)
amp = np.hstack((moco_pars, dt_moco_pars, ddt_moco_pars, dt_moco_pars**2, ddt_moco_pars**2))
pca = decomposition.PCA(n_components = 18)
pca.fit(amp)
new_moco_pars = pca.transform(amp)
np.savetxt(new_out_file, new_moco_pars, fmt='%f', delimiter='\t')
np.savetxt(ext_out_file, ext_moco_pars, fmt='%f', delimiter='\t')
return new_out_file, ext_out_file
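# Usage sketch (added; the file name and TR are hypothetical examples): given an
# MCFLIRT .par file and the TR in seconds,
#   new_par, ext_par = _extend_motion_parameters('sub-001_mcf.par', tr=2.0)
# ext_par holds the 18-column [pars, 1st derivatives, 2nd derivatives] set and
# new_par holds its 18-component PCA compression of the squared-expanded set,
# both written next to the input file.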
def select_target_T2(T2_file_list, target_session):
target_T2 = [T2 for T2 in T2_file_list if target_session in T2][0]
return target_T2
def select_target_epi(epi_file_list, T2_file_list, target_session, which_file):
from spynoza.nodes import EPI_file_selector
target_T2 = [T2 for T2 in T2_file_list if target_session in T2][0]
all_target_epis = [epi for epi in epi_file_list if target_session in epi]
target_epi = EPI_file_selector(which_file, all_target_epis)
print("XXXXX " + target_epi)
return target_epi
def select_T2_for_epi(epi_file, T2_file_list):
import os.path as op
epi_filename = op.split(epi_file)[-1]
T2_sessions = [op.split(T2)[-1].split('_inplaneT2')[0] for T2 in T2_file_list]
which_T2_file = [T2 for (T2f, T2) in zip(T2_sessions, T2_file_list) if T2f in epi_filename][0]
return which_T2_file
def find_all_epis_for_inplane_anats(epi_file_list, inplane_anats, inplane_anat_suffix = '_inplaneT2_brain.nii.gz'):
'''selects epi nifti files that correspond to the session of each of inplane_anats.
Parameters
----------
epi_file_list : list
list of nifti, or other filenames
inplane_anats : list
list of nifti filenames
inplane_anat_suffix : string
string that, when taken from the inplane_anat's filename, leaves the session's label.
    Returns
    -------
    list of lists
        epi_file_list files distributed among len(inplane_anats) sublists.
    '''
import os.path as op
session_labels = [op.split(ipa)[-1].split(inplane_anat_suffix)[0] for ipa in inplane_anats]
output_lists = []
for sl in session_labels:
output_lists.append([epi for epi in epi_file_list if sl in epi])
    return output_lists
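# Usage sketch (added; the file names are hypothetical examples):
#   find_all_epis_for_inplane_anats(
#       ['ses-1_run-1_bold.nii.gz', 'ses-2_run-1_bold.nii.gz'],
#       ['ses-1_inplaneT2_brain.nii.gz', 'ses-2_inplaneT2_brain.nii.gz'])
# returns [['ses-1_run-1_bold.nii.gz'], ['ses-2_run-1_bold.nii.gz']].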
def create_motion_correction_workflow(analysis_info, name = 'moco'):
"""uses sub-workflows to perform different registration steps.
Requires fsl and freesurfer tools
Parameters
----------
name : string
name of workflow
Example
-------
>>> motion_correction_workflow = create_motion_correction_workflow('motion_correction_workflow')
>>> motion_correction_workflow.inputs.inputspec.output_directory = '/data/project/raw/BIDS/sj_1/'
>>> motion_correction_workflow.inputs.inputspec.in_files = ['sub-001.nii.gz','sub-002.nii.gz']
>>> motion_correction_workflow.inputs.inputspec.which_file_is_EPI_space = 'middle'
Inputs::
inputspec.output_directory : directory in which to sink the result files
inputspec.in_files : list of functional files
inputspec.which_file_is_EPI_space : determines which file is the 'standard EPI space'
Outputs::
outputspec.EPI_space_file : standard EPI space file, one timepoint
outputspec.motion_corrected_files : motion corrected files
outputspec.motion_correction_plots : motion correction plots
outputspec.motion_correction_parameters : motion correction parameters
"""
import os
import os.path as op
import nipype.pipeline as pe
import nipype.interfaces.fsl as fsl
import nipype.interfaces.utility as util
import nipype.interfaces.io as nio
from nipype.interfaces.utility import Function, IdentityInterface
import nipype.interfaces.utility as niu
########################################################################################
# nodes
########################################################################################
input_node = pe.Node(IdentityInterface(fields=[
'in_files',
'inplane_T2_files',
'T2_files_reg_matrices',
'output_directory',
'which_file_is_EPI_space',
'sub_id',
'tr']), name='inputspec')
output_node = pe.Node(IdentityInterface(fields=([
'motion_corrected_files',
'EPI_space_file',
'T2_space_file',
'motion_correction_plots',
'motion_correction_parameters',
'extended_motion_correction_parameters',
'new_motion_correction_parameters'])), name='outputspec')
EPI_file_selector_node = pe.Node(Function(input_names=['which_file', 'in_files'], output_names='raw_EPI_space_file',
function=EPI_file_selector), name='EPI_file_selector_node')
# motion_correct_EPI_space = pe.Node(interface=fsl.MCFLIRT(
# save_mats = True,
# stats_imgs = True,
# save_plots = True,
# save_rms = True,
# cost = 'normmi',
# interpolation = 'sinc',
# dof = 6,
# # ref_vol = 0
# ), name='realign_space')
# mean_bold = pe.Node(interface=fsl.maths.MeanImage(dimension='T'), name='mean_space')
# new approach, which should aid in the joint motion correction of
# multiple sessions together, by pre-registering each run.
# the strategy would be to, for each run, take the first TR
# and FLIRT-align (6dof) it to the EPI_space file.
# then we can use this as an --infile argument to mcflirt.
select_target_T2_node = pe.Node(Function(input_names=['T2_file_list', 'target_session'], output_names=['which_T2'],
function=select_target_T2), name='select_target_T2_node')
select_target_T2_node.inputs.target_session = analysis_info['target_session']
# select_target_epi_node = pe.Node(Function(input_names=['epi_file_list', 'T2_file_list', 'target_session', 'which_file'], output_names=['target_epi'],
# function=select_target_epi), name='select_target_epi_node')
# select_target_epi_node.inputs.target_session = analysis_info['target_session']
select_T2_for_epi_node = pe.MapNode(Function(input_names=['epi_file', 'T2_file_list'], output_names=['which_T2_file'],
function=select_T2_for_epi), name='select_T2_for_epi_node', iterfield = ['epi_file'])
select_T2_mat_for_epi_node = pe.MapNode(Function(input_names=['epi_file', 'T2_file_list'], output_names=['which_T2_file'],
function=select_T2_for_epi), name='select_T2_mat_for_epi_node', iterfield = ['epi_file'])
bet_T2_node = pe.MapNode(interface=
fsl.BET(frac = analysis_info['T2_bet_f_value'],
vertical_gradient = analysis_info['T2_bet_g_value'],
functional=False, mask = True), name='bet_T2', iterfield=['in_file'])
bet_epi_node = pe.MapNode(interface=
fsl.BET(frac = analysis_info['T2_bet_f_value'],
vertical_gradient = analysis_info['T2_bet_g_value'],
functional=True, mask = True), name='bet_epi', iterfield=['in_file'])
motion_correct_all = pe.MapNode(interface=fsl.MCFLIRT(
save_mats = True,
save_plots = True,
cost = 'normmi',
interpolation = 'sinc',
stats_imgs = True,
dof = 6
), name='realign_all',
iterfield = ['in_file', 'ref_file'])
plot_motion = pe.MapNode(interface=fsl.PlotMotionParams(in_source='fsl'),
name='plot_motion',
iterfield=['in_file'])
extend_motion_pars = pe.MapNode(Function(input_names=['moco_par_file', 'tr'], output_names=['new_out_file', 'ext_out_file'],
function=_extend_motion_parameters), name='extend_motion_pars', iterfield = ['moco_par_file'])
# registration node is set up for rigid-body within-modality reg
# reg_flirt_N = pe.MapNode(fsl.FLIRT(cost_func='normcorr', output_type = 'NIFTI_GZ',# dof = 6, schedule = op.abspath(op.join(os.environ['FSLDIR'], 'etc', 'flirtsch', 'sch2D_6dof')),
# interp = 'sinc', dof = 6),
# name = 'reg_flirt_N', iterfield = ['in_file'])
regapply_moco_node = pe.MapNode(interface=
fsl.ApplyXfm(interp = 'spline'), name='regapply_moco_node', iterfield=['in_file', 'in_matrix_file'])
resample_epis = pe.MapNode(fsl.maths.MathsCommand(args = ' -subsamp2offc '), name='resample_epis', iterfield = ['in_file'])
resample_target_T2 = pe.Node(fsl.maths.MathsCommand(args = ' -subsamp2offc '), name='resample_target_T2')
rename = pe.Node(niu.Rename(format_string='session_EPI_space',
keep_ext=True),
name='namer')
rename_T2 = pe.Node(niu.Rename(format_string='session_T2_space',
keep_ext=True),
name='namer_T2')
########################################################################################
# workflow
########################################################################################
motion_correction_workflow = pe.Workflow(name=name)
motion_correction_workflow.connect(input_node, 'in_files', bet_epi_node, 'in_file')
motion_correction_workflow.connect(input_node, 'inplane_T2_files', bet_T2_node, 'in_file')
# select example func data, and example T2 space
# motion_correction_workflow.connect(input_node, 'which_file_is_EPI_space', select_target_epi_node, 'which_file')
# motion_correction_workflow.connect(bet_epi_node, 'out_file', select_target_epi_node, 'epi_file_list')
# motion_correction_workflow.connect(bet_T2_node, 'out_file', select_target_epi_node, 'T2_file_list')
motion_correction_workflow.connect(bet_T2_node, 'out_file', select_target_T2_node, 'T2_file_list')
# motion correct and average the standard EPI file
# motion_correction_workflow.connect(select_target_epi_node, 'target_epi', motion_correct_EPI_space, 'in_file')
# motion_correction_workflow.connect(motion_correct_EPI_space, 'out_file', mean_bold, 'in_file')
# output node, for later saving
# motion_correction_workflow.connect(mean_bold, 'out_file', output_node, 'EPI_space_file')
motion_correction_workflow.connect(select_target_T2_node, 'which_T2', output_node, 'T2_space_file')
# find the relevant T2 files for each of the epi files
motion_correction_workflow.connect(bet_epi_node, 'out_file', select_T2_for_epi_node, 'epi_file')
motion_correction_workflow.connect(bet_T2_node, 'out_file', select_T2_for_epi_node, 'T2_file_list')
# find the relevant T2 registration file for each of the epi files
motion_correction_workflow.connect(bet_epi_node, 'out_file', select_T2_mat_for_epi_node, 'epi_file')
motion_correction_workflow.connect(input_node, 'T2_files_reg_matrices', select_T2_mat_for_epi_node, 'T2_file_list')
# motion correction across runs
# motion_correction_workflow.connect(prereg_flirt_N, 'out_matrix_file', motion_correct_all, 'init')
motion_correction_workflow.connect(bet_epi_node, 'out_file', motion_correct_all, 'in_file')
motion_correction_workflow.connect(select_T2_for_epi_node, 'which_T2_file', motion_correct_all, 'ref_file')
# motion_correction_workflow.connect(mean_bold, 'out_file', motion_correct_all, 'ref_file')
# the registration
# motion_correction_workflow.connect(select_T2_for_epi_node, 'which_T2_file', reg_flirt_N, 'in_file')
# motion_correction_workflow.connect(select_target_T2_node, 'which_T2', reg_flirt_N, 'reference')
# output of motion correction of all files
motion_correction_workflow.connect(motion_correct_all, 'par_file', output_node, 'motion_correction_parameters')
motion_correction_workflow.connect(motion_correct_all, 'out_file', regapply_moco_node, 'in_file')
# registration has already been done by hand. This registration matrix is in the datasource, and applied here.
motion_correction_workflow.connect(select_T2_mat_for_epi_node, 'which_T2_file', regapply_moco_node, 'in_matrix_file')
motion_correction_workflow.connect(select_target_T2_node, 'which_T2', regapply_moco_node, 'reference')
motion_correction_workflow.connect(regapply_moco_node, 'out_file', resample_epis, 'in_file')
motion_correction_workflow.connect(resample_epis, 'out_file', output_node, 'motion_corrected_files')
motion_correction_workflow.connect(motion_correct_all, 'par_file', extend_motion_pars, 'moco_par_file')
motion_correction_workflow.connect(input_node, 'tr', extend_motion_pars, 'tr')
motion_correction_workflow.connect(extend_motion_pars, 'ext_out_file', output_node, 'extended_motion_correction_parameters')
motion_correction_workflow.connect(extend_motion_pars, 'new_out_file', output_node, 'new_motion_correction_parameters')
motion_correction_workflow.connect(rename, 'out_file', output_node, 'EPI_space_file')
########################################################################################
# Plot the estimated motion parameters
########################################################################################
plot_motion.iterables = ('plot_type', ['rotations', 'translations'])
motion_correction_workflow.connect(motion_correct_all, 'par_file', plot_motion, 'in_file')
motion_correction_workflow.connect(plot_motion, 'out_file', output_node, 'motion_correction_plots')
########################################################################################
# outputs via datasink
########################################################################################
datasink = pe.Node(nio.DataSink(), name='sinker')
datasink.inputs.parameterization = False
# first link the workflow's output_directory into the datasink.
motion_correction_workflow.connect(input_node, 'output_directory', datasink, 'base_directory')
motion_correction_workflow.connect(input_node, 'sub_id', datasink, 'container')
motion_correction_workflow.connect(select_target_T2_node, 'which_T2', resample_target_T2, 'in_file')
motion_correction_workflow.connect(resample_target_T2, 'out_file', rename, 'in_file')
motion_correction_workflow.connect(rename, 'out_file', datasink, 'reg')
motion_correction_workflow.connect(select_target_T2_node, 'which_T2', rename_T2, 'in_file')
motion_correction_workflow.connect(rename_T2, 'out_file', datasink, 'reg.@T2')
# motion_correction_workflow.connect(regapply_moco_node, 'out_file', datasink, 'mcf.hr')
motion_correction_workflow.connect(resample_epis, 'out_file', datasink, 'mcf')
motion_correction_workflow.connect(motion_correct_all, 'par_file', datasink, 'mcf.motion_pars')
motion_correction_workflow.connect(plot_motion, 'out_file', datasink, 'mcf.motion_plots')
motion_correction_workflow.connect(extend_motion_pars, 'ext_out_file', datasink, 'mcf.ext_motion_pars')
motion_correction_workflow.connect(extend_motion_pars, 'new_out_file', datasink, 'mcf.new_motion_pars')
motion_correction_workflow.connect(bet_T2_node, 'out_file', datasink, 'mcf.T2s')
# motion_correction_workflow.connect(motion_correct_all, 'out_file', datasink, 'mcf.hr_per_session')
# motion_correction_workflow.connect(reg_flirt_N, 'out_file', datasink, 'mcf.T2_per_session')
return motion_correction_workflow
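# Running the workflow (illustrative sketch; the analysis_info values are
# assumptions, only the keys shown are read by the code above):
#   wf = create_motion_correction_workflow(
#       {'target_session': 'ses-01', 'T2_bet_f_value': 0.5, 'T2_bet_g_value': 0.0},
#       name='moco')
#   # wire wf.inputs.inputspec.* (in_files, inplane_T2_files, T2_files_reg_matrices,
#   # output_directory, which_file_is_EPI_space, sub_id, tr), then:
#   # wf.run('MultiProc')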
| mit |