repo_name (stringlengths 6-112) | path (stringlengths 4-204) | copies (stringlengths 1-3) | size (stringlengths 4-6) | content (stringlengths 714-810k) | license (stringclasses 15 values)
---|---|---|---|---|---|
iABC2XYZ/abc | Scripts/Codes/main2.py | 2 | 3935 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Fri Jul 7 08:48:30 2017
@author: A
"""
import numpy as np
import os
import matplotlib.pyplot as plt
import pandas as pd
import tushare as ts
import shutil
def ReadDataCode(indexCode,fileKind):
fileName=indexCode+'.'+fileKind+'.txt'
fileName='./HAHA/Parallel-Get-master/'+fileName #
if os.path.exists(fileName):
return np.flip(np.loadtxt(fileName),0)
else:
return 0
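# Note (added for clarity): np.flip(..., 0) reverses the row order, so the
# readers in this script return each series oldest-first. This assumes the
# text files on disk are stored newest-first, which is how tushare's
# get_hist_data orders its rows.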
def UpdateCodeIndex():
basicStock=ts.get_stock_basics()
indexBasicStock=basicStock.index
with open('idCode.txt','w') as fid:
for iIndexBasicStock in indexBasicStock:
fid.writelines([iIndexBasicStock,'\n'])
def ReadCodeIndex():
with open('idCode.txt','r') as fid:
idCode=fid.readlines()
for nIdCode in xrange(len(idCode)):
idCode[nIdCode]=idCode[nIdCode].strip('\n')
return idCode
def UpdateHistDataCode():
idCode=ReadCodeIndex()
if os.path.exists('data'):
shutil.rmtree('data')
os.mkdir('data')
os.mkdir('data/date')
os.mkdir('data/open')
os.mkdir('data/high')
os.mkdir('data/close')
os.mkdir('data/low')
os.mkdir('data/volume')
os.mkdir('data/price_change')
os.mkdir('data/p_change')
os.mkdir('data/ma5')
os.mkdir('data/ma10')
os.mkdir('data/ma20')
os.mkdir('data/v_ma10')
os.mkdir('data/v_ma20')
os.mkdir('data/turnover')
nCount=1
for iIdCode in idCode:
nCount+=1
print nCount
dataCode=ts.get_hist_data(iIdCode)
if dataCode is None:
print "******************"
continue
fileDate='data/date/'+iIdCode+'.date'
fileOpen='data/open/'+iIdCode+'.open'
fileHigh='data/high/'+iIdCode+'.high'
fileClose='data/close/'+iIdCode+'.close'
fileLow='data/low/'+iIdCode+'.low'
fileVolume='data/volume/'+iIdCode+'.volume'
filePrice_change='data/price_change/'+iIdCode+'.price_change'
fileP_change='data/p_change/'+iIdCode+'.p_change'
fileMa5='data/ma5/'+iIdCode+'.ma5'
fileMa10='data/ma10/'+iIdCode+'.ma10'
fileV_ma10='data/v_ma10/'+iIdCode+'.v_ma10'
fileV_ma20='data/v_ma20/'+iIdCode+'.v_ma20'
fileMa20='data/ma20/'+iIdCode+'.ma20'
fileTurnover='data/turnover/'+iIdCode+'.turnover'
with open(fileDate,'w') as fid:
for iDate in dataCode.index:
fid.writelines(iDate)
fid.writelines('\n')
dataCode.open.to_csv(fileOpen,index=False)
dataCode.high.to_csv(fileHigh,index=False)
dataCode.close.to_csv(fileClose,index=False)
dataCode.low.to_csv(fileLow,index=False)
dataCode.volume.to_csv(fileVolume,index=False)
dataCode.price_change.to_csv(filePrice_change,index=False)
dataCode.p_change.to_csv(fileP_change,index=False)
dataCode.ma5.to_csv(fileMa5,index=False)
dataCode.ma10.to_csv(fileMa10,index=False)
dataCode.ma20.to_csv(fileMa20,index=False)
dataCode.v_ma10.to_csv(fileV_ma10,index=False)
dataCode.v_ma20.to_csv(fileV_ma20,index=False)
dataCode.turnover.to_csv(fileTurnover,index=False)
flagUpdateHist=0
if (flagUpdateHist==1):
UpdateHistDataCode()
def ReadHistDataCode(indexCode,kindCode):
fileName='./data/'+kindCode.lower()+'/'+indexCode+'.'+kindCode.lower()
if(kindCode.lower()=='date'):
with open(fileName,'r') as fid:
dataCodeTmp=fid.readlines()
nDataCodeTmp=len(dataCodeTmp)
dataCode=np.copy(dataCodeTmp)
for nLine in xrange(nDataCodeTmp):
dataCode[nLine]=dataCodeTmp[nDataCodeTmp-nLine-1]
else:
dataCode=np.flip(np.loadtxt(fileName),0)
return dataCode
idCode=ReadCodeIndex()
#print(idCode)
aa=ReadHistDataCode('300014','date')
print aa
| gpl-3.0 |
jmetzen/scikit-learn | sklearn/linear_model/ransac.py | 25 | 14262 | # coding: utf-8
# Author: Johannes Schönberger
#
# License: BSD 3 clause
import numpy as np
from ..base import BaseEstimator, MetaEstimatorMixin, RegressorMixin, clone
from ..utils import check_random_state, check_array, check_consistent_length
from ..utils.random import sample_without_replacement
from ..utils.validation import check_is_fitted
from .base import LinearRegression
_EPSILON = np.spacing(1)
def _dynamic_max_trials(n_inliers, n_samples, min_samples, probability):
"""Determine number trials such that at least one outlier-free subset is
sampled for the given inlier/outlier ratio.
Parameters
----------
n_inliers : int
Number of inliers in the data.
n_samples : int
Total number of samples in the data.
min_samples : int
Minimum number of samples chosen randomly from original data.
probability : float
Probability (confidence) that one outlier-free sample is generated.
Returns
-------
trials : int
Number of trials.
"""
inlier_ratio = n_inliers / float(n_samples)
nom = max(_EPSILON, 1 - probability)
denom = max(_EPSILON, 1 - inlier_ratio ** min_samples)
if nom == 1:
return 0
if denom == 1:
return float('inf')
return abs(float(np.ceil(np.log(nom) / np.log(denom))))
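# Worked example of the formula above (illustrative numbers, not part of the
# original module): with n_inliers=50, n_samples=100, min_samples=2 and
# probability=0.99, the inlier ratio is 0.5, so
#   trials = ceil(log(1 - 0.99) / log(1 - 0.5 ** 2))
#          = ceil(log(0.01) / log(0.75)) ~= ceil(16.01) = 17
# i.e. about 17 random subsets are needed to draw at least one all-inlier
# sample with 99% confidence.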
class RANSACRegressor(BaseEstimator, MetaEstimatorMixin, RegressorMixin):
"""RANSAC (RANdom SAmple Consensus) algorithm.
RANSAC is an iterative algorithm for the robust estimation of parameters
from a subset of inliers from the complete data set. More information can
be found in the general documentation of linear models.
A detailed description of the algorithm can be found in the documentation
of the ``linear_model`` sub-package.
Read more in the :ref:`User Guide <ransac_regression>`.
Parameters
----------
base_estimator : object, optional
Base estimator object which implements the following methods:
* `fit(X, y)`: Fit model to given training data and target values.
* `score(X, y)`: Returns the mean accuracy on the given test data,
which is used for the stop criterion defined by `stop_score`.
Additionally, the score is used to decide which of two equally
large consensus sets is chosen as the better one.
If `base_estimator` is None, then
``base_estimator=sklearn.linear_model.LinearRegression()`` is used for
target values of dtype float.
Note that the current implementation only supports regression
estimators.
min_samples : int (>= 1) or float ([0, 1]), optional
Minimum number of samples chosen randomly from original data. Treated
as an absolute number of samples for `min_samples >= 1`, treated as a
relative number `ceil(min_samples * X.shape[0])` for
`min_samples < 1`. This is typically chosen as the minimal number of
samples necessary to estimate the given `base_estimator`. By default a
``sklearn.linear_model.LinearRegression()`` estimator is assumed and
`min_samples` is chosen as ``X.shape[1] + 1``.
residual_threshold : float, optional
Maximum residual for a data sample to be classified as an inlier.
By default the threshold is chosen as the MAD (median absolute
deviation) of the target values `y`.
is_data_valid : callable, optional
This function is called with the randomly selected data before the
model is fitted to it: `is_data_valid(X, y)`. If its return value is
False the current randomly chosen sub-sample is skipped.
is_model_valid : callable, optional
This function is called with the estimated model and the randomly
selected data: `is_model_valid(model, X, y)`. If its return value is
False the current randomly chosen sub-sample is skipped.
Rejecting samples with this function is computationally costlier than
with `is_data_valid`. `is_model_valid` should therefore only be used if
the estimated model is needed for making the rejection decision.
max_trials : int, optional
Maximum number of iterations for random sample selection.
stop_n_inliers : int, optional
Stop iteration if at least this number of inliers are found.
stop_score : float, optional
Stop iteration if score is greater than or equal to this threshold.
stop_probability : float in range [0, 1], optional
RANSAC iteration stops if at least one outlier-free set of the training
data is sampled in RANSAC. This requires to generate at least N
samples (iterations)::
N >= log(1 - probability) / log(1 - e**m)
where the probability (confidence) is typically set to high value such
as 0.99 (the default) and e is the current fraction of inliers w.r.t.
the total number of samples.
residual_metric : callable, optional
Metric to reduce the dimensionality of the residuals to 1 for
multi-dimensional target values ``y.shape[1] > 1``. By default the sum
of absolute differences is used::
lambda dy: np.sum(np.abs(dy), axis=1)
random_state : integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
Attributes
----------
estimator_ : object
Best fitted model (copy of the `base_estimator` object).
n_trials_ : int
Number of random selection trials until one of the stop criteria is
met. It is always ``<= max_trials``.
inlier_mask_ : bool array of shape [n_samples]
Boolean mask of inliers classified as ``True``.
References
----------
.. [1] http://en.wikipedia.org/wiki/RANSAC
.. [2] http://www.cs.columbia.edu/~belhumeur/courses/compPhoto/ransac.pdf
.. [3] http://www.bmva.org/bmvc/2009/Papers/Paper355/Paper355.pdf
"""
def __init__(self, base_estimator=None, min_samples=None,
residual_threshold=None, is_data_valid=None,
is_model_valid=None, max_trials=100,
stop_n_inliers=np.inf, stop_score=np.inf,
stop_probability=0.99, residual_metric=None,
random_state=None):
self.base_estimator = base_estimator
self.min_samples = min_samples
self.residual_threshold = residual_threshold
self.is_data_valid = is_data_valid
self.is_model_valid = is_model_valid
self.max_trials = max_trials
self.stop_n_inliers = stop_n_inliers
self.stop_score = stop_score
self.stop_probability = stop_probability
self.residual_metric = residual_metric
self.random_state = random_state
def fit(self, X, y):
"""Fit estimator using RANSAC algorithm.
Parameters
----------
X : array-like or sparse matrix, shape [n_samples, n_features]
Training data.
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values.
Raises
------
ValueError
If no valid consensus set could be found. This occurs if
`is_data_valid` and `is_model_valid` return False for all
`max_trials` randomly chosen sub-samples.
"""
X = check_array(X, accept_sparse='csr')
y = check_array(y, ensure_2d=False)
check_consistent_length(X, y)
if self.base_estimator is not None:
base_estimator = clone(self.base_estimator)
else:
base_estimator = LinearRegression()
if self.min_samples is None:
# assume linear model by default
min_samples = X.shape[1] + 1
elif 0 < self.min_samples < 1:
min_samples = np.ceil(self.min_samples * X.shape[0])
elif self.min_samples >= 1:
if self.min_samples % 1 != 0:
raise ValueError("Absolute number of samples must be an "
"integer value.")
min_samples = self.min_samples
else:
raise ValueError("Value for `min_samples` must be scalar and "
"positive.")
if min_samples > X.shape[0]:
raise ValueError("`min_samples` may not be larger than number "
"of samples ``X.shape[0]``.")
if self.stop_probability < 0 or self.stop_probability > 1:
raise ValueError("`stop_probability` must be in range [0, 1].")
if self.residual_threshold is None:
# MAD (median absolute deviation)
residual_threshold = np.median(np.abs(y - np.median(y)))
else:
residual_threshold = self.residual_threshold
if self.residual_metric is None:
residual_metric = lambda dy: np.sum(np.abs(dy), axis=1)
else:
residual_metric = self.residual_metric
random_state = check_random_state(self.random_state)
try:  # Not all estimators accept a random_state
base_estimator.set_params(random_state=random_state)
except ValueError:
pass
n_inliers_best = 0
score_best = np.inf
inlier_mask_best = None
X_inlier_best = None
y_inlier_best = None
# number of data samples
n_samples = X.shape[0]
sample_idxs = np.arange(n_samples)
n_samples, _ = X.shape
for self.n_trials_ in range(1, self.max_trials + 1):
# choose random sample set
subset_idxs = sample_without_replacement(n_samples, min_samples,
random_state=random_state)
X_subset = X[subset_idxs]
y_subset = y[subset_idxs]
# check if random sample set is valid
if (self.is_data_valid is not None
and not self.is_data_valid(X_subset, y_subset)):
continue
# fit model for current random sample set
base_estimator.fit(X_subset, y_subset)
# check if estimated model is valid
if (self.is_model_valid is not None and not
self.is_model_valid(base_estimator, X_subset, y_subset)):
continue
# residuals of all data for current random sample model
y_pred = base_estimator.predict(X)
diff = y_pred - y
if diff.ndim == 1:
diff = diff.reshape(-1, 1)
residuals_subset = residual_metric(diff)
# classify data into inliers and outliers
inlier_mask_subset = residuals_subset < residual_threshold
n_inliers_subset = np.sum(inlier_mask_subset)
# fewer inliers -> skip current random sample
if n_inliers_subset < n_inliers_best:
continue
if n_inliers_subset == 0:
raise ValueError("No inliers found, possible cause is "
"setting residual_threshold ({0}) too low.".format(
self.residual_threshold))
# extract inlier data set
inlier_idxs_subset = sample_idxs[inlier_mask_subset]
X_inlier_subset = X[inlier_idxs_subset]
y_inlier_subset = y[inlier_idxs_subset]
# score of inlier data set
score_subset = base_estimator.score(X_inlier_subset,
y_inlier_subset)
# same number of inliers but worse score -> skip current random
# sample
if (n_inliers_subset == n_inliers_best
and score_subset < score_best):
continue
# save current random sample as best sample
n_inliers_best = n_inliers_subset
score_best = score_subset
inlier_mask_best = inlier_mask_subset
X_inlier_best = X_inlier_subset
y_inlier_best = y_inlier_subset
# break if sufficient number of inliers or score is reached
if (n_inliers_best >= self.stop_n_inliers
or score_best >= self.stop_score
or self.n_trials_
>= _dynamic_max_trials(n_inliers_best, n_samples,
min_samples,
self.stop_probability)):
break
# if none of the iterations met the required criteria
if inlier_mask_best is None:
raise ValueError(
"RANSAC could not find valid consensus set, because"
" either the `residual_threshold` rejected all the samples or"
" `is_data_valid` and `is_model_valid` returned False for all"
" `max_trials` randomly ""chosen sub-samples. Consider "
"relaxing the ""constraints.")
# estimate final model using all inliers
base_estimator.fit(X_inlier_best, y_inlier_best)
self.estimator_ = base_estimator
self.inlier_mask_ = inlier_mask_best
return self
def predict(self, X):
"""Predict using the estimated model.
This is a wrapper for `estimator_.predict(X)`.
Parameters
----------
X : numpy array of shape [n_samples, n_features]
Returns
-------
y : array, shape = [n_samples] or [n_samples, n_targets]
Returns predicted values.
"""
check_is_fitted(self, 'estimator_')
return self.estimator_.predict(X)
def score(self, X, y):
"""Returns the score of the prediction.
This is a wrapper for `estimator_.score(X, y)`.
Parameters
----------
X : numpy array or sparse matrix of shape [n_samples, n_features]
Training data.
y : array, shape = [n_samples] or [n_samples, n_targets]
Target values.
Returns
-------
z : float
Score of the prediction.
"""
check_is_fitted(self, 'estimator_')
return self.estimator_.score(X, y)
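# A minimal usage sketch (appended for illustration only; it is not part of
# this module and assumes a scikit-learn installation of the same vintage):
#
#   import numpy as np
#   from sklearn.linear_model import RANSACRegressor
#
#   rng = np.random.RandomState(0)
#   X = rng.normal(size=(100, 1))
#   y = 3 * X.ravel() + rng.normal(size=100)
#   y[::10] += 30                     # inject gross outliers
#   ransac = RANSACRegressor(residual_threshold=5.0, random_state=0)
#   ransac.fit(X, y)
#   print(ransac.estimator_.coef_)    # close to [ 3.] despite the outliers
#   print(ransac.inlier_mask_.sum())  # number of samples kept as inliers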
| bsd-3-clause |
kevingo/ml-az | chapter3/simple_linear_regression.py | 1 | 1426 | # data pre-processing
# Import Library
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import os
# import data
dataset = pd.read_csv('Salary_Data.csv')
X = dataset.iloc[:, :-1].values # independent variable
Y = dataset.iloc[:, 1].values # dependent variable
# Split data into training / testing dataset
from sklearn.cross_validation import train_test_split
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size = 0.3, random_state = 0)
# Feature Scaling
"""from sklearn.preprocessing import StandardScaler
sc_X = StandardScaler()
X_train = sc_X.fit_transform(X_train)
X_test = sc_X.transform(X_test)
print X_train"""
# Fitting simple linear regression model to the Training Set
from sklearn.linear_model import LinearRegression
regressor = LinearRegression()
regressor.fit(X_train, Y_train)
# Predict the test set results
Y_predict = regressor.predict(X_test)
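# Optional sanity check (not in the original tutorial): inspect the fitted
# intercept/slope and the R^2 score on the held-out data, e.g.
#   print regressor.intercept_, regressor.coef_
#   print regressor.score(X_test, Y_test)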
# Visualization of the Training Set results
plt.scatter(X_train, Y_train, color = 'red')
plt.plot(X_train, regressor.predict(X_train), color = 'blue')
plt.title('Salary vs. Experience (Training Set)')
plt.xlabel('Years of experience')
plt.ylabel('Salary')
plt.show()
# Visualization of the Test Set results
plt.scatter(X_test, Y_test, color = 'red')
plt.plot(X_train, regressor.predict(X_train), color = 'blue')
plt.title('Salary vs. Experience (Test Set)')
plt.xlabel('Years of experience')
plt.ylabel('Salary')
plt.show()
| mit |
dolaameng/keras | examples/variational_autoencoder_deconv.py | 1 | 6615 | '''This script demonstrates how to build a variational autoencoder
with Keras and deconvolution layers.
Reference: "Auto-Encoding Variational Bayes" https://arxiv.org/abs/1312.6114
'''
import numpy as np
import matplotlib.pyplot as plt
from keras.layers import Input, Dense, Lambda, Flatten, Reshape
from keras.layers import Convolution2D, Deconvolution2D
from keras.models import Model
from keras import backend as K
from keras import objectives
from keras.datasets import mnist
# input image dimensions
img_rows, img_cols, img_chns = 28, 28, 1
# number of convolutional filters to use
nb_filters = 64
# convolution kernel size
nb_conv = 3
batch_size = 100
if K.image_dim_ordering() == 'th':
original_img_size = (img_chns, img_rows, img_cols)
else:
original_img_size = (img_rows, img_cols, img_chns)
latent_dim = 2
intermediate_dim = 128
epsilon_std = 1.0
nb_epoch = 5
x = Input(batch_shape=(batch_size,) + original_img_size)
conv_1 = Convolution2D(img_chns, 2, 2, border_mode='same', activation='relu')(x)
conv_2 = Convolution2D(nb_filters, 2, 2,
border_mode='same', activation='relu',
subsample=(2, 2))(conv_1)
conv_3 = Convolution2D(nb_filters, nb_conv, nb_conv,
border_mode='same', activation='relu',
subsample=(1, 1))(conv_2)
conv_4 = Convolution2D(nb_filters, nb_conv, nb_conv,
border_mode='same', activation='relu',
subsample=(1, 1))(conv_3)
flat = Flatten()(conv_4)
hidden = Dense(intermediate_dim, activation='relu')(flat)
z_mean = Dense(latent_dim)(hidden)
z_log_var = Dense(latent_dim)(hidden)
def sampling(args):
z_mean, z_log_var = args
epsilon = K.random_normal(shape=(batch_size, latent_dim),
mean=0., std=epsilon_std)
return z_mean + K.exp(z_log_var) * epsilon
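# A plain-NumPy sketch of the reparameterization trick implemented by
# `sampling` above (illustrative numbers only):
#
#   z_mean, z_log_var = 0.3, -1.2
#   epsilon = np.random.normal(0.0, epsilon_std)
#   z = z_mean + np.exp(z_log_var) * epsilon
#
# The random draw is expressed as a deterministic function of (z_mean,
# z_log_var) plus external noise, so gradients can flow through both latent
# statistics during training.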
# note that "output_shape" isn't necessary with the TensorFlow backend
# so you could write `Lambda(sampling)([z_mean, z_log_var])`
z = Lambda(sampling, output_shape=(latent_dim,))([z_mean, z_log_var])
# we instantiate these layers separately so as to reuse them later
decoder_hid = Dense(intermediate_dim, activation='relu')
decoder_upsample = Dense(nb_filters * 14 * 14, activation='relu')
if K.image_dim_ordering() == 'th':
output_shape = (batch_size, nb_filters, 14, 14)
else:
output_shape = (batch_size, 14, 14, nb_filters)
decoder_reshape = Reshape(output_shape[1:])
decoder_deconv_1 = Deconvolution2D(nb_filters, nb_conv, nb_conv,
output_shape,
border_mode='same',
subsample=(1, 1),
activation='relu')
decoder_deconv_2 = Deconvolution2D(nb_filters, nb_conv, nb_conv,
output_shape,
border_mode='same',
subsample=(1, 1),
activation='relu')
if K.image_dim_ordering() == 'th':
output_shape = (batch_size, nb_filters, 29, 29)
else:
output_shape = (batch_size, 29, 29, nb_filters)
decoder_deconv_3_upsamp = Deconvolution2D(nb_filters, 2, 2,
output_shape,
border_mode='valid',
subsample=(2, 2),
activation='relu')
decoder_mean_squash = Convolution2D(img_chns, 2, 2,
border_mode='valid',
activation='sigmoid')
hid_decoded = decoder_hid(z)
up_decoded = decoder_upsample(hid_decoded)
reshape_decoded = decoder_reshape(up_decoded)
deconv_1_decoded = decoder_deconv_1(reshape_decoded)
deconv_2_decoded = decoder_deconv_2(deconv_1_decoded)
x_decoded_relu = decoder_deconv_3_upsamp(deconv_2_decoded)
x_decoded_mean_squash = decoder_mean_squash(x_decoded_relu)
def vae_loss(x, x_decoded_mean):
# NOTE: binary_crossentropy expects a batch_size by dim
# for x and x_decoded_mean, so we MUST flatten these!
x = K.flatten(x)
x_decoded_mean = K.flatten(x_decoded_mean)
xent_loss = img_rows * img_cols * objectives.binary_crossentropy(x, x_decoded_mean)
kl_loss = - 0.5 * K.mean(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)
return xent_loss + kl_loss
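# For reference, the kl_loss term above is the closed-form KL divergence
# between the approximate posterior N(z_mean, exp(z_log_var)) and a standard
# normal prior,
#   KL(N(mu, sigma^2) || N(0, 1)) = -0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2),
# with log(sigma^2) = z_log_var; the code takes the mean over the latent
# dimensions rather than the sum.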
vae = Model(x, x_decoded_mean_squash)
vae.compile(optimizer='rmsprop', loss=vae_loss)
vae.summary()
# train the VAE on MNIST digits
(x_train, _), (x_test, y_test) = mnist.load_data()
x_train = x_train.astype('float32') / 255.
x_train = x_train.reshape((x_train.shape[0],) + original_img_size)
x_test = x_test.astype('float32') / 255.
x_test = x_test.reshape((x_test.shape[0],) + original_img_size)
print('x_train.shape:', x_train.shape)
vae.fit(x_train, x_train,
shuffle=True,
nb_epoch=nb_epoch,
batch_size=batch_size,
validation_data=(x_test, x_test))
# build a model to project inputs on the latent space
encoder = Model(x, z_mean)
# display a 2D plot of the digit classes in the latent space
x_test_encoded = encoder.predict(x_test, batch_size=batch_size)
plt.figure(figsize=(6, 6))
plt.scatter(x_test_encoded[:, 0], x_test_encoded[:, 1], c=y_test)
plt.colorbar()
plt.show()
# build a digit generator that can sample from the learned distribution
decoder_input = Input(shape=(latent_dim,))
_hid_decoded = decoder_hid(decoder_input)
_up_decoded = decoder_upsample(_hid_decoded)
_reshape_decoded = decoder_reshape(_up_decoded)
_deconv_1_decoded = decoder_deconv_1(_reshape_decoded)
_deconv_2_decoded = decoder_deconv_2(_deconv_1_decoded)
_x_decoded_relu = decoder_deconv_3_upsamp(_deconv_2_decoded)
_x_decoded_mean_squash = decoder_mean_squash(_x_decoded_relu)
generator = Model(decoder_input, _x_decoded_mean_squash)
# display a 2D manifold of the digits
n = 15 # figure with 15x15 digits
digit_size = 28
figure = np.zeros((digit_size * n, digit_size * n))
# we will sample n points within [-15, 15] standard deviations
grid_x = np.linspace(-15, 15, n)
grid_y = np.linspace(-15, 15, n)
for i, yi in enumerate(grid_x):
for j, xi in enumerate(grid_y):
z_sample = np.array([[xi, yi]])
z_sample = np.tile(z_sample, batch_size).reshape(batch_size, 2)
x_decoded = generator.predict(z_sample, batch_size=batch_size)
digit = x_decoded[0].reshape(digit_size, digit_size)
figure[i * digit_size: (i + 1) * digit_size,
j * digit_size: (j + 1) * digit_size] = digit
plt.figure(figsize=(10, 10))
plt.imshow(figure)
plt.show()
| mit |
Fireblend/scikit-learn | sklearn/tests/test_grid_search.py | 83 | 28713 | """
Testing for grid search module (sklearn.grid_search)
"""
from collections import Iterable, Sized
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.externals.six.moves import xrange
from itertools import chain, product
import pickle
import sys
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_false, assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.mocking import CheckingClassifier, MockDataFrame
from scipy.stats import bernoulli, expon, uniform
from sklearn.externals.six.moves import zip
from sklearn.base import BaseEstimator
from sklearn.datasets import make_classification
from sklearn.datasets import make_blobs
from sklearn.datasets import make_multilabel_classification
from sklearn.grid_search import (GridSearchCV, RandomizedSearchCV,
ParameterGrid, ParameterSampler,
ChangedBehaviorWarning)
from sklearn.svm import LinearSVC, SVC
from sklearn.tree import DecisionTreeRegressor
from sklearn.tree import DecisionTreeClassifier
from sklearn.cluster import KMeans
from sklearn.neighbors import KernelDensity
from sklearn.metrics import f1_score
from sklearn.metrics import make_scorer
from sklearn.metrics import roc_auc_score
from sklearn.cross_validation import KFold, StratifiedKFold, FitFailedWarning
from sklearn.preprocessing import Imputer
from sklearn.pipeline import Pipeline
# Neither of the following two estimators inherit from BaseEstimator,
# to test hyperparameter search on user-defined classifiers.
class MockClassifier(object):
"""Dummy classifier to test the cross-validation"""
def __init__(self, foo_param=0):
self.foo_param = foo_param
def fit(self, X, Y):
assert_true(len(X) == len(Y))
return self
def predict(self, T):
return T.shape[0]
predict_proba = predict
decision_function = predict
transform = predict
def score(self, X=None, Y=None):
if self.foo_param > 1:
score = 1.
else:
score = 0.
return score
def get_params(self, deep=False):
return {'foo_param': self.foo_param}
def set_params(self, **params):
self.foo_param = params['foo_param']
return self
class LinearSVCNoScore(LinearSVC):
"""An LinearSVC classifier that has no score method."""
@property
def score(self):
raise AttributeError
X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
y = np.array([1, 1, 2, 2])
def assert_grid_iter_equals_getitem(grid):
assert_equal(list(grid), [grid[i] for i in range(len(grid))])
def test_parameter_grid():
# Test basic properties of ParameterGrid.
params1 = {"foo": [1, 2, 3]}
grid1 = ParameterGrid(params1)
assert_true(isinstance(grid1, Iterable))
assert_true(isinstance(grid1, Sized))
assert_equal(len(grid1), 3)
assert_grid_iter_equals_getitem(grid1)
params2 = {"foo": [4, 2],
"bar": ["ham", "spam", "eggs"]}
grid2 = ParameterGrid(params2)
assert_equal(len(grid2), 6)
# loop to assert we can iterate over the grid multiple times
for i in xrange(2):
# tuple + chain transforms {"a": 1, "b": 2} to ("a", 1, "b", 2)
points = set(tuple(chain(*(sorted(p.items())))) for p in grid2)
assert_equal(points,
set(("bar", x, "foo", y)
for x, y in product(params2["bar"], params2["foo"])))
assert_grid_iter_equals_getitem(grid2)
# Special case: empty grid (useful to get default estimator settings)
empty = ParameterGrid({})
assert_equal(len(empty), 1)
assert_equal(list(empty), [{}])
assert_grid_iter_equals_getitem(empty)
assert_raises(IndexError, lambda: empty[1])
has_empty = ParameterGrid([{'C': [1, 10]}, {}, {'C': [.5]}])
assert_equal(len(has_empty), 4)
assert_equal(list(has_empty), [{'C': 1}, {'C': 10}, {}, {'C': .5}])
assert_grid_iter_equals_getitem(has_empty)
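# For reference, ParameterGrid expands a dict of lists into the full cross
# product of settings; e.g. params2 above yields the six dicts
#   {'bar': 'ham', 'foo': 4}, {'bar': 'ham', 'foo': 2},
#   {'bar': 'spam', 'foo': 4}, {'bar': 'spam', 'foo': 2},
#   {'bar': 'eggs', 'foo': 4}, {'bar': 'eggs', 'foo': 2}
# (iteration order may differ; the test above only checks the set of points).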
def test_grid_search():
# Test that the best estimator contains the right value for foo_param
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, verbose=3)
# make sure it selects the smallest parameter in case of ties
old_stdout = sys.stdout
sys.stdout = StringIO()
grid_search.fit(X, y)
sys.stdout = old_stdout
assert_equal(grid_search.best_estimator_.foo_param, 2)
for i, foo_i in enumerate([1, 2, 3]):
assert_true(grid_search.grid_scores_[i][0]
== {'foo_param': foo_i})
# Smoke test the score etc:
grid_search.score(X, y)
grid_search.predict_proba(X)
grid_search.decision_function(X)
grid_search.transform(X)
# Test exception handling on scoring
grid_search.scoring = 'sklearn'
assert_raises(ValueError, grid_search.fit, X, y)
@ignore_warnings
def test_grid_search_no_score():
# Test grid-search on classifier that has no score function.
clf = LinearSVC(random_state=0)
X, y = make_blobs(random_state=0, centers=2)
Cs = [.1, 1, 10]
clf_no_score = LinearSVCNoScore(random_state=0)
grid_search = GridSearchCV(clf, {'C': Cs}, scoring='accuracy')
grid_search.fit(X, y)
grid_search_no_score = GridSearchCV(clf_no_score, {'C': Cs},
scoring='accuracy')
# smoketest grid search
grid_search_no_score.fit(X, y)
# check that best params are equal
assert_equal(grid_search_no_score.best_params_, grid_search.best_params_)
# check that we can call score and that it gives the correct result
assert_equal(grid_search.score(X, y), grid_search_no_score.score(X, y))
# giving no scoring function raises an error
grid_search_no_score = GridSearchCV(clf_no_score, {'C': Cs})
assert_raise_message(TypeError, "no scoring", grid_search_no_score.fit,
[[1]])
def test_grid_search_score_method():
X, y = make_classification(n_samples=100, n_classes=2, flip_y=.2,
random_state=0)
clf = LinearSVC(random_state=0)
grid = {'C': [.1]}
search_no_scoring = GridSearchCV(clf, grid, scoring=None).fit(X, y)
search_accuracy = GridSearchCV(clf, grid, scoring='accuracy').fit(X, y)
search_no_score_method_auc = GridSearchCV(LinearSVCNoScore(), grid,
scoring='roc_auc').fit(X, y)
search_auc = GridSearchCV(clf, grid, scoring='roc_auc').fit(X, y)
# Check warning only occurs in situation where behavior changed:
# estimator requires score method to compete with scoring parameter
score_no_scoring = assert_no_warnings(search_no_scoring.score, X, y)
score_accuracy = assert_warns(ChangedBehaviorWarning,
search_accuracy.score, X, y)
score_no_score_auc = assert_no_warnings(search_no_score_method_auc.score,
X, y)
score_auc = assert_warns(ChangedBehaviorWarning,
search_auc.score, X, y)
# ensure the test is sane
assert_true(score_auc < 1.0)
assert_true(score_accuracy < 1.0)
assert_not_equal(score_auc, score_accuracy)
assert_almost_equal(score_accuracy, score_no_scoring)
assert_almost_equal(score_auc, score_no_score_auc)
def test_trivial_grid_scores():
# Test search over a "grid" with only one point.
# Non-regression test: grid_scores_ wouldn't be set by GridSearchCV.
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1]})
grid_search.fit(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
random_search = RandomizedSearchCV(clf, {'foo_param': [0]}, n_iter=1)
random_search.fit(X, y)
assert_true(hasattr(random_search, "grid_scores_"))
def test_no_refit():
# Test that grid search can be used for model selection only
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, refit=False)
grid_search.fit(X, y)
assert_true(hasattr(grid_search, "best_params_"))
def test_grid_search_error():
# Test that grid search will capture errors on data with different
# length
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
assert_raises(ValueError, cv.fit, X_[:180], y_)
def test_grid_search_iid():
# test the iid parameter
# noise-free simple 2d-data
X, y = make_blobs(centers=[[0, 0], [1, 0], [0, 1], [1, 1]], random_state=0,
cluster_std=0.1, shuffle=False, n_samples=80)
# split dataset into two folds that are not iid
# first one contains data of all 4 blobs, second only from two.
mask = np.ones(X.shape[0], dtype=np.bool)
mask[np.where(y == 1)[0][::2]] = 0
mask[np.where(y == 2)[0][::2]] = 0
# this leads to perfect classification on one fold and a score of 1/3 on
# the other
svm = SVC(kernel='linear')
# create "cv" for splits
cv = [[mask, ~mask], [~mask, mask]]
# once with iid=True (default)
grid_search = GridSearchCV(svm, param_grid={'C': [1, 10]}, cv=cv)
grid_search.fit(X, y)
first = grid_search.grid_scores_[0]
assert_equal(first.parameters['C'], 1)
assert_array_almost_equal(first.cv_validation_scores, [1, 1. / 3.])
# for first split, 1/4 of dataset is in test, for second 3/4.
# take weighted average
assert_almost_equal(first.mean_validation_score,
1 * 1. / 4. + 1. / 3. * 3. / 4.)
# once with iid=False
grid_search = GridSearchCV(svm, param_grid={'C': [1, 10]}, cv=cv,
iid=False)
grid_search.fit(X, y)
first = grid_search.grid_scores_[0]
assert_equal(first.parameters['C'], 1)
# scores are the same as above
assert_array_almost_equal(first.cv_validation_scores, [1, 1. / 3.])
# averaged score is just mean of scores
assert_almost_equal(first.mean_validation_score,
np.mean(first.cv_validation_scores))
def test_grid_search_one_grid_point():
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
param_dict = {"C": [1.0], "kernel": ["rbf"], "gamma": [0.1]}
clf = SVC()
cv = GridSearchCV(clf, param_dict)
cv.fit(X_, y_)
clf = SVC(C=1.0, kernel="rbf", gamma=0.1)
clf.fit(X_, y_)
assert_array_equal(clf.dual_coef_, cv.best_estimator_.dual_coef_)
def test_grid_search_bad_param_grid():
param_dict = {"C": 1.0}
clf = SVC()
assert_raises(ValueError, GridSearchCV, clf, param_dict)
param_dict = {"C": []}
clf = SVC()
assert_raises(ValueError, GridSearchCV, clf, param_dict)
param_dict = {"C": np.ones(6).reshape(3, 2)}
clf = SVC()
assert_raises(ValueError, GridSearchCV, clf, param_dict)
def test_grid_search_sparse():
# Test that grid search works with both dense and sparse matrices
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(X_[:180], y_[:180])
y_pred = cv.predict(X_[180:])
C = cv.best_estimator_.C
X_ = sp.csr_matrix(X_)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(X_[:180].tocoo(), y_[:180])
y_pred2 = cv.predict(X_[180:])
C2 = cv.best_estimator_.C
assert_true(np.mean(y_pred == y_pred2) >= .9)
assert_equal(C, C2)
def test_grid_search_sparse_scoring():
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring="f1")
cv.fit(X_[:180], y_[:180])
y_pred = cv.predict(X_[180:])
C = cv.best_estimator_.C
X_ = sp.csr_matrix(X_)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring="f1")
cv.fit(X_[:180], y_[:180])
y_pred2 = cv.predict(X_[180:])
C2 = cv.best_estimator_.C
assert_array_equal(y_pred, y_pred2)
assert_equal(C, C2)
# Smoke test the score
# np.testing.assert_allclose(f1_score(cv.predict(X_[:180]), y[:180]),
# cv.score(X_[:180], y[:180]))
# test loss where greater is worse
def f1_loss(y_true_, y_pred_):
return -f1_score(y_true_, y_pred_)
F1Loss = make_scorer(f1_loss, greater_is_better=False)
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring=F1Loss)
cv.fit(X_[:180], y_[:180])
y_pred3 = cv.predict(X_[180:])
C3 = cv.best_estimator_.C
assert_equal(C, C3)
assert_array_equal(y_pred, y_pred3)
def test_grid_search_precomputed_kernel():
# Test that grid search works when the input features are given in the
# form of a precomputed kernel matrix
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
# compute the training kernel matrix corresponding to the linear kernel
K_train = np.dot(X_[:180], X_[:180].T)
y_train = y_[:180]
clf = SVC(kernel='precomputed')
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(K_train, y_train)
assert_true(cv.best_score_ >= 0)
# compute the test kernel matrix
K_test = np.dot(X_[180:], X_[:180].T)
y_test = y_[180:]
y_pred = cv.predict(K_test)
assert_true(np.mean(y_pred == y_test) >= 0)
# test error is raised when the precomputed kernel is not array-like
# or sparse
assert_raises(ValueError, cv.fit, K_train.tolist(), y_train)
def test_grid_search_precomputed_kernel_error_nonsquare():
# Test that grid search returns an error with a non-square precomputed
# training kernel matrix
K_train = np.zeros((10, 20))
y_train = np.ones((10, ))
clf = SVC(kernel='precomputed')
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
assert_raises(ValueError, cv.fit, K_train, y_train)
def test_grid_search_precomputed_kernel_error_kernel_function():
# Test that grid search returns an error when using a kernel_function
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
kernel_function = lambda x1, x2: np.dot(x1, x2.T)
clf = SVC(kernel=kernel_function)
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
assert_raises(ValueError, cv.fit, X_, y_)
class BrokenClassifier(BaseEstimator):
"""Broken classifier that cannot be fit twice"""
def __init__(self, parameter=None):
self.parameter = parameter
def fit(self, X, y):
assert_true(not hasattr(self, 'has_been_fit_'))
self.has_been_fit_ = True
def predict(self, X):
return np.zeros(X.shape[0])
def test_refit():
# Regression test for bug in refitting
# Simulates re-fitting a broken estimator; this used to break with
# sparse SVMs.
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = GridSearchCV(BrokenClassifier(), [{'parameter': [0, 1]}],
scoring="precision", refit=True)
clf.fit(X, y)
def test_gridsearch_nd():
# Pass X as list in GridSearchCV
X_4d = np.arange(10 * 5 * 3 * 2).reshape(10, 5, 3, 2)
y_3d = np.arange(10 * 7 * 11).reshape(10, 7, 11)
check_X = lambda x: x.shape[1:] == (5, 3, 2)
check_y = lambda x: x.shape[1:] == (7, 11)
clf = CheckingClassifier(check_X=check_X, check_y=check_y)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]})
grid_search.fit(X_4d, y_3d).score(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_X_as_list():
# Pass X as list in GridSearchCV
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = CheckingClassifier(check_X=lambda x: isinstance(x, list))
cv = KFold(n=len(X), n_folds=3)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=cv)
grid_search.fit(X.tolist(), y).score(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_y_as_list():
# Pass y as list in GridSearchCV
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = CheckingClassifier(check_y=lambda x: isinstance(x, list))
cv = KFold(n=len(X), n_folds=3)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=cv)
grid_search.fit(X, y.tolist()).score(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_pandas_input():
# check cross_val_score doesn't destroy pandas dataframe
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((DataFrame, Series))
except ImportError:
pass
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
for InputFeatureType, TargetType in types:
# X dataframe, y series
X_df, y_ser = InputFeatureType(X), TargetType(y)
check_df = lambda x: isinstance(x, InputFeatureType)
check_series = lambda x: isinstance(x, TargetType)
clf = CheckingClassifier(check_X=check_df, check_y=check_series)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]})
grid_search.fit(X_df, y_ser).score(X_df, y_ser)
grid_search.predict(X_df)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_unsupervised_grid_search():
# test grid-search with unsupervised estimator
X, y = make_blobs(random_state=0)
km = KMeans(random_state=0)
grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4]),
scoring='adjusted_rand_score')
grid_search.fit(X, y)
# ARI can find the right number :)
assert_equal(grid_search.best_params_["n_clusters"], 3)
# Now without a score, and without y
grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4]))
grid_search.fit(X)
assert_equal(grid_search.best_params_["n_clusters"], 4)
def test_gridsearch_no_predict():
# test grid-search with an estimator without predict.
# slight duplication of a test from KDE
def custom_scoring(estimator, X):
return 42 if estimator.bandwidth == .1 else 0
X, _ = make_blobs(cluster_std=.1, random_state=1,
centers=[[0, 1], [1, 0], [0, 0]])
search = GridSearchCV(KernelDensity(),
param_grid=dict(bandwidth=[.01, .1, 1]),
scoring=custom_scoring)
search.fit(X)
assert_equal(search.best_params_['bandwidth'], .1)
assert_equal(search.best_score_, 42)
def test_param_sampler():
# test basic properties of param sampler
param_distributions = {"kernel": ["rbf", "linear"],
"C": uniform(0, 1)}
sampler = ParameterSampler(param_distributions=param_distributions,
n_iter=10, random_state=0)
samples = [x for x in sampler]
assert_equal(len(samples), 10)
for sample in samples:
assert_true(sample["kernel"] in ["rbf", "linear"])
assert_true(0 <= sample["C"] <= 1)
def test_randomized_search_grid_scores():
# Make a dataset with a lot of noise to get various kind of prediction
# errors across CV folds and parameter settings
X, y = make_classification(n_samples=200, n_features=100, n_informative=3,
random_state=0)
# XXX: as of today (scipy 0.12) it's not possible to set the random seed
# of scipy.stats distributions: the assertions in this test should thus
# not depend on the randomization
params = dict(C=expon(scale=10),
gamma=expon(scale=0.1))
n_cv_iter = 3
n_search_iter = 30
search = RandomizedSearchCV(SVC(), n_iter=n_search_iter, cv=n_cv_iter,
param_distributions=params, iid=False)
search.fit(X, y)
assert_equal(len(search.grid_scores_), n_search_iter)
# Check consistency of the structure of each cv_score item
for cv_score in search.grid_scores_:
assert_equal(len(cv_score.cv_validation_scores), n_cv_iter)
# Because we set iid to False, the mean_validation score is the
# mean of the fold mean scores instead of the aggregate sample-wise
# mean score
assert_almost_equal(np.mean(cv_score.cv_validation_scores),
cv_score.mean_validation_score)
assert_equal(list(sorted(cv_score.parameters.keys())),
list(sorted(params.keys())))
# Check the consistency with the best_score_ and best_params_ attributes
sorted_grid_scores = list(sorted(search.grid_scores_,
key=lambda x: x.mean_validation_score))
best_score = sorted_grid_scores[-1].mean_validation_score
assert_equal(search.best_score_, best_score)
tied_best_params = [s.parameters for s in sorted_grid_scores
if s.mean_validation_score == best_score]
assert_true(search.best_params_ in tied_best_params,
"best_params_={0} is not part of the"
" tied best models: {1}".format(
search.best_params_, tied_best_params))
def test_grid_search_score_consistency():
# test that correct scores are used
clf = LinearSVC(random_state=0)
X, y = make_blobs(random_state=0, centers=2)
Cs = [.1, 1, 10]
for score in ['f1', 'roc_auc']:
grid_search = GridSearchCV(clf, {'C': Cs}, scoring=score)
grid_search.fit(X, y)
cv = StratifiedKFold(n_folds=3, y=y)
for C, scores in zip(Cs, grid_search.grid_scores_):
clf.set_params(C=C)
scores = scores[2] # get the separate runs from grid scores
i = 0
for train, test in cv:
clf.fit(X[train], y[train])
if score == "f1":
correct_score = f1_score(y[test], clf.predict(X[test]))
elif score == "roc_auc":
dec = clf.decision_function(X[test])
correct_score = roc_auc_score(y[test], dec)
assert_almost_equal(correct_score, scores[i])
i += 1
def test_pickle():
# Test that a fit search can be pickled
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, refit=True)
grid_search.fit(X, y)
pickle.dumps(grid_search) # smoke test
random_search = RandomizedSearchCV(clf, {'foo_param': [1, 2, 3]},
refit=True, n_iter=3)
random_search.fit(X, y)
pickle.dumps(random_search) # smoke test
def test_grid_search_with_multioutput_data():
# Test search with multi-output estimator
X, y = make_multilabel_classification(random_state=0)
est_parameters = {"max_depth": [1, 2, 3, 4]}
cv = KFold(y.shape[0], random_state=0)
estimators = [DecisionTreeRegressor(random_state=0),
DecisionTreeClassifier(random_state=0)]
# Test with grid search cv
for est in estimators:
grid_search = GridSearchCV(est, est_parameters, cv=cv)
grid_search.fit(X, y)
for parameters, _, cv_validation_scores in grid_search.grid_scores_:
est.set_params(**parameters)
for i, (train, test) in enumerate(cv):
est.fit(X[train], y[train])
correct_score = est.score(X[test], y[test])
assert_almost_equal(correct_score,
cv_validation_scores[i])
# Test with a randomized search
for est in estimators:
random_search = RandomizedSearchCV(est, est_parameters,
cv=cv, n_iter=3)
random_search.fit(X, y)
for parameters, _, cv_validation_scores in random_search.grid_scores_:
est.set_params(**parameters)
for i, (train, test) in enumerate(cv):
est.fit(X[train], y[train])
correct_score = est.score(X[test], y[test])
assert_almost_equal(correct_score,
cv_validation_scores[i])
def test_predict_proba_disabled():
# Test predict_proba when disabled on estimator.
X = np.arange(20).reshape(5, -1)
y = [0, 0, 1, 1, 1]
clf = SVC(probability=False)
gs = GridSearchCV(clf, {}, cv=2).fit(X, y)
assert_false(hasattr(gs, "predict_proba"))
def test_grid_search_allows_nans():
# Test GridSearchCV with Imputer
X = np.arange(20, dtype=np.float64).reshape(5, -1)
X[2, :] = np.nan
y = [0, 0, 1, 1, 1]
p = Pipeline([
('imputer', Imputer(strategy='mean', missing_values='NaN')),
('classifier', MockClassifier()),
])
GridSearchCV(p, {'classifier__foo_param': [1, 2, 3]}, cv=2).fit(X, y)
class FailingClassifier(BaseEstimator):
"""Classifier that raises a ValueError on fit()"""
FAILING_PARAMETER = 2
def __init__(self, parameter=None):
self.parameter = parameter
def fit(self, X, y=None):
if self.parameter == FailingClassifier.FAILING_PARAMETER:
raise ValueError("Failing classifier failed as required")
def predict(self, X):
return np.zeros(X.shape[0])
def test_grid_search_failing_classifier():
# GridSearchCV with on_error != 'raise'
# Ensures that a warning is raised and score reset where appropriate.
X, y = make_classification(n_samples=20, n_features=10, random_state=0)
clf = FailingClassifier()
# refit=False because we only want to check that errors caused by fits
# to individual folds will be caught and warnings raised instead. If
# refit was done, then an exception would be raised on refit and not
# caught by grid_search (expected behavior), and this would cause an
# error in this test.
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score=0.0)
assert_warns(FitFailedWarning, gs.fit, X, y)
# Ensure that grid scores were set to zero as required for those fits
# that are expected to fail.
assert all(np.all(this_point.cv_validation_scores == 0.0)
for this_point in gs.grid_scores_
if this_point.parameters['parameter'] ==
FailingClassifier.FAILING_PARAMETER)
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score=float('nan'))
assert_warns(FitFailedWarning, gs.fit, X, y)
assert all(np.all(np.isnan(this_point.cv_validation_scores))
for this_point in gs.grid_scores_
if this_point.parameters['parameter'] ==
FailingClassifier.FAILING_PARAMETER)
def test_grid_search_failing_classifier_raise():
# GridSearchCV with on_error == 'raise' raises the error
X, y = make_classification(n_samples=20, n_features=10, random_state=0)
clf = FailingClassifier()
# refit=False because we want to test the behaviour of the grid search part
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score='raise')
# FailingClassifier issues a ValueError so this is what we look for.
assert_raises(ValueError, gs.fit, X, y)
def test_parameters_sampler_replacement():
# raise error if n_iter too large
params = {'first': [0, 1], 'second': ['a', 'b', 'c']}
sampler = ParameterSampler(params, n_iter=7)
assert_raises(ValueError, list, sampler)
# degenerates to GridSearchCV if n_iter the same as grid_size
sampler = ParameterSampler(params, n_iter=6)
samples = list(sampler)
assert_equal(len(samples), 6)
for values in ParameterGrid(params):
assert_true(values in samples)
# test sampling without replacement in a large grid
params = {'a': range(10), 'b': range(10), 'c': range(10)}
sampler = ParameterSampler(params, n_iter=99, random_state=42)
samples = list(sampler)
assert_equal(len(samples), 99)
hashable_samples = ["a%db%dc%d" % (p['a'], p['b'], p['c'])
for p in samples]
assert_equal(len(set(hashable_samples)), 99)
# doesn't go into infinite loops
params_distribution = {'first': bernoulli(.5), 'second': ['a', 'b', 'c']}
sampler = ParameterSampler(params_distribution, n_iter=7)
samples = list(sampler)
assert_equal(len(samples), 7)
| bsd-3-clause |
kagayakidan/scikit-learn | examples/applications/plot_model_complexity_influence.py | 323 | 6372 | """
==========================
Model Complexity Influence
==========================
Demonstrate how model complexity influences both prediction accuracy and
computational performance.
The dataset is the Boston Housing dataset (resp. 20 Newsgroups) for
regression (resp. classification).
For each class of models we make the model complexity vary through the choice
of relevant model parameters and measure the influence on both computational
performance (latency) and predictive power (MSE or Hamming Loss).
"""
print(__doc__)
# Author: Eustache Diemert <[email protected]>
# License: BSD 3 clause
import time
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1.parasite_axes import host_subplot
from mpl_toolkits.axisartist.axislines import Axes
from scipy.sparse.csr import csr_matrix
from sklearn import datasets
from sklearn.utils import shuffle
from sklearn.metrics import mean_squared_error
from sklearn.svm.classes import NuSVR
from sklearn.ensemble.gradient_boosting import GradientBoostingRegressor
from sklearn.linear_model.stochastic_gradient import SGDClassifier
from sklearn.metrics import hamming_loss
###############################################################################
# Routines
# initialize random generator
np.random.seed(0)
def generate_data(case, sparse=False):
"""Generate regression/classification data."""
bunch = None
if case == 'regression':
bunch = datasets.load_boston()
elif case == 'classification':
bunch = datasets.fetch_20newsgroups_vectorized(subset='all')
X, y = shuffle(bunch.data, bunch.target)
offset = int(X.shape[0] * 0.8)
X_train, y_train = X[:offset], y[:offset]
X_test, y_test = X[offset:], y[offset:]
if sparse:
X_train = csr_matrix(X_train)
X_test = csr_matrix(X_test)
else:
X_train = np.array(X_train)
X_test = np.array(X_test)
y_test = np.array(y_test)
y_train = np.array(y_train)
data = {'X_train': X_train, 'X_test': X_test, 'y_train': y_train,
'y_test': y_test}
return data
def benchmark_influence(conf):
"""
Benchmark influence of :changing_param: on both MSE and latency.
"""
prediction_times = []
prediction_powers = []
complexities = []
for param_value in conf['changing_param_values']:
conf['tuned_params'][conf['changing_param']] = param_value
estimator = conf['estimator'](**conf['tuned_params'])
print("Benchmarking %s" % estimator)
estimator.fit(conf['data']['X_train'], conf['data']['y_train'])
conf['postfit_hook'](estimator)
complexity = conf['complexity_computer'](estimator)
complexities.append(complexity)
start_time = time.time()
for _ in range(conf['n_samples']):
y_pred = estimator.predict(conf['data']['X_test'])
elapsed_time = (time.time() - start_time) / float(conf['n_samples'])
prediction_times.append(elapsed_time)
pred_score = conf['prediction_performance_computer'](
conf['data']['y_test'], y_pred)
prediction_powers.append(pred_score)
print("Complexity: %d | %s: %.4f | Pred. Time: %fs\n" % (
complexity, conf['prediction_performance_label'], pred_score,
elapsed_time))
return prediction_powers, prediction_times, complexities
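# A minimal sketch of a 'conf' dictionary accepted by benchmark_influence
# (the field names mirror the configurations defined later in this script;
# the particular values here are illustrative):
#
#   conf = {'estimator': NuSVR,
#           'tuned_params': {'C': 1e3, 'gamma': 2 ** -15},
#           'changing_param': 'nu',
#           'changing_param_values': [0.1, 0.5, 0.9],
#           'complexity_computer': lambda est: len(est.support_vectors_),
#           'complexity_label': 'n_support_vectors',
#           'prediction_performance_computer': mean_squared_error,
#           'prediction_performance_label': 'MSE',
#           'postfit_hook': lambda est: est,
#           'data': generate_data('regression'),
#           'n_samples': 30}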
def plot_influence(conf, mse_values, prediction_times, complexities):
"""
Plot influence of model complexity on both accuracy and latency.
"""
plt.figure(figsize=(12, 6))
host = host_subplot(111, axes_class=Axes)
plt.subplots_adjust(right=0.75)
par1 = host.twinx()
host.set_xlabel('Model Complexity (%s)' % conf['complexity_label'])
y1_label = conf['prediction_performance_label']
y2_label = "Time (s)"
host.set_ylabel(y1_label)
par1.set_ylabel(y2_label)
p1, = host.plot(complexities, mse_values, 'b-', label="prediction error")
p2, = par1.plot(complexities, prediction_times, 'r-',
label="latency")
host.legend(loc='upper right')
host.axis["left"].label.set_color(p1.get_color())
par1.axis["right"].label.set_color(p2.get_color())
plt.title('Influence of Model Complexity - %s' % conf['estimator'].__name__)
plt.show()
def _count_nonzero_coefficients(estimator):
a = estimator.coef_.toarray()
return np.count_nonzero(a)
###############################################################################
# main code
regression_data = generate_data('regression')
classification_data = generate_data('classification', sparse=True)
configurations = [
{'estimator': SGDClassifier,
'tuned_params': {'penalty': 'elasticnet', 'alpha': 0.001, 'loss':
'modified_huber', 'fit_intercept': True},
'changing_param': 'l1_ratio',
'changing_param_values': [0.25, 0.5, 0.75, 0.9],
'complexity_label': 'non_zero coefficients',
'complexity_computer': _count_nonzero_coefficients,
'prediction_performance_computer': hamming_loss,
'prediction_performance_label': 'Hamming Loss (Misclassification Ratio)',
'postfit_hook': lambda x: x.sparsify(),
'data': classification_data,
'n_samples': 30},
{'estimator': NuSVR,
'tuned_params': {'C': 1e3, 'gamma': 2 ** -15},
'changing_param': 'nu',
'changing_param_values': [0.1, 0.25, 0.5, 0.75, 0.9],
'complexity_label': 'n_support_vectors',
'complexity_computer': lambda x: len(x.support_vectors_),
'data': regression_data,
'postfit_hook': lambda x: x,
'prediction_performance_computer': mean_squared_error,
'prediction_performance_label': 'MSE',
'n_samples': 30},
{'estimator': GradientBoostingRegressor,
'tuned_params': {'loss': 'ls'},
'changing_param': 'n_estimators',
'changing_param_values': [10, 50, 100, 200, 500],
'complexity_label': 'n_trees',
'complexity_computer': lambda x: x.n_estimators,
'data': regression_data,
'postfit_hook': lambda x: x,
'prediction_performance_computer': mean_squared_error,
'prediction_performance_label': 'MSE',
'n_samples': 30},
]
for conf in configurations:
prediction_performances, prediction_times, complexities = \
benchmark_influence(conf)
plot_influence(conf, prediction_performances, prediction_times,
complexities)
| bsd-3-clause |
skkwan/IC10X2 | infrared_and_lightcurve/Smaller_annuli_fluxes_170301.py | 1 | 8515 | # -*- coding: utf-8 -*-
"""
Created on Wed Feb 15 20:21:21 2017
@author: stephaniekwan
"""
# -*- coding: utf-8 -*-
"""
Created on Wed Feb 1 21:53:55 2017
@author: stephaniekwan
"""
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 21 17:27:53 2016
Updated 2016 Aug 4 to include WIRC points
Updated 2016 Sep 7 to reformat
@author: stephaniekwan
IC-10 X-2 Spitzer IRAC light curves. Updated on August 4th to include July 18th
Palomar JHK band measurements.
Coordinates: 0:20:20.940 +59:17:59.00
Circle radius: 3 arcsec. Annulus radii: 5 arcsec inner, 10 arcsec outer
"""
import numpy as np
import matplotlib.pyplot as plt
from jdcal import gcal2jd
from mpl_toolkits.axes_grid1.inset_locator import zoomed_inset_axes, mark_inset
# Use LaTeX font
plt.rc('font', weight = 'normal', size = 15)
plt.rc('font', **{'family': 'serif', 'serif': ['Computer Modern']})
#plt.rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']})
plt.rc('text', usetex = True)
index = np.array(['4424960 2004-07-23', '33204224 2010-01-29',
'33203968 2010-02-19', '33203456 2010-03-10',
'33202944 2010-09-09', '33202432 2010-10-04', '33201920 2010-10-14',
'42321152 2011-09-24', '42321408 2012-04-04', '52576256 2015-03-11',
'52577280 2016-03-23', '2016-07-18'])
gdates = np.array([(2004,07,23), (2010,01,29), (2010,02,19), (2010,03,10),
(2010,9,9), (2010,10,4), (2010,10,14), (2011,9,24),
(2012,04,04), (2015,03,11), (2016,03,23)])
# Convert to MJD dates
jdates = np.zeros(len(gdates))
for i in range(len(gdates)):
jdates[i] = gcal2jd(gdates[i][0], gdates[i][1], gdates[i][2])[1]
WIRCjdate = gcal2jd(2016,7,18)[1]
# Mean surface brightness in circle (MJy/sr per pixel), for 3.6 and 4.5 microns
# Mean surface brightness in annulus: in units of MJy/sr per pixel
circMean36 = np.array([2.235913,
1.9806753,
1.8627226,
1.9333704,
1.9806753,
1.9242988,
1.8619019,
1.8695578,
1.9416175,
1.8303715,
1.8961317])
annMean36 = np.array([1.502455, 1.4441012, 1.4349068, 1.4300396,
1.4441012, 1.4369512, 1.4367747,
1.4509853, 1.4649935, 1.4423924, 1.4682426])
annSD36 = np.array([0.323036, 0.33284634, 0.30873036, 0.27726872,
0.33284634, 0.29375085, 0.31357359,
0.32412101, 0.30720197, 0.28204827, 0.28241972])
circMean45 = np.array([1.6294469, 1.3514017, 1.2583814, 1.2950296,
1.3514017, 1.2898556, 1.2250279,
1.2813393, 1.343888, 1.2231404, 1.2529148])
annMean45 = np.array([1.0128354, 0.93392948, 0.94994089, 0.96776315,
0.93392948, 0.93146131,0.91232822, 0.96418034,
1.0059549, 0.93307992, 0.94233364])
annSD45 = np.array([0.18814292, 0.19965652, 0.19302296, 0.18062225,
0.19965652, 0.18025225, 0.18849567, 0.19213017,
0.18247341, 0.19707077, 0.20098456])
circMean58 = np.array([2.4857705]) #only for '4424960 2004-07-23'
circMean80 = np.array([5.6362584]) # " "
annMean58 = np.array([2.2773678])
annMean80 = np.array([5.8670916])
# Standard deviation in annulus counts (counts/sr per pixel)
annSD58 = np.array([0.34377934])
annSD80 = np.array([0.81536177])
# Number of pixels in circle
circNpx36 = np.array([54,52,54,55,52,54,55,56,53,56,55])
circNpx45 = np.array([54,52,54,55,52,54,55,56,53,56,55])
circNpx58, circNpx80 = np.array([54]), np.array([54])
# Calculate number of non-background counts in the circle (counts/sr)
circCounts36 = (circMean36 - annMean36) * circNpx36
circCounts45 = (circMean45 - annMean45) * circNpx45
circCounts58 = (circMean58 - annMean58) * circNpx58
circCounts80 = (circMean80 - annMean80) * circNpx80
# Conversion between steradians and arcsec^2: 1 steradian is 4.25 x 10**10 arcsec^2
srOverArcSec = 1/(4.25 * 10**10)
# Each pixel covers 0.3600 arcsec^2. Convert the summed surface brightness
# (MJy/sr) to a flux density in mJy (the factor 10**9 converts MJy to mJy)
circFlux36 = circCounts36 * 0.3600 * srOverArcSec * 10**9
circFlux45 = circCounts45 * 0.3600 * srOverArcSec * 10**9
circFlux58 = circCounts58 * 0.3600 * srOverArcSec * 10**9
circFlux80 = circCounts80 * 0.3600 * srOverArcSec * 10**9
# Estimation of error: standard dev. in annulus counts times area of circle
fluxError36 = annSD36 * np.sqrt(circNpx36) * srOverArcSec * 10**9 * 0.3600
fluxError45 = annSD45 * np.sqrt(circNpx45) * srOverArcSec * 10**9 * 0.3600
fluxError58 = annSD58 * np.sqrt(circNpx58) * srOverArcSec * 10**9 * 0.3600
fluxError80 = annSD80 * np.sqrt(circNpx80) * srOverArcSec * 10**9 * 0.3600
# JHK fluxes and errors (in mJy)
jFlux, jErr = 0.3822, 0.05623
hFlux, hErr = 0.34596, 0.02698
kFlux, kErr = 0.396159, 0.0773288
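# Replace the 5.8 and 8.0 micron fluxes/errors computed above with hard-coded values (mJy)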
circFlux58, circFlux80 = np.array([0.21036669]), np.array([0.19616618])
fluxError58, fluxError80 = np.array([0.03456009]), np.array([0.03161511])
# 2MASS fluxes upper limits (in mJy)
j2Flux, h2Flux, k2Flux = 0.4192, 0.7084, 0.4207
j2FluxErr = 0.0593
upperLimDate = gcal2jd(2000,9,16)[1]
dates2 = np.array([upperLimDate, WIRCjdate])
jFluxes = np.array([j2Flux, jFlux])
hFluxes = np.array([h2Flux, hFlux])
kFluxes = np.array([k2Flux, kFlux])
# Plot light curves
fig, ax = plt.subplots()
plt.hold(True)
plt.scatter(dates2, jFluxes, facecolors = 'none', marker = '<', s = 30,
edgecolors = 'navy')
plt.scatter(dates2, hFluxes, facecolors = 'none', marker = 's', s = 30,
edgecolors = 'royalblue')
plt.scatter(dates2, kFluxes, facecolors = 'none', marker = '>', s = 30,
edgecolors = 'lightskyblue')
plt.scatter(jdates, circFlux36, color = 'black', marker = 'o', s = 15)
plt.scatter(jdates, circFlux45, color = 'grey', marker = 'v', s = 15)
plt.scatter(jdates[0], circFlux58, facecolors = 'none', edgecolors =
'darkgrey', marker = 'D', s = 22)
plt.scatter(jdates[0], circFlux80, facecolors ='none', edgecolors = 'black',
marker = 'o', s = 25)
plt.xlim([51500,59500])
plt.ylim([0.00,0.80])
plt.legend(('J', 'H', 'K$_s$','[3.6]', '[4.5]', '[5.8]', '[8.0]'),
scatterpoints = 1,
loc = 'upper right',
title = 'Filter/Channel',
fontsize = 13,
frameon = False)
# Plot time of burst and label it
plt.axvline(x = 55337.8, color = 'k', ls = 'dashed')
#plt.text(55500, 0.45, "2010 May outburst", rotation=90, fontsize=13)
# Plot error bars
plt.errorbar(WIRCjdate, kFlux, kErr, color = 'lightskyblue')
plt.errorbar(WIRCjdate, hFlux, hErr, color = 'royalblue')
plt.errorbar(WIRCjdate, jFlux, jErr, color = 'navy')
plt.errorbar(dates2[0], j2Flux, 0.0593, color = 'navy')
plt.errorbar(jdates, circFlux36, yerr = fluxError36, linestyle = 'None',
color = 'black')
plt.errorbar(jdates, circFlux45, yerr = fluxError45, linestyle = 'None',
color = 'grey')
plt.errorbar(jdates[0], circFlux58, yerr = fluxError58, linestyle = 'None',
color = 'darkgrey')
plt.errorbar(jdates[0], circFlux80, yerr = fluxError80, linestyle = 'None',
color = 'black')
plt.xlabel('Time (MJD)', fontsize = 14)
plt.ylabel('Flux density (mJy)', fontsize = 14)
ax.arrow(dates2[0], kFluxes[0], 0.0, -0.08, head_width = 150,
head_length = 0.02, fc ='lightskyblue', ec ='lightskyblue')
ax.arrow(dates2[0], hFluxes[0], 0.0, -0.08, head_width = 150,
head_length = 0.02, fc = 'royalblue', ec ='royalblue')
ax.arrow(WIRCjdate, 0.7, 0.0, -0.15, head_width = 300, head_length = 0.03,
fc = 'k', ec = 'k', linestyle = '-')
#plt.text(57250, 0.75, 'TripleSpec spectroscopy', rotation = 'vertical',
# fontsize = 12, color = 'red')
x1, x2, y1, y2 = 55100, 56100, 0.01, 0.27
axins = zoomed_inset_axes(ax,1.8,loc=9)
axins.set_xlim(x1,x2)
axins.set_ylim(y1,y2)
plt.scatter(jdates[1:9], circFlux36[1:9], color = 'black', marker = 'o',
s = 15)
plt.errorbar(jdates[1:9], circFlux36[1:9], yerr = fluxError36[1:9],
linestyle = 'None', color = 'black')
plt.scatter(jdates[1:9], circFlux45[1:9], color = 'grey', marker = 'v', s = 15)
plt.errorbar(jdates[1:9], circFlux45[1:9], yerr = fluxError45[1:9],
linestyle = 'None', color = 'grey')
plt.axvline(x = 55337.8, color = 'k', ls = 'dashed')
plt.xticks(np.arange(x1, x2, 400))
mark_inset(ax, axins, loc1 = 3, loc2 = 4, fc = "none", ec = "0.6")
fig.savefig("170215_IC10_X2_smoothed_lc.pdf") | mit |
chmullig/pystan | pystan/external/pymc/plots.py | 5 | 13334 | # pymc git commit: 6115726122d46267c86d16de635941daa37eb357
# =======
# License
# =======
#
# PyMC is distributed under the Apache License, Version 2.0
#
# Copyright (c) 2006 Christopher J. Fonnesbeck (Academic Free License)
# Copyright (c) 2007-2008 Christopher J. Fonnesbeck, Anand Prabhakar Patil, David Huard (Academic Free License)
# Copyright (c) 2009-2013 The PyMC developers (see contributors to pymc-devs on GitHub)
# All rights reserved.
from pylab import *
try:
import matplotlib.gridspec as gridspec
except ImportError:
gridspec = None
import numpy as np
from scipy.stats import kde
from .stats import *
from .trace import *
__all__ = ['traceplot', 'kdeplot', 'kde2plot', 'forestplot', 'autocorrplot']
def traceplot(trace, vars=None):
if vars is None:
vars = trace.varnames
if isinstance(trace, MultiTrace):
trace = trace.combined()
n = len(vars)
f, ax = subplots(n, 2, squeeze=False)
for i, v in enumerate(vars):
d = np.squeeze(trace[v])
if trace[v].dtype.kind == 'i':
            ax[i, 0].hist(d, bins=int(sqrt(d.size)))
else:
kdeplot_op(ax[i, 0], d)
ax[i, 0].set_title(str(v))
ax[i, 1].plot(d, alpha=.35)
ax[i, 0].set_ylabel("frequency")
ax[i, 1].set_ylabel("sample value")
return f
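# Illustrative usage sketch (not part of the original module); assumes an
# existing NpTrace or MultiTrace object named `trace`:
#
#     fig = traceplot(trace, vars=['mu', 'sigma'])
#     fig.savefig('traceplot.png')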
def kdeplot_op(ax, data):
data = np.atleast_2d(data.T).T
for i in range(data.shape[1]):
d = data[:, i]
density = kde.gaussian_kde(d)
l = np.min(d)
u = np.max(d)
x = np.linspace(0, 1, 100) * (u - l) + l
ax.plot(x, density(x))
def kde2plot_op(ax, x, y, grid=200):
xmin = x.min()
xmax = x.max()
ymin = y.min()
ymax = y.max()
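    # Multiplying by 1j below makes np.mgrid treat `grid` as a number of
    # points (inclusive) rather than a step size.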
grid = grid * 1j
X, Y = np.mgrid[xmin:xmax:grid, ymin:ymax:grid]
positions = np.vstack([X.ravel(), Y.ravel()])
values = np.vstack([x, y])
kernel = kde.gaussian_kde(values)
Z = np.reshape(kernel(positions).T, X.shape)
ax.imshow(np.rot90(Z), cmap=cm.gist_earth_r,
extent=[xmin, xmax, ymin, ymax])
def kdeplot(data):
f, ax = subplots(1, 1, squeeze=True)
kdeplot_op(ax, data)
return f
def kde2plot(x, y, grid=200):
f, ax = subplots(1, 1, squeeze=True)
kde2plot_op(ax, x, y, grid)
return f
def autocorrplot(trace, vars=None, fontmap = None, max_lag=100):
"""Bar plot of the autocorrelation function for a trace"""
try:
# MultiTrace
traces = trace.traces
except AttributeError:
# NpTrace
traces = [trace]
if fontmap is None: fontmap = {1:10, 2:8, 3:6, 4:5, 5:4}
if vars is None:
vars = traces[0].varnames
# Extract sample data
samples = [{v:trace[v] for v in vars} for trace in traces]
chains = len(traces)
n = len(samples[0])
f, ax = subplots(n, chains, squeeze=False)
max_lag = min(len(samples[0][vars[0]])-1, max_lag)
for i, v in enumerate(vars):
for j in xrange(chains):
d = np.squeeze(samples[j][v])
ax[i,j].acorr(d, detrend=mlab.detrend_mean, maxlags=max_lag)
if not j:
ax[i, j].set_ylabel("correlation")
ax[i, j].set_xlabel("lag")
if chains > 1:
ax[i, j].set_title("chain {0}".format(j+1))
# Smaller tick labels
tlabels = gca().get_xticklabels()
setp(tlabels, 'fontsize', fontmap[1])
tlabels = gca().get_yticklabels()
setp(tlabels, 'fontsize', fontmap[1])
def var_str(name, shape):
"""Return a sequence of strings naming the element of the tallyable object.
This is a support function for forestplot.
:Example:
>>> var_str('theta', (4,))
['theta[1]', 'theta[2]', 'theta[3]', 'theta[4]']
"""
size = prod(shape)
ind = (indices(shape) + 1).reshape(-1, size)
names = ['[' + ','.join(map(str, i)) + ']' for i in zip(*ind)]
# if len(name)>12:
# name = '\n'.join(name.split('_'))
# name += '\n'
names[0] = '%s %s' % (name, names[0])
return names
def forestplot(trace_obj, vars=None, alpha=0.05, quartiles=True, rhat=True,
main=None, xtitle=None, xrange=None, ylabels=None, chain_spacing=0.05, vline=0):
""" Forest plot (model summary plot)
Generates a "forest plot" of 100*(1-alpha)% credible intervals for either the
set of variables in a given model, or a specified set of nodes.
:Arguments:
trace_obj: NpTrace or MultiTrace object
Trace(s) from an MCMC sample.
vars: list
List of variables to plot (defaults to None, which results in all
variables plotted).
alpha (optional): float
Alpha value for (1-alpha)*100% credible intervals (defaults to 0.05).
quartiles (optional): bool
Flag for plotting the interquartile range, in addition to the
(1-alpha)*100% intervals (defaults to True).
rhat (optional): bool
Flag for plotting Gelman-Rubin statistics. Requires 2 or more
chains (defaults to True).
main (optional): string
Title for main plot. Passing False results in titles being
suppressed; passing None (default) results in default titles.
xtitle (optional): string
Label for x-axis. Defaults to no label
xrange (optional): list or tuple
Range for x-axis. Defaults to matplotlib's best guess.
ylabels (optional): list
User-defined labels for each variable. If not provided, the node
__name__ attributes are used.
chain_spacing (optional): float
Plot spacing between chains (defaults to 0.05).
vline (optional): numeric
Location of vertical reference line (defaults to 0).
"""
if not gridspec:
print_(
'\nYour installation of matplotlib is not recent enough to support summary_plot; this function is disabled until matplotlib is updated.')
return
# Quantiles to be calculated
qlist = [100 * alpha / 2, 50, 100 * (1 - alpha / 2)]
if quartiles:
qlist = [100 * alpha / 2, 25, 50, 75, 100 * (1 - alpha / 2)]
# Range for x-axis
plotrange = None
# Number of chains
chains = None
# Gridspec
gs = None
# Subplots
interval_plot = None
rhat_plot = None
try:
# First try MultiTrace type
traces = trace_obj.traces
if rhat and len(traces) > 1:
from .diagnostics import gelman_rubin
R = gelman_rubin(trace_obj)
if vars is not None:
R = {v: R[v] for v in vars}
else:
rhat = False
except AttributeError:
# Single NpTrace
traces = [trace_obj]
# Can't calculate Gelman-Rubin with a single trace
rhat = False
if vars is None:
vars = traces[0].varnames
# Empty list for y-axis labels
labels = []
chains = len(traces)
if gs is None:
# Initialize plot
if rhat and chains > 1:
gs = gridspec.GridSpec(1, 2, width_ratios=[3, 1])
else:
gs = gridspec.GridSpec(1, 1)
# Subplot for confidence intervals
interval_plot = subplot(gs[0])
for j, tr in enumerate(traces):
# Get quantiles
trace_quantiles = quantiles(tr, qlist)
hpd_intervals = hpd(tr, alpha)
# Counter for current variable
var = 1
for varname in vars:
var_quantiles = trace_quantiles[varname]
quants = var_quantiles.values()
var_hpd = hpd_intervals[varname].T
# Substitute HPD interval for quantile
quants[0] = var_hpd[0].T
quants[-1] = var_hpd[1].T
# Ensure x-axis contains range of current interval
if plotrange:
plotrange = [min(
plotrange[0],
np.min(quants)),
max(plotrange[1],
np.max(quants))]
else:
plotrange = [np.min(quants), np.max(quants)]
# Number of elements in current variable
value = tr[varname][0]
k = np.size(value)
# Append variable name(s) to list
if not j:
if k > 1:
names = var_str(varname, shape(value))
labels += names
else:
labels.append(varname)
# labels.append('\n'.join(varname.split('_')))
# Add spacing for each chain, if more than one
e = [0] + [(chain_spacing * ((i + 2) / 2)) *
(-1) ** i for i in range(chains - 1)]
# Deal with multivariate nodes
if k > 1:
for i, q in enumerate(np.transpose(quants).squeeze()):
# Y coordinate with jitter
y = -(var + i) + e[j]
if quartiles:
# Plot median
plot(q[2], y, 'bo', markersize=4)
# Plot quartile interval
errorbar(
x=(q[1],
q[3]),
y=(y,
y),
linewidth=2,
color="blue")
else:
# Plot median
plot(q[1], y, 'bo', markersize=4)
# Plot outer interval
errorbar(
x=(q[0],
q[-1]),
y=(y,
y),
linewidth=1,
color="blue")
else:
# Y coordinate with jitter
y = -var + e[j]
if quartiles:
# Plot median
plot(quants[2], y, 'bo', markersize=4)
# Plot quartile interval
errorbar(
x=(quants[1],
quants[3]),
y=(y,
y),
linewidth=2,
color="blue")
else:
# Plot median
plot(quants[1], y, 'bo', markersize=4)
# Plot outer interval
errorbar(
x=(quants[0],
quants[-1]),
y=(y,
y),
linewidth=1,
color="blue")
# Increment index
var += k
labels = ylabels or labels
# Update margins
left_margin = np.max([len(x) for x in labels]) * 0.015
gs.update(left=left_margin, right=0.95, top=0.9, bottom=0.05)
# Define range of y-axis
ylim(-var + 0.5, -0.5)
datarange = plotrange[1] - plotrange[0]
xlim(plotrange[0] - 0.05 * datarange, plotrange[1] + 0.05 * datarange)
# Add variable labels
yticks([-(l + 1) for l in range(len(labels))], labels)
# Add title
if main is not False:
plot_title = main or str(int((
1 - alpha) * 100)) + "% Credible Intervals"
title(plot_title)
# Add x-axis label
if xtitle is not None:
xlabel(xtitle)
# Constrain to specified range
if xrange is not None:
xlim(*xrange)
# Remove ticklines on y-axes
for ticks in interval_plot.yaxis.get_major_ticks():
ticks.tick1On = False
ticks.tick2On = False
for loc, spine in interval_plot.spines.iteritems():
if loc in ['bottom', 'top']:
pass
# spine.set_position(('outward',10)) # outward by 10 points
elif loc in ['left', 'right']:
spine.set_color('none') # don't draw spine
# Reference line
axvline(vline, color='k', linestyle='--')
# Genenerate Gelman-Rubin plot
if rhat and chains > 1:
# If there are multiple chains, calculate R-hat
rhat_plot = subplot(gs[1])
if main is not False:
title("R-hat")
# Set x range
xlim(0.9, 2.1)
# X axis labels
xticks((1.0, 1.5, 2.0), ("1", "1.5", "2+"))
yticks([-(l + 1) for l in range(len(labels))], "")
i = 1
for varname in vars:
value = traces[0][varname][0]
k = np.size(value)
if k > 1:
plot([min(r, 2) for r in R[varname]], [-(j + i)
for j in range(k)], 'bo', markersize=4)
else:
plot(min(R[varname], 2), -i, 'bo', markersize=4)
i += k
# Define range of y-axis
ylim(-i + 0.5, -0.5)
# Remove ticklines on y-axes
for ticks in rhat_plot.yaxis.get_major_ticks():
ticks.tick1On = False
ticks.tick2On = False
for loc, spine in rhat_plot.spines.iteritems():
if loc in ['bottom', 'top']:
pass
# spine.set_position(('outward',10)) # outward by 10 points
elif loc in ['left', 'right']:
spine.set_color('none') # don't draw spine
return gs
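# Illustrative usage sketch (not part of the original module); assumes a
# MultiTrace sampled with two or more chains so the R-hat panel can be drawn:
#
#     gs = forestplot(trace, vars=['beta'], alpha=0.05, rhat=True)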
| gpl-3.0 |
keiserlab/e3fp-paper | project/analysis/comparison/make_cv_summary_table.py | 1 | 3421 | """Make table summarizing cross-validation results from E3FP/ECFP variants.
Author: Seth Axen
E-mail: [email protected]
"""
import re
import os
import glob
import numpy as np
import pandas as pd
CV_BASEDIR = os.path.join(os.environ['E3FP_PROJECT'], 'crossvalidation',
'sea')
E3FP_NOSTEREO_REPEAT_DIRS = glob.glob(os.path.join(CV_BASEDIR,
"e3fp-nostereo*"))
E3FP_REPEAT_DIRS = [x for x in glob.glob(os.path.join(CV_BASEDIR, "e3fp*"))
if x not in E3FP_NOSTEREO_REPEAT_DIRS]
E2FP_STEREO_REPEAT_DIRS = glob.glob(os.path.join(CV_BASEDIR, "e2fp-stereo*"))
E2FP_REPEAT_DIRS = [x for x in glob.glob(os.path.join(CV_BASEDIR, "e2fp*"))
if x not in E2FP_STEREO_REPEAT_DIRS]
ECFP_CHIRAL_REPEAT_DIRS = glob.glob(os.path.join(CV_BASEDIR, "ecfp4-chiral*"))
ECFP_REPEAT_DIRS = [x for x in glob.glob(os.path.join(CV_BASEDIR, "ecfp4*"))
if x not in ECFP_CHIRAL_REPEAT_DIRS]
E3FP_RDKIT_REPEAT_DIRS = glob.glob(os.path.join(CV_BASEDIR, "e3fp-rdkit*"))
CVSTATS_FILE_NAME = "table_1.txt"
def stats_from_cv_dirs(cv_dirs):
aurocs = []
auprcs = []
target_aurocs = []
target_auprcs = []
target_perc_pos = []
for cv_dir in cv_dirs:
log_file = os.path.join(cv_dir, "log.txt")
with open(log_file, "rU") as f:
for line in f:
try:
m = re.search(('Target.*AUROC of (0\.\d+).*AUPRC of '
'(0\.\d+)\.\s\((0\.\d+)'), line)
auroc, auprc, perc_pos = [float(m.group(i))
for i in range(1, 4)]
target_aurocs.append(auroc)
target_auprcs.append(auprc)
target_perc_pos.append(perc_pos)
continue
except AttributeError:
pass
try:
m = re.search(
'Fold.*AUROC of (0\.\d+).*AUPRC of (0\.\d+)', line)
auroc, auprc = float(m.group(1)), float(m.group(2))
aurocs.append(auroc)
auprcs.append(auprc)
except AttributeError:
pass
return ((np.mean(auprcs), np.std(auprcs)),
(np.mean(aurocs), np.std(aurocs)),
(np.mean(target_auprcs), np.std(target_auprcs)),
(np.mean(target_aurocs), np.std(target_aurocs)),
(np.mean(target_perc_pos), np.std(target_perc_pos)))
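# Log lines matched above have roughly this shape (inferred from the regular
# expressions; the exact wording in real log files may differ):
#   "Target ... AUROC of 0.912 ... AUPRC of 0.845. (0.013 ..."
#   "Fold ... AUROC of 0.905 ... AUPRC of 0.831"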
if __name__ == "__main__":
names = ["ECFP4", "ECFP4-Chiral", "E2FP", "E2FP-Stereo", "E3FP-NoStereo",
"E3FP", "E3FP-RDKit"]
dirs_list = [ECFP_REPEAT_DIRS, ECFP_CHIRAL_REPEAT_DIRS,
E2FP_REPEAT_DIRS, E2FP_STEREO_REPEAT_DIRS,
E3FP_NOSTEREO_REPEAT_DIRS,
E3FP_REPEAT_DIRS, E3FP_RDKIT_REPEAT_DIRS]
stats = []
for dirs in dirs_list:
stats.append(stats_from_cv_dirs(dirs))
stats_strs = [["{:.4f} +/- {:.4f}".format(*pair) for pair in row]
for row in stats]
df = pd.DataFrame(stats_strs, columns=[
"Mean Fold AUPRC", "Mean Fold AUROC", "Mean Target AUPRC",
"Mean Target AUROC", "Positive Data Pairs"])
df['Name'] = names
df.set_index('Name', inplace=True)
with open(CVSTATS_FILE_NAME, "w") as f:
df.to_csv(f, sep='\t')
| lgpl-3.0 |
TomAugspurger/pandas | pandas/tests/indexes/ranges/test_setops.py | 2 | 8393 | from datetime import datetime, timedelta
import numpy as np
import pytest
from pandas import Index, Int64Index, RangeIndex
import pandas._testing as tm
class TestRangeIndexSetOps:
def test_intersection(self, sort):
# intersect with Int64Index
index = RangeIndex(start=0, stop=20, step=2)
other = Index(np.arange(1, 6))
result = index.intersection(other, sort=sort)
expected = Index(np.sort(np.intersect1d(index.values, other.values)))
tm.assert_index_equal(result, expected)
result = other.intersection(index, sort=sort)
expected = Index(
np.sort(np.asarray(np.intersect1d(index.values, other.values)))
)
tm.assert_index_equal(result, expected)
# intersect with increasing RangeIndex
other = RangeIndex(1, 6)
result = index.intersection(other, sort=sort)
expected = Index(np.sort(np.intersect1d(index.values, other.values)))
tm.assert_index_equal(result, expected)
# intersect with decreasing RangeIndex
other = RangeIndex(5, 0, -1)
result = index.intersection(other, sort=sort)
expected = Index(np.sort(np.intersect1d(index.values, other.values)))
tm.assert_index_equal(result, expected)
# reversed (GH 17296)
result = other.intersection(index, sort=sort)
tm.assert_index_equal(result, expected)
# GH 17296: intersect two decreasing RangeIndexes
first = RangeIndex(10, -2, -2)
other = RangeIndex(5, -4, -1)
expected = first.astype(int).intersection(other.astype(int), sort=sort)
result = first.intersection(other, sort=sort).astype(int)
tm.assert_index_equal(result, expected)
# reversed
result = other.intersection(first, sort=sort).astype(int)
tm.assert_index_equal(result, expected)
index = RangeIndex(5)
# intersect of non-overlapping indices
other = RangeIndex(5, 10, 1)
result = index.intersection(other, sort=sort)
expected = RangeIndex(0, 0, 1)
tm.assert_index_equal(result, expected)
other = RangeIndex(-1, -5, -1)
result = index.intersection(other, sort=sort)
expected = RangeIndex(0, 0, 1)
tm.assert_index_equal(result, expected)
# intersection of empty indices
other = RangeIndex(0, 0, 1)
result = index.intersection(other, sort=sort)
expected = RangeIndex(0, 0, 1)
tm.assert_index_equal(result, expected)
result = other.intersection(index, sort=sort)
tm.assert_index_equal(result, expected)
# intersection of non-overlapping values based on start value and gcd
index = RangeIndex(1, 10, 2)
other = RangeIndex(0, 10, 4)
result = index.intersection(other, sort=sort)
expected = RangeIndex(0, 0, 1)
tm.assert_index_equal(result, expected)
def test_union_noncomparable(self, sort):
# corner case, non-Int64Index
index = RangeIndex(start=0, stop=20, step=2)
other = Index([datetime.now() + timedelta(i) for i in range(4)], dtype=object)
result = index.union(other, sort=sort)
expected = Index(np.concatenate((index, other)))
tm.assert_index_equal(result, expected)
result = other.union(index, sort=sort)
expected = Index(np.concatenate((other, index)))
tm.assert_index_equal(result, expected)
@pytest.fixture(
params=[
(
RangeIndex(0, 10, 1),
RangeIndex(0, 10, 1),
RangeIndex(0, 10, 1),
RangeIndex(0, 10, 1),
),
(
RangeIndex(0, 10, 1),
RangeIndex(5, 20, 1),
RangeIndex(0, 20, 1),
Int64Index(range(20)),
),
(
RangeIndex(0, 10, 1),
RangeIndex(10, 20, 1),
RangeIndex(0, 20, 1),
Int64Index(range(20)),
),
(
RangeIndex(0, -10, -1),
RangeIndex(0, -10, -1),
RangeIndex(0, -10, -1),
RangeIndex(0, -10, -1),
),
(
RangeIndex(0, -10, -1),
RangeIndex(-10, -20, -1),
RangeIndex(-19, 1, 1),
Int64Index(range(0, -20, -1)),
),
(
RangeIndex(0, 10, 2),
RangeIndex(1, 10, 2),
RangeIndex(0, 10, 1),
Int64Index(list(range(0, 10, 2)) + list(range(1, 10, 2))),
),
(
RangeIndex(0, 11, 2),
RangeIndex(1, 12, 2),
RangeIndex(0, 12, 1),
Int64Index(list(range(0, 11, 2)) + list(range(1, 12, 2))),
),
(
RangeIndex(0, 21, 4),
RangeIndex(-2, 24, 4),
RangeIndex(-2, 24, 2),
Int64Index(list(range(0, 21, 4)) + list(range(-2, 24, 4))),
),
(
RangeIndex(0, -20, -2),
RangeIndex(-1, -21, -2),
RangeIndex(-19, 1, 1),
Int64Index(list(range(0, -20, -2)) + list(range(-1, -21, -2))),
),
(
RangeIndex(0, 100, 5),
RangeIndex(0, 100, 20),
RangeIndex(0, 100, 5),
Int64Index(range(0, 100, 5)),
),
(
RangeIndex(0, -100, -5),
RangeIndex(5, -100, -20),
RangeIndex(-95, 10, 5),
Int64Index(list(range(0, -100, -5)) + [5]),
),
(
RangeIndex(0, -11, -1),
RangeIndex(1, -12, -4),
RangeIndex(-11, 2, 1),
Int64Index(list(range(0, -11, -1)) + [1, -11]),
),
(RangeIndex(0), RangeIndex(0), RangeIndex(0), RangeIndex(0)),
(
RangeIndex(0, -10, -2),
RangeIndex(0),
RangeIndex(0, -10, -2),
RangeIndex(0, -10, -2),
),
(
RangeIndex(0, 100, 2),
RangeIndex(100, 150, 200),
RangeIndex(0, 102, 2),
Int64Index(range(0, 102, 2)),
),
(
RangeIndex(0, -100, -2),
RangeIndex(-100, 50, 102),
RangeIndex(-100, 4, 2),
Int64Index(list(range(0, -100, -2)) + [-100, 2]),
),
(
RangeIndex(0, -100, -1),
RangeIndex(0, -50, -3),
RangeIndex(-99, 1, 1),
Int64Index(list(range(0, -100, -1))),
),
(
RangeIndex(0, 1, 1),
RangeIndex(5, 6, 10),
RangeIndex(0, 6, 5),
Int64Index([0, 5]),
),
(
RangeIndex(0, 10, 5),
RangeIndex(-5, -6, -20),
RangeIndex(-5, 10, 5),
Int64Index([0, 5, -5]),
),
(
RangeIndex(0, 3, 1),
RangeIndex(4, 5, 1),
Int64Index([0, 1, 2, 4]),
Int64Index([0, 1, 2, 4]),
),
(
RangeIndex(0, 10, 1),
Int64Index([]),
RangeIndex(0, 10, 1),
RangeIndex(0, 10, 1),
),
(
RangeIndex(0),
Int64Index([1, 5, 6]),
Int64Index([1, 5, 6]),
Int64Index([1, 5, 6]),
),
]
)
def unions(self, request):
"""Inputs and expected outputs for RangeIndex.union tests"""
return request.param
def test_union_sorted(self, unions):
idx1, idx2, expected_sorted, expected_notsorted = unions
res1 = idx1.union(idx2, sort=None)
tm.assert_index_equal(res1, expected_sorted, exact=True)
res1 = idx1.union(idx2, sort=False)
tm.assert_index_equal(res1, expected_notsorted, exact=True)
res2 = idx2.union(idx1, sort=None)
res3 = idx1._int64index.union(idx2, sort=None)
tm.assert_index_equal(res2, expected_sorted, exact=True)
tm.assert_index_equal(res3, expected_sorted)
| bsd-3-clause |
lenovor/scikit-learn | sklearn/ensemble/tests/test_gradient_boosting.py | 127 | 37672 | """
Testing for the gradient boosting module (sklearn.ensemble.gradient_boosting).
"""
import warnings
import numpy as np
from sklearn import datasets
from sklearn.base import clone
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.ensemble.gradient_boosting import ZeroEstimator
from sklearn.metrics import mean_squared_error
from sklearn.utils import check_random_state, tosequence
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns
from sklearn.utils.validation import DataConversionWarning
from sklearn.utils.validation import NotFittedError
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
rng = np.random.RandomState(0)
# also load the boston dataset
# and randomly permute it
boston = datasets.load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
def test_classification_toy():
# Check classification on a toy dataset.
for loss in ('deviance', 'exponential'):
clf = GradientBoostingClassifier(loss=loss, n_estimators=10,
random_state=1)
assert_raises(ValueError, clf.predict, T)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(10, len(clf.estimators_))
deviance_decrease = (clf.train_score_[:-1] - clf.train_score_[1:])
assert np.any(deviance_decrease >= 0.0), \
"Train deviance does not monotonically decrease."
def test_parameter_checks():
# Check input parameter validation.
assert_raises(ValueError,
GradientBoostingClassifier(n_estimators=0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(n_estimators=-1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(learning_rate=0.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(learning_rate=-1.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='foobar').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_split=0.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_split=-1.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_leaf=0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_leaf=-1.).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_weight_fraction_leaf=-1.).fit,
X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_weight_fraction_leaf=0.6).fit,
X, y)
assert_raises(ValueError,
GradientBoostingClassifier(subsample=0.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(subsample=1.1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(subsample=-0.1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(max_depth=-0.1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(max_depth=0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(init={}).fit, X, y)
# test fit before feature importance
assert_raises(ValueError,
lambda: GradientBoostingClassifier().feature_importances_)
# deviance requires ``n_classes >= 2``.
assert_raises(ValueError,
lambda X, y: GradientBoostingClassifier(
loss='deviance').fit(X, y),
X, [0, 0, 0, 0])
def test_loss_function():
assert_raises(ValueError,
GradientBoostingClassifier(loss='ls').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='lad').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='quantile').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='huber').fit, X, y)
assert_raises(ValueError,
GradientBoostingRegressor(loss='deviance').fit, X, y)
assert_raises(ValueError,
GradientBoostingRegressor(loss='exponential').fit, X, y)
def test_classification_synthetic():
# Test GradientBoostingClassifier on synthetic dataset used by
# Hastie et al. in ESLII Example 12.7.
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X_train, X_test = X[:2000], X[2000:]
y_train, y_test = y[:2000], y[2000:]
for loss in ('deviance', 'exponential'):
gbrt = GradientBoostingClassifier(n_estimators=100, min_samples_split=1,
max_depth=1, loss=loss,
learning_rate=1.0, random_state=0)
gbrt.fit(X_train, y_train)
error_rate = (1.0 - gbrt.score(X_test, y_test))
assert error_rate < 0.09, \
"GB(loss={}) failed with error {}".format(loss, error_rate)
gbrt = GradientBoostingClassifier(n_estimators=200, min_samples_split=1,
max_depth=1,
learning_rate=1.0, subsample=0.5,
random_state=0)
gbrt.fit(X_train, y_train)
error_rate = (1.0 - gbrt.score(X_test, y_test))
assert error_rate < 0.08, ("Stochastic GradientBoostingClassifier(loss={}) "
"failed with error {}".format(loss, error_rate))
def test_boston():
# Check consistency on dataset boston house prices with least squares
# and least absolute deviation.
for loss in ("ls", "lad", "huber"):
for subsample in (1.0, 0.5):
last_y_pred = None
for i, sample_weight in enumerate(
(None, np.ones(len(boston.target)),
2 * np.ones(len(boston.target)))):
clf = GradientBoostingRegressor(n_estimators=100, loss=loss,
max_depth=4, subsample=subsample,
min_samples_split=1,
random_state=1)
assert_raises(ValueError, clf.predict, boston.data)
clf.fit(boston.data, boston.target,
sample_weight=sample_weight)
y_pred = clf.predict(boston.data)
mse = mean_squared_error(boston.target, y_pred)
assert mse < 6.0, "Failed with loss %s and " \
"mse = %.4f" % (loss, mse)
if last_y_pred is not None:
np.testing.assert_array_almost_equal(
last_y_pred, y_pred,
err_msg='pred_%d doesnt match last pred_%d for loss %r and subsample %r. '
% (i, i - 1, loss, subsample))
last_y_pred = y_pred
def test_iris():
# Check consistency on dataset iris.
for subsample in (1.0, 0.5):
for sample_weight in (None, np.ones(len(iris.target))):
clf = GradientBoostingClassifier(n_estimators=100, loss='deviance',
random_state=1, subsample=subsample)
clf.fit(iris.data, iris.target, sample_weight=sample_weight)
score = clf.score(iris.data, iris.target)
assert score > 0.9, "Failed with subsample %.1f " \
"and score = %f" % (subsample, score)
def test_regression_synthetic():
# Test on synthetic regression datasets used in Leo Breiman,
    # `Bagging Predictors`. Machine Learning 24(2): 123-140 (1996).
random_state = check_random_state(1)
regression_params = {'n_estimators': 100, 'max_depth': 4,
'min_samples_split': 1, 'learning_rate': 0.1,
'loss': 'ls'}
# Friedman1
X, y = datasets.make_friedman1(n_samples=1200,
random_state=random_state, noise=1.0)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
clf = GradientBoostingRegressor()
clf.fit(X_train, y_train)
mse = mean_squared_error(y_test, clf.predict(X_test))
assert mse < 5.0, "Failed on Friedman1 with mse = %.4f" % mse
# Friedman2
X, y = datasets.make_friedman2(n_samples=1200, random_state=random_state)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
clf = GradientBoostingRegressor(**regression_params)
clf.fit(X_train, y_train)
mse = mean_squared_error(y_test, clf.predict(X_test))
assert mse < 1700.0, "Failed on Friedman2 with mse = %.4f" % mse
# Friedman3
X, y = datasets.make_friedman3(n_samples=1200, random_state=random_state)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
clf = GradientBoostingRegressor(**regression_params)
clf.fit(X_train, y_train)
mse = mean_squared_error(y_test, clf.predict(X_test))
assert mse < 0.015, "Failed on Friedman3 with mse = %.4f" % mse
def test_feature_importances():
X = np.array(boston.data, dtype=np.float32)
y = np.array(boston.target, dtype=np.float32)
clf = GradientBoostingRegressor(n_estimators=100, max_depth=5,
min_samples_split=1, random_state=1)
clf.fit(X, y)
#feature_importances = clf.feature_importances_
assert_true(hasattr(clf, 'feature_importances_'))
X_new = clf.transform(X, threshold="mean")
assert_less(X_new.shape[1], X.shape[1])
feature_mask = clf.feature_importances_ > clf.feature_importances_.mean()
assert_array_almost_equal(X_new, X[:, feature_mask])
# true feature importance ranking
# true_ranking = np.array([3, 1, 8, 2, 10, 9, 4, 11, 0, 6, 7, 5, 12])
# assert_array_equal(true_ranking, feature_importances.argsort())
def test_probability_log():
# Predict probabilities.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
assert_raises(ValueError, clf.predict_proba, T)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
# check if probabilities are in [0, 1].
y_proba = clf.predict_proba(T)
assert np.all(y_proba >= 0.0)
assert np.all(y_proba <= 1.0)
# derive predictions from probabilities
y_pred = clf.classes_.take(y_proba.argmax(axis=1), axis=0)
assert_array_equal(y_pred, true_result)
def test_check_inputs():
# Test input checks (shape and type of X and y).
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
assert_raises(ValueError, clf.fit, X, y + [0, 1])
from scipy import sparse
X_sparse = sparse.csr_matrix(X)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
assert_raises(TypeError, clf.fit, X_sparse, y)
clf = GradientBoostingClassifier().fit(X, y)
assert_raises(TypeError, clf.predict, X_sparse)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
assert_raises(ValueError, clf.fit, X, y,
sample_weight=([1] * len(y)) + [0, 1])
def test_check_inputs_predict():
# X has wrong shape
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y)
x = np.array([1.0, 2.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
x = np.array([])
assert_raises(ValueError, clf.predict, x)
x = np.array([1.0, 2.0, 3.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
clf = GradientBoostingRegressor(n_estimators=100, random_state=1)
clf.fit(X, rng.rand(len(X)))
x = np.array([1.0, 2.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
x = np.array([])
assert_raises(ValueError, clf.predict, x)
x = np.array([1.0, 2.0, 3.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
def test_check_max_features():
# test if max_features is valid.
clf = GradientBoostingRegressor(n_estimators=100, random_state=1,
max_features=0)
assert_raises(ValueError, clf.fit, X, y)
clf = GradientBoostingRegressor(n_estimators=100, random_state=1,
max_features=(len(X[0]) + 1))
assert_raises(ValueError, clf.fit, X, y)
clf = GradientBoostingRegressor(n_estimators=100, random_state=1,
max_features=-0.1)
assert_raises(ValueError, clf.fit, X, y)
def test_max_feature_regression():
# Test to make sure random state is set properly.
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X_train, X_test = X[:2000], X[2000:]
y_train, y_test = y[:2000], y[2000:]
gbrt = GradientBoostingClassifier(n_estimators=100, min_samples_split=5,
max_depth=2, learning_rate=.1,
max_features=2, random_state=1)
gbrt.fit(X_train, y_train)
deviance = gbrt.loss_(y_test, gbrt.decision_function(X_test))
assert_true(deviance < 0.5, "GB failed with deviance %.4f" % deviance)
def test_max_feature_auto():
# Test if max features is set properly for floats and str.
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
_, n_features = X.shape
X_train = X[:2000]
y_train = y[:2000]
gbrt = GradientBoostingClassifier(n_estimators=1, max_features='auto')
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, int(np.sqrt(n_features)))
gbrt = GradientBoostingRegressor(n_estimators=1, max_features='auto')
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, n_features)
gbrt = GradientBoostingRegressor(n_estimators=1, max_features=0.3)
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, int(n_features * 0.3))
gbrt = GradientBoostingRegressor(n_estimators=1, max_features='sqrt')
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, int(np.sqrt(n_features)))
gbrt = GradientBoostingRegressor(n_estimators=1, max_features='log2')
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, int(np.log2(n_features)))
gbrt = GradientBoostingRegressor(n_estimators=1,
max_features=0.01 / X.shape[1])
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, 1)
def test_staged_predict():
# Test whether staged decision function eventually gives
# the same prediction.
X, y = datasets.make_friedman1(n_samples=1200,
random_state=1, noise=1.0)
X_train, y_train = X[:200], y[:200]
X_test = X[200:]
clf = GradientBoostingRegressor()
# test raise ValueError if not fitted
assert_raises(ValueError, lambda X: np.fromiter(
clf.staged_predict(X), dtype=np.float64), X_test)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
# test if prediction for last stage equals ``predict``
for y in clf.staged_predict(X_test):
assert_equal(y.shape, y_pred.shape)
assert_array_equal(y_pred, y)
def test_staged_predict_proba():
# Test whether staged predict proba eventually gives
# the same prediction.
X, y = datasets.make_hastie_10_2(n_samples=1200,
random_state=1)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
clf = GradientBoostingClassifier(n_estimators=20)
# test raise NotFittedError if not fitted
assert_raises(NotFittedError, lambda X: np.fromiter(
clf.staged_predict_proba(X), dtype=np.float64), X_test)
clf.fit(X_train, y_train)
# test if prediction for last stage equals ``predict``
for y_pred in clf.staged_predict(X_test):
assert_equal(y_test.shape, y_pred.shape)
assert_array_equal(clf.predict(X_test), y_pred)
# test if prediction for last stage equals ``predict_proba``
for staged_proba in clf.staged_predict_proba(X_test):
assert_equal(y_test.shape[0], staged_proba.shape[0])
assert_equal(2, staged_proba.shape[1])
assert_array_equal(clf.predict_proba(X_test), staged_proba)
def test_staged_functions_defensive():
# test that staged_functions make defensive copies
rng = np.random.RandomState(0)
X = rng.uniform(size=(10, 3))
y = (4 * X[:, 0]).astype(np.int) + 1 # don't predict zeros
for estimator in [GradientBoostingRegressor(),
GradientBoostingClassifier()]:
estimator.fit(X, y)
for func in ['predict', 'decision_function', 'predict_proba']:
staged_func = getattr(estimator, "staged_" + func, None)
if staged_func is None:
# regressor has no staged_predict_proba
continue
with warnings.catch_warnings(record=True):
staged_result = list(staged_func(X))
staged_result[1][:] = 0
assert_true(np.all(staged_result[0] != 0))
def test_serialization():
# Check model serialization.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
try:
import cPickle as pickle
except ImportError:
import pickle
serialized_clf = pickle.dumps(clf, protocol=pickle.HIGHEST_PROTOCOL)
clf = None
clf = pickle.loads(serialized_clf)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
def test_degenerate_targets():
# Check if we can fit even though all targets are equal.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
# classifier should raise exception
assert_raises(ValueError, clf.fit, X, np.ones(len(X)))
clf = GradientBoostingRegressor(n_estimators=100, random_state=1)
clf.fit(X, np.ones(len(X)))
clf.predict(rng.rand(2))
assert_array_equal(np.ones((1,), dtype=np.float64),
clf.predict(rng.rand(2)))
def test_quantile_loss():
# Check if quantile loss with alpha=0.5 equals lad.
clf_quantile = GradientBoostingRegressor(n_estimators=100, loss='quantile',
max_depth=4, alpha=0.5,
random_state=7)
clf_quantile.fit(boston.data, boston.target)
y_quantile = clf_quantile.predict(boston.data)
clf_lad = GradientBoostingRegressor(n_estimators=100, loss='lad',
max_depth=4, random_state=7)
clf_lad.fit(boston.data, boston.target)
y_lad = clf_lad.predict(boston.data)
assert_array_almost_equal(y_quantile, y_lad, decimal=4)
def test_symbol_labels():
# Test with non-integer class labels.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
symbol_y = tosequence(map(str, y))
clf.fit(X, symbol_y)
assert_array_equal(clf.predict(T), tosequence(map(str, true_result)))
assert_equal(100, len(clf.estimators_))
def test_float_class_labels():
# Test with float class labels.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
float_y = np.asarray(y, dtype=np.float32)
clf.fit(X, float_y)
assert_array_equal(clf.predict(T),
np.asarray(true_result, dtype=np.float32))
assert_equal(100, len(clf.estimators_))
def test_shape_y():
# Test with float class labels.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
y_ = np.asarray(y, dtype=np.int32)
y_ = y_[:, np.newaxis]
# This will raise a DataConversionWarning that we want to
# "always" raise, elsewhere the warnings gets ignored in the
# later tests, and the tests that check for this warning fail
assert_warns(DataConversionWarning, clf.fit, X, y_)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
def test_mem_layout():
# Test with different memory layouts of X and y
X_ = np.asfortranarray(X)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X_, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
X_ = np.ascontiguousarray(X)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X_, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
y_ = np.asarray(y, dtype=np.int32)
y_ = np.ascontiguousarray(y_)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y_)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
y_ = np.asarray(y, dtype=np.int32)
y_ = np.asfortranarray(y_)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y_)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
def test_oob_improvement():
# Test if oob improvement has correct shape and regression test.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
subsample=0.5)
clf.fit(X, y)
assert clf.oob_improvement_.shape[0] == 100
# hard-coded regression test - change if modification in OOB computation
assert_array_almost_equal(clf.oob_improvement_[:5],
np.array([0.19, 0.15, 0.12, -0.12, -0.11]),
decimal=2)
def test_oob_improvement_raise():
# Test if oob improvement has correct shape.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
subsample=1.0)
clf.fit(X, y)
assert_raises(AttributeError, lambda: clf.oob_improvement_)
def test_oob_multilcass_iris():
# Check OOB improvement on multi-class dataset.
clf = GradientBoostingClassifier(n_estimators=100, loss='deviance',
random_state=1, subsample=0.5)
clf.fit(iris.data, iris.target)
score = clf.score(iris.data, iris.target)
assert score > 0.9, "Failed with subsample %.1f " \
"and score = %f" % (0.5, score)
assert clf.oob_improvement_.shape[0] == clf.n_estimators
# hard-coded regression test - change if modification in OOB computation
# FIXME: the following snippet does not yield the same results on 32 bits
# assert_array_almost_equal(clf.oob_improvement_[:5],
# np.array([12.68, 10.45, 8.18, 6.43, 5.13]),
# decimal=2)
def test_verbose_output():
# Check verbose=1 does not cause error.
from sklearn.externals.six.moves import cStringIO as StringIO
import sys
old_stdout = sys.stdout
sys.stdout = StringIO()
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
verbose=1, subsample=0.8)
clf.fit(X, y)
verbose_output = sys.stdout
sys.stdout = old_stdout
# check output
verbose_output.seek(0)
header = verbose_output.readline().rstrip()
# with OOB
true_header = ' '.join(['%10s'] + ['%16s'] * 3) % (
'Iter', 'Train Loss', 'OOB Improve', 'Remaining Time')
assert_equal(true_header, header)
n_lines = sum(1 for l in verbose_output.readlines())
# one for 1-10 and then 9 for 20-100
assert_equal(10 + 9, n_lines)
def test_more_verbose_output():
# Check verbose=2 does not cause error.
from sklearn.externals.six.moves import cStringIO as StringIO
import sys
old_stdout = sys.stdout
sys.stdout = StringIO()
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
verbose=2)
clf.fit(X, y)
verbose_output = sys.stdout
sys.stdout = old_stdout
# check output
verbose_output.seek(0)
header = verbose_output.readline().rstrip()
# no OOB
true_header = ' '.join(['%10s'] + ['%16s'] * 2) % (
'Iter', 'Train Loss', 'Remaining Time')
assert_equal(true_header, header)
n_lines = sum(1 for l in verbose_output.readlines())
# 100 lines for n_estimators==100
assert_equal(100, n_lines)
def test_warm_start():
# Test if warm start equals fit.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=200, max_depth=1)
est.fit(X, y)
est_ws = Cls(n_estimators=100, max_depth=1, warm_start=True)
est_ws.fit(X, y)
est_ws.set_params(n_estimators=200)
est_ws.fit(X, y)
assert_array_almost_equal(est_ws.predict(X), est.predict(X))
def test_warm_start_n_estimators():
# Test if warm start equals fit - set n_estimators.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=300, max_depth=1)
est.fit(X, y)
est_ws = Cls(n_estimators=100, max_depth=1, warm_start=True)
est_ws.fit(X, y)
est_ws.set_params(n_estimators=300)
est_ws.fit(X, y)
assert_array_almost_equal(est_ws.predict(X), est.predict(X))
def test_warm_start_max_depth():
# Test if possible to fit trees of different depth in ensemble.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=110, max_depth=2)
est.fit(X, y)
# last 10 trees have different depth
assert est.estimators_[0, 0].max_depth == 1
for i in range(1, 11):
assert est.estimators_[-i, 0].max_depth == 2
def test_warm_start_clear():
# Test if fit clears state.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1)
est.fit(X, y)
est_2 = Cls(n_estimators=100, max_depth=1, warm_start=True)
est_2.fit(X, y) # inits state
est_2.set_params(warm_start=False)
est_2.fit(X, y) # clears old state and equals est
assert_array_almost_equal(est_2.predict(X), est.predict(X))
def test_warm_start_zero_n_estimators():
# Test if warm start with zero n_estimators raises error
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=0)
assert_raises(ValueError, est.fit, X, y)
def test_warm_start_smaller_n_estimators():
# Test if warm start with smaller n_estimators raises error
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=99)
assert_raises(ValueError, est.fit, X, y)
def test_warm_start_equal_n_estimators():
# Test if warm start with equal n_estimators does nothing
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1)
est.fit(X, y)
est2 = clone(est)
est2.set_params(n_estimators=est.n_estimators, warm_start=True)
est2.fit(X, y)
assert_array_almost_equal(est2.predict(X), est.predict(X))
def test_warm_start_oob_switch():
# Test if oob can be turned on during warm start.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=110, subsample=0.5)
est.fit(X, y)
assert_array_equal(est.oob_improvement_[:100], np.zeros(100))
# the last 10 are not zeros
assert_array_equal(est.oob_improvement_[-10:] == 0.0,
np.zeros(10, dtype=np.bool))
def test_warm_start_oob():
# Test if warm start OOB equals fit.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=200, max_depth=1, subsample=0.5,
random_state=1)
est.fit(X, y)
est_ws = Cls(n_estimators=100, max_depth=1, subsample=0.5,
random_state=1, warm_start=True)
est_ws.fit(X, y)
est_ws.set_params(n_estimators=200)
est_ws.fit(X, y)
assert_array_almost_equal(est_ws.oob_improvement_[:100],
est.oob_improvement_[:100])
def early_stopping_monitor(i, est, locals):
"""Returns True on the 10th iteration. """
if i == 9:
return True
else:
return False
def test_monitor_early_stopping():
# Test if monitor return value works.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=20, max_depth=1, random_state=1, subsample=0.5)
est.fit(X, y, monitor=early_stopping_monitor)
assert_equal(est.n_estimators, 20) # this is not altered
assert_equal(est.estimators_.shape[0], 10)
assert_equal(est.train_score_.shape[0], 10)
assert_equal(est.oob_improvement_.shape[0], 10)
# try refit
est.set_params(n_estimators=30)
est.fit(X, y)
assert_equal(est.n_estimators, 30)
assert_equal(est.estimators_.shape[0], 30)
assert_equal(est.train_score_.shape[0], 30)
est = Cls(n_estimators=20, max_depth=1, random_state=1, subsample=0.5,
warm_start=True)
est.fit(X, y, monitor=early_stopping_monitor)
assert_equal(est.n_estimators, 20)
assert_equal(est.estimators_.shape[0], 10)
assert_equal(est.train_score_.shape[0], 10)
assert_equal(est.oob_improvement_.shape[0], 10)
# try refit
est.set_params(n_estimators=30, warm_start=False)
est.fit(X, y)
assert_equal(est.n_estimators, 30)
assert_equal(est.train_score_.shape[0], 30)
assert_equal(est.estimators_.shape[0], 30)
assert_equal(est.oob_improvement_.shape[0], 30)
def test_complete_classification():
# Test greedy trees with max_depth + 1 leafs.
from sklearn.tree._tree import TREE_LEAF
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
k = 4
est = GradientBoostingClassifier(n_estimators=20, max_depth=None,
random_state=1, max_leaf_nodes=k + 1)
est.fit(X, y)
tree = est.estimators_[0, 0].tree_
assert_equal(tree.max_depth, k)
assert_equal(tree.children_left[tree.children_left == TREE_LEAF].shape[0],
k + 1)
def test_complete_regression():
# Test greedy trees with max_depth + 1 leafs.
from sklearn.tree._tree import TREE_LEAF
k = 4
est = GradientBoostingRegressor(n_estimators=20, max_depth=None,
random_state=1, max_leaf_nodes=k + 1)
est.fit(boston.data, boston.target)
tree = est.estimators_[-1, 0].tree_
assert_equal(tree.children_left[tree.children_left == TREE_LEAF].shape[0],
k + 1)
def test_zero_estimator_reg():
# Test if ZeroEstimator works for regression.
est = GradientBoostingRegressor(n_estimators=20, max_depth=1,
random_state=1, init=ZeroEstimator())
est.fit(boston.data, boston.target)
y_pred = est.predict(boston.data)
mse = mean_squared_error(boston.target, y_pred)
assert_almost_equal(mse, 33.0, decimal=0)
est = GradientBoostingRegressor(n_estimators=20, max_depth=1,
random_state=1, init='zero')
est.fit(boston.data, boston.target)
y_pred = est.predict(boston.data)
mse = mean_squared_error(boston.target, y_pred)
assert_almost_equal(mse, 33.0, decimal=0)
est = GradientBoostingRegressor(n_estimators=20, max_depth=1,
random_state=1, init='foobar')
assert_raises(ValueError, est.fit, boston.data, boston.target)
def test_zero_estimator_clf():
# Test if ZeroEstimator works for classification.
X = iris.data
y = np.array(iris.target)
est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
random_state=1, init=ZeroEstimator())
est.fit(X, y)
assert est.score(X, y) > 0.96
est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
random_state=1, init='zero')
est.fit(X, y)
assert est.score(X, y) > 0.96
# binary clf
mask = y != 0
y[mask] = 1
y[~mask] = 0
est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
random_state=1, init='zero')
est.fit(X, y)
assert est.score(X, y) > 0.96
est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
random_state=1, init='foobar')
assert_raises(ValueError, est.fit, X, y)
def test_max_leaf_nodes_max_depth():
    # Test precedence of max_leaf_nodes over max_depth.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
all_estimators = [GradientBoostingRegressor,
GradientBoostingClassifier]
k = 4
for GBEstimator in all_estimators:
est = GBEstimator(max_depth=1, max_leaf_nodes=k).fit(X, y)
tree = est.estimators_[0, 0].tree_
assert_greater(tree.max_depth, 1)
est = GBEstimator(max_depth=1).fit(X, y)
tree = est.estimators_[0, 0].tree_
assert_equal(tree.max_depth, 1)
def test_warm_start_wo_nestimators_change():
# Test if warm_start does nothing if n_estimators is not changed.
# Regression test for #3513.
clf = GradientBoostingClassifier(n_estimators=10, warm_start=True)
clf.fit([[0, 1], [2, 3]], [0, 1])
assert clf.estimators_.shape[0] == 10
clf.fit([[0, 1], [2, 3]], [0, 1])
assert clf.estimators_.shape[0] == 10
def test_probability_exponential():
# Predict probabilities.
clf = GradientBoostingClassifier(loss='exponential',
n_estimators=100, random_state=1)
assert_raises(ValueError, clf.predict_proba, T)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
# check if probabilities are in [0, 1].
y_proba = clf.predict_proba(T)
assert np.all(y_proba >= 0.0)
assert np.all(y_proba <= 1.0)
score = clf.decision_function(T).ravel()
assert_array_almost_equal(y_proba[:, 1],
1.0 / (1.0 + np.exp(-2 * score)))
# derive predictions from probabilities
y_pred = clf.classes_.take(y_proba.argmax(axis=1), axis=0)
assert_array_equal(y_pred, true_result)
def test_non_uniform_weights_toy_edge_case_reg():
X = [[1, 0],
[1, 0],
[1, 0],
[0, 1]]
y = [0, 0, 1, 0]
# ignore the first 2 training samples by setting their weight to 0
sample_weight = [0, 0, 1, 1]
for loss in ('huber', 'ls', 'lad', 'quantile'):
gb = GradientBoostingRegressor(learning_rate=1.0, n_estimators=2, loss=loss)
gb.fit(X, y, sample_weight=sample_weight)
assert_greater(gb.predict([[1, 0]])[0], 0.5)
def test_non_uniform_weights_toy_min_weight_leaf():
# Regression test for issue #4447
X = [[1, 0],
[1, 0],
[1, 0],
[0, 1],
]
y = [0, 0, 1, 0]
# ignore the first 2 training samples by setting their weight to 0
sample_weight = [0, 0, 1, 1]
gb = GradientBoostingRegressor(n_estimators=5, min_weight_fraction_leaf=0.1)
gb.fit(X, y, sample_weight=sample_weight)
assert_true(gb.predict([[1, 0]])[0] > 0.5)
assert_almost_equal(gb.estimators_[0, 0].splitter.min_weight_leaf, 0.2)
def test_non_uniform_weights_toy_edge_case_clf():
X = [[1, 0],
[1, 0],
[1, 0],
[0, 1]]
y = [0, 0, 1, 0]
# ignore the first 2 training samples by setting their weight to 0
sample_weight = [0, 0, 1, 1]
for loss in ('deviance', 'exponential'):
gb = GradientBoostingClassifier(n_estimators=5)
gb.fit(X, y, sample_weight=sample_weight)
assert_array_equal(gb.predict([[1, 0]]), [1])
| bsd-3-clause |
Winterflower/mdf | mdf/regression/differs.py | 3 | 18094 | """
Classes for collecting data and determining differences,
for use with mdf.regression.run.
"""
import sys
import os
from ..builders import DataFrameBuilder
from ..nodes import MDFNode
import numpy as np
import pandas as pa
import xlwt
import logging
from datetime import datetime
_log = logging.getLogger(__name__)
if sys.version_info[0] > 2:
basestring = str
def _to_range(row, col, sheet=None):
"""returns an Excel range string, e.g. 0, 0 => A1"""
cell = ""
while col >= 26:
cell = "%s%s" % (chr(ord("A") + (col % 26)), cell)
col = (col // 26) - 1
cell = "%s%s" % (chr(ord("A") + (col % 26)), cell)
cell += "%d" % (row + 1)
if sheet is not None:
cell = "%s!%s" % (sheet.name, cell)
return cell
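# Worked examples for _to_range above (traced from the code; sheet=None):
#   _to_range(0, 0)  -> "A1"
#   _to_range(2, 27) -> "AB3"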
class Differ(object):
"""
    Differ objects are the same as mdf builders, with the
addition that they have the ability to diff result
sets.
When mdf.regression.run is called, each differ
object is called for each date in the regression
with the context being evaluated.
When finished, lhs.diff(rhs, lhs_ctx, rhs_ctx) is
called to diff the data collected in the regression
differ object.
"""
def diff(self, rhs, lhs_ctx, rhs_ctx):
"""
returns a tuple:
(is_different, brief_description, long_description, filename)
brief_description should be a one line human readable string
that describes any differences found, suitable for inclusion
in a diff report.
long_description describes any difference found in more detail
and may be included in the details section of a regression
report.
filename may be None or the name of a file containing a more
detailed diff report.
"""
raise NotImplementedError
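# Minimal sketch of the diff() contract documented above (illustrative only; the
# data-collection side, which follows the mdf builder protocol, is omitted):
#
#     class NeverDifferentDiffer(Differ):
#         def diff(self, rhs, lhs_ctx, rhs_ctx):
#             return (False, "no differences", "", None)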
class DataFrameDiffer(DataFrameBuilder, Differ):
"""
Subclass of Differ and DataFrameBuilder to be used
for collecting a dataframe of node values and then
comparing with another.
"""
def __init__(self, nodes, dtype=object, sparse_fill_value=None, xls_filename=None):
self.nodes_or_names = nodes
nodes = self._get_nodes(nodes)
DataFrameBuilder.__init__(self, nodes, dtype=dtype, sparse_fill_value=sparse_fill_value)
Differ.__init__(self)
self.__tolerances = {}
self.__xls_filename = xls_filename
#
# This is so nodes can be passed in as a list of names (including module/package/class)
# and the nodes will be found using that, instead of passing a node instance in.
#
# When the differ is pickled the node may exist in the target environment but the pickled
# node format remembers what class the node was implemented on as well as the class it's
# bound too. If that's different in two instances it won't find the node and so the
# regression will fail. By always getting the nodes by name it will get the correct node.
#
def _get_nodes(self, nodes_or_names):
nodes = []
for n in nodes_or_names:
if isinstance(n, basestring) and "." in n:
name = n
# import the module if necessary
components = name.split(".")
modulename = components[0]
try:
__import__(modulename)
except ImportError:
pass
module = sys.modules.get(modulename, None)
while modulename in sys.modules and len(components) > 1:
module = sys.modules[modulename]
components.pop(0)
modulename = ".".join((modulename, components[0]))
try:
__import__(modulename)
except ImportError:
pass
if not components or not module:
raise Exception("Node not found: '%s'" % name)
# get the class and then the node from the module
obj = module
while components:
attr = components.pop(0)
obj = getattr(obj, attr)
n = obj
# check by this point we have a node
if not isinstance(n, MDFNode):
raise Exception("Node not found: %s" % n)
nodes.append(n)
return nodes
def __setstate__(self, state):
# fix up the nodes again from their names
state["nodes"] = self._get_nodes(state["nodes_or_names"])
self.__dict__.update(state)
def __getstate__(self):
# don't pickle the nodes - get them again when unpickling
state = dict(self.__dict__)
state.pop("nodes", None)
return state
def set_tolerance(self, tolerance, abs=True, node=None):
"""
Sets the tolerance for comparing values.
When abs is True a difference is considered
significant when::
abs(lhs - rhs) > tolerance
Or if abs is False a difference is considered
significant when::
abs((lhs / rhs) - 1.0) > tolerance
If node is None the tolerance applies to all values,
        otherwise the tolerance only applies to values
derived from that specific node.
"""
assert node is None or node in self.nodes
self.__tolerances[node] = (tolerance, abs)
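    # For illustration only (not in the original source): with the default
    # absolute form, differ.set_tolerance(1e-6) flags |lhs - rhs| > 1e-6 for
    # every node, while differ.set_tolerance(0.01, abs=False, node=some_node)
    # flags relative differences above 1% for `some_node` only.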
def get_tolerance(self, node=None):
"""
returns the tolerance set using set_tolerance
as a tuple (tolerance, abs)
"""
tolerance = self.__tolerances.get(node, None)
if tolerance is None:
tolerance = self.__tolerances.get(None, (0.0, True))
return tolerance
def diff(self, other, ctx, other_ctx):
"""
Returns a tuple (is_different, brief_description, long_description, detail_filename)
that describes the difference between self and other.
If applicable and if a path was passed to the ctor then additional details
describing differences will be written to a file, and that filename is
returned as part of the diff.
"""
lhs_data = self.get_dataframe(ctx)
rhs_data = other.get_dataframe(other_ctx)
# Coerce python datetime indexes to pandas DatetimeIndex
# TODO: Remove this once pandas 0.7.3 compatibility is no longer needed
def _coerce_dt_index(index):
if len(index) > 0 and (not isinstance(index, pa.DatetimeIndex)):
# If first and last index entries are python datetimes, assume that the index contains only datetimes
if isinstance(index[0], datetime) and isinstance(index[-1], datetime):
return pa.DatetimeIndex(index)
# Return the original index if no modifications were done
return index
lhs_data.index = _coerce_dt_index(lhs_data.index)
rhs_data.index = _coerce_dt_index(rhs_data.index)
# diff each node's values individually
is_different = False
brief_description = ""
long_description = ""
different_nodes = []
details_filename = None
def _cols_are_similar(lhs_col, rhs_col):
lhs_col, rhs_col = str(lhs_col), str(rhs_col)
if "." in lhs_col:
unused, lhs_col = lhs_col.rsplit(".", 1)
if "." in rhs_col:
unused, rhs_col = rhs_col.rsplit(".", 1)
return lhs_col.lower() == rhs_col.lower()
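        # Illustrative behaviour (comment added for clarity, not original code):
        # _cols_are_similar("pricing.spot", "Spot") is True because only the
        # text after the last "." is compared, case-insensitively.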
for node in self.nodes:
lhs_columns = sorted(self.get_columns(node, ctx))
rhs_columns = sorted(other.get_columns(node, other_ctx))
# check the columns are the same
if len(lhs_columns) != len(rhs_columns) \
or (np.array(lhs_columns) != np.array(rhs_columns)).any():
is_different = True
description = "%s has column differences" % node.name
description += "\n" + "-" * len(description) + "\n\n"
max_columns = max(len(lhs_columns), len(rhs_columns))
lhs_tmp_cols = list(lhs_columns) + [None] * (max_columns - len(lhs_columns))
rhs_tmp_cols = list(rhs_columns) + [None] * (max_columns - len(rhs_columns))
cols_are_similar = len(lhs_columns) == len(rhs_columns)
for i, (lhs_col, rhs_col) in enumerate(zip(lhs_tmp_cols, rhs_tmp_cols)):
if lhs_col != rhs_col:
description += "%d: %s != %s\n" % (i, lhs_col, rhs_col)
if not _cols_are_similar(lhs_col, rhs_col):
cols_are_similar = False
long_description += description + "\n\n"
# if the cols aren't even similar skip the rest of the checks
if not cols_are_similar:
long_description += "**Not diffing data because of column differences**\n\n"
different_nodes.append(node)
continue
lhs_df = lhs_data[lhs_columns]
rhs_df = rhs_data[rhs_columns]
# check the indices are the same
if (np.array(lhs_df.index) != np.array(rhs_df.index)).any():
is_different = True
different_nodes.append(node)
mask = np.array(rhs_data.index) != np.array(lhs_data.index)
lhs_diff_dates = lhs_data.index[mask]
rhs_diff_dates = rhs_data.index[mask]
description = "%s has index differences" % node.name
description += "\n" + "-" * len(description) + "\n\n"
description += "indexes are different starting at %s != %s" % (
lhs_diff_dates[0],
rhs_diff_dates[0])
long_description += description + "\n\n"
continue
#
# columns and indices are the same so check the contents
#
try:
lhs_df = lhs_df.astype(float)
except TypeError:
pass
try:
rhs_df = rhs_df.astype(float)
except TypeError:
pass
tolerance, is_abs = self.get_tolerance(node)
if is_abs:
diffs = np.abs(lhs_df - rhs_df)
mask = (diffs > tolerance).values
else:
diffs = np.abs((lhs_df / rhs_df) - 1.0)
mask = (diffs > tolerance).values
# don't include differences where both sides are NaN or 0.0
try:
mask &= ~((lhs_df == 0.0) & (rhs_df == 0.0)).values
mask &= ~(np.isnan(lhs_df) & np.isnan(rhs_df)).values
except TypeError:
pass
# do include differences where one side is NaN but the other isn't
try:
mask |= np.isnan(lhs_df).values & ~np.isnan(rhs_df).values
mask |= np.isnan(rhs_df).values & ~np.isnan(lhs_df).values
except TypeError:
pass
if mask.any():
is_different = True
different_nodes.append(node)
row_mask = np.apply_along_axis(np.any, 1, mask)
diffs = diffs[row_mask]
description = "%s has %d differences" % (node.name, len(diffs.index))
description += "\n" + "-" * len(description) + "\n\n"
description += "tolerance = %f%s\n\n" % (
tolerance if is_abs else tolerance * 100.0,
"%" if not is_abs else "")
lhs_diffs = lhs_df[row_mask]
rhs_diffs = rhs_df[row_mask]
# convert the lhs and rhs to strings
lhs_lines = lhs_diffs.to_string().splitlines()
rhs_lines = rhs_diffs.to_string().splitlines()
# pad so they're the same length
lhs_lines += ["" * max(len(rhs_lines) - len(lhs_lines), 0)]
rhs_lines += ["" * max(len(lhs_lines) - len(rhs_lines), 0)]
max_lines = 10
mid = min(len(lhs_lines), max_lines) // 2
# format them on the same lines
lines = []
fmt = "%%-%ds %%-2s %%s" % max([len(x) for x in lhs_lines])
for i, (l, r) in enumerate(zip(lhs_lines, rhs_lines)):
if i == mid:
lines.append(fmt % (l, "!=", r))
else:
lines.append(fmt % (l, " ", r))
description += "\n".join(lines[:max_lines])
if len(lines) > max_lines:
description += "\n..."
long_description += description + "\n\n"
if is_different:
node_names = [x.short_name for x in different_nodes]
_log.debug("Differences found in nodes: %s" % ", ".join(node_names))
if len(different_nodes) == 0:
brief_description = "No data differences"
long_description += "No data differences\n\n"
elif len(different_nodes) == 1:
brief_description = "%s has differences" % node_names[0]
else:
brief_description = ", ".join(node_names[:-1])
brief_description += " and %s have differences" % node_names[-1]
if self.__xls_filename and len(different_nodes) > 0:
_log.debug("Writing differences to Excel file '%s'" % self.__xls_filename)
details_filename = self.__xls_filename
self.__write_xls(other, different_nodes, lhs_data, rhs_data, details_filename, ctx, other_ctx)
return (is_different, brief_description, long_description, details_filename)
def __write_xls(self, rhs_differ, different_nodes, lhs_data, rhs_data, filename, lhs_ctx, rhs_ctx):
"""write the diffs to a spreadsheet"""
wb = xlwt.Workbook()
date_style = xlwt.easyxf(num_format_str='YYYY-MM-DD')
nsheets = 0
for node in different_nodes:
lhs_columns = sorted(self.get_columns(node, lhs_ctx))
lhs_df = lhs_data[lhs_columns]
rhs_columns = sorted(rhs_differ.get_columns(node, rhs_ctx))
rhs_df = rhs_data[rhs_columns]
if len(lhs_df.columns) > 255 or len(rhs_df.columns) > 255: # xlwt has a limit of 256 columns
# just dump data into two separate CSV if its too big for a nice XLS report
fname = "%s__%s" % (node.short_name, os.path.splitext(os.path.basename(filename))[0])
csv_fpath = os.path.join(os.path.dirname(filename), fname)
_log.info("Node %s has mare than 255 columns, can't use xlwt, writing CSV to "
"%s[_LHS|_RHS].csv" % (node.name, csv_fpath))
lhs_df.to_csv(csv_fpath+"_LHS.csv")
rhs_df.to_csv(csv_fpath+"_RHS.csv")
else:
_log.info("Writing Excel sheet for %s" % node.name)
nsheets += 1
diffs_ws = wb.add_sheet(("%s_DIFFS" % node.short_name)[-31:])
lhs_ws = wb.add_sheet(("%s_LHS" % node.short_name)[-31:])
rhs_ws = wb.add_sheet(("%s_RHS" % node.short_name)[-31:])
for ws, df in ((lhs_ws, lhs_df), (rhs_ws, rhs_df)):
for row, value in enumerate(df.index):
ws.write(row + 1, 0, value, date_style)
for col_i, col_name in enumerate(df.columns):
ws.write(0, col_i + 1, str(col_name))
col = df[col_name]
for row_i, value in enumerate(col):
if np.isnan(value):
ws.row(row_i + 1).set_cell_error(col_i + 1, "#NUM!")
else:
ws.write(row_i + 1, col_i + 1, value)
max_cols = max(len(lhs_columns), len(rhs_columns))
max_rows = max(len(lhs_df.index), len(rhs_df.index))
tolerance, is_abs = self.get_tolerance(node)
for row, value in enumerate(lhs_df.index):
diffs_ws.write(row + 1, 0,
xlwt.Formula("IF(EXACT(%(l)s,%(r)s),%(l)s,\"ERROR\")" % {
"l" : _to_range(row + 1, 0, lhs_ws),
"r" : _to_range(row + 1, 0, rhs_ws)}),
date_style)
for col_i, col_name in enumerate(lhs_df.columns):
diffs_ws.write(0, col_i + 1,
xlwt.Formula("IF(EXACT(%(l)s,%(r)s),%(l)s,\"ERROR\")" % {
"l" : _to_range(0, col_i + 1, lhs_ws),
"r" : _to_range(0, col_i + 1, rhs_ws)}))
for col_i in xrange(1, max_cols + 1):
for row_i in xrange(1, max_rows + 1):
if is_abs:
diffs_ws.write(row_i,
col_i,
xlwt.Formula("ABS(%s-%s)" % (_to_range(row_i, col_i, lhs_ws),
_to_range(row_i, col_i, rhs_ws))))
else:
diffs_ws.write(row_i,
col_i,
xlwt.Formula("ABS((%s/%s)-1)" % (_to_range(row_i, col_i, lhs_ws),
_to_range(row_i, col_i, rhs_ws))))
if nsheets:
wb.save(filename)
| mit |
carrillo/scikit-learn | benchmarks/bench_plot_neighbors.py | 287 | 6433 | """
Plot the scaling of the nearest neighbors algorithms with k, D, and N
"""
from time import time
import numpy as np
import pylab as pl
from matplotlib import ticker
from sklearn import neighbors, datasets
def get_data(N, D, dataset='dense'):
if dataset == 'dense':
np.random.seed(0)
return np.random.random((N, D))
elif dataset == 'digits':
X = datasets.load_digits().data
i = np.argsort(X[0])[::-1]
X = X[:, i]
return X[:N, :D]
else:
raise ValueError("invalid dataset: %s" % dataset)
def barplot_neighbors(Nrange=2 ** np.arange(1, 11),
Drange=2 ** np.arange(7),
krange=2 ** np.arange(10),
N=1000,
D=64,
k=5,
leaf_size=30,
dataset='digits'):
algorithms = ('kd_tree', 'brute', 'ball_tree')
fiducial_values = {'N': N,
'D': D,
'k': k}
#------------------------------------------------------------
# varying N
N_results_build = dict([(alg, np.zeros(len(Nrange)))
for alg in algorithms])
N_results_query = dict([(alg, np.zeros(len(Nrange)))
for alg in algorithms])
for i, NN in enumerate(Nrange):
print("N = %i (%i out of %i)" % (NN, i + 1, len(Nrange)))
X = get_data(NN, D, dataset)
for algorithm in algorithms:
nbrs = neighbors.NearestNeighbors(n_neighbors=min(NN, k),
algorithm=algorithm,
leaf_size=leaf_size)
t0 = time()
nbrs.fit(X)
t1 = time()
nbrs.kneighbors(X)
t2 = time()
N_results_build[algorithm][i] = (t1 - t0)
N_results_query[algorithm][i] = (t2 - t1)
#------------------------------------------------------------
# varying D
D_results_build = dict([(alg, np.zeros(len(Drange)))
for alg in algorithms])
D_results_query = dict([(alg, np.zeros(len(Drange)))
for alg in algorithms])
for i, DD in enumerate(Drange):
print("D = %i (%i out of %i)" % (DD, i + 1, len(Drange)))
X = get_data(N, DD, dataset)
for algorithm in algorithms:
nbrs = neighbors.NearestNeighbors(n_neighbors=k,
algorithm=algorithm,
leaf_size=leaf_size)
t0 = time()
nbrs.fit(X)
t1 = time()
nbrs.kneighbors(X)
t2 = time()
D_results_build[algorithm][i] = (t1 - t0)
D_results_query[algorithm][i] = (t2 - t1)
#------------------------------------------------------------
# varying k
k_results_build = dict([(alg, np.zeros(len(krange)))
for alg in algorithms])
k_results_query = dict([(alg, np.zeros(len(krange)))
for alg in algorithms])
    # use the fiducial D here; DD is the value left over from the previous loop
    X = get_data(N, D, dataset)
for i, kk in enumerate(krange):
print("k = %i (%i out of %i)" % (kk, i + 1, len(krange)))
for algorithm in algorithms:
nbrs = neighbors.NearestNeighbors(n_neighbors=kk,
algorithm=algorithm,
leaf_size=leaf_size)
t0 = time()
nbrs.fit(X)
t1 = time()
nbrs.kneighbors(X)
t2 = time()
k_results_build[algorithm][i] = (t1 - t0)
k_results_query[algorithm][i] = (t2 - t1)
pl.figure(figsize=(8, 11))
for (sbplt, vals, quantity,
build_time, query_time) in [(311, Nrange, 'N',
N_results_build,
N_results_query),
(312, Drange, 'D',
D_results_build,
D_results_query),
(313, krange, 'k',
k_results_build,
k_results_query)]:
ax = pl.subplot(sbplt, yscale='log')
pl.grid(True)
tick_vals = []
tick_labels = []
bottom = 10 ** np.min([min(np.floor(np.log10(build_time[alg])))
for alg in algorithms])
for i, alg in enumerate(algorithms):
xvals = 0.1 + i * (1 + len(vals)) + np.arange(len(vals))
width = 0.8
c_bar = pl.bar(xvals, build_time[alg] - bottom,
width, bottom, color='r')
q_bar = pl.bar(xvals, query_time[alg],
width, build_time[alg], color='b')
tick_vals += list(xvals + 0.5 * width)
tick_labels += ['%i' % val for val in vals]
pl.text((i + 0.02) / len(algorithms), 0.98, alg,
transform=ax.transAxes,
ha='left',
va='top',
bbox=dict(facecolor='w', edgecolor='w', alpha=0.5))
pl.ylabel('Time (s)')
ax.xaxis.set_major_locator(ticker.FixedLocator(tick_vals))
ax.xaxis.set_major_formatter(ticker.FixedFormatter(tick_labels))
for label in ax.get_xticklabels():
label.set_rotation(-90)
label.set_fontsize(10)
title_string = 'Varying %s' % quantity
descr_string = ''
for s in 'NDk':
if s == quantity:
pass
else:
descr_string += '%s = %i, ' % (s, fiducial_values[s])
descr_string = descr_string[:-2]
pl.text(1.01, 0.5, title_string,
transform=ax.transAxes, rotation=-90,
ha='left', va='center', fontsize=20)
pl.text(0.99, 0.5, descr_string,
transform=ax.transAxes, rotation=-90,
ha='right', va='center')
pl.gcf().suptitle("%s data set" % dataset.capitalize(), fontsize=16)
pl.figlegend((c_bar, q_bar), ('construction', 'N-point query'),
'upper right')
if __name__ == '__main__':
barplot_neighbors(dataset='digits')
barplot_neighbors(dataset='dense')
pl.show()
| bsd-3-clause |
sumspr/scikit-learn | sklearn/decomposition/tests/test_factor_analysis.py | 222 | 3055 | # Author: Christian Osendorfer <[email protected]>
# Alexandre Gramfort <[email protected]>
# Licence: BSD3
import numpy as np
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils import ConvergenceWarning
from sklearn.decomposition import FactorAnalysis
def test_factor_analysis():
# Test FactorAnalysis ability to recover the data covariance structure
rng = np.random.RandomState(0)
n_samples, n_features, n_components = 20, 5, 3
# Some random settings for the generative model
W = rng.randn(n_components, n_features)
# latent variable of dim 3, 20 of it
h = rng.randn(n_samples, n_components)
# using gamma to model different noise variance
# per component
noise = rng.gamma(1, size=n_features) * rng.randn(n_samples, n_features)
# generate observations
# wlog, mean is 0
X = np.dot(h, W) + noise
assert_raises(ValueError, FactorAnalysis, svd_method='foo')
fa_fail = FactorAnalysis()
fa_fail.svd_method = 'foo'
assert_raises(ValueError, fa_fail.fit, X)
fas = []
for method in ['randomized', 'lapack']:
fa = FactorAnalysis(n_components=n_components, svd_method=method)
fa.fit(X)
fas.append(fa)
X_t = fa.transform(X)
assert_equal(X_t.shape, (n_samples, n_components))
assert_almost_equal(fa.loglike_[-1], fa.score_samples(X).sum())
assert_almost_equal(fa.score_samples(X).mean(), fa.score(X))
diff = np.all(np.diff(fa.loglike_))
        assert_greater(diff, 0., 'Log likelihood did not increase')
# Sample Covariance
scov = np.cov(X, rowvar=0., bias=1.)
# Model Covariance
mcov = fa.get_covariance()
diff = np.sum(np.abs(scov - mcov)) / W.size
assert_less(diff, 0.1, "Mean absolute difference is %f" % diff)
fa = FactorAnalysis(n_components=n_components,
noise_variance_init=np.ones(n_features))
assert_raises(ValueError, fa.fit, X[:, :2])
f = lambda x, y: np.abs(getattr(x, y)) # sign will not be equal
fa1, fa2 = fas
for attr in ['loglike_', 'components_', 'noise_variance_']:
assert_almost_equal(f(fa1, attr), f(fa2, attr))
fa1.max_iter = 1
fa1.verbose = True
assert_warns(ConvergenceWarning, fa1.fit, X)
# Test get_covariance and get_precision with n_components == n_features
# with n_components < n_features and with n_components == 0
for n_components in [0, 2, X.shape[1]]:
fa.n_components = n_components
fa.fit(X)
cov = fa.get_covariance()
precision = fa.get_precision()
assert_array_almost_equal(np.dot(cov, precision),
np.eye(X.shape[1]), 12)
| bsd-3-clause |
pypot/scikit-learn | sklearn/manifold/tests/test_mds.py | 324 | 1862 | import numpy as np
from numpy.testing import assert_array_almost_equal
from nose.tools import assert_raises
from sklearn.manifold import mds
def test_smacof():
# test metric smacof using the data of "Modern Multidimensional Scaling",
# Borg & Groenen, p 154
sim = np.array([[0, 5, 3, 4],
[5, 0, 2, 2],
[3, 2, 0, 1],
[4, 2, 1, 0]])
Z = np.array([[-.266, -.539],
[.451, .252],
[.016, -.238],
[-.200, .524]])
X, _ = mds.smacof(sim, init=Z, n_components=2, max_iter=1, n_init=1)
X_true = np.array([[-1.415, -2.471],
[1.633, 1.107],
[.249, -.067],
[-.468, 1.431]])
assert_array_almost_equal(X, X_true, decimal=3)
def test_smacof_error():
# Not symmetric similarity matrix:
sim = np.array([[0, 5, 9, 4],
[5, 0, 2, 2],
[3, 2, 0, 1],
[4, 2, 1, 0]])
assert_raises(ValueError, mds.smacof, sim)
# Not squared similarity matrix:
sim = np.array([[0, 5, 9, 4],
[5, 0, 2, 2],
[4, 2, 1, 0]])
assert_raises(ValueError, mds.smacof, sim)
# init not None and not correct format:
sim = np.array([[0, 5, 3, 4],
[5, 0, 2, 2],
[3, 2, 0, 1],
[4, 2, 1, 0]])
Z = np.array([[-.266, -.539],
[.016, -.238],
[-.200, .524]])
assert_raises(ValueError, mds.smacof, sim, init=Z, n_init=1)
def test_MDS():
sim = np.array([[0, 5, 3, 4],
[5, 0, 2, 2],
[3, 2, 0, 1],
[4, 2, 1, 0]])
mds_clf = mds.MDS(metric=False, n_jobs=3, dissimilarity="precomputed")
mds_clf.fit(sim)
| bsd-3-clause |
jskDr/jamespy_py3 | cell/classification.py | 1 | 11443 | # classification.py
import os
import pandas as pd
import numpy as np
# import seaborn as sns
import matplotlib.pyplot as plt
# from sklearn import manifold, svm, model_selection, tree, metrics, cluster, ensemble
# from sklearn.model_selection import GridSearchCV
from scipy.io import loadmat
from time import time
from imblearn.under_sampling import RandomUnderSampler
# import kutil
# import kgrid
# import kkeras, kkeras_cv
import kcellml
# import jmath
import kmds
# from kcellml import pd_clsf2_by_clst, pd_clsf2_by_yint
def read_data(fname, Da, Db):
"""
Read data for processing
"""
Data = loadmat(fname)
print(Data.keys())
X2part = np.concatenate([Data[Da], Data[Db]], axis=0)
print(X2part.shape)
y = np.array([0] * Data[Da].shape[0] + [1] * Data[Db].shape[0])
print(y.shape)
return X2part, y
def get_X_y2_nb_classes(X2part, y):
X = X2part
y2 = y.copy()
# plt.hist( y2)
nb_classes = len(set(y2))
return X, y2, nb_classes
def _do_classification_r0(X, y2, nb_classes, M=10, N=10):
"""
Perform classification
"""
defalut_param_d = {
'SVC:C': 1.93, 'SVC:gamma': 0.037,
'RF:n_estimators': 100, 'RF:oob_score': True
}
kcellml.setparam(defalut_param_d)
clsf2_by_yint = kcellml.GET_clsf2_by_yint(nb_classes, confusion_matric_return=True,
matthews_corrcoef_return=True)
sc_ll = []
cf_lll = []
mc_ll = []
dt_agg = 0
for i_rs in range(N):
print("i_rs", i_rs)
Xeq, y2eq = RandomUnderSampler().fit_sample(X, y2)
st = time()
for i_sp in range(M):
# print( "i_sp", i_sp)
sc_cf_lx = clsf2_by_yint(Xeq, y2eq, test_size=0.3, disp=False)
sc_ll.append(sc_cf_lx[:5])
cf_lll.append(sc_cf_lx[5:10])
mc_ll.append(sc_cf_lx[10:])
ed = time()
dt = ed - st
dt_agg += dt
print("Elapsed: {:.2f}s".format(dt),
"Remaind time: {:.2f}m".format((N - i_rs - 1) * (dt_agg) / (i_rs + 1) / 60))
sc_a2d = np.array(sc_ll)
print(sc_a2d.shape)
cf_a3d = np.array(cf_lll)
print(cf_a3d.shape)
mc_a2d = np.array(mc_ll)
print(mc_a2d.shape)
return sc_a2d, mc_a2d, cf_a3d
def do_classification(X, y2, nb_classes, M=10, N=10):
"""
Perform classification
"""
defalut_param_d = {
'SVC:C': 1.93, 'SVC:gamma': 0.037,
'RF:n_estimators': 100, 'RF:oob_score': True
}
kcellml.setparam(defalut_param_d)
if nb_classes == 2:
clsf2_by_yint = kcellml.GET_clsf2_by_yint(nb_classes,
confusion_matric_return=True,
matthews_corrcoef_return=True)
elif nb_classes > 2:
clsf2_by_yint = kcellml.GET_clsf2_by_yint(nb_classes,
confusion_matric_return=True,
matthews_corrcoef_return=False)
else:
raise ValueError('nb_classes should be equal to or larger than 2.')
sc_ll = []
cf_lll = []
mc_ll = []
dt_agg = 0
for i_rs in range(N):
print("i_rs", i_rs)
Xeq, y2eq = RandomUnderSampler().fit_sample(X, y2)
st = time()
for i_sp in range(M):
# print( "i_sp", i_sp)
sc_cf_lx = clsf2_by_yint(Xeq, y2eq, test_size=0.3, disp=False)
sc_ll.append(sc_cf_lx[:5])
cf_lll.append(sc_cf_lx[5:10])
if nb_classes == 2:
mc_ll.append(sc_cf_lx[10:])
ed = time()
dt = ed - st
dt_agg += dt
print("Elapsed: {:.2f}s".format(dt),
"Remaind time: {:.2f}m".format((N - i_rs - 1) * (dt_agg) / (i_rs + 1) / 60))
sc_a2d = np.array(sc_ll)
print(sc_a2d.shape)
cf_a3d = np.array(cf_lll)
print(cf_a3d.shape)
if nb_classes == 2:
mc_a2d = np.array(mc_ll)
print(mc_a2d.shape)
return sc_a2d, mc_a2d, cf_a3d
else:
return sc_a2d, cf_a3d
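# Sketch of the expected call pattern (mirrors run()/run_multiple_classes()
# further down; the shapes of the returned arrays depend on M, N and the five
# classifiers):
#
#     X, y2, nb_classes = get_X_y2_nb_classes(X2part, y)
#     if nb_classes == 2:
#         sc_a2d, mc_a2d, cf_a3d = do_classification(X, y2, nb_classes, M=2, N=2)
#     else:
#         sc_a2d, cf_a3d = do_classification(X, y2, nb_classes, M=2, N=2)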
def _save_result_r0(fold, sc_a2d, mc_a2d, cf_a3d):
if not os.path.exists(fold):
os.mkdir(fold)
# os.mkdir(fold+'/sheet')
fold += '/'
plt.boxplot(sc_a2d)
plt.show()
print('Accuracy', ["DT", "SVC", "DNN", "CDNN", "RF"])
print(np.average(sc_a2d, axis=0))
plt.boxplot(mc_a2d)
plt.show()
print('Matthews Corrcoef', ["DT", "SVC", "DNN", "CDNN", "RF"])
print(np.average(mc_a2d, axis=0))
np.save(fold + 'sc_a2d', sc_a2d)
np.save(fold + 'cf_a3d', cf_a3d)
np.save(fold + 'mc_a2d', mc_a2d)
sc_df = pd.DataFrame(sc_a2d, columns=["DT", "SVC", "DNN", "CDNN", "RF"])
# sc_df.plot(kind='bar')
# plt.show()
sc_df.to_csv(fold + 'sheet_sc_a2d.csv')
sc_df.head()
mc_df = pd.DataFrame(mc_a2d, columns=["DT", "SVC", "DNN", "CDNN", "RF"])
# mc_df.plot(kind='bar')
# plt.show()
mc_df.to_csv(fold + 'sheet_mc_a2d.csv')
mc_df.head()
cf_a3d_avg = np.average(cf_a3d, axis=0)
mode_l = ['DT', 'SVC', 'DNN', 'CDNN', 'RF']
with open(fold + 'cf_a3d_avg.txt', 'w') as F:
# dt_score, sv_score, mlp_score, cnn_score, rf_score
for i, mode in enumerate(mode_l):
F.write("{} Confusion Metrics\n".format(mode))
F.write(", ".join([str(x) for x in cf_a3d_avg[i, 0, :]]))
F.write('\n')
F.write(", ".join([str(x) for x in cf_a3d_avg[i, 1, :]]))
F.write('\n\n')
print("Current working directory:")
print(os.getcwd() + '/' + fold)
print("Saved data")
print(os.listdir(fold))
def save_result(fold, sc_a2d, mc_a2d, cf_a3d):
if not os.path.exists(fold):
os.mkdir(fold)
# os.mkdir(fold+'/sheet')
fold += '/'
plt.boxplot(sc_a2d)
plt.show()
print('Accuracy', ["DT", "SVC", "DNN", "CDNN", "RF"])
print(np.average(sc_a2d, axis=0))
plt.boxplot(mc_a2d)
plt.show()
print('Matthews Corrcoef', ["DT", "SVC", "DNN", "CDNN", "RF"])
print(np.average(mc_a2d, axis=0))
np.save(fold + 'sc_a2d', sc_a2d)
np.save(fold + 'cf_a3d', cf_a3d)
np.save(fold + 'mc_a2d', mc_a2d)
sc_df = pd.DataFrame(sc_a2d, columns=["DT", "SVC", "DNN", "CDNN", "RF"])
# sc_df.plot(kind='bar')
# plt.show()
sc_df.to_csv(fold + 'sheet_sc_a2d.csv')
sc_df.head()
mc_df = pd.DataFrame(mc_a2d, columns=["DT", "SVC", "DNN", "CDNN", "RF"])
# mc_df.plot(kind='bar')
# plt.show()
mc_df.to_csv(fold + 'sheet_mc_a2d.csv')
mc_df.head()
cf_a3d_avg = np.average(cf_a3d, axis=0)
mode_l = ['DT', 'SVC', 'DNN', 'CDNN', 'RF']
with open(fold + 'cf_a3d_avg.txt', 'w') as F:
# dt_score, sv_score, mlp_score, cnn_score, rf_score
for i, mode in enumerate(mode_l):
F.write("{} Confusion Metrics\n".format(mode))
for j in range(cf_a3d_avg.shape[1]):
F.write(", ".join([str(x) for x in cf_a3d_avg[i, j, :]]))
F.write('\n')
# F.write(", ".join([str(x) for x in cf_a3d_avg[i, 1, :]]))
F.write('\n')
print("Current working directory:")
print(os.getcwd() + '/' + fold)
print("Saved data")
print(os.listdir(fold))
def save_result_multiple_classes(fold, sc_a2d, cf_a3d):
if not os.path.exists(fold):
os.mkdir(fold)
# os.mkdir(fold+'/sheet')
fold += '/'
plt.boxplot(sc_a2d)
plt.xticks(range(1,6), ["DT", "SVC", "DNN", "CDNN", "RF"])
plt.show()
print('Accuracy', ["DT", "SVC", "DNN", "CDNN", "RF"])
print(np.average(sc_a2d, axis=0))
# plt.boxplot(mc_a2d)
# plt.show()
# print('Matthews Corrcoef', ["DT", "SVC", "DNN", "CDNN", "RF"])
# print(np.average(mc_a2d, axis=0))
np.save(fold + 'sc_a2d', sc_a2d)
np.save(fold + 'cf_a3d', cf_a3d)
# np.save(fold + 'mc_a2d', mc_a2d)
sc_df = pd.DataFrame(sc_a2d, columns=["DT", "SVC", "DNN", "CDNN", "RF"])
# sc_df.plot(kind='bar')
# plt.show()
sc_df.to_csv(fold + 'sheet_sc_a2d.csv')
sc_df.head()
# mc_df = pd.DataFrame(mc_a2d, columns=["DT", "SVC", "DNN", "CDNN", "RF"])
# mc_df.plot(kind='bar')
# plt.show()
# mc_df.to_csv(fold + 'sheet_mc_a2d.csv')
# mc_df.head()
cf_a3d_avg = np.average(cf_a3d, axis=0)
mode_l = ['DT', 'SVC', 'DNN', 'CDNN', 'RF']
with open(fold + 'cf_a3d_avg.txt', 'w') as F:
# dt_score, sv_score, mlp_score, cnn_score, rf_score
for i, mode in enumerate(mode_l):
F.write("{} Confusion Metrics\n".format(mode))
for j in range(cf_a3d_avg.shape[1]):
F.write(", ".join([str(x) for x in cf_a3d_avg[i, j, :]]))
F.write('\n')
# F.write(", ".join([str(x) for x in cf_a3d_avg[i, 1, :]]))
F.write('\n')
print("Current working directory:")
print(os.getcwd() + '/' + fold)
print("Saved data")
print(os.listdir(fold))
def run(fname, Da, Db, save_fold,
M=10, N=10):
"""
Run this code.
Input
=====
fname = 'sheet/classification/VASP_classfication_mapping_data.mat'
Da, Db = 'Cluster1', 'Cluster23'
save_fold = 'XX-VASP_1vs23_map'
M, N = 10, 10
    Iteration counts for randomization and cross-validation
"""
print("Reading data...")
X2part, y = read_data(fname, Da, Db)
X, y2, nb_classes = get_X_y2_nb_classes(X2part, y)
print("Doing classificaiton...")
sc_a2d, mc_a2d, cf_a3d = do_classification(X, y2, nb_classes, M=M, N=N)
print("Saving the results...")
save_result(save_fold, sc_a2d, mc_a2d, cf_a3d)
# ====================================
# Multiple Classes
# ====================================
def read_data_multple_classes(fname, D_l):
"""
Read data for processing
"""
Data = loadmat(fname)
print(Data.keys())
Data_con = [Data[Dx] for Dx in D_l]
X2part = np.concatenate(Data_con, axis=0)
print(X2part.shape)
y_l = []
for n, Dx in enumerate(D_l):
y_l.extend([n] * Data[Dx].shape[0])
y = np.array(y_l)
print(y.shape)
return X2part, y
def run_multiple_classes(fname, D_l, save_fold,
M=10, N=10):
"""
Run this code.
Input
=====
fname = 'sheet/classification/VASP_classfication_mapping_data.mat'
    D_l = ['Cluster1', 'Cluster23']
save_fold = 'XX-VASP_1vs23_map'
M, N = 10, 10
    Iteration counts for randomization and cross-validation
"""
print("Reading data...")
X2part, y = read_data_multple_classes(fname, D_l)
X, y2, nb_classes = get_X_y2_nb_classes(X2part, y)
print("...nb_classes ->", nb_classes)
if nb_classes == 2:
print("Doing classificaiton...")
sc_a2d, mc_a2d, cf_a3d = do_classification(X, y2, nb_classes, M=M, N=N)
print("Saving the results...")
save_result(save_fold, sc_a2d, mc_a2d, cf_a3d)
elif nb_classes > 2:
print("Doing classificaiton...")
sc_a2d, cf_a3d = do_classification(X, y2, nb_classes, M=M, N=N)
print("Saving the results...")
save_result_multiple_classes(save_fold, sc_a2d, cf_a3d)
else:
raise ValueError('nb_classes should be equal to or larger than 2.')
def show_tSNE(fname, D_l):
print("Reading data...")
X2part, y = read_data_multple_classes(fname, D_l)
print('Showing tSNE...')
kmds.plot_tSNE(X2part, y, digit=False)
plt.show()
return X2part, y
| mit |
zuku1985/scikit-learn | sklearn/datasets/species_distributions.py | 18 | 7982 | """
=============================
Species distribution dataset
=============================
This dataset represents the geographic distribution of species.
The dataset is provided by Phillips et al. (2006).
The two species are:
- `"Bradypus variegatus"
<http://www.iucnredlist.org/details/3038/0>`_ ,
the Brown-throated Sloth.
- `"Microryzomys minutus"
<http://www.iucnredlist.org/details/13408/0>`_ ,
also known as the Forest Small Rice Rat, a rodent that lives in Peru,
Colombia, Ecuador, Peru, and Venezuela.
References:
* `"Maximum entropy modeling of species geographic distributions"
<http://www.cs.princeton.edu/~schapire/papers/ecolmod.pdf>`_
S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling,
190:231-259, 2006.
Notes:
* See examples/applications/plot_species_distribution_modeling.py
for an example of using this dataset
"""
# Authors: Peter Prettenhofer <[email protected]>
# Jake Vanderplas <[email protected]>
#
# License: BSD 3 clause
from io import BytesIO
from os import makedirs
from os.path import exists
try:
# Python 2
from urllib2 import urlopen
PY2 = True
except ImportError:
# Python 3
from urllib.request import urlopen
PY2 = False
import numpy as np
from sklearn.datasets.base import get_data_home, Bunch
from sklearn.datasets.base import _pkl_filepath
from sklearn.externals import joblib
DIRECTORY_URL = "http://www.cs.princeton.edu/~schapire/maxent/datasets/"
SAMPLES_URL = DIRECTORY_URL + "samples.zip"
COVERAGES_URL = DIRECTORY_URL + "coverages.zip"
DATA_ARCHIVE_NAME = "species_coverage.pkz"
def _load_coverage(F, header_length=6, dtype=np.int16):
"""Load a coverage file from an open file object.
This will return a numpy array of the given dtype
"""
header = [F.readline() for i in range(header_length)]
make_tuple = lambda t: (t.split()[0], float(t.split()[1]))
header = dict([make_tuple(line) for line in header])
M = np.loadtxt(F, dtype=dtype)
nodata = int(header[b'NODATA_value'])
if nodata != -9999:
        M[M == nodata] = -9999
return M
def _load_csv(F):
"""Load csv file.
Parameters
----------
F : file object
CSV file open in byte mode.
Returns
-------
rec : np.ndarray
record array representing the data
"""
if PY2:
# Numpy recarray wants Python 2 str but not unicode
names = F.readline().strip().split(',')
else:
# Numpy recarray wants Python 3 str but not bytes...
names = F.readline().decode('ascii').strip().split(',')
rec = np.loadtxt(F, skiprows=0, delimiter=',', dtype='a22,f4,f4')
rec.dtype.names = names
return rec
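# The record arrays returned here expose the fields named in the CSV header
# row; for the species sample files loaded below that means (as described in
# the fetch_species_distributions docstring):
#
#     rec['species'], rec['dd long'], rec['dd lat']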
def construct_grids(batch):
"""Construct the map grid from the batch object
Parameters
----------
batch : Batch object
The object returned by :func:`fetch_species_distributions`
Returns
-------
(xgrid, ygrid) : 1-D arrays
The grid corresponding to the values in batch.coverages
"""
# x,y coordinates for corner cells
xmin = batch.x_left_lower_corner + batch.grid_size
xmax = xmin + (batch.Nx * batch.grid_size)
ymin = batch.y_left_lower_corner + batch.grid_size
ymax = ymin + (batch.Ny * batch.grid_size)
# x coordinates of the grid cells
xgrid = np.arange(xmin, xmax, batch.grid_size)
# y coordinates of the grid cells
ygrid = np.arange(ymin, ymax, batch.grid_size)
return (xgrid, ygrid)
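# Usage sketch (requires the download performed by fetch_species_distributions
# below):
#
#     data = fetch_species_distributions()
#     xgrid, ygrid = construct_grids(data)
#     # xgrid holds data.Nx longitude values and ygrid data.Ny latitude
#     # values, spaced data.grid_size degrees apart.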
def fetch_species_distributions(data_home=None,
download_if_missing=True):
"""Loader for species distribution dataset from Phillips et. al. (2006)
Read more in the :ref:`User Guide <datasets>`.
Parameters
----------
data_home : optional, default: None
Specify another download and cache folder for the datasets. By default
all scikit learn data is stored in '~/scikit_learn_data' subfolders.
download_if_missing : optional, True by default
If False, raise a IOError if the data is not locally available
instead of trying to download the data from the source site.
Returns
--------
The data is returned as a Bunch object with the following attributes:
coverages : array, shape = [14, 1592, 1212]
These represent the 14 features measured at each point of the map grid.
The latitude/longitude values for the grid are discussed below.
Missing data is represented by the value -9999.
train : record array, shape = (1623,)
The training points for the data. Each point has three fields:
- train['species'] is the species name
- train['dd long'] is the longitude, in degrees
- train['dd lat'] is the latitude, in degrees
test : record array, shape = (619,)
The test points for the data. Same format as the training data.
Nx, Ny : integers
The number of longitudes (x) and latitudes (y) in the grid
x_left_lower_corner, y_left_lower_corner : floats
The (x,y) position of the lower-left corner, in degrees
grid_size : float
The spacing between points of the grid, in degrees
Notes
------
This dataset represents the geographic distribution of species.
    The dataset is provided by Phillips et al. (2006).
The two species are:
- `"Bradypus variegatus"
<http://www.iucnredlist.org/details/3038/0>`_ ,
the Brown-throated Sloth.
- `"Microryzomys minutus"
<http://www.iucnredlist.org/details/13408/0>`_ ,
      also known as the Forest Small Rice Rat, a rodent that lives in
      Colombia, Ecuador, Peru, and Venezuela.
References
----------
* `"Maximum entropy modeling of species geographic distributions"
<http://www.cs.princeton.edu/~schapire/papers/ecolmod.pdf>`_
S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling,
190:231-259, 2006.
Notes
-----
* See examples/applications/plot_species_distribution_modeling.py
for an example of using this dataset with scikit-learn
"""
data_home = get_data_home(data_home)
if not exists(data_home):
makedirs(data_home)
# Define parameters for the data files. These should not be changed
# unless the data model changes. They will be saved in the npz file
# with the downloaded data.
extra_params = dict(x_left_lower_corner=-94.8,
Nx=1212,
y_left_lower_corner=-56.05,
Ny=1592,
grid_size=0.05)
dtype = np.int16
archive_path = _pkl_filepath(data_home, DATA_ARCHIVE_NAME)
if not exists(archive_path):
if not download_if_missing:
raise IOError("Data not found and `download_if_missing` is False")
print('Downloading species data from %s to %s' % (SAMPLES_URL,
data_home))
X = np.load(BytesIO(urlopen(SAMPLES_URL).read()))
for f in X.files:
fhandle = BytesIO(X[f])
if 'train' in f:
train = _load_csv(fhandle)
if 'test' in f:
test = _load_csv(fhandle)
print('Downloading coverage data from %s to %s' % (COVERAGES_URL,
data_home))
X = np.load(BytesIO(urlopen(COVERAGES_URL).read()))
coverages = []
for f in X.files:
fhandle = BytesIO(X[f])
print(' - converting', f)
coverages.append(_load_coverage(fhandle))
coverages = np.asarray(coverages, dtype=dtype)
bunch = Bunch(coverages=coverages,
test=test,
train=train,
**extra_params)
joblib.dump(bunch, archive_path, compress=9)
else:
bunch = joblib.load(archive_path)
return bunch
| bsd-3-clause |
martinghunt/ariba | setup.py | 1 | 2232 | import os
import shutil
import sys
import glob
from setuptools import setup, find_packages, Extension
minimap_c_files = [
'bseq.c',
'index.c',
'kthread.c',
'map.c',
'misc.c',
'sdust.c',
'sketch.c',
]
minimap_c_files = [os.path.join('third_party', 'minimap-0.2', x) for x in minimap_c_files]
minimap_c_files.append(os.path.join('ariba', 'ext', 'minimap_ariba.cpp'))
minimap_mod = Extension(
"minimap_ariba",
minimap_c_files,
extra_link_args=['-lz'],
include_dirs=[os.path.join('third_party', 'minimap-0.2')],
)
fermilite_c_files = [
'bfc.c',
'bseq.c',
'bubble.c',
'htab.c',
'ksw.c',
'kthread.c',
'mag.c',
'misc.c',
'mrope.c',
'rld0.c',
'rle.c',
'rope.c',
'unitig.c'
]
fermilite_c_files = [os.path.join('third_party', 'fermi-lite-0.1', x) for x in fermilite_c_files]
fermilite_c_files.append(os.path.join('ariba', 'ext', 'fml-asm_ariba.cpp'))
fermilite_mod = Extension(
"fermilite_ariba",
fermilite_c_files,
extra_link_args=['-lz'],
include_dirs=[os.path.join('third_party', 'fermi-lite-0.1')],
)
vcfcall_mod = Extension(
"vcfcall_ariba",
[os.path.join('ariba', 'ext', 'vcfcall_ariba.cpp')],
)
setup(
ext_modules=[minimap_mod, fermilite_mod, vcfcall_mod],
name='ariba',
version='2.14.6',
description='ARIBA: Antibiotic Resistance Identification By Assembly',
packages = find_packages(),
package_data={'ariba': ['test_run_data/*', 'tb_data/*']},
author='Martin Hunt',
author_email='[email protected]',
url='https://github.com/sanger-pathogens/ariba',
scripts=glob.glob('scripts/*'),
test_suite='nose.collector',
tests_require=['nose >= 1.3'],
install_requires=[
'BeautifulSoup4 >= 4.1.0',
'biopython',
'dendropy >= 4.2.0',
'pyfastaq >= 3.12.0',
'pysam >= 0.9.1',
'pymummer<=0.10.3',
'matplotlib >= 3.1.0',
],
license='GPLv3',
classifiers=[
'Development Status :: 4 - Beta',
'Topic :: Scientific/Engineering :: Bio-Informatics',
'Programming Language :: Python :: 3 :: Only',
'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
],
)
| gpl-3.0 |
CIFASIS/pylearn2 | pylearn2/models/svm.py | 1 | 4352 | """Wrappers for SVM models."""
__authors__ = "Ian Goodfellow"
__copyright__ = "Copyright 2010-2012, Universite de Montreal"
__credits__ = ["Ian Goodfellow"]
__license__ = "3-clause BSD"
__maintainer__ = "LISA Lab"
__email__ = "pylearn-dev@googlegroups"
import numpy as np
import warnings
from pylearn2.blocks import Block
from model import Model
from pylearn2.utils import wraps
try:
from sklearn.multiclass import OneVsRestClassifier
from sklearn.svm import SVC
except ImportError:
warnings.warn("Could not import sklearn.")
class OneVsRestClassifier(object):
"""
See `sklearn.multiclass.OneVsRestClassifier`.
Notes
-----
This class is a dummy class included so that sphinx
can import DenseMulticlassSVM and document it even
when sklearn is not installed.
"""
def __init__(self, estimator):
raise RuntimeError("sklearn not available.")
class DenseMulticlassSVM(OneVsRestClassifier, Block, Model):
"""
sklearn does very different things behind the scenes depending
upon the exact identity of the class you use. The only way to
get an SVM implementation that works with dense data is to use
the `SVC` class, which implements one-against-one
classification. This wrapper uses it to implement one-against-
rest classification, which generally works better in my
experiments.
To avoid duplicating the training data, use only numpy ndarrays
whose tags.c_contigous flag is true, and which are in float64
format.
Parameters
----------
C : float
SVM regularization parameter.
See SVC.__init__ for details.
kernel : str
Type of kernel to use.
See SVC.__init__ for details.
gamma : float
Optional parameter of kernel.
See SVC.__init__ for details.
coef0 : float
Optional parameter of kernel.
See SVC.__init__ for details.
degree : int
Degree of kernel, if kernel is polynomial.
See SVC.__init__ for details.
"""
def __init__(self, C, kernel='rbf', gamma = 1.0, coef0 = 1.0, degree = 3):
estimator = SVC(C=C, kernel=kernel, gamma = gamma, coef0 = coef0,
degree = degree)
Block.__init__(self)
Model.__init__(self)
super(DenseMulticlassSVM,self).__init__(estimator)
def train_all(self, dataset):
"""
If implemented, performs one epoch of training.
Parameters
----------
dataset : pylearn2.datasets.dataset.Dataset
Dataset object to draw training data from
Notes
-----
This method is useful
for models with highly specialized training algorithms for which is
does not make much sense to factor the training code into a separate
class. It is also useful for implementors that want to make their model
trainable without enforcing compatibility with pylearn2
TrainingAlgorithms.
"""
self.fit(dataset.X, dataset.y)
@wraps(Model.continue_learning)
def continue_learning(self):
# One call to train_all currently trains the model fully,
# so return False immediately.
return False
def enforce_constraints(self):
pass
def fit(self, X, y):
"""
Fit underlying estimators.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Data.
y : array-like, shape = [n_samples] or [n_samples, n_classes]
Multi-class targets. An indicator matrix turns on multilabel
classification.
Returns
-------
self
"""
super(DenseMulticlassSVM, self).fit(X, y)
return self
def decision_function(self, X):
"""
Returns the distance of each sample from the decision boundary for
each class.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
A 2D ndarray with each row containing the input features for one
example.
Returns
-------
T : array-like, shape = [n_samples, n_classes]
"""
return np.column_stack([estimator.decision_function(X)
for estimator in self.estimators_])
| bsd-3-clause |
tswast/google-cloud-python | spanner/docs/conf.py | 2 | 11899 | # -*- coding: utf-8 -*-
#
# google-cloud-spanner documentation build configuration file
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath(".."))
__version__ = "0.1.0"
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = "1.6.3"
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.autosummary",
"sphinx.ext.doctest",
"sphinx.ext.intersphinx",
"sphinx.ext.coverage",
"sphinx.ext.napoleon",
"sphinx.ext.todo",
"sphinx.ext.viewcode",
]
# autodoc/autosummary flags
autoclass_content = "both"
autodoc_default_flags = ["members"]
autosummary_generate = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# Allow markdown includes (so releases.md can include CHANGELOG.md)
# http://www.sphinx-doc.org/en/master/markdown.html
source_parsers = {".md": "recommonmark.parser.CommonMarkParser"}
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = [".rst", ".md"]
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = "index"
# General information about the project.
project = u"google-cloud-spanner"
copyright = u"2017, Google"
author = u"Google APIs"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The full version, including alpha/beta/rc tags.
release = __version__
# The short X.Y version.
version = ".".join(release.split(".")[0:2])
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ["_build"]
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "alabaster"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
"description": "Google Cloud Client Libraries for Python",
"github_user": "googleapis",
"github_repo": "google-cloud-python",
"github_banner": True,
"font_family": "'Roboto', Georgia, sans",
"head_font_family": "'Roboto', Georgia, serif",
"code_font_family": "'Roboto Mono', 'Consolas', monospace",
}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = "google-cloud-spanner-doc"
# -- Options for warnings ------------------------------------------------------
suppress_warnings = [
# Temporarily suppress this to avoid "more than one target found for
# cross-reference" warning, which are intractable for us to avoid while in
# a mono-repo.
# See https://github.com/sphinx-doc/sphinx/blob
# /2a65ffeef5c107c19084fabdd706cdff3f52d93c/sphinx/domains/python.py#L843
"ref.python"
]
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(
master_doc,
"google-cloud-spanner.tex",
u"google-cloud-spanner Documentation",
author,
"manual",
)
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(
master_doc,
"google-cloud-spanner",
u"google-cloud-spanner Documentation",
[author],
1,
)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
"google-cloud-spanner",
u"google-cloud-spanner Documentation",
author,
"google-cloud-spanner",
"GAPIC library for the {metadata.shortName} service",
"APIs",
)
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
"python": ("http://python.readthedocs.org/en/latest/", None),
"gax": ("https://gax-python.readthedocs.org/en/latest/", None),
"google-auth": ("https://google-auth.readthedocs.io/en/stable", None),
"google-gax": ("https://gax-python.readthedocs.io/en/latest/", None),
"google.api_core": ("https://googleapis.dev/python/google-api-core/latest", None),
"grpc": ("https://grpc.io/grpc/python/", None),
"requests": ("https://requests.kennethreitz.org/en/stable/", None),
"fastavro": ("https://fastavro.readthedocs.io/en/stable/", None),
"pandas": ("https://pandas.pydata.org/pandas-docs/stable/", None),
}
# Napoleon settings
napoleon_google_docstring = True
napoleon_numpy_docstring = True
napoleon_include_private_with_doc = False
napoleon_include_special_with_doc = True
napoleon_use_admonition_for_examples = False
napoleon_use_admonition_for_notes = False
napoleon_use_admonition_for_references = False
napoleon_use_ivar = False
napoleon_use_param = True
napoleon_use_rtype = True
| apache-2.0 |
Kortemme-Lab/kddg | kddg/api/ppi.py | 1 | 189894 | #!/usr/bin/python2.4
# encoding: utf-8
"""
ppi.py
High-level functions for interacting with the protein-protein interaction sections of the ddG database.
Classes:
BindingAffinityDDGInterface - a class used to interface with the database. Call get_interface to get a user API based on this class.
Created by Shane O'Connor 2015.
Copyright (c) 2015 __UCSF__. All rights reserved.
"""
import pprint
from io import BytesIO
import os
import sys
import copy
import json
import zipfile
import re
import random
import traceback
import StringIO
import gzip
import shutil
import sqlite3
import cPickle as pickle
import datetime
import time
import getpass
import numpy
from sqlalchemy import and_, or_, func
from klab import colortext
from klab.bio.pdb import PDB
from klab.bio.basics import ChainMutation, residue_type_1to3_map
from klab.fs.fsio import read_file, write_temp_file
from klab.benchmarking.analysis.ddg_binding_affinity_analysis import DBBenchmarkRun as BindingAffinityBenchmarkRun
from klab.bio.alignment import ScaffoldModelChainMapper, DecoyChainMapper
from klab.db.sqlalchemy_interface import row_to_dict, get_or_create_in_transaction, get_single_record_from_query
from klab.stats.misc import get_xy_dataset_statistics_pandas
import kddg.api.schema as dbmodel
from kddg.api.layers import *
from kddg.api.db import ddG, PartialDataException, SanityCheckException
from kddg.api.data import json_dumps
from kddg.api import settings
sys_settings = settings.load()
DeclarativeBase = dbmodel.DeclarativeBase
def get_interface(passwd, username = sys_settings.database.username, hostname = sys_settings.database.hostname, rosetta_scripts_path = None, rosetta_database_path = None, port = sys_settings.database.port):
'''This is the function that should be used to get a BindingAffinityDDGInterface object. It hides the private methods
from the user so that a more traditional object-oriented API is created.'''
return GenericUserInterface.generate(BindingAffinityDDGInterface, passwd = passwd, username = username, hostname = hostname, rosetta_scripts_path = rosetta_scripts_path, rosetta_database_path = rosetta_database_path, port = port)
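# Typical call (the credentials below are placeholders, and it is assumed the
# generated wrapper exposes the public methods of BindingAffinityDDGInterface
# defined further down):
#
#     ppi_api = get_interface('my_password', username='my_user', hostname='db.example.org')
#     done_ids = ppi_api.get_prediction_ids_with_scores('my_prediction_set')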
def get_interface_with_config_file(host_config_name = sys_settings.database.host_config_name, rosetta_scripts_path = None, rosetta_database_path = None, get_interface_factory = get_interface, passed_port = None):
# Uses ~/.my.cnf to get authentication information
### Example .my.cnf (host_config_name will equal myserver):
### [clientmyserver]
### user=username
### password=notmyrealpass
### host=server.domain.com
my_cnf_path = os.path.expanduser(os.path.join('~', '.my.cnf'))
if not os.path.isfile( os.path.expanduser(my_cnf_path) ):
raise Exception("A .my.cnf file must exist at: " + my_cnf_path)
# These three variables must be set in a section of .my.cnf named host_config_name
user = None
password = None
host = None
port = None
with open(my_cnf_path, 'r') as f:
parsing_config_section = False
for line in f:
if line.strip() == '[client%s]' % host_config_name:
parsing_config_section = True
elif line.strip() == '':
parsing_config_section = False
elif parsing_config_section:
if '=' in line:
tokens = line.strip().split('=')
key, val = tokens[0], '='.join(tokens[1:]) # values may contain '=' signs
key, val = key.strip(), val.strip()
if key == 'user':
user = val
elif key == 'password':
password = val
elif key == 'host':
host = val
elif key == 'port':
port = int(val)
else:
parsing_config_section = False
port = passed_port or port or 3306
if not user or not password or not host:
raise Exception("Couldn't find host(%s), username(%s), or password in section %s in %s" % (host, user, host_config_name, my_cnf_path) )
return get_interface_factory(password, username = user, hostname = host, rosetta_scripts_path = rosetta_scripts_path, rosetta_database_path = rosetta_database_path, port = port)
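# With a ~/.my.cnf section like the [clientmyserver] example above, this
# reduces to:
#
#     ppi_api = get_interface_with_config_file(host_config_name='myserver')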
class BindingAffinityDDGInterface(ddG):
    '''This is the internal API class that should NOT be used directly to interface with the database.'''
def __init__(self, passwd = None, username = sys_settings.database.username, hostname = sys_settings.database.hostname, rosetta_scripts_path = None, rosetta_database_path = None, port = sys_settings.database.port, file_content_buffer_size = None):
super(BindingAffinityDDGInterface, self).__init__(passwd = passwd, username = username, hostname = hostname, rosetta_scripts_path = rosetta_scripts_path, rosetta_database_path = rosetta_database_path, port = port, file_content_buffer_size = file_content_buffer_size)
self.prediction_data_path = self.DDG_db.execute('SELECT Value FROM _DBCONSTANTS WHERE VariableName="PredictionPPIDataPath"')[0]['Value']
self.unfinished_prediction_ids_cache = {}
def get_prediction_ids_with_scores(self, prediction_set_id, score_method_id = None):
'''Returns a set of all prediction_ids that already have an associated score in prediction_set_id
'''
score_table = self._get_sqa_prediction_structure_scores_table()
prediction_table = self.PredictionTable
if score_method_id != None:
return set([r['ID'] for r in self.DDG_db.execute_select('''
SELECT DISTINCT PredictionPPI.ID FROM PredictionPPIStructureScore
INNER JOIN PredictionPPI
ON PredictionPPI.ID=PredictionPPIStructureScore.PredictionPPIID
WHERE PredictionPPI.PredictionSet=%s AND PredictionPPIStructureScore.ScoreMethodID=%s''', parameters=(prediction_set_id, score_method_id))])
else:
return set([r['ID'] for r in self.DDG_db.execute_select('''
SELECT DISTINCT PredictionPPI.ID FROM PredictionPPIStructureScore
INNER JOIN PredictionPPI
ON PredictionPPI.ID=PredictionPPIStructureScore.PredictionPPIID
WHERE PredictionPPI.PredictionSet=%s''', parameters=(prediction_set_id,))])
def get_prediction_ids_and_record_ids(self, prediction_set_id, data_set_id = 'ZEMu_10.1002/prot.24634'):
'''Returns a set of all prediction_ids and the record ids for the underlying data set
'''
return self.DDG_db.execute_select('''
SELECT PredictionPPI.ID, PredictionPPI.PredictionSet, PredictionPPI.PPMutagenesisID, PredictionPPI.UserPPDataSetExperimentID, PPIDataSetDDG.RecordNumber, PPIDataSetDDG.PublishedPDBFileID
FROM PredictionPPI
INNER JOIN
(SELECT UserPPDataSetExperiment.ID AS UserPPDataSetExperimentID, PPComplexID, SetNumber
FROM UserPPDataSetExperiment
INNER JOIN UserPPDataSetExperimentTag ON UserPPDataSetExperiment.ID=UserPPDataSetExperimentTag.UserPPDataSetExperimentID
WHERE
UserPPDataSetExperimentTag.Tag = 'ZEMu') AS ZEMuUserDataSet
ON PredictionPPI.UserPPDataSetExperimentID=ZEMuUserDataSet.UserPPDataSetExperimentID
INNER JOIN PPIDataSetDDG
ON PPIDataSetDDG.PPMutagenesisID=PredictionPPI.PPMutagenesisID AND PPIDataSetDDG.PPComplexID = ZEMuUserDataSet.PPComplexID AND PPIDataSetDDG.SetNumber = ZEMuUserDataSet.SetNumber
WHERE
PredictionPPI.PredictionSet = %s AND
PPIDataSetDDG.DataSetID=%s AND
PPIDataSetDDG.PPComplexID = ZEMuUserDataSet.PPComplexID AND
PPIDataSetDDG.SetNumber = ZEMuUserDataSet.SetNumber AND
PPIDataSetDDG.RecordNumber NOT IN (929, 524, 468, 1027, 1026)
''', parameters=(prediction_set_id, data_set_id))
def get_unfinished_prediction_ids(self, prediction_set_id):
'''Returns a set of all prediction_ids that have Status != "done"
'''
if prediction_set_id in self.unfinished_prediction_ids_cache:
return self.unfinished_prediction_ids_cache[prediction_set_id]
else:
unfinished_ids = [r.ID for r in self.get_session().query(self.PredictionTable).filter(and_(self.PredictionTable.PredictionSet == prediction_set_id, self.PredictionTable.Status != 'done'))]
self.unfinished_prediction_ids_cache[prediction_set_id] = unfinished_ids
return unfinished_ids
def get_prediction_ids_without_scores(self, prediction_set_id, score_method_id = None):
        all_prediction_ids_set = set(self.get_prediction_ids(prediction_set_id))
        scored_prediction_ids_set = self.get_prediction_ids_with_scores(prediction_set_id, score_method_id = score_method_id)
        return list(all_prediction_ids_set.difference(scored_prediction_ids_set))
###########################################################################################
## Information layer
##
## This layer is for functions which extract data from the database.
###########################################################################################
#== Information API =======================================================================
@informational_pdb
def get_pdb_chains_for_prediction(self, prediction_id):
# look up the complex associated with the dataset record for the list of chains
raise Exception('This needs to be implemented.')
@informational_pdb
def get_chain_sets_for_mutatagenesis(self, mutagenesis_id, complex_id = None):
'''Gets a list of possibilities for the associated complex and calls get_chains_for_mutatagenesis on each.
e.g. returns {('1KI1', 0) : {'L' : ['A','B'], 'R' : ['C']}, ('12AB', 2) : {'L' : ['L','H'], 'R' : ['A']}, ...}
This function assumes that a complex structure is required i.e. that all chains in the PDB chain set are in the same PDB file.
This is a useful method for listing the possible complexes to use in a prediction or to determine whether one
        may be missing and the database needs to be updated.'''
pp_mutagenesis = self.DDG_db.execute_select("SELECT * FROM PPMutagenesis WHERE ID=%s", parameters = (mutagenesis_id,))
# Sanity checks
assert(len(pp_mutagenesis) == 1)
if complex_id:
assert(pp_mutagenesis[0]['PPComplexID'] == complex_id)
else:
complex_id = pp_mutagenesis[0]['PPComplexID']
d = {}
for pdb_set in self.DDG_db.execute_select("SELECT * FROM PPIPDBSet WHERE PPComplexID=%s AND IsComplex=1", parameters = (complex_id,)):
pdb_set_number = pdb_set['SetNumber']
pdb_file_ids = self.DDG_db.execute_select("SELECT DISTINCT PDBFileID FROM PPIPDBPartnerChain WHERE PPComplexID=%s AND SetNumber=%s", parameters = (complex_id, pdb_set_number))
assert(len(pdb_file_ids) == 1)
pdb_file_id = pdb_file_ids[0]['PDBFileID']
d[(pdb_file_id, pdb_set_number)] = self.get_chains_for_mutatagenesis(mutagenesis_id, pdb_file_id, pdb_set_number)
return d
@informational_pdb
def get_chains_for_mutatagenesis(self, mutagenesis_id, pdb_file_id, pdb_set_number, complex_id = None, tsession = None):
'''Returns a dictionary mapping 'L' to the list of left chains and 'R' to the list of right chains.
This function assumes that a complex structure is required i.e. that all chains in the PDB chain set are in the same PDB file.
'''
tsession = tsession or self.get_session() # do not create a new session
pp_mutagenesis = None
for r in tsession.execute('''SELECT * FROM PPMutagenesis WHERE ID=:mutagenesis_id''', dict(mutagenesis_id = mutagenesis_id)):
assert(pp_mutagenesis == None)
pp_mutagenesis = r
# Sanity checks
if complex_id:
assert(pp_mutagenesis['PPComplexID'] == complex_id)
pdb_set = None
for r in tsession.execute('''SELECT * FROM PPIPDBSet WHERE PPComplexID=:complex_id AND SetNumber=:pdb_set_number''', dict(complex_id = complex_id, pdb_set_number = pdb_set_number)):
assert(pdb_set == None)
pdb_set = r
assert(pdb_set['IsComplex'] == 1) # complex structure check
else:
complex_id = pp_mutagenesis['PPComplexID']
pdb_file_id, complex_chains = self.get_bound_pdb_set_details(complex_id, pdb_set_number, pdb_file_id = pdb_file_id, tsession = tsession)
return complex_chains
def get_bound_pdb_set_details(self, complex_id, pdb_set_number, pdb_file_id = None, tsession = None):
'''Returns the pdb_id and complex partner definitions (left PDB chains, right PDB chains) for complexes where all chains share the same PDB structure.'''
tsession = tsession or self.get_session() # do not create a new session
assert(complex_id != None and pdb_set_number != None)
complex_chains = dict(L = [], R = [])
for c in tsession.execute('''SELECT * FROM PPIPDBPartnerChain WHERE PPComplexID=:complex_id AND SetNumber=:pdb_set_number ORDER BY ChainIndex''', dict(complex_id = complex_id, pdb_set_number = pdb_set_number)):
if pdb_file_id:
assert(c['PDBFileID'] == pdb_file_id) # complex structure check
else:
pdb_file_id = c['PDBFileID']
complex_chains[c['Side']].append(c['Chain'])
assert(complex_chains['L'] and complex_chains['R'])
assert(len(set(complex_chains['L']).intersection(set(complex_chains['R']))) == 0) # in one unbound case, the same chain appears twice on one side (2CLR_DE|1CD8_AA, may be an error since this was published as 1CD8_AB but 1CD8 has no chain B) but it seems reasonable to assume that a chain should only appear on one side
return pdb_file_id, complex_chains
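    # Illustrative return value for get_bound_pdb_set_details (the PDB ID and chain letters here are
    # hypothetical and shown only to document the shape of the result):
    #
    #   ('1ABC', {'L' : ['A', 'B'], 'R' : ['C']})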
@informational_pdb
def get_pdb_mutations_for_mutagenesis(self, mutagenesis_id, pdb_file_id, set_number, complex_id = None):
'''Returns the PDB mutations for a mutagenesis experiment as well as the PDB residue information.'''
pdb_mutations = []
for pdb_mutation in self.DDG_db.execute_select('''
SELECT PPMutagenesisPDBMutation.*, PDBResidue.ResidueType,
PDBResidue.BFactorMean, PDBResidue.BFactorDeviation,
PDBResidue.ComplexExposure, PDBResidue.ComplexDSSP, PDBResidue.MonomericExposure, PDBResidue.MonomericDSSP
FROM
PPMutagenesisPDBMutation
INNER JOIN
PDBResidue ON PPMutagenesisPDBMutation.PDBFileID = PDBResidue.PDBFileID AND PPMutagenesisPDBMutation.Chain = PDBResidue.Chain AND PPMutagenesisPDBMutation.ResidueID = PDBResidue.ResidueID AND PPMutagenesisPDBMutation.WildTypeAA = PDBResidue.ResidueAA
WHERE PPMutagenesisID=%s AND PDBResidue.PDBFileID=%s AND SetNumber=%s ORDER BY Chain, ResidueID''', parameters=(mutagenesis_id, pdb_file_id, set_number)):
if complex_id:
assert(pdb_mutation['PPComplexID'] == complex_id)
pdb_mutations.append(pdb_mutation)
return pdb_mutations
@sanity_check
def find_pdb_files_involved_in_multiple_complexes(self):
known_exceptions = {
# These need to be checked - 1OYV only has 1 chain besides Subtilisin Carlsberg
'1OYV' : 2, # Subtilisin Carlsberg bound to: i) domain 1 of its inhibitor; and ii) domain 2 of its inhibitor.
'1QFW' : 2, # Human chorionic gonadotropin (chains A, B) bound to: i) Fv anti-alpha (chains L, H); and ii) Fv anti-beta (chain M, I).
}
d = {}
for r in self.DDG_db.execute_select('SELECT ID FROM PDBFile ORDER BY ID'):
pdb_id = r['ID']
complex_ids = self.search_complexes_by_pdb_id(pdb_id)
if pdb_id.upper() in known_exceptions:
assert(len(complex_ids) == known_exceptions[pdb_id])
else:
if len(complex_ids) > 1:
d[pdb_id] = {'complex_ids' : complex_ids, 'complexes' : {}}
for complex_id in complex_ids:
d[pdb_id]['complexes'][complex_id] = self.get_complex_details(complex_id)
if d:
raise SanityCheckException('Some PDB files are associated with multiple complexes:\n{0}'.format(pprint.pformat(d)))
@informational_complex
def search_complexes_by_pdb_id(self, pdb_id):
'''Returns the list of PPComplexIDs which are related to the PDB ID. Typically this list will be empty or have one
ID. In rarer cases, the same structure may be used as a structural basis for multiple complexes.'''
results = self.DDG_db_utf.execute_select('''
SELECT DISTINCT PPIPDBSet.PPComplexID FROM PPIPDBPartnerChain
INNER JOIN PPIPDBSet ON PPIPDBPartnerChain.PPComplexID=PPIPDBSet.PPComplexID AND PPIPDBPartnerChain.SetNumber=PPIPDBSet.SetNumber
WHERE PDBFileID=%s AND IsComplex=1
''', parameters=(pdb_id,))
return [r['PPComplexID'] for r in results]
@informational_job
def get_complex_details(self, complex_id):
results = self.DDG_db_utf.execute_select('SELECT * FROM PPComplex WHERE ID=%s', parameters=(complex_id, ))
if len(results) == 1:
return results[0]
return None
def _get_dataset_record_with_checks(self, dataset_experiment_id, dataset_id = None):
if dataset_id:
de = self.DDG_db_utf.execute_select('SELECT * FROM PPIDataSetDDG WHERE ID=%s AND DataSetID=%s', parameters=(dataset_experiment_id, dataset_id))
if len(de) != 1:
raise colortext.Exception('Dataset record #%d does not exist for/correspond to the dataset %s.' % (dataset_experiment_id, dataset_id))
else:
de = self.DDG_db_utf.execute_select('SELECT * FROM PPIDataSetDDG WHERE ID=%s', parameters=(dataset_experiment_id,))
if len(de) != 1:
raise colortext.Exception('Dataset record #%d does not exist.' % (dataset_experiment_id, ))
return de[0]
@informational_job
def get_job_details(self, prediction_id, include_files = True, truncate_content = None):
try:
prediction_record = self.get_session().query(self.PredictionTable).filter(self.PredictionTable.ID == prediction_id).one()
except Exception, e:
raise colortext.Exception('No details could be found for prediction #{0} in the database.\n{1}\n{2}'.format(prediction_id, str(e), traceback.format_exc()))
# mutfile_content = self.create_mutfile(prediction_id)
# Read the UserPPDataSetExperiment details
user_dataset_experiment_id = prediction_record.UserPPDataSetExperimentID
ude_details = self.get_user_dataset_experiment_details(user_dataset_experiment_id)
assert(ude_details['Mutagenesis']['PPMutagenesisID'] == prediction_record.PPMutagenesisID)
# Convert the record to dict
prediction_record = row_to_dict(prediction_record)
prediction_record['Files'] = {}
if include_files:
prediction_record['Files'] = self.get_job_files(prediction_id, truncate_content = truncate_content)
for k, v in ude_details.iteritems():
assert(k not in prediction_record)
prediction_record[k] = v
return prediction_record
@informational_job
def get_dataset_experiment_details(self, dataset_experiment_id, dataset_id = None):
de = self._get_dataset_record_with_checks(dataset_experiment_id, dataset_id = dataset_id)
PDBFileID = de['PDBFileID']
PPMutagenesisID = de['PPMutagenesisID']
ComplexID = self.DDG_db.execute_select('SELECT PPComplexID FROM PPMutagenesis WHERE ID=%s', parameters=(PPMutagenesisID,))[0]['PPComplexID']
SetNumber = None
# todo: this is a nasty hack due to the fact that we do not currently store the SetNumber and PPComplexID in the PPIDataSetDDG table. See ticket:1457.
pdb_sets = self.DDG_db.execute_select('SELECT * FROM PPIPDBSet WHERE PPComplexID=%s AND IsComplex=1', parameters=(ComplexID,))
if len(pdb_sets) > 1:
probable_sets = self.DDG_db.execute_select('SELECT DatabaseKey FROM PPIDatabaseComplex WHERE DatabaseName LIKE "%%SKEMPI%%" AND DatabaseKey LIKE "%%%s%%" AND PPComplexID=%s' % (PDBFileID, ComplexID))
assert(len(probable_sets) == 1)
match_pdb_chains = sorted(list(''.join(probable_sets[0]['DatabaseKey'].split('_')[1:])))
pdb_sets = {}
for set_record in self.DDG_db.execute_select('SELECT * FROM PPIPDBPartnerChain WHERE PPComplexID=%s AND PDBFileID=%s', parameters=(ComplexID, PDBFileID)):
pdb_sets[set_record['SetNumber']] = pdb_sets.get(set_record['SetNumber'], [])
pdb_sets[set_record['SetNumber']].append(set_record['Chain'])
pdb_sets[set_record['SetNumber']] = sorted(pdb_sets[set_record['SetNumber']])
hits = []
for k, v in pdb_sets.iteritems():
if v == match_pdb_chains:
hits.append(k)
if not len(hits) == 1:
raise Exception('Error: multiple possible PDB sets for dataset record #%d and PPMutagenesisID=%s.' % (dataset_experiment_id, PPMutagenesisID))
SetNumber = hits[0]
elif len(pdb_sets) == 0:
raise Exception('Error: no possible PDB sets for dataset record #%d and PPMutagenesisID=%s.' % (dataset_experiment_id, PPMutagenesisID))
else:
SetNumber = pdb_sets[0]['SetNumber']
pdb_mutations = self.get_pdb_mutations_for_mutagenesis(PPMutagenesisID, PDBFileID, SetNumber, complex_id = ComplexID)
d = dict(
_DataSetID = de['ID'],
RecordID = de['RecordNumber'],
PublishedDDG = de['PublishedDDG'],
PDBFileID = PDBFileID,
DerivedMutation = de['RecordIsDerivative'] == 1,
PossiblyBadRecord = de['PossibleError'] == 1,
Notes = [de['Remark'], de['CorrectionRemark']],
Mutagenesis = dict(
PPMutagenesisID = PPMutagenesisID,
),
Complex = self.get_complex_details(ComplexID),
Structure = dict(
PDBFileID = PDBFileID,
SetNumber = SetNumber,
Partners = self.get_chains_for_mutatagenesis(PPMutagenesisID, PDBFileID, SetNumber, complex_id = ComplexID),
),
PDBMutations = pdb_mutations,
)
if de['PublishedPDBFileID'] != PDBFileID:
d['Notes'].append("The PDB ID was changed by Shane O'Connor from %s to %s." % (de['PublishedPDBFileID'], PDBFileID))
d['Notes'] = '. '.join([x for x in d['Notes'] if x])
d['ExperimentalDDGs'] = self.get_ddg_values_for_dataset_record(dataset_experiment_id, dataset_id = dataset_id)
d['DDG'] = sum([((e.get('Positive') or {}).get('DDG', 0) - (e.get('Negative') or {}).get('DDG', 0)) for e in d['ExperimentalDDGs']])
# todo: add SCOPe class, Pfam domain
return d
def _get_ddg_values_for_dataset_record(self, dataset_experiment_id, dataset_id = None):
de = self._get_dataset_record_with_checks(dataset_experiment_id, dataset_id = dataset_id)
ddg_pairs = self.DDG_db.execute_select('SELECT PositiveDependentPPIDDGID, NegativeDependentPPIDDGID FROM PPIDataSetDDGSource WHERE PPIDataSetDDGID=%s', parameters=(dataset_experiment_id,))
assert(ddg_pairs)
ddgs = []
for ddg_pair in ddg_pairs:
paired_record = {'Positive' : None, 'Negative' : None}
if ddg_pair['PositiveDependentPPIDDGID']:
positive_record = self.DDG_db.execute_select('SELECT * FROM PPIDDG WHERE ID=%s', parameters=(ddg_pair['PositiveDependentPPIDDGID'],))[0]
paired_record['Positive'] = dict(
DDG = positive_record['DDG'],
LocationOfValueInPublication = positive_record['LocationOfValueInPublication'],
Publication = positive_record['Publication'],
Temperature = positive_record['Temperature'],
pH = positive_record['pH'],
)
if ddg_pair['NegativeDependentPPIDDGID']:
negative_record = self.DDG_db.execute_select('SELECT * FROM PPIDDG WHERE ID=%s', parameters=(ddg_pair['NegativeDependentPPIDDGID'],))[0]
paired_record['Negative'] = dict(
DDG = negative_record['DDG'],
LocationOfValueInPublication = negative_record['LocationOfValueInPublication'],
Publication = negative_record['Publication'],
Temperature = negative_record['Temperature'],
pH = negative_record['pH'],
)
ddgs.append(paired_record)
return ddgs
@informational_job
def get_user_dataset_experiment_details(self, user_dataset_experiment_id, user_dataset_id = None):
if user_dataset_id:
colortext.ppurple('PRE-SELECT')
ude = self.DDG_db.execute_select('SELECT * FROM UserPPDataSetExperiment WHERE ID=%s AND UserDataSetID=%s', parameters=(user_dataset_experiment_id, user_dataset_id))
colortext.ppurple('POST-SELECT')
if len(ude) != 1:
raise colortext.Exception('User dataset experiment %d does not exist for/correspond to the user dataset %s.' % (user_dataset_experiment_id, user_dataset_id))
else:
ude = self.DDG_db.execute_select('SELECT * FROM UserPPDataSetExperiment WHERE ID=%s', parameters=(user_dataset_experiment_id,))
if len(ude) != 1:
raise colortext.Exception('User dataset experiment %d does not exist.' % (user_dataset_experiment_id, ))
ude = ude[0]
user_dataset_id = ude['UserDataSetID']
assert(ude['IsComplex'] == 1)
pdb_mutations = self.get_pdb_mutations_for_mutagenesis(ude['PPMutagenesisID'], ude['PDBFileID'], ude['SetNumber'], complex_id = ude['PPComplexID'])
return dict(
Mutagenesis = dict(
PPMutagenesisID = ude['PPMutagenesisID'],
),
Complex = self.get_complex_details(ude['PPComplexID']),
Structure = dict(
PDBFileID = ude['PDBFileID'],
SetNumber = ude['SetNumber'],
Partners = self.get_chains_for_mutatagenesis(ude['PPMutagenesisID'], ude['PDBFileID'], ude['SetNumber'], complex_id = ude['PPComplexID']),
),
PDBMutations = pdb_mutations,
)
def _export_dataset(self, dataset_id):
'''Returns a dict containing the dataset information.'''
dataset_record = self.DDG_db.execute_select('SELECT * FROM DataSet WHERE ID=%s', parameters=(dataset_id,))
if not dataset_record:
raise Exception('Dataset %s does not exist in the database.' % dataset_id)
dataset_record = dataset_record[0]
if dataset_record['DatasetType'] != 'Binding affinity' and dataset_record['DatasetType'] != 'Protein stability and binding affinity':
            raise Exception('The dataset %s does not contain any binding affinity data.' % dataset_id)
# Read the UserPPDataSetExperiment details
data = []
ref_ids = set()
for dataset_ddg in self.DDG_db.execute_select('SELECT * FROM PPIDataSetDDG WHERE DataSetID=%s ORDER BY Section, RecordNumber', parameters=(dataset_id,)):
de_details = self.get_dataset_experiment_details(dataset_ddg['ID'], dataset_id)
for ddg_pair in de_details['ExperimentalDDGs']:
if ddg_pair['Positive']: ref_ids.add(ddg_pair['Positive']['Publication'])
if ddg_pair['Negative']: ref_ids.add(ddg_pair['Negative']['Publication'])
data.append(de_details)
references = {}
for ref_id in sorted(ref_ids):
references[ref_id] = self.get_publication(ref_id)
return dict(
Data = data,
References = references
)
@informational_job
def export_dataset_to_csv(self, dataset_id):
        '''Returns the dataset information as tab-separated values (despite the function name, the delimiter is a tab).'''
dataset_set = self._export_dataset(dataset_id)['Data']
lines = ['\t'.join(['Record #', 'Mutagenesis #', 'Partner 1', 'Partner 2', 'PDB ID', 'Partner 1 chains', 'Partner 2 chains', 'Mutations', 'DDG', 'PublishedDDG', 'IsDerivedMutation'])]
for record in dataset_set:
line = '\t'.join([
str(record['RecordID']),
str(record['Mutagenesis']['PPMutagenesisID']),
record['Complex']['LShortName'],
record['Complex']['RShortName'],
record['PDBFileID'],
','.join(sorted(record['Structure']['Partners']['L'])),
','.join(sorted(record['Structure']['Partners']['R'])),
','.join(['%s:%s%s%s' % (m['Chain'], m['WildTypeAA'], m['ResidueID'], m['MutantAA']) for m in record['PDBMutations']]),
str(record['DDG']),
str(record['PublishedDDG']),
str(int(record['DerivedMutation'])),
])
lines.append(line)
return ('\n'.join(lines)).encode('utf8', 'replace')
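    # A minimal usage sketch for export_dataset_to_csv (hedged: ppi_api is assumed to have been created via
    # get_interface_with_config_file above, the dataset ID is the default used by get_prediction_ids_and_record_ids,
    # and the output path is a placeholder):
    #
    #   tsv = ppi_api.export_dataset_to_csv('ZEMu_10.1002/prot.24634')
    #   with open('/tmp/zemu_dataset.tsv', 'w') as f:
    #       f.write(tsv)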
@informational_job
def get_predictions_experimental_details(self, prediction_id, userdatset_experiment_ids_to_subset_ddgs = None, include_files = False, reference_ids = set(), include_experimental_data = True):
details = self.get_job_details(prediction_id, include_files = include_files)
# Sanity checks and redundancy removal
PPMutagenesisID = details['PPMutagenesisID']
ComplexID = details['Complex']['ID']
chains = set([item for sublist in [v for k, v in details['Structure']['Partners'].iteritems()] for item in sublist])
PDBFileID = details['Structure']['PDBFileID']
SetNumber = details['Structure']['SetNumber']
for m in details['PDBMutations']:
assert(m['PPMutagenesisID'] == PPMutagenesisID)
del m['PPMutagenesisID']
assert(ComplexID == m['PPComplexID'])
del m['PPComplexID']
assert(PDBFileID == m['PDBFileID'])
del m['PDBFileID']
assert(SetNumber == m['SetNumber'])
del m['SetNumber']
assert(m['Chain'] in chains)
assert(details['Mutagenesis']['PPMutagenesisID'] == PPMutagenesisID)
del details['Mutagenesis']
# Add the DDG values for the related analysis sets
user_dataset_experiment_id = details['UserPPDataSetExperimentID']
if include_experimental_data:
userdatset_experiment_ids_to_subset_ddgs = userdatset_experiment_ids_to_subset_ddgs or self.get_experimental_ddgs_by_analysis_set(user_dataset_experiment_id, reference_ids = reference_ids)
assert('DDG' not in details)
details['DDG'] = userdatset_experiment_ids_to_subset_ddgs[user_dataset_experiment_id]
else:
details['DDG'] = None
return details
@informational_job
def get_experimental_ddgs_by_analysis_set(self, user_dataset_experiment_id = None, reference_ids = set()):
# Determine the set of analysis sets
userdatset_experiment_ids_to_subset_ddgs = {}
analysis_sets = [r['Subset'] for r in self.DDG_db.execute_select('SELECT DISTINCT Subset FROM UserPPAnalysisSet')]
# Query the database, restricting to one user_dataset_experiment_id if passed
parameters = None
qry = '''
SELECT UserPPAnalysisSet.*,
(IFNULL(PositiveDDG.DDG, 0) - IFNULL(NegativeDDG.DDG, 0)) AS ExperimentalDDG,
IF(ISNULL(NegativeDDG.DDG), 0, 1) AS DerivedMutation,
PositiveDDG.PPMutagenesisID, PositiveDDG.Publication AS PositiveDDGPublication, PositiveDDG.DDG as PositiveDDGValue,
NegativeDDG.PPMutagenesisID, NegativeDDG.Publication AS NegativeDDGPublication, NegativeDDG.DDG as NegativeDDGValue
FROM UserPPAnalysisSet
LEFT JOIN PPIDDG AS PositiveDDG ON PositiveDependentPPIDDGID=PositiveDDG.ID
LEFT JOIN PPIDDG AS NegativeDDG ON NegativeDependentPPIDDGID=NegativeDDG.ID'''
if user_dataset_experiment_id != None:
qry += ' WHERE UserPPAnalysisSet.UserPPDataSetExperimentID=%s'
parameters = (user_dataset_experiment_id,)
results = self.DDG_db.execute_select(qry, parameters)
# Return the mapping
for r in results:
if not userdatset_experiment_ids_to_subset_ddgs.get(r['UserPPDataSetExperimentID']):
d = dict.fromkeys(analysis_sets, None)
for analysis_set in analysis_sets:
d[analysis_set] = {}
userdatset_experiment_ids_to_subset_ddgs[r['UserPPDataSetExperimentID']] = d
userdatset_experiment_ids_to_subset_ddgs[r['UserPPDataSetExperimentID']][r['Subset']] = userdatset_experiment_ids_to_subset_ddgs[r['UserPPDataSetExperimentID']][r['Subset']] or dict(
Cases = set(),
DDGs = [],
IsDerivedValue = False,
MeanDDG = None
)
# Store the references IDs
reference = None
if r['PositiveDDGPublication'] and r['NegativeDDGPublication']:
reference = r['PositiveDDGPublication'] + ', ' + r['NegativeDDGPublication']
reference_ids.add(r['PositiveDDGPublication'])
reference_ids.add(r['NegativeDDGPublication'])
elif r['PositiveDDGPublication']:
reference = r['PositiveDDGPublication']
reference_ids.add(r['PositiveDDGPublication'])
elif r['NegativeDDGPublication']:
reference = r['NegativeDDGPublication']
reference_ids.add(r['NegativeDDGPublication'])
record_d = userdatset_experiment_ids_to_subset_ddgs[r['UserPPDataSetExperimentID']][r['Subset']]
record_d['Cases'].add((r['Subset'], r['Section'], r['RecordNumber']))
record_d['DDGs'].append({'Value' : r['ExperimentalDDG'], 'IsDerivedValue' : r['DerivedMutation'], 'Reference' : reference})
record_d['IsDerivedValue'] = record_d['IsDerivedValue'] or r['DerivedMutation']
# Calculate the mean of the DDG values
# Note: Based on experience, summing in Python over small lists can be faster than creating temporary numpy arrays due to the array creation overhead
for k, v in userdatset_experiment_ids_to_subset_ddgs.iteritems():
for subset, subset_ddgs in v.iteritems():
if subset_ddgs:
num_points = len(subset_ddgs['DDGs'])
if num_points > 1:
subset_ddgs['MeanDDG'] = sum([float(ddg['Value'])for ddg in subset_ddgs['DDGs']]) / float(num_points)
else:
# Avoid unnecessary garbage creation and division
subset_ddgs['MeanDDG'] = subset_ddgs['DDGs'][0]['Value']
return userdatset_experiment_ids_to_subset_ddgs
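    # The returned mapping has the following shape (values here are illustrative only, derived from the code above):
    #
    #   { <UserPPDataSetExperimentID> : {
    #         <analysis set name, e.g. 'ZEMu'> : {
    #             'Cases' : set([(<Subset>, <Section>, <RecordNumber>), ...]),
    #             'DDGs' : [{'Value' : <float>, 'IsDerivedValue' : <0 or 1>, 'Reference' : <publication ID(s) or None>}, ...],
    #             'IsDerivedValue' : <truthy if any contributing DDG value is derived>,
    #             'MeanDDG' : <float>,
    #         },
    #         ...
    #     },
    #     ... }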
@informational_job
def export_prediction_cases_to_json(self, prediction_set_id, retrieve_references = True):
print('This will probably break - I need to dump datetime.datetime objects to ISO strings.')
return json_dumps(self.get_prediction_set_case_details(prediction_set_id, retrieve_references = retrieve_references))
@informational_job
def export_prediction_cases_to_pickle(self, prediction_set_id, retrieve_references = True):
return pickle.dumps(self.get_prediction_set_case_details(prediction_set_id, retrieve_references = retrieve_references))
##### Public API: Rosetta-related functions
@job_input
def create_resfile(self, prediction_id):
raise Exception('This needs to be implemented.')
@job_input
def create_mutfile(self, prediction_id):
raise Exception('This needs to be implemented.')
###########################################################################################
## Prediction layer
##
## This part of the API is responsible for inserting prediction jobs in the database via
## the trickle-down proteomics paradigm.
###########################################################################################
#== Job creation/management API ===========================================================
#
# This part of the API is responsible for inserting prediction jobs in the database via the
# trickle-down proteomics paradigm.
# PredictionSet interface
@job_creator
def add_prediction_set(self, prediction_set_id, halted = True, priority = 5, batch_size = 40, allow_existing_prediction_set = False,
series_name = None, series_color = 'ff0000', series_alpha = 1.0, description = None):
return super(BindingAffinityDDGInterface, self).add_prediction_set(prediction_set_id, halted = halted, priority = priority, batch_size = batch_size, allow_existing_prediction_set = allow_existing_prediction_set, contains_protein_stability_predictions = False, contains_binding_affinity_predictions = True, series_name = series_name, series_color = series_color, series_alpha = series_alpha, description = description)
@job_creator
def add_development_protocol_command_lines(self, prediction_set_id, protocol_name, application, template_command_line, rosetta_script_file = None):
dev_protocol_id = self._get_dev_protocol_id(protocol_name)
if not dev_protocol_id:
dev_protocol_id = self._create_dev_protocol(protocol_name, application, template_command_line)
rosetta_script = None
if rosetta_script_file:
with open(rosetta_script_file, 'r') as f:
rosetta_script = f.read()
prediction_ids = self.get_prediction_ids(prediction_set_id)
        # All database calls within the following try block share the same session (tsession).
        # The commands therefore form a single transaction which is rolled back if an error occurs
        # within the block and committed otherwise.
file_content_id = None
tsession = self.get_session(new_session = True)
try:
for prediction_id in prediction_ids:
prediction_record = tsession.query(dbmodel.PredictionPPI).filter(dbmodel.PredictionPPI.ID == prediction_id)
prediction_record.DevelopmentProtocolID = dev_protocol_id
tsession.flush()
if rosetta_script:
                    # Add the Rosetta script file to the database via the same session
file_content_id = self._add_prediction_file(tsession, prediction_id, rosetta_script, os.path.basename(rosetta_script_file), 'RosettaScript', 'RosettaScript', 'Input', rm_trailing_line_whitespace = True, forced_mime_type = 'text/xml', file_content_id = file_content_id)
tsession.commit()
tsession.close()
except Exception, e:
colortext.error('Failure: {0}.'.format(str(e)))
colortext.error(traceback.format_exc())
tsession.rollback()
tsession.close()
@job_creator
def add_job(self, tsession, prediction_set_id, protocol_id, pp_mutagenesis_id, pp_complex_id, pdb_file_id, pp_complex_pdb_set_number, extra_rosetta_command_flags = None, keep_all_lines = False, keep_hetatm_lines = False, input_files = {}, test_only = False, pdb_residues_to_rosetta_cache = None, suppress_warnings = False):
'''This function inserts a prediction into the database.
The parameters define:
- the prediction set id used to group this prediction with other predictions for analysis;
- the protocol to be used to run the prediction;
- the set of mutations and PDB complex associated with the mutagenesis experiment;
- whether HETATM lines are to be kept or not.
- additional Rosetta flags e.g. "-ignore_zero_occupancy false" used to determine the mapping from PDB to Rosetta numbering. These flags should correspond to those used in the protocol otherwise errors could occur.
We strip the PDB based on the chains defined by the complex and keep_all_lines and keep_hetatm_lines and store the PDB in the database.
Next, the mapping from Rosetta numbering to PDB numbering is determined and stored in the database.
Then, the appropriate input files e.g. resfiles or mutfiles are generated and stored in the database.
Finally, we add the prediction record and associate it with the generated files.'''
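        # A minimal call sketch (hedged: all IDs and flags below are placeholders; a real call would use values
        # taken from PPMutagenesis/PPIPDBSet records and a transaction session obtained via
        # self.get_session(new_session = True)):
        #
        #   prediction_id = ppi_api.add_job(tsession, 'my_prediction_set', protocol_id, pp_mutagenesis_id,
        #                                   pp_complex_id, '1ABC', 1,
        #                                   extra_rosetta_command_flags = '-ignore_zero_occupancy false')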
return self._add_job(tsession, prediction_set_id, protocol_id, pp_mutagenesis_id, pp_complex_id, pdb_file_id, pp_complex_pdb_set_number, extra_rosetta_command_flags = extra_rosetta_command_flags, keep_all_lines = keep_all_lines, keep_hetatm_lines = keep_hetatm_lines, input_files = input_files, test_only = test_only, pdb_residues_to_rosetta_cache = pdb_residues_to_rosetta_cache, suppress_warnings = suppress_warnings)
@job_creator
def add_job_by_user_dataset_record(self, prediction_set_id, user_dataset_name, user_dataset_experiment_id, protocol_id, extra_rosetta_command_flags = None, keep_all_lines = False, keep_hetatm_lines = False, input_files = {}, test_only = False, pdb_residues_to_rosetta_cache = None, suppress_warnings = False, tsession = None, allowed_user_datasets = None):
'''Add a prediction job based on a user dataset record. This is typically called during add_prediction_run rather than directly by the user.
user_dataset_name is implied by user_dataset_experiment_id but we include it for sanity checking errors in data-entry.
The extra_rosetta_command_flags variable is used to add additional flags e.g. "-ignore_zero_occupancy false". These should be added if they are used in the protocol.'''
new_session = False
if not tsession:
new_session = True
tsession = self.get_session(new_session = True)
if not allowed_user_datasets:
allowed_user_datasets = self.get_defined_user_datasets(tsession)
try:
user_dataset_id = allowed_user_datasets[user_dataset_name]['ID']
except:
raise colortext.Exception('The user dataset "%s" does not exist for this API.' % user_dataset_name)
udse_table = self._get_sqa_user_dataset_experiment_table()
ude = None
for r in tsession.execute('''SELECT * FROM UserPPDataSetExperiment WHERE ID=:udse AND UserDataSetID=:uds''', dict(udse = user_dataset_experiment_id, uds = user_dataset_id)):
assert(not ude)
ude = r
if not ude:
raise colortext.Exception('User dataset experiment {0} does not exist for/correspond to this user dataset.'.format(user_dataset_experiment_id))
prediction_id = self._add_job(tsession, prediction_set_id, protocol_id, ude.PPMutagenesisID, ude.PPComplexID, ude.PDBFileID, ude.SetNumber, extra_rosetta_command_flags = extra_rosetta_command_flags, user_dataset_experiment_id = user_dataset_experiment_id, keep_all_lines = keep_all_lines, keep_hetatm_lines = keep_hetatm_lines, input_files = input_files, test_only = test_only, pdb_residues_to_rosetta_cache = pdb_residues_to_rosetta_cache, suppress_warnings = suppress_warnings)
if new_session:
tsession.close()
return prediction_id
@job_creator
def merge_prediction_run(self, from_prediction_set_id, to_prediction_set_id, create_if_does_not_exist = True, series_color = 'ff0000', description = None):
# Start a new transaction
tsession = self.get_session(new_session = True)
try:
# Look up the source prediction set details
try:
from_prediction_set = self.get_session().query(dbmodel.PredictionSet).filter(dbmodel.PredictionSet.ID == from_prediction_set_id).one()
except Exception, e:
print(str(e))
print(traceback.format_exc())
raise Exception('Could not retrieve details for source PredictionSet "{0}".'.format(from_prediction_set_id))
# Look up or create the target prediction set details
try:
to_prediction_set_details = self.get_session().query(dbmodel.PredictionSet).filter(dbmodel.PredictionSet.ID == to_prediction_set_id).one()
except:
if create_if_does_not_exist:
prediction_set_dict = row_to_dict(from_prediction_set)
prediction_set_dict['ID'] = to_prediction_set_id
prediction_set_dict['EntryDate'] = datetime.datetime.now()
prediction_set_dict['Description'] = description or 'Clone of {0}'.format(from_prediction_set_id)
                    to_prediction_set_details = get_or_create_in_transaction(tsession, dbmodel.PredictionSet, prediction_set_dict)
else:
raise Exception('Could not retrieve details for target PredictionSet "{0}". To create a new PredictionSet, set create_if_does_not_exist to True.'.format(to_prediction_set_id))
# Create prediction records
num_predictions = len(from_prediction_set.ppi_predictions)
            colortext.message('Merging/cloning prediction set.')
c = 1
for prediction in from_prediction_set.ppi_predictions:
colortext.wyellow('{0}/{1}: Prediction #{2}\r'.format(c, num_predictions, str(prediction.ID).ljust(15)))
c += 1
# Add a prediction record if it does not already exist
new_prediction_id = None
if self.get_session().query(self.PredictionTable).filter(and_(
self.PredictionTable.PredictionSet == to_prediction_set_id,
self.PredictionTable.UserPPDataSetExperimentID == prediction.UserPPDataSetExperimentID,
self.PredictionTable.ProtocolID == prediction.ProtocolID)).count() > 0:
continue
else:
new_prediction = prediction.clone(to_prediction_set_id)
tsession.add(new_prediction)
tsession.flush()
new_prediction_id = new_prediction.ID
# Add the prediction file records. The underlying FileContent tables will already exist.
for prediction_file in prediction.files:
new_prediction_file = prediction_file.clone(new_prediction_id)
tsession.add(new_prediction_file)
tsession.flush()
print('\nSuccess.\n')
tsession.commit()
tsession.close()
except:
colortext.error('Failure.')
tsession.rollback()
tsession.close()
raise
@job_creator
def add_prediction_run(self, prediction_set_id, user_dataset_name, extra_rosetta_command_flags = None, protocol_id = None, tagged_subset = None, keep_all_lines = False, keep_hetatm_lines = False, input_files = {}, quiet = False, test_only = False, only_single_mutations = False, short_run = False, test_run_first = True, show_full_errors = False, suppress_warnings = False):
'''Adds all jobs corresponding to a user dataset e.g. add_prediction_run("my first run", "AllBindingAffinityData", tagged_subset = "ZEMu").
If keep_hetatm_lines is False then all HETATM records for the PDB prediction chains will be removed. Otherwise, they are kept.
input_files is a global parameter for the run which is generally empty. Any files added here will be associated to all predictions in the run.
The extra_rosetta_command_flags parameter e.g. "-ignore_zero_occupancy false" is used to determine the mapping
from PDB to Rosetta numbering. These flags should correspond to those used in the protocol otherwise errors could occur.
        Returns False if no predictions were added to the run; otherwise returns True once all predictions (and there were some) have been added to the run.'''
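        # A fuller call sketch than the docstring example (hedged: the prediction set name and flags are
        # placeholders; the user dataset and tagged subset names are the ones mentioned in the docstring above):
        #
        #   ppi_api.add_prediction_run('my first run', 'AllBindingAffinityData', tagged_subset = 'ZEMu',
        #                              extra_rosetta_command_flags = '-ignore_zero_occupancy false',
        #                              protocol_id = protocol_id, test_run_first = True)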
        # For short (test) runs, at most this number of predictions will be created
short_run_limit = 100
# Create a new session
tsession = self.get_session(new_session = True)
try:
# Check preconditions
assert(not(input_files)) # todo: do something with input_files when we use that here - call self._add_file_content, associate the filenames with the FileContent IDs, and pass that dict to add_job which will create PredictionPPIFile records
assert(only_single_mutations == False) # todo: support this later? it may make more sense to just define new UserDataSets
allowed_user_datasets = self._add_prediction_run_preconditions(tsession, prediction_set_id, user_dataset_name, tagged_subset)
# Get the list of user dataset experiment records
user_dataset_experiments = self.get_user_dataset_experiments(tsession, user_dataset_name, tagged_subset = tagged_subset)
assert(set([u.IsComplex for u in user_dataset_experiments]) == set([1,]))
num_user_dataset_experiments = user_dataset_experiments.count()
            if not num_user_dataset_experiments:
tsession.close()
return False
# Count the number of individual PDB files
pdb_file_ids = set([u.PDBFileID for u in user_dataset_experiments])
tagged_subset_str = ''
if not quiet:
if tagged_subset:
tagged_subset_str = 'subset "%s" of ' % tagged_subset
# Create a cache to speed up job insertion
pdb_residues_to_rosetta_cache = {}
t1 = time.time()
# Run one query over the PredictionSet
result_set = None
if protocol_id:
result_set = tsession.execute('''SELECT * FROM PredictionPPI WHERE PredictionSet=:prediction_set AND ProtocolID=:protocol_id''', dict(prediction_set = prediction_set_id, protocol_id = protocol_id))
else:
result_set = tsession.execute('''SELECT * FROM PredictionPPI WHERE PredictionSet=:prediction_set AND ProtocolID IS NULL''', dict(prediction_set = prediction_set_id))
existing_results = set()
for r in result_set:
existing_results.add(r['UserPPDataSetExperimentID'])
# Test all predictions before creating records
if test_only or test_run_first:
if not quiet:
colortext.message('Testing %d predictions spanning %d PDB files for %suser dataset "%s" using protocol %s.' % (num_user_dataset_experiments, len(pdb_file_ids), tagged_subset_str, user_dataset_name, str(protocol_id or 'N/A')))
# Progress counter setup
count, records_per_dot = 0, 50
showprogress = not(quiet) and num_user_dataset_experiments > 300
if showprogress: print("|" + ("*" * (int(num_user_dataset_experiments/records_per_dot)-2)) + "|")
for ude in user_dataset_experiments:
# If the mutagenesis already exists in the prediction set, do not test it again
if not(ude.ID in existing_results):
# Test the prediction setup
prediction_id = self.add_job_by_user_dataset_record(prediction_set_id, user_dataset_name, ude.ID, protocol_id, extra_rosetta_command_flags = extra_rosetta_command_flags, keep_all_lines = keep_all_lines, keep_hetatm_lines = keep_hetatm_lines, input_files = input_files, test_only = True, pdb_residues_to_rosetta_cache = pdb_residues_to_rosetta_cache, suppress_warnings = suppress_warnings, tsession = tsession, allowed_user_datasets = allowed_user_datasets)
# Progress counter
count += 1
if showprogress and count % records_per_dot == 0: colortext.write(".", "cyan", flush = True)
if short_run and count >= short_run_limit: break
if not quiet: print('')
t2 = time.time()
print('Time taken for dry run: {0}s.'.format(t2 - t1))
if test_only:
tsession.rollback()
tsession.close()
return
# Progress counter setup
failed_jobs = {}
if not quiet:
colortext.message('Adding %d predictions spanning %d PDB files for %suser dataset "%s" using protocol %s.' % (num_user_dataset_experiments, len(pdb_file_ids), tagged_subset_str, user_dataset_name, str(protocol_id or 'N/A')))
count, records_per_dot = 0, 50
showprogress = not(quiet) and num_user_dataset_experiments > 300
if showprogress: print("|" + ("*" * (int(num_user_dataset_experiments/records_per_dot)-2)) + "|")
t1 = time.time()
time_to_ignore = 0
# Add the individual predictions
for ude in user_dataset_experiments:
# If the mutagenesis already exists in the prediction set, do not add it again
if not(ude.ID in existing_results):
t3 = time.time()
try:
# Add the prediction
user_dataset_id = allowed_user_datasets[user_dataset_name]['ID']
prediction_id = self.add_job_by_user_dataset_record(prediction_set_id, user_dataset_name, ude.ID, protocol_id, extra_rosetta_command_flags = extra_rosetta_command_flags, keep_all_lines = keep_all_lines, keep_hetatm_lines = keep_hetatm_lines, input_files = input_files, test_only = False, pdb_residues_to_rosetta_cache = pdb_residues_to_rosetta_cache, suppress_warnings = suppress_warnings, tsession = tsession, allowed_user_datasets = allowed_user_datasets)
except Exception, e:
time_to_ignore += time.time() - t3
user_dataset_id = allowed_user_datasets[user_dataset_name]['ID']
ude_record = None
for r in tsession.execute('SELECT * FROM UserPPDataSetExperiment WHERE ID=:ude_id AND UserDataSetID=:uds_id', dict(ude_id = ude.ID, uds_id = user_dataset_id)):
assert(ude_record == None)
ude_record = r
assert(ude_record['ID'] == ude.ID)
colortext.error('Adding the prediction for UserPPDataSetExperimentID %(ID)d failed (%(PDBFileID)s).' % ude_record)
failed_jobs[ude_record['PDBFileID']] = failed_jobs.get(ude_record['PDBFileID'], 0)
failed_jobs[ude_record['PDBFileID']] += 1
if show_full_errors:
print(e)
print(traceback.format_exc())
# Progress counter
count += 1
if showprogress and count % records_per_dot == 0: colortext.write(".", "green", flush = True)
if short_run and count >= short_run_limit: break
t2 = time.time()
print('Time taken for actual run: {0}s.'.format(t2 - t1 - time_to_ignore))
if failed_jobs:
colortext.error('Some jobs failed to run:\n%s' % pprint.pformat(failed_jobs))
if not quiet: print('')
print('Success')
tsession.commit()
tsession.close()
return True
except Exception, e:
print(str(e))
print(traceback.format_exc())
tsession.rollback()
tsession.close()
raise
def _create_pdb_residues_to_rosetta_cache_mp(self, pdb_residues_to_rosetta_cache, pdb_file_id, pdb_chains_to_keep, extra_rosetta_command_flags, keep_hetatm_lines):
# Retrieve the PDB file content, strip out the unused chains, and create a PDB object
raise Exception('Shane should finish this and add keep_all_lines')
        assert(type(pdb_residues_to_rosetta_cache) == None) # todo: use the multiprocessing manager dictproxy here
pdb_file = self.DDG_db.execute_select("SELECT * FROM PDBFile WHERE ID=%s", parameters = (pdb_file_id,))
p = PDB(pdb_file[0]['Content'])
p.strip_to_chains(list(pdb_chains_to_keep))
if not keep_hetatm_lines:
p.strip_HETATMs()
stripped_p = PDB('\n'.join(p.lines))
stripped_p.construct_pdb_to_rosetta_residue_map(self.rosetta_scripts_path, self.rosetta_database_path, extra_command_flags = extra_rosetta_command_flags)
atom_to_rosetta_residue_map = stripped_p.get_atom_sequence_to_rosetta_json_map()
rosetta_to_atom_residue_map = stripped_p.get_rosetta_sequence_to_atom_json_map()
cache_key = (pdb_file_id, ''.join(sorted(pdb_chains_to_keep)), self.rosetta_scripts_path, self.rosetta_database_path, extra_rosetta_command_flags)
pdb_residues_to_rosetta_cache[cache_key] = dict(
stripped_p = stripped_p,
atom_to_rosetta_residue_map = atom_to_rosetta_residue_map,
rosetta_to_atom_residue_map = rosetta_to_atom_residue_map)
@job_creator
    def add_prediction_run_mp(self, prediction_set_id, user_dataset_name, extra_rosetta_command_flags = None, protocol_id = None, tagged_subset = None, keep_all_lines = False, keep_hetatm_lines = False, input_files = {}, quiet = False, only_single_mutations = False, short_run = False, show_full_errors = False, suppress_warnings = False, tsession = None):
'''This is a multiprocessing version of add_prediction_run and should be used in favor of that function as it runs faster.
It takes advantage of parallelism at two points - creating the stripped PDB files and mutfiles for input and
inserting the jobs (MD5 is run multiple times for each job).
It was simple/quicker to write this as a 2-step method with a bottleneck in the middle i.e. it waits until all
stripped PDB files are generated before adding the jobs.
This could be made even more parallel by removing the bottleneck i.e. the process which strips the PDBs could
then call _add_job immediately rather than waiting for the other calls to _create_pdb_residues_to_rosetta_cache_mp
to complete.
'''
# Check preconditions
assert(keep_all_lines)
assert(suppress_warnings)
assert(tsession)
assert(not(input_files)) # todo: do something with input_files when we use that here - call self._add_file_content, associate the filenames with the FileContent IDs, and pass that dict to add_job which will create PredictionPPIFile records
assert(only_single_mutations == False) # todo: support this later? it may make more sense to just define new UserDataSets
self._add_prediction_run_preconditions(tsession, prediction_set_id, user_dataset_name, tagged_subset)
# Get the list of user dataset experiment records
user_dataset_experiments = self.get_user_dataset_experiments(tsession, user_dataset_name, tagged_subset = tagged_subset)
assert(set([u['IsComplex'] for u in user_dataset_experiments]) == set([1,]))
if not user_dataset_experiments:
return False
# Count the number of individual PDB files
pdb_file_ids = set([u['PDBFileID'] for u in user_dataset_experiments])
tagged_subset_str = ''
if not quiet:
if tagged_subset:
tagged_subset_str = 'subset "%s" of ' % tagged_subset
# Create a cache to speed up job insertion
#todo: start back here pdb_residues_to_rosetta_cache = manager dictproxy
# Create the stripped PDBs and residue maps in parallel using the multiprocessing module
#todo: write this function on Monday - get_user_dataset_pdb_partner_chains should return a set (<list of {'id' : pdb_file_id, 'L' : <list of chain ids>, , 'R' : <list of chain ids>} dicts>)
pdb_partner_chains = self.get_user_dataset_pdb_partner_chains(user_dataset_name, tagged_subset = tagged_subset)
#todo: start back here for ppc in pdb_partner_chains:
#todo: start back here apply_async self._create_pdb_residues_to_rosetta_cache_mp(pdb_residues_to_rosetta_cache, ppc['id'], set(ppc['L'] + ppc['R']), extra_rosetta_command_flags, keep_hetatm_lines)
#todo: start back here .join()
# Progress counter setup
failed_jobs = {}
if not quiet:
colortext.message('Adding %d predictions spanning %d PDB files for %suser dataset "%s" using protocol %s.' % (len(user_dataset_experiments), len(pdb_file_ids), tagged_subset_str, user_dataset_name, str(protocol_id or 'N/A')))
count, records_per_dot = 0, 50
showprogress = not(quiet) and len(user_dataset_experiments) > 300
if showprogress: print("|" + ("*" * (int(len(user_dataset_experiments)/records_per_dot)-2)) + "|")
# Add the individual predictions
for ude in user_dataset_experiments:
# If the mutagenesis already exists in the prediction set, do not add it again
if protocol_id:
existing_results = self.DDG_db.execute_select("SELECT * FROM PredictionPPI WHERE PredictionSet=%s AND UserPPDataSetExperimentID=%s AND ProtocolID=%s", parameters=(prediction_set_id, ude['ID'], protocol_id))
else:
existing_results = self.DDG_db.execute_select("SELECT * FROM PredictionPPI WHERE PredictionSet=%s AND UserPPDataSetExperimentID=%s AND ProtocolID IS NULL", parameters=(prediction_set_id, ude['ID']))
if len(existing_results) == 0:
# Add the prediction
try:
user_dataset_id = self.get_defined_user_datasets(tsession)[user_dataset_name]['ID']
prediction_id = self.add_job_by_user_dataset_record(prediction_set_id, user_dataset_name, ude['ID'], protocol_id, extra_rosetta_command_flags = extra_rosetta_command_flags, keep_all_lines = keep_all_lines, keep_hetatm_lines = keep_hetatm_lines, input_files = input_files, test_only = False, pdb_residues_to_rosetta_cache = pdb_residues_to_rosetta_cache, suppress_warnings = suppress_warnings)
except Exception, e:
user_dataset_id = self.get_defined_user_datasets(tsession)[user_dataset_name]['ID']
ude_record = self.DDG_db.execute_select('SELECT * FROM UserPPDataSetExperiment WHERE ID=%s AND UserDataSetID=%s', parameters=(ude['ID'], user_dataset_id))
ude_record = ude_record[0]
assert(ude_record['ID'] == ude['ID'])
colortext.error('Adding the prediction for UserPPDataSetExperimentID %(ID)d failed (%(PDBFileID)s).' % ude_record)
failed_jobs[ude_record['PDBFileID']] = failed_jobs.get(ude_record['PDBFileID'], 0)
failed_jobs[ude_record['PDBFileID']] += 1
if show_full_errors:
print(e)
print(traceback.format_exc())
# Progress counter
count += 1
if showprogress and count % records_per_dot == 0: colortext.write(".", "cyan", flush = True)
if short_run and count > 4: break
if failed_jobs:
colortext.error('Some jobs failed to run:\n%s' % pprint.pformat(failed_jobs))
if not quiet: print('')
return True
@job_creator
def clone_prediction_run(self, existing_prediction_set, new_prediction_set):
raise Exception('not implemented yet')
#assert(existing_prediction_set exists and has records)
#assert(new_prediction_set is empty)
#for each prediction record, add the record and all associated predictionfile records,
def _add_job(self, tsession, prediction_set_id, protocol_id, pp_mutagenesis_id, pp_complex_id, pdb_file_id, pp_complex_pdb_set_number, extra_rosetta_command_flags = None, user_dataset_experiment_id = None, keep_all_lines = False, keep_hetatm_lines = False, input_files = {}, test_only = False, pdb_residues_to_rosetta_cache = {}, suppress_warnings = False):
'''This is the internal function which adds a prediction job to the database. We distinguish it from add_job as
prediction jobs added using that function should have no associated user dataset experiment ID.
        pdb_residues_to_rosetta_cache can be used to speed up job insertion. When the same PDB/chains combination is used again, this cache reuses the old mapping rather than running RosettaScripts again.
The extra_rosetta_command_flags variable is used to add additional flags e.g. "-ignore_zero_occupancy false".
These are used to generate a mapping from PDB to Rosetta numbering so they should be set according to how they
are set in the protocol. In particular, include any flags which have an effect on what residues are present.
'-ignore_zero_occupancy false' and '-ignore_unrecognized_res' are typically used.
'''
# todo: do something with input_files when we use that here - see add_prediction_run
assert(not(input_files))
# Preliminaries
if not self.rosetta_scripts_path or not os.path.exists(self.rosetta_scripts_path):
raise Exception('The path "%s" to the RosettaScripts executable does not exist.' % self.rosetta_scripts_path)
cache_maps = False
if isinstance(pdb_residues_to_rosetta_cache, dict):
cache_maps = True
# Information for debugging
pp_complex = None
for r in tsession.execute('''SELECT * FROM PPComplex WHERE ID=:pp_complex_id''', dict(pp_complex_id = pp_complex_id)):
assert(pp_complex == None)
pp_complex = r
# Determine the list of PDB chains that will be kept
pdb_chains = self.get_chains_for_mutatagenesis(pp_mutagenesis_id, pdb_file_id, pp_complex_pdb_set_number, complex_id = pp_complex_id, tsession = tsession)
pdb_chains_to_keep = set(pdb_chains['L'] + pdb_chains['R'])
if self.rosetta_database_path:
cache_key = (pdb_file_id, ''.join(sorted(pdb_chains_to_keep)), self.rosetta_scripts_path, self.rosetta_database_path, extra_rosetta_command_flags)
else:
cache_key = (pdb_file_id, ''.join(sorted(pdb_chains_to_keep)), self.rosetta_scripts_path, extra_rosetta_command_flags)
if cache_maps and pdb_residues_to_rosetta_cache.get(cache_key):
stripped_p = pdb_residues_to_rosetta_cache[cache_key]['stripped_p']
else:
# Retrieve the PDB file content, strip out the unused chains, and create a PDB object
p = PDB(tsession.query(dbmodel.PDBFile).filter(dbmodel.PDBFile.ID == pdb_file_id).one().Content)
stripped_p = p
if not keep_all_lines:
p.strip_to_chains(list(pdb_chains_to_keep))
if not keep_hetatm_lines:
p.strip_HETATMs()
stripped_p = PDB('\n'.join(p.lines))
# Determine PDB chains to move
pdb_chains_to_move_str = ','.join(sorted(set(pdb_chains['R'])))
# Check for CSE and MSE
try:
if 'CSE' in stripped_p.residue_types:
raise Exception('This case contains a CSE residue which may (or may not) cause an issue.')
elif 'MSE' in stripped_p.residue_types:
raise Exception('This case contains an MSE residue which may (or may not) cause an issue.')
# It looks like MSE (and CSE?) may now be handled - https://www.rosettacommons.org/content/pdb-files-rosetta-format
except Exception, e:
if not suppress_warnings:
colortext.error('%s: %s, chains %s' % (str(e), stripped_p.pdb_id or pdb_file_id, str(pdb_chains_to_keep)))
# Assert that there are no empty sequences
assert(sorted(stripped_p.atom_sequences.keys()) == sorted(pdb_chains_to_keep))
for chain_id, sequence in stripped_p.atom_sequences.iteritems():
assert(len(sequence) > 0)
# Get the PDB mutations and check that they make sense in the context of the stripped PDB file
# Note: the schema assumes that at most one set of mutations can be specified per PDB file per complex per mutagenesis. We may want to relax that in future by adding the SetNumber to the PPMutagenesisPDBMutation table
complex_mutations = [m for m in tsession.execute('SELECT * FROM PPMutagenesisMutation WHERE PPMutagenesisID=:pp_mutagenesis_id', dict(pp_mutagenesis_id = pp_mutagenesis_id))]
pdb_complex_mutations = [m for m in tsession.execute('SELECT * FROM PPMutagenesisPDBMutation WHERE PPMutagenesisID=:pp_mutagenesis_id AND PPComplexID=:pp_complex_id AND PDBFileID=:pdb_file_id', dict(pp_mutagenesis_id = pp_mutagenesis_id, pp_complex_id = pp_complex_id, pdb_file_id = pdb_file_id))]
assert(len(complex_mutations) == len(pdb_complex_mutations))
mutations = [ChainMutation(m['WildTypeAA'], m['ResidueID'], m['MutantAA'], Chain = m['Chain']) for m in pdb_complex_mutations]
try:
stripped_p.validate_mutations(mutations)
except Exception, e:
colortext.error('%s: %s' % (str(e), str(mutations)))
#colortext.warning('PPMutagenesisID=%d, ComplexID=%d, PDBFileID=%s, SetNumber=%d, UserDatasetExperimentID=%d' % (pp_mutagenesis_id, pp_complex_id, pdb_file_id, pp_complex_pdb_set_number, user_dataset_experiment_id))
#colortext.warning('SKEMPI record: %s' % self.DDG_db.execute_select('SELECT * FROM PPMutagenesis WHERE ID=%s', parameters=(pp_mutagenesis_id,))[0]['SKEMPI_KEY'])
#colortext.warning('PDB chains to keep: %s' % str(pdb_chains_to_keep))
#colortext.warning('PPIPDBPartnerChain records: %s' % pprint.pformat(self.DDG_db.execute_select('SELECT PPIPDBPartnerChain.* FROM PPIPDBPartnerChain INNER JOIN PPIPDBSet ON PPIPDBSet.PPComplexID=PPIPDBPartnerChain.PPComplexID AND PPIPDBSet.SetNumber=PPIPDBPartnerChain.SetNumber WHERE PPIPDBPartnerChain.PPComplexID=%s AND IsComplex=1 ORDER BY PPIPDBPartnerChain.SetNumber, PPIPDBPartnerChain.ChainIndex', parameters=(pp_complex_id,))))
# Determine the mapping from the stripped PDB to Rosetta numbering
        # Note: we assume that this stripped PDB will be the input to the Rosetta protocol and that the command flags used here match those used by the protocol, otherwise the residue mapping may be incorrect.
# Make JSON mappings
if cache_maps and pdb_residues_to_rosetta_cache.get(cache_key):
atom_to_rosetta_residue_map = pdb_residues_to_rosetta_cache[cache_key]['atom_to_rosetta_residue_map']
rosetta_to_atom_residue_map = pdb_residues_to_rosetta_cache[cache_key]['rosetta_to_atom_residue_map']
else:
if self.rosetta_database_path:
stripped_p.construct_pdb_to_rosetta_residue_map(self.rosetta_scripts_path, self.rosetta_database_path, extra_command_flags = extra_rosetta_command_flags)
else:
stripped_p.construct_pdb_to_rosetta_residue_map(self.rosetta_scripts_path, extra_command_flags = extra_rosetta_command_flags)
atom_to_rosetta_residue_map = stripped_p.get_atom_sequence_to_rosetta_json_map()
rosetta_to_atom_residue_map = stripped_p.get_rosetta_sequence_to_atom_json_map()
if cache_maps and (not pdb_residues_to_rosetta_cache.get(cache_key)):
pdb_residues_to_rosetta_cache[cache_key] = dict(
stripped_p = stripped_p,
atom_to_rosetta_residue_map = atom_to_rosetta_residue_map,
rosetta_to_atom_residue_map = rosetta_to_atom_residue_map)
# Assert that there are no empty sequences in the Rosetta-processed PDB file
total_num_residues = 0
d = json.loads(rosetta_to_atom_residue_map)
stripped_p_chains = stripped_p.atom_sequences.keys()
for chain_id in stripped_p_chains:
num_chain_residues = len([z for z in d.values() if z[0] == chain_id])
total_num_residues += num_chain_residues
assert(num_chain_residues > 0)
pdb_filename = '%s_%s.pdb' % (pdb_file_id, ''.join(sorted(pdb_chains_to_keep)))
# Create parameter substitution dictionary
mutfile_name = 'mutations.mutfile'
resfile_name = 'mutations.resfile'
parameter_sub_dict = {
'%%input_pdb%%' : pdb_filename,
'%%chainstomove%%' : pdb_chains_to_move_str,
'%%pathtoresfile%%' : resfile_name,
'%%pathtomutfile%%' : mutfile_name,
}
if test_only:
return
# All functions below use tsession which allows us to use transactions which can be rolled back if errors occur
if protocol_id:
existing_records = [r for r in tsession.execute('SELECT * FROM {0} WHERE PredictionSet=:prediction_set AND UserPPDataSetExperimentID=:user_dataset_experiment_id AND ProtocolID=:protocol_id'.format(self._get_prediction_table()), dict(prediction_set = prediction_set_id, user_dataset_experiment_id = user_dataset_experiment_id, protocol_id = protocol_id))]
else:
existing_records = [r for r in tsession.execute('SELECT * FROM {0} WHERE PredictionSet=:prediction_set AND UserPPDataSetExperimentID=:user_dataset_experiment_id AND ProtocolID IS NULL'.format(self._get_prediction_table()), dict(prediction_set = prediction_set_id, user_dataset_experiment_id = user_dataset_experiment_id))]
assert(len(existing_records) == 0)
prediction_record = dict(
PredictionSet = prediction_set_id,
PPMutagenesisID = pp_mutagenesis_id,
UserPPDataSetExperimentID = user_dataset_experiment_id,
ProtocolID = protocol_id,
JSONParameters = json_dumps(parameter_sub_dict),
DevelopmentProtocolID = None,
ExtraParameters = extra_rosetta_command_flags,
Status = 'queued',
Cost = total_num_residues,
KeptHETATMLines = keep_hetatm_lines,
)
prediction_ppi = get_or_create_in_transaction(tsession, self._get_sqa_prediction_table(), dict(
PredictionSet = prediction_set_id,
PPMutagenesisID = pp_mutagenesis_id,
UserPPDataSetExperimentID = user_dataset_experiment_id,
ProtocolID = protocol_id,
JSONParameters = json_dumps(parameter_sub_dict),
DevelopmentProtocolID = None,
ExtraParameters = extra_rosetta_command_flags,
Status = 'queued',
Cost = total_num_residues,
KeptHETATMLines = keep_hetatm_lines,
), missing_columns = ['ID', 'EntryDate', 'StartDate', 'EndDate', 'Errors', 'AdminCommand', 'maxvmem', 'DDGTime', 'NumberOfMeasurements'])
#sql, params, record_exists = self.DDG_db.create_insert_dict_string(self._get_prediction_table(), prediction_record, ['PredictionSet', 'UserPPDataSetExperimentID', 'ProtocolID'])
#cur.execute(sql, params)
#prediction_id = cur.lastrowid
prediction_id = prediction_ppi.ID
# Add the stripped PDB file
self._add_prediction_file(tsession, prediction_id, '\n'.join(stripped_p.lines), pdb_filename, 'PDB', 'StrippedPDB', 'Input', rm_trailing_line_whitespace = True, forced_mime_type = 'chemical/x-pdb')
# Make and add the mutfile
rosetta_mutations = stripped_p.map_pdb_residues_to_rosetta_residues(mutations)
self._add_mutfile_to_prediction(tsession, prediction_id, rosetta_mutations, mutfile_name)
# Make and add the resfile
self._add_resfile_to_prediction(tsession, prediction_id, mutations, resfile_name)
# Add the residue mappings
self._add_residue_map_json_to_prediction(tsession, prediction_id, rosetta_to_atom_residue_map, 'Rosetta residue->PDB residue map')
self._add_residue_map_json_to_prediction(tsession, prediction_id, atom_to_rosetta_residue_map, 'PDB residue->Rosetta residue map')
# Add the params files
self._add_ligand_params_files_to_prediction(tsession, prediction_id, pdb_file_id)
if protocol_id:
existing_records = [r for r in tsession.execute('SELECT * FROM {0} WHERE PredictionSet=:prediction_set_id AND UserPPDataSetExperimentID=:user_dataset_experiment_id AND ProtocolID=:protocol_id'.format(self._get_prediction_table()),
dict(prediction_set_id = prediction_set_id, user_dataset_experiment_id = user_dataset_experiment_id, protocol_id = protocol_id))]
else:
existing_records = [r for r in tsession.execute('SELECT * FROM {0} WHERE PredictionSet=:prediction_set_id AND UserPPDataSetExperimentID=:user_dataset_experiment_id AND ProtocolID IS NULL'.format(self._get_prediction_table()),
dict(prediction_set_id = prediction_set_id, user_dataset_experiment_id = user_dataset_experiment_id))]
assert(len(existing_records) == 1)
prediction_id = existing_records[0]['ID']
return prediction_id
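# Note on the data stored per prediction: at this point each prediction record has the stripped input PDB, a
# mutfile and a resfile describing the mutations, JSON maps in both directions between PDB and Rosetta residue
# numbering, and any ligand params files associated with the PDB file attached as input files.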
#== Job execution/completion API ===========================================================
#
# This part of the API is responsible for starting jobs and setting them as failed or
# completed
@job_execution
def set_job_temporary_protocol_field(self, prediction_id, prediction_set_id, temporary_protocol_field):
raise Exception('not implemented yet')
@job_execution
def start_job(self, prediction_id, prediction_set_id):
'''Sets the job status to "active". prediction_set must be passed and is used as a sanity check.'''
prediction_records = self.DDG_db.execute_select('SELECT * FROM PredictionPPI WHERE ID=%s AND PredictionSet=%s', parameters=(prediction_id, prediction_set_id))
assert(len(prediction_records) == 1)
prediction_record = prediction_records[0]
if prediction_record['ProtocolID'] == None:
    print('empty ProtocolID')
if prediction_record['DevelopmentProtocolID'] == None:
raise Exception('Neither the Protocol nor the DevelopmentProtocolID is set for this job - it cannot be started without this information.')
raise Exception('This function needs to be implemented by subclasses of the API.')
@job_execution
def get_max_number_of_cluster_jobs(self, prediction_set_id, priority):
return self.DDG_db.execute_select('SELECT Value FROM _DBCONSTANTS WHERE VariableName="MaxStabilityClusterJobs"')[0]['Value']
@job_completion
def complete_job(self, prediction_id, prediction_set, scores, maxvmem, ddgtime, files = []):
'''Sets a job to 'completed' and stores scores. prediction_set must be passed and is used as a sanity check.'''
raise Exception('This function needs to be implemented by subclasses of the API.')
###########################################################################################
## Analysis layer
##
## This part of the API is responsible for running analysis on completed predictions
###########################################################################################
@analysis_api
def determine_best_pairs(self, prediction_id, score_method_id = None, expectn = None, top_x = 3):
'''This returns the top_x lowest-scoring wildtype/mutants for a prediction given a scoring method.
The results are returned as a dict:
"wildtype" -> list(tuple(score, structure_id))
"mutant" -> list(tuple(score, structure_id))
If no scoring method is supplied then the first (i.e. random) top_x structures are returned (with scores set
to zero) as we have no method of scoring or discerning them.'''
scores = self.get_prediction_scores(prediction_id, expectn = expectn)
if score_method_id != None:
assert(isinstance(top_x, int) and top_x > 0)
scores = scores.get(score_method_id)
mutant_complexes = []
wildtype_complexes = []
for structure_id, structure_scores in scores.iteritems():
    if structure_scores.get('MutantComplex'):
        mutant_complexes.append((structure_scores['MutantComplex']['total'], structure_id))
    if structure_scores.get('WildTypeComplex'):
        wildtype_complexes.append((structure_scores['WildTypeComplex']['total'], structure_id))
wildtype_complexes = sorted(wildtype_complexes)[:top_x]
mutant_complexes = sorted(mutant_complexes)[:top_x]
else:
wt_structure_ids = set()
mut_structure_ids = set()
for score_method_id, method_scores in scores.iteritems():
    for structure_id, structure_scores in method_scores.iteritems():
        if structure_scores.get('WildTypeComplex'):
            wt_structure_ids.add(structure_id)
        if structure_scores.get('MutantComplex'):
            mut_structure_ids.add(structure_id)
wildtype_complexes = [(0, i) for i in sorted(wt_structure_ids)]
mutant_complexes = [(0, i) for i in sorted(mut_structure_ids)]
if top_x != None:
# If no score method is specified then we cannot choose the top X so we arbitrarily choose X structures
assert(isinstance(top_x, int) and top_x > 0)
wildtype_complexes = wildtype_complexes[:top_x]
mutant_complexes = mutant_complexes[:top_x]
# Truncate so that we have an equal number of both types
max_len = min(len(wildtype_complexes), len(mutant_complexes))
wildtype_complexes, mutant_complexes = wildtype_complexes[:max_len], mutant_complexes[:max_len]
if wildtype_complexes and mutant_complexes:
return {'wildtype' : wildtype_complexes, 'mutant' : mutant_complexes}
return {}
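# Illustrative example of the return value (the scores and structure IDs below are made up):
#   determine_best_pairs(1234, score_method_id = 7, top_x = 3) might return
#       {'wildtype' : [(-501.2, 12), (-500.8, 3), (-499.9, 25)],
#        'mutant'   : [(-498.1, 7), (-497.5, 3), (-496.0, 18)]}
#   i.e. the three lowest-scoring wildtype and mutant complex structures as (total score, structure_id) tuples.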
@app_pymol
def create_pymol_session_in_memory(self, prediction_id, wt_task_number, mutant_task_number, pymol_executable = '/var/www/tg2/tg2env/designdb/pymol/pymol/pymol'):
# Retrieve and unzip results
archive = self.get_job_data(prediction_id)
zipped_content = zipfile.ZipFile(BytesIO(archive), 'r', zipfile.ZIP_DEFLATED)
try:
# Get the name of the files from the zip
wildtype_filename = 'repacked_wt_round_%d.pdb.gz' % wt_task_number
mutant_filename = None
for filepath in sorted(zipped_content.namelist()):
filename = os.path.split(filepath)[1]
if filename.startswith('mut_') and filename.endswith('_round_%d.pdb.gz' % mutant_task_number):
mutant_filename = filename
break
print(wildtype_filename, mutant_filename)
PyMOL_session = None
file_list = zipped_content.namelist()
print(file_list)
# If both files exist in the zip, extract their contents in memory and create a PyMOL session pair (PSE, script)
if (mutant_filename in file_list) and (wildtype_filename in file_list):
wildtype_pdb = zipped_content.open(wildtype_filename, 'r').read()
mutant_pdb = zipped_content.open(mutant_filename, 'r').read()
wildtype_pdb = read_file(write_temp_file('/tmp', wildtype_pdb, ftype = 'w', suffix = '.gz', prefix = ''))
mutant_pdb = read_file(write_temp_file('/tmp', mutant_pdb, ftype = 'w', suffix = '.gz', prefix = ''))
# todo: this should be structure_1_name = 'Wildtype', structure_2_name = 'Mutant' but the underlying PyMOL script needs to be parameterized
chain_mapper = ScaffoldModelChainMapper.from_file_contents(wildtype_pdb, mutant_pdb, structure_1_name = 'Scaffold', structure_2_name = 'Model')
PyMOL_session = chain_mapper.generate_pymol_session(pymol_executable = pymol_executable)
zipped_content.close()
return PyMOL_session
except Exception, e:
zipped_content.close()
raise Exception(str(e))
@app_pymol
def create_full_pymol_session_in_memory(self, prediction_id, score_method_id = None, top_x = 3, mutation_string = None, settings = {}, pymol_executable = '/var/www/tg2/tg2env/designdb/pymol/pymol/pymol', wt_chain_seed = None, mutant_chain_seed = None):
wt_chain_seed = wt_chain_seed or 'blue'
mutant_chain_seed = mutant_chain_seed or 'yellow'
best_pairs = self.determine_best_pairs(prediction_id, score_method_id = score_method_id, expectn = None, top_x = top_x)
# Retrieve and unzip results
archive = self.get_job_data(prediction_id)
zipped_content = zipfile.ZipFile(BytesIO(archive), 'r', zipfile.ZIP_DEFLATED)
try:
file_paths = {'wildtype' : {}, 'mutant' : {}}
# Get the name of the files from the zip
zip_filenames = set([os.path.split(filepath)[1] for filepath in zipped_content.namelist()])
# Retrieve the input structure
input_pdb_contents = None
try:
file_content_id = self.get_session().query(dbmodel.PredictionPPIFile).filter(and_(dbmodel.PredictionPPIFile.PredictionPPIID == prediction_id, dbmodel.PredictionPPIFile.FileRole == 'StrippedPDB')).one().FileContentID
input_pdb_contents = self.importer.get_file_content_from_cache(file_content_id)
except Exception, e:
# Report the error but continue
colortext.error(str(e))
colortext.error(traceback.format_exc())
# Find all wildtype structures
for p in best_pairs['wildtype']:
structure_id = p[1]
expected_filename = 'repacked_wt_round_{0}.pdb.gz'.format(structure_id)
if expected_filename in zip_filenames:
file_paths['wildtype'][structure_id] = expected_filename
# Find all mutant structures
mutant_ids = [p[1] for p in best_pairs['mutant']]
for filename in zip_filenames:
if filename.startswith('mut_'):
mtch = re.match('^mut_(.*?)_round_(\d+).pdb.*$', filename)
if mtch:
structure_id = int(mtch.group(2))
if structure_id in mutant_ids:
if not mutation_string:
mutation_string = mtch.group(1)
file_paths['mutant'][structure_id] = filename
PyMOL_session = None
file_list = zipped_content.namelist()
# If both files exist in the zip, extract their contents in memory and create a PyMOL session pair (PSE, script)
chain_mapper = DecoyChainMapper()
for stypep in [('wildtype', 'wt', wt_chain_seed, 'white'), ('mutant', mutation_string or 'mutant', mutant_chain_seed, 'red')]:
stype = stypep[0]
prefix = stypep[1].replace(' ', '_')
for structure_id, filename in file_paths[stype].iteritems():
if filename in file_list:
if filename.endswith('.gz'):
    pdb_contents_stream = StringIO.StringIO(zipped_content.open(filename, 'r').read())
    pdb_contents = gzip.GzipFile(fileobj = pdb_contents_stream).read()
else:
    pdb_contents = zipped_content.open(filename, 'r').read()
pdb_object = PDB(pdb_contents)
chain_mapper.add(pdb_object, '{0}_n{1}'.format(prefix, structure_id), chain_seed_color = stypep[2], backbone_color = stypep[2], sidechain_color = stypep[3])
if input_pdb_contents:
chain_mapper.add(PDB(input_pdb_contents), 'input', backbone_color = 'grey50', sidechain_color = 'grey50')
zipped_content.close()
PyMOL_session = chain_mapper.generate_pymol_session(settings = settings, pymol_executable = pymol_executable)
return PyMOL_session
except Exception, e:
zipped_content.close()
raise Exception('{0}\n{1}'.format(str(e), traceback.format_exc()))
def _get_prediction_data(self, prediction_id, score_method_id, main_ddg_analysis_type, expectn = None, extract_data_for_case_if_missing = False, root_directory = None, dataframe_type = "Binding affinity", prediction_data = None):
    # Avoid a shared mutable default argument; create a fresh dict when the caller does not supply one
    if prediction_data is None:
        prediction_data = {}
assert( main_ddg_analysis_type.startswith('DDG_') )
analysis_type = main_ddg_analysis_type[4:]
top_x = 3
if analysis_type.startswith('Top'):
analysis_function = self.get_top_x_ddg
analysis_parameter = int( analysis_type[3:] )
top_x = analysis_parameter
elif analysis_type.startswith('Random'):
analysis_function = self.get_random_pairing_ddg
if len(analysis_type) > len('Random'):
analysis_parameter = int( analysis_type[len('Random'):] )
else:
analysis_parameter = None
elif analysis_type == 'AvgAllPairs':
analysis_function = self.get_avg_all_pairings_ddg
analysis_parameter = None
elif analysis_type == 'MatchPairs':
analysis_function = self.get_match_pairs_ddg
analysis_parameter = None
elif analysis_type.startswith('CplxBoltzWT'):
assert( len(analysis_type) > len('CplxBoltzWT') )
analysis_function = self.get_wt_complex_weighted_boltzmann_ddg
analysis_parameter = float( analysis_type[len('CplxBoltzWT'):] )
elif analysis_type.startswith('CplxBoltzMut'):
assert( len(analysis_type) > len('CplxBoltzMut') )
analysis_function = self.get_mut_complex_weighted_boltzmann_ddg
analysis_parameter = float( analysis_type[len('CplxBoltzMut'):] )
elif analysis_type.startswith('CplxBoltzBoth'):
assert( len(analysis_type) > len('CplxBoltzBoth') )
analysis_function = self.get_both_complex_weighted_boltzmann_ddg
analysis_parameter = float( analysis_type[len('CplxBoltzBoth'):] )
else:
raise Exception("Didn't recognize analysis type: " + str(main_ddg_analysis_type))
try:
predicted_ddg = analysis_function(prediction_id, score_method_id, analysis_parameter, expectn = expectn)
except Exception, e:
colortext.pcyan(str(e))
colortext.warning(traceback.format_exc())
if extract_data_for_case_if_missing:
self.extract_data_for_case(prediction_id, root_directory = root_directory, force = True, score_method_id = score_method_id)
try:
predicted_ddg = analysis_function(prediction_id, score_method_id, analysis_parameter, expectn = expectn)
except PartialDataException, e:
raise
except Exception, e:
raise
top_x_ddg_stability = self.get_top_x_ddg_stability(prediction_id, score_method_id, top_x = top_x, expectn = expectn)
prediction_data[main_ddg_analysis_type] = predicted_ddg
prediction_data['DDGStability_Top%d' % top_x] = top_x_ddg_stability
return prediction_data
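# Based on the parsing above, main_ddg_analysis_type strings take forms such as (the numeric suffixes are illustrative):
#   'DDG_Top3', 'DDG_Top10'         - mean over the top X best-scoring structure pairs
#   'DDG_Random', 'DDG_Random10'    - random wildtype/mutant pairing, optionally limited to N pairs
#   'DDG_AvgAllPairs'               - mean over all possible wildtype/mutant pairings
#   'DDG_MatchPairs'                - mean over structures paired by output index
#   'DDG_CplxBoltzWT0.5', 'DDG_CplxBoltzMut0.5', 'DDG_CplxBoltzBoth0.5'
#                                   - Boltzmann-weighted DDGs where the suffix is the weighting temperature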
@analysis_api
def get_wt_complex_weighted_boltzmann_ddg(self, prediction_id, score_method_id, temperature, expectn = None):
return self.get_complex_weighted_boltzmann_ddg(prediction_id, score_method_id, temperature, expectn = expectn, scores_to_weight = 'wt_complex')
@analysis_api
def get_mut_complex_weighted_boltzmann_ddg(self, prediction_id, score_method_id, temperature, expectn = None):
return self.get_complex_weighted_boltzmann_ddg(prediction_id, score_method_id, temperature, expectn = expectn, scores_to_weight = 'mut_complex')
@analysis_api
def get_both_complex_weighted_boltzmann_ddg(self, prediction_id, score_method_id, temperature, expectn = None):
return self.get_complex_weighted_boltzmann_ddg(prediction_id, score_method_id, temperature, expectn = expectn, scores_to_weight = 'both_complexes')
@analysis_api
def get_complex_weighted_boltzmann_ddg(self, prediction_id, score_method_id, temperature, expectn = None, scores_to_weight = 'wt_complex'):
'''
Returns a DDG for this prediction by Boltzmann-weighting the per-structure DDG values, using the selected
complex scores (wildtype, mutant, or both, as specified by scores_to_weight) and the given temperature as
the weighting factor.
'''
scores = self.get_prediction_scores(prediction_id, expectn = expectn).get(score_method_id)
if scores == None:
return None
if self.scores_contains_ddg_score(scores):
raise Exception("This scoring analysis doesn't make sense to use without complex scores")
def boltz_exponent(x, t):
return numpy.exp( -1.0 * x / t )
try:
np_type = numpy.float64
struct_nums = scores.keys()
mut_complex = numpy.array( [np_type( scores[struct_num]['MutantComplex']['total'] ) for struct_num in struct_nums] )
mut_lpartner = numpy.array( [np_type( scores[struct_num]['MutantLPartner']['total'] ) for struct_num in struct_nums] )
mut_rpartner = numpy.array( [np_type( scores[struct_num]['MutantRPartner']['total'] ) for struct_num in struct_nums] )
wt_complex = numpy.array( [np_type( scores[struct_num]['WildTypeComplex']['total'] ) for struct_num in struct_nums] )
wt_lpartner = numpy.array( [np_type( scores[struct_num]['WildTypeLPartner']['total'] ) for struct_num in struct_nums] )
wt_rpartner = numpy.array( [np_type( scores[struct_num]['WildTypeRPartner']['total'] ) for struct_num in struct_nums] )
matched_ddgs = (mut_complex - mut_lpartner - mut_rpartner) - (wt_complex - wt_lpartner - wt_rpartner)
if scores_to_weight == 'wt_complex':
scores_for_weighting = wt_complex
elif scores_to_weight == 'mut_complex':
scores_for_weighting = mut_complex
elif scores_to_weight == 'both_complexes':
scores_for_weighting = mut_complex + wt_complex
else:
raise Exception('Unrecognized scores_to_weight argument: ' + str(scores_to_weight) )
max_scores_for_weighting = numpy.max(scores_for_weighting)
normalized_scores_for_weighting = scores_for_weighting - max_scores_for_weighting
exponented_scores = numpy.exp( np_type(-1.0) * normalized_scores_for_weighting / np_type(temperature) )
weighted_ddg = numpy.divide(
numpy.sum( numpy.multiply(matched_ddgs, exponented_scores) ),
numpy.sum( exponented_scores )
)
return weighted_ddg
except PartialDataException:
    raise PartialDataException('The case is missing some data.')
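# Illustrative sketch of the weighting scheme above (not part of the API; the numbers are made up):
#
#   import numpy
#   matched_ddgs = numpy.array([-1.2, -0.4, -2.0])                 # per-structure interface DDG values
#   scores_for_weighting = numpy.array([-500.0, -498.5, -501.3])   # e.g. wildtype complex totals
#   temperature = 0.5
#   w = numpy.exp(-(scores_for_weighting - scores_for_weighting.max()) / temperature)
#   weighted_ddg = (matched_ddgs * w).sum() / w.sum()              # lower (better) complex scores dominate
#
# Shifting the scores by their maximum only multiplies all weights by a common factor, so it does not change
# the weighted average.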
@analysis_api
def get_match_pairs_ddg(self, prediction_id, score_method_id, structs_to_use, expectn = None):
'''
Returns DDG for this prediction by averaging all values for paired output structures
'''
scores = self.get_prediction_scores(prediction_id, expectn = expectn).get(score_method_id)
if scores == None:
return None
if self.scores_contains_ddg_score(scores):
raise Exception("This scoring analysis doesn't make sense to use without complex scores")
try:
structs_to_use_score = numpy.average([
(scores[struct_num]['MutantComplex']['total'] - scores[struct_num]['MutantLPartner']['total'] - scores[struct_num]['MutantRPartner']['total']) -
(scores[struct_num]['WildTypeComplex']['total'] - scores[struct_num]['WildTypeLPartner']['total'] - scores[struct_num]['WildTypeRPartner']['total'])
for struct_num in scores
])
return structs_to_use_score
except PartialDataException:
    raise PartialDataException('The case is missing some data.')
@analysis_api
def get_avg_all_pairings_ddg(self, prediction_id, score_method_id, structs_to_use, expectn = None):
'''
Returns DDG for this prediction by averaging together all possible pairings
'''
scores = self.get_prediction_scores(prediction_id, expectn = expectn).get(score_method_id)
if scores == None:
return None
if self.scores_contains_ddg_score(scores):
raise Exception("This scoring analysis doesn't make sense to use without complex scores")
try:
all_struct_num_pairs = []
for wt_struct_num in scores:
if 'WildTypeComplex' in scores[wt_struct_num]:
for mut_struct_num in scores:
if 'MutantComplex' in scores[mut_struct_num]:
all_struct_num_pairs.append( (wt_struct_num, mut_struct_num) )
structs_to_use_score = numpy.average([
(scores[mut_struct_num]['MutantComplex']['total'] - scores[mut_struct_num]['MutantLPartner']['total'] - scores[mut_struct_num]['MutantRPartner']['total']) -
(scores[wt_struct_num]['WildTypeComplex']['total'] - scores[wt_struct_num]['WildTypeLPartner']['total'] - scores[wt_struct_num]['WildTypeRPartner']['total'])
for wt_struct_num, mut_struct_num in all_struct_num_pairs
])
return structs_to_use_score
except PartialDataException:
    raise PartialDataException('The case is missing some data.')
@analysis_api
def get_random_pairing_ddg(self, prediction_id, score_method_id, structs_to_use, expectn = None):
'''
Returns DDG for this prediction by randomly pairing mutant structures with wildtype structures
'''
scores = self.get_prediction_scores(prediction_id, expectn = expectn).get(score_method_id)
if scores == None:
return None
if self.scores_contains_ddg_score(scores):
try:
total_scores = [scores[struct_num]['DDG']['total'] for struct_num in scores]
if structs_to_use == None:
structs_to_use = len(total_scores)
structs_to_use_score = numpy.average(
random.sample(total_scores, structs_to_use)
)
return structs_to_use_score
except:
raise PartialDataException('The case is missing some data.')
try:
if structs_to_use == None:
structs_to_use = len(scores)
else:
structs_to_use = min(structs_to_use, len(scores))
structs_to_use_wt_struct_nums = random.sample(scores.keys(), structs_to_use)
structs_to_use_mut_struct_nums = random.sample(scores.keys(), structs_to_use)
structs_to_use_score = numpy.average([
(scores[mut_struct_num]['MutantComplex']['total'] - scores[mut_struct_num]['MutantLPartner']['total'] - scores[mut_struct_num]['MutantRPartner']['total']) -
(scores[wt_struct_num]['WildTypeComplex']['total'] - scores[wt_struct_num]['WildTypeLPartner']['total'] - scores[wt_struct_num]['WildTypeRPartner']['total'])
for wt_struct_num, mut_struct_num in zip(structs_to_use_wt_struct_nums, structs_to_use_mut_struct_nums)
])
return structs_to_use_score
except PartialDataException:
raise PartialDataException('The case is missing some data.')
@analysis_api
def get_top_x_ddg(self, prediction_id, score_method_id, top_x , expectn = None):
'''Returns the TopX value for the prediction. Typically, this is the mean value of the top X predictions for a
case computed using the associated Score records in the database.'''
# scores is a mapping from nstruct -> ScoreType -> score record where ScoreType is one of 'DDG', 'WildTypeLPartner', 'WildTypeRPartner', 'WildTypeComplex', 'MutantLPartner', 'MutantRPartner', 'MutantComplex'
# if we do the calculation in Python, pull scores out to the top level first
# otherwise, we can add a stored procedure to determine the TopX
# if we go the Python route, we can implement different variations on TopX (including a stored procedure) and pass the function pointers as an argument to the main analysis function
# Make sure that we have as many cases as we expect
scores = self.get_prediction_scores(prediction_id, expectn = expectn).get(score_method_id)
if scores == None:
return None
if self.scores_contains_ddg_score(scores):
try:
total_scores = [(scores[struct_num]['DDG']['total'], struct_num) for struct_num in scores]
total_scores.sort()
top_x_struct_nums = [t[1] for t in total_scores[:top_x]]
top_x_score = numpy.average([
scores[struct_num]['DDG']['total']
for struct_num in top_x_struct_nums
])
return top_x_score
except:
print scores[struct_num]
raise PartialDataException('The case is missing some data.')
try:
wt_total_scores = [(scores[struct_num]['WildTypeComplex']['total'], struct_num) for struct_num in scores]
wt_total_scores.sort()
top_x_wt_struct_nums = [t[1] for t in wt_total_scores[:top_x]]
mut_total_scores = [(scores[struct_num]['MutantComplex']['total'], struct_num) for struct_num in scores]
mut_total_scores.sort()
top_x_mut_struct_nums = [t[1] for t in mut_total_scores[:top_x]]
top_x_score = numpy.average([
(scores[mut_struct_num]['MutantComplex']['total'] - scores[mut_struct_num]['MutantLPartner']['total'] - scores[mut_struct_num]['MutantRPartner']['total']) -
(scores[wt_struct_num]['WildTypeComplex']['total'] - scores[wt_struct_num]['WildTypeLPartner']['total'] - scores[wt_struct_num]['WildTypeRPartner']['total'])
for wt_struct_num, mut_struct_num in zip(top_x_wt_struct_nums, top_x_mut_struct_nums)
])
return top_x_score
except:
raise PartialDataException('The case is missing some data.')
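# Illustrative example of the TopX pairing logic above (made-up numbers): with top_x = 2, wildtype complex
# totals {1 : -500.1, 2 : -498.0, 3 : -501.3} and mutant complex totals {1 : -497.0, 2 : -499.2, 3 : -495.5},
# the two lowest-scoring wildtype structures (3, 1) are paired in order with the two lowest-scoring mutant
# structures (2, 1) and the returned value is the mean of the two interface DDGs computed from those pairs.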
def scores_contains_ddg_score(self, scores):
for struct_num, score_dict in scores.iteritems():
if 'DDG' not in score_dict:
return False
return True
def scores_contains_complex_scores(self, scores):
for struct_num, score_dict in scores.iteritems():
if 'WildTypeComplex' not in score_dict or 'MutantComplex' not in score_dict:
return False
return True
@analysis_api
def get_top_x_ddg_stability(self, prediction_id, score_method_id, top_x = 3, expectn = None):
'''Returns the TopX value for the prediction only considering the complex scores. This computation may work as a
measure of a stability DDG value.'''
scores = self.get_prediction_scores(prediction_id, expectn = expectn).get(score_method_id)
if scores == None or not self.scores_contains_complex_scores(scores):
return None
wt_total_scores = [(scores[struct_num]['WildTypeComplex']['total'], struct_num) for struct_num in scores]
wt_total_scores.sort()
top_x_wt_struct_nums = [t[1] for t in wt_total_scores[:top_x]]
mut_total_scores = [(scores[struct_num]['MutantComplex']['total'], struct_num) for struct_num in scores]
mut_total_scores.sort()
top_x_mut_struct_nums = [t[1] for t in mut_total_scores[:top_x]]
return numpy.average([scores[mut_struct_num]['MutantComplex']['total'] - scores[wt_struct_num]['WildTypeComplex']['total']
for wt_struct_num, mut_struct_num in zip(top_x_wt_struct_nums, top_x_mut_struct_nums)])
@analysis_api
def get_analysis_dataframe(self, prediction_set_id,
experimental_data_exists = True,
prediction_set_series_name = None, prediction_set_description = None, prediction_set_credit = None,
prediction_set_color = None, prediction_set_alpha = None,
use_existing_benchmark_data = True,
include_derived_mutations = False,
use_single_reported_value = False,
ddg_analysis_type = 'DDG_Top3',
take_lowest = None,
burial_cutoff = 0.25,
stability_classication_experimental_cutoff = 1.0,
stability_classication_predicted_cutoff = 1.0,
report_analysis = True,
silent = False,
root_directory = None, # where to find the prediction data on disk
score_method_id = None,
expectn = None,
allow_failures = False,
extract_data_for_case_if_missing = False,
debug = False,
restrict_to = set(),
remove_cases = set(),
):
#todo: rename function since we return BenchmarkRun objects
assert(score_method_id)
dataframe_type = 'Binding affinity'
parameters = copy.copy(locals())
del parameters['self']
return super(BindingAffinityDDGInterface, self)._get_analysis_dataframe(BindingAffinityBenchmarkRun, **parameters)
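# Typical usage (illustrative; the variable name, prediction set name, and score method ID are assumptions):
#   br = ppi_api.get_analysis_dataframe('my-prediction-set', score_method_id = 7, ddg_analysis_type = 'DDG_Top3', expectn = 50)
# which, despite the function name (see the todo above), returns a BindingAffinityBenchmarkRun object.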
@analysis_api
def get_existing_analysis(self, prediction_set_id = None, analysis_dataframe_id = None, return_dataframe = True):
'''Returns a list of the summary statistics for any existing dataframes in the database.
Each item in the list is a dict corresponding to a dataframe. These dicts are structured as e.g.
{
'AnalysisDataFrameID': 185L,
'analysis_sets': ['SKEMPI', 'BeAtMuSiC', 'ZEMu'],
'analysis_type': 'DDG_Top3',
'analysis_type_description': '...',
'dataframe': <pandas dataframe>,
'scalar_adjustments': {
'BeAtMuSiC': 2.383437079488905,
'SKEMPI': 2.206268329703589,
'ZEMu': 2.2046199780552374
},
'stats': {
'BeAtMuSiC': {
'MAE': nan,
'fraction_correct': 0.7308900523560209,
'fraction_correct_fuzzy_linear': 0.74128683025321573,
'gamma_CC': 0.4047074501135616,
'ks_2samp': (0.24269480519480513, 2.9466866316296972e-32),
'kstestx': (nan, nan),
'kstesty': (nan, nan),
'normaltestx': (nan, nan),
'normaltesty': (nan, nan),
'pearsonr': (nan, 1.0),
'spearmanr': (0.41841534629950339, 2.1365219255798831e-53)
},
'SKEMPI': {...},
'ZEMu': {...},
}
}
'''
### KAB TODO: this function is not adjusted for new changes in top_x
if analysis_dataframe_id == None:
# Get a valid PredictionSet record if one exists
assert(prediction_set_id != None)
try:
prediction_set = self.get_session().query(dbmodel.PredictionSet).filter(and_(dbmodel.PredictionSet.ID == prediction_set_id, dbmodel.PredictionSet.BindingAffinity == 1)).one()
except:
return None
dataframes = self.get_session().query(dbmodel.AnalysisDataFrame).filter(and_(dbmodel.AnalysisDataFrame.PredictionSet == prediction_set_id, dbmodel.AnalysisDataFrame.DataFrameType == 'Binding affinity')).order_by(dbmodel.AnalysisDataFrame.ScoreMethodID, dbmodel.AnalysisDataFrame.TopX, dbmodel.AnalysisDataFrame.StabilityClassicationExperimentalCutoff, dbmodel.AnalysisDataFrame.StabilityClassicationPredictedCutoff)
else:
try:
dataframe = self.get_session().query(dbmodel.AnalysisDataFrame).filter(dbmodel.AnalysisDataFrame.ID == analysis_dataframe_id).one()
assert(dataframe.DataFrameType == 'Binding affinity')
dataframes = [dataframe]
except Exception, e:
colortext.error(str(e))
colortext.error(traceback.format_exc())
return None
analysis_results = []
dataframes = [dfr for dfr in dataframes]
for dfr in dataframes:
# The dict to return
dfi = dfr.get_dataframe_info()
dfi['stats'] = {}
# Compute the stats per analysis set
df = dfi['dataframe']
if dfi['analysis_sets']:
# Case where there are analysis sets
for analysis_set in dfi['analysis_sets']:
dfi['stats'][analysis_set] = get_xy_dataset_statistics_pandas(
df,
BindingAffinityBenchmarkRun.get_analysis_set_fieldname('Experimental', analysis_set),
BindingAffinityBenchmarkRun.get_analysis_set_fieldname('Predicted_adj', analysis_set),
fcorrect_x_cutoff = float(dfr.StabilityClassicationExperimentalCutoff),
fcorrect_y_cutoff = float(dfr.StabilityClassicationPredictedCutoff),
ignore_null_values = True)
elif 'Experimental' in df.columns:
# Case where there are no analysis sets
dfi['stats']['Global'] = get_xy_dataset_statistics_pandas(
df,
'Experimental',
'Predicted_adj',
fcorrect_x_cutoff = float(dfr.StabilityClassicationExperimentalCutoff),
fcorrect_y_cutoff = float(dfr.StabilityClassicationPredictedCutoff),
ignore_null_values = True)
else:
# Case where there are no experimental data
dfi['stats'] = None
if not return_dataframe:
# May be useful if we are keeping a lot of these in memory and the dataframe is not useful
dfi['dataframe'] = None
analysis_results.append(dfi)
return analysis_results
@analysis_api
def analyze(self, prediction_set_ids, score_method_ids,
experimental_data_exists = True,
analysis_set_ids = [],
prediction_set_series_names = {}, prediction_set_descriptions = {}, prediction_set_credits = {}, prediction_set_colors = {}, prediction_set_alphas = {},
use_published_data = False,
allow_failures = False,
use_existing_benchmark_data = True, recreate_graphs = False,
include_derived_mutations = False,
expectn = 50,
use_single_reported_value = False,
take_lowests = [],
ddg_analysis_types = [],
burial_cutoff = 0.25,
stability_classication_experimental_cutoff = 1.0,
stability_classication_predicted_cutoff = 1.0,
output_directory = None,
output_directory_root = None,
generate_plots = True,
generate_matplotlib_plots = False,
report_analysis = True,
silent = False,
root_directory = None, # where to find the prediction data on disk
debug = False,
restrict_to = set(),
remove_cases = set(),
call_analysis = True,
):
'''Runs the analyses for the specified PredictionSets and cross-analyzes the sets against each other if appropriate.
* Analysis setup arguments *
prediction_set_ids is a list of PredictionSet IDs. Each PredictionSet will be analyzed separately and appropriate
pairs will be cross-analyzed.
prediction_set_series_names, prediction_set_descriptions, and prediction_set_credits are mappings from PredictionSet IDs
to series names (in plots), descriptions, and credits respectively. These details are stored in the PredictionSet record
so the mappings are not required; they can be used to override the database values to customize the analysis
reports. Likewise, prediction_set_colors and prediction_set_alphas are mappings to series colors and transparency values
for use in the plots.
use_published_data. todo: implement later. This should include any published data e.g. the Kellogg et al. data for protein stability.
use_existing_benchmark_data and recreate_graphs are data creation arguments i.e. "should we use existing data or create it from scratch?"
include_derived_mutations is used to filter out dataset cases with derived mutations.
expectn declares how many predictions we expect to see per dataset case. If the actual number is less than expectn
then a warning will be included in the analysis.
* Dataframe arguments *
use_single_reported_value is specific to ddg_monomer. If this is True then the DDG value reported by the application is used and take_lowest is ignored. This is inadvisable - take_lowest = 3 is a better default.
take_lowests AKA Top_X. Each value specifies how many of the best-scoring groups of structures to consider when calculating the predicted DDG value.
ddg_analysis_types specifies analysis methods other than TopX/take_lowests to run; the two arguments are not mutually exclusive.
burial_cutoff defines what should be considered buried (DSSPExposure field). Values around 1.0 are fully exposed, values of 0.0 are fully buried. For technical reasons, the DSSP value can exceed 1.0 but usually not by much.
stability_classication_experimental_cutoff AKA x_cutoff. This defines the neutral mutation range for experimental values in kcal/mol i.e. values between -1.0 and 1.0 kcal/mol are considered neutral by default.
stability_classication_predicted_cutoff AKA y_cutoff. This defines the neutral mutation range for predicted values in energy units.
* Reporting arguments *
output_directory : The directory in which to save plots and reports.
output_directory_root : A place to create an autogenerated output directory.
generate_plots : if plots are not needed, setting this to False can shorten the analysis time.
report_analysis : Whether or not to print analysis to stdout.
silent = False : Whether or not anything should be printed to stdout (True is useful for webserver interaction).
'''
for ddg_analysis_type in ddg_analysis_types:
assert( ddg_analysis_type.startswith('DDG_') )
for take_lowest in take_lowests:
assert(take_lowest > 0 and (int(take_lowest) == take_lowest))
ddg_analysis_types.append( 'DDG_Top%d' % take_lowest )
# Remove duplicate analysis types
ddg_analysis_types = set( ddg_analysis_types )
ddg_analysis_types = sorted( list(ddg_analysis_types) )
assert(0 <= burial_cutoff <= 2.0)
assert(stability_classication_experimental_cutoff > 0)
assert(stability_classication_predicted_cutoff > 0)
assert(expectn > 0 and (int(expectn) == expectn))
# Can't specify both output_directory and output_directory_root
if output_directory_root != None:
assert( output_directory == None )
if not os.path.isdir( output_directory_root ):
os.makedirs( output_directory_root )
if output_directory != None:
assert( output_directory_root == None )
benchmark_runs = []
for prediction_set_id in prediction_set_ids:
if len(prediction_set_ids) > 1:
print 'Generating benchmark run for prediction set: %s' % prediction_set_id
for score_method_id in score_method_ids:
if len(score_method_ids) > 1:
print 'Generating benchmark run for score method ID: %d' % score_method_id
for ddg_analysis_type in ddg_analysis_types:
if len(ddg_analysis_types) > 1:
print 'Generating benchmark run for DDG analysis type: %s' % ddg_analysis_type
benchmark_run = self.get_analysis_dataframe(prediction_set_id,
experimental_data_exists = experimental_data_exists,
prediction_set_series_name = prediction_set_series_names.get(prediction_set_id),
prediction_set_description = prediction_set_descriptions.get(prediction_set_id),
prediction_set_color = prediction_set_colors.get(prediction_set_id),
prediction_set_alpha = prediction_set_alphas.get(prediction_set_id),
prediction_set_credit = prediction_set_credits[prediction_set_id],
use_existing_benchmark_data = use_existing_benchmark_data,
include_derived_mutations = include_derived_mutations,
use_single_reported_value = use_single_reported_value,
ddg_analysis_type = ddg_analysis_type,
burial_cutoff = burial_cutoff,
stability_classication_experimental_cutoff = stability_classication_experimental_cutoff,
stability_classication_predicted_cutoff = stability_classication_predicted_cutoff,
report_analysis = report_analysis,
silent = silent,
root_directory = root_directory, # where to find the prediction data on disk
score_method_id = score_method_id,
expectn = expectn,
allow_failures = allow_failures,
debug = debug,
restrict_to = restrict_to,
remove_cases = remove_cases,
)
# The keys of scalar_adjustments are the stored analysis sets
analysis_sets_to_run = benchmark_run.scalar_adjustments.keys()
if analysis_set_ids:
analysis_sets_to_run = set(analysis_sets_to_run).intersection(set(analysis_set_ids))
benchmark_runs.append(benchmark_run)
analysis_sets_to_run = sorted(analysis_sets_to_run)
if experimental_data_exists:
#todo: hack. this currently seems to expect all datapoints to be present. handle the case when we are missing data e.g. prediction set "ZEMu run 1"
analysis_sets_to_run = ['ZEMu'] # ['BeAtMuSiC', 'SKEMPI', 'ZEMu']
if call_analysis:
if len(benchmark_runs) == 1 and len(analysis_sets_to_run) == 1:
    analysis_set_id = analysis_sets_to_run[0]
    if output_directory_root:
        # Create output directory inside output_directory_root
        output_directory = os.path.join(output_directory_root, '%s-%s-%s_n-%d_%s_score_method_%d-analysis_%s' % (time.strftime("%y%m%d"), getpass.getuser(), prediction_set_id, expectn, ddg_analysis_type, score_method_id, analysis_set_id))
colortext.message(analysis_set_id)
benchmark_run.full_analysis(analysis_set_id, output_directory)
else:
if output_directory or not output_directory_root:
raise Exception("Multiple benchmark run objects will be analyzed and output created; this requires setting output_directory_root instead of output_directory")
BindingAffinityBenchmarkRun.analyze_multiple(
benchmark_runs,
analysis_sets = analysis_sets_to_run,
analysis_directory = output_directory_root,
)
else:
return (benchmark_runs, analysis_sets_to_run)
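# Typical usage (illustrative; the prediction set name, score method ID, and output path are assumptions):
#   ppi_api.analyze(['my-prediction-set'], [7], take_lowests = [3], expectn = 50, output_directory_root = '/tmp/ddg_analysis')
# One BenchmarkRun is generated per (prediction set, score method, analysis type) combination; a single
# combination is analyzed on its own via full_analysis while multiple combinations are passed to analyze_multiple.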
################################################################################################
## Private API layer
## These are helper functions used internally by the class but which are not intended for export
################################################################################################
###########################################################################################
## Subclass layer
##
## These functions need to be implemented by subclasses
###########################################################################################
# Concrete functions
def _get_sqa_prediction_table(self): return dbmodel.PredictionPPI
def _get_sqa_prediction_structure_scores_table(self): return dbmodel.PredictionPPIStructureScore
def _get_sqa_user_dataset_experiment_table(self): return dbmodel.UserPPDataSetExperiment
def _get_sqa_user_dataset_experiment_tag_table(self): return dbmodel.UserPPDataSetExperimentTag
def _get_sqa_user_dataset_experiment_tag_table_udsid(self): return dbmodel.UserPPDataSetExperimentTag.UserPPDataSetExperimentID
def _get_sqa_predictions_user_dataset_experiment_id(self, p): return p.UserPPDataSetExperimentID
def _get_sqa_prediction_type(self): return dbmodel.PredictionSet.BindingAffinity
prediction_table = 'PredictionPPI'
def _get_prediction_table(self): return self.prediction_table
prediction_structure_scores_table = 'PredictionPPIStructureScore'
def _get_prediction_structure_scores_table(self): return self.prediction_structure_scores_table
def _get_prediction_type(self): return 'BindingAffinity'
def _get_prediction_dataset_type(self): return 'Binding affinity'
def _get_prediction_type_description(self): return 'binding affinity'
def _get_user_dataset_experiment_table(self): return 'UserPPDataSetExperiment'
def _get_user_dataset_experiment_tag_table(self): return 'UserPPDataSetExperimentTag'
def _get_allowed_score_types(self): return set(['DDG', 'WildTypeLPartner', 'WildTypeRPartner', 'WildTypeComplex', 'MutantLPartner', 'MutantRPartner', 'MutantComplex'])
###########################################################################################
## Information layer
##
## This layer is for functions which extract data from the database.
###########################################################################################
#== Information API =======================================================================
@informational_job
def get_development_protocol(self, development_protocol_id):
results = self.DDG_db.execute_select('SELECT * FROM DevelopmentProtocol WHERE ID = %s', parameters=(development_protocol_id,) )
assert( len(results) == 1 )
return results[0]
@informational_pdb
def get_complex_ids_matching_protein_name(self, partial_name, tsession = None):
'''Returns a list of PPComplex IDs where at least one of the partner names matches partial_name.'''
tsession = tsession or self.importer.get_session()
tsession_utf = self.importer.get_session(utf = True)
results = []
partial_name_ascii = partial_name.encode('ascii', errors='ignore').decode('ascii') # ugh
if len(partial_name.split()) == 1 and len(partial_name) <= 4:
# For short names, require a prefix or suffix match (SQL LIKE has no ^/$ anchors) to avoid spurious substring hits e.g. 'RAN' matching 'transferase' or 'membrane'
results += [c.ID for c in tsession_utf.query(dbmodel.PPComplex).filter(or_(
    dbmodel.PPComplex.LName.like(partial_name + u'%'),
    dbmodel.PPComplex.LShortName.like(partial_name + u'%'),
    dbmodel.PPComplex.RName.like(partial_name + u'%'),
    dbmodel.PPComplex.RShortName.like(partial_name + u'%')))]
results += [c.ID for c in tsession.query(dbmodel.PPComplex).filter(or_(
    dbmodel.PPComplex.LHTMLName.like(partial_name_ascii + '%'),
    dbmodel.PPComplex.RHTMLName.like(partial_name_ascii + '%')))]
results += [c.ID for c in tsession_utf.query(dbmodel.PPComplex).filter(or_(
    dbmodel.PPComplex.LName.like(u'%' + partial_name),
    dbmodel.PPComplex.LShortName.like(u'%' + partial_name),
    dbmodel.PPComplex.RName.like(u'%' + partial_name),
    dbmodel.PPComplex.RShortName.like(u'%' + partial_name)))]
results += [c.ID for c in tsession.query(dbmodel.PPComplex).filter(or_(
    dbmodel.PPComplex.LHTMLName.like('%' + partial_name_ascii),
    dbmodel.PPComplex.RHTMLName.like('%' + partial_name_ascii)))]
else:
results += [c.ID for c in tsession_utf.query(dbmodel.PPComplex).filter(or_(
dbmodel.PPComplex.LName.like(u'%' + partial_name + u'%'),
dbmodel.PPComplex.LShortName.like(u'%' + partial_name + u'%'),
dbmodel.PPComplex.RName.like(u'%' + partial_name + u'%'),
dbmodel.PPComplex.RShortName.like(u'%' + partial_name + u'%')))]
results += [c.ID for c in tsession.query(dbmodel.PPComplex).filter(or_(
dbmodel.PPComplex.LHTMLName.like('%' + partial_name_ascii + '%'),
dbmodel.PPComplex.RHTMLName.like('%' + partial_name_ascii + '%')))]
return results
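# Note: the code below this return statement is an earlier raw-SQL (DDG_db) implementation of the same lookup
# and is unreachable; it is retained for reference only.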
qry = '''SELECT ID FROM PPComplex
WHERE
LName LIKE %s
OR LShortName LIKE %s
OR LHTMLName LIKE %s
OR RName LIKE %s
OR RShortName LIKE %s
OR RHTMLName LIKE %s ORDER BY ID'''
if len(partial_name.split()) == 1 and len(partial_name) <= 4:
# for short names, we require that any matches have the string as a prefix or suffix as otherwise we may get many matches e.g. 'RAN' matches 'transferase', 'membrane', etc.
partial_name_ascii = partial_name.encode('ascii', errors='ignore').decode('ascii') # ugh
results += self.DDG_db_utf.execute_select(qry, parameters=(u'%{0}'.format(partial_name), u'%{0}'.format(partial_name), '%{0}'.format(partial_name_ascii), u'%{0}'.format(partial_name), u'%{0}'.format(partial_name), '%{0}'.format(partial_name_ascii)))
results += self.DDG_db_utf.execute_select(qry, parameters=(u'{0}%'.format(partial_name), u'{0}%'.format(partial_name), '{0}%'.format(partial_name_ascii), u'{0}%'.format(partial_name), u'{0}%'.format(partial_name), '{0}%'.format(partial_name_ascii)))
else:
partial_name_ascii = partial_name.encode('ascii', errors='ignore').decode('ascii') # ugh
results += self.DDG_db_utf.execute_select(qry, parameters=(u'%{0}%'.format(partial_name), u'%{0}%'.format(partial_name), '%{0}%'.format(partial_name_ascii), u'%{0}%'.format(partial_name), u'%{0}%'.format(partial_name), '%{0}%'.format(partial_name_ascii)))
return [r['ID'] for r in results]
@informational_pdb
def _get_pdb_chains_used_for_prediction_set(self, prediction_set):
raise Exception('not implemented yet')
return self.DDG_db.execute_select('''
SELECT Prediction.ID, Experiment.PDBFileID, Chain
FROM Prediction
INNER JOIN Experiment ON Experiment.ID=Prediction.ExperimentID
INNER JOIN ExperimentChain ON ExperimentChain.ExperimentID=Prediction.ExperimentID
WHERE PredictionSet=%s''', parameters=(prediction_set,))
###########################################################################################
## Prediction layer
##
## This part of the API is responsible for inserting prediction jobs in the database via
## the trickle-down proteomics paradigm.
###########################################################################################
#== Job creation API ===========================================================
#
# This part of the API is responsible for inserting prediction jobs in the database via
# the trickle-down proteomics paradigm.
def _charge_prediction_set_by_residue_count(self, PredictionSet):
'''This function assigns a cost for a prediction equal to the number of residues in the chains.'''
raise Exception('This function needs to be rewritten.')
from klab.bio.rcsb import parseFASTAs
DDG_db = self.DDG_db
predictions = DDG_db.execute_select("SELECT ID, ExperimentID FROM Prediction WHERE PredictionSet=%s", parameters=(PredictionSet,))
PDB_chain_lengths ={}
for prediction in predictions:
chain_records = DDG_db.execute_select('SELECT PDBFileID, Chain FROM Experiment INNER JOIN ExperimentChain ON ExperimentID=Experiment.ID WHERE ExperimentID=%s', parameters=(prediction['ExperimentID'],))
num_residues = 0
for chain_record in chain_records:
key = (chain_record['PDBFileID'], chain_record['Chain'])
if PDB_chain_lengths.get(key) == None:
fasta = DDG_db.execute_select("SELECT FASTA FROM PDBFile WHERE ID=%s", parameters = (chain_record['PDBFileID'],))
assert(len(fasta) == 1)
fasta = fasta[0]['FASTA']
f = parseFASTAs(fasta)
PDB_chain_lengths[key] = len(f[chain_record['PDBFileID']][chain_record['Chain']])
chain_length = PDB_chain_lengths[key]
num_residues += chain_length
print("UPDATE Prediction SET Cost=%0.2f WHERE ID=%d" % (num_residues, prediction['ID']))
predictions = DDG_db.execute("UPDATE Prediction SET Cost=%s WHERE ID=%s", parameters=(num_residues, prediction['ID'],))
def _get_dev_protocol_id(self, name):
dev_protocol_ids = self.DDG_db.execute_select("SELECT ID FROM DevelopmentProtocol WHERE Name=%s", parameters = (name,))
if len(dev_protocol_ids) == 0:
return None
elif len(dev_protocol_ids) == 1:
return int(dev_protocol_ids[0]['ID'])
else:
raise Exception("DevelopmentProtocol table was originally set up so that names are unique; this has obviously changed")
def _create_dev_protocol(self, name, application, template_command_line):
dev_prot_record = {
'Name' : name,
'Application' : application,
'TemplateCommandLine' : template_command_line,
}
sql, params, record_exists = self.DDG_db.create_insert_dict_string('DevelopmentProtocol', dev_prot_record)
self.DDG_db.execute(sql, params)
###########################################################################################
## Data entry layer
##
## This part of the API is responsible for data entry (e.g. complex definitions)
###########################################################################################
#== Job creation API ===========================================================
#
# This part of the API is responsible for inserting prediction jobs in the database via
# the trickle-down proteomics paradigm.
#######################################
# #
# Protein-protein complex data entry #
# public API #
# #
# #
# PPComplex #
# PPIPDBPartnerChain #
# PPIPDBSet #
# #
# Missing tables: #
# PPIConformationalChange #
# PPIDatabaseComplex #
# PPIDataSetCrossmap #
# #
#######################################
@ppi_data_entry
def find_complex(self, pdb_ids, keywords = [], tsession = None, quiet = True):
possible_match_ids = []
for pdb_id in pdb_ids:
existing_records = self.DDG_db.execute_select('SELECT * FROM PDBFile WHERE ID=%s', parameters=(pdb_id,))
if existing_records and not quiet:
colortext.warning('The PDB file {0} exists in the database.'.format(pdb_id))
complex_ids = self.search_complexes_by_pdb_id(pdb_id)
if complex_ids:
if existing_records and not quiet:
colortext.warning('The PDB file {0} has associated complexes: {1}'.format(pdb_id, ', '.join(map(str, complex_ids))))
assert(len(complex_ids) == 1)
complex_id = complex_ids[0]
#colortext.warning('Complex #{0}'.format(complex_id))
#pprint.pprint(self.get_complex_details(complex_id))
assert(type(keywords) == list)
keywords = set(keywords)
for keyword in keywords:
hits = self.get_complex_ids_matching_protein_name(keyword, tsession = tsession)
if hits:
if not quiet:
colortext.warning('Partial match on "{0}".'.format(keyword))
possible_match_ids.extend(hits)
possible_match_ids = sorted(set(possible_match_ids))
return [self.get_complex_details(id) for id in possible_match_ids]
@ppi_data_entry
def add_complex_structure_pair(self, complex_structure_definition_pair, keywords = None, force = False, previously_added = set(), trust_database_content = False, update_sections = set(), allow_missing_params_files = False, debug = False, minimum_sequence_identity = 95.0):
'''Wrapper function for add_designed_pdb and add_complex.
complex_structure_definition_pair should be a dict with the structure:
dict(
Structure = <see the definition in kddg.api.data:add_designed_pdb>,
Complex = <see the definition in ppi_api:add_complex>,
)
To simplify the logic, we treat this function call as an atomic call i.e. it creates its own session and rolls back or commits.
'''
# Sanity checks
assert(complex_structure_definition_pair['Complex']['structure_id'] == complex_structure_definition_pair['Structure']['db_id'])
if 'chain_mapping' in complex_structure_definition_pair['Structure']:
assert(sorted(complex_structure_definition_pair['Structure']['chain_mapping'].keys()) == sorted(complex_structure_definition_pair['Complex']['LChains'] + complex_structure_definition_pair['Complex']['RChains']))
# Create a new session
tsession = self.importer.get_session(new_session = True, utf = False)
try:
# Add the structure
self.importer.add_designed_pdb(complex_structure_definition_pair['Structure'], previously_added = previously_added, trust_database_content = trust_database_content,
update_sections = update_sections, allow_missing_params_files = allow_missing_params_files,
minimum_sequence_identity = minimum_sequence_identity, tsession = tsession, debug = debug)
if debug:
tsession.rollback()
else:
tsession.commit()
tsession.close()
except:
colortext.error('Failure.')
tsession.rollback()
tsession.close()
raise
tsession = self.importer.get_session(new_session = True, utf = True)
try:
# Add the complex definition and PDB definition
api_response = self.add_complex(complex_structure_definition_pair['Complex'], keywords = keywords, force = force, debug = debug, tsession = tsession)
if api_response['success']:
str(api_response['PPIPDBSet']) # this forced lookup of partner_chains seems to be crucial when accessing it later (which should only be done for printing as the data cannot be guaranteed to be up-to-date)
tsession.expunge_all() # note: we only need to expunge api_response['PPIPDBSet'].partner_chains (it is loaded lazily/deferred)
if debug:
api_response = dict(success = False, error = 'Debug call - rolling back the transaction.')
tsession.rollback()
else:
tsession.commit()
else:
tsession.rollback()
tsession.close()
return api_response
except:
colortext.error('Failure.')
tsession.rollback()
tsession.close()
raise
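# Illustrative (partial) example of the expected argument; only the keys checked by the sanity checks above are
# shown and the PDB/chain identifiers are made up. The full key sets are defined by add_designed_pdb and add_complex:
#   complex_structure_definition_pair = dict(
#       Structure = dict(db_id = '1ABC', chain_mapping = {'A' : ..., 'B' : ...}, ...),
#       Complex = dict(structure_id = '1ABC', LChains = ['A'], RChains = ['B'], ...),
#   )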
def lookup_pdb_set(self, tsession, passed_pdb_set, allow_partial_matches = True, complex_id = None):
'''Takes a dict {'L' -> List(Tuple(PDB ID, Chain ID)), 'R' -> List(Tuple(PDB ID, Chain ID))} and returns all PDB
sets (complex_id, set_number, reverse_match) which have either partial or exact matches depending on
whether allow_partial_matches is True or False respectively. If reverse_match is True it means that the
partner definitions are reversed (left partner = right partner,...).
The matching is symmetric over the partner definitions i.e. if L1 matches R2 and R1 matches L2 then we consider this a match.
If complex_id is specified then we restrict matches to that particular ID (PPComplex.ID). Otherwise, all definitions
in the database are considered.
If allow_partial_matches is True then we return hits if there is at least one common chain in each partner.
Otherwise, we return hits if there are exact matches (modulo chain ordering)
'''
defined_sets = {}
if complex_id != None:
# Consider sets for a specific complex
defined_sets[complex_id] = {}
for r in tsession.query(dbmodel.PPIPDBPartnerChain).filter(dbmodel.PPIPDBPartnerChain.PPComplexID == complex_id):
set_number = r.SetNumber
defined_sets[complex_id][set_number] = defined_sets[complex_id].get(set_number, {'L' : [], 'R' : []})
defined_sets[complex_id][set_number][r.Side].append((r.PDBFileID, r.Chain))
else:
# Consider all sets
for r in tsession.query(dbmodel.PPIPDBPartnerChain):
set_number = r.SetNumber
c_id = r.PPComplexID
defined_sets[c_id] = defined_sets.get(c_id, {})
defined_sets[c_id][set_number] = defined_sets[c_id].get(set_number, {'L' : [], 'R' : []})
defined_sets[c_id][set_number][r.Side].append((r.PDBFileID, r.Chain))
set_number_hits = set()
for c_id, set_definitions in sorted(defined_sets.iteritems()):
for set_number, set_partners in sorted(set_definitions.iteritems()):
# Check for matches against the stored PDB sets. Check for the symmetric definition as well
if allow_partial_matches:
# Partial matching
if set(passed_pdb_set['L']).intersection(set_partners['L']) and set(passed_pdb_set['R']).intersection(set_partners['R']):
set_number_hits.add((c_id, set_number, False))
if set(passed_pdb_set['L']).intersection(set_partners['R']) and set(passed_pdb_set['R']).intersection(set_partners['L']):
set_number_hits.add((c_id, set_number, True))
else:
# Exact matching
if (sorted(passed_pdb_set['L']) == sorted(set_partners['L'])) and (sorted(passed_pdb_set['R']) == sorted(set_partners['R'])):
set_number_hits.add((c_id, set_number, False))
if (sorted(passed_pdb_set['L']) == sorted(set_partners['R'])) and (sorted(passed_pdb_set['R']) == sorted(set_partners['L'])):
set_number_hits.add((c_id, set_number, True))
if len(set([t[2] for t in set_number_hits])) > 1:
raise colortext.Exception('WARNING: the complex definition has at least two PDB sets where the left and right partners are in the reverse direction. This indicates a redundancy in the database.')
return sorted(set_number_hits)
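# Illustrative call (the complex ID, set number, and chain identifiers are made up):
#   lookup_pdb_set(tsession, {'L' : [('1ABC', 'A')], 'R' : [('1ABC', 'B')]}, allow_partial_matches = False)
# might return [(212, 1, False)] i.e. complex #212, PDB set 1, with the partners in the same (non-reversed) order.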
def lookup_complex_by_details(self, tsession, complex_details, allow_partial_matches = True):
'''Takes a complex_details dict (as defined in add_complex) for a bound complex (i.e. a single PDB ID) and returns
the corresponding complex(es) and PDB set details if the defined complex exists in the database.
There are two paths. First, we check whether a complex exists with an exact match on all fields in the PPComplex
table. This case is probably only likely in the case where the same complex definition is being added repeatedly
e.g. if a data import script is being run over and over again. Next, we check whether a complex exists based on
the PDB set i.e. whether a complex using the same PDB chains exists in the database.
Note that this function will NOT detect cases where the same complex is being used as an existing complex in the
database but where there are differences in the partner names and a different PDB file is being specified. Therefore,
care must still be taken when adding complexes to the database to ensure that we do not store duplicate definitions.
This function is mainly useful as a helper function for add_complex to avoid hitting fail branches when force == False
in that function. It results in cleaner handling of attempts to re-add existing data.
Note: We ignore the ChainIndex field in PPIPDBPartnerChain - i.e. we treat partner definitions as bags, not sequences
Returns: a dict mapping:
complex_id -> Dict(reverse_match -> Boolean, # reverse_match is None, True, or False and indicates whether or not the matched complex names (L, R) are in the same order
set_numbers -> List(dict(set_number -> set_number, reverse_match = Boolean))) # reverse_match here is True or False and indicates whether or not the matched PDB sets (L, R) are in the same order
'''
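# Illustrative return value (the IDs and set numbers are made up): a successful lookup might return
#     {202 : dict(reverse_match = False, set_numbers = [dict(set_number = 0, reverse_match = False)])}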
# todo: this part of the function currently only allows bound complexes as there is a single structure_id parameter
# todo: this is the part of the code to change to allow the function to handle unbound complexes
passed_pdb_set = dict(
L = sorted([(complex_details['structure_id'], c) for c in complex_details['LChains']]),
R = sorted([(complex_details['structure_id'], c) for c in complex_details['RChains']])
)
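# Illustrative example (values taken from the add_complex docstring): for structure_id = '1A2K_TP0',
# LChains = ['A'] and RChains = ['C'], passed_pdb_set becomes {'L' : [('1A2K_TP0', 'A')], 'R' : [('1A2K_TP0', 'C')]}.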
complex_id = None
complex_reverse_match = None
# Try for an exact match
# This branch is only useful when the user is adding the same definition multiple times i.e. the same names for the complex.
# This is mostly hit when import scripts are run multiple times.
complex_record = get_or_create_in_transaction(tsession, dbmodel.PPComplex, complex_details, variable_columns = ['ID'], only_use_supplied_columns = True, read_only = True)
if complex_record:
results = [r for r in tsession.query(dbmodel.PPComplex).filter(and_(dbmodel.PPComplex.LName == complex_details['LName'], dbmodel.PPComplex.RName == complex_details['RName']))]
results += [r for r in tsession.query(dbmodel.PPComplex).filter(and_(dbmodel.PPComplex.LShortName == complex_details['LShortName'], dbmodel.PPComplex.RShortName == complex_details['RShortName']))]
results += [r for r in tsession.query(dbmodel.PPComplex).filter(and_(dbmodel.PPComplex.LHTMLName == complex_details['LHTMLName'], dbmodel.PPComplex.RHTMLName == complex_details['RHTMLName']))]
complex_ids = sorted(set([r.ID for r in results]))
if complex_ids:
if not len(complex_ids) == 1:
raise colortext.Exception('WARNING: Multiple complex definitions (PPComplex.ID = {0}) share the same partner names. This indicates a redundancy in the database.'.format(', '.join(complex_ids)))
complex_id = complex_ids[0]
complex_record = tsession.query(dbmodel.PPComplex).filter(dbmodel.PPComplex.ID == complex_id).one()
complex_reverse_match = False
results = [r for r in tsession.query(dbmodel.PPComplex).filter(and_(dbmodel.PPComplex.LName == complex_details['RName'], dbmodel.PPComplex.RName == complex_details['LName']))]
results += [r for r in tsession.query(dbmodel.PPComplex).filter(and_(dbmodel.PPComplex.LShortName == complex_details['RShortName'], dbmodel.PPComplex.RShortName == complex_details['LShortName']))]
results += [r for r in tsession.query(dbmodel.PPComplex).filter(and_(dbmodel.PPComplex.LHTMLName == complex_details['RHTMLName'], dbmodel.PPComplex.RHTMLName == complex_details['LHTMLName']))]
complex_ids = sorted(set([r.ID for r in results]))
if complex_ids:
if (complex_id != None) or (len(complex_ids) != 1):
raise colortext.Exception('WARNING: Multiple complex definitions (PPComplex.ID = {0}) share the same partner names. This indicates a redundancy in the database.'.format(', '.join(complex_ids)))
complex_id = complex_ids[0]
complex_record = tsession.query(dbmodel.PPComplex).filter(dbmodel.PPComplex.ID == complex_id).one()
complex_reverse_match = True
if complex_record:
# We found an associated PPComplex record. Now we check to see whether an associated PPIPDBSet exists
complex_id = complex_record.ID
# todo: this part of the function allows unbound complexes and does not need to be updated
set_number_hits = self.lookup_pdb_set(tsession, passed_pdb_set, allow_partial_matches = allow_partial_matches, complex_id = complex_id)
# One exact hit for the complex definition with one or many PDB sets
l = []
for h in set_number_hits:
assert(h[0] == complex_id)
set_number = h[1]
reverse_match = h[2]
assert(complex_reverse_match == reverse_match)
l.append(dict(set_number = set_number, reverse_match = reverse_match))
return {complex_id : dict(reverse_match = complex_reverse_match, set_numbers = l)}
else:
# The complex did not exactly match a PPComplex record however there may simply be differences in the partner names.
# We proceed by looking for a match based on the PDB chains by checking all PDB sets.
set_number_hits = self.lookup_pdb_set(tsession, passed_pdb_set, allow_partial_matches = allow_partial_matches)
results_by_complex = {}
for h in set_number_hits:
complex_id = h[0]
set_number = h[1]
reverse_match = h[2]
results_by_complex[complex_id] = results_by_complex.get(complex_id, dict(reverse_match = None, set_numbers = []))
results_by_complex[complex_id]['set_numbers'].append(dict(set_number = set_number, reverse_match = reverse_match))
return results_by_complex
return None
@ppi_data_entry
def add_complex(self, complex_details, keywords = [], force = False, debug = False, tsession = None):
'''Add a complex to the database using a defined dict structure.
:param complex_details: A dict fitting the defined structure (see below).
:param keywords: A list of keywords used to search existing complexes for an existing match. Not necessary but
advised, particularly when adding a small number of complexes.
:param force: If a potentially similar complex is found and force is False then then the function returns with a
message and without adding the complex. The ForceAddition setting in the Complex dict (see below)
will have the same effect as setting this variable.
:param debug: If debug is set to True then the transaction used to insert the complex into the database will be
rolled back and a message stating that the insertion would have been successful is returned in the
return dict.
:return: On successful import, the dict
{success = True, ComplexID -> Long, SetNumber -> Long, ReverseMatch -> Boolean}
corresponding to the database PPIPDBSet primary key is returned. ReverseMatch is True if the complex was
found in the database with the same partner ordering (Left = Left, Right = Right) and False otherwise.
If a similar complex is detected and force is False then a dict
{success = False, ComplexID -> Long, SetNumber -> Long, ReverseMatch -> Boolean, message -> String}
will be returned instead.
On error, a dict {success = False, error -> String} is returned.
The database uses Unicode to encode the strings, allowing us to use e.g. Greek characters.
For this reason, please keep all structure definitions in a file encoded as Unicode (UTF-8). On Linux, you can add the
# -*- coding: utf-8 -*-
declaration at the top of the file (with no leading whitespace).
One example of the dict structure is as follows:
dict(
# There are two cases - the complex exists in the database or we will be adding a new complex.
# Note: Before adding new complexes, you should make sure that there is no existing complex in the
# database. This will help to reduce redundancy and provide us with better data.
# These fields are required in both cases and specify the partners of the complex
# Note: Please ensure that the LChains (resp. RChains) chains correspond to the protein/complex
# identified by LName, LShortName, LHTMLName (resp. RName, RShortName, RHTMLName)
structure_id = '1A2K_TP0',
LChains = ['A'],
RChains = ['C'],
# Case 1: These fields should be used if there is an existing complex in the database.
ComplexID = 202,
# Case 2: These fields should only be used if there is no existing complex in the database.
AdditionalKeywords = ['GSP1'], # Used to search for existing complexes. The PDB ID, LName, LShortName, etc. fields will automatically be used for the search so there is no need to specify those.
LName = 'Ras-related nuclear protein', # the full protein name for the left partner. This is a Unicode field.
LShortName = 'RAN', # the short-hand name commonly used
LHTMLName = 'RAN', # a version of the short-hand name converted to HTML e.g. the entity &alpha; used in place of an alpha character. This is an ASCII field.
RName = 'Ran-specific GTPase-activating protein', # similar
RShortName = 'RanGAP1', # similar
RHTMLName = 'RanGAP1', # similar
FunctionalClassID = 'OG', # One of A (Antibody-antigen), AB (Antigen/Bound Antibody), EI (Enzyme/inhibitor), ER (Enzyme containing complex),
# ES (Enzyme containing complex), OG (G-proteins), OR (Receptors), or OX (Miscellaneous)
PPDBMFunctionalClassID = 'O', # One of A (Antibody-antigen), AB (Antigen/Bound Antibody), E (Enzyme/Inhibitor or Enzyme/Substrate), or O (Miscellaneous)
PPDBMDifficulty = None, # specific to the protein-protein docking benchmark i.e. use None here
IsWildType = True, # if this is the wildtype sequence
WildTypeComplexID = None, # if this is not wildtype sequence and the wildtype complex is in the database, please specify that complex ID here
Notes = '...' # any notes on the complex e.g. 'There is a related complex in the database (complex #119 at the time of writing) with all three unique chains from 1K5D (AB|C).'
Warnings = None, # any warnings about the complex in general. Note: Structural warnings belong in the description field of the Structure dict.
# Optional fields for either case
PDBComplexNotes = '...' # any notes specific to the particular PDB structure rather than the complex
DatabaseKeys = [ # Used when adding complexes from databases to help map them back to that database
dict(
DatabaseName = "SKEMPI",
DatabaseKey = "1NCA_N_LH",
),
...
]
)
'''
# todo: this function currently only adds bound complexes (which is the typical case). It is straightforward to generalize the structure above for unbound complexes e.g. by changing LChains and RChains to include structure ids
existing_session = not(not(tsession))
tsession = tsession or self.importer.get_session(new_session = True, utf = True)
# Search for exact matches first, then partial matches
pp_complex = None
reverse_match = None
for match_param in [False, True]:
existing_complexes = self.lookup_complex_by_details(tsession, complex_details, allow_partial_matches = match_param)
if existing_complexes:
if len(existing_complexes) == 1:
existing_complex_id = existing_complexes.keys()[0]
pp_complex = tsession.query(dbmodel.PPComplex).filter(dbmodel.PPComplex.ID == existing_complex_id).one()
if 'ComplexID' in complex_details:
if complex_details['ComplexID'] != pp_complex.ID:
raise colortext.Exception('ComplexID {0} was passed but complex #{1} was found which seems to match the complex definition.'.format(complex_details['ComplexID'], pp_complex.ID))
reverse_match = existing_complexes[existing_complex_id]['reverse_match']
existing_pdb_sets = existing_complexes[existing_complex_id]['set_numbers']
if existing_pdb_sets:
if len(existing_pdb_sets) == 1:
existing_pdb_set = existing_pdb_sets[0]
msg = None
if match_param == True:
msg = 'A match was found on the partner/PDB set definition but the complex fields had different values e.g. different names of each partner.'
if not force:
return dict(success = False, message = msg, ComplexID = existing_complex_id, SetNumber = existing_pdb_set['set_number'], ReverseMatch = existing_pdb_set['reverse_match'])
else:
colortext.warning(msg)
return dict(success = True, message = msg, ComplexID = existing_complex_id, SetNumber = existing_pdb_set['set_number'], ReverseMatch = existing_pdb_set['reverse_match'])
return dict(success = True, ComplexID = existing_complex_id, SetNumber = existing_pdb_set['set_number'], ReverseMatch = existing_pdb_set['reverse_match'])
else:
raise colortext.Exception('The complex definition exists in the database but multiple PDB sets / partner definitions match the passed parameters. Check this case manually.')
else:
# If force is not passed, raise an exception. Else, cascade into the new partner definition creation below.
if not force:
raise colortext.Exception('The complex definition exists in the database although no PDB sets / partner definitions corresponding EXACTLY to the partner definition were found. Check this case manually to see whether existing definitions would suit better than the passed definition (else, the force parameter can be passed to force creation of a new definition).')
else:
raise colortext.Exception('Multiple complex definitions exist in the database which match the passed complex definition. Check this case manually.')
# We have not found an exact match or (if force == True) a similar match has been found.
# If force is False and a similar complex was found, we should have raised an exception above.
try:
assert('DatabaseKeys' not in complex_details) # todo: write this code
# Check parameters
passed_keys = sorted(complex_details.keys())
expected_keys = ['structure_id', 'LChains', 'RChains']
for k in expected_keys:
assert(k in complex_details)
structure_id, LChains, RChains = complex_details['structure_id'], complex_details['LChains'], complex_details['RChains']
# Check that the structure is already in the database
structure_record = None
try:
structure_record = tsession.query(dbmodel.PDBFile).filter(dbmodel.PDBFile.ID == structure_id).one()
except:
raise Exception('The structure "{0}" does not exist in the database.'.format(structure_id))
# Add the PPComplex record
if pp_complex:
if reverse_match == True:
raise Exception('Write this case. We should add the passed chains in the opposite order (L = R, R = L) since the found complex has the opposite partner ordering.')
else:
assert(force)
assert(reverse_match == False) # i.e. it is not equal to None
else:
pp_complex = None
if 'ComplexID' in complex_details:
expected_keys.append('ComplexID')
if (('PDBComplexNotes' in complex_details) and len(complex_details) != 5) or (('PDBComplexNotes' not in complex_details) and (len(complex_details) != 4)):
raise Exception('As the ComplexID was specified, the only expected fields were "{0}" but "{1}" were passed.'.format('", "'.join(sorted(expected_keys)), '", "'.join(passed_keys)))
pp_complex = tsession.query(dbmodel.PPComplex).filter(dbmodel.PPComplex.ID == complex_details['ComplexID']).one()
else:
keywords = keywords + [complex_details['LName'], complex_details['LShortName'], complex_details['LHTMLName'], complex_details['RName'], complex_details['RShortName'], complex_details['RHTMLName']]
if complex_details.get('AdditionalKeywords'):
keywords.extend(complex_details['AdditionalKeywords'])
possible_matches = self.find_complex([structure_id], keywords, tsession = tsession)
if possible_matches:
if not force:
return dict(success = False, debug = debug, error = 'Complexes exist in the database which may be related. Please check whether any of these complexes match your case.', possible_matches = possible_matches)
colortext.warning('Complexes exist in the database which may be related. Continuing to add a new complex regardless.')
pp_complex = get_or_create_in_transaction(tsession, dbmodel.PPComplex, dict(
LName = complex_details['LName'],
LShortName = complex_details['LShortName'],
LHTMLName = complex_details['LHTMLName'],
RName = complex_details['RName'],
RShortName = complex_details['RShortName'],
RHTMLName = complex_details['RHTMLName'],
FunctionalClassID = complex_details['FunctionalClassID'],
PPDBMFunctionalClassID = complex_details['PPDBMFunctionalClassID'],
PPDBMDifficulty = complex_details['PPDBMDifficulty'],
IsWildType = complex_details['IsWildType'],
WildTypeComplexID = complex_details['WildTypeComplexID'],
Notes = complex_details['Notes'],
Warnings = complex_details['Warnings'],
), missing_columns = ['ID'])
# Search for an existing PDB set. Read the current definitions, treating them as bags then sorting lexically
pdb_sets = {}
for pschain in tsession.query(dbmodel.PPIPDBPartnerChain).filter(dbmodel.PPIPDBPartnerChain.PPComplexID == pp_complex.ID):
pdb_sets[pschain.SetNumber] = pdb_sets.get(pschain.SetNumber, {'L' : [], 'R' : []})
pdb_sets[pschain.SetNumber][pschain.Side].append((pschain.PDBFileID, pschain.Chain))
# Create a bag from the new definition then sort lexically
new_pdb_set = dict(L = sorted([(structure_id, c) for c in LChains]),
R = sorted([(structure_id, c) for c in RChains]))
# Check whether an exact match already exists
matching_set, reverse_match = None, None
for set_number, set_def in pdb_sets.iteritems():
set_def['L'] = sorted(set_def['L'])
set_def['R'] = sorted(set_def['R'])
if set_def['L'] == new_pdb_set['L'] and set_def['R'] == new_pdb_set['R']:
matching_set, reverse_match = True, False
elif set_def['L'] == new_pdb_set['R'] and set_def['R'] == new_pdb_set['L']:
matching_set, reverse_match = True, True
if matching_set:
pdb_set = tsession.query(dbmodel.PPIPDBSet).filter(and_(dbmodel.PPIPDBSet.PPComplexID == pp_complex.ID, dbmodel.PPIPDBSet.SetNumber == set_number)).one()
return dict(success = True, ReverseMatch = reverse_match, ComplexID = pp_complex.ID, SetNumber = set_number) # this used to also return PPIPDBSet = pdb_set
# No match. Create a new set by adding a PPIPDBSet record.
if pdb_sets:
new_set_number = max(pdb_sets.keys()) + 1
else:
new_set_number = 0
assert(tsession.query(dbmodel.PPIPDBSet).filter(and_(dbmodel.PPIPDBSet.PPComplexID == pp_complex.ID, dbmodel.PPIPDBSet.SetNumber == new_set_number)).count() == 0) # Sanity check
pdb_complex_notes = None
if 'PDBComplexNotes' in complex_details:
pdb_complex_notes = complex_details['PDBComplexNotes']
pdb_set_object = get_or_create_in_transaction(tsession, dbmodel.PPIPDBSet,
dict(
PPComplexID = pp_complex.ID,
SetNumber = new_set_number,
IsComplex = True, # todo: change when we allow unbound complexes
Notes = pdb_complex_notes,
))
# Create the associated PPIPDBPartnerChain records
for set_side, side_chains in sorted(new_pdb_set.iteritems()):
chain_index = 0
for pc in sorted(side_chains):
get_or_create_in_transaction(tsession, dbmodel.PPIPDBPartnerChain,
dict(
PPComplexID = pp_complex.ID,
SetNumber = new_set_number,
Side = set_side,
ChainIndex = chain_index,
PDBFileID = pc[0],
Chain = pc[1],
NMRModel = None, # todo
), missing_columns = ['ID'])
chain_index += 1
# Return the API response
api_response = dict(success = True, ReverseMatch = False, PPIPDBSet = pdb_set_object, ComplexID = pp_complex.ID, SetNumber = new_set_number) # this used to also return PPIPDBSet = pdb_set_object
if not(existing_session):
if debug:
api_response = dict(success = False, debug = debug, error = 'Debug call - rolling back the transaction.')
tsession.rollback()
tsession.close()
else:
tsession.commit()
tsession.close()
return api_response
except:
colortext.error('Failure.')
print(traceback.format_exc())
tsession.rollback()
tsession.close()
raise
@ppi_data_entry
def add_user_dataset_case(self, tsession, user_dataset_case, user_dataset_name_to_id_map = {}):
'''Add a user dataset case to the database using a defined dict structure.
:param tsession: A transaction session. This must be created and passed into this function as user datasets should
be added in one transaction.
:param user_dataset_case: A single case for the user dataset matching the structure defined below.
:param user_dataset_name_to_id_map: Used to cache the mapping from user dataset names to their integer IDs
:return: On success, the UserDataSetExperiment object is returned.
user_dataset_case should be structured as in the following example:
dict(
# These records are used to create a PPMutagenesis record and the associated mutagenesis details
Mutagenesis = dict(
RecognizableString = 'TinaGSP_32',
PPComplexID = -1,
),
Mutations = [
# There is one dict per mutation
dict(
MutagenesisMutation = dict(
# PPMutagenesisID will be filled in when the PPMutagenesis record is created.
RecordKey = 'A D123E', # chain_id, wildtype_aa, residue_id.strip(), mutant_aa
ProteinID = None, # todo
ResidueIndex = None, # todo
WildTypeAA = 'D',
MutantAA = 'E',
),
MutagenesisPDBMutation = dict(
# PPMutagenesisID and PPMutagenesisMutationID will be filled in when the PPMutagenesisMutation record is created.
# PPComplexID is taken from the PPMutagenesis section. WildTypeAA and MutantAA are taken from the PPMutagenesisMutation section.
SetNumber = -1,
PDBFileID = '1A2K_TP0',
Chain = 'A',
ResidueID = ' 123 ',
),
),
],
# This field is used to create the UserPPDataSetExperiment record. All other fields can be derived from the above.
# Note: We use the human-readable label here. The database ID is retrieved using e.g. ppi_api.get_defined_user_datasets()[<UserDataSetTextID>]['ID']
UserDataSetTextID = 'RAN-GSP',
)
'''
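# A minimal, hypothetical call pattern (the session handling and the user_dataset_cases list are assumptions,
# not part of this function):
#     tsession = ppi_api.importer.get_session(new_session = True)
#     for case in user_dataset_cases:
#         ppi_api.add_user_dataset_case(tsession, case)
#     tsession.commit()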
udc = user_dataset_case
# Extract the PDB file and complex set number
pdb_file_id = set([m['MutagenesisPDBMutation']['PDBFileID'] for m in udc['Mutations']])
assert(len(pdb_file_id) == 1)
pdb_file_id = pdb_file_id.pop()
set_number = set([m['MutagenesisPDBMutation']['SetNumber'] for m in udc['Mutations']])
assert(len(set_number) == 1)
set_number = set_number.pop()
is_wildtype = 1
if udc['Mutations']:
is_wildtype = 0
# 1. Create the mutagenesis record
pp_mutagenesis = get_or_create_in_transaction(tsession, dbmodel.PPMutagenesis, dict(
PPComplexID = udc['Mutagenesis']['PPComplexID'],
SKEMPI_KEY = udc['Mutagenesis']['RecognizableString'],
WildType = is_wildtype,
), missing_columns = ['ID'])
# 2. Create the PPMutagenesisMutation and PPMutagenesisPDBMutation records
for m in udc['Mutations']:
# 2a. Create the PPMutagenesisMutation record
mmut = m['MutagenesisMutation']
mmut['PPMutagenesisID'] = pp_mutagenesis.ID
# Sanity check existing records
existing_record = tsession.query(dbmodel.PPMutagenesisMutation).filter(and_(
dbmodel.PPMutagenesisMutation.PPMutagenesisID == mmut['PPMutagenesisID'], dbmodel.PPMutagenesisMutation.RecordKey == mmut['RecordKey']))
if existing_record.count() > 0:
existing_record = existing_record.one()
assert(existing_record.MutantAA == mmut['MutantAA'])
assert(existing_record.WildTypeAA == mmut['WildTypeAA'])
# Add the new record
pp_mutagenesis_mutation = get_or_create_in_transaction(tsession, dbmodel.PPMutagenesisMutation, mmut, missing_columns = ['ID'])
# 2b. Create the PPMutagenesisPDBMutation record
pmut = m['MutagenesisPDBMutation']
pmut['PPMutagenesisID'] = pp_mutagenesis.ID
pmut['PPMutagenesisMutationID'] = pp_mutagenesis_mutation.ID
pmut['PPComplexID'] = pp_mutagenesis.PPComplexID
pmut['WildTypeAA'] = pp_mutagenesis_mutation.WildTypeAA
pmut['MutantAA'] = pp_mutagenesis_mutation.MutantAA
pmut['ResidueID'] = PDB.ResidueID2String(pmut['ResidueID']) # handle stripped strings
# Sanity check existing records
existing_record = tsession.query(dbmodel.PPMutagenesisPDBMutation).filter(and_(
dbmodel.PPMutagenesisPDBMutation.PPMutagenesisMutationID == pmut['PPMutagenesisMutationID'],
dbmodel.PPMutagenesisPDBMutation.PDBFileID == pdb_file_id,
dbmodel.PPMutagenesisPDBMutation.SetNumber == set_number,
dbmodel.PPMutagenesisPDBMutation.Chain == pmut['Chain'],
dbmodel.PPMutagenesisPDBMutation.ResidueID == pmut['ResidueID'],
))
if existing_record.count() > 0:
existing_record = existing_record.one()
assert(existing_record.PPMutagenesisID == pmut['PPMutagenesisID'])
assert(existing_record.PPComplexID == pmut['PPComplexID'])
assert(existing_record.WildTypeAA == pmut['WildTypeAA'])
assert(existing_record.MutantAA == pmut['MutantAA'])
# Add the new record
pp_mutagenesis_pdb_mutation = get_or_create_in_transaction(tsession, dbmodel.PPMutagenesisPDBMutation, pmut, missing_columns = ['ID'])
# 3. Create the UserPPDataSetExperiment record
user_dataset_name = udc['UserDataSetTextID']
if not user_dataset_name_to_id_map.get(user_dataset_name):
user_dataset_name_to_id_map[user_dataset_name] = tsession.query(dbmodel.UserDataSet).filter(dbmodel.UserDataSet.TextID == user_dataset_name).one().ID
user_dataset_id = user_dataset_name_to_id_map[user_dataset_name]
new_record = True
if tsession.query(dbmodel.UserPPDataSetExperiment).filter(and_(
dbmodel.UserPPDataSetExperiment.UserDataSetID == user_dataset_id,
dbmodel.UserPPDataSetExperiment.PPMutagenesisID == pp_mutagenesis.ID,
dbmodel.UserPPDataSetExperiment.PDBFileID == pdb_file_id,
dbmodel.UserPPDataSetExperiment.PPComplexID == pp_mutagenesis.PPComplexID,
dbmodel.UserPPDataSetExperiment.SetNumber == set_number)).count() > 0:
new_record = False
user_dataset_experiment = get_or_create_in_transaction(tsession, dbmodel.UserPPDataSetExperiment, dict(
UserDataSetID = user_dataset_id,
PPMutagenesisID = pp_mutagenesis.ID,
PDBFileID = pdb_file_id,
PPComplexID = pp_mutagenesis.PPComplexID,
SetNumber = set_number,
IsComplex = True,
), missing_columns = ['ID'])
if new_record:
colortext.wgreen('.')
else:
colortext.wcyan('.')
@general_data_entry
def add_de_dataset(self, user_id, long_id, short_id, description, ddg_convention, dataset_creation_start_date = None, dataset_creation_end_date = None, publication_ids = [], existing_session = None):
'''Convenience wrapper for add_dataset for DeltaE-only datasets.'''
return self.add_dataset(user_id, long_id, short_id, description, False, False, True, ddg_convention, dataset_creation_start_date = dataset_creation_start_date, dataset_creation_end_date = dataset_creation_end_date, publication_ids = publication_ids, existing_session = existing_session)
@ppi_data_entry
def add_ssm_dataset(self, dataset_short_id, user_dataset_id, complex_id, set_number, mutations_dataframe, existing_session = None, debug = True):
'''Import SSM data from an RCSB PDB file. Non-RCSB files are not currently handled. Some data (DataSet and UserDataSet)
must be set up before calling this function.
:param dataset_short_id: The short ID of the existing dataset in the database (DataSet.ShortID)
:param user_dataset_id: The ID of the existing user dataset in the database (UserDataSet.ID)
:param complex_id: The complex ID used in the database (PPComplex.ID). This will be used to add the structure to the database.
:param set_number: The set_number of the complex used in the database (PPIPDBSet.SetNumber). This is used to determine the choice of chains in predictions.
:param mutations_dataframe: A pandas dataframe in the intermediate input format described below.
:param debug: If True then the transaction is rolled back. This is set to True by default to reduce data-entry errors i.e. you should do a test-run of add_ssm_dataset first and then do a run with debug = False.
:return: Dict {success : <True/False>, DataSetID : dataset_id, [errors : <list of error strings if failed>]}
This function requires the complex, DataSet, and UserDataSet records to have been created. Those records can be added using
the appropriate functions e.g.
ppi_api = get_ppi_interface(read_file('pw'))
# If the complex structure has not been added to the database:
ppi_api.importer.add_pdb_from_rcsb(pdb_id, trust_database_content = True)
# If the complex has not been added to the database:
complex_ids = ppi_api.search_complexes_by_pdb_id(pdb_id)
if complex_ids:
colortext.warning('The PDB file {0} has associated complexes: {1}'.format(pdb_id, ', '.join(map(str, complex_ids))))
api_response = ppi_api.add_complex(json.loads(read_file('my_complex.json')[path][to][complex_definition])) # The structure of the JSON file is described in the docstring for add_complex
if not api_response['success']:
raise Exception(api_response['error'])
pp_complex_id, set_number = api_response['ComplexID'], api_response['SetNumber']
# else if the complex already exists in the database:
pp_complex_id, set_number = ..., ...
# Add dataset publications
publication_ids = [
ppi_api.add_publication(...).ID, # currently not implemented
...
ppi_api.add_publication(...).ID, # currently not implemented
]
# Add the dataset and user dataset records
dataset = ppi_api.add_de_dataset('oconchus', 'SSM_Psd95-CRIPT_Rama_10.1038/nature11500', 'Psd95-CRIPT', 'description...', ddg_convention, dataset_creation_start_date = datetime.date(...), dataset_creation_end_date = datetime.date(...), publication_ids = [...])
user_dataset = ppi_api.add_de_user_dataset('oconchus', 'SSM-Psd95-CRIPT', '...')
# Finally, import the SSM dataset
add_ssm_dataset(dataset.ShortID, user_dataset.ID, pp_complex_id, set_number, mutations_dataframe)
@todo: write the add_publication function (using the RIS parsing module in klab and the PubMed/DOI downloading modules).
mutations_dataframe should be a complete (either a value or null at all positions in the m x n array) pandas
dataframe with a standardized structure.
This simplifies the data import. The dataframe should be indexed/row-indexed by residue type and column-indexed
by a string chain ID + <underscore> + residue ID without spaces e.g. 'A_311' is residue ' 311 ' of chain A and 'A_312B' is residue ' 312B' of chain A.
We include an underscore in the format to reduce confusion for cases where the PDB chain ID is an integer.
For example, if the input file is a TSV formatted like:
Pos/aa A_311 A_312 ...
A 0.131 -0.42 ...
C 0.413 -0.022 ...
...
then a valid mutations_dataframe can be constructed via
mutations_dataframe = pandas.read_csv(ssm_input_data_path, sep = '\t', header = 0, index_col = 0)
'''
tsession = existing_session or self.get_session(new_session = True, utf = False)
# Sanity checks
assert(complex_id != None and set_number != None)
dataset_id = None
try:
dataset_id = tsession.query(dbmodel.DataSet).filter(dbmodel.DataSet.ShortID == dataset_short_id).one().ID
except:
raise Exception('No dataset with ShortID "{0}" exists in the database.'.format(dataset_short_id))
try:
tsession.query(dbmodel.UserDataSet).filter(dbmodel.UserDataSet.ID== user_dataset_id).one()
except:
raise Exception('No user dataset with ID "{0}" exists in the database.'.format(user_dataset_id))
# Retrieve the mapping from chain -> residue ID -> wildtype residue
pdb_id, complex_chains = self.get_bound_pdb_set_details(complex_id, set_number)
chain_wt_residue_by_pos = self.get_pdb_residues_by_pos(pdb_id, strip_res_ids = True)
# Sanity checks on column indices
chain_ids = set()
for v in mutations_dataframe.columns.values:
error_msg = 'The column index "{0}" does not have the expected format: <chain>_<residue id> e.g. "A_123".'.format(v)
if v.find('_') == -1 or len(v.split('_')) != 2:
raise colortext.Exception(error_msg)
tokens = v.split('_')
chain_id = tokens[0]
residue_id = tokens[1]
if len(chain_id) != 1 or (not(residue_id.strip().isdigit()) and not(residue_id.strip()[:-1].isdigit())):
raise colortext.Exception(error_msg)
chain_ids.add(chain_id)
# Sanity checks on row indices
mut_aas = sorted(mutations_dataframe.index)
expected_mut_aas = set(residue_type_1to3_map.keys())
expected_mut_aas.remove('X')
assert(len(expected_mut_aas) == 20)
if set(mut_aas).difference(expected_mut_aas):
raise colortext.Exception('The row indices contain values which are non canonical residue types: "{0}".'.format('", "'.join(sorted(set(mut_aas).difference(expected_mut_aas)))))
# Extract the data into a list of point mutations, iterating by column/position then row/AA
# Add a single wildtype PPMutagenesis record (essentially a Complex with no corresponding mutation records)
# For all single PDB mutations in the list
# if not wildtype
# add a PPMutagenesis record and corresponding mutation records
# add a PPIDataSetDE record to represent the original data (experimental data) in the database
# add a UserPPDataSetExperiment record to be used to create prediction runs
# add a UserPPAnalysisSetDE record to be used when analyzing prediction runs against the experimental data
#
# Note that there will be one UserPPAnalysisSetDE record for each mutant but only a single wildtype PPMutagenesis record, even though
# the wildtype sequence has exactly one corresponding DeltaE for each position. There will be exactly one UserPPAnalysisSetDE
# record per mutant and one wildtype UserPPAnalysisSetDE record for each position; however, all of the wildtype UserPPAnalysisSetDE records
# will be associated with the sole wildtype PPMutagenesis record.
colortext.warning('Adding data for complex #{0}, dataset "{1}", user dataset #{2}.'.format(complex_id, dataset_id, user_dataset_id))
record_number = 0
mut_aas = list(mutations_dataframe.index)
res_ids = list(mutations_dataframe.columns.values)
try:
# Add a PPMutagenesis record with no mutation records i.e. the wildtype/null 'mutagenesis'
pp_wt_mutagenesis = get_or_create_in_transaction(tsession, dbmodel.PPMutagenesis, dict(
PPComplexID = complex_id,
SKEMPI_KEY = 'SSM {0}| WildType'.format(pdb_id), # todo: this format is ambiguous if we start to store multiple SSM datasets with different choices of bound partners. We should ideally check all PPMutagenesisMutation/PPMutagenesisPDBMutation records on the complex for a match. At present (2016), it is unlikely that we will have many SSM datasets for consideration, never mind overlapping sets.
WildType = 1,
), missing_columns = ['ID',])
pp_wt_mutagenesis_id = pp_wt_mutagenesis.ID
first_wt_record_number = None
for chain_res_id in res_ids:
tokens = chain_res_id.split('_')
assert(len(tokens) == 2)
chain_id = tokens[0]
assert(len(chain_id) == 1)
assert(chain_id in chain_wt_residue_by_pos)
res_id = tokens[1]
assert(res_id in chain_wt_residue_by_pos[chain_id])
wt_aa = chain_wt_residue_by_pos[chain_id][res_id]
for mut_aa in mut_aas:
record_number += 1
if record_number % 10 == 0:
colortext.wgreen('.')
sys.stdout.flush()
# Add the PPMutagenesis records for mutant cases
if mut_aa == wt_aa:
ppi_dataset_de_key = 'SSM {0}| WildType'.format(pdb_id)
if first_wt_record_number == None:
first_wt_record_number = record_number
analysis_set_record_number = first_wt_record_number
pp_mutagenesis_id = pp_wt_mutagenesis_id
else:
ppi_dataset_de_key = 'SSM {0}| {1} {2} {3} {4}'.format(pdb_id, chain_id, wt_aa, res_id, mut_aa) # SKEMPI_KEY is a bad name for a field!,
analysis_set_record_number = record_number
# Add a PPMutagenesis record (and, below, its associated mutation records) for this point mutant
pp_mutagenesis = get_or_create_in_transaction(tsession, dbmodel.PPMutagenesis, dict(
PPComplexID = complex_id,
SKEMPI_KEY = 'SSM {0}| {1} {2} {3} {4}'.format(pdb_id, chain_id, wt_aa, res_id, mut_aa), # SKEMPI_KEY is a bad name for a field!,
WildType = 0,
), missing_columns = ['ID'])
pp_mutagenesis_id = pp_mutagenesis.ID
#pprint.pprint(pp_mutagenesis.__dict__)
pp_mutagenesis_mutation = get_or_create_in_transaction(tsession, dbmodel.PPMutagenesisMutation, dict(
PPMutagenesisID = pp_mutagenesis_id,
RecordKey = '{0} {1}{2}{3}'.format(chain_id, wt_aa, res_id, mut_aa),
ProteinID = None,
ResidueIndex = None,
WildTypeAA = wt_aa,
MutantAA = mut_aa,
), missing_columns = ['ID',])
pp_mutagenesis_mutation_id = pp_mutagenesis_mutation.ID
#pprint.pprint(pp_mutagenesis_mutation.__dict__)
pp_mutagenesis_pdb_mutation = get_or_create_in_transaction(tsession, dbmodel.PPMutagenesisPDBMutation, dict(
PPMutagenesisID = pp_mutagenesis_id,
PPMutagenesisMutationID = pp_mutagenesis_mutation_id,
PPComplexID = complex_id,
SetNumber = set_number,
PDBFileID = pdb_id,
Chain = chain_id,
WildTypeAA = wt_aa,
ResidueID = PDB.ResidueID2String(res_id),
MutantAA = mut_aa,
), missing_columns = ['ID',])
pp_mutagenesis_pdb_mutation_id = pp_mutagenesis_pdb_mutation.ID
#pprint.pprint(pp_mutagenesis_pdb_mutation.__dict__)
# Add a DeltaE measurement record (PPIDataSetDE)
ppi_dataset_de = get_or_create_in_transaction(tsession, dbmodel.PPIDataSetDE, dict(
SecondaryID = ppi_dataset_de_key, # optional field
DataSetID = dataset_id,
Section = 'Supplementary Information II',
RecordNumber = record_number,
DE = mutations_dataframe[chain_res_id][mut_aa],
DEUnit = 'DeltaE (see DataSet.Description)',
PublishedError = None,
NumberOfMeasurements = None,
PPMutagenesisID = pp_mutagenesis_id,
PPComplexID = complex_id,
SetNumber = set_number,
PublishedPDBFileID = pdb_id,
PossibleError = False,
Remarks = None,
IsABadEntry = 0,
AddedBy = 'oconchus',
AddedDate = datetime.datetime.now(),
LastModifiedBy = 'oconchus',
LastModifiedDate = datetime.datetime.now(),
), missing_columns = ['ID',], variable_columns = ['AddedDate', 'LastModifiedDate'])
ppi_dataset_de_id = ppi_dataset_de.ID
# Add a record (UserPPDataSetExperiment) to be included in the associated prediction run
user_pp_dataset_experiment = get_or_create_in_transaction(tsession, dbmodel.UserPPDataSetExperiment, dict(
UserDataSetID = user_dataset_id,
PPMutagenesisID = pp_mutagenesis_id,
PDBFileID = pdb_id,
PPComplexID = complex_id,
SetNumber = set_number,
IsComplex = 1
), missing_columns = ['ID',])
user_pp_dataset_experiment_id = user_pp_dataset_experiment.ID
# Add a record (UserPPAnalysisSetDE) to be used in the analysis, linking the UserPPDataSetExperiment with the DeltaE (PPIDataSetDE) record
user_pp_analysis_set_de = get_or_create_in_transaction(tsession, dbmodel.UserPPAnalysisSetDE, dict(
Subset = 'Psd95-Cript',
Section = 'McLaughlin2012',
RecordNumber = analysis_set_record_number,
UserPPDataSetExperimentID = user_pp_dataset_experiment_id,
PPIDataSetDEID = ppi_dataset_de_id,
PPMutagenesisID = pp_mutagenesis_id,
), missing_columns = ['ID',])
user_pp_analysis_set_de_id = user_pp_analysis_set_de.ID
if debug:
colortext.warning('\nDEBUG MODE IS SET. THE CODE RAN SUCCESSFULLY BUT THE DATASET WILL NOT BE ADDED. RE-RUN THIS FUNCTION WITH debug = False.')
tsession.rollback()
tsession.close()
else:
tsession.commit()
tsession.close()
except Exception, e:
tsession.rollback()
tsession.close()
colortext.warning(traceback.format_exc())
raise colortext.Exception(str(e))
| mit |
HarllanAndrye/nilmtk | nilmtk/stats/goodsectionsresults.py | 6 | 6206 | from __future__ import print_function, division
import pandas as pd
from datetime import timedelta
import matplotlib.pyplot as plt
from ..results import Results
from nilmtk.timeframe import TimeFrame, convert_none_to_nat, convert_nat_to_none
from nilmtk.utils import get_tz, tz_localize_naive
from nilmtk.timeframegroup import TimeFrameGroup
class GoodSectionsResults(Results):
"""
Attributes
----------
max_sample_period_td : timedelta
_data : pd.DataFrame
index is start date for the whole chunk
`end` is end date for the whole chunk
`sections` is a TimeFrameGroups object (a list of nilmtk.TimeFrame objects)
"""
name = "good_sections"
def __init__(self, max_sample_period):
self.max_sample_period_td = timedelta(seconds=max_sample_period)
super(GoodSectionsResults, self).__init__()
def append(self, timeframe, new_results):
"""Append a single result.
Parameters
----------
timeframe : nilmtk.TimeFrame
new_results : {'sections': list of TimeFrame objects}
"""
new_results['sections'] = [TimeFrameGroup(new_results['sections'][0])]
super(GoodSectionsResults, self).append(timeframe, new_results)
def combined(self):
"""Merges together any good sections which span multiple segments,
as long as those segments are adjacent
(previous.end - max_sample_period <= next.start <= previous.end).
Returns
-------
sections : TimeFrameGroup (a subclass of Python's list class)
"""
sections = TimeFrameGroup()
end_date_of_prev_row = None
for index, row in self._data.iterrows():
row_sections = row['sections']
# Check if first TimeFrame of row_sections needs to be merged with
# last TimeFrame of previous section
if (end_date_of_prev_row is not None):
rows_are_adjacent = (
(end_date_of_prev_row - self.max_sample_period_td)
<= index <=
end_date_of_prev_row)
if rows_are_adjacent and row_sections[0].start is None:
assert sections[-1].end is None
sections[-1].end = row_sections[0].end
row_sections.pop(0)
else:
# row_sections[0] and sections[-1] were not in adjacent chunks
# so check if they are both open-ended and close them...
if sections and sections[-1].end is None:
try:
sections[-1].end = end_date_of_prev_row
except ValueError: # end_date_of_prev_row before sections[-1].start
pass
if row_sections and row_sections[0].start is None:
try:
row_sections[0].start = index
except ValueError:
pass
end_date_of_prev_row = row['end']
sections.extend(row_sections)
if sections:
sections[-1].include_end = True
if sections[-1].end is None:
sections[-1].end = end_date_of_prev_row
return sections
def unify(self, other):
super(GoodSectionsResults, self).unify(other)
for start, row in self._data.iterrows():
other_sections = other._data['sections'].loc[start]
intersection = row['sections'].intersection(other_sections)
self._data['sections'].loc[start] = intersection
def to_dict(self):
good_sections = self.combined()
good_sections_list_of_dicts = [timeframe.to_dict()
for timeframe in good_sections]
return {'statistics': {'good_sections': good_sections_list_of_dicts}}
def plot(self, **kwargs):
timeframes = self.combined()
return timeframes.plot(**kwargs)
def import_from_cache(self, cached_stat, sections):
# we (deliberately) use duplicate indices to cache GoodSectionResults
grouped_by_index = cached_stat.groupby(level=0)
tz = get_tz(cached_stat)
for tf_start, df_grouped_by_index in grouped_by_index:
grouped_by_end = df_grouped_by_index.groupby('end')
for tf_end, sections_df in grouped_by_end:
end = tz_localize_naive(tf_end, tz)
timeframe = TimeFrame(tf_start, end)
if timeframe in sections:
timeframes = []
for _, row in sections_df.iterrows():
section_start = tz_localize_naive(row['section_start'], tz)
section_end = tz_localize_naive(row['section_end'], tz)
timeframes.append(TimeFrame(section_start, section_end))
self.append(timeframe, {'sections': [timeframes]})
def export_to_cache(self):
"""
Returns
-------
DataFrame with three columns: 'end', 'section_end', 'section_start'.
Instead of storing a list of TimeFrames on each row,
we store one TimeFrame per row. This is because pd.HDFStore cannot
save a DataFrame where one column is a list if using 'table' format'.
We also need to strip the timezone information from the data columns.
When we import from cache, we assume the timezone for the data
columns is the same as the tz for the index.
"""
index_for_cache = []
data_for_cache = [] # list of dicts with keys 'end', 'section_end', 'section_start'
for index, row in self._data.iterrows():
for section in row['sections']:
index_for_cache.append(index)
data_for_cache.append(
{'end': row['end'],
'section_start': convert_none_to_nat(section.start),
'section_end': convert_none_to_nat(section.end)})
df = pd.DataFrame(data_for_cache, index=index_for_cache)
return df.convert_objects()
| apache-2.0 |
OshynSong/scikit-learn | benchmarks/bench_sample_without_replacement.py | 397 | 8008 | """
Benchmarks for sampling without replacement of integer.
"""
from __future__ import division
from __future__ import print_function
import gc
import sys
import optparse
from datetime import datetime
import operator
import matplotlib.pyplot as plt
import numpy as np
import random
from sklearn.externals.six.moves import xrange
from sklearn.utils.random import sample_without_replacement
def compute_time(t_start, delta):
mu_second = 0.0 + 10 ** 6 # number of microseconds in a second
return delta.seconds + delta.microseconds / mu_second
def bench_sample(sampling, n_population, n_samples):
gc.collect()
# start time
t_start = datetime.now()
sampling(n_population, n_samples)
delta = (datetime.now() - t_start)
# stop time
time = compute_time(t_start, delta)
return time
if __name__ == "__main__":
###########################################################################
# Option parser
###########################################################################
op = optparse.OptionParser()
op.add_option("--n-times",
dest="n_times", default=5, type=int,
help="Benchmark results are average over n_times experiments")
op.add_option("--n-population",
dest="n_population", default=100000, type=int,
help="Size of the population to sample from.")
op.add_option("--n-step",
dest="n_steps", default=5, type=int,
help="Number of step interval between 0 and n_population.")
default_algorithms = "custom-tracking-selection,custom-auto," \
"custom-reservoir-sampling,custom-pool,"\
"python-core-sample,numpy-permutation"
op.add_option("--algorithm",
dest="selected_algorithm",
default=default_algorithms,
type=str,
help="Comma-separated list of transformer to benchmark. "
"Default: %default. \nAvailable: %default")
# op.add_option("--random-seed",
# dest="random_seed", default=13, type=int,
# help="Seed used by the random number generators.")
(opts, args) = op.parse_args()
if len(args) > 0:
op.error("this script takes no arguments.")
sys.exit(1)
selected_algorithm = opts.selected_algorithm.split(',')
for key in selected_algorithm:
if key not in default_algorithms.split(','):
raise ValueError("Unknown sampling algorithm \"%s\" not in (%s)."
% (key, default_algorithms))
###########################################################################
# List sampling algorithm
###########################################################################
# We assume that sampling algorithm has the following signature:
# sample(n_population, n_sample)
#
sampling_algorithm = {}
###########################################################################
# Set Python core input
sampling_algorithm["python-core-sample"] = \
lambda n_population, n_sample: \
random.sample(xrange(n_population), n_sample)
###########################################################################
# Set custom automatic method selection
sampling_algorithm["custom-auto"] = \
lambda n_population, n_samples, random_state=None: \
sample_without_replacement(n_population,
n_samples,
method="auto",
random_state=random_state)
###########################################################################
# Set custom tracking based method
sampling_algorithm["custom-tracking-selection"] = \
lambda n_population, n_samples, random_state=None: \
sample_without_replacement(n_population,
n_samples,
method="tracking_selection",
random_state=random_state)
###########################################################################
# Set custom reservoir based method
sampling_algorithm["custom-reservoir-sampling"] = \
lambda n_population, n_samples, random_state=None: \
sample_without_replacement(n_population,
n_samples,
method="reservoir_sampling",
random_state=random_state)
###########################################################################
# Set custom reservoir based method
sampling_algorithm["custom-pool"] = \
lambda n_population, n_samples, random_state=None: \
sample_without_replacement(n_population,
n_samples,
method="pool",
random_state=random_state)
###########################################################################
# Numpy permutation based
sampling_algorithm["numpy-permutation"] = \
lambda n_population, n_sample: \
np.random.permutation(n_population)[:n_sample]
###########################################################################
# Remove unspecified algorithm
sampling_algorithm = dict((key, value)
for key, value in sampling_algorithm.items()
if key in selected_algorithm)
###########################################################################
# Perform benchmark
###########################################################################
time = {}
n_samples = np.linspace(start=0, stop=opts.n_population,
num=opts.n_steps).astype(np.int)
ratio = n_samples / opts.n_population
print('Benchmarks')
print("===========================")
for name in sorted(sampling_algorithm):
print("Perform benchmarks for %s..." % name, end="")
time[name] = np.zeros(shape=(opts.n_steps, opts.n_times))
for step in xrange(opts.n_steps):
for it in xrange(opts.n_times):
time[name][step, it] = bench_sample(sampling_algorithm[name],
opts.n_population,
n_samples[step])
print("done")
print("Averaging results...", end="")
for name in sampling_algorithm:
time[name] = np.mean(time[name], axis=1)
print("done\n")
# Print results
###########################################################################
print("Script arguments")
print("===========================")
arguments = vars(opts)
print("%s \t | %s " % ("Arguments".ljust(16),
"Value".center(12),))
print(25 * "-" + ("|" + "-" * 14) * 1)
for key, value in arguments.items():
print("%s \t | %s " % (str(key).ljust(16),
str(value).strip().center(12)))
print("")
print("Sampling algorithm performance:")
print("===============================")
print("Results are averaged over %s repetition(s)." % opts.n_times)
print("")
fig = plt.figure('scikit-learn sample w/o replacement benchmark results')
plt.title("n_population = %s, n_times = %s" %
(opts.n_population, opts.n_times))
ax = fig.add_subplot(111)
for name in sampling_algorithm:
ax.plot(ratio, time[name], label=name)
ax.set_xlabel('ratio of n_sample / n_population')
ax.set_ylabel('Time (s)')
ax.legend()
# Sort legend labels
handles, labels = ax.get_legend_handles_labels()
hl = sorted(zip(handles, labels), key=operator.itemgetter(1))
handles2, labels2 = zip(*hl)
ax.legend(handles2, labels2, loc=0)
plt.show()
| bsd-3-clause |
mattminuti/NeuralAudio | encode_multicore.py | 1 | 3541 | #!/usr/bin/python
import multiprocessing as mp
import scipy
import sys
import wave
import struct
from matplotlib import mlab
import os
from scipy.io import wavfile
inputfile = sys.argv[1]
blocksize = int(sys.argv[2])
errortolerance = int(sys.argv[3])
outputfile = sys.argv[4]
channels = 2
#load the wavefile
(samplerate, data) = scipy.io.wavfile.read(inputfile)
neededzeros = blocksize-(len(data)%blocksize)
data = scipy.append(data,scipy.zeros((channels,neededzeros)))
data = scipy.reshape(data,(-1,blocksize))
original_data = data
lutable = scipy.array([], dtype=int)
q = scipy.array([], dtype=int)
#finds which entries are within the percent error tolerance to the reference
while data.shape[0] != 0:
print "Generating lutable, " + str(len(data))
q = (data-data[0,:])**2
q_sum = scipy.sum(q,1)
q_n = scipy.sqrt(q_sum)
q_d = scipy.sqrt(scipy.sum(data[0,:]**2,0))
if scipy.all(data[0,:] == 0): #to avoid division by zero on an allzero block
print "This is a zero case"
lutable = scipy.append(lutable,data[0,:]) #add it to the lookup table
results = scipy.all(data == scipy.zeros(data.shape),1) #locate the allzero blocks
data = data[results == False] #eliminate the matches
else:
q = q_n / q_d * 100
results = q < errortolerance
lutable = scipy.append(lutable,data[0,:])
data = data[results == False]
lutable = scipy.reshape(lutable.astype(int),(-1,blocksize))
#isolate any zeros in the lookup table for easy reference later.
#we do this by checking where all entries in a block are equal to zero
lutable_zero_index = mlab.find(scipy.all(lutable==0,1))
#defining the parallel computation thingies
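# matchfinder computes, for one block k, the percentage RMS difference between k and every entry in the
# lookup table and returns (block index, index of the closest entry); all-zero blocks are mapped straight to
# the stored all-zero lookup-table entry to avoid dividing by zero.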
def matchfinder(stuff):
i,k = stuff
q = (k - lutable)**2
q_sum = scipy.sum(q,1)
q_n = scipy.sqrt(q_sum)
q_d = scipy.sqrt(sum(k**2,0))
if scipy.all(k ==0):
return (i, lutable_zero_index)
else:
q = q_n / q_d * 100
return (i, scipy.argmin(q))
data = original_data
index = scipy.zeros(len(data))
if __name__ == '__main__':
p = mp.Pool()
res = p.map_async(matchfinder, list(enumerate(data)), 1)
p.close()
p.join()
results = res.get()
for i,s in results:
index[i] = s
indexfilename = outputfile + "_index.bin"
file = open(indexfilename,'wb')
for s in index:
indexdata = struct.pack('>H',s)
file.write(indexdata)
file.close()
lutable = scipy.reshape(lutable.astype(int),(1,-1))
lutable = lutable[0]
lutablefilename = outputfile + "_lutable.bin"
file = open(lutablefilename,'wb')
for s in lutable:
ludata = struct.pack('>i',s)
file.write(ludata)
file.close()
zippercommand = 'tar -cf - ' + outputfile + '*.bin | lzma -c > ' + outputfile+'.tar.lzma'
os.system(zippercommand)
cleanupcommand = 'rm '+indexfilename+' '+lutablefilename
os.system(cleanupcommand)
#for testing, also generate a wav from the output
if sys.argv[5][-4:] == ".wav":
wavfilename = sys.argv[5]
lutable = scipy.reshape(lutable,(-1,blocksize))
for i,s in results:
index[i] = s
indexlength = len(index)
output = scipy.zeros(indexlength*blocksize, dtype=int)
print "Generating wav..."
counter = 0
for k in index:
output[counter*blocksize:counter*blocksize+blocksize] = lutable[k]
counter += 1
output = scipy.reshape(output.astype(int),(-1,channels))
wav_file = wave.open(wavfilename,'wb')
nchannels = 2
sampwidth = 2
framerate = 44100
comptype = "NONE"
compname = "not compressed"
wav_file.setparams((nchannels,sampwidth,framerate,'',comptype,compname))
print "Writing wav to disk..."
for s in output:
wav_file.writeframesraw(struct.pack('2h',s[0],s[1]))
wav_file.close()
| mit |
waltermateriais/xutils | xpython/xphysics/xschroedinger.py | 1 | 5709 | #!/usr/bin/python
# xschroedinger.py
# by Walter Dal'Maz Silva
# and Frankbelson dos Santos Azevedo
# 22nd October 2016 (version 0.1.0)
__version__ = '0.1.0'
DEBUG_MODULE = False
# -----------------------------------------------------------------------------
# Imports
# -----------------------------------------------------------------------------
import os
import time
import numpy as np
import scipy.sparse as sparse
import scipy.sparse.linalg
import matplotlib.pyplot as plt
import xutils.xpython as _xu
# -----------------------------------------------------------------------------
# SymmetricSchLog
# -----------------------------------------------------------------------------
def SymmetricSchLog(rhoinf, size, L, D=2, cmpsparse=False):
""" SymmetricSchLog
"""
if L != 0:
d = -0.75 + D - 0.25 * D**2
else:
d = 1
l = L * (L + D - 2)
delRho = rhoinf/size
rho = np.linspace(delRho, rhoinf, size)
main = [np.log(rho)+(l-d)/rho**2+2/delRho**2]
if L != 0:
other = (-1/delRho**2)*np.ones(size-1)
sData = np.diagflat(main,0)+np.diagflat(other,-1)+np.diagflat(other,+1)
else:
upper = (-1/delRho**2+1/(2*rho[:-1]*delRho))
lower = (-1/delRho**2-1/(2*rho[1:]*delRho))
sData = np.diagflat(main,0)+np.diagflat(lower,-1)+np.diagflat(upper,+1)
if cmpsparse: sData = sparse.csc_matrix(sData)
return rho,sData
# -----------------------------------------------------------------------------
# Solver
# -----------------------------------------------------------------------------
def Solver(config):
""" Solver
"""
if DEBUG_MODULE: _xu._print_head(Solver.__name__, config)
# -------------------------------------------------------------------------
# Parse configuration dictionary
# -------------------------------------------------------------------------
func, cmpsparse = config['func'], config['cmpsparse']
rhoinf, size = config['rhoinf'], config['size']
kplt, kmax, L = config['kplt'], config['kmax'], config['L']
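# Illustrative config (values are made up, not prescribed by this module):
#     config = dict(func = SymmetricSchLog, cmpsparse = True, rhoinf = 100.0, size = 5000,
#                   kplt = 4, kmax = 6, L = 1, xlim = (0, 40))  # 'xlim'/'ylim' are optional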
# -------------------------------------------------------------------------
# Compute matrix and find eigenvalues/vectors
# -------------------------------------------------------------------------
start_time = time.time()
if cmpsparse:
rho, mat = func(rhoinf, size, L, cmpsparse=cmpsparse)
if L != 0:
values, vectors = scipy.sparse.linalg.eigsh(mat, k=kmax, sigma=0.0)
else:
values, vectors = scipy.sparse.linalg.eigs(mat, k=kmax, sigma=0.0)
tup = [(values[i],vectors[:,i]) for i in range(kmax)]
else:
rho, mat = func(rhoinf, size, L)
values, vectors = np.linalg.eig(mat)
tup = [(values[i], vectors[:, i]) for i in range(len(values))] # np.linalg.eig returns eigenvectors as columns
tup = sorted(tup, key=lambda p: p[0])
print("--- %s seconds ---" % (time.time() - start_time))
outfolder = _xu._get_outfolder('results-' + Solver.__name__)
# -------------------------------------------------------------------------
# Plot raw data (eigenvectors)
# -------------------------------------------------------------------------
plt.clf()
plt.figure(figsize=(6,4))
for i,(val,vec) in enumerate(tup[:kplt]):
val = val.real
vec = np.real(vec)
print('Eigenvalue : %.6e Last value : %.6e' % (val,vec[-1]))
label = ''.join(['$E_{n=',str(i+1),'}=',str('%.4f'% round(val,4)),'$'])
plt.plot(rho, vec,label=label)
plt.title(''.join(['For $L{}={}',str(L),'$']))
plt.ylabel('$\\Psi_{n}$')
plt.xlabel('$\\rho$')
if 'xlim' in config:
plt.xlim(config['xlim'])
plt.legend(loc=1)
outname = ''.join([func.__name__,'-raw-solution-',str(L),'.png'])
plt.savefig(os.path.join(outfolder, outname), dpi=300)
plt.close('all')
# -------------------------------------------------------------------------
# Plot normalized data (eigenvectors)
# -------------------------------------------------------------------------
plt.clf()
plt.figure(figsize=(6,4))
for i,(val,vec) in enumerate(tup[:kplt]):
val = val.real
vec = np.real(vec)
vec = vec**2
vec *= (1/np.trapz(vec,rho))
print('Eigenvalue : %.6e Last value : %.6e' % (val,vec[-1]))
label = ''.join(['$E_{n=',str(i+1),'}=',str('%.4f'% round(val,4)),'$'])
plt.plot(rho, vec,label=label)
plt.title(''.join(['For $L{}={}',str(L),'$']))
plt.ylabel('$\\Psi_{n}$')
plt.xlabel('$\\rho$')
if 'xlim' in config:
plt.xlim(config['xlim'])
if 'ylim' in config:
plt.ylim(config['ylim'])
plt.legend(loc=1)
outname = ''.join([func.__name__,'-norm-solution-',str(L),'.png'])
plt.savefig(os.path.join(outfolder, outname), dpi=300)
plt.close('all')
# -------------------------------------------------------------------------
# Plot effective potential
# -------------------------------------------------------------------------
plt.clf()
plt.figure(figsize=(6,4))
Veff = lambda x: (L/x)**2+np.log(x)
plt.title(''.join(['For $L{}={}',str(L),'$']))
plt.plot(rho, Veff(rho))
for i,(val,vec) in enumerate(tup[:kmax]):
plt.plot(rho,val.real*np.ones(size),color='k', linewidth=0.5)
plt.ylabel('$V_{eff}$')
plt.xlabel('$\\rho$')
plt.xlim((0,40))
plt.ylim((0,5))
outname = ''.join([func.__name__,'-potential-',str(L),'.png'])
plt.savefig(os.path.join(outfolder, outname), dpi=300)
plt.close('all')
# -----------------------------------------------------------------------------
# EOF
# -----------------------------------------------------------------------------
| unlicense |
koldunovn/geopandas | tests/test_io.py | 8 | 1845 | from __future__ import absolute_import
import fiona
from geopandas import GeoDataFrame, read_postgis, read_file
import tests.util
from .util import PANDAS_NEW_SQL_API, unittest
class TestIO(unittest.TestCase):
def setUp(self):
nybb_filename = tests.util.download_nybb()
path = '/nybb_14a_av/nybb.shp'
vfs = 'zip://' + nybb_filename
self.df = read_file(path, vfs=vfs)
with fiona.open(path, vfs=vfs) as f:
self.crs = f.crs
def test_read_postgis_default(self):
con = tests.util.connect('test_geopandas')
if con is None or not tests.util.create_db(self.df):
raise unittest.case.SkipTest()
try:
sql = "SELECT * FROM nybb;"
df = read_postgis(sql, con)
finally:
if PANDAS_NEW_SQL_API:
# It's not really a connection, it's an engine
con = con.connect()
con.close()
tests.util.validate_boro_df(self, df)
def test_read_postgis_custom_geom_col(self):
con = tests.util.connect('test_geopandas')
if con is None or not tests.util.create_db(self.df):
raise unittest.case.SkipTest()
try:
sql = """SELECT
borocode, boroname, shape_leng, shape_area,
geom AS __geometry__
FROM nybb;"""
df = read_postgis(sql, con, geom_col='__geometry__')
finally:
if PANDAS_NEW_SQL_API:
# It's not really a connection, it's an engine
con = con.connect()
con.close()
tests.util.validate_boro_df(self, df)
def test_read_file(self):
df = self.df.rename(columns=lambda x: x.lower())
tests.util.validate_boro_df(self, df)
self.assert_(df.crs == self.crs)
| bsd-3-clause |
arjunkhode/ASP | lectures/09-Sound-description/plots-code/mfcc.py | 25 | 1103 | import numpy as np
import matplotlib.pyplot as plt
import essentia.standard as ess
M = 1024
N = 1024
H = 512
fs = 44100
spectrum = ess.Spectrum(size=N)
window = ess.Windowing(size=M, type='hann')
mfcc = ess.MFCC(numberCoefficients = 12)
x = ess.MonoLoader(filename = '../../../sounds/speech-male.wav', sampleRate = fs)()
mfccs = []
for frame in ess.FrameGenerator(x, frameSize=M, hopSize=H, startFromZero=True):
mX = spectrum(window(frame))
mfcc_bands, mfcc_coeffs = mfcc(mX)
mfccs.append(mfcc_coeffs)
mfccs = np.array(mfccs)
plt.figure(1, figsize=(9.5, 7))
plt.subplot(2,1,1)
plt.plot(np.arange(x.size)/float(fs), x, 'b')
plt.axis([0, x.size/float(fs), min(x), max(x)])
plt.ylabel('amplitude')
plt.title('x (speech-male.wav)')
plt.subplot(2,1,2)
numFrames = int(mfccs[:,0].size)
frmTime = H*np.arange(numFrames)/float(fs)
plt.pcolormesh(frmTime, 1+np.arange(12), np.transpose(mfccs[:,1:]))
plt.ylabel('coefficients')
plt.title('MFCCs')
plt.autoscale(tight=True)
plt.tight_layout()
plt.savefig('mfcc.png')
plt.show()
| agpl-3.0 |
GGoussar/scikit-image | skimage/viewer/canvastools/base.py | 43 | 3877 | import numpy as np
from matplotlib import lines
__all__ = ['CanvasToolBase', 'ToolHandles']
def _pass(*args):
pass
class CanvasToolBase(object):
"""Base canvas tool for matplotlib axes.
Parameters
----------
manager : Viewer or PlotPlugin.
Skimage viewer or plot plugin object.
on_move : function
Function called whenever a control handle is moved.
This function must accept the end points of line as the only argument.
on_release : function
Function called whenever the control handle is released.
on_enter : function
Function called whenever the "enter" key is pressed.
"""
def __init__(self, manager, on_move=None, on_enter=None, on_release=None,
useblit=True, ax=None):
self.manager = manager
self.ax = manager.ax
self.artists = []
self.active = True
self.callback_on_move = _pass if on_move is None else on_move
self.callback_on_enter = _pass if on_enter is None else on_enter
self.callback_on_release = _pass if on_release is None else on_release
def ignore(self, event):
"""Return True if event should be ignored.
This method (or a version of it) should be called at the beginning
of any event callback.
"""
return not self.active
def hit_test(self, event):
return False
def redraw(self):
self.manager.redraw()
def set_visible(self, val):
for artist in self.artists:
artist.set_visible(val)
def on_key_press(self, event):
if event.key == 'enter':
self.callback_on_enter(self.geometry)
self.set_visible(False)
self.manager.redraw()
def on_mouse_press(self, event):
pass
def on_mouse_release(self, event):
pass
def on_move(self, event):
pass
def on_scroll(self, event):
pass
def remove(self):
self.manager.remove_tool(self)
@property
def geometry(self):
"""Geometry information that gets passed to callback functions."""
return None
class ToolHandles(object):
"""Control handles for canvas tools.
Parameters
----------
ax : :class:`matplotlib.axes.Axes`
Matplotlib axes where tool handles are displayed.
x, y : 1D arrays
Coordinates of control handles.
marker : str
Shape of marker used to display handle. See `matplotlib.pyplot.plot`.
marker_props : dict
Additional marker properties. See :class:`matplotlib.lines.Line2D`.
"""
def __init__(self, ax, x, y, marker='o', marker_props=None):
self.ax = ax
props = dict(marker=marker, markersize=7, mfc='w', ls='none',
alpha=0.5, visible=False)
props.update(marker_props if marker_props is not None else {})
self._markers = lines.Line2D(x, y, animated=True, **props)
self.ax.add_line(self._markers)
self.artist = self._markers
@property
def x(self):
return self._markers.get_xdata()
@property
def y(self):
return self._markers.get_ydata()
def set_data(self, pts, y=None):
"""Set x and y positions of handles"""
if y is not None:
x = pts
pts = np.array([x, y])
self._markers.set_data(pts)
def set_visible(self, val):
self._markers.set_visible(val)
def set_animated(self, val):
self._markers.set_animated(val)
def closest(self, x, y):
"""Return index and pixel distance to closest index."""
pts = np.transpose((self.x, self.y))
# Transform data coordinates to pixel coordinates.
pts = self.ax.transData.transform(pts)
diff = pts - ((x, y))
dist = np.sqrt(np.sum(diff**2, axis=1))
return np.argmin(dist), np.min(dist)
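# Editor's sketch (illustrative only, not part of scikit-image): ToolHandles can
# be exercised directly on a Matplotlib axes; the coordinates below are arbitrary.
#
# import matplotlib.pyplot as plt
# fig, ax = plt.subplots()
# handles = ToolHandles(ax, x=[0.2, 0.8], y=[0.5, 0.5])
# handles.set_visible(True)
# idx, dist = handles.closest(100, 100)  # index of / pixel distance to the nearest handle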
| bsd-3-clause |
nandu959/pg_trans | App.py | 1 | 1301 | import pandas as pd
class App:
    dictionary = {}
    acmDAO = None
    method_names = []
    dictionaries = {}
    def __init__(self):
        # AcmDAO is expected to be provided elsewhere in the project (it wraps the journal dll)
        self.acmDAO = AcmDAO()
    def loadJournal(self):
        obj = self.acmDAO.getJournal()
        method_names = dir(obj)
        objectsList = []
        for x in method_names:
            objectsList.append(getattr(obj, x)) # collect the value of every attribute of the journal object
        for k in objectsList:
            print k # print each collected value
        # Put it in a dictionary as key value pairs
    def addToDict(self):
        obj = self.acmDAO.getJournal()
        method_names = dir(obj)
        for x in method_names:
            self.dictionary[x] = getattr(obj, x) # store every attribute of the journal in a dictionary as key/value pairs
    def loadJournals(self):
        # Load a list of FJournal objects, convert them from dll objects to Python data and store them in a dictionary
        journals = self.acmDAO.getJournals()
        i = 0
        for journal in journals:
            method_names = dir(journal)
            dictionary = {} # fresh dict per journal so earlier entries are not overwritten
            for x in method_names:
                dictionary[x] = getattr(journal, x)
            self.dictionaries[i] = dictionary
            i += 1
    def populateDataFrame(self):
        self.loadJournals()
        # To do transformations and actions on a dataframe
        # reference http://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.html
        pdf = pd.DataFrame(self.dictionaries)
        print pdf
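# Editor's sketch (made-up data, illustrative only): the dict-of-dicts built by
# loadJournals maps directly onto a pandas DataFrame, one column per journal.
#
# journals = {0: {'title': 'J. Foo', 'year': 1999},
#             1: {'title': 'J. Bar', 'year': 2004}}
# pdf = pd.DataFrame(journals)
# print pdf.T # transpose so that each row is one journal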
| apache-2.0 |
robintw/scikit-image | doc/examples/plot_template.py | 20 | 1663 | """
=================
Template Matching
=================
In this example, we use template matching to identify the occurrence of an
image patch (in this case, a sub-image centered on a single coin). Here, we
return a single match (the exact same coin), so the maximum value in the
``match_template`` result corresponds to the coin location. The other coins
look similar, and thus have local maxima; if you expect multiple matches, you
should use a proper peak-finding function.
The ``match_template`` function uses fast, normalized cross-correlation [1]_
to find instances of the template in the image. Note that the peaks in the
output of ``match_template`` correspond to the origin (i.e. top-left corner) of
the template.
.. [1] J. P. Lewis, "Fast Normalized Cross-Correlation", Industrial Light and
Magic.
"""
import numpy as np
import matplotlib.pyplot as plt
from skimage import data
from skimage.feature import match_template
image = data.coins()
coin = image[170:220, 75:130]
result = match_template(image, coin)
ij = np.unravel_index(np.argmax(result), result.shape)
x, y = ij[::-1]
fig, (ax1, ax2, ax3) = plt.subplots(ncols=3, figsize=(8, 3))
ax1.imshow(coin)
ax1.set_axis_off()
ax1.set_title('template')
ax2.imshow(image)
ax2.set_axis_off()
ax2.set_title('image')
# highlight matched region
hcoin, wcoin = coin.shape
rect = plt.Rectangle((x, y), wcoin, hcoin, edgecolor='r', facecolor='none')
ax2.add_patch(rect)
ax3.imshow(result)
ax3.set_axis_off()
ax3.set_title('`match_template`\nresult')
# highlight matched region
ax3.autoscale(False)
ax3.plot(x, y, 'o', markeredgecolor='r', markerfacecolor='none', markersize=10)
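# Editor's addition (illustrative, not part of the original example): when
# multiple matches are expected, the docstring above suggests a proper
# peak-finding step; skimage.feature.peak_local_max is one option. The
# min_distance / threshold_rel values here are arbitrary illustrative choices.
from skimage.feature import peak_local_max
peaks = peak_local_max(result, min_distance=20, threshold_rel=0.8)
for row, col in peaks:
    ax3.plot(col, row, 'x', markeredgecolor='b', markerfacecolor='none', markersize=8)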
plt.show()
| bsd-3-clause |
jmmease/pandas | pandas/tests/test_compat.py | 12 | 2367 | # -*- coding: utf-8 -*-
"""
Testing that functions from compat work as expected
"""
from pandas.compat import (range, zip, map, filter, lrange, lzip, lmap,
lfilter, builtins, iterkeys, itervalues, iteritems,
next)
class TestBuiltinIterators(object):
@classmethod
def check_result(cls, actual, expected, lengths):
for (iter_res, list_res), exp, length in zip(actual, expected,
lengths):
assert not isinstance(iter_res, list)
assert isinstance(list_res, list)
iter_res = list(iter_res)
assert len(list_res) == length
assert len(iter_res) == length
assert iter_res == exp
assert list_res == exp
def test_range(self):
actual1 = range(10)
actual2 = lrange(10)
actual = [actual1, actual2],
expected = list(builtins.range(10)),
lengths = 10,
actual1 = range(1, 10, 2)
actual2 = lrange(1, 10, 2)
actual += [actual1, actual2],
lengths += 5,
expected += list(builtins.range(1, 10, 2)),
self.check_result(actual, expected, lengths)
def test_map(self):
func = lambda x, y, z: x + y + z
lst = [builtins.range(10), builtins.range(10), builtins.range(10)]
actual1 = map(func, *lst)
actual2 = lmap(func, *lst)
actual = [actual1, actual2],
expected = list(builtins.map(func, *lst)),
lengths = 10,
self.check_result(actual, expected, lengths)
def test_filter(self):
func = lambda x: x
lst = list(builtins.range(10))
actual1 = filter(func, lst)
actual2 = lfilter(func, lst)
actual = [actual1, actual2],
lengths = 9,
expected = list(builtins.filter(func, lst)),
self.check_result(actual, expected, lengths)
def test_zip(self):
lst = [builtins.range(10), builtins.range(10), builtins.range(10)]
actual = [zip(*lst), lzip(*lst)],
expected = list(builtins.zip(*lst)),
lengths = 10,
self.check_result(actual, expected, lengths)
def test_dict_iterators(self):
assert next(itervalues({1: 2})) == 2
assert next(iterkeys({1: 2})) == 1
assert next(iteritems({1: 2})) == (1, 2)
| bsd-3-clause |
tawsifkhan/scikit-learn | examples/applications/topics_extraction_with_nmf_lda.py | 133 | 3517 | """
========================================================================================
Topics extraction with Non-Negative Matrix Factorization And Latent Dirichlet Allocation
========================================================================================
This is an example of applying Non Negative Matrix Factorization
and Latent Dirichlet Allocation on a corpus of documents and
extract additive models of the topic structure of the corpus.
The output is a list of topics, each represented as a list of terms
(weights are not shown).
The default parameters (n_samples / n_features / n_topics) should make
the example runnable in a couple of tens of seconds. You can try to
increase the dimensions of the problem, but be aware that the time
complexity is polynomial in NMF. In LDA, the time complexity is
proportional to (n_samples * iterations).
"""
# Author: Olivier Grisel <[email protected]>
# Lars Buitinck <[email protected]>
# Chyi-Kwei Yau <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from time import time
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.decomposition import NMF, LatentDirichletAllocation
from sklearn.datasets import fetch_20newsgroups
n_samples = 2000
n_features = 1000
n_topics = 10
n_top_words = 20
def print_top_words(model, feature_names, n_top_words):
for topic_idx, topic in enumerate(model.components_):
print("Topic #%d:" % topic_idx)
print(" ".join([feature_names[i] for i in topic.argsort()[:-n_top_words - 1:-1]]))
print()
# Load the 20 newsgroups dataset and vectorize it. We use a few heuristics
# to filter out useless terms early on: the posts are stripped of headers,
# footers and quoted replies, and common English words, words occurring in
# only one document or in at least 95% of the documents are removed.
t0 = time()
print("Loading dataset and extracting features...")
dataset = fetch_20newsgroups(shuffle=True, random_state=1,
remove=('headers', 'footers', 'quotes'))
data_samples = dataset.data[:n_samples]
# use tf-idf feature for NMF model
tfidf_vectorizer = TfidfVectorizer(max_df=0.95, min_df=2, max_features=n_features,
stop_words='english')
tfidf = tfidf_vectorizer.fit_transform(data_samples)
# use tf feature for LDA model
tf_vectorizer = CountVectorizer(max_df=0.95, min_df=2, max_features=n_features,
stop_words='english')
tf = tf_vectorizer.fit_transform(data_samples)
print("done in %0.3fs." % (time() - t0))
# Fit the NMF model
print("Fitting the NMF model with tf-idf feature, n_samples=%d and n_features=%d..."
% (n_samples, n_features))
nmf = NMF(n_components=n_topics, random_state=1).fit(tfidf)
print("done in %0.3fs." % (time() - t0))
print("\nTopics in NMF model:")
tfidf_feature_names = tfidf_vectorizer.get_feature_names()
print_top_words(nmf, tfidf_feature_names, n_top_words)
print("\nFitting LDA models with tf feature, n_samples=%d and n_features=%d..."
% (n_samples, n_features))
lda = LatentDirichletAllocation(n_topics=n_topics, max_iter=5,
learning_method='online', learning_offset=50.,
random_state=0)
lda.fit(tf)
print("done in %0.3fs." % (time() - t0))
print("\nTopics in LDA model:")
tf_feature_names = tf_vectorizer.get_feature_names()
print_top_words(lda, tf_feature_names, n_top_words)
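# Editor's addition (illustrative, not part of the original example): a fitted
# LDA model can also infer per-document topic distributions via transform().
doc_topic_distr = lda.transform(tf)
print("\nTopic distribution of the first document (LDA):")
print(doc_topic_distr[0])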
| bsd-3-clause |
ua-snap/downscale | old/old_bin/cru_cl20_1961_1990_climatology_preprocess.py | 2 | 12509 | import numpy as np # hack to solve a lib issue in the function args of xyztogrid
def cru_xyz_to_shp( in_xyz, lon_col, lat_col, crs, output_filename ):
'''
convert the cru cl2.0 1961-1990 Climatology data to a shapefile.
*can handle the .dat format even if compressed with .gzip extension.
PARAMETERS:
-----------
in_xyz = path to the .dat or .dat.gz downloaded cru cl2.0 file from UK Met Office site
lon_col = string name of column storing longitudes
lat_col = string name of column storing latitudes
crs = proj4string or epsg code
output_filename = string path to the output filename to be created
RETURNS
-------
output_filename as string
'''
colnames = ['lat', 'lon', '01', '02', '03', '04', '05', '06', '07', '08', '09', '10', '11', '12']
from shapely.geometry import Point
import pandas as pd
import geopandas as gpd
import os
if os.path.splitext( in_xyz )[1] == '.gz':
cru_df = pd.read_csv( in_xyz, delim_whitespace=True, compression='gzip', header=None, names=colnames )
else:
cru_df = pd.read_csv( in_xyz, delim_whitespace=True, header=None, names=colnames )
# create a column named geometry with shapely geometry objects for each row
def f( x ):
''' return a Point shapely object for each x,y pair'''
return Point( x.lon, x.lat )
cru_df[ 'geometry' ] = cru_df.apply( f, axis=1 )
cru_df = gpd.GeoDataFrame( cru_df ) # convert to GeoDataFrame
cru_df.to_file( output_filename, 'ESRI Shapefile' )
return output_filename
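# Editor's sketch (hypothetical paths, illustrative only, in the spirit of the
# commented example block at the end of this module): convert a downloaded
# CRU CL2.0 .dat.gz file into a point shapefile in WGS84.
# cru_xyz_to_shp( '/path/to/grid_10min_tmp.dat.gz', 'lon', 'lat',
# 	{'init':'epsg:4326'}, '/path/to/cru_tmp_points.shp' )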
def bounds_to_extent( bounds ):
'''
take input rasterio bounds object and return an extent
'''
l,b,r,t = bounds
return [ (l,b), (r,b), (r,t), (l,t), (l,b) ]
def extent_to_shapefile( extent, output_shapefile, proj4string ):
''' convert an extent to a shapefile using its proj4string '''
import geopandas as gpd
from shapely.geometry import Polygon
gpd.GeoDataFrame( {'extent_id':1, 'geometry':Polygon( extent )}, index=[1], crs=proj4string ).to_file( output_shapefile, 'ESRI Shapefile' )
return output_shapefile
def pad_bounds( rst, npixels, crs, output_shapefile ):
'''
	expand the extent of a single raster by npixels in each direction and
	write the expanded extent out as a shapefile
	rst: rasterio raster object
npixels: tuple of 4 (left(-),bottom(-),right(+),top(+)) number of pixels to
expand in each direction. for 5 pixels in each direction it would look like
this: (-5. -5. 5, 5) or just in the right and top directions like this:
(0,0,5,5).
crs: epsg code or proj4string defining the geospatial reference
system
output_shapefile: string full path to the newly created output shapefile
'''
import rasterio, os, sys
from shapely.geometry import Polygon
resolution = rst.res[0]
new_bounds = [ bound+(expand*resolution) for bound, expand in zip( rst.bounds, npixels ) ]
new_ext = bounds_to_extent( new_bounds )
return extent_to_shapefile( new_ext, output_shapefile, crs )
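# Editor's sketch (hypothetical filenames, illustrative only): pad a raster's
# extent by 5 pixels on every side and write the padded footprint to a shapefile.
# with rasterio.open( '/path/to/template.tif' ) as rst:
# 	pad_bounds( rst, (-5, -5, 5, 5), {'init':'epsg:3338'}, '/path/to/padded_extent.shp' )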
def xyz_to_grid( x, y, z, grid, method='cubic', output_dtype=np.float32 ):
'''
interpolate points to a grid. simple wrapper around
scipy.interpolate.griddata. Points and grid must be
in the same coordinate system
x = 1-D np.array of x coordinates / x,y,z must be same length
y = 1-D np.array of y coordinates / x,y,z must be same length
z = 1-D np.array of z coordinates / x,y,z must be same length
grid = tuple of meshgrid as made using numpy.meshgrid()
order (xi, yi)
method = one of 'cubic', 'near', linear
'''
import numpy as np
from scipy.interpolate import griddata
zi = griddata( (x, y), z, grid, method=method )
zi = np.flipud( zi.astype( output_dtype ) )
return zi
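# Editor's sketch (illustrative only, mirrors the commented example block at the
# end of this module): interpolate synthetic scattered points onto a regular grid.
# pts_x = np.random.uniform( 0, 10, 200 )
# pts_y = np.random.uniform( 0, 10, 200 )
# pts_z = np.sin( pts_x ) * np.cos( pts_y )
# gx, gy = np.meshgrid( np.linspace( 0, 10, 100 ), np.linspace( 0, 10, 100 ) )
# zi = xyz_to_grid( pts_x, pts_y, pts_z, (gx, gy), method='cubic' )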
def crop_to_bounds2( rasterio_rst, bounds, output_filename, mask=None, mask_value=None ):
'''
take a rasterio raster object and crop it to a smaller bounding box
masking is supported where masked values are 0 and unmasked values are 1
PARAMETERS
----------
rasterio_rst = rasterio raster object
bounds = rasterio style bounds (left, bottom, right, top)
output_filename = string path to the raster file to be created
mask = a 2d numpy array of the same shape as rasterio_rst with
masked values = 0 and unmasked = 1
RETURNS
-------
file path to the newly created file -- essentially the value of output_filename
'''
from rasterio import Affine as A
window = rasterio_rst.window( *bounds )
xmin, ymin, xmax, ymax = rasterio_rst.window_bounds( window )
row_res, col_res = rasterio_rst.res
arr = rasterio_rst.read( 1, window=window )
	if mask is not None:
arr[ mask != 1 ] = mask_value
nodata = mask_value
else:
nodata = rasterio_rst.meta[ 'nodata' ]
meta = {}
meta.update( compress='lzw',
affine=A( col_res, 0.0, xmin, 0.0, -row_res, ymax ),
				height=arr.shape[0],
				width=arr.shape[1],
transform=[xmin, col_res, 0.0, ymax, 0.0, -row_res],
				crs=rasterio_rst.crs,
nodata=nodata,
dtype=rasterio_rst.meta[ 'dtype' ],
count=1,
driver=u'GTiff' )
with rasterio.open( output_filename, 'w', **meta ) as out:
out.write_band( 1, arr )
return output_filename
def crop_to_bounds( rasterio_rst, bounds ):
''' crop a raster by a window made from bounds of another domain '''
window = rasterio_rst.window( *bounds )
return rasterio_rst.read( 1, window=window )
def interpolate_akcan( x, y, z, grid, expanded_meta, template_rst, output_filename, method='cubic', output_dtype=np.float32 ):
'''
interpolate across the alaska canada domains and crop / mask to that extent
'''
cru_interp = xyz_to_grid( x, y, z, grid, method='cubic', output_dtype=np.float32 )
cru_interp = np.nan_to_num( cru_interp )
# convert to in memory rasterio object
expanded_meta.update( driver='MEM' )
cru_interpolated = rasterio.open( '', mode='w', **expanded_meta )
cru_interpolated.write_band( 1, cru_interp )
akcan = crop_to_bounds( cru_interpolated, template_rst.bounds )
meta = template_rst.meta
meta.update( compress='lzw' )
with rasterio.open( output_filename, 'w', **meta ) as out:
mask = template_rst.read_masks( 1 )
akcan[ mask == 0 ] = meta[ 'nodata' ]
akcan = np.ma.masked_where( mask == 0, akcan )
akcan.fill_value = meta[ 'nodata' ]
out.write_band( 1, akcan )
return output_filename
def run( args ):
return interpolate_akcan( **args )
if __name__ == '__main__':
import os, rasterio, glob, fiona
import numpy as np
import pandas as pd
import geopandas as gpd
from rasterio import Affine as A
from pathos import multiprocessing as mp
import argparse
# parse the commandline arguments
parser = argparse.ArgumentParser( description='preprocess CRU CL2.0 data to the AKCAN extent required by SNAP' )
parser.add_argument( "-p", "--base_path", action='store', dest='base_path', type=str, help="path to parent directory with a subdirector(ies)y storing the data" )
parser.add_argument( "-cru", "--cru_filename", action='store', dest='cru_filename', type=str, help="string path to the .tar.gz file location, downloaded from the CRU site" )
parser.add_argument( "-v", "--variable", action='store', dest='variable', type=str, help="string abbreviated name of the variable being processed." )
parser.add_argument( "-tr", "--template_raster_fn", action='store', dest='template_raster_fn', type=str, help="string path to a template raster dataset to match the CRU CL2.0 to." )
# base_path = '/workspace/Shared/Tech_Projects/ALFRESCO_Inputs/project_data/TEM_Data'
# # open the Climatic Research Unit (CRU) CL2.0 data downloaded from:
# # http://www.cru.uea.ac.uk/cru/data/hrg/tmc/
# # cru_filename = os.path.join( cru_folder, 'grid_10min_tmp.dat.gz'
# cru_filename = os.path.join( cru_folder, 'grid_10min_sunp.dat.gz'
# variable = 'sunp'
# parse and unpack the args
args = parser.parse_args()
base_path = args.base_path
cru_filename = args.cru_filename
variable = args.variable
template_raster_fn = args.template_raster_fn
# build an output path to store the data generated with this script
cru_path = os.path.join( base_path, 'cru_ts20', variable )
if not os.path.exists( cru_path ):
os.makedirs( cru_path )
# read in the gzipped .dat file downloaded from the MET Office UK
colnames = [ 'lat', 'lon', '01', '02', '03', '04', '05', '06', '07', '08', '09', '10', '11', '12' ]
cru_df = pd.read_csv( cru_filename, delim_whitespace=True, compression='gzip', header=None, names=colnames )
# convert to point shapefile
cru_shp_fn = os.path.join( cru_path, 'cru_'+variable+'_ts20_1961_1990_climatology.shp' )
cru_xyz_to_shp( cru_filename, 'lon', 'lat', {'init':'epsg:4326'}, cru_shp_fn )
# template dataset
template_raster = rasterio.open( template_raster_fn )
resolution = template_raster.res
template_meta = template_raster.meta
# pad the bounds of the akcan template dataset
crs = { 'init':'epsg:3338' }
extent_path = os.path.join( cru_path, 'extents' )
if not os.path.exists( extent_path ):
os.makedirs( extent_path )
new_ext_fn = os.path.join( extent_path, 'akcan_extent.shp' )
npixels = ( -200, -2000, 200, 200 )
pad_bounds( template_raster, npixels, crs, new_ext_fn )
# filename for a newly clipped and reprojected shapefile using the above padded bounds shape
intermediate_path = os.path.join( cru_path, 'intermediate' )
if not os.path.exists( intermediate_path ):
os.makedirs( intermediate_path )
expanded_ext_fn = os.path.join( intermediate_path, variable + '_cru_ts20_1961_1990_climatology_3338_akcan_expanded.shp' )
# reproject / crop to the AKCAN extent, the cru shapefile built above using ogr2ogr
os.system( "ogr2ogr -overwrite -f 'ESRI Shapefile' -clipdst " + new_ext_fn + " -s_srs 'EPSG:4326' -t_srs 'EPSG:3338' " + expanded_ext_fn + " " + cru_shp_fn )
# -wrapdateline -- removed since it is not a geog srs output
# generate metadata for the expanded extent to interpolate to
xmin, ymin, xmax, ymax = fiona.open( new_ext_fn ).bounds
cols = (xmax - xmin) / resolution[1]
rows = (ymax - ymin) / resolution[0]
# copy/update metadata to expanded extent
expanded_meta = template_meta
expanded_meta[ 'affine' ] = A( resolution[0], 0.0, xmin, 0.0, -resolution[1], ymax )
expanded_meta[ 'crs' ] = { 'init':'epsg:3338' }
expanded_meta[ 'height' ] = rows
expanded_meta[ 'width' ] = cols
expanded_meta[ 'transform' ] = expanded_meta[ 'affine' ].to_gdal()
# read in the clipped and reprojected cru shapefile using geopandas
cru_gdf = gpd.read_file( expanded_ext_fn )
# update lon and lat to the 3338
cru_gdf.lon = cru_gdf.geometry.apply( lambda x: x.x )
cru_gdf.lat = cru_gdf.geometry.apply( lambda x: x.y )
# build the interpolation input values
x = np.array(cru_gdf.lon.tolist())
y = np.array(cru_gdf.lat.tolist())
# build the output grid
xi = np.linspace( xmin, xmax, cols )
yi = np.linspace( ymin, ymax, rows )
xi, yi = np.meshgrid( xi, yi )
akcan_path = os.path.join( cru_path, 'akcan' )
if not os.path.exists( akcan_path ):
os.makedirs( akcan_path )
# build some args
months = ['01','02','03','04','05','06','07','08','09','10','11','12']
output_filenames = [ os.path.join( akcan_path, variable+'_cru_cl20_akcan_'+month+'_1961_1990.tif' ) for month in months ]
# run it in parallel -- the pool is not working currently! switching to serial
args_list = [ { 'x':x, 'y':y, 'z':np.array(cru_gdf[ month ]), 'grid':(xi,yi), 'expanded_meta':expanded_meta, 'template_rst':template_raster, 'output_filename':out_fn } for month, out_fn in zip( months, output_filenames ) ]
# pool = mp.Pool( 4 )
out = map( lambda x: run( x ), args_list )
# out = pool.map( lambda x: run( x ), args_list )
# pool.close()
# # # EXAMPLE OF USE # # #
# import os
# os.chdir( '/workspace/Shared/Tech_Projects/ALFRESCO_Inputs/project_data/CODE/tem_ar5_inputs/downscale_cmip5/bin' )
# cru_folder = '/Data/Base_Data/Climate/World/CRU_grids/CRU_TS20'
# # var_fn_dict = { 'hur':os.path.join( cru_folder, 'grid_10min_reh.dat.gz'),'tas':os.path.join( cru_folder, 'grid_10min_tmp.dat.gz'), 'sunp':os.path.join( cru_folder, 'grid_10min_sunp.dat.gz' ) }
# var_fn_dict = { 'pre':os.path.join( cru_folder, 'grid_10min_pre.dat.gz' ) } # just a test.
# base_path = '/workspace/Shared/Tech_Projects/ALFRESCO_Inputs/project_data/TEM_Data/cru_october_final'
# template_raster_fn = '/workspace/Shared/Tech_Projects/ALFRESCO_Inputs/project_data/TEM_Data/templates/tas_mean_C_AR5_GFDL-CM3_historical_01_1860.tif'
# for variable, cru_filename in var_fn_dict.iteritems():
# print 'working on : %s' % variable
# os.system( 'ipython -- cru_cl20_1961_1990_climatology_preprocess.py -p ' + base_path + ' -cru ' + cru_filename + ' -v ' + variable + ' -tr ' + template_raster_fn )
| mit |
ilo10/scikit-learn | sklearn/neural_network/rbm.py | 206 | 12292 | """Restricted Boltzmann Machine
"""
# Authors: Yann N. Dauphin <[email protected]>
# Vlad Niculae
# Gabriel Synnaeve
# Lars Buitinck
# License: BSD 3 clause
import time
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator
from ..base import TransformerMixin
from ..externals.six.moves import xrange
from ..utils import check_array
from ..utils import check_random_state
from ..utils import gen_even_slices
from ..utils import issparse
from ..utils.extmath import safe_sparse_dot
from ..utils.extmath import log_logistic
from ..utils.fixes import expit # logistic function
from ..utils.validation import check_is_fitted
class BernoulliRBM(BaseEstimator, TransformerMixin):
"""Bernoulli Restricted Boltzmann Machine (RBM).
A Restricted Boltzmann Machine with binary visible units and
binary hiddens. Parameters are estimated using Stochastic Maximum
Likelihood (SML), also known as Persistent Contrastive Divergence (PCD)
[2].
The time complexity of this implementation is ``O(d ** 2)`` assuming
d ~ n_features ~ n_components.
Read more in the :ref:`User Guide <rbm>`.
Parameters
----------
n_components : int, optional
Number of binary hidden units.
learning_rate : float, optional
The learning rate for weight updates. It is *highly* recommended
to tune this hyper-parameter. Reasonable values are in the
10**[0., -3.] range.
batch_size : int, optional
Number of examples per minibatch.
n_iter : int, optional
Number of iterations/sweeps over the training dataset to perform
during training.
verbose : int, optional
The verbosity level. The default, zero, means silent mode.
random_state : integer or numpy.RandomState, optional
A random number generator instance to define the state of the
random permutations generator. If an integer is given, it fixes the
seed. Defaults to the global numpy random number generator.
Attributes
----------
intercept_hidden_ : array-like, shape (n_components,)
Biases of the hidden units.
intercept_visible_ : array-like, shape (n_features,)
Biases of the visible units.
components_ : array-like, shape (n_components, n_features)
Weight matrix, where n_features in the number of
visible units and n_components is the number of hidden units.
Examples
--------
>>> import numpy as np
>>> from sklearn.neural_network import BernoulliRBM
>>> X = np.array([[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 1]])
>>> model = BernoulliRBM(n_components=2)
>>> model.fit(X)
BernoulliRBM(batch_size=10, learning_rate=0.1, n_components=2, n_iter=10,
random_state=None, verbose=0)
References
----------
[1] Hinton, G. E., Osindero, S. and Teh, Y. A fast learning algorithm for
deep belief nets. Neural Computation 18, pp 1527-1554.
http://www.cs.toronto.edu/~hinton/absps/fastnc.pdf
[2] Tieleman, T. Training Restricted Boltzmann Machines using
Approximations to the Likelihood Gradient. International Conference
on Machine Learning (ICML) 2008
"""
def __init__(self, n_components=256, learning_rate=0.1, batch_size=10,
n_iter=10, verbose=0, random_state=None):
self.n_components = n_components
self.learning_rate = learning_rate
self.batch_size = batch_size
self.n_iter = n_iter
self.verbose = verbose
self.random_state = random_state
def transform(self, X):
"""Compute the hidden layer activation probabilities, P(h=1|v=X).
Parameters
----------
X : {array-like, sparse matrix} shape (n_samples, n_features)
The data to be transformed.
Returns
-------
h : array, shape (n_samples, n_components)
Latent representations of the data.
"""
check_is_fitted(self, "components_")
X = check_array(X, accept_sparse='csr', dtype=np.float)
return self._mean_hiddens(X)
def _mean_hiddens(self, v):
"""Computes the probabilities P(h=1|v).
Parameters
----------
v : array-like, shape (n_samples, n_features)
Values of the visible layer.
Returns
-------
h : array-like, shape (n_samples, n_components)
Corresponding mean field values for the hidden layer.
"""
p = safe_sparse_dot(v, self.components_.T)
p += self.intercept_hidden_
return expit(p, out=p)
def _sample_hiddens(self, v, rng):
"""Sample from the distribution P(h|v).
Parameters
----------
v : array-like, shape (n_samples, n_features)
Values of the visible layer to sample from.
rng : RandomState
Random number generator to use.
Returns
-------
h : array-like, shape (n_samples, n_components)
Values of the hidden layer.
"""
p = self._mean_hiddens(v)
return (rng.random_sample(size=p.shape) < p)
def _sample_visibles(self, h, rng):
"""Sample from the distribution P(v|h).
Parameters
----------
h : array-like, shape (n_samples, n_components)
Values of the hidden layer to sample from.
rng : RandomState
Random number generator to use.
Returns
-------
v : array-like, shape (n_samples, n_features)
Values of the visible layer.
"""
p = np.dot(h, self.components_)
p += self.intercept_visible_
expit(p, out=p)
return (rng.random_sample(size=p.shape) < p)
def _free_energy(self, v):
"""Computes the free energy F(v) = - log sum_h exp(-E(v,h)).
Parameters
----------
v : array-like, shape (n_samples, n_features)
Values of the visible layer.
Returns
-------
free_energy : array-like, shape (n_samples,)
The value of the free energy.
"""
return (- safe_sparse_dot(v, self.intercept_visible_)
- np.logaddexp(0, safe_sparse_dot(v, self.components_.T)
+ self.intercept_hidden_).sum(axis=1))
def gibbs(self, v):
"""Perform one Gibbs sampling step.
Parameters
----------
v : array-like, shape (n_samples, n_features)
Values of the visible layer to start from.
Returns
-------
v_new : array-like, shape (n_samples, n_features)
Values of the visible layer after one Gibbs step.
"""
check_is_fitted(self, "components_")
if not hasattr(self, "random_state_"):
self.random_state_ = check_random_state(self.random_state)
h_ = self._sample_hiddens(v, self.random_state_)
v_ = self._sample_visibles(h_, self.random_state_)
return v_
def partial_fit(self, X, y=None):
"""Fit the model to the data X which should contain a partial
segment of the data.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
Returns
-------
self : BernoulliRBM
The fitted model.
"""
X = check_array(X, accept_sparse='csr', dtype=np.float)
if not hasattr(self, 'random_state_'):
self.random_state_ = check_random_state(self.random_state)
if not hasattr(self, 'components_'):
self.components_ = np.asarray(
self.random_state_.normal(
0,
0.01,
(self.n_components, X.shape[1])
),
order='fortran')
if not hasattr(self, 'intercept_hidden_'):
self.intercept_hidden_ = np.zeros(self.n_components, )
if not hasattr(self, 'intercept_visible_'):
self.intercept_visible_ = np.zeros(X.shape[1], )
if not hasattr(self, 'h_samples_'):
self.h_samples_ = np.zeros((self.batch_size, self.n_components))
self._fit(X, self.random_state_)
def _fit(self, v_pos, rng):
"""Inner fit for one mini-batch.
Adjust the parameters to maximize the likelihood of v using
Stochastic Maximum Likelihood (SML).
Parameters
----------
v_pos : array-like, shape (n_samples, n_features)
The data to use for training.
rng : RandomState
Random number generator to use for sampling.
"""
h_pos = self._mean_hiddens(v_pos)
v_neg = self._sample_visibles(self.h_samples_, rng)
h_neg = self._mean_hiddens(v_neg)
lr = float(self.learning_rate) / v_pos.shape[0]
update = safe_sparse_dot(v_pos.T, h_pos, dense_output=True).T
update -= np.dot(h_neg.T, v_neg)
self.components_ += lr * update
self.intercept_hidden_ += lr * (h_pos.sum(axis=0) - h_neg.sum(axis=0))
self.intercept_visible_ += lr * (np.asarray(
v_pos.sum(axis=0)).squeeze() -
v_neg.sum(axis=0))
h_neg[rng.uniform(size=h_neg.shape) < h_neg] = 1.0 # sample binomial
self.h_samples_ = np.floor(h_neg, h_neg)
def score_samples(self, X):
"""Compute the pseudo-likelihood of X.
Parameters
----------
X : {array-like, sparse matrix} shape (n_samples, n_features)
Values of the visible layer. Must be all-boolean (not checked).
Returns
-------
pseudo_likelihood : array-like, shape (n_samples,)
Value of the pseudo-likelihood (proxy for likelihood).
Notes
-----
This method is not deterministic: it computes a quantity called the
free energy on X, then on a randomly corrupted version of X, and
returns the log of the logistic function of the difference.
"""
check_is_fitted(self, "components_")
v = check_array(X, accept_sparse='csr')
rng = check_random_state(self.random_state)
# Randomly corrupt one feature in each sample in v.
ind = (np.arange(v.shape[0]),
rng.randint(0, v.shape[1], v.shape[0]))
if issparse(v):
data = -2 * v[ind] + 1
v_ = v + sp.csr_matrix((data.A.ravel(), ind), shape=v.shape)
else:
v_ = v.copy()
v_[ind] = 1 - v_[ind]
fe = self._free_energy(v)
fe_ = self._free_energy(v_)
return v.shape[1] * log_logistic(fe_ - fe)
def fit(self, X, y=None):
"""Fit the model to the data X.
Parameters
----------
X : {array-like, sparse matrix} shape (n_samples, n_features)
Training data.
Returns
-------
self : BernoulliRBM
The fitted model.
"""
X = check_array(X, accept_sparse='csr', dtype=np.float)
n_samples = X.shape[0]
rng = check_random_state(self.random_state)
self.components_ = np.asarray(
rng.normal(0, 0.01, (self.n_components, X.shape[1])),
order='fortran')
self.intercept_hidden_ = np.zeros(self.n_components, )
self.intercept_visible_ = np.zeros(X.shape[1], )
self.h_samples_ = np.zeros((self.batch_size, self.n_components))
n_batches = int(np.ceil(float(n_samples) / self.batch_size))
batch_slices = list(gen_even_slices(n_batches * self.batch_size,
n_batches, n_samples))
verbose = self.verbose
begin = time.time()
for iteration in xrange(1, self.n_iter + 1):
for batch_slice in batch_slices:
self._fit(X[batch_slice], rng)
if verbose:
end = time.time()
print("[%s] Iteration %d, pseudo-likelihood = %.2f,"
" time = %.2fs"
% (type(self).__name__, iteration,
self.score_samples(X).mean(), end - begin))
begin = end
return self
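# Editor's sketch (illustrative only, not part of scikit-learn): beyond fit(),
# the estimator exposes transform() for hidden-unit activations, score_samples()
# for a pseudo-likelihood proxy and gibbs() for a single sampling step, e.g.:
#
# rbm = BernoulliRBM(n_components=2, n_iter=20, random_state=0)
# X = np.array([[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 1]])
# hidden = rbm.fit(X).transform(X)      # (4, 2) array of P(h=1|v)
# pseudo_ll = rbm.score_samples(X)      # per-sample pseudo-likelihood proxy
# v_next = rbm.gibbs(X.astype(np.bool)) # one Gibbs step from the visible units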
| bsd-3-clause |
cpcloud/dask | dask/dataframe/tests/test_indexing.py | 2 | 12626 | import pandas as pd
import pandas.util.testing as tm
import numpy as np
import pytest
import dask
import dask.dataframe as dd
from dask.dataframe.indexing import _coerce_loc_index
from dask.dataframe.utils import assert_eq, make_meta
dsk = {('x', 0): pd.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]},
index=[0, 1, 3]),
('x', 1): pd.DataFrame({'a': [4, 5, 6], 'b': [3, 2, 1]},
index=[5, 6, 8]),
('x', 2): pd.DataFrame({'a': [7, 8, 9], 'b': [0, 0, 0]},
index=[9, 9, 9])}
meta = make_meta({'a': 'i8', 'b': 'i8'}, index=pd.Index([], 'i8'))
d = dd.DataFrame(dsk, 'x', meta, [0, 5, 9, 9])
full = d.compute()
def test_loc():
assert d.loc[3:8].divisions[0] == 3
assert d.loc[3:8].divisions[-1] == 8
assert d.loc[5].divisions == (5, 5)
assert_eq(d.loc[5], full.loc[5:5])
assert_eq(d.loc[3:8], full.loc[3:8])
assert_eq(d.loc[:8], full.loc[:8])
assert_eq(d.loc[3:], full.loc[3:])
assert_eq(d.loc[[5]], full.loc[[5]])
assert_eq(d.loc[[3, 4, 1, 8]], full.loc[[3, 4, 1, 8]])
assert_eq(d.a.loc[5], full.a.loc[5:5])
assert_eq(d.a.loc[3:8], full.a.loc[3:8])
assert_eq(d.a.loc[:8], full.a.loc[:8])
assert_eq(d.a.loc[3:], full.a.loc[3:])
assert_eq(d.a.loc[[5]], full.a.loc[[5]])
assert_eq(d.a.loc[[3, 4, 1, 8]], full.a.loc[[3, 4, 1, 8]])
pytest.raises(KeyError, lambda: d.loc[1000])
assert_eq(d.loc[1000:], full.loc[1000:])
assert_eq(d.loc[-2000:-1000], full.loc[-2000:-1000])
assert sorted(d.loc[5].dask) == sorted(d.loc[5].dask)
assert sorted(d.loc[5].dask) != sorted(d.loc[6].dask)
def test_loc_non_informative_index():
df = pd.DataFrame({'x': [1, 2, 3, 4]}, index=[10, 20, 30, 40])
ddf = dd.from_pandas(df, npartitions=2, sort=True)
ddf.divisions = (None,) * 3
assert not ddf.known_divisions
ddf.loc[20:30].compute(get=dask.get)
assert_eq(ddf.loc[20:30], df.loc[20:30])
df = pd.DataFrame({'x': [1, 2, 3, 4]}, index=[10, 20, 20, 40])
ddf = dd.from_pandas(df, npartitions=2, sort=True)
assert_eq(ddf.loc[20], df.loc[20:20])
def test_loc_with_text_dates():
A = tm.makeTimeSeries(10).iloc[:5]
B = tm.makeTimeSeries(10).iloc[5:]
s = dd.Series({('df', 0): A, ('df', 1): B}, 'df', A,
[A.index.min(), B.index.min(), B.index.max()])
assert s.loc['2000': '2010'].divisions == s.divisions
assert_eq(s.loc['2000': '2010'], s)
assert len(s.loc['2000-01-03': '2000-01-05'].compute()) == 3
def test_loc_with_series():
assert_eq(d.loc[d.a % 2 == 0], full.loc[full.a % 2 == 0])
assert sorted(d.loc[d.a % 2].dask) == sorted(d.loc[d.a % 2].dask)
assert sorted(d.loc[d.a % 2].dask) != sorted(d.loc[d.a % 3].dask)
def test_loc_with_series_different_partition():
df = pd.DataFrame(np.random.randn(20, 5),
index=list('abcdefghijklmnopqrst'),
columns=list('ABCDE'))
ddf = dd.from_pandas(df, 3)
assert_eq(ddf.loc[ddf.A > 0], df.loc[df.A > 0])
assert_eq(ddf.loc[(ddf.A > 0).repartition(['a', 'g', 'k', 'o', 't'])],
df.loc[df.A > 0])
def test_loc2d():
# index indexer is always regarded as slice for duplicated values
assert_eq(d.loc[5, 'a'], full.loc[5:5, 'a'])
# assert_eq(d.loc[[5], 'a'], full.loc[[5], 'a'])
assert_eq(d.loc[5, ['a']], full.loc[5:5, ['a']])
# assert_eq(d.loc[[5], ['a']], full.loc[[5], ['a']])
assert_eq(d.loc[3:8, 'a'], full.loc[3:8, 'a'])
assert_eq(d.loc[:8, 'a'], full.loc[:8, 'a'])
assert_eq(d.loc[3:, 'a'], full.loc[3:, 'a'])
assert_eq(d.loc[[8], 'a'], full.loc[[8], 'a'])
assert_eq(d.loc[3:8, ['a']], full.loc[3:8, ['a']])
assert_eq(d.loc[:8, ['a']], full.loc[:8, ['a']])
assert_eq(d.loc[3:, ['a']], full.loc[3:, ['a']])
assert_eq(d.loc[[3, 4, 3], ['a']], full.loc[[3, 4, 3], ['a']])
# 3d
with tm.assertRaises(pd.core.indexing.IndexingError):
d.loc[3, 3, 3]
# Series should raise
with tm.assertRaises(pd.core.indexing.IndexingError):
d.a.loc[3, 3]
with tm.assertRaises(pd.core.indexing.IndexingError):
d.a.loc[3:, 3]
with tm.assertRaises(pd.core.indexing.IndexingError):
d.a.loc[d.a % 2 == 0, 3]
def test_loc2d_with_known_divisions():
df = pd.DataFrame(np.random.randn(20, 5),
index=list('abcdefghijklmnopqrst'),
columns=list('ABCDE'))
ddf = dd.from_pandas(df, 3)
assert_eq(ddf.loc['a', 'A'], df.loc[['a'], 'A'])
assert_eq(ddf.loc['a', ['A']], df.loc[['a'], ['A']])
assert_eq(ddf.loc['a':'o', 'A'], df.loc['a':'o', 'A'])
assert_eq(ddf.loc['a':'o', ['A']], df.loc['a':'o', ['A']])
assert_eq(ddf.loc[['n'], ['A']], df.loc[['n'], ['A']])
assert_eq(ddf.loc[['a', 'c', 'n'], ['A']], df.loc[['a', 'c', 'n'], ['A']])
assert_eq(ddf.loc[['t', 'b'], ['A']], df.loc[['t', 'b'], ['A']])
assert_eq(ddf.loc[['r', 'r', 'c', 'g', 'h'], ['A']],
df.loc[['r', 'r', 'c', 'g', 'h'], ['A']])
def test_loc2d_with_unknown_divisions():
df = pd.DataFrame(np.random.randn(20, 5),
index=list('abcdefghijklmnopqrst'),
columns=list('ABCDE'))
ddf = dd.from_pandas(df, 3)
ddf.divisions = (None, ) * len(ddf.divisions)
assert ddf.known_divisions is False
assert_eq(ddf.loc['a', 'A'], df.loc[['a'], 'A'])
assert_eq(ddf.loc['a', ['A']], df.loc[['a'], ['A']])
assert_eq(ddf.loc['a':'o', 'A'], df.loc['a':'o', 'A'])
assert_eq(ddf.loc['a':'o', ['A']], df.loc['a':'o', ['A']])
def test_loc2d_duplicated_columns():
df = pd.DataFrame(np.random.randn(20, 5),
index=list('abcdefghijklmnopqrst'),
columns=list('AABCD'))
ddf = dd.from_pandas(df, 3)
assert_eq(ddf.loc['a', 'A'], df.loc[['a'], 'A'])
assert_eq(ddf.loc['a', ['A']], df.loc[['a'], ['A']])
assert_eq(ddf.loc['j', 'B'], df.loc[['j'], 'B'])
assert_eq(ddf.loc['j', ['B']], df.loc[['j'], ['B']])
assert_eq(ddf.loc['a':'o', 'A'], df.loc['a':'o', 'A'])
assert_eq(ddf.loc['a':'o', ['A']], df.loc['a':'o', ['A']])
assert_eq(ddf.loc['j':'q', 'B'], df.loc['j':'q', 'B'])
assert_eq(ddf.loc['j':'q', ['B']], df.loc['j':'q', ['B']])
assert_eq(ddf.loc['a':'o', 'B':'D'], df.loc['a':'o', 'B':'D'])
assert_eq(ddf.loc['a':'o', 'B':'D'], df.loc['a':'o', 'B':'D'])
assert_eq(ddf.loc['j':'q', 'B':'A'], df.loc['j':'q', 'B':'A'])
assert_eq(ddf.loc['j':'q', 'B':'A'], df.loc['j':'q', 'B':'A'])
assert_eq(ddf.loc[ddf.B > 0, 'B'], df.loc[df.B > 0, 'B'])
assert_eq(ddf.loc[ddf.B > 0, ['A', 'C']], df.loc[df.B > 0, ['A', 'C']])
def test_getitem():
df = pd.DataFrame({'A': [1, 2, 3, 4, 5, 6, 7, 8, 9],
'B': [9, 8, 7, 6, 5, 4, 3, 2, 1],
'C': [True, False, True] * 3},
columns=list('ABC'))
ddf = dd.from_pandas(df, 2)
assert_eq(ddf['A'], df['A'])
# check cache consistency
tm.assert_series_equal(ddf['A']._meta, ddf._meta['A'])
assert_eq(ddf[['A', 'B']], df[['A', 'B']])
tm.assert_frame_equal(ddf[['A', 'B']]._meta, ddf._meta[['A', 'B']])
assert_eq(ddf[ddf.C], df[df.C])
tm.assert_series_equal(ddf.C._meta, ddf._meta.C)
assert_eq(ddf[ddf.C.repartition([0, 2, 5, 8])], df[df.C])
pytest.raises(KeyError, lambda: df['X'])
pytest.raises(KeyError, lambda: df[['A', 'X']])
pytest.raises(AttributeError, lambda: df.X)
# not str/unicode
df = pd.DataFrame(np.random.randn(10, 5))
ddf = dd.from_pandas(df, 2)
assert_eq(ddf[0], df[0])
assert_eq(ddf[[1, 2]], df[[1, 2]])
pytest.raises(KeyError, lambda: df[8])
pytest.raises(KeyError, lambda: df[[1, 8]])
def test_getitem_slice():
df = pd.DataFrame({'A': [1, 2, 3, 4, 5, 6, 7, 8, 9],
'B': [9, 8, 7, 6, 5, 4, 3, 2, 1],
'C': [True, False, True] * 3},
index=list('abcdefghi'))
ddf = dd.from_pandas(df, 3)
assert_eq(ddf['a':'e'], df['a':'e'])
assert_eq(ddf['a':'b'], df['a':'b'])
assert_eq(ddf['f':], df['f':])
def test_loc_on_numpy_datetimes():
df = pd.DataFrame({'x': [1, 2, 3]},
index=list(map(np.datetime64, ['2014', '2015', '2016'])))
a = dd.from_pandas(df, 2)
a.divisions = list(map(np.datetime64, a.divisions))
assert_eq(a.loc['2014': '2015'], a.loc['2014': '2015'])
def test_loc_on_pandas_datetimes():
df = pd.DataFrame({'x': [1, 2, 3]},
index=list(map(pd.Timestamp, ['2014', '2015', '2016'])))
a = dd.from_pandas(df, 2)
a.divisions = list(map(pd.Timestamp, a.divisions))
assert_eq(a.loc['2014': '2015'], a.loc['2014': '2015'])
def test_coerce_loc_index():
for t in [pd.Timestamp, np.datetime64]:
assert isinstance(_coerce_loc_index([t('2014')], '2014'), t)
def test_loc_timestamp_str():
df = pd.DataFrame({'A': np.random.randn(100), 'B': np.random.randn(100)},
index=pd.date_range('2011-01-01', freq='H', periods=100))
ddf = dd.from_pandas(df, 10)
# partial string slice
assert_eq(df.loc['2011-01-02'],
ddf.loc['2011-01-02'])
assert_eq(df.loc['2011-01-02':'2011-01-10'],
ddf.loc['2011-01-02':'2011-01-10'])
# same reso, dask result is always DataFrame
assert_eq(df.loc['2011-01-02 10:00'].to_frame().T,
ddf.loc['2011-01-02 10:00'])
# series
assert_eq(df.A.loc['2011-01-02'],
ddf.A.loc['2011-01-02'])
assert_eq(df.A.loc['2011-01-02':'2011-01-10'],
ddf.A.loc['2011-01-02':'2011-01-10'])
# slice with timestamp (dask result must be DataFrame)
assert_eq(df.loc[pd.Timestamp('2011-01-02')].to_frame().T,
ddf.loc[pd.Timestamp('2011-01-02')])
assert_eq(df.loc[pd.Timestamp('2011-01-02'):pd.Timestamp('2011-01-10')],
ddf.loc[pd.Timestamp('2011-01-02'):pd.Timestamp('2011-01-10')])
assert_eq(df.loc[pd.Timestamp('2011-01-02 10:00')].to_frame().T,
ddf.loc[pd.Timestamp('2011-01-02 10:00')])
df = pd.DataFrame({'A': np.random.randn(100), 'B': np.random.randn(100)},
index=pd.date_range('2011-01-01', freq='M', periods=100))
ddf = dd.from_pandas(df, 50)
assert_eq(df.loc['2011-01'], ddf.loc['2011-01'])
assert_eq(df.loc['2011'], ddf.loc['2011'])
assert_eq(df.loc['2011-01':'2012-05'], ddf.loc['2011-01':'2012-05'])
assert_eq(df.loc['2011':'2015'], ddf.loc['2011':'2015'])
# series
assert_eq(df.B.loc['2011-01'], ddf.B.loc['2011-01'])
assert_eq(df.B.loc['2011'], ddf.B.loc['2011'])
assert_eq(df.B.loc['2011-01':'2012-05'], ddf.B.loc['2011-01':'2012-05'])
assert_eq(df.B.loc['2011':'2015'], ddf.B.loc['2011':'2015'])
def test_getitem_timestamp_str():
df = pd.DataFrame({'A': np.random.randn(100), 'B': np.random.randn(100)},
index=pd.date_range('2011-01-01', freq='H', periods=100))
ddf = dd.from_pandas(df, 10)
# partial string slice
assert_eq(df['2011-01-02'],
ddf['2011-01-02'])
assert_eq(df['2011-01-02':'2011-01-10'],
df['2011-01-02':'2011-01-10'])
df = pd.DataFrame({'A': np.random.randn(100), 'B': np.random.randn(100)},
index=pd.date_range('2011-01-01', freq='D', periods=100))
ddf = dd.from_pandas(df, 50)
assert_eq(df['2011-01'], ddf['2011-01'])
assert_eq(df['2011'], ddf['2011'])
assert_eq(df['2011-01':'2012-05'], ddf['2011-01':'2012-05'])
assert_eq(df['2011':'2015'], ddf['2011':'2015'])
def test_loc_period_str():
# .loc with PeriodIndex doesn't support partial string indexing
# https://github.com/pydata/pandas/issues/13429
pass
def test_getitem_period_str():
df = pd.DataFrame({'A': np.random.randn(100), 'B': np.random.randn(100)},
index=pd.period_range('2011-01-01', freq='H', periods=100))
ddf = dd.from_pandas(df, 10)
# partial string slice
assert_eq(df['2011-01-02'],
ddf['2011-01-02'])
assert_eq(df['2011-01-02':'2011-01-10'],
df['2011-01-02':'2011-01-10'])
# same reso, dask result is always DataFrame
df = pd.DataFrame({'A': np.random.randn(100), 'B': np.random.randn(100)},
index=pd.period_range('2011-01-01', freq='D', periods=100))
ddf = dd.from_pandas(df, 50)
assert_eq(df['2011-01'], ddf['2011-01'])
assert_eq(df['2011'], ddf['2011'])
assert_eq(df['2011-01':'2012-05'], ddf['2011-01':'2012-05'])
assert_eq(df['2011':'2015'], ddf['2011':'2015'])
| bsd-3-clause |
jdanbrown/pydatalab | legacy_tests/bigquery/table_tests.py | 2 | 32614 | # Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
from __future__ import absolute_import
from __future__ import unicode_literals
from builtins import str
from builtins import object
import calendar
import datetime as dt
import mock
from oauth2client.client import AccessTokenCredentials
import pandas
import unittest
import datalab.bigquery
import datalab.context
import datalab.utils
class TestCases(unittest.TestCase):
def _check_name_parts(self, table):
parsed_name = table._name_parts
self.assertEqual('test', parsed_name[0])
self.assertEqual('requestlogs', parsed_name[1])
self.assertEqual('today', parsed_name[2])
self.assertEqual('', parsed_name[3])
self.assertEqual('[test:requestlogs.today]', table._repr_sql_())
self.assertEqual('test:requestlogs.today', str(table))
def test_api_paths(self):
name = datalab.bigquery._utils.TableName('a', 'b', 'c', 'd')
self.assertEqual('/projects/a/datasets/b/tables/cd',
datalab.bigquery._api.Api._TABLES_PATH % name)
self.assertEqual('/projects/a/datasets/b/tables/cd/data',
datalab.bigquery._api.Api._TABLEDATA_PATH % name)
name = datalab.bigquery._utils.DatasetName('a', 'b')
self.assertEqual('/projects/a/datasets/b', datalab.bigquery._api.Api._DATASETS_PATH % name)
def test_parse_full_name(self):
table = TestCases._create_table('test:requestlogs.today')
self._check_name_parts(table)
def test_parse_local_name(self):
table = TestCases._create_table('requestlogs.today')
self._check_name_parts(table)
def test_parse_dict_full_name(self):
table = TestCases._create_table({'project_id': 'test', 'dataset_id': 'requestlogs',
'table_id': 'today'})
self._check_name_parts(table)
def test_parse_dict_local_name(self):
table = TestCases._create_table({'dataset_id': 'requestlogs', 'table_id': 'today'})
self._check_name_parts(table)
def test_parse_named_tuple_name(self):
table = TestCases._create_table(datalab.bigquery._utils.TableName('test',
'requestlogs', 'today', ''))
self._check_name_parts(table)
def test_parse_tuple_full_name(self):
table = TestCases._create_table(('test', 'requestlogs', 'today'))
self._check_name_parts(table)
def test_parse_tuple_local(self):
table = TestCases._create_table(('requestlogs', 'today'))
self._check_name_parts(table)
def test_parse_array_full_name(self):
table = TestCases._create_table(['test', 'requestlogs', 'today'])
self._check_name_parts(table)
def test_parse_array_local(self):
table = TestCases._create_table(['requestlogs', 'today'])
self._check_name_parts(table)
def test_parse_invalid_name(self):
with self.assertRaises(Exception):
TestCases._create_table('today@')
@mock.patch('datalab.bigquery._api.Api.tables_get')
def test_table_metadata(self, mock_api_tables_get):
name = 'test:requestlogs.today'
ts = dt.datetime.utcnow()
mock_api_tables_get.return_value = TestCases._create_table_info_result(ts=ts)
t = TestCases._create_table(name)
metadata = t.metadata
self.assertEqual('Logs', metadata.friendly_name)
self.assertEqual(2, metadata.rows)
self.assertEqual(2, metadata.rows)
self.assertTrue(abs((metadata.created_on - ts).total_seconds()) <= 1)
self.assertEqual(None, metadata.expires_on)
@mock.patch('datalab.bigquery._api.Api.tables_get')
def test_table_schema(self, mock_api_tables):
mock_api_tables.return_value = TestCases._create_table_info_result()
t = TestCases._create_table('test:requestlogs.today')
schema = t.schema
self.assertEqual(2, len(schema))
self.assertEqual('name', schema[0].name)
@mock.patch('datalab.bigquery._api.Api.tables_get')
def test_table_schema_nested(self, mock_api_tables):
mock_api_tables.return_value = TestCases._create_table_info_nested_schema_result()
t = TestCases._create_table('test:requestlogs.today')
schema = t.schema
self.assertEqual(4, len(schema))
self.assertEqual('name', schema[0].name)
self.assertEqual('val', schema[1].name)
self.assertEqual('more', schema[2].name)
self.assertEqual('more.xyz', schema[3].name)
self.assertIsNone(schema['value'])
self.assertIsNotNone(schema['val'])
@mock.patch('datalab.bigquery._api.Api.tables_get')
def test_malformed_response_raises_exception(self, mock_api_tables_get):
mock_api_tables_get.return_value = {}
t = TestCases._create_table('test:requestlogs.today')
with self.assertRaises(Exception) as error:
t.schema
self.assertEqual('Unexpected table response: missing schema', str(error.exception))
@mock.patch('datalab.bigquery._api.Api.tables_list')
@mock.patch('datalab.bigquery._api.Api.datasets_get')
def test_dataset_list(self, mock_api_datasets_get, mock_api_tables_list):
mock_api_datasets_get.return_value = None
mock_api_tables_list.return_value = TestCases._create_table_list_result()
ds = datalab.bigquery.Dataset('testds', context=TestCases._create_context())
tables = []
for table in ds:
tables.append(table)
self.assertEqual(2, len(tables))
self.assertEqual('test:testds.testTable1', str(tables[0]))
self.assertEqual('test:testds.testTable2', str(tables[1]))
@mock.patch('datalab.bigquery._api.Api.tables_list')
@mock.patch('datalab.bigquery._api.Api.datasets_get')
def test_table_list(self, mock_api_datasets_get, mock_api_tables_list):
mock_api_datasets_get.return_value = None
mock_api_tables_list.return_value = TestCases._create_table_list_result()
ds = datalab.bigquery.Dataset('testds', context=TestCases._create_context())
tables = []
for table in ds.tables():
tables.append(table)
self.assertEqual(2, len(tables))
self.assertEqual('test:testds.testTable1', str(tables[0]))
self.assertEqual('test:testds.testTable2', str(tables[1]))
@mock.patch('datalab.bigquery._api.Api.tables_list')
@mock.patch('datalab.bigquery._api.Api.datasets_get')
def test_view_list(self, mock_api_datasets_get, mock_api_tables_list):
mock_api_datasets_get.return_value = None
mock_api_tables_list.return_value = TestCases._create_table_list_result()
ds = datalab.bigquery.Dataset('testds', context=TestCases._create_context())
views = []
for view in ds.views():
views.append(view)
self.assertEqual(1, len(views))
self.assertEqual('test:testds.testView1', str(views[0]))
@mock.patch('datalab.bigquery._api.Api.tables_list')
@mock.patch('datalab.bigquery._api.Api.datasets_get')
def test_table_list_empty(self, mock_api_datasets_get, mock_api_tables_list):
mock_api_datasets_get.return_value = None
mock_api_tables_list.return_value = TestCases._create_table_list_empty_result()
ds = datalab.bigquery.Dataset('testds', context=TestCases._create_context())
tables = []
for table in ds:
tables.append(table)
self.assertEqual(0, len(tables))
@mock.patch('datalab.bigquery._api.Api.tables_get')
def test_table_exists(self, mock_api_tables_get):
mock_api_tables_get.return_value = None
tbl = datalab.bigquery.Table('testds.testTable0', context=TestCases._create_context())
self.assertTrue(tbl.exists())
mock_api_tables_get.side_effect = datalab.utils.RequestException(404, 'failed')
self.assertFalse(tbl.exists())
@mock.patch('datalab.bigquery._api.Api.tables_insert')
@mock.patch('datalab.bigquery._api.Api.tables_list')
@mock.patch('datalab.bigquery._api.Api.datasets_get')
def test_tables_create(self,
mock_api_datasets_get,
mock_api_tables_list,
mock_api_tables_insert):
mock_api_datasets_get.return_value = None
mock_api_tables_list.return_value = []
schema = TestCases._create_inferred_schema()
mock_api_tables_insert.return_value = {}
with self.assertRaises(Exception) as error:
TestCases._create_table_with_schema(schema)
self.assertEqual('Table test:testds.testTable0 could not be created as it already exists',
str(error.exception))
mock_api_tables_insert.return_value = {'selfLink': 'http://foo'}
self.assertIsNotNone(TestCases._create_table_with_schema(schema), 'Expected a table')
@mock.patch('uuid.uuid4')
@mock.patch('time.sleep')
@mock.patch('datalab.bigquery._api.Api.tables_list')
@mock.patch('datalab.bigquery._api.Api.tables_insert')
@mock.patch('datalab.bigquery._api.Api.tables_get')
@mock.patch('datalab.bigquery._api.Api.tabledata_insert_all')
@mock.patch('datalab.bigquery._api.Api.datasets_get')
def test_insert_data_no_table(self,
mock_api_datasets_get,
mock_api_tabledata_insert_all,
mock_api_tables_get,
mock_api_tables_insert,
mock_api_tables_list,
mock_time_sleep,
mock_uuid):
mock_uuid.return_value = TestCases._create_uuid()
mock_time_sleep.return_value = None
mock_api_tables_list.return_value = []
mock_api_tables_insert.return_value = {'selfLink': 'http://foo'}
mock_api_tables_get.side_effect = datalab.utils.RequestException(404, 'failed')
mock_api_tabledata_insert_all.return_value = {}
mock_api_datasets_get.return_value = None
table = TestCases._create_table_with_schema(TestCases._create_inferred_schema())
df = TestCases._create_data_frame()
with self.assertRaises(Exception) as error:
table.insert_data(df)
self.assertEqual('Table %s does not exist.' % str(table), str(error.exception))
@mock.patch('uuid.uuid4')
@mock.patch('time.sleep')
@mock.patch('datalab.bigquery._api.Api.datasets_get')
@mock.patch('datalab.bigquery._api.Api.tables_list')
@mock.patch('datalab.bigquery._api.Api.tables_insert')
@mock.patch('datalab.bigquery._api.Api.tables_get')
@mock.patch('datalab.bigquery._api.Api.tabledata_insert_all')
def test_insert_data_missing_field(self,
mock_api_tabledata_insert_all,
mock_api_tables_get,
mock_api_tables_insert,
mock_api_tables_list,
mock_api_datasets_get,
mock_time_sleep,
mock_uuid,):
# Truncate the schema used when creating the table so we have an unmatched column in insert.
schema = TestCases._create_inferred_schema()[:2]
mock_uuid.return_value = TestCases._create_uuid()
mock_time_sleep.return_value = None
mock_api_datasets_get.return_value = None
mock_api_tables_insert.return_value = {'selfLink': 'http://foo'}
mock_api_tables_list.return_value = []
mock_api_tables_get.return_value = {'schema': {'fields': schema}}
mock_api_tabledata_insert_all.return_value = {}
table = TestCases._create_table_with_schema(schema)
df = TestCases._create_data_frame()
with self.assertRaises(Exception) as error:
table.insert_data(df)
self.assertEqual('Table does not contain field headers', str(error.exception))
@mock.patch('uuid.uuid4')
@mock.patch('time.sleep')
@mock.patch('datalab.bigquery._api.Api.tables_list')
@mock.patch('datalab.bigquery._api.Api.tables_insert')
@mock.patch('datalab.bigquery._api.Api.tables_get')
@mock.patch('datalab.bigquery._api.Api.tabledata_insert_all')
@mock.patch('datalab.bigquery._api.Api.datasets_get')
def test_insert_data_mismatched_schema(self,
mock_api_datasets_get,
mock_api_tabledata_insert_all,
mock_api_tables_get,
mock_api_tables_insert,
mock_api_tables_list,
mock_time_sleep,
mock_uuid):
# Change the schema used when creating the table so we get a mismatch when inserting.
schema = TestCases._create_inferred_schema()
schema[2]['type'] = 'STRING'
mock_uuid.return_value = TestCases._create_uuid()
mock_time_sleep.return_value = None
mock_api_tables_list.return_value = []
mock_api_tables_insert.return_value = {'selfLink': 'http://foo'}
mock_api_tables_get.return_value = {'schema': {'fields': schema}}
mock_api_tabledata_insert_all.return_value = {}
mock_api_datasets_get.return_value = None
table = TestCases._create_table_with_schema(schema)
df = TestCases._create_data_frame()
with self.assertRaises(Exception) as error:
table.insert_data(df)
self.assertEqual('Field headers in data has type FLOAT but in table has type STRING',
str(error.exception))
@mock.patch('uuid.uuid4')
@mock.patch('time.sleep')
@mock.patch('datalab.bigquery._api.Api.datasets_get')
@mock.patch('datalab.bigquery._api.Api.tables_list')
@mock.patch('datalab.bigquery._api.Api.tables_insert')
@mock.patch('datalab.bigquery._api.Api.tables_get')
@mock.patch('datalab.bigquery._api.Api.tabledata_insert_all')
def test_insert_data_dataframe(self,
mock_api_tabledata_insert_all,
mock_api_tables_get,
mock_api_tables_insert,
mock_api_tables_list,
mock_api_datasets_get,
mock_time_sleep, mock_uuid):
schema = TestCases._create_inferred_schema()
mock_uuid.return_value = TestCases._create_uuid()
mock_time_sleep.return_value = None
mock_api_datasets_get.return_value = True
mock_api_tables_list.return_value = []
mock_api_tables_insert.return_value = {'selfLink': 'http://foo'}
mock_api_tables_get.return_value = {'schema': {'fields': schema}}
mock_api_tabledata_insert_all.return_value = {}
table = TestCases._create_table_with_schema(schema)
df = TestCases._create_data_frame()
result = table.insert_data(df)
    self.assertIsNotNone(result, "insert_data should return the table object")
mock_api_tabledata_insert_all.assert_called_with(('test', 'testds', 'testTable0', ''), [
{'insertId': '#0', 'json': {u'column': 'r0', u'headers': 10.0, u'some': 0}},
{'insertId': '#1', 'json': {u'column': 'r1', u'headers': 10.0, u'some': 1}},
{'insertId': '#2', 'json': {u'column': 'r2', u'headers': 10.0, u'some': 2}},
{'insertId': '#3', 'json': {u'column': 'r3', u'headers': 10.0, u'some': 3}}
])
@mock.patch('uuid.uuid4')
@mock.patch('time.sleep')
@mock.patch('datalab.bigquery._api.Api.datasets_get')
@mock.patch('datalab.bigquery._api.Api.tables_list')
@mock.patch('datalab.bigquery._api.Api.tables_insert')
@mock.patch('datalab.bigquery._api.Api.tables_get')
@mock.patch('datalab.bigquery._api.Api.tabledata_insert_all')
def test_insert_data_dictlist(self,
mock_api_tabledata_insert_all,
mock_api_tables_get,
mock_api_tables_insert,
mock_api_tables_list,
mock_api_datasets_get,
mock_time_sleep, mock_uuid):
schema = TestCases._create_inferred_schema()
mock_uuid.return_value = TestCases._create_uuid()
mock_time_sleep.return_value = None
mock_api_datasets_get.return_value = True
mock_api_tables_list.return_value = []
mock_api_tables_insert.return_value = {'selfLink': 'http://foo'}
mock_api_tables_get.return_value = {'schema': {'fields': schema}}
mock_api_tabledata_insert_all.return_value = {}
table = TestCases._create_table_with_schema(schema)
result = table.insert_data([
{u'column': 'r0', u'headers': 10.0, u'some': 0},
{u'column': 'r1', u'headers': 10.0, u'some': 1},
{u'column': 'r2', u'headers': 10.0, u'some': 2},
{u'column': 'r3', u'headers': 10.0, u'some': 3}
])
    self.assertIsNotNone(result, "insert_data should return the table object")
mock_api_tabledata_insert_all.assert_called_with(('test', 'testds', 'testTable0', ''), [
{'insertId': '#0', 'json': {u'column': 'r0', u'headers': 10.0, u'some': 0}},
{'insertId': '#1', 'json': {u'column': 'r1', u'headers': 10.0, u'some': 1}},
{'insertId': '#2', 'json': {u'column': 'r2', u'headers': 10.0, u'some': 2}},
{'insertId': '#3', 'json': {u'column': 'r3', u'headers': 10.0, u'some': 3}}
])
@mock.patch('uuid.uuid4')
@mock.patch('time.sleep')
@mock.patch('datalab.bigquery._api.Api.datasets_get')
@mock.patch('datalab.bigquery._api.Api.tables_list')
@mock.patch('datalab.bigquery._api.Api.tables_insert')
@mock.patch('datalab.bigquery._api.Api.tables_get')
@mock.patch('datalab.bigquery._api.Api.tabledata_insert_all')
def test_insert_data_dictlist_index(self,
mock_api_tabledata_insert_all,
mock_api_tables_get,
mock_api_tables_insert,
mock_api_tables_list,
mock_api_datasets_get,
mock_time_sleep, mock_uuid):
schema = TestCases._create_inferred_schema('Index')
mock_uuid.return_value = TestCases._create_uuid()
mock_time_sleep.return_value = None
mock_api_datasets_get.return_value = True
mock_api_tables_list.return_value = []
mock_api_tables_insert.return_value = {'selfLink': 'http://foo'}
mock_api_tables_get.return_value = {'schema': {'fields': schema}}
mock_api_tabledata_insert_all.return_value = {}
table = TestCases._create_table_with_schema(schema)
result = table.insert_data([
{u'column': 'r0', u'headers': 10.0, u'some': 0},
{u'column': 'r1', u'headers': 10.0, u'some': 1},
{u'column': 'r2', u'headers': 10.0, u'some': 2},
{u'column': 'r3', u'headers': 10.0, u'some': 3}
], include_index=True)
    self.assertIsNotNone(result, "insert_data should return the table object")
mock_api_tabledata_insert_all.assert_called_with(('test', 'testds', 'testTable0', ''), [
{'insertId': '#0', 'json': {u'column': 'r0', u'headers': 10.0, u'some': 0, 'Index': 0}},
{'insertId': '#1', 'json': {u'column': 'r1', u'headers': 10.0, u'some': 1, 'Index': 1}},
{'insertId': '#2', 'json': {u'column': 'r2', u'headers': 10.0, u'some': 2, 'Index': 2}},
{'insertId': '#3', 'json': {u'column': 'r3', u'headers': 10.0, u'some': 3, 'Index': 3}}
])
@mock.patch('uuid.uuid4')
@mock.patch('time.sleep')
@mock.patch('datalab.bigquery._api.Api.datasets_get')
@mock.patch('datalab.bigquery._api.Api.tables_list')
@mock.patch('datalab.bigquery._api.Api.tables_insert')
@mock.patch('datalab.bigquery._api.Api.tables_get')
@mock.patch('datalab.bigquery._api.Api.tabledata_insert_all')
def test_insert_data_dictlist_named_index(self,
mock_api_tabledata_insert_all,
mock_api_tables_get,
mock_api_tables_insert,
mock_api_tables_list,
mock_api_datasets_get,
mock_time_sleep, mock_uuid):
schema = TestCases._create_inferred_schema('Row')
mock_uuid.return_value = TestCases._create_uuid()
mock_time_sleep.return_value = None
mock_api_datasets_get.return_value = True
mock_api_tables_list.return_value = []
mock_api_tables_insert.return_value = {'selfLink': 'http://foo'}
mock_api_tables_get.return_value = {'schema': {'fields': schema}}
mock_api_tabledata_insert_all.return_value = {}
table = TestCases._create_table_with_schema(schema)
result = table.insert_data([
{u'column': 'r0', u'headers': 10.0, u'some': 0},
{u'column': 'r1', u'headers': 10.0, u'some': 1},
{u'column': 'r2', u'headers': 10.0, u'some': 2},
{u'column': 'r3', u'headers': 10.0, u'some': 3}
], include_index=True, index_name='Row')
    self.assertIsNotNone(result, "insert_data should return the table object")
mock_api_tabledata_insert_all.assert_called_with(('test', 'testds', 'testTable0', ''), [
{'insertId': '#0', 'json': {u'column': 'r0', u'headers': 10.0, u'some': 0, 'Row': 0}},
{'insertId': '#1', 'json': {u'column': 'r1', u'headers': 10.0, u'some': 1, 'Row': 1}},
{'insertId': '#2', 'json': {u'column': 'r2', u'headers': 10.0, u'some': 2, 'Row': 2}},
{'insertId': '#3', 'json': {u'column': 'r3', u'headers': 10.0, u'some': 3, 'Row': 3}}
])
@mock.patch('datalab.bigquery._api.Api.tables_get')
@mock.patch('datalab.bigquery._api.Api.jobs_insert_load')
@mock.patch('datalab.bigquery._api.Api.jobs_get')
def test_table_load(self, mock_api_jobs_get, mock_api_jobs_insert_load, mock_api_tables_get):
schema = TestCases._create_inferred_schema('Row')
mock_api_jobs_get.return_value = {'status': {'state': 'DONE'}}
mock_api_jobs_insert_load.return_value = None
mock_api_tables_get.return_value = {'schema': {'fields': schema}}
tbl = datalab.bigquery.Table('testds.testTable0', context=TestCases._create_context())
job = tbl.load('gs://foo')
self.assertIsNone(job)
mock_api_jobs_insert_load.return_value = {'jobReference': {'jobId': 'bar'}}
job = tbl.load('gs://foo')
self.assertEquals('bar', job.id)
@mock.patch('datalab.bigquery._api.Api.table_extract')
@mock.patch('datalab.bigquery._api.Api.jobs_get')
@mock.patch('datalab.bigquery._api.Api.tables_get')
def test_table_extract(self, mock_api_tables_get, mock_api_jobs_get, mock_api_table_extract):
mock_api_tables_get.return_value = {}
mock_api_jobs_get.return_value = {'status': {'state': 'DONE'}}
mock_api_table_extract.return_value = None
tbl = datalab.bigquery.Table('testds.testTable0', context=self._create_context())
job = tbl.extract('gs://foo')
self.assertIsNone(job)
mock_api_table_extract.return_value = {'jobReference': {'jobId': 'bar'}}
job = tbl.extract('gs://foo')
self.assertEquals('bar', job.id)
@mock.patch('datalab.bigquery._api.Api.tabledata_list')
@mock.patch('datalab.bigquery._api.Api.tables_get')
def test_table_to_dataframe(self, mock_api_tables_get, mock_api_tabledata_list):
schema = self._create_inferred_schema()
mock_api_tables_get.return_value = {'schema': {'fields': schema}}
mock_api_tabledata_list.return_value = {
'rows': [
{'f': [{'v': 1}, {'v': 'foo'}, {'v': 3.1415}]},
{'f': [{'v': 2}, {'v': 'bar'}, {'v': 0.5}]},
]
}
tbl = datalab.bigquery.Table('testds.testTable0', context=TestCases._create_context())
df = tbl.to_dataframe()
self.assertEquals(2, len(df))
self.assertEquals(1, df['some'][0])
self.assertEquals(2, df['some'][1])
self.assertEquals('foo', df['column'][0])
self.assertEquals('bar', df['column'][1])
self.assertEquals(3.1415, df['headers'][0])
self.assertEquals(0.5, df['headers'][1])
def test_encode_dict_as_row(self):
when = dt.datetime(2001, 2, 3, 4, 5, 6, 7)
row = datalab.bigquery.Table._encode_dict_as_row({'fo@o': 'b@r', 'b+ar': when}, {})
self.assertEqual({'foo': 'b@r', 'bar': '2001-02-03T04:05:06.000007'}, row)
def test_decorators(self):
tbl = datalab.bigquery.Table('testds.testTable0', context=TestCases._create_context())
tbl2 = tbl.snapshot(dt.timedelta(hours=-1))
self.assertEquals('test:testds.testTable0@-3600000', str(tbl2))
with self.assertRaises(Exception) as error:
tbl2 = tbl2.snapshot(dt.timedelta(hours=-2))
self.assertEqual('Cannot use snapshot() on an already decorated table',
str(error.exception))
with self.assertRaises(Exception) as error:
tbl2.window(dt.timedelta(hours=-2), 0)
self.assertEqual('Cannot use window() on an already decorated table',
str(error.exception))
with self.assertRaises(Exception) as error:
tbl.snapshot(dt.timedelta(days=-8))
self.assertEqual(
'Invalid snapshot relative when argument: must be within 7 days: -8 days, 0:00:00',
str(error.exception))
with self.assertRaises(Exception) as error:
tbl.snapshot(dt.timedelta(days=-8))
self.assertEqual(
'Invalid snapshot relative when argument: must be within 7 days: -8 days, 0:00:00',
str(error.exception))
tbl2 = tbl.snapshot(dt.timedelta(days=-1))
self.assertEquals('test:testds.testTable0@-86400000', str(tbl2))
with self.assertRaises(Exception) as error:
tbl.snapshot(dt.timedelta(days=1))
self.assertEqual('Invalid snapshot relative when argument: 1 day, 0:00:00',
str(error.exception))
with self.assertRaises(Exception) as error:
tbl2 = tbl.snapshot(1000)
self.assertEqual('Invalid snapshot when argument type: 1000',
str(error.exception))
self.assertEquals('test:testds.testTable0@-86400000', str(tbl2))
when = dt.datetime.utcnow() + dt.timedelta(1)
with self.assertRaises(Exception) as error:
tbl.snapshot(when)
self.assertEqual('Invalid snapshot absolute when argument: %s' % when,
str(error.exception))
when = dt.datetime.utcnow() - dt.timedelta(8)
with self.assertRaises(Exception) as error:
tbl.snapshot(when)
self.assertEqual('Invalid snapshot absolute when argument: %s' % when,
str(error.exception))
def test_window_decorators(self):
    # The snapshot ('@' decorator) tests above already cover many of the conversion cases. The extra things we
# have to test are that we can use two values, we get a meaningful default for the second
# if we pass None, and that the first time comes before the second.
tbl = datalab.bigquery.Table('testds.testTable0', context=TestCases._create_context())
tbl2 = tbl.window(dt.timedelta(hours=-1))
self.assertEquals('test:testds.testTable0@-3600000-0', str(tbl2))
with self.assertRaises(Exception) as error:
tbl2 = tbl2.window(-400000, 0)
self.assertEqual('Cannot use window() on an already decorated table',
str(error.exception))
with self.assertRaises(Exception) as error:
tbl2.snapshot(-400000)
self.assertEqual('Cannot use snapshot() on an already decorated table',
str(error.exception))
with self.assertRaises(Exception) as error:
tbl.window(dt.timedelta(0), dt.timedelta(hours=-1))
self.assertEqual(
'window: Between arguments: begin must be before end: 0:00:00, -1 day, 23:00:00',
str(error.exception))
@mock.patch('datalab.bigquery._api.Api.tables_get')
@mock.patch('datalab.bigquery._api.Api.table_update')
def test_table_update(self, mock_api_table_update, mock_api_tables_get):
schema = self._create_inferred_schema()
info = {'schema': {'fields': schema}, 'friendlyName': 'casper',
'description': 'ghostly logs',
'expirationTime': calendar.timegm(dt.datetime(2020, 1, 1).utctimetuple()) * 1000}
mock_api_tables_get.return_value = info
tbl = datalab.bigquery.Table('testds.testTable0', context=TestCases._create_context())
new_name = 'aziraphale'
new_description = 'demon duties'
new_schema = [{'name': 'injected', 'type': 'FLOAT'}]
new_schema.extend(schema)
new_expiry = dt.datetime(2030, 1, 1)
tbl.update(new_name, new_description, new_expiry, new_schema)
name, info = mock_api_table_update.call_args[0]
self.assertEqual(tbl.name, name)
self.assertEqual(new_name, tbl.metadata.friendly_name)
self.assertEqual(new_description, tbl.metadata.description)
self.assertEqual(new_expiry, tbl.metadata.expires_on)
self.assertEqual(len(new_schema), len(tbl.schema))
def test_table_to_query(self):
tbl = datalab.bigquery.Table('testds.testTable0', context=TestCases._create_context())
q = tbl.to_query()
self.assertEqual('SELECT * FROM [test:testds.testTable0]', q.sql)
q = tbl.to_query('foo, bar')
self.assertEqual('SELECT foo, bar FROM [test:testds.testTable0]', q.sql)
q = tbl.to_query(['bar', 'foo'])
self.assertEqual('SELECT bar,foo FROM [test:testds.testTable0]', q.sql)
@staticmethod
def _create_context():
project_id = 'test'
creds = AccessTokenCredentials('test_token', 'test_ua')
return datalab.context.Context(project_id, creds)
@staticmethod
def _create_table(name):
return datalab.bigquery.Table(name, TestCases._create_context())
@staticmethod
def _create_table_info_result(ts=None):
if ts is None:
ts = dt.datetime.utcnow()
epoch = dt.datetime.utcfromtimestamp(0)
timestamp = (ts - epoch).total_seconds() * 1000
return {
'description': 'Daily Logs Table',
'friendlyName': 'Logs',
'numBytes': 1000,
'numRows': 2,
'creationTime': timestamp,
'lastModifiedTime': timestamp,
'schema': {
'fields': [
{'name': 'name', 'type': 'STRING', 'mode': 'NULLABLE'},
{'name': 'val', 'type': 'INTEGER', 'mode': 'NULLABLE'}
]
}
}
@staticmethod
def _create_table_info_nested_schema_result(ts=None):
if ts is None:
ts = dt.datetime.utcnow()
epoch = dt.datetime.utcfromtimestamp(0)
timestamp = (ts - epoch).total_seconds() * 1000
return {
'description': 'Daily Logs Table',
'friendlyName': 'Logs',
'numBytes': 1000,
'numRows': 2,
'creationTime': timestamp,
'lastModifiedTime': timestamp,
'schema': {
'fields': [
{'name': 'name', 'type': 'STRING', 'mode': 'NULLABLE'},
{'name': 'val', 'type': 'INTEGER', 'mode': 'NULLABLE'},
{'name': 'more', 'type': 'RECORD', 'mode': 'REPEATED',
'fields': [
{'name': 'xyz', 'type': 'INTEGER', 'mode': 'NULLABLE'}
]
}
]
}
}
@staticmethod
def _create_dataset(dataset_id):
return datalab.bigquery.Dataset(dataset_id, context=TestCases._create_context())
@staticmethod
def _create_table_list_result():
return {
'tables': [
{
'type': 'TABLE',
'tableReference': {'projectId': 'test', 'datasetId': 'testds', 'tableId': 'testTable1'}
},
{
'type': 'VIEW',
'tableReference': {'projectId': 'test', 'datasetId': 'testds', 'tableId': 'testView1'}
},
{
'type': 'TABLE',
'tableReference': {'projectId': 'test', 'datasetId': 'testds', 'tableId': 'testTable2'}
}
]
}
@staticmethod
def _create_table_list_empty_result():
return {
'tables': []
}
@staticmethod
def _create_data_frame():
data = {
'some': [
0, 1, 2, 3
],
'column': [
'r0', 'r1', 'r2', 'r3'
],
'headers': [
10.0, 10.0, 10.0, 10.0
]
}
return pandas.DataFrame(data)
@staticmethod
def _create_inferred_schema(extra_field=None):
schema = [
{'name': 'some', 'type': 'INTEGER'},
{'name': 'column', 'type': 'STRING'},
{'name': 'headers', 'type': 'FLOAT'},
]
if extra_field:
schema.append({'name': extra_field, 'type': 'INTEGER'})
return schema
@staticmethod
def _create_table_with_schema(schema, name='test:testds.testTable0'):
return datalab.bigquery.Table(name, TestCases._create_context()).create(schema)
class _uuid(object):
@property
def hex(self):
return '#'
@staticmethod
def _create_uuid():
return TestCases._uuid()
| apache-2.0 |
jlandmann/oggm | oggm/tests/test_utils.py | 2 | 14316 | from __future__ import division
import warnings
warnings.filterwarnings("once", category=DeprecationWarning)
import unittest
import os
import time
import shutil
import salem
import numpy as np
import pandas as pd
from numpy.testing import assert_array_equal, assert_allclose
import oggm
from oggm import utils
from oggm import cfg
from oggm.tests import is_download
# Globals
TEST_DIR = os.path.join(cfg.PATHS['test_dir'], 'tmp_download')
utils.mkdir(TEST_DIR)
# In case some logging happens or so
cfg.PATHS['working_dir'] = cfg.PATHS['test_dir']
class TestFuncs(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_signchange(self):
ts = pd.Series([-2., -1., 1., 2., 3], index=np.arange(5))
sc = utils.signchange(ts)
assert_array_equal(sc, [0, 0, 1, 0, 0])
ts = pd.Series([-2., -1., 1., 2., 3][::-1], index=np.arange(5))
sc = utils.signchange(ts)
assert_array_equal(sc, [0, 0, 0, 1, 0])
def test_smooth(self):
a = np.array([1., 4, 7, 7, 4, 1])
b = utils.smooth1d(a, 3, kernel='mean')
assert_allclose(b, [3, 4, 6, 6, 4, 3])
kernel = [0.60653066, 1., 0.60653066]
b = utils.smooth1d(a, 3, kernel=kernel)
c = utils.smooth1d(a, 3)
assert_allclose(b, c)
def test_filter_rgi_name(self):
name = 'Tustumena Glacier \x9c'
expected = 'Tustumena Glacier'
        self.assertEqual(utils.filter_rgi_name(name), expected)
name = 'Hintereisferner À'
expected = 'Hintereisferner'
        self.assertEqual(utils.filter_rgi_name(name), expected)
name = 'SPECIAL GLACIER 3'
expected = 'Special Glacier'
        self.assertEqual(utils.filter_rgi_name(name), expected)
def test_year_to_date(self):
r = utils.year_to_date(0)
self.assertEqual(r, (0, 1))
y, m = utils.year_to_date([0, 1])
np.testing.assert_array_equal(y, [0, 1])
np.testing.assert_array_equal(m, [1, 1])
y, m = utils.year_to_date([0.00001, 1.00001])
np.testing.assert_array_equal(y, [0, 1])
np.testing.assert_array_equal(m, [1, 1])
y, m = utils.year_to_date([0.99999, 1.99999])
np.testing.assert_array_equal(y, [0, 1])
np.testing.assert_array_equal(m, [12, 12])
yr = 1998 + cfg.CUMSEC_IN_MONTHS[2] / cfg.SEC_IN_YEAR
r = utils.year_to_date(yr)
self.assertEqual(r, (1998, 4))
yr = 1998 + (cfg.CUMSEC_IN_MONTHS[2] - 1) / cfg.SEC_IN_YEAR
r = utils.year_to_date(yr)
self.assertEqual(r, (1998, 3))
def test_date_to_year(self):
r = utils.date_to_year(0, 1)
self.assertEqual(r, 0)
r = utils.date_to_year(1, 1)
self.assertEqual(r, 1)
r = utils.date_to_year([0, 1], [1, 1])
np.testing.assert_array_equal(r, [0, 1])
yr = utils.date_to_year([1998, 1998], [6, 7])
y, m = utils.year_to_date(yr)
np.testing.assert_array_equal(y, [1998, 1998])
np.testing.assert_array_equal(m, [6, 7])
yr = utils.date_to_year([1998, 1998], [2, 3])
y, m = utils.year_to_date(yr)
np.testing.assert_array_equal(y, [1998, 1998])
np.testing.assert_array_equal(m, [2, 3])
time = pd.date_range('1/1/1800', periods=300*12, freq='MS')
yr = utils.date_to_year(time.year, time.month)
y, m = utils.year_to_date(yr)
np.testing.assert_array_equal(y, time.year)
np.testing.assert_array_equal(m, time.month)
myr = utils.monthly_timeseries(1800, 2099)
y, m = utils.year_to_date(myr)
np.testing.assert_array_equal(y, time.year)
np.testing.assert_array_equal(m, time.month)
myr = utils.monthly_timeseries(1800, ny=300)
y, m = utils.year_to_date(myr)
np.testing.assert_array_equal(y, time.year)
np.testing.assert_array_equal(m, time.month)
time = pd.period_range('0001-01', '3000-12', freq='M')
myr = utils.monthly_timeseries(1, 3000)
y, m = utils.year_to_date(myr)
np.testing.assert_array_equal(y, time.year)
np.testing.assert_array_equal(m, time.month)
with self.assertRaises(ValueError):
utils.monthly_timeseries(1)
class TestInitialize(unittest.TestCase):
def setUp(self):
cfg.initialize()
self.homedir = os.path.expanduser('~')
def test_defaults(self):
expected = os.path.join(self.homedir, 'OGGM_WORKING_DIRECTORY')
self.assertEqual(cfg.PATHS['working_dir'], expected)
def test_pathsetter(self):
cfg.PATHS['working_dir'] = os.path.join('~', 'my_OGGM_wd')
expected = os.path.join(self.homedir, 'my_OGGM_wd')
self.assertEqual(cfg.PATHS['working_dir'], expected)
class TestDataFiles(unittest.TestCase):
def setUp(self):
cfg.initialize()
cfg.PATHS['dl_cache_dir'] = os.path.join(TEST_DIR, 'dl_cache')
cfg.PATHS['working_dir'] = os.path.join(TEST_DIR, 'wd')
cfg.PATHS['tmp_dir'] = os.path.join(TEST_DIR, 'extract')
self.reset_dir()
def tearDown(self):
if os.path.exists(TEST_DIR):
shutil.rmtree(TEST_DIR)
def reset_dir(self):
if os.path.exists(TEST_DIR):
shutil.rmtree(TEST_DIR)
utils.mkdir(cfg.PATHS['dl_cache_dir'])
utils.mkdir(cfg.PATHS['working_dir'])
utils.mkdir(cfg.PATHS['tmp_dir'])
def test_download_demo_files(self):
f = utils.get_demo_file('Hintereisferner.shp')
self.assertTrue(os.path.exists(f))
sh = salem.read_shapefile(f)
self.assertTrue(hasattr(sh, 'geometry'))
# Data files
cfg.initialize()
lf, df = utils.get_wgms_files()
self.assertTrue(os.path.exists(lf))
lf = utils.get_glathida_file()
self.assertTrue(os.path.exists(lf))
def test_srtmzone(self):
z = utils.srtm_zone(lon_ex=[-112, -112], lat_ex=[57, 57])
self.assertTrue(len(z) == 1)
self.assertEqual('14_01', z[0])
z = utils.srtm_zone(lon_ex=[-72, -73], lat_ex=[-52, -53])
self.assertTrue(len(z) == 1)
self.assertEqual('22_23', z[0])
# Alps
ref = sorted(['39_04', '38_03', '38_04', '39_03'])
z = utils.srtm_zone(lon_ex=[6, 14], lat_ex=[41, 48])
self.assertTrue(len(z) == 4)
self.assertEqual(ref, z)
def test_asterzone(self):
z, u = utils.aster_zone(lon_ex=[137.5, 137.5],
lat_ex=[-72.5, -72.5])
self.assertTrue(len(z) == 1)
self.assertTrue(len(u) == 1)
self.assertEqual('S73E137', z[0])
self.assertEqual('S75E135', u[0])
        z, u = utils.aster_zone(lon_ex=[-95.5, -95.5],
                                lat_ex=[30.5, 30.5])
self.assertTrue(len(z) == 1)
self.assertTrue(len(u) == 1)
self.assertEqual('N30W096', z[0])
self.assertEqual('N30W100', u[0])
        z, u = utils.aster_zone(lon_ex=[-96.5, -95.5],
                                lat_ex=[30.5, 30.5])
self.assertTrue(len(z) == 2)
self.assertTrue(len(u) == 2)
self.assertEqual('N30W096', z[1])
self.assertEqual('N30W100', u[1])
self.assertEqual('N30W097', z[0])
self.assertEqual('N30W100', u[0])
def test_dem3_viewpano_zone(self):
test_loc = {'ISL': [-25., -12., 63., 67.], # Iceland
'SVALBARD': [10., 34., 76., 81.],
'JANMAYEN': [-10., -7., 70., 72.],
'FJ': [36., 66., 79., 82.], # Franz Josef Land
'FAR': [-8., -6., 61., 63.], # Faroer
'BEAR': [18., 20., 74., 75.], # Bear Island
'SHL': [-3., 0., 60., 61.], # Shetland
# Antarctica tiles as UTM zones, FILES ARE LARGE!!!!!
# '01-15': [-180., -91., -90, -60.],
# '16-30': [-91., -1., -90., -60.],
# '31-45': [-1., 89., -90., -60.],
# '46-60': [89., 189., -90., -60.],
# Greenland tiles
# 'GL-North': [-78., -11., 75., 84.],
# 'GL-West': [-68., -42., 64., 76.],
# 'GL-South': [-52., -40., 59., 64.],
# 'GL-East': [-42., -17., 64., 76.]
}
# special names
for key in test_loc:
z = utils.dem3_viewpano_zone([test_loc[key][0], test_loc[key][1]],
[test_loc[key][2], test_loc[key][3]])
self.assertTrue(len(z) == 1)
self.assertEqual(key, z[0])
# weird Antarctica tile
# z = utils.dem3_viewpano_zone([-91., -90.], [-72., -68.])
# self.assertTrue(len(z) == 1)
# self.assertEqual('SR15', z[0])
# normal tile
z = utils.dem3_viewpano_zone([-179., -178.], [65., 65.])
self.assertTrue(len(z) == 1)
self.assertEqual('Q01', z[0])
# normal tile
z = utils.dem3_viewpano_zone([107, 107], [69, 69])
self.assertTrue(len(z) == 1)
self.assertEqual('R48', z[0])
# Alps
ref = sorted(['K31', 'K32', 'K33', 'L31', 'L32',
'L33', 'M31', 'M32', 'M33'])
z = utils.dem3_viewpano_zone([6, 14], [41, 48])
self.assertTrue(len(z) == 9)
self.assertEqual(ref, z)
def test_lrufilecache(self):
f1 = os.path.join(TEST_DIR, 'f1.txt')
f2 = os.path.join(TEST_DIR, 'f2.txt')
f3 = os.path.join(TEST_DIR, 'f3.txt')
open(f1, 'a').close()
open(f2, 'a').close()
open(f3, 'a').close()
assert os.path.exists(f1)
lru = utils.LRUFileCache(maxsize=2)
lru.append(f1)
assert os.path.exists(f1)
lru.append(f2)
assert os.path.exists(f1)
lru.append(f3)
assert not os.path.exists(f1)
assert os.path.exists(f2)
lru.append(f2)
assert os.path.exists(f2)
open(f1, 'a').close()
lru = utils.LRUFileCache(l0=[f2, f3], maxsize=2)
assert os.path.exists(f1)
assert os.path.exists(f2)
assert os.path.exists(f3)
lru.append(f1)
assert os.path.exists(f1)
assert not os.path.exists(f2)
assert os.path.exists(f3)
def test_lruhandler(self):
self.reset_dir()
f1 = os.path.join(TEST_DIR, 'f1.txt')
f2 = os.path.join(TEST_DIR, 'f2.txt')
f3 = os.path.join(TEST_DIR, 'f3.txt')
open(f1, 'a').close()
time.sleep(0.1)
open(f2, 'a').close()
time.sleep(0.1)
open(f3, 'a').close()
l = cfg.get_lru_handler(TEST_DIR, maxsize=3, ending='.txt')
assert os.path.exists(f1)
assert os.path.exists(f2)
assert os.path.exists(f3)
l = cfg.get_lru_handler(TEST_DIR, maxsize=2, ending='.txt')
assert not os.path.exists(f1)
assert os.path.exists(f2)
assert os.path.exists(f3)
@is_download
def test_srtmdownload(self):
# this zone does exist and file should be small enough for download
zone = '68_11'
fp = utils._download_srtm_file(zone)
self.assertTrue(os.path.exists(fp))
fp = utils._download_srtm_file(zone)
self.assertTrue(os.path.exists(fp))
@is_download
def test_srtmdownloadfails(self):
# this zone does not exist
zone = '41_20'
self.assertTrue(utils._download_srtm_file(zone) is None)
@is_download
def test_asterdownload(self):
# this zone does exist and file should be small enough for download
zone = 'S73E137'
unit = 'S75E135'
fp = utils._download_aster_file(zone, unit)
self.assertTrue(os.path.exists(fp))
@is_download
def test_gimp(self):
fp, z = utils.get_topo_file([], [], rgi_region=5)
self.assertTrue(os.path.exists(fp[0]))
self.assertEqual(z, 'GIMP')
@is_download
def test_iceland(self):
fp, z = utils.get_topo_file([-20, -20], [65, 65])
self.assertTrue(os.path.exists(fp[0]))
@is_download
def test_asterdownloadfails(self):
# this zone does not exist
zone = 'bli'
unit = 'S75E135'
self.assertTrue(utils._download_aster_file(zone, unit) is None)
@is_download
def test_alternatedownload(self):
# this is a simple file
fp = utils._download_alternate_topo_file('iceland.tif')
self.assertTrue(os.path.exists(fp))
@is_download
def test_download_cru(self):
tmp = cfg.PATHS['cru_dir']
cfg.PATHS['cru_dir'] = os.path.join(TEST_DIR, 'cru_extract')
of = utils.get_cru_file('tmp')
self.assertTrue(os.path.exists(of))
cfg.PATHS['cru_dir'] = tmp
@is_download
def test_download_rgi(self):
tmp = cfg.PATHS['rgi_dir']
cfg.PATHS['rgi_dir'] = os.path.join(TEST_DIR, 'rgi_extract')
of = utils.get_rgi_dir()
of = os.path.join(of, '01_rgi50_Alaska', '01_rgi50_Alaska.shp')
self.assertTrue(os.path.exists(of))
cfg.PATHS['rgi_dir'] = tmp
@is_download
def test_download_dem3_viewpano(self):
# this zone does exist and file should be small enough for download
zone = 'L32'
fp = utils._download_dem3_viewpano(zone)
self.assertTrue(os.path.exists(fp))
zone = 'U44'
fp = utils._download_dem3_viewpano(zone)
self.assertTrue(os.path.exists(fp))
@is_download
def test_download_dem3_viewpano_fails(self):
# this zone does not exist
zone = 'dummy'
fp = utils._download_dem3_viewpano(zone)
self.assertTrue(fp is None)
@is_download
def test_auto_topo(self):
# Test for combine
fdem, src = utils.get_topo_file([6, 14], [41, 41])
self.assertEqual(src, 'SRTM')
self.assertEqual(len(fdem), 2)
for fp in fdem:
self.assertTrue(os.path.exists(fp))
fdem, src = utils.get_topo_file([-143, -131], [61, 61])
self.assertEqual(src, 'DEM3')
self.assertEqual(len(fdem), 3)
for fp in fdem:
self.assertTrue(os.path.exists(fp))
| gpl-3.0 |
johnmgregoire/NanoCalorimetry | PnSC_dataimport.py | 1 | 24610 | import pylab
import matplotlib.cm as cm
import numpy
import h5py
import os, os.path, time, copy
import struct
from PnSC_ui import *
from PnSC_math import *
from PnSC_h5io import *
def getemptybatchattrdict():
batchattrdict={}
for k in ['grpname', 'protname','path','durSpinBox','nnoiseSpinBox', 'naboveSpinBox', 'nsigSpinBox', 'firstderptsSpinBox','secderptsSpinBox','secdervalSpinBox','savegrpname']:
batchattrdict[k]=None
return batchattrdict
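# Illustrative usage sketch (not part of the original module): a batch dictionary is
# filled with the keys above and handed to FileImport below; the file name here is
# only a hypothetical placeholder.
#   batch = getemptybatchattrdict()
#   batch['path'] = '/some/dir/cell29_run_1_of_1.dat'
#   ans = FileImport(parent, 'JimDAQ2011_SC', batchattrdict=batch)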
def FileImport(parent, protocolname, batchattrdict=None):
if 'PatDAQ' in protocolname:
fn='.dat'
ms='Select .dat text file'
else:
fn='.dat'
ms='Select a data file. If >1 cycle, choose file for only one cycle'
if batchattrdict is None:
p=mygetopenfile(parent=parent, markstr=ms, filename=fn)
else:
p=batchattrdict['path']
if p=='':
return False
ans=FileFormatFunctionLibrary[protocolname](parent, p, batchattrdict=batchattrdict)
if not ans:
return ans
AttrDict, DataSetDict, SegmentData=ans
AttrDict['importpath']=str(p)
AttrDict['protocolname']=protocolname
if batchattrdict is None:
if not importcheck(parent, AttrDict, title='Attributes of the Heat Program'):
return False
for nam, (d, arr) in DataSetDict.iteritems():
if not importcheck(parent, d, arr=arr, title='Dataset %s' %nam):
return False
else:
for k in AttrDict.keys():
if k in batchattrdict.keys():
print 'replacing AttrDict key %s from %s to %s' %(k, `AttrDict[k]`, `batchattrdict[k]`)
AttrDict[k]=copy.copy(batchattrdict[k])
return AttrDict, DataSetDict, SegmentData
def importcheck(parent, AttrDict, arr=None, title=''):#AttrDict is a pointer to a dictionary that may be changed
repeat=True
count=0
while repeat:
if count==1:
title='PLEASE CONFIRM CHANGES! '+title
idialog=attreditorDialog(parent, AttrDict, arr=arr, title=title)
if not idialog.exec_():
return False
if idialog.edited:
for k, v in idialog.attrd.iteritems():
AttrDict[k]=v
else:
repeat=False
count+=1
return True
def checkfornegvoltage(arr):
x=arr.flatten()
pos=len(numpy.where(x>0.)[0])
neg=len(numpy.where(x<0.)[0])
flip=neg>(1.1*pos)
return -2*flip+1
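# Illustrative example (not part of the original module): the return value is used as
# a sign for the voltage unit; -1 when negative samples outnumber positive ones by >10%.
#   checkfornegvoltage(numpy.array([-1., -2., 3.]))  # -> -1
#   checkfornegvoltage(numpy.array([1., 2., -3.]))   # -> 1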
def nanhandler(s):
if ('NaN' in s):
return numpy.nan
try:
return eval(s)
except:
return numpy.nan
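# Illustrative example (not part of the original module):
#   nanhandler('3.5')  # -> 3.5
#   nanhandler('NaN')  # -> numpy.nan
#   nanhandler('abc')  # -> numpy.nan (eval fails)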
def truncate_arrlist_shortest(arrlist):
ln=numpy.uint32([arr.shape for arr in arrlist]).T
ind=(numpy.uint32(range(l.min())) for l in ln)
for i, l in enumerate(ln):
l=l.min()
arrlist=[arr.take(range(l), axis=i) for arr in arrlist]
return numpy.array(arrlist)
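# Illustrative example (not part of the original module): every array is cut to the
# shortest size along each axis and the results are stacked, e.g. arrays of shapes
# (3, 5) and (4, 4) both become (3, 4) and the returned array has shape (2, 3, 4).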
def JimDAQ_getcell(filepath):
folderpath, fn=os.path.split(filepath)
while len(fn)>0 and not (fn[0].isdigit() and fn[0]!='0'): #skip any leading zeros to avoid eval problem
fn=fn[1:]
s=''
while len(fn)>0 and fn[0].isdigit():
s+=fn[0]
fn=fn[1:]
try:
return eval(s)
except:
return 0
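# Illustrative example (not part of the original module): leading non-digits and zeros
# are skipped, so for a file name like 'cell29_10Ohm_..._1_of_1.dat' (see the commented
# path near the end of this module) the returned cell number is 29.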
#The data reading functions should return 3 things, 0: a dictionary containing the attr for the heat program group, 1: a dictionary with key=datasetname, and val=tuple with 0th element an attr dict and 1st element the array, 2:(ms array, mA array) for segments
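# For example (hypothetical values, see JimDAQ_SC below), a reader typically returns:
#   AttrDict    = {'daqHz': 100000., 'ncycles': 2, 'CELLNUMBER': 29, ...}
#   DataSetDict = {'samplecurrent': ({'Aunit': 0.001}, arr),
#                  'samplevoltage': ({'Vunit': 0.001}, arr)}
#   SegmentData = (ms_array, mA_array), or ([], []) when no segment info is available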
def JimDAQ_SC(parent, filepath, batchattrdict=None):
dlist, arrlist=JimDAQ_fileiterator(filepath)
arr=truncate_arrlist_shortest(arrlist)
print arr.shape
d=dlist[0]
d['daqHz']=100000.
d['ncycles']=len(dlist)
ds={}
ds['samplecurrent']=({'Aunit':0.001}, arr[:, 0, :])
ds['samplevoltage']=({'Vunit':checkfornegvoltage(arr[:, 3, :])*0.001}, arr[:, 3, :])
d['CELLNUMBER']=JimDAQ_getcell(filepath)
d['ambient_atmosphere']='vacuum'
d['ambient_tempC']=20.
return d, ds, [[], []]
def JimDAQ_fileiterator(filepath):#filepath is a .dat file from any cycle
folderpath, filename=os.path.split(filepath)
a, c=os.path.splitext(filename)
a, b, n=a.rpartition('_of_')
a, atemp, i=a.rpartition('_')
a+=atemp
i=eval(i)
n=eval(n)
filelist=os.listdir(folderpath)
dlist=[]
arrlist=[]
for cnum in range(1, n+1):
fn='%s%d%s%d%s' %(a, cnum, b, n, c)
if fn in filelist:
p=os.path.join(folderpath, fn)
print 'reading: ', p
t1, t2=readdat_JimDAQ(p)
dlist+=[t1]
arrlist+=[t2]
return dlist, arrlist
def readdat_JimDAQ(path):
d={}
f=open(path, mode='r')
lines=f.readlines()
f.close()
a, b, c=lines[1].partition(':')
d['epoch']=time.mktime(time.strptime(c.strip(),'%a, %m/%d/%Y %I:%M:%S %p'))
a, b, c=lines[0].partition(':')
d['operator']=c.strip()
v=[]
t=[]
for i, l in enumerate(lines[11:]):
t=[]
l=l.strip()
while len(l)>0:
a, b, l=l.partition('\t')
t+=[a]
v+=[[eval(x) for x in t]]
return d, numpy.float32(v).T
def JimDAQ2011_SC(parent, filepath, batchattrdict=None):
dlist, arrlist=JimDAQ2011_fileiterator(filepath)
arr=truncate_arrlist_shortest(arrlist)
d=JimDAQ2011_translateheader(dlist[0])
d['ncycles']=len(dlist)
ds={}
ds['samplecurrent']=({'Aunit':0.001}, arr[:, :, 0])
ds['samplevoltage']=({'Vunit':checkfornegvoltage(arr[:, :, 3])*0.001}, arr[:, :, 3])
d['CELLNUMBER']=JimDAQ_getcell(filepath)
return d, ds, [[], []]
def JimDAQ2011_DSC(parent, filepath, batchattrdict=None):
dlist, arrlist=JimDAQ2011_fileiterator(filepath)
arr=truncate_arrlist_shortest(arrlist)
d=JimDAQ2011_translateheader(dlist[0])
d['ncycles']=len(dlist)
ds={}
ds['samplecurrent']=({'Aunit':0.001}, arr[:, :, 0])
ds['samplevoltage']=({'Vunit':checkfornegvoltage(arr[:, :, 3])*0.001}, arr[:, :, 3])
ds['refcurrent']=({'Aunit':0.001}, arr[:, :, 1])
ds['refvoltage']=({'Vunit':checkfornegvoltage(arr[:, :, 3])*0.001}, arr[:, :, 2])
d['CELLNUMBER']=JimDAQ_getcell(filepath)
return d, ds, [[], []]
def JimDAQ2011_acSC(parent, filepath, batchattrdict=None):
dlist, arrlist=JimDAQ2011_fileiterator(filepath)
arr=truncate_arrlist_shortest(arrlist)
print arr.shape
d=JimDAQ2011_translateheader(dlist[0])
d['ncycles']=len(dlist)
if not 'pts_sincycle' in d.keys():
d['pts_sincycle']=30.
ds={}
ds['samplecurrent']=({'Aunit':0.001}, arr[:, :, 0])
ds['samplevoltage']=({'Vunit':checkfornegvoltage(arr[:, :, 3])*0.001}, arr[:, :, 3])
ds['samplefilteredvoltage']=({'Vunit':0.001}, arr[:, :, 4])
d['CELLNUMBER']=JimDAQ_getcell(filepath)
return d, ds, [[], []]
def JimDAQ2011_translateheader(d):
dummyfcn=lambda x:x
key_headerkey_fcn_dflt=[\
('operator', 'name', dummyfcn,''),\
('epoch', 'date', lambda c:time.mktime(time.strptime(c.strip(),'%a, %m/%d/%Y %I:%M:%S %p')), 0), \
('ambient_tempC', 'furnace temp (C)', dummyfcn, 20.),\
('ambient_atmosphere', 'atmosphere', dummyfcn, 'vacuum'),\
('daqHz', 'daqtime_us', lambda c:1.e6/c, 301204.8), \
]
for k, hk, f, dflt in key_headerkey_fcn_dflt:
if hk in d.keys():
temp=d[hk]
del d[hk]
d[k]=f(temp)
else:
d[k]=dflt
return d
def JimDAQ2011_fileiterator(filepath):#filepath is a .dat file from any cycle
folderpath, filename=os.path.split(filepath)
a, c=os.path.splitext(filename)
a, b, n=a.rpartition('_of_')
a, atemp, i=a.rpartition('_')
a+=atemp
i=eval(i)
n=eval(n)
filelist=os.listdir(folderpath)
dlist=[]
arrlist=[]
for cnum in range(1, n+1):
fn='%s%d%s%d%s' %(a, cnum, b, n, c)
if fn in filelist:
p=os.path.join(folderpath, fn)
print 'reading: ', p
t1, t2=readdat_JimDAQ2011(p)
dlist+=[t1]
arrlist+=[t2]
return dlist, arrlist
def CHESSDAQ2011(parent, filepath, batchattrdict=None):
dlist, arrlist=CHESSDAQ2011_fileiterator(filepath)
arr=truncate_arrlist_shortest(arrlist)
print arr.shape
d=CHESSDAQ2011_translateheader(dlist[0])
d['ncycles']=len(dlist)
ds={}
ds['samplevoltage']=({'Vunit':checkfornegvoltage(arr[:, 0, :])*0.001}, arr[:, 0, :])
ds['samplecurrent']=({'Aunit':0.001}, arr[:, 1, :])
if arr.shape[1]==3:
ds['samplefilteredvoltage']=({'Vunit':0.001}, arr[:, 2, :])
if not 'pts_sincycle' in d.keys():
d['pts_sincycle']=30.
d['CELLNUMBER']=JimDAQ_getcell(filepath)
return d, ds, [[], []]
def readdat_CHESSDAQ2011(path, startofheader=':header_start:', endofheader=':header_end:', startofdata=':data_start:', endofdata=':data_end:'):
#read all keyword attributes and any in the comments section
f=open(path, 'rb')
bdata=f.read()
f.close()
headstr, garb, datasection=bdata.partition(endofheader)
def attemptnumericconversion(s):
if (s.replace('.', '', 1).replace('e', '', 1).replace('+', '', 1).replace('-', '', 1)).isalnum():
try:
return eval(s)
except:
pass
return s
def interpretline_header(a, d):
if a.startswith(':coms:'):
b, garb, c=a[1:].partition(':')
i=0
while i<len(c)-1:
j=c.find(':', i)
k=c.find(':', i+j+1)
if j<0 or k<0:
break
i=c.find(':', k+1)
if i<0:
i=len(c)
d[c[j+1:k]]=attemptnumericconversion((c[k+1:i]).strip())
elif a.startswith(':'):
b, garb, c=a[1:].partition(':')
if ':' in c and not 'date' in b:
print c
c, col, aa=c.partition(':')
print c, '***', col, '***', aa
interpretline_header(col+aa, d)
d[b]=attemptnumericconversion(c.strip())
garb, garb, headstr=headstr.partition(startofheader)
d={}
while len(headstr)>0:
a, garb, headstr=headstr.partition('\n')
a=a.strip()
interpretline_header(a, d)
fullscalelist=listeval(d['fullscale_fields'])
rangelist=listeval(d['NIai_mVrange'])
nchan=len(fullscalelist)
garb, garb, datasection=datasection.partition(startofdata)
datastr, garb, datasection=datasection.partition(endofdata)
    if len(datastr)%(4*nchan)>0:  # each channel sample is a 4-byte float, so truncate to whole rows
        print 'cut off some data because the shape of the binary data string was not understood'
        datastr=datastr[:-1*(len(datastr)%(4*nchan))]
    z=[struct.unpack('>f',datastr[i:i+4])[0] for i in range(0,len(datastr),4)] # '>f' is big-endian single-precision float; LabVIEW uses big-endian byte order
z=numpy.reshape(numpy.float32(z), (nchan, (len(z)//nchan)))
z=numpy.float32([za/(rng*0.001)*fs for za, fs, rng in zip(z, fullscalelist, rangelist)])
return d, z
def listeval(c):
returnlist=[]
if '\t' in c:
delim='\t'
else:
delim=' '
while len(c)>0:
a, garb, c=c.partition(delim)
a=a.strip()
c=c.strip()
a=a.lstrip('0').rstrip('.')
a=(a=='' and (0,) or (eval(a),))[0]
returnlist+=[a]
return returnlist
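# Illustrative example (not part of the original module): parses a tab- or
# space-separated numeric string,
#   listeval('1\t0.5\t0')  # -> [1, 0.5, 0]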
def CHESSDAQ2011_translateheader(d):
dummyfcn=lambda x:x
key_headerkey_fcn_dflt=[\
('operator', 'name', dummyfcn,''),\
('epoch', 'date', lambda c:time.mktime(time.strptime(c.strip(),'%a, %m/%d/%Y %I:%M:%S %p')), 0), \
('ambient_atmosphere', 'atmosphere', dummyfcn, 'vacuum'),\
('pts_sincycle', 'writepts_sincycle', lambda n: n*d['writeHz']/d['daqHz'], None), \
('spec', 'specscan', lambda n: n, None), \
]
for k, hk, f, dflt in key_headerkey_fcn_dflt:
if hk in d.keys():
temp=d[hk]
del d[hk]
d[k]=f(temp)
elif not dflt is None:
d[k]=dflt
return d
def CHESSDAQ2011_fileiterator(filepath):#filepath is a .dat file from any cycle
folderpath, filename=os.path.split(filepath)
a, c=os.path.splitext(filename)
a, b, n=a.rpartition('_of_')
a, atemp, i=a.rpartition('_')
a+=atemp
i=eval(i)
n=eval(n)
filelist=os.listdir(folderpath)
dlist=[]
arrlist=[]
for cnum in range(1, n+1):
fn='%s%d%s%d%s' %(a, cnum, b, n, c)
if fn in filelist:
p=os.path.join(folderpath, fn)
print 'reading: ', p
t1, t2=readdat_CHESSDAQ2011(p)
dlist+=[t1]
arrlist+=[t2]
return dlist, arrlist
def uint16tofloat32(x, offsetbinary=True, posfullscale=1.):#a negative posfullscale value will invert
if not offsetbinary:
x+=32768
return numpy.float32(posfullscale*(x/32768.-1.))
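# Illustrative example (not part of the original module): with offsetbinary=True and
# posfullscale=1.,
#   uint16tofloat32(numpy.uint16([0, 32768, 65535]))  # -> approx [-1., 0., 1.]
# a negative posfullscale simply inverts the sign of the result.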
def readdat_JimDAQ2011(path, startofheader=':header_start:', endofheader=':header_end:\r\n'):
#startofheader does not have to be start of file but endofheader has to include the character that is just before binary uint16 data
#read all keyword attributes and any in the comments section
f=open(path, 'rb')
bdata=f.read()
f.close()
headstr, garb, uintdata=bdata.partition(endofheader)
    if len(uintdata)%16>0:  # 8 channels of 2-byte uint16 per row, so truncate to whole 16-byte rows
        uintdata=uintdata[:-1*(len(uintdata)%16)]
    z=[struct.unpack('>H',uintdata[i:i+2]) for i in range(0,len(uintdata),2)] # '>H' is big-endian uint16; LabVIEW uses big-endian byte order
z=numpy.reshape(numpy.uint16(z),(len(z)//8,8))
def attemptnumericconversion(s):
if (s.replace('.', '', 1).replace('e', '', 1).replace('+', '', 1).replace('-', '', 1)).isalnum():
try:
return eval(s)
except:
pass
return s
garb, garb, headstr=headstr.partition(startofheader)
d={}
while len(headstr)>0:
a, garb, headstr=headstr.partition('\n')
a=a.strip()
if a.startswith(':'):
b, garb, c=a[1:].partition(':')
d[b]=attemptnumericconversion(c.strip())
if a.startswith(':coms:'):
b, garb, c=a[1:].partition(':')
i=0
while i<len(c)-1:
j=c.find(':', i)
k=c.find(':', i+j+1)
if j<0 or k<0:
break
i=c.find(':', k+1)
if i<0:
i=len(c)-1
d[c[j+1:k]]=attemptnumericconversion((c[k+1:i]).strip())
fullscalelist=[]
c=d['fullscale_fields']
if '\t' in c:
delim='\t'
else:
delim=' '
while len(c)>0:
a, garb, c=c.partition(delim)
a=a.strip()
c=c.strip()
a=a.lstrip('0').rstrip('.')
a=(a=='' and (0,) or (eval(a),))[0]
fullscalelist+=[a]
z=numpy.float32([uint16tofloat32(z[:, i], offsetbinary=ziz, posfullscale=fullsc) for i, (ziz, fullsc) in enumerate(zip([0, 0, 0, 0, 0, 1, 1, 0], fullscalelist))]).T
return d, z
def PatDAQ_filenamedecode(filepath):
folderpath, filename=os.path.split(filepath)
fn=filename.lower()
d={}
d['CELLNUMBER']=0
d['ambient_atmosphere']='vacuum'
SegmentData=([], [])
if 'cell' in fn:
a, b, c=fn.partition('cell')
n=''
c=c.strip()
while len(c)>0 and c[0].isdigit():
n+=c[0]
c=c[1:]
c=c.strip()
try:
n=eval(n.lstrip('0'))
d['CELLNUMBER']=n
except:
pass
fn=a+c
if 'mt' in fn:
underscoreallowed=True
a, b, c=fn.partition('mt')
n=''
a=a.strip()
while len(a)>0 and (a[-1].isdigit() or (underscoreallowed and a[-1]=='_')):
if a[-1].isdigit():
n=a[-1]+n
else:
underscoreallowed=False #so only one underscore can be deleted
a=a[:-1]
a=a.strip()
d['ambient_atmosphere']=str(n+'mT')
for temp in ['He', 'N2', 'Ar', 'H2', 'CO2', 'O2']:
if temp in filename:
d['ambient_atmosphere']+=str(' '+temp)
fn=a+c
if 'c' in fn:#this is for the number of cycles, just remove this character and any neighboring numbers
a, b, c=fn.partition('c')
a=a.strip()
while len(a)>0 and a[-1].isdigit():
a=a[:-1]
a=a.strip()
c=c.strip()
while len(c)>0 and c[0].isdigit():
            c=c[1:]  # strip the leading digit (the original c=c[0] never terminates)
c=c.strip()
fn=fn.partition('_')[2]
ma=[]
ms=[]
if 'ma' in fn:
a, b, c=fn.rpartition('ma')
ma=PatDAQ_extractlist(a.replace('ma', ''))
if 'ms' in fn:
a, b, c=fn.rpartition('ms')
ms=PatDAQ_extractlist(a.replace('ms', ''))
if len(ma)>2 and len(ms)>2:
sma=[]
sms=[]
totms=None
for mav, msv in zip(ma, ms):
sma+=[mav, mav]
if totms is None:
totms=0.
sms+=[totms]
else:
sms+=[totms+0.01]
totms+=msv
sms+=[totms]
SegmentData=(numpy.float32(sms), numpy.float32(sma))
durationguess=SegmentData[0].max()
# elif len(ms)==2 and len(ma)==1:
# durationguess=max(ms)
elif len(ms)>0:
durationguess=max(ms)
else:
durationguess=None
return d, SegmentData, durationguess
def PatDAQ_extractlist(s):
nlist=[]
c=''
while len(s)>0 and (s[-1].isdigit() or s[-1]=='_'):
c=s[-1]+c
s=s[:-1]
s=s.strip()
while len(c)>0:
a, b, c=c.partition('_')
nlist+=[a]
return [eval(n) for n in nlist if len(n)>0 and not (False in [nc.isdigit() for nc in n])]
#print PatDAQ_filenamedecode('c:/2010Nov27_Cell5_1mA_50ms_500ms_Ro_10C.dat')
def readdat_PatDAQ(path):
f=open(path, mode='r')
lines=f.readlines()
f.close()
v=[]
t=[]
for i, l in enumerate(lines):
t=[]
l=l.strip().strip('\t')
while len(l)>0:
a, b, l=l.partition('\t')
t+=[a]
try:
v+=[[eval(x) for x in t]]
except:
print 'Error evaluating ', t
return numpy.float32(v).T
def PatDAQ_SC(parent, filepath, batchattrdict=None):
d, SegmentData, durationguess=PatDAQ_filenamedecode(filepath)
arr=readdat_PatDAQ(filepath)
d['daqHz']=100000.
d['operator']=''
d['epoch']=os.path.getmtime(filepath)
if durationguess is None:
durationguess=arr.shape[1]
else:
durationguess*=d['daqHz']/1000.#durationguess from filename is in ms
if 'ncycles' in d.keys() and not (d['ncycles'] is None):
durationguess=int(round(1.*arr.shape[1]/d['ncycles']))
if batchattrdict is None:
idialog=PatDAQCycleEditor(parent, arr[0], durationguess, os.path.split(filepath)[1])
if not idialog.exec_():
return False
else:
if 'ncycles' in batchattrdict.keys() and not (batchattrdict['ncycles'] is None):
durationguess=int(round(1.*arr.shape[1]/batchattrdict['ncycles']))
idialog=PatDAQCycleEditor(parent, arr[0], durationguess, os.path.split(filepath)[1])
for sb, k in [(idialog.durSpinBox, 'durSpinBox'), (idialog.nnoiseSpinBox, 'nnoiseSpinBox'), (idialog.nsigSpinBox, 'nsigSpinBox'), (idialog.naboveSpinBox, 'naboveSpinBox')]:
if k in batchattrdict and not (batchattrdict[k] is None):
sb.setValue(batchattrdict[k])
idialog.calccycles()
idialog.ExitRoutine()
temp=idialog.partition_array_triggers(arr[0])
temp2=idialog.partition_array_triggers(arr[1])
if temp is None or temp2 is None:
        QMessageBox.warning(parent, "FAILED", "ABORTED due to failure in partitioning data into cycles")
return False
mAcycles=temp[0]
Vcycles=temp2[0]
d['ncycles']=mAcycles.shape[0]
ds={}
ds['samplecurrent']=({'Aunit':0.01}, mAcycles[:, :])
ds['samplevoltage']=({'Vunit':1.}, Vcycles[:, :])
d['ambient_tempC']=20.
return d, ds, SegmentData
def readdat_PatDAQ2011(path):
f=open(path, mode='r')
lines=f.readlines()
f.close()
def evalrow(l):
t=[]
l=l.strip().strip('\t')
while len(l)>0:
a, b, l=l.partition('\t')
t+=[a]
try:
v=numpy.array([eval(x) for x in t])
except:
v=t
if len(v)==1:
return v[0]
else:
return v
v=[]
d={}
for i, l in enumerate(lines):
if l.startswith('%'):
k, garb, s=l.partition('%')[2].partition(':')
if len(k)>0 and len(s)>0:
d[k]=evalrow(s)
else:
l=l.strip().strip('\t')
if len(l)>0:
v+=[evalrow(l)]
return d, numpy.float32(v).T
def PatDAQ2011_SC(parent, filepath, batchattrdict=None):
durationguess=None
    SegmentData=([], [])  # no segment information in this file format; empty default (assumption replacing the original '#SegmentData=?' placeholder)
d, arr=readdat_PatDAQ2011(filepath)
if not 'daqHz' in d.keys():
d['daqHz']=100000.
if not 'operator' in d.keys():
d['operator']=''
d['epoch']=os.path.getmtime(filepath)
if durationguess is None:
durationguess=arr.shape[1]
else:
durationguess*=d['daqHz']/1000.#durationguess from filename is in ms
if 'ncycles' in d.keys() and not (d['ncycles'] is None):
durationguess=int(round(1.*arr.shape[1]/d['ncycles']))
if batchattrdict is None:
idialog=PatDAQCycleEditor(parent, arr[0], durationguess, os.path.split(filepath)[1])
if not idialog.exec_():
return False
else:
if 'ncycles' in batchattrdict.keys() and not (batchattrdict['ncycles'] is None):
durationguess=int(round(1.*arr.shape[1]/batchattrdict['ncycles']))
idialog=PatDAQCycleEditor(parent, arr[0], durationguess, os.path.split(filepath)[1])
for sb, k in [(idialog.durSpinBox, 'durSpinBox'), (idialog.nnoiseSpinBox, 'nnoiseSpinBox'), (idialog.nsigSpinBox, 'nsigSpinBox'), (idialog.naboveSpinBox, 'naboveSpinBox')]:
if k in batchattrdict and not (batchattrdict[k] is None):
sb.setValue(batchattrdict[k])
idialog.calccycles()
idialog.ExitRoutine()
temp=idialog.partition_array_triggers(arr[0])
temp2=idialog.partition_array_triggers(arr[1])
if temp is None or temp2 is None:
        QMessageBox.warning(parent, "FAILED", "ABORTED due to failure in partitioning data into cycles")
return False
mAcycles=temp[0]
Vcycles=temp2[0]
d['ncycles']=mAcycles.shape[0]
ds={}
ds['samplecurrent']=({'Aunit':0.01}, mAcycles[:, :])
ds['samplevoltage']=({'Vunit':1.}, Vcycles[:, :])
d['ambient_tempC']=20.
return d, ds, SegmentData
FileFormatFunctionLibrary={\
'CHESSDAQ2011':CHESSDAQ2011, \
'JimDAQ2011_acSC':JimDAQ2011_acSC, \
'PatDAQ_SC':PatDAQ_SC, \
'PatDAQ2011_SC':PatDAQ2011_SC, \
'JimDAQ_SC':JimDAQ_SC, \
'JimDAQ2011_SC':JimDAQ2011_SC, \
'JimDAQ2011_DSC':JimDAQ2011_DSC, \
}
#p='C:/Users/JohnnyG/Documents/HarvardWork/ACcal/20110708_initACtests/cell29_10Ohm_10mAdc_9mA10kHz_9kHz11kHzfilter_wTtokeithley_1_of_1.dat'
#dlist, arrlist=JimDAQ2011_fileiterator(p)
def xreaddat_CHESSDAQ2011(path, startofheader=':header_start:', endofheader=':header_end:', startofdata=':data_start:', endofdata=':data_end:'):
#read all keyword attributes and any in the comments section
f=open(path, 'rb')
bdata=f.read()
f.close()
headstr, garb, datasection=bdata.partition(endofheader)
def attemptnumericconversion(s):
if (s.replace('.', '', 1).replace('e', '', 1).replace('+', '', 1).replace('-', '', 1)).isalnum():
try:
return eval(s)
except:
pass
return s
fullscalelist=[1, 1, 1]
rangelist=[1, 1, 1]
nchan=len(fullscalelist)
garb, garb, datasection=datasection.partition(startofdata)
datastr, garb, datasection=datasection.partition(endofdata)
    if len(datastr)%(4*nchan)>0:  # each channel sample is a 4-byte float
        datastr=datastr[:-1*(len(datastr)%(4*nchan))]
    z=[struct.unpack('>f',datastr[i:i+4])[0] for i in range(0,len(datastr),4)] # '>f' is big-endian single-precision float; LabVIEW uses big-endian byte order
z=numpy.reshape(numpy.float32(z), (nchan, (len(z)//nchan)))
z=numpy.float32([za/(rng*0.001)*fs for za, fs, rng in zip(z, fullscalelist, rangelist)])
return z
#p='E:/testchessdaq/a3chan_1_of_1.dat'
#a=xreaddat_CHESSDAQ2011(p)
| bsd-3-clause |
Morisset/pySSN | pyssn/qt/pyssn_qt.py | 1 | 220441 | """
This is the window manager part of pySSN
pySSN is available under the GNU licence, provided you cite the developers' names:
Ch. Morisset (Instituto de Astronomia, Universidad Nacional Autonoma de Mexico)
D. Pequignot (Meudon Observatory, France)
Inspired by a demo code by:
Eli Bendersky ([email protected])
"""
import sys, os
from PyQt4 import QtCore, QtGui
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
#from matplotlib.backends.backend_qt4agg import NavigationToolbar2QT as NavigationToolbar
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QT
from matplotlib.figure import Figure
import matplotlib.pyplot as plt
import numpy as np
from pyssn import log_, __version__
from ..core.spectrum import spectrum
from ..utils.misc import get_parser
from collections import OrderedDict
from ..utils.physics import CST
log_.level = 4
#ToDo :
class NavigationToolbar( NavigationToolbar2QT ):
curs = QtCore.pyqtSignal(bool)
def __init__(self, canvas, parent ):
NavigationToolbar2QT.__init__(self,canvas,parent)
self.clearButtons=[]
# Search through existing buttons
# next use for placement of custom button
next=None
for c in self.findChildren(QtGui.QToolButton):
if next is None:
next=c
# Don't want to see subplots and customize
"""
if str(c.text()) in ('Subplots', 'Customize'):
c.defaultAction().setVisible(False)
continue
"""
# Need to keep track of pan and zoom buttons
# Also grab toggled event to clear checked status of picker button
if str(c.text()) in ('Pan','Zoom'):
c.toggled.connect(self.clearCurs)
self.clearButtons.append(c)
next=None
# create custom button
pm=QtGui.QPixmap(32,32)
pm.fill(QtGui.QApplication.palette().color(QtGui.QPalette.Normal,QtGui.QPalette.Button))
painter=QtGui.QPainter(pm)
painter.fillRect(6,6,20,20,QtCore.Qt.red)
painter.fillRect(15,3,3,26,QtCore.Qt.blue)
painter.fillRect(3,15,26,3,QtCore.Qt.blue)
painter.end()
icon=QtGui.QIcon(pm)
ac = self.addAction(icon, "Toggle Curs")
ac.setCheckable(True)
        # TODO: see how to initialize this (i.e. whether the cursor toggle starts checked)
#ac.setChecked(True)
ac.toggled.connect(self.curs_toggle)
self.ac = ac
#button=QtGui.QToolButton(self)
#button.setDefaultAction(self.ac)
# Add it to the toolbar, and connect up event
#self.insertWidget(next.defaultAction(),button)
# Grab the picked event from the canvas
canvas.mpl_connect('pick_event',self.canvasPicked)
def clearCurs(self, checked):
if checked:
self.ac.setChecked(False)
def curs_toggle(self, checked):
self.curs.emit(checked)
def canvasPicked(self, event):
if self.ac.isChecked():
self.curs.emit(event.ind)
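# Usage note (sketch based on AppForm.create_main_frame below, not part of the original
# code): the custom toolbar button emits the `curs` signal, which the main window
# connects to a slot receiving the checked state, e.g.
#   toolbar = NavigationToolbar(canvas, parent)
#   toolbar.curs.connect(app_form.set_cursor)   # slot gets the toggle state (bool)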
class AppForm(QtGui.QMainWindow):
def __init__(self, parent=None, init_filename=None, post_proc_file=None, use_workspace=False):
self.calling = 'pySSN GUI'
self.use_workspace = use_workspace
QtGui.QMainWindow.__init__(self, parent)
self.setWindowTitle('pySSN')
self.sp = None
self.axes = None
self.axes2 = None
self.axes3 = None
self.fig = None
self.init_file_name = init_filename
self.init_line_num = None
self.init_ion = None
self.init_xmin = None
self.init_xmax = None
self.init_y1min = None
self.init_y1max = None
self.init_y3min = None
self.init_y3max = None
self.init_legend_fontsize = None
self.init_legend_loc = None
self.init_nearby_line_num = None
self.init_nearby_ion = None
self.init_nearby_xmin = None
self.init_nearby_xmax = None
self.init_nearby_y1min = None
self.init_nearby_y1max = None
self.init_nearby_y3min = None
self.init_nearby_y3max = None
self.init_nearby_legend_fontsize = None
self.init_nearby_legend_loc = None
self.init_cont_line_num = None
self.init_cont_ion = None
self.init_cont_xmin = None
self.init_cont_xmax = None
self.init_cont_y1min = None
self.init_cont_y1max = None
self.init_cont_y3min = None
self.init_cont_y3max = None
self.init_cont_legend_fontsize = None
self.init_cont_legend_loc = None
self.call_on_draw = True
self.cursor_on = False
self.line_info_ref = 0
self.x_plot_lims = None
self.y1_plot_lims = None
self.y2_plot_lims = None
self.y3_plot_lims = None
self.xscale = None
self.yscale = None
self.post_proc_file = post_proc_file
self.tick_file = None
self.save_parameters_file = None
self.do_save = True
self.cont_par_changed = False
self.axes_fixed = False
self.showErrorBox = True
self.create_menu()
self.create_main_frame()
self.create_status_bar()
self.exec_init()
self.cont_pars_dialog = None
self.cursor_w1 = None
self.cursor_w2 = None
self.nearbyLines = None
self.nearbyLines_sort_by = 'i_tot'
self.nearbyLines_sort_reverse = True
self.nearbyLines_dialog = None
self.nearbyLines_selected_ions = None
self.line_info_dialog = None
self.instr_prof_dialog = None
self.refine_wave_dialog = None
self.refine_wave_as_table = False
self.interpol_cont_dialog = None
self.interpol_cont_as_table = False
self.fig_prof = None
self.green_tick_shown = False
self.magenta_tick_shown = False
self.addGreenTickToLegend = True
self.show_true_ions = False
self.nearbyDialogFilterIsActive = False
self.get_user_cont_points = False
self.del_user_cont_points = False
self.user_cont_editBox = None
self.showHelpBrowser = False
def closeEvent(self, evnt):
if self.sp.get_conf('save_parameters_on_exit'):
self.save_pars_as()
if self.cont_pars_dialog is not None:
self.cont_pars_dialog.close()
if self.nearbyLines_dialog is not None:
self.nearbyLines_dialog.close()
if self.line_info_dialog is not None:
self.line_info_dialog.close()
self.line_info_table.close()
if self.instr_prof_dialog is not None:
self.instr_prof_dialog.close()
if self.refine_wave_dialog is not None:
self.refine_wave_dialog.close()
if self.interpol_cont_dialog is not None:
self.interpol_cont_dialog.close()
def image_extension_list(self):
filetypes = self.canvas.get_supported_filetypes()
file_extensions = filetypes.keys()
file_extensions.sort()
return file_extensions
def image_filter(self, fileExt=''):
filetypes = self.canvas.get_supported_filetypes_grouped()
imagetype_list = filetypes.keys()
imagetype_list.sort()
s = ''
k = 0
for imagetype in imagetype_list:
extension_list = filetypes[ imagetype ]
if fileExt in extension_list:
k = imagetype_list.index(imagetype)
s = s + str(imagetype)
s1 = ' (*.' + str(extension_list[0])
for extension in extension_list[1:]:
s1 = s1 + ' *.' + str(extension)
s1 = s1 + ')'
s = s + s1 + s1 + ';;'
filter_str = s[:-2]
selectedFilter = s.split(';;')[k]
return filter_str, selectedFilter
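    # Illustrative example (actual group names depend on matplotlib): image_filter('png')
    # returns a ';;'-separated Qt filter string plus the entry pre-selected for 'png', e.g.
    #   ('...;;Portable Network Graphics (*.png) (*.png);;...',
    #    'Portable Network Graphics (*.png) (*.png)')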
def save_plot(self):
path = self.sp.get_conf('plot_filename')
self.canvas.print_figure(path, dpi=self.dpi)
self.statusBar().showMessage('Plot saved to file %s' % path, 2000)
def save_plot_as(self):
path = self.sp.get_conf('plot_filename')
extension = os.path.splitext(path)[1][1:].lower()
file_choices, selectedFilter = self.image_filter(extension)
path = unicode(QtGui.QFileDialog.getSaveFileName(self, 'Save plot to file', path, file_choices, selectedFilter))
if path:
extension = os.path.splitext(path)[1][1:].lower()
if extension in self.image_extension_list():
self.sp.set_conf('plot_filename', path)
self.canvas.print_figure(path, dpi=self.dpi)
self.statusBar().showMessage('Plot saved to file %s' % path, 2000)
else:
title = 'Error saving plot'
msg = 'Format "{0}" not supported.'.format(extension)
msg = msg + '\nSupported formats: '
extension_list = self.image_extension_list()
n = len(extension_list)-1
s = ''
for i in range(0,n):
s = s + extension_list[i] + ', '
s = s + extension_list[n] + '.'
msg = msg + s
QtGui.QMessageBox.critical(self, title, msg, QtGui.QMessageBox.Ok )
def on_about(self):
msg = """ pySSN (Spectral Synthesis for Nebulae):
"""
QtGui.QMessageBox.about(self, "About the demo", msg.strip())
def set_cursor(self, checked):
self.cursor_on = checked
self.sp.firstClick = True
def on_click(self, event):
if self.get_user_cont_points and self.user_cont_editBox is not None:
wave = event.xdata
i_list = [i for i in range(len(self.sp.w)-1) if self.sp.w[i] <= wave <= self.sp.w[i+1] or self.sp.w[i+1] <= wave <= self.sp.w[i]]
if len(i_list) == 1:
i = i_list[0]
c = self.sp.cont[i] - self.sp.conts['user'][i]
self.user_cont_editBox.append('{:<7.1f} {:.2f}'.format(event.xdata, event.ydata-c))
self.update_user_cont()
elif ( self.del_user_cont_points and
self.user_cont_editBox is not None and
self.sp.get_conf('cont_user_table') is not None ):
wave = event.xdata
points = self.sp.get_conf('cont_user_table')[:]
if points is not None and len(points) > 0:
points.remove(min(points, key=lambda x:abs(x[0]-wave)))
self.user_cont_list2table(points)
self.update_user_cont()
elif self.cursor_on:
do_print = not self.sp.get_conf('qt_show_dialogs', True)
nearbyLines = self.sp.nearby_lines(event, do_print, sort='i_tot', reverse=True)
if nearbyLines is None:
return
self.nearbyLines = nearbyLines
if not do_print:
self.show_nearbyLines_dialog()
def sort_nearbyLines(self, sort, reverse=False):
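        # Sort self.nearbyLines in place by the given field; 'proc' sorts by the process code embedded in the line number, and reverse=True gives descending order.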
if self.nearbyLines is None:
return
if sort == 'proc':
sorts = np.argsort([ self.sp.process[str(line_num)[-9]] for line_num in self.nearbyLines['num'] ])
else:
sorts = np.argsort(self.nearbyLines[sort])
if reverse:
sorts = sorts[::-1]
self.nearbyLines = np.array(self.nearbyLines)[sorts]
def create_main_frame(self):
if self.use_workspace:
self.main_frame = QtGui.QWorkspace()
else:
self.main_frame = QtGui.QWidget()
# Create the mpl Figure and FigCanvas objects.
#
self.dpi = 100
#self.fig = plt.figure(figsize=(15,15))
self.fig = plt.figure(figsize=(15,15))
# self.fig = plt.figure(figsize=(20.0, 15.0), dpi=self.dpi)
log_.debug('creating figure {}'.format(id(self.fig)), calling=self.calling)
self.canvas = FigureCanvas(self.fig)
if self.use_workspace:
self.main_frame.addWindow(self.canvas)
self.fig2 = Figure((20.0, 15.0), dpi=self.dpi)
self.canvas2 = FigureCanvas(self.fig2)
#self.main_frame.addWindow(self.canvas2)
else:
self.canvas.setParent(self.main_frame)
self.canvas.mpl_connect('button_press_event', self.on_click)
self.canvas.mpl_connect('figure_leave_event', self.leave_fig)
# Create the navigation toolbar, tied to the canvas
#
self.mpl_toolbar = NavigationToolbar(self.canvas, self.main_frame)
self.mpl_toolbar.curs.connect(self.set_cursor)
# Other GUI controls
#
self.fix_axes_cb = QtGui.QCheckBox("fix")
self.fix_axes_cb.setChecked(False)
self.connect(self.fix_axes_cb, QtCore.SIGNAL('stateChanged(int)'), self.fix_axes)
self.xlim_min_box = QtGui.QLineEdit()
self.xlim_min_box.setMinimumWidth(50)
#self.connect(self.xlim_min_box, QtCore.SIGNAL('editingFinished()'), self.validate_xlim_min)
self.connect(self.xlim_min_box, QtCore.SIGNAL('returnPressed()'), self.set_plot_limits_and_draw)
self.xlim_max_box = QtGui.QLineEdit()
self.xlim_max_box.setMinimumWidth(50)
#self.connect(self.xlim_max_box, QtCore.SIGNAL('editingFinished()'), self.validate_xlim_max)
#self.xlim_max_box.editingFinished.connect(self.validate_xlim_max)
self.connect(self.xlim_max_box, QtCore.SIGNAL('returnPressed()'), self.set_plot_limits_and_draw)
self.y1lim_min_box = QtGui.QLineEdit()
self.y1lim_min_box.setMinimumWidth(50)
#self.connect(self.y1lim_min_box, QtCore.SIGNAL('editingFinished()'), self.validate_y1lim_min)
self.connect(self.y1lim_min_box, QtCore.SIGNAL('returnPressed()'), self.set_plot_limits_and_draw)
self.y1lim_max_box = QtGui.QLineEdit()
self.y1lim_max_box.setMinimumWidth(50)
#self.connect(self.y1lim_max_box, QtCore.SIGNAL('editingFinished()'), self.validate_y1lim_max)
self.connect(self.y1lim_max_box, QtCore.SIGNAL('returnPressed()'), self.set_plot_limits_and_draw)
self.y3lim_min_box = QtGui.QLineEdit()
self.y3lim_min_box.setMinimumWidth(50)
#self.connect(self.y3lim_min_box, QtCore.SIGNAL('editingFinished()'), self.validate_y3lim_min)
self.connect(self.y3lim_min_box, QtCore.SIGNAL('returnPressed()'), self.set_plot_limits_and_draw)
self.y3lim_max_box = QtGui.QLineEdit()
self.y3lim_max_box.setMinimumWidth(50)
#self.connect(self.y3lim_max_box, QtCore.SIGNAL('editingFinished()'), self.validate_y3lim_max)
self.connect(self.y3lim_max_box, QtCore.SIGNAL('returnPressed()'), self.set_plot_limits_and_draw)
self.run_button = QtGui.QPushButton("Run")
self.connect(self.run_button, QtCore.SIGNAL('clicked()'), self.rerun)
self.draw_button = QtGui.QPushButton("Draw")
self.connect(self.draw_button, QtCore.SIGNAL('clicked()'), self.on_draw)
self.Command_GroupBox = QtGui.QGroupBox("Execute")
self.Command_GroupBox.setCheckable(False)
self.ObsSpec_GroupBox = QtGui.QGroupBox("Parameters of the synthetic spectrum")
self.ObsSpec_GroupBox.setCheckable(False)
self.SpecPlot_GroupBox = QtGui.QGroupBox("Plot of spectra")
self.SpecPlot_GroupBox.setCheckable(False)
self.lineIDs_GroupBox = QtGui.QGroupBox("Show lines")
self.lineIDs_GroupBox.setCheckable(True)
self.lineIDs_GroupBox.setChecked(True)
self.connect(self.lineIDs_GroupBox, QtCore.SIGNAL('clicked()'), self.show_lines_clicked)
self.lineIDs_GroupBox_ToolTip = 'Check to show ticks at the central positions of the spectral lines and plot the lines of selected ions'
self.residual_GroupBox = QtGui.QGroupBox("Plot of residuals")
self.residual_GroupBox.setCheckable(True)
self.residual_GroupBox.setChecked(True)
self.connect(self.residual_GroupBox, QtCore.SIGNAL('clicked()'), self.residual_box_clicked)
self.residual_GroupBox_ToolTip = 'Check to display the residual plot'
self.adjust_button = QtGui.QPushButton("Update")
self.adjust_button.setChecked(False)
self.connect(self.adjust_button, QtCore.SIGNAL('clicked()'), self.adjust)
self.post_proc_button = QtGui.QPushButton("Post proc")
self.post_proc_button.setChecked(False)
self.connect(self.post_proc_button, QtCore.SIGNAL('clicked()'), self.apply_post_proc)
self.update_profile_button = QtGui.QPushButton("Update profiles")
self.update_profile_button.setChecked(False)
self.connect(self.update_profile_button, QtCore.SIGNAL('clicked()'), self.update_profile)
self.sp_min_box = QtGui.QLineEdit()
self.sp_min_box.setMinimumWidth(50)
#self.connect(self.sp_min_box, QtCore.SIGNAL('editingFinished()'), self.set_limit_sp)
self.connect(self.sp_min_box, QtCore.SIGNAL('returnPressed()'), self.set_limit_sp_and_run)
self.sp_max_box = QtGui.QLineEdit()
self.sp_max_box.setMinimumWidth(50)
#self.connect(self.sp_max_box, QtCore.SIGNAL('editingFinished()'), self.set_limit_sp)
self.connect(self.sp_max_box, QtCore.SIGNAL('returnPressed()'), self.set_limit_sp_and_run)
self.sp_norm_box = QtGui.QLineEdit()
self.sp_norm_box.setMinimumWidth(50)
self.connect(self.sp_norm_box, QtCore.SIGNAL('returnPressed()'), self.sp_norm)
self.obj_velo_box = QtGui.QLineEdit()
self.obj_velo_box.setMinimumWidth(50)
self.connect(self.obj_velo_box, QtCore.SIGNAL('returnPressed()'), self.obj_velo)
self.ebv_box = QtGui.QLineEdit()
self.ebv_box.setMinimumWidth(50)
self.connect(self.ebv_box, QtCore.SIGNAL('returnPressed()'), self.ebv)
self.resol_box = QtGui.QLineEdit()
self.resol_box.setMinimumWidth(50)
self.connect(self.resol_box, QtCore.SIGNAL('returnPressed()'), self.resol)
self.cut2_box = QtGui.QLineEdit()
self.cut2_box.setMinimumWidth(50)
self.connect(self.cut2_box, QtCore.SIGNAL('returnPressed()'), self.cut2)
self.cut_cb = QtGui.QCheckBox('')
self.cut_cb.setChecked(False)
self.connect(self.cut_cb, QtCore.SIGNAL('clicked()'), self.cut_cb_changed)
self.ion_box = QtGui.QLineEdit()
self.ion_box.setMinimumWidth(70)
self.connect(self.ion_box, QtCore.SIGNAL('returnPressed()'), self.draw_ion)
self.ion_cb = QtGui.QCheckBox('')
self.ion_cb.setChecked(False)
self.connect(self.ion_cb, QtCore.SIGNAL('clicked()'), self.ion_cb_changed)
self.line_info_box = QtGui.QLineEdit()
self.line_info_box.setFixedWidth(130)
self.connect(self.line_info_box, QtCore.SIGNAL('returnPressed()'), self.line_info)
self.mpl_toolbar.addSeparator()
self.mpl_toolbar.addWidget(QtGui.QLabel(' line number '))
self.mpl_toolbar.addWidget(self.line_info_box)
self.magenta_box = QtGui.QLineEdit()
self.magenta_box.setMinimumWidth(50)
self.connect(self.magenta_box, QtCore.SIGNAL('returnPressed()'), self.magenta_line)
self.magenta_label_box = QtGui.QLineEdit()
self.magenta_label_box.setMinimumWidth(50)
self.connect(self.magenta_label_box, QtCore.SIGNAL('returnPressed()'), self.magenta_line)
self.cyan_box = QtGui.QLineEdit()
self.cyan_box.setMinimumWidth(50)
self.connect(self.cyan_box, QtCore.SIGNAL('returnPressed()'), self.cyan_line)
self.cyan_label_box = QtGui.QLineEdit()
self.cyan_label_box.setMinimumWidth(50)
self.connect(self.cyan_label_box, QtCore.SIGNAL('returnPressed()'), self.cyan_line)
self.setStyleSheet("""QToolTip {
background-color: black;
color: lightgray;
min-width: 20em;
font-size: 14px;
font-family: "sans-serif";
border: black solid 10px
}""")
s = 'Click to execute the synthesis from the beginning.'
self.run_button_ToolTip = s
s = 'Click to update synthesis with changes in line intensities, profiles, and continuum parameters.'
self.adjust_button_ToolTip = s
s = 'Enter line number to get information on\n' \
'the reference line and on its satellites.'
self.line_info_box_ToolTip = s
s = 'Color excess E(B-V)\n\n' \
'Set with: \n' \
' e_bv = <float>\n\n' \
'Comment: \n' \
u' E(B-V) \u2248 C(H\u03B2) / 1.5'
self.ebv_box_ToolTip = s
s = 'Radial velocity in km/s\n\n' \
'Set with: \n' \
' obj_velo = <float>'
self.obj_velo_box_ToolTip = s
s = 'Minimum wavelength of the synthetic spectrum (in angstroms)\n\n' \
'Set with: \n' \
' limit_sp = (<xmin>, <xmax>)'
self.sp_min_box_ToolTip = s
s = 'Maximum wavelength of the synthetic spectrum (in angstroms)\n\n' \
'Set with: \n' \
' limit_sp = (<xmin>, <xmax>)'
self.sp_max_box_ToolTip = s
s = 'Minimum wavelength in the plots of spectra and residuals (in angstroms)\n\n' \
'Set with: \n' \
' x_plot_lims = (<xmin>, <xmax>)'
self.xlim_min_box_ToolTip = s
s = 'Maximum wavelength in the plots of spectra and residuals (in angstroms)\n\n' \
'Set with: \n' \
' x_plot_lims = (<xmin>, <xmax>)'
self.xlim_max_box_ToolTip = s
s = 'Minimum ordinate in the plot of spectra, in units of relative intensity \n\n' \
'Set with: \n' \
' y1_plot_lims = (<ymin>, <ymax>)'
self.y1lim_min_box_ToolTip = s
s = 'Maximum ordinate in the plot of spectra, in units of relative intensity\n\n' \
'Set with: \n' \
' y1_plot_lims = (<ymin>, <ymax>)'
self.y1lim_max_box_ToolTip = s
s = 'Minimum ordinate in the plot of residuals, in units of relative intensity\n\n' \
'Set with: \n' \
' y3_plot_lims = (<ymin>, <ymax>)'
self.y3lim_min_box_ToolTip = s
s = 'Maximum ordinate in the plot of residuals, in units of relative intensity\n\n' \
'Set with: \n' \
' y3_plot_lims = (<ymin>, <ymax>)'
self.y3lim_max_box_ToolTip = s
s = 'Check to retain the current limits of the plots while zooming and panning.'
self.fix_axes_cb_ToolTip = s
s = 'Check to show only lines with intensities above cut. \n\n' \
'Set with: \n' \
' show_selected_intensities_only = <boolean>'
self.cut_cb_ToolTip = s
s = 'Check to show only lines of selected ions. \n\n' \
'Set with: \n' \
' show_selected_ions_only = <boolean>'
self.ion_cb_ToolTip = s
s = 'Normalization factor, ratio between the intensity and the \n' \
u'observed flux of the reference line, usually 10\u2074/F(H\u03B2)\n\n' \
'Set with: \n' \
' sp_norm = <float>'
self.sp_norm_box_ToolTip = s
s = 'Rebinning factor, the odd integer factor by which the number of points \n' \
'of the original spectrum is multiplied in the rebinning process\n\n' \
'Set with: \n' \
' resol = <integer>\n\n' \
'Usage: \n' \
' Set to \'1\' if the resolution of the observed spectrum is large enough'
self.resol_box_ToolTip = s
s = 'Minimum relative intensity of lines to be shown. \n\n' \
'Set with: \n' \
' cut_plot2 = <float>'
self.cut2_box_ToolTip = s
s = 'Comma-separated list of selected ions, elements, or line numbers to be shown. \n\n' \
'Set with: \n' \
' selected_ions = [<ion1>,<ion2>,...]\n\n' \
'Examples: \n' \
' \'O III\' (or \'O_III\') to show the lines of O III\n' \
' \'O III*\' (or \'O_III*\') to show the lines of O III, O IIIfl, O III5g, etc\n' \
' \'O III, O IV\' to show the lines of O III and O IV\n' \
' \'O\' to show the lines of all O ions\n' \
' \'Fe, N\' to show the lines of all Fe and N ions\n' \
' <line number> to show the lines of that same ion'
self.ion_box_ToolTip = s
#
# Layout with box sizers
#
CommandLayout = QtGui.QGridLayout()
wList = [self.run_button,self.adjust_button]
Nrow = 2
for w in wList:
k = wList.index( w )
i = k%Nrow
j = 1+2*(k/Nrow)
CommandLayout.addWidget(w,i,j)
CommandLayout.setAlignment(w,QtCore.Qt.AlignCenter)
self.Command_GroupBox.setLayout(CommandLayout)
ObsSpecLayout = QtGui.QGridLayout()
lList = ['xmin', 'xmax', u'10\u2074/F(H\u03B2)', 'radial vel.', 'E(B-V)', 'N']
wList = [self.sp_min_box, self.sp_max_box, self.sp_norm_box, self.obj_velo_box, self.ebv_box, self.resol_box ]
Nrow = 2
for l in lList:
w = QtGui.QLabel(l)
k = lList.index( l )
i = k%Nrow
j = 2*(k/Nrow)
ObsSpecLayout.addWidget(w,i,j)
ObsSpecLayout.setAlignment(w,QtCore.Qt.AlignRight)
for w in wList:
k = wList.index( w )
i = k%Nrow
j = 1+2*(k/Nrow)
ObsSpecLayout.addWidget(w,i,j)
ObsSpecLayout.setAlignment(w,QtCore.Qt.AlignRight)
self.ObsSpec_GroupBox.setLayout(ObsSpecLayout)
SpecPlotLayout = QtGui.QGridLayout()
SpecPlotLayout.addWidget(QtGui.QLabel('xmin'),0,0)
SpecPlotLayout.addWidget(QtGui.QLabel('xmax'),1,0)
SpecPlotLayout.addWidget(QtGui.QLabel('ymin'),0,2)
SpecPlotLayout.addWidget(QtGui.QLabel('ymax'),1,2)
SpecPlotLayout.addWidget(self.xlim_min_box,0,1)
SpecPlotLayout.addWidget(self.xlim_max_box,1,1)
SpecPlotLayout.addWidget(self.y1lim_min_box,0,3)
SpecPlotLayout.addWidget(self.y1lim_max_box,1,3)
SpecPlotLayout.addWidget(self.fix_axes_cb,0,4)
self.SpecPlot_GroupBox.setLayout(SpecPlotLayout)
LineIDLayout = QtGui.QGridLayout()
LineIDLayout.addWidget(QtGui.QLabel('cut'),0,0)
LineIDLayout.addWidget(self.cut2_box,0,1)
LineIDLayout.addWidget(self.cut_cb,0,2)
LineIDLayout.addWidget(QtGui.QLabel('ion'),1,0)
LineIDLayout.addWidget(self.ion_box,1,1)
LineIDLayout.addWidget(self.ion_cb,1,2)
self.lineIDs_GroupBox.setLayout(LineIDLayout)
ResidualLayout = QtGui.QGridLayout()
ResidualLayout.addWidget(QtGui.QLabel('ymin'),0,0)
ResidualLayout.addWidget(QtGui.QLabel('ymax'),1,0)
ResidualLayout.addWidget(self.y3lim_min_box,0,1)
ResidualLayout.addWidget(self.y3lim_max_box,1,1)
self.residual_GroupBox.setLayout(ResidualLayout)
grid = QtGui.QGridLayout()
grid.addWidget(self.Command_GroupBox, 0, 1 )
grid.addWidget(self.ObsSpec_GroupBox, 0, 2 )
grid.addWidget(self.SpecPlot_GroupBox, 0, 3 )
grid.addWidget(self.residual_GroupBox, 0, 4 )
grid.addWidget(self.lineIDs_GroupBox, 0, 5 )
vbox = QtGui.QVBoxLayout()
vbox.addWidget(self.canvas)
vbox.addWidget(self.mpl_toolbar)
vbox.addLayout(grid)
#vbox.setAlignment(QtCore.Qt.AlignBottom)
self.main_frame.setLayout(vbox)
self.setCentralWidget(self.main_frame)
def create_status_bar(self):
self.status_text = QtGui.QLabel("pySSN, v{}".format(__version__))
self.statusBar().addWidget(self.status_text, 1)
def create_menu(self):
self.file_menu = self.menuBar().addMenu("File")
open_init_action = self.create_action("Open init file",
shortcut="",
slot=self.select_init,
tip="Open the initialization file and run the synthesis")
save_pars_action = self.create_action("Save parameters",
shortcut="Ctrl+S",
slot=self.save_pars_as,
tip="Save synthesis and plot parameters to file")
save_pars_as_action = self.create_action("Save parameters as",
shortcut="Ctrl+Shift+S",
slot=self.save_pars_as,
tip="Select file name and save parameters of the synthesis")
self.save_plot_action = self.create_action("Save plot",
shortcut="Ctrl+P",
slot=self.save_plot_as,
tip="Save plot to file")
save_synthesis_action = self.create_action("Save synthesis",
shortcut="",
slot=self.save_synthesis_as,
tip="Save synthesis to file")
save_plot_as_action = self.create_action("Save plot as",
shortcut="Ctrl+Shift+P",
slot=self.save_plot_as,
tip="Select file name and save plot")
save_lines_action = self.create_action("Save lines",
shortcut="Ctrl+L",
slot=self.save_lines_as,
tip="Save list of lines to file")
save_lines_as_action = self.create_action("Save lines as",
shortcut="Ctrl+Shift+L",
slot=self.save_lines_as,
tip="Select file name and save list of lines")
self.add_actions(self.file_menu,
(open_init_action, save_pars_action, None, self.save_plot_action, None, save_synthesis_action, None, save_lines_action))
#(open_init_action, save_pars_action, save_pars_as_action, None, self.save_plot_action, save_plot_as_action, None, save_lines_action, save_lines_as_action))
self.line_sort_list = ['wavelength', 'decreasing wavelength', 'intensity', 'decreasing intensity', 'ion' , 'decreasing ion' ]
s = 'Sort lines by:\n'
for i in range(len(self.line_sort_list)):
s = s + ' ' + str(i) + ' - ' + self.line_sort_list[i] + '\n'
s = s + '\nSet with:\n' + ' save_lines_sort = <integer>'
self.line_sort_ag = QtGui.QActionGroup(self, exclusive=True)
self.line_sort_menu = self.file_menu.addMenu("Sort lines by")
self.line_sort_menu_ToolTip = ''
for i in range(len(self.line_sort_list)):
a = self.line_sort_ag.addAction(QtGui.QAction(self.line_sort_list[i], self, checkable=True))
self.line_sort_menu.addAction(a)
self.line_sort_ag.triggered.connect(self.line_sort)
self.line_print_dic = OrderedDict( [
( 'num' , 'line number' ),
( 'id' , 'ion' ),
( 'lambda' , 'wavelength' ),
( 'l_shift' , 'wavelength shift' ),
( 'l_tot' , 'corrected wavelength' ),
( 'i_rel' , 'intensity' ),
( 'i_cor' , 'intensity correction factor' ),
( 'i_tot' , 'corrected intensity' ),
( 'ref' , 'reference line number' ),
( 'profile' , 'line profile code number' ),
( 'vitesse' , 'natural line width' ),
( 'comment' , 'comment' ) ])
items = list(self.line_print_dic.values())
s = 'Fields to be printed:\n'
for i in range(len(items)):
s = s + ' ' + str(i) + ' - ' + items[i] + '\n'
s = s + '\nSet with:\n' + ' save_lines_fields = <list>'
self.line_field_menu = self.file_menu.addMenu("Show fields")
self.line_field_menu_ToolTip = ''
for i in range(len(items)):
a = self.create_action(items[i],
shortcut='', slot=self.set_line_fields_to_print, checkable=True,
tip=None)
self.line_field_menu.addAction(a)
self.file_menu.addMenu(self.line_field_menu)
self.show_header_action = self.create_action("Show header",
slot=self.set_show_header,
shortcut="",
checkable=True,
tip="Show header in list of lines")
self.file_menu.addAction(self.show_header_action)
self.open_cosmetic_file_action = self.create_action("Open cosmetic file",
slot=self.set_cosmetic_file,
shortcut="",
tip="Open the cosmetic file")
self.clean_cosmetic_file_action = self.create_action("Clean cosmetic file",
slot=self.clean_cosmetic_file,
shortcut="",
tip="Remove the unchanged lines from the cosmetic file")
self.empty_cosmetic_file_action = self.create_action("Empty cosmetic file",
slot=self.empty_cosmetic_file,
shortcut="",
tip="Remove all lines from the cosmetic file")
self.order_cosmetic_file_action = self.create_action("Order cosmetic file",
slot=self.order_cosmetic_file,
shortcut="",
tip="Order the cosmetic file by line number and remove duplicate lines")
quit_action = self.create_action("&Quit",
slot=self.fileQuit,
shortcut="Ctrl+Q",
tip="Close the application")
self.add_actions(self.file_menu, (None, self.open_cosmetic_file_action, self.clean_cosmetic_file_action,
self.order_cosmetic_file_action, self.empty_cosmetic_file_action, None, quit_action))
self.run_menu = self.menuBar().addMenu("Execute")
run_action = self.create_action("Run",
shortcut="Ctrl+F9",
slot=self.rerun,
tip="Execute synthesis from the beginning")
update_action = self.create_action("Update",
shortcut="F9",
slot=self.adjust,
tip="Update synthesis with changes in line intensities, profiles, and continuum parameters")
draw_action = self.create_action("Draw",
shortcut="F8",
slot=self.set_plot_limits_and_draw,
tip="Redraw plots")
post_proc_action = self.create_action("Post-process",
shortcut="Ctrl+F8",
slot=self.apply_post_proc,
tip="Edit the plots with python commands defined in an external file")
open_profile_action = self.create_action("Instrumental profile",
shortcut="F7",
slot=self.apply_instr_prof,
tip="Open the instrumental profile file and run the synthesis")
refine_wavelengths_action = self.create_action("Wavelength-refining",
slot=self.refine_wavelengths,
shortcut="F6",
tip="Refine the wavelength calibration")
self.add_actions(self.run_menu, (update_action, run_action, draw_action, None,
post_proc_action, open_profile_action, refine_wavelengths_action))
self.line_menu = self.menuBar().addMenu('Lines')
self.show_line_ticks_action = self.create_action('Plot line ticks',
shortcut='Alt+L', slot=self.show_line_ticks_action_clicked, checkable=True,
tip='Check to show line ticks')
self.plot_lines_action = self.create_action('Plot spectra of selected ions',
shortcut='Alt+P', slot=self.show_line_ticks_action_clicked, checkable=True,
tip='Check to plot spectra of selected ions')
self.selected_intensities_action = self.create_action('Only above the cut',
shortcut='Alt+K', slot=self.selected_lines_clicked, checkable=True,
tip='Check to show the ticks for lines with intensities above cut only')
self.selected_ions_action = self.create_action('Only for selected ions',
shortcut='Alt+I', slot=self.selected_lines_clicked, checkable=True,
tip='Check to show the line ticks for selected ions only')
self.add_actions(self.line_menu,
(self.plot_lines_action, None, self.show_line_ticks_action, self.selected_intensities_action, self.selected_ions_action))
self.diff_lines_list = ['ion and reference line', 'ion and process', 'ion', 'element' ]
s = 'Differentiate lines by:\n'
for i in range(len(self.diff_lines_list)):
s = s + ' ' + str(i) + ' - ' + self.diff_lines_list[i] + '\n'
s = s + '\nSet with:\n' + ' diff_lines_by = <integer>'
self.diff_lines_ag = QtGui.QActionGroup(self, exclusive=True)
self.diff_lines_menu = self.line_menu.addMenu("Differentiate lines by")
self.diff_lines_menu_ToolTip = ''
for i in range(len(self.diff_lines_list)):
a = self.diff_lines_ag.addAction(QtGui.QAction(self.diff_lines_list[i], self, checkable=True))
a.setShortcut('Alt+' + str(i+1))
self.diff_lines_menu.addAction(a)
self.diff_lines_ag.triggered.connect(self.diff_lines)
self.cycle_forwards_ions_action = self.create_action('Cycle forwards selected ions',
shortcut='Alt+0', slot=self.cycle_forwards_ions, checkable=False,
tip='Click to cycle forwards the selected ions')
self.cycle_backwards_ions = self.create_action('Cycle backwards selected ions',
shortcut='Alt+9', slot=self.cycle_backwards_ions, checkable=False,
tip='Click to cycle backwards the selected ions')
self.add_actions(self.line_menu,
(None, self.cycle_forwards_ions_action, self.cycle_backwards_ions, None))
self.line_tick_ax_menu = self.line_menu.addMenu('Window of line ticks')
self.line_tick_ax_list = ['Plot of spectra', 'Plot of residuals', 'Separate plot' ]
s = 'Show line ticks on:\n'
for i in range(len(self.line_tick_ax_list)):
s = s + ' ' + str(i) + ' - ' + self.line_tick_ax_list[i] + '\n'
s = s + '\nSet with:\n' + ' line_tick_ax = <integer>'
self.line_tick_ax_ag = QtGui.QActionGroup(self, exclusive=True)
self.line_tick_ax_menu_ToolTip = ''
for i in range(len(self.line_tick_ax_list)):
a = self.line_tick_ax_ag.addAction(QtGui.QAction(self.line_tick_ax_list[i], self, checkable=True))
self.line_tick_ax_menu.addAction(a)
self.line_tick_ax_ag.triggered.connect(self.set_plot_ax2)
self.line_tick_pos_menu = self.line_menu.addMenu('Position of line ticks')
self.line_tick_pos_list = ['Top', 'Middle', 'Bottom' ]
s = 'Position line ticks:\n'
for i in range(len(self.line_tick_pos_list)):
s = s + ' ' + str(i) + ' - ' + self.line_tick_pos_list[i] + '\n'
s = s + '\nSet with:\n' + ' line_tick_pos = <integer>'
self.line_tick_pos_ag = QtGui.QActionGroup(self, exclusive=True)
self.line_tick_pos_menu_ToolTip = ''
for i in range(len(self.line_tick_pos_list)):
a = self.line_tick_pos_ag.addAction(QtGui.QAction(self.line_tick_pos_list[i], self, checkable=True))
self.line_tick_pos_menu.addAction(a)
self.line_tick_pos_ag.triggered.connect(self.set_plot_ax2)
self.line_tick_color_action = self.create_action('Color of line ticks',
shortcut=None, slot=self.line_tick_color_clicked, checkable=False,
tip='Set color of line ticks')
self.toggle_legend_action = self.create_action('Toggle legend position and zoom',
shortcut='Alt+Shift+L', slot=self.toggle_legend_clicked, checkable=False,
tip='Toggle the legend position and zoom')
self.line_menu.addAction(self.toggle_legend_action)
self.editing_lines_action = self.create_action('Allow editing line parameters',
slot=self.editing_lines_clicked, checkable=True,
tip='Check to allow editing line parameters in line info dialog')
self.update_lines_action = self.create_action('Update after editing line parameters',
shortcut='Alt+U', slot=self.update_lines_clicked, checkable=True,
tip='Check to update synthesis after editing line parameters in line info dialog')
self.show_line_ticks_from_file_action = self.create_action('Plot line ticks from file',
shortcut='F4', slot=self.show_line_ticks_from_file,
tip='Check to show line ticks defined in an external file')
self.ask_tickfile_action = self.create_action("Ask for file name",
checkable=True, tip="Check to be always asked for the text file containing a list of wavelengths to be ticked")
self.add_actions(self.line_menu, (None, self.show_line_ticks_from_file_action))
self.cont_menu = self.menuBar().addMenu('Continuum')
self.plot_cont_action = self.create_action('Plot continuum',
shortcut="Alt+C",
slot=self.plot_cont_action_clicked,
checkable=True,
tip='Check to plot the different components of the continuum spectrum')
self.cont_action = self.create_action('Parameters',
shortcut="Shift+Alt+C",
slot=self.cont_dialog,
tip='Parameters of the continuum spectrum')
self.interpol_cont_action = self.create_action('User-defined continuum',
shortcut="F5",
slot=self.user_continuum,
tip='Open dialog to set the user-defined continuum spectrum')
self.add_actions(self.cont_menu,
(self.plot_cont_action, self.cont_action, self.interpol_cont_action,))
self.settings_menu = self.menuBar().addMenu('Settings')
self.verbosity_list = ['None', 'Errors', 'Errors and warnings', 'Errors, warnings, and comments', 'Debug messages' ]
s = 'Verbosity level:\n'
for i in range(len(self.verbosity_list)):
s = s + ' ' + str(i) + ' - ' + self.verbosity_list[i] + '\n'
s = s + '\nSet with:\n' + ' log_level = <integer>'
self.verbosity_ag = QtGui.QActionGroup(self, exclusive=True)
#self.verbosity_menu = self.menuBar().addMenu("Verbosity")
self.verbosity_menu = self.settings_menu.addMenu("Verbosity")
self.verbosity_menu_ToolTip = ''
for i in range(len(self.verbosity_list)):
a = self.verbosity_ag.addAction(QtGui.QAction(self.verbosity_list[i], self, checkable=True))
self.verbosity_menu.addAction(a)
self.verbosity_ag.triggered.connect(self.verbosity)
self.style_list = list(QtGui.QStyleFactory.keys())
s = 'Widget styles:\n'
for i in range(len(self.style_list)):
s = s + ' ' + str(i) + ' - ' + self.style_list[i] + '\n'
s = s + '\nSet with:\n' + ' qt_style = <integer>'
self.style_ag = QtGui.QActionGroup(self, exclusive=True)
self.style_menu = self.settings_menu.addMenu('Widget style')
self.style_menu_ToolTip = ''
for i in range(len(self.style_list)):
a = self.style_ag.addAction(QtGui.QAction(self.style_list[i], self, checkable=True))
self.style_menu.addAction(a)
self.style_ag.triggered.connect(self.style)
self.enable_tooltips_action = self.create_action('Enable tooltips',
slot=self.enable_tooltips_action_clicked, checkable=True,
tip='Check to enable tooltips')
self.adjust_fig_action = self.create_action('Adjust figure',
slot=self.adjust_fig_action_clicked, checkable=True,
tip='Automatically adjust figure to avoid overlaps and to minimize the empty borders.')
self.show_uncor_obs_action = self.create_action('Show uncorrected spectrum',
slot=self.show_uncor_obs_action_clicked, checkable=True,
tip='Show observational spectrum without the wavelength refining.')
self.add_actions(self.settings_menu,
(None, self.enable_tooltips_action, self.adjust_fig_action, None, self.editing_lines_action, self.update_lines_action, self.show_uncor_obs_action))
self.help_menu = self.menuBar().addMenu("&Help")
about_action = self.create_action("&About",
shortcut='F1', slot=self.on_about,
            tip='About pySSN')
self.add_actions(self.help_menu, (about_action,))
def fileQuit(self):
self.close()
def add_actions(self, target, actions):
for action in actions:
if action is None:
target.addSeparator()
else:
target.addAction(action)
def create_action( self, text, slot=None, shortcut=None,
icon=None, tip=None, checkable=False,
signal="triggered()"):
action = QtGui.QAction(text, self)
if icon is not None:
action.setIcon(QtGui.QIcon(":/%s.png" % icon))
if shortcut is not None:
action.setShortcut(shortcut)
if tip is not None:
action.setToolTip(tip)
action.setStatusTip(tip)
if slot is not None:
self.connect(action, QtCore.SIGNAL(signal), slot)
if checkable:
action.setCheckable(True)
return action
def isInteger(self, str_):
try:
int(str_)
return True
except ValueError:
return False
    def isPositiveInteger(self, str_):
        return self.isInteger(str_) and int(str_) > 0
    def isPositiveOdd(self, str_):
        return self.isInteger(str_) and int(str_) > 0 and int(str_)%2 == 1
def isFloat(self, str_):
try:
np.float(str_)
return True
except ValueError:
return False
def floatFixFormat(self, r, fix_fmt, align='>'):
"""
floatFixFormat(1.23456789, '{:7.3f}') = ' 1.234'
floatFixFormat(-1.23456789, '{:7.3f}') = ' -1.234'
floatFixFormat(123.456789, '{:7.3f}') = ' 1.23e2'
floatFixFormat(-123.456789, '{:7.3f}') = '-1.23e2'
floatFixFormat(1.23456789e+04, '{:7.3f}') = ' 1.23e4'
floatFixFormat(1.23456789e-04, '{:7.3f}') = ' 1.2e-4'
floatFixFormat(1.23456789e+34, '{:7.3f}') = ' 1.2e34'
floatFixFormat(99.999999, '{:7.3f}') = ' 1.2e34'
"""
if not ( 'f' in fix_fmt and self.isFloat(r) ):
return None
s = fix_fmt.strip('{')
s = s.strip('}')
s = s.strip(':')
s = s.strip('f')
k = s.index('.')
w = int(s[:k])
p = int(s[k+1:])
s0 = '{:{align}{w}.{p}f}'.format(float(abs(r)), w=w-1, p=p, align=align)
s = '{:0.{w}e}'.format(float(abs(r)), w=w)
if r < 0:
sgn = '-'
else:
sgn = ''
k = s.index('e')
mantissa = s[:k]
mantissa = mantissa[:p+2]
e = int(s[k+1:])
if p+e+2>w-3-len(str(e)) and len(s0) < w:
s = s0.strip()
else:
s = '{:0.{p}e}'.format(float(abs(r)), p=min(p,w-4-len(str(e))))
k = s.index('e')
mantissa = s[:k]
exponent = str(int(s[k+1:]))
s = mantissa + 'e' + exponent
s = '{:{align}{w}}'.format(sgn+s, w=w, align=align)
return s
def rightFormat(self, s, field):
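        # Convert the edited string s to the fixed-width format of the given field; return None if the value cannot be represented (wrong type, field overflow, or an invalid 'vitesse').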
if field == 'comment':
output = s.strip()
return output
try:
if field == 'profile':
r = int(s)
else:
r = np.float(s)
fmt = self.sp.field_format[field]
if 'f' in fmt:
s = self.floatFixFormat(r, fmt)
else:
s = fmt.format(r)
if len(s) == self.sp.field_width[field] and not np.isinf(r):
if field == 'vitesse' and (r < 0 or s.strip() == '0.00'):
output = None
else:
output = s
else:
output = None
except:
output = None
return output
def ConvStrToValidTypes(self, str_):
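        # Convert a string from the settings table into a Python value: int, float, bool, list of floats, list of (float, float) tuples, or the string itself; return None if the conversion fails.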
str_ = str_.replace('Error in ','')
str_ = str_.replace(' ','')
if str_ == '':
result = None
elif str_.isdigit():
result = int(str_)
elif self.isFloat(str_):
result = np.float(str_)
elif str_.capitalize() == 'True':
result = True
elif str_.capitalize() == 'False':
result = False
elif str_.find('(') >= 0:
try:
str_ = str_.replace('[','')
str_ = str_.replace(']','')
str_ = str_.strip('[]()')
result = [(float(s.split(',')[0]),float(s.split(',')[1])) for s in str_.split('),(')]
except:
result = None
elif str_.find(',') >= 0:
try:
str_ = str_.replace('[','')
str_ = str_.replace(']','')
result = [float(i) for i in str_.split(',')]
except:
result = None
else:
result = str_
return result
def save_par_in_file(self, field, value, path, help_=None):
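        # Replace the last occurrence of 'field' in the file at path with 'field = value' (keeping the old line as a comment), or append the assignment, preceded by help_ as a comment, at the end of the file.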
if self.isValidFilename(path):
if os.path.isfile(path):
f = open(path, 'r')
lines = f.readlines()[::-1]
f.close()
else:
lines = []
j = 0
found = False
while ( j < len(lines) ) and ( not found ):
line = str(lines[j])
if line.find(field) == 0:
if type(value) is str:
s0 = ' = \''
s1 = '\'\n'
else:
s0 = ' = '
s1 = '\n'
line = '# ' + line + field + s0 + value + s1
lines[j] = line
found = True
break
j += 1
if not found:
if help_ is not None:
lines.insert(0, '\n# ' + help_ + '\n')
lines.insert(0, field + ' = ' + value + '\n')
lines = lines[::-1]
f = open(path, 'w')
f.writelines(lines)
f.close()
def save_cont_pars(self):
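        # Ask for a file name and write the parameters of the continuum table to it, updating entries that already exist (keeping their trailing comments) and appending new ones with their help text.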
file_choices = "Python files (*.py) (*.py);;Text files (*.txt *.dat) (*.txt *.dat);;All Files (*) (*)"
filename = self.sp.config_file.split('/')[-1]
path = unicode(QtGui.QFileDialog.getSaveFileName(self, 'Save to file', filename, file_choices))
if path:
if os.path.isfile(path):
f = open(path, 'r')
lines = f.readlines()[::-1]
f.close()
else:
lines = []
for i in range(0, self.table.rowCount()):
field = str(self.table.item(i,0).text())
value = str(self.table.item(i,1).text())
help_ = str(self.table.item(i,2).text().toUtf8())
help_ = help_.replace('\xce\xb2', 'beta')
help_ = help_.replace('\xe2\x81\xbb\xc2\xb3', '-3')
help_ = help_.replace('\xce\xb1', 'alpha')
help_ = help_.replace('\xce\xbb/5000 \xe2\x84\xab', 'lambda/5000 A')
j = 0
found = False
while ( j < len(lines) ) and ( not found ):
line = str(lines[j])
if line.find(field) == 0:
k = line.find('#')
if k > 0:
comment = ' ' + line[k:]
else:
comment = '\n'
line = field + ' = ' + value + comment
lines[j] = line
found = True
break
j += 1
if not found:
lines.insert(0, '\n# ' + help_ + '\n')
lines.insert(0, field + ' = ' + value + '\n')
lines = lines[::-1]
f = open(path, 'w')
f.writelines(lines)
f.close()
def get_shifts_from_profile(self, profile_key):
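        # Return the sorted shifts of the Gaussian components with intensity > 0.2 for the given profile key (falling back to profile '1'), together with the profile velocity.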
if profile_key not in self.sp.emis_profiles:
profile_key = '1'
vel = self.sp.emis_profiles[profile_key]['vel']
par_list = self.sp.emis_profiles[profile_key]['params']
shift_list = []
for item in par_list:
shift = np.float(item[2])
intensity = np.float(item[1])
if item[0]=='G' and ( intensity > 0.2 ):
shift_list.append(shift)
shift_list.sort()
return shift_list, vel
def plot_tick_at(self, wavelength, ion, line_num):
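        # Draw a green tick at the given wavelength, add dashed ticks for the individual profile components when their shifts are resolved, and add the ion label to the legend.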
if self.green_tick_shown:
self.on_draw()
color = 'green'
ion = ion.replace('_',' ').strip()
to_select = (self.sp.liste_raies['num'] == np.int(line_num))
vitesse = self.sp.liste_raies[to_select]['vitesse']
profile_key = str(self.sp.liste_raies[to_select]['profile'][0])
shift_list, vel = self.get_shifts_from_profile(profile_key)
line_num = line_num.strip().strip('0')
# label = ion + ' (' + line_num.strip() + ')'
label = ion + ' {:.2f}'.format(wavelength)
posTick = self.getTickPosOfSelectedLine()
y1, y2 = self.get_line_tick_lim(posTick)
k = self.sp.get_conf('line_tick_ax')
if not (k == 1 and self.residual_GroupBox.isChecked()):
k = 0
if len(shift_list) > 0:
if posTick == 0:
ys1 = 2*y1-y2
ys2 = y1
ym = y1
else:
ys1 = y2
ys2 = 2*y2-y1
ym = y2
if k == 0:
yy1 = self.y1_plot_lims[0] + ym*(self.y1_plot_lims[1] - self.y1_plot_lims[0])
else:
yy1 = self.y3_plot_lims[0] + ym*(self.y3_plot_lims[1] - self.y3_plot_lims[0])
current_legend_loc = self.sp.legend_loc
f = 0.15
r = (self.x_plot_lims[1] - self.x_plot_lims[0])/2
if wavelength - self.x_plot_lims[0] < 2*r*f:
current_legend_loc = 1
if self.x_plot_lims[1] - wavelength < 2*r*f:
current_legend_loc = 2
self.fig.axes[k].axvline( wavelength, y1, y2, color = color, linestyle = 'solid', linewidth = 2.5 )
wave_shifts = -vitesse*wavelength*shift_list / CST.CLIGHT * 1e5 + wavelength*vel / CST.CLIGHT * 1e5
if len(wave_shifts) > 0:
max_wave_shift = max(abs(wave_shifts))
else:
max_wave_shift = 0
            # Ticks for the individual profile components are only shown when the largest component shift exceeds the fraction f (0.1% for f = 0.001) of the x-axis width.
f = 0.001
if max_wave_shift > f*(self.x_plot_lims[1] - self.x_plot_lims[0]):
x1 = (wavelength - self.x_plot_lims[0])/(self.x_plot_lims[1] - self.x_plot_lims[0])
for shift in wave_shifts:
self.fig.axes[k].axvline( wavelength+shift, ys1, ys2, color = color, linestyle = '--', linewidth = 2.5 )
x2 = (wavelength + shift - self.x_plot_lims[0])/(self.x_plot_lims[1] - self.x_plot_lims[0])
self.fig.axes[k].axhline( yy1, x1, x2, color = color, linestyle = '-', linewidth = 1.0 )
if self.addGreenTickToLegend:
self.fig.axes[k].step( [0,0], [0,100], color = color, linestyle = 'solid', label = label, linewidth = 2.5 )
self.fig.axes[k].legend(loc=current_legend_loc, fontsize=self.sp.legend_fontsize)
self.fig.canvas.draw()
self.green_tick_shown = True
self.magenta_tick_shown = False
def show_line_info_dialog(self):
def get_window_size_and_position():
if self.line_info_dialog is None:
font = QtGui.QFont()
width = QtGui.QFontMetrics(font).width('='*120)
self.line_info_dialog_width = width
self.line_info_dialog_height = 470
sG = QtGui.QApplication.desktop().screenGeometry()
self.line_info_dialog_x = sG.width()-self.line_info_dialog_width
self.line_info_dialog_y = 0
else:
self.line_info_dialog_width = self.line_info_dialog.width()
self.line_info_dialog_height = self.line_info_dialog.height()
self.line_info_dialog_x = self.line_info_dialog.pos().x()
self.line_info_dialog_y = self.line_info_dialog.pos().y()
def save_initial_plot_pars():
self.init_line_num = self.line_info_box.text()
self.init_ion = self.ion_box.text()
self.init_xmin = self.xlim_min_box.text()
self.init_xmax = self.xlim_max_box.text()
self.init_y1min = self.y1lim_min_box.text()
self.init_y1max = self.y1lim_max_box.text()
self.init_y3min = self.y3lim_min_box.text()
self.init_y3max = self.y3lim_max_box.text()
self.init_legend_fontsize = self.sp.legend_fontsize
self.init_legend_loc = self.sp.legend_loc
def toggle_statusbar():
self.showStatusBar = not self.showStatusBar
statusBar.setVisible(self.showStatusBar)
def redo_initial_plot():
self.line_info_box.setText(self.init_line_num)
self.ion_box.setText(self.init_ion)
self.xlim_min_box.setText(self.init_xmin)
self.xlim_max_box.setText(self.init_xmax)
self.y1lim_min_box.setText(self.init_y1min)
self.y1lim_max_box.setText(self.init_y1max)
self.y3lim_min_box.setText(self.init_y3min)
self.y3lim_max_box.setText(self.init_y3max)
self.sp.legend_fontsize = self.init_legend_fontsize
self.sp.legend_loc = self.init_legend_loc
self.set_plot_limits_and_draw()
#self.save_from_lim_boxes()
#self.draw_ion()
def do_reset():
self.curr_line_num = self.init_line_num
get_info(self.curr_line_num)
fill_line_info_table()
redo_initial_plot()
def toggle_show_satellites():
self.show_satellites = (self.show_satellites + 1)%3
fill_line_info_table()
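        # Note: on_click mirrors on_doubleClick below but is not connected to any signal in this dialog; clicks are handled by on_itemClicked and on_itemSelectionChanged.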
def on_click():
item = self.line_info_table.currentItem()
row = item.row()
col = item.column()
s = item.text()
if col == col_ion:
ion = self.line_info_table.item(row, col).text()
self.ion_box.setText(ion)
self.draw_ion()
if not self.isFloat(s):
return
if col in [col_num, col_ref] and int(s) != 0:
self.curr_line_num = s
get_info(self.curr_line_num)
self.line_info_box.setText(self.curr_line_num)
fill_line_info_table()
def on_doubleClick():
item = self.line_info_table.currentItem()
row = item.row()
col = item.column()
s = item.text()
if col == col_ion:
ion = self.line_info_table.item(row, col).text()
self.ion_box.setText(ion)
self.draw_ion()
if not self.isFloat(s):
return
if col in [col_num, col_ref] and int(s) != 0:
self.curr_line_num = s
get_info(self.curr_line_num)
self.line_info_box.setText(self.curr_line_num)
fill_line_info_table()
def on_itemClicked():
# to avoid blinking with itemSelectionChanged
item = self.line_info_table.currentItem()
if item == self.selected_item:
on_itemSelectionChanged()
def on_itemSelectionChanged():
if self.green_tick_shown:
self.on_draw()
self.green_tick_shown = False
item = self.line_info_table.currentItem()
if item == None:
self.draw_ion()
return
self.selected_item = item
row = item.row()
col = item.column()
s = item.text()
l_shift_refline = np.float(self.sp.fieldStrFromLine(self.refline,'l_shift'))
if col == col_wave:
wavelength = np.float(s)
ion = str(self.line_info_table.item(row, col_ion).text())
line_num = str(self.line_info_table.item(row, col_num).text())
max_wave = np.float(self.sp_max_box.text())
min_wave = np.float(self.sp_min_box.text())
if wavelength > min_wave and wavelength < max_wave:
l_shift = np.float(self.line_info_table.item(row, col_lshift).text())
wavelength = wavelength + l_shift + l_shift_refline
r = (self.x_plot_lims[1] - self.x_plot_lims[0])/2
f = 0.05
if (wavelength < self.x_plot_lims[0] + f*r) or (wavelength > self.x_plot_lims[1] - f*r):
if wavelength-r < min_wave:
self.x_plot_lims = (min_wave-r*f, min_wave-r*f+2*r)
elif wavelength+r > max_wave:
self.x_plot_lims = (max_wave+r*f-2*r , max_wave+r*f)
else:
self.x_plot_lims = (wavelength-r,wavelength+r)
if not self.axes_fixed:
self.update_lim_boxes()
self.restore_axes()
self.plot_tick_at(wavelength, ion, line_num)
elif wavelength == 1:
if str(self.line_info_table.item(row, col_ref).text()) == '0000000000000':
satellites = self.satellites
else:
satellites = self.sp.read_satellites(self.sp.phyat_file, int(line_num))
satellites = add_satellites_of_subreferences(satellites)
SelectedSatellites = []
max_wave = np.float(self.sp_max_box.text())
min_wave = np.float(self.sp_min_box.text())
for i in range(0, len(satellites)):
wavelength = np.float(self.sp.fieldStrFromLine(satellites[i],'lambda'))
if (wavelength > min_wave) and (wavelength < max_wave):
SelectedSatellites.append(satellites[i])
satellites = SelectedSatellites
self.plot_line_ticks_for(satellites, ion, line_num, self.refline)
        def isRefLine(line):
            return self.sp.fieldStrFromLine(line,'ref').strip() == '0000000000000'
        def isSubRefLine(line):
            wavelength = np.float(self.sp.fieldStrFromLine(line,'lambda'))
            return not isRefLine(line) and (wavelength < 2.0)
def fill_data(i, line, cat=''):
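            # Write the fields of one line record into row i of the table, marking as editable only the columns allowed for its category ('ref', 'subref' or 'sat').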
if line == None:
return
editableCols = []
if self.sp.get_conf('qt_allow_editing_lines', False):
if cat == 'sat':
if do_cosmetics:
editableCols = ['l_shift', 'i_cor', 'profile', 'vitesse', 'comment']
else:
editableCols = []
elif cat == 'subref':
if do_cosmetics:
editableCols = ['i_cor', 'comment']
else:
editableCols = []
elif cat == 'ref':
editableCols = ['l_shift', 'i_cor', 'i_rel', 'profile', 'vitesse', 'comment']
for j in range(0,len(fieldItems)):
s = self.sp.fieldStrFromLine(line, fieldItems[j])
s = s.strip()
if j == col_ion:
if self.show_true_ions:
s = self.sp.true_ion(s).replace('_',' ').strip()
isPseudoIon = self.sp.isPseudoIon(s)
if j == fieldItems.index('proc'):
if isRefLine(line):
s = ''
elif isPseudoIon:
s = ''
else:
s = self.sp.process[s]
item = QtGui.QTableWidgetItem(s)
if fieldItems[j] in editableCols:
item.setBackgroundColor(self.editableCells_bg_color)
else:
item.setFlags(item.flags() ^ QtCore.Qt.ItemIsEditable)
item.setBackgroundColor(self.readOnlyCells_bg_color)
self.line_info_table.setItem(i,j,item)
def fill_text(i, text):
item = QtGui.QTableWidgetItem(text)
item.setFlags(item.flags() ^ (QtCore.Qt.ItemIsEditable|QtCore.Qt.ItemIsSelectable|QtCore.Qt.ItemIsEnabled))
item.setBackgroundColor(self.readOnlyCells_bg_color)
item.setTextAlignment(QtCore.Qt.AlignBottom)
item.setTextColor(QtCore.Qt.blue)
self.line_info_table.setItem(i,0,item)
self.line_info_table.setSpan(i,0,2,len(fieldItems))
def add_satellites_of_subreferences(satellites):
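            # Return the satellite list extended with the satellites of every sub-reference line it contains, following sub-references recursively.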
subref_list = []
all_satellites = satellites
for sat_line in satellites:
if isSubRefLine(sat_line):
subref_list.append(sat_line)
i = 0
while i < len(subref_list):
sat_line_num = self.sp.fieldStrFromLine(subref_list[i],'num')
new_satellites = self.sp.read_satellites(self.sp.phyat_file, int(sat_line_num))
for line in new_satellites:
if isSubRefLine(line):
subref_list.append(line)
i += 1
for line in new_satellites:
if not line in all_satellites:
all_satellites.append(line)
return all_satellites
def get_info(line_num):
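            # Gather the requested line, its chain of sub-reference lines, its reference line, and all their satellites (with cosmetic data applied when enabled), and store them in the attributes used by fill_line_info_table.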
line = None
refline = None
subrefline = None
LineList = []
if int(line_num) == 0:
return
while refline == None:
refline = self.sp.read_line(self.sp.fic_model, int(line_num))
if refline is None:
if do_cosmetics:
curr_line = self.sp.read_line(self.sp.fic_cosmetik, int(line_num))
else:
curr_line = None
if self.sp.cosmetic_line_ok(curr_line) is not True:
curr_line = None
if curr_line == None:
curr_line = self.sp.read_line(self.sp.phyat_file, int(line_num))
LineList.append(curr_line)
line_num = self.sp.fieldStrFromLine(curr_line,'ref')
if len(LineList) > 0:
if isSubRefLine(LineList[0]):
subrefline = LineList[:1]
else:
line = LineList[0]
if len(LineList) > 1:
subrefline = LineList[1:]
if subrefline is not None:
n_subref = len(subrefline)
else:
n_subref = 0
subsatellites = []
for k in range(0, n_subref):
subsat = []
subrefline_num = self.sp.fieldStrFromLine(subrefline[k], 'num')
subsat = self.sp.read_satellites(self.sp.phyat_file, int(subrefline_num))
n_subsat = len(subsat)
if do_cosmetics:
for i in range(0,n_subsat):
sat_line = subsat[i]
sat_line_num = int(self.sp.fieldStrFromLine(sat_line,'num'))
cosmetic_line = self.sp.read_line(self.sp.fic_cosmetik, sat_line_num)
if cosmetic_line is not None:
subsat[i] = cosmetic_line
subsatellites = subsatellites + subsat
subsatellites = add_satellites_of_subreferences(subsatellites)
n_subsat = len(subsatellites)
if refline is not None:
refline_num = self.sp.fieldStrFromLine(refline,'num')
satellites = self.sp.read_satellites(self.sp.phyat_file, int(refline_num))
satellites = add_satellites_of_subreferences(satellites)
n_sat = len(satellites)
if do_cosmetics:
for i in range(0,n_sat):
sat_line = satellites[i]
sat_line_num = int(self.sp.fieldStrFromLine(sat_line,'num'))
cosmetic_line = self.sp.read_line(self.sp.fic_cosmetik, sat_line_num)
if cosmetic_line is not None:
satellites[i] = cosmetic_line
else:
n_sat = 0
if line is None and refline is None:
title = 'Error in line info dialog'
msg = 'Line number not found.'
QtGui.QMessageBox.critical(self, title, msg, QtGui.QMessageBox.Ok )
self.line = line
self.subrefline = subrefline
self.refline = refline
self.subsatellites = subsatellites
self.satellites = satellites
self.n_sat = n_sat
self.n_subsat = n_subsat
self.n_subref = n_subref
def do_sort(lines):
waves = []
for i in range(0,len(lines)):
waves.append(self.sp.fieldStrFromLine(lines[i], 'lambda'))
lines = [x for _,x in sorted(zip(waves,lines))]
return lines
def fill_line_info_table():
self.line_info_table.blockSignals(True)
line = self.line
subrefline = self.subrefline
refline = self.refline
subsatellites = self.subsatellites
satellites = self.satellites
n_sat = self.n_sat
n_subsat = self.n_subsat
n_subref = self.n_subref
SelectedSatellites = []
SelectedSubSatellites = []
if self.show_satellites == 0:
n_sat = 0
n_subsat = 0
else:
max_wave = np.float(self.sp_max_box.text())
min_wave = np.float(self.sp_min_box.text())
for i in range(0, len(satellites)):
wavelength = np.float(self.sp.fieldStrFromLine(satellites[i],'lambda'))
if self.show_satellites == 2 or \
(self.show_satellites == 1 and (wavelength > min_wave) and (wavelength < max_wave)):
SelectedSatellites.append(satellites[i])
for i in range(0, len(subsatellites)):
wavelength = np.float(self.sp.fieldStrFromLine(subsatellites[i],'lambda'))
if self.show_satellites == 2 or \
(self.show_satellites == 1 and (wavelength > min_wave) and (wavelength < max_wave)):
SelectedSubSatellites.append(subsatellites[i])
n_sat = len(SelectedSatellites)
n_subsat = len(SelectedSubSatellites)
self.line_info_table.clearContents()
self.line_info_table.setRowCount(n_sat+n_subsat+20)
self.line_info_table.clearSpans()
k = 0
sat_list = []
if line is not None:
fill_text(k,'Line:')
k += 2
fill_data(k, line, 'sat')
k += 1
if subrefline is not None:
fill_text(k,'Subreference line:')
k += 2
for i in range(0,n_subref):
fill_data(k, subrefline[i], 'subref')
k += 1
if n_subsat > 0:
SelectedSubSatellites = do_sort(SelectedSubSatellites)
fill_text(k, str(n_subsat) + ' satellites:')
sat_list.append([k,n_subsat])
k += 2
for i in range(0,n_subsat):
if isSubRefLine(SelectedSubSatellites[i]):
fill_data(k+i, SelectedSubSatellites[i], 'subref')
else:
fill_data(k+i, SelectedSubSatellites[i], 'sat')
k += n_subsat
fill_text(k,'Reference line:')
k += 2
fill_data(k, refline, 'ref')
k += 1
if n_sat > 0:
SelectedSatellites = do_sort(SelectedSatellites)
fill_text(k, str(n_sat) + ' satellites:')
sat_list.append([k,n_sat])
k += 2
for i in range(0,n_sat):
if isSubRefLine(SelectedSatellites[i]):
fill_data(k+i, SelectedSatellites[i], 'subref')
else:
fill_data(k+i, SelectedSatellites[i], 'sat')
k += n_sat
self.line_info_table.setRowCount(k)
self.line_info_table.resizeColumnsToContents()
self.line_info_table.resizeRowsToContents()
self.line_info_table.blockSignals(False)
self.line_info_table.blockSignals(True)
if self.show_satellites == 1:
s0 = ' (in the synthesis range)'
elif self.show_satellites == 2:
s0 = ' (in the entire database and including subreferences)'
else:
s0 = ''
for i in sat_list:
k = i[0]
n = i[1]
fill_text(k, str(n) + ' satellites:' + s0)
self.line_info_table.blockSignals(False)
def on_itemChanged():
self.line_info_table.blockSignals(True)
item = self.line_info_table.currentItem()
if not (item.flags() & QtCore.Qt.ItemIsEditable):
self.line_info_table.blockSignals(False)
return
row = item.row()
col = item.column()
s = str(item.text())
value = self.rightFormat(s, fieldItems[col])
if value != None:
self.line_info_table.setItem(row, col, QtGui.QTableWidgetItem(value.strip()))
self.line_info_table.item(row, col).setBackgroundColor(self.editableCells_bg_color)
save_change(row,col)
else:
self.line_info_table.item(row, col).setBackgroundColor(QtGui.QColor('red'))
title = 'Invalid format for the ' + self.sp.field_tip[fieldItems[col]]
s0 = self.sp.field_format[fieldItems[col]]
s0 = s0[2:-1]
msg = "'" + s + "' can not be converted into the proper field format: " + s0
if col == self.sp.fields.index('vitesse'):
msg = msg + '\nor it is not a positive number.'
QtGui.QMessageBox.critical(self, title, msg, QtGui.QMessageBox.Ok )
get_info(self.curr_line_num)
fill_line_info_table()
self.line_info_table.blockSignals(False)
def get_line_from_table(row):
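            # Rebuild the fixed-width line record from the values displayed in one row of the table; the process column is display-only and is skipped.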
line = ' '*85
jList = range(0,len(fieldItems))
jList.remove(col_proc)
for j in jList:
s = self.line_info_table.item(row,j).text()
width = self.sp.field_width[fieldItems[j]]
align = self.sp.field_align[fieldItems[j]]
pos = self.sp.field_pos[fieldItems[j]]
s = '{:{a}{w}s}'.format(s, a=align, w=width)
line = line[:pos] + s + line[pos:]
line = line.rstrip()
return line
def save_change(row, col):
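            # Write the edited row back to the model file (reference lines) or to the cosmetic file and, unless only the comment was edited, update the synthesis when 'qt_update_after_editing_lines' is set.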
line = get_line_from_table(row)
if isRefLine(line):
filename = self.sp.fic_model
else:
filename = self.sp.fic_cosmetik
self.sp.replace_line(filename, line)
if col != self.sp.fields.index('comment') and \
self.sp.get_conf('qt_update_after_editing_lines', False):
self.adjust()
self.nearbyLines = self.sp.get_nearby_lines(self.cursor_w1, self.cursor_w2, do_print=False)
if self.nearbyLines is not None and self.nearbyLines_dialog.isVisible():
self.fill_nearbyLines_table()
def init_lines():
self.line = None
self.subrefline = None
self.refline = None
self.subsatellites = []
self.satellites = []
self.n_sat = 0
self.n_subsat = 0
self.n_subref = 0
statusBar = QtGui.QStatusBar()
s = 'Click on \"Satellites\" to cycle the tri-state display of satellite lines:\n' \
            ' 1 - The satellite lines in the spectral range of the synthesis are shown; \n' \
            ' 2 - All satellite lines (including subreference lines and lines outside the spectral range of the synthesis) are shown; \n' \
            ' 3 - No satellite line is shown. \n' \
'Double-click on a line number to show the data for that line. \n' \
'Double-click on an ion to plot line ticks and spectrum for that single ion. \n' \
'Select or click on a wavelength to draw a tick at that position and recenter the spectrum if necessary. \n' \
'Click on \"Reset\" to return to the original line and plot settings. \n' \
'The green fields are editable.'
statusBar.addWidget(QtGui.QLabel(s),1)
self.showStatusBar = False
statusBar.setVisible(self.showStatusBar)
self.show_satellites = 1
get_window_size_and_position()
if self.line_info_dialog is not None:
self.line_info_dialog.close()
self.line_info_table.close()
self.line_info_dialog = QtGui.QDialog()
self.line_info_dialog.resize(self.line_info_dialog_width,self.line_info_dialog_height)
self.line_info_dialog.move(self.line_info_dialog_x,self.line_info_dialog_y)
self.line_info_table = QtGui.QTableWidget()
fieldItems = self.sp.fields
fieldNames = [ self.sp.field_abbr[item] for item in fieldItems ]
col_num = fieldItems.index('num')
col_ion = fieldItems.index('id')
col_wave = fieldItems.index('lambda')
col_proc = fieldItems.index('proc')
col_lshift = fieldItems.index('l_shift')
col_irel = fieldItems.index('i_rel')
col_icor = fieldItems.index('i_cor')
col_ref = fieldItems.index('ref')
col_prof = fieldItems.index('profile')
col_vel = fieldItems.index('vitesse')
col_comm = fieldItems.index('comment')
self.line_info_table.setColumnCount(len(fieldItems))
self.line_info_table.setHorizontalHeaderLabels(fieldNames)
if self.enable_tooltips_action.isChecked():
for j in range(0,len(fieldItems)):
self.line_info_table.horizontalHeaderItem(j).setToolTip(self.sp.field_tip[fieldItems[j]])
self.line_info_table.horizontalHeaderItem(col_vel).setText(u'\u0394v (factor)')
if self.enable_tooltips_action.isChecked():
s = 'For a reference line, it is the thermal broadening parameter, in km/s. \n' \
                'For a satellite line, it is the dimensionless correction factor for the thermal broadening parameter with respect to the reference line.'
self.line_info_table.horizontalHeaderItem(col_vel).setToolTip(s)
self.line_info_table.horizontalHeaderItem(col_comm).setTextAlignment(QtCore.Qt.AlignLeft)
self.line_info_table.horizontalHeaderItem(col_comm).setText(' comment')
init_lines()
do_cosmetics = self.sp.get_conf('do_cosmetik')
save_initial_plot_pars()
self.curr_line_num = self.line_info_box.text()
get_info(self.curr_line_num)
fill_line_info_table()
self.buttonBox = QtGui.QDialogButtonBox(QtGui.QDialogButtonBox.Help|
QtGui.QDialogButtonBox.Close|
QtGui.QDialogButtonBox.Reset|
QtGui.QDialogButtonBox.Apply)
self.buttonBox.button(QtGui.QDialogButtonBox.Apply).setText("Satellites")
if self.enable_tooltips_action.isChecked():
self.buttonBox.button(QtGui.QDialogButtonBox.Apply).setToolTip("Click to toggle the satellite lines")
self.buttonBox.button(QtGui.QDialogButtonBox.Apply).clicked.connect(toggle_show_satellites)
s = "Click to return to the initial states of the line info dialog and figures"
if self.enable_tooltips_action.isChecked():
self.buttonBox.button(QtGui.QDialogButtonBox.Reset).setToolTip(s)
self.buttonBox.button(QtGui.QDialogButtonBox.Reset).clicked.connect(do_reset)
self.buttonBox.button(QtGui.QDialogButtonBox.Help).clicked.connect(toggle_statusbar)
self.buttonBox.rejected.connect(self.line_info_dialog.close)
self.line_info_table.doubleClicked.connect(on_doubleClick)
self.line_info_table.itemChanged.connect(on_itemChanged)
self.selected_item = None
self.line_info_table.itemSelectionChanged.connect(on_itemSelectionChanged)
self.line_info_table.itemClicked.connect(on_itemClicked)
vbox = QtGui.QVBoxLayout()
vbox.addWidget(self.line_info_table)
vbox.addWidget(self.buttonBox)
vbox.addWidget(statusBar)
self.line_info_dialog.setLayout(vbox)
self.line_info_dialog.setWindowTitle('line info dialog')
self.line_info_dialog.setWindowModality(QtCore.Qt.NonModal)
self.line_info_dialog.show()
def fill_nearbyLines_table(self):
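        # Fill the nearby-lines table from self.nearbyLines, restricted to the selected ions when the ion filter is active, and fill the process description column.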
if self.nearbyLines is None or self.nearbyLines_table is None:
return
k = self.sp.get_conf('diff_lines_by')
fieldItems = self.sp.fields
jList = range(0,len(fieldItems))
jProc = fieldItems.index('proc')
jList.remove(jProc)
if self.nearbyDialogFilterIsActive:
#selected_ions = self.sp.get_conf('selected_ions')
selected_ions = self.nearbyLines_selected_ions
selected_true_ions = [self.sp.true_ion(ion) for ion in selected_ions]
nearbyLines = []
for line in self.nearbyLines:
ion = str(line[fieldItems.index('id')]).strip()
true_ion = self.sp.true_ion(ion)
selectThisIon = (( ion in selected_ions or true_ion in selected_ions ) and k == 1) or (true_ion in selected_true_ions and k != 1)
if selectThisIon:
nearbyLines.append(line)
else:
nearbyLines = self.nearbyLines
self.nearbyLines_table.setRowCount(len(nearbyLines))
for i in range(0,len(nearbyLines)):
ion = self.sp.true_ion(nearbyLines[i][fieldItems.index('id')])
for j in jList:
if j > jProc:
k = j - 1
else:
k = j
fmt = self.sp.field_format[fieldItems[j]]
s = fmt.format(nearbyLines[i][k])
s = str(s).strip()
if j == fieldItems.index('num'):
if self.sp.isPseudoIon(ion):
proc_str = ''
else:
proc_str = self.sp.process[s[-9]]
if j == fieldItems.index('id'):
if self.show_true_ions:
s = self.sp.true_ion(s).replace('_',' ').strip()
item = QtGui.QTableWidgetItem(s)
item.setFlags(item.flags() ^ QtCore.Qt.ItemIsEditable)
item.setBackgroundColor(self.readOnlyCells_bg_color)
self.nearbyLines_table.setItem(i,j,item)
item = QtGui.QTableWidgetItem(proc_str)
item.setFlags(item.flags() ^ QtCore.Qt.ItemIsEditable)
item.setBackgroundColor(self.readOnlyCells_bg_color)
self.nearbyLines_table.setItem(i,jProc,item)
self.nearbyLines_table.resizeColumnsToContents()
self.nearbyLines_table.resizeRowsToContents()
self.nearbyLines_table.clearSelection()
def show_nearbyLines_dialog(self):
def get_window_size_and_position():
if self.nearbyLines_dialog is None:
font = QtGui.QFont()
width = QtGui.QFontMetrics(font).width('='*120)
self.nearbyLines_dialog_width = width
self.nearbyLines_dialog_height = 470
sG = QtGui.QApplication.desktop().screenGeometry()
self.nearbyLines_dialog_x = sG.width()-self.nearbyLines_dialog_width
self.nearbyLines_dialog_y = sG.height()-self.nearbyLines_dialog_height
else:
self.nearbyLines_dialog_width = self.nearbyLines_dialog.width()
self.nearbyLines_dialog_height = self.nearbyLines_dialog.height()
self.nearbyLines_dialog_x = self.nearbyLines_dialog.pos().x()
self.nearbyLines_dialog_y = self.nearbyLines_dialog.pos().y()
def do_reset():
self.curr_line_num = self.init_nearby_line_num
#get_info(self.curr_line_num)
#fill_line_info_table()
self.nearbyDialogFilterIsActive = True
#self.nearbyLines_selected_ions = []
toggle_filter()
redo_initial_plot()
def toggle_filter():
self.nearbyLines_selected_ions = []
if not self.nearbyDialogFilterIsActive:
get_selected_ions()
if len(self.nearbyLines_selected_ions) > 0:
self.nearbyDialogFilterIsActive = True
self.buttonBox_nearbyLines.button(QtGui.QDialogButtonBox.RestoreDefaults).setStyleSheet('background-color:red;')
self.buttonBox_nearbyLines.button(QtGui.QDialogButtonBox.RestoreDefaults).setText('Deactivate ion filter')
else:
QtGui.QMessageBox.critical(self, 'nearby lines dialog: ion filter', 'No ion selected.', QtGui.QMessageBox.Ok )
else:
self.nearbyDialogFilterIsActive = False
self.buttonBox_nearbyLines.button(QtGui.QDialogButtonBox.RestoreDefaults).setStyleSheet('')
self.buttonBox_nearbyLines.button(QtGui.QDialogButtonBox.RestoreDefaults).setText('Filter selected ions')
self.fill_nearbyLines_table()
def save_initial_plot_pars():
self.init_nearby_line_num = self.line_info_box.text()
self.init_nearby_ion = self.ion_box.text()
self.init_nearby_xmin = self.xlim_min_box.text()
self.init_nearby_xmax = self.xlim_max_box.text()
self.init_nearby_y1min = self.y1lim_min_box.text()
self.init_nearby_y1max = self.y1lim_max_box.text()
self.init_nearby_y3min = self.y3lim_min_box.text()
self.init_nearby_y3max = self.y3lim_max_box.text()
self.init_nearby_legend_fontsize = self.sp.legend_fontsize
self.init_nearby_legend_loc = self.sp.legend_loc
def redo_initial_plot():
#self.line_info_box.setText(self.init_line_num)
self.ion_box.setText(self.init_nearby_ion)
self.xlim_min_box.setText(self.init_nearby_xmin)
self.xlim_max_box.setText(self.init_nearby_xmax)
self.y1lim_min_box.setText(self.init_nearby_y1min)
self.y1lim_max_box.setText(self.init_nearby_y1max)
self.y3lim_min_box.setText(self.init_nearby_y3min)
self.y3lim_max_box.setText(self.init_nearby_y3max)
self.sp.legend_fontsize = self.init_nearby_legend_fontsize
self.sp.legend_loc = self.init_nearby_legend_loc
self.set_plot_limits_and_draw()
def toggle_statusbar():
self.showStatusBar = not self.showStatusBar
statusBar.setVisible(self.showStatusBar)
def on_doubleClick():
item = self.nearbyLines_table.currentItem()
row = item.row()
col = item.column()
if col in [col_num, col_ref]:
self.line_info_box.setText(item.text())
self.show_line_info_dialog()
elif col == col_ion:
self.ion_box.setText(item.text())
self.draw_ion()
def on_itemClicked():
# to avoid blinking with itemSelectionChanged
item = self.nearbyLines_table.currentItem()
if item == self.selected_item:
on_itemSelectionChanged()
def on_itemSelectionChanged():
item = self.nearbyLines_table.currentItem()
self.selected_item = item
row = item.row()
col = item.column()
if col == col_wave:
wavelength = np.float(item.text())
l_shift = np.float(self.nearbyLines_table.item(row,col_lshift).text())
wavelength = wavelength + l_shift
line_num = str(self.nearbyLines_table.item(row,col_num).text())
ion = str(self.nearbyLines_table.item(row,col_ion).text())
max_wave = np.float(self.sp_max_box.text())
min_wave = np.float(self.sp_min_box.text())
r = (self.x_plot_lims[1] - self.x_plot_lims[0])/2
f = 0.05
if (wavelength < self.x_plot_lims[0] + f*r) or (wavelength > self.x_plot_lims[1] - f*r):
if wavelength-r < min_wave:
self.x_plot_lims = (min_wave-r*f, min_wave-r*f+2*r)
elif wavelength+r > max_wave:
self.x_plot_lims = (max_wave+r*f-2*r , max_wave+r*f)
else:
self.x_plot_lims = (wavelength-r,wavelength+r)
if not self.axes_fixed:
self.update_lim_boxes()
self.restore_axes()
self.plot_tick_at(wavelength, ion, line_num)
else:
if self.green_tick_shown:
self.on_draw()
self.green_tick_shown = False
def do_header_clicked(col):
if col == col_ion:
self.toggle_show_true_ions()
self.fill_nearbyLines_table()
def do_header_doubleClicked(col):
sort = fieldItems[col]
if sort == self.nearbyLines_sort_by:
self.nearbyLines_sort_reverse = not self.nearbyLines_sort_reverse
else:
self.nearbyLines_sort_reverse = False
self.nearbyLines_sort_by = sort
self.sort_nearbyLines(sort, self.nearbyLines_sort_reverse)
self.fill_nearbyLines_table()
def get_selected_ions():
selectedItems = self.nearbyLines_table.selectedItems()
selected_ions = []
for item in selectedItems:
col = item.column()
if col == col_ion:
ion = str(item.text())
if not ion in selected_ions:
selected_ions.append(ion)
if len(selected_ions) > 0:
self.nearbyLines_selected_ions = selected_ions
else:
#self.nearbyLines_selected_ions = self.sp.get_conf('selected_ions')
self.nearbyLines_selected_ions = []
def do_selection():
selectedItems = self.nearbyLines_table.selectedItems()
selected_ions = []
selected_lines = []
for item in selectedItems:
col = item.column()
if col == col_ion:
ion = str(item.text())
if not ion in selected_ions:
selected_ions.append(ion)
if col in [col_num, col_ref]:
line = item.text()
selected_lines.append(line)
if len(selected_ions) > 0:
s = ''
for ion in selected_ions:
s = s + ion + ', '
s = s[:-2]
self.ion_box.setText(s)
self.draw_ion()
if len(selected_lines) > 0:
s = selected_lines[0]
self.line_info_box.setText(s)
self.line_info()
get_window_size_and_position()
self.nearbyLines_dialog = QtGui.QDialog()
self.nearbyLines_dialog.resize(self.nearbyLines_dialog_width, self.nearbyLines_dialog_height)
self.nearbyLines_dialog.move(self.nearbyLines_dialog_x,self.nearbyLines_dialog_y)
statusBar = QtGui.QStatusBar()
s = 'Double-click on a line number (or select the line number and press \"Apply\") to show line info dialog. \n' \
'Double-click on an ion to plot line ticks and spectrum for that single ion. \n' \
'Click or select a wavelength to draw a tick at that position. \n' \
            'Select multiple ions (using click, Shift+click, and Ctrl+click) and press \"Plot selected ions\" to plot line ticks and spectra for a list of ions. \n' \
'Click on the ion header to select all ions. \n' \
'Double-click on a column header to sort the table; Double-click again to toggle between ascending and descending order. \n' \
'Click on \"Reset\" to return to the original selected ions and plot settings. \n' \
'Click on \"Filter selected ions\" to activate/deactivate ion selection.'
statusBar.addWidget(QtGui.QLabel(s),1)
self.showStatusBar = False
statusBar.setVisible(self.showStatusBar)
self.nearbyLines_table = QtGui.QTableWidget()
self.nearbyLines_table.setRowCount(len(self.nearbyLines))
fieldItems = self.sp.fields
fieldNames = [ self.sp.field_abbr[item] for item in fieldItems ]
col_num = fieldItems.index('num')
col_ion = fieldItems.index('id')
col_wave = fieldItems.index('lambda')
col_proc = fieldItems.index('proc')
col_lshift = fieldItems.index('l_shift')
col_irel = fieldItems.index('i_rel')
col_icor = fieldItems.index('i_cor')
col_ref = fieldItems.index('ref')
col_prof = fieldItems.index('profile')
col_vel = fieldItems.index('vitesse')
col_comm = fieldItems.index('comment')
self.nearbyLines_table.setColumnCount(len(fieldNames))
self.nearbyLines_table.setHorizontalHeaderLabels(fieldNames)
if self.enable_tooltips_action.isChecked():
for j in range(0,len(fieldItems)):
self.nearbyLines_table.horizontalHeaderItem(j).setToolTip(self.sp.field_tip[fieldItems[j]])
self.nearbyLines_table.horizontalHeaderItem(col_comm).setTextAlignment(QtCore.Qt.AlignLeft)
self.nearbyLines_table.horizontalHeaderItem(col_vel).setText(u'\u0394v')
if self.enable_tooltips_action.isChecked():
s = u'\u0394v is the thermal broadening parameter of the line, in km/s. \n' \
'For a single Gaussian profile, it is the half-width of the line at the level of 1/e of the peak, \n' \
'related to the full-width at half maximum and the Gaussian standard deviation by:\n\n' \
u' \u0394v = FWHM/(2(ln2)^\u00BD) = FWHM/1.665\n' \
u' \u0394v = \u221A2 \u03C3\n'
self.nearbyLines_table.horizontalHeaderItem(col_vel).setToolTip(s)
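        # Note on the tooltip above (not used by the code): for a Gaussian profile
        # FWHM = 2*sqrt(2*ln 2)*sigma, so FWHM/(2*sqrt(ln 2)) = sqrt(2)*sigma and
        # 2*sqrt(ln 2) ~= 1.665, consistent with the relations quoted for delta-v.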
self.nearbyLines_table.horizontalHeaderItem(col_comm).setText(' comment')
#self.nearbyDialogFilterIsActive = False
self.fill_nearbyLines_table()
save_initial_plot_pars()
self.buttonBox_nearbyLines = QtGui.QDialogButtonBox(QtGui.QDialogButtonBox.Help|
QtGui.QDialogButtonBox.Reset|
QtGui.QDialogButtonBox.RestoreDefaults|
QtGui.QDialogButtonBox.Apply|
QtGui.QDialogButtonBox.Close)
self.buttonBox_nearbyLines.button(QtGui.QDialogButtonBox.RestoreDefaults).setText('Filter selected ions')
self.buttonBox_nearbyLines.button(QtGui.QDialogButtonBox.Apply).setText('Plot selected ions')
self.buttonBox_nearbyLines.rejected.connect(self.nearbyLines_dialog.close)
self.buttonBox_nearbyLines.button(QtGui.QDialogButtonBox.Apply).clicked.connect(do_selection)
self.buttonBox_nearbyLines.button(QtGui.QDialogButtonBox.Help).clicked.connect(toggle_statusbar)
self.buttonBox_nearbyLines.button(QtGui.QDialogButtonBox.Reset).clicked.connect(do_reset)
self.buttonBox_nearbyLines.button(QtGui.QDialogButtonBox.RestoreDefaults).clicked.connect(toggle_filter)
self.nearbyLines_table.doubleClicked.connect(on_doubleClick)
self.nearbyLines_table.itemSelectionChanged.connect(on_itemSelectionChanged)
self.nearbyLines_table.itemClicked.connect(on_itemClicked)
self.nearbyLines_table.verticalHeader().sectionDoubleClicked.connect(do_selection)
#self.nearbyLines_table.horizontalHeader().sectionClicked.connect(do_header_clicked)
self.nearbyLines_table.horizontalHeader().sectionDoubleClicked.connect(do_header_doubleClicked)
vbox = QtGui.QVBoxLayout()
vbox.addWidget(self.nearbyLines_table)
vbox.addWidget(self.buttonBox_nearbyLines)
vbox.addWidget(statusBar)
self.nearbyLines_dialog.setLayout(vbox)
s = 'nearby line dialog: list of lines between {0:.2f} and {1:.2f} angstroms'.format(self.sp.cursor_w1, self.sp.cursor_w2)
self.nearbyLines_dialog.setWindowTitle(s)
self.nearbyLines_dialog.setWindowModality(QtCore.Qt.NonModal)
self.cursor_w1 = self.sp.cursor_w1
self.cursor_w2 = self.sp.cursor_w2
if self.nearbyDialogFilterIsActive:
self.buttonBox_nearbyLines.button(QtGui.QDialogButtonBox.RestoreDefaults).setStyleSheet('background-color:red;')
else:
self.buttonBox_nearbyLines.button(QtGui.QDialogButtonBox.RestoreDefaults).setStyleSheet('')
self.nearbyLines_dialog.show()
def cont_dialog(self):
Pars = [ ( 'cont_unred' , 'Set to True if reddening is to be applied to the continuum' ),
( 'cont_edens' , u'Electron density, in cm\u207B\u00B3' ),
( 'cont_hi_t' , 'Temperature for the H I continuum, in K' ),
( 'cont_hi_i' , u'Intensity of the H I continuum (in theory, intensity of H\u03B2)' ),
( 'cont_hei_t' , 'Temperature for the He I continuum, in K' ),
( 'cont_hei_i' , 'Intensity of the He I continuum (in theory, intensity of He I 4471)' ),
( 'cont_heii_t' , 'Temperature for the He II continuum, in K' ),
                 ( 'cont_heii_i' , 'Intensity of the He II continuum (in theory, intensity of He II 4686)' ),
( 'cont_bb_t' , 'Temperature of the blackbody continuum, in K' ),
( 'cont_bb_i' , 'Intensity of the blackbody continuum' ),
( 'cont_pl_alpha' , u'Index \u03B1 of the power-law continuum F = I*(\u03BB/5000 \u212B)**\u03B1' ),
( 'cont_pl_i' , 'Intensity I of the power-law continuum' ),
( 'cont_user_table' , 'Interpolation table for the user-defined continuum' ),
( 'cont_user_func' , 'Interpolation function for the user-defined continuum' ) ]
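        # Illustration only (not executed): the power-law continuum quoted above is
        #   F(lambda) = cont_pl_i * (lambda / 5000.)**cont_pl_alpha
        # with lambda in angstroms, as stated in the tooltip strings of the table.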
def toggle_statusbar():
self.showStatusBar = not self.showStatusBar
statusBar.setVisible(self.showStatusBar)
def get_window_size_and_position():
if self.cont_pars_dialog is None:
self.cont_pars_dialog_width = 800
self.cont_pars_dialog_height = 460
sG = QtGui.QApplication.desktop().screenGeometry()
self.cont_pars_dialog_x = sG.width()-self.cont_pars_dialog_width
self.cont_pars_dialog_y = sG.height()-self.cont_pars_dialog_height
self.cont_pars_dialog_x = 0
self.cont_pars_dialog_y = 0
else:
self.cont_pars_dialog_width = self.cont_pars_dialog.width()
self.cont_pars_dialog_height = self.cont_pars_dialog.height()
self.cont_pars_dialog_x = self.cont_pars_dialog.pos().x()
self.cont_pars_dialog_y = self.cont_pars_dialog.pos().y()
def set_conf_from_table(row):
s = str(self.table.item(row,1).text())
value = self.ConvStrToValidTypes(s)
if value != None:
self.sp.set_conf(Pars[row][0], value)
self.table.setItem(row, 1, QtGui.QTableWidgetItem(str(value)))
else:
self.table.setItem(row, 1, QtGui.QTableWidgetItem('Error in ' + s))
def on_itemChanged():
self.table.blockSignals(True)
item = self.table.currentItem()
row = item.row()
s = str(item.text())
value = self.ConvStrToValidTypes(s)
if value != None:
self.sp.set_conf(Pars[row][0], value)
#if isinstance(value, basestring):
# value = '\'{}\''.format(value)
self.table.setItem(row, 1, QtGui.QTableWidgetItem(str(value)))
self.table.item(row, 1).setBackgroundColor(self.editableCells_bg_color)
self.cont_par_changed = True
else:
self.table.setItem(row, 1, QtGui.QTableWidgetItem('Error in ' + s))
self.table.item(row, 1).setBackgroundColor(QtGui.QColor('red'))
self.table.blockSignals(False)
get_window_size_and_position()
self.cont_pars_dialog = QtGui.QDialog()
self.cont_pars_dialog.resize(self.cont_pars_dialog_width, self.cont_pars_dialog_height)
self.cont_pars_dialog.move(self.cont_pars_dialog_x, self.cont_pars_dialog_y)
statusBar = QtGui.QStatusBar()
s = 'Click on \"Save\" to write the continuum parameters to a file. \n' \
'Click on \"Update\" to adjust the synthesis to the changes in the continuum parameters. \n' \
'The green fields are editable.'
statusBar.addWidget(QtGui.QLabel(s),1)
self.showStatusBar = False
statusBar.setVisible(self.showStatusBar)
self.table = QtGui.QTableWidget()
self.table.setRowCount(len(Pars))
self.table.setColumnCount(3)
self.table.setHorizontalHeaderLabels([ 'parameter', 'value', 'help' ])
for j in range(0,len(Pars)):
item = QtGui.QTableWidgetItem(Pars[j][0])
item.setFlags(item.flags() ^ QtCore.Qt.ItemIsEditable)
item.setBackgroundColor(self.readOnlyCells_bg_color)
self.table.setItem(j,0,item)
value = self.sp.get_conf(Pars[j][0])
#if isinstance(value, basestring):
# value = '\'{}\''.format(value)
item = QtGui.QTableWidgetItem(str(value))
#item = QtGui.QTableWidgetItem(str(self.sp.get_conf(Pars[j][0])))
item.setBackgroundColor(self.editableCells_bg_color)
self.table.setItem(j,1,item)
item = QtGui.QTableWidgetItem(Pars[j][1])
item.setFlags(item.flags() ^ QtCore.Qt.ItemIsEditable)
item.setBackgroundColor(self.readOnlyCells_bg_color)
self.table.setItem(j,2,item)
self.table.resizeColumnsToContents()
self.table.resizeRowsToContents()
if self.table.columnWidth(1) > 300:
self.table.setColumnWidth(1,300)
self.table.itemChanged.connect(on_itemChanged)
self.buttonBox = QtGui.QDialogButtonBox(QtGui.QDialogButtonBox.Help|
QtGui.QDialogButtonBox.Save|
QtGui.QDialogButtonBox.Apply|
QtGui.QDialogButtonBox.Close)
self.buttonBox.button(QtGui.QDialogButtonBox.Help).setDefault(True)
self.buttonBox.button(QtGui.QDialogButtonBox.Apply).setText('Update')
if self.enable_tooltips_action.isChecked():
self.buttonBox.button(QtGui.QDialogButtonBox.Apply).setToolTip('Click to update synthesis with changes in the continuum parameters.')
self.buttonBox.button(QtGui.QDialogButtonBox.Apply).clicked.connect(self.adjust)
self.buttonBox.rejected.connect(self.cont_pars_dialog.close)
self.buttonBox.button(QtGui.QDialogButtonBox.Save).clicked.connect(self.save_cont_pars)
self.buttonBox.button(QtGui.QDialogButtonBox.Help).clicked.connect(toggle_statusbar)
vbox = QtGui.QVBoxLayout()
vbox.addWidget(self.table)
vbox.addWidget(self.buttonBox)
vbox.addWidget(statusBar)
self.cont_pars_dialog.setLayout(vbox)
self.cont_pars_dialog.setWindowTitle('Continuum parameters')
self.cont_pars_dialog.show()
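    # get_line_tick_lim maps the configured line-tick position to vertical
    # axis-fraction limits: 1 -> middle (0.43-0.57), 2 -> bottom (0.05-0.19),
    # any other value -> top (0.81-0.95).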
def get_line_tick_lim(self, line_tick_pos):
if line_tick_pos == 1:
y1 = 0.43
y2 = 0.57
else:
if line_tick_pos == 2:
y1 = 0.05
y2 = 0.19
else:
y1 = 0.81
y2 = 0.95
return y1, y2
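    # getTickPosOfSelectedLine: positions 0, 1, 2 are used as given; 4 means the
    # opposite extreme of the other ticks (top if they are at the bottom, bottom
    # otherwise); any other value falls back to the position of the other ticks.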
def getTickPosOfSelectedLine(self):
posTick = self.sp.get_conf('line_tick_pos_selectedLine',3)
if posTick not in [0,1,2]:
posOtherTicks = self.sp.get_conf('line_tick_pos')
if posTick == 4:
if posOtherTicks == 2:
posTick = 0
else:
posTick = 2
else:
posTick = posOtherTicks
return posTick
def plot_line_ticks_for(self, satellites, ion, line_num, refline):
k = self.sp.get_conf('line_tick_ax')
if not (k == 1 and self.residual_GroupBox.isChecked()):
k = 0
posTick = self.getTickPosOfSelectedLine()
y1, y2 = self.get_line_tick_lim(posTick)
if len(satellites) > 0:
if ( k == 0 ):
self.sp.plot_line_ticks_for(satellites, ion, line_num, refline, self.axes, y1, y2, self.x_plot_lims[0], self.x_plot_lims[1], self.addGreenTickToLegend)
elif ( k == 1 ):
self.sp.plot_line_ticks_for(satellites, ion, line_num, refline, self.axes3, y1, y2, self.addGreenTickToLegend)
elif ( k == 2 ):
self.sp.plot_line_ticks_for(satellites, ion, line_num, refline, self.axes2, 0.2, 0.8, self.addGreenTickToLegend)
self.green_tick_shown = True
self.canvas.draw()
def on_draw(self, show_legend=True):
        log_.debug('Entering on_draw', calling=self.calling)
if self.sp is None:
            log_.debug('No sp in on_draw', calling=self.calling)
return
if self.axes is None:
log_.debug('Calling make_axes from on_draw (self.axes is None)', calling=self.calling)
self.call_on_draw=False
self.make_axes()
self.init_axes()
log_.debug('back from make_axes from on_draw', calling=self.calling)
self.call_on_draw=True
if self.do_save:
self.save_axes()
self.axes.cla()
self.sp.plot_ax1(self.axes, show_legend)
k = self.sp.get_conf('line_tick_ax')
if self.show_line_ticks_action.isChecked() and ( k == 0 ):
y1, y2 = self.get_line_tick_lim(self.sp.get_conf('line_tick_pos'))
self.sp.plot_line_ticks(self.axes, y1, y2, self.x_plot_lims[0], self.x_plot_lims[1], show_legend=show_legend)
if self.sp.get_conf('cont_plot', False):
self.sp.plot_conts(self.axes)
if self.residual_GroupBox.isChecked():
self.axes3.cla()
self.sp.plot_ax3(self.axes3, show_legend)
if self.show_line_ticks_action.isChecked() and ( k == 1 ):
y1, y2 = self.get_line_tick_lim(self.sp.get_conf('line_tick_pos'))
self.sp.plot_line_ticks(self.axes3, y1, y2)
if self.show_line_ticks_action.isChecked() and ( k == 2 ):
self.axes2.cla()
# self.sp.plot_ax2(self.axes2)
self.sp.plot_line_ticks(self.axes2, 0.2, 0.8)
if self.residual_GroupBox.isChecked():
self.axes3.set_xlabel(r'Wavelength ($\AA$)')
self.axes3.set_ylabel(r'Residual')
#elif self.show_line_ticks_action.isChecked() and self.sp.get_conf(') and self.axes2 is not None:
elif self.show_line_ticks_action.isChecked() and ( k == 2 ):
self.axes2.set_xlabel(r'Wavelength ($\AA$)')
else:
self.axes.set_xlabel(r'Wavelength ($\AA$)')
self.axes.set_ylabel(r'F$_\lambda$')
self.restore_axes()
# self.update_lim_boxes()
if self.adjust_fig_action.isChecked():
plt.tight_layout(0.1)
self.canvas.draw()
self.statusBar().showMessage('Redraw is finished.', 4000)
        log_.debug('Exit on_draw', calling=self.calling)
self.magenta_tick_shown = False
def show_lines_clicked(self):
if self.lineIDs_GroupBox.isChecked():
self.show_line_ticks_action.setChecked(True)
self.plot_lines_action.setChecked(True)
self.sp.set_conf('plot_lines_of_selected_ions', True)
self.set_ion()
else:
self.show_line_ticks_action.setChecked(False)
self.plot_lines_action.setChecked(False)
self.sp.set_conf('plot_lines_of_selected_ions', False)
self.make_axes()
def line_tick_color_clicked(self):
color = QtGui.QColorDialog.getColor()
self.sp.set_conf('line_tick_color', str(color.name()))
if self.show_line_ticks_action.isChecked():
self.make_axes()
def toggle_show_true_ions(self):
self.show_true_ions = not self.show_true_ions
def toggle_legend_clicked(self):
fontsize_list = ['small', 'medium', 'large']
i = fontsize_list.index(self.sp.legend_fontsize) + 1
if i == len(fontsize_list):
self.sp.legend_fontsize = fontsize_list[0]
self.sp.legend_loc = (self.sp.legend_loc)%2+1
else:
self.sp.legend_fontsize = fontsize_list[i]
self.make_axes()
def enable_tooltips_action_clicked(self):
if self.enable_tooltips_action.isChecked():
self.enableToolTips()
self.sp.set_conf('qt_enable_tooltips', True)
log_.debug('Tooltips enabled', calling=self.calling)
else:
self.disableToolTips()
self.sp.set_conf('qt_enable_tooltips', False)
log_.debug('Tooltips disabled', calling=self.calling)
def adjust_fig_action_clicked(self):
if self.adjust_fig_action.isChecked():
self.sp.set_conf('fig_adjust', True)
log_.debug('Adjust figure enabled', calling=self.calling)
else:
self.fig.subplots_adjust(hspace=self.sp.get_conf('fig_hspace'),
bottom=self.sp.get_conf('fig_bottom'),
right=self.sp.get_conf('fig_right'),
top=self.sp.get_conf('fig_top'),
left=self.sp.get_conf('fig_left'))
log_.debug('Adjust figure disabled', calling=self.calling)
self.draw_ion()
def show_uncor_obs_action_clicked(self):
if self.show_uncor_obs_action.isChecked():
self.sp.show_uncor_spec = True
else:
self.sp.show_uncor_spec = False
self.set_plot_limits_and_draw()
def disableToolTips(self):
self.lineIDs_GroupBox.setToolTip('')
self.residual_GroupBox.setToolTip('')
self.run_button.setToolTip('')
self.adjust_button.setToolTip('')
self.line_info_box.setToolTip('')
self.ebv_box.setToolTip('')
self.obj_velo_box.setToolTip('')
self.sp_min_box.setToolTip('')
self.sp_max_box.setToolTip('')
self.xlim_min_box.setToolTip('')
self.xlim_max_box.setToolTip('')
self.y1lim_min_box.setToolTip('')
self.y1lim_max_box.setToolTip('')
self.y3lim_min_box.setToolTip('')
self.y3lim_max_box.setToolTip('')
self.fix_axes_cb.setToolTip('')
self.cut_cb.setToolTip('')
self.ion_cb.setToolTip('')
self.sp_norm_box.setToolTip('')
self.resol_box.setToolTip('')
self.cut2_box.setToolTip('')
self.ion_box.setToolTip('')
self.line_sort_menu.setToolTip('')
self.line_field_menu.setToolTip('')
self.line_tick_ax_menu.setToolTip('')
self.line_tick_pos_menu.setToolTip('')
self.diff_lines_menu.setToolTip('')
self.verbosity_menu.setToolTip('')
self.style_menu.setToolTip('')
def enableToolTips(self):
self.lineIDs_GroupBox.setToolTip(self.lineIDs_GroupBox_ToolTip)
self.residual_GroupBox.setToolTip(self.residual_GroupBox_ToolTip)
self.run_button.setToolTip(self.run_button_ToolTip)
self.adjust_button.setToolTip(self.adjust_button_ToolTip)
self.line_info_box.setToolTip(self.line_info_box_ToolTip)
self.ebv_box.setToolTip(self.ebv_box_ToolTip)
self.obj_velo_box.setToolTip(self.obj_velo_box_ToolTip)
self.sp_min_box.setToolTip(self.sp_min_box_ToolTip)
self.sp_max_box.setToolTip(self.sp_max_box_ToolTip)
self.xlim_min_box.setToolTip(self.xlim_min_box_ToolTip)
self.xlim_max_box.setToolTip(self.xlim_max_box_ToolTip)
self.y1lim_min_box.setToolTip(self.y1lim_min_box_ToolTip)
self.y1lim_max_box.setToolTip(self.y1lim_max_box_ToolTip)
self.y3lim_min_box.setToolTip(self.y3lim_min_box_ToolTip)
self.y3lim_max_box.setToolTip(self.y3lim_max_box_ToolTip)
self.fix_axes_cb.setToolTip(self.fix_axes_cb_ToolTip)
self.cut_cb.setToolTip(self.cut_cb_ToolTip)
self.ion_cb.setToolTip(self.ion_cb_ToolTip)
self.sp_norm_box.setToolTip(self.sp_norm_box_ToolTip)
self.resol_box.setToolTip(self.resol_box_ToolTip)
self.cut2_box.setToolTip(self.cut2_box_ToolTip)
self.ion_box.setToolTip(self.ion_box_ToolTip)
self.line_sort_menu.setToolTip(self.line_sort_menu_ToolTip)
self.line_field_menu.setToolTip(self.line_field_menu_ToolTip)
self.line_tick_ax_menu.setToolTip(self.line_tick_ax_menu_ToolTip)
self.line_tick_pos_menu.setToolTip(self.line_tick_pos_menu_ToolTip)
self.diff_lines_menu.setToolTip(self.diff_lines_menu_ToolTip)
self.verbosity_menu.setToolTip(self.verbosity_menu_ToolTip)
self.style_menu.setToolTip(self.style_menu_ToolTip)
def show_line_ticks_action_clicked(self):
self.set_ion()
if self.plot_lines_action.isChecked():
self.sp.set_conf('plot_lines_of_selected_ions', True)
else:
self.sp.set_conf('plot_lines_of_selected_ions', False)
if self.show_line_ticks_action.isChecked() or self.plot_lines_action.isChecked():
self.lineIDs_GroupBox.setChecked(True)
else:
self.lineIDs_GroupBox.setChecked(False)
self.make_axes()
def plot_cont_action_clicked(self):
if self.plot_cont_action.isChecked():
self.sp.set_conf('cont_plot', True)
else:
self.sp.set_conf('cont_plot', False)
self.on_draw()
def ion_cb_changed(self):
if self.ion_cb.isChecked():
self.sp.set_conf('show_selected_ions_only', True)
self.selected_ions_action.setChecked(True)
else:
self.sp.set_conf('show_selected_ions_only', False)
self.selected_ions_action.setChecked(False)
self.make_axes()
def cut_cb_changed(self):
if self.cut_cb.isChecked():
self.sp.set_conf('show_selected_intensities_only', True)
self.selected_intensities_action.setChecked(True)
else:
self.sp.set_conf('show_selected_intensities_only', False)
self.selected_intensities_action.setChecked(False)
self.make_axes()
def selected_lines_clicked(self):
if self.selected_ions_action.isChecked():
self.sp.set_conf('show_selected_ions_only', True)
self.ion_cb.setChecked(True)
else:
self.sp.set_conf('show_selected_ions_only', False)
self.ion_cb.setChecked(False)
if self.selected_intensities_action.isChecked():
self.sp.set_conf('show_selected_intensities_only', True)
self.cut_cb.setChecked(True)
else:
self.sp.set_conf('show_selected_intensities_only', False)
self.cut_cb.setChecked(False)
self.make_axes()
def diff_lines_by_process_clicked(self):
if self.diff_lines_by_process_action.isChecked():
self.sp.set_conf('diff_lines_by_process', True)
else:
self.sp.set_conf('diff_lines_by_process', False)
self.make_axes()
def editing_lines_clicked(self):
if self.editing_lines_action.isChecked():
self.sp.set_conf('qt_allow_editing_lines', True)
else:
self.sp.set_conf('qt_allow_editing_lines', False)
def update_lines_clicked(self):
if self.update_lines_action.isChecked():
self.sp.set_conf('qt_update_after_editing_lines', True)
else:
self.sp.set_conf('qt_update_after_editing_lines', False)
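    # cycle_forwards_ions / cycle_backwards_ions step 'index_of_current_ion'
    # through the selected ions, wrapping through the value -1 between the last
    # and the first ion of the cycle.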
def cycle_forwards_ions(self):
j = self.sp.get_conf('index_of_current_ion')
selected_ions = self.sp.get_conf('selected_ions')
if j in range(-1, len(self.sp.selected_ions_data)-1):
j += 1
else:
j = -1
self.sp.set_conf('index_of_current_ion', j)
self.set_refline_to_info_box(j)
self.make_axes()
def cycle_backwards_ions(self):
j = self.sp.get_conf('index_of_current_ion')
selected_ions = self.sp.get_conf('selected_ions')
if j in range(0, len(self.sp.selected_ions_data)):
j -= 1
else:
j = len(self.sp.selected_ions_data)-1
self.sp.set_conf('index_of_current_ion', j)
self.set_refline_to_info_box(j)
self.make_axes()
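    # show_line_ticks_from_file reads one wavelength per line (first whitespace-
    # separated field) from a user-chosen text file and draws dark-magenta ticks
    # at those wavelengths, adding the file name to the legend.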
def show_line_ticks_from_file(self):
file_choices = "Text files (*.txt *.dat) (*.txt *.dat);;Tex files (*.tex) (*.tex);;CSV files (*.csv) (*.csv);;All Files (*) (*)"
if self.tick_file is None:
path = ''
else:
path = self.tick_file
path = unicode(QtGui.QFileDialog.getOpenFileName(self, 'Open file', path, file_choices))
if path:
self.tick_file = path
else:
return
f = open(self.tick_file, 'r')
lines = f.readlines()
f.close()
color = 'darkmagenta'
posTick = self.sp.get_conf('line_tick_pos')
y1, y2 = self.get_line_tick_lim(posTick)
k = self.sp.get_conf('line_tick_ax')
if k == 2:
k = 1
y1 = 0.2
y2 = 0.8
elif k == 1 and self.residual_GroupBox.isChecked():
k = 1
else:
k = 0
dy = (y2-y1)*0.30
if self.magenta_tick_shown == True:
self.draw_ion()
for line in lines:
line = line.strip()
line = line.split(' ')[0]
if self.isFloat(line):
wavelength = np.float(line)
if wavelength > self.x_plot_lims[0] and wavelength < self.x_plot_lims[1]:
self.fig.axes[k].axvline( wavelength, y1+dy, y2-dy, color = color, linestyle = 'solid', linewidth = 1.5 )
self.fig.axes[k].step( [0,0], [0,100], color = color, linestyle = 'solid', linewidth = 1.5, label = self.tick_file.split('/')[-1] )
self.fig.axes[k].legend(loc=self.sp.legend_loc, fontsize=self.sp.legend_fontsize)
self.fig.canvas.draw()
self.magenta_tick_shown = True
def residual_box_clicked(self):
if self.residual_GroupBox.isChecked():
self.sp.set_conf('qt_plot_residuals', True)
else:
self.sp.set_conf('qt_plot_residuals', False)
self.make_axes()
def make_axes(self):
log_.debug('Entering make_axes', calling=self.calling)
if self.call_on_draw:
self.save_axes()
self.fig.clf()
i_ax1 = 0
i_ax2 = 1
i_ax3 = 2
rspan_ax1 = 4
rspan_ax2 = 1
rspan_ax3 = 4
n_subplots = rspan_ax1
k = self.sp.get_conf('line_tick_ax')
ShowAx2 = self.show_line_ticks_action.isChecked() and ( k == 2 )
if ShowAx2:
i_ax2 = n_subplots
n_subplots += rspan_ax2
if self.residual_GroupBox.isChecked():
i_ax3 = n_subplots
n_subplots += rspan_ax3
if self.axes is not None:
del(self.axes)
self.axes = plt.subplot2grid((n_subplots,1), (i_ax1,0), rowspan=rspan_ax1)
self.sp.ax1 = self.axes
if ShowAx2:
if self.axes2 is not None:
del(self.axes2)
self.axes2 = plt.subplot2grid((n_subplots,1), (i_ax2,0), rowspan=rspan_ax2, sharex=self.axes )
self.axes2.tick_params( left='off',labelleft='off' )
self.sp.ax2 = self.axes2
self.axes.get_xaxis().set_visible(False)
else:
self.axes2 = None
self.sp.ax2 = None
if self.residual_GroupBox.isChecked():
if self.axes3 is not None:
del(self.axes3)
self.axes3 = plt.subplot2grid((n_subplots,1), (i_ax3,0), rowspan=rspan_ax3, sharex=self.axes )
self.sp.ax3 = self.axes3
if ShowAx2:
self.axes2.get_xaxis().set_visible(False)
self.axes.get_xaxis().set_visible(False)
else:
self.axes3 = None
self.sp.ax3 = self.axes3
self.fig.subplots_adjust(hspace=self.sp.get_conf('fig_hspace'),
bottom=self.sp.get_conf('fig_bottom'),
right=self.sp.get_conf('fig_right'),
top=self.sp.get_conf('fig_top'),
left=self.sp.get_conf('fig_left'))
if self.call_on_draw:
log_.debug('Calling on_draw from make_axes', calling=self.calling)
self.do_save = False
self.on_draw()
self.do_save = True
log_.debug('Exit make_axes', calling=self.calling)
def init_axes(self):
self.x_plot_lims = self.sp.get_conf('x_plot_lims')
if self.x_plot_lims is None:
self.x_plot_lims = (np.min(self.sp.w), np.max(self.sp.w))
self.y1_plot_lims = self.sp.get_conf('y1_plot_lims')
if self.y1_plot_lims is None:
mask = (self.sp.w_ori > self.x_plot_lims[0]) & (self.sp.w_ori < self.x_plot_lims[1])
r = 1.2
if self.sp.sp_synth_lr is None:
a = np.min(self.sp.f[mask])
b = np.max(self.sp.f[mask])
else:
a = np.min(self.sp.sp_synth_lr[mask])
b = np.max(self.sp.sp_synth_lr[mask])
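            # The limits below expand the data range [a, b] symmetrically about its
            # midpoint by the factor r (r = 1.2 leaves a 10% margin of the data
            # range on each side).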
self.y1_plot_lims = ((a*(1+r)+b*(1-r))/2, (a*(1-r)+b*(1+r))/2)
self.y2_plot_lims = self.sp.get_conf('y2_plot_lims')
if self.y2_plot_lims is None:
self.y2_plot_lims = (-0.5, 1.5)
self.y3_plot_lims = self.sp.get_conf('y3_plot_lims')
if self.y3_plot_lims is None:
mask = (self.sp.w_ori > self.x_plot_lims[0]) & (self.sp.w_ori < self.x_plot_lims[1])
r = 1.2
if self.sp.sp_synth_lr is None:
self.y3_plot_lims = (-1,1)
else:
a = np.min((self.sp.f_ori - self.sp.sp_synth_lr)[mask])
b = np.max((self.sp.f_ori - self.sp.sp_synth_lr)[mask])
self.y3_plot_lims = ((a*(1+r)+b*(1-r))/2, (a*(1-r)+b*(1+r))/2)
log_.debug('Axes initialized. IDs {} {} {}'.format(id(self.axes), id(self.axes2), id(self.axes3)), calling=self.calling)
self.print_axes()
def save_axes(self):
if self.axes is not None:
self.x_plot_lims = self.axes.get_xlim()
self.y1_plot_lims = self.axes.get_ylim()
self.xscale = self.axes.get_xscale()
self.yscale = self.axes.get_yscale()
if self.axes2 is not None:
self.y2_plot_lims = self.axes2.get_ylim()
if self.axes3 is not None:
self.y3_plot_lims = self.axes3.get_ylim()
self.sp.save_axes()
log_.debug('Axes saved. IDs {} {} {}'.format(id(self.axes), id(self.axes2), id(self.axes3)), calling=self.calling)
self.print_axes()
def restore_axes(self):
if self.x_plot_lims is not None:
if self.axes is not None:
self.axes.set_xlim(self.x_plot_lims)
log_.debug('X-axes restored to {}'.format(self.axes.get_xlim()), calling=self.calling)
else:
log_.debug('axes is None', calling=self.calling)
else:
log_.debug('x_plot_lims is None', calling=self.calling)
if self.y1_plot_lims is not None:
if self.axes is not None:
self.axes.set_ylim(self.y1_plot_lims)
if self.y2_plot_lims is not None:
if self.axes2 is not None:
self.axes2.set_ylim(self.y2_plot_lims)
if self.y3_plot_lims is not None:
if self.axes3 is not None:
self.axes3.set_ylim(self.y3_plot_lims)
if self.xscale is not None:
self.axes.set_xscale(self.xscale)
log_.debug('X scale set to {}'.format(self.xscale))
if self.yscale is not None:
self.axes.set_yscale(self.yscale)
log_.debug('Y scale set to {}'.format(self.yscale))
log_.debug('Axes restored. IDs {} {} {}'.format(id(self.axes), id(self.axes2), id(self.axes3)), calling=self.calling)
self.print_axes()
def print_axes(self):
log_.debug('lims: {} {} {} {}'.format(self.x_plot_lims, self.y1_plot_lims, self.y2_plot_lims, self.y3_plot_lims), calling=self.calling)
log_.debug('Axes IDs {} {} {}'.format(id(self.axes), id(self.axes2), id(self.axes3)), calling=self.calling)
log_.debug(' IDs {} {} {}'.format(id(self.axes), id(self.axes2), id(self.axes3)), calling=self.calling)
def exec_init(self):
if self.init_file_name is None:
self.get_init_filename()
if self.init_file_name:
self.statusBar().showMessage('Running synthesis ...')
QtGui.QApplication.processEvents()
self.start_spectrum()
self.do_save = False
self.on_draw()
self.do_save = True
self.restore_axes()
self.update_lim_boxes()
self.save_parameters_file = None
else:
log_.warn('A filename must be given', calling=self.calling)
sys.exit('An initialization filename must be given')
def get_init_filename(self):
file_choices = "Python initialization files (*init.py) (*init.py);;Python files (*.py) (*.py);;All files (*) (*)"
title = 'Open pySSN initialization file'
init_file = str(QtGui.QFileDialog.getOpenFileName(self, title, self.init_file_name, file_choices))
if init_file and os.path.isfile(init_file):
self.init_file_name = init_file
else:
self.init_file_name = ''
def select_init(self):
old_name = self.init_file_name
self.get_init_filename()
if self.init_file_name:
self.exec_init()
else:
self.init_file_name = old_name
def save_pars(self):
path = self.sp.get_conf('save_parameters_filename')
keys = self.sp.default_keys
if '__builtins__' in keys:
keys.remove('__builtins__')
keys.sort()
with open(path, 'w') as f:
for key in keys:
value = self.sp.conf[key]
if isinstance(value, basestring):
value = '\"{}\"'.format(value)
f.write('{} = {}\n'.format(key, value))
self.statusBar().showMessage('Parameters saved to file %s' % path, 4000)
def save_pars_as(self):
if self.save_parameters_file is None:
path = self.init_file_name
else:
path = self.save_parameters_file
keys = self.sp.default_keys
keys_to_be_removed = ['__builtins__', 'plot_magenta', 'label_magenta', 'plot_cyan', 'label_cyan']
for key in keys_to_be_removed:
if key in keys:
keys.remove(key)
keys.sort()
file_choices = "pySSN initialization files (*init.py) (*init.py);;Python files (*.py) (*.py);;All files (*) (*)"
title = 'Save synthesis and plot parameters'
selectedFilter = 'pySSN initialization files (*init.py) (*init.py)'
path = unicode(QtGui.QFileDialog.getSaveFileName(self, title, path, file_choices, selectedFilter))
if path:
with open(path, 'w') as f:
for key in keys:
if key == 'instr_prof':
value = self.sp.format_instr_prof()
else:
value = self.sp.conf[key]
if isinstance(value, basestring):
value = '\"{}\"'.format(value)
f.write('{} = {}\n'.format(key, value))
self.save_parameters_file = path
self.statusBar().showMessage('Parameters saved to file %s' % path, 4000)
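    # teste_instr_prof validates a candidate 'instr_prof' dictionary and returns an
    # error message ('' when acceptable): 'width' must be present and non-zero, the
    # optional Bb/Br/alpha/beta components must share the same index suffixes, and
    # all values must be numbers.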
def teste_instr_prof(self, prof):
if prof is None:
return 'not defined'
        keys = prof.keys()
        # 'comment' is optional; remove it only if present (avoids a ValueError)
        if 'comment' in keys:
            keys.remove('comment')
if not 'width' in keys:
return 'The parameter \'width\' was not found.'
if prof['width'] == 0.0:
            return 'The value of \'width\' cannot be zero.'
if not (self.sp.get_key_indexes('Bb', prof)==self.sp.get_key_indexes('Br', prof)==
self.sp.get_key_indexes('beta', prof)==self.sp.get_key_indexes('alpha', prof)):
            return 'Invalid indexes for the parameters \'Bb\', \'Br\', \'alpha\', or \'beta\''
if not all((type(prof[key])==float or type(prof[key])==int) for key in keys):
return 'The values of parameters must be numbers.'
return ''
def apply_instr_prof(self):
def do_update():
path = str(prof_box.toPlainText()).strip()
try:
user_module = {}
exec(path) in user_module
prof = user_module['instr_prof']
self.sp.set_conf('instr_prof', prof)
log_.message('new instrumental profile is ok', calling = self.calling)
except:
title = 'Error reading instrument profile'
msg = 'Unable to read instrumental profile'
path = None
if self.showErrorBox:
QtGui.QMessageBox.critical(self, title, msg, QtGui.QMessageBox.Ok)
else:
log_.warn(msg, calling = self.calling)
return
msg = self.teste_instr_prof(prof)
if not msg:
self.update_profile()
else:
title = 'Error in the instrument profile'
if self.showErrorBox:
QtGui.QMessageBox.critical(self, title, msg, QtGui.QMessageBox.Ok)
else:
log_.warn(msg, calling = self.calling)
def toggle_statusbar():
self.showHelpBrowser = not self.showHelpBrowser
helpBrowser.setVisible(self.showHelpBrowser)
if self.showHelpBrowser:
self.instr_prof_dialog.resize(self.instr_prof_dialog_width, 2.1*self.instr_prof_dialog_height)
else:
self.instr_prof_dialog.resize(self.instr_prof_dialog_width, self.instr_prof_dialog_height)
def get_window_size_and_position():
if self.instr_prof_dialog is None:
font = QtGui.QFont("Courier")
width = QtGui.QFontMetrics(font).width('='*80)
height = 15*QtGui.QFontMetrics(font).height()
self.instr_prof_dialog_width = width
self.instr_prof_dialog_height = height
sG = QtGui.QApplication.desktop().screenGeometry()
self.instr_prof_dialog_x = sG.width()-self.instr_prof_dialog_width
self.instr_prof_dialog_y = sG.height()
else:
if not self.showHelpBrowser:
self.instr_prof_dialog_width = self.instr_prof_dialog.width()
self.instr_prof_dialog_height = self.instr_prof_dialog.height()
self.instr_prof_dialog_x = self.instr_prof_dialog.pos().x()
self.instr_prof_dialog_y = self.instr_prof_dialog.pos().y()
self.showHelpBrowser = False
get_window_size_and_position()
self.instr_prof_dialog = QtGui.QDialog()
self.instr_prof_dialog.setWindowFlags(self.instr_prof_dialog.windowFlags() | QtCore.Qt.WindowStaysOnTopHint)
self.instr_prof_dialog.resize(self.instr_prof_dialog_width, self.instr_prof_dialog_height)
self.instr_prof_dialog.move(self.instr_prof_dialog_x,self.instr_prof_dialog_y)
self.instr_prof_dialog.setWindowTitle('instrument profile dialog')
prof_box = QtGui.QTextEdit()
prof_box.setFontFamily("Courier")
prof_box.setText('instr_prof = ' + self.sp.format_instr_prof())
linkLabel = QtGui.QLabel('<a href="https://github.com/Morisset/pySSN/wiki">More help online</a>')
linkLabel.setOpenExternalLinks(True)
helpBrowser = QtGui.QTextBrowser()
# text=open('instr_prof.html').read()
        # This text should be moved to a file and loaded with text=open('instr_prof.html').read()
text = """<title> Instrumental profile help</title>
        <p>The instrumental profile is defined by the <a href="https://en.wikibooks.org/wiki/Python_Programming/Dictionaries">python dictionary</a> <b>instr_prof</b>.
        <p>The main component of the instrumental profile is set by the parameter <b>width</b>, which is the only indispensable parameter.</p>
<p>If <b>width</b> > 0, the main component profile follows a <a href="https://en.wikipedia.org/wiki/Normal_distribution">Gaussian distribution</a>, P ∝ exp(-(λ/<b>width</b>)<sup>2</sup>).
In this case, <b>width</b> is related to the normal full-width at half maximum by <b>width</b> = FWHM/(2(ln2)<sup>1/2</sup>) = FWHM/1.665.</p>
<p>If <b>width</b> < 0, the main component profile follows a <a href="https://en.wikipedia.org/wiki/rectangular_distribution">rectangular distribution</a>, P = 1 for -|<b>width</b>|/2 < λ < |<b>width</b>|/2, and P = 0 for all other values of λ.</p>
<p>A variable number of optional components can be included, each defined by four parameters, <b>Bb</b>, <b>Br</b>, <b>alpha</b>, and <b>beta</b>, and following P ∝ <b>B</b>exp(-(λ/<b>beta</b>)<sup><b>alpha</b></sup>).
<b>Bb</b> and <b>Br</b> are the intensity scale parameters for the bluish and reddish sides of the profile, respectively.</p>
        <p>If more than one optional component is in use, the parameters must be indexed as <b>alpha_1</b>, <b>alpha_2</b>, etc.</p>
Special cases for the optional components:
<ul>
<li><b>alpha</b> = 2 produces a <a href="https://en.wikipedia.org/wiki/Normal_distribution">Gaussian distribution</a>.
<li><b>alpha</b> = 2, <b>Bb</b> = 0 (or <b>Br</b> = 0) produces a <a href="https://en.wikipedia.org/wiki/Half_normal_distribution">half-Gaussian distribution</a>.
<li><b>alpha</b> = 1 produces an <a href="https://en.wikipedia.org/wiki/Exponential_distribution">exponential distribution</a>.
</ul>
<p>A comment may be included in <b>instr_prof</b>.</p>
<p>Examples:</p>
<ol>
<li>instr_prof = {'width': 0.5}<br>
        <li>instr_prof = {'width': 0.5, 'comment': 'Gaussian profile'}<br>
        <li>instr_prof = {'width': 0.5, 'Bb':0.00016, 'Br':9e-05, 'beta': 2.2, 'alpha': 0.45}<br>
<li>instr_prof = {'width': 0.5, 'Bb_1':0.00016, 'Br_1':9e-05, 'beta_1': 2.2, 'alpha_1': 0.45, 'Bb_2': 0.0014, 'Br_2':0.001, 'beta_2': 1.4, 'alpha_2': 0.75}<br>
</ol>"""
helpBrowser.document().setHtml(text)
helpBrowser.setOpenExternalLinks(True)
helpBrowser.setVisible(self.showHelpBrowser)
policy = helpBrowser.sizePolicy()
policy.setVerticalStretch(20)
helpBrowser.setSizePolicy(policy)
vbox = QtGui.QVBoxLayout()
buttonBox = QtGui.QDialogButtonBox(QtGui.QDialogButtonBox.Help|
QtGui.QDialogButtonBox.Close|
QtGui.QDialogButtonBox.Apply)
buttonBox.button(QtGui.QDialogButtonBox.Apply).setText("Update")
vbox.addWidget(prof_box,0)
vbox.addWidget(buttonBox)
vbox.addWidget(linkLabel)
vbox.addWidget(helpBrowser)
buttonBox.button(QtGui.QDialogButtonBox.Help).clicked.connect(toggle_statusbar)
buttonBox.button(QtGui.QDialogButtonBox.Apply).clicked.connect(do_update)
buttonBox.rejected.connect(self.instr_prof_dialog.close)
self.instr_prof_dialog.setLayout(vbox)
self.instr_prof_dialog.setWindowModality(QtCore.Qt.NonModal)
self.instr_prof_dialog.show()
def refine_wavelengths(self):
def table2list(text):
text = str(text)
text = text.splitlines()
s = ''
for i in range(len(text)):
line = text[i].split()
if len(line) == 2 and sum([self.isFloat(x) for x in line]) == 2:
s += '({}, {}), '.format(line[0], line[1])
else:
if len(line) > 0:
title = 'Error in table'
                        msg = 'Error in line \'{}\'.\nEach line must have two numbers separated by whitespace.'.format(text[i])
if self.showErrorBox:
QtGui.QMessageBox.critical(self, title, msg, QtGui.QMessageBox.Ok)
else:
log_.warn(msg, calling = self.calling)
return ''
s = s.strip(' ,')
if s == '':
return 'lambda_shift_table = None'
else:
return 'lambda_shift_table = [{}]'.format(s)
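        # Example (illustration only): a pasted two-column table such as
        #   4674    0.05
        #   4690    0.1
        # is converted by table2list() into the string
        #   "lambda_shift_table = [(4674, 0.05), (4690, 0.1)]"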
def toggle_table():
self.refine_wave_as_table = not self.refine_wave_as_table
if self.refine_wave_as_table:
text = str(edit_box.toPlainText()).strip()
edit_box.clear()
text = text.replace('lambda_shift_table','')
text = text.strip(' =[]')
text = text.split(')')
for i in range(len(text)-1):
line = text[i].strip(' (,')
line = line.split(',')
line = '{:<7} {}'.format(line[0].strip(),line[1].strip())
edit_box.append(line)
buttonBox.button(QtGui.QDialogButtonBox.RestoreDefaults).setText("Show as list")
else:
text = table2list(edit_box.toPlainText())
if text == '':
self.refine_wave_as_table = True
return
edit_box.clear()
edit_box.setText(text)
buttonBox.button(QtGui.QDialogButtonBox.RestoreDefaults).setText("Show as table")
def do_update():
old_value = self.sp.get_conf('lambda_shift_table')
if self.refine_wave_as_table:
path = table2list(edit_box.toPlainText())
                if path == '':  # table2list() returns '' on error (after reporting it)
                    return
else:
path = str(edit_box.toPlainText()).strip()
try:
user_module = {}
exec(path) in user_module
value = user_module['lambda_shift_table']
self.sp.set_conf('lambda_shift_table', value)
                log_.message('new \'lambda_shift_table\' is ok', calling = self.calling)
except:
title = 'Error'
                msg = 'Unable to read \'lambda_shift_table\''
path = None
if self.showErrorBox:
QtGui.QMessageBox.critical(self, title, msg, QtGui.QMessageBox.Ok)
else:
log_.warn(msg, calling = self.calling)
return
self.sp.show_uncor_spec = True
self.sp.init_obs()
if self.sp.read_obs_error:
self.sp.set_conf('lambda_shift_table', old_value)
if self.showErrorBox:
title = 'Error'
msg = self.sp.read_obs_error
QtGui.QMessageBox.critical(self, title, msg, QtGui.QMessageBox.Ok)
else:
log_.warn(msg, calling = self.calling)
else:
self.rerun()
if not self.show_uncor_obs_action.isChecked():
self.sp.show_uncor_spec = False
def toggle_help():
self.showHelpBrowser = not self.showHelpBrowser
helpBrowser.setVisible(self.showHelpBrowser)
if self.showHelpBrowser:
self.refine_wave_dialog.resize(self.refine_wave_dialog_width, 2.5*self.refine_wave_dialog_height)
else:
self.refine_wave_dialog.resize(self.refine_wave_dialog_width, self.refine_wave_dialog_height)
def get_window_size_and_position():
if self.refine_wave_dialog is None:
font = QtGui.QFont("Courier")
width = QtGui.QFontMetrics(font).width('='*80)
height = 15*QtGui.QFontMetrics(font).height()
self.refine_wave_dialog_width = width
self.refine_wave_dialog_height = height
sG = QtGui.QApplication.desktop().screenGeometry()
self.refine_wave_dialog_x = sG.width()-self.refine_wave_dialog_width
self.refine_wave_dialog_y = sG.height()
else:
if not self.showHelpBrowser:
self.refine_wave_dialog_width = self.refine_wave_dialog.width()
self.refine_wave_dialog_height = self.refine_wave_dialog.height()
self.refine_wave_dialog_x = self.refine_wave_dialog.pos().x()
self.refine_wave_dialog_y = self.refine_wave_dialog.pos().y()
self.showHelpBrowser = False
get_window_size_and_position()
self.refine_wave_dialog = QtGui.QDialog()
self.refine_wave_dialog.setWindowFlags(self.refine_wave_dialog.windowFlags() | QtCore.Qt.WindowStaysOnTopHint)
self.refine_wave_dialog.resize(self.refine_wave_dialog_width, self.refine_wave_dialog_height)
self.refine_wave_dialog.move(self.refine_wave_dialog_x,self.refine_wave_dialog_y)
self.refine_wave_dialog.setWindowTitle('wavelength-refining dialog')
edit_box = QtGui.QTextEdit()
edit_box.setFontFamily("Courier")
self.refine_wave_as_table = False
edit_box.setText('lambda_shift_table = ' + str(self.sp.get_conf('lambda_shift_table')))
linkLabel = QtGui.QLabel('<a href="https://github.com/Morisset/pySSN/wiki">More help online</a>')
linkLabel.setOpenExternalLinks(True)
helpBrowser = QtGui.QTextBrowser()
# text=open('wave_refining.html').read()
        # This text should be moved to a file and loaded with text=open('wave_refining.html').read()
text = """<title> Wavelength-refining help</title>
<p>The wavelength calibration of the observational spectrum can be refined with the use of
the <a href="https://en.wikibooks.org/wiki/Python_Programming/Lists">python list</a> <b>lambda_shift_table</b>.
Each element of this list is an ordered pair of numbers (λ, Δλ), where Δλ is the wavelength shift at the wavelength λ needed to improve the calibration, after the Doppler correction.</p>
        <p>The data in <b>lambda_shift_table</b> will be linearly interpolated to provide the corrected wavelengths. 
        Outside the range of wavelengths given in <b>lambda_shift_table</b>, the correction will be extrapolated to zero.</p>
        <p>To set aside the wavelength-refining, set <b>lambda_shift_table</b> to None.</p>
<p>Examples:</p>
<ol>
<li><p>lambda_shift_table = [(4674, 0.05), (4690, 0.1), (9000, 1)]</p></li>
<li><p>lambda_shift_table = None (to set aside the wavelength-refining)</p></li>
</ol>
<p>Button functions:</p>
<ul>
<li><p>Click on <b><span style="color:red">Show as table</span></b> to display and edit the data contained in <b>lambda_shit_table</b> as a two columns table.</p></li>
<li><p>Click on <b><span style="color:red">Show as list</span></b> to get back the <b>lambda_shit_table</b> list from the two columns table.</p></li>
<li><p>Click on <b><span style="color:red">Update</span></b> to refine the wavelength calibration and redo the synthesis.</p></li>
</ul>
"""
helpBrowser.document().setHtml(text)
helpBrowser.setOpenExternalLinks(True)
helpBrowser.setVisible(self.showHelpBrowser)
policy = helpBrowser.sizePolicy()
policy.setVerticalStretch(20)
helpBrowser.setSizePolicy(policy)
vbox = QtGui.QVBoxLayout()
buttonBox = QtGui.QDialogButtonBox(QtGui.QDialogButtonBox.Help|
QtGui.QDialogButtonBox.RestoreDefaults|
QtGui.QDialogButtonBox.Close|
QtGui.QDialogButtonBox.Apply)
buttonBox.button(QtGui.QDialogButtonBox.Apply).setText("Update")
buttonBox.button(QtGui.QDialogButtonBox.RestoreDefaults).setText("Show as table")
vbox.addWidget(edit_box,0)
vbox.addWidget(buttonBox)
vbox.addWidget(linkLabel)
vbox.addWidget(helpBrowser)
buttonBox.button(QtGui.QDialogButtonBox.Help).clicked.connect(toggle_help)
buttonBox.button(QtGui.QDialogButtonBox.RestoreDefaults).clicked.connect(toggle_table)
buttonBox.button(QtGui.QDialogButtonBox.Apply).clicked.connect(do_update)
buttonBox.rejected.connect(self.refine_wave_dialog.close)
self.refine_wave_dialog.setLayout(vbox)
self.refine_wave_dialog.setWindowModality(QtCore.Qt.NonModal)
self.refine_wave_dialog.show()
    def plot_user_cont(self):
        # 'color', 'label', and the legend location were undefined in this method;
        # assumed defaults are used here for the user-continuum legend entry.
        color = 'green'
        label = 'user cont'
        self.fig.axes[0].step( [0,0], [0,100], color = color, linestyle = 'solid', label = label, linewidth = 2.5 )
        self.fig.axes[0].legend(loc=self.sp.legend_loc, fontsize=self.sp.legend_fontsize)
        self.fig.canvas.draw()
def user_cont_table2list(self, text):
text = str(text)
text = text.splitlines()
text = sorted(text)
s = ''
for i in range(len(text)):
line = text[i].split()
if len(line) == 2 and sum([self.isFloat(x) for x in line]) == 2:
s += '({}, {}), '.format(line[0], line[1])
else:
if len(line) > 0:
title = 'Error in table'
                    msg = 'Error in line \'{}\'.\nEach line must have two numbers separated by whitespace.'.format(text[i])
if self.showErrorBox:
QtGui.QMessageBox.critical(self, title, msg, QtGui.QMessageBox.Ok)
else:
log_.warn(msg, calling = self.calling)
return ''
s = s.strip(' ,')
if s == '':
s = 'None'
else:
s = '[{}]'.format(s)
return 'cont_user_func = \'{}\'\n\ncont_user_table = {}'.format(self.sp.get_conf('cont_user_func'), s)
def update_user_cont(self):
msg = ''
old_value = self.sp.get_conf('cont_user_table')
old_kind = self.sp.get_conf('cont_user_func')
if self.interpol_cont_as_table:
path = self.user_cont_table2list(self.user_cont_editBox.toPlainText())
            if path == '':  # user_cont_table2list() returns '' on error (after reporting it)
                return
else:
path = str(self.user_cont_editBox.toPlainText()).strip()
try:
user_module = {}
exec(path) in user_module
kind = user_module['cont_user_func']
log_.message('new \'cont_user_func\' is ok', calling = self.calling)
value = user_module['cont_user_table']
log_.message('new \'cont_user_table\' is ok', calling = self.calling)
except:
msg = 'Unable to read \'cont_user_func\' or \'cont_user_table\''
path = None
kinds = {'nearest', 'zero', 'linear', 'slinear', 'quadratic', 'cubic'}
if msg == '':
if kind not in kinds:
msg = 'Invalid function'
if msg != '':
title = 'Error'
msg = 'Problem in user-defined continuum interpolation.\n{}'.format(msg)
if self.showErrorBox:
QtGui.QMessageBox.critical(self, title, msg, QtGui.QMessageBox.Ok)
else:
log_.warn(msg, calling = self.calling)
return
if old_value != value or old_kind != kind:
self.cont_par_changed = True
if value is not None and len(value) == 0:
value = None
self.sp.set_conf('cont_user_table', value)
self.sp.set_conf('cont_user_func', kind)
self.sp.update_user_cont()
self.set_plot_limits_and_draw()
self.sp.plot_conts(self.axes, ['user'])
self.canvas.draw()
else:
self.set_plot_limits_and_draw()
def user_cont_list2table(self, points):
self.user_cont_editBox.clear()
for point in points:
line = '{:<7} {}'.format(str(point[0]).strip(),str(point[1]).strip())
self.user_cont_editBox.append(line)
def user_continuum(self):
def save_initial_plot_pars():
self.init_cont_line_num = self.line_info_box.text()
self.init_cont_ion = self.ion_box.text()
self.init_cont_xmin = self.xlim_min_box.text()
self.init_cont_xmax = self.xlim_max_box.text()
self.init_cont_y1min = self.y1lim_min_box.text()
self.init_cont_y1max = self.y1lim_max_box.text()
self.init_cont_y3min = self.y3lim_min_box.text()
self.init_cont_y3max = self.y3lim_max_box.text()
self.init_cont_legend_fontsize = self.sp.legend_fontsize
self.init_cont_legend_loc = self.sp.legend_loc
self.init_cont_sel_ions_only = self.selected_ions_action.isChecked()
def redo_initial_plot():
self.line_info_box.setText(self.init_cont_line_num)
self.ion_box.setText(self.init_cont_ion)
self.xlim_min_box.setText(self.init_cont_xmin)
self.xlim_max_box.setText(self.init_cont_xmax)
self.y1lim_min_box.setText(self.init_cont_y1min)
self.y1lim_max_box.setText(self.init_cont_y1max)
self.y3lim_min_box.setText(self.init_cont_y3min)
self.y3lim_max_box.setText(self.init_cont_y3max)
self.sp.legend_fontsize = self.init_cont_legend_fontsize
self.sp.legend_loc = self.init_cont_legend_loc
self.selected_ions_action.setChecked(self.init_cont_sel_ions_only)
self.selected_lines_clicked()
self.set_plot_limits_and_draw()
def toggle_table():
self.interpol_cont_as_table = not self.interpol_cont_as_table
if self.interpol_cont_as_table:
text = str(self.user_cont_editBox.toPlainText()).strip()
text = text[text.find('[')+1:text.find(']')]
text = text.replace('\n','')
self.user_cont_editBox.clear()
text = text.split(')')
for i in range(len(text)-1):
line = text[i].strip(' (,')
line = line.split(',')
line = '{:<7} {}'.format(line[0].strip(),line[1].strip())
self.user_cont_editBox.append(line)
buttonBox.button(QtGui.QDialogButtonBox.RestoreDefaults).setText("Show as list")
else:
self.get_user_cont_points = False
buttonBox.button(QtGui.QDialogButtonBox.Retry).setStyleSheet('')
self.del_user_cont_points = False
buttonBox.button(QtGui.QDialogButtonBox.Ignore).setStyleSheet('')
self.on_draw()
text = self.user_cont_table2list(self.user_cont_editBox.toPlainText())
if text == '':
self.interpol_cont_as_table = True
return
self.user_cont_editBox.clear()
self.user_cont_editBox.setText(text)
buttonBox.button(QtGui.QDialogButtonBox.RestoreDefaults).setText("Show as table")
def toggle_help():
self.showHelpBrowser = not self.showHelpBrowser
helpBrowser.setVisible(self.showHelpBrowser)
if self.showHelpBrowser:
self.interpol_cont_dialog.resize(self.interpol_cont_dialog_width, 2.5*self.interpol_cont_dialog_height)
else:
self.interpol_cont_dialog.resize(self.interpol_cont_dialog_width, self.interpol_cont_dialog_height)
def get_window_size_and_position():
if self.interpol_cont_dialog is None:
font = QtGui.QFont("Courier")
width = QtGui.QFontMetrics(font).width('='*80)
height = 15*QtGui.QFontMetrics(font).height()
self.interpol_cont_dialog_width = width
self.interpol_cont_dialog_height = height
sG = QtGui.QApplication.desktop().screenGeometry()
self.interpol_cont_dialog_x = sG.width()-self.interpol_cont_dialog_width
self.interpol_cont_dialog_y = sG.height()
else:
if not self.showHelpBrowser:
self.interpol_cont_dialog_width = self.interpol_cont_dialog.width()
self.interpol_cont_dialog_height = self.interpol_cont_dialog.height()
self.interpol_cont_dialog_x = self.interpol_cont_dialog.pos().x()
self.interpol_cont_dialog_y = self.interpol_cont_dialog.pos().y()
def get_points():
self.get_user_cont_points = not self.get_user_cont_points
self.del_user_cont_points = False
buttonBox.button(QtGui.QDialogButtonBox.Ignore).setStyleSheet('')
if self.get_user_cont_points:
buttonBox.button(QtGui.QDialogButtonBox.Retry).setStyleSheet('background-color:red;')
self.set_plot_limits_and_draw()
self.sp.plot_conts(self.axes, ['user'])
self.canvas.draw()
if self.interpol_cont_as_table == False:
toggle_table()
else:
buttonBox.button(QtGui.QDialogButtonBox.Retry).setStyleSheet('')
def del_points():
self.del_user_cont_points = not self.del_user_cont_points
self.get_user_cont_points = False
buttonBox.button(QtGui.QDialogButtonBox.Retry).setStyleSheet('')
if self.del_user_cont_points:
buttonBox.button(QtGui.QDialogButtonBox.Ignore).setStyleSheet('background-color:red;')
self.set_plot_limits_and_draw()
self.sp.plot_conts(self.axes, ['user'])
self.canvas.draw()
if self.interpol_cont_as_table == False:
toggle_table()
else:
buttonBox.button(QtGui.QDialogButtonBox.Ignore).setStyleSheet('')
def on_close():
redo_initial_plot()
self.interpol_cont_dialog.close()
def do_update():
self.get_user_cont_points = False
self.del_user_cont_points = False
buttonBox.button(QtGui.QDialogButtonBox.Retry).setStyleSheet('')
buttonBox.button(QtGui.QDialogButtonBox.Ignore).setStyleSheet('')
self.update_user_cont()
self.showHelpBrowser = False
get_window_size_and_position()
save_initial_plot_pars()
self.ion_box.setText('')
self.selected_ions_action.setChecked(True)
self.selected_lines_clicked()
self.set_plot_limits_and_draw()
self.interpol_cont_dialog = QtGui.QDialog()
self.interpol_cont_dialog.setWindowFlags(self.interpol_cont_dialog.windowFlags() | QtCore.Qt.WindowStaysOnTopHint)
#self.interpol_cont_dialog.setWindowFlags(QtCore.Qt.WindowMinimizeButtonHint | QtCore.Qt.WindowMaximizeButtonHint | QtCore.Qt.WindowStaysOnTopHint)
self.interpol_cont_dialog.resize(self.interpol_cont_dialog_width, self.interpol_cont_dialog_height)
self.interpol_cont_dialog.move(self.interpol_cont_dialog_x,self.interpol_cont_dialog_y)
self.interpol_cont_dialog.setWindowTitle('user-defined continuum dialog')
self.user_cont_editBox = QtGui.QTextEdit()
self.user_cont_editBox.setFontFamily("Courier")
self.interpol_cont_as_table = False
self.get_user_cont_points = False
self.del_user_cont_points = False
text = 'cont_user_func = \'{}\'\n\ncont_user_table = {}'.format(str(self.sp.get_conf('cont_user_func')), self.sp.get_conf('cont_user_table'))
self.user_cont_editBox.setText(text)
linkLabel = QtGui.QLabel('<a href="https://github.com/Morisset/pySSN/wiki">More help online</a>')
linkLabel.setOpenExternalLinks(True)
helpBrowser = QtGui.QTextBrowser()
# text=open('user_continuum.html').read()
        # This text could be moved to a file and loaded with text=open('user_continuum.html').read()
text = """<title> User-defined continuum help</title>
<p>A user-defined continuum can be added to the continuum calculated from other sources (electron recombination, free-free transitions, two-photon, black-body and
power-law emission). It is obtained by the interpolation of the data contained in the
<a href="https://en.wikibooks.org/wiki/Python_Programming/Lists">python list</a> <b>cont_user_table</b>. Each element of this list is an ordered pair of numbers
(λ, <i>f</i>), where <i>f</i> is the additional continuum flux at the wavelength λ.</p>
<p>The parameter <b>cont_user_func</b> defines the kind of the interpolation. Possible values are 'linear', 'quadratic', 'cubic', corresponding to linear
interpolation, second- and third-order spline interpolation, respectively. Outside the range of wavelengths given in <b>cont_user_table</b>, the user continuum
component will be extrapolated to zero.</p>
<p>There are three modes of editing the interpolation control points: editing the list <b>cont_user_table</b> directly or as a two-column table, or clicking
with the mouse on the figure at the intended level of total continuum (see Button functions below). To set aside the user-defined continuum, set
<b>cont_user_table</b> to None.</p>
<p>Examples:</p>
<ol>
<li><p>cont_user_func = 'linear'<br>
cont_user_table = [(4674, 0.05), (4690, 0.1), (9000, 1)]
</p></li>
<li><p>cont_user_table = None (to set aside the user-defined continuum)</p></li>
</ol>
<p>Button functions:</p>
<ul>
<li><p>Click on <b><span style="color:red">Show as table</span></b> to display and edit the data contained in <b>cont_user_table</b> as a two-column table.</p></li>
<li><p>Click on <b><span style="color:red">Show as list</span></b> to get back the <b>cont_user_table</b> list from the two-column table.</p></li>
<li><p>Click on <b><span style="color:red">Add points</span></b> to activate/deactivate the mode that allows adding new control points by mouse-clicking on the
figure. Each time a new control point is added, the interpolation is automatically updated.</p></li>
<li><p>Click on <b><span style="color:red">Del points</span></b> to activate/deactivate the mode that allows clicking on the figure to delete the nearest
(in wavelength) control point. Each time a control point is deleted, the interpolation is automatically updated.</p></li>
<li><p>Click on <b><span style="color:red">Update</span></b> to incorporate the changes in the user-defined continuum.</p></li>
<li><p>Click on <b><span style="color:red">Close</span></b> to close the dialog and return to the preceding plot setting.</p></li>
</ul>
"""
helpBrowser.document().setHtml(text)
helpBrowser.setOpenExternalLinks(True)
helpBrowser.setVisible(self.showHelpBrowser)
policy = helpBrowser.sizePolicy()
policy.setVerticalStretch(20)
helpBrowser.setSizePolicy(policy)
vbox = QtGui.QVBoxLayout()
buttonBox = QtGui.QDialogButtonBox(QtGui.QDialogButtonBox.Help|
QtGui.QDialogButtonBox.RestoreDefaults|
QtGui.QDialogButtonBox.Retry|
QtGui.QDialogButtonBox.Ignore|
QtGui.QDialogButtonBox.Close|
QtGui.QDialogButtonBox.Apply)
buttonBox.button(QtGui.QDialogButtonBox.Apply).setText("Update")
buttonBox.button(QtGui.QDialogButtonBox.RestoreDefaults).setText("Show as table")
buttonBox.button(QtGui.QDialogButtonBox.Retry).setText("Add points")
buttonBox.button(QtGui.QDialogButtonBox.Ignore).setText("Del points")
vbox.addWidget(self.user_cont_editBox,0)
vbox.addWidget(buttonBox)
vbox.addWidget(linkLabel)
vbox.addWidget(helpBrowser)
buttonBox.button(QtGui.QDialogButtonBox.Help).clicked.connect(toggle_help)
buttonBox.button(QtGui.QDialogButtonBox.RestoreDefaults).clicked.connect(toggle_table)
buttonBox.button(QtGui.QDialogButtonBox.Apply).clicked.connect(do_update)
buttonBox.button(QtGui.QDialogButtonBox.Retry).clicked.connect(get_points)
buttonBox.button(QtGui.QDialogButtonBox.Ignore).clicked.connect(del_points)
buttonBox.rejected.connect(on_close)
#self.interpol_cont_dialog.onCloseEvet(on_close)
self.interpol_cont_dialog.setLayout(vbox)
self.interpol_cont_dialog.setWindowModality(QtCore.Qt.NonModal)
self.interpol_cont_dialog.show()
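    # A minimal sketch of the interpolation described in the help text above,
    # kept here for reference only and never called: the helper name and the
    # use of scipy.interpolate.interp1d are assumptions for illustration; the
    # actual interpolation is performed in the spectrum class.
    @staticmethod
    def _user_cont_interp_sketch(wavelengths, cont_user_table, kind='linear'):
        # Evaluate the user continuum at `wavelengths`; outside the range of
        # the control points the contribution is extrapolated to zero.
        import numpy as np
        from scipy.interpolate import interp1d
        if not cont_user_table:
            return np.zeros(len(wavelengths))
        x, y = zip(*sorted(cont_user_table))
        f = interp1d(x, y, kind=kind, bounds_error=False, fill_value=0.0)
        return f(np.asarray(wavelengths, dtype=float))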
def isValidFilename(self, filename):
if filename is None:
return False
try:
open(filename,'r')
return True
except IOError:
try:
open(filename, 'w')
return True
except IOError:
return False
def set_cosmetic_file(self):
file_choices = "Line cosmetic files (*cosm*.dat) (*cosm*.dat);;Data files (*.dat) (*.dat);;All files (*) (*)"
title = 'Set the line cosmetic file'
cosmetic_file = str(QtGui.QFileDialog.getSaveFileName(self, title, '', file_choices, options=QtGui.QFileDialog.DontConfirmOverwrite))
msg = "Line cosmetic file '{}' not valid!".format(cosmetic_file)
if cosmetic_file and not self.isValidFilename(cosmetic_file):
QtGui.QMessageBox.critical(self, 'pySSN', msg, QtGui.QMessageBox.Ok )
cosmetic_file = None
if cosmetic_file:
self.sp.set_conf('do_cosmetik', True)
dir_ = os.path.dirname(cosmetic_file)
if dir_ == os.getcwd():
cosmetic_file = cosmetic_file.split('/')[-1]
self.sp.set_conf('fic_cosmetik', cosmetic_file)
self.sp.fic_cosmetik = cosmetic_file
if self.sp is not None:
self.set_status_text()
if self.axes is not None:
self.adjust()
def empty_cosmetic_file(self):
if self.sp.fic_cosmetik is None or self.sp.phyat_file is None:
return
title = 'pySSN: cosmetic file'
msg = 'All lines in the cosmetic file will be removed.\nConfirm?'
ret = QtGui.QMessageBox.question(self, title, msg, QtGui.QMessageBox.Ok, QtGui.QMessageBox.Cancel )
if ret == QtGui.QMessageBox.Ok:
f = open(self.sp.fic_cosmetik, 'w')
f.close()
def order_lines(self, lines):
if lines is None:
return None
numbers = []
for line in lines:
line_num = int(self.sp.fieldStrFromLine(line,'num'))
numbers.append(line_num)
lines = [x for _,x in sorted(zip(numbers, lines))]
return lines
def remove_duplicate_lines(self, lines):
if lines is None:
return None
numbers = []
output = []
for line in lines:
line_num = int(self.sp.fieldStrFromLine(line,'num'))
if line_num not in numbers:
numbers.append(line_num)
output.append(line)
return output
def order_cosmetic_file(self):
if self.sp.fic_cosmetik is None or not os.path.isfile(self.sp.fic_cosmetik):
return
f = open(self.sp.fic_cosmetik, 'r')
cosmetic_lines = f.readlines()
f.close()
cosmetic_lines = self.order_lines(cosmetic_lines)
n0 = len(cosmetic_lines)
cosmetic_lines = self.remove_duplicate_lines(cosmetic_lines)
n1 = len(cosmetic_lines)
f = open(self.sp.fic_cosmetik, 'w')
f.writelines(cosmetic_lines)
f.close()
if n0 > n1:
s = ' and the duplicate lines removed'
else:
s = ''
msg = 'The cosmetic \'{0:}\' file was ordered{1:}.'.format(self.sp.fic_cosmetik, s)
self.statusBar().showMessage(msg, 4000)
def clean_cosmetic_file(self):
def ShowCleanMessage(UnchangedLineList):
nUL = len(UnchangedLineList)
if nUL == 1:
s1 = ''
s2 = 'was'
s3 = 'this line'
elif nUL > 1:
s1 = 's'
s2 = 'were'
s3 = 'these lines'
msgBox = QtGui.QMessageBox()
msgBox.setIcon(QtGui.QMessageBox.Question)
msgBox.title = 'pySSN: cosmetic file'
            msg = '{0:} unchanged line{1:} {2:} found in the cosmetic file.'.format(nUL, s1, s2)
msgBox.setText(msg)
msgBox.setInformativeText('Do you want to delete {:}?\n'.format(s3))
detailedText = 'Unchanged line{:}:\n\n'.format(s1)
for i in UnchangedLineList:
detailedText = detailedText + str(i) + '\n'
msgBox.setDetailedText(detailedText)
DelButton = msgBox.addButton(self.tr("Delete"), QtGui.QMessageBox.ActionRole)
s = 'Delete from the cosmetic file all unchanged lines'
if self.enable_tooltips_action.isChecked():
DelButton.setToolTip(s)
msgBox.addButton(QtGui.QMessageBox.Cancel)
answer = msgBox.exec_()
if msgBox.clickedButton() == DelButton:
answer = True
else:
answer = False
return answer
if self.sp.fic_cosmetik is None or self.sp.phyat_file is None:
return
#if not self.sp.get_conf('clean_cosmetic_file'):
# return
if not os.path.isfile(self.sp.fic_cosmetik):
return
f = open(self.sp.fic_cosmetik, 'r')
cosmetic_lines = f.readlines()
f.close()
UnchangedLineList = []
ChangedLines = []
for i in range(len(cosmetic_lines)):
line_c = cosmetic_lines[i].rstrip()
line_num = int(self.sp.fieldStrFromLine(line_c,'num'))
if self.sp.cosmetic_line_unchanged(line_c):
UnchangedLineList.append(line_num)
else:
ChangedLines.append(line_c + '\n')
if len(UnchangedLineList) > 0:
ret = ShowCleanMessage(UnchangedLineList)
if ret == True:
f = open(self.sp.fic_cosmetik, 'w')
f.writelines(ChangedLines)
f.close()
else:
msg = 'No unchanged line in the cosmetic file {:}'.format(self.sp.fic_cosmetik)
self.statusBar().showMessage(msg, 4000)
def match_cosmetic_phyat_files(self):
def ShowErrorMessage():
msg = 'The wavelength or intensity in the cosmetic file does not match that in the atomic database.\n\n' \
'Do you want to try to automatically correct the cosmetic file?'
msgBox = QtGui.QMessageBox()
msgBox.setText("Error in cosmetic file for line: " + str(line_num))
msgBox.setInformativeText(msg)
msgBox.addButton(QtGui.QMessageBox.Yes)
msgBox.addButton(QtGui.QMessageBox.YesToAll)
msgBox.addButton(QtGui.QMessageBox.No)
msgBox.addButton(QtGui.QMessageBox.NoToAll)
msgBox.setDefaultButton(QtGui.QMessageBox.Yes)
answer = msgBox.exec_()
return answer
def ShowFinalMessage(nErr, nCor, nUnCor, nNfd, UnCorList, NotFound):
msgBox = QtGui.QMessageBox()
msgBox.setText('pySSN: error in cosmetic file')
if nCor > 0:
s0 = 'Rerun the synthesis to take into account the changes.\n\n'
else:
s0 = ''
if nUnCor > 0:
s1 = 'The cosmetic data for lines that still have problems will be ignored. ' \
'Do you want to delete them from the cosmetic file?'
else:
s1 = ''
msg = 'Number of lines with problems: {0:}\n' \
'Number of corrected lines: {1:}\n' \
'Number of uncorrected lines: {2:}\n' \
'Number of lines not found in the atomic database: {3:}\n\n' \
'{4:}{5:}'.format(nErr, nCor, nUnCor, nNfd, s0, s1)
msgBox.setInformativeText(msg)
if nNfd > 0:
detailedText = 'Lines not found:\n\n'
for i in NotFound:
detailedText = detailedText + i + '\n'
detailedText = detailedText + '\n'
else:
detailedText = ''
if nUnCor > 0:
detailedText = detailedText + 'Lines not corrected:\n\n'
for i in UnCorList:
detailedText = detailedText + i + '\n'
msgBox.setDetailedText(detailedText)
DelAllButton = msgBox.addButton(self.tr("Delete all"), QtGui.QMessageBox.ActionRole)
            DelNotFndButton = msgBox.addButton(self.tr("Delete not found"), QtGui.QMessageBox.ActionRole)
            DelUncorButton = msgBox.addButton(self.tr("Delete uncorrected"), QtGui.QMessageBox.ActionRole)
if self.enable_tooltips_action.isChecked():
s = 'Delete from the cosmetic file all lines that still have problems'
DelAllButton.setToolTip(s)
s = 'Delete from the cosmetic file the lines not found in the atomic database'
DelNotFndButton.setToolTip(s)
s = 'Delete from the cosmetic file the uncorrected lines'
DelUncorButton.setToolTip(s)
msgBox.addButton(QtGui.QMessageBox.Cancel)
msgBox.setMaximumHeight(16777215)
msgBox.setMinimumHeight(800)
# It does not expand! Why?
msgBox.setSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)
msgBox.setSizeGripEnabled(True)
if nUnCor == 0:
DelUncorButton.setEnabled(False)
DelAllButton.setEnabled(False)
if nNfd == 0:
DelNotFndButton.setEnabled(False)
DelAllButton.setEnabled(False)
answer = msgBox.exec_()
if msgBox.clickedButton() == DelAllButton:
answer = ['DelNotFnd', 'DelUncor']
elif msgBox.clickedButton() == DelNotFndButton:
answer = ['DelNotFnd']
elif msgBox.clickedButton() == DelUncorButton:
answer = ['DelUncor']
else:
answer = []
return answer
if self.sp.fic_cosmetik is None:
return
if os.path.isfile(self.sp.fic_cosmetik):
cosmetik_arr, errorMsg = self.sp.read_cosmetik()
if len(errorMsg) > 0:
self.sp.do_cosmetik = False
self.sp.set_conf('do_cosmetik', False)
title = 'Error in cosmetic file: '
msg = 'Unable to read cosmetic data from file \'{}\':{}\n\nLine cosmetics will be disabled!'.format(self.sp.get_conf('fic_cosmetik'), errorMsg)
if self.showErrorBox:
QtGui.QMessageBox.critical(self, title, msg, QtGui.QMessageBox.Ok )
else:
log_.warn('{}: {}'.format(title, msg), calling=self.calling)
return
ret = None
f = open(self.sp.fic_cosmetik, 'r')
cosmetic_lines = f.readlines()
f.close()
ErrorList = []
CorrectedList = []
UnCorList = []
NotFound =[]
k = self.sp.field_pos['id']
keys = [ 'lambda', 'l_shift', 'i_rel', 'i_cor' ]
for i in range(len(cosmetic_lines)):
line_c = cosmetic_lines[i].rstrip()
line_num = int(self.sp.fieldStrFromLine(line_c,'num'))
cosmeticLineOk = self.sp.cosmetic_line_ok(line_c)
if cosmeticLineOk == None:
NotFound.append(line_c[:k])
ErrorList.append(line_c[:k])
elif cosmeticLineOk == False:
ErrorList.append(line_c[:k])
if ret != QtGui.QMessageBox.YesToAll and ret != QtGui.QMessageBox.NoToAll:
ret = ShowErrorMessage()
if ret == QtGui.QMessageBox.Yes or ret == QtGui.QMessageBox.YesToAll:
CorrectedList.append(line_c[:k])
line = self.sp.read_line(self.sp.phyat_file, line_num)
line = line.rstrip()
v0 = {i: np.float(self.sp.fieldStrFromLine(line, i)) for i in keys}
v1 = {i: np.float(self.sp.fieldStrFromLine(line_c, i)) for i in keys}
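                    # Recompute the shift and intensity correction relative to the
                    # current atomic data so that the effective wavelength
                    # (lambda + l_shift) and effective intensity (i_rel * i_cor)
                    # of the cosmetic line are preserved.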
l_shift = v1['lambda'] + v1['l_shift'] - v0['lambda']
i_cor = v1['i_cor'] * v1['i_rel'] / v0['i_rel']
l_shift_str = self.rightFormat(str(l_shift), 'l_shift')
i_cor_str = self.rightFormat(str(i_cor), 'i_cor')
line = self.sp.replace_field(line, 'l_shift', l_shift_str)
line = self.sp.replace_field(line, 'i_cor', i_cor_str)
log_.warn('(corrected) ' + line + '\n', calling=self.calling)
self.sp.replace_line(self.sp.fic_cosmetik, line)
else:
UnCorList.append(line_c[:k])
log_.warn('Not corrected.\n', calling=self.calling)
nErr = len(ErrorList)
nCor = len(CorrectedList)
nUnCor = len(UnCorList)
nNfd = len(NotFound)
if nErr > 0:
answer = ShowFinalMessage(nErr, nCor, nUnCor, nNfd, UnCorList, NotFound)
if 'DelNotFnd' in answer:
for i in NotFound:
self.sp.remove_line(self.sp.fic_cosmetik, int(i))
if 'DelUncor' in answer:
for i in UnCorList:
self.sp.remove_line(self.sp.fic_cosmetik, int(i))
def set_status_text(self):
if self.sp is None:
return
if self.sp.phyat_file == 'NO_phyat.dat':
self.status_text.setText('pySSN, v {}. init file: {}, No synthesis'.format(__version__,
self.sp.config_file.split('/')[-1]))
elif self.sp.get_conf('do_cosmetik'):
self.status_text.setText('pySSN, v {}. init file: {}, at. data: {}, model: {}, cosmetic: {}'.format(__version__,
self.sp.config_file.split('/')[-1],
self.sp.phyat_file.split('/')[-1],
self.sp.get_conf('fic_modele').split('/')[-1],
self.sp.get_conf('fic_cosmetik').split('/')[-1]))
else:
self.status_text.setText('pySSN, v {}. init file: {}, at. data: {}, model: {}, No cosmetic'.format(__version__,
self.sp.config_file.split('/')[-1],
self.sp.phyat_file.split('/')[-1],
self.sp.get_conf('fic_modele').split('/')[-1]))
def test_init_file(self):
if self.sp == None:
self.showErrorBox = False
self.showErrorBox = True
invalidCommands = []
if os.path.isfile(self.init_file_name):
f = open(self.init_file_name, 'r')
lines = f.readlines()
f.close()
else:
invalidCommands.append('\nFile not found')
lines = []
triple_quoted_string_found = False
newlines = []
rows = []
for i in range(len(lines)):
line = lines[i].split('#')[0].rstrip()
k = line.find('=')
if not (line.strip().startswith('#') or len(line.strip()) == 0):
if '"""' in line:
triple_quoted_string_found = not triple_quoted_string_found
if triple_quoted_string_found:
newlines.append(line.split('#')[0].rstrip())
rows.append(i+1)
else:
s = line.split('#')[0].rstrip()
if len(s.strip()) > 0:
newlines[-1] += '\n' + s
else:
if len(line) == len(line.lstrip()) and not triple_quoted_string_found:
newlines.append(line.split('#')[0].rstrip())
rows.append(i+1)
else:
s = line.split('#')[0].rstrip()
if len(s.strip()) > 0:
newlines[-1] += '\n' + s
for i in range(len(newlines)):
line = newlines[i]
line_list = line.split('\n')
if len(line_list) > 3:
line_str = line_list[0] + '\n' + line_list[1] + '\n' + line_list[2] + '\n...'
else:
line_str = line
try:
exec(line)
except IndentationError:
invalidCommands.append('\nIndentation error, line {}:\n{}'.format(rows[i],line_str))
except SyntaxError:
if '"""' in line and triple_quoted_string_found:
invalidCommands.append('\nUnclosed triple-quotation mark, line {}:\n{}'.format(rows[i],line_str))
else:
invalidCommands.append('\nInvalid syntax, line {}:\n{}'.format(rows[i],line_str))
except(AttributeError, NameError):
invalidCommands.append('\nUndefined variable name or attribute, line {}:\n{}'.format(rows[i],line_str))
except:
invalidCommands.append('\nUndefined error, line {}:\n{}'.format(rows[i],line_str))
if len(invalidCommands) > 0:
title = 'Fatal error'
msg = 'Error in the initialization file \'{0}\': '.format(self.init_file_name)
for line in invalidCommands:
msg = msg + '\n' + line
if self.showErrorBox:
if self.sp == None:
buttom = QtGui.QMessageBox.Abort
else:
buttom = QtGui.QMessageBox.Cancel
QtGui.QMessageBox.critical(self, title, msg, buttom)
else:
log_.warn('{}: {}'.format(title, msg), calling=self.calling)
return False
return True
def start_spectrum(self):
init_file = self.init_file_name.split('/')[-1]
dir_ = self.init_file_name.split(init_file)[0]
if dir_ == '':
dir_ = './'
self.directory = dir_
if not self.test_init_file():
if self.sp == None:
sys.exit()
else:
return
self.sp = spectrum(config_file=self.init_file_name)
if self.sp.errorMsg:
if self.showErrorBox:
msg = 'Synthesis not possible. \n\n{}'.format(self.sp.errorMsg)
msg = self.sp.errorMsg
ret = QtGui.QMessageBox.critical(self, 'Critical Error', msg, QtGui.QMessageBox.Abort, QtGui.QMessageBox.Ignore)
if ret == QtGui.QMessageBox.Abort:
sys.exit()
self.sp.errorMsg = ''
if len(self.sp.read_obs_error) > 0:
title = 'Error reading observations'
msg = self.sp.read_obs_error
if self.showErrorBox:
QtGui.QMessageBox.critical(self, title, msg, QtGui.QMessageBox.Ok )
else:
log_.warn('{}: {}'.format(title, msg), calling=self.calling)
if ( self.sp.get_conf('fic_cosmetik') is None or
self.sp.get_conf('fic_cosmetik') == '' ):
self.sp.set_conf('do_cosmetik', False)
if self.sp.get_conf('do_synth') and self.sp.get_conf('do_cosmetik'):
self.match_cosmetic_phyat_files()
if self.sp.get_conf('clean_cosmetic_file'):
self.clean_cosmetic_file()
if self.sp.get_conf('order_cosmetic_file'):
self.order_cosmetic_file()
self.set_status_text()
self.axes = None
self.sp.ax2_fontsize = 6
self.sp_norm_box.setText('{}'.format(self.sp.get_conf('sp_norm')))
self.obj_velo_box.setText('{}'.format(self.sp.get_conf('obj_velo')))
self.ebv_box.setText('{}'.format(self.sp.get_conf('e_bv', 0)))
self.resol_box.setText('{}'.format(self.sp.get_conf('resol')))
self.cut2_box.setText('{}'.format(self.sp.get_conf('cut_plot2')))
self.magenta_box.setText('{}'.format(self.sp.plot_magenta))
self.magenta_label_box.setText('{}'.format(self.sp.label_magenta))
self.cyan_box.setText('{}'.format(self.sp.plot_cyan))
self.cyan_label_box.setText('{}'.format(self.sp.label_cyan))
self.sp_min_box.setText('{}'.format(self.sp.get_conf('limit_sp')[0]))
self.sp_max_box.setText('{}'.format(self.sp.get_conf('limit_sp')[1]))
self.init_axes()
self.xlim_min_box.setText('{}'.format(self.x_plot_lims[0]))
self.xlim_max_box.setText('{}'.format(self.x_plot_lims[1]))
self.y1lim_min_box.setText('{}'.format(self.y1_plot_lims[0]))
self.y1lim_max_box.setText('{}'.format(self.y1_plot_lims[1]))
self.y3lim_min_box.setText('{}'.format(self.y3_plot_lims[0]))
self.y3lim_max_box.setText('{}'.format(self.y3_plot_lims[1]))
self.verbosity_ag.actions()[self.sp.get_conf('log_level', 0)].setChecked(True)
self.line_tick_ax_ag.actions()[self.sp.get_conf('line_tick_ax', 0)].setChecked(True)
self.line_tick_pos_ag.actions()[self.sp.get_conf('line_tick_pos', 0)].setChecked(True)
self.residual_GroupBox.setChecked(self.sp.get_conf('qt_plot_residuals', True))
self.selected_ions_action.setChecked(self.sp.get_conf('show_selected_ions_only', False))
self.ion_cb.setChecked(self.sp.get_conf('show_selected_ions_only', False))
self.selected_intensities_action.setChecked(self.sp.get_conf('show_selected_intensities_only', False))
self.cut_cb.setChecked(self.sp.get_conf('show_selected_intensities_only', False))
self.diff_lines_ag.actions()[self.sp.get_conf('diff_lines_by', 0)].setChecked(True)
self.line_tick_ax_ag.actions()[self.sp.get_conf('line_tick_ax', 0)].setChecked(True)
self.editing_lines_action.setChecked(self.sp.get_conf('qt_allow_editing_lines', False))
self.update_lines_action.setChecked(self.sp.get_conf('qt_update_after_editing_lines', False))
self.plot_cont_action.setChecked(self.sp.get_conf('cont_plot', False))
self.show_line_ticks_action.setChecked(self.sp.get_conf('show_line_ticks', False))
self.plot_lines_action.setChecked(self.sp.get_conf('plot_lines_of_selected_ions', False))
self.lineIDs_GroupBox.setChecked(self.sp.get_conf('show_line_ticks', False) or self.sp.get_conf('plot_lines_of_selected_ions', False))
try:
selected_ions = self.sp.get_conf('selected_ions')
s = ''
for ion in selected_ions:
s = s + ion + ', '
if not s == '':
s = s[:-2]
self.ion_box.setText(s)
self.set_ion()
except:
self.ion_box.setText('')
self.line_sort_ag.actions()[self.sp.get_conf('save_lines_sort', 0)].setChecked(True)
self.show_header_action.setChecked(self.sp.get_conf('save_lines_header', False))
self.get_line_fields_to_print()
self.readOnlyCells_bg_color = QtGui.QColor('white')
self.editableCells_bg_color = QtGui.QColor('lightgreen')
if 'linux' in sys.platform and 'Plastique' in self.style_list:
default_style = 'Plastique'
elif 'darwin' in sys.platform and 'Macintosh (aqua)' in self.style_list:
default_style = 'Macintosh (aqua)'
else:
default_style = self.style_list[0]
if self.sp.get_conf('qt_style') not in self.style_list:
if 'QT_STYLE' in os.environ:
if os.environ['QT_STYLE'] in self.style_list:
self.sp.set_conf('qt_style', os.environ['QT_STYLE'])
else:
log_.warn('Unknown Qt style {}, using {}'.format(os.environ['QT_STYLE'], default_style))
self.sp.set_conf('qt_style', default_style)
else:
self.sp.set_conf('qt_style', default_style)
index_style = self.style_list.index(self.sp.get_conf('qt_style'))
self.style_ag.actions()[index_style].setChecked(True)
QtGui.qApp.setStyle(self.sp.get_conf('qt_style'))
self.enable_tooltips_action.setChecked(self.sp.get_conf('qt_enable_tooltips', True))
self.enable_tooltips_action_clicked()
self.adjust_fig_action.setChecked(self.sp.get_conf('fig_adjust', True))
def sp_norm(self):
if self.sp is None:
return
if not self.validate_sp_norm():
return
old_sp_norm = self.sp.get_conf('sp_norm')
new_sp_norm = np.float(self.sp_norm_box.text())
if old_sp_norm == new_sp_norm:
return
log_.message('Changing sp_norm. Old: {}, New: {}'.format(old_sp_norm, new_sp_norm), calling=self.calling)
self.statusBar().showMessage('Changing intensity scale of the observed spectrum ...')
QtGui.QApplication.processEvents()
self.sp.renorm(new_sp_norm)
self.on_draw()
def obj_velo(self):
if self.sp is None:
return
if not self.validate_obj_velo():
return
old_obj_velo = self.sp.get_conf('obj_velo')
new_obj_velo = np.float(self.obj_velo_box.text())
if old_obj_velo == new_obj_velo:
return
self.sp.iterpolate_velocity = False
self.sp.set_conf('obj_velo', new_obj_velo)
log_.message('Changing obj_velo. Old: {}, New: {}'.format(old_obj_velo, new_obj_velo), calling=self.calling)
self.statusBar().showMessage('Executing doppler correction of the observed spectrum ...')
QtGui.QApplication.processEvents()
self.sp.init_obs(obj_velo=new_obj_velo)
self.sp.init_red_corr()
self.sp.make_continuum()
self.sp.run(do_synth = self.sp.do_synth, do_read_liste = True, do_profiles=False)
self.on_draw()
def ebv(self):
if self.sp is None:
return
if not self.validate_ebv():
return
old_ebv = self.sp.get_conf('e_bv')
new_ebv = np.float(self.ebv_box.text())
if old_ebv == new_ebv and not self.cont_par_changed:
return
log_.message('Changing E B-V. Old: {}, New: {}'.format(old_ebv, new_ebv), calling=self.calling)
self.statusBar().showMessage('Changing color excess E(B-V) ...', 4000)
self.statusBar().showMessage('Executing reddening correction of the synthetic spectrum ...')
QtGui.QApplication.processEvents()
self.sp.set_conf('e_bv', new_ebv)
self.sp.init_red_corr()
self.sp.make_continuum()
self.sp.run(do_synth = self.sp.do_synth, do_read_liste = False, do_profiles=False)
self.on_draw()
self.cont_par_changed = False
def rerun(self):
if not self.validate_synthesis_parameters():
return
if ( self.x_plot_lims[0] < np.float(self.sp_min_box.text()) or
self.x_plot_lims[1] > np.float(self.sp_max_box.text()) ):
self.xlim_min_box.setText(self.sp_min_box.text())
self.xlim_max_box.setText(self.sp_max_box.text())
self.statusBar().showMessage('Rerunning synthesis ...')
QtGui.QApplication.processEvents()
self.sp.set_conf('limit_sp', (np.float(self.sp_min_box.text()), np.float(self.sp_max_box.text())))
self.sp.set_conf('resol', np.int(self.resol_box.text()))
self.sp.set_conf('obj_velo', np.float(self.obj_velo_box.text()))
self.sp.set_conf('sp_norm', np.float(self.sp_norm_box.text()))
self.sp.set_conf('e_bv', np.float(self.ebv_box.text()))
self.sp.init_obs()
self.sp.init_red_corr()
self.sp.make_continuum()
self.sp.run()
self.set_plot_limits_and_draw()
def adjust(self):
if self.sp is None:
return
self.sp.errorMsg = ''
self.statusBar().showMessage('Running update ...')
QtGui.QApplication.processEvents()
self.sp_norm()
self.obj_velo()
self.ebv()
if self.sp.errorMsg:
if self.showErrorBox:
msg = self.sp.errorMsg
QtGui.QMessageBox.warning(self, 'Update error', msg, QtGui.QMessageBox.Ok)
return 0
ndiff, errorMsg = self.sp.adjust()
if ndiff == -1:
self.sp.do_cosmetik = False
self.sp.set_conf('do_cosmetik', False)
self.sp.fic_cosmetik
self.set_status_text()
title = 'Error in cosmetic file'
msg = 'Unable to read from file \'{}\'\nChanging to \'no cosmetic\':\n{}'.format(self.sp.get_conf('fic_cosmetik'), errorMsg)
if self.showErrorBox:
QtGui.QMessageBox.critical(self, title, msg, QtGui.QMessageBox.Ok )
else:
log_.warn('{}: {}'.format(title, msg), calling=self.calling)
if ndiff > 0:
self.on_draw()
self.statusBar().showMessage('Update finished.', 4000)
return ndiff
def apply_post_proc(self):
path = str(self.post_proc_file or '')
file_choices = "Python files (*.py) (*.py);;All files (*) (*)"
title = 'Open post-process file'
path = unicode(QtGui.QFileDialog.getOpenFileName(self, title, path, file_choices))
path = path.split('/')[-1]
if not path:
return
try:
user_module = {}
execfile(path, user_module)
self.post_proc = user_module['post_proc']
self.post_proc_file = path
log_.message('function post_proc read from {}'.format(self.post_proc_file))
except:
self.post_proc = None
title = 'Error reading post-process file'
msg = 'Unable to read post-process file \'{}\''.format(path)
if self.showErrorBox:
QtGui.QMessageBox.critical(self, title, msg, QtGui.QMessageBox.Ok)
else:
log_.warn(msg, calling = self.calling)
return
try:
self.post_proc(self.fig)
self.canvas.draw()
except:
title = 'Error executing post-process'
msg = 'Error in post-process file \'{}\''.format(self.post_proc_file)
if self.showErrorBox:
QtGui.QMessageBox.critical(self, title, msg, QtGui.QMessageBox.Ok)
else:
log_.warn(msg, calling = self.calling)
def update_profile(self):
if self.sp is None:
return
self.sp.run(do_synth = True, do_read_liste = False, do_profiles=True)
self.on_draw()
def cut2(self):
if self.sp is None:
return
if not self.validate_cut():
return
self.selected_intensities_action.setChecked(True)
self.sp.set_conf('show_selected_intensities_only', True)
self.cut_cb.setChecked(True)
self.draw_ion()
def get_ion_str(self,s):
s = s.strip()
s = s.replace(' ', '_')
if s.isdigit():
line = self.sp.get_line_from_reduce_code(s)
if line is None:
s = ''
else:
s = self.sp.fieldStrFromLine(line,'id').strip()
return s
def set_ion(self):
if self.sp is None:
return
sList = []
s = self.ion_box.text()
k = s.indexOf(',')
while k >= 0:
s0 = self.get_ion_str(str(s[:k]))
if s0 != '' and s0 != '*':
sList.append(s0)
s = s[k+1:]
k = s.indexOf(',')
s0 = self.get_ion_str(str(s))
if s0 != '' and s0 != '*':
sList.append(s0)
s = ''
for s0 in sList:
s = s + s0 + ', '
s = s[:-2]
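        # Expand each requested entry: a trailing '*' selects the ion together
        # with all related ions, otherwise only that exact ion is kept; an
        # entry not found as an ion is treated as an element name and expanded
        # to all ions of that element.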
for item in sList[:]:
sList.remove(item)
if item[-1] == '*':
item = item[:-1]
this_ion_only = False
else:
this_ion_only = True
self.sp.set_ion_list()
if item.ljust(9) in self.sp.liste_raies['id']:
if self.sp.true_ion(item) == item or this_ion_only:
sList = sList + [item]
if not this_ion_only:
sList = sList + self.sp.get_all_ions_from_ion(item)
elif item.ljust(9) in self.sp.sp_theo['raie_ref']['id']:
if self.sp.true_ion(item) == item or this_ion_only:
sList = sList + [item]
if not this_ion_only:
sList = sList + self.sp.get_all_ions_from_ion(item)
else:
ion_list = self.sp.get_ions_from_element(item)
sList = sList + ion_list
self.sp.set_conf('selected_ions', sList)
self.ion_box.setText(s)
def set_refline_to_info_box(self,j):
if self.sp.get_conf('diff_lines_by') == 0 and len(self.sp.selected_ions_data) > 0:
if j == -1:
j = 0
s = str(self.sp.selected_ions_data[j][2][0])
self.line_info_box.setText(s)
def draw_ion(self):
if self.cut_cb.isChecked():
if self.validate_cut():
self.sp.set_conf('cut_plot2', np.float(self.cut2_box.text()))
else:
return
self.set_ion()
self.sp.set_conf('index_of_current_ion', -1)
self.sp.set_selected_ions_data()
self.set_refline_to_info_box(-1)
self.on_draw()
def line_info(self):
if self.sp is None:
return
msg = ''
s = str(self.line_info_box.text())
if s == '':
return
w = self.sp.field_width['num'] - 1
s = s[-w:]
if s[0] == '0':
s = s[1:]
self.line_info_box.setText(s)
try:
new_ref = int(s)
except ValueError:
msg = 'Invalid input.\n It is not an integer'
if msg == '':
line = self.sp.get_line_from_reduce_code(s)
if line is None:
msg = 'No line unambiguously associated with this number.'
if msg == '':
s = self.sp.fieldStrFromLine(line,'num').strip()
self.line_info_box.setText(s)
self.line_info_ref = int(s)
if self.sp.get_conf('qt_show_dialogs', True):
self.show_line_info_dialog()
else:
self.sp.line_info(new_ref, sort='i_rel')
else:
title = 'Error in line number'
QtGui.QMessageBox.critical(self, title, msg, QtGui.QMessageBox.Ok )
def magenta_line(self):
if self.sp is None:
return
ref_str = self.magenta_box.text()
ref_txt = self.magenta_label_box.text()
if ref_str == '':
self.sp.plot_magenta = None
self.sp.label_magenta = ''
self.on_draw()
else:
new_ref = np.int(ref_str)
self.sp.plot_magenta = new_ref
self.sp.label_magenta = ref_txt
self.on_draw()
def cyan_line(self):
if self.sp is None:
return
ref_str = self.cyan_box.text()
ref_txt = self.cyan_label_box.text()
if ref_str == '':
self.sp.plot_cyan = None
self.sp.label_cyan = ''
self.on_draw()
else:
new_ref = np.int(ref_str)
self.sp.plot_cyan = new_ref
self.sp.label_cyan = ref_txt
self.on_draw()
def diff_lines(self):
self.sp.set_conf('index_of_current_ion', -1)
self.set_plot_ax2()
if self.sp.get_conf('diff_lines_by') == 0 and len(self.sp.selected_ions_data) > 0:
s = str(self.sp.selected_ions_data[0][2][0])
self.line_info_box.setText(s)
def set_plot_ax2(self):
self.sp.set_selected_ions_data()
k = self.line_tick_ax_list.index(self.line_tick_ax_ag.checkedAction().text())
self.sp.set_conf('line_tick_ax',k)
k = self.line_tick_pos_list.index(self.line_tick_pos_ag.checkedAction().text())
self.sp.set_conf('line_tick_pos',k)
k = self.diff_lines_list.index(self.diff_lines_ag.checkedAction().text())
self.sp.set_conf('diff_lines_by',k)
if self.show_line_ticks_action.isChecked():
self.make_axes()
def verbosity(self):
verbosity = self.verbosity_list.index(self.verbosity_ag.checkedAction().text())
if verbosity == log_.level:
return
log_.debug('Verbosity changed from {} to {}'.format(log_.level, verbosity), calling=self.calling)
log_.level = verbosity
self.sp.set_conf('log_level', verbosity)
def style(self):
new_style_str = str(self.style_ag.checkedAction().text())
old_style_str = self.sp.get_conf('qt_style')
if new_style_str == old_style_str:
return
self.sp.set_conf('qt_style', new_style_str)
QtGui.qApp.setStyle(new_style_str)
log_.debug('Widget style changed from {} to {}'.format(old_style_str, new_style_str), calling=self.calling)
def update_lim_boxes(self):
xformat = '{:.1f}'
yformat = '{1:.{0}f}'
min_diff = 2
if abs(self.x_plot_lims[1] - self.x_plot_lims[0]) < min_diff:
m = (self.x_plot_lims[0] + self.x_plot_lims[1])/2
x_lims = (m - min_diff/2,m + min_diff/2)
else:
x_lims = self.x_plot_lims
min_diff = 0.2
if abs(self.y1_plot_lims[1] - self.y1_plot_lims[0]) < min_diff:
m = (self.y1_plot_lims[0] + self.y1_plot_lims[1])/2
y1_lims = (m - min_diff/2,m + min_diff/2)
else:
y1_lims = self.y1_plot_lims
min_diff = 0.2
if abs(self.y3_plot_lims[1] - self.y3_plot_lims[0]) < min_diff:
m = (self.y3_plot_lims[0] + self.y3_plot_lims[1])/2
y3_lims = (m - min_diff/2,m + min_diff/2)
else:
y3_lims = self.y3_plot_lims
if self.x_plot_lims[0] != np.float(self.xlim_min_box.text()):
self.xlim_min_box.setText(xformat.format(x_lims[0]))
if self.x_plot_lims[1] != np.float(self.xlim_max_box.text()):
self.xlim_max_box.setText(xformat.format(x_lims[1]))
delta = abs(y1_lims[1]-y1_lims[0])
if delta < 2:
precision = 2
else:
precision = 1
if self.y1_plot_lims[0] != np.float(self.y1lim_min_box.text()):
self.y1lim_min_box.setText(yformat.format(precision, y1_lims[0]))
if self.y1_plot_lims[1] != np.float(self.y1lim_max_box.text()):
self.y1lim_max_box.setText(yformat.format(precision, y1_lims[1]))
delta = abs(y3_lims[1]-y3_lims[0])
if delta < 2:
precision = 2
else:
precision = 1
if self.y3_plot_lims[0] != np.float(self.y3lim_min_box.text()):
self.y3lim_min_box.setText(yformat.format(precision, y3_lims[0]))
if self.y3_plot_lims[1] != np.float(self.y3lim_max_box.text()):
self.y3lim_max_box.setText(yformat.format(precision, y3_lims[1]))
self.set_plot_limits_and_draw()
def validate_input(self, editBox, field, title, varType = 'float', showError = True):
value = editBox.text()
if value == None:
return False
if ( ( varType == 'float' and not self.isFloat(value) ) or \
( varType == 'integer' and not self.isInteger(value) ) or \
( varType == 'positive integer' and not self.isPositiveInteger(value) ) or \
( varType == 'positive odd integer' and not self.isPositiveOdd(value) ) ):
msg = '{} should be a {}'.format(field, varType)
            msg = msg.replace('a integer', 'an integer')
editBox.setFocus()
if showError:
if self.showErrorBox:
QtGui.QMessageBox.critical(self, title, msg, QtGui.QMessageBox.Ok )
else:
log_.warn('{}: {}'.format(title, msg), calling=self.calling)
return False
else:
return True
def validate_sp_min(self):
return self.validate_input(self.sp_min_box, 'xmin for the synthesis', 'Input error', 'float')
def validate_sp_max(self):
return self.validate_input(self.sp_max_box, 'xmax for the synthesis', 'Input error', 'float')
def validate_sp_norm(self):
return self.validate_input(self.sp_norm_box, 'normalization factor', 'Input error', 'float')
def validate_ebv(self):
return self.validate_input(self.ebv_box, 'color excess E(B-V)', 'Input error', 'float')
def validate_obj_velo(self):
return self.validate_input(self.obj_velo_box, 'radial velocity', 'Input error', 'float')
def validate_resol(self):
return self.validate_input(self.resol_box, 'rebinning factor', 'Input error', 'positive odd integer')
def validate_xlim_min(self, showError = True):
return self.validate_input(self.xlim_min_box, 'xmin', 'Invalid plot limit', 'float', showError)
def validate_xlim_max(self, showError = True):
return self.validate_input(self.xlim_max_box, 'xmax', 'Invalid plot limit', 'float', showError)
def validate_y1lim_min(self):
return self.validate_input(self.y1lim_min_box, 'ymin', 'Invalid plot limit', 'float')
def validate_y1lim_max(self):
return self.validate_input(self.y1lim_max_box, 'ymax', 'Invalid plot limit', 'float')
def validate_y3lim_min(self):
return self.validate_input(self.y3lim_min_box, 'residual ymin', 'Invalid plot limit', 'float')
def validate_y3lim_max(self):
return self.validate_input(self.y3lim_max_box, 'residual ymax', 'Invalid plot limit', 'float')
def validate_cut(self):
return self.validate_input(self.cut2_box, 'cut', 'Input error', 'float')
def sp_lim_in_range(self):
xmin = np.float(self.sp_min_box.text())
xmax = np.float(self.sp_max_box.text())
if ( xmin < xmax - 9.999 ) and ( xmin > 0. ) and ( xmax < 200000000.):
return True
else:
if self.showErrorBox:
QtGui.QMessageBox.critical(self, 'Invalid synthesis limits', 'The acceptable values are:\n\n xmax - xmin > 10,\n xmin > 0,\n xmax < 200000000.',
QtGui.QMessageBox.Ok )
else:
log_.warn('Invalid synthesis limits', 'The acceptable values are:\n\n xmax - xmin > 10,\n xmin > 0,\n xmax < 200000000.', calling=self.calling)
return False
def validate_synthesis_parameters(self):
return ( self.validate_sp_min() and
self.validate_sp_max() and
self.sp_lim_in_range() and
self.validate_sp_norm() and
self.validate_obj_velo() and
self.validate_ebv() and
self.validate_resol() )
def validate_plot_parameters(self):
return ( self.validate_xlim_min() and
self.validate_xlim_max() and
self.validate_y1lim_min() and
self.validate_y1lim_max() and
self.validate_y3lim_min() and
self.validate_y3lim_max() )
def set_plot_limits_and_draw(self):
if not self.validate_plot_parameters():
return
self.x_plot_lims = (np.float(self.xlim_min_box.text()), np.float(self.xlim_max_box.text()))
self.y1_plot_lims = (np.float(self.y1lim_min_box.text()), np.float(self.y1lim_max_box.text()))
self.y3_plot_lims = (np.float(self.y3lim_min_box.text()), np.float(self.y3lim_max_box.text()))
self.sp.set_conf('x_plot_lims', self.x_plot_lims)
self.sp.set_conf('y1_plot_lims', self.y1_plot_lims)
self.sp.set_conf('y3_plot_lims', self.y3_plot_lims)
self.restore_axes()
self.draw_ion()
def set_limit_sp(self):
if not ( self.validate_sp_min() and
self.validate_sp_max() and
self.sp_lim_in_range() ):
return
limit_sp = (np.float(self.sp_min_box.text()), np.float(self.sp_max_box.text()))
self.sp.set_conf('limit_sp', limit_sp)
def set_limit_sp_and_run(self):
if str(self.sp_min_box.text()).strip() == '':
self.sp_min_box.setText('{:.1f}'.format(self.sp.w_min))
if str(self.sp_max_box.text()).strip() == '':
self.sp_max_box.setText('{:.1f}'.format(self.sp.w_max))
if not ( self.validate_sp_min() and
self.validate_sp_max() and
self.sp_lim_in_range() ):
return
old_limit_sp = self.sp.get_conf('limit_sp')
new_limit_sp = (np.float(self.sp_min_box.text()), np.float(self.sp_max_box.text()))
if old_limit_sp == new_limit_sp:
if not self.axes_fixed:
self.xlim_min_box.setText(self.sp_min_box.text())
self.xlim_max_box.setText(self.sp_max_box.text())
self.set_plot_limits_and_draw()
return
if not self.validate_xlim_min(False):
self.xlim_min_box.setText(self.sp_min_box.text())
if not self.validate_xlim_max(False):
self.xlim_max_box.setText(self.sp_max_box.text())
if ( np.float(self.xlim_min_box.text()) >= new_limit_sp[1] or
np.float(self.xlim_max_box.text()) <= new_limit_sp[0] ):
self.xlim_min_box.setText(self.sp_min_box.text())
self.xlim_max_box.setText(self.sp_max_box.text())
self.sp.set_conf('limit_sp', new_limit_sp)
log_.message('Changing limit_sp. Old: {}, New: {}'.format(old_limit_sp, new_limit_sp), calling=self.calling)
self.statusBar().showMessage('Changing the synthesis wavelength limits ...')
QtGui.QApplication.processEvents()
self.sp.init_obs()
self.sp.init_red_corr()
self.sp.make_continuum()
self.sp.run(do_synth = True, do_read_liste = True, do_profiles=False)
self.set_plot_limits_and_draw()
def resol(self):
if self.sp is None:
return
if not self.validate_resol():
return
old_resol = self.sp.get_conf('resol')
new_resol = np.int(self.resol_box.text())
if old_resol == new_resol:
return
self.sp.set_conf('resol', new_resol)
log_.message('Changing resol. Old: {}, New: {}'.format(old_resol, new_resol), calling=self.calling)
self.statusBar().showMessage('Changing rebinning factor ...')
QtGui.QApplication.processEvents()
self.sp.set_conf('resol', new_resol)
self.sp.init_obs()
self.sp.init_red_corr()
self.sp.make_continuum()
self.sp.run(do_synth = True, do_read_liste = True, do_profiles=False)
self.on_draw()
def leave_fig(self, event):
self.sp.firstClick = True
if ( self.x_plot_lims != self.axes.get_xlim() or
self.y1_plot_lims != self.axes.get_ylim() or
( self.axes3 is not None and self.y3_plot_lims != self.axes3.get_ylim() ) ):
limits_changed = True
else:
limits_changed = False
if not self.axes_fixed and limits_changed:
self.save_axes()
self.update_lim_boxes()
def fix_axes(self):
if self.fix_axes_cb.isChecked():
self.axes_fixed = True
else:
self.axes_fixed = False
def get_line_fields_to_print(self):
field_list = self.sp.get_conf('save_lines_fields')
for i in range(0,len(self.line_field_menu.actions())):
if self.line_print_dic.keys()[i] in field_list:
self.line_field_menu.actions()[i].setChecked(True)
else:
self.line_field_menu.actions()[i].setChecked(False)
def set_show_header(self):
if self.show_header_action.isChecked():
self.sp.set_conf('save_lines_header', True)
else:
self.sp.set_conf('save_lines_header', False)
def set_line_fields_to_print(self):
s = []
for i in range(0,len(self.line_field_menu.actions())):
if self.line_field_menu.actions()[i].isChecked():
s.append( self.line_print_dic.keys()[i])
self.sp.set_conf('save_lines_fields', s)
def save_lines(self):
self.sp.save_lines()
path = self.sp.get_conf('save_lines_filename')
self.statusBar().showMessage('Lines saved to file %s' % path, 4000)
def save_lines_as(self):
file_choices = "Text files (*.txt *.dat) (*.txt *.dat);;Tex files (*.tex) (*.tex);;CSV files (*.csv) (*.csv);;All Files (*) (*)"
filename = self.sp.get_conf('save_lines_filename')
extension = os.path.splitext(filename)[1][1:].lower()
if extension in ['txt','dat']:
selectedFilter = 'Text files (*.txt *.dat) (*.txt *.dat)'
elif extension in ['tex']:
selectedFilter = 'Tex files (*.tex) (*.tex)'
elif extension in ['csv']:
selectedFilter = 'CSV files (*.csv) (*.csv)'
else:
selectedFilter = 'All Files (*) (*)'
path = unicode(QtGui.QFileDialog.getSaveFileName(self, 'Save lines to file', filename, file_choices, selectedFilter))
if path:
self.sp.set_conf('save_lines_filename', path)
self.sp.save_lines()
self.statusBar().showMessage('Lines saved to file %s' % path, 4000)
def save_synthesis_as(self):
file_choices = "Text files (*.txt *.dat) (*.txt *.dat);;Tex files (*.tex) (*.tex);;CSV files (*.csv) (*.csv);;All Files (*) (*)"
filename = self.sp.get_conf('synthesis_filename')
extension = os.path.splitext(filename)[1][1:].lower()
if extension in ['txt','dat']:
selectedFilter = 'Text files (*.txt *.dat) (*.txt *.dat)'
elif extension in ['tex']:
selectedFilter = 'Tex files (*.tex) (*.tex)'
elif extension in ['csv']:
selectedFilter = 'CSV files (*.csv) (*.csv)'
else:
selectedFilter = 'All Files (*) (*)'
path = unicode(QtGui.QFileDialog.getSaveFileName(self, 'Save synthesis to file', filename, file_choices, selectedFilter))
        if not path:
            return
        with open(path, 'w') as f:
            for w, fl in zip(self.sp.w_ori, self.sp.sp_synth_lr):
                f.write('{} {}\n'.format(w, fl))
        self.statusBar().showMessage('Synthesis saved to file %s' % path, 2000)
def line_sort(self):
k = self.line_sort_list.index(self.line_sort_ag.checkedAction().text())
self.sp.set_conf('save_lines_sort',k)
def main_loc(init_filename=None, post_proc_file=None):
app = QtGui.QApplication(sys.argv)
form = AppForm(init_filename=init_filename, post_proc_file=post_proc_file)
form.show()
app.exec_()
return form.fig
def main_loc_obj(init_filename=None, post_proc_file=None):
app = QtGui.QApplication(sys.argv)
form = AppForm(init_filename=init_filename, post_proc_file=post_proc_file)
form.show()
app.exec_()
return form
def main():
parser = get_parser()
args = parser.parse_args()
log_.level = args.verbosity
app = QtGui.QApplication(sys.argv)
form = AppForm(init_filename=args.file, post_proc_file=args.post_proc)
#import pdb
#pdb.set_trace()
form.show()
app.exec_()
if __name__ == "__main__":
main()
| gpl-3.0 |
sebastian-nagel/cc-crawl-statistics | plot/tld.py | 1 | 11639 | import sys
from collections import defaultdict
import pandas
from crawlplot import CrawlPlot, PLOTDIR
from crawlstats import CST, MonthlyCrawl, MultiCount
from top_level_domain import TopLevelDomain
from stats.tld_alexa_top_1m import alexa_top_1m_tlds
from stats.tld_cisco_umbrella_top_1m import cisco_umbrella_top_1m_tlds
from stats.tld_majestic_top_1m import majestic_top_1m_tlds
# min. share of URLs for a TLD to be shown in metrics
min_urls_percentage = .05
field_percentage_formatter = '{0:,.2f}'.format
class TldStats(CrawlPlot):
def __init__(self):
self.tlds = defaultdict(dict)
self.tld_stats = defaultdict(dict)
self.N = 0
def add(self, key, val):
cst = CST[key[0]]
if cst != CST.tld:
return
tld = key[1]
crawl = key[2]
self.tlds[tld][crawl] = val
def transform_data(self):
crawl_has_host_domain_counts = {}
for tld in self.tlds:
tld_repr = tld
tld_obj = None
if tld in ('', '(ip address)'):
continue
else:
try:
tld_obj = TopLevelDomain(tld)
tld_repr = tld_obj.tld
except:
print('error', tld)
continue
for crawl in self.tlds[tld]:
self.tld_stats['suffix'][self.N] = tld_repr
self.tld_stats['crawl'][self.N] = crawl
date = pandas.Timestamp(MonthlyCrawl.date_of(crawl))
self.tld_stats['date'][self.N] = date
if tld_obj:
self.tld_stats['type'][self.N] \
= TopLevelDomain.short_type(tld_obj.tld_type)
self.tld_stats['subtype'][self.N] = tld_obj.sub_type
self.tld_stats['tld'][self.N] = tld_obj.first_level
else:
self.tld_stats['type'][self.N] = ''
self.tld_stats['subtype'][self.N] = ''
self.tld_stats['tld'][self.N] = ''
value = self.tlds[tld][crawl]
n_pages = MultiCount.get_count(0, value)
self.tld_stats['pages'][self.N] = n_pages
n_urls = MultiCount.get_count(1, value)
self.tld_stats['urls'][self.N] = n_urls
n_hosts = MultiCount.get_count(2, value)
self.tld_stats['hosts'][self.N] = n_hosts
n_domains = MultiCount.get_count(3, value)
self.tld_stats['domains'][self.N] = n_domains
if n_urls != n_hosts:
# multi counts including host counts are not (yet)
# available for all crawls
crawl_has_host_domain_counts[crawl] = True
elif crawl not in crawl_has_host_domain_counts:
crawl_has_host_domain_counts[crawl] = False
self.N += 1
for crawl in crawl_has_host_domain_counts:
if not crawl_has_host_domain_counts[crawl]:
print('No host and domain counts for', crawl)
for n in self.tld_stats['crawl']:
if self.tld_stats['crawl'][n] == crawl:
del(self.tld_stats['hosts'][n])
del(self.tld_stats['domains'][n])
self.tld_stats = pandas.DataFrame(self.tld_stats)
def save_data(self):
self.tld_stats.to_csv('data/tlds.csv')
def percent_agg(self, data, columns, index, values, aggregate):
data = data[[columns, index, values]]
data = data.groupby([columns, index]).agg(aggregate)
data = data.groupby(level=0).apply(lambda x: 100.0*x/float(x.sum()))
# print("\n-----\n")
# print(data.to_string(formatters={'urls': field_percentage_formatter}))
return data
def pivot_percentage(self, data, columns, index, values, aggregate):
data = self.percent_agg(data, columns, index, values, aggregate)
return data.reset_index().pivot(index=index,
columns=columns, values=values)
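    # Both helpers above normalize the aggregated counts within each value of
    # `columns` (typically a crawl) to percentages summing to 100, so crawls of
    # different sizes can be compared; pivot_percentage additionally reshapes
    # the result into an index-by-columns table for plotting.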
def plot_groups(self):
title = 'Groups of Top-Level Domains'
ylabel = 'URLs %'
clabel = ''
img_file = 'tld/groups.png'
data = self.pivot_percentage(self.tld_stats, 'crawl', 'type',
'urls', {'urls': 'sum'})
data = data.transpose()
print("\n-----\n")
types = set(self.tld_stats['type'].tolist())
formatters = {c: field_percentage_formatter for c in types}
print(data.to_string(formatters=formatters))
data.to_html('{}/tld/groups-percentage.html'.format(PLOTDIR),
formatters=formatters,
classes=['tablesorter', 'tablepercentage'])
data = self.percent_agg(self.tld_stats, 'date', 'type',
'urls', {'urls': 'sum'}).reset_index()
return self.line_plot(data, title, ylabel, img_file,
x='date', y='urls', c='type', clabel=clabel)
def plot(self, crawls, latest_crawl):
field_formatters = {c: '{:,.0f}'.format
for c in ['pages', 'urls', 'hosts', 'domains']}
for c in ['%urls', '%hosts', '%domains']:
field_formatters[c] = field_percentage_formatter
data = self.tld_stats
data = data[data['crawl'].isin(crawls)]
crawl_data = data
top_tlds = []
# stats per crawl
for crawl in crawls:
print("\n-----\n{}\n".format(crawl))
for aggr_type in ('type', 'tld'):
data = crawl_data
data = data[data['crawl'].isin([crawl])]
data = data.set_index([aggr_type], drop=False)
data = data.sum(level=aggr_type).sort_values(
by=['urls'], ascending=False)
for count in ('urls', 'hosts', 'domains'):
data['%'+count] = 100.0 * data[count] / data[count].sum()
if aggr_type == 'tld':
# skip less frequent TLDs
data = data[data['%urls'] >= min_urls_percentage]
for tld in data.index.values:
top_tlds.append(tld)
print(data.to_string(formatters=field_formatters))
print()
if crawl == latest_crawl:
# latest crawl by convention
type_name = aggr_type
if aggr_type == 'type':
type_name = 'group'
path = '{}/tld/latest-crawl-{}s.html'.format(
PLOTDIR, type_name)
data.to_html(path,
formatters=field_formatters,
classes=['tablesorter'])
# stats comparison for selected crawls
for aggr_type in ('type', 'tld'):
data = crawl_data
if aggr_type == 'tld':
data = data[data['tld'].isin(top_tlds)]
data = self.pivot_percentage(data, 'crawl', aggr_type,
'urls', {'urls': 'sum'})
print("\n----- {}\n".format(aggr_type))
print(data.to_string(formatters={c: field_percentage_formatter
for c in crawls}))
if aggr_type == 'tld':
# save as HTML table
                path = '{}/tld/selected-crawls-percentage.html'.format(PLOTDIR)
data.to_html(path,
formatters={c: '{0:,.4f}'.format
for c in crawls},
classes=['tablesorter', 'tablepercentage'])
def plot_comparison(self, crawl, name, topNlimit=None, method='spearman'):
print()
print('Comparison for', crawl, '-', name, '-', method)
data = self.tld_stats
data = data[data['crawl'].isin([crawl])]
data = data[data['urls'] >= topNlimit]
data = data.set_index(['tld'], drop=False)
data = data.sum(level='tld')
print(data)
data['alexa'] = pandas.Series(alexa_top_1m_tlds)
data['cisco'] = pandas.Series(cisco_umbrella_top_1m_tlds)
data['majestic'] = pandas.Series(majestic_top_1m_tlds)
fields = ('pages', 'urls', 'hosts', 'domains',
'alexa', 'cisco', 'majestic')
formatters = {c: '{0:,.3f}'.format for c in fields}
# relative frequency (percent)
for count in fields:
data[count] = 100.0 * data[count] / data[count].sum()
# Spearman's rank correlation for all TLDs
corr = data.corr(method=method, min_periods=1)
print(corr.to_string(formatters=formatters))
corr.to_html('{}/tld/{}-comparison-{}-all-tlds.html'
.format(PLOTDIR, name, method),
formatters=formatters,
classes=['matrix'])
if topNlimit is None:
return
# Spearman's rank correlation for TLDs covering
# at least topNlimit % of urls
data = data[data['urls'] >= topNlimit]
print()
print('Top', len(data), 'TLDs (>= ', topNlimit, '%)')
print(data)
data.to_html('{}/tld/{}-comparison.html'.format(PLOTDIR, name),
formatters=formatters,
classes=['tablesorter', 'tablepercentage'])
print()
corr = data.corr(method=method, min_periods=1)
print(corr.to_string(formatters=formatters))
corr.to_html('{}/tld/{}-comparison-{}-frequent-tlds.html'
.format(PLOTDIR, name, method),
formatters=formatters,
classes=['matrix'])
print()
def plot_comparison_groups(self):
# Alexa and Cisco types/groups:
for (name, data) in [('Alexa', alexa_top_1m_tlds),
('Cisco', cisco_umbrella_top_1m_tlds),
('Majestic', majestic_top_1m_tlds)]:
compare_types = defaultdict(int)
for tld in data:
compare_types[TopLevelDomain(tld).tld_type] += data[tld]
print(name, 'TLD groups:')
for tld in compare_types:
c = compare_types[tld]
print(' {:6d}\t{:4.1f}\t{}'.format(c, (100.0*c/1000000), tld))
print()
if __name__ == '__main__':
plot_crawls = sys.argv[1:]
    if len(plot_crawls) == 0:
        print(sys.argv[0], 'crawl-id...')
        print()
        print('Distribution of top-level domains for (selected) monthly crawls')
        print()
        print('Example:')
        print('', sys.argv[0], '[options]', 'CC-MAIN-2014-52', 'CC-MAIN-2016-50')
        print()
        print('Last argument is considered to be the latest crawl')
        print()
        print('Options:')
        print()
        sys.exit(1)
    latest_crawl = plot_crawls[-1]
plot = TldStats()
plot.read_data(sys.stdin)
plot.transform_data()
plot.save_data()
plot.plot_groups()
plot.plot(plot_crawls, latest_crawl)
if latest_crawl == 'CC-MAIN-2018-22':
# plot comparison only for crawl of similar date as benchmark data
plot.plot_comparison(latest_crawl, 'selected-crawl',
min_urls_percentage)
# plot.plot_comparison(latest_crawl, 'selected-crawl',
# min_urls_percentage, 'pearson')
plot.plot_comparison_groups()
| apache-2.0 |
jangorecki/h2o-3 | h2o-py/tests/testdir_algos/glm/pyunit_prostate_glm.py | 6 | 1124 | from __future__ import print_function
import h2o
from h2o.estimators.glm import H2OGeneralizedLinearEstimator
from tests import pyunit_utils
import pandas as pd
import statsmodels.api as sm
def test_prostate():
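    # Fit a binomial GLM to the prostate data with both H2O and statsmodels and
    # check that the two null deviances agree to within 1e-5.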
h2o_data = h2o.upload_file(path=pyunit_utils.locate("smalldata/logreg/prostate.csv"))
h2o_data.summary()
sm_data = pd.read_csv(pyunit_utils.locate("smalldata/logreg/prostate.csv")).as_matrix()
sm_data_response = sm_data[:, 1]
sm_data_features = sm_data[:, 2:]
h2o_glm = H2OGeneralizedLinearEstimator(family="binomial", nfolds=10, alpha=0.5)
h2o_glm.train(x=list(range(2, h2o_data.ncol)), y=1, training_frame=h2o_data)
sm_glm = sm.GLM(endog=sm_data_response, exog=sm_data_features, family=sm.families.Binomial()).fit()
print("statsmodels null deviance {0}".format(sm_glm.null_deviance))
print("h2o null deviance {0}".format(h2o_glm.null_deviance()))
assert abs(sm_glm.null_deviance - h2o_glm.null_deviance()) < 1e-5, "Expected null deviances to be the same"
if __name__ == "__main__":
pyunit_utils.standalone_test(test_prostate)
else:
test_prostate()
| apache-2.0 |
chraibi/EEOver | plot.py | 1 | 2625 | from matplotlib.pyplot import *
from numpy import *
import pylab
from matplotlib.patches import Ellipse
from math import pi
import os, shutil
import sys
from sys import argv
import imp
try:
imp.find_module('tqdm')
from tqdm import tqdm
found_tqdm = True
except ImportError:
found_tqdm = False
if len(argv) < 4:
print ("Usage: python %s ellipseDataFile rootsFile resultsFile"%argv[0])
sys.exit()
DIR = "cases" # put the figs here
ms = 9
if os.path.exists(DIR):
print ("Delete directory <%s>"%DIR)
shutil.rmtree(DIR)
print ("Create directory <%s>"%DIR)
os.makedirs(DIR)
else:
os.makedirs(DIR)
print ("Directory <%s> does not exist. Create one .."%DIR)
filename = argv[1] # testcases.txt
rootsfile = argv[2] # roots
resultsfile = argv[3] # areas
print("got input", filename)
print("got roots", rootsfile)
print("got results", resultsfile)
f = open(rootsfile)
data = loadtxt(filename)
data = np.atleast_2d(data) # for the case that we have only one case
areas = loadtxt(resultsfile)
areas = np.atleast_2d(areas)
roots = [line.split() for line in f if not line.startswith("#")]
ids = range(data.shape[0]) #unique(data[:,0]).astype(int)
if found_tqdm:
ids = tqdm(ids)
for Id in ids:
    # PHI must be in radians
d = data[Id, :]
area = areas[Id, :]
index = d[0]
A1 = d[1]
B1 = d[2]
H1 = d[3]
K1 = d[4]
PHI_1 = d[5]
A2 = d[6]
B2 = d[7]
H2 = d[8]
K2 = d[9]
PHI_2 = d[10]
    while PHI_1 >= pi: # fmod is not so accurate; a simple subtraction is preferred
PHI_1 -= pi
while PHI_2 >= pi:
PHI_2 -= pi
# get the roots
#
r = roots[Id]
length = len(r) - 1
pylab.axes()
cir = Ellipse(xy=(H1, K1), width=2*A1, height=2*B1, angle=PHI_1*180/pi, alpha=.2, fc='r', lw=3)
plot(H1, K1, "or", ms=ms)
plot(H2, K2, "ob", ms=ms)
pylab.gca().add_patch(cir)
#plot roots
if length:
R = array([float(i) for i in r[1:]])
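        # Rotate the intersection points, given in the frame where the first
        # ellipse is axis-aligned at the origin, back to the plot frame
        # (rotation by PHI_1 followed by a translation to the ellipse center).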
rx_TR = (R[0::2])*cos(-PHI_1) + (R[1::2])*sin(-PHI_1) + H1
ry_TR = -(R[0::2])*sin(-PHI_1) + (R[1::2])*cos(-PHI_1) + K1
plot(rx_TR, ry_TR, "og", ms=ms, lw=2)
cir = Ellipse(xy=(H2, K2), width=2*A2, height=2*B2, angle=PHI_2*180/pi, alpha=.2, fc='b', lw=3)
pylab.gca().add_patch(cir)
pylab.axis('scaled')
pylab.title(r"$%d,\; A_1= %.3f,\; A_2= %.3f,\;A_{12}= %.3f,\; \epsilon=%.3f$"%(
index, area[1], area[2], area[3], area[5]))
pylab.grid()
figname = os.path.join(DIR, "case%.3d.png"%(Id))
pylab.savefig(figname)
# sys.stdout.write("---> %s"%figname)
#pylab.show()
pylab.clf()
| lgpl-3.0 |
cainiaocome/scikit-learn | sklearn/linear_model/ridge.py | 89 | 39360 | """
Ridge regression
"""
# Author: Mathieu Blondel <[email protected]>
# Reuben Fletcher-Costin <[email protected]>
# Fabian Pedregosa <[email protected]>
# Michael Eickenberg <[email protected]>
# License: BSD 3 clause
from abc import ABCMeta, abstractmethod
import warnings
import numpy as np
from scipy import linalg
from scipy import sparse
from scipy.sparse import linalg as sp_linalg
from .base import LinearClassifierMixin, LinearModel
from ..base import RegressorMixin
from ..utils.extmath import safe_sparse_dot
from ..utils import check_X_y
from ..utils import compute_sample_weight
from ..utils import column_or_1d
from ..preprocessing import LabelBinarizer
from ..grid_search import GridSearchCV
from ..externals import six
from ..metrics.scorer import check_scoring
def _solve_sparse_cg(X, y, alpha, max_iter=None, tol=1e-3, verbose=0):
n_samples, n_features = X.shape
X1 = sp_linalg.aslinearoperator(X)
coefs = np.empty((y.shape[1], n_features))
if n_features > n_samples:
def create_mv(curr_alpha):
def _mv(x):
return X1.matvec(X1.rmatvec(x)) + curr_alpha * x
return _mv
else:
def create_mv(curr_alpha):
def _mv(x):
return X1.rmatvec(X1.matvec(x)) + curr_alpha * x
return _mv
for i in range(y.shape[1]):
y_column = y[:, i]
mv = create_mv(alpha[i])
if n_features > n_samples:
# kernel ridge
# w = X.T * inv(X X^t + alpha*Id) y
C = sp_linalg.LinearOperator(
(n_samples, n_samples), matvec=mv, dtype=X.dtype)
coef, info = sp_linalg.cg(C, y_column, tol=tol)
coefs[i] = X1.rmatvec(coef)
else:
# linear ridge
# w = inv(X^t X + alpha*Id) * X.T y
y_column = X1.rmatvec(y_column)
C = sp_linalg.LinearOperator(
(n_features, n_features), matvec=mv, dtype=X.dtype)
coefs[i], info = sp_linalg.cg(C, y_column, maxiter=max_iter,
tol=tol)
if info < 0:
raise ValueError("Failed with error code %d" % info)
if max_iter is None and info > 0 and verbose:
warnings.warn("sparse_cg did not converge after %d iterations." %
info)
return coefs
def _solve_lsqr(X, y, alpha, max_iter=None, tol=1e-3):
n_samples, n_features = X.shape
coefs = np.empty((y.shape[1], n_features))
# According to the lsqr documentation, alpha = damp^2.
sqrt_alpha = np.sqrt(alpha)
for i in range(y.shape[1]):
y_column = y[:, i]
coefs[i] = sp_linalg.lsqr(X, y_column, damp=sqrt_alpha[i],
atol=tol, btol=tol, iter_lim=max_iter)[0]
return coefs
def _solve_cholesky(X, y, alpha):
# w = inv(X^t X + alpha*Id) * X.T y
n_samples, n_features = X.shape
n_targets = y.shape[1]
A = safe_sparse_dot(X.T, X, dense_output=True)
Xy = safe_sparse_dot(X.T, y, dense_output=True)
one_alpha = np.array_equal(alpha, len(alpha) * [alpha[0]])
if one_alpha:
A.flat[::n_features + 1] += alpha[0]
return linalg.solve(A, Xy, sym_pos=True,
overwrite_a=True).T
else:
coefs = np.empty([n_targets, n_features])
for coef, target, current_alpha in zip(coefs, Xy.T, alpha):
A.flat[::n_features + 1] += current_alpha
coef[:] = linalg.solve(A, target, sym_pos=True,
overwrite_a=False).ravel()
A.flat[::n_features + 1] -= current_alpha
return coefs
def _solve_cholesky_kernel(K, y, alpha, sample_weight=None, copy=False):
# dual_coef = inv(X X^t + alpha*Id) y
n_samples = K.shape[0]
n_targets = y.shape[1]
if copy:
K = K.copy()
alpha = np.atleast_1d(alpha)
one_alpha = (alpha == alpha[0]).all()
has_sw = isinstance(sample_weight, np.ndarray) \
or sample_weight not in [1.0, None]
if has_sw:
# Unlike other solvers, we need to support sample_weight directly
# because K might be a pre-computed kernel.
sw = np.sqrt(np.atleast_1d(sample_weight))
y = y * sw[:, np.newaxis]
K *= np.outer(sw, sw)
if one_alpha:
# Only one penalty, we can solve multi-target problems in one time.
K.flat[::n_samples + 1] += alpha[0]
try:
# Note: we must use overwrite_a=False in order to be able to
# use the fall-back solution below in case a LinAlgError
# is raised
dual_coef = linalg.solve(K, y, sym_pos=True,
overwrite_a=False)
except np.linalg.LinAlgError:
warnings.warn("Singular matrix in solving dual problem. Using "
"least-squares solution instead.")
dual_coef = linalg.lstsq(K, y)[0]
# K is expensive to compute and store in memory so change it back in
# case it was user-given.
K.flat[::n_samples + 1] -= alpha[0]
if has_sw:
dual_coef *= sw[:, np.newaxis]
return dual_coef
else:
# One penalty per target. We need to solve each target separately.
dual_coefs = np.empty([n_targets, n_samples])
for dual_coef, target, current_alpha in zip(dual_coefs, y.T, alpha):
K.flat[::n_samples + 1] += current_alpha
dual_coef[:] = linalg.solve(K, target, sym_pos=True,
overwrite_a=False).ravel()
K.flat[::n_samples + 1] -= current_alpha
if has_sw:
dual_coefs *= sw[np.newaxis, :]
return dual_coefs.T
def _solve_svd(X, y, alpha):
U, s, Vt = linalg.svd(X, full_matrices=False)
idx = s > 1e-15 # same default value as scipy.linalg.pinv
s_nnz = s[idx][:, np.newaxis]
UTy = np.dot(U.T, y)
d = np.zeros((s.size, alpha.size))
d[idx] = s_nnz / (s_nnz ** 2 + alpha)
d_UT_y = d * UTy
return np.dot(Vt.T, d_UT_y).T
def _rescale_data(X, y, sample_weight):
"""Rescale data so as to support sample_weight"""
n_samples = X.shape[0]
sample_weight = sample_weight * np.ones(n_samples)
sample_weight = np.sqrt(sample_weight)
sw_matrix = sparse.dia_matrix((sample_weight, 0),
shape=(n_samples, n_samples))
X = safe_sparse_dot(sw_matrix, X)
y = safe_sparse_dot(sw_matrix, y)
return X, y
def ridge_regression(X, y, alpha, sample_weight=None, solver='auto',
max_iter=None, tol=1e-3, verbose=0):
"""Solve the ridge equation by the method of normal equations.
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
X : {array-like, sparse matrix, LinearOperator},
shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
alpha : {float, array-like},
shape = [n_targets] if array-like
The l_2 penalty to be used. If an array is passed, penalties are
assumed to be specific to targets
max_iter : int, optional
Maximum number of iterations for conjugate gradient solver.
The default value is determined by scipy.sparse.linalg.
sample_weight : float or numpy array of shape [n_samples]
Individual weights for each sample. If sample_weight is set, then
the solver will automatically be set to 'cholesky'
solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg'}
Solver to use in the computational routines:
- 'auto' chooses the solver automatically based on the type of data.
- 'svd' uses a Singular Value Decomposition of X to compute the Ridge
coefficients. More stable for singular matrices than
'cholesky'.
- 'cholesky' uses the standard scipy.linalg.solve function to
obtain a closed-form solution via a Cholesky decomposition of
dot(X.T, X)
- 'sparse_cg' uses the conjugate gradient solver as found in
scipy.sparse.linalg.cg. As an iterative algorithm, this solver is
more appropriate than 'cholesky' for large-scale data
(possibility to set `tol` and `max_iter`).
- 'lsqr' uses the dedicated regularized least-squares routine
scipy.sparse.linalg.lsqr. It is the fastest but may not be available
in old scipy versions. It also uses an iterative procedure.
The 'cholesky', 'sparse_cg' and 'lsqr' solvers support both dense and
sparse data; 'svd' currently supports only dense input.
tol : float
Precision of the solution.
verbose : int
Verbosity level. Setting verbose > 0 will display additional information
depending on the solver used.
Returns
-------
coef : array, shape = [n_features] or [n_targets, n_features]
Weight vector(s).
Notes
-----
This function won't compute the intercept.
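Examples
--------
A minimal illustrative sketch (random data; shapes and alpha are arbitrary):
>>> import numpy as np
>>> from sklearn.linear_model import ridge_regression
>>> rng = np.random.RandomState(0)
>>> X = rng.randn(20, 5)
>>> y = rng.randn(20)
>>> coef = ridge_regression(X, y, alpha=1.0)
>>> coef.shape
(5,)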
"""
n_samples, n_features = X.shape
if y.ndim > 2:
raise ValueError("Target y has the wrong shape %s" % str(y.shape))
ravel = False
if y.ndim == 1:
y = y.reshape(-1, 1)
ravel = True
n_samples_, n_targets = y.shape
if n_samples != n_samples_:
raise ValueError("Number of samples in X and y does not correspond:"
" %d != %d" % (n_samples, n_samples_))
has_sw = sample_weight is not None
if solver == 'auto':
# cholesky if it's a dense array and cg in
# any other case
if not sparse.issparse(X) or has_sw:
solver = 'cholesky'
else:
solver = 'sparse_cg'
elif solver == 'lsqr' and not hasattr(sp_linalg, 'lsqr'):
warnings.warn("""lsqr not available on this machine, falling back
to sparse_cg.""")
solver = 'sparse_cg'
if has_sw:
if np.atleast_1d(sample_weight).ndim > 1:
raise ValueError("Sample weights must be 1D array or scalar")
# Sample weight can be implemented via a simple rescaling.
X, y = _rescale_data(X, y, sample_weight)
# There should be either 1 or n_targets penalties
alpha = np.asarray(alpha).ravel()
if alpha.size not in [1, n_targets]:
raise ValueError("Number of targets and number of penalties "
"do not correspond: %d != %d"
% (alpha.size, n_targets))
if alpha.size == 1 and n_targets > 1:
alpha = np.repeat(alpha, n_targets)
if solver not in ('sparse_cg', 'cholesky', 'svd', 'lsqr'):
raise ValueError('Solver %s not understood' % solver)
if solver == 'sparse_cg':
coef = _solve_sparse_cg(X, y, alpha, max_iter, tol, verbose)
elif solver == "lsqr":
coef = _solve_lsqr(X, y, alpha, max_iter, tol)
elif solver == 'cholesky':
if n_features > n_samples:
K = safe_sparse_dot(X, X.T, dense_output=True)
try:
dual_coef = _solve_cholesky_kernel(K, y, alpha)
coef = safe_sparse_dot(X.T, dual_coef, dense_output=True).T
except linalg.LinAlgError:
# use SVD solver if matrix is singular
solver = 'svd'
else:
try:
coef = _solve_cholesky(X, y, alpha)
except linalg.LinAlgError:
# use SVD solver if matrix is singular
solver = 'svd'
if solver == 'svd':
if sparse.issparse(X):
raise TypeError('SVD solver does not support sparse'
' inputs currently')
coef = _solve_svd(X, y, alpha)
if ravel:
# When y was passed as a 1d-array, we flatten the coefficients.
coef = coef.ravel()
return coef
class _BaseRidge(six.with_metaclass(ABCMeta, LinearModel)):
@abstractmethod
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
copy_X=True, max_iter=None, tol=1e-3, solver="auto"):
self.alpha = alpha
self.fit_intercept = fit_intercept
self.normalize = normalize
self.copy_X = copy_X
self.max_iter = max_iter
self.tol = tol
self.solver = solver
def fit(self, X, y, sample_weight=None):
X, y = check_X_y(X, y, ['csr', 'csc', 'coo'], dtype=np.float,
multi_output=True, y_numeric=True)
if ((sample_weight is not None) and
np.atleast_1d(sample_weight).ndim > 1):
raise ValueError("Sample weights must be 1D array or scalar")
X, y, X_mean, y_mean, X_std = self._center_data(
X, y, self.fit_intercept, self.normalize, self.copy_X,
sample_weight=sample_weight)
self.coef_ = ridge_regression(X, y,
alpha=self.alpha,
sample_weight=sample_weight,
max_iter=self.max_iter,
tol=self.tol,
solver=self.solver)
self._set_intercept(X_mean, y_mean, X_std)
return self
class Ridge(_BaseRidge, RegressorMixin):
"""Linear least squares with l2 regularization.
This model solves a regression model where the loss function is
the linear least squares function and regularization is given by
the l2-norm. Also known as Ridge Regression or Tikhonov regularization.
This estimator has built-in support for multi-variate regression
(i.e., when y is a 2d-array of shape [n_samples, n_targets]).
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
alpha : {float, array-like}
shape = [n_targets]
Small positive values of alpha improve the conditioning of the problem
and reduce the variance of the estimates. Alpha corresponds to
``(2*C)^-1`` in other linear models such as LogisticRegression or
LinearSVC. If an array is passed, penalties are assumed to be specific
to the targets. Hence they must correspond in number.
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
max_iter : int, optional
Maximum number of iterations for conjugate gradient solver.
The default value is determined by scipy.sparse.linalg.
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg'}
Solver to use in the computational routines:
- 'auto' chooses the solver automatically based on the type of data.
- 'svd' uses a Singular Value Decomposition of X to compute the Ridge
coefficients. More stable for singular matrices than
'cholesky'.
- 'cholesky' uses the standard scipy.linalg.solve function to
obtain a closed-form solution.
- 'sparse_cg' uses the conjugate gradient solver as found in
scipy.sparse.linalg.cg. As an iterative algorithm, this solver is
more appropriate than 'cholesky' for large-scale data
(possibility to set `tol` and `max_iter`).
- 'lsqr' uses the dedicated regularized least-squares routine
scipy.sparse.linalg.lsqr. It is the fastest but may not be available
in old scipy versions. It also uses an iterative procedure.
The 'cholesky', 'sparse_cg' and 'lsqr' solvers support both dense and
sparse data; 'svd' currently supports only dense input.
tol : float
Precision of the solution.
Attributes
----------
coef_ : array, shape = [n_features] or [n_targets, n_features]
Weight vector(s).
See also
--------
RidgeClassifier, RidgeCV, KernelRidge
Examples
--------
>>> from sklearn.linear_model import Ridge
>>> import numpy as np
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> y = np.random.randn(n_samples)
>>> X = np.random.randn(n_samples, n_features)
>>> clf = Ridge(alpha=1.0)
>>> clf.fit(X, y) # doctest: +NORMALIZE_WHITESPACE
Ridge(alpha=1.0, copy_X=True, fit_intercept=True, max_iter=None,
normalize=False, solver='auto', tol=0.001)
"""
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
copy_X=True, max_iter=None, tol=1e-3, solver="auto"):
super(Ridge, self).__init__(alpha=alpha, fit_intercept=fit_intercept,
normalize=normalize, copy_X=copy_X,
max_iter=max_iter, tol=tol, solver=solver)
def fit(self, X, y, sample_weight=None):
"""Fit Ridge regression model
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
sample_weight : float or numpy array of shape [n_samples]
Individual weights for each sample
Returns
-------
self : returns an instance of self.
"""
return super(Ridge, self).fit(X, y, sample_weight=sample_weight)
class RidgeClassifier(LinearClassifierMixin, _BaseRidge):
"""Classifier using Ridge regression.
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
alpha : float
Small positive values of alpha improve the conditioning of the problem
and reduce the variance of the estimates. Alpha corresponds to
``(2*C)^-1`` in other linear models such as LogisticRegression or
LinearSVC.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set to false, no
intercept will be used in calculations (e.g. data is expected to be
already centered).
max_iter : int, optional
Maximum number of iterations for conjugate gradient solver.
The default value is determined by scipy.sparse.linalg.
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg'}
Solver to use in the computational
routines. 'svd' will use a Singular value decomposition to obtain
the solution, 'cholesky' will use the standard
scipy.linalg.solve function, 'sparse_cg' will use the
conjugate gradient solver as found in
scipy.sparse.linalg.cg while 'auto' will choose the most
appropriate depending on the matrix X. 'lsqr' uses
a dedicated regularized least-squares routine provided by scipy.
tol : float
Precision of the solution.
Attributes
----------
coef_ : array, shape = [n_features] or [n_classes, n_features]
Weight vector(s).
See also
--------
Ridge, RidgeClassifierCV
Notes
-----
For multi-class classification, n_class classifiers are trained in
a one-versus-all approach. Concretely, this is implemented by taking
advantage of the multi-variate response support in Ridge.
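Examples
--------
A minimal usage sketch (random data; shapes, class count and alpha are arbitrary):
>>> import numpy as np
>>> from sklearn.linear_model import RidgeClassifier
>>> rng = np.random.RandomState(0)
>>> X = rng.randn(30, 4)
>>> y = rng.randint(0, 3, 30)
>>> clf = RidgeClassifier(alpha=1.0).fit(X, y)
>>> clf.predict(X[:2]).shape
(2,)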
"""
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
copy_X=True, max_iter=None, tol=1e-3, class_weight=None,
solver="auto"):
super(RidgeClassifier, self).__init__(
alpha=alpha, fit_intercept=fit_intercept, normalize=normalize,
copy_X=copy_X, max_iter=max_iter, tol=tol, solver=solver)
self.class_weight = class_weight
def fit(self, X, y, sample_weight=None):
"""Fit Ridge regression model.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples,n_features]
Training data
y : array-like, shape = [n_samples]
Target values
sample_weight : float or numpy array of shape (n_samples,)
Sample weight.
Returns
-------
self : returns an instance of self.
"""
self._label_binarizer = LabelBinarizer(pos_label=1, neg_label=-1)
Y = self._label_binarizer.fit_transform(y)
if not self._label_binarizer.y_type_.startswith('multilabel'):
y = column_or_1d(y, warn=True)
if self.class_weight:
if sample_weight is None:
sample_weight = 1.
# modify the sample weights with the corresponding class weight
sample_weight = (sample_weight *
compute_sample_weight(self.class_weight, y))
super(RidgeClassifier, self).fit(X, Y, sample_weight=sample_weight)
return self
@property
def classes_(self):
return self._label_binarizer.classes_
class _RidgeGCV(LinearModel):
"""Ridge regression with built-in Generalized Cross-Validation
It allows efficient Leave-One-Out cross-validation.
This class is not intended to be used directly. Use RidgeCV instead.
Notes
-----
We want to solve (K + alpha*Id)c = y,
where K = X X^T is the kernel matrix.
Let G = (K + alpha*Id)^-1.
Dual solution: c = Gy
Primal solution: w = X^T c
Compute eigendecomposition K = Q V Q^T.
Then G = Q (V + alpha*Id)^-1 Q^T,
where (V + alpha*Id) is diagonal.
It is thus inexpensive to invert for many values of alpha.
Let loov be the vector of prediction values for each example
when the model was fitted with all examples but this example.
loov = (KGY - diag(KG)Y) / diag(I-KG)
Let looe be the vector of prediction errors for each example
when the model was fitted with all examples but this example.
looe = y - loov = c / diag(G)
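As a compact illustrative sketch of the recipe implemented below
(``K`` symmetric, ``y`` one-dimensional and ``alpha`` a scalar, all assumed given)::
v, Q = np.linalg.eigh(K)            # K = Q diag(v) Q^T
w = 1.0 / (v + alpha)               # diagonal of (V + alpha*Id)^-1
c = Q.dot(w * Q.T.dot(y))           # dual coefficients c = G y
G_diag = (w * Q ** 2).sum(axis=-1)  # diag(G) without forming G
looe = c / G_diag                   # leave-one-out errors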
References
----------
http://cbcl.mit.edu/projects/cbcl/publications/ps/MIT-CSAIL-TR-2007-025.pdf
http://www.mit.edu/~9.520/spring07/Classes/rlsslides.pdf
"""
def __init__(self, alphas=(0.1, 1.0, 10.0),
fit_intercept=True, normalize=False,
scoring=None, copy_X=True,
gcv_mode=None, store_cv_values=False):
self.alphas = np.asarray(alphas)
self.fit_intercept = fit_intercept
self.normalize = normalize
self.scoring = scoring
self.copy_X = copy_X
self.gcv_mode = gcv_mode
self.store_cv_values = store_cv_values
def _pre_compute(self, X, y):
# even if X is very sparse, K is usually very dense
K = safe_sparse_dot(X, X.T, dense_output=True)
v, Q = linalg.eigh(K)
QT_y = np.dot(Q.T, y)
return v, Q, QT_y
def _decomp_diag(self, v_prime, Q):
# compute diagonal of the matrix: dot(Q, dot(diag(v_prime), Q^T))
return (v_prime * Q ** 2).sum(axis=-1)
def _diag_dot(self, D, B):
# compute dot(diag(D), B)
if len(B.shape) > 1:
# handle case where B is > 1-d
D = D[(slice(None), ) + (np.newaxis, ) * (len(B.shape) - 1)]
return D * B
def _errors(self, alpha, y, v, Q, QT_y):
# don't construct matrix G, instead compute action on y & diagonal
w = 1.0 / (v + alpha)
c = np.dot(Q, self._diag_dot(w, QT_y))
G_diag = self._decomp_diag(w, Q)
# handle case where y is 2-d
if len(y.shape) != 1:
G_diag = G_diag[:, np.newaxis]
return (c / G_diag) ** 2, c
def _values(self, alpha, y, v, Q, QT_y):
# don't construct matrix G, instead compute action on y & diagonal
w = 1.0 / (v + alpha)
c = np.dot(Q, self._diag_dot(w, QT_y))
G_diag = self._decomp_diag(w, Q)
# handle case where y is 2-d
if len(y.shape) != 1:
G_diag = G_diag[:, np.newaxis]
return y - (c / G_diag), c
def _pre_compute_svd(self, X, y):
if sparse.issparse(X):
raise TypeError("SVD not supported for sparse matrices")
U, s, _ = linalg.svd(X, full_matrices=0)
v = s ** 2
UT_y = np.dot(U.T, y)
return v, U, UT_y
def _errors_svd(self, alpha, y, v, U, UT_y):
w = ((v + alpha) ** -1) - (alpha ** -1)
c = np.dot(U, self._diag_dot(w, UT_y)) + (alpha ** -1) * y
G_diag = self._decomp_diag(w, U) + (alpha ** -1)
if len(y.shape) != 1:
# handle case where y is 2-d
G_diag = G_diag[:, np.newaxis]
return (c / G_diag) ** 2, c
def _values_svd(self, alpha, y, v, U, UT_y):
w = ((v + alpha) ** -1) - (alpha ** -1)
c = np.dot(U, self._diag_dot(w, UT_y)) + (alpha ** -1) * y
G_diag = self._decomp_diag(w, U) + (alpha ** -1)
if len(y.shape) != 1:
# handle case when y is 2-d
G_diag = G_diag[:, np.newaxis]
return y - (c / G_diag), c
def fit(self, X, y, sample_weight=None):
"""Fit Ridge regression model
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
sample_weight : float or array-like of shape [n_samples]
Sample weight
Returns
-------
self : Returns self.
"""
X, y = check_X_y(X, y, ['csr', 'csc', 'coo'], dtype=np.float,
multi_output=True, y_numeric=True)
n_samples, n_features = X.shape
X, y, X_mean, y_mean, X_std = LinearModel._center_data(
X, y, self.fit_intercept, self.normalize, self.copy_X,
sample_weight=sample_weight)
gcv_mode = self.gcv_mode
with_sw = len(np.shape(sample_weight))
if gcv_mode is None or gcv_mode == 'auto':
if sparse.issparse(X) or n_features > n_samples or with_sw:
gcv_mode = 'eigen'
else:
gcv_mode = 'svd'
elif gcv_mode == "svd" and with_sw:
# FIXME non-uniform sample weights not yet supported
warnings.warn("non-uniform sample weights unsupported for svd, "
"forcing usage of eigen")
gcv_mode = 'eigen'
if gcv_mode == 'eigen':
_pre_compute = self._pre_compute
_errors = self._errors
_values = self._values
elif gcv_mode == 'svd':
# assert n_samples >= n_features
_pre_compute = self._pre_compute_svd
_errors = self._errors_svd
_values = self._values_svd
else:
raise ValueError('bad gcv_mode "%s"' % gcv_mode)
v, Q, QT_y = _pre_compute(X, y)
n_y = 1 if len(y.shape) == 1 else y.shape[1]
cv_values = np.zeros((n_samples * n_y, len(self.alphas)))
C = []
scorer = check_scoring(self, scoring=self.scoring, allow_none=True)
error = scorer is None
for i, alpha in enumerate(self.alphas):
weighted_alpha = (sample_weight * alpha
if sample_weight is not None
else alpha)
if error:
out, c = _errors(weighted_alpha, y, v, Q, QT_y)
else:
out, c = _values(weighted_alpha, y, v, Q, QT_y)
cv_values[:, i] = out.ravel()
C.append(c)
if error:
best = cv_values.mean(axis=0).argmin()
else:
# The scorer wants an object that will make the predictions, but
# they are already computed efficiently by _RidgeGCV. This
# identity_estimator will just return them
def identity_estimator():
pass
identity_estimator.decision_function = lambda y_predict: y_predict
identity_estimator.predict = lambda y_predict: y_predict
out = [scorer(identity_estimator, y.ravel(), cv_values[:, i])
for i in range(len(self.alphas))]
best = np.argmax(out)
self.alpha_ = self.alphas[best]
self.dual_coef_ = C[best]
self.coef_ = safe_sparse_dot(self.dual_coef_.T, X)
self._set_intercept(X_mean, y_mean, X_std)
if self.store_cv_values:
if len(y.shape) == 1:
cv_values_shape = n_samples, len(self.alphas)
else:
cv_values_shape = n_samples, n_y, len(self.alphas)
self.cv_values_ = cv_values.reshape(cv_values_shape)
return self
class _BaseRidgeCV(LinearModel):
def __init__(self, alphas=(0.1, 1.0, 10.0),
fit_intercept=True, normalize=False, scoring=None,
cv=None, gcv_mode=None,
store_cv_values=False):
self.alphas = alphas
self.fit_intercept = fit_intercept
self.normalize = normalize
self.scoring = scoring
self.cv = cv
self.gcv_mode = gcv_mode
self.store_cv_values = store_cv_values
def fit(self, X, y, sample_weight=None):
"""Fit Ridge regression model
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
sample_weight : float or array-like of shape [n_samples]
Sample weight
Returns
-------
self : Returns self.
"""
if self.cv is None:
estimator = _RidgeGCV(self.alphas,
fit_intercept=self.fit_intercept,
normalize=self.normalize,
scoring=self.scoring,
gcv_mode=self.gcv_mode,
store_cv_values=self.store_cv_values)
estimator.fit(X, y, sample_weight=sample_weight)
self.alpha_ = estimator.alpha_
if self.store_cv_values:
self.cv_values_ = estimator.cv_values_
else:
if self.store_cv_values:
raise ValueError("cv!=None and store_cv_values=True "
" are incompatible")
parameters = {'alpha': self.alphas}
fit_params = {'sample_weight' : sample_weight}
gs = GridSearchCV(Ridge(fit_intercept=self.fit_intercept),
parameters, fit_params=fit_params, cv=self.cv)
gs.fit(X, y)
estimator = gs.best_estimator_
self.alpha_ = gs.best_estimator_.alpha
self.coef_ = estimator.coef_
self.intercept_ = estimator.intercept_
return self
class RidgeCV(_BaseRidgeCV, RegressorMixin):
"""Ridge regression with built-in cross-validation.
By default, it performs Generalized Cross-Validation, which is a form of
efficient Leave-One-Out cross-validation.
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
alphas : numpy array of shape [n_alphas]
Array of alpha values to try.
Small positive values of alpha improve the conditioning of the
problem and reduce the variance of the estimates.
Alpha corresponds to ``(2*C)^-1`` in other linear models such as
LogisticRegression or LinearSVC.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : integer or cross-validation generator, optional
If None, Generalized Cross-Validation (efficient Leave-One-Out)
will be used.
If an integer is passed, it is the number of folds for KFold cross
validation. Specific cross-validation objects can be passed, see
sklearn.cross_validation module for the list of possible objects
gcv_mode : {None, 'auto', 'svd', 'eigen'}, optional
Flag indicating which strategy to use when performing
Generalized Cross-Validation. Options are::
'auto' : use 'svd' if X is dense and n_samples >= n_features,
otherwise use 'eigen'
'svd' : force computation via singular value decomposition of X
(does not work for sparse matrices)
'eigen' : force computation via eigendecomposition of the kernel
matrix X X^T
The 'auto' mode is the default and is intended to pick the cheaper
option of the two depending upon the shape and format of the training
data.
store_cv_values : boolean, default=False
Flag indicating if the cross-validation values corresponding to
each alpha should be stored in the `cv_values_` attribute (see
below). This flag is only compatible with `cv=None` (i.e. using
Generalized Cross-Validation).
Attributes
----------
cv_values_ : array, shape = [n_samples, n_alphas] or \
shape = [n_samples, n_targets, n_alphas], optional
Cross-validation values for each alpha (if `store_cv_values=True` and \
`cv=None`). After `fit()` has been called, this attribute will \
contain the mean squared errors (by default) or the values of the \
`scoring` callable (if one was provided in the constructor).
coef_ : array, shape = [n_features] or [n_targets, n_features]
Weight vector(s).
alpha_ : float
Estimated regularization parameter.
intercept_ : float | array, shape = (n_targets,)
Independent term in decision function. Set to 0.0 if
``fit_intercept = False``.
See also
--------
Ridge: Ridge regression
RidgeClassifier: Ridge classifier
RidgeClassifierCV: Ridge classifier with built-in cross validation
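Examples
--------
A minimal usage sketch (random data; the alpha grid is arbitrary):
>>> import numpy as np
>>> from sklearn.linear_model import RidgeCV
>>> rng = np.random.RandomState(0)
>>> X = rng.randn(50, 3)
>>> y = rng.randn(50)
>>> reg = RidgeCV(alphas=[0.1, 1.0, 10.0]).fit(X, y)
>>> reg.alpha_ in (0.1, 1.0, 10.0)
True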
"""
pass
class RidgeClassifierCV(LinearClassifierMixin, _BaseRidgeCV):
"""Ridge classifier with built-in cross-validation.
By default, it performs Generalized Cross-Validation, which is a form of
efficient Leave-One-Out cross-validation. Currently, only the n_features >
n_samples case is handled efficiently.
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
alphas : numpy array of shape [n_alphas]
Array of alpha values to try.
Small positive values of alpha improve the conditioning of the
problem and reduce the variance of the estimates.
Alpha corresponds to ``(2*C)^-1`` in other linear models such as
LogisticRegression or LinearSVC.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : cross-validation generator, optional
If None, Generalized Cross-Validation (efficient Leave-One-Out)
will be used.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
Attributes
----------
cv_values_ : array, shape = [n_samples, n_alphas] or \
shape = [n_samples, n_responses, n_alphas], optional
Cross-validation values for each alpha (if `store_cv_values=True` and
`cv=None`). After `fit()` has been called, this attribute will contain \
the mean squared errors (by default) or the values of the \
`scoring` callable (if one was provided in the constructor).
coef_ : array, shape = [n_features] or [n_targets, n_features]
Weight vector(s).
alpha_ : float
Estimated regularization parameter
See also
--------
Ridge: Ridge regression
RidgeClassifier: Ridge classifier
RidgeCV: Ridge regression with built-in cross validation
Notes
-----
For multi-class classification, n_class classifiers are trained in
a one-versus-all approach. Concretely, this is implemented by taking
advantage of the multi-variate response support in Ridge.
"""
def __init__(self, alphas=(0.1, 1.0, 10.0), fit_intercept=True,
normalize=False, scoring=None, cv=None, class_weight=None):
super(RidgeClassifierCV, self).__init__(
alphas=alphas, fit_intercept=fit_intercept, normalize=normalize,
scoring=scoring, cv=cv)
self.class_weight = class_weight
def fit(self, X, y, sample_weight=None):
"""Fit the ridge classifier.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape (n_samples,)
Target values.
sample_weight : float or numpy array of shape (n_samples,)
Sample weight.
Returns
-------
self : object
Returns self.
"""
self._label_binarizer = LabelBinarizer(pos_label=1, neg_label=-1)
Y = self._label_binarizer.fit_transform(y)
if not self._label_binarizer.y_type_.startswith('multilabel'):
y = column_or_1d(y, warn=True)
if self.class_weight:
if sample_weight is None:
sample_weight = 1.
# modify the sample weights with the corresponding class weight
sample_weight = (sample_weight *
compute_sample_weight(self.class_weight, y))
_BaseRidgeCV.fit(self, X, Y, sample_weight=sample_weight)
return self
@property
def classes_(self):
return self._label_binarizer.classes_
| bsd-3-clause |
bsmrstu-warriors/Moytri--The-Drone-Aider | Lib/site-packages/scipy/signal/fir_filter_design.py | 53 | 18572 | """Functions for FIR filter design."""
from math import ceil, log
import numpy as np
from numpy.fft import irfft
from scipy.special import sinc
import sigtools
# Some notes on function parameters:
#
# `cutoff` and `width` are given as a numbers between 0 and 1. These
# are relative frequencies, expressed as a fraction of the Nyquist rate.
# For example, if the Nyquist rate is 2KHz, then width=0.15 is a width
# of 300 Hz.
#
# The `order` of a FIR filter is one less than the number of taps.
# This is a potential source of confusion, so in the following code,
# we will always use the number of taps as the parameterization of
# the 'size' of the filter. The "number of taps" means the number
# of coefficients, which is the same as the length of the impulse
# response of the filter.
def kaiser_beta(a):
"""Compute the Kaiser parameter `beta`, given the attenuation `a`.
Parameters
----------
a : float
The desired attenuation in the stopband and maximum ripple in
the passband, in dB. This should be a *positive* number.
Returns
-------
beta : float
The `beta` parameter to be used in the formula for a Kaiser window.
References
----------
Oppenheim, Schafer, "Discrete-Time Signal Processing", p.475-476.
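Examples
--------
Illustrative values that follow directly from the piecewise formula above:
>>> from scipy.signal import kaiser_beta
>>> round(kaiser_beta(65.0), 4)  # 0.1102 * (65.0 - 8.7)
6.2043
>>> kaiser_beta(12.0)  # a <= 21 maps to beta = 0
0.0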
"""
if a > 50:
beta = 0.1102 * (a - 8.7)
elif a > 21:
beta = 0.5842 * (a - 21)**0.4 + 0.07886 * (a - 21)
else:
beta = 0.0
return beta
def kaiser_atten(numtaps, width):
"""Compute the attenuation of a Kaiser FIR filter.
Given the number of taps `N` and the transition width `width`, compute the
attenuation `a` in dB, given by Kaiser's formula:
a = 2.285 * (N - 1) * pi * width + 7.95
Parameters
----------
numtaps : int
The number of taps in the FIR filter.
width : float
The desired width of the transition region between passband and stopband
(or, in general, at any discontinuity) for the filter.
Returns
-------
a : float
The attenuation of the ripple, in dB.
See Also
--------
kaiserord, kaiser_beta
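Examples
--------
Illustrative value computed from the formula above (inputs are arbitrary):
>>> from scipy.signal import kaiser_atten
>>> round(kaiser_atten(81, 0.1), 2)  # 2.285 * 80 * pi * 0.1 + 7.95
65.38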
"""
a = 2.285 * (numtaps - 1) * np.pi * width + 7.95
return a
def kaiserord(ripple, width):
"""Design a Kaiser window to limit ripple and width of transition region.
Parameters
----------
ripple : float
Positive number specifying maximum ripple in passband (dB) and minimum
attenuation in the stopband (dB).
width : float
Width of transition region (normalized so that 1 corresponds to pi
radians / sample).
Returns
-------
numtaps : int
The length of the kaiser window.
beta : float
The beta parameter for the kaiser window.
Notes
-----
There are several ways to obtain the Kaiser window:
signal.kaiser(numtaps, beta, sym=0)
signal.get_window(beta, numtaps)
signal.get_window(('kaiser', beta), numtaps)
The empirical equations discovered by Kaiser are used.
See Also
--------
kaiser_beta, kaiser_atten
References
----------
Oppenheim, Schafer, "Discrete-Time Signal Processing", p.475-476.
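Examples
--------
A typical design sketch (the ripple, width and cutoff values are arbitrary):
>>> from scipy.signal import firwin, kaiserord
>>> numtaps, beta = kaiserord(ripple=65.0, width=0.2)
>>> taps = firwin(numtaps, cutoff=0.3, window=('kaiser', beta))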
"""
A = abs(ripple) # in case somebody is confused as to what's meant
if A < 8:
# Formula for N is not valid in this range.
raise ValueError("Requested maximum ripple attentuation %f is too "
"small for the Kaiser formula." % A)
beta = kaiser_beta(A)
# Kaiser's formula (as given in Oppenheim and Schafer) is for the filter
# order, so we have to add 1 to get the number of taps.
numtaps = (A - 7.95) / 2.285 / (np.pi * width) + 1
return int(ceil(numtaps)), beta
def firwin(numtaps, cutoff, width=None, window='hamming', pass_zero=True,
scale=True, nyq=1.0):
"""
FIR filter design using the window method.
This function computes the coefficients of a finite impulse response filter.
The filter will have linear phase; it will be Type I if `numtaps` is odd and
Type II if `numtaps` is even.
Type II filters always have zero response at the Nyquist rate, so a
ValueError exception is raised if firwin is called with `numtaps` even and
having a passband whose right end is at the Nyquist rate.
Parameters
----------
numtaps : int
Length of the filter (number of coefficients, i.e. the filter
order + 1). `numtaps` must be odd if a passband includes the
Nyquist frequency.
cutoff : float or 1D array_like
Cutoff frequency of filter (expressed in the same units as `nyq`)
OR an array of cutoff frequencies (that is, band edges). In the
latter case, the frequencies in `cutoff` should be positive and
monotonically increasing between 0 and `nyq`. The values 0 and
`nyq` must not be included in `cutoff`.
width : float or None
If `width` is not None, then assume it is the approximate width
of the transition region (expressed in the same units as `nyq`)
for use in Kaiser FIR filter design. In this case, the `window`
argument is ignored.
window : string or tuple of string and parameter values
Desired window to use. See `scipy.signal.get_window` for a list
of windows and required parameters.
pass_zero : bool
If True, the gain at the frequency 0 (i.e. the "DC gain") is 1.
Otherwise the DC gain is 0.
scale : bool
Set to True to scale the coefficients so that the frequency
response is exactly unity at a certain frequency.
That frequency is either:
0 (DC) if the first passband starts at 0 (i.e. pass_zero
is True);
`nyq` (the Nyquist rate) if the first passband ends at
`nyq` (i.e the filter is a single band highpass filter);
center of first passband otherwise.
nyq : float
Nyquist frequency. Each frequency in `cutoff` must be between 0
and `nyq`.
Returns
-------
h : 1D ndarray
Coefficients of length `numtaps` FIR filter.
Raises
------
ValueError
If any value in `cutoff` is less than or equal to 0 or greater
than or equal to `nyq`, if the values in `cutoff` are not strictly
monotonically increasing, or if `numtaps` is even but a passband
includes the Nyquist frequency.
Examples
--------
Low-pass from 0 to f::
>>> firwin(numtaps, f)
Use a specific window function::
>>> firwin(numtaps, f, window='nuttall')
High-pass ('stop' from 0 to f)::
>>> firwin(numtaps, f, pass_zero=False)
Band-pass::
>>> firwin(numtaps, [f1, f2], pass_zero=False)
Band-stop::
>>> firwin(numtaps, [f1, f2])
Multi-band (passbands are [0, f1], [f2, f3] and [f4, 1])::
>>> firwin(numtaps, [f1, f2, f3, f4])
Multi-band (passbands are [f1, f2] and [f3,f4])::
>>> firwin(numtaps, [f1, f2, f3, f4], pass_zero=False)
See also
--------
scipy.signal.firwin2
"""
# The major enhancements to this function added in November 2010 were
# developed by Tom Krauss (see ticket #902).
cutoff = np.atleast_1d(cutoff) / float(nyq)
# Check for invalid input.
if cutoff.ndim > 1:
raise ValueError("The cutoff argument must be at most one-dimensional.")
if cutoff.size == 0:
raise ValueError("At least one cutoff frequency must be given.")
if cutoff.min() <= 0 or cutoff.max() >= 1:
raise ValueError("Invalid cutoff frequency: frequencies must be greater than 0 and less than nyq.")
if np.any(np.diff(cutoff) <= 0):
raise ValueError("Invalid cutoff frequencies: the frequencies must be strictly increasing.")
if width is not None:
# A width was given. Find the beta parameter of the Kaiser window
# and set `window`. This overrides the value of `window` passed in.
atten = kaiser_atten(numtaps, float(width)/nyq)
beta = kaiser_beta(atten)
window = ('kaiser', beta)
pass_nyquist = bool(cutoff.size & 1) ^ pass_zero
if pass_nyquist and numtaps % 2 == 0:
raise ValueError("A filter with an even number of coefficients must "
"have zero response at the Nyquist rate.")
# Insert 0 and/or 1 at the ends of cutoff so that the length of cutoff is even,
# and each pair in cutoff corresponds to passband.
cutoff = np.hstack(([0.0]*pass_zero, cutoff, [1.0]*pass_nyquist))
# `bands` is a 2D array; each row gives the left and right edges of a passband.
bands = cutoff.reshape(-1,2)
# Build up the coefficients.
alpha = 0.5 * (numtaps-1)
m = np.arange(0, numtaps) - alpha
h = 0
for left, right in bands:
h += right * sinc(right * m)
h -= left * sinc(left * m)
# Get and apply the window function.
from signaltools import get_window
win = get_window(window, numtaps, fftbins=False)
h *= win
# Now handle scaling if desired.
if scale:
# Get the first passband.
left, right = bands[0]
if left == 0:
scale_frequency = 0.0
elif right == 1:
scale_frequency = 1.0
else:
scale_frequency = 0.5 * (left + right)
c = np.cos(np.pi * m * scale_frequency)
s = np.sum(h * c)
h /= s
return h
# Original version of firwin2 from scipy ticket #457, submitted by "tash".
#
# Rewritten by Warren Weckesser, 2010.
def firwin2(numtaps, freq, gain, nfreqs=None, window='hamming', nyq=1.0):
"""FIR filter design using the window method.
From the given frequencies `freq` and corresponding gains `gain`,
this function constructs an FIR filter with linear phase and
(approximately) the given frequency response.
Parameters
----------
numtaps : int
The number of taps in the FIR filter. `numtaps` must be less than
`nfreqs`. If the gain at the Nyquist rate, `gain[-1]`, is not 0,
then `numtaps` must be odd.
freq : array-like, 1D
The frequency sampling points. Typically 0.0 to 1.0 with 1.0 being
Nyquist. The Nyquist frequency can be redefined with the argument
`nyq`.
The values in `freq` must be nondecreasing. A value can be repeated
once to implement a discontinuity. The first value in `freq` must
be 0, and the last value must be `nyq`.
gain : array-like
The filter gains at the frequency sampling points.
nfreqs : int, optional
The size of the interpolation mesh used to construct the filter.
For most efficient behavior, this should be a power of 2 plus 1
(e.g, 129, 257, etc). The default is one more than the smallest
power of 2 that is not less than `numtaps`. `nfreqs` must be greater
than `numtaps`.
window : string or (string, float) or float, or None, optional
Window function to use. Default is "hamming". See
`scipy.signal.get_window` for the complete list of possible values.
If None, no window function is applied.
nyq : float
Nyquist frequency. Each frequency in `freq` must be between 0 and
`nyq` (inclusive).
Returns
-------
taps : numpy 1D array of length `numtaps`
The filter coefficients of the FIR filter.
Example
-------
A lowpass FIR filter with a response that is 1 on [0.0, 0.5], and
that decreases linearly on [0.5, 1.0] from 1 to 0:
>>> taps = firwin2(150, [0.0, 0.5, 1.0], [1.0, 1.0, 0.0])
>>> print(taps[72:78])
[-0.02286961 -0.06362756 0.57310236 0.57310236 -0.06362756 -0.02286961]
See also
--------
scipy.signal.firwin
Notes
-----
From the given set of frequencies and gains, the desired response is
constructed in the frequency domain. The inverse FFT is applied to the
desired response to create the associated convolution kernel, and the
first `numtaps` coefficients of this kernel, scaled by `window`, are
returned.
The FIR filter will have linear phase. The filter is Type I if `numtaps`
is odd and Type II if `numtaps` is even. Because Type II filters always
have a zero at the Nyquist frequency, `numtaps` must be odd if `gain[-1]`
is not zero.
.. versionadded:: 0.9.0
References
----------
.. [1] Oppenheim, A. V. and Schafer, R. W., "Discrete-Time Signal
Processing", Prentice-Hall, Englewood Cliffs, New Jersey (1989).
(See, for example, Section 7.4.)
.. [2] Smith, Steven W., "The Scientist and Engineer's Guide to Digital
Signal Processing", Ch. 17. http://www.dspguide.com/ch17/1.htm
"""
if len(freq) != len(gain):
raise ValueError('freq and gain must be of same length.')
if nfreqs is not None and numtaps >= nfreqs:
raise ValueError('ntaps must be less than nfreqs, but firwin2 was '
'called with ntaps=%d and nfreqs=%s' % (numtaps, nfreqs))
if freq[0] != 0 or freq[-1] != nyq:
raise ValueError('freq must start with 0 and end with `nyq`.')
d = np.diff(freq)
if (d < 0).any():
raise ValueError('The values in freq must be nondecreasing.')
d2 = d[:-1] + d[1:]
if (d2 == 0).any():
raise ValueError('A value in freq must not occur more than twice.')
if numtaps % 2 == 0 and gain[-1] != 0.0:
raise ValueError("A filter with an even number of coefficients must "
"have zero gain at the Nyquist rate.")
if nfreqs is None:
nfreqs = 1 + 2 ** int(ceil(log(numtaps,2)))
# Tweak any repeated values in freq so that interp works.
eps = np.finfo(float).eps
for k in range(len(freq)):
if k < len(freq)-1 and freq[k] == freq[k+1]:
freq[k] = freq[k] - eps
freq[k+1] = freq[k+1] + eps
# Linearly interpolate the desired response on a uniform mesh `x`.
x = np.linspace(0.0, nyq, nfreqs)
fx = np.interp(x, freq, gain)
# Adjust the phases of the coefficients so that the first `ntaps` of the
# inverse FFT are the desired filter coefficients.
shift = np.exp(-(numtaps-1)/2. * 1.j * np.pi * x / nyq)
fx2 = fx * shift
# Use irfft to compute the inverse FFT.
out_full = irfft(fx2)
if window is not None:
# Create the window to apply to the filter coefficients.
from signaltools import get_window
wind = get_window(window, numtaps, fftbins=False)
else:
wind = 1
# Keep only the first `numtaps` coefficients in `out`, and multiply by
# the window.
out = out_full[:numtaps] * wind
return out
def remez(numtaps, bands, desired, weight=None, Hz=1, type='bandpass',
maxiter=25, grid_density=16):
"""
Calculate the minimax optimal filter using the Remez exchange algorithm.
Calculate the filter-coefficients for the finite impulse response
(FIR) filter whose transfer function minimizes the maximum error
between the desired gain and the realized gain in the specified
frequency bands using the Remez exchange algorithm.
Parameters
----------
numtaps : int
The desired number of taps in the filter. The number of taps is
the number of terms in the filter, or the filter order plus one.
bands : array_like
A monotonic sequence containing the band edges in Hz.
All elements must be non-negative and less than half the sampling
frequency as given by `Hz`.
desired : array_like
A sequence half the size of bands containing the desired gain
in each of the specified bands.
weight : array_like, optional
A relative weighting to give to each band region. The length of
`weight` has to be half the length of `bands`.
Hz : scalar, optional
The sampling frequency in Hz. Default is 1.
type : {'bandpass', 'differentiator', 'hilbert'}, optional
The type of filter:
'bandpass' : flat response in bands. This is the default.
'differentiator' : frequency proportional response in bands.
'hilbert' : filter with odd symmetry, that is, type III
(for even order) or type IV (for odd order)
linear phase filters.
maxiter : int, optional
Maximum number of iterations of the algorithm. Default is 25.
grid_density : int, optional
Grid density. The dense grid used in `remez` is of size
``(numtaps + 1) * grid_density``. Default is 16.
Returns
-------
out : ndarray
A rank-1 array containing the coefficients of the optimal
(in a minimax sense) filter.
See Also
--------
freqz : Compute the frequency response of a digital filter.
References
----------
.. [1] J. H. McClellan and T. W. Parks, "A unified approach to the
design of optimum FIR linear phase digital filters",
IEEE Trans. Circuit Theory, vol. CT-20, pp. 697-701, 1973.
.. [2] J. H. McClellan, T. W. Parks and L. R. Rabiner, "A Computer
Program for Designing Optimum FIR Linear Phase Digital
Filters", IEEE Trans. Audio Electroacoust., vol. AU-21,
pp. 506-525, 1973.
Examples
--------
We want to construct a filter with a passband at 0.2-0.4 Hz, and
stop bands at 0-0.1 Hz and 0.45-0.5 Hz. Note that this means that the
behavior in the frequency ranges between those bands is unspecified and
may overshoot.
>>> bpass = sp.signal.remez(72, [0, 0.1, 0.2, 0.4, 0.45, 0.5], [0, 1, 0])
>>> freq, response = sp.signal.freqz(bpass)
>>> ampl = np.abs(response)
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> ax1 = fig.add_subplot(111)
>>> ax1.semilogy(freq/(2*np.pi), ampl, 'b-') # freq in Hz
[<matplotlib.lines.Line2D object at 0xf486790>]
>>> plt.show()
"""
# Convert type
try:
tnum = {'bandpass':1, 'differentiator':2, 'hilbert':3}[type]
except KeyError:
raise ValueError("Type must be 'bandpass', 'differentiator', or 'hilbert'")
# Convert weight
if weight is None:
weight = [1] * len(desired)
bands = np.asarray(bands).copy()
return sigtools._remez(numtaps, bands, desired, weight, tnum, Hz,
maxiter, grid_density)
| gpl-3.0 |
dlmacedo/SVM-CNN | main.py | 1 | 11381 | # ==============================================================================
# Copyright David Macedo. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import tensorflow as tf
import numpy as np
import pandas as pd
from tensorflow.examples.tutorials.mnist import input_data
from sklearn import svm
import time
from elm import GenELMClassifier
from random_layer import MLPRandomLayer
NUMBER_OF_FEATURES = 128
BATCH_SIZE = 55
BATCHES_IN_EPOCH = 1000
TRAIN_SIZE = BATCHES_IN_EPOCH * BATCH_SIZE
TEST_SIZE = 10000
NUMBER_OF_EPOCHS = 3
NUMBER_OF_EXPERIMENTS = 100
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
converter = np.array([0,1,2,3,4,5,6,7,8,9])
svm_results = {
"LK-SVM-ACCU":0, "GK-SVM-ACCU":0, "LK-SVM-TIME":0, "GK-SVM-TIME":0,}
experiment_results = {
"1024HL-ELM-ACCU":0, "4096HL-ELM-ACCU":0, "ConvNet-ACCU":0, "ConvNetSVM-ACCU":0,
"1024HL-ELM-TIME":0, "4096HL-ELM-TIME":0, "ConvNet-TIME":0, "ConvNetSVM-TIME":0,}
train_features = np.zeros((TRAIN_SIZE, 28 * 28), dtype=float)
train_labels = np.zeros(TRAIN_SIZE, dtype=int)
test_features = mnist.test.images
test_labels = np.zeros(TEST_SIZE, dtype=int)
train_features_cnn = np.zeros((TRAIN_SIZE, NUMBER_OF_FEATURES), dtype=float)
train_labels_cnn = np.zeros(TRAIN_SIZE, dtype=int)
test_labels_cnn = np.zeros(TEST_SIZE, dtype=int)
def print_debug(ndarrayinput, stringinput):
print("\n"+stringinput)
print(ndarrayinput.shape)
print(type(ndarrayinput))
print(np.mean(ndarrayinput))
print(ndarrayinput)
def weight_variable(shape):
initial = tf.truncated_normal(shape, stddev=0.1)
return tf.Variable(initial)
def bias_variable(shape):
initial = tf.constant(0.1, shape=shape)
return tf.Variable(initial)
def conv2d(x, W):
return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
def max_pool_2x2(x):
return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
def SVM(krnl):
print("\n###############################\n", krnl, "Kernel SVM Train/Test\n###############################")
for i in range(BATCHES_IN_EPOCH):
train_batch = mnist.train.next_batch(BATCH_SIZE)
features_batch = train_batch[0]
labels_batch = train_batch[1]
for j in range(BATCH_SIZE):
for k in range(28*28):
train_features[BATCH_SIZE * i + j, k] = features_batch[j, k]
train_labels[BATCH_SIZE * i + j] = np.sum(np.multiply(converter, labels_batch[j, :]))
# print_debug(train_features, "train_features")
# print_debug(train_labels, "train_labels")
for j in range(TEST_SIZE):
test_labels[j] = np.sum(np.multiply(converter, mnist.test.labels[j, :]))
# print_debug(test_features, "test_features")
# print_debug(test_labels, "test_labels")
initial_time = time.time()
clf = svm.SVC(kernel=krnl)
clf.fit(train_features, train_labels)
training_time = time.time()-initial_time
print("\nTraining Time = ", training_time)
accuracy = clf.score(test_features, test_labels)
# test_time = time.time() - (training_time + initial_time)
# print("\nTest Time = ", test_time)
print("\n", krnl, "kernel SVM accuracy =", accuracy)
return accuracy, training_time
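# Note on the ELM setup below: MLPRandomLayer draws the hidden-layer weights at
# random and keeps them fixed, and GenELMClassifier then fits only the output
# weights with a least-squares solve, which is why training is fast compared
# with backpropagation.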
def ELM(nodes):
print("\n#########################\n", nodes, "Hidden Layer Nodes ELM Train/Test\n#########################")
for i in range(BATCHES_IN_EPOCH):
train_batch = mnist.train.next_batch(BATCH_SIZE)
features_batch = train_batch[0]
labels_batch = train_batch[1]
for j in range(BATCH_SIZE):
for k in range(28*28):
train_features[BATCH_SIZE * i + j, k] = features_batch[j, k]
train_labels[BATCH_SIZE * i + j] = np.sum(np.multiply(converter, labels_batch[j, :]))
# print_debug(train_features, "train_features")
# print_debug(train_labels, "train_labels")
for j in range(TEST_SIZE):
test_labels[j] = np.sum(np.multiply(converter, mnist.test.labels[j, :]))
# print_debug(test_features, "test_features")
# print_debug(test_labels, "test_labels")
initial_time = time.time()
srhl_tanh = MLPRandomLayer(n_hidden=nodes, activation_func="tanh")
clf = GenELMClassifier(hidden_layer=srhl_tanh)
clf.fit(train_features, train_labels)
training_time = time.time()-initial_time
print("\nTraining Time = ", training_time)
accuracy = clf.score(test_features, test_labels)
# test_time = time.time() - (training_time + initial_time)
# print("\nTest Time = ", test_time)
print("\n", nodes, "hidden layer nodes ELM accuracy =", accuracy)
return accuracy, training_time
def ConvNet(number_of_training_epochs):
print("\n#########################\nConvNet Train/Test\n#########################")
initial_time = time.time()
for i in range(number_of_training_epochs * BATCHES_IN_EPOCH):
batch = mnist.train.next_batch(BATCH_SIZE)
if i%BATCHES_IN_EPOCH == 0:
train_accuracy = model_accuracy.eval(feed_dict={x: batch[0], y_: batch[1], keep_prob: 1.0})
# print("\nEpoch ", int(i/BATCHES_IN_EPOCH), "Training Accuracy ", train_accuracy)
train_step.run(feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5})
training_time = time.time()-initial_time
print("\nTraining Time = ", training_time)
accuracy = model_accuracy.eval(feed_dict={x: mnist.test.images, y_: mnist.test.labels, keep_prob: 1.0})
# test_time = time.time() - (training_time + initial_time)
# print("\nTest Time = ", test_time)
print("\nConvNet accuracy =", accuracy)
return accuracy, training_time
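# ConvNetSVM pipeline: the trained ConvNet is used as a fixed feature extractor
# (the NUMBER_OF_FEATURES-wide h_fc1 activations), and an SVM with the default
# RBF kernel (svm.SVC()) is trained on those features in place of the softmax
# output layer.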
def ConvNetSVM():
print("\n#########################\nConvNetSVM Train/Test\n#########################")
initial_time = time.time()
for i in range(BATCHES_IN_EPOCH):
train_batch = mnist.train.next_batch(BATCH_SIZE)
features_batch = h_fc1.eval(feed_dict={x: train_batch[0]})
labels_batch = train_batch[1]
for j in range(BATCH_SIZE):
for k in range(NUMBER_OF_FEATURES):
train_features_cnn[BATCH_SIZE * i + j, k] = features_batch[j, k]
train_labels_cnn[BATCH_SIZE * i + j] = np.sum(np.multiply(converter, labels_batch[j, :]))
# print_debug(train_features_cnn, "train_features_cnn")
# print_debug(train_labels_cnn, "train_labels_cnn")
test_features_cnn = h_fc1.eval(feed_dict={x: mnist.test.images})
for j in range(TEST_SIZE):
test_labels_cnn[j] = np.sum(np.multiply(converter, mnist.test.labels[j, :]))
# print_debug(test_features_cnn, "test_features_cnn")
# print_debug(test_labels_cnn, "train_labels_cnn")
clf = svm.SVC()
clf.fit(train_features_cnn, train_labels_cnn)
training_time = time.time()-initial_time
print("\nTraining Time = ", training_time)
accuracy = clf.score(test_features_cnn, test_labels_cnn)
# test_time = time.time() - (training_time + initial_time)
# print("\nTest Time = ", test_time)
print("\nConvNetSVM accuracy =", accuracy)
return accuracy, training_time
print("\n#########################\nStarting\n#########################\n")
sess = tf.InteractiveSession()
print("\n#########################\nBuilding ConvNet\n#########################")
x = tf.placeholder(tf.float32, shape=[None, 784])
y_ = tf.placeholder(tf.float32, shape=[None, 10])
x_image = tf.reshape(x, [-1,28,28,1])
W_conv1 = weight_variable([5, 5, 1, 32])
b_conv1 = bias_variable([32])
h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
h_pool1 = max_pool_2x2(h_conv1)
W_conv2 = weight_variable([5, 5, 32, 64])
b_conv2 = bias_variable([64])
h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
h_pool2 = max_pool_2x2(h_conv2)
h_pool2_flat = tf.reshape(h_pool2, [-1, 7*7*64])
W_fc1 = weight_variable([7 * 7 * 64, NUMBER_OF_FEATURES])
b_fc1 = bias_variable([NUMBER_OF_FEATURES])
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
keep_prob = tf.placeholder(tf.float32)
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
W_fc2 = weight_variable([NUMBER_OF_FEATURES, 10])
b_fc2 = bias_variable([10])
y_conv=tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)
cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y_conv), reduction_indices=[1]))
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
correct_prediction = tf.equal(tf.argmax(y_conv,1), tf.argmax(y_,1))
model_accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
#sess.run(tf.initialize_all_variables())
print("\n#########################\nExecuting Experiments\n#########################")
dataframe_svm = pd.DataFrame()
dataframe_results = pd.DataFrame()
svm_results["LK-SVM-ACCU"], svm_results["LK-SVM-TIME"] = SVM("linear")
svm_results["GK-SVM-ACCU"], svm_results["GK-SVM-TIME"] = SVM("rbf")
dataframe_svm = dataframe_svm.append(svm_results, ignore_index=True)
dataframe_svm = dataframe_svm[["LK-SVM-ACCU", "GK-SVM-ACCU", "LK-SVM-TIME", "GK-SVM-TIME"]]
for index in range(NUMBER_OF_EXPERIMENTS):
print("\n#########################\nExperiment", index+1, "of", NUMBER_OF_EXPERIMENTS, "\n#########################")
experiment_results["1024HL-ELM-ACCU"], experiment_results["1024HL-ELM-TIME"] = ELM(1024)
experiment_results["4096HL-ELM-ACCU"], experiment_results["4096HL-ELM-TIME"] = ELM(4096)
sess.run(tf.initialize_all_variables())
experiment_results["ConvNet-ACCU"], experiment_results["ConvNet-TIME"] = ConvNet(NUMBER_OF_EPOCHS)
experiment_results["ConvNetSVM-ACCU"], experiment_results["ConvNetSVM-TIME"] = ConvNetSVM()
dataframe_results = dataframe_results.append(experiment_results, ignore_index=True)
dataframe_results = dataframe_results[["1024HL-ELM-ACCU", "4096HL-ELM-ACCU", "ConvNet-ACCU", "ConvNetSVM-ACCU",
"1024HL-ELM-TIME", "4096HL-ELM-TIME", "ConvNet-TIME", "ConvNetSVM-TIME",]]
print("\n#########################\nPrinting Results\n#########################\n")
print("\n", dataframe_svm)
print("\n", dataframe_results, "\n")
print(dataframe_results.describe())
print("\n#########################\nStoping\n#########################\n")
sess.close()
"""
import matplotlib.pyplot as plt
import matplotlib as mpl
mpl.style.use('classic')
mpl.use('pdf')
dataframe_results = dataframe_results[["1024HL-ELM-ACCU", "4096HL-ELM-ACCU", "ConvNet-ACCU", "ConvNetSVM-ACCU",]]
plt.rc('font', family='serif', serif='Times')
plt.rc('text', usetex=True)
plt.rc('xtick', labelsize=8)
plt.rc('ytick', labelsize=8)
plt.rc('axes', labelsize=8)
#plt.tight_layout()
width = 3.487
height = width / 1.618
fig=plt.figure()
fig.subplots_adjust(left=.15, bottom=.16, right=.99, top=.97)
ax = dataframe_results.plot.box(figsize=(width, height))
ax.set_xlabel("X label")
ax.set_ylabel("Y label")
ax.set_title("Title")
plt.savefig("df_global.pdf")
"""
| apache-2.0 |
alephu5/Soundbyte | environment/lib/python3.3/site-packages/matplotlib/quiver.py | 1 | 41365 | """
Support for plotting vector fields.
Presently this contains Quiver and Barb. Quiver plots an arrow in the
direction of the vector, with the size of the arrow related to the
magnitude of the vector.
Barbs are like quiver in that they point along a vector, but
the magnitude of the vector is given schematically by the presence of barbs
or flags on the barb.
This will also become a home for things such as standard
deviation ellipses, which can and will be derived very easily from
the Quiver code.
"""
import numpy as np
from numpy import ma
import matplotlib.collections as collections
import matplotlib.transforms as transforms
import matplotlib.text as mtext
import matplotlib.artist as martist
from matplotlib.artist import allow_rasterization
from matplotlib import docstring
import matplotlib.font_manager as font_manager
import matplotlib.cbook as cbook
from matplotlib.cbook import delete_masked_points
from matplotlib.patches import CirclePolygon
import math
_quiver_doc = """
Plot a 2-D field of arrows.
call signatures::
quiver(U, V, **kw)
quiver(U, V, C, **kw)
quiver(X, Y, U, V, **kw)
quiver(X, Y, U, V, C, **kw)
Arguments:
*X*, *Y*:
The x and y coordinates of the arrow locations (default is tail of
arrow; see *pivot* kwarg)
*U*, *V*:
Give the x and y components of the arrow vectors
*C*:
An optional array used to map colors to the arrows
All arguments may be 1-D or 2-D arrays or sequences. If *X* and *Y*
are absent, they will be generated as a uniform grid. If *U* and *V*
are 2-D arrays but *X* and *Y* are 1-D, and if ``len(X)`` and ``len(Y)``
match the column and row dimensions of *U*, then *X* and *Y* will be
expanded with :func:`numpy.meshgrid`.
*U*, *V*, *C* may be masked arrays, but masked *X*, *Y* are not
supported at present.
Keyword arguments:
*units*: [ 'width' | 'height' | 'dots' | 'inches' | 'x' | 'y' | 'xy' ]
Arrow units; the arrow dimensions *except for length* are in
multiples of this unit.
* 'width' or 'height': the width or height of the axes
* 'dots' or 'inches': pixels or inches, based on the figure dpi
* 'x', 'y', or 'xy': *X*, *Y*, or sqrt(X^2+Y^2) data units
The arrows scale differently depending on the units. For
'x' or 'y', the arrows get larger as one zooms in; for other
units, the arrow size is independent of the zoom state. For
'width' or 'height', the arrow size increases with the width and
height of the axes, respectively, when the window is resized;
for 'dots' or 'inches', resizing does not change the arrows.
*angles*: [ 'uv' | 'xy' | array ]
With the default 'uv', the arrow aspect ratio is 1, so that
if *U*==*V* the angle of the arrow on the plot is 45 degrees
CCW from the *x*-axis.
With 'xy', the arrow points from (x,y) to (x+u, y+v).
Alternatively, arbitrary angles may be specified as an array
of values in degrees, CCW from the *x*-axis.
*scale*: [ *None* | float ]
Data units per arrow length unit, e.g., m/s per plot width; a smaller
scale parameter makes the arrow longer. If *None*, a simple
autoscaling algorithm is used, based on the average vector length
and the number of vectors. The arrow length unit is given by
the *scale_units* parameter
*scale_units*: *None*, or any of the *units* options.
For example, if *scale_units* is 'inches', *scale* is 2.0, and
``(u,v) = (1,0)``, then the vector will be 0.5 inches long.
If *scale_units* is 'width', then the vector will be half the width
of the axes.
If *scale_units* is 'x' then the vector will be 0.5 x-axis
units. To plot vectors in the x-y plane, with u and v having
the same units as x and y, use
"angles='xy', scale_units='xy', scale=1".
*width*:
Shaft width in arrow units; default depends on choice of units,
above, and number of vectors; a typical starting value is about
0.005 times the width of the plot.
*headwidth*: scalar
Head width as multiple of shaft width, default is 3
*headlength*: scalar
Head length as multiple of shaft width, default is 5
*headaxislength*: scalar
Head length at shaft intersection, default is 4.5
*minshaft*: scalar
Length below which arrow scales, in units of head length. Do not
set this to less than 1, or small arrows will look terrible!
Default is 1
*minlength*: scalar
Minimum length as a multiple of shaft width; if an arrow length
is less than this, plot a dot (hexagon) of this diameter instead.
Default is 1.
*pivot*: [ 'tail' | 'middle' | 'tip' ]
The part of the arrow that is at the grid point; the arrow rotates
about this point, hence the name *pivot*.
*color*: [ color | color sequence ]
This is a synonym for the
:class:`~matplotlib.collections.PolyCollection` facecolor kwarg.
If *C* has been set, *color* has no effect.
The defaults give a slightly swept-back arrow; to make the head a
triangle, make *headaxislength* the same as *headlength*. To make the
arrow more pointed, reduce *headwidth* or increase *headlength* and
*headaxislength*. To make the head smaller relative to the shaft,
scale down all the head parameters. You will probably do best to leave
minshaft alone.
linewidths and edgecolors can be used to customize the arrow
outlines. Additional :class:`~matplotlib.collections.PolyCollection`
keyword arguments:
%(PolyCollection)s
""" % docstring.interpd.params
_quiverkey_doc = """
Add a key to a quiver plot.
Call signature::
quiverkey(Q, X, Y, U, label, **kw)
Arguments:
*Q*:
The Quiver instance returned by a call to quiver.
*X*, *Y*:
The location of the key; additional explanation follows.
*U*:
The length of the key
*label*:
A string with the length and units of the key
Keyword arguments:
*coordinates* = [ 'axes' | 'figure' | 'data' | 'inches' ]
Coordinate system and units for *X*, *Y*: 'axes' and 'figure' are
normalized coordinate systems with 0,0 in the lower left and 1,1
in the upper right; 'data' are the axes data coordinates (used for
the locations of the vectors in the quiver plot itself); 'inches'
is position in the figure in inches, with 0,0 at the lower left
corner.
*color*:
overrides face and edge colors from *Q*.
*labelpos* = [ 'N' | 'S' | 'E' | 'W' ]
Position the label above, below, to the right, to the left of the
arrow, respectively.
*labelsep*:
Distance in inches between the arrow and the label. Default is
0.1
*labelcolor*:
defaults to default :class:`~matplotlib.text.Text` color.
*fontproperties*:
A dictionary with keyword arguments accepted by the
:class:`~matplotlib.font_manager.FontProperties` initializer:
*family*, *style*, *variant*, *size*, *weight*
Any additional keyword arguments are used to override vector
properties taken from *Q*.
The positioning of the key depends on *X*, *Y*, *coordinates*, and
*labelpos*. If *labelpos* is 'N' or 'S', *X*, *Y* give the position
of the middle of the key arrow. If *labelpos* is 'E', *X*, *Y*
positions the head, and if *labelpos* is 'W', *X*, *Y* positions the
tail; in either of these two cases, *X*, *Y* is somewhere in the
middle of the arrow+label key object.
"""
class QuiverKey(martist.Artist):
""" Labelled arrow for use as a quiver plot scale key."""
halign = {'N': 'center', 'S': 'center', 'E': 'left', 'W': 'right'}
valign = {'N': 'bottom', 'S': 'top', 'E': 'center', 'W': 'center'}
pivot = {'N': 'mid', 'S': 'mid', 'E': 'tip', 'W': 'tail'}
def __init__(self, Q, X, Y, U, label, **kw):
martist.Artist.__init__(self)
self.Q = Q
self.X = X
self.Y = Y
self.U = U
self.coord = kw.pop('coordinates', 'axes')
self.color = kw.pop('color', None)
self.label = label
self._labelsep_inches = kw.pop('labelsep', 0.1)
self.labelsep = (self._labelsep_inches * Q.ax.figure.dpi)
def on_dpi_change(fig):
self.labelsep = (self._labelsep_inches * fig.dpi)
self._initialized = False # simple brute force update
# works because _init is called
# at the start of draw.
Q.ax.figure.callbacks.connect('dpi_changed', on_dpi_change)
self.labelpos = kw.pop('labelpos', 'N')
self.labelcolor = kw.pop('labelcolor', None)
self.fontproperties = kw.pop('fontproperties', dict())
self.kw = kw
_fp = self.fontproperties
#boxprops = dict(facecolor='red')
self.text = mtext.Text(
text=label, # bbox=boxprops,
horizontalalignment=self.halign[self.labelpos],
verticalalignment=self.valign[self.labelpos],
fontproperties=font_manager.FontProperties(**_fp))
if self.labelcolor is not None:
self.text.set_color(self.labelcolor)
self._initialized = False
self.zorder = Q.zorder + 0.1
__init__.__doc__ = _quiverkey_doc
def _init(self):
if True: # not self._initialized:
self._set_transform()
_pivot = self.Q.pivot
self.Q.pivot = self.pivot[self.labelpos]
# Hack: save and restore the Umask
_mask = self.Q.Umask
self.Q.Umask = ma.nomask
self.verts = self.Q._make_verts(np.array([self.U]),
np.zeros((1,)))
self.Q.Umask = _mask
self.Q.pivot = _pivot
kw = self.Q.polykw
kw.update(self.kw)
self.vector = collections.PolyCollection(
self.verts,
offsets=[(self.X, self.Y)],
transOffset=self.get_transform(),
**kw)
if self.color is not None:
self.vector.set_color(self.color)
self.vector.set_transform(self.Q.get_transform())
self._initialized = True
def _text_x(self, x):
if self.labelpos == 'E':
return x + self.labelsep
elif self.labelpos == 'W':
return x - self.labelsep
else:
return x
def _text_y(self, y):
if self.labelpos == 'N':
return y + self.labelsep
elif self.labelpos == 'S':
return y - self.labelsep
else:
return y
@allow_rasterization
def draw(self, renderer):
self._init()
self.vector.draw(renderer)
x, y = self.get_transform().transform_point((self.X, self.Y))
self.text.set_x(self._text_x(x))
self.text.set_y(self._text_y(y))
self.text.draw(renderer)
def _set_transform(self):
if self.coord == 'data':
self.set_transform(self.Q.ax.transData)
elif self.coord == 'axes':
self.set_transform(self.Q.ax.transAxes)
elif self.coord == 'figure':
self.set_transform(self.Q.ax.figure.transFigure)
elif self.coord == 'inches':
self.set_transform(self.Q.ax.figure.dpi_scale_trans)
else:
raise ValueError('unrecognized coordinates')
def set_figure(self, fig):
martist.Artist.set_figure(self, fig)
self.text.set_figure(fig)
def contains(self, mouseevent):
# Maybe the dictionary should allow one to
# distinguish between a text hit and a vector hit.
if (self.text.contains(mouseevent)[0]
or self.vector.contains(mouseevent)[0]):
return True, {}
return False, {}
quiverkey_doc = _quiverkey_doc
# This is a helper function that parses out the various combination of
# arguments for doing colored vector plots. Pulling it out here
# allows both Quiver and Barbs to use it
def _parse_args(*args):
X, Y, U, V, C = [None] * 5
args = list(args)
# The use of atleast_1d allows for handling scalar arguments while also
# keeping masked arrays
if len(args) == 3 or len(args) == 5:
C = np.atleast_1d(args.pop(-1))
V = np.atleast_1d(args.pop(-1))
U = np.atleast_1d(args.pop(-1))
if U.ndim == 1:
nr, nc = 1, U.shape[0]
else:
nr, nc = U.shape
if len(args) == 2: # remaining after removing U,V,C
X, Y = [np.array(a).ravel() for a in args]
if len(X) == nc and len(Y) == nr:
X, Y = [a.ravel() for a in np.meshgrid(X, Y)]
else:
indexgrid = np.meshgrid(np.arange(nc), np.arange(nr))
X, Y = [np.ravel(a) for a in indexgrid]
return X, Y, U, V, C
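# Illustrative summary of the argument handling above (not part of the
# library source): _parse_args accepts between two and five positional
# arrays and fills in X, Y when they are omitted, e.g.
#
#   _parse_args(U, V)          # X, Y generated as a uniform index grid
#   _parse_args(U, V, C)       # same, plus a color array
#   _parse_args(X, Y, U, V)    # 1-D X/Y are meshgridded against 2-D U/V
#   _parse_args(X, Y, U, V, C) # full form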
class Quiver(collections.PolyCollection):
"""
Specialized PolyCollection for arrows.
The only API method is set_UVC(), which can be used
to change the size, orientation, and color of the
arrows; their locations are fixed when the class is
instantiated. Possibly this method will be useful
in animations.
Much of the work in this class is done in the draw()
method so that as much information as possible is available
about the plot. In subsequent draw() calls, recalculation
is limited to things that might have changed, so there
should be no performance penalty from putting the calculations
in the draw() method.
"""
@docstring.Substitution(_quiver_doc)
def __init__(self, ax, *args, **kw):
"""
The constructor takes one required argument, an Axes
instance, followed by the args and kwargs described
by the following pylab interface documentation:
%s
"""
self.ax = ax
X, Y, U, V, C = _parse_args(*args)
self.X = X
self.Y = Y
self.XY = np.hstack((X[:, np.newaxis], Y[:, np.newaxis]))
self.N = len(X)
self.scale = kw.pop('scale', None)
self.headwidth = kw.pop('headwidth', 3)
self.headlength = float(kw.pop('headlength', 5))
self.headaxislength = kw.pop('headaxislength', 4.5)
self.minshaft = kw.pop('minshaft', 1)
self.minlength = kw.pop('minlength', 1)
self.units = kw.pop('units', 'width')
self.scale_units = kw.pop('scale_units', None)
self.angles = kw.pop('angles', 'uv')
self.width = kw.pop('width', None)
self.color = kw.pop('color', 'k')
self.pivot = kw.pop('pivot', 'tail')
self.transform = kw.pop('transform', ax.transData)
kw.setdefault('facecolors', self.color)
kw.setdefault('linewidths', (0,))
collections.PolyCollection.__init__(self, [], offsets=self.XY,
transOffset=self.transform,
closed=False,
**kw)
self.polykw = kw
self.set_UVC(U, V, C)
self._initialized = False
self.keyvec = None
self.keytext = None
def on_dpi_change(fig):
self._new_UV = True # vertices depend on width, span
# which in turn depend on dpi
self._initialized = False # simple brute force update
# works because _init is called
# at the start of draw.
self.ax.figure.callbacks.connect('dpi_changed', on_dpi_change)
def _init(self):
"""
Initialization delayed until first draw;
allow time for axes setup.
"""
# It seems that there are not enough event notifications
# available to have this work on an as-needed basis at present.
if True: # not self._initialized:
trans = self._set_transform()
ax = self.ax
sx, sy = trans.inverted().transform_point(
(ax.bbox.width, ax.bbox.height))
self.span = sx
if self.width is None:
sn = max(8, min(25, math.sqrt(self.N)))
self.width = 0.06 * self.span / sn
@allow_rasterization
def draw(self, renderer):
self._init()
if (self._new_UV or self.angles == 'xy'
or self.scale_units in ['x', 'y', 'xy']):
verts = self._make_verts(self.U, self.V)
self.set_verts(verts, closed=False)
self._new_UV = False
collections.PolyCollection.draw(self, renderer)
def set_UVC(self, U, V, C=None):
U = ma.masked_invalid(U, copy=False).ravel()
V = ma.masked_invalid(V, copy=False).ravel()
mask = ma.mask_or(U.mask, V.mask, copy=False, shrink=True)
if C is not None:
C = ma.masked_invalid(C, copy=False).ravel()
mask = ma.mask_or(mask, C.mask, copy=False, shrink=True)
if mask is ma.nomask:
C = C.filled()
else:
C = ma.array(C, mask=mask, copy=False)
self.U = U.filled(1)
self.V = V.filled(1)
self.Umask = mask
if C is not None:
self.set_array(C)
self._new_UV = True
def _dots_per_unit(self, units):
"""
Return a scale factor for converting from units to pixels
"""
ax = self.ax
if units in ('x', 'y', 'xy'):
if units == 'x':
dx0 = ax.viewLim.width
dx1 = ax.bbox.width
elif units == 'y':
dx0 = ax.viewLim.height
dx1 = ax.bbox.height
else: # 'xy' is assumed
dxx0 = ax.viewLim.width
dxx1 = ax.bbox.width
dyy0 = ax.viewLim.height
dyy1 = ax.bbox.height
dx1 = np.hypot(dxx1, dyy1)
dx0 = np.hypot(dxx0, dyy0)
dx = dx1 / dx0
else:
if units == 'width':
dx = ax.bbox.width
elif units == 'height':
dx = ax.bbox.height
elif units == 'dots':
dx = 1.0
elif units == 'inches':
dx = ax.figure.dpi
else:
raise ValueError('unrecognized units')
return dx
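# Rough summary of the conversion above (an annotation, not part of the
# library source), i.e. pixels per arrow-width unit for each *units* value:
#
#   'width' / 'height' -> ax.bbox.width / ax.bbox.height
#   'dots'             -> 1.0
#   'inches'           -> figure dpi
#   'x' / 'y' / 'xy'   -> pixels per data unit along that direction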
def _set_transform(self):
"""
Sets the PolygonCollection transform to go
from arrow width units to pixels.
"""
dx = self._dots_per_unit(self.units)
self._trans_scale = dx # pixels per arrow width unit
trans = transforms.Affine2D().scale(dx)
self.set_transform(trans)
return trans
def _angles_lengths(self, U, V, eps=1):
xy = self.ax.transData.transform(self.XY)
uv = np.hstack((U[:, np.newaxis], V[:, np.newaxis]))
xyp = self.ax.transData.transform(self.XY + eps * uv)
dxy = xyp - xy
angles = np.arctan2(dxy[:, 1], dxy[:, 0])
lengths = np.absolute(dxy[:, 0] + dxy[:, 1] * 1j) / eps
return angles, lengths
def _make_verts(self, U, V):
uv = (U + V * 1j)
if self.angles == 'xy' and self.scale_units == 'xy':
# Here eps is 1 so that if we get U, V by diffing
# the X, Y arrays, the vectors will connect the
# points, regardless of the axis scaling (including log).
angles, lengths = self._angles_lengths(U, V, eps=1)
elif self.angles == 'xy' or self.scale_units == 'xy':
# Calculate eps based on the extents of the plot
# so that we don't end up with roundoff error from
# adding a small number to a large.
eps = np.abs(self.ax.dataLim.extents).max() * 0.001
angles, lengths = self._angles_lengths(U, V, eps=eps)
if self.scale_units == 'xy':
a = lengths
else:
a = np.absolute(uv)
if self.scale is None:
sn = max(10, math.sqrt(self.N))
if self.Umask is not ma.nomask:
amean = a[~self.Umask].mean()
else:
amean = a.mean()
scale = 1.8 * amean * sn / self.span # crude auto-scaling
# scale is typical arrow length as a multiple
# of the arrow width
if self.scale_units is None:
if self.scale is None:
self.scale = scale
widthu_per_lenu = 1.0
else:
if self.scale_units == 'xy':
dx = 1
else:
dx = self._dots_per_unit(self.scale_units)
widthu_per_lenu = dx / self._trans_scale
if self.scale is None:
self.scale = scale * widthu_per_lenu
length = a * (widthu_per_lenu / (self.scale * self.width))
X, Y = self._h_arrows(length)
if self.angles == 'xy':
theta = angles
elif self.angles == 'uv':
theta = np.angle(uv)
else:
# Make a copy to avoid changing the input array.
theta = ma.masked_invalid(self.angles, copy=True).filled(0)
theta = theta.ravel()
theta *= (np.pi / 180.0)
theta.shape = (theta.shape[0], 1) # for broadcasting
xy = (X + Y * 1j) * np.exp(1j * theta) * self.width
xy = xy[:, :, np.newaxis]
XY = np.concatenate((xy.real, xy.imag), axis=2)
if self.Umask is not ma.nomask:
XY = ma.array(XY)
XY[self.Umask] = ma.masked
# This might be handled more efficiently with nans, given
# that nans will end up in the paths anyway.
return XY
def _h_arrows(self, length):
""" length is in arrow width units """
# It might be possible to streamline the code
# and speed it up a bit by using complex (x,y)
# instead of separate arrays; but any gain would be slight.
minsh = self.minshaft * self.headlength
N = len(length)
length = length.reshape(N, 1)
# This number is chosen based on when pixel values overflow in Agg
# causing rendering errors
#length = np.minimum(length, 2 ** 16)
np.clip(length, 0, 2 ** 16, out=length)
# x, y: normal horizontal arrow
x = np.array([0, -self.headaxislength,
-self.headlength, 0],
np.float64)
x = x + np.array([0, 1, 1, 1]) * length
y = 0.5 * np.array([1, 1, self.headwidth, 0], np.float64)
y = np.repeat(y[np.newaxis, :], N, axis=0)
# x0, y0: arrow without shaft, for short vectors
x0 = np.array([0, minsh - self.headaxislength,
minsh - self.headlength, minsh], np.float64)
y0 = 0.5 * np.array([1, 1, self.headwidth, 0], np.float64)
ii = [0, 1, 2, 3, 2, 1, 0, 0]
X = x.take(ii, 1)
Y = y.take(ii, 1)
Y[:, 3:-1] *= -1
X0 = x0.take(ii)
Y0 = y0.take(ii)
Y0[3:-1] *= -1
shrink = length / minsh
X0 = shrink * X0[np.newaxis, :]
Y0 = shrink * Y0[np.newaxis, :]
short = np.repeat(length < minsh, 8, axis=1)
# Now select X0, Y0 if short, otherwise X, Y
cbook._putmask(X, short, X0)
cbook._putmask(Y, short, Y0)
if self.pivot[:3] == 'mid':
X -= 0.5 * X[:, 3, np.newaxis]
elif self.pivot[:3] == 'tip':
X = X - X[:, 3, np.newaxis] # numpy bug? using -= does not
# work here unless we multiply
# by a float first, as with 'mid'.
tooshort = length < self.minlength
if tooshort.any():
# Use a heptagonal dot:
th = np.arange(0, 8, 1, np.float64) * (np.pi / 3.0)
x1 = np.cos(th) * self.minlength * 0.5
y1 = np.sin(th) * self.minlength * 0.5
X1 = np.repeat(x1[np.newaxis, :], N, axis=0)
Y1 = np.repeat(y1[np.newaxis, :], N, axis=0)
tooshort = np.repeat(tooshort, 8, 1)
cbook._putmask(X, tooshort, X1)
cbook._putmask(Y, tooshort, Y1)
# Mask handling is deferred to the caller, _make_verts.
return X, Y
quiver_doc = _quiver_doc
_barbs_doc = """
Plot a 2-D field of barbs.
Call signatures::
barb(U, V, **kw)
barb(U, V, C, **kw)
barb(X, Y, U, V, **kw)
barb(X, Y, U, V, C, **kw)
Arguments:
*X*, *Y*:
The x and y coordinates of the barb locations
(default is head of barb; see *pivot* kwarg)
*U*, *V*:
Give the x and y components of the barb shaft
*C*:
An optional array used to map colors to the barbs
All arguments may be 1-D or 2-D arrays or sequences. If *X* and *Y*
are absent, they will be generated as a uniform grid. If *U* and *V*
are 2-D arrays but *X* and *Y* are 1-D, and if ``len(X)`` and ``len(Y)``
match the column and row dimensions of *U*, then *X* and *Y* will be
expanded with :func:`numpy.meshgrid`.
*U*, *V*, *C* may be masked arrays, but masked *X*, *Y* are not
supported at present.
Keyword arguments:
*length*:
Length of the barb in points; the other parts of the barb
are scaled against this.
Default is 9
*pivot*: [ 'tip' | 'middle' ]
The part of the arrow that is at the grid point; the arrow rotates
about this point, hence the name *pivot*. Default is 'tip'
*barbcolor*: [ color | color sequence ]
Specifies the color all parts of the barb except any flags. This
parameter is analogous to the *edgecolor* parameter for polygons,
which can be used instead. However this parameter will override
facecolor.
*flagcolor*: [ color | color sequence ]
Specifies the color of any flags on the barb. This parameter is
analogous to the *facecolor* parameter for polygons, which can be
used instead. However this parameter will override facecolor. If
this is not set (and *C* has not either) then *flagcolor* will be
set to match *barbcolor* so that the barb has a uniform color. If
*C* has been set, *flagcolor* has no effect.
*sizes*:
A dictionary of coefficients specifying the ratio of a given
feature to the length of the barb. Only those values one wishes to
override need to be included. These features include:
- 'spacing' - space between features (flags, full/half barbs)
- 'height' - height (distance from shaft to top) of a flag or
full barb
- 'width' - width of a flag, twice the width of a full barb
- 'emptybarb' - radius of the circle used for low magnitudes
*fill_empty*:
A flag on whether the empty barbs (circles) that are drawn should
be filled with the flag color. If they are not filled, they will
be drawn such that no color is applied to the center. Default is
False
*rounding*:
A flag to indicate whether the vector magnitude should be rounded
when allocating barb components. If True, the magnitude is
rounded to the nearest multiple of the half-barb increment. If
False, the magnitude is simply truncated to the next lowest
multiple. Default is True
*barb_increments*:
A dictionary of increments specifying values to associate with
different parts of the barb. Only those values one wishes to
override need to be included.
- 'half' - half barbs (Default is 5)
- 'full' - full barbs (Default is 10)
- 'flag' - flags (default is 50)
*flip_barb*:
Either a single boolean flag or an array of booleans. Single
boolean indicates whether the lines and flags should point
opposite to normal for all barbs. An array (which should be the
same size as the other data arrays) indicates whether to flip for
each individual barb. Normal behavior is for the barbs and lines
to point right (comes from wind barbs having these features point
towards low pressure in the Northern Hemisphere.) Default is
False
Barbs are traditionally used in meteorology as a way to plot the speed
and direction of wind observations, but can technically be used to
plot any two dimensional vector quantity. As opposed to arrows, which
give vector magnitude by the length of the arrow, the barbs give more
quantitative information about the vector magnitude by putting slanted
lines or a triangle for various increments in magnitude, as shown
schematically below::
: /\ \\
: / \ \\
: / \ \ \\
: / \ \ \\
: ------------------------------
.. note the double \\ at the end of each line to make the figure
.. render correctly
The largest increment is given by a triangle (or "flag"). After those
come full lines (barbs). The smallest increment is a half line. There
is only, of course, ever at most 1 half line. If the magnitude is
small and only needs a single half-line and no full lines or
triangles, the half-line is offset from the end of the barb so that it
can be easily distinguished from barbs with a single full line. The
magnitude for the barb shown above would nominally be 65, using the
standard increments of 50, 10, and 5.
linewidths and edgecolors can be used to customize the barb.
Additional :class:`~matplotlib.collections.PolyCollection` keyword
arguments:
%(PolyCollection)s
""" % docstring.interpd.params
docstring.interpd.update(barbs_doc=_barbs_doc)
class Barbs(collections.PolyCollection):
'''
Specialized PolyCollection for barbs.
The only API method is :meth:`set_UVC`, which can be used to
change the size, orientation, and color of the arrows. Locations
are changed using the :meth:`set_offsets` collection method.
Possibly this method will be useful in animations.
There is one internal function :meth:`_find_tails` which finds
exactly what should be put on the barb given the vector magnitude.
From there :meth:`_make_barbs` is used to find the vertices of the
polygon to represent the barb based on this information.
'''
#This may be an abuse of polygons here to render what is essentially maybe
#1 triangle and a series of lines. It works fine as far as I can tell
#however.
@docstring.interpd
def __init__(self, ax, *args, **kw):
"""
The constructor takes one required argument, an Axes
instance, followed by the args and kwargs described
by the following pylab interface documentation:
%(barbs_doc)s
"""
self._pivot = kw.pop('pivot', 'tip')
self._length = kw.pop('length', 7)
barbcolor = kw.pop('barbcolor', None)
flagcolor = kw.pop('flagcolor', None)
self.sizes = kw.pop('sizes', dict())
self.fill_empty = kw.pop('fill_empty', False)
self.barb_increments = kw.pop('barb_increments', dict())
self.rounding = kw.pop('rounding', True)
self.flip = kw.pop('flip_barb', False)
transform = kw.pop('transform', ax.transData)
#Flagcolor and barbcolor provide convenience parameters for setting
#the facecolor and edgecolor, respectively, of the barb polygon. We
#also work here to make the flag the same color as the rest of the barb
#by default
if None in (barbcolor, flagcolor):
kw['edgecolors'] = 'face'
if flagcolor:
kw['facecolors'] = flagcolor
elif barbcolor:
kw['facecolors'] = barbcolor
else:
#Set to facecolor passed in or default to black
kw.setdefault('facecolors', 'k')
else:
kw['edgecolors'] = barbcolor
kw['facecolors'] = flagcolor
#Parse out the data arrays from the various configurations supported
x, y, u, v, c = _parse_args(*args)
self.x = x
self.y = y
xy = np.hstack((x[:, np.newaxis], y[:, np.newaxis]))
#Make a collection
barb_size = self._length ** 2 / 4 # Empirically determined
collections.PolyCollection.__init__(self, [], (barb_size,), offsets=xy,
transOffset=transform, **kw)
self.set_transform(transforms.IdentityTransform())
self.set_UVC(u, v, c)
def _find_tails(self, mag, rounding=True, half=5, full=10, flag=50):
'''
Find how many of each of the tail pieces is necessary. Flag
specifies the increment for a flag, barb for a full barb, and half for
half a barb. Mag should be the magnitude of a vector (i.e., >= 0).
This returns a tuple of:
(*number of flags*, *number of barbs*, *half_flag*, *empty_flag*)
*half_flag* is a boolean whether half of a barb is needed,
since there should only ever be one half on a given
barb. *empty_flag* is an array of flags to easily tell if
a barb is empty (too low to plot any barbs/flags).
'''
#If rounding, round to the nearest multiple of half, the smallest
#increment
if rounding:
mag = half * (mag / half + 0.5).astype(np.int)
num_flags = np.floor(mag / flag).astype(np.int)
mag = np.mod(mag, flag)
num_barb = np.floor(mag / full).astype(np.int)
mag = np.mod(mag, full)
half_flag = mag >= half
empty_flag = ~(half_flag | (num_flags > 0) | (num_barb > 0))
return num_flags, num_barb, half_flag, empty_flag
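# Illustrative worked example (an annotation, not part of the library
# source), using the default increments half=5, full=10, flag=50 mentioned
# in the module docstring: a magnitude of 65 decomposes into one flag (50),
# one full barb (10) and one half barb (5), i.e.
#
#   self._find_tails(np.array([65.]))
#   # -> (array([1]), array([1]), array([ True]), array([False]))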
def _make_barbs(self, u, v, nflags, nbarbs, half_barb, empty_flag, length,
pivot, sizes, fill_empty, flip):
'''
This function actually creates the wind barbs. *u* and *v*
are components of the vector in the *x* and *y* directions,
respectively.
*nflags*, *nbarbs*, and *half_barb*, *empty_flag* are,
respectively, the number of flags, number of barbs, flag for
half a barb, and flag for empty barb, ostensibly obtained
from :meth:`_find_tails`.
*length* is the length of the barb staff in points.
*pivot* specifies the point on the barb around which the
entire barb should be rotated. Right now, valid options are
'tip' and 'middle'.
*sizes* is a dictionary of coefficients specifying the ratio
of a given feature to the length of the barb. These features
include:
- *spacing*: space between features (flags, full/half
barbs)
- *height*: distance from shaft to top of a flag or full
barb
- *width*: width of a flag, twice the width of a full barb
- *emptybarb*: radius of the circle used for low
magnitudes
*fill_empty* specifies whether the circle representing an
empty barb should be filled or not (this changes the drawing
of the polygon).
*flip* is a flag indicating whether the features should be flipped to
the other side of the barb (useful for winds in the southern
hemisphere.
This function returns list of arrays of vertices, defining a polygon
for each of the wind barbs. These polygons have been rotated to
properly align with the vector direction.
'''
#These control the spacing and size of barb elements relative to the
#length of the shaft
spacing = length * sizes.get('spacing', 0.125)
full_height = length * sizes.get('height', 0.4)
full_width = length * sizes.get('width', 0.25)
empty_rad = length * sizes.get('emptybarb', 0.15)
#Controls y point where to pivot the barb.
pivot_points = dict(tip=0.0, middle=-length / 2.)
#Check for flip
if flip:
full_height = -full_height
endx = 0.0
endy = pivot_points[pivot.lower()]
# Get the appropriate angle for the vector components. The offset is
# due to the way the barb is initially drawn, going down the y-axis.
# This makes sense in a meteorological mode of thinking since there 0
# degrees corresponds to north (the y-axis traditionally)
angles = -(ma.arctan2(v, u) + np.pi / 2)
# Used for low magnitude. We just get the vertices, so if we make it
# out here, it can be reused. The center set here should put the
# center of the circle at the location(offset), rather than at the
# same point as the barb pivot; this seems more sensible.
circ = CirclePolygon((0, 0), radius=empty_rad).get_verts()
if fill_empty:
empty_barb = circ
else:
# If we don't want the empty one filled, we make a degenerate
# polygon that wraps back over itself
empty_barb = np.concatenate((circ, circ[::-1]))
barb_list = []
for index, angle in np.ndenumerate(angles):
#If the vector magnitude is too weak to draw anything, plot an
#empty circle instead
if empty_flag[index]:
#We can skip the transform since the circle has no preferred
#orientation
barb_list.append(empty_barb)
continue
poly_verts = [(endx, endy)]
offset = length
# Add vertices for each flag
for i in range(nflags[index]):
# The spacing that works for the barbs is a little too much for
# the flags, but this only occurs when we have more than 1
# flag.
if offset != length:
offset += spacing / 2.
poly_verts.extend(
[[endx, endy + offset],
[endx + full_height, endy - full_width / 2 + offset],
[endx, endy - full_width + offset]])
offset -= full_width + spacing
# Add vertices for each barb. These really are lines, but works
# great adding 3 vertices that basically pull the polygon out and
# back down the line
for i in range(nbarbs[index]):
poly_verts.extend(
[(endx, endy + offset),
(endx + full_height, endy + offset + full_width / 2),
(endx, endy + offset)])
offset -= spacing
# Add the vertices for half a barb, if needed
if half_barb[index]:
# If the half barb is the first on the staff, traditionally it
# is offset from the end to make it easy to distinguish from a
# barb with a full one
if offset == length:
poly_verts.append((endx, endy + offset))
offset -= 1.5 * spacing
poly_verts.extend(
[(endx, endy + offset),
(endx + full_height / 2, endy + offset + full_width / 4),
(endx, endy + offset)])
# Rotate the barb according the angle. Making the barb first and
# then rotating it made the math for drawing the barb really easy.
# Also, the transform framework makes doing the rotation simple.
poly_verts = transforms.Affine2D().rotate(-angle).transform(
poly_verts)
barb_list.append(poly_verts)
return barb_list
def set_UVC(self, U, V, C=None):
self.u = ma.masked_invalid(U, copy=False).ravel()
self.v = ma.masked_invalid(V, copy=False).ravel()
if C is not None:
c = ma.masked_invalid(C, copy=False).ravel()
x, y, u, v, c = delete_masked_points(self.x.ravel(),
self.y.ravel(),
self.u, self.v, c)
else:
x, y, u, v = delete_masked_points(self.x.ravel(), self.y.ravel(),
self.u, self.v)
magnitude = np.hypot(u, v)
flags, barbs, halves, empty = self._find_tails(magnitude,
self.rounding,
**self.barb_increments)
# Get the vertices for each of the barbs
plot_barbs = self._make_barbs(u, v, flags, barbs, halves, empty,
self._length, self._pivot, self.sizes,
self.fill_empty, self.flip)
self.set_verts(plot_barbs)
# Set the color array
if C is not None:
self.set_array(c)
# Update the offsets in case the masked data changed
xy = np.hstack((x[:, np.newaxis], y[:, np.newaxis]))
self._offsets = xy
def set_offsets(self, xy):
"""
Set the offsets for the barb polygons. This saves the offsets passed in
and stores a version masked as appropriate for the existing U/V
data. *offsets* should be a sequence.
ACCEPTS: sequence of pairs of floats
"""
self.x = xy[:, 0]
self.y = xy[:, 1]
x, y, u, v = delete_masked_points(self.x.ravel(), self.y.ravel(),
self.u, self.v)
xy = np.hstack((x[:, np.newaxis], y[:, np.newaxis]))
collections.PolyCollection.set_offsets(self, xy)
set_offsets.__doc__ = collections.PolyCollection.set_offsets.__doc__
barbs_doc = _barbs_doc
| gpl-3.0 |
BigDataforYou/movie_recommendation_workshop_1 | big_data_4_you_demo_1/venv/lib/python2.7/site-packages/pandas/tests/frame/test_axis_select_reindex.py | 1 | 33367 | # -*- coding: utf-8 -*-
from __future__ import print_function
from datetime import datetime
from numpy import random
import numpy as np
from pandas.compat import lrange, lzip, u
from pandas import (compat, DataFrame, Series, Index, MultiIndex,
date_range, isnull)
import pandas as pd
from pandas.util.testing import (assert_series_equal,
assert_frame_equal,
assertRaisesRegexp)
from pandas.core.common import PerformanceWarning
import pandas.util.testing as tm
from pandas.tests.frame.common import TestData
class TestDataFrameSelectReindex(tm.TestCase, TestData):
# These are specific reindex-based tests; other indexing tests should go in
# test_indexing
_multiprocess_can_split_ = True
def test_drop_names(self):
df = DataFrame([[1, 2, 3], [3, 4, 5], [5, 6, 7]],
index=['a', 'b', 'c'],
columns=['d', 'e', 'f'])
df.index.name, df.columns.name = 'first', 'second'
df_dropped_b = df.drop('b')
df_dropped_e = df.drop('e', axis=1)
df_inplace_b, df_inplace_e = df.copy(), df.copy()
df_inplace_b.drop('b', inplace=True)
df_inplace_e.drop('e', axis=1, inplace=True)
for obj in (df_dropped_b, df_dropped_e, df_inplace_b, df_inplace_e):
self.assertEqual(obj.index.name, 'first')
self.assertEqual(obj.columns.name, 'second')
self.assertEqual(list(df.columns), ['d', 'e', 'f'])
self.assertRaises(ValueError, df.drop, ['g'])
self.assertRaises(ValueError, df.drop, ['g'], 1)
# errors = 'ignore'
dropped = df.drop(['g'], errors='ignore')
expected = Index(['a', 'b', 'c'], name='first')
self.assert_index_equal(dropped.index, expected)
dropped = df.drop(['b', 'g'], errors='ignore')
expected = Index(['a', 'c'], name='first')
self.assert_index_equal(dropped.index, expected)
dropped = df.drop(['g'], axis=1, errors='ignore')
expected = Index(['d', 'e', 'f'], name='second')
self.assert_index_equal(dropped.columns, expected)
dropped = df.drop(['d', 'g'], axis=1, errors='ignore')
expected = Index(['e', 'f'], name='second')
self.assert_index_equal(dropped.columns, expected)
def test_drop_col_still_multiindex(self):
arrays = [['a', 'b', 'c', 'top'],
['', '', '', 'OD'],
['', '', '', 'wx']]
tuples = sorted(zip(*arrays))
index = MultiIndex.from_tuples(tuples)
df = DataFrame(np.random.randn(3, 4), columns=index)
del df[('a', '', '')]
assert(isinstance(df.columns, MultiIndex))
def test_drop(self):
simple = DataFrame({"A": [1, 2, 3, 4], "B": [0, 1, 2, 3]})
assert_frame_equal(simple.drop("A", axis=1), simple[['B']])
assert_frame_equal(simple.drop(["A", "B"], axis='columns'),
simple[[]])
assert_frame_equal(simple.drop([0, 1, 3], axis=0), simple.ix[[2], :])
assert_frame_equal(simple.drop(
[0, 3], axis='index'), simple.ix[[1, 2], :])
self.assertRaises(ValueError, simple.drop, 5)
self.assertRaises(ValueError, simple.drop, 'C', 1)
self.assertRaises(ValueError, simple.drop, [1, 5])
self.assertRaises(ValueError, simple.drop, ['A', 'C'], 1)
# errors = 'ignore'
assert_frame_equal(simple.drop(5, errors='ignore'), simple)
assert_frame_equal(simple.drop([0, 5], errors='ignore'),
simple.ix[[1, 2, 3], :])
assert_frame_equal(simple.drop('C', axis=1, errors='ignore'), simple)
assert_frame_equal(simple.drop(['A', 'C'], axis=1, errors='ignore'),
simple[['B']])
# non-unique - wheee!
nu_df = DataFrame(lzip(range(3), range(-3, 1), list('abc')),
columns=['a', 'a', 'b'])
assert_frame_equal(nu_df.drop('a', axis=1), nu_df[['b']])
assert_frame_equal(nu_df.drop('b', axis='columns'), nu_df['a'])
nu_df = nu_df.set_index(pd.Index(['X', 'Y', 'X']))
nu_df.columns = list('abc')
assert_frame_equal(nu_df.drop('X', axis='rows'), nu_df.ix[["Y"], :])
assert_frame_equal(nu_df.drop(['X', 'Y'], axis=0), nu_df.ix[[], :])
# inplace cache issue
# GH 5628
df = pd.DataFrame(np.random.randn(10, 3), columns=list('abc'))
expected = df[~(df.b > 0)]
df.drop(labels=df[df.b > 0].index, inplace=True)
assert_frame_equal(df, expected)
def test_drop_multiindex_not_lexsorted(self):
# GH 11640
# define the lexsorted version
lexsorted_mi = MultiIndex.from_tuples(
[('a', ''), ('b1', 'c1'), ('b2', 'c2')], names=['b', 'c'])
lexsorted_df = DataFrame([[1, 3, 4]], columns=lexsorted_mi)
self.assertTrue(lexsorted_df.columns.is_lexsorted())
# define the non-lexsorted version
not_lexsorted_df = DataFrame(columns=['a', 'b', 'c', 'd'],
data=[[1, 'b1', 'c1', 3],
[1, 'b2', 'c2', 4]])
not_lexsorted_df = not_lexsorted_df.pivot_table(
index='a', columns=['b', 'c'], values='d')
not_lexsorted_df = not_lexsorted_df.reset_index()
self.assertFalse(not_lexsorted_df.columns.is_lexsorted())
# compare the results
tm.assert_frame_equal(lexsorted_df, not_lexsorted_df)
expected = lexsorted_df.drop('a', axis=1)
with tm.assert_produces_warning(PerformanceWarning):
result = not_lexsorted_df.drop('a', axis=1)
tm.assert_frame_equal(result, expected)
def test_merge_join_different_levels(self):
# GH 9455
# first dataframe
df1 = DataFrame(columns=['a', 'b'], data=[[1, 11], [0, 22]])
# second dataframe
columns = MultiIndex.from_tuples([('a', ''), ('c', 'c1')])
df2 = DataFrame(columns=columns, data=[[1, 33], [0, 44]])
# merge
columns = ['a', 'b', ('c', 'c1')]
expected = DataFrame(columns=columns, data=[[1, 11, 33], [0, 22, 44]])
with tm.assert_produces_warning(UserWarning):
result = pd.merge(df1, df2, on='a')
tm.assert_frame_equal(result, expected)
# join, see discussion in GH 12219
columns = ['a', 'b', ('a', ''), ('c', 'c1')]
expected = DataFrame(columns=columns,
data=[[1, 11, 0, 44], [0, 22, 1, 33]])
with tm.assert_produces_warning(UserWarning):
result = df1.join(df2, on='a')
tm.assert_frame_equal(result, expected)
def test_reindex(self):
newFrame = self.frame.reindex(self.ts1.index)
for col in newFrame.columns:
for idx, val in compat.iteritems(newFrame[col]):
if idx in self.frame.index:
if np.isnan(val):
self.assertTrue(np.isnan(self.frame[col][idx]))
else:
self.assertEqual(val, self.frame[col][idx])
else:
self.assertTrue(np.isnan(val))
for col, series in compat.iteritems(newFrame):
self.assertTrue(tm.equalContents(series.index, newFrame.index))
emptyFrame = self.frame.reindex(Index([]))
self.assertEqual(len(emptyFrame.index), 0)
# Cython code should be unit-tested directly
nonContigFrame = self.frame.reindex(self.ts1.index[::2])
for col in nonContigFrame.columns:
for idx, val in compat.iteritems(nonContigFrame[col]):
if idx in self.frame.index:
if np.isnan(val):
self.assertTrue(np.isnan(self.frame[col][idx]))
else:
self.assertEqual(val, self.frame[col][idx])
else:
self.assertTrue(np.isnan(val))
for col, series in compat.iteritems(nonContigFrame):
self.assertTrue(tm.equalContents(series.index,
nonContigFrame.index))
# corner cases
# Same index, copies values but not index if copy=False
newFrame = self.frame.reindex(self.frame.index, copy=False)
self.assertIs(newFrame.index, self.frame.index)
# length zero
newFrame = self.frame.reindex([])
self.assertTrue(newFrame.empty)
self.assertEqual(len(newFrame.columns), len(self.frame.columns))
# length zero with columns reindexed with non-empty index
newFrame = self.frame.reindex([])
newFrame = newFrame.reindex(self.frame.index)
self.assertEqual(len(newFrame.index), len(self.frame.index))
self.assertEqual(len(newFrame.columns), len(self.frame.columns))
# pass non-Index
newFrame = self.frame.reindex(list(self.ts1.index))
self.assertTrue(newFrame.index.equals(self.ts1.index))
# copy with no axes
result = self.frame.reindex()
assert_frame_equal(result, self.frame)
self.assertFalse(result is self.frame)
def test_reindex_nan(self):
df = pd.DataFrame([[1, 2], [3, 5], [7, 11], [9, 23]],
index=[2, np.nan, 1, 5],
columns=['joe', 'jim'])
i, j = [np.nan, 5, 5, np.nan, 1, 2, np.nan], [1, 3, 3, 1, 2, 0, 1]
assert_frame_equal(df.reindex(i), df.iloc[j])
df.index = df.index.astype('object')
assert_frame_equal(df.reindex(i), df.iloc[j], check_index_type=False)
# GH10388
df = pd.DataFrame({'other': ['a', 'b', np.nan, 'c'],
'date': ['2015-03-22', np.nan,
'2012-01-08', np.nan],
'amount': [2, 3, 4, 5]})
df['date'] = pd.to_datetime(df.date)
df['delta'] = (pd.to_datetime('2015-06-18') - df['date']).shift(1)
left = df.set_index(['delta', 'other', 'date']).reset_index()
right = df.reindex(columns=['delta', 'other', 'date', 'amount'])
assert_frame_equal(left, right)
def test_reindex_name_remains(self):
s = Series(random.rand(10))
df = DataFrame(s, index=np.arange(len(s)))
i = Series(np.arange(10), name='iname')
df = df.reindex(i)
self.assertEqual(df.index.name, 'iname')
df = df.reindex(Index(np.arange(10), name='tmpname'))
self.assertEqual(df.index.name, 'tmpname')
s = Series(random.rand(10))
df = DataFrame(s.T, index=np.arange(len(s)))
i = Series(np.arange(10), name='iname')
df = df.reindex(columns=i)
self.assertEqual(df.columns.name, 'iname')
def test_reindex_int(self):
smaller = self.intframe.reindex(self.intframe.index[::2])
self.assertEqual(smaller['A'].dtype, np.int64)
bigger = smaller.reindex(self.intframe.index)
self.assertEqual(bigger['A'].dtype, np.float64)
smaller = self.intframe.reindex(columns=['A', 'B'])
self.assertEqual(smaller['A'].dtype, np.int64)
def test_reindex_like(self):
other = self.frame.reindex(index=self.frame.index[:10],
columns=['C', 'B'])
assert_frame_equal(other, self.frame.reindex_like(other))
def test_reindex_columns(self):
newFrame = self.frame.reindex(columns=['A', 'B', 'E'])
assert_series_equal(newFrame['B'], self.frame['B'])
self.assertTrue(np.isnan(newFrame['E']).all())
self.assertNotIn('C', newFrame)
# length zero
newFrame = self.frame.reindex(columns=[])
self.assertTrue(newFrame.empty)
def test_reindex_axes(self):
# GH 3317, reindexing by both axes loses freq of the index
df = DataFrame(np.ones((3, 3)),
index=[datetime(2012, 1, 1),
datetime(2012, 1, 2),
datetime(2012, 1, 3)],
columns=['a', 'b', 'c'])
time_freq = date_range('2012-01-01', '2012-01-03', freq='d')
some_cols = ['a', 'b']
index_freq = df.reindex(index=time_freq).index.freq
both_freq = df.reindex(index=time_freq, columns=some_cols).index.freq
seq_freq = df.reindex(index=time_freq).reindex(
columns=some_cols).index.freq
self.assertEqual(index_freq, both_freq)
self.assertEqual(index_freq, seq_freq)
def test_reindex_fill_value(self):
df = DataFrame(np.random.randn(10, 4))
# axis=0
result = df.reindex(lrange(15))
self.assertTrue(np.isnan(result.values[-5:]).all())
result = df.reindex(lrange(15), fill_value=0)
expected = df.reindex(lrange(15)).fillna(0)
assert_frame_equal(result, expected)
# axis=1
result = df.reindex(columns=lrange(5), fill_value=0.)
expected = df.copy()
expected[4] = 0.
assert_frame_equal(result, expected)
result = df.reindex(columns=lrange(5), fill_value=0)
expected = df.copy()
expected[4] = 0
assert_frame_equal(result, expected)
result = df.reindex(columns=lrange(5), fill_value='foo')
expected = df.copy()
expected[4] = 'foo'
assert_frame_equal(result, expected)
# reindex_axis
result = df.reindex_axis(lrange(15), fill_value=0., axis=0)
expected = df.reindex(lrange(15)).fillna(0)
assert_frame_equal(result, expected)
result = df.reindex_axis(lrange(5), fill_value=0., axis=1)
expected = df.reindex(columns=lrange(5)).fillna(0)
assert_frame_equal(result, expected)
# other dtypes
df['foo'] = 'foo'
result = df.reindex(lrange(15), fill_value=0)
expected = df.reindex(lrange(15)).fillna(0)
assert_frame_equal(result, expected)
def test_reindex_dups(self):
# GH4746, reindex on duplicate index error messages
arr = np.random.randn(10)
df = DataFrame(arr, index=[1, 2, 3, 4, 5, 1, 2, 3, 4, 5])
# set index is ok
result = df.copy()
result.index = list(range(len(df)))
expected = DataFrame(arr, index=list(range(len(df))))
assert_frame_equal(result, expected)
# reindex fails
self.assertRaises(ValueError, df.reindex, index=list(range(len(df))))
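# Illustrative note (an addition, not part of the original test module): as
# the test above shows, reindexing a frame whose index contains duplicates
# raises ValueError; the supported pattern is to assign a new index
# directly, e.g.
#
#   df.index = list(range(len(df)))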
def test_align(self):
af, bf = self.frame.align(self.frame)
self.assertIsNot(af._data, self.frame._data)
af, bf = self.frame.align(self.frame, copy=False)
self.assertIs(af._data, self.frame._data)
# axis = 0
other = self.frame.ix[:-5, :3]
af, bf = self.frame.align(other, axis=0, fill_value=-1)
self.assertTrue(bf.columns.equals(other.columns))
# test fill value
join_idx = self.frame.index.join(other.index)
diff_a = self.frame.index.difference(join_idx)
diff_b = other.index.difference(join_idx)
diff_a_vals = af.reindex(diff_a).values
diff_b_vals = bf.reindex(diff_b).values
self.assertTrue((diff_a_vals == -1).all())
af, bf = self.frame.align(other, join='right', axis=0)
self.assertTrue(bf.columns.equals(other.columns))
self.assertTrue(bf.index.equals(other.index))
self.assertTrue(af.index.equals(other.index))
# axis = 1
other = self.frame.ix[:-5, :3].copy()
af, bf = self.frame.align(other, axis=1)
self.assertTrue(bf.columns.equals(self.frame.columns))
self.assertTrue(bf.index.equals(other.index))
# test fill value
join_idx = self.frame.index.join(other.index)
diff_a = self.frame.index.difference(join_idx)
diff_b = other.index.difference(join_idx)
diff_a_vals = af.reindex(diff_a).values
# TODO(wesm): unused?
diff_b_vals = bf.reindex(diff_b).values # noqa
self.assertTrue((diff_a_vals == -1).all())
af, bf = self.frame.align(other, join='inner', axis=1)
self.assertTrue(bf.columns.equals(other.columns))
af, bf = self.frame.align(other, join='inner', axis=1, method='pad')
self.assertTrue(bf.columns.equals(other.columns))
# test other non-float types
af, bf = self.intframe.align(other, join='inner', axis=1, method='pad')
self.assertTrue(bf.columns.equals(other.columns))
af, bf = self.mixed_frame.align(self.mixed_frame,
join='inner', axis=1, method='pad')
self.assertTrue(bf.columns.equals(self.mixed_frame.columns))
af, bf = self.frame.align(other.ix[:, 0], join='inner', axis=1,
method=None, fill_value=None)
self.assertTrue(bf.index.equals(Index([])))
af, bf = self.frame.align(other.ix[:, 0], join='inner', axis=1,
method=None, fill_value=0)
self.assertTrue(bf.index.equals(Index([])))
# mixed floats/ints
af, bf = self.mixed_float.align(other.ix[:, 0], join='inner', axis=1,
method=None, fill_value=0)
self.assertTrue(bf.index.equals(Index([])))
af, bf = self.mixed_int.align(other.ix[:, 0], join='inner', axis=1,
method=None, fill_value=0)
self.assertTrue(bf.index.equals(Index([])))
# try to align dataframe to series along bad axis
self.assertRaises(ValueError, self.frame.align, af.ix[0, :3],
join='inner', axis=2)
# align dataframe to series with broadcast or not
idx = self.frame.index
s = Series(range(len(idx)), index=idx)
left, right = self.frame.align(s, axis=0)
tm.assert_index_equal(left.index, self.frame.index)
tm.assert_index_equal(right.index, self.frame.index)
self.assertTrue(isinstance(right, Series))
left, right = self.frame.align(s, broadcast_axis=1)
tm.assert_index_equal(left.index, self.frame.index)
expected = {}
for c in self.frame.columns:
expected[c] = s
expected = DataFrame(expected, index=self.frame.index,
columns=self.frame.columns)
assert_frame_equal(right, expected)
# GH 9558
df = DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]})
result = df[df['a'] == 2]
expected = DataFrame([[2, 5]], index=[1], columns=['a', 'b'])
assert_frame_equal(result, expected)
result = df.where(df['a'] == 2, 0)
expected = DataFrame({'a': [0, 2, 0], 'b': [0, 5, 0]})
assert_frame_equal(result, expected)
def _check_align(self, a, b, axis, fill_axis, how, method, limit=None):
aa, ab = a.align(b, axis=axis, join=how, method=method, limit=limit,
fill_axis=fill_axis)
join_index, join_columns = None, None
ea, eb = a, b
if axis is None or axis == 0:
join_index = a.index.join(b.index, how=how)
ea = ea.reindex(index=join_index)
eb = eb.reindex(index=join_index)
if axis is None or axis == 1:
join_columns = a.columns.join(b.columns, how=how)
ea = ea.reindex(columns=join_columns)
eb = eb.reindex(columns=join_columns)
ea = ea.fillna(axis=fill_axis, method=method, limit=limit)
eb = eb.fillna(axis=fill_axis, method=method, limit=limit)
assert_frame_equal(aa, ea)
assert_frame_equal(ab, eb)
def test_align_fill_method_inner(self):
for meth in ['pad', 'bfill']:
for ax in [0, 1, None]:
for fax in [0, 1]:
self._check_align_fill('inner', meth, ax, fax)
def test_align_fill_method_outer(self):
for meth in ['pad', 'bfill']:
for ax in [0, 1, None]:
for fax in [0, 1]:
self._check_align_fill('outer', meth, ax, fax)
def test_align_fill_method_left(self):
for meth in ['pad', 'bfill']:
for ax in [0, 1, None]:
for fax in [0, 1]:
self._check_align_fill('left', meth, ax, fax)
def test_align_fill_method_right(self):
for meth in ['pad', 'bfill']:
for ax in [0, 1, None]:
for fax in [0, 1]:
self._check_align_fill('right', meth, ax, fax)
def _check_align_fill(self, kind, meth, ax, fax):
left = self.frame.ix[0:4, :10]
right = self.frame.ix[2:, 6:]
empty = self.frame.ix[:0, :0]
self._check_align(left, right, axis=ax, fill_axis=fax,
how=kind, method=meth)
self._check_align(left, right, axis=ax, fill_axis=fax,
how=kind, method=meth, limit=1)
# empty left
self._check_align(empty, right, axis=ax, fill_axis=fax,
how=kind, method=meth)
self._check_align(empty, right, axis=ax, fill_axis=fax,
how=kind, method=meth, limit=1)
# empty right
self._check_align(left, empty, axis=ax, fill_axis=fax,
how=kind, method=meth)
self._check_align(left, empty, axis=ax, fill_axis=fax,
how=kind, method=meth, limit=1)
# both empty
self._check_align(empty, empty, axis=ax, fill_axis=fax,
how=kind, method=meth)
self._check_align(empty, empty, axis=ax, fill_axis=fax,
how=kind, method=meth, limit=1)
def test_align_int_fill_bug(self):
# GH #910
X = np.arange(10 * 10, dtype='float64').reshape(10, 10)
Y = np.ones((10, 1), dtype=int)
df1 = DataFrame(X)
df1['0.X'] = Y.squeeze()
df2 = df1.astype(float)
result = df1 - df1.mean()
expected = df2 - df2.mean()
assert_frame_equal(result, expected)
def test_align_multiindex(self):
# GH 10665
# same test cases as test_align_multiindex in test_series.py
midx = pd.MultiIndex.from_product([range(2), range(3), range(2)],
names=('a', 'b', 'c'))
idx = pd.Index(range(2), name='b')
df1 = pd.DataFrame(np.arange(12, dtype='int64'), index=midx)
df2 = pd.DataFrame(np.arange(2, dtype='int64'), index=idx)
# these must be the same results (but flipped)
res1l, res1r = df1.align(df2, join='left')
res2l, res2r = df2.align(df1, join='right')
expl = df1
assert_frame_equal(expl, res1l)
assert_frame_equal(expl, res2r)
expr = pd.DataFrame([0, 0, 1, 1, np.nan, np.nan] * 2, index=midx)
assert_frame_equal(expr, res1r)
assert_frame_equal(expr, res2l)
res1l, res1r = df1.align(df2, join='right')
res2l, res2r = df2.align(df1, join='left')
exp_idx = pd.MultiIndex.from_product([range(2), range(2), range(2)],
names=('a', 'b', 'c'))
expl = pd.DataFrame([0, 1, 2, 3, 6, 7, 8, 9], index=exp_idx)
assert_frame_equal(expl, res1l)
assert_frame_equal(expl, res2r)
expr = pd.DataFrame([0, 0, 1, 1] * 2, index=exp_idx)
assert_frame_equal(expr, res1r)
assert_frame_equal(expr, res2l)
def test_align_series_combinations(self):
df = pd.DataFrame({'a': [1, 3, 5],
'b': [1, 3, 5]}, index=list('ACE'))
s = pd.Series([1, 2, 4], index=list('ABD'), name='x')
# frame + series
res1, res2 = df.align(s, axis=0)
exp1 = pd.DataFrame({'a': [1, np.nan, 3, np.nan, 5],
'b': [1, np.nan, 3, np.nan, 5]},
index=list('ABCDE'))
exp2 = pd.Series([1, 2, np.nan, 4, np.nan],
index=list('ABCDE'), name='x')
tm.assert_frame_equal(res1, exp1)
tm.assert_series_equal(res2, exp2)
# series + frame
res1, res2 = s.align(df)
tm.assert_series_equal(res1, exp2)
tm.assert_frame_equal(res2, exp1)
def test_filter(self):
# items
filtered = self.frame.filter(['A', 'B', 'E'])
self.assertEqual(len(filtered.columns), 2)
self.assertNotIn('E', filtered)
filtered = self.frame.filter(['A', 'B', 'E'], axis='columns')
self.assertEqual(len(filtered.columns), 2)
self.assertNotIn('E', filtered)
# other axis
idx = self.frame.index[0:4]
filtered = self.frame.filter(idx, axis='index')
expected = self.frame.reindex(index=idx)
assert_frame_equal(filtered, expected)
# like
fcopy = self.frame.copy()
fcopy['AA'] = 1
filtered = fcopy.filter(like='A')
self.assertEqual(len(filtered.columns), 2)
self.assertIn('AA', filtered)
# like with ints in column names
df = DataFrame(0., index=[0, 1, 2], columns=[0, 1, '_A', '_B'])
filtered = df.filter(like='_')
self.assertEqual(len(filtered.columns), 2)
# regex with ints in column names
# from PR #10384
df = DataFrame(0., index=[0, 1, 2], columns=['A1', 1, 'B', 2, 'C'])
expected = DataFrame(
0., index=[0, 1, 2], columns=pd.Index([1, 2], dtype=object))
filtered = df.filter(regex='^[0-9]+$')
assert_frame_equal(filtered, expected)
expected = DataFrame(0., index=[0, 1, 2], columns=[0, '0', 1, '1'])
# shouldn't remove anything
filtered = expected.filter(regex='^[0-9]+$')
assert_frame_equal(filtered, expected)
# pass in None
with assertRaisesRegexp(TypeError, 'Must pass'):
self.frame.filter(items=None)
# objects
filtered = self.mixed_frame.filter(like='foo')
self.assertIn('foo', filtered)
# unicode columns, won't ascii-encode
df = self.frame.rename(columns={'B': u('\u2202')})
filtered = df.filter(like='C')
self.assertTrue('C' in filtered)
def test_filter_regex_search(self):
fcopy = self.frame.copy()
fcopy['AA'] = 1
# regex
filtered = fcopy.filter(regex='[A]+')
self.assertEqual(len(filtered.columns), 2)
self.assertIn('AA', filtered)
# doesn't have to be at beginning
df = DataFrame({'aBBa': [1, 2],
'BBaBB': [1, 2],
'aCCa': [1, 2],
'aCCaBB': [1, 2]})
result = df.filter(regex='BB')
exp = df[[x for x in df.columns if 'BB' in x]]
assert_frame_equal(result, exp)
def test_filter_corner(self):
empty = DataFrame()
result = empty.filter([])
assert_frame_equal(result, empty)
result = empty.filter(like='foo')
assert_frame_equal(result, empty)
def test_select(self):
f = lambda x: x.weekday() == 2
result = self.tsframe.select(f, axis=0)
expected = self.tsframe.reindex(
index=self.tsframe.index[[f(x) for x in self.tsframe.index]])
assert_frame_equal(result, expected)
result = self.frame.select(lambda x: x in ('B', 'D'), axis=1)
expected = self.frame.reindex(columns=['B', 'D'])
# TODO should reindex check_names?
assert_frame_equal(result, expected, check_names=False)
def test_take(self):
# homogeneous
order = [3, 1, 2, 0]
for df in [self.frame]:
result = df.take(order, axis=0)
expected = df.reindex(df.index.take(order))
assert_frame_equal(result, expected)
# axis = 1
result = df.take(order, axis=1)
expected = df.ix[:, ['D', 'B', 'C', 'A']]
assert_frame_equal(result, expected, check_names=False)
        # negative indices
order = [2, 1, -1]
for df in [self.frame]:
result = df.take(order, axis=0)
expected = df.reindex(df.index.take(order))
assert_frame_equal(result, expected)
# axis = 1
result = df.take(order, axis=1)
expected = df.ix[:, ['C', 'B', 'D']]
assert_frame_equal(result, expected, check_names=False)
# illegal indices
self.assertRaises(IndexError, df.take, [3, 1, 2, 30], axis=0)
self.assertRaises(IndexError, df.take, [3, 1, 2, -31], axis=0)
self.assertRaises(IndexError, df.take, [3, 1, 2, 5], axis=1)
self.assertRaises(IndexError, df.take, [3, 1, 2, -5], axis=1)
# mixed-dtype
order = [4, 1, 2, 0, 3]
for df in [self.mixed_frame]:
result = df.take(order, axis=0)
expected = df.reindex(df.index.take(order))
assert_frame_equal(result, expected)
# axis = 1
result = df.take(order, axis=1)
expected = df.ix[:, ['foo', 'B', 'C', 'A', 'D']]
assert_frame_equal(result, expected)
        # negative indices
order = [4, 1, -2]
for df in [self.mixed_frame]:
result = df.take(order, axis=0)
expected = df.reindex(df.index.take(order))
assert_frame_equal(result, expected)
# axis = 1
result = df.take(order, axis=1)
expected = df.ix[:, ['foo', 'B', 'D']]
assert_frame_equal(result, expected)
# by dtype
order = [1, 2, 0, 3]
for df in [self.mixed_float, self.mixed_int]:
result = df.take(order, axis=0)
expected = df.reindex(df.index.take(order))
assert_frame_equal(result, expected)
# axis = 1
result = df.take(order, axis=1)
expected = df.ix[:, ['B', 'C', 'A', 'D']]
assert_frame_equal(result, expected)
def test_reindex_boolean(self):
frame = DataFrame(np.ones((10, 2), dtype=bool),
index=np.arange(0, 20, 2),
columns=[0, 2])
reindexed = frame.reindex(np.arange(10))
self.assertEqual(reindexed.values.dtype, np.object_)
self.assertTrue(isnull(reindexed[0][1]))
reindexed = frame.reindex(columns=lrange(3))
self.assertEqual(reindexed.values.dtype, np.object_)
self.assertTrue(isnull(reindexed[1]).all())
def test_reindex_objects(self):
reindexed = self.mixed_frame.reindex(columns=['foo', 'A', 'B'])
self.assertIn('foo', reindexed)
reindexed = self.mixed_frame.reindex(columns=['A', 'B'])
self.assertNotIn('foo', reindexed)
def test_reindex_corner(self):
index = Index(['a', 'b', 'c'])
dm = self.empty.reindex(index=[1, 2, 3])
reindexed = dm.reindex(columns=index)
self.assertTrue(reindexed.columns.equals(index))
# ints are weird
smaller = self.intframe.reindex(columns=['A', 'B', 'E'])
self.assertEqual(smaller['E'].dtype, np.float64)
def test_reindex_axis(self):
cols = ['A', 'B', 'E']
reindexed1 = self.intframe.reindex_axis(cols, axis=1)
reindexed2 = self.intframe.reindex(columns=cols)
assert_frame_equal(reindexed1, reindexed2)
rows = self.intframe.index[0:5]
reindexed1 = self.intframe.reindex_axis(rows, axis=0)
reindexed2 = self.intframe.reindex(index=rows)
assert_frame_equal(reindexed1, reindexed2)
self.assertRaises(ValueError, self.intframe.reindex_axis, rows, axis=2)
# no-op case
cols = self.frame.columns.copy()
newFrame = self.frame.reindex_axis(cols, axis=1)
assert_frame_equal(newFrame, self.frame)
def test_reindex_with_nans(self):
df = DataFrame([[1, 2], [3, 4], [np.nan, np.nan], [7, 8], [9, 10]],
columns=['a', 'b'],
index=[100.0, 101.0, np.nan, 102.0, 103.0])
result = df.reindex(index=[101.0, 102.0, 103.0])
expected = df.iloc[[1, 3, 4]]
assert_frame_equal(result, expected)
result = df.reindex(index=[103.0])
expected = df.iloc[[4]]
assert_frame_equal(result, expected)
result = df.reindex(index=[101.0])
expected = df.iloc[[1]]
assert_frame_equal(result, expected)
def test_reindex_multi(self):
df = DataFrame(np.random.randn(3, 3))
result = df.reindex(lrange(4), lrange(4))
expected = df.reindex(lrange(4)).reindex(columns=lrange(4))
assert_frame_equal(result, expected)
df = DataFrame(np.random.randint(0, 10, (3, 3)))
result = df.reindex(lrange(4), lrange(4))
expected = df.reindex(lrange(4)).reindex(columns=lrange(4))
assert_frame_equal(result, expected)
df = DataFrame(np.random.randint(0, 10, (3, 3)))
result = df.reindex(lrange(2), lrange(2))
expected = df.reindex(lrange(2)).reindex(columns=lrange(2))
assert_frame_equal(result, expected)
df = DataFrame(np.random.randn(5, 3) + 1j, columns=['a', 'b', 'c'])
result = df.reindex(index=[0, 1], columns=['a', 'b'])
expected = df.reindex([0, 1]).reindex(columns=['a', 'b'])
assert_frame_equal(result, expected)
| mit |
jls713/jfactors | flattened/ret2_hists.py | 1 | 1039 | ## Generates Fig. 9 of SEG (2016)
## ============================================================================
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
## ============================================================================
## 1. load in data from the three assumptions
data,datama,datasj=np.genfromtxt('triaxial_results/ReticulumII_nop_hr'),np.genfromtxt('triaxial_results/ReticulumII_ma_hr'),np.genfromtxt('triaxial_results/ReticulumII_sj_hr')
## 2. Use seaborn KDE plot
sns.kdeplot(data.T[4],shade=True,label='Uniform (U)')
sns.kdeplot(datama.T[4],shade=True,ls='dotted',label='Major axis (R)')
sns.kdeplot(datasj.T[4],shade=True,ls='dashed',label='SJ 2016 (T)')
plt.xlabel(r'$\mathcal{F}_\mathrm{J}$')
plt.ylabel(r'$\mathrm{d}N/\mathrm{d}\mathcal{F}_\mathrm{J}$')
plt.xlim(-.5,1.)
l=plt.axvline(0.,ls='dashed',alpha=0.5,color='k')
l.set_dashes((2,1))
plt.legend()
plt.savefig('RetII_hr.pdf',bbox_inches='tight')
## ============================================================================
| mit |
fabianp/scikit-learn | examples/svm/plot_custom_kernel.py | 115 | 1546 | """
======================
SVM with custom kernel
======================
Simple usage of Support Vector Machines to classify a sample. It will
plot the decision surface and the support vectors.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
Y = iris.target
def my_kernel(x, y):
"""
We create a custom kernel:
(2 0)
k(x, y) = x ( ) y.T
(0 1)
"""
M = np.array([[2, 0], [0, 1.0]])
return np.dot(np.dot(x, M), y.T)
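# Added sanity check (illustrative sketch, not part of the original example):
# with x = [1, 2] and y = [3, 4], the kernel above gives
# k(x, y) = 2*1*3 + 1*2*4 = 14. The _demo names exist only for this check.
_x_demo = np.array([[1., 2.]])
_y_demo = np.array([[3., 4.]])
assert np.isclose(my_kernel(_x_demo, _y_demo)[0, 0], 14.0)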
h = .02 # step size in the mesh
# we create an instance of SVM and fit out data.
clf = svm.SVC(kernel=my_kernel)
clf.fit(X, Y)
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, m_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.pcolormesh(xx, yy, Z, cmap=plt.cm.Paired)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)
plt.title('3-Class classification using Support Vector Machine with custom'
' kernel')
plt.axis('tight')
plt.show()
| bsd-3-clause |
thilbern/scikit-learn | sklearn/tests/test_pipeline.py | 17 | 12512 | """
Test the pipeline module.
"""
import numpy as np
from scipy import sparse
from sklearn.externals.six.moves import zip
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.base import BaseEstimator, clone
from sklearn.pipeline import Pipeline, FeatureUnion, make_pipeline, make_union
from sklearn.svm import SVC
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import LinearRegression
from sklearn.feature_selection import SelectKBest, f_classif
from sklearn.decomposition import PCA, RandomizedPCA, TruncatedSVD
from sklearn.datasets import load_iris
from sklearn.preprocessing import StandardScaler
from sklearn.feature_extraction.text import CountVectorizer
JUNK_FOOD_DOCS = (
"the pizza pizza beer copyright",
"the pizza burger beer copyright",
"the the pizza beer beer copyright",
"the burger beer beer copyright",
"the coke burger coke copyright",
"the coke burger burger",
)
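# Added note: JUNK_FOOD_DOCS is a small toy corpus; the FeatureUnion and
# CountVectorizer tests further down use it as cheap text input.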
class IncorrectT(BaseEstimator):
"""Small class to test parameter dispatching.
"""
def __init__(self, a=None, b=None):
self.a = a
self.b = b
class T(IncorrectT):
def fit(self, X, y):
return self
class TransfT(T):
def transform(self, X, y=None):
return X
class FitParamT(BaseEstimator):
"""Mock classifier
"""
def __init__(self):
self.successful = False
pass
def fit(self, X, y, should_succeed=False):
self.successful = should_succeed
def predict(self, X):
return self.successful
def test_pipeline_init():
""" Test the various init parameters of the pipeline.
"""
assert_raises(TypeError, Pipeline)
# Check that we can't instantiate pipelines with objects without fit
# method
pipe = assert_raises(TypeError, Pipeline, [('svc', IncorrectT)])
# Smoke test with only an estimator
clf = T()
pipe = Pipeline([('svc', clf)])
assert_equal(pipe.get_params(deep=True),
dict(svc__a=None, svc__b=None, svc=clf))
# Check that params are set
pipe.set_params(svc__a=0.1)
assert_equal(clf.a, 0.1)
# Smoke test the repr:
repr(pipe)
# Test with two objects
clf = SVC()
filter1 = SelectKBest(f_classif)
pipe = Pipeline([('anova', filter1), ('svc', clf)])
# Check that we can't use the same stage name twice
assert_raises(ValueError, Pipeline, [('svc', SVC()), ('svc', SVC())])
# Check that params are set
pipe.set_params(svc__C=0.1)
assert_equal(clf.C, 0.1)
# Smoke test the repr:
repr(pipe)
# Check that params are not set when naming them wrong
assert_raises(ValueError, pipe.set_params, anova__C=0.1)
# Test clone
pipe2 = clone(pipe)
assert_false(pipe.named_steps['svc'] is pipe2.named_steps['svc'])
# Check that apart from estimators, the parameters are the same
params = pipe.get_params()
params2 = pipe2.get_params()
    # Remove estimators that were copied
params.pop('svc')
params.pop('anova')
params2.pop('svc')
params2.pop('anova')
assert_equal(params, params2)
def test_pipeline_methods_anova():
""" Test the various methods of the pipeline (anova).
"""
iris = load_iris()
X = iris.data
y = iris.target
# Test with Anova + LogisticRegression
clf = LogisticRegression()
filter1 = SelectKBest(f_classif, k=2)
pipe = Pipeline([('anova', filter1), ('logistic', clf)])
pipe.fit(X, y)
pipe.predict(X)
pipe.predict_proba(X)
pipe.predict_log_proba(X)
pipe.score(X, y)
def test_pipeline_fit_params():
"""Test that the pipeline can take fit parameters
"""
pipe = Pipeline([('transf', TransfT()), ('clf', FitParamT())])
pipe.fit(X=None, y=None, clf__should_succeed=True)
# classifier should return True
assert_true(pipe.predict(None))
# and transformer params should not be changed
assert_true(pipe.named_steps['transf'].a is None)
assert_true(pipe.named_steps['transf'].b is None)
def test_pipeline_methods_pca_svm():
"""Test the various methods of the pipeline (pca + svm)."""
iris = load_iris()
X = iris.data
y = iris.target
# Test with PCA + SVC
clf = SVC(probability=True, random_state=0)
pca = PCA(n_components='mle', whiten=True)
pipe = Pipeline([('pca', pca), ('svc', clf)])
pipe.fit(X, y)
pipe.predict(X)
pipe.predict_proba(X)
pipe.predict_log_proba(X)
pipe.score(X, y)
def test_pipeline_methods_preprocessing_svm():
"""Test the various methods of the pipeline (preprocessing + svm)."""
iris = load_iris()
X = iris.data
y = iris.target
n_samples = X.shape[0]
n_classes = len(np.unique(y))
scaler = StandardScaler()
pca = RandomizedPCA(n_components=2, whiten=True)
clf = SVC(probability=True, random_state=0)
for preprocessing in [scaler, pca]:
pipe = Pipeline([('preprocess', preprocessing), ('svc', clf)])
pipe.fit(X, y)
# check shapes of various prediction functions
predict = pipe.predict(X)
assert_equal(predict.shape, (n_samples,))
proba = pipe.predict_proba(X)
assert_equal(proba.shape, (n_samples, n_classes))
log_proba = pipe.predict_log_proba(X)
assert_equal(log_proba.shape, (n_samples, n_classes))
decision_function = pipe.decision_function(X)
assert_equal(decision_function.shape, (n_samples, n_classes))
pipe.score(X, y)
def test_feature_union():
# basic sanity check for feature union
iris = load_iris()
X = iris.data
X -= X.mean(axis=0)
y = iris.target
svd = TruncatedSVD(n_components=2, random_state=0)
select = SelectKBest(k=1)
fs = FeatureUnion([("svd", svd), ("select", select)])
fs.fit(X, y)
X_transformed = fs.transform(X)
assert_equal(X_transformed.shape, (X.shape[0], 3))
# check if it does the expected thing
assert_array_almost_equal(X_transformed[:, :-1], svd.fit_transform(X))
assert_array_equal(X_transformed[:, -1],
select.fit_transform(X, y).ravel())
# test if it also works for sparse input
# We use a different svd object to control the random_state stream
fs = FeatureUnion([("svd", svd), ("select", select)])
X_sp = sparse.csr_matrix(X)
X_sp_transformed = fs.fit_transform(X_sp, y)
assert_array_almost_equal(X_transformed, X_sp_transformed.toarray())
# test setting parameters
fs.set_params(select__k=2)
assert_equal(fs.fit_transform(X, y).shape, (X.shape[0], 4))
# test it works with transformers missing fit_transform
fs = FeatureUnion([("mock", TransfT()), ("svd", svd), ("select", select)])
X_transformed = fs.fit_transform(X, y)
assert_equal(X_transformed.shape, (X.shape[0], 8))
def test_make_union():
pca = PCA()
mock = TransfT()
fu = make_union(pca, mock)
names, transformers = zip(*fu.transformer_list)
assert_equal(names, ("pca", "transft"))
assert_equal(transformers, (pca, mock))
def test_pipeline_transform():
# Test whether pipeline works with a transformer at the end.
# Also test pipeline.transform and pipeline.inverse_transform
iris = load_iris()
X = iris.data
pca = PCA(n_components=2)
pipeline = Pipeline([('pca', pca)])
# test transform and fit_transform:
X_trans = pipeline.fit(X).transform(X)
X_trans2 = pipeline.fit_transform(X)
X_trans3 = pca.fit_transform(X)
assert_array_almost_equal(X_trans, X_trans2)
assert_array_almost_equal(X_trans, X_trans3)
X_back = pipeline.inverse_transform(X_trans)
X_back2 = pca.inverse_transform(X_trans)
assert_array_almost_equal(X_back, X_back2)
def test_pipeline_fit_transform():
# Test whether pipeline works with a transformer missing fit_transform
iris = load_iris()
X = iris.data
y = iris.target
transft = TransfT()
pipeline = Pipeline([('mock', transft)])
# test fit_transform:
X_trans = pipeline.fit_transform(X, y)
X_trans2 = transft.fit(X, y).transform(X)
assert_array_almost_equal(X_trans, X_trans2)
def test_make_pipeline():
t1 = TransfT()
t2 = TransfT()
pipe = make_pipeline(t1, t2)
assert_true(isinstance(pipe, Pipeline))
assert_equal(pipe.steps[0][0], "transft-1")
assert_equal(pipe.steps[1][0], "transft-2")
pipe = make_pipeline(t1, t2, FitParamT())
assert_true(isinstance(pipe, Pipeline))
assert_equal(pipe.steps[0][0], "transft-1")
assert_equal(pipe.steps[1][0], "transft-2")
assert_equal(pipe.steps[2][0], "fitparamt")
def test_feature_union_weights():
# test feature union with transformer weights
iris = load_iris()
X = iris.data
y = iris.target
pca = RandomizedPCA(n_components=2, random_state=0)
select = SelectKBest(k=1)
# test using fit followed by transform
fs = FeatureUnion([("pca", pca), ("select", select)],
transformer_weights={"pca": 10})
fs.fit(X, y)
X_transformed = fs.transform(X)
# test using fit_transform
fs = FeatureUnion([("pca", pca), ("select", select)],
transformer_weights={"pca": 10})
X_fit_transformed = fs.fit_transform(X, y)
# test it works with transformers missing fit_transform
fs = FeatureUnion([("mock", TransfT()), ("pca", pca), ("select", select)],
transformer_weights={"mock": 10})
X_fit_transformed_wo_method = fs.fit_transform(X, y)
# check against expected result
# We use a different pca object to control the random_state stream
assert_array_almost_equal(X_transformed[:, :-1], 10 * pca.fit_transform(X))
assert_array_equal(X_transformed[:, -1],
select.fit_transform(X, y).ravel())
assert_array_almost_equal(X_fit_transformed[:, :-1],
10 * pca.fit_transform(X))
assert_array_equal(X_fit_transformed[:, -1],
select.fit_transform(X, y).ravel())
assert_equal(X_fit_transformed_wo_method.shape, (X.shape[0], 7))
def test_feature_union_parallel():
# test that n_jobs work for FeatureUnion
X = JUNK_FOOD_DOCS
fs = FeatureUnion([
("words", CountVectorizer(analyzer='word')),
("chars", CountVectorizer(analyzer='char')),
])
fs_parallel = FeatureUnion([
("words", CountVectorizer(analyzer='word')),
("chars", CountVectorizer(analyzer='char')),
], n_jobs=2)
fs_parallel2 = FeatureUnion([
("words", CountVectorizer(analyzer='word')),
("chars", CountVectorizer(analyzer='char')),
], n_jobs=2)
fs.fit(X)
X_transformed = fs.transform(X)
assert_equal(X_transformed.shape[0], len(X))
fs_parallel.fit(X)
X_transformed_parallel = fs_parallel.transform(X)
assert_equal(X_transformed.shape, X_transformed_parallel.shape)
assert_array_equal(
X_transformed.toarray(),
X_transformed_parallel.toarray()
)
# fit_transform should behave the same
X_transformed_parallel2 = fs_parallel2.fit_transform(X)
assert_array_equal(
X_transformed.toarray(),
X_transformed_parallel2.toarray()
)
# transformers should stay fit after fit_transform
X_transformed_parallel2 = fs_parallel2.transform(X)
assert_array_equal(
X_transformed.toarray(),
X_transformed_parallel2.toarray()
)
def test_feature_union_feature_names():
word_vect = CountVectorizer(analyzer="word")
char_vect = CountVectorizer(analyzer="char_wb", ngram_range=(3, 3))
ft = FeatureUnion([("chars", char_vect), ("words", word_vect)])
ft.fit(JUNK_FOOD_DOCS)
feature_names = ft.get_feature_names()
for feat in feature_names:
assert_true("chars__" in feat or "words__" in feat)
assert_equal(len(feature_names), 35)
def test_classes_property():
iris = load_iris()
X = iris.data
y = iris.target
reg = make_pipeline(SelectKBest(k=1), LinearRegression())
reg.fit(X, y)
assert_raises(AttributeError, getattr, reg, "classes_")
clf = make_pipeline(SelectKBest(k=1), LogisticRegression(random_state=0))
assert_raises(AttributeError, getattr, clf, "classes_")
clf.fit(X, y)
assert_array_equal(clf.classes_, np.unique(y))
| bsd-3-clause |
Habasari/sms-tools | lectures/04-STFT/plots-code/time-freq-compromise.py | 19 | 1255 | import numpy as np
import time, os, sys
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/models/'))
import stft as STFT
import utilFunctions as UF
import matplotlib.pyplot as plt
from scipy.signal import hamming
from scipy.fftpack import fft
import math
(fs, x) = UF.wavread('../../../sounds/piano.wav')
plt.figure(1, figsize=(9.5, 6))
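# Added note: the two analyses below contrast a short window (M = N = 256,
# finer time resolution) with a long window (M = N = 1024, finer frequency
# resolution) on the same recording, illustrating the STFT time-frequency
# compromise referenced in the output filename.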
w = np.hamming(256)
N = 256
H = 128
mX1, pX1 = STFT.stftAnal(x, fs, w, N, H)
plt.subplot(211)
numFrames = int(mX1[:,0].size)
frmTime = H*np.arange(numFrames)/float(fs)
binFreq = np.arange(mX1[0,:].size)*float(fs)/N
plt.pcolormesh(frmTime, binFreq, np.transpose(mX1))
plt.title('mX (piano.wav), M=256, N=256, H=128')
plt.autoscale(tight=True)
w = np.hamming(1024)
N = 1024
H = 128
mX2, pX2 = STFT.stftAnal(x, fs, w, N, H)
plt.subplot(212)
numFrames = int(mX2[:,0].size)
frmTime = H*np.arange(numFrames)/float(fs)
binFreq = np.arange(mX2[0,:].size)*float(fs)/N
plt.pcolormesh(frmTime, binFreq, np.transpose(mX2))
plt.title('mX (piano.wav), M=1024, N=1024, H=128')
plt.autoscale(tight=True)
plt.tight_layout()
plt.savefig('time-freq-compromise.png')
plt.show()
| agpl-3.0 |
deanjohnr/macrotrendfollow | backtest_factors.py | 1 | 8014 | ### backtest_factors.py ###
import pandas as pd
import numpy as np
import time
import datetime
import json
# Measures returns of feature selection parameters
def get_returns(df_result,
df_test,
forward_period,
factor_type,
factor_top_count,
minimum_sample_size,
factor_threshold,
minimum_asset_pct):
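    # Added summary: rank factors by the spread between the extreme factor-score
    # quantiles, keep the strongest candidates per asset, weight positions by
    # capped z-scores with weekly (Tuesday) rebalancing, and return the final
    # cumulative portfolio, index and excess returns for this parameter set
    # (or None if too few assets survive the filters).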
min_quantile = df_result['factor_bucket'].min()
max_quantile = df_result['factor_bucket'].max()
# Filter to minimum sample count
df_result = df_result[df_result[str(forward_period)+'_count'] >= minimum_sample_size]
# Set Factor Measure
factor_measure = str(forward_period)+'_'+factor_type
# Compute difference between max and min quantiles
df_meandiff = (df_result[df_result['factor_bucket'] == max_quantile][[factor_measure]]
- df_result[df_result['factor_bucket'] == min_quantile][[factor_measure]])
# Filter to top factors with minimum score
df_top = df_meandiff.drop_duplicates().sort_values(factor_measure, ascending=False).reset_index().groupby('asset').head(factor_top_count).sort_values(['asset',factor_measure])
df_top = df_top[df_top[factor_measure] >= factor_threshold]
df_bot = df_meandiff.drop_duplicates().sort_values(factor_measure, ascending=False).reset_index().groupby('asset').tail(factor_top_count).sort_values(['asset',factor_measure])
df_bot = df_bot[df_bot[factor_measure] <= -factor_threshold]
# Output final set of features
df_algofeatures = df_top.append(df_bot).sort_values('asset')
asset_pct = float(len(df_algofeatures['asset'].drop_duplicates()))/float(len(df_test['asset'].drop_duplicates()))
if asset_pct < minimum_asset_pct:
return None
# Join test data and chosen features
df_backtest = df_test.reset_index().merge(df_algofeatures[['asset','feature',factor_measure]],
how='inner', left_on=['asset','feature'], right_on=['asset','feature'])
# Cap scores to limit position size skew and clean infinite numbers
df_backtest.loc[df_backtest['factor_zscore'] > 3,'factor_zscore'] = 3
df_backtest.loc[df_backtest['factor_zscore'] < -3,'factor_zscore'] = -3
# Determine long/short direction of the factor
df_backtest['direction'] = df_backtest['factor_zscore']/df_backtest['factor_zscore'].abs()
# Use scores as portfolio asset weighting
df_backtest['asset_weight'] = df_backtest['factor_zscore']*df_backtest['direction']
df_backtest = df_backtest.dropna()
df_backtest = df_backtest.groupby(['date','asset'])[['asset_weight',target]].mean()
df_backtest['gross_weight'] = df_backtest['asset_weight'].abs()
df_denom = df_backtest.groupby(['date'])[['gross_weight']].sum()
df_count = df_backtest.groupby(['date'])[['asset_weight']].count()
df_backtest = df_backtest.merge(df_denom, left_index=True, right_index=True, suffixes=['','_sum'])
df_backtest = df_backtest.merge(df_count, left_index=True, right_index=True, suffixes=['','_count'])
df_backtest['portfolio_weight'] = (df_backtest['asset_weight']/(df_backtest['gross_weight_sum']))
# Add uniform index weights to compare returns
df_backtest['index_weight'] = 1.0/df_backtest['asset_weight_count']
df_backtest = df_backtest.reset_index()
# Limits to Tuesdays for rebalancing
df_backtest['dayofweek'] = df_backtest['date'].apply(lambda x: pd.to_datetime(x).dayofweek)
df_backtest = df_backtest[df_backtest['dayofweek']==1].set_index(keys=['date','asset'])
# Calculate weekly returns
df_backtest['portfolio_return'] = df_backtest[target].unstack().pct_change(1).shift(-1).stack() * df_backtest['portfolio_weight']
df_backtest['index_return'] = df_backtest[target].unstack().pct_change(1).shift(-1).stack() * df_backtest['index_weight']
# Calculate cumulative returns
df_return = df_backtest.groupby(['date'])[['portfolio_return','index_return']].sum()
df_value = df_return.rolling(window=len(df_return), min_periods=1).apply(lambda x: np.prod(1 + x))-1
df_return = df_return.merge(df_value, how='inner', left_index=True, right_index=True)
# Calculate returns in excess of index
df_return['excess_return'] = df_return['portfolio_return_y']-df_return['index_return_y']
    # Collect the final cumulative return metrics for this parameter set
df_result = df_return[['portfolio_return_y','index_return_y','excess_return']].dropna().tail(1)
df_result['forward_period'] = forward_period
df_result['factor_type'] = factor_type
df_result['factor_top_count'] = factor_top_count
df_result['minimum_sample_size'] = minimum_sample_size
df_result['factor_threshold'] = factor_threshold
df_result['minimum_asset_pct'] = minimum_asset_pct
df_result['asset_pct'] = asset_pct
return df_result
### INITIALIZE CONFIGURATION ###
factor_types = None
factor_top_counts = [5,10,20]
minimum_sample_sizes = [10,30,100,200]
factor_thresholds = [0.01,0.03,0.8,1.0,1.2]
minimum_asset_pct = 0.5
### LOAD CONFIGURATION ###
# Load Configuration File #
try:
with open('config.json') as config_file:
config = json.load(config_file)
except:
print('Error loading config.json file')
raise
# Assign Configuration Variables #
# Target field, normally price
try:
target = str(config['data']['google']['target'])
except:
print('Error configuring algorithm target')
raise
# Factor Measurement Types
try:
factor_types = np.array(config['backtest']['factor']['factor_types'])
except:
print('Error configuring factor measurement types')
raise
# Factor Rank Top Selection Count
try:
factor_top_counts = np.array(config['backtest']['factor']['factor_top_counts'])
except:
pass
# Factor Rank Top Selection Count
try:
factor_thresholds = np.array(config['backtest']['factor']['factor_thresholds'])
except:
pass
# Factor Minimum Sample Size
try:
minimum_sample_sizes = np.array(config['backtest']['factor']['minimum_sample_sizes'])
except:
pass
# Factor Minimum Usable Asset Percentage
try:
minimum_asset_pct = float(config['backtest']['factor']['minimum_asset_pct'])
except:
pass
# Get Forward Looking Return Periods
try:
forward_periods = np.array(config['measurement']['forward_periods'])
except:
print('Error configuring forward periods')
raise
# Load test data
df_test = pd.read_csv('results/test/test_data.csv')
# Load factor data
df_result = pd.DataFrame()
tickers = df_test['asset'].drop_duplicates().values
for ticker in tickers:
df_result = df_result.append(pd.read_csv('results/factors/'+ticker+'_factors.csv'))
df_result = df_result.set_index(keys=['asset','feature'], drop=False)
df_result = df_result.drop_duplicates()
denom = float(len(forward_periods)*len(factor_types)*len(factor_top_counts)*len(minimum_sample_sizes)*len(factor_thresholds))
print('Permutations: '+str(denom))
df_parameter = pd.DataFrame()
i = 0
for forward_period in forward_periods:
for factor_type in factor_types:
for factor_top_count in factor_top_counts:
for minimum_sample_size in minimum_sample_sizes:
for factor_threshold in factor_thresholds:
returns = get_returns(df_result,
df_test,
forward_period,
factor_type,
factor_top_count,
minimum_sample_size,
factor_threshold,
minimum_asset_pct)
i += 1
if i%10 == 0:
print(str(round(i/denom,2)))
if returns is not None:
df_parameter = df_parameter.append(returns)
print('Storing Results')
df_parameter.to_csv('results/parameter_returns.csv')
print('Complete') | mit |
cclib/cclib | test/io/testscripts.py | 3 | 4801 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2019, the cclib development team
#
# This file is part of cclib (http://cclib.github.io) and is distributed under
# the terms of the BSD 3-Clause License.
"""Unit tests for main scripts (ccget, ccwrite)."""
import os
import unittest
from unittest import mock
import cclib
__filedir__ = os.path.dirname(__file__)
__filepath__ = os.path.realpath(__filedir__)
__datadir__ = os.path.join(__filepath__, "..", "..", "data")
INPUT_FILE = os.path.join(
__datadir__,
'ADF/basicADF2007.01/dvb_gopt.adfout'
)
CJSON_OUTPUT_FILENAME = 'dvb_gopt.cjson'
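# Added note: the tests below replace the heavy entry points (ccread, ccwrite,
# pandas) with mocks, so they exercise argument parsing and dispatch of the
# command-line scripts rather than actual file parsing or output writing.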
@mock.patch("cclib.scripts.ccget.ccread")
class ccgetTest(unittest.TestCase):
def setUp(self):
try:
from cclib.scripts import ccget
except ImportError:
self.fail("ccget cannot be imported")
self.main = ccget.ccget
@mock.patch("cclib.scripts.ccget.sys.argv", ["ccget"])
def test_empty_argv(self, mock_ccread):
"""Does the script fail as expected if called without parameters?"""
with self.assertRaises(SystemExit):
self.main()
@mock.patch(
"cclib.scripts.ccget.sys.argv",
["ccget", "atomcoords", INPUT_FILE]
)
def test_ccread_invocation(self, mock_ccread):
self.main()
self.assertEqual(mock_ccread.call_count, 1)
ccread_call_args, ccread_call_kwargs = mock_ccread.call_args
self.assertEqual(ccread_call_args[0], INPUT_FILE)
@mock.patch("logging.warning")
@mock.patch(
"cclib.scripts.ccget.sys.argv",
["ccget", "atomcoord", INPUT_FILE]
)
def test_ccread_invocation_matching_args(self, mock_warn, mock_ccread):
self.main()
self.assertEqual(mock_warn.call_count, 1)
warn_call_args, warn_call_kwargs = mock_warn.call_args
warn_message = warn_call_args[0]
self.assertEqual(warn_message, "Attribute 'atomcoord' not found, but attribute 'atomcoords' is close. Using 'atomcoords' instead.")
self.assertEqual(mock_ccread.call_count, 1)
ccread_call_args, ccread_call_kwargs = mock_ccread.call_args
self.assertEqual(ccread_call_args[0], INPUT_FILE)
@mock.patch("cclib.scripts.ccwrite.ccwrite")
class ccwriteTest(unittest.TestCase):
def setUp(self):
try:
from cclib.scripts import ccwrite
except ImportError:
self.fail("ccwrite cannot be imported")
self.main = ccwrite.main
@mock.patch('cclib.scripts.ccwrite.sys.argv', ['ccwrite'])
def test_empty_argv(self, mock_ccwrite):
"""Does the script fail as expected if called without parameters?"""
with self.assertRaises(SystemExit):
self.main()
@mock.patch(
"cclib.scripts.ccwrite.sys.argv",
["ccwrite", "cjson", INPUT_FILE]
)
def test_ccwrite_call(self, mock_ccwrite):
"""is ccwrite called with the given parameters?"""
self.main()
self.assertEqual(mock_ccwrite.call_count, 1)
ccwrite_call_args, ccwrite_call_kwargs = mock_ccwrite.call_args
self.assertEqual(ccwrite_call_args[1], 'cjson')
self.assertEqual(ccwrite_call_args[2], CJSON_OUTPUT_FILENAME)
class ccframeTest(unittest.TestCase):
def setUp(self):
# It would be best to test with Pandas and not a mock!
if not hasattr(cclib.io.ccio, "pd"):
cclib.io.ccio.pd = mock.MagicMock()
def test_main_empty_argv(self):
"""Does main() fail as expected if called without arguments?"""
with self.assertRaises(SystemExit):
cclib.scripts.ccframe.main()
@mock.patch(
"cclib.scripts.ccframe.sys.argv",
["ccframe", INPUT_FILE]
)
@mock.patch("cclib.io.ccio._has_pandas", False)
def test_main_without_pandas(self):
"""Does ccframe fail if Pandas can't be imported?"""
with self.assertRaisesRegex(
ImportError, "You must install `pandas` to use this function"
):
cclib.scripts.ccframe.main()
@mock.patch(
"cclib.scripts.ccframe.sys.argv",
["ccframe", INPUT_FILE]
)
@mock.patch("cclib.io.ccio._has_pandas", True)
def test_main(self):
"""Is ccframe called with the given parameters?"""
with mock.patch('sys.stdout') as mock_stdout:
cclib.scripts.ccframe.main()
self.assertEqual(mock_stdout.write.call_count, 2)
df, newline = mock_stdout.write.call_args_list
if isinstance(df[0][0], mock.MagicMock):
self.assertEqual(df[0][0].name, 'mock.DataFrame()')
else:
# TODO: this is what we really should be testing
pass
self.assertEqual(newline[0][0], '\n')
if __name__ == "__main__":
unittest.main()
| bsd-3-clause |
dcprojects/CoolProp | dev/scripts/fit_shape_factor.py | 5 | 8075 | from CoolProp import CoolProp as CP
from PDSim.misc.datatypes import Collector
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
from scipy.odr import *
import textwrap
fluid_REF = 'Propane'
Tcrit_REF = CP.Props(fluid_REF,'Tcrit')
omega_REF = CP.Props(fluid_REF,"accentric")
molemass_REF = CP.Props(fluid_REF,'molemass')
rhocrit_REF = CP.Props(fluid_REF,'rhocrit')
Zcrit_REF = CP.DerivTerms('Z',Tcrit_REF,rhocrit_REF,fluid_REF)
fluid = 'DimethylEther'
molemass = CP.Props(fluid,'molemass')
Ttriple = CP.Props(fluid,'Ttriple')
Tcrit = CP.Props(fluid,'Tcrit')
omega = CP.Props(fluid,"accentric")
rhocrit = CP.Props(fluid,'rhocrit')
pcrit = CP.Props(fluid,'pcrit')
Zcrit = CP.DerivTerms('Z',Tcrit,rhocrit,fluid)
N = 12
RHO,TTT,RHO0,TTT0 = Collector(),Collector(),Collector(),Collector()
rhomax = CP.Props('D','T',Ttriple,'Q',0,fluid)
#Build a database of "experimental" data
for T in np.linspace(Ttriple,Tcrit+50,80):
for rho in np.linspace(1e-10,rhomax,80):
T0,rho0 = CP.conformal_Trho(fluid, fluid_REF, T, rho)
p = CP.Props('P', 'T', T, 'D', rho, fluid)
ar = CP.DerivTerms("phir",T,rho,fluid)
ar_REF = CP.DerivTerms("phir",T0,rho0,fluid_REF)
Z = CP.DerivTerms("Z",T,rho,fluid)
Z_REF = CP.DerivTerms("Z",T0,rho0,fluid_REF)
#goodstate = ((T > Tcrit and p > pcrit) or (T<Tcrit and rho > CP.rhosatL_anc(fluid,T) ))
goodstate = (T > Tcrit or rho > CP.rhosatL_anc(fluid,T) or rho < CP.rhosatV_anc(fluid,T) )
#goodstate = True
#Want positive value, and single-phase
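        # Added note: goodstate keeps only single-phase points, i.e. supercritical
        # temperatures or densities outside the saturation dome (denser than the
        # saturated liquid or rarer than the saturated vapor at this T).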
if ((T0/T)>0.1 and T/T0*Tcrit_REF/Tcrit < 3 and T0/T < 1e6 and goodstate):
            if abs((ar-ar_REF)**2+(Z-Z_REF)**2) > 1e-5:
print ar-ar_REF,Z-Z_REF
TTT << T
RHO << rho
TTT0 << T0
RHO0 << rho0
tau = Tcrit/np.array(TTT.vec)
delta = np.array(RHO.vec)/rhocrit
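# THETA below is the temperature shape factor theta = (T/T0)*(Tc_ref/Tc);
# PHI is the corresponding density shape factor built from the conformal densities.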
THETA = np.array(TTT.vec)/np.array(TTT0.vec)*Tcrit_REF/Tcrit
PHI = np.array(RHO0.vec)/np.array(RHO.vec)*rhocrit/rhocrit_REF #Ratio of MOLAR densities - here the molar masses cancel out to make phi non-dimensional
from CoolProp.Plots.Plots import Trho
Trho(fluid)
#plt.plot(RHO.vec,TTT.vec,'.')
#plt.show()
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(np.array(RHO.vec),np.array(TTT.vec),THETA)
plt.close('all')
print 'rhomin = ',np.min(RHO.vec)
#Define the objective function
def OBJECTIVE_theta(c,x):
tau = x[0,:]
delta = x[1,:]
A1 = c[0]-c[1]*np.log(tau)
A2 = c[2]-c[3]*np.log(tau)
A3 = c[4]-c[5]*np.log(tau)
A4 = c[6]-c[7]*np.log(tau)**2
DELTA = (delta-1)**2+(tau-1)**2
PSI_theta = c[8]*delta*np.exp(-c[9]*DELTA**2)
return 1+(omega-omega_REF)*(A1+A2*np.exp(-delta**2)+A3*np.exp(-delta**c[10])+A4*np.exp(-delta**c[11])+PSI_theta)
#Define the objective function
def OBJECTIVE_phi(c,x):
tau = x[0,:]
delta = x[1,:]
A1 = c[0]-c[1]*np.log(tau)
A2 = c[2]-c[3]*np.log(tau)
A3 = c[4]-c[5]*np.log(tau)
A4 = c[6]-c[7]*np.log(tau)**2
DELTA = (delta-1)**2+(tau-1)**2
PSI_theta = c[8]*delta*np.exp(-c[9]*DELTA**2)
return Zcrit_REF/Zcrit*(1+(omega-omega_REF)*(A1+A2*np.exp(-delta**2)+A3*np.exp(-delta**c[10])+A4*np.exp(-delta**c[11])+PSI_theta))
print 'starting fit for theta'
XXX = np.r_[np.array(tau,ndmin = 2), np.array(delta,ndmin=2)]
def fit_theta():
mod = Model(OBJECTIVE_theta)
mydata = Data(XXX.copy(), THETA)
beta0 = [100 for _ in range(N)]
myodr = ODR(mydata, mod, beta0=beta0)
myoutput = myodr.run()
myoutput.pprint()
print myoutput.sum_square
YFIT = OBJECTIVE_theta(myoutput.beta,XXX)
plt.plot(THETA,YFIT,'o',mfc='none')
plt.show()
ERR = YFIT-THETA
MAE = np.mean(np.abs(YFIT/THETA-1))*100
from CoolProp.Plots.Plots import Trho
Trho(fluid)
plt.plot(np.array(RHO.vec)[np.abs(ERR)<5e-2],np.array(TTT.vec)[np.abs(ERR)<5e-2],'.')
plt.show()
return myoutput.beta,MAE
def fit_phi():
mod = Model(OBJECTIVE_phi)
mydata = Data(XXX.copy(), PHI)
beta0 = [100 for _ in range(N)]
myodr = ODR(mydata, mod, beta0=beta0)
myoutput = myodr.run()
myoutput.pprint()
print myoutput.sum_square
YFIT = OBJECTIVE_theta(myoutput.beta,XXX)
plt.plot(PHI,YFIT,'o',mfc='none')
plt.show()
ERR = YFIT-PHI
from CoolProp.Plots.Plots import Trho
Trho(fluid)
plt.plot(np.array(RHO.vec)[np.abs(ERR)<5e-2],np.array(TTT.vec)[np.abs(ERR)<5e-2],'.')
MAE = np.mean(np.abs(YFIT/PHI-1))*100
plt.show()
return myoutput.beta,MAE
c,theta_MAE = fit_theta()
d,phi_MAE = fit_phi()
def write_output(c,d, theta_MAE, phi_MAE):
import time
from datetime import date
cdata = ', '.join(['{val:0.16g}'.format(val = v) for v in c])
ddata = ', '.join(['{val:0.16g}'.format(val = v) for v in d])
name = fluid
rhomin = np.min(RHO.vec)
timestamp = date.fromtimestamp(time.time()).strftime("%A, %d. %B %Y")
template = textwrap.dedent(
"""
double {name:s}Class::viscosity_Trho(double T, double rho)
{{
/*
Fitting of shape factor curves to R134a data. This method is employed because solving
    for the shape factors directly is computationally very expensive and shows poor
    convergence behavior. Thus we can use the ECS method, but with roughly the
    execution time of a conventional viscosity correlation.
This function code was automatically generated by the fit_shape_factor.py
script in the dev/ folder on {timestamp:s}
Mean absolute errors of shape factor prediction:
theta = {theta_MAE:g} %
phi = {phi_MAE:g} %
*/
double e_k, sigma, tau, delta, A1, A2, A3, A4, theta, Tc, Tc0, T0, rho0;
double DELTA, PSI_theta, psi, f, h, F_eta, M, M0, delta_omega, rho0bar;
double B1, B2, B3, B4, PSI_phi, Zc, Zc0, rhoc0, rhoc, log_tau, phi, rhobar;
double c[] = {{{cdata:s}}};
double d[] = {{{ddata:s}}};
tau = reduce.T/T;
delta = rho/reduce.rho;
R134aClass R134a = R134aClass();
R134a.post_load();
delta_omega = params.accentricfactor-R134a.params.accentricfactor;
Zc = reduce.p/(reduce.rho*R()*reduce.T);
Zc0 = R134a.reduce.p/(R134a.reduce.rho*R134a.R()*R134a.reduce.T);
Tc = reduce.T;
Tc0 = R134a.reduce.T;
rhoc = reduce.rho;
rhoc0 = R134a.reduce.rho;
M = params.molemass;
M0 = R134a.params.molemass;
rhobar = rho/M;
if (rho > {rhomin:g})
{{
DELTA = pow(delta-1,2)+pow(tau-1,2);
log_tau = log(tau);
A1 = c[0]-c[1]*log_tau;
A2 = c[2]-c[3]*log_tau;
A3 = c[4]-c[5]*log_tau;
A4 = c[6]-c[7]*pow(log_tau,2);
PSI_theta = c[8]*delta*exp(-c[9]*pow(DELTA,2));
theta = 1+(delta_omega)*(A1+A2*exp(-pow(delta,2))+A3*exp(-pow(delta,c[10]))+A4*exp(-pow(delta,c[11]))+PSI_theta);
B1 = d[0]-d[1]*log_tau;
B2 = d[2]-d[3]*log_tau;
B3 = d[4]-d[5]*log_tau;
B4 = d[6]-d[7]*pow(log_tau,2);
PSI_phi = d[8]*delta*exp(-d[9]*pow(DELTA,2));
phi = Zc0/Zc*(1+(delta_omega)*(B1+B2*exp(-pow(delta,2))+B3*exp(-pow(delta,d[10]))+B4*exp(-pow(delta,d[11]))+PSI_phi));
}}
else
{{
// Assume unity shape factors at low density
theta = 1.0; phi = 1.0;
}}
T0 = T*Tc0/theta/Tc;
h = M/M0*rhoc0/rhoc*phi;
rho0bar = rhobar*h;
rho0 = M0*rho0bar;
psi = ECS_psi_viscosity(delta);
f = T/T0;
F_eta = sqrt(f)*pow(h,-2.0/3.0)*sqrt(M/M0);
ECSParams(&e_k,&sigma);
return viscosity_dilute(T,e_k,sigma) + R134a.viscosity_background(T0,rho0*psi)*F_eta;
}}
"""
)
print template.format(**locals())
write_output(c,d,theta_MAE,phi_MAE)
| mit |
microsoft/EconML | econml/cate_interpreter/_interpreters.py | 1 | 26259 | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import abc
import numbers
import numpy as np
from sklearn.tree import DecisionTreeRegressor, DecisionTreeClassifier
from sklearn.utils import check_array
from ..policy import PolicyTree
from .._tree_exporter import (_SingleTreeExporterMixin,
_CateTreeDOTExporter, _CateTreeMPLExporter,
_PolicyTreeDOTExporter, _PolicyTreeMPLExporter)
class _SingleTreeInterpreter(_SingleTreeExporterMixin, metaclass=abc.ABCMeta):
@abc.abstractmethod
def interpret(self, cate_estimator, X):
"""
Interpret a linear CATE estimator when applied to a set of features
Parameters
----------
cate_estimator : :class:`.LinearCateEstimator`
The fitted estimator to interpret
X : array-like
The features against which to interpret the estimator;
must be compatible shape-wise with the features used to fit
the estimator
"""
pass
class SingleTreeCateInterpreter(_SingleTreeInterpreter):
"""
An interpreter for the effect estimated by a CATE estimator
Parameters
----------
include_model_uncertainty : bool, optional, default False
Whether to include confidence interval information when building a
simplified model of the cate model. If set to True, then
cate estimator needs to support the `const_marginal_ate_inference` method.
uncertainty_level : double, optional, default .1
The uncertainty level for the confidence intervals to be constructed
and used in the simplified model creation. If value=alpha
then a multitask decision tree will be built such that all samples
in a leaf have similar target prediction but also similar alpha
confidence intervals.
uncertainty_only_on_leaves : bool, optional, default True
Whether uncertainty information should be displayed only on leaf nodes.
If False, then interpretation can be slightly slower, especially for cate
models that have a computationally expensive inference method.
splitter : string, optional, default "best"
The strategy used to choose the split at each node. Supported
strategies are "best" to choose the best split and "random" to choose
the best random split.
max_depth : int or None, optional, default None
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
min_samples_split : int, float, optional, default 2
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a fraction and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
min_samples_leaf : int, float, optional, default 1
The minimum number of samples required to be at a leaf node.
A split point at any depth will only be considered if it leaves at
least ``min_samples_leaf`` training samples in each of the left and
right branches. This may have the effect of smoothing the model,
especially in regression.
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a fraction and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
min_weight_fraction_leaf : float, optional, default 0.
The minimum weighted fraction of the sum total of weights (of all
the input samples) required to be at a leaf node. Samples have
equal weight when sample_weight is not provided.
max_features : int, float or {"auto", "sqrt", "log2"}, default=None
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a fraction and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
random_state : int, RandomState instance or None, optional, default None
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
max_leaf_nodes : int or None, optional, default None
Grow a tree with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
min_impurity_decrease : float, optional, default 0.
A node will be split if this split induces a decrease of the impurity
greater than or equal to this value.
The weighted impurity decrease equation is the following::
N_t / N * (impurity - N_t_R / N_t * right_impurity
- N_t_L / N_t * left_impurity)
where ``N`` is the total number of samples, ``N_t`` is the number of
samples at the current node, ``N_t_L`` is the number of samples in the
left child, and ``N_t_R`` is the number of samples in the right child.
``N``, ``N_t``, ``N_t_R`` and ``N_t_L`` all refer to the weighted sum,
if ``sample_weight`` is passed.
"""
def __init__(self, *,
include_model_uncertainty=False,
uncertainty_level=.1,
uncertainty_only_on_leaves=True,
splitter="best",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features=None,
random_state=None,
max_leaf_nodes=None,
min_impurity_decrease=0.):
self.include_uncertainty = include_model_uncertainty
self.uncertainty_level = uncertainty_level
self.uncertainty_only_on_leaves = uncertainty_only_on_leaves
self.criterion = "mse"
self.splitter = splitter
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.random_state = random_state
self.max_leaf_nodes = max_leaf_nodes
self.min_impurity_decrease = min_impurity_decrease
def interpret(self, cate_estimator, X):
"""
Interpret the heterogeneity of a CATE estimator when applied to a set of features
Parameters
----------
cate_estimator : :class:`.LinearCateEstimator`
The fitted estimator to interpret
X : array-like
The features against which to interpret the estimator;
must be compatible shape-wise with the features used to fit
the estimator
Returns
-------
self: object instance
"""
self.tree_model_ = DecisionTreeRegressor(criterion=self.criterion,
splitter=self.splitter,
max_depth=self.max_depth,
min_samples_split=self.min_samples_split,
min_samples_leaf=self.min_samples_leaf,
min_weight_fraction_leaf=self.min_weight_fraction_leaf,
max_features=self.max_features,
random_state=self.random_state,
max_leaf_nodes=self.max_leaf_nodes,
min_impurity_decrease=self.min_impurity_decrease)
y_pred = cate_estimator.const_marginal_effect(X)
self.tree_model_.fit(X, y_pred.reshape((y_pred.shape[0], -1)))
paths = self.tree_model_.decision_path(X)
node_dict = {}
for node_id in range(paths.shape[1]):
mask = paths.getcol(node_id).toarray().flatten().astype(bool)
Xsub = X[mask]
if (self.include_uncertainty and
((not self.uncertainty_only_on_leaves) or (self.tree_model_.tree_.children_left[node_id] < 0))):
res = cate_estimator.const_marginal_ate_inference(Xsub)
node_dict[node_id] = {'mean': res.mean_point,
'std': res.std_point,
'ci': res.conf_int_mean(alpha=self.uncertainty_level)}
else:
cate_node = y_pred[mask]
node_dict[node_id] = {'mean': np.mean(cate_node, axis=0),
'std': np.std(cate_node, axis=0)}
self.node_dict_ = node_dict
return self
def _make_dot_exporter(self, *, out_file, feature_names, treatment_names, max_depth, filled,
leaves_parallel, rotate, rounded,
special_characters, precision):
return _CateTreeDOTExporter(self.include_uncertainty, self.uncertainty_level,
out_file=out_file, feature_names=feature_names,
treatment_names=treatment_names,
max_depth=max_depth,
filled=filled,
leaves_parallel=leaves_parallel, rotate=rotate, rounded=rounded,
special_characters=special_characters, precision=precision)
def _make_mpl_exporter(self, *, title, feature_names, treatment_names, max_depth,
filled,
rounded, precision, fontsize):
return _CateTreeMPLExporter(self.include_uncertainty, self.uncertainty_level,
title=title, feature_names=feature_names,
treatment_names=treatment_names,
max_depth=max_depth,
filled=filled,
rounded=rounded,
precision=precision, fontsize=fontsize)
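# Hedged usage sketch (illustrative, not part of the original module): given an
# already fitted CATE estimator `est` and a feature matrix `X` supplied by the
# caller, the interpreter above is typically driven as
#
#     intrp = SingleTreeCateInterpreter(include_model_uncertainty=True, max_depth=2)
#     intrp.interpret(est, X)
#     intrp.plot(feature_names=feature_names)
#
# `est`, `X` and `feature_names` are placeholders, not objects defined here.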
class SingleTreePolicyInterpreter(_SingleTreeInterpreter):
"""
An interpreter for a policy estimated based on a CATE estimation
Parameters
----------
include_model_uncertainty : bool, optional, default False
Whether to include confidence interval information when building a
simplified model of the cate model. If set to True, then
cate estimator needs to support the `const_marginal_ate_inference` method.
uncertainty_level : double, optional, default .1
The uncertainty level for the confidence intervals to be constructed
and used in the simplified model creation. If value=alpha
then a multitask decision tree will be built such that all samples
in a leaf have similar target prediction but also similar alpha
confidence intervals.
uncertainty_only_on_leaves : bool, optional, default True
Whether uncertainty information should be displayed only on leaf nodes.
If False, then interpretation can be slightly slower, especially for cate
models that have a computationally expensive inference method.
risk_level : float or None, optional (default=None)
If None then the point estimate of the CATE of every point will be used as the
effect of treatment. If any float alpha and risk_seeking=False (default), then the
lower end point of an alpha confidence interval of the CATE will be used.
Otherwise if risk_seeking=True, then the upper end of an alpha confidence interval
will be used.
risk_seeking : bool, optional, default False,
Whether to use an optimistic or pessimistic value for the effect estimate at a
sample point. Used only when risk_level is not None.
max_depth : int or None, optional, default None
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
min_samples_split : int, float, optional, default 2
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a fraction and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
min_samples_leaf : int, float, optional, default 1
The minimum number of samples required to be at a leaf node.
A split point at any depth will only be considered if it leaves at
least ``min_samples_leaf`` training samples in each of the left and
right branches. This may have the effect of smoothing the model,
especially in regression.
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a fraction and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
min_weight_fraction_leaf : float, optional, default 0.
The minimum weighted fraction of the sum total of weights (of all
the input samples) required to be at a leaf node. Samples have
equal weight when sample_weight is not provided.
max_features : int, float or {"auto", "sqrt", "log2"}, default=None
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a fraction and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
min_balancedness_tol: float in [0, .5], default=.45
How imbalanced a split we can tolerate. This enforces that each split leaves at least
(.5 - min_balancedness_tol) fraction of samples on each side of the split; or fraction
        of the total weight of samples, when sample_weight is not None. The default value ensures
that at least 5% of the parent node weight falls in each side of the split. Set it to 0.0 for no
balancedness and to .5 for perfectly balanced splits. For the formal inference theory
to be valid, this has to be any positive constant bounded away from zero.
min_impurity_decrease : float, optional, default 0.
A node will be split if this split induces a decrease of the impurity
greater than or equal to this value.
The weighted impurity decrease equation is the following::
N_t / N * (impurity - N_t_R / N_t * right_impurity
- N_t_L / N_t * left_impurity)
where ``N`` is the total number of samples, ``N_t`` is the number of
samples at the current node, ``N_t_L`` is the number of samples in the
left child, and ``N_t_R`` is the number of samples in the right child.
``N``, ``N_t``, ``N_t_R`` and ``N_t_L`` all refer to the weighted sum,
if ``sample_weight`` is passed.
random_state : int, RandomState instance or None, optional, default None
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
tree_model_ : :class:`~econml.policy.PolicyTree`
The policy tree model that represents the learned policy; available only after
:meth:`interpret` has been called.
policy_value_ : float
The value of applying the learned policy, applied to the sample used with :meth:`interpret`
always_treat_value_ : float
The value of the policy that always treats all units, applied to the sample used with :meth:`interpret`
"""
def __init__(self, *,
include_model_uncertainty=False,
uncertainty_level=.1,
uncertainty_only_on_leaves=True,
risk_level=None,
risk_seeking=False,
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features=None,
min_balancedness_tol=.45,
min_impurity_decrease=0.,
random_state=None):
self.include_uncertainty = include_model_uncertainty
self.uncertainty_level = uncertainty_level
self.uncertainty_only_on_leaves = uncertainty_only_on_leaves
self.risk_level = risk_level
self.risk_seeking = risk_seeking
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.random_state = random_state
self.min_impurity_decrease = min_impurity_decrease
self.min_balancedness_tol = min_balancedness_tol
def interpret(self, cate_estimator, X, sample_treatment_costs=None):
"""
Interpret a policy based on a linear CATE estimator when applied to a set of features
Parameters
----------
cate_estimator : :class:`.LinearCateEstimator`
The fitted estimator to interpret
X : array-like
The features against which to interpret the estimator;
must be compatible shape-wise with the features used to fit
the estimator
sample_treatment_costs : array-like, optional
The cost of treatment. Can be a scalar or have dimension (n_samples, n_treatments)
or (n_samples,) if T is a vector
Returns
-------
self: object instance
"""
if X is not None:
X = check_array(X)
X_in = X
else:
X = np.empty(shape=(1, 0))
X_in = None
self.tree_model_ = PolicyTree(criterion='neg_welfare',
splitter='best',
max_depth=self.max_depth,
min_samples_split=self.min_samples_split,
min_samples_leaf=self.min_samples_leaf,
min_weight_fraction_leaf=self.min_weight_fraction_leaf,
max_features=self.max_features,
min_impurity_decrease=self.min_impurity_decrease,
min_balancedness_tol=self.min_balancedness_tol,
honest=False,
random_state=self.random_state)
if self.risk_level is None:
y_pred = cate_estimator.const_marginal_effect(X_in)
elif not self.risk_seeking:
y_pred, _ = cate_estimator.const_marginal_effect_interval(X_in, alpha=self.risk_level)
else:
_, y_pred = cate_estimator.const_marginal_effect_interval(X_in, alpha=self.risk_level)
# average the outcome dimension if it exists and ensure 2d y_pred
if y_pred.ndim == 3:
y_pred = np.mean(y_pred, axis=1)
elif y_pred.ndim == 2:
if (len(cate_estimator._d_y) > 0) and cate_estimator._d_y[0] > 1:
y_pred = np.mean(y_pred, axis=1, keepdims=True)
elif y_pred.ndim == 1:
y_pred = y_pred.reshape((-1, 1))
if sample_treatment_costs is not None:
if isinstance(sample_treatment_costs, numbers.Real):
y_pred -= sample_treatment_costs
else:
sample_treatment_costs = check_array(sample_treatment_costs, ensure_2d=False)
if sample_treatment_costs.ndim == 1:
sample_treatment_costs = sample_treatment_costs.reshape((-1, 1))
if sample_treatment_costs.shape == y_pred.shape:
y_pred -= sample_treatment_costs
else:
raise ValueError("`sample_treatment_costs` should be a double scalar "
"or have dimension (n_samples, n_treatments) or (n_samples,) if T is a vector")
# get index of best treatment
all_y = np.hstack([np.zeros((y_pred.shape[0], 1)), np.atleast_1d(y_pred)])
self.tree_model_.fit(X, all_y)
self.policy_value_ = np.mean(np.max(self.tree_model_.predict_value(X), axis=1))
self.always_treat_value_ = np.mean(y_pred, axis=0)
paths = self.tree_model_.decision_path(X)
node_dict = {}
for node_id in range(paths.shape[1]):
mask = paths.getcol(node_id).toarray().flatten().astype(bool)
Xsub = X_in[mask] if X_in is not None else None
if (self.include_uncertainty and
((not self.uncertainty_only_on_leaves) or (self.tree_model_.tree_.children_left[node_id] < 0))):
res = cate_estimator.const_marginal_ate_inference(Xsub)
node_dict[node_id] = {'mean': res.mean_point,
'std': res.std_point,
'ci': res.conf_int_mean(alpha=self.uncertainty_level)}
else:
cate_node = y_pred[mask]
node_dict[node_id] = {'mean': np.mean(cate_node, axis=0),
'std': np.std(cate_node, axis=0)}
self.node_dict_ = node_dict
return self
def treat(self, X):
"""
Using the policy model learned by a call to :meth:`interpret`, assign treatment to a set of units
Parameters
----------
X : array-like
The features for the units to treat;
must be compatible shape-wise with the features used during interpretation
Returns
-------
T : array-like
The treatments implied by the policy learned by the interpreter, with treatment 0, meaning
            no treatment, and treatment 1 means the first treatment, etc.
"""
assert self.tree_model_ is not None, "Interpret must be called prior to trying to assign treatment."
return self.tree_model_.predict(X)
def _make_dot_exporter(self, *, out_file, feature_names, treatment_names, max_depth, filled,
leaves_parallel, rotate, rounded,
special_characters, precision):
title = "Average policy gains over no treatment: {} \n".format(np.around(self.policy_value_, precision))
title += "Average policy gains over constant treatment policies for each treatment: {}".format(
np.around(self.policy_value_ - self.always_treat_value_, precision))
return _PolicyTreeDOTExporter(out_file=out_file, title=title,
treatment_names=treatment_names,
feature_names=feature_names,
max_depth=max_depth,
filled=filled, leaves_parallel=leaves_parallel, rotate=rotate,
rounded=rounded, special_characters=special_characters,
precision=precision)
def _make_mpl_exporter(self, *, title, feature_names, treatment_names, max_depth, filled,
rounded, precision, fontsize):
title = "" if title is None else title
title += "Average policy gains over no treatment: {} \n".format(np.around(self.policy_value_, precision))
title += "Average policy gains over constant treatment policies for each treatment: {}".format(
np.around(self.policy_value_ - self.always_treat_value_, precision))
return _PolicyTreeMPLExporter(treatment_names=treatment_names,
title=title,
feature_names=feature_names,
max_depth=max_depth,
filled=filled,
rounded=rounded,
precision=precision, fontsize=fontsize)
| mit |
shusenl/scikit-learn | doc/tutorial/text_analytics/solutions/exercise_01_language_train_model.py | 254 | 2253 | """Build a language detector model
The goal of this exercise is to train a linear classifier on text features
that represent sequences of up to 3 consecutive characters so as to
recognize natural languages by using the frequencies of short character
sequences as 'fingerprints'.
"""
# Author: Olivier Grisel <[email protected]>
# License: Simplified BSD
import sys
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import Perceptron
from sklearn.pipeline import Pipeline
from sklearn.datasets import load_files
from sklearn.cross_validation import train_test_split
from sklearn import metrics
# The training data folder must be passed as first argument
languages_data_folder = sys.argv[1]
dataset = load_files(languages_data_folder)
# Split the dataset in training and test set:
docs_train, docs_test, y_train, y_test = train_test_split(
dataset.data, dataset.target, test_size=0.5)
# TASK: Build a vectorizer that splits strings into sequences of 1 to 3
# characters instead of word tokens
vectorizer = TfidfVectorizer(ngram_range=(1, 3), analyzer='char',
use_idf=False)
# TASK: Build a vectorizer / classifier pipeline using the previous analyzer
# the pipeline instance should be stored in a variable named clf
clf = Pipeline([
('vec', vectorizer),
('clf', Perceptron()),
])
# TASK: Fit the pipeline on the training set
clf.fit(docs_train, y_train)
# TASK: Predict the outcome on the testing set in a variable named y_predicted
y_predicted = clf.predict(docs_test)
# Print the classification report
print(metrics.classification_report(y_test, y_predicted,
target_names=dataset.target_names))
# Plot the confusion matrix
cm = metrics.confusion_matrix(y_test, y_predicted)
print(cm)
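# Hedged optional addition (not part of the original solution): a single
# overall accuracy figure to complement the per-class report above.
print("Overall accuracy: %0.3f" % metrics.accuracy_score(y_test, y_predicted))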
#import pylab as pl
#pl.matshow(cm, cmap=pl.cm.jet)
#pl.show()
# Predict the result on some short new sentences:
sentences = [
u'This is a language detection test.',
u'Ceci est un test de d\xe9tection de la langue.',
u'Dies ist ein Test, um die Sprache zu erkennen.',
]
predicted = clf.predict(sentences)
for s, p in zip(sentences, predicted):
print(u'The language of "%s" is "%s"' % (s, dataset.target_names[p]))
| bsd-3-clause |
ldirer/scikit-learn | examples/svm/plot_svm_regression.py | 120 | 1520 | """
===================================================================
Support Vector Regression (SVR) using linear and non-linear kernels
===================================================================
Toy example of 1D regression using linear, polynomial and RBF kernels.
"""
print(__doc__)
import numpy as np
from sklearn.svm import SVR
import matplotlib.pyplot as plt
###############################################################################
# Generate sample data
X = np.sort(5 * np.random.rand(40, 1), axis=0)
y = np.sin(X).ravel()
###############################################################################
# Add noise to targets
y[::5] += 3 * (0.5 - np.random.rand(8))
###############################################################################
# Fit regression model
svr_rbf = SVR(kernel='rbf', C=1e3, gamma=0.1)
svr_lin = SVR(kernel='linear', C=1e3)
svr_poly = SVR(kernel='poly', C=1e3, degree=2)
y_rbf = svr_rbf.fit(X, y).predict(X)
y_lin = svr_lin.fit(X, y).predict(X)
y_poly = svr_poly.fit(X, y).predict(X)
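###############################################################################
# Optional, hedged addition (not in the original example): a quick numerical
# comparison of the three fits via R^2 computed on the training data itself.
for name, y_hat in (('RBF', y_rbf), ('Linear', y_lin), ('Polynomial', y_poly)):
    ss_res = np.sum((y - y_hat) ** 2)
    ss_tot = np.sum((y - y.mean()) ** 2)
    print('%s kernel, training R^2: %.3f' % (name, 1.0 - ss_res / ss_tot))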
###############################################################################
# look at the results
lw = 2
plt.scatter(X, y, color='darkorange', label='data')
plt.plot(X, y_rbf, color='navy', lw=lw, label='RBF model')
plt.plot(X, y_lin, color='c', lw=lw, label='Linear model')
plt.plot(X, y_poly, color='cornflowerblue', lw=lw, label='Polynomial model')
plt.xlabel('data')
plt.ylabel('target')
plt.title('Support Vector Regression')
plt.legend()
plt.show()
| bsd-3-clause |
kaichogami/scikit-learn | sklearn/decomposition/dict_learning.py | 42 | 46134 | """ Dictionary learning
"""
from __future__ import print_function
# Author: Vlad Niculae, Gael Varoquaux, Alexandre Gramfort
# License: BSD 3 clause
import time
import sys
import itertools
from math import sqrt, ceil
import numpy as np
from scipy import linalg
from numpy.lib.stride_tricks import as_strided
from ..base import BaseEstimator, TransformerMixin
from ..externals.joblib import Parallel, delayed, cpu_count
from ..externals.six.moves import zip
from ..utils import (check_array, check_random_state, gen_even_slices,
gen_batches, _get_n_jobs)
from ..utils.extmath import randomized_svd, row_norms
from ..utils.validation import check_is_fitted
from ..linear_model import Lasso, orthogonal_mp_gram, LassoLars, Lars
def _sparse_encode(X, dictionary, gram, cov=None, algorithm='lasso_lars',
regularization=None, copy_cov=True,
init=None, max_iter=1000, check_input=True, verbose=0):
"""Generic sparse coding
Each column of the result is the solution to a Lasso problem.
Parameters
----------
X: array of shape (n_samples, n_features)
Data matrix.
dictionary: array of shape (n_components, n_features)
The dictionary matrix against which to solve the sparse coding of
the data. Some of the algorithms assume normalized rows.
gram: None | array, shape=(n_components, n_components)
Precomputed Gram matrix, dictionary * dictionary'
gram can be None if method is 'threshold'.
cov: array, shape=(n_components, n_samples)
Precomputed covariance, dictionary * X'
algorithm: {'lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'}
lars: uses the least angle regression method (linear_model.lars_path)
lasso_lars: uses Lars to compute the Lasso solution
lasso_cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). lasso_lars will be faster if
the estimated components are sparse.
omp: uses orthogonal matching pursuit to estimate the sparse solution
threshold: squashes to zero all coefficients less than regularization
from the projection dictionary * data'
regularization : int | float
The regularization parameter. It corresponds to alpha when
algorithm is 'lasso_lars', 'lasso_cd' or 'threshold'.
Otherwise it corresponds to n_nonzero_coefs.
init: array of shape (n_samples, n_components)
Initialization value of the sparse code. Only used if
`algorithm='lasso_cd'`.
max_iter: int, 1000 by default
Maximum number of iterations to perform if `algorithm='lasso_cd'`.
copy_cov: boolean, optional
Whether to copy the precomputed covariance matrix; if False, it may be
overwritten.
check_input: boolean, optional
If False, the input arrays X and dictionary will not be checked.
verbose: int
Controls the verbosity; the higher, the more messages. Defaults to 0.
Returns
-------
    code: array of shape (n_samples, n_components)
The sparse codes
See also
--------
sklearn.linear_model.lars_path
sklearn.linear_model.orthogonal_mp
sklearn.linear_model.Lasso
SparseCoder
"""
if X.ndim == 1:
X = X[:, np.newaxis]
n_samples, n_features = X.shape
if cov is None and algorithm != 'lasso_cd':
# overwriting cov is safe
copy_cov = False
cov = np.dot(dictionary, X.T)
if algorithm == 'lasso_lars':
alpha = float(regularization) / n_features # account for scaling
try:
err_mgt = np.seterr(all='ignore')
# Not passing in verbose=max(0, verbose-1) because Lars.fit already
# corrects the verbosity level.
lasso_lars = LassoLars(alpha=alpha, fit_intercept=False,
verbose=verbose, normalize=False,
precompute=gram, fit_path=False)
lasso_lars.fit(dictionary.T, X.T, Xy=cov)
new_code = lasso_lars.coef_
finally:
np.seterr(**err_mgt)
elif algorithm == 'lasso_cd':
alpha = float(regularization) / n_features # account for scaling
# TODO: Make verbosity argument for Lasso?
# sklearn.linear_model.coordinate_descent.enet_path has a verbosity
# argument that we could pass in from Lasso.
clf = Lasso(alpha=alpha, fit_intercept=False, normalize=False,
precompute=gram, max_iter=max_iter, warm_start=True)
clf.coef_ = init
clf.fit(dictionary.T, X.T, check_input=check_input)
new_code = clf.coef_
elif algorithm == 'lars':
try:
err_mgt = np.seterr(all='ignore')
# Not passing in verbose=max(0, verbose-1) because Lars.fit already
# corrects the verbosity level.
lars = Lars(fit_intercept=False, verbose=verbose, normalize=False,
precompute=gram, n_nonzero_coefs=int(regularization),
fit_path=False)
lars.fit(dictionary.T, X.T, Xy=cov)
new_code = lars.coef_
finally:
np.seterr(**err_mgt)
elif algorithm == 'threshold':
new_code = ((np.sign(cov) *
np.maximum(np.abs(cov) - regularization, 0)).T)
elif algorithm == 'omp':
# TODO: Should verbose argument be passed to this?
new_code = orthogonal_mp_gram(
Gram=gram, Xy=cov, n_nonzero_coefs=int(regularization),
tol=None, norms_squared=row_norms(X, squared=True),
copy_Xy=copy_cov).T
else:
        raise ValueError('Sparse coding method must be "lasso_lars", '
                         '"lasso_cd", "lars", "threshold" or "omp", got %s.'
% algorithm)
return new_code
# XXX : could be moved to the linear_model module
def sparse_encode(X, dictionary, gram=None, cov=None, algorithm='lasso_lars',
n_nonzero_coefs=None, alpha=None, copy_cov=True, init=None,
max_iter=1000, n_jobs=1, check_input=True, verbose=0):
"""Sparse coding
Each row of the result is the solution to a sparse coding problem.
The goal is to find a sparse array `code` such that::
X ~= code * dictionary
Read more in the :ref:`User Guide <SparseCoder>`.
Parameters
----------
X: array of shape (n_samples, n_features)
Data matrix
dictionary: array of shape (n_components, n_features)
The dictionary matrix against which to solve the sparse coding of
the data. Some of the algorithms assume normalized rows for meaningful
output.
gram: array, shape=(n_components, n_components)
Precomputed Gram matrix, dictionary * dictionary'
cov: array, shape=(n_components, n_samples)
        Precomputed covariance, dictionary * X'
algorithm: {'lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'}
lars: uses the least angle regression method (linear_model.lars_path)
lasso_lars: uses Lars to compute the Lasso solution
lasso_cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). lasso_lars will be faster if
the estimated components are sparse.
omp: uses orthogonal matching pursuit to estimate the sparse solution
threshold: squashes to zero all coefficients less than alpha from
the projection dictionary * X'
n_nonzero_coefs: int, 0.1 * n_features by default
Number of nonzero coefficients to target in each column of the
solution. This is only used by `algorithm='lars'` and `algorithm='omp'`
and is overridden by `alpha` in the `omp` case.
alpha: float, 1. by default
If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the
penalty applied to the L1 norm.
If `algorithm='threshold'`, `alpha` is the absolute value of the
threshold below which coefficients will be squashed to zero.
If `algorithm='omp'`, `alpha` is the tolerance parameter: the value of
the reconstruction error targeted. In this case, it overrides
`n_nonzero_coefs`.
init: array of shape (n_samples, n_components)
Initialization value of the sparse codes. Only used if
`algorithm='lasso_cd'`.
max_iter: int, 1000 by default
Maximum number of iterations to perform if `algorithm='lasso_cd'`.
copy_cov: boolean, optional
Whether to copy the precomputed covariance matrix; if False, it may be
overwritten.
n_jobs: int, optional
Number of parallel jobs to run.
check_input: boolean, optional
If False, the input arrays X and dictionary will not be checked.
verbose : int, optional
Controls the verbosity; the higher, the more messages. Defaults to 0.
Returns
-------
code: array of shape (n_samples, n_components)
The sparse codes
See also
--------
sklearn.linear_model.lars_path
sklearn.linear_model.orthogonal_mp
sklearn.linear_model.Lasso
SparseCoder
"""
if check_input:
if algorithm == 'lasso_cd':
dictionary = check_array(dictionary, order='C', dtype='float64')
X = check_array(X, order='C', dtype='float64')
else:
dictionary = check_array(dictionary)
X = check_array(X)
n_samples, n_features = X.shape
n_components = dictionary.shape[0]
if gram is None and algorithm != 'threshold':
gram = np.dot(dictionary, dictionary.T)
if cov is None and algorithm != 'lasso_cd':
copy_cov = False
cov = np.dot(dictionary, X.T)
if algorithm in ('lars', 'omp'):
regularization = n_nonzero_coefs
if regularization is None:
regularization = min(max(n_features / 10, 1), n_components)
else:
regularization = alpha
if regularization is None:
regularization = 1.
if n_jobs == 1 or algorithm == 'threshold':
code = _sparse_encode(X,
dictionary, gram, cov=cov,
algorithm=algorithm,
regularization=regularization, copy_cov=copy_cov,
init=init,
max_iter=max_iter,
check_input=False,
verbose=verbose)
        # This ensures that the dimensionality of code is always 2,
        # consistent with the case n_jobs > 1
if code.ndim == 1:
code = code[np.newaxis, :]
return code
# Enter parallel code block
code = np.empty((n_samples, n_components))
slices = list(gen_even_slices(n_samples, _get_n_jobs(n_jobs)))
code_views = Parallel(n_jobs=n_jobs, verbose=verbose)(
delayed(_sparse_encode)(
X[this_slice], dictionary, gram,
cov[:, this_slice] if cov is not None else None,
algorithm,
regularization=regularization, copy_cov=copy_cov,
init=init[this_slice] if init is not None else None,
max_iter=max_iter,
check_input=False)
for this_slice in slices)
for this_slice, this_view in zip(slices, code_views):
code[this_slice] = this_view
return code
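# Hedged usage sketch (illustrative only, never called at import time): encode
# a few random samples against a random, row-normalized dictionary with OMP.
# The function name and all variables below are introduced here purely for
# illustration and do not exist elsewhere in this module.
def _sparse_encode_demo():
    rng = np.random.RandomState(0)
    D = rng.randn(15, 30)                                # 15 atoms, 30 features
    D /= np.sqrt(np.sum(D ** 2, axis=1))[:, np.newaxis]  # unit-norm rows
    X_demo = rng.randn(5, 30)                            # 5 samples to encode
    code = sparse_encode(X_demo, D, algorithm='omp', n_nonzero_coefs=3)
    return code.shape                                    # expected: (5, 15)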
def _update_dict(dictionary, Y, code, verbose=False, return_r2=False,
random_state=None):
"""Update the dense dictionary factor in place.
Parameters
----------
dictionary: array of shape (n_features, n_components)
Value of the dictionary at the previous iteration.
Y: array of shape (n_features, n_samples)
Data matrix.
code: array of shape (n_components, n_samples)
Sparse coding of the data against which to optimize the dictionary.
verbose:
Degree of output the procedure will print.
return_r2: bool
Whether to compute and return the residual sum of squares corresponding
to the computed solution.
random_state: int or RandomState
Pseudo number generator state used for random sampling.
Returns
-------
dictionary: array of shape (n_features, n_components)
Updated dictionary.
"""
n_components = len(code)
n_samples = Y.shape[0]
random_state = check_random_state(random_state)
# Residuals, computed 'in-place' for efficiency
R = -np.dot(dictionary, code)
R += Y
R = np.asfortranarray(R)
ger, = linalg.get_blas_funcs(('ger',), (dictionary, code))
for k in range(n_components):
# R <- 1.0 * U_k * V_k^T + R
R = ger(1.0, dictionary[:, k], code[k, :], a=R, overwrite_a=True)
dictionary[:, k] = np.dot(R, code[k, :].T)
# Scale k'th atom
atom_norm_square = np.dot(dictionary[:, k], dictionary[:, k])
if atom_norm_square < 1e-20:
if verbose == 1:
sys.stdout.write("+")
sys.stdout.flush()
elif verbose:
print("Adding new random atom")
dictionary[:, k] = random_state.randn(n_samples)
# Setting corresponding coefs to 0
code[k, :] = 0.0
dictionary[:, k] /= sqrt(np.dot(dictionary[:, k],
dictionary[:, k]))
else:
dictionary[:, k] /= sqrt(atom_norm_square)
# R <- -1.0 * U_k * V_k^T + R
R = ger(-1.0, dictionary[:, k], code[k, :], a=R, overwrite_a=True)
if return_r2:
R **= 2
# R is fortran-ordered. For numpy version < 1.6, sum does not
# follow the quick striding first, and is thus inefficient on
# fortran ordered data. We take a flat view of the data with no
# striding
R = as_strided(R, shape=(R.size, ), strides=(R.dtype.itemsize,))
R = np.sum(R)
return dictionary, R
return dictionary
def dict_learning(X, n_components, alpha, max_iter=100, tol=1e-8,
method='lars', n_jobs=1, dict_init=None, code_init=None,
callback=None, verbose=False, random_state=None,
return_n_iter=False):
"""Solves a dictionary learning matrix factorization problem.
Finds the best dictionary and the corresponding sparse code for
approximating the data matrix X by solving::
(U^*, V^*) = argmin 0.5 || X - U V ||_2^2 + alpha * || U ||_1
(U,V)
with || V_k ||_2 = 1 for all 0 <= k < n_components
where V is the dictionary and U is the sparse code.
Read more in the :ref:`User Guide <DictionaryLearning>`.
Parameters
----------
X: array of shape (n_samples, n_features)
Data matrix.
n_components: int,
Number of dictionary atoms to extract.
alpha: int,
Sparsity controlling parameter.
max_iter: int,
Maximum number of iterations to perform.
tol: float,
Tolerance for the stopping condition.
method: {'lars', 'cd'}
lars: uses the least angle regression method to solve the lasso problem
(linear_model.lars_path)
cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). Lars will be faster if
the estimated components are sparse.
n_jobs: int,
Number of parallel jobs to run, or -1 to autodetect.
dict_init: array of shape (n_components, n_features),
Initial value for the dictionary for warm restart scenarios.
code_init: array of shape (n_samples, n_components),
Initial value for the sparse code for warm restart scenarios.
callback:
Callable that gets invoked every five iterations.
verbose:
Degree of output the procedure will print.
random_state: int or RandomState
Pseudo number generator state used for random sampling.
return_n_iter : bool
Whether or not to return the number of iterations.
Returns
-------
code: array of shape (n_samples, n_components)
The sparse code factor in the matrix factorization.
dictionary: array of shape (n_components, n_features),
The dictionary factor in the matrix factorization.
errors: array
Vector of errors at each iteration.
n_iter : int
Number of iterations run. Returned only if `return_n_iter` is
set to True.
See also
--------
dict_learning_online
DictionaryLearning
MiniBatchDictionaryLearning
SparsePCA
MiniBatchSparsePCA
"""
if method not in ('lars', 'cd'):
raise ValueError('Coding method %r not supported as a fit algorithm.'
% method)
method = 'lasso_' + method
t0 = time.time()
# Avoid integer division problems
alpha = float(alpha)
random_state = check_random_state(random_state)
if n_jobs == -1:
n_jobs = cpu_count()
# Init the code and the dictionary with SVD of Y
if code_init is not None and dict_init is not None:
code = np.array(code_init, order='F')
# Don't copy V, it will happen below
dictionary = dict_init
else:
code, S, dictionary = linalg.svd(X, full_matrices=False)
dictionary = S[:, np.newaxis] * dictionary
r = len(dictionary)
if n_components <= r: # True even if n_components=None
code = code[:, :n_components]
dictionary = dictionary[:n_components, :]
else:
code = np.c_[code, np.zeros((len(code), n_components - r))]
dictionary = np.r_[dictionary,
np.zeros((n_components - r, dictionary.shape[1]))]
# Fortran-order dict, as we are going to access its row vectors
dictionary = np.array(dictionary, order='F')
residuals = 0
errors = []
current_cost = np.nan
if verbose == 1:
print('[dict_learning]', end=' ')
# If max_iter is 0, number of iterations returned should be zero
ii = -1
for ii in range(max_iter):
dt = (time.time() - t0)
if verbose == 1:
sys.stdout.write(".")
sys.stdout.flush()
elif verbose:
print("Iteration % 3i "
"(elapsed time: % 3is, % 4.1fmn, current cost % 7.3f)"
% (ii, dt, dt / 60, current_cost))
# Update code
code = sparse_encode(X, dictionary, algorithm=method, alpha=alpha,
init=code, n_jobs=n_jobs)
# Update dictionary
dictionary, residuals = _update_dict(dictionary.T, X.T, code.T,
verbose=verbose, return_r2=True,
random_state=random_state)
dictionary = dictionary.T
# Cost function
current_cost = 0.5 * residuals + alpha * np.sum(np.abs(code))
errors.append(current_cost)
if ii > 0:
dE = errors[-2] - errors[-1]
# assert(dE >= -tol * errors[-1])
if dE < tol * errors[-1]:
if verbose == 1:
# A line return
print("")
elif verbose:
print("--- Convergence reached after %d iterations" % ii)
break
if ii % 5 == 0 and callback is not None:
callback(locals())
if return_n_iter:
return code, dictionary, errors, ii + 1
else:
return code, dictionary, errors
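# Hedged usage sketch (illustrative only, never called at import time): batch
# dictionary learning on a small random matrix. The function name and all
# variables below are introduced here purely for illustration.
def _dict_learning_demo():
    rng = np.random.RandomState(42)
    X_demo = rng.randn(20, 10)
    code, dictionary, errors = dict_learning(X_demo, n_components=5, alpha=1.,
                                             max_iter=20, method='lars',
                                             random_state=rng)
    # code is (20, 5), dictionary is (5, 10), errors holds the cost per iteration
    return code.shape, dictionary.shape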
def dict_learning_online(X, n_components=2, alpha=1, n_iter=100,
return_code=True, dict_init=None, callback=None,
batch_size=3, verbose=False, shuffle=True, n_jobs=1,
method='lars', iter_offset=0, random_state=None,
return_inner_stats=False, inner_stats=None,
return_n_iter=False):
"""Solves a dictionary learning matrix factorization problem online.
Finds the best dictionary and the corresponding sparse code for
approximating the data matrix X by solving::
(U^*, V^*) = argmin 0.5 || X - U V ||_2^2 + alpha * || U ||_1
(U,V)
with || V_k ||_2 = 1 for all 0 <= k < n_components
where V is the dictionary and U is the sparse code. This is
accomplished by repeatedly iterating over mini-batches by slicing
the input data.
Read more in the :ref:`User Guide <DictionaryLearning>`.
Parameters
----------
X: array of shape (n_samples, n_features)
Data matrix.
n_components : int,
Number of dictionary atoms to extract.
alpha : float,
Sparsity controlling parameter.
n_iter : int,
Number of iterations to perform.
return_code : boolean,
Whether to also return the code U or just the dictionary V.
dict_init : array of shape (n_components, n_features),
Initial value for the dictionary for warm restart scenarios.
callback :
Callable that gets invoked every five iterations.
batch_size : int,
The number of samples to take in each batch.
verbose :
Degree of output the procedure will print.
shuffle : boolean,
Whether to shuffle the data before splitting it in batches.
n_jobs : int,
Number of parallel jobs to run, or -1 to autodetect.
method : {'lars', 'cd'}
lars: uses the least angle regression method to solve the lasso problem
(linear_model.lars_path)
cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). Lars will be faster if
the estimated components are sparse.
iter_offset : int, default 0
Number of previous iterations completed on the dictionary used for
initialization.
random_state : int or RandomState
Pseudo number generator state used for random sampling.
return_inner_stats : boolean, optional
Return the inner statistics A (dictionary covariance) and B
(data approximation). Useful to restart the algorithm in an
online setting. If return_inner_stats is True, return_code is
ignored
inner_stats : tuple of (A, B) ndarrays
Inner sufficient statistics that are kept by the algorithm.
Passing them at initialization is useful in online settings, to
        avoid losing the history of the evolution.
A (n_components, n_components) is the dictionary covariance matrix.
B (n_features, n_components) is the data approximation matrix
return_n_iter : bool
Whether or not to return the number of iterations.
Returns
-------
code : array of shape (n_samples, n_components),
the sparse code (only returned if `return_code=True`)
dictionary : array of shape (n_components, n_features),
the solutions to the dictionary learning problem
n_iter : int
Number of iterations run. Returned only if `return_n_iter` is
set to `True`.
See also
--------
dict_learning
DictionaryLearning
MiniBatchDictionaryLearning
SparsePCA
MiniBatchSparsePCA
"""
if n_components is None:
n_components = X.shape[1]
if method not in ('lars', 'cd'):
raise ValueError('Coding method not supported as a fit algorithm.')
method = 'lasso_' + method
t0 = time.time()
n_samples, n_features = X.shape
# Avoid integer division problems
alpha = float(alpha)
random_state = check_random_state(random_state)
if n_jobs == -1:
n_jobs = cpu_count()
# Init V with SVD of X
if dict_init is not None:
dictionary = dict_init
else:
_, S, dictionary = randomized_svd(X, n_components,
random_state=random_state)
dictionary = S[:, np.newaxis] * dictionary
r = len(dictionary)
if n_components <= r:
dictionary = dictionary[:n_components, :]
else:
dictionary = np.r_[dictionary,
np.zeros((n_components - r, dictionary.shape[1]))]
if verbose == 1:
print('[dict_learning]', end=' ')
if shuffle:
X_train = X.copy()
random_state.shuffle(X_train)
else:
X_train = X
dictionary = check_array(dictionary.T, order='F', dtype=np.float64,
copy=False)
X_train = check_array(X_train, order='C', dtype=np.float64, copy=False)
batches = gen_batches(n_samples, batch_size)
batches = itertools.cycle(batches)
# The covariance of the dictionary
if inner_stats is None:
A = np.zeros((n_components, n_components))
# The data approximation
B = np.zeros((n_features, n_components))
else:
A = inner_stats[0].copy()
B = inner_stats[1].copy()
# If n_iter is zero, we need to return zero.
ii = iter_offset - 1
for ii, batch in zip(range(iter_offset, iter_offset + n_iter), batches):
this_X = X_train[batch]
dt = (time.time() - t0)
if verbose == 1:
sys.stdout.write(".")
sys.stdout.flush()
elif verbose:
if verbose > 10 or ii % ceil(100. / verbose) == 0:
print ("Iteration % 3i (elapsed time: % 3is, % 4.1fmn)"
% (ii, dt, dt / 60))
this_code = sparse_encode(this_X, dictionary.T, algorithm=method,
alpha=alpha, n_jobs=n_jobs).T
# Update the auxiliary variables
if ii < batch_size - 1:
theta = float((ii + 1) * batch_size)
else:
theta = float(batch_size ** 2 + ii + 1 - batch_size)
beta = (theta + 1 - batch_size) / (theta + 1)
A *= beta
A += np.dot(this_code, this_code.T)
B *= beta
B += np.dot(this_X.T, this_code.T)
# Update dictionary
dictionary = _update_dict(dictionary, B, A, verbose=verbose,
random_state=random_state)
# XXX: Can the residuals be of any use?
# Maybe we need a stopping criteria based on the amount of
# modification in the dictionary
if callback is not None:
callback(locals())
if return_inner_stats:
if return_n_iter:
return dictionary.T, (A, B), ii - iter_offset + 1
else:
return dictionary.T, (A, B)
if return_code:
if verbose > 1:
print('Learning code...', end=' ')
elif verbose == 1:
print('|', end=' ')
code = sparse_encode(X, dictionary.T, algorithm=method, alpha=alpha,
n_jobs=n_jobs, check_input=False)
if verbose > 1:
dt = (time.time() - t0)
print('done (total time: % 3is, % 4.1fmn)' % (dt, dt / 60))
if return_n_iter:
return code, dictionary.T, ii - iter_offset + 1
else:
return code, dictionary.T
if return_n_iter:
return dictionary.T, ii - iter_offset + 1
else:
return dictionary.T
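# Hedged usage sketch (illustrative only, never called at import time): online
# dictionary learning on random mini-batches, returning only the dictionary.
# All names below are introduced here purely for illustration.
def _dict_learning_online_demo():
    rng = np.random.RandomState(0)
    X_demo = rng.randn(50, 8)
    V = dict_learning_online(X_demo, n_components=4, alpha=1., n_iter=10,
                             batch_size=5, return_code=False, random_state=rng)
    return V.shape  # expected: (4, 8)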
class SparseCodingMixin(TransformerMixin):
"""Sparse coding mixin"""
def _set_sparse_coding_params(self, n_components,
transform_algorithm='omp',
transform_n_nonzero_coefs=None,
transform_alpha=None, split_sign=False,
n_jobs=1):
self.n_components = n_components
self.transform_algorithm = transform_algorithm
self.transform_n_nonzero_coefs = transform_n_nonzero_coefs
self.transform_alpha = transform_alpha
self.split_sign = split_sign
self.n_jobs = n_jobs
def transform(self, X, y=None):
"""Encode the data as a sparse combination of the dictionary atoms.
Coding method is determined by the object parameter
`transform_algorithm`.
Parameters
----------
X : array of shape (n_samples, n_features)
Test data to be transformed, must have the same number of
features as the data used to train the model.
Returns
-------
X_new : array, shape (n_samples, n_components)
Transformed data
"""
check_is_fitted(self, 'components_')
# XXX : kwargs is not documented
X = check_array(X)
n_samples, n_features = X.shape
code = sparse_encode(
X, self.components_, algorithm=self.transform_algorithm,
n_nonzero_coefs=self.transform_n_nonzero_coefs,
alpha=self.transform_alpha, n_jobs=self.n_jobs)
if self.split_sign:
# feature vector is split into a positive and negative side
n_samples, n_features = code.shape
split_code = np.empty((n_samples, 2 * n_features))
split_code[:, :n_features] = np.maximum(code, 0)
split_code[:, n_features:] = -np.minimum(code, 0)
code = split_code
return code
class SparseCoder(BaseEstimator, SparseCodingMixin):
"""Sparse coding
Finds a sparse representation of data against a fixed, precomputed
dictionary.
Each row of the result is the solution to a sparse coding problem.
The goal is to find a sparse array `code` such that::
X ~= code * dictionary
Read more in the :ref:`User Guide <SparseCoder>`.
Parameters
----------
dictionary : array, [n_components, n_features]
The dictionary atoms used for sparse coding. Lines are assumed to be
normalized to unit norm.
transform_algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp', \
'threshold'}
Algorithm used to transform the data:
lars: uses the least angle regression method (linear_model.lars_path)
lasso_lars: uses Lars to compute the Lasso solution
lasso_cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). lasso_lars will be faster if
the estimated components are sparse.
omp: uses orthogonal matching pursuit to estimate the sparse solution
threshold: squashes to zero all coefficients less than alpha from
the projection ``dictionary * X'``
transform_n_nonzero_coefs : int, ``0.1 * n_features`` by default
Number of nonzero coefficients to target in each column of the
solution. This is only used by `algorithm='lars'` and `algorithm='omp'`
and is overridden by `alpha` in the `omp` case.
transform_alpha : float, 1. by default
If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the
penalty applied to the L1 norm.
If `algorithm='threshold'`, `alpha` is the absolute value of the
threshold below which coefficients will be squashed to zero.
If `algorithm='omp'`, `alpha` is the tolerance parameter: the value of
the reconstruction error targeted. In this case, it overrides
`n_nonzero_coefs`.
split_sign : bool, False by default
Whether to split the sparse feature vector into the concatenation of
its negative part and its positive part. This can improve the
performance of downstream classifiers.
n_jobs : int,
number of parallel jobs to run
Attributes
----------
components_ : array, [n_components, n_features]
The unchanged dictionary atoms
See also
--------
DictionaryLearning
MiniBatchDictionaryLearning
SparsePCA
MiniBatchSparsePCA
sparse_encode
"""
def __init__(self, dictionary, transform_algorithm='omp',
transform_n_nonzero_coefs=None, transform_alpha=None,
split_sign=False, n_jobs=1):
self._set_sparse_coding_params(dictionary.shape[0],
transform_algorithm,
transform_n_nonzero_coefs,
transform_alpha, split_sign, n_jobs)
self.components_ = dictionary
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
"""
return self
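# Hedged usage sketch (illustrative only, never called at import time): sparse
# coding against a fixed, row-normalized random dictionary via the estimator
# API. All names below are introduced here purely for illustration.
def _sparse_coder_demo():
    rng = np.random.RandomState(0)
    D = rng.randn(10, 25)
    D /= np.sqrt(np.sum(D ** 2, axis=1))[:, np.newaxis]
    coder = SparseCoder(dictionary=D, transform_algorithm='lasso_lars',
                        transform_alpha=0.1)
    return coder.transform(rng.randn(3, 25)).shape  # expected: (3, 10)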
class DictionaryLearning(BaseEstimator, SparseCodingMixin):
"""Dictionary learning
Finds a dictionary (a set of atoms) that can best be used to represent data
using a sparse code.
Solves the optimization problem::
(U^*,V^*) = argmin 0.5 || Y - U V ||_2^2 + alpha * || U ||_1
(U,V)
with || V_k ||_2 = 1 for all 0 <= k < n_components
Read more in the :ref:`User Guide <DictionaryLearning>`.
Parameters
----------
n_components : int,
number of dictionary elements to extract
alpha : float,
sparsity controlling parameter
max_iter : int,
maximum number of iterations to perform
tol : float,
tolerance for numerical error
fit_algorithm : {'lars', 'cd'}
lars: uses the least angle regression method to solve the lasso problem
(linear_model.lars_path)
cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). Lars will be faster if
the estimated components are sparse.
.. versionadded:: 0.17
*cd* coordinate descent method to improve speed.
transform_algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp', \
'threshold'}
Algorithm used to transform the data
lars: uses the least angle regression method (linear_model.lars_path)
lasso_lars: uses Lars to compute the Lasso solution
lasso_cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). lasso_lars will be faster if
the estimated components are sparse.
omp: uses orthogonal matching pursuit to estimate the sparse solution
threshold: squashes to zero all coefficients less than alpha from
the projection ``dictionary * X'``
.. versionadded:: 0.17
*lasso_cd* coordinate descent method to improve speed.
transform_n_nonzero_coefs : int, ``0.1 * n_features`` by default
Number of nonzero coefficients to target in each column of the
solution. This is only used by `algorithm='lars'` and `algorithm='omp'`
and is overridden by `alpha` in the `omp` case.
transform_alpha : float, 1. by default
If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the
penalty applied to the L1 norm.
If `algorithm='threshold'`, `alpha` is the absolute value of the
threshold below which coefficients will be squashed to zero.
If `algorithm='omp'`, `alpha` is the tolerance parameter: the value of
the reconstruction error targeted. In this case, it overrides
`n_nonzero_coefs`.
split_sign : bool, False by default
Whether to split the sparse feature vector into the concatenation of
its negative part and its positive part. This can improve the
performance of downstream classifiers.
n_jobs : int,
number of parallel jobs to run
code_init : array of shape (n_samples, n_components),
initial value for the code, for warm restart
dict_init : array of shape (n_components, n_features),
initial values for the dictionary, for warm restart
verbose :
degree of verbosity of the printed output
random_state : int or RandomState
Pseudo number generator state used for random sampling.
Attributes
----------
components_ : array, [n_components, n_features]
dictionary atoms extracted from the data
error_ : array
vector of errors at each iteration
n_iter_ : int
Number of iterations run.
Notes
-----
**References:**
J. Mairal, F. Bach, J. Ponce, G. Sapiro, 2009: Online dictionary learning
for sparse coding (http://www.di.ens.fr/sierra/pdfs/icml09.pdf)
See also
--------
SparseCoder
MiniBatchDictionaryLearning
SparsePCA
MiniBatchSparsePCA
"""
def __init__(self, n_components=None, alpha=1, max_iter=1000, tol=1e-8,
fit_algorithm='lars', transform_algorithm='omp',
transform_n_nonzero_coefs=None, transform_alpha=None,
n_jobs=1, code_init=None, dict_init=None, verbose=False,
split_sign=False, random_state=None):
self._set_sparse_coding_params(n_components, transform_algorithm,
transform_n_nonzero_coefs,
transform_alpha, split_sign, n_jobs)
self.alpha = alpha
self.max_iter = max_iter
self.tol = tol
self.fit_algorithm = fit_algorithm
self.code_init = code_init
self.dict_init = dict_init
self.verbose = verbose
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model from data in X.
Parameters
----------
X: array-like, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self: object
Returns the object itself
"""
random_state = check_random_state(self.random_state)
X = check_array(X)
if self.n_components is None:
n_components = X.shape[1]
else:
n_components = self.n_components
V, U, E, self.n_iter_ = dict_learning(
X, n_components, self.alpha,
tol=self.tol, max_iter=self.max_iter,
method=self.fit_algorithm,
n_jobs=self.n_jobs,
code_init=self.code_init,
dict_init=self.dict_init,
verbose=self.verbose,
random_state=random_state,
return_n_iter=True)
self.components_ = U
self.error_ = E
return self
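# Hedged usage sketch (illustrative only, never called at import time): fit the
# estimator on random data, then sparse-code the same data with OMP. All names
# below are introduced here purely for illustration.
def _dictionary_learning_demo():
    rng = np.random.RandomState(0)
    X_demo = rng.randn(30, 12)
    dico = DictionaryLearning(n_components=6, alpha=1., max_iter=10,
                              transform_algorithm='omp',
                              transform_n_nonzero_coefs=2, random_state=rng)
    code = dico.fit(X_demo).transform(X_demo)
    return dico.components_.shape, code.shape  # ((6, 12), (30, 6))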
class MiniBatchDictionaryLearning(BaseEstimator, SparseCodingMixin):
"""Mini-batch dictionary learning
Finds a dictionary (a set of atoms) that can best be used to represent data
using a sparse code.
Solves the optimization problem::
(U^*,V^*) = argmin 0.5 || Y - U V ||_2^2 + alpha * || U ||_1
(U,V)
with || V_k ||_2 = 1 for all 0 <= k < n_components
Read more in the :ref:`User Guide <DictionaryLearning>`.
Parameters
----------
n_components : int,
number of dictionary elements to extract
alpha : float,
sparsity controlling parameter
n_iter : int,
total number of iterations to perform
fit_algorithm : {'lars', 'cd'}
lars: uses the least angle regression method to solve the lasso problem
(linear_model.lars_path)
cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). Lars will be faster if
the estimated components are sparse.
transform_algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp', \
'threshold'}
Algorithm used to transform the data.
lars: uses the least angle regression method (linear_model.lars_path)
lasso_lars: uses Lars to compute the Lasso solution
lasso_cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). lasso_lars will be faster if
the estimated components are sparse.
omp: uses orthogonal matching pursuit to estimate the sparse solution
threshold: squashes to zero all coefficients less than alpha from
the projection dictionary * X'
transform_n_nonzero_coefs : int, ``0.1 * n_features`` by default
Number of nonzero coefficients to target in each column of the
solution. This is only used by `algorithm='lars'` and `algorithm='omp'`
and is overridden by `alpha` in the `omp` case.
transform_alpha : float, 1. by default
If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the
penalty applied to the L1 norm.
If `algorithm='threshold'`, `alpha` is the absolute value of the
threshold below which coefficients will be squashed to zero.
If `algorithm='omp'`, `alpha` is the tolerance parameter: the value of
the reconstruction error targeted. In this case, it overrides
`n_nonzero_coefs`.
split_sign : bool, False by default
Whether to split the sparse feature vector into the concatenation of
its negative part and its positive part. This can improve the
performance of downstream classifiers.
n_jobs : int,
number of parallel jobs to run
dict_init : array of shape (n_components, n_features),
initial value of the dictionary for warm restart scenarios
verbose :
degree of verbosity of the printed output
batch_size : int,
number of samples in each mini-batch
shuffle : bool,
whether to shuffle the samples before forming batches
random_state : int or RandomState
Pseudo number generator state used for random sampling.
Attributes
----------
components_ : array, [n_components, n_features]
components extracted from the data
inner_stats_ : tuple of (A, B) ndarrays
Internal sufficient statistics that are kept by the algorithm.
        Keeping them is useful in online settings, to avoid losing the
history of the evolution, but they shouldn't have any use for the
end user.
A (n_components, n_components) is the dictionary covariance matrix.
B (n_features, n_components) is the data approximation matrix
n_iter_ : int
Number of iterations run.
Notes
-----
**References:**
J. Mairal, F. Bach, J. Ponce, G. Sapiro, 2009: Online dictionary learning
for sparse coding (http://www.di.ens.fr/sierra/pdfs/icml09.pdf)
See also
--------
SparseCoder
DictionaryLearning
SparsePCA
MiniBatchSparsePCA
"""
def __init__(self, n_components=None, alpha=1, n_iter=1000,
fit_algorithm='lars', n_jobs=1, batch_size=3,
shuffle=True, dict_init=None, transform_algorithm='omp',
transform_n_nonzero_coefs=None, transform_alpha=None,
verbose=False, split_sign=False, random_state=None):
self._set_sparse_coding_params(n_components, transform_algorithm,
transform_n_nonzero_coefs,
transform_alpha, split_sign, n_jobs)
self.alpha = alpha
self.n_iter = n_iter
self.fit_algorithm = fit_algorithm
self.dict_init = dict_init
self.verbose = verbose
self.shuffle = shuffle
self.batch_size = batch_size
self.split_sign = split_sign
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model from data in X.
Parameters
----------
X: array-like, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the instance itself.
"""
random_state = check_random_state(self.random_state)
X = check_array(X)
U, (A, B), self.n_iter_ = dict_learning_online(
X, self.n_components, self.alpha,
n_iter=self.n_iter, return_code=False,
method=self.fit_algorithm,
n_jobs=self.n_jobs, dict_init=self.dict_init,
batch_size=self.batch_size, shuffle=self.shuffle,
verbose=self.verbose, random_state=random_state,
return_inner_stats=True,
return_n_iter=True)
self.components_ = U
# Keep track of the state of the algorithm to be able to do
# some online fitting (partial_fit)
self.inner_stats_ = (A, B)
self.iter_offset_ = self.n_iter
return self
def partial_fit(self, X, y=None, iter_offset=None):
"""Updates the model using the data in X as a mini-batch.
Parameters
----------
X: array-like, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples
and n_features is the number of features.
iter_offset: integer, optional
            The number of iterations on data batches that have been
performed before this call to partial_fit. This is optional:
if no number is passed, the memory of the object is
used.
Returns
-------
self : object
Returns the instance itself.
"""
if not hasattr(self, 'random_state_'):
self.random_state_ = check_random_state(self.random_state)
X = check_array(X)
if hasattr(self, 'components_'):
dict_init = self.components_
else:
dict_init = self.dict_init
inner_stats = getattr(self, 'inner_stats_', None)
if iter_offset is None:
iter_offset = getattr(self, 'iter_offset_', 0)
U, (A, B) = dict_learning_online(
X, self.n_components, self.alpha,
n_iter=self.n_iter, method=self.fit_algorithm,
n_jobs=self.n_jobs, dict_init=dict_init,
batch_size=len(X), shuffle=False,
verbose=self.verbose, return_code=False,
iter_offset=iter_offset, random_state=self.random_state_,
return_inner_stats=True, inner_stats=inner_stats)
self.components_ = U
# Keep track of the state of the algorithm to be able to do
# some online fitting (partial_fit)
self.inner_stats_ = (A, B)
self.iter_offset_ = iter_offset + self.n_iter
return self
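# Hedged usage sketch (illustrative only, never called at import time):
# incremental fitting with partial_fit on successive random mini-batches.
# All names below are introduced here purely for illustration.
def _minibatch_dictionary_learning_demo():
    rng = np.random.RandomState(0)
    dico = MiniBatchDictionaryLearning(n_components=4, alpha=1., n_iter=5,
                                       random_state=rng)
    for _ in range(3):
        dico.partial_fit(rng.randn(10, 8))
    return dico.components_.shape  # expected: (4, 8)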
| bsd-3-clause |
tosolveit/scikit-learn | sklearn/feature_extraction/image.py | 263 | 17600 | """
The :mod:`sklearn.feature_extraction.image` submodule gathers utilities to
extract features from images.
"""
# Authors: Emmanuelle Gouillart <[email protected]>
# Gael Varoquaux <[email protected]>
# Olivier Grisel
# Vlad Niculae
# License: BSD 3 clause
from itertools import product
import numbers
import numpy as np
from scipy import sparse
from numpy.lib.stride_tricks import as_strided
from ..utils import check_array, check_random_state
from ..utils.fixes import astype
from ..base import BaseEstimator
__all__ = ['PatchExtractor',
'extract_patches_2d',
'grid_to_graph',
'img_to_graph',
'reconstruct_from_patches_2d']
###############################################################################
# From an image to a graph
def _make_edges_3d(n_x, n_y, n_z=1):
"""Returns a list of edges for a 3D image.
Parameters
===========
n_x: integer
The size of the grid in the x direction.
n_y: integer
The size of the grid in the y direction.
n_z: integer, optional
The size of the grid in the z direction, defaults to 1
"""
vertices = np.arange(n_x * n_y * n_z).reshape((n_x, n_y, n_z))
edges_deep = np.vstack((vertices[:, :, :-1].ravel(),
vertices[:, :, 1:].ravel()))
edges_right = np.vstack((vertices[:, :-1].ravel(),
vertices[:, 1:].ravel()))
edges_down = np.vstack((vertices[:-1].ravel(), vertices[1:].ravel()))
edges = np.hstack((edges_deep, edges_right, edges_down))
return edges
def _compute_gradient_3d(edges, img):
n_x, n_y, n_z = img.shape
gradient = np.abs(img[edges[0] // (n_y * n_z),
(edges[0] % (n_y * n_z)) // n_z,
(edges[0] % (n_y * n_z)) % n_z] -
img[edges[1] // (n_y * n_z),
(edges[1] % (n_y * n_z)) // n_z,
(edges[1] % (n_y * n_z)) % n_z])
return gradient
# XXX: Why mask the image after computing the weights?
def _mask_edges_weights(mask, edges, weights=None):
"""Apply a mask to edges (weighted or not)"""
inds = np.arange(mask.size)
inds = inds[mask.ravel()]
ind_mask = np.logical_and(np.in1d(edges[0], inds),
np.in1d(edges[1], inds))
edges = edges[:, ind_mask]
if weights is not None:
weights = weights[ind_mask]
if len(edges.ravel()):
maxval = edges.max()
else:
maxval = 0
order = np.searchsorted(np.unique(edges.ravel()), np.arange(maxval + 1))
edges = order[edges]
if weights is None:
return edges
else:
return edges, weights
def _to_graph(n_x, n_y, n_z, mask=None, img=None,
return_as=sparse.coo_matrix, dtype=None):
"""Auxiliary function for img_to_graph and grid_to_graph
"""
edges = _make_edges_3d(n_x, n_y, n_z)
if dtype is None:
if img is None:
dtype = np.int
else:
dtype = img.dtype
if img is not None:
img = np.atleast_3d(img)
weights = _compute_gradient_3d(edges, img)
if mask is not None:
edges, weights = _mask_edges_weights(mask, edges, weights)
diag = img.squeeze()[mask]
else:
diag = img.ravel()
n_voxels = diag.size
else:
if mask is not None:
mask = astype(mask, dtype=np.bool, copy=False)
mask = np.asarray(mask, dtype=np.bool)
edges = _mask_edges_weights(mask, edges)
n_voxels = np.sum(mask)
else:
n_voxels = n_x * n_y * n_z
weights = np.ones(edges.shape[1], dtype=dtype)
diag = np.ones(n_voxels, dtype=dtype)
diag_idx = np.arange(n_voxels)
i_idx = np.hstack((edges[0], edges[1]))
j_idx = np.hstack((edges[1], edges[0]))
graph = sparse.coo_matrix((np.hstack((weights, weights, diag)),
(np.hstack((i_idx, diag_idx)),
np.hstack((j_idx, diag_idx)))),
(n_voxels, n_voxels),
dtype=dtype)
if return_as is np.ndarray:
return graph.toarray()
return return_as(graph)
def img_to_graph(img, mask=None, return_as=sparse.coo_matrix, dtype=None):
"""Graph of the pixel-to-pixel gradient connections
Edges are weighted with the gradient values.
Read more in the :ref:`User Guide <image_feature_extraction>`.
Parameters
----------
img : ndarray, 2D or 3D
2D or 3D image
mask : ndarray of booleans, optional
An optional mask of the image, to consider only part of the
pixels.
return_as : np.ndarray or a sparse matrix class, optional
The class to use to build the returned adjacency matrix.
dtype : None or dtype, optional
The data of the returned sparse matrix. By default it is the
dtype of img
Notes
-----
For sklearn versions 0.14.1 and prior, return_as=np.ndarray was handled
by returning a dense np.matrix instance. Going forward, np.ndarray
returns an np.ndarray, as expected.
For compatibility, user code relying on this method should wrap its
calls in ``np.asarray`` to avoid type issues.
"""
img = np.atleast_3d(img)
n_x, n_y, n_z = img.shape
return _to_graph(n_x, n_y, n_z, mask, img, return_as, dtype)
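# Hedged usage sketch (illustrative only, never called at import time): the
# gradient-weighted adjacency matrix of a tiny 3x3 image is (n_pixels, n_pixels).
# The function name below is introduced here purely for illustration.
def _img_to_graph_demo():
    img = np.arange(9, dtype=np.float64).reshape(3, 3)
    return img_to_graph(img, return_as=np.ndarray).shape  # expected: (9, 9)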
def grid_to_graph(n_x, n_y, n_z=1, mask=None, return_as=sparse.coo_matrix,
dtype=np.int):
"""Graph of the pixel-to-pixel connections
Edges exist if 2 voxels are connected.
Parameters
----------
n_x : int
Dimension in x axis
n_y : int
Dimension in y axis
n_z : int, optional, default 1
Dimension in z axis
mask : ndarray of booleans, optional
An optional mask of the image, to consider only part of the
pixels.
return_as : np.ndarray or a sparse matrix class, optional
The class to use to build the returned adjacency matrix.
dtype : dtype, optional, default int
The data of the returned sparse matrix. By default it is int
Notes
-----
For sklearn versions 0.14.1 and prior, return_as=np.ndarray was handled
by returning a dense np.matrix instance. Going forward, np.ndarray
returns an np.ndarray, as expected.
For compatibility, user code relying on this method should wrap its
calls in ``np.asarray`` to avoid type issues.
"""
return _to_graph(n_x, n_y, n_z, mask=mask, return_as=return_as,
dtype=dtype)
###############################################################################
# From an image to a set of small image patches
def _compute_n_patches(i_h, i_w, p_h, p_w, max_patches=None):
"""Compute the number of patches that will be extracted in an image.
Read more in the :ref:`User Guide <image_feature_extraction>`.
Parameters
----------
i_h : int
The image height
i_w : int
        The image width
p_h : int
The height of a patch
p_w : int
The width of a patch
max_patches : integer or float, optional default is None
The maximum number of patches to extract. If max_patches is a float
between 0 and 1, it is taken to be a proportion of the total number
of patches.
"""
n_h = i_h - p_h + 1
n_w = i_w - p_w + 1
all_patches = n_h * n_w
if max_patches:
if (isinstance(max_patches, (numbers.Integral))
and max_patches < all_patches):
return max_patches
elif (isinstance(max_patches, (numbers.Real))
and 0 < max_patches < 1):
return int(max_patches * all_patches)
else:
raise ValueError("Invalid value for max_patches: %r" % max_patches)
else:
return all_patches
def extract_patches(arr, patch_shape=8, extraction_step=1):
"""Extracts patches of any n-dimensional array in place using strides.
Given an n-dimensional array it will return a 2n-dimensional array with
the first n dimensions indexing patch position and the last n indexing
the patch content. This operation is immediate (O(1)). A reshape
performed on the first n dimensions will cause numpy to copy data, leading
to a list of extracted patches.
Read more in the :ref:`User Guide <image_feature_extraction>`.
Parameters
----------
arr : ndarray
n-dimensional array of which patches are to be extracted
patch_shape : integer or tuple of length arr.ndim
Indicates the shape of the patches to be extracted. If an
integer is given, the shape will be a hypercube of
sidelength given by its value.
extraction_step : integer or tuple of length arr.ndim
Indicates step size at which extraction shall be performed.
If integer is given, then the step is uniform in all dimensions.
Returns
-------
patches : strided ndarray
2n-dimensional array indexing patches on first n dimensions and
containing patches on the last n dimensions. These dimensions
are fake, but this way no data is copied. A simple reshape invokes
a copying operation to obtain a list of patches:
result.reshape([-1] + list(patch_shape))
"""
arr_ndim = arr.ndim
if isinstance(patch_shape, numbers.Number):
patch_shape = tuple([patch_shape] * arr_ndim)
if isinstance(extraction_step, numbers.Number):
extraction_step = tuple([extraction_step] * arr_ndim)
patch_strides = arr.strides
slices = [slice(None, None, st) for st in extraction_step]
indexing_strides = arr[slices].strides
patch_indices_shape = ((np.array(arr.shape) - np.array(patch_shape)) //
np.array(extraction_step)) + 1
shape = tuple(list(patch_indices_shape) + list(patch_shape))
strides = tuple(list(indexing_strides) + list(patch_strides))
patches = as_strided(arr, shape=shape, strides=strides)
return patches
def extract_patches_2d(image, patch_size, max_patches=None, random_state=None):
"""Reshape a 2D image into a collection of patches
The resulting patches are allocated in a dedicated array.
Read more in the :ref:`User Guide <image_feature_extraction>`.
Parameters
----------
image : array, shape = (image_height, image_width) or
(image_height, image_width, n_channels)
The original image data. For color images, the last dimension specifies
        the channel: an RGB image would have `n_channels=3`.
patch_size : tuple of ints (patch_height, patch_width)
the dimensions of one patch
max_patches : integer or float, optional default is None
The maximum number of patches to extract. If max_patches is a float
between 0 and 1, it is taken to be a proportion of the total number
of patches.
random_state : int or RandomState
Pseudo number generator state used for random sampling to use if
`max_patches` is not None.
Returns
-------
patches : array, shape = (n_patches, patch_height, patch_width) or
(n_patches, patch_height, patch_width, n_channels)
The collection of patches extracted from the image, where `n_patches`
is either `max_patches` or the total number of patches that can be
extracted.
Examples
--------
>>> from sklearn.feature_extraction import image
>>> one_image = np.arange(16).reshape((4, 4))
>>> one_image
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
>>> patches = image.extract_patches_2d(one_image, (2, 2))
>>> print(patches.shape)
(9, 2, 2)
>>> patches[0]
array([[0, 1],
[4, 5]])
>>> patches[1]
array([[1, 2],
[5, 6]])
>>> patches[8]
array([[10, 11],
[14, 15]])
"""
i_h, i_w = image.shape[:2]
p_h, p_w = patch_size
if p_h > i_h:
raise ValueError("Height of the patch should be less than the height"
" of the image.")
if p_w > i_w:
raise ValueError("Width of the patch should be less than the width"
" of the image.")
image = check_array(image, allow_nd=True)
image = image.reshape((i_h, i_w, -1))
n_colors = image.shape[-1]
extracted_patches = extract_patches(image,
patch_shape=(p_h, p_w, n_colors),
extraction_step=1)
n_patches = _compute_n_patches(i_h, i_w, p_h, p_w, max_patches)
if max_patches:
rng = check_random_state(random_state)
i_s = rng.randint(i_h - p_h + 1, size=n_patches)
j_s = rng.randint(i_w - p_w + 1, size=n_patches)
patches = extracted_patches[i_s, j_s, 0]
else:
patches = extracted_patches
patches = patches.reshape(-1, p_h, p_w, n_colors)
# remove the color dimension if useless
if patches.shape[-1] == 1:
return patches.reshape((n_patches, p_h, p_w))
else:
return patches
def reconstruct_from_patches_2d(patches, image_size):
"""Reconstruct the image from all of its patches.
Patches are assumed to overlap and the image is constructed by filling in
the patches from left to right, top to bottom, averaging the overlapping
regions.
Read more in the :ref:`User Guide <image_feature_extraction>`.
Parameters
----------
patches : array, shape = (n_patches, patch_height, patch_width) or
(n_patches, patch_height, patch_width, n_channels)
The complete set of patches. If the patches contain colour information,
channels are indexed along the last dimension: RGB patches would
have `n_channels=3`.
image_size : tuple of ints (image_height, image_width) or
(image_height, image_width, n_channels)
the size of the image that will be reconstructed
Returns
-------
image : array, shape = image_size
the reconstructed image
"""
i_h, i_w = image_size[:2]
p_h, p_w = patches.shape[1:3]
img = np.zeros(image_size)
# compute the dimensions of the patches array
n_h = i_h - p_h + 1
n_w = i_w - p_w + 1
for p, (i, j) in zip(patches, product(range(n_h), range(n_w))):
img[i:i + p_h, j:j + p_w] += p
for i in range(i_h):
for j in range(i_w):
# divide by the amount of overlap
# XXX: is this the most efficient way? memory-wise yes, cpu wise?
img[i, j] /= float(min(i + 1, p_h, i_h - i) *
min(j + 1, p_w, i_w - j))
return img
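# Hedged usage sketch (illustrative only, never called at import time): extract
# every 2x2 patch of a small random image and rebuild it; because the patches
# are exact, the averaged reconstruction recovers the original image. All names
# below are introduced here purely for illustration.
def _patch_round_trip_demo():
    rng = np.random.RandomState(0)
    img = rng.rand(4, 4)
    patches = extract_patches_2d(img, (2, 2))               # shape (9, 2, 2)
    rebuilt = reconstruct_from_patches_2d(patches, (4, 4))
    return np.allclose(img, rebuilt)                        # expected: True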
class PatchExtractor(BaseEstimator):
"""Extracts patches from a collection of images
Read more in the :ref:`User Guide <image_feature_extraction>`.
Parameters
----------
patch_size : tuple of ints (patch_height, patch_width)
the dimensions of one patch
max_patches : integer or float, optional default is None
The maximum number of patches per image to extract. If max_patches is a
float in (0, 1), it is taken to mean a proportion of the total number
of patches.
random_state : int or RandomState
Pseudo number generator state used for random sampling.
"""
def __init__(self, patch_size=None, max_patches=None, random_state=None):
self.patch_size = patch_size
self.max_patches = max_patches
self.random_state = random_state
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
"""
return self
def transform(self, X):
"""Transforms the image samples in X into a matrix of patch data.
Parameters
----------
X : array, shape = (n_samples, image_height, image_width) or
(n_samples, image_height, image_width, n_channels)
Array of images from which to extract patches. For color images,
            the last dimension specifies the channel: an RGB image would have
`n_channels=3`.
Returns
-------
patches: array, shape = (n_patches, patch_height, patch_width) or
(n_patches, patch_height, patch_width, n_channels)
The collection of patches extracted from the images, where
`n_patches` is either `n_samples * max_patches` or the total
number of patches that can be extracted.
"""
self.random_state = check_random_state(self.random_state)
n_images, i_h, i_w = X.shape[:3]
X = np.reshape(X, (n_images, i_h, i_w, -1))
n_channels = X.shape[-1]
if self.patch_size is None:
patch_size = i_h // 10, i_w // 10
else:
patch_size = self.patch_size
# compute the dimensions of the patches array
p_h, p_w = patch_size
n_patches = _compute_n_patches(i_h, i_w, p_h, p_w, self.max_patches)
patches_shape = (n_images * n_patches,) + patch_size
if n_channels > 1:
patches_shape += (n_channels,)
# extract the patches
patches = np.empty(patches_shape)
for ii, image in enumerate(X):
patches[ii * n_patches:(ii + 1) * n_patches] = extract_patches_2d(
image, patch_size, self.max_patches, self.random_state)
return patches
| bsd-3-clause |
joshua-cogliati-inl/raven | framework/unSupervisedLearning.py | 1 | 64594 | # Copyright 2017 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Module containing interface with SciKit-Learn clustering
Created on Feb 13, 2015
@author: senrs
TODO:
For Clustering:
  1) parallelization: n_jobs parameter to some of the algorithms
"""
#for future compatibility with Python 3-----------------------------------------
from __future__ import division, print_function, unicode_literals, absolute_import
#End compatibility block for Python 3-------------------------------------------
#External Modules---------------------------------------------------------------
from sklearn import cluster, mixture, manifold, decomposition, covariance, neural_network
from sklearn import metrics
from sklearn.neighbors import kneighbors_graph
import scipy.cluster as hier
import numpy as np
import abc
import ast
import copy
import platform
#External Modules End-----------------------------------------------------------
#Internal Modules---------------------------------------------------------------
from utils import utils
from utils import mathUtils
import MessageHandler
import DataObjects
#Internal Modules End-----------------------------------------------------------
# FIXME: temporarily force to use Agg backend for now, otherwise it will cause segmental fault for test:
# test_dataMiningHierarchical.xml in tests/framework/PostProcessors/DataMiningPostProcessor/Clustering
# For the record, when using dendrogram, we have to force matplotlib.use('Agg')
# In the future, I think all the plots should moved to OutStreamPlots -- wangc
#display = utils.displayAvailable()
#if not display:
# matplotlib.use('Agg')
if utils.displayAvailable() and platform.system() != 'Windows':
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pylab as plt
class unSupervisedLearning(utils.metaclass_insert(abc.ABCMeta), MessageHandler.MessageUser):
"""
    This is the general interface to any unsupervised learning method.
    Essentially it contains the train and evaluate methods
"""
  returnType = '' ## this describes the type of information generated; the
                  ## possibilities are 'boolean', 'integer', 'float'
modelType = '' ## the broad class of the interpolator
@staticmethod
def checkArrayConsistency(arrayIn):
"""
This method checks the consistency of the in-array
@ In, arrayIn, a 1D numpy array, the array to validate
@ Out, (consistent, errorMsg), tuple,
consistent is a boolean where false means the input array is not a
1D numpy array.
errorMsg, string, the error message if the input array is inconsistent.
"""
if type(arrayIn) != np.ndarray:
return (False, ' The object is not a numpy array')
## The input data matrix kind is different for different clustering
## algorithms, e.g.:
## [n_samples, n_features] for MeanShift and KMeans
    ## [n_samples, n_samples] for AffinityPropagation and SpectralClustering
## In other words, MeanShift and KMeans work with points in a vector space,
## whereas AffinityPropagation and SpectralClustering can work with
## arbitrary objects, as long as a similarity measure exists for such
## objects. The input matrix supplied to unSupervisedLearning models as 1-D
## arrays of size [n_samples], (either n_features of or n_samples of them)
if len(arrayIn.shape) != 1:
return(False, ' The array must be 1-d')
return (True, '')
def __init__(self, messageHandler, **kwargs):
"""
constructor for unSupervisedLearning class.
@ In, messageHandler, object, Message handler object
@ In, kwargs, dict, arguments for the unsupervised learning algorithm
"""
self.printTag = 'unSupervised'
self.messageHandler = messageHandler
## booleanFlag that controls the normalization procedure. If true, the
## normalization is performed. Default = True
if kwargs != None:
self.initOptionDict = kwargs
else:
self.initOptionDict = {}
## Labels are passed, if known a priori (optional), they used in quality
## estimate
if 'Labels' in self.initOptionDict.keys():
self.labelFeature = self.initOptionDict['Labels']
self.initOptionDict.pop('Labels')
else:
self.labelFeature = None
if 'Features' in self.initOptionDict.keys():
self.features = self.initOptionDict['Features'].split(',')
self.initOptionDict.pop('Features')
else:
self.features = None
if 'verbosity' in self.initOptionDict:
self.verbosity = self.initOptionDict['verbosity']
self.initOptionDict.pop('verbosity')
else:
self.verbosity = None
# average value and sigma are used for normalization of the feature data
# a dictionary where for each feature a tuple (average value, sigma)
self.muAndSigmaFeatures = {}
#these need to be declared in the child classes!!!!
self.amITrained = False
## The normalized training data
self.normValues = None
def updateFeatures(self, features):
"""
      Change the Features that this classifier targets. If this ROM is already trained, it is reset to untrained and a warning is raised.
@ In, features, list(str), list of new features
@ Out, None
"""
self.raiseAWarning('Features for learning engine type "{}" have been reset, so ROM is untrained!'.format(self.printTag))
self.amITrained = False
self.features = features
def train(self, tdict, metric=None):
"""
Method to perform the training of the unSuperVisedLearning algorithm
      NB. The unSupervisedLearning object is responsible for converting the
      dictionary passed in into the local format required by the underlying
      kernels. So far the base class performs the translation into numpy.
@ In, tdict, dict, training dictionary
@ Out, None
"""
self.metric = metric
if not isinstance(tdict, dict):
self.raiseAnError(IOError, ' method "train". The training set needs to be provided through a dictionary. Type of the in-object is ' + str(type(tdict)))
featureCount = len(self.features)
if not isinstance(tdict[utils.first(tdict.keys())],dict):
realizationCount = utils.first(tdict.values()).size
############################################################################
## Error-handling
## Do all of our error handling upfront to make the meat of the code more
## readable:
## Check if the user requested something that is not available
unidentifiedFeatures = set(self.features) - set(tdict.keys())
if len(unidentifiedFeatures) > 0:
## Me write English good!
if len(unidentifiedFeatures) == 1:
msg = 'The requested feature: %s does not exist in the training set.' % list(unidentifiedFeatures)[0]
else:
msg = 'The requested features: %s do not exist in the training set.' % str(list(unidentifiedFeatures))
self.raiseAnError(IOError, msg)
## Check that all of the values have the same length
if not isinstance(utils.first(tdict.values()), dict):
for name, val in tdict.items():
if name in self.features and realizationCount != val.size:
self.raiseAnError(IOError, ' In training set, the number of realizations are inconsistent among the requested features.')
## Check if a label feature is provided by the user and in the training data
if self.labelFeature in tdict:
      self.labelValues = tdict[self.labelFeature]
resp = self.checkArrayConsistency(self.labelValues)
if not resp[0]:
self.raiseAnError(IOError, 'In training set for ground truth labels ' + self.labelFeature + ':' + resp[1])
else:
self.raiseAWarning(' The ground truth labels are not known a priori')
self.labelValues = None
## Not sure when this would ever happen, but check that the data you are
## given is a 1D array?
# for name,val in tdict.items():
# if name in self.features:
# resp = self.checkArrayConsistency(val)
# if not resp[0]:
# self.raiseAnError(IOError, ' In training set for feature ' + name + ':' + resp[1])
## End Error-handling
############################################################################
if metric is None:
self.normValues = np.zeros(shape = (realizationCount, featureCount))
for cnt, feat in enumerate(self.features):
featureValues = tdict[feat]
(mu,sigma) = mathUtils.normalizationFactors(featureValues)
## Store the normalized training data, and the normalization factors for
## later use
self.normValues[:, cnt] = (featureValues - mu) / sigma
self.muAndSigmaFeatures[feat] = (mu,sigma)
else:
# metric != None
## The dictionary represents a HistorySet
if isinstance(utils.first(tdict.values()),dict):
## normalize data
## But why this way? This should be one of the options, this looks like
## a form of shape matching, however what if I don't want similar
## shapes, I want similar valued curves in space? sigma and mu should
## not be forced to be computed within a curve.
tdictNorm={}
for key in tdict:
tdictNorm[key]={}
for var in tdict[key]:
(mu,sigma) = mathUtils.normalizationFactors(tdict[key][var])
tdictNorm[key][var] = (tdict[key][var]-mu)/sigma
cardinality = len(tdictNorm.keys())
self.normValues = np.zeros((cardinality,cardinality))
keys = list(tdictNorm.keys())
for i in range(cardinality):
for j in range(i,cardinality):
# process the input data for the metric, numpy.array is required
assert(list(tdictNorm[keys[i]].keys()) == list(tdictNorm[keys[j]].keys()))
numParamsI = len(tdictNorm[keys[i]].keys())
numStepsI = len(utils.first(tdictNorm[keys[i]].values()))
numStepsJ = len(utils.first(tdictNorm[keys[j]].values()))
inputI = np.empty((numParamsI, numStepsI))
inputJ = np.empty((numParamsI, numStepsJ))
for ind, params in enumerate(tdictNorm[keys[i]].keys()):
valueI = tdictNorm[keys[i]][params]
valueJ = tdictNorm[keys[j]][params]
inputI[ind] = valueI
inputJ[ind] = valueJ
pairedData = ((inputI,None), (inputJ,None))
# FIXME: Using loops can be very slow for large number of realizations
self.normValues[i][j] = metric.evaluate(pairedData)
if i != j:
self.normValues[j][i] = self.normValues[i][j]
else:
## PointSet
normValues = np.zeros(shape = (realizationCount, featureCount))
self.normValues = np.zeros(shape = (realizationCount, realizationCount))
for cnt, feat in enumerate(self.features):
featureValues = tdict[feat]
(mu,sigma) = mathUtils.normalizationFactors(featureValues)
normValues[:, cnt] = (featureValues - mu) / sigma
# compute the pairwised distance for given matrix
self.normValues = metric.evaluatePairwise((normValues,None))
self.__trainLocal__()
self.amITrained = True
## I'd be willing to bet this never gets called, and if it did it would crash
## under specific settings, namely using a history set. - unknown (maybe Dan?)
## -> for the record, I call it to get the labels in the ROMCollection.Clusters - talbpaul
def evaluate(self, edict):
"""
Method to perform the evaluation of a point or a set of points through
      the previously trained unSupervisedLearning algorithm
      NB. The unSupervisedLearning object is responsible for converting the
      dictionary passed in into the local format required by the underlying
      kernels.
@ In, edict, dict, evaluation dictionary
@ Out, evaluation, numpy.array, array of evaluated points
"""
if not self.amITrained:
      self.raiseAnError(IOError, 'ROM must be trained before evaluating!')
if not isinstance(edict, dict):
self.raiseAnError(IOError, ' Method "evaluate". The evaluate request/s need/s to be provided through a dictionary. Type of the in-object is ' + str(type(edict)))
names = edict.keys()
realizationCount = utils.first(edict.values()).size
featureCount = len(self.features)
############################################################################
## Error-handling
## Do all of our error handling upfront to make the meat of the code more
## readable:
## Check if the user requested something that is not available
unidentifiedFeatures = set(self.features) - set(edict.keys())
if len(unidentifiedFeatures) > 0:
## Me write English good!
if len(unidentifiedFeatures) == 1:
msg = 'The requested feature: %s does not exist in the evaluate set.' % list(unidentifiedFeatures)[0]
else:
msg = 'The requested features: %s do not exist in the evaluate set.' % str(list(unidentifiedFeatures))
self.raiseAnError(IOError, msg)
for name,values in edict.items():
resp = self.checkArrayConsistency(values)
if not resp[0]:
self.raiseAnError(IOError, ' In evaluate request for feature ' + name + ':' + resp[1])
## End Error-handling
############################################################################
## I don't think this is necessary?
# if self.labelFeature in edict.keys():
# self.labelValues = edict[self.labelFeature]
# construct the evaluation matrix
normedValues = np.zeros(shape = (realizationCount, featureCount))
for cnt, feature in enumerate(self.features):
featureValues = edict[feature]
(mu,sigma) = self.muAndSigmaFeatures[feature]
normedValues[:, cnt] = (featureValues - mu) / sigma
evaluation = self.__evaluateLocal__(normedValues)
return evaluation
def confidence(self):
"""
This call is used to get an estimate of the confidence in the prediction
of the clusters. The base class self.confidence checks if the clusters are
already evaluated (trained) then calls the local confidence
@ In, None
@ Out, confidence, float, the confidence
"""
if self.amITrained:
return self.__confidenceLocal__()
else:
self.raiseAnError(IOError, ' The confidence check is performed before training.')
def getDataMiningType(self):
"""
This method is used to return the type of data mining algorithm to be employed
@ In, none
@ Out, none
"""
pass
@abc.abstractmethod
def __trainLocal__(self):
"""
Perform training...
@ In, none
@ Out, none
"""
@abc.abstractmethod
def __evaluateLocal__(self, featureVals):
"""
@ In, featureVals, 2-D numpy.array, [n_samples,n_features]
@ Out, targetVals , 1-D numpy.array, [n_samples]
"""
@abc.abstractmethod
def __confidenceLocal__(self):
"""
This should return an estimation of the quality of the prediction.
@ In, none
@ Out, none
"""
#
#
class SciKitLearn(unSupervisedLearning):
"""
SciKitLearn interface for unsupervised Learning
"""
modelType = 'SciKitLearn'
availImpl = {}
availImpl['cluster'] = {} # Generalized Cluster
availImpl['cluster']['AffinityPropogation' ] = (cluster.AffinityPropagation , 'float') # Perform Affinity Propagation Clustering of data.
availImpl['cluster']['DBSCAN' ] = (cluster.DBSCAN , 'float') # Perform DBSCAN clustering from vector array or distance matrix.
availImpl['cluster']['KMeans' ] = (cluster.KMeans , 'float') # K-Means Clustering
availImpl['cluster']['MiniBatchKMeans' ] = (cluster.MiniBatchKMeans , 'float') # Mini-Batch K-Means Clustering
availImpl['cluster']['MeanShift' ] = (cluster.MeanShift , 'float') # Mean Shift Clustering
availImpl['cluster']['SpectralClustering' ] = (cluster.SpectralClustering , 'float') # Apply clustering to a projection to the normalized laplacian.
availImpl['cluster']['Agglomerative' ] = (cluster.AgglomerativeClustering, 'float') # Agglomerative Clustering - Feature of SciKit-Learn version 0.15
# availImpl['cluster']['FeatureAgglomeration' ] = (cluster.FeatureAgglomeration , 'float') # - Feature of SciKit-Learn version 0.15
# availImpl['cluster']['Ward' ] = (cluster.Ward , 'float') # Ward hierarchical clustering: constructs a tree and cuts it.
# availImpl['bicluster'] = {}
# availImpl['bicluster']['SpectralBiclustering'] = (cluster.bicluster.SpectralBiclustering, 'float') # Spectral biclustering (Kluger, 2003).
# availImpl['bicluster']['SpectralCoclustering'] = (cluster.bicluster.SpectralCoclustering, 'float') # Spectral Co-Clustering algorithm (Dhillon, 2001).
availImpl['mixture'] = {} # Generalized Gaussion Mixture Models (Classification)
availImpl['mixture']['GMM' ] = (mixture.GaussianMixture , 'float') # Gaussian Mixture Model
## Comment is not even right on it, but the DPGMM is being deprecated by SKL who
## admits that it is not working correctly which also explains why it is buried in
## their documentation.
# availImpl['mixture']['DPGMM'] = (mixture.DPGMM, 'float') # Variational Inference for the Infinite Gaussian Mixture Model.
availImpl['mixture']['VBGMM'] = (mixture.BayesianGaussianMixture, 'float') # Variational Inference for the Gaussian Mixture Model
availImpl['manifold'] = {} # Manifold Learning (Embedding techniques)
availImpl['manifold']['LocallyLinearEmbedding' ] = (manifold.LocallyLinearEmbedding , 'float') # Locally Linear Embedding
availImpl['manifold']['Isomap' ] = (manifold.Isomap , 'float') # Isomap
availImpl['manifold']['MDS' ] = (manifold.MDS , 'float') # MultiDimensional Scaling
availImpl['manifold']['SpectralEmbedding' ] = (manifold.SpectralEmbedding , 'float') # Spectral Embedding for Non-linear Dimensionality Reduction
# availImpl['manifold']['locally_linear_embedding'] = (manifold.locally_linear_embedding, 'float') # Perform a Locally Linear Embedding analysis on the data.
# availImpl['manifold']['spectral_embedding' ] = (manifold.spectral_embedding , 'float') # Project the sample on the first eigen vectors of the graph Laplacian.
availImpl['decomposition'] = {} # Matrix Decomposition
availImpl['decomposition']['PCA' ] = (decomposition.PCA , 'float') # Principal component analysis (PCA)
# availImpl['decomposition']['ProbabilisticPCA' ] = (decomposition.ProbabilisticPCA , 'float') # Additional layer on top of PCA that adds a probabilistic evaluationPrincipal component analysis (PCA)
availImpl['decomposition']['RandomizedPCA' ] = (decomposition.PCA , 'float') # Principal component analysis (PCA) using randomized SVD
availImpl['decomposition']['KernelPCA' ] = (decomposition.KernelPCA , 'float') # Kernel Principal component analysis (KPCA)
availImpl['decomposition']['FastICA' ] = (decomposition.FastICA , 'float') # FastICA: a fast algorithm for Independent Component Analysis.
availImpl['decomposition']['TruncatedSVD' ] = (decomposition.TruncatedSVD , 'float') # Dimensionality reduction using truncated SVD (aka LSA).
availImpl['decomposition']['SparsePCA' ] = (decomposition.SparsePCA , 'float') # Sparse Principal Components Analysis (SparsePCA)
availImpl['decomposition']['MiniBatchSparsePCA' ] = (decomposition.MiniBatchSparsePCA , 'float') # Mini-batch Sparse Principal Components Analysis
# availImpl['decomposition']['ProjectedGradientNMF'] = (decomposition.ProjectedGradientNMF, 'float') # Non-Negative matrix factorization by Projected Gradient (NMF)
# availImpl['decomposition']['FactorAnalysis' ] = (decomposition.FactorAnalysis , 'float') # Factor Analysis (FA)
# availImpl['decomposition']['NMF' ] = (decomposition.NMF , 'float') # Non-Negative matrix factorization by Projected Gradient (NMF)
# availImpl['decomposition']['SparseCoder' ] = (decomposition.SparseCoder , 'float') # Sparse coding
# availImpl['decomposition']['DictionaryLearning' ] = (decomposition.DictionaryLearning , 'float') # Dictionary Learning
# availImpl['decomposition']['MiniBatchDictionaryLearning'] = (decomposition.MiniBatchDictionaryLearning, 'float') # Mini-batch dictionary learning
# availImpl['decomposition']['fastica' ] = (decomposition.fastica , 'float') # Perform Fast Independent Component Analysis.
# availImpl['decomposition']['dict_learning' ] = (decomposition.dict_learning , 'float') # Solves a dictionary learning matrix factorization problem.
# availImpl['covariance'] = {} # Covariance Estimators
# availImpl['covariance']['EmpiricalCovariance'] = (covariance.EmpiricalCovariance, 'float') # Maximum likelihood covariance estimator
# availImpl['covariance']['EllipticEnvelope' ] = (covariance.EllipticEnvelope , 'float') # An object for detecting outliers in a Gaussian distributed dataset.
# availImpl['covariance']['GraphLasso' ] = (covariance.GraphLasso , 'float') # Sparse inverse covariance estimation with an l1-penalized estimator.
# availImpl['covariance']['GraphLassoCV' ] = (covariance.GraphLassoCV , 'float') # Sparse inverse covariance w/ cross-validated choice of the l1 penalty
# availImpl['covariance']['LedoitWolf' ] = (covariance.LedoitWolf , 'float') # LedoitWolf Estimator
# availImpl['covariance']['MinCovDet' ] = (covariance.MinCovDet , 'float') # Minimum Covariance Determinant (MCD): robust estimator of covariance
# availImpl['covariance']['OAS' ] = (covariance.OAS , 'float') # Oracle Approximating Shrinkage Estimator
# availImpl['covariance']['ShrunkCovariance' ] = (covariance.ShrunkCovariance , 'float') # Covariance estimator with shrinkage
# availImpl['neuralNetwork'] = {} # Covariance Estimators
# availImpl['neuralNetwork']['BernoulliRBM'] = (neural_network.BernoulliRBM, 'float') # Bernoulli Restricted Boltzmann Machine (RBM).
def __init__(self, messageHandler, **kwargs):
"""
constructor for SciKitLearn class.
@ In, messageHandler, MessageHandler, Message handler object
@ In, kwargs, dict, arguments for the SciKitLearn algorithm
@ Out, None
"""
unSupervisedLearning.__init__(self, messageHandler, **kwargs)
self.printTag = 'SCIKITLEARN'
if 'SKLtype' not in self.initOptionDict.keys():
self.raiseAnError(IOError, ' to define a scikit learn unSupervisedLearning Method the SKLtype keyword is needed (from KDD ' + self.name + ')')
SKLtype, SKLsubType = self.initOptionDict['SKLtype'].split('|')
self.initOptionDict.pop('SKLtype')
if not SKLtype in self.__class__.availImpl.keys():
self.raiseAnError(IOError, ' Unknown SKLtype ' + SKLtype + '(from KDD ' + self.name + ')')
if not SKLsubType in self.__class__.availImpl[SKLtype].keys():
self.raiseAnError(IOError, ' Unknown SKLsubType ' + SKLsubType + '(from KDD ' + self.name + ')')
self.SKLtype = SKLtype
self.SKLsubType = SKLsubType
self.__class__.returnType = self.__class__.availImpl[SKLtype][SKLsubType][1]
self.Method = self.__class__.availImpl[SKLtype][SKLsubType][0]()
paramsDict = self.Method.get_params()
## Let's only keep the parameters that the Method understands, throw
## everything else away, maybe with a warning message?
tempDict = {}
for key, value in self.initOptionDict.items():
if key in paramsDict:
try:
tempDict[key] = ast.literal_eval(value)
except:
tempDict[key] = value
else:
self.raiseAWarning('Ignoring unknown parameter %s to the method of type %s' % (key, SKLsubType))
self.initOptionDict = tempDict
self.Method.set_params(**self.initOptionDict)
self.normValues = None
self.outputDict = {}
def __trainLocal__(self):
"""
Perform training on samples in self.normValues: array,
shape = [n_samples, n_features] or [n_samples, n_samples]
@ In, None
@ Out, None
"""
## set bandwidth for MeanShift clustering
if hasattr(self.Method, 'bandwidth'):
if 'bandwidth' not in self.initOptionDict.keys():
self.initOptionDict['bandwidth'] = cluster.estimate_bandwidth(self.normValues,quantile=0.3)
self.Method.set_params(**self.initOptionDict)
## We need this connectivity if we want to use structured ward
if hasattr(self.Method, 'connectivity'):
## we should find a smart way to define the number of neighbors instead of
## default constant integer value(10)
connectivity = kneighbors_graph(self.normValues, n_neighbors = min(10,len(self.normValues[:,0])-1))
connectivity = 0.5 * (connectivity + connectivity.T)
self.initOptionDict['connectivity'] = connectivity
self.Method.set_params(**self.initOptionDict)
self.outputDict['outputs'] = {}
self.outputDict['inputs' ] = self.normValues
## This is the stuff that will go into the solution export or just float
## around and maybe never be used
self.metaDict = {}
## What are you doing here? Calling half of these methods does nothing
## unless you store the data somewhere. If you are going to blindly call
## whatever methods that exist in the class, then at least store them for
## later. Why is this done again on the PostProcessor side? I am struggling
## to understand what this code's purpose is except to obfuscate our
## interaction with skl.
# if hasattr(self.Method, 'fit_predict'):
# self.Method.fit_predict(self.normValues)
# elif hasattr(self.Method, 'predict'):
# self.Method.fit(self.normValues)
# self.Method.predict(self.normValues)
# elif hasattr(self.Method, 'fit_transform'):
# self.Method.fit_transform(self.normValues)
# elif hasattr(self.Method, 'transform'):
# self.Method.fit(self.normValues)
# self.Method.transform(self.normValues)
self.Method.fit(self.normValues)
## I don't care what algorithm you ran, these are the only things I care
## about, if I find one of them, then I am going to save it in our defined
## variable names
variableMap = {'labels_': 'labels',
'embedding_': 'embeddingVectors',
'embedding_vectors_': 'embeddingVectors'}
## This will store stuff that should go into the solution export, but
## these each need some massaging so we will not handle this automatically.
# metaMap = {'cluster_centers_': 'clusterCenters',
# 'means_': 'means',
# 'covars_': 'covars'}
## Not used right now, but maybe someone will want it?
# otherMap = {'n_clusters': 'noClusters',
# 'weights_': 'weights',
# 'cluster_centers_indices_': 'clusterCentersIndices',
# 'precs_': 'precs',
# 'noComponents_': 'noComponents',
# 'reconstructionError': 'reconstruction_error_',
# 'explained_variance_': 'explainedVariance',
# 'explained_variance_ratio_': 'explainedVarianceRatio'}
for key,val in self.Method.__dict__.items():
if key in variableMap:
## Translate the skl name to our naming convention
self.outputDict['outputs'][variableMap[key]] = copy.deepcopy(val)
## The meta information needs special handling otherwise, we could just
## do this here and be done in two lines
# if key in metaMap:
# self.metaDict[metaMap[key]] = copy.deepcopy(val)
## Below generates the output Dictionary from the trained algorithm, can be
## defined in a new method....
if 'cluster' == self.SKLtype:
if hasattr(self.Method, 'cluster_centers_') :
centers = self.Method.cluster_centers_
elif self.metric is None:
        ## This branch is used by any other clustering algorithm that does
        ## not generate cluster_centers_: the cluster centers are computed as
        ## the average location of all points in the cluster.
if hasattr(self.Method,'n_clusters'):
numClusters = self.Method.n_clusters
else:
numClusters = len(set(self.Method.labels_))
centers = np.zeros([numClusters,len(self.features)])
counter = np.zeros(numClusters)
for val,index in enumerate(self.Method.labels_):
centers[index] += self.normValues[val]
counter[index] += 1
for index,val in enumerate(centers):
if counter[index] == 0.:
self.raiseAnError(RuntimeError, 'The data-mining clustering method '
+ str(self.Method)
+ ' has generated a 0-size cluster')
centers[index] = centers[index] / float(counter[index])
else:
centers = None
if centers is not None:
## I hope these arrays are consistently ordered...
## We are mixing our internal storage of muAndSigma with SKLs
## representation of our data, I believe it is fair to say that we
## hand the data to SKL in the same order that we have it stored.
for cnt, feature in enumerate(self.features):
(mu,sigma) = self.muAndSigmaFeatures[feature]
for center in centers:
center[cnt] = center[cnt] * sigma + mu
self.metaDict['clusterCenters'] = centers
elif 'mixture' == self.SKLtype:
# labels = self.Method.fit_predict(self.normValues)
## The fit_predict is not available in all versions of sklearn for GMMs
## besides the data should already be fit above
labels = self.Method.predict(self.normValues)
self.outputDict['outputs']['labels'] = labels
if hasattr(self.Method, 'converged_'):
if not self.Method.converged_:
          self.raiseAnError(RuntimeError, self.SKLtype + '|' + self.SKLsubType
                            + ' did not converge. (from KDD->'
                            + self.SKLsubType + ')')
## For both means and covars below:
## We are mixing our internal storage of muAndSigma with SKLs
## representation of our data, I believe it is fair to say that we
## hand the data to SKL in the same order that we have it stored.
if hasattr(self.Method, 'means_'):
means = copy.deepcopy(self.Method.means_)
for cnt, feature in enumerate(self.features):
(mu,sigma) = self.muAndSigmaFeatures[feature]
for center in means:
center[cnt] = center[cnt] * sigma + mu
self.metaDict['means'] = means
if hasattr(self.Method, 'covariances_') :
covariance = copy.deepcopy(self.Method.covariances_)
for row, rowFeature in enumerate(self.features):
rowSigma = self.muAndSigmaFeatures[rowFeature][1]
for col, colFeature in enumerate(self.features):
colSigma = self.muAndSigmaFeatures[colFeature][1]
#if covariance type == full, the shape is (n_components, n_features, n_features)
if len(covariance.shape) == 3:
covariance[:,row,col] = covariance[:,row,col] * rowSigma * colSigma
else:
#XXX if covariance type == diag, this will be wrong.
covariance[row,col] = covariance[row,col] * rowSigma * colSigma
self.metaDict['covars'] = covariance
elif 'decomposition' == self.SKLtype:
if 'embeddingVectors' not in self.outputDict['outputs']:
if hasattr(self.Method, 'transform'):
embeddingVectors = self.Method.transform(self.normValues)
self.outputDict['outputs']['embeddingVectors'] = embeddingVectors
elif hasattr(self.Method, 'fit_transform'):
embeddingVectors = self.Method.fit_transform(self.normValues)
self.outputDict['outputs']['embeddingVectors'] = embeddingVectors
else:
self.raiseAWarning('The embedding vectors could not be computed.')
if hasattr(self.Method, 'components_'):
self.metaDict['components'] = self.Method.components_
if hasattr(self.Method, 'means_'):
self.metaDict['means'] = self.Method.means_
if hasattr(self.Method, 'explained_variance_'):
self.explainedVariance_ = copy.deepcopy(self.Method.explained_variance_)
self.metaDict['explainedVariance'] = self.explainedVariance_
if hasattr(self.Method, 'explained_variance_ratio_'):
self.metaDict['explainedVarianceRatio'] = self.Method.explained_variance_ratio_
def __evaluateLocal__(self, featureVals):
"""
Method to return labels of an already trained unSuperVised algorithm.
@ In, featureVals, numpy.array, feature values
@ Out, labels, numpy.array, labels
"""
if hasattr(self.Method, 'predict'):
labels = self.Method.predict(featureVals)
else:
labels = self.Method.fit_predict(featureVals)
return labels
def __confidenceLocal__(self):
"""
This should return an estimation dictionary of the quality of the
prediction.
@ In, None
@ Out, self.outputdict['confidence'], dict, dictionary of the confidence
metrics of the algorithms
"""
self.outputDict['confidence'] = {}
## I believe you should always have labels populated when dealing with a
## clustering algorithm, this second condition may be redundant
if 'cluster' == self.SKLtype and 'labels' in self.outputDict['outputs']:
labels = self.outputDict['outputs']['labels']
if np.unique(labels).size > 1:
self.outputDict['confidence']['silhouetteCoefficient'] = metrics.silhouette_score(self.normValues , labels)
if hasattr(self.Method, 'inertia_'):
self.outputDict['confidence']['inertia'] = self.Method.inertia_
## If we have ground truth labels, then compute some additional confidence
## metrics
if self.labelValues is not None:
self.outputDict['confidence']['homogeneity' ] = metrics.homogeneity_score(self.labelValues, labels)
self.outputDict['confidence']['completenes' ] = metrics.completeness_score(self.labelValues, labels)
self.outputDict['confidence']['vMeasure' ] = metrics.v_measure_score(self.labelValues, labels)
self.outputDict['confidence']['adjustedRandIndex' ] = metrics.adjusted_rand_score(self.labelValues, labels)
self.outputDict['confidence']['adjustedMutualInformation'] = metrics.adjusted_mutual_info_score(self.labelValues, labels)
elif 'mixture' == self.SKLtype:
if hasattr(self.Method, 'aic'):
self.outputDict['confidence']['aic' ] = self.Method.aic(self.normValues) ## Akaike Information Criterion
self.outputDict['confidence']['bic' ] = self.Method.bic(self.normValues) ## Bayesian Information Criterion
self.outputDict['confidence']['score'] = self.Method.score(self.normValues) ## log probabilities of each data point
return self.outputDict['confidence']
def getDataMiningType(self):
"""
This method is used to return the type of data mining algorithm to be employed
@ In, none
@ Out, self.SKLtype, string, type of data mining algorithm
"""
return self.SKLtype
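# Illustrative (commented-out) sketch of how the SciKitLearn wrapper above is
# typically driven; `mh` stands for an already configured RAVEN MessageHandler
# instance and the keyword values mirror the strings parsed from the XML input.
#
#   import numpy as np
#   engine = SciKitLearn(mh, SKLtype='cluster|KMeans', Features='x1,x2',
#                        n_clusters='3')
#   engine.train({'x1': np.random.rand(100), 'x2': np.random.rand(100)})
#   labels = engine.evaluate({'x1': np.random.rand(10), 'x2': np.random.rand(10)})
#   print(engine.confidence())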
#
#
class temporalSciKitLearn(unSupervisedLearning):
"""
Data mining library to perform SciKitLearn algorithms along temporal data
"""
def __init__(self, messageHandler, **kwargs):
"""
constructor for temporalSciKitLearn class.
@ In, messageHandler, Message handler object
@ In, kwargs, arguments for the SciKitLearn algorithm
@ Out, None
"""
unSupervisedLearning.__init__(self, messageHandler, **kwargs)
self.printTag = 'TEMPORALSCIKITLEARN'
if 'SKLtype' not in self.initOptionDict.keys():
self.raiseAnError(IOError, ' to define a scikit learn unSupervisedLearning Method the SKLtype keyword is needed (from KDD ' + self.name + ')')
self.SKLtype, self.SKLsubType = self.initOptionDict['SKLtype'].split('|')
self.pivotParameter = self.initOptionDict.get('pivotParameter', 'Time')
#Pop necessary to keep from confusing SciKitLearn with extra option
self.reOrderStep = int(self.initOptionDict.pop('reOrderStep', 5))
# return a SciKitLearn instance as engine for SKL data mining
self.SKLEngine = returnInstance('SciKitLearn',self, **self.initOptionDict)
self.normValues = None
self.outputDict = {}
@staticmethod
def checkArrayConsistency(arrayin, shape):
"""
This method checks the consistency of the in-array
      @ In, arrayin, object, the object to check (it should be a numpy array)
      @ In, shape, tuple, the expected shape (n_samples, n_history_steps)
      @ Out, (consistent, errorMsg), tuple, tuple[0] is a bool (True -> everything is ok, False -> something wrong), tuple[1], string, the error message
"""
if type(arrayin) != np.ndarray:
return (False, ' The object is not a numpy array')
if arrayin.shape[0] != shape[0] or arrayin.shape[1] != shape[1]:
return (False, ' The object shape is not correct')
## The input data matrix kind is different for different clustering methods
## e.g. [n_samples, n_features] for MeanShift and KMeans
## [n_samples,n_samples] for AffinityPropogation and SpectralClustering
## In other words, MeanShift and KMeans work with points in a vector space,
## whereas AffinityPropagation and SpectralClustering can work with
## arbitrary objects, as long as a similarity measure exists for such
## objects
    ## The input matrix is supplied to unSupervisedLearning models as 1-D arrays
    ## of size [n_samples] (either n_features or n_samples of them)
# if len(arrayin.shape) != 1: return(False, ' The array must be 1-d')
return (True, '')
def __deNormalizeData__(self,feat,t,data):
"""
Method to denormalize data based on the mean and standard deviation stored
in self.
@In, feat, string, the feature for which the input is to be denormalized
@In, t, float, time step identifier
@In, data, list, input values to be denormalized
@Out, deNormData, list, output values after denormalization
"""
N = data.shape[0]
deNormData = np.zeros(shape=data.shape)
mu, sig = self.muAndSigmaFeatures[feat][0,t], self.muAndSigmaFeatures[feat][1,t]
for n in range(N):
deNormData[n] = data[n]*sig+mu
return deNormData
def train(self, tdict):
"""
Method to train this class.
@ In, tdict, dictionary, training dictionary
@ Out, None
"""
## need to overwrite train method because time dependent data mining
## requires different treatment of input
if type(tdict) != dict:
self.raiseAnError(IOError, ' method "train". The training set needs to be provided through a dictionary. Type of the in-object is ' + str(type(tdict)))
names = list(tdict.keys())
values = list(tdict.values())
self.numberOfSample = values[0].shape[0]
self.numberOfHistoryStep = values[0].shape[1]
############################################################################
## Error-handling
## Do all of our error handling upfront to make the meat of the code more
## readable:
## Check if the user requested something that is not available
unidentifiedFeatures = set(self.features) - set(names)
if len(unidentifiedFeatures) > 0:
## Me write English good!
if len(unidentifiedFeatures) == 1:
msg = 'The requested feature: %s does not exist in the training set.' % list(unidentifiedFeatures)[0]
else:
msg = 'The requested features: %s do not exist in the training set.' % str(list(unidentifiedFeatures))
self.raiseAnError(IOError, msg)
## Check that all of the values have the same length
## Check if a label feature is provided by the user and in the training data
if self.labelFeature in names:
      self.labelValues = tdict[self.labelFeature]
resp = self.checkArrayConsistency(self.labelValues,[self.numberOfSample, self.numberOfHistoryStep])
if not resp[0]:
self.raiseAnError(IOError, 'In training set for ground truth labels ' + self.labelFeature + ':' + resp[1])
else:
self.raiseAWarning(' The ground truth labels are not known a priori')
self.labelValues = None
## End Error-handling
############################################################################
self.normValues = {}
for cnt,feature in enumerate(self.features):
resp = self.checkArrayConsistency(tdict[feature], [self.numberOfSample, self.numberOfHistoryStep])
if not resp[0]:
self.raiseAnError(IOError, ' In training set for feature ' + feature + ':' + resp[1])
self.normValues[feature] = np.zeros(shape = tdict[feature].shape)
self.muAndSigmaFeatures[feature] = np.zeros(shape=(2,self.numberOfHistoryStep))
for t in range(self.numberOfHistoryStep):
featureValues = tdict[feature][:,t]
(mu,sigma) = mathUtils.normalizationFactors(featureValues)
## Store the normalized training data, and the normalization factors for
## later use
self.normValues[feature][:,t] = (featureValues - mu) / sigma
self.muAndSigmaFeatures[feature][0,t] = mu
self.muAndSigmaFeatures[feature][1,t] = sigma
self.inputDict = tdict
self.__trainLocal__()
self.amITrained = True
def __trainLocal__(self):
"""
Method to train this class.
"""
self.outputDict['outputs'] = {}
self.outputDict['inputs' ] = self.normValues
## This is the stuff that will go into the solution export or just float
## around and maybe never be used
self.metaDict = {}
for t in range(self.numberOfHistoryStep):
sklInput = {}
for feat in self.features:
sklInput[feat] = self.inputDict[feat][:,t]
self.SKLEngine.features = sklInput
self.SKLEngine.train(sklInput)
self.SKLEngine.confidence()
## Store everything from the specific timestep's SKLEngine into a running
## list
for key,val in self.SKLEngine.outputDict['outputs'].items():
if key not in self.outputDict['outputs']:
self.outputDict['outputs'][key] = {} # [None]*self.numberOfHistoryStep
self.outputDict['outputs'][key][t] = val
for key,val in self.SKLEngine.metaDict.items():
if key not in self.metaDict:
self.metaDict[key] = {} # [None]*self.numberOfHistoryStep
self.metaDict[key][t] = val
if self.SKLtype in ['cluster']:
if 'clusterCenters' not in self.metaDict.keys():
self.metaDict['clusterCenters'] = {}
if 'clusterCentersIndices' not in self.metaDict.keys():
self.metaDict['clusterCentersIndices'] = {}
# # collect labels
# if hasattr(self.SKLEngine.Method, 'labels_'):
# self.outputDict['labels'][t] = self.SKLEngine.Method.labels_
# # collect cluster centers
if hasattr(self.SKLEngine.Method, 'cluster_centers_'):
self.metaDict['clusterCenters'][t] = np.zeros(shape=self.SKLEngine.metaDict['clusterCenters'].shape)
for cnt, feat in enumerate(self.features):
self.metaDict['clusterCenters'][t][:,cnt] = self.SKLEngine.metaDict['clusterCenters'][:,cnt]
else:
self.metaDict['clusterCenters'][t] = self.__computeCenter__(sklInput, self.outputDict['outputs']['labels'][t])
# collect number of clusters
if hasattr(self.SKLEngine.Method, 'n_clusters'):
noClusters = self.SKLEngine.Method.n_clusters
else:
noClusters = self.metaDict['clusterCenters'][t].shape[0]
# collect cluster indices
# if hasattr(self.SKLEngine.Method, 'cluster_centers_indices_'):
# self.metaDict['clusterCentersIndices'][t] = self.SKLEngine.Method.cluster_centers_indices_
# self.metaDict['clusterCentersIndices'][t] = range(noClusters)
# else:
# self.metaDict['clusterCentersIndices'][t] = range(noClusters) # use list(set(self.SKLEngine.Method.labels_)) to collect outliers
self.metaDict['clusterCentersIndices'][t] = list(range(noClusters))
# # collect optional output
# if hasattr(self.SKLEngine.Method, 'inertia_'):
# if 'inertia' not in self.outputDict.keys(): self.outputDict['inertia'] = {}
# self.outputDict['inertia'][t] = self.SKLEngine.Method.inertia_
# re-order clusters
if t > 0:
remap = self.__reMapCluster__(t, self.metaDict['clusterCenters'], self.metaDict['clusterCentersIndices'])
for n in range(len(self.metaDict['clusterCentersIndices'][t])):
self.metaDict['clusterCentersIndices'][t][n] = remap[self.metaDict['clusterCentersIndices'][t][n]]
for n in range(len(self.outputDict['outputs']['labels'][t])):
if self.outputDict['outputs']['labels'][t][n] >=0:
self.outputDict['outputs']['labels'][t][n] = remap[self.SKLEngine.Method.labels_[n]]
## TODO: Remap the cluster centers now...
elif self.SKLtype in ['mixture']:
if 'means' not in self.metaDict.keys():
self.metaDict['means'] = {}
if 'componentMeanIndices' not in self.metaDict.keys():
self.metaDict['componentMeanIndices'] = {}
# # collect component membership
if 'labels' not in self.outputDict['outputs']:
self.outputDict['outputs']['labels'] = {}
self.outputDict['outputs']['labels'][t] = self.SKLEngine.evaluate(sklInput)
# # collect component means
if hasattr(self.SKLEngine.Method, 'means_'):
self.metaDict['means'][t] = np.zeros(shape=self.SKLEngine.Method.means_.shape)
for cnt, feat in enumerate(self.features):
self.metaDict['means'][t][:,cnt] = self.__deNormalizeData__(feat,t,self.SKLEngine.Method.means_[:,cnt])
else:
          self.metaDict['means'][t] = self.__computeCenter__(sklInput, self.outputDict['outputs']['labels'][t])
# # collect number of components
if hasattr(self.SKLEngine.Method, 'n_components'):
numComponents = self.SKLEngine.Method.n_components
else:
numComponents = self.metaDict['means'][t].shape[0]
# # collect component indices
self.metaDict['componentMeanIndices'][t] = list(range(numComponents))
# # collect optional output
if hasattr(self.SKLEngine.Method, 'weights_'):
if 'weights' not in self.metaDict.keys():
self.metaDict['weights'] = {}
self.metaDict['weights'][t] = self.SKLEngine.Method.weights_
if 'covars' in self.SKLEngine.metaDict:
if 'covars' not in self.metaDict.keys():
self.metaDict['covars'] = {}
self.metaDict['covars'][t] = self.SKLEngine.metaDict['covars']
if hasattr(self.SKLEngine.Method, 'precs_'):
if 'precs' not in self.metaDict.keys():
self.metaDict['precs'] = {}
self.metaDict['precs'][t] = self.SKLEngine.Method.precs_
# if hasattr(self.SKLEngine.Method, 'converged_'):
# if 'converged' not in self.outputDict.keys():
# self.outputDict['converged'] = {}
# self.outputDict['converged'][t] = self.SKLEngine.Method.converged_
# re-order components
if t > 0:
remap = self.__reMapCluster__(t, self.metaDict['means'], self.metaDict['componentMeanIndices'])
for n in range(len(self.metaDict['componentMeanIndices'][t])):
self.metaDict['componentMeanIndices'][t][n] = remap[self.metaDict['componentMeanIndices'][t][n]]
for n in range(len(self.outputDict['outputs']['labels'][t])):
if self.outputDict['outputs']['labels'][t][n] >=0:
self.outputDict['outputs']['labels'][t][n] = remap[self.outputDict['outputs']['labels'][t][n]]
elif 'manifold' == self.SKLtype:
# if 'noComponents' not in self.outputDict.keys():
# self.outputDict['noComponents'] = {}
if 'embeddingVectors' not in self.outputDict['outputs']:
self.outputDict['outputs']['embeddingVectors'] = {}
if hasattr(self.SKLEngine.Method, 'embedding_'):
self.outputDict['outputs']['embeddingVectors'][t] = self.SKLEngine.Method.embedding_
if 'transform' in dir(self.SKLEngine.Method):
self.outputDict['outputs']['embeddingVectors'][t] = self.SKLEngine.Method.transform(self.SKLEngine.normValues)
elif 'fit_transform' in dir(self.SKLEngine.Method):
self.outputDict['outputs']['embeddingVectors'][t] = self.SKLEngine.Method.fit_transform(self.SKLEngine.normValues)
# if hasattr(self.SKLEngine.Method, 'reconstruction_error_'):
# if 'reconstructionError_' not in self.outputDict.keys():
# self.outputDict['reconstructionError_'] = {}
# self.outputDict['reconstructionError_'][t] = self.SKLEngine.Method.reconstruction_error_
elif 'decomposition' == self.SKLtype:
for var in ['explainedVarianceRatio','means','explainedVariance',
'components']:
if var not in self.metaDict:
self.metaDict[var] = {}
if hasattr(self.SKLEngine.Method, 'components_'):
self.metaDict['components'][t] = self.SKLEngine.Method.components_
## This is not the same thing as the components above! This is the
## transformed data, the other composes the transformation matrix to get
## this. Whoever designed this, you are causing me no end of headaches
## with this code... I am pretty sure this can all be handled within the
## post-processor rather than adding this frankenstein of code just to
## gain access to the skl techniques.
if 'embeddingVectors' not in self.outputDict['outputs']:
if 'transform' in dir(self.SKLEngine.Method):
embeddingVectors = self.SKLEngine.Method.transform(self.SKLEngine.normValues)
elif 'fit_transform' in dir(self.SKLEngine.Method):
embeddingVectors = self.SKLEngine.Method.fit_transform(self.SKLEngine.normValues)
self.outputDict['outputs']['embeddingVectors'][t] = embeddingVectors
if hasattr(self.SKLEngine.Method, 'means_'):
self.metaDict['means'][t] = self.SKLEngine.Method.means_
if hasattr(self.SKLEngine.Method, 'explained_variance_'):
self.metaDict['explainedVariance'][t] = self.SKLEngine.Method.explained_variance_
if hasattr(self.SKLEngine.Method, 'explained_variance_ratio_'):
self.metaDict['explainedVarianceRatio'][t] = self.SKLEngine.Method.explained_variance_ratio_
else:
self.raiseAnError(IOError, 'Unknown type: ' + str(self.SKLtype))
def __computeCenter__(self, data, labels):
"""
Method to compute cluster center for clustering algorithms that do not return such information.
This is needed to re-order cluster number
@In, data, dict, each value of the dict is a 1-d array of data
@In, labels, list, list of label for each sample
@Out, clusterCenter, array, shape = [no_clusters, no_features], center coordinate
"""
point = {}
for cnt, l in enumerate(labels):
if l >= 0 and l not in point.keys():
point[l] = []
if l >= 0:
point[l].append(cnt)
noCluster = len(point.keys())
if noCluster == 0:
      self.raiseAnError(ValueError, 'number of clusters is 0!!!')
clusterCenter = np.zeros(shape=(noCluster,len(self.features)))
for cnt, feat in enumerate(self.features):
for ind, l in enumerate(point.keys()):
clusterCenter[ind,cnt] = np.average(data[feat][point[l]])
return clusterCenter
def __computeDist__(self,t,n1,n2,dataCenter,opt):
"""
Computes the distance between two cluster centers.
Four different distance metrics are implemented, which can be specified by input opt
@In, t, float, current time
@In, n1, integer, center index 1
@In, n2, integer, center index 2
@In, dataCenter, dict, each value contains the center coordinate at each time step
@In, opt, string, specifies which distance metric to use
@Out, dist, float, distance between center n1 and center n2
"""
x1 = dataCenter[t-1][n1,:]
x2 = dataCenter[t][n2,:]
if opt in ['Distance']:
dist = np.sqrt(np.dot(x1-x2,x1-x2))
return dist
if opt in ['Overlap']:
l1 = self.outputDict['outputs']['labels'][t-1]
l2 = self.SKLEngine.Method.labels_
point1 = []
point2 = []
for n in range(len(l1)):
if l1[n] == n1:
point1.append(n)
for n in range(len(l2)):
if l2[n] == n2:
point2.append(n)
dist = - len(set(point1).intersection(point2))
return dist
if opt in ['DistVariance']:
l1 = self.outputDict['outputs']['labels'][t-1]
l2 = self.SKLEngine.Method.labels_
dist = np.sqrt(np.dot(x1-x2,x1-x2))
v1 = v2 = N1 = N2 = 0
noFeat = len(self.features)
for n in range(len(l1)):
# compute variance of points with label l1
if l1[n] == n1:
x = np.zeros(shape=(noFeat,))
for cnt, feat in enumerate(self.features):
x[cnt] = self.inputDict[feat][n,t-1]
v1 += np.sqrt(np.dot(x-x1,x-x1))**2
N1 += 1
for n in range(len(l2)):
# compute variance of points with label l2
if l2[n] == n2:
x = np.zeros(shape=(noFeat,))
for cnt, feat in enumerate(self.features):
x[cnt] = self.inputDict[feat][n,t]
v2 += np.sqrt(np.dot(x-x2,x-x2))**2
N2 += 1
dist += np.abs(np.sqrt(v1/(N1-1)*1.0) - np.sqrt(v2/(N2-1)*1.0))
return dist
if opt in ['DistanceWithDecay']:
K = self.reOrderStep
decR = 1
dist = 0
for k in range(1,K+1):
if t-k >= 0:
if n1 < dataCenter[t-k].shape[0]:
x1 = dataCenter[t-k][n1,:]
dist += np.sqrt(np.dot(x1-x2,x1-x2))*np.exp(-(k-1)*decR)
return dist
def __reMapCluster__(self,t,dataCenter,dataCenterIndex):
"""
Computes the remapping relationship between the current time step cluster and the previous time step
@In, t, float, current time
@In, dataCenter, dict, each value contains the center coordinate at each time step
@In, dataCenterIndex, dict, each value contains the center index at each time step
@Out, remap, list, remapping relation between the current time step cluster and the previous time step
"""
indices1 = dataCenterIndex[t-1]
indices2 = dataCenterIndex[t]
N1 = dataCenter[t-1].shape[0]
N2 = dataCenter[t].shape[0]
dMatrix = np.zeros(shape=(N1,N2))
for n1 in range(N1):
for n2 in range(N2):
dMatrix[n1,n2] = self.__computeDist__(t,n1,n2,dataCenter,'DistanceWithDecay')
_, mapping = self.__localReMap__(dMatrix, (list(range(N1)), list(range(N2))))
remap = {}
f1, f2 = [False]*N1, [False]*N2
for mp in mapping:
i1, i2 = mp[0], mp[1]
if f1[i1] or f2[i2]:
self.raiseAnError(ValueError, 'Mapping is overlapped. ')
remap[indices2[i2]] = indices1[i1]
f1[i1], f2[i2] = True, True
if N2 > N1:
# for the case the new cluster comes up
tmp = 1
for n2 in range(N2):
if indices2[n2] not in remap.keys():
remap[indices2[n2]] = max(indices1)+tmp
# remap[indices2[n2]] = self.maxNoClusters + 1 # every discontinuity would introduce a new cluster index.
return remap
def __localReMap__(self, dMatrix,loc):
"""
Method to return the mapping based on distance stored in dMatrix, the returned mapping shall minimize the global sum of distance
This function is recursively called to find the global minimum, so is computationally expensive --- FIXME
@In, dMatrix, array, shape = (no_clusterAtPreviousTimeStep, no_clusterAtCurrentTimeStep)
      @In, loc, tuple, the first element is the cluster indices for the previous time step and the second one is for the current time step
@Out, sumDist, float, global sum of distance
@Out, localReMap, list, remapping relation between the row and column identifier of dMatrix
"""
if len(loc[0]) == 1:
sumDist, localReMap = np.inf, -1
n1 = loc[0][0]
for n2 in loc[1]:
if dMatrix[n1,n2] < sumDist:
sumDist = dMatrix[n1,n2]
localReMap = n2
return sumDist, [(n1,localReMap)]
elif len(loc[1]) == 1:
sumDist, localReMap = np.inf, -1
n2 = loc[1][0]
for n1 in loc[0]:
if dMatrix[n1,n2] < sumDist:
sumDist = dMatrix[n1,n2]
localReMap = n1
return sumDist, [(localReMap,n2)]
else:
sumDist, i1, i2, localReMap = np.inf, -1, -1, []
n1 = loc[0][0]
temp1 = copy.deepcopy(loc[0])
temp1.remove(n1)
for n2 in loc[1]:
temp2 = copy.deepcopy(loc[1])
temp2.remove(n2)
d_temp, l = self.__localReMap__(dMatrix, (temp1,temp2))
if dMatrix[n1,n2] + d_temp < sumDist:
sumDist = dMatrix[n1,n2] + d_temp
i1, i2, localReMap = n1, n2, l
localReMap.append((i1,i2))
return sumDist, localReMap
def __evaluateLocal__(self, featureVals):
"""
Not implemented for this class
"""
pass
def __confidenceLocal__(self):
"""
Not implemented for this class
"""
pass
def getDataMiningType(self):
"""
This method is used to return the type of data mining algorithm to be employed
@ In, none
@ Out, self.SKLtype, string, type of data mining algorithm
"""
return self.SKLtype
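# Illustrative (commented-out) sketch of the time-dependent wrapper above; the
# training dictionary maps each feature to a 2-D array shaped
# (n_samples, n_time_steps), one SciKitLearn engine is fit per time step, and
# cluster indices are re-mapped between consecutive steps. `mh` again stands
# for a configured MessageHandler instance.
#
#   import numpy as np
#   tdict = {'x1': np.random.rand(50, 20), 'x2': np.random.rand(50, 20)}
#   engine = temporalSciKitLearn(mh, SKLtype='cluster|KMeans', Features='x1,x2',
#                                n_clusters='3', pivotParameter='Time')
#   engine.train(tdict)
#   labelsPerStep = engine.outputDict['outputs']['labels']  # dict keyed by time step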
class Scipy(unSupervisedLearning):
"""
Scipy interface for hierarchical Learning
"""
modelType = 'Scipy'
availImpl = {}
availImpl['cluster'] = {}
availImpl['cluster']['Hierarchical'] = (hier.hierarchy, 'float') # Perform Hierarchical Clustering of data.
def __init__(self, messageHandler, **kwargs):
"""
constructor for Scipy class.
@ In, messageHandler, MessageHandler, Message handler object
@ In, kwargs, dict, arguments for the Scipy algorithm
@ Out, None
"""
unSupervisedLearning.__init__(self, messageHandler, **kwargs)
self.printTag = 'SCIPY'
if 'SCIPYtype' not in self.initOptionDict.keys():
self.raiseAnError(IOError, ' to define a Scipy unSupervisedLearning Method the SCIPYtype keyword is needed (from KDD ' + self.name + ')')
SCIPYtype, SCIPYsubType = self.initOptionDict['SCIPYtype'].split('|')
self.initOptionDict.pop('SCIPYtype')
if not SCIPYtype in self.__class__.availImpl.keys():
self.raiseAnError(IOError, ' Unknown SCIPYtype ' + SCIPYtype)
if not SCIPYsubType in self.__class__.availImpl[SCIPYtype].keys():
self.raiseAnError(IOError, ' Unknown SCIPYsubType ' + SCIPYsubType)
self.__class__.returnType = self.__class__.availImpl[SCIPYtype][SCIPYsubType][1]
self.Method = self.__class__.availImpl[SCIPYtype][SCIPYsubType][0]
self.SCIPYtype = SCIPYtype
self.SCIPYsubType = SCIPYsubType
self.normValues = None
self.outputDict = {}
def __trainLocal__(self):
"""
Perform training on samples in self.normValues: array, shape = [n_samples, n_features] or [n_samples, n_samples]
@ In, None
@ Out, None
"""
self.outputDict['outputs'] = {}
self.outputDict['inputs' ] = self.normValues
if hasattr(self.Method, 'linkage'):
self.linkage = self.Method.linkage(self.normValues,self.initOptionDict['method'],self.initOptionDict['metric'])
if 'dendrogram' in self.initOptionDict and self.initOptionDict['dendrogram'] == 'true':
self.advDendrogram(self.linkage,
p = float(self.initOptionDict['p']),
leaf_rotation = 90.,
leaf_font_size = 12.,
truncate_mode = self.initOptionDict['truncationMode'],
show_leaf_counts = self.initOptionDict['leafCounts'],
show_contracted = self.initOptionDict['showContracted'],
annotate_above = self.initOptionDict['annotatedAbove'],
#orientation = self.initOptionDict['orientation'],
max_d = self.initOptionDict['level'])
self.labels_ = hier.hierarchy.fcluster(self.linkage, self.initOptionDict['level'],self.initOptionDict['criterion'])
self.outputDict['outputs']['labels'] = self.labels_
return self.labels_
def advDendrogram(self,*args, **kwargs):
"""
This methods actually creates the dendrogram
@ In, None
@ Out, None
"""
plt.figure()
max_d = kwargs.pop('max_d', None)
if max_d and 'color_threshold' not in kwargs:
kwargs['color_threshold'] = max_d
annotate_above = kwargs.pop('annotate_above', 0)
ddata = hier.hierarchy.dendrogram(*args, **kwargs)
if not kwargs.get('no_plot', False):
plt.title('Hierarchical Clustering Dendrogram')
plt.xlabel('sample index')
plt.ylabel('distance')
for i, d, c in zip(ddata['icoord'], ddata['dcoord'], ddata['color_list']):
x = 0.5 * sum(i[1:3])
y = d[1]
if y > annotate_above:
plt.plot(x, y, 'o', c=c)
plt.annotate("%.3g" % y, (x, y), xytext=(15, 11),
textcoords='offset points',
va='top', ha='center')
if max_d:
plt.axhline(y=max_d, c='0.1')
if 'dendFileID' in self.initOptionDict:
title = self.initOptionDict['dendFileID'] + '.pdf'
else:
title = 'dendrogram.pdf'
plt.savefig(title)
plt.close()
def __evaluateLocal__(self,*args, **kwargs):
"""
Method to return output of an already trained scipy algorithm.
@ In, featureVals, numpy.array, feature values
@ Out, self.dData, numpy.array, dendrogram
"""
pass
def __confidenceLocal__(self):
pass
def getDataMiningType(self):
"""
This method is used to return the type of data mining algorithm to be employed
@ In, none
@ Out, self.SCIPYtype, string, type of data mining algorithm
"""
return self.SCIPYtype
__interfaceDict = {}
__interfaceDict['SciKitLearn'] = SciKitLearn
__interfaceDict['temporalSciKitLearn'] = temporalSciKitLearn
__interfaceDict['Scipy'] = Scipy
__base = 'unSuperVisedLearning'
def returnInstance(modelClass, caller, **kwargs):
"""
    This function returns an instance of the requested model type
@ In, modelClass, string, representing the instance to create
@ In, caller, object, object that will share its messageHandler instance
@ In, kwargs, dict, a dictionary specifying the keywords and values needed to create the instance.
@ Out, object, an instance of a Model
"""
try:
return __interfaceDict[modelClass](caller.messageHandler, **kwargs)
except KeyError as ae:
# except Exception as(ae):
caller.raiseAnError(NameError, 'unSupervisedLearning', 'Unknown ' + __base + ' type ' + str(modelClass)+'.Error: '+ str(ae))
def returnClass(modelClass, caller):
"""
    This function returns the class definition of the requested model type
@ In, modelClass, string, representing the class to retrieve
@ In, caller, object, object that will share its messageHandler instance
@ Out, the class definition of the Model
"""
try:
return __interfaceDict[modelClass]
except KeyError:
    caller.raiseAnError(NameError, 'unSupervisedLearning', 'Unknown ' + __base + ' type ' + modelClass)
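# Illustrative (commented-out) sketch of the factory above driving the Scipy
# hierarchical engine; `caller` stands for any RAVEN entity exposing a
# messageHandler attribute, and the option names follow __trainLocal__ above.
#
#   import numpy as np
#   engine = returnInstance('Scipy', caller, SCIPYtype='cluster|Hierarchical',
#                           Features='x1,x2', method='ward', metric='euclidean',
#                           level=3, criterion='maxclust')
#   engine.train({'x1': np.random.rand(100), 'x2': np.random.rand(100)})
#   labels = engine.outputDict['outputs']['labels']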
| apache-2.0 |
DTMilodowski/LiDAR_canopy | src/canopy_light_environment_driver.py | 1 | 1906 | import numpy as np
import sys
from matplotlib import pyplot as plt
import LiDAR_tools as lidar
import auxilliary_functions as aux
import LiDAR_MacHorn_LAD_profiles as LAD1
import LiDAR_radiative_transfer_LAD_profiles as LAD2
import structural_metrics as structure
import plot_LAD_profiles as plot_LAD
import canopy_microclimate as clim
sys.path.append('/home/dmilodow/DataStore_DTM/BALI/SPA_BALI_data_and_analysis/scripts/field_data/')
import load_field_data as field
# define the profiles' location
LAD_file = './output/BALI_subplot_LAD_profiles_MacHorn_1m.npz'
LAD_profiles = np.load(LAD_file)
# directory for the figure output used below; './output' is assumed here to
# match the location of LAD_file
output_dir = './output'
Plots = LAD_profiles.keys()
heights = np.arange(1.,81.)
light_absorption = {}
light_transmittance = {}
k = 0.7
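# NOTE: the canopy_microclimate functions used below are assumed to implement
# the standard Beer-Lambert description of light attenuation, i.e. the
# transmittance at height z decays with the leaf area accumulated above it,
#   I(z) = exp(-k * LAI_cum(z)),
# and the light absorbed by a layer is the drop in transmittance across that
# layer; k = 0.7 is the extinction coefficient applied to every subplot.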
n_plots = len(Plots)
color_string = 'blue'
label_string = '-'
for pp in range(0,n_plots):
plot = Plots[pp]
print plot
I = np.zeros(LAD_profiles[plot].shape)
A = np.zeros(LAD_profiles[plot].shape)
n_sub = I.shape[0]
for ss in range(0,n_sub):
I[ss,:]=clim.estimate_canopy_light_transmittance(LAD_profiles[plot][ss],heights,k)
A[ss,:]=clim.estimate_canopy_light_absorption(I[ss,:],k)
light_transmittance[plot] = I.copy()
light_absorption[plot] = A.copy()
figure_name = output_dir + '/light_environment/'+Plots[pp]+'_subplot_transmitance'
plot_LAD.plot_subplot_transmittance_profiles(light_transmittance[plot],heights,color_string,label_string,figure_name)
figure_name = output_dir + '/light_environment/'+Plots[pp]+'_subplot_absorption'
plot_LAD.plot_subplot_absorption_profiles(light_absorption[plot],heights,color_string,label_string,figure_name)
OutFile = '/home/dmilodow/DataStore_DTM/BALI/LiDAR/src/output/BALI_subplot_lighttransmittance'
np.savez(OutFile+'.npz', **light_transmittance)
OutFile = '/home/dmilodow/DataStore_DTM/BALI/LiDAR/src/output/BALI_subplot_light_absorption'
np.savez(OutFile+'.npz', **light_absorption)
| gpl-3.0 |
joshbohde/scikit-learn | examples/decomposition/plot_ica_blind_source_separation.py | 9 | 1511 | """
=====================================
Blind source separation using FastICA
=====================================
:ref:`ICA` is used to estimate sources given noisy measurements.
Imagine 2 instruments playing simultaneously and 2 microphones
recording the mixed signals. ICA is used to recover the sources
i.e. what is played by each instrument.
"""
print __doc__
import numpy as np
import pylab as pl
from sklearn.decomposition import FastICA
###############################################################################
# Generate sample data
np.random.seed(0)
n_samples = 2000
time = np.linspace(0, 10, n_samples)
s1 = np.sin(2 * time) # Signal 1 : sinusoidal signal
s2 = np.sign(np.sin(3 * time)) # Signal 2 : square signal
S = np.c_[s1, s2]
S += 0.2 * np.random.normal(size=S.shape) # Add noise
S /= S.std(axis=0) # Standardize data
# Mix data
A = np.array([[1, 1], [0.5, 2]]) # Mixing matrix
X = np.dot(S, A.T) # Generate observations
# Compute ICA
ica = FastICA()
S_ = ica.fit(X).transform(X) # Get the estimated sources
A_ = ica.get_mixing_matrix() # Get estimated mixing matrix
assert np.allclose(X, np.dot(S_, A_.T))
###############################################################################
# Plot results
pl.figure()
pl.subplot(3, 1, 1)
pl.plot(S)
pl.title('True Sources')
pl.subplot(3, 1, 2)
pl.plot(X)
pl.title('Observations (mixed signal)')
pl.subplot(3, 1, 3)
pl.plot(S_)
pl.title('ICA estimated sources')
pl.subplots_adjust(0.09, 0.04, 0.94, 0.94, 0.26, 0.36)
pl.show()
| bsd-3-clause |
Djabbz/scikit-learn | sklearn/linear_model/stochastic_gradient.py | 31 | 50760 | # Authors: Peter Prettenhofer <[email protected]> (main author)
# Mathieu Blondel (partial_fit support)
#
# License: BSD 3 clause
"""Classification and regression using Stochastic Gradient Descent (SGD)."""
import numpy as np
from abc import ABCMeta, abstractmethod
from ..externals.joblib import Parallel, delayed
from .base import LinearClassifierMixin, SparseCoefMixin
from .base import make_dataset
from ..base import BaseEstimator, RegressorMixin
from ..feature_selection.from_model import _LearntSelectorMixin
from ..utils import (check_array, check_random_state, check_X_y,
deprecated)
from ..utils.extmath import safe_sparse_dot
from ..utils.multiclass import _check_partial_fit_first_call
from ..utils.validation import check_is_fitted
from ..externals import six
from .sgd_fast import plain_sgd, average_sgd
from ..utils.fixes import astype
from ..utils import compute_class_weight
from .sgd_fast import Hinge
from .sgd_fast import SquaredHinge
from .sgd_fast import Log
from .sgd_fast import ModifiedHuber
from .sgd_fast import SquaredLoss
from .sgd_fast import Huber
from .sgd_fast import EpsilonInsensitive
from .sgd_fast import SquaredEpsilonInsensitive
LEARNING_RATE_TYPES = {"constant": 1, "optimal": 2, "invscaling": 3,
"pa1": 4, "pa2": 5}
PENALTY_TYPES = {"none": 0, "l2": 2, "l1": 1, "elasticnet": 3}
DEFAULT_EPSILON = 0.1
# Default value of ``epsilon`` parameter.
class BaseSGD(six.with_metaclass(ABCMeta, BaseEstimator, SparseCoefMixin)):
"""Base class for SGD classification and regression."""
def __init__(self, loss, penalty='l2', alpha=0.0001, C=1.0,
l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=True,
verbose=0, epsilon=0.1, random_state=None,
learning_rate="optimal", eta0=0.0, power_t=0.5,
warm_start=False, average=False):
self.loss = loss
self.penalty = penalty
self.learning_rate = learning_rate
self.epsilon = epsilon
self.alpha = alpha
self.C = C
self.l1_ratio = l1_ratio
self.fit_intercept = fit_intercept
self.n_iter = n_iter
self.shuffle = shuffle
self.random_state = random_state
self.verbose = verbose
self.eta0 = eta0
self.power_t = power_t
self.warm_start = warm_start
self.average = average
self._validate_params()
self.coef_ = None
if self.average > 0:
self.standard_coef_ = None
self.average_coef_ = None
# iteration count for learning rate schedule
# must not be int (e.g. if ``learning_rate=='optimal'``)
self.t_ = None
def set_params(self, *args, **kwargs):
super(BaseSGD, self).set_params(*args, **kwargs)
self._validate_params()
return self
@abstractmethod
def fit(self, X, y):
"""Fit model."""
def _validate_params(self):
"""Validate input params. """
if not isinstance(self.shuffle, bool):
raise ValueError("shuffle must be either True or False")
if self.n_iter <= 0:
raise ValueError("n_iter must be > zero")
if not (0.0 <= self.l1_ratio <= 1.0):
raise ValueError("l1_ratio must be in [0, 1]")
if self.alpha < 0.0:
raise ValueError("alpha must be >= 0")
if self.learning_rate in ("constant", "invscaling"):
if self.eta0 <= 0.0:
raise ValueError("eta0 must be > 0")
if self.learning_rate == "optimal" and self.alpha == 0:
raise ValueError("alpha must be > 0 since "
"learning_rate is 'optimal'. alpha is used "
"to compute the optimal learning rate.")
# raises ValueError if not registered
self._get_penalty_type(self.penalty)
self._get_learning_rate_type(self.learning_rate)
if self.loss not in self.loss_functions:
raise ValueError("The loss %s is not supported. " % self.loss)
def _get_loss_function(self, loss):
"""Get concrete ``LossFunction`` object for str ``loss``. """
try:
loss_ = self.loss_functions[loss]
loss_class, args = loss_[0], loss_[1:]
if loss in ('huber', 'epsilon_insensitive',
'squared_epsilon_insensitive'):
args = (self.epsilon, )
return loss_class(*args)
except KeyError:
raise ValueError("The loss %s is not supported. " % loss)
def _get_learning_rate_type(self, learning_rate):
try:
return LEARNING_RATE_TYPES[learning_rate]
except KeyError:
raise ValueError("learning rate %s "
"is not supported. " % learning_rate)
def _get_penalty_type(self, penalty):
penalty = str(penalty).lower()
try:
return PENALTY_TYPES[penalty]
except KeyError:
raise ValueError("Penalty %s is not supported. " % penalty)
def _validate_sample_weight(self, sample_weight, n_samples):
"""Set the sample weight array."""
if sample_weight is None:
# uniform sample weights
sample_weight = np.ones(n_samples, dtype=np.float64, order='C')
else:
# user-provided array
sample_weight = np.asarray(sample_weight, dtype=np.float64,
order="C")
if sample_weight.shape[0] != n_samples:
raise ValueError("Shapes of X and sample_weight do not match.")
return sample_weight
def _allocate_parameter_mem(self, n_classes, n_features, coef_init=None,
intercept_init=None):
"""Allocate mem for parameters; initialize if provided."""
if n_classes > 2:
# allocate coef_ for multi-class
if coef_init is not None:
coef_init = np.asarray(coef_init, order="C")
if coef_init.shape != (n_classes, n_features):
raise ValueError("Provided ``coef_`` does not match "
"dataset. ")
self.coef_ = coef_init
else:
self.coef_ = np.zeros((n_classes, n_features),
dtype=np.float64, order="C")
# allocate intercept_ for multi-class
if intercept_init is not None:
intercept_init = np.asarray(intercept_init, order="C")
if intercept_init.shape != (n_classes, ):
raise ValueError("Provided intercept_init "
"does not match dataset.")
self.intercept_ = intercept_init
else:
self.intercept_ = np.zeros(n_classes, dtype=np.float64,
order="C")
else:
# allocate coef_ for binary problem
if coef_init is not None:
coef_init = np.asarray(coef_init, dtype=np.float64,
order="C")
coef_init = coef_init.ravel()
if coef_init.shape != (n_features,):
raise ValueError("Provided coef_init does not "
"match dataset.")
self.coef_ = coef_init
else:
self.coef_ = np.zeros(n_features,
dtype=np.float64,
order="C")
# allocate intercept_ for binary problem
if intercept_init is not None:
intercept_init = np.asarray(intercept_init, dtype=np.float64)
if intercept_init.shape != (1,) and intercept_init.shape != ():
raise ValueError("Provided intercept_init "
"does not match dataset.")
self.intercept_ = intercept_init.reshape(1,)
else:
self.intercept_ = np.zeros(1, dtype=np.float64, order="C")
# initialize average parameters
if self.average > 0:
self.standard_coef_ = self.coef_
self.standard_intercept_ = self.intercept_
self.average_coef_ = np.zeros(self.coef_.shape,
dtype=np.float64,
order="C")
self.average_intercept_ = np.zeros(self.standard_intercept_.shape,
dtype=np.float64,
order="C")
def _prepare_fit_binary(est, y, i):
"""Initialization for fit_binary.
    Returns y_i, coef, intercept, average_coef, average_intercept.
"""
y_i = np.ones(y.shape, dtype=np.float64, order="C")
y_i[y != est.classes_[i]] = -1.0
average_intercept = 0
average_coef = None
if len(est.classes_) == 2:
if not est.average:
coef = est.coef_.ravel()
intercept = est.intercept_[0]
else:
coef = est.standard_coef_.ravel()
intercept = est.standard_intercept_[0]
average_coef = est.average_coef_.ravel()
average_intercept = est.average_intercept_[0]
else:
if not est.average:
coef = est.coef_[i]
intercept = est.intercept_[i]
else:
coef = est.standard_coef_[i]
intercept = est.standard_intercept_[i]
average_coef = est.average_coef_[i]
average_intercept = est.average_intercept_[i]
return y_i, coef, intercept, average_coef, average_intercept
def fit_binary(est, i, X, y, alpha, C, learning_rate, n_iter,
pos_weight, neg_weight, sample_weight):
"""Fit a single binary classifier.
The i'th class is considered the "positive" class.
"""
# if average is not true, average_coef, and average_intercept will be
# unused
y_i, coef, intercept, average_coef, average_intercept = \
_prepare_fit_binary(est, y, i)
assert y_i.shape[0] == y.shape[0] == sample_weight.shape[0]
dataset, intercept_decay = make_dataset(X, y_i, sample_weight)
penalty_type = est._get_penalty_type(est.penalty)
learning_rate_type = est._get_learning_rate_type(learning_rate)
# XXX should have random_state_!
random_state = check_random_state(est.random_state)
# numpy mtrand expects a C long which is a signed 32 bit integer under
# Windows
seed = random_state.randint(0, np.iinfo(np.int32).max)
if not est.average:
return plain_sgd(coef, intercept, est.loss_function,
penalty_type, alpha, C, est.l1_ratio,
dataset, n_iter, int(est.fit_intercept),
int(est.verbose), int(est.shuffle), seed,
pos_weight, neg_weight,
learning_rate_type, est.eta0,
est.power_t, est.t_, intercept_decay)
else:
standard_coef, standard_intercept, average_coef, \
average_intercept = average_sgd(coef, intercept, average_coef,
average_intercept,
est.loss_function, penalty_type,
alpha, C, est.l1_ratio, dataset,
n_iter, int(est.fit_intercept),
int(est.verbose), int(est.shuffle),
seed, pos_weight, neg_weight,
learning_rate_type, est.eta0,
est.power_t, est.t_,
intercept_decay,
est.average)
if len(est.classes_) == 2:
est.average_intercept_[0] = average_intercept
else:
est.average_intercept_[i] = average_intercept
return standard_coef, standard_intercept
class BaseSGDClassifier(six.with_metaclass(ABCMeta, BaseSGD,
LinearClassifierMixin)):
loss_functions = {
"hinge": (Hinge, 1.0),
"squared_hinge": (SquaredHinge, 1.0),
"perceptron": (Hinge, 0.0),
"log": (Log, ),
"modified_huber": (ModifiedHuber, ),
"squared_loss": (SquaredLoss, ),
"huber": (Huber, DEFAULT_EPSILON),
"epsilon_insensitive": (EpsilonInsensitive, DEFAULT_EPSILON),
"squared_epsilon_insensitive": (SquaredEpsilonInsensitive,
DEFAULT_EPSILON),
}
@abstractmethod
def __init__(self, loss="hinge", penalty='l2', alpha=0.0001, l1_ratio=0.15,
fit_intercept=True, n_iter=5, shuffle=True, verbose=0,
epsilon=DEFAULT_EPSILON, n_jobs=1, random_state=None,
learning_rate="optimal", eta0=0.0, power_t=0.5,
class_weight=None, warm_start=False, average=False):
super(BaseSGDClassifier, self).__init__(loss=loss, penalty=penalty,
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept,
n_iter=n_iter, shuffle=shuffle,
verbose=verbose,
epsilon=epsilon,
random_state=random_state,
learning_rate=learning_rate,
eta0=eta0, power_t=power_t,
warm_start=warm_start,
average=average)
self.class_weight = class_weight
self.classes_ = None
self.n_jobs = int(n_jobs)
def _partial_fit(self, X, y, alpha, C,
loss, learning_rate, n_iter,
classes, sample_weight,
coef_init, intercept_init):
X, y = check_X_y(X, y, 'csr', dtype=np.float64, order="C")
n_samples, n_features = X.shape
self._validate_params()
_check_partial_fit_first_call(self, classes)
n_classes = self.classes_.shape[0]
# Allocate datastructures from input arguments
self._expanded_class_weight = compute_class_weight(self.class_weight,
self.classes_, y)
sample_weight = self._validate_sample_weight(sample_weight, n_samples)
if self.coef_ is None or coef_init is not None:
self._allocate_parameter_mem(n_classes, n_features,
coef_init, intercept_init)
elif n_features != self.coef_.shape[-1]:
raise ValueError("Number of features %d does not match previous "
"data %d." % (n_features, self.coef_.shape[-1]))
self.loss_function = self._get_loss_function(loss)
if self.t_ is None:
self.t_ = 1.0
# delegate to concrete training procedure
if n_classes > 2:
self._fit_multiclass(X, y, alpha=alpha, C=C,
learning_rate=learning_rate,
sample_weight=sample_weight, n_iter=n_iter)
elif n_classes == 2:
self._fit_binary(X, y, alpha=alpha, C=C,
learning_rate=learning_rate,
sample_weight=sample_weight, n_iter=n_iter)
else:
raise ValueError("The number of class labels must be "
"greater than one.")
return self
def _fit(self, X, y, alpha, C, loss, learning_rate, coef_init=None,
intercept_init=None, sample_weight=None):
if hasattr(self, "classes_"):
self.classes_ = None
X, y = check_X_y(X, y, 'csr', dtype=np.float64, order="C")
n_samples, n_features = X.shape
# labels can be encoded as float, int, or string literals
# np.unique sorts in asc order; largest class id is positive class
classes = np.unique(y)
if self.warm_start and self.coef_ is not None:
if coef_init is None:
coef_init = self.coef_
if intercept_init is None:
intercept_init = self.intercept_
else:
self.coef_ = None
self.intercept_ = None
if self.average > 0:
self.standard_coef_ = self.coef_
self.standard_intercept_ = self.intercept_
self.average_coef_ = None
self.average_intercept_ = None
# Clear iteration count for multiple call to fit.
self.t_ = None
self._partial_fit(X, y, alpha, C, loss, learning_rate, self.n_iter,
classes, sample_weight, coef_init, intercept_init)
return self
def _fit_binary(self, X, y, alpha, C, sample_weight,
learning_rate, n_iter):
"""Fit a binary classifier on X and y. """
coef, intercept = fit_binary(self, 1, X, y, alpha, C,
learning_rate, n_iter,
self._expanded_class_weight[1],
self._expanded_class_weight[0],
sample_weight)
self.t_ += n_iter * X.shape[0]
# need to be 2d
if self.average > 0:
if self.average <= self.t_ - 1:
self.coef_ = self.average_coef_.reshape(1, -1)
self.intercept_ = self.average_intercept_
else:
self.coef_ = self.standard_coef_.reshape(1, -1)
self.standard_intercept_ = np.atleast_1d(intercept)
self.intercept_ = self.standard_intercept_
else:
self.coef_ = coef.reshape(1, -1)
# intercept is a float, need to convert it to an array of length 1
self.intercept_ = np.atleast_1d(intercept)
def _fit_multiclass(self, X, y, alpha, C, learning_rate,
sample_weight, n_iter):
"""Fit a multi-class classifier by combining binary classifiers
Each binary classifier predicts one class versus all others. This
strategy is called OVA: One Versus All.
"""
# Use joblib to fit OvA in parallel.
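        # Illustrative note: with classes_ = [0, 1, 2] this fits three binary
        # problems (0 vs rest, 1 vs rest, 2 vs rest); prediction later picks
        # the class with the largest binary decision score.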
result = Parallel(n_jobs=self.n_jobs, backend="threading",
verbose=self.verbose)(
delayed(fit_binary)(self, i, X, y, alpha, C, learning_rate,
n_iter, self._expanded_class_weight[i], 1.,
sample_weight)
for i in range(len(self.classes_)))
for i, (_, intercept) in enumerate(result):
self.intercept_[i] = intercept
self.t_ += n_iter * X.shape[0]
if self.average > 0:
if self.average <= self.t_ - 1.0:
self.coef_ = self.average_coef_
self.intercept_ = self.average_intercept_
else:
self.coef_ = self.standard_coef_
self.standard_intercept_ = np.atleast_1d(self.intercept_)
self.intercept_ = self.standard_intercept_
def partial_fit(self, X, y, classes=None, sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Subset of the training data
y : numpy array, shape (n_samples,)
Subset of the target values
classes : array, shape (n_classes,)
Classes across all calls to partial_fit.
            Can be obtained via `np.unique(y_all)`, where y_all is the
target vector of the entire dataset.
This argument is required for the first call to partial_fit
and can be omitted in the subsequent calls.
Note that y doesn't need to contain all labels in `classes`.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples.
If not provided, uniform weights are assumed.
Returns
-------
self : returns an instance of self.
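        A typical out-of-core pattern (illustrative only) is to call this
        method repeatedly on successive chunks of the training data, passing
        ``classes=np.unique(y_all)`` on the first call.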
"""
        if self.class_weight in ['balanced', 'auto']:
            raise ValueError("class_weight '{0}' is not supported for "
                             "partial_fit. In order to use 'balanced' weights,"
                             " use compute_class_weight('{0}', classes, y). "
                             "In place of y you can use a large enough sample "
                             "of the full training set target to properly "
                             "estimate the class frequency distributions. "
                             "Pass the resulting weights as the class_weight "
                             "parameter.".format(self.class_weight))
return self._partial_fit(X, y, alpha=self.alpha, C=1.0, loss=self.loss,
learning_rate=self.learning_rate, n_iter=1,
classes=classes, sample_weight=sample_weight,
coef_init=None, intercept_init=None)
def fit(self, X, y, coef_init=None, intercept_init=None,
sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data
y : numpy array, shape (n_samples,)
Target values
coef_init : array, shape (n_classes, n_features)
The initial coefficients to warm-start the optimization.
intercept_init : array, shape (n_classes,)
The initial intercept to warm-start the optimization.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples.
If not provided, uniform weights are assumed. These weights will
be multiplied with class_weight (passed through the
            constructor) if class_weight is specified
Returns
-------
self : returns an instance of self.
"""
return self._fit(X, y, alpha=self.alpha, C=1.0,
loss=self.loss, learning_rate=self.learning_rate,
coef_init=coef_init, intercept_init=intercept_init,
sample_weight=sample_weight)
class SGDClassifier(BaseSGDClassifier, _LearntSelectorMixin):
"""Linear classifiers (SVM, logistic regression, a.o.) with SGD training.
This estimator implements regularized linear models with stochastic
gradient descent (SGD) learning: the gradient of the loss is estimated
each sample at a time and the model is updated along the way with a
decreasing strength schedule (aka learning rate). SGD allows minibatch
(online/out-of-core) learning, see the partial_fit method.
For best results using the default learning rate schedule, the data should
have zero mean and unit variance.
This implementation works with data represented as dense or sparse arrays
of floating point values for the features. The model it fits can be
controlled with the loss parameter; by default, it fits a linear support
vector machine (SVM).
The regularizer is a penalty added to the loss function that shrinks model
parameters towards the zero vector using either the squared euclidean norm
L2 or the absolute norm L1 or a combination of both (Elastic Net). If the
parameter update crosses the 0.0 value because of the regularizer, the
update is truncated to 0.0 to allow for learning sparse models and achieve
online feature selection.
Read more in the :ref:`User Guide <sgd>`.
Parameters
----------
loss : str, 'hinge', 'log', 'modified_huber', 'squared_hinge',\
'perceptron', or a regression loss: 'squared_loss', 'huber',\
'epsilon_insensitive', or 'squared_epsilon_insensitive'
The loss function to be used. Defaults to 'hinge', which gives a
linear SVM.
The 'log' loss gives logistic regression, a probabilistic classifier.
'modified_huber' is another smooth loss that brings tolerance to
outliers as well as probability estimates.
'squared_hinge' is like hinge but is quadratically penalized.
'perceptron' is the linear loss used by the perceptron algorithm.
The other losses are designed for regression but can be useful in
classification as well; see SGDRegressor for a description.
penalty : str, 'none', 'l2', 'l1', or 'elasticnet'
The penalty (aka regularization term) to be used. Defaults to 'l2'
which is the standard regularizer for linear SVM models. 'l1' and
'elasticnet' might bring sparsity to the model (feature selection)
not achievable with 'l2'.
alpha : float
Constant that multiplies the regularization term. Defaults to 0.0001
Also used to compute learning_rate when set to 'optimal'.
l1_ratio : float
The Elastic Net mixing parameter, with 0 <= l1_ratio <= 1.
l1_ratio=0 corresponds to L2 penalty, l1_ratio=1 to L1.
Defaults to 0.15.
fit_intercept : bool
Whether the intercept should be estimated or not. If False, the
data is assumed to be already centered. Defaults to True.
n_iter : int, optional
The number of passes over the training data (aka epochs). The number
of iterations is set to 1 if using partial_fit.
Defaults to 5.
shuffle : bool, optional
Whether or not the training data should be shuffled after each epoch.
Defaults to True.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
verbose : integer, optional
The verbosity level
epsilon : float
Epsilon in the epsilon-insensitive loss functions; only if `loss` is
'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'.
For 'huber', determines the threshold at which it becomes less
important to get the prediction exactly right.
For epsilon-insensitive, any differences between the current prediction
and the correct label are ignored if they are less than this threshold.
n_jobs : integer, optional
The number of CPUs to use to do the OVA (One Versus All, for
multi-class problems) computation. -1 means 'all CPUs'. Defaults
to 1.
learning_rate : string, optional
The learning rate schedule:
constant: eta = eta0
optimal: eta = 1.0 / (alpha * (t + t0)) [default]
invscaling: eta = eta0 / pow(t, power_t)
where t0 is chosen by a heuristic proposed by Leon Bottou.
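        As an illustration (assumed values), with eta0=0.01 and power_t=0.5 the
        'invscaling' schedule gives eta = 0.01 / sqrt(t), i.e. 0.001 at t=100.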
eta0 : double
The initial learning rate for the 'constant' or 'invscaling'
schedules. The default value is 0.0 as eta0 is not used by the
default schedule 'optimal'.
power_t : double
The exponent for inverse scaling learning rate [default 0.5].
class_weight : dict, {class_label: weight} or "balanced" or None, optional
Preset for the class_weight fit parameter.
Weights associated with classes. If not given, all classes
are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
average : bool or int, optional
When set to True, computes the averaged SGD weights and stores the
result in the ``coef_`` attribute. If set to an int greater than 1,
averaging will begin once the total number of samples seen reaches
average. So average=10 will begin averaging after seeing 10 samples.
Attributes
----------
coef_ : array, shape (1, n_features) if n_classes == 2 else (n_classes,\
n_features)
Weights assigned to the features.
intercept_ : array, shape (1,) if n_classes == 2 else (n_classes,)
Constants in decision function.
Examples
--------
>>> import numpy as np
>>> from sklearn import linear_model
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> Y = np.array([1, 1, 2, 2])
>>> clf = linear_model.SGDClassifier()
>>> clf.fit(X, Y)
... #doctest: +NORMALIZE_WHITESPACE
SGDClassifier(alpha=0.0001, average=False, class_weight=None, epsilon=0.1,
eta0=0.0, fit_intercept=True, l1_ratio=0.15,
learning_rate='optimal', loss='hinge', n_iter=5, n_jobs=1,
penalty='l2', power_t=0.5, random_state=None, shuffle=True,
verbose=0, warm_start=False)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
LinearSVC, LogisticRegression, Perceptron
"""
def __init__(self, loss="hinge", penalty='l2', alpha=0.0001, l1_ratio=0.15,
fit_intercept=True, n_iter=5, shuffle=True, verbose=0,
epsilon=DEFAULT_EPSILON, n_jobs=1, random_state=None,
learning_rate="optimal", eta0=0.0, power_t=0.5,
class_weight=None, warm_start=False, average=False):
super(SGDClassifier, self).__init__(
loss=loss, penalty=penalty, alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept, n_iter=n_iter, shuffle=shuffle,
verbose=verbose, epsilon=epsilon, n_jobs=n_jobs,
random_state=random_state, learning_rate=learning_rate, eta0=eta0,
power_t=power_t, class_weight=class_weight, warm_start=warm_start,
average=average)
def _check_proba(self):
check_is_fitted(self, "t_")
if self.loss not in ("log", "modified_huber"):
raise AttributeError("probability estimates are not available for"
" loss=%r" % self.loss)
@property
def predict_proba(self):
"""Probability estimates.
This method is only available for log loss and modified Huber loss.
Multiclass probability estimates are derived from binary (one-vs.-rest)
estimates by simple normalization, as recommended by Zadrozny and
Elkan.
Binary probability estimates for loss="modified_huber" are given by
(clip(decision_function(X), -1, 1) + 1) / 2.
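        For instance, a decision value of 0.4 maps to (0.4 + 1) / 2 = 0.7,
        while any decision value at or below -1 maps to 0.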
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
array, shape (n_samples, n_classes)
Returns the probability of the sample for each class in the model,
where classes are ordered as they are in `self.classes_`.
References
----------
Zadrozny and Elkan, "Transforming classifier scores into multiclass
probability estimates", SIGKDD'02,
http://www.research.ibm.com/people/z/zadrozny/kdd2002-Transf.pdf
The justification for the formula in the loss="modified_huber"
case is in the appendix B in:
http://jmlr.csail.mit.edu/papers/volume2/zhang02c/zhang02c.pdf
"""
self._check_proba()
return self._predict_proba
def _predict_proba(self, X):
if self.loss == "log":
return self._predict_proba_lr(X)
elif self.loss == "modified_huber":
binary = (len(self.classes_) == 2)
scores = self.decision_function(X)
if binary:
prob2 = np.ones((scores.shape[0], 2))
prob = prob2[:, 1]
else:
prob = scores
np.clip(scores, -1, 1, prob)
prob += 1.
prob /= 2.
if binary:
prob2[:, 0] -= prob
prob = prob2
else:
# the above might assign zero to all classes, which doesn't
# normalize neatly; work around this to produce uniform
# probabilities
prob_sum = prob.sum(axis=1)
all_zero = (prob_sum == 0)
if np.any(all_zero):
prob[all_zero, :] = 1
prob_sum[all_zero] = len(self.classes_)
# normalize
prob /= prob_sum.reshape((prob.shape[0], -1))
return prob
else:
raise NotImplementedError("predict_(log_)proba only supported when"
" loss='log' or loss='modified_huber' "
"(%r given)" % self.loss)
@property
def predict_log_proba(self):
"""Log of probability estimates.
This method is only available for log loss and modified Huber loss.
When loss="modified_huber", probability estimates may be hard zeros
and ones, so taking the logarithm is not possible.
See ``predict_proba`` for details.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Returns
-------
T : array-like, shape (n_samples, n_classes)
Returns the log-probability of the sample for each class in the
model, where classes are ordered as they are in
`self.classes_`.
"""
self._check_proba()
return self._predict_log_proba
def _predict_log_proba(self, X):
return np.log(self.predict_proba(X))
class BaseSGDRegressor(BaseSGD, RegressorMixin):
loss_functions = {
"squared_loss": (SquaredLoss, ),
"huber": (Huber, DEFAULT_EPSILON),
"epsilon_insensitive": (EpsilonInsensitive, DEFAULT_EPSILON),
"squared_epsilon_insensitive": (SquaredEpsilonInsensitive,
DEFAULT_EPSILON),
}
@abstractmethod
def __init__(self, loss="squared_loss", penalty="l2", alpha=0.0001,
l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=True,
verbose=0, epsilon=DEFAULT_EPSILON, random_state=None,
learning_rate="invscaling", eta0=0.01, power_t=0.25,
warm_start=False, average=False):
super(BaseSGDRegressor, self).__init__(loss=loss, penalty=penalty,
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept,
n_iter=n_iter, shuffle=shuffle,
verbose=verbose,
epsilon=epsilon,
random_state=random_state,
learning_rate=learning_rate,
eta0=eta0, power_t=power_t,
warm_start=warm_start,
average=average)
def _partial_fit(self, X, y, alpha, C, loss, learning_rate,
n_iter, sample_weight,
coef_init, intercept_init):
X, y = check_X_y(X, y, "csr", copy=False, order='C', dtype=np.float64)
y = astype(y, np.float64, copy=False)
n_samples, n_features = X.shape
self._validate_params()
# Allocate datastructures from input arguments
sample_weight = self._validate_sample_weight(sample_weight, n_samples)
if self.coef_ is None:
self._allocate_parameter_mem(1, n_features,
coef_init, intercept_init)
elif n_features != self.coef_.shape[-1]:
raise ValueError("Number of features %d does not match previous "
"data %d." % (n_features, self.coef_.shape[-1]))
if self.average > 0 and self.average_coef_ is None:
self.average_coef_ = np.zeros(n_features,
dtype=np.float64,
order="C")
self.average_intercept_ = np.zeros(1,
dtype=np.float64,
order="C")
self._fit_regressor(X, y, alpha, C, loss, learning_rate,
sample_weight, n_iter)
return self
def partial_fit(self, X, y, sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Subset of training data
y : numpy array of shape (n_samples,)
Subset of target values
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples.
If not provided, uniform weights are assumed.
Returns
-------
self : returns an instance of self.
"""
return self._partial_fit(X, y, self.alpha, C=1.0,
loss=self.loss,
learning_rate=self.learning_rate, n_iter=1,
sample_weight=sample_weight,
coef_init=None, intercept_init=None)
def _fit(self, X, y, alpha, C, loss, learning_rate, coef_init=None,
intercept_init=None, sample_weight=None):
if self.warm_start and self.coef_ is not None:
if coef_init is None:
coef_init = self.coef_
if intercept_init is None:
intercept_init = self.intercept_
else:
self.coef_ = None
self.intercept_ = None
if self.average > 0:
self.standard_intercept_ = self.intercept_
self.standard_coef_ = self.coef_
self.average_coef_ = None
self.average_intercept_ = None
# Clear iteration count for multiple call to fit.
self.t_ = None
return self._partial_fit(X, y, alpha, C, loss, learning_rate,
self.n_iter, sample_weight,
coef_init, intercept_init)
def fit(self, X, y, coef_init=None, intercept_init=None,
sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data
y : numpy array, shape (n_samples,)
Target values
coef_init : array, shape (n_features,)
The initial coefficients to warm-start the optimization.
intercept_init : array, shape (1,)
The initial intercept to warm-start the optimization.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : returns an instance of self.
"""
return self._fit(X, y, alpha=self.alpha, C=1.0,
loss=self.loss, learning_rate=self.learning_rate,
coef_init=coef_init,
intercept_init=intercept_init,
sample_weight=sample_weight)
@deprecated(" and will be removed in 0.19.")
def decision_function(self, X):
"""Predict using the linear model
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
array, shape (n_samples,)
Predicted target values per element in X.
"""
return self._decision_function(X)
def _decision_function(self, X):
"""Predict using the linear model
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
array, shape (n_samples,)
Predicted target values per element in X.
"""
check_is_fitted(self, ["t_", "coef_", "intercept_"], all_or_any=all)
X = check_array(X, accept_sparse='csr')
scores = safe_sparse_dot(X, self.coef_.T,
dense_output=True) + self.intercept_
return scores.ravel()
def predict(self, X):
"""Predict using the linear model
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
array, shape (n_samples,)
Predicted target values per element in X.
"""
return self._decision_function(X)
def _fit_regressor(self, X, y, alpha, C, loss, learning_rate,
sample_weight, n_iter):
dataset, intercept_decay = make_dataset(X, y, sample_weight)
loss_function = self._get_loss_function(loss)
penalty_type = self._get_penalty_type(self.penalty)
learning_rate_type = self._get_learning_rate_type(learning_rate)
if self.t_ is None:
self.t_ = 1.0
random_state = check_random_state(self.random_state)
# numpy mtrand expects a C long which is a signed 32 bit integer under
# Windows
seed = random_state.randint(0, np.iinfo(np.int32).max)
if self.average > 0:
self.standard_coef_, self.standard_intercept_, \
self.average_coef_, self.average_intercept_ =\
average_sgd(self.standard_coef_,
self.standard_intercept_[0],
self.average_coef_,
self.average_intercept_[0],
loss_function,
penalty_type,
alpha, C,
self.l1_ratio,
dataset,
n_iter,
int(self.fit_intercept),
int(self.verbose),
int(self.shuffle),
seed,
1.0, 1.0,
learning_rate_type,
self.eta0, self.power_t, self.t_,
intercept_decay, self.average)
self.average_intercept_ = np.atleast_1d(self.average_intercept_)
self.standard_intercept_ = np.atleast_1d(self.standard_intercept_)
self.t_ += n_iter * X.shape[0]
if self.average <= self.t_ - 1.0:
self.coef_ = self.average_coef_
self.intercept_ = self.average_intercept_
else:
self.coef_ = self.standard_coef_
self.intercept_ = self.standard_intercept_
else:
self.coef_, self.intercept_ = \
plain_sgd(self.coef_,
self.intercept_[0],
loss_function,
penalty_type,
alpha, C,
self.l1_ratio,
dataset,
n_iter,
int(self.fit_intercept),
int(self.verbose),
int(self.shuffle),
seed,
1.0, 1.0,
learning_rate_type,
self.eta0, self.power_t, self.t_,
intercept_decay)
self.t_ += n_iter * X.shape[0]
self.intercept_ = np.atleast_1d(self.intercept_)
class SGDRegressor(BaseSGDRegressor, _LearntSelectorMixin):
"""Linear model fitted by minimizing a regularized empirical loss with SGD
SGD stands for Stochastic Gradient Descent: the gradient of the loss is
    estimated one sample at a time and the model is updated along the way with
a decreasing strength schedule (aka learning rate).
The regularizer is a penalty added to the loss function that shrinks model
parameters towards the zero vector using either the squared euclidean norm
L2 or the absolute norm L1 or a combination of both (Elastic Net). If the
parameter update crosses the 0.0 value because of the regularizer, the
update is truncated to 0.0 to allow for learning sparse models and achieve
online feature selection.
This implementation works with data represented as dense numpy arrays of
floating point values for the features.
Read more in the :ref:`User Guide <sgd>`.
Parameters
----------
loss : str, 'squared_loss', 'huber', 'epsilon_insensitive', \
or 'squared_epsilon_insensitive'
The loss function to be used. Defaults to 'squared_loss' which refers
to the ordinary least squares fit. 'huber' modifies 'squared_loss' to
focus less on getting outliers correct by switching from squared to
linear loss past a distance of epsilon. 'epsilon_insensitive' ignores
errors less than epsilon and is linear past that; this is the loss
function used in SVR. 'squared_epsilon_insensitive' is the same but
becomes squared loss past a tolerance of epsilon.
penalty : str, 'none', 'l2', 'l1', or 'elasticnet'
The penalty (aka regularization term) to be used. Defaults to 'l2'
which is the standard regularizer for linear SVM models. 'l1' and
'elasticnet' might bring sparsity to the model (feature selection)
not achievable with 'l2'.
alpha : float
Constant that multiplies the regularization term. Defaults to 0.0001
Also used to compute learning_rate when set to 'optimal'.
l1_ratio : float
The Elastic Net mixing parameter, with 0 <= l1_ratio <= 1.
l1_ratio=0 corresponds to L2 penalty, l1_ratio=1 to L1.
Defaults to 0.15.
fit_intercept : bool
Whether the intercept should be estimated or not. If False, the
data is assumed to be already centered. Defaults to True.
n_iter : int, optional
The number of passes over the training data (aka epochs). The number
of iterations is set to 1 if using partial_fit.
Defaults to 5.
shuffle : bool, optional
Whether or not the training data should be shuffled after each epoch.
Defaults to True.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
verbose : integer, optional
The verbosity level.
epsilon : float
Epsilon in the epsilon-insensitive loss functions; only if `loss` is
'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'.
For 'huber', determines the threshold at which it becomes less
important to get the prediction exactly right.
For epsilon-insensitive, any differences between the current prediction
and the correct label are ignored if they are less than this threshold.
learning_rate : string, optional
The learning rate:
constant: eta = eta0
optimal: eta = 1.0/(alpha * t)
invscaling: eta = eta0 / pow(t, power_t) [default]
eta0 : double, optional
The initial learning rate [default 0.01].
power_t : double, optional
The exponent for inverse scaling learning rate [default 0.25].
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
average : bool or int, optional
When set to True, computes the averaged SGD weights and stores the
result in the ``coef_`` attribute. If set to an int greater than 1,
averaging will begin once the total number of samples seen reaches
        average. So ``average=10`` will begin averaging after seeing 10
samples.
Attributes
----------
coef_ : array, shape (n_features,)
Weights assigned to the features.
intercept_ : array, shape (1,)
The intercept term.
average_coef_ : array, shape (n_features,)
Averaged weights assigned to the features.
average_intercept_ : array, shape (1,)
The averaged intercept term.
Examples
--------
>>> import numpy as np
>>> from sklearn import linear_model
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> y = np.random.randn(n_samples)
>>> X = np.random.randn(n_samples, n_features)
>>> clf = linear_model.SGDRegressor()
>>> clf.fit(X, y)
... #doctest: +NORMALIZE_WHITESPACE
SGDRegressor(alpha=0.0001, average=False, epsilon=0.1, eta0=0.01,
fit_intercept=True, l1_ratio=0.15, learning_rate='invscaling',
loss='squared_loss', n_iter=5, penalty='l2', power_t=0.25,
random_state=None, shuffle=True, verbose=0, warm_start=False)
See also
--------
Ridge, ElasticNet, Lasso, SVR
"""
def __init__(self, loss="squared_loss", penalty="l2", alpha=0.0001,
l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=True,
verbose=0, epsilon=DEFAULT_EPSILON, random_state=None,
learning_rate="invscaling", eta0=0.01, power_t=0.25,
warm_start=False, average=False):
super(SGDRegressor, self).__init__(loss=loss, penalty=penalty,
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept,
n_iter=n_iter, shuffle=shuffle,
verbose=verbose,
epsilon=epsilon,
random_state=random_state,
learning_rate=learning_rate,
eta0=eta0, power_t=power_t,
warm_start=warm_start,
average=average)
| bsd-3-clause |
dkillick/cartopy | lib/cartopy/tests/mpl/test_crs.py | 2 | 1672 | # (C) British Crown Copyright 2013 - 2014, Met Office
#
# This file is part of cartopy.
#
# cartopy is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# cartopy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with cartopy. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
import matplotlib.pyplot as plt
import cartopy.crs as ccrs
from cartopy.tests.mpl import ImageTesting
@ImageTesting(['lambert_conformal_south'])
def test_lambert_south():
# Reference image: http://www.icsm.gov.au/mapping/map_projections.html
crs = ccrs.LambertConformal(central_longitude=140, cutoff=65,
secant_latitudes=(-30, -60))
ax = plt.axes(projection=crs)
ax.coastlines()
ax.gridlines()
@ImageTesting(['mercator_squashed'])
def test_mercator_squashed():
globe = ccrs.Globe(semimajor_axis=10000, semiminor_axis=9000,
ellipse=None)
crs = ccrs.Mercator(globe=globe, min_latitude=-40, max_latitude=40)
ax = plt.axes(projection=crs)
ax.coastlines()
ax.gridlines()
if __name__ == '__main__':
import nose
nose.runmodule(argv=['-s', '--with-doctest'], exit=False)
| lgpl-3.0 |
Windy-Ground/scikit-learn | examples/covariance/plot_lw_vs_oas.py | 248 | 2903 | """
=============================
Ledoit-Wolf vs OAS estimation
=============================
The usual covariance maximum likelihood estimate can be regularized
using shrinkage. Ledoit and Wolf proposed a closed formula to compute
the asymptotically optimal shrinkage parameter (minimizing a MSE
criterion), yielding the Ledoit-Wolf covariance estimate.
Chen et al. proposed an improvement of the Ledoit-Wolf shrinkage
parameter, the OAS coefficient, whose convergence is significantly
better under the assumption that the data are Gaussian.
This example, inspired by Chen's publication [1], shows a comparison
of the estimated MSE of the LW and OAS methods, using Gaussian
distributed data.
[1] "Shrinkage Algorithms for MMSE Covariance Estimation"
Chen et al., IEEE Trans. on Sign. Proc., Volume 58, Issue 10, October 2010.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy.linalg import toeplitz, cholesky
from sklearn.covariance import LedoitWolf, OAS
np.random.seed(0)
###############################################################################
n_features = 100
# simulation covariance matrix (AR(1) process)
r = 0.1
real_cov = toeplitz(r ** np.arange(n_features))
coloring_matrix = cholesky(real_cov)
n_samples_range = np.arange(6, 31, 1)
repeat = 100
lw_mse = np.zeros((n_samples_range.size, repeat))
oa_mse = np.zeros((n_samples_range.size, repeat))
lw_shrinkage = np.zeros((n_samples_range.size, repeat))
oa_shrinkage = np.zeros((n_samples_range.size, repeat))
for i, n_samples in enumerate(n_samples_range):
for j in range(repeat):
X = np.dot(
np.random.normal(size=(n_samples, n_features)), coloring_matrix.T)
lw = LedoitWolf(store_precision=False, assume_centered=True)
lw.fit(X)
lw_mse[i, j] = lw.error_norm(real_cov, scaling=False)
lw_shrinkage[i, j] = lw.shrinkage_
oa = OAS(store_precision=False, assume_centered=True)
oa.fit(X)
oa_mse[i, j] = oa.error_norm(real_cov, scaling=False)
oa_shrinkage[i, j] = oa.shrinkage_
# plot MSE
plt.subplot(2, 1, 1)
plt.errorbar(n_samples_range, lw_mse.mean(1), yerr=lw_mse.std(1),
label='Ledoit-Wolf', color='g')
plt.errorbar(n_samples_range, oa_mse.mean(1), yerr=oa_mse.std(1),
label='OAS', color='r')
plt.ylabel("Squared error")
plt.legend(loc="upper right")
plt.title("Comparison of covariance estimators")
plt.xlim(5, 31)
# plot shrinkage coefficient
plt.subplot(2, 1, 2)
plt.errorbar(n_samples_range, lw_shrinkage.mean(1), yerr=lw_shrinkage.std(1),
label='Ledoit-Wolf', color='g')
plt.errorbar(n_samples_range, oa_shrinkage.mean(1), yerr=oa_shrinkage.std(1),
label='OAS', color='r')
plt.xlabel("n_samples")
plt.ylabel("Shrinkage")
plt.legend(loc="lower right")
plt.ylim(plt.ylim()[0], 1. + (plt.ylim()[1] - plt.ylim()[0]) / 10.)
plt.xlim(5, 31)
plt.show()
| bsd-3-clause |
giorgiop/scikit-learn | sklearn/linear_model/coordinate_descent.py | 13 | 81631 | # Author: Alexandre Gramfort <[email protected]>
# Fabian Pedregosa <[email protected]>
# Olivier Grisel <[email protected]>
# Gael Varoquaux <[email protected]>
#
# License: BSD 3 clause
import sys
import warnings
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy import sparse
from .base import LinearModel, _pre_fit
from ..base import RegressorMixin
from .base import _preprocess_data
from ..utils import check_array, check_X_y, deprecated
from ..utils.validation import check_random_state
from ..model_selection import check_cv
from ..externals.joblib import Parallel, delayed
from ..externals import six
from ..externals.six.moves import xrange
from ..utils.extmath import safe_sparse_dot
from ..utils.validation import check_is_fitted
from ..utils.validation import column_or_1d
from ..exceptions import ConvergenceWarning
from . import cd_fast
###############################################################################
# Paths functions
def _alpha_grid(X, y, Xy=None, l1_ratio=1.0, fit_intercept=True,
eps=1e-3, n_alphas=100, normalize=False, copy_X=True):
""" Compute the grid of alpha values for elastic net parameter search
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data. Pass directly as Fortran-contiguous data to avoid
unnecessary memory duplication
y : ndarray, shape (n_samples,)
Target values
Xy : array-like, optional
Xy = np.dot(X.T, y) that can be precomputed.
l1_ratio : float
The elastic net mixing parameter, with ``0 <= l1_ratio <= 1``.
        For ``l1_ratio = 0`` the penalty is an L2 penalty. For
        ``l1_ratio = 1`` it is an L1 penalty. For ``0 < l1_ratio < 1``,
        the penalty is a combination of L1 and L2.
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``
n_alphas : int, optional
Number of alphas along the regularization path
fit_intercept : boolean, default True
Whether to fit an intercept or not
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
This parameter is ignored when ``fit_intercept`` is set to ``False``.
When the regressors are normalized, note that this makes the
hyperparameters learnt more robust and almost independent of the number
of samples. The same property is not valid for standardized data.
However, if you wish to standardize, please use
:class:`preprocessing.StandardScaler` before calling ``fit`` on an estimator
with ``normalize=False``.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
"""
n_samples = len(y)
sparse_center = False
if Xy is None:
X_sparse = sparse.isspmatrix(X)
sparse_center = X_sparse and (fit_intercept or normalize)
X = check_array(X, 'csc',
copy=(copy_X and fit_intercept and not X_sparse))
if not X_sparse:
# X can be touched inplace thanks to the above line
X, y, _, _, _ = _preprocess_data(X, y, fit_intercept,
normalize, copy=False)
Xy = safe_sparse_dot(X.T, y, dense_output=True)
if sparse_center:
# Workaround to find alpha_max for sparse matrices.
# since we should not destroy the sparsity of such matrices.
_, _, X_offset, _, X_scale = _preprocess_data(X, y, fit_intercept,
normalize,
return_mean=True)
mean_dot = X_offset * np.sum(y)
if Xy.ndim == 1:
Xy = Xy[:, np.newaxis]
if sparse_center:
if fit_intercept:
Xy -= mean_dot[:, np.newaxis]
if normalize:
Xy /= X_scale[:, np.newaxis]
alpha_max = (np.sqrt(np.sum(Xy ** 2, axis=1)).max() /
(n_samples * l1_ratio))
if alpha_max <= np.finfo(float).resolution:
alphas = np.empty(n_alphas)
alphas.fill(np.finfo(float).resolution)
return alphas
return np.logspace(np.log10(alpha_max * eps), np.log10(alpha_max),
num=n_alphas)[::-1]
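# Worked illustration of the grid above (assumed numbers): with alpha_max = 2.0,
# eps = 1e-3 and n_alphas = 3, np.logspace yields roughly [0.002, 0.0632, 2.0],
# which the [::-1] reverses into the decreasing path [2.0, 0.0632, 0.002].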
def lasso_path(X, y, eps=1e-3, n_alphas=100, alphas=None,
precompute='auto', Xy=None, copy_X=True, coef_init=None,
verbose=False, return_n_iter=False, positive=False, **params):
"""Compute Lasso path with coordinate descent
The Lasso optimization function varies for mono and multi-outputs.
For mono-output tasks it is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
For multi-output tasks it is::
(1 / (2 * n_samples)) * ||Y - XW||^2_Fro + alpha * ||W||_21
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <lasso>`.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data. Pass directly as Fortran-contiguous data to avoid
unnecessary memory duplication. If ``y`` is mono-output then ``X``
can be sparse.
y : ndarray, shape (n_samples,), or (n_samples, n_outputs)
Target values
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``
n_alphas : int, optional
Number of alphas along the regularization path
alphas : ndarray, optional
List of alphas where to compute the models.
If ``None`` alphas are set automatically
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
Xy : array-like, optional
Xy = np.dot(X.T, y) that can be precomputed. It is useful
only when the Gram matrix is precomputed.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
coef_init : array, shape (n_features, ) | None
The initial values of the coefficients.
verbose : bool or integer
Amount of verbosity.
params : kwargs
keyword arguments passed to the coordinate descent solver.
positive : bool, default False
If set to True, forces coefficients to be positive.
return_n_iter : bool
whether to return the number of iterations or not.
Returns
-------
alphas : array, shape (n_alphas,)
The alphas along the path where models are computed.
coefs : array, shape (n_features, n_alphas) or \
(n_outputs, n_features, n_alphas)
Coefficients along the path.
dual_gaps : array, shape (n_alphas,)
The dual gaps at the end of the optimization for each alpha.
n_iters : array-like, shape (n_alphas,)
The number of iterations taken by the coordinate descent optimizer to
reach the specified tolerance for each alpha.
Notes
-----
See examples/linear_model/plot_lasso_coordinate_descent_path.py
for an example.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
Note that in certain cases, the Lars solver may be significantly
    faster at computing this path. In particular, linear
interpolation can be used to retrieve model coefficients between the
values output by lars_path
Examples
---------
Comparing lasso_path and lars_path with interpolation:
>>> X = np.array([[1, 2, 3.1], [2.3, 5.4, 4.3]]).T
>>> y = np.array([1, 2, 3.1])
>>> # Use lasso_path to compute a coefficient path
>>> _, coef_path, _ = lasso_path(X, y, alphas=[5., 1., .5])
>>> print(coef_path)
[[ 0. 0. 0.46874778]
[ 0.2159048 0.4425765 0.23689075]]
>>> # Now use lars_path and 1D linear interpolation to compute the
>>> # same path
>>> from sklearn.linear_model import lars_path
>>> alphas, active, coef_path_lars = lars_path(X, y, method='lasso')
>>> from scipy import interpolate
>>> coef_path_continuous = interpolate.interp1d(alphas[::-1],
... coef_path_lars[:, ::-1])
>>> print(coef_path_continuous([5., 1., .5]))
[[ 0. 0. 0.46915237]
[ 0.2159048 0.4425765 0.23668876]]
See also
--------
lars_path
Lasso
LassoLars
LassoCV
LassoLarsCV
sklearn.decomposition.sparse_encode
"""
return enet_path(X, y, l1_ratio=1., eps=eps, n_alphas=n_alphas,
alphas=alphas, precompute=precompute, Xy=Xy,
copy_X=copy_X, coef_init=coef_init, verbose=verbose,
positive=positive, return_n_iter=return_n_iter, **params)
def enet_path(X, y, l1_ratio=0.5, eps=1e-3, n_alphas=100, alphas=None,
precompute='auto', Xy=None, copy_X=True, coef_init=None,
verbose=False, return_n_iter=False, positive=False,
check_input=True, **params):
"""Compute elastic net path with coordinate descent
The elastic net optimization function varies for mono and multi-outputs.
For mono-output tasks it is::
1 / (2 * n_samples) * ||y - Xw||^2_2
+ alpha * l1_ratio * ||w||_1
+ 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2
For multi-output tasks it is::
(1 / (2 * n_samples)) * ||Y - XW||^Fro_2
+ alpha * l1_ratio * ||W||_21
+ 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <elastic_net>`.
Parameters
----------
X : {array-like}, shape (n_samples, n_features)
Training data. Pass directly as Fortran-contiguous data to avoid
unnecessary memory duplication. If ``y`` is mono-output then ``X``
can be sparse.
y : ndarray, shape (n_samples,) or (n_samples, n_outputs)
Target values
l1_ratio : float, optional
float between 0 and 1 passed to elastic net (scaling between
l1 and l2 penalties). ``l1_ratio=1`` corresponds to the Lasso
eps : float
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``
n_alphas : int, optional
Number of alphas along the regularization path
alphas : ndarray, optional
List of alphas where to compute the models.
If None alphas are set automatically
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
Xy : array-like, optional
Xy = np.dot(X.T, y) that can be precomputed. It is useful
only when the Gram matrix is precomputed.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
coef_init : array, shape (n_features, ) | None
The initial values of the coefficients.
verbose : bool or integer
Amount of verbosity.
params : kwargs
keyword arguments passed to the coordinate descent solver.
return_n_iter : bool
whether to return the number of iterations or not.
positive : bool, default False
If set to True, forces coefficients to be positive.
check_input : bool, default True
        Skip input validation checks, including the Gram matrix when provided,
        assuming they are handled by the caller when check_input=False.
Returns
-------
alphas : array, shape (n_alphas,)
The alphas along the path where models are computed.
coefs : array, shape (n_features, n_alphas) or \
(n_outputs, n_features, n_alphas)
Coefficients along the path.
dual_gaps : array, shape (n_alphas,)
The dual gaps at the end of the optimization for each alpha.
n_iters : array-like, shape (n_alphas,)
The number of iterations taken by the coordinate descent optimizer to
reach the specified tolerance for each alpha.
(Is returned when ``return_n_iter`` is set to True).
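    A typical call (illustrative) is
    ``alphas, coefs, dual_gaps = enet_path(X, y, l1_ratio=0.5, n_alphas=50)``;
    with ``l1_ratio=1.0`` this reduces to the lasso path.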
Notes
-----
See examples/linear_model/plot_lasso_coordinate_descent_path.py for an example.
See also
--------
MultiTaskElasticNet
MultiTaskElasticNetCV
ElasticNet
ElasticNetCV
"""
# We expect X and y to be already Fortran ordered when bypassing
# checks
if check_input:
X = check_array(X, 'csc', dtype=[np.float64, np.float32],
order='F', copy=copy_X)
y = check_array(y, 'csc', dtype=X.dtype.type, order='F', copy=False,
ensure_2d=False)
if Xy is not None:
# Xy should be a 1d contiguous array or a 2D C ordered array
Xy = check_array(Xy, dtype=X.dtype.type, order='C', copy=False,
ensure_2d=False)
n_samples, n_features = X.shape
multi_output = False
if y.ndim != 1:
multi_output = True
_, n_outputs = y.shape
# MultiTaskElasticNet does not support sparse matrices
if not multi_output and sparse.isspmatrix(X):
if 'X_offset' in params:
# As sparse matrices are not actually centered we need this
# to be passed to the CD solver.
X_sparse_scaling = params['X_offset'] / params['X_scale']
X_sparse_scaling = np.asarray(X_sparse_scaling, dtype=X.dtype)
else:
X_sparse_scaling = np.zeros(n_features, dtype=X.dtype)
# X should be normalized and fit already if function is called
# from ElasticNet.fit
if check_input:
X, y, X_offset, y_offset, X_scale, precompute, Xy = \
_pre_fit(X, y, Xy, precompute, normalize=False,
fit_intercept=False, copy=False)
if alphas is None:
        # No need to normalize or fit an intercept: it has been done
# above
alphas = _alpha_grid(X, y, Xy=Xy, l1_ratio=l1_ratio,
fit_intercept=False, eps=eps, n_alphas=n_alphas,
normalize=False, copy_X=False)
else:
alphas = np.sort(alphas)[::-1] # make sure alphas are properly ordered
n_alphas = len(alphas)
tol = params.get('tol', 1e-4)
max_iter = params.get('max_iter', 1000)
dual_gaps = np.empty(n_alphas)
n_iters = []
rng = check_random_state(params.get('random_state', None))
selection = params.get('selection', 'cyclic')
if selection not in ['random', 'cyclic']:
raise ValueError("selection should be either random or cyclic.")
random = (selection == 'random')
if not multi_output:
coefs = np.empty((n_features, n_alphas), dtype=X.dtype)
else:
coefs = np.empty((n_outputs, n_features, n_alphas),
dtype=X.dtype)
if coef_init is None:
coef_ = np.asfortranarray(np.zeros(coefs.shape[:-1], dtype=X.dtype))
else:
coef_ = np.asfortranarray(coef_init, dtype=X.dtype)
for i, alpha in enumerate(alphas):
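        # the coordinate descent solvers minimize the objective multiplied by
        # n_samples, so the penalty strengths are rescaled accordingly here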
l1_reg = alpha * l1_ratio * n_samples
l2_reg = alpha * (1.0 - l1_ratio) * n_samples
if not multi_output and sparse.isspmatrix(X):
model = cd_fast.sparse_enet_coordinate_descent(
coef_, l1_reg, l2_reg, X.data, X.indices,
X.indptr, y, X_sparse_scaling,
max_iter, tol, rng, random, positive)
elif multi_output:
model = cd_fast.enet_coordinate_descent_multi_task(
coef_, l1_reg, l2_reg, X, y, max_iter, tol, rng, random)
elif isinstance(precompute, np.ndarray):
# We expect precompute to be already Fortran ordered when bypassing
# checks
if check_input:
precompute = check_array(precompute, dtype=np.float64,
order='C')
model = cd_fast.enet_coordinate_descent_gram(
coef_, l1_reg, l2_reg, precompute, Xy, y, max_iter,
tol, rng, random, positive)
elif precompute is False:
model = cd_fast.enet_coordinate_descent(
coef_, l1_reg, l2_reg, X, y, max_iter, tol, rng, random,
positive)
else:
raise ValueError("Precompute should be one of True, False, "
"'auto' or array-like. Got %r" % precompute)
coef_, dual_gap_, eps_, n_iter_ = model
coefs[..., i] = coef_
dual_gaps[i] = dual_gap_
n_iters.append(n_iter_)
if dual_gap_ > eps_:
warnings.warn('Objective did not converge.' +
' You might want' +
' to increase the number of iterations.' +
' Fitting data with very small alpha' +
' may cause precision problems.',
ConvergenceWarning)
if verbose:
if verbose > 2:
print(model)
elif verbose > 1:
print('Path: %03i out of %03i' % (i, n_alphas))
else:
sys.stderr.write('.')
if return_n_iter:
return alphas, coefs, dual_gaps, n_iters
return alphas, coefs, dual_gaps
###############################################################################
# ElasticNet model
class ElasticNet(LinearModel, RegressorMixin):
"""Linear regression with combined L1 and L2 priors as regularizer.
Minimizes the objective function::
1 / (2 * n_samples) * ||y - Xw||^2_2
+ alpha * l1_ratio * ||w||_1
+ 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2
If you are interested in controlling the L1 and L2 penalty
separately, keep in mind that this is equivalent to::
a * L1 + b * L2
where::
alpha = a + b and l1_ratio = a / (a + b)
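    For example, the default ``alpha = 1.0`` and ``l1_ratio = 0.5`` correspond
    to ``a = 0.5`` and ``b = 0.5``.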
The parameter l1_ratio corresponds to alpha in the glmnet R package while
alpha corresponds to the lambda parameter in glmnet. Specifically, l1_ratio
= 1 is the lasso penalty. Currently, l1_ratio <= 0.01 is not reliable,
unless you supply your own sequence of alpha.
Read more in the :ref:`User Guide <elastic_net>`.
Parameters
----------
alpha : float, optional
Constant that multiplies the penalty terms. Defaults to 1.0.
See the notes for the exact mathematical meaning of this
        parameter. ``alpha = 0`` is equivalent to ordinary least squares, solved
by the :class:`LinearRegression` object. For numerical
reasons, using ``alpha = 0`` with the ``Lasso`` object is not advised.
Given this, you should use the :class:`LinearRegression` object.
l1_ratio : float
The ElasticNet mixing parameter, with ``0 <= l1_ratio <= 1``. For
        ``l1_ratio = 0`` the penalty is an L2 penalty. For ``l1_ratio = 1`` it
is an L1 penalty. For ``0 < l1_ratio < 1``, the penalty is a
combination of L1 and L2.
fit_intercept : bool
Whether the intercept should be estimated or not. If ``False``, the
data is assumed to be already centered.
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
This parameter is ignored when ``fit_intercept`` is set to ``False``.
When the regressors are normalized, note that this makes the
hyperparameters learnt more robust and almost independent of the number
of samples. The same property is not valid for standardized data.
However, if you wish to standardize, please use
:class:`preprocessing.StandardScaler` before calling ``fit`` on an estimator
with ``normalize=False``.
precompute : True | False | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. The Gram matrix can also be passed as argument.
For sparse input this option is always ``True`` to preserve sparsity.
max_iter : int, optional
The maximum number of iterations
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
warm_start : bool, optional
When set to ``True``, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
positive : bool, optional
When set to ``True``, forces the coefficients to be positive.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
coef_ : array, shape (n_features,) | (n_targets, n_features)
parameter vector (w in the cost function formula)
sparse_coef_ : scipy.sparse matrix, shape (n_features, 1) | \
(n_targets, n_features)
``sparse_coef_`` is a readonly property derived from ``coef_``
intercept_ : float | array, shape (n_targets,)
independent term in decision function.
n_iter_ : array-like, shape (n_targets,)
number of iterations run by the coordinate descent solver to reach
the specified tolerance.
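    Examples
    --------
    A minimal usage sketch on a toy dataset:
    >>> from sklearn import linear_model
    >>> clf = linear_model.ElasticNet(alpha=0.1, l1_ratio=0.7)
    >>> clf.fit([[0, 0], [1, 1], [2, 2]], [0, 1, 2])
    ... #doctest: +NORMALIZE_WHITESPACE
    ElasticNet(alpha=0.1, copy_X=True, fit_intercept=True, l1_ratio=0.7,
        max_iter=1000, normalize=False, positive=False, precompute=False,
        random_state=None, selection='cyclic', tol=0.0001, warm_start=False)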
Notes
-----
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
See also
--------
SGDRegressor: implements elastic net regression with incremental training.
SGDClassifier: implements logistic regression with elastic net penalty
(``SGDClassifier(loss="log", penalty="elasticnet")``).
"""
path = staticmethod(enet_path)
def __init__(self, alpha=1.0, l1_ratio=0.5, fit_intercept=True,
normalize=False, precompute=False, max_iter=1000,
copy_X=True, tol=1e-4, warm_start=False, positive=False,
random_state=None, selection='cyclic'):
self.alpha = alpha
self.l1_ratio = l1_ratio
self.coef_ = None
self.fit_intercept = fit_intercept
self.normalize = normalize
self.precompute = precompute
self.max_iter = max_iter
self.copy_X = copy_X
self.tol = tol
self.warm_start = warm_start
self.positive = positive
self.intercept_ = 0.0
self.random_state = random_state
self.selection = selection
def fit(self, X, y, check_input=True):
"""Fit model with coordinate descent.
Parameters
-----------
X : ndarray or scipy.sparse matrix, (n_samples, n_features)
Data
y : ndarray, shape (n_samples,) or (n_samples, n_targets)
Target
check_input : boolean, (default=True)
            Allows bypassing several input checks.
            Don't use this parameter unless you know what you are doing.
Notes
-----
Coordinate descent is an algorithm that considers each column of
data at a time hence it will automatically convert the X input
as a Fortran-contiguous numpy array if necessary.
To avoid memory re-allocation it is advised to allocate the
initial data in memory directly using that format.
"""
if self.alpha == 0:
warnings.warn("With alpha=0, this algorithm does not converge "
"well. You are advised to use the LinearRegression "
"estimator", stacklevel=2)
if isinstance(self.precompute, six.string_types):
raise ValueError('precompute should be one of True, False or'
' array-like. Got %r' % self.precompute)
# We expect X and y to be float64 or float32 Fortran ordered arrays
# when bypassing checks
if check_input:
X, y = check_X_y(X, y, accept_sparse='csc',
order='F', dtype=[np.float64, np.float32],
copy=self.copy_X and self.fit_intercept,
multi_output=True, y_numeric=True)
y = check_array(y, order='F', copy=False, dtype=X.dtype.type,
ensure_2d=False)
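        # center/rescale the data as requested and, when precompute is
        # enabled, build the Gram matrix and the Xy product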
X, y, X_offset, y_offset, X_scale, precompute, Xy = \
_pre_fit(X, y, None, self.precompute, self.normalize,
self.fit_intercept, copy=False)
if y.ndim == 1:
y = y[:, np.newaxis]
if Xy is not None and Xy.ndim == 1:
Xy = Xy[:, np.newaxis]
n_samples, n_features = X.shape
n_targets = y.shape[1]
if self.selection not in ['cyclic', 'random']:
raise ValueError("selection should be either random or cyclic.")
if not self.warm_start or self.coef_ is None:
coef_ = np.zeros((n_targets, n_features), dtype=X.dtype,
order='F')
else:
coef_ = self.coef_
if coef_.ndim == 1:
coef_ = coef_[np.newaxis, :]
dual_gaps_ = np.zeros(n_targets, dtype=X.dtype)
self.n_iter_ = []
for k in xrange(n_targets):
if Xy is not None:
this_Xy = Xy[:, k]
else:
this_Xy = None
_, this_coef, this_dual_gap, this_iter = \
self.path(X, y[:, k],
l1_ratio=self.l1_ratio, eps=None,
n_alphas=None, alphas=[self.alpha],
precompute=precompute, Xy=this_Xy,
fit_intercept=False, normalize=False, copy_X=True,
verbose=False, tol=self.tol, positive=self.positive,
X_offset=X_offset, X_scale=X_scale, return_n_iter=True,
coef_init=coef_[k], max_iter=self.max_iter,
random_state=self.random_state,
selection=self.selection,
check_input=False)
coef_[k] = this_coef[:, 0]
dual_gaps_[k] = this_dual_gap[0]
self.n_iter_.append(this_iter[0])
if n_targets == 1:
self.n_iter_ = self.n_iter_[0]
self.coef_, self.dual_gap_ = map(np.squeeze, [coef_, dual_gaps_])
self._set_intercept(X_offset, y_offset, X_scale)
# workaround since _set_intercept will cast self.coef_ into float64
self.coef_ = np.asarray(self.coef_, dtype=X.dtype)
# return self for chaining fit and predict calls
return self
@property
def sparse_coef_(self):
""" sparse representation of the fitted ``coef_`` """
return sparse.csr_matrix(self.coef_)
@deprecated(" and will be removed in 0.19")
def decision_function(self, X):
"""Decision function of the linear model
Parameters
----------
X : numpy array or scipy.sparse matrix of shape (n_samples, n_features)
Returns
-------
T : array, shape (n_samples,)
The predicted decision function
"""
return self._decision_function(X)
def _decision_function(self, X):
"""Decision function of the linear model
Parameters
----------
X : numpy array or scipy.sparse matrix of shape (n_samples, n_features)
Returns
-------
T : array, shape (n_samples,)
The predicted decision function
"""
check_is_fitted(self, 'n_iter_')
if sparse.isspmatrix(X):
return safe_sparse_dot(X, self.coef_.T,
dense_output=True) + self.intercept_
else:
return super(ElasticNet, self)._decision_function(X)
###############################################################################
# Lasso model
class Lasso(ElasticNet):
"""Linear Model trained with L1 prior as regularizer (aka the Lasso)
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
Technically the Lasso model is optimizing the same objective function as
the Elastic Net with ``l1_ratio=1.0`` (no L2 penalty).
Read more in the :ref:`User Guide <lasso>`.
Parameters
----------
alpha : float, optional
Constant that multiplies the L1 term. Defaults to 1.0.
        ``alpha = 0`` is equivalent to ordinary least squares, solved
by the :class:`LinearRegression` object. For numerical
reasons, using ``alpha = 0`` with the ``Lasso`` object is not advised.
Given this, you should use the :class:`LinearRegression` object.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
This parameter is ignored when ``fit_intercept`` is set to ``False``.
When the regressors are normalized, note that this makes the
hyperparameters learnt more robust and almost independent of the number
of samples. The same property is not valid for standardized data.
However, if you wish to standardize, please use
:class:`preprocessing.StandardScaler` before calling ``fit`` on an estimator
with ``normalize=False``.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
    precompute : True | False | array-like, default=False
        Whether to use a precomputed Gram matrix to speed up
        calculations. The Gram matrix can also be passed as argument.
        For sparse input this option is always ``True`` to preserve
        sparsity.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
positive : bool, optional
When set to ``True``, forces the coefficients to be positive.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
coef_ : array, shape (n_features,) | (n_targets, n_features)
parameter vector (w in the cost function formula)
sparse_coef_ : scipy.sparse matrix, shape (n_features, 1) | \
(n_targets, n_features)
``sparse_coef_`` is a readonly property derived from ``coef_``
intercept_ : float | array, shape (n_targets,)
independent term in decision function.
n_iter_ : int | array-like, shape (n_targets,)
number of iterations run by the coordinate descent solver to reach
the specified tolerance.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.Lasso(alpha=0.1)
>>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2])
Lasso(alpha=0.1, copy_X=True, fit_intercept=True, max_iter=1000,
normalize=False, positive=False, precompute=False, random_state=None,
selection='cyclic', tol=0.0001, warm_start=False)
>>> print(clf.coef_)
[ 0.85 0. ]
>>> print(clf.intercept_)
0.15
See also
--------
lars_path
lasso_path
LassoLars
LassoCV
LassoLarsCV
sklearn.decomposition.sparse_encode
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
path = staticmethod(enet_path)
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
precompute=False, copy_X=True, max_iter=1000,
tol=1e-4, warm_start=False, positive=False,
random_state=None, selection='cyclic'):
super(Lasso, self).__init__(
alpha=alpha, l1_ratio=1.0, fit_intercept=fit_intercept,
normalize=normalize, precompute=precompute, copy_X=copy_X,
max_iter=max_iter, tol=tol, warm_start=warm_start,
positive=positive, random_state=random_state,
selection=selection)
###############################################################################
# Functions for CV with paths functions
def _path_residuals(X, y, train, test, path, path_params, alphas=None,
l1_ratio=1, X_order=None, dtype=None):
"""Returns the MSE for the models computed by 'path'
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values
train : list of indices
The indices of the train set
test : list of indices
The indices of the test set
path : callable
function returning a list of models on the path. See
enet_path for an example of signature
path_params : dictionary
Parameters passed to the path function
alphas : array-like, optional
Array of float that is used for cross-validation. If not
provided, computed using 'path'
l1_ratio : float, optional
float between 0 and 1 passed to ElasticNet (scaling between
l1 and l2 penalties). For ``l1_ratio = 0`` the penalty is an
L2 penalty. For ``l1_ratio = 1`` it is an L1 penalty. For ``0
< l1_ratio < 1``, the penalty is a combination of L1 and L2
X_order : {'F', 'C', or None}, optional
The order of the arrays expected by the path function to
avoid memory copies
dtype : a numpy dtype or None
The dtype of the arrays expected by the path function to
avoid memory copies
"""
X_train = X[train]
y_train = y[train]
X_test = X[test]
y_test = y[test]
fit_intercept = path_params['fit_intercept']
normalize = path_params['normalize']
if y.ndim == 1:
precompute = path_params['precompute']
else:
# No Gram variant of multi-task exists right now.
# Fall back to default enet_multitask
precompute = False
X_train, y_train, X_offset, y_offset, X_scale, precompute, Xy = \
_pre_fit(X_train, y_train, None, precompute, normalize, fit_intercept,
copy=False)
path_params = path_params.copy()
path_params['Xy'] = Xy
path_params['X_offset'] = X_offset
path_params['X_scale'] = X_scale
path_params['precompute'] = precompute
path_params['copy_X'] = False
path_params['alphas'] = alphas
if 'l1_ratio' in path_params:
path_params['l1_ratio'] = l1_ratio
# Do the ordering and type casting here, as if it is done in the path,
# X is copied and a reference is kept here
X_train = check_array(X_train, 'csc', dtype=dtype, order=X_order)
alphas, coefs, _ = path(X_train, y_train, **path_params)
del X_train, y_train
if y.ndim == 1:
# Doing this so that it becomes coherent with multioutput.
coefs = coefs[np.newaxis, :, :]
y_offset = np.atleast_1d(y_offset)
y_test = y_test[:, np.newaxis]
if normalize:
nonzeros = np.flatnonzero(X_scale)
coefs[:, nonzeros] /= X_scale[nonzeros][:, np.newaxis]
intercepts = y_offset[:, np.newaxis] - np.dot(X_offset, coefs)
if sparse.issparse(X_test):
n_order, n_features, n_alphas = coefs.shape
# Work around for sparse matrices since coefs is a 3-D numpy array.
coefs_feature_major = np.rollaxis(coefs, 1)
feature_2d = np.reshape(coefs_feature_major, (n_features, -1))
X_test_coefs = safe_sparse_dot(X_test, feature_2d)
X_test_coefs = X_test_coefs.reshape(X_test.shape[0], n_order, -1)
else:
X_test_coefs = safe_sparse_dot(X_test, coefs)
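    # prediction residuals on the held-out fold, for every alpha in the path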
residues = X_test_coefs - y_test[:, :, np.newaxis]
residues += intercepts
this_mses = ((residues ** 2).mean(axis=0)).mean(axis=0)
return this_mses
class LinearModelCV(six.with_metaclass(ABCMeta, LinearModel)):
"""Base class for iterative model fitting along a regularization path"""
@abstractmethod
def __init__(self, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True,
normalize=False, precompute='auto', max_iter=1000, tol=1e-4,
copy_X=True, cv=None, verbose=False, n_jobs=1,
positive=False, random_state=None, selection='cyclic'):
self.eps = eps
self.n_alphas = n_alphas
self.alphas = alphas
self.fit_intercept = fit_intercept
self.normalize = normalize
self.precompute = precompute
self.max_iter = max_iter
self.tol = tol
self.copy_X = copy_X
self.cv = cv
self.verbose = verbose
self.n_jobs = n_jobs
self.positive = positive
self.random_state = random_state
self.selection = selection
def fit(self, X, y):
"""Fit linear model with coordinate descent
        The fit is done on a grid of alphas, and the best alpha is
        selected by cross-validation.
Parameters
----------
X : {array-like}, shape (n_samples, n_features)
Training data. Pass directly as float64, Fortran-contiguous data
to avoid unnecessary memory duplication. If y is mono-output,
X can be sparse.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values
"""
y = np.asarray(y, dtype=np.float64)
if y.shape[0] == 0:
raise ValueError("y has 0 samples: %r" % y)
if hasattr(self, 'l1_ratio'):
model_str = 'ElasticNet'
else:
model_str = 'Lasso'
if isinstance(self, ElasticNetCV) or isinstance(self, LassoCV):
if model_str == 'ElasticNet':
model = ElasticNet()
else:
model = Lasso()
if y.ndim > 1 and y.shape[1] > 1:
raise ValueError("For multi-task outputs, use "
"MultiTask%sCV" % (model_str))
y = column_or_1d(y, warn=True)
else:
if sparse.isspmatrix(X):
                raise TypeError("X should be dense but a sparse matrix was "
                                "passed")
elif y.ndim == 1:
raise ValueError("For mono-task outputs, use "
"%sCV" % (model_str))
if model_str == 'ElasticNet':
model = MultiTaskElasticNet()
else:
model = MultiTaskLasso()
if self.selection not in ["random", "cyclic"]:
raise ValueError("selection should be either random or cyclic.")
# This makes sure that there is no duplication in memory.
# Dealing right with copy_X is important in the following:
# Multiple functions touch X and subsamples of X and can induce a
# lot of duplication of memory
copy_X = self.copy_X and self.fit_intercept
if isinstance(X, np.ndarray) or sparse.isspmatrix(X):
# Keep a reference to X
reference_to_old_X = X
# Let us not impose fortran ordering or float64 so far: it is
# not useful for the cross-validation loop and will be done
# by the model fitting itself
X = check_array(X, 'csc', copy=False)
if sparse.isspmatrix(X):
if (hasattr(reference_to_old_X, "data") and
not np.may_share_memory(reference_to_old_X.data, X.data)):
# X is a sparse matrix and has been copied
copy_X = False
elif not np.may_share_memory(reference_to_old_X, X):
# X has been copied
copy_X = False
del reference_to_old_X
else:
X = check_array(X, 'csc', dtype=np.float64, order='F', copy=copy_X)
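            # the data was just copied above (if requested), so the final
            # refit does not need to copy it again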
copy_X = False
if X.shape[0] != y.shape[0]:
raise ValueError("X and y have inconsistent dimensions (%d != %d)"
% (X.shape[0], y.shape[0]))
# All LinearModelCV parameters except 'cv' are acceptable
path_params = self.get_params()
if 'l1_ratio' in path_params:
l1_ratios = np.atleast_1d(path_params['l1_ratio'])
# For the first path, we need to set l1_ratio
path_params['l1_ratio'] = l1_ratios[0]
else:
l1_ratios = [1, ]
path_params.pop('cv', None)
path_params.pop('n_jobs', None)
alphas = self.alphas
n_l1_ratio = len(l1_ratios)
if alphas is None:
alphas = []
for l1_ratio in l1_ratios:
alphas.append(_alpha_grid(
X, y, l1_ratio=l1_ratio,
fit_intercept=self.fit_intercept,
eps=self.eps, n_alphas=self.n_alphas,
normalize=self.normalize,
copy_X=self.copy_X))
else:
# Making sure alphas is properly ordered.
alphas = np.tile(np.sort(alphas)[::-1], (n_l1_ratio, 1))
# We want n_alphas to be the number of alphas used for each l1_ratio.
n_alphas = len(alphas[0])
path_params.update({'n_alphas': n_alphas})
path_params['copy_X'] = copy_X
# We are not computing in parallel, we can modify X
# inplace in the folds
if not (self.n_jobs == 1 or self.n_jobs is None):
path_params['copy_X'] = False
# init cross-validation generator
cv = check_cv(self.cv)
# Compute path for all folds and compute MSE to get the best alpha
folds = list(cv.split(X))
best_mse = np.inf
# We do a double for loop folded in one, in order to be able to
# iterate in parallel on l1_ratio and folds
jobs = (delayed(_path_residuals)(X, y, train, test, self.path,
path_params, alphas=this_alphas,
l1_ratio=this_l1_ratio, X_order='F',
dtype=np.float64)
for this_l1_ratio, this_alphas in zip(l1_ratios, alphas)
for train, test in folds)
mse_paths = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
backend="threading")(jobs)
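        # mse_paths holds one MSE-vs-alpha curve per (l1_ratio, fold) pair;
        # reshape so that axis 1 runs over the CV folds before averaging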
mse_paths = np.reshape(mse_paths, (n_l1_ratio, len(folds), -1))
mean_mse = np.mean(mse_paths, axis=1)
self.mse_path_ = np.squeeze(np.rollaxis(mse_paths, 2, 1))
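        # pick the (l1_ratio, alpha) pair with the lowest mean CV error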
for l1_ratio, l1_alphas, mse_alphas in zip(l1_ratios, alphas,
mean_mse):
i_best_alpha = np.argmin(mse_alphas)
this_best_mse = mse_alphas[i_best_alpha]
if this_best_mse < best_mse:
best_alpha = l1_alphas[i_best_alpha]
best_l1_ratio = l1_ratio
best_mse = this_best_mse
self.l1_ratio_ = best_l1_ratio
self.alpha_ = best_alpha
if self.alphas is None:
self.alphas_ = np.asarray(alphas)
if n_l1_ratio == 1:
self.alphas_ = self.alphas_[0]
# Remove duplicate alphas in case alphas is provided.
else:
self.alphas_ = np.asarray(alphas[0])
# Refit the model with the parameters selected
common_params = dict((name, value)
for name, value in self.get_params().items()
if name in model.get_params())
model.set_params(**common_params)
model.alpha = best_alpha
model.l1_ratio = best_l1_ratio
model.copy_X = copy_X
model.precompute = False
model.fit(X, y)
if not hasattr(self, 'l1_ratio'):
del self.l1_ratio_
self.coef_ = model.coef_
self.intercept_ = model.intercept_
self.dual_gap_ = model.dual_gap_
self.n_iter_ = model.n_iter_
return self
class LassoCV(LinearModelCV, RegressorMixin):
"""Lasso linear model with iterative fitting along a regularization path
The best model is selected by cross-validation.
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
Read more in the :ref:`User Guide <lasso>`.
Parameters
----------
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``.
n_alphas : int, optional
Number of alphas along the regularization path
alphas : numpy array, optional
List of alphas where to compute the models.
If ``None`` alphas are set automatically
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
verbose : bool or integer
Amount of verbosity.
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs.
positive : bool, optional
If positive, restrict regression coefficients to be positive
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
fit_intercept : boolean, default True
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
This parameter is ignored when ``fit_intercept`` is set to ``False``.
When the regressors are normalized, note that this makes the
hyperparameters learnt more robust and almost independent of the number
of samples. The same property is not valid for standardized data.
However, if you wish to standardize, please use
:class:`preprocessing.StandardScaler` before calling ``fit`` on an estimator
with ``normalize=False``.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
Attributes
----------
alpha_ : float
The amount of penalization chosen by cross validation
coef_ : array, shape (n_features,) | (n_targets, n_features)
parameter vector (w in the cost function formula)
intercept_ : float | array, shape (n_targets,)
independent term in decision function.
mse_path_ : array, shape (n_alphas, n_folds)
mean square error for the test set on each fold, varying alpha
alphas_ : numpy array, shape (n_alphas,)
The grid of alphas used for fitting
dual_gap_ : ndarray, shape ()
The dual gap at the end of the optimization for the optimal alpha
(``alpha_``).
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance for the optimal alpha.
Notes
-----
See examples/linear_model/plot_lasso_model_selection.py
for an example.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
See also
--------
lars_path
lasso_path
LassoLars
Lasso
LassoLarsCV
"""
path = staticmethod(lasso_path)
def __init__(self, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True,
normalize=False, precompute='auto', max_iter=1000, tol=1e-4,
copy_X=True, cv=None, verbose=False, n_jobs=1,
positive=False, random_state=None, selection='cyclic'):
super(LassoCV, self).__init__(
eps=eps, n_alphas=n_alphas, alphas=alphas,
fit_intercept=fit_intercept, normalize=normalize,
precompute=precompute, max_iter=max_iter, tol=tol, copy_X=copy_X,
cv=cv, verbose=verbose, n_jobs=n_jobs, positive=positive,
random_state=random_state, selection=selection)
class ElasticNetCV(LinearModelCV, RegressorMixin):
"""Elastic Net model with iterative fitting along a regularization path
The best model is selected by cross-validation.
Read more in the :ref:`User Guide <elastic_net>`.
Parameters
----------
l1_ratio : float or array of floats, optional
float between 0 and 1 passed to ElasticNet (scaling between
l1 and l2 penalties). For ``l1_ratio = 0``
the penalty is an L2 penalty. For ``l1_ratio = 1`` it is an L1 penalty.
For ``0 < l1_ratio < 1``, the penalty is a combination of L1 and L2
This parameter can be a list, in which case the different
values are tested by cross-validation and the one giving the best
prediction score is used. Note that a good choice of list of
values for l1_ratio is often to put more values close to 1
(i.e. Lasso) and less close to 0 (i.e. Ridge), as in ``[.1, .5, .7,
.9, .95, .99, 1]``
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``.
n_alphas : int, optional
Number of alphas along the regularization path, used for each l1_ratio.
alphas : numpy array, optional
List of alphas where to compute the models.
If None alphas are set automatically
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
verbose : bool or integer
Amount of verbosity.
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs.
positive : bool, optional
When set to ``True``, forces the coefficients to be positive.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
This parameter is ignored when ``fit_intercept`` is set to ``False``.
When the regressors are normalized, note that this makes the
hyperparameters learnt more robust and almost independent of the number
of samples. The same property is not valid for standardized data.
However, if you wish to standardize, please use
:class:`preprocessing.StandardScaler` before calling ``fit`` on an estimator
with ``normalize=False``.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
Attributes
----------
alpha_ : float
The amount of penalization chosen by cross validation
l1_ratio_ : float
The compromise between l1 and l2 penalization chosen by
cross validation
coef_ : array, shape (n_features,) | (n_targets, n_features)
        Parameter vector (w in the cost function formula).
    intercept_ : float | array, shape (n_targets,)
Independent term in the decision function.
mse_path_ : array, shape (n_l1_ratio, n_alpha, n_folds)
Mean square error for the test set on each fold, varying l1_ratio and
alpha.
alphas_ : numpy array, shape (n_alphas,) or (n_l1_ratio, n_alphas)
The grid of alphas used for fitting, for each l1_ratio.
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance for the optimal alpha.
Notes
-----
See examples/linear_model/plot_lasso_model_selection.py
for an example.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
The parameter l1_ratio corresponds to alpha in the glmnet R package
while alpha corresponds to the lambda parameter in glmnet.
More specifically, the optimization objective is::
1 / (2 * n_samples) * ||y - Xw||^2_2
+ alpha * l1_ratio * ||w||_1
+ 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2
If you are interested in controlling the L1 and L2 penalty
separately, keep in mind that this is equivalent to::
a * L1 + b * L2
for::
alpha = a + b and l1_ratio = a / (a + b).
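    For example, ``a = 0.1`` and ``b = 0.4`` correspond to ``alpha = 0.5`` and
    ``l1_ratio = 0.2``.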
See also
--------
enet_path
ElasticNet
"""
path = staticmethod(enet_path)
def __init__(self, l1_ratio=0.5, eps=1e-3, n_alphas=100, alphas=None,
fit_intercept=True, normalize=False, precompute='auto',
max_iter=1000, tol=1e-4, cv=None, copy_X=True,
verbose=0, n_jobs=1, positive=False, random_state=None,
selection='cyclic'):
self.l1_ratio = l1_ratio
self.eps = eps
self.n_alphas = n_alphas
self.alphas = alphas
self.fit_intercept = fit_intercept
self.normalize = normalize
self.precompute = precompute
self.max_iter = max_iter
self.tol = tol
self.cv = cv
self.copy_X = copy_X
self.verbose = verbose
self.n_jobs = n_jobs
self.positive = positive
self.random_state = random_state
self.selection = selection
###############################################################################
# Multi Task ElasticNet and Lasso models (with joint feature selection)
class MultiTaskElasticNet(Lasso):
"""Multi-task ElasticNet model trained with L1/L2 mixed-norm as regularizer
The optimization objective for MultiTaskElasticNet is::
        (1 / (2 * n_samples)) * ||Y - XW||_Fro^2
+ alpha * l1_ratio * ||W||_21
+ 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <multi_task_lasso>`.
Parameters
----------
alpha : float, optional
Constant that multiplies the L1/L2 term. Defaults to 1.0
l1_ratio : float
The ElasticNet mixing parameter, with 0 < l1_ratio <= 1.
        For l1_ratio = 1 the penalty is an L1/L2 penalty. For l1_ratio = 0 it
        is an L2 penalty.
For ``0 < l1_ratio < 1``, the penalty is a combination of L1/L2 and L2.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
This parameter is ignored when ``fit_intercept`` is set to ``False``.
When the regressors are normalized, note that this makes the
hyperparameters learnt more robust and almost independent of the number
of samples. The same property is not valid for standardized data.
However, if you wish to standardize, please use
:class:`preprocessing.StandardScaler` before calling ``fit`` on an estimator
with ``normalize=False``.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
warm_start : bool, optional
When set to ``True``, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
intercept_ : array, shape (n_tasks,)
Independent term in decision function.
coef_ : array, shape (n_tasks, n_features)
Parameter vector (W in the cost function formula). If a 1D y is \
passed in at fit (non multi-task usage), ``coef_`` is then a 1D array
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.MultiTaskElasticNet(alpha=0.1)
>>> clf.fit([[0,0], [1, 1], [2, 2]], [[0, 0], [1, 1], [2, 2]])
... #doctest: +NORMALIZE_WHITESPACE
MultiTaskElasticNet(alpha=0.1, copy_X=True, fit_intercept=True,
l1_ratio=0.5, max_iter=1000, normalize=False, random_state=None,
selection='cyclic', tol=0.0001, warm_start=False)
>>> print(clf.coef_)
[[ 0.45663524 0.45612256]
[ 0.45663524 0.45612256]]
>>> print(clf.intercept_)
[ 0.0872422 0.0872422]
See also
--------
ElasticNet, MultiTaskLasso
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
def __init__(self, alpha=1.0, l1_ratio=0.5, fit_intercept=True,
normalize=False, copy_X=True, max_iter=1000, tol=1e-4,
warm_start=False, random_state=None, selection='cyclic'):
self.l1_ratio = l1_ratio
self.alpha = alpha
self.coef_ = None
self.fit_intercept = fit_intercept
self.normalize = normalize
self.max_iter = max_iter
self.copy_X = copy_X
self.tol = tol
self.warm_start = warm_start
self.random_state = random_state
self.selection = selection
def fit(self, X, y):
"""Fit MultiTaskLasso model with coordinate descent
Parameters
-----------
X : ndarray, shape (n_samples, n_features)
Data
y : ndarray, shape (n_samples, n_tasks)
Target
Notes
-----
Coordinate descent is an algorithm that considers each column of
data at a time hence it will automatically convert the X input
as a Fortran-contiguous numpy array if necessary.
To avoid memory re-allocation it is advised to allocate the
initial data in memory directly using that format.
"""
# X and y must be of type float64
X = check_array(X, dtype=np.float64, order='F',
copy=self.copy_X and self.fit_intercept)
y = check_array(y, dtype=np.float64, ensure_2d=False)
if hasattr(self, 'l1_ratio'):
model_str = 'ElasticNet'
else:
model_str = 'Lasso'
if y.ndim == 1:
raise ValueError("For mono-task outputs, use %s" % model_str)
n_samples, n_features = X.shape
_, n_tasks = y.shape
if n_samples != y.shape[0]:
raise ValueError("X and y have inconsistent dimensions (%d != %d)"
% (n_samples, y.shape[0]))
X, y, X_offset, y_offset, X_scale = _preprocess_data(
X, y, self.fit_intercept, self.normalize, copy=False)
if not self.warm_start or self.coef_ is None:
self.coef_ = np.zeros((n_tasks, n_features), dtype=np.float64,
order='F')
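        # scale the penalties by n_samples to match the solver's
        # un-normalized objective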
l1_reg = self.alpha * self.l1_ratio * n_samples
l2_reg = self.alpha * (1.0 - self.l1_ratio) * n_samples
self.coef_ = np.asfortranarray(self.coef_) # coef contiguous in memory
if self.selection not in ['random', 'cyclic']:
raise ValueError("selection should be either random or cyclic.")
random = (self.selection == 'random')
self.coef_, self.dual_gap_, self.eps_, self.n_iter_ = \
cd_fast.enet_coordinate_descent_multi_task(
self.coef_, l1_reg, l2_reg, X, y, self.max_iter, self.tol,
check_random_state(self.random_state), random)
self._set_intercept(X_offset, y_offset, X_scale)
if self.dual_gap_ > self.eps_:
warnings.warn('Objective did not converge, you might want'
' to increase the number of iterations')
# return self for chaining fit and predict calls
return self
class MultiTaskLasso(MultiTaskElasticNet):
"""Multi-task Lasso model trained with L1/L2 mixed-norm as regularizer
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||Y - XW||^2_Fro + alpha * ||W||_21
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <multi_task_lasso>`.
Parameters
----------
alpha : float, optional
Constant that multiplies the L1/L2 term. Defaults to 1.0
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
This parameter is ignored when ``fit_intercept`` is set to ``False``.
When the regressors are normalized, note that this makes the
hyperparameters learnt more robust and almost independent of the number
of samples. The same property is not valid for standardized data.
However, if you wish to standardize, please use
:class:`preprocessing.StandardScaler` before calling ``fit`` on an estimator
with ``normalize=False``.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
warm_start : bool, optional
When set to ``True``, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
coef_ : array, shape (n_tasks, n_features)
parameter vector (W in the cost function formula)
intercept_ : array, shape (n_tasks,)
independent term in decision function.
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.MultiTaskLasso(alpha=0.1)
>>> clf.fit([[0,0], [1, 1], [2, 2]], [[0, 0], [1, 1], [2, 2]])
MultiTaskLasso(alpha=0.1, copy_X=True, fit_intercept=True, max_iter=1000,
normalize=False, random_state=None, selection='cyclic', tol=0.0001,
warm_start=False)
>>> print(clf.coef_)
[[ 0.89393398 0. ]
[ 0.89393398 0. ]]
>>> print(clf.intercept_)
[ 0.10606602 0.10606602]
See also
--------
Lasso, MultiTaskElasticNet
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
copy_X=True, max_iter=1000, tol=1e-4, warm_start=False,
random_state=None, selection='cyclic'):
self.alpha = alpha
self.coef_ = None
self.fit_intercept = fit_intercept
self.normalize = normalize
self.max_iter = max_iter
self.copy_X = copy_X
self.tol = tol
self.warm_start = warm_start
self.l1_ratio = 1.0
self.random_state = random_state
self.selection = selection
class MultiTaskElasticNetCV(LinearModelCV, RegressorMixin):
"""Multi-task L1/L2 ElasticNet with built-in cross-validation.
The optimization objective for MultiTaskElasticNet is::
        (1 / (2 * n_samples)) * ||Y - XW||_Fro^2
+ alpha * l1_ratio * ||W||_21
+ 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <multi_task_lasso>`.
Parameters
----------
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``.
alphas : array-like, optional
List of alphas where to compute the models.
If not provided, set automatically.
n_alphas : int, optional
Number of alphas along the regularization path
l1_ratio : float or array of floats
The ElasticNet mixing parameter, with 0 < l1_ratio <= 1.
        For l1_ratio = 1 the penalty is an L1/L2 penalty. For l1_ratio = 0 it
        is an L2 penalty.
For ``0 < l1_ratio < 1``, the penalty is a combination of L1/L2 and L2.
This parameter can be a list, in which case the different
values are tested by cross-validation and the one giving the best
prediction score is used. Note that a good choice of list of
values for l1_ratio is often to put more values close to 1
(i.e. Lasso) and less close to 0 (i.e. Ridge), as in ``[.1, .5, .7,
.9, .95, .99, 1]``
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
This parameter is ignored when ``fit_intercept`` is set to ``False``.
When the regressors are normalized, note that this makes the
hyperparameters learnt more robust and almost independent of the number
of samples. The same property is not valid for standardized data.
However, if you wish to standardize, please use
:class:`preprocessing.StandardScaler` before calling ``fit`` on an estimator
with ``normalize=False``.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
verbose : bool or integer
Amount of verbosity.
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs. Note that this is used only if multiple values for
l1_ratio are given.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
intercept_ : array, shape (n_tasks,)
Independent term in decision function.
coef_ : array, shape (n_tasks, n_features)
Parameter vector (W in the cost function formula).
alpha_ : float
The amount of penalization chosen by cross validation
mse_path_ : array, shape (n_alphas, n_folds) or \
(n_l1_ratio, n_alphas, n_folds)
mean square error for the test set on each fold, varying alpha
alphas_ : numpy array, shape (n_alphas,) or (n_l1_ratio, n_alphas)
The grid of alphas used for fitting, for each l1_ratio
l1_ratio_ : float
best l1_ratio obtained by cross-validation.
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance for the optimal alpha.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.MultiTaskElasticNetCV()
>>> clf.fit([[0,0], [1, 1], [2, 2]],
... [[0, 0], [1, 1], [2, 2]])
... #doctest: +NORMALIZE_WHITESPACE
MultiTaskElasticNetCV(alphas=None, copy_X=True, cv=None, eps=0.001,
fit_intercept=True, l1_ratio=0.5, max_iter=1000, n_alphas=100,
n_jobs=1, normalize=False, random_state=None, selection='cyclic',
tol=0.0001, verbose=0)
>>> print(clf.coef_)
[[ 0.52875032 0.46958558]
[ 0.52875032 0.46958558]]
>>> print(clf.intercept_)
[ 0.00166409 0.00166409]
See also
--------
MultiTaskElasticNet
ElasticNetCV
MultiTaskLassoCV
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
path = staticmethod(enet_path)
def __init__(self, l1_ratio=0.5, eps=1e-3, n_alphas=100, alphas=None,
fit_intercept=True, normalize=False,
max_iter=1000, tol=1e-4, cv=None, copy_X=True,
verbose=0, n_jobs=1, random_state=None, selection='cyclic'):
self.l1_ratio = l1_ratio
self.eps = eps
self.n_alphas = n_alphas
self.alphas = alphas
self.fit_intercept = fit_intercept
self.normalize = normalize
self.max_iter = max_iter
self.tol = tol
self.cv = cv
self.copy_X = copy_X
self.verbose = verbose
self.n_jobs = n_jobs
self.random_state = random_state
self.selection = selection
class MultiTaskLassoCV(LinearModelCV, RegressorMixin):
"""Multi-task L1/L2 Lasso with built-in cross-validation.
The optimization objective for MultiTaskLasso is::
        (1 / (2 * n_samples)) * ||Y - XW||_Fro^2 + alpha * ||W||_21
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <multi_task_lasso>`.
Parameters
----------
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``.
alphas : array-like, optional
List of alphas where to compute the models.
If not provided, set automatically.
n_alphas : int, optional
Number of alphas along the regularization path
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
This parameter is ignored when ``fit_intercept`` is set to ``False``.
When the regressors are normalized, note that this makes the
hyperparameters learnt more robust and almost independent of the number
of samples. The same property is not valid for standardized data.
However, if you wish to standardize, please use
:class:`preprocessing.StandardScaler` before calling ``fit`` on an estimator
with ``normalize=False``.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
max_iter : int, optional
The maximum number of iterations.
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
verbose : bool or integer
Amount of verbosity.
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs. Note that this is used only if multiple values for
l1_ratio are given.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
intercept_ : array, shape (n_tasks,)
Independent term in decision function.
coef_ : array, shape (n_tasks, n_features)
Parameter vector (W in the cost function formula).
alpha_ : float
The amount of penalization chosen by cross validation
mse_path_ : array, shape (n_alphas, n_folds)
mean square error for the test set on each fold, varying alpha
alphas_ : numpy array, shape (n_alphas,)
The grid of alphas used for fitting.
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance for the optimal alpha.
See also
--------
MultiTaskElasticNet
ElasticNetCV
MultiTaskElasticNetCV
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
path = staticmethod(lasso_path)
def __init__(self, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True,
normalize=False, max_iter=1000, tol=1e-4, copy_X=True,
cv=None, verbose=False, n_jobs=1, random_state=None,
selection='cyclic'):
super(MultiTaskLassoCV, self).__init__(
eps=eps, n_alphas=n_alphas, alphas=alphas,
fit_intercept=fit_intercept, normalize=normalize,
max_iter=max_iter, tol=tol, copy_X=copy_X,
cv=cv, verbose=verbose, n_jobs=n_jobs, random_state=random_state,
selection=selection)
| bsd-3-clause |
cjayb/mne-python | tutorials/preprocessing/plot_45_projectors_background.py | 9 | 22444 | # -*- coding: utf-8 -*-
"""
.. _tut-projectors-background:
Background on projectors and projections
========================================
This tutorial provides background information on projectors and Signal Space
Projection (SSP), and covers loading and saving projectors, adding and removing
projectors from Raw objects, the difference between "applied" and "unapplied"
projectors, and at what stages MNE-Python applies projectors automatically.
.. contents:: Page contents
:local:
:depth: 2
We'll start by importing the Python modules we need; we'll also define a short
function to make it easier to make several plots that look similar:
"""
import os
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D # noqa
from scipy.linalg import svd
import mne
def setup_3d_axes():
ax = plt.axes(projection='3d')
ax.view_init(azim=-105, elev=20)
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('z')
ax.set_xlim(-1, 5)
ax.set_ylim(-1, 5)
ax.set_zlim(0, 5)
return ax
###############################################################################
# What is a projection?
# ^^^^^^^^^^^^^^^^^^^^^
#
# In the most basic terms, a *projection* is an operation that converts one set
# of points into another set of points, where repeating the projection
# operation on the resulting points has no effect. To give a simple geometric
# example, imagine the point :math:`(3, 2, 5)` in 3-dimensional space. A
# projection of that point onto the :math:`x, y` plane looks a lot like a
# shadow cast by that point if the sun were directly above it:
ax = setup_3d_axes()
# plot the vector (3, 2, 5)
origin = np.zeros((3, 1))
point = np.array([[3, 2, 5]]).T
vector = np.hstack([origin, point])
ax.plot(*vector, color='k')
ax.plot(*point, color='k', marker='o')
# project the vector onto the x,y plane and plot it
xy_projection_matrix = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 0]])
projected_point = xy_projection_matrix @ point
projected_vector = xy_projection_matrix @ vector
ax.plot(*projected_vector, color='C0')
ax.plot(*projected_point, color='C0', marker='o')
# add dashed arrow showing projection
arrow_coords = np.concatenate([point, projected_point - point]).flatten()
ax.quiver3D(*arrow_coords, length=0.96, arrow_length_ratio=0.1, color='C1',
linewidth=1, linestyle='dashed')
###############################################################################
#
# .. note::
#
# The ``@`` symbol indicates matrix multiplication on NumPy arrays, and was
# introduced in Python 3.5 / NumPy 1.10. The notation ``plot(*point)`` uses
# Python `argument expansion`_ to "unpack" the elements of ``point`` into
# separate positional arguments to the function. In other words,
# ``plot(*point)`` expands to ``plot(3, 2, 5)``.
#
# Notice that we used matrix multiplication to compute the projection of our
# point :math:`(3, 2, 5)` onto the :math:`x, y` plane:
#
# .. math::
#
# \left[
# \begin{matrix} 1 & 0 & 0 \\ 0 & 1 & 0 \\ 0 & 0 & 0 \end{matrix}
# \right]
# \left[ \begin{matrix} 3 \\ 2 \\ 5 \end{matrix} \right] =
# \left[ \begin{matrix} 3 \\ 2 \\ 0 \end{matrix} \right]
#
# ...and that applying the projection again to the result just gives back the
# result again:
#
# .. math::
#
# \left[
# \begin{matrix} 1 & 0 & 0 \\ 0 & 1 & 0 \\ 0 & 0 & 0 \end{matrix}
# \right]
# \left[ \begin{matrix} 3 \\ 2 \\ 0 \end{matrix} \right] =
# \left[ \begin{matrix} 3 \\ 2 \\ 0 \end{matrix} \right]
#
# From an information perspective, this projection has taken the point
# :math:`x, y, z` and removed the information about how far in the :math:`z`
# direction our point was located; all we know now is its position in the
# :math:`x, y` plane. Moreover, applying our projection matrix to *any point*
# in :math:`x, y, z` space will reduce it to a corresponding point on the
# :math:`x, y` plane. The term for this is a *subspace*: the projection matrix
# projects points in the original space into a *subspace* of lower dimension
# than the original. The reason our subspace is the :math:`x,y` plane (instead
# of, say, the :math:`y,z` plane) is a direct result of the particular values
# in our projection matrix.
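#
# As a quick numerical sanity check (shown as a sketch rather than as executed
# tutorial output), the idempotency above can be verified directly on the
# matrix we defined earlier:
#
# .. code-block:: python3
#
#     # applying the projection twice is the same as applying it once
#     assert np.allclose(xy_projection_matrix @ xy_projection_matrix,
#                        xy_projection_matrix)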
#
#
# Example: projection as noise reduction
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Another way to describe this "loss of information" or "projection into a
# subspace" is to say that projection reduces the rank (or "degrees of
# freedom") of the measurement — here, from 3 dimensions down to 2. On the
# other hand, if you know that measurement component in the :math:`z` direction
# is just noise due to your measurement method, and all you care about are the
# :math:`x` and :math:`y` components, then projecting your 3-dimensional
# measurement into the :math:`x, y` plane could be seen as a form of noise
# reduction.
#
# Of course, it would be very lucky indeed if all the measurement noise were
# concentrated in the :math:`z` direction; you could just discard the :math:`z`
# component without bothering to construct a projection matrix or do the matrix
# multiplication. Suppose instead that in order to take that measurement you
# had to pull a trigger on a measurement device, and the act of pulling the
# trigger causes the device to move a little. If you measure how
# trigger-pulling affects measurement device position, you could then "correct"
# your real measurements to "project out" the effect of the trigger pulling.
# Here we'll suppose that the average effect of the trigger is to move the
# measurement device by :math:`(3, -1, 1)`:
trigger_effect = np.array([[3, -1, 1]]).T
###############################################################################
# Knowing that, we can compute a plane that is orthogonal to the effect of the
# trigger (using the fact that a plane through the origin has equation
# :math:`Ax + By + Cz = 0` given a normal vector :math:`(A, B, C)`), and
# project our real measurements onto that plane.
# compute the plane orthogonal to trigger_effect
x, y = np.meshgrid(np.linspace(-1, 5, 61), np.linspace(-1, 5, 61))
A, B, C = trigger_effect
z = (-A * x - B * y) / C
# cut off the plane below z=0 (just to make the plot nicer)
mask = np.where(z >= 0)
x = x[mask]
y = y[mask]
z = z[mask]
###############################################################################
# Computing the projection matrix from the ``trigger_effect`` vector is done
# using `singular value decomposition <svd_>`_ (SVD); interested readers may
# consult the internet or a linear algebra textbook for details on this method.
# With the projection matrix in place, we can project our original vector
# :math:`(3, 2, 5)` to remove the effect of the trigger, and then plot it:
# sphinx_gallery_thumbnail_number = 2
# compute the projection matrix
U, S, V = svd(trigger_effect, full_matrices=False)
trigger_projection_matrix = np.eye(3) - U @ U.T
# project the vector onto the orthogonal plane
projected_point = trigger_projection_matrix @ point
projected_vector = trigger_projection_matrix @ vector
# plot the trigger effect and its orthogonal plane
ax = setup_3d_axes()
ax.plot_trisurf(x, y, z, color='C2', shade=False, alpha=0.25)
ax.quiver3D(*np.concatenate([origin, trigger_effect]).flatten(),
arrow_length_ratio=0.1, color='C2', alpha=0.5)
# plot the original vector
ax.plot(*vector, color='k')
ax.plot(*point, color='k', marker='o')
offset = np.full((3, 1), 0.1)
ax.text(*(point + offset).flat, '({}, {}, {})'.format(*point.flat), color='k')
# plot the projected vector
ax.plot(*projected_vector, color='C0')
ax.plot(*projected_point, color='C0', marker='o')
offset = np.full((3, 1), -0.2)
ax.text(*(projected_point + offset).flat,
'({}, {}, {})'.format(*np.round(projected_point.flat, 2)),
color='C0', horizontalalignment='right')
# add dashed arrow showing projection
arrow_coords = np.concatenate([point, projected_point - point]).flatten()
ax.quiver3D(*arrow_coords, length=0.96, arrow_length_ratio=0.1,
color='C1', linewidth=1, linestyle='dashed')
###############################################################################
# Just as before, the projection matrix will map *any point* in :math:`x, y, z`
# space onto that plane, and once a point has been projected onto that plane,
# applying the projection again will have no effect. For that reason, it should
# be clear that although the projected points vary in all three :math:`x`,
# :math:`y`, and :math:`z` directions, the set of projected points have only
# two *effective* dimensions (i.e., they are constrained to a plane).
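#
# Both properties can be checked numerically; here is a brief sketch using the
# arrays defined above (again, not executed as part of the tutorial):
#
# .. code-block:: python3
#
#     # the projector maps onto a 2-dimensional subspace...
#     assert np.linalg.matrix_rank(trigger_projection_matrix) == 2
#     # ...and the projected point has no component along the trigger effect
#     assert np.isclose((trigger_effect.T @ projected_point).item(), 0.)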
#
# .. sidebar:: Terminology
#
# In MNE-Python, the matrix used to project a raw signal into a subspace is
# usually called a :term:`projector <projector>` or a *projection
# operator* — these terms are interchangeable with the term *projection
# matrix* used above.
#
# Projections of EEG or MEG signals work in very much the same way: the point
# :math:`x, y, z` corresponds to the value of each sensor at a single time
# point, and the projection matrix varies depending on what aspects of the
# signal (i.e., what kind of noise) you are trying to project out. The only
# real difference is that instead of a single 3-dimensional point :math:`(x, y,
# z)` you're dealing with a time series of :math:`N`-dimensional "points" (one
# at each sampling time), where :math:`N` is usually in the tens or hundreds
# (depending on how many sensors your EEG/MEG system has). Fortunately, because
# projection is a matrix operation, it can be done very quickly even on signals
# with hundreds of dimensions and tens of thousands of time points.
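#
# Concretely, cleaning a whole recording is a single matrix product. The
# following sketch uses made-up shapes purely for illustration:
#
# .. code-block:: python3
#
#     n_sensors, n_times = 300, 10000
#     data = np.random.randn(n_sensors, n_times)  # stand-in for sensor data
#     proj = np.eye(n_sensors)                    # stand-in for a projector
#     cleaned = proj @ data                       # shape (n_sensors, n_times)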
#
#
# .. _ssp-tutorial:
#
# Signal-space projection (SSP)
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# We mentioned above that the projection matrix will vary depending on what
# kind of noise you are trying to project away. Signal-space projection (SSP)
# :footcite:`UusitaloIlmoniemi1997` is a way of estimating what that projection
# matrix should be, by
# comparing measurements with and without the signal of interest. For example,
# you can take additional "empty room" measurements that record activity at the
# sensors when no subject is present. By looking at the spatial pattern of
# activity across MEG sensors in an empty room measurement, you can create one
# or more :math:`N`-dimensional vector(s) giving the "direction(s)" of
# environmental noise in sensor space (analogous to the vector for "effect of
# the trigger" in our example above). SSP is also often used for removing
# heartbeat and eye movement artifacts — in those cases, instead of empty room
# recordings the direction of the noise is estimated by detecting the
# artifacts, extracting epochs around them, and averaging. See
# :ref:`tut-artifact-ssp` for examples.
#
# Once you know the noise vectors, you can create a hyperplane that is
# orthogonal
# to them, and construct a projection matrix to project your experimental
# recordings onto that hyperplane. In that way, the component of your
# measurements associated with environmental noise can be removed. Again, it
# should be clear that the projection reduces the dimensionality of your data —
# you'll still have the same number of sensor signals, but they won't all be
# *linearly independent* — but typically there are tens or hundreds of sensors
# and the noise subspace that you are eliminating has only 3-5 dimensions, so
# the loss of degrees of freedom is usually not problematic.
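#
# In matrix terms, if the estimated noise vectors are stacked as the columns
# of an array ``noise_vecs`` of shape ``(n_sensors, k)`` (a generic NumPy
# sketch, not tied to any particular MNE-Python function), the projector is
# built just like the single-vector example above:
#
# .. code-block:: python3
#
#     U, S, _ = svd(noise_vecs, full_matrices=False)
#     # projects onto the hyperplane orthogonal to the k noise directions
#     ssp_operator = np.eye(U.shape[0]) - U @ U.T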
#
#
# Projectors in MNE-Python
# ^^^^^^^^^^^^^^^^^^^^^^^^
#
# In our example data, :ref:`SSP <ssp-tutorial>` has already been performed
# using empty room recordings, but the :term:`projectors <projector>` are
# stored alongside the raw data and have not been *applied* yet (or,
# synonymously, the projectors are not *active* yet). Here we'll load
# the :ref:`sample data <sample-dataset>` and crop it to 60 seconds; you can
# see the projectors in the output of :func:`~mne.io.read_raw_fif` below:
sample_data_folder = mne.datasets.sample.data_path()
sample_data_raw_file = os.path.join(sample_data_folder, 'MEG', 'sample',
'sample_audvis_raw.fif')
raw = mne.io.read_raw_fif(sample_data_raw_file)
raw.crop(tmax=60).load_data()
###############################################################################
# In MNE-Python, the environmental noise vectors are computed using `principal
# component analysis <pca_>`_, usually abbreviated "PCA", which is why the SSP
# projectors usually have names like "PCA-v1". (Incidentally, since the process
# of performing PCA uses `singular value decomposition <svd_>`_ under the hood,
# it is also common to see phrases like "projectors were computed using SVD" in
# published papers.) The projectors are stored in the ``projs`` field of
# ``raw.info``:
print(raw.info['projs'])
###############################################################################
# ``raw.info['projs']`` is an ordinary Python :class:`list` of
# :class:`~mne.Projection` objects, so you can access individual projectors by
# indexing into it. The :class:`~mne.Projection` object itself is similar to a
# Python :class:`dict`, so you can use its ``.keys()`` method to see what
# fields it contains (normally you don't need to access its properties
# directly, but you can if necessary):
first_projector = raw.info['projs'][0]
print(first_projector)
print(first_projector.keys())
###############################################################################
# The :class:`~mne.io.Raw`, :class:`~mne.Epochs`, and :class:`~mne.Evoked`
# objects all have a boolean :attr:`~mne.io.Raw.proj` attribute that indicates
# whether there are any unapplied / inactive projectors stored in the object.
# In other words, the :attr:`~mne.io.Raw.proj` attribute is ``True`` if at
# least one :term:`projector` is present and all of them are active. In
# addition, each individual projector also has a boolean ``active`` field:
print(raw.proj)
print(first_projector['active'])
###############################################################################
# Computing projectors
# ~~~~~~~~~~~~~~~~~~~~
#
# In MNE-Python, SSP vectors can be computed using general purpose functions
# :func:`mne.compute_proj_raw`, :func:`mne.compute_proj_epochs`, and
# :func:`mne.compute_proj_evoked`. The general assumption these functions make
# is that the data passed contains raw data, epochs or averages of the artifact
# you want to repair via projection. In practice this typically involves
# continuous raw data of empty room recordings or averaged ECG or EOG
# artifacts. A second set of high-level convenience functions is provided to
# compute projection vectors for typical use cases. This includes
# :func:`mne.preprocessing.compute_proj_ecg` and
# :func:`mne.preprocessing.compute_proj_eog` for computing the ECG and EOG
# related artifact components, respectively; see :ref:`tut-artifact-ssp` for
# examples of these uses. For computing the EEG reference signal as a
# projector, the function :func:`mne.set_eeg_reference` can be used; see
# :ref:`tut-set-eeg-ref` for more information.
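#
# As a minimal sketch, empty-room projectors could be computed from a separate
# empty-room recording like this (``empty_room_raw`` is a hypothetical
# :class:`~mne.io.Raw` object, and the number of vectors per channel type is
# an arbitrary choice for illustration):
#
# .. code-block:: python3
#
#     empty_room_projs = mne.compute_proj_raw(empty_room_raw, n_grad=3,
#                                             n_mag=3, n_eeg=0)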
#
# .. warning:: It is best to compute projectors only on channels that will be
# used (e.g., excluding bad channels). This ensures that
# projection vectors will remain ortho-normalized and that they
# properly capture the activity of interest.
#
#
# Visualizing the effect of projectors
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# You can see the effect the projectors are having on the measured signal by
# comparing plots with and without the projectors applied. By default,
# ``raw.plot()`` will apply the projectors in the background before plotting
# (without modifying the :class:`~mne.io.Raw` object); you can control this
# with the boolean ``proj`` parameter as shown below, or you can turn them on
# and off interactively with the projectors interface, accessed via the
# :kbd:`Proj` button in the lower right corner of the plot window. Here we'll
# look at just the magnetometers, and a 2-second sample from the beginning of
# the file.
mags = raw.copy().crop(tmax=2).pick_types(meg='mag')
for proj in (False, True):
fig = mags.plot(butterfly=True, proj=proj)
fig.subplots_adjust(top=0.9)
fig.suptitle('proj={}'.format(proj), size='xx-large', weight='bold')
###############################################################################
# Additional ways of visualizing projectors are covered in the tutorial
# :ref:`tut-artifact-ssp`.
#
#
# Loading and saving projectors
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# SSP can be used for other types of signal cleaning besides just reduction of
# environmental noise. You probably noticed two large deflections in the
# magnetometer signals in the previous plot that were not removed by the
# empty-room projectors — those are artifacts of the subject's heartbeat. SSP
# can be used to remove those artifacts as well. The sample data includes
# projectors for heartbeat noise reduction that were saved in a separate file
# from the raw data, which can be loaded with the :func:`mne.read_proj`
# function:
ecg_proj_file = os.path.join(sample_data_folder, 'MEG', 'sample',
'sample_audvis_ecg-proj.fif')
ecg_projs = mne.read_proj(ecg_proj_file)
print(ecg_projs)
###############################################################################
# There is a corresponding :func:`mne.write_proj` function that can be used to
# save projectors to disk in ``.fif`` format:
#
# .. code-block:: python3
#
# mne.write_proj('heartbeat-proj.fif', ecg_projs)
#
# .. note::
#
# By convention, MNE-Python expects projectors to be saved with a filename
# ending in ``-proj.fif`` (or ``-proj.fif.gz``), and will issue a warning
# if you forgo this recommendation.
#
#
# Adding and removing projectors
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Above, when we printed the ``ecg_projs`` list that we loaded from a file, it
# showed two projectors for gradiometers (the first two, marked "planar"), two
# for magnetometers (the middle two, marked "axial"), and two for EEG sensors
# (the last two, marked "eeg"). We can add them to the :class:`~mne.io.Raw`
# object using the :meth:`~mne.io.Raw.add_proj` method:
raw.add_proj(ecg_projs)
###############################################################################
# To remove projectors, there is a corresponding method
# :meth:`~mne.io.Raw.del_proj` that will remove projectors based on their index
# within the ``raw.info['projs']`` list. For the special case of replacing the
# existing projectors with new ones, use
# ``raw.add_proj(ecg_projs, remove_existing=True)``.
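#
# For example, a round trip might look like this (a sketch, not executed
# here):
#
# .. code-block:: python3
#
#     projs = list(raw.info['projs'])  # keep a copy of all projectors
#     raw.del_proj(0)                  # drop the first one by its index
#     raw.add_proj(projs[:1])          # ...and add it back later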
#
# To see how the ECG projectors affect the measured signal, we can once again
# plot the data with and without the projectors applied (though remember that
# the :meth:`~mne.io.Raw.plot` method only *temporarily* applies the projectors
# for visualization, and does not permanently change the underlying data).
# We'll compare the ``mags`` variable we created above, which had only the
# empty room SSP projectors, to the data with both empty room and ECG
# projectors:
mags_ecg = raw.copy().crop(tmax=2).pick_types(meg='mag')
for data, title in zip([mags, mags_ecg], ['Without', 'With']):
fig = data.plot(butterfly=True, proj=True)
fig.subplots_adjust(top=0.9)
fig.suptitle('{} ECG projector'.format(title), size='xx-large',
weight='bold')
###############################################################################
# When are projectors "applied"?
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# By default, projectors are applied when creating :class:`epoched
# <mne.Epochs>` data from :class:`~mne.io.Raw` data, though application of the
# projectors can be *delayed* by passing ``proj=False`` to the
# :class:`~mne.Epochs` constructor. However, even when projectors have not been
# applied, the :meth:`mne.Epochs.get_data` method will return data *as if the
# projectors had been applied* (though the :class:`~mne.Epochs` object will be
# unchanged). Additionally, projectors cannot be applied if the data are not
# :ref:`preloaded <memory>`. If the data are `memory-mapped`_ (i.e., not
# preloaded), you can check the ``_projector`` attribute to see whether any
# projectors will be applied once the data is loaded in memory.
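#
# A brief sketch of delaying projector application when epoching (``'STI 014'``
# is assumed to be the stimulus channel, as in the sample dataset):
#
# .. code-block:: python3
#
#     events = mne.find_events(raw, stim_channel='STI 014')
#     epochs = mne.Epochs(raw, events, proj=False)  # stored but not applied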
#
# Finally, when performing inverse imaging (i.e., with
# :func:`mne.minimum_norm.apply_inverse`), the projectors will be
# automatically applied. It is also possible to apply projectors manually when
# working with :class:`~mne.io.Raw`, :class:`~mne.Epochs` or
# :class:`~mne.Evoked` objects via the object's :meth:`~mne.io.Raw.apply_proj`
# method. For all instance types, you can always copy the contents of
# :samp:`{<instance>}.info['projs']` into a separate :class:`list` variable,
# use :samp:`{<instance>}.del_proj({<index of proj(s) to remove>})` to remove
# one or more projectors, and then add them back later with
# :samp:`{<instance>}.add_proj({<list containing projs>})` if desired.
#
# .. warning::
#
# Remember that once a projector is applied, it can't be un-applied, so
# during interactive / exploratory analysis it's a good idea to use the
# object's :meth:`~mne.io.Raw.copy` method before applying projectors.
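#
# A minimal sketch of that workflow:
#
# .. code-block:: python3
#
#     # apply to a copy so the original ``raw`` keeps its unapplied projectors
#     raw_clean = raw.copy().apply_proj()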
#
#
# Best practices
# ~~~~~~~~~~~~~~
#
# In general, it is recommended to apply projectors when creating
# :class:`~mne.Epochs` from :class:`~mne.io.Raw` data. There are two reasons
# for this recommendation:
#
# 1. It is computationally cheaper to apply projectors to data *after* the
#    data have been reduced to just the segments of interest (the epochs)
#
# 2. If you are applying amplitude-based rejection criteria to epochs, it is
# preferable to reject based on the signal *after* projectors have been
# applied, because the projectors may reduce noise in some epochs to
# tolerable levels (thereby increasing the number of acceptable epochs and
#    consequently increasing statistical power in any later analyses).
#
#
# References
# ^^^^^^^^^^
#
# .. footbibliography::
#
#
# .. LINKS
#
# .. _`argument expansion`:
# https://docs.python.org/3/tutorial/controlflow.html#tut-unpacking-arguments
# .. _`pca`: https://en.wikipedia.org/wiki/Principal_component_analysis
# .. _`svd`: https://en.wikipedia.org/wiki/Singular_value_decomposition
# .. _`memory-mapped`: https://en.wikipedia.org/wiki/Memory-mapped_file
| bsd-3-clause |
soravux/deap | examples/coev/coop_adapt.py | 12 | 5084 | # This file is part of DEAP.
#
# DEAP is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 3 of
# the License, or (at your option) any later version.
#
# DEAP is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with DEAP. If not, see <http://www.gnu.org/licenses/>.
"""This example contains the adaptation test from *Potter, M. and De Jong, K.,
2001, Cooperative Coevolution: An Architecture for Evolving Co-adapted
Subcomponents.* section 4.2.3. A species is added every 100 generations.
"""
import random
try:
import matplotlib.pyplot as plt
except ImportError:
plt = False
import numpy
from deap import algorithms
from deap import tools
import coop_base
IND_SIZE = coop_base.IND_SIZE
SPECIES_SIZE = coop_base.SPECIES_SIZE
TARGET_SIZE = 30
NUM_SPECIES = 1
noise = "*##*###*###*****##*##****#*##*###*#****##******##*#**#*#**######"
schematas = ("1##1###1###11111##1##1111#1##1###1#1111##111111##1#11#1#11######",
"1##1###1###11111##1##1000#0##0###0#0000##000000##0#00#0#00######",
"0##0###0###00000##0##0000#0##0###0#0000##001111##1#11#1#11######")
toolbox = coop_base.toolbox
if plt:
toolbox.register("evaluate_nonoise", coop_base.matchSetStrengthNoNoise)
def main(extended=True, verbose=True):
target_set = []
stats = tools.Statistics(lambda ind: ind.fitness.values)
stats.register("avg", numpy.mean)
stats.register("std", numpy.std)
stats.register("min", numpy.min)
stats.register("max", numpy.max)
logbook = tools.Logbook()
logbook.header = "gen", "species", "evals", "std", "min", "avg", "max"
ngen = 300
adapt_length = 100
g = 0
add_next = [adapt_length]
for i in range(len(schematas)):
target_set.extend(toolbox.target_set(schematas[i], int(TARGET_SIZE/len(schematas))))
species = [toolbox.species() for _ in range(NUM_SPECIES)]
# Init with random a representative for each species
representatives = [random.choice(s) for s in species]
if plt and extended:
# We must save the match strength to plot them
t1, t2, t3 = list(), list(), list()
while g < ngen:
# Initialize a container for the next generation representatives
next_repr = [None] * len(species)
for i, s in enumerate(species):
# Vary the species individuals
s = algorithms.varAnd(s, toolbox, 0.6, 1.0)
r = representatives[:i] + representatives[i+1:]
for ind in s:
ind.fitness.values = toolbox.evaluate([ind] + r, target_set)
record = stats.compile(s)
logbook.record(gen=g, species=i, evals=len(s), **record)
if verbose:
print(logbook.stream)
# Select the individuals
species[i] = toolbox.select(s, len(s)) # Tournament selection
next_repr[i] = toolbox.get_best(s)[0] # Best selection
g += 1
if plt and extended:
# Compute the match strength without noise for the
# representatives on the three schematas
t1.append(toolbox.evaluate_nonoise(representatives,
toolbox.target_set(schematas[0], 1), noise)[0])
t2.append(toolbox.evaluate_nonoise(representatives,
toolbox.target_set(schematas[1], 1), noise)[0])
t3.append(toolbox.evaluate_nonoise(representatives,
toolbox.target_set(schematas[2], 1), noise)[0])
representatives = next_repr
# Add a species at every *adapt_length* generation
if add_next[-1] <= g < ngen:
species.append(toolbox.species())
representatives.append(random.choice(species[-1]))
add_next.append(add_next[-1] + adapt_length)
if extended:
for r in representatives:
# print individuals without noise
print("".join(str(x) for x, y in zip(r, noise) if y == "*"))
if plt and extended:
# Do the final plotting
plt.plot(t1, '-', color="k", label="Target 1")
plt.plot(t2, '--', color="k", label="Target 2")
plt.plot(t3, ':', color="k", label="Target 3")
max_t = max(max(t1), max(t2), max(t3))
for n in add_next:
plt.plot([n, n], [0, max_t + 1], "--", color="k")
plt.legend(loc="lower right")
plt.axis([0, ngen, 0, max_t + 1])
plt.xlabel("Generations")
plt.ylabel("Number of matched bits")
plt.show()
if __name__ == "__main__":
main()
| lgpl-3.0 |
czbiohub/singlecell-dash | singlecell_dash/apps/base.py | 1 | 1217 | """Building blocks for writing modular callbacks for plotly"""
import dash_html_components as html
import pandas as pd
CONFIG_DICT = {'modeBarButtonsToRemove': ['sendDataToCloud',
'pan2d',
'zoomIn2d',
'zoomOut2d',
'autoScale2d',
'resetScale2d',
'hoverCompareCartesian',
'hoverClosestCartesian',
'toggleSpikelines'],
'displaylogo': False
}
class BaseBlock:
config_dict = CONFIG_DICT.copy()
def __init__(self, app=None):
self.app = app
if self.app is not None and hasattr(self, 'callbacks'):
self.callbacks(self.app)
@property
def layout(self):
return html.Div()
@staticmethod
def _values_hovertext(series):
ids = series.index
values = series.values
strings = ['{}: {:.1f}'.format(i, v) for i, v in zip(ids, values)]
return pd.Series(strings, index=ids) | mit |
draperjames/bokeh | tests/compat/lc_offsets.py | 13 | 1127 | from matplotlib.collections import LineCollection
import matplotlib.pyplot as plt
import numpy as np
from bokeh import mpl
from bokeh.plotting import output_file, show
# Simulate a series of ocean current profiles, successively
# offset by 0.1 m/s so that they form what is sometimes called
# a "waterfall" plot or a "stagger" plot.
nverts = 60
ncurves = 20
offs = (0.1, 0.0)
rs = np.random.RandomState([12345678])
yy = np.linspace(0, 2 * np.pi, nverts)
ym = np.amax(yy)
xx = (0.2 + (ym - yy) / ym) ** 2 * np.cos(yy - 0.4) * 0.5
segs = []
for i in range(ncurves):
xxx = xx + 0.02 * rs.randn(nverts)
curve = list(zip(xxx, yy * 100))
segs.append(curve)
colors = [(1.0, 0.0, 0.0, 1.0), (0.0, 0.5, 0.0, 1.0), (0.0, 0.0, 1.0, 1.0),
(0.0, 0.75, 0.75, 1.0), (0.75, 0.75, 0, 1.0), (0.75, 0, 0.75, 1.0),
(0.0, 0.0, 0.0, 1.0)]
col = LineCollection(segs, linewidth=5, offsets=offs)
ax = plt.axes()
ax.add_collection(col, autolim=True)
col.set_color(colors)
ax.set_title('Successive data offsets')
fig = plt.gcf()
output_file("lc_offsets.html", title="lc_offsets.py example")
show(mpl.to_bokeh())
| bsd-3-clause |
swharden/SWHLab | doc/uses/spectral-noise/02.py | 1 | 1155 | """Same as 01.py, but reports speed"""
import os
import sys
if not os.path.abspath('../../../') in sys.path:
sys.path.append('../../../')
import swhlab
import matplotlib.pyplot as plt
import numpy as np
import time
if __name__=="__main__":
abfFile=R"X:\Data\DIC1\2013\08-2013\08-16-2013-DP\13816004.abf"
abf=swhlab.ABF(abfFile) # defaults to sweep 0
print("analyzing %d sweeps (%.02f sec each)"%(abf.sweeps,abf.sweepLength))
times=[]
for sweep in abf.setsweeps():
t1=time.clock()
baseFrequency=60 # frequency (Hz) to silence
FFT=np.fft.fft(abf.sweepY) # frequency data (i/j vectors starting at 0Hz)
for i in range(50): # first 50 odd harmonics
I=int(baseFrequency*i+baseFrequency*len(abf.sweepY)/abf.pointsPerSec)
FFT[I],FFT[-I]=0,0 # remember to silence from both ends of the FFT
Ys2=np.fft.ifft(FFT) # all done
times.append(time.clock()-t1)
times=np.array(times)*1000 # now in ms
print("analysis took %.02f +/- %.02f ms per sweep"%(np.average(times),np.std(times)))
# analyzing 60 sweeps (5.00 sec each)
# analysis took 6.47 +/- 1.71 ms per sweep | mit |
panaviatornado/hfhom | corrterm/weighted_graph.py | 2 | 22026 | # Caltech SURF 2013
# FILE: weighted_graph.py
# 01.05.14
'''
Plumbed 3-manifolds represented as negative-definite weighted forests with at
most 2 bad vertices
Uses networkx to draw graphs and get quadratic form
'''
# It is recommended you do NOT edit the exported (saved) files by hand.
# Skipping a node number (e.g. having N3 but not N2) will raise an error.
# Known limitations
# cannot handle missing nodes
# can only delete last node
from Tkinter import *
import tkFileDialog, tkMessageBox
import networkx as nx
import matplotlib.pyplot as plt
import traceback
from gui_output import OutputWindow
import numpy
from graph_quad import symmetric, is_negative_definite
from ndqf import NDQF
class GraphPopup(Frame):
'''
Graph controls window.
If used in non-GUI mode, can only load graphs. (cannot create/edit/delete)
Saving will open a window. (really meant to be used in GUI mode; non-GUI
mode is for running tests)
'''
def __init__(self, master, graph=None, condense=False, show_hom=True,
show_quad=False, show_weighted=False,use_multi=False,gui=True):
self.condense = condense # variable
self.show_hom = show_hom # variable
self.show_quad = show_quad # variable
self.show_weighted = show_weighted # variable
self.use_multi = use_multi
self.info = 'unknown' # inputinfo for the output window
self.nodes = []
if graph:
self.graph = graph # networkx graph nx.Graph()
# Nodes are named N0, N1, N2,...
# Nodes have int attr. 'weight', 'parent' (index)
else: # None
self.graph = nx.Graph()
self.num_nodes = 0
self.gui = gui
if not gui:
return
self.master = master
self.top = Toplevel(master)
self.top.title('Graph controls')
self.frame = self.top
# New node commands
Label(self.frame, text='New node').grid(row=1, column=0)
Label(self.frame, text='Parent').grid(row=0, column=1)
self.n_parent_var = IntVar()
self.n_parent_var.set(-1) # initial value
self.n_parent_opt = [-1]
self.n_parentmenu = OptionMenu(self.frame, self.n_parent_var,
*self.n_parent_opt)
self.n_parentmenu.grid(row=1, column=1)
Label(self.frame, text='Weight').grid(row=0, column=2)
self.n_weight = Entry(self.frame, width=4)
self.n_weight.grid(row=1, column=2)
Button(self.frame, text='Create', command=self.create_node).grid(row=1,
column=3)
separator = Frame(self.frame, height=2, bd=1, relief=SUNKEN)
separator.grid(row=2, sticky='we', padx=5, pady=5, columnspan=5)
# Edit node commands
Label(self.frame, text='Edit node').grid(row=4, column=0)
Label(self.frame, text='Node #').grid(row=3, column=1)
Label(self.frame, text='New parent').grid(row=3, column=2)
Label(self.frame, text='New weight').grid(row=3, column=3)
self.e_node_var = IntVar()
self.e_parent_var = IntVar()
self.e_node_var.set(0) # initial value
#self.e_parent_var.set(-1) # initial value
# Select which node to edit
self.e_node_opt = [0,]
self.e_nodemenu = OptionMenu(self.frame, self.e_node_var,
*self.e_node_opt)
self.e_nodemenu.grid(row=4, column=1)
# Select a new parent
self.e_parent_opt = ['same',-1,0]
self.e_parentmenu = OptionMenu(self.frame, self.e_parent_var,
*self.e_parent_opt)
self.e_parentmenu.grid(row=4, column=2)
self.e_weight = Entry(self.frame, width=4)
self.e_weight.grid(row=4, column=3)
Button(self.frame, text='Done', command=self.edit_node).grid(row=4,
column=4)
separator = Frame(self.frame, height=2, bd=1, relief=SUNKEN)
separator.grid(row=5, sticky='we', padx=5, pady=5, columnspan=5)
# Delete last node
Label(self.frame, text='Delete last node').grid(row=6, column=0)
self.del_node = Label(self.frame, text='Node #%i'%self.num_nodes)
self.del_node.grid(row=6, column=1)
Button(self.frame, text='Delete', command=self.delete_node).grid(row=6,
column=4)
separator = Frame(self.frame, height=2, bd=1, relief=SUNKEN)
separator.grid(row=7, sticky='we', padx=5, pady=5, columnspan=5)
# File buttons
Button(self.frame, text='Draw graph', command=self.update_graph).grid(\
row=9, column=0)
Button(self.frame, text='Save as', command=self.save).grid(row=9,
column=1)
Button(self.frame, text='Load', command=self.load).grid(row=9, column=2)
Button(self.frame, text='Done/compute', command=self.close).grid(row=9,
column=3)
Button(self.frame, text='Cancel', command=self.cancel).grid(row=9,
column=4)
#self.update_graph() # show matplotlib drawing screen
def update_graph(self):
'''Redraw graph in matplotlib.pyplot.'''
plt.clf() # erase figure
labels = dict((n, '%s,%s' %(n,a['weight'])) \
for n,a in self.graph.nodes(data=True))
nx.draw_graphviz(self.graph, labels=labels, node_size=700, width=3,
alpha=0.7)
plt.show()
def missing_nodes(self):
'''
Return list of indices of missing nodes, using highest numbered node as
the total number of nodes.
        By 'missing a node', we mean that node numbering skips from, e.g., N1 to N3,
with N2 missing.
Raises error if any node indices aren't integers or are negative ints.
'''
nodes = self.graph.nodes(data=False)
num_nodes = 0
indices = []
for node in nodes:
indices.append(int(node[1:])) # node is Nk, node[1:] gives int k
assert int(node[1:]) >= 0
if int(node[1:]) > num_nodes:
num_nodes = int(node[1:])
return set(range(num_nodes)).difference(set(indices))
def create_node(self):
'''
Create a new node from the "Create New" options.
A parent index of -1 means it is a root node (no parent).
Only works when used with the GUI.
'''
if not self.gui:
print 'Cannot create nodes in non-GUI mode'
return
try:
n_weight = int(self.n_weight.get())
except:
tkMessageBox.showerror('Weight', 'No data for weight or not an int')
raise ValueError('no data for weight or not an int')
new_name = 'N%i' % self.num_nodes
parent_index = self.n_parent_var.get()
self.graph.add_node(new_name, parent=parent_index, weight=n_weight)
self.nodes.append(new_name)
if parent_index != -1: # not a root node
# Create edge to parent
parent_node = self.nodes[parent_index]
self.graph.add_edge(parent_node, new_name)
# update New node dropdown menu (add node just created)
self.n_parent_opt.append(self.num_nodes)
self.n_parentmenu.destroy()
self.n_parentmenu = OptionMenu(self.frame, self.n_parent_var,
*self.n_parent_opt)
self.n_parent_var.set(self.num_nodes) # default parent is newest node
self.n_parentmenu.grid(row=1, column=1)
# update Edit dropdown menu
if self.num_nodes != 0:
self.e_node_opt.append(self.num_nodes)
self.e_nodemenu.destroy()
self.e_nodemenu = OptionMenu(self.frame, self.e_node_var,
*self.e_node_opt)
self.e_nodemenu.grid(row=4, column=1)
self.e_parent_opt.append(self.num_nodes)
self.e_parentmenu.destroy()
self.e_parentmenu = OptionMenu(self.frame, self.e_parent_var,
*self.e_parent_opt)
self.e_parentmenu.grid(row=4, column=2)
self.num_nodes += 1
self.update_graph()
# update Delete menu
self.del_node.destroy()
self.del_node = Label(self.frame, text='Node #%i'%(self.num_nodes-1))
self.del_node.grid(row=6, column=1)
def edit_node(self):
'''Edit node according to "Edit node" options.'''
if not self.gui:
print 'Cannot edit nodes in non-GUI mode'
return
if self.num_nodes == 0:
tkMessageBox.showerror('No nodes',
'No nodes to edit. You must create a node.')
raise ValueError('no nodes to edit')
node = self.e_node_var.get()
old_parent = self.graph.node[self.nodes[node]]['parent']
# Edit parent node
try:
e_parent = self.e_parent_var.get() # may not be an int ('same')
# make sure don't choose self as new parent (self loop)
if node == e_parent:
tkMessageBox.showwarning('Parent',
'Cannot choose self as parent')
return
# update parent attribute
self.graph.node[self.nodes[node]]['parent'] = e_parent
# update edges
if old_parent != -1: # wasn't a root node
self.graph.remove_edge(self.nodes[node], self.nodes[old_parent])
print 'Removing edge from %s to %s' %(self.nodes[node],
self.nodes[old_parent])
if e_parent != -1: # not making node a root node
self.graph.add_edge(self.nodes[node], self.nodes[e_parent])
print 'Adding edge from %s to %s' %(self.nodes[node],
self.nodes[e_parent])
else:
print 'Node %s is now a root node' % self.nodes[node]
except ValueError:
print 'Not changing parent' # chose string 'same' => ignore
# Edit weight
try:
weight = self.e_weight.get()
if weight != '':
self.graph.node['N%i'%node]['weight'] = int(weight)
except ValueError:
tkMessageBox.showerror('Invalid weight', 'Invalid weight.')
raise ValueError('Invalid weight')
self.update_graph() # redraw graph
def delete_node(self):
'''Delete the last node.'''
if not self.gui:
print 'Cannot delete nodes in non-GUI mode'
return
if self.num_nodes == 0:
tkMessageBox.showwarning('No nodes', 'Nothing to delete')
return
self.graph.remove_node('N%i'%(self.num_nodes-1))
print 'Deleted node %i'%(self.num_nodes-1)
# update Create menu
self.n_parent_opt.pop()
self.n_parentmenu.destroy()
self.n_parentmenu = OptionMenu(self.frame, self.n_parent_var,
*self.n_parent_opt)
self.n_parent_var.set(self.num_nodes-2) # default parent is newest node
self.n_parentmenu.grid(row=1, column=1)
# update Edit menu
self.e_node_opt.pop()
self.e_nodemenu.destroy()
self.e_nodemenu = OptionMenu(self.frame, self.e_node_var,
*self.e_node_opt)
self.e_nodemenu.grid(row=4, column=1)
self.e_parent_opt.pop()
self.e_parentmenu.destroy()
self.e_parentmenu = OptionMenu(self.frame, self.e_parent_var,
*self.e_parent_opt)
self.e_parentmenu.grid(row=4, column=2)
# update Delete menu
self.del_node.destroy()
self.del_node = Label(self.frame, text='Node #%i'%(self.num_nodes-2))
self.del_node.grid(row=6, column=1)
self.num_nodes -= 1
self.nodes.pop()
self.update_graph()
def save(self):
'''Save to file as adjacency list and node data.'''
options = {}
options['defaultextension'] = '.txt'
options['filetypes'] = [('all files', '.*'), ('text files', '.txt')]
filename = tkFileDialog.asksaveasfilename(**options)
if filename:
adjfile = open(filename, 'wb')
nx.write_adjlist(self.graph, adjfile)
adjfile.write('\nDATA\n')
adjfile.write(str(self.graph.nodes(data=True)))
adjfile.close()
print 'Graph data saved to %s' % filename
self.info = filename
def load(self, filename=None):
'''
Load adjacency list and data from file. This will remove the graph
created by the editor in the current session.
Lines at the beginning starting with # are ignored.
The adjacency list for the matrix is first.
The line following DATA that contains a list of all the nodes with
attributes must be on a single line (no line breaks).
It should be the output of graph.nodes(data=True).
Lines after this line are ignored.
Example file:
#weighted_graph.py
# GMT Fri Aug 16 22:24:35 2013
#
N0 N1 N2 N3
N1
N2
N3
DATA
[('N0', {'weight': -3, 'parent': -1}), ('N1', {'weight': -3, 'parent':
0}), ('N2', {'weight': -3, 'parent': 0}), ('N3', {'weight': -3,
'parent': 0})]
'''
if not filename:
# open file options
options = {}
options['defaultextension'] = '.txt'
options['filetypes'] = [('all files', '.*'), ('text files', '.txt')]
filename = tkFileDialog.askopenfilename(**options)
if filename == '': # canceled
return
# attempt to open file
try:
adjfile = open(filename, 'r')
self.info = filename
except:
if self.gui:
tkMessageBox.showwarning('Open file',
'Cannot open file %s.' %filename \
+'Aborting operation; please try again.')
raise IOError('failed to open file')
# get the adjacency list
adjlist = [] # list of strings
try:
while 1:
line = adjfile.readline()
if not line: # 'DATA' line should come before EOF
raise IOError('failed to load - possibly no graph data')
if line[:4] == 'DATA' or line[:4] == 'data':
break
if line[0] == '#' or line == '' or line == '\n': # skip
continue
adjlist.append(line[:-1]) # don't want \n char
self.graph = nx.parse_adjlist(adjlist) # got graph
self.num_nodes = len(self.graph.nodes(data=False))
if not is_forest(self.graph):
raise ValueError('not a forest (disjoint union of trees)')
missing_nodes = self.missing_nodes()
if missing_nodes != set([]):
raise ValueError('missing node(s)')
# get the node data- must be preceded by a line starting with 'DATA'
while 1:
node_data = adjfile.readline()
if not node_data: # EOF
raise IOError('no data')
if node_data != '' and node_data != '\n':
break
            # use eval to convert node_data (string) to a list; make sure it is a list
if node_data.strip()[0] != '[' or node_data.strip()[-1] != ']':
raise ValueError('cannot parse data')
# add node attributes (weight, parent)
for node in eval(node_data):
for attr in node[1].keys():
self.graph.node[node[0]][attr] = node[1][attr]
# node[1] is a dict of attr
# update self.nodes (keep in order)
self.nodes = []
for index in range(self.num_nodes):
self.nodes.append('N%i' % index)
except Exception as error:
if self.gui:
tkMessageBox.showwarning('Loading', 'Loading failed - %s%s\n%s'\
%(type(error),filename, traceback.format_exc()))
print traceback.print_exc()
return
# update graph control options
if self.gui:
self.n_parent_opt = range(-1, self.num_nodes)
self.n_parentmenu = OptionMenu(self.frame, self.n_parent_var,
*self.n_parent_opt)
self.n_parentmenu.grid(row=1, column=1)
self.e_node_opt = range(self.num_nodes)
self.e_nodemenu = OptionMenu(self.frame, self.e_node_var,
*self.e_node_opt)
self.e_nodemenu.grid(row=4, column=1)
self.e_parent_opt = ['same']
self.e_parent_opt.extend(range(-1, self.num_nodes))
self.e_parentmenu = OptionMenu(self.frame, self.e_parent_var,
*self.e_parent_opt)
self.e_parentmenu.grid(row=4, column=2)
self.del_node.destroy()
self.del_node = Label(self.frame,text='Node #%i'%(self.num_nodes-1))
self.del_node.grid(row=6, column=1)
self.update_graph()
print 'Graph data successfully loaded from %s' % filename
def close(self):
'''Close and output correction terms.'''
if not self.gui:
print 'Nothing to close'
return
if self.num_nodes == 0:
tkMessageBox.showerror('No nodes',
'No graph drawn. Closing editor.')
self.top.destroy()
return
self.save()
quad = g_quad(self.graph, self.nodes)
quadform = NDQF(quad)
corr = quadform.correction_terms(self.use_multi.get())
struct = quadform.group.struct()
self.top.destroy()
#self.master.quit()
if not self.show_weighted.get():
plt.close('all')
OutputWindow(self.master, corr, struct, quad, self.info,
condense=self.condense.get(),
showquad=self.show_quad.get(),
showhom=self.show_hom.get())
def cancel(self):
'''Exit program without computing correction terms.'''
if not self.gui:
print 'Nothing to close'
return
print 'Quitting...'
plt.close('all')
self.top.destroy()
#self.master.quit()
def is_forest(graph):
'''
Return True if graph is a forest (disjoint union of trees), otherwise
False.
A graph is a forest iff #nodes = #edges + #components.
e.g. see http://www-math.mit.edu/~sassaf/courses/314/solp2.pdf
'''
if len(graph.nodes(data=False)) == graph.number_of_edges() + \
nx.number_connected_components(graph):
return True
return False
def num_bad_vertices(graph, node_list):
'''
Return the number of bad vertices in networkx graph 'graph'.
Bad vertices are vertices such that -weight < degree.
'node_list' is a list of the names of all the nodes to check.
'''
num = 0
for node in node_list:
assert type(graph.node[node]['weight']) is int
assert type(graph.degree(node)) is int
if graph.node[node]['weight'] > -graph.degree(node):
num += 1
return num
def g_quad(graph, node_list, gui=True):
'''
Return quadratic form (numpy array) of networkx graph 'graph', ordered
according to the node names in 'node_list'.
Q(v,v) = weight(v)
Q(v,w) = 1 if v, w are connected by an edge; 0 otherwise
Thus Q is the adjacency matrix everywhere except the diagonal, and just
the weights down the diagonal.
'''
assert len(node_list) == len(graph.nodes(data=False))
assert is_forest(graph)
num_bad = num_bad_vertices(graph, node_list)
if num_bad > 2:
if gui:
tkMessageBox.showwarning('Bad vertices',
'More than two bad vertices. (There are %i.)'%num_bad)
raise ValueError('More than two bad vertices. (There are %i.)'%num_bad)
# create adjacency matrix, ordered according to node_list
adj = nx.to_numpy_matrix(graph, nodelist=node_list, dtype=numpy.int)
for index, node in enumerate(node_list): # change diagonal
adj[index, index] = graph.node[node]['weight']
if not is_negative_definite(adj):
if gui:
tkMessageBox.showwarning('Quadratic form',
'Quadratic form is not negative definite')
print adj
        raise ValueError('quadratic form is not negative definite')
return adj
if __name__ == '__main__':
# this file really intended to be used with gui.py, not by itself
print 'Do NOT use Done/Compute. Use Cancel instead.'
print 'Then close the root tk window.'
G = nx.Graph()
root = Tk()
#root.withdraw()
app = GraphPopup(root, G)
root.wait_window()
print g_quad(app.graph, app.nodes) | gpl-2.0 |
jm-begon/scikit-learn | examples/classification/plot_digits_classification.py | 289 | 2397 | """
================================
Recognizing hand-written digits
================================
An example showing how the scikit-learn can be used to recognize images of
hand-written digits.
This example is commented in the
:ref:`tutorial section of the user manual <introduction>`.
"""
print(__doc__)
# Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org>
# License: BSD 3 clause
# Standard scientific Python imports
import matplotlib.pyplot as plt
# Import datasets, classifiers and performance metrics
from sklearn import datasets, svm, metrics
# The digits dataset
digits = datasets.load_digits()
# The data that we are interested in is made of 8x8 images of digits, let's
# have a look at the first 4 images, stored in the `images` attribute of the
# dataset. If we were working from image files, we could load them using
# pylab.imread. Note that each image must have the same size. For these
# images, we know which digit they represent: it is given in the 'target' of
# the dataset.
images_and_labels = list(zip(digits.images, digits.target))
for index, (image, label) in enumerate(images_and_labels[:4]):
plt.subplot(2, 4, index + 1)
plt.axis('off')
plt.imshow(image, cmap=plt.cm.gray_r, interpolation='nearest')
plt.title('Training: %i' % label)
# To apply a classifier on this data, we need to flatten the image, to
# turn the data in a (samples, feature) matrix:
n_samples = len(digits.images)
data = digits.images.reshape((n_samples, -1))
# Create a classifier: a support vector classifier
classifier = svm.SVC(gamma=0.001)
# We learn the digits on the first half of the digits
classifier.fit(data[:n_samples / 2], digits.target[:n_samples / 2])
# Now predict the value of the digit on the second half:
expected = digits.target[n_samples / 2:]
predicted = classifier.predict(data[n_samples / 2:])
print("Classification report for classifier %s:\n%s\n"
% (classifier, metrics.classification_report(expected, predicted)))
print("Confusion matrix:\n%s" % metrics.confusion_matrix(expected, predicted))
images_and_predictions = list(zip(digits.images[n_samples / 2:], predicted))
for index, (image, prediction) in enumerate(images_and_predictions[:4]):
plt.subplot(2, 4, index + 5)
plt.axis('off')
plt.imshow(image, cmap=plt.cm.gray_r, interpolation='nearest')
plt.title('Prediction: %i' % prediction)
plt.show()
| bsd-3-clause |
MartinezLopez/icue | src/gui/eyeDisplay.py | 1 | 9633 | #!/usr/bin/python
#-*-coding: utf-8-*-
#
# eyeDisplay.py
#
# Author: Miguel Angel Martinez Lopez <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
from PyQt4 import QtGui, QtCore
import numpy as np
import time
import math
import pylab
from scipy.special import erfc
import logging
import matplotlib
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QTAgg as NavigationToolbar
import matplotlib.pyplot as plt
from matplotlib.ticker import EngFormatter, MultipleLocator
from matplotlib.widgets import Cursor
from src.engine import oscilloscope
from src.engine import pin
#from src.engine import modbus
from src.util import resources
class EyeDisplay(QtGui.QWidget):
def __init__(self, ch, rate, length):
super(EyeDisplay, self).__init__()
self.osc = oscilloscope.Oscilloscope.Instance()
self.gen = pin.Pins.Instance()
#self.gen = modbus...
self.timer_osc = QtCore.QTimer()
self.timer_draw = QtCore.QTimer()
logging.basicConfig(level=logging.DEBUG)
        self.setWindowTitle(self.tr('Eye diagram from ch %s' % (ch,)))
self.setWindowIcon(QtGui.QIcon(resources.getPath('icono.gif')))
self.setFixedSize(900,700)
# Measurements available for whole class
self.measure_list = []
self.inc_t = 0
self.configure(ch, rate, length)
self.initUi()
QtCore.QObject.connect(self.timer_draw, QtCore.SIGNAL("timeout()"), self.refresh_data)
QtCore.QObject.connect(self.timer_osc, QtCore.SIGNAL("timeout()"), lambda chan=ch: self.acquire(chan))
self.timer_osc.start(700)
self.timer_draw.start(5000)
def initUi(self):
self.figure = plt.figure(1)
self.canvas = FigureCanvas(self.figure)
self.canvas.setParent(self)
        self.ax1 = plt.subplot2grid((2,2),(0,0), colspan=2) #Eye diagram
self.ax2 = plt.subplot2grid((2,2),(1,0)) #histogram
self.ax3 = plt.subplot2grid((2,2),(1,1)) #erfc
plt.subplots_adjust(left=0.15, right=0.85, bottom=0.1, top=0.9, hspace=0.25)
# Creation of units shown on plots
formatter_time = EngFormatter(unit='s', places=1)
formatter_amp = EngFormatter(unit='v', places=1)
self.ax1.set_xlabel(self.tr('time'))
self.ax1.set_ylabel(self.tr('amplitude'))
self.ax1.xaxis.set_major_formatter(formatter_time)
self.ax1.yaxis.set_major_formatter(formatter_amp)
#self.ax1.xaxis.set_minor_locator(MultipleLocator(self.inc_tiempo_t1 * 25))
self.ax1.yaxis.set_minor_locator(MultipleLocator(0.5))
# Plotting erfc
x_axis = np.arange(0, 10, 0.5)
self.ax3.semilogy(x_axis, 0.5*erfc(x_axis/math.sqrt(2)), color='#08088a')
        logging.debug('creating the semilog axis')
self.ax3.set_xlabel('q')
self.ax3.set_ylabel('BER')
# Creation of horizontal and vertical bars
self.barSample = self.ax1.axvline(linewidth=3, x=0, color='blue')
self.barThreshold = self.ax1.axhline(linewidth=3, y=0, color='green')
self.barThreshold2 = self.ax2.axvline(x=0, color='green')
        self.bar_q = self.ax3.axvline(x=10, color='blue', linestyle='--') # Different from zero to avoid problems in the log calculation
self.bar_ber = self.ax3.axhline(y=10, color='blue', linestyle='--')
# Creation of font
font = QtGui.QFont()
font.setFamily(QtCore.QString.fromUtf8("Helvetica"))
font.setPixelSize(17)
# Creation of lable to show values of q and BER
self.results_label = QtGui.QLabel(self)
self.results_label.setFont(font)
# Matplotlib toolbar
self.mpl_toolbar = NavigationToolbar(self.canvas, self)
# Stop acquisition button
but_stop = QtGui.QPushButton(self.tr('Stop acquiring'), self)
but_stop.clicked.connect(self.stopAdq)
hbox = QtGui.QHBoxLayout()
hbox.addStretch(1)
hbox.addWidget(but_stop)
# Adding to layout
p1 = QtGui.QVBoxLayout()
p1.addWidget(self.canvas)
p1.addWidget(self.mpl_toolbar)
p1.addWidget(self.results_label)
p1.addLayout(hbox)
self.setLayout(p1)
# Interruption on mouse event
self.cid = self.figure.canvas.mpl_connect('button_press_event', self.on_press)
def refresh_data(self):
self.ax1.hold(True)
[self.ax1.plot(self.time_list, self.measure_list[i], '#0b610b') for i in xrange(len(self.measure_list))]
self.ax1.hold(False)
self.figure.canvas.draw()
def on_press(self, event):
QtGui.QApplication.setOverrideCursor(QtGui.QCursor(QtCore.Qt.WaitCursor))
QtCore.QCoreApplication.processEvents()
sampleValue = event.xdata
#valThreshold = event.ydata # It has some problems with q calculation, but at some point it might be useful
self.amp_range = self.ax1.yaxis.get_data_interval()
if (sampleValue < 0) or (sampleValue > self.time_list[len(self.time_list)-1]):
sampleValue = self.time_list[(len(self.time_list)-1)/2]
#if (valThreshold < self.amp_range[0]) or (valThreshold > self.amp_range[1]): # Same problem with q as before
valThreshold = (self.amp_range[0] + self.amp_range[1]) / 2
        logging.debug('sampling %s threshold %s', str(sampleValue), str(valThreshold))
self.plot(sampleValue, valThreshold)
def plot(self, sample, threshold):
# Avoid blocking
measure_list = self.measure_list
inc_t = self.inc_t
        logging.debug('entering plot')
samplingPoint = int(sample/inc_t)
amp = []
for i in xrange(len(measure_list)): # Saving from -25 to 25 points around sampling point in every trace
try:
[amp.append(measure_list[i][samplingPoint + j]) for j in xrange(-25, 25)]
except IndexError:
logging.debug('oob')
# discrimination by threshold
val0 = []
val1 = []
ap0 = val0.append
ap1 = val1.append
for i in xrange(len(amp)):
if(amp[i] < threshold):
ap0(amp[i])
else:
ap1(amp[i])
# Plotting histograms and gaussians
self.ax2.cla()
self.ax2.set_xlabel(self.tr('amplitude'))
norm0, bins, patches = self.ax2.hist(val0, bins=200,range=[(5/4)*self.amp_range[0], (5/4)*self.amp_range[1]], normed=True, histtype='step', color='#8181f7', rwidth=100)
norm1, bins, patches = self.ax2.hist(val1, bins=200,range=[(5/4)*self.amp_range[0], (5/4)*self.amp_range[1]], normed=True, histtype='step', color='#fa5858', rwidth=100)
v0, sigma0 = self.avg_var(val0)
gauss0 = pylab.normpdf(bins, v0, sigma0)
self.ax2.plot(bins, gauss0, linewidth=2, color='#0404b4')#blue
v1, sigma1 = self.avg_var(val1)
gauss1 = pylab.normpdf(bins, v1, sigma1)
self.ax2.plot(bins, gauss1, linewidth=2, color='#b40404')#red
# BER calc
q = math.fabs(v1-v0)/(sigma1+sigma0)
ber = 0.5*erfc(q/math.sqrt(2))
self.show_results(v0, sigma0, v1, sigma1, q, ber, len(val0), len(val1))
# Replacement of bars
self.ax2.add_line(self.barThreshold2)
self.ax3.add_line(self.bar_q)
self.ax3.add_line(self.bar_ber)
self.barSample.set_xdata(sample)
self.barThreshold.set_ydata(threshold)
self.barThreshold2.set_xdata(threshold)
        logging.debug('placing the bars on ax3')
self.bar_q.set_xdata(q)
self.bar_ber.set_ydata(ber)
        logging.debug('bars placed')
self.canvas.draw()
        logging.debug('canvas redrawn')
QtCore.QCoreApplication.processEvents()
QtGui.QApplication.restoreOverrideCursor()
def show_results(self, v0, sigma0, v1, sigma1, q, ber, num0, num1):
string = u'\tv0: %-*s \u03c3 0: %-*s N. samples 0: %-*s Q: %-*s \n\n\tv1: %-*s \u03c3 1: %-*s N. samples 1: %-*s BER: %.2e' % (17, str(round(v0*1000,1))+' mV', 17, str(round(sigma0*1000,1))+' mV', 17, str(num0), 17, str(round(q,2)), 17, str(round(v1*1000,1))+' mV', 17, str(round(sigma1*1000,1))+' mV', 17, str(num1), ber)
self.results_label.setText(string)
def avg_var(self, data):
avg = 0.0
var = 0.0
n = len(data)
for i in xrange(n):
avg += data[i]
avg = avg/n
cuad = math.pow
for i in xrange(n):
var += cuad(avg - data[i], 2)
var = math.sqrt(var / (n-1))
return avg, var
def configure(self, ch, _rate, _length):
#Dictionaries
timebase = {"10 Mbps":'50ns', "30 Mbps":'10ns', "70 Mbps":'5ns', "125 Mbps":'2.5ns'}
length = {"4":0, "8":1, "12":2, "16":3}
rate = {"125 Mbps":3, "70 Mbps":2, "30 Mbps":1, "10 Mbps":0}
#Gen config
self.gen.setClock(ch)
if ch == '1':
self.gen.setLength1(length[_length])
self.gen.setRate1(rate[_rate])
else:
self.gen.setLength2(length[_length])
self.gen.setRate2(rate[_rate])
#Scope config
self.osc.disp_channel(True, ch)
self.osc.set_display("YT")
self.osc.set_persistence_off()
self.osc.set_horizontal(timebase[_rate])
self.osc.autoset(ch)
self.osc.set_trigger('ext5', ch)
def acquire(self, ch):
measures, inc = self.osc.get_data(ch, 250, 1750, '1')
self.measure_list.append(measures)
self.inc_t = inc
self.time_list = []
[self.time_list.append(inc*i) for i in xrange(len(measures))]
def closeEvent(self, evnt):
self.timer_osc.stop()
self.timer_draw.stop()
super(EyeDisplay, self).closeEvent(evnt)
def stopAdq(self):
self.timer_osc.stop()
self.timer_draw.stop()
| gpl-2.0 |
deeplook/bokeh | examples/plotting/file/burtin.py | 43 | 4765 | from collections import OrderedDict
from math import log, sqrt
import numpy as np
import pandas as pd
from six.moves import cStringIO as StringIO
from bokeh.plotting import figure, show, output_file
antibiotics = """
bacteria, penicillin, streptomycin, neomycin, gram
Mycobacterium tuberculosis, 800, 5, 2, negative
Salmonella schottmuelleri, 10, 0.8, 0.09, negative
Proteus vulgaris, 3, 0.1, 0.1, negative
Klebsiella pneumoniae, 850, 1.2, 1, negative
Brucella abortus, 1, 2, 0.02, negative
Pseudomonas aeruginosa, 850, 2, 0.4, negative
Escherichia coli, 100, 0.4, 0.1, negative
Salmonella (Eberthella) typhosa, 1, 0.4, 0.008, negative
Aerobacter aerogenes, 870, 1, 1.6, negative
Brucella antracis, 0.001, 0.01, 0.007, positive
Streptococcus fecalis, 1, 1, 0.1, positive
Staphylococcus aureus, 0.03, 0.03, 0.001, positive
Staphylococcus albus, 0.007, 0.1, 0.001, positive
Streptococcus hemolyticus, 0.001, 14, 10, positive
Streptococcus viridans, 0.005, 10, 40, positive
Diplococcus pneumoniae, 0.005, 11, 10, positive
"""
drug_color = OrderedDict([
("Penicillin", "#0d3362"),
("Streptomycin", "#c64737"),
("Neomycin", "black" ),
])
gram_color = {
"positive" : "#aeaeb8",
"negative" : "#e69584",
}
df = pd.read_csv(StringIO(antibiotics),
skiprows=1,
skipinitialspace=True,
engine='python')
width = 800
height = 800
inner_radius = 90
outer_radius = 300 - 10
minr = sqrt(log(.001 * 1E4))
maxr = sqrt(log(1000 * 1E4))
a = (outer_radius - inner_radius) / (minr - maxr)
b = inner_radius - a * maxr
def rad(mic):
return a * np.sqrt(np.log(mic * 1E4)) + b
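# rad() maps a MIC value onto a radius between inner_radius and outer_radius on
# a sqrt-log scale: a and b are solved above from the two endpoints, so
# MIC = 0.001 lands on the outer ring and MIC = 1000 on the inner one.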
big_angle = 2.0 * np.pi / (len(df) + 1)
small_angle = big_angle / 7
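# Layout note (inferred from the wedge offsets used below, not stated in the
# original example): the extra slot in len(df) + 1 leaves one empty sector for
# the radial tick labels, and splitting each big wedge into 7 small angles
# leaves room for the three drug bars with gaps between them.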
x = np.zeros(len(df))
y = np.zeros(len(df))
output_file("burtin.html", title="burtin.py example")
p = figure(plot_width=width, plot_height=height, title="",
x_axis_type=None, y_axis_type=None,
x_range=[-420, 420], y_range=[-420, 420],
min_border=0, outline_line_color="black",
background_fill="#f0e1d2", border_fill="#f0e1d2")
p.line(x+1, y+1, alpha=0)
# annular wedges
angles = np.pi/2 - big_angle/2 - df.index.to_series()*big_angle
colors = [gram_color[gram] for gram in df.gram]
p.annular_wedge(
x, y, inner_radius, outer_radius, -big_angle+angles, angles, color=colors,
)
# small wedges
p.annular_wedge(x, y, inner_radius, rad(df.penicillin),
-big_angle+angles+5*small_angle, -big_angle+angles+6*small_angle,
color=drug_color['Penicillin'])
p.annular_wedge(x, y, inner_radius, rad(df.streptomycin),
-big_angle+angles+3*small_angle, -big_angle+angles+4*small_angle,
color=drug_color['Streptomycin'])
p.annular_wedge(x, y, inner_radius, rad(df.neomycin),
-big_angle+angles+1*small_angle, -big_angle+angles+2*small_angle,
color=drug_color['Neomycin'])
# circular axes and labels
labels = np.power(10.0, np.arange(-3, 4))
radii = a * np.sqrt(np.log(labels * 1E4)) + b
p.circle(x, y, radius=radii, fill_color=None, line_color="white")
p.text(x[:-1], radii[:-1], [str(r) for r in labels[:-1]],
text_font_size="8pt", text_align="center", text_baseline="middle")
# radial axes
p.annular_wedge(x, y, inner_radius-10, outer_radius+10,
-big_angle+angles, -big_angle+angles, color="black")
# bacteria labels
xr = radii[0]*np.cos(np.array(-big_angle/2 + angles))
yr = radii[0]*np.sin(np.array(-big_angle/2 + angles))
label_angle=np.array(-big_angle/2+angles)
label_angle[label_angle < -np.pi/2] += np.pi # easier to read labels on the left side
p.text(xr, yr, df.bacteria, angle=label_angle,
text_font_size="9pt", text_align="center", text_baseline="middle")
# OK, these hand drawn legends are pretty clunky, will be improved in future release
p.circle([-40, -40], [-370, -390], color=list(gram_color.values()), radius=5)
p.text([-30, -30], [-370, -390], text=["Gram-" + gr for gr in gram_color.keys()],
text_font_size="7pt", text_align="left", text_baseline="middle")
p.rect([-40, -40, -40], [18, 0, -18], width=30, height=13,
color=list(drug_color.values()))
p.text([-15, -15, -15], [18, 0, -18], text=list(drug_color.keys()),
text_font_size="9pt", text_align="left", text_baseline="middle")
p.xgrid.grid_line_color = None
p.ygrid.grid_line_color = None
show(p)
| bsd-3-clause |
aestrivex/mne-python | mne/viz/tests/test_decoding.py | 7 | 3797 | # Authors: Denis Engemann <[email protected]>
# Jean-Remi King <[email protected]>
#
# License: Simplified BSD
import os.path as op
import warnings
from nose.tools import assert_raises, assert_equals
import numpy as np
from mne.epochs import equalize_epoch_counts, concatenate_epochs
from mne.decoding import GeneralizationAcrossTime
from mne import io, Epochs, read_events, pick_types
from mne.utils import requires_sklearn, run_tests_if_main
import matplotlib
matplotlib.use('Agg') # for testing don't use X server
data_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
raw_fname = op.join(data_dir, 'test_raw.fif')
event_name = op.join(data_dir, 'test-eve.fif')
warnings.simplefilter('always') # enable b/c these tests throw warnings
def _get_data(tmin=-0.2, tmax=0.5, event_id=dict(aud_l=1, vis_l=3),
event_id_gen=dict(aud_l=2, vis_l=4), test_times=None):
"""Aux function for testing GAT viz"""
gat = GeneralizationAcrossTime()
raw = io.Raw(raw_fname, preload=False)
events = read_events(event_name)
picks = pick_types(raw.info, meg='mag', stim=False, ecg=False,
eog=False, exclude='bads')
picks = picks[1:13:3]
decim = 30
# Test on time generalization within one condition
with warnings.catch_warnings(record=True):
epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), preload=True, decim=decim)
epochs_list = [epochs[k] for k in event_id]
equalize_epoch_counts(epochs_list)
epochs = concatenate_epochs(epochs_list)
# Test default running
gat = GeneralizationAcrossTime(test_times=test_times)
gat.fit(epochs)
gat.score(epochs)
return gat
@requires_sklearn
def test_gat_plot_matrix():
"""Test GAT matrix plot"""
gat = _get_data()
gat.plot()
del gat.scores_
assert_raises(RuntimeError, gat.plot)
@requires_sklearn
def test_gat_plot_diagonal():
"""Test GAT diagonal plot"""
gat = _get_data()
gat.plot_diagonal()
del gat.scores_
assert_raises(RuntimeError, gat.plot)
@requires_sklearn
def test_gat_plot_times():
"""Test GAT times plot"""
gat = _get_data()
# test one line
gat.plot_times(gat.train_times_['times'][0])
# test multiple lines
gat.plot_times(gat.train_times_['times'])
# test multiple colors
n_times = len(gat.train_times_['times'])
colors = np.tile(['r', 'g', 'b'], np.ceil(n_times / 3))[:n_times]
gat.plot_times(gat.train_times_['times'], color=colors)
# test invalid time point
assert_raises(ValueError, gat.plot_times, -1.)
# test float type
assert_raises(ValueError, gat.plot_times, 1)
assert_raises(ValueError, gat.plot_times, 'diagonal')
del gat.scores_
assert_raises(RuntimeError, gat.plot)
def chance(ax):
return ax.get_children()[1].get_lines()[0].get_ydata()[0]
@requires_sklearn
def test_gat_chance_level():
"""Test GAT plot_times chance level"""
gat = _get_data()
ax = gat.plot_diagonal(chance=False)
ax = gat.plot_diagonal()
assert_equals(chance(ax), .5)
gat = _get_data(event_id=dict(aud_l=1, vis_l=3, aud_r=2, vis_r=4))
ax = gat.plot_diagonal()
assert_equals(chance(ax), .25)
ax = gat.plot_diagonal(chance=1.234)
assert_equals(chance(ax), 1.234)
assert_raises(ValueError, gat.plot_diagonal, chance='foo')
del gat.scores_
assert_raises(RuntimeError, gat.plot)
@requires_sklearn
def test_gat_plot_nonsquared():
"""Test GAT diagonal plot"""
gat = _get_data(test_times=dict(start=0.))
gat.plot()
ax = gat.plot_diagonal()
scores = ax.get_children()[1].get_lines()[2].get_ydata()
assert_equals(len(scores), len(gat.estimators_))
run_tests_if_main()
| bsd-3-clause |
abhishekgahlot/scikit-learn | doc/datasets/mldata_fixture.py | 367 | 1183 | """Fixture module to skip the datasets loading when offline
Mock urllib2 access to mldata.org and create a temporary data folder.
"""
from os import makedirs
from os.path import join
import numpy as np
import tempfile
import shutil
from sklearn import datasets
from sklearn.utils.testing import install_mldata_mock
from sklearn.utils.testing import uninstall_mldata_mock
def globs(globs):
# Create a temporary folder for the data fetcher
global custom_data_home
custom_data_home = tempfile.mkdtemp()
makedirs(join(custom_data_home, 'mldata'))
globs['custom_data_home'] = custom_data_home
return globs
def setup_module():
# setup mock urllib2 module to avoid downloading from mldata.org
install_mldata_mock({
'mnist-original': {
'data': np.empty((70000, 784)),
'label': np.repeat(np.arange(10, dtype='d'), 7000),
},
'iris': {
'data': np.empty((150, 4)),
},
'datasets-uci-iris': {
'double0': np.empty((150, 4)),
'class': np.empty((150,)),
},
})
def teardown_module():
uninstall_mldata_mock()
shutil.rmtree(custom_data_home)
| bsd-3-clause |
eblossom/gnuradio | gr-filter/examples/decimate.py | 58 | 6061 | #!/usr/bin/env python
#
# Copyright 2009,2012,2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr
from gnuradio import blocks
from gnuradio import filter
import sys, time
try:
from gnuradio import analog
except ImportError:
sys.stderr.write("Error: Program requires gr-analog.\n")
sys.exit(1)
try:
import scipy
from scipy import fftpack
except ImportError:
sys.stderr.write("Error: Program requires scipy (see: www.scipy.org).\n")
sys.exit(1)
try:
import pylab
from pylab import mlab
except ImportError:
sys.stderr.write("Error: Program requires matplotlib (see: matplotlib.sourceforge.net).\n")
sys.exit(1)
class pfb_top_block(gr.top_block):
def __init__(self):
gr.top_block.__init__(self)
self._N = 10000000 # number of samples to use
self._fs = 10000 # initial sampling rate
self._decim = 20 # Decimation rate
# Generate the prototype filter taps for the decimators with a 200 Hz bandwidth
self._taps = filter.firdes.low_pass_2(1, self._fs,
200, 150,
attenuation_dB=120,
window=filter.firdes.WIN_BLACKMAN_hARRIS)
# Calculate the number of taps per channel for our own information
tpc = scipy.ceil(float(len(self._taps)) / float(self._decim))
print "Number of taps: ", len(self._taps)
print "Number of filters: ", self._decim
print "Taps per channel: ", tpc
# Build the input signal source
# We create a list of freqs, and a sine wave is generated and added to the source
# for each one of these frequencies.
self.signals = list()
self.add = blocks.add_cc()
freqs = [10, 20, 2040]
for i in xrange(len(freqs)):
self.signals.append(analog.sig_source_c(self._fs, analog.GR_SIN_WAVE, freqs[i], 1))
self.connect(self.signals[i], (self.add,i))
self.head = blocks.head(gr.sizeof_gr_complex, self._N)
# Construct a PFB decimator filter
self.pfb = filter.pfb.decimator_ccf(self._decim, self._taps, 0)
# Construct a standard FIR decimating filter
self.dec = filter.fir_filter_ccf(self._decim, self._taps)
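        # Note: self.pfb and self.dec are configured to perform the same
        # decimate-by-self._decim filtering; only the polyphase version is
        # connected below, so the plain fir_filter_ccf appears to be kept for
        # reference/comparison only (it is unused in this flowgraph).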
self.snk_i = blocks.vector_sink_c()
# Connect the blocks
self.connect(self.add, self.head, self.pfb)
self.connect(self.add, self.snk_i)
        # Create the sink for the decimated signal
self.snk = blocks.vector_sink_c()
self.connect(self.pfb, self.snk)
def main():
tb = pfb_top_block()
tstart = time.time()
tb.run()
tend = time.time()
print "Run time: %f" % (tend - tstart)
if 1:
fig1 = pylab.figure(1, figsize=(16,9))
fig2 = pylab.figure(2, figsize=(16,9))
Ns = 10000
Ne = 10000
fftlen = 8192
winfunc = scipy.blackman
fs = tb._fs
# Plot the input to the decimator
d = tb.snk_i.data()[Ns:Ns+Ne]
sp1_f = fig1.add_subplot(2, 1, 1)
X,freq = mlab.psd(d, NFFT=fftlen, noverlap=fftlen/4, Fs=fs,
window = lambda d: d*winfunc(fftlen),
scale_by_freq=True)
X_in = 10.0*scipy.log10(abs(fftpack.fftshift(X)))
f_in = scipy.arange(-fs/2.0, fs/2.0, fs/float(X_in.size))
p1_f = sp1_f.plot(f_in, X_in, "b")
sp1_f.set_xlim([min(f_in), max(f_in)+1])
sp1_f.set_ylim([-200.0, 50.0])
sp1_f.set_title("Input Signal", weight="bold")
sp1_f.set_xlabel("Frequency (Hz)")
sp1_f.set_ylabel("Power (dBW)")
Ts = 1.0/fs
Tmax = len(d)*Ts
t_in = scipy.arange(0, Tmax, Ts)
x_in = scipy.array(d)
sp1_t = fig1.add_subplot(2, 1, 2)
p1_t = sp1_t.plot(t_in, x_in.real, "b")
p1_t = sp1_t.plot(t_in, x_in.imag, "r")
sp1_t.set_ylim([-tb._decim*1.1, tb._decim*1.1])
sp1_t.set_xlabel("Time (s)")
sp1_t.set_ylabel("Amplitude")
# Plot the output of the decimator
fs_o = tb._fs / tb._decim
sp2_f = fig2.add_subplot(2, 1, 1)
d = tb.snk.data()[Ns:Ns+Ne]
X,freq = mlab.psd(d, NFFT=fftlen, noverlap=fftlen/4, Fs=fs_o,
window = lambda d: d*winfunc(fftlen),
scale_by_freq=True)
X_o = 10.0*scipy.log10(abs(fftpack.fftshift(X)))
f_o = scipy.arange(-fs_o/2.0, fs_o/2.0, fs_o/float(X_o.size))
p2_f = sp2_f.plot(f_o, X_o, "b")
sp2_f.set_xlim([min(f_o), max(f_o)+1])
sp2_f.set_ylim([-200.0, 50.0])
sp2_f.set_title("PFB Decimated Signal", weight="bold")
sp2_f.set_xlabel("Frequency (Hz)")
sp2_f.set_ylabel("Power (dBW)")
Ts_o = 1.0/fs_o
Tmax_o = len(d)*Ts_o
x_o = scipy.array(d)
t_o = scipy.arange(0, Tmax_o, Ts_o)
sp2_t = fig2.add_subplot(2, 1, 2)
p2_t = sp2_t.plot(t_o, x_o.real, "b-o")
p2_t = sp2_t.plot(t_o, x_o.imag, "r-o")
sp2_t.set_ylim([-2.5, 2.5])
sp2_t.set_xlabel("Time (s)")
sp2_t.set_ylabel("Amplitude")
pylab.show()
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
pass
| gpl-3.0 |
zlcnup/csmath | hw3_mog/mog.py | 1 | 4034 | # -*- coding: utf-8 -*-
#!/usr/bin/env python
from matplotlib import pyplot
from pylab import *
from numpy.linalg import det
import numpy as np
import numpy.matlib as ml
import random
from math import *
def init_params(centers,k):
    # Use a k-means style assignment to set the initial parameters for EM
zl_pMiu = centers
zl_pPi = zeros([1,k], dtype=float)
zl_pSigma = zeros([len(X[0]), len(X[0]), k], dtype=float)
    # Compute the distance from each sample in X to the centers
zl_dist = zl_distvec(X, centers)
    # Assign each sample in X to its nearest center
labels = zl_dist.argmin(axis=1)
    # Recompute every center and its associated parameters
for j in range(k):
idx_j = (labels == j).nonzero()
zl_pMiu[j] = X[idx_j].mean(axis=0)
zl_pPi[0, j] = 1.0 * len(X[idx_j]) / nSamples
zl_pSigma[:, :, j] = cov(mat(X[idx_j]).T)
return zl_pMiu, zl_pPi, zl_pSigma
def zl_distvec(X, Y):
n = len(X)
m = len(Y)
xx = ml.sum(X*X, axis=1)
yy = ml.sum(Y*Y, axis=1)
xy = ml.dot(X, Y.T)
return tile(xx, (m, 1)).T+tile(yy, (n, 1)) - 2*xy
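# zl_distvec returns the matrix of squared Euclidean distances between the rows
# of X and the rows of Y via the expansion ||x - y||^2 = ||x||^2 + ||y||^2 - 2*x.y,
# which avoids an explicit Python loop over all pairs.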
def calc_probability(k,zl_pMiu,zl_pSigma):
    # Compute the probability of each sample under each Gaussian component
zl_Px = zeros([nSamples, k], dtype=float)
for i in range(k):
Xshift = mat(X - zl_pMiu[i, :])
inv_pSigma = mat(zl_pSigma[:, :, i]).I
        # Normalising constant of a d-dimensional Gaussian: (2*pi)^(d/2) * sqrt(|Sigma|)
        coef = (2 * math.pi) ** (len(X[0]) / 2.0) * math.sqrt(det(mat(zl_pSigma[:, :, i])))
for j in range(nSamples):
tmp = (Xshift[j, :] * inv_pSigma * Xshift[j, :].T)
zl_Px[j, i] = 1.0 / coef * math.exp(-0.5*tmp)
return zl_Px
def data_generator(nSamples):
    # Generate Gaussian-distributed sample data
mean = [15,15]
cov = [[10,0],[0,100]]
data = np.random.multivariate_normal(mean,cov,nSamples).T
return data
def eStep(zl_Px, zl_pPi):
    # Compute the probability that each sample Xi was generated by the k-th component
zl_pGamma =mat(array(zl_Px) * array(zl_pPi))
zl_pGamma = zl_pGamma / zl_pGamma.sum(axis=1)
return zl_pGamma
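# The E-step above computes the responsibilities gamma(i, k), proportional to
# pi_k * N(x_i | mu_k, Sigma_k) and normalised so that each row (each sample)
# sums to one.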
def mStep(zl_pGamma):
zl_Nk = zl_pGamma.sum(axis=0)
zl_pMiu = diagflat(1/zl_Nk) * zl_pGamma.T * mat(X)
zl_pSigma = zeros([len(X[0]), len(X[0]), k], dtype=float)
for j in range(k):
Xshift = mat(X) - zl_pMiu[j, :]
for i in range(nSamples):
zl_pSigmaK = Xshift[i, :].T * Xshift[i, :]
zl_pSigmaK = zl_pSigmaK * zl_pGamma[i, j] / zl_Nk[0, j]
zl_pSigma[:, :, j] = zl_pSigma[:, :, j] + zl_pSigmaK
return zl_pGamma, zl_pMiu, zl_pSigma
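# The M-step re-estimates each component from the responsibilities:
# N_k = sum_i gamma(i, k), mu_k = (1/N_k) * sum_i gamma(i, k) * x_i and
# Sigma_k = (1/N_k) * sum_i gamma(i, k) * (x_i - mu_k)(x_i - mu_k)^T.
# Note that the mixing weights zl_pPi are not re-estimated here; they keep the
# values assigned in init_params.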
def pylab_plot(X, labels, iter, k):
colors = np.eye(k,k = 0)
pyplot.plot(hold = False)
plt.xlabel('X')
plt.ylabel('Y')
plt.title('MOG')
plt.text(19,45, "Samples:%d K:%d"%(len(X),k))
labels = array(labels).ravel()
data_colors = [colors[lbl] for lbl in labels]
pyplot.scatter(X[:, 0], X[:, 1], c = data_colors, alpha = 0.5)
pyplot.show()
#pyplot.savefig('iter_%02d.png' % iter, format = 'png')
def MoG(X, k, threshold = 1e-10):
N = len(X)
labels = zeros(N, dtype = int)
centers = array(random.sample(list(X), k))
iter = 0
zl_pMiu, zl_pPi, zl_pSigma = init_params(centers,k)
Lprev = float('-10000')
pre_esp = 100000
while iter < 100:
zl_Px = calc_probability(k,zl_pMiu,zl_pSigma)
        # E-step of the EM algorithm
zl_pGamma = eStep(zl_Px, zl_pPi)
        # M-step of the EM algorithm
zl_pGamma, zl_pMiu, zl_pSigma = mStep(zl_pGamma)
labels = zl_pGamma.argmax(axis=1)
        # Check for convergence
L = sum(log(mat(zl_Px) * mat(zl_pPi).T))
cur_esp = L-Lprev
if cur_esp < threshold:
break
if cur_esp > pre_esp:
break
pre_esp = cur_esp
Lprev = L
iter += 1
pylab_plot(X, labels, iter, k)
if __name__ == '__main__':
    # Read the number of samples and the number of mixture components K from the console
print("Please Input the value of nSamples and K")
nSamples = input("Input nSamples: ")
nSamples = int(nSamples)
k = input("Input k (3 or 4): ")
k = int(k)
    # Generate the Gaussian data
samples = data_generator(nSamples)
X = array(mat(samples).T)
MoG(X, k)
| mit |
ammarkhann/FinalSeniorCode | lib/python2.7/site-packages/pandas/tests/series/test_missing.py | 3 | 43943 | # coding=utf-8
# pylint: disable-msg=E1101,W0612
import pytz
import pytest
from datetime import timedelta, datetime
from distutils.version import LooseVersion
from numpy import nan
import numpy as np
import pandas as pd
from pandas import (Series, DataFrame, isnull, date_range,
MultiIndex, Index, Timestamp, NaT, IntervalIndex)
from pandas.compat import range
from pandas._libs.tslib import iNaT
from pandas.util.testing import assert_series_equal, assert_frame_equal
import pandas.util.testing as tm
from .common import TestData
try:
import scipy
_is_scipy_ge_0190 = scipy.__version__ >= LooseVersion('0.19.0')
except:
_is_scipy_ge_0190 = False
def _skip_if_no_pchip():
try:
from scipy.interpolate import pchip_interpolate # noqa
except ImportError:
import pytest
pytest.skip('scipy.interpolate.pchip missing')
def _skip_if_no_akima():
try:
from scipy.interpolate import Akima1DInterpolator # noqa
except ImportError:
import pytest
pytest.skip('scipy.interpolate.Akima1DInterpolator missing')
def _simple_ts(start, end, freq='D'):
rng = date_range(start, end, freq=freq)
return Series(np.random.randn(len(rng)), index=rng)
class TestSeriesMissingData(TestData):
def test_timedelta_fillna(self):
# GH 3371
s = Series([Timestamp('20130101'), Timestamp('20130101'), Timestamp(
'20130102'), Timestamp('20130103 9:01:01')])
td = s.diff()
# reg fillna
result = td.fillna(0)
expected = Series([timedelta(0), timedelta(0), timedelta(1), timedelta(
days=1, seconds=9 * 3600 + 60 + 1)])
assert_series_equal(result, expected)
        # interpreted as seconds
result = td.fillna(1)
expected = Series([timedelta(seconds=1), timedelta(0), timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1)])
assert_series_equal(result, expected)
result = td.fillna(timedelta(days=1, seconds=1))
expected = Series([timedelta(days=1, seconds=1), timedelta(
0), timedelta(1), timedelta(days=1, seconds=9 * 3600 + 60 + 1)])
assert_series_equal(result, expected)
result = td.fillna(np.timedelta64(int(1e9)))
expected = Series([timedelta(seconds=1), timedelta(0), timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1)])
assert_series_equal(result, expected)
result = td.fillna(NaT)
expected = Series([NaT, timedelta(0), timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1)],
dtype='m8[ns]')
assert_series_equal(result, expected)
# ffill
td[2] = np.nan
result = td.ffill()
expected = td.fillna(0)
expected[0] = np.nan
assert_series_equal(result, expected)
# bfill
td[2] = np.nan
result = td.bfill()
expected = td.fillna(0)
expected[2] = timedelta(days=1, seconds=9 * 3600 + 60 + 1)
assert_series_equal(result, expected)
def test_datetime64_fillna(self):
s = Series([Timestamp('20130101'), Timestamp('20130101'), Timestamp(
'20130102'), Timestamp('20130103 9:01:01')])
s[2] = np.nan
# reg fillna
result = s.fillna(Timestamp('20130104'))
expected = Series([Timestamp('20130101'), Timestamp(
'20130101'), Timestamp('20130104'), Timestamp('20130103 9:01:01')])
assert_series_equal(result, expected)
result = s.fillna(NaT)
expected = s
assert_series_equal(result, expected)
# ffill
result = s.ffill()
expected = Series([Timestamp('20130101'), Timestamp(
'20130101'), Timestamp('20130101'), Timestamp('20130103 9:01:01')])
assert_series_equal(result, expected)
# bfill
result = s.bfill()
expected = Series([Timestamp('20130101'), Timestamp('20130101'),
Timestamp('20130103 9:01:01'), Timestamp(
'20130103 9:01:01')])
assert_series_equal(result, expected)
# GH 6587
# make sure that we are treating as integer when filling
# this also tests inference of a datetime-like with NaT's
s = Series([pd.NaT, pd.NaT, '2013-08-05 15:30:00.000001'])
expected = Series(
['2013-08-05 15:30:00.000001', '2013-08-05 15:30:00.000001',
'2013-08-05 15:30:00.000001'], dtype='M8[ns]')
result = s.fillna(method='backfill')
assert_series_equal(result, expected)
def test_datetime64_tz_fillna(self):
for tz in ['US/Eastern', 'Asia/Tokyo']:
# DatetimeBlock
s = Series([Timestamp('2011-01-01 10:00'), pd.NaT,
Timestamp('2011-01-03 10:00'), pd.NaT])
null_loc = pd.Series([False, True, False, True])
result = s.fillna(pd.Timestamp('2011-01-02 10:00'))
expected = Series([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-02 10:00'),
Timestamp('2011-01-03 10:00'),
Timestamp('2011-01-02 10:00')])
tm.assert_series_equal(expected, result)
# check s is not changed
tm.assert_series_equal(pd.isnull(s), null_loc)
result = s.fillna(pd.Timestamp('2011-01-02 10:00', tz=tz))
expected = Series([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-02 10:00', tz=tz),
Timestamp('2011-01-03 10:00'),
Timestamp('2011-01-02 10:00', tz=tz)])
tm.assert_series_equal(expected, result)
tm.assert_series_equal(pd.isnull(s), null_loc)
result = s.fillna('AAA')
expected = Series([Timestamp('2011-01-01 10:00'), 'AAA',
Timestamp('2011-01-03 10:00'), 'AAA'],
dtype=object)
tm.assert_series_equal(expected, result)
tm.assert_series_equal(pd.isnull(s), null_loc)
result = s.fillna({1: pd.Timestamp('2011-01-02 10:00', tz=tz),
3: pd.Timestamp('2011-01-04 10:00')})
expected = Series([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-02 10:00', tz=tz),
Timestamp('2011-01-03 10:00'),
Timestamp('2011-01-04 10:00')])
tm.assert_series_equal(expected, result)
tm.assert_series_equal(pd.isnull(s), null_loc)
result = s.fillna({1: pd.Timestamp('2011-01-02 10:00'),
3: pd.Timestamp('2011-01-04 10:00')})
expected = Series([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-02 10:00'),
Timestamp('2011-01-03 10:00'),
Timestamp('2011-01-04 10:00')])
tm.assert_series_equal(expected, result)
tm.assert_series_equal(pd.isnull(s), null_loc)
# DatetimeBlockTZ
idx = pd.DatetimeIndex(['2011-01-01 10:00', pd.NaT,
'2011-01-03 10:00', pd.NaT], tz=tz)
s = pd.Series(idx)
assert s.dtype == 'datetime64[ns, {0}]'.format(tz)
tm.assert_series_equal(pd.isnull(s), null_loc)
result = s.fillna(pd.Timestamp('2011-01-02 10:00'))
expected = Series([Timestamp('2011-01-01 10:00', tz=tz),
Timestamp('2011-01-02 10:00'),
Timestamp('2011-01-03 10:00', tz=tz),
Timestamp('2011-01-02 10:00')])
tm.assert_series_equal(expected, result)
tm.assert_series_equal(pd.isnull(s), null_loc)
result = s.fillna(pd.Timestamp('2011-01-02 10:00', tz=tz))
idx = pd.DatetimeIndex(['2011-01-01 10:00', '2011-01-02 10:00',
'2011-01-03 10:00', '2011-01-02 10:00'],
tz=tz)
expected = Series(idx)
tm.assert_series_equal(expected, result)
tm.assert_series_equal(pd.isnull(s), null_loc)
result = s.fillna(pd.Timestamp('2011-01-02 10:00',
tz=tz).to_pydatetime())
idx = pd.DatetimeIndex(['2011-01-01 10:00', '2011-01-02 10:00',
'2011-01-03 10:00', '2011-01-02 10:00'],
tz=tz)
expected = Series(idx)
tm.assert_series_equal(expected, result)
tm.assert_series_equal(pd.isnull(s), null_loc)
result = s.fillna('AAA')
expected = Series([Timestamp('2011-01-01 10:00', tz=tz), 'AAA',
Timestamp('2011-01-03 10:00', tz=tz), 'AAA'],
dtype=object)
tm.assert_series_equal(expected, result)
tm.assert_series_equal(pd.isnull(s), null_loc)
result = s.fillna({1: pd.Timestamp('2011-01-02 10:00', tz=tz),
3: pd.Timestamp('2011-01-04 10:00')})
expected = Series([Timestamp('2011-01-01 10:00', tz=tz),
Timestamp('2011-01-02 10:00', tz=tz),
Timestamp('2011-01-03 10:00', tz=tz),
Timestamp('2011-01-04 10:00')])
tm.assert_series_equal(expected, result)
tm.assert_series_equal(pd.isnull(s), null_loc)
result = s.fillna({1: pd.Timestamp('2011-01-02 10:00', tz=tz),
3: pd.Timestamp('2011-01-04 10:00', tz=tz)})
expected = Series([Timestamp('2011-01-01 10:00', tz=tz),
Timestamp('2011-01-02 10:00', tz=tz),
Timestamp('2011-01-03 10:00', tz=tz),
Timestamp('2011-01-04 10:00', tz=tz)])
tm.assert_series_equal(expected, result)
tm.assert_series_equal(pd.isnull(s), null_loc)
# filling with a naive/other zone, coerce to object
result = s.fillna(Timestamp('20130101'))
expected = Series([Timestamp('2011-01-01 10:00', tz=tz),
Timestamp('2013-01-01'),
Timestamp('2011-01-03 10:00', tz=tz),
Timestamp('2013-01-01')])
tm.assert_series_equal(expected, result)
tm.assert_series_equal(pd.isnull(s), null_loc)
result = s.fillna(Timestamp('20130101', tz='US/Pacific'))
expected = Series([Timestamp('2011-01-01 10:00', tz=tz),
Timestamp('2013-01-01', tz='US/Pacific'),
Timestamp('2011-01-03 10:00', tz=tz),
Timestamp('2013-01-01', tz='US/Pacific')])
tm.assert_series_equal(expected, result)
tm.assert_series_equal(pd.isnull(s), null_loc)
# with timezone
# GH 15855
df = pd.Series([pd.Timestamp('2012-11-11 00:00:00+01:00'), pd.NaT])
exp = pd.Series([pd.Timestamp('2012-11-11 00:00:00+01:00'),
pd.Timestamp('2012-11-11 00:00:00+01:00')])
assert_series_equal(df.fillna(method='pad'), exp)
df = pd.Series([pd.NaT, pd.Timestamp('2012-11-11 00:00:00+01:00')])
exp = pd.Series([pd.Timestamp('2012-11-11 00:00:00+01:00'),
pd.Timestamp('2012-11-11 00:00:00+01:00')])
assert_series_equal(df.fillna(method='bfill'), exp)
def test_datetime64tz_fillna_round_issue(self):
# GH 14872
data = pd.Series([pd.NaT, pd.NaT,
datetime(2016, 12, 12, 22, 24, 6, 100001,
tzinfo=pytz.utc)])
filled = data.fillna(method='bfill')
expected = pd.Series([datetime(2016, 12, 12, 22, 24, 6,
100001, tzinfo=pytz.utc),
datetime(2016, 12, 12, 22, 24, 6,
100001, tzinfo=pytz.utc),
datetime(2016, 12, 12, 22, 24, 6,
100001, tzinfo=pytz.utc)])
assert_series_equal(filled, expected)
def test_fillna_downcast(self):
# GH 15277
# infer int64 from float64
s = pd.Series([1., np.nan])
result = s.fillna(0, downcast='infer')
expected = pd.Series([1, 0])
assert_series_equal(result, expected)
# infer int64 from float64 when fillna value is a dict
s = pd.Series([1., np.nan])
result = s.fillna({1: 0}, downcast='infer')
expected = pd.Series([1, 0])
assert_series_equal(result, expected)
def test_fillna_int(self):
s = Series(np.random.randint(-100, 100, 50))
s.fillna(method='ffill', inplace=True)
assert_series_equal(s.fillna(method='ffill', inplace=False), s)
def test_fillna_raise(self):
s = Series(np.random.randint(-100, 100, 50))
pytest.raises(TypeError, s.fillna, [1, 2])
pytest.raises(TypeError, s.fillna, (1, 2))
# related GH 9217, make sure limit is an int and greater than 0
s = Series([1, 2, 3, None])
for limit in [-1, 0, 1., 2.]:
for method in ['backfill', 'bfill', 'pad', 'ffill', None]:
with pytest.raises(ValueError):
s.fillna(1, limit=limit, method=method)
def test_fillna_nat(self):
series = Series([0, 1, 2, iNaT], dtype='M8[ns]')
filled = series.fillna(method='pad')
filled2 = series.fillna(value=series.values[2])
expected = series.copy()
expected.values[3] = expected.values[2]
assert_series_equal(filled, expected)
assert_series_equal(filled2, expected)
df = DataFrame({'A': series})
filled = df.fillna(method='pad')
filled2 = df.fillna(value=series.values[2])
expected = DataFrame({'A': expected})
assert_frame_equal(filled, expected)
assert_frame_equal(filled2, expected)
series = Series([iNaT, 0, 1, 2], dtype='M8[ns]')
filled = series.fillna(method='bfill')
filled2 = series.fillna(value=series[1])
expected = series.copy()
expected[0] = expected[1]
assert_series_equal(filled, expected)
assert_series_equal(filled2, expected)
df = DataFrame({'A': series})
filled = df.fillna(method='bfill')
filled2 = df.fillna(value=series[1])
expected = DataFrame({'A': expected})
assert_frame_equal(filled, expected)
assert_frame_equal(filled2, expected)
def test_isnull_for_inf(self):
s = Series(['a', np.inf, np.nan, 1.0])
with pd.option_context('mode.use_inf_as_null', True):
r = s.isnull()
dr = s.dropna()
e = Series([False, True, True, False])
de = Series(['a', 1.0], index=[0, 3])
tm.assert_series_equal(r, e)
tm.assert_series_equal(dr, de)
def test_fillna(self):
ts = Series([0., 1., 2., 3., 4.], index=tm.makeDateIndex(5))
tm.assert_series_equal(ts, ts.fillna(method='ffill'))
ts[2] = np.NaN
exp = Series([0., 1., 1., 3., 4.], index=ts.index)
tm.assert_series_equal(ts.fillna(method='ffill'), exp)
exp = Series([0., 1., 3., 3., 4.], index=ts.index)
tm.assert_series_equal(ts.fillna(method='backfill'), exp)
exp = Series([0., 1., 5., 3., 4.], index=ts.index)
tm.assert_series_equal(ts.fillna(value=5), exp)
pytest.raises(ValueError, ts.fillna)
pytest.raises(ValueError, self.ts.fillna, value=0, method='ffill')
# GH 5703
s1 = Series([np.nan])
s2 = Series([1])
result = s1.fillna(s2)
expected = Series([1.])
assert_series_equal(result, expected)
result = s1.fillna({})
assert_series_equal(result, s1)
result = s1.fillna(Series(()))
assert_series_equal(result, s1)
result = s2.fillna(s1)
assert_series_equal(result, s2)
result = s1.fillna({0: 1})
assert_series_equal(result, expected)
result = s1.fillna({1: 1})
assert_series_equal(result, Series([np.nan]))
result = s1.fillna({0: 1, 1: 1})
assert_series_equal(result, expected)
result = s1.fillna(Series({0: 1, 1: 1}))
assert_series_equal(result, expected)
result = s1.fillna(Series({0: 1, 1: 1}, index=[4, 5]))
assert_series_equal(result, s1)
s1 = Series([0, 1, 2], list('abc'))
s2 = Series([0, np.nan, 2], list('bac'))
result = s2.fillna(s1)
expected = Series([0, 0, 2.], list('bac'))
assert_series_equal(result, expected)
# limit
s = Series(np.nan, index=[0, 1, 2])
result = s.fillna(999, limit=1)
expected = Series([999, np.nan, np.nan], index=[0, 1, 2])
assert_series_equal(result, expected)
result = s.fillna(999, limit=2)
expected = Series([999, 999, np.nan], index=[0, 1, 2])
assert_series_equal(result, expected)
# GH 9043
# make sure a string representation of int/float values can be filled
# correctly without raising errors or being converted
vals = ['0', '1.5', '-0.3']
for val in vals:
s = Series([0, 1, np.nan, np.nan, 4], dtype='float64')
result = s.fillna(val)
expected = Series([0, 1, val, val, 4], dtype='object')
assert_series_equal(result, expected)
def test_fillna_bug(self):
x = Series([nan, 1., nan, 3., nan], ['z', 'a', 'b', 'c', 'd'])
filled = x.fillna(method='ffill')
expected = Series([nan, 1., 1., 3., 3.], x.index)
assert_series_equal(filled, expected)
filled = x.fillna(method='bfill')
expected = Series([1., 1., 3., 3., nan], x.index)
assert_series_equal(filled, expected)
def test_fillna_inplace(self):
x = Series([nan, 1., nan, 3., nan], ['z', 'a', 'b', 'c', 'd'])
y = x.copy()
y.fillna(value=0, inplace=True)
expected = x.fillna(value=0)
assert_series_equal(y, expected)
def test_fillna_invalid_method(self):
try:
self.ts.fillna(method='ffil')
except ValueError as inst:
assert 'ffil' in str(inst)
def test_ffill(self):
ts = Series([0., 1., 2., 3., 4.], index=tm.makeDateIndex(5))
ts[2] = np.NaN
assert_series_equal(ts.ffill(), ts.fillna(method='ffill'))
def test_ffill_mixed_dtypes_without_missing_data(self):
# GH14956
series = pd.Series([datetime(2015, 1, 1, tzinfo=pytz.utc), 1])
result = series.ffill()
assert_series_equal(series, result)
def test_bfill(self):
ts = Series([0., 1., 2., 3., 4.], index=tm.makeDateIndex(5))
ts[2] = np.NaN
assert_series_equal(ts.bfill(), ts.fillna(method='bfill'))
def test_timedelta64_nan(self):
td = Series([timedelta(days=i) for i in range(10)])
# nan ops on timedeltas
td1 = td.copy()
td1[0] = np.nan
assert isnull(td1[0])
assert td1[0].value == iNaT
td1[0] = td[0]
assert not isnull(td1[0])
td1[1] = iNaT
assert isnull(td1[1])
assert td1[1].value == iNaT
td1[1] = td[1]
assert not isnull(td1[1])
td1[2] = NaT
assert isnull(td1[2])
assert td1[2].value == iNaT
td1[2] = td[2]
assert not isnull(td1[2])
# boolean setting
# this doesn't work, not sure numpy even supports it
# result = td[(td>np.timedelta64(timedelta(days=3))) &
# td<np.timedelta64(timedelta(days=7)))] = np.nan
# assert isnull(result).sum() == 7
    # NumPy limitation =(
# def test_logical_range_select(self):
# np.random.seed(12345)
# selector = -0.5 <= self.ts <= 0.5
# expected = (self.ts >= -0.5) & (self.ts <= 0.5)
# assert_series_equal(selector, expected)
def test_dropna_empty(self):
s = Series([])
assert len(s.dropna()) == 0
s.dropna(inplace=True)
assert len(s) == 0
# invalid axis
pytest.raises(ValueError, s.dropna, axis=1)
def test_datetime64_tz_dropna(self):
# DatetimeBlock
s = Series([Timestamp('2011-01-01 10:00'), pd.NaT, Timestamp(
'2011-01-03 10:00'), pd.NaT])
result = s.dropna()
expected = Series([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-03 10:00')], index=[0, 2])
tm.assert_series_equal(result, expected)
# DatetimeBlockTZ
idx = pd.DatetimeIndex(['2011-01-01 10:00', pd.NaT,
'2011-01-03 10:00', pd.NaT],
tz='Asia/Tokyo')
s = pd.Series(idx)
assert s.dtype == 'datetime64[ns, Asia/Tokyo]'
result = s.dropna()
expected = Series([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-03 10:00', tz='Asia/Tokyo')],
index=[0, 2])
assert result.dtype == 'datetime64[ns, Asia/Tokyo]'
tm.assert_series_equal(result, expected)
def test_dropna_no_nan(self):
for s in [Series([1, 2, 3], name='x'), Series(
[False, True, False], name='x')]:
result = s.dropna()
tm.assert_series_equal(result, s)
assert result is not s
s2 = s.copy()
s2.dropna(inplace=True)
tm.assert_series_equal(s2, s)
def test_dropna_intervals(self):
s = Series([np.nan, 1, 2, 3], IntervalIndex.from_arrays(
[np.nan, 0, 1, 2],
[np.nan, 1, 2, 3]))
result = s.dropna()
expected = s.iloc[1:]
assert_series_equal(result, expected)
def test_valid(self):
ts = self.ts.copy()
ts[::2] = np.NaN
result = ts.valid()
assert len(result) == ts.count()
tm.assert_series_equal(result, ts[1::2])
tm.assert_series_equal(result, ts[pd.notnull(ts)])
def test_isnull(self):
ser = Series([0, 5.4, 3, nan, -0.001])
np.array_equal(ser.isnull(),
Series([False, False, False, True, False]).values)
ser = Series(["hi", "", nan])
np.array_equal(ser.isnull(), Series([False, False, True]).values)
def test_notnull(self):
ser = Series([0, 5.4, 3, nan, -0.001])
np.array_equal(ser.notnull(),
Series([True, True, True, False, True]).values)
ser = Series(["hi", "", nan])
np.array_equal(ser.notnull(), Series([True, True, False]).values)
def test_pad_nan(self):
x = Series([np.nan, 1., np.nan, 3., np.nan], ['z', 'a', 'b', 'c', 'd'],
dtype=float)
x.fillna(method='pad', inplace=True)
expected = Series([np.nan, 1.0, 1.0, 3.0, 3.0],
['z', 'a', 'b', 'c', 'd'], dtype=float)
assert_series_equal(x[1:], expected[1:])
assert np.isnan(x[0]), np.isnan(expected[0])
def test_pad_require_monotonicity(self):
rng = date_range('1/1/2000', '3/1/2000', freq='B')
# neither monotonic increasing or decreasing
rng2 = rng[[1, 0, 2]]
pytest.raises(ValueError, rng2.get_indexer, rng, method='pad')
def test_dropna_preserve_name(self):
self.ts[:5] = np.nan
result = self.ts.dropna()
assert result.name == self.ts.name
name = self.ts.name
ts = self.ts.copy()
ts.dropna(inplace=True)
assert ts.name == name
def test_fill_value_when_combine_const(self):
# GH12723
s = Series([0, 1, np.nan, 3, 4, 5])
exp = s.fillna(0).add(2)
res = s.add(2, fill_value=0)
assert_series_equal(res, exp)
def test_series_fillna_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
result = s[:2].reindex(index)
result = result.fillna(method='pad', limit=5)
expected = s[:2].reindex(index).fillna(method='pad')
expected[-3:] = np.nan
assert_series_equal(result, expected)
result = s[-2:].reindex(index)
result = result.fillna(method='bfill', limit=5)
expected = s[-2:].reindex(index).fillna(method='backfill')
expected[:3] = np.nan
assert_series_equal(result, expected)
def test_sparse_series_fillna_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
ss = s[:2].reindex(index).to_sparse()
result = ss.fillna(method='pad', limit=5)
expected = ss.fillna(method='pad', limit=5)
expected = expected.to_dense()
expected[-3:] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
ss = s[-2:].reindex(index).to_sparse()
result = ss.fillna(method='backfill', limit=5)
expected = ss.fillna(method='backfill')
expected = expected.to_dense()
expected[:3] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
def test_sparse_series_pad_backfill_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
s = s.to_sparse()
result = s[:2].reindex(index, method='pad', limit=5)
expected = s[:2].reindex(index).fillna(method='pad')
expected = expected.to_dense()
expected[-3:] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
result = s[-2:].reindex(index, method='backfill', limit=5)
expected = s[-2:].reindex(index).fillna(method='backfill')
expected = expected.to_dense()
expected[:3] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
def test_series_pad_backfill_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
result = s[:2].reindex(index, method='pad', limit=5)
expected = s[:2].reindex(index).fillna(method='pad')
expected[-3:] = np.nan
assert_series_equal(result, expected)
result = s[-2:].reindex(index, method='backfill', limit=5)
expected = s[-2:].reindex(index).fillna(method='backfill')
expected[:3] = np.nan
assert_series_equal(result, expected)
class TestSeriesInterpolateData(TestData):
def test_interpolate(self):
ts = Series(np.arange(len(self.ts), dtype=float), self.ts.index)
ts_copy = ts.copy()
ts_copy[5:10] = np.NaN
linear_interp = ts_copy.interpolate(method='linear')
tm.assert_series_equal(linear_interp, ts)
ord_ts = Series([d.toordinal() for d in self.ts.index],
index=self.ts.index).astype(float)
ord_ts_copy = ord_ts.copy()
ord_ts_copy[5:10] = np.NaN
time_interp = ord_ts_copy.interpolate(method='time')
tm.assert_series_equal(time_interp, ord_ts)
# try time interpolation on a non-TimeSeries
# Only raises ValueError if there are NaNs.
non_ts = self.series.copy()
non_ts[0] = np.NaN
pytest.raises(ValueError, non_ts.interpolate, method='time')
def test_interpolate_pchip(self):
tm._skip_if_no_scipy()
_skip_if_no_pchip()
ser = Series(np.sort(np.random.uniform(size=100)))
# interpolate at new_index
new_index = ser.index.union(Index([49.25, 49.5, 49.75, 50.25, 50.5,
50.75]))
interp_s = ser.reindex(new_index).interpolate(method='pchip')
# does not blow up, GH5977
interp_s[49:51]
def test_interpolate_akima(self):
tm._skip_if_no_scipy()
_skip_if_no_akima()
ser = Series([10, 11, 12, 13])
expected = Series([11.00, 11.25, 11.50, 11.75,
12.00, 12.25, 12.50, 12.75, 13.00],
index=Index([1.0, 1.25, 1.5, 1.75,
2.0, 2.25, 2.5, 2.75, 3.0]))
# interpolate at new_index
new_index = ser.index.union(Index([1.25, 1.5, 1.75, 2.25, 2.5, 2.75]))
interp_s = ser.reindex(new_index).interpolate(method='akima')
assert_series_equal(interp_s[1:3], expected)
def test_interpolate_piecewise_polynomial(self):
tm._skip_if_no_scipy()
ser = Series([10, 11, 12, 13])
expected = Series([11.00, 11.25, 11.50, 11.75,
12.00, 12.25, 12.50, 12.75, 13.00],
index=Index([1.0, 1.25, 1.5, 1.75,
2.0, 2.25, 2.5, 2.75, 3.0]))
# interpolate at new_index
new_index = ser.index.union(Index([1.25, 1.5, 1.75, 2.25, 2.5, 2.75]))
interp_s = ser.reindex(new_index).interpolate(
method='piecewise_polynomial')
assert_series_equal(interp_s[1:3], expected)
def test_interpolate_from_derivatives(self):
tm._skip_if_no_scipy()
ser = Series([10, 11, 12, 13])
expected = Series([11.00, 11.25, 11.50, 11.75,
12.00, 12.25, 12.50, 12.75, 13.00],
index=Index([1.0, 1.25, 1.5, 1.75,
2.0, 2.25, 2.5, 2.75, 3.0]))
# interpolate at new_index
new_index = ser.index.union(Index([1.25, 1.5, 1.75, 2.25, 2.5, 2.75]))
interp_s = ser.reindex(new_index).interpolate(
method='from_derivatives')
assert_series_equal(interp_s[1:3], expected)
def test_interpolate_corners(self):
s = Series([np.nan, np.nan])
assert_series_equal(s.interpolate(), s)
s = Series([]).interpolate()
assert_series_equal(s.interpolate(), s)
tm._skip_if_no_scipy()
s = Series([np.nan, np.nan])
assert_series_equal(s.interpolate(method='polynomial', order=1), s)
s = Series([]).interpolate()
assert_series_equal(s.interpolate(method='polynomial', order=1), s)
def test_interpolate_index_values(self):
s = Series(np.nan, index=np.sort(np.random.rand(30)))
s[::3] = np.random.randn(10)
vals = s.index.values.astype(float)
result = s.interpolate(method='index')
expected = s.copy()
bad = isnull(expected.values)
good = ~bad
expected = Series(np.interp(vals[bad], vals[good],
s.values[good]),
index=s.index[bad])
assert_series_equal(result[bad], expected)
# 'values' is synonymous with 'index' for the method kwarg
other_result = s.interpolate(method='values')
assert_series_equal(other_result, result)
assert_series_equal(other_result[bad], expected)
def test_interpolate_non_ts(self):
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
with pytest.raises(ValueError):
s.interpolate(method='time')
# New interpolation tests
def test_nan_interpolate(self):
s = Series([0, 1, np.nan, 3])
result = s.interpolate()
expected = Series([0., 1., 2., 3.])
assert_series_equal(result, expected)
tm._skip_if_no_scipy()
result = s.interpolate(method='polynomial', order=1)
assert_series_equal(result, expected)
def test_nan_irregular_index(self):
s = Series([1, 2, np.nan, 4], index=[1, 3, 5, 9])
result = s.interpolate()
expected = Series([1., 2., 3., 4.], index=[1, 3, 5, 9])
assert_series_equal(result, expected)
def test_nan_str_index(self):
s = Series([0, 1, 2, np.nan], index=list('abcd'))
result = s.interpolate()
expected = Series([0., 1., 2., 2.], index=list('abcd'))
assert_series_equal(result, expected)
def test_interp_quad(self):
tm._skip_if_no_scipy()
sq = Series([1, 4, np.nan, 16], index=[1, 2, 3, 4])
result = sq.interpolate(method='quadratic')
expected = Series([1., 4., 9., 16.], index=[1, 2, 3, 4])
assert_series_equal(result, expected)
def test_interp_scipy_basic(self):
tm._skip_if_no_scipy()
s = Series([1, 3, np.nan, 12, np.nan, 25])
# slinear
expected = Series([1., 3., 7.5, 12., 18.5, 25.])
result = s.interpolate(method='slinear')
assert_series_equal(result, expected)
result = s.interpolate(method='slinear', downcast='infer')
assert_series_equal(result, expected)
# nearest
expected = Series([1, 3, 3, 12, 12, 25])
result = s.interpolate(method='nearest')
assert_series_equal(result, expected.astype('float'))
result = s.interpolate(method='nearest', downcast='infer')
assert_series_equal(result, expected)
# zero
expected = Series([1, 3, 3, 12, 12, 25])
result = s.interpolate(method='zero')
assert_series_equal(result, expected.astype('float'))
result = s.interpolate(method='zero', downcast='infer')
assert_series_equal(result, expected)
# quadratic
# GH #15662.
# new cubic and quadratic interpolation algorithms from scipy 0.19.0.
# previously `splmake` was used. See scipy/scipy#6710
if _is_scipy_ge_0190:
expected = Series([1, 3., 6.823529, 12., 18.058824, 25.])
else:
expected = Series([1, 3., 6.769231, 12., 18.230769, 25.])
result = s.interpolate(method='quadratic')
assert_series_equal(result, expected)
result = s.interpolate(method='quadratic', downcast='infer')
assert_series_equal(result, expected)
# cubic
expected = Series([1., 3., 6.8, 12., 18.2, 25.])
result = s.interpolate(method='cubic')
assert_series_equal(result, expected)
def test_interp_limit(self):
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
expected = Series([1., 3., 5., 7., np.nan, 11.])
result = s.interpolate(method='linear', limit=2)
assert_series_equal(result, expected)
# GH 9217, make sure limit is an int and greater than 0
methods = ['linear', 'time', 'index', 'values', 'nearest', 'zero',
'slinear', 'quadratic', 'cubic', 'barycentric', 'krogh',
'polynomial', 'spline', 'piecewise_polynomial', None,
'from_derivatives', 'pchip', 'akima']
s = pd.Series([1, 2, np.nan, np.nan, 5])
for limit in [-1, 0, 1., 2.]:
for method in methods:
with pytest.raises(ValueError):
s.interpolate(limit=limit, method=method)
def test_interp_limit_forward(self):
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
# Provide 'forward' (the default) explicitly here.
expected = Series([1., 3., 5., 7., np.nan, 11.])
result = s.interpolate(method='linear', limit=2,
limit_direction='forward')
assert_series_equal(result, expected)
result = s.interpolate(method='linear', limit=2,
limit_direction='FORWARD')
assert_series_equal(result, expected)
def test_interp_limit_bad_direction(self):
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
pytest.raises(ValueError, s.interpolate, method='linear', limit=2,
limit_direction='abc')
# raises an error even if no limit is specified.
pytest.raises(ValueError, s.interpolate, method='linear',
limit_direction='abc')
def test_interp_limit_direction(self):
# These tests are for issue #9218 -- fill NaNs in both directions.
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
expected = Series([1., 3., np.nan, 7., 9., 11.])
result = s.interpolate(method='linear', limit=2,
limit_direction='backward')
assert_series_equal(result, expected)
expected = Series([1., 3., 5., np.nan, 9., 11.])
result = s.interpolate(method='linear', limit=1,
limit_direction='both')
assert_series_equal(result, expected)
# Check that this works on a longer series of nans.
s = Series([1, 3, np.nan, np.nan, np.nan, 7, 9, np.nan, np.nan, 12,
np.nan])
expected = Series([1., 3., 4., 5., 6., 7., 9., 10., 11., 12., 12.])
result = s.interpolate(method='linear', limit=2,
limit_direction='both')
assert_series_equal(result, expected)
expected = Series([1., 3., 4., np.nan, 6., 7., 9., 10., 11., 12., 12.])
result = s.interpolate(method='linear', limit=1,
limit_direction='both')
assert_series_equal(result, expected)
def test_interp_limit_to_ends(self):
        # These tests are for issue #10420 -- flow back to beginning.
s = Series([np.nan, np.nan, 5, 7, 9, np.nan])
expected = Series([5., 5., 5., 7., 9., np.nan])
result = s.interpolate(method='linear', limit=2,
limit_direction='backward')
assert_series_equal(result, expected)
expected = Series([5., 5., 5., 7., 9., 9.])
result = s.interpolate(method='linear', limit=2,
limit_direction='both')
assert_series_equal(result, expected)
def test_interp_limit_before_ends(self):
        # These tests are for issue #11115 -- limit ends properly.
s = Series([np.nan, np.nan, 5, 7, np.nan, np.nan])
expected = Series([np.nan, np.nan, 5., 7., 7., np.nan])
result = s.interpolate(method='linear', limit=1,
limit_direction='forward')
assert_series_equal(result, expected)
expected = Series([np.nan, 5., 5., 7., np.nan, np.nan])
result = s.interpolate(method='linear', limit=1,
limit_direction='backward')
assert_series_equal(result, expected)
expected = Series([np.nan, 5., 5., 7., 7., np.nan])
result = s.interpolate(method='linear', limit=1,
limit_direction='both')
assert_series_equal(result, expected)
def test_interp_all_good(self):
# scipy
tm._skip_if_no_scipy()
s = Series([1, 2, 3])
result = s.interpolate(method='polynomial', order=1)
assert_series_equal(result, s)
# non-scipy
result = s.interpolate()
assert_series_equal(result, s)
def test_interp_multiIndex(self):
idx = MultiIndex.from_tuples([(0, 'a'), (1, 'b'), (2, 'c')])
s = Series([1, 2, np.nan], index=idx)
expected = s.copy()
expected.loc[2] = 2
result = s.interpolate()
assert_series_equal(result, expected)
tm._skip_if_no_scipy()
with pytest.raises(ValueError):
s.interpolate(method='polynomial', order=1)
def test_interp_nonmono_raise(self):
tm._skip_if_no_scipy()
s = Series([1, np.nan, 3], index=[0, 2, 1])
with pytest.raises(ValueError):
s.interpolate(method='krogh')
def test_interp_datetime64(self):
tm._skip_if_no_scipy()
df = Series([1, np.nan, 3], index=date_range('1/1/2000', periods=3))
result = df.interpolate(method='nearest')
expected = Series([1., 1., 3.],
index=date_range('1/1/2000', periods=3))
assert_series_equal(result, expected)
def test_interp_limit_no_nans(self):
# GH 7173
s = pd.Series([1., 2., 3.])
result = s.interpolate(limit=1)
expected = s
assert_series_equal(result, expected)
def test_no_order(self):
tm._skip_if_no_scipy()
s = Series([0, 1, np.nan, 3])
with pytest.raises(ValueError):
s.interpolate(method='polynomial')
with pytest.raises(ValueError):
s.interpolate(method='spline')
def test_spline(self):
tm._skip_if_no_scipy()
s = Series([1, 2, np.nan, 4, 5, np.nan, 7])
result = s.interpolate(method='spline', order=1)
expected = Series([1., 2., 3., 4., 5., 6., 7.])
assert_series_equal(result, expected)
def test_spline_extrapolate(self):
tm.skip_if_no_package(
'scipy', min_version='0.15',
app='setting ext on scipy.interpolate.UnivariateSpline')
s = Series([1, 2, 3, 4, np.nan, 6, np.nan])
result3 = s.interpolate(method='spline', order=1, ext=3)
expected3 = Series([1., 2., 3., 4., 5., 6., 6.])
assert_series_equal(result3, expected3)
result1 = s.interpolate(method='spline', order=1, ext=0)
expected1 = Series([1., 2., 3., 4., 5., 6., 7.])
assert_series_equal(result1, expected1)
def test_spline_smooth(self):
tm._skip_if_no_scipy()
s = Series([1, 2, np.nan, 4, 5.1, np.nan, 7])
assert (s.interpolate(method='spline', order=3, s=0)[5] !=
s.interpolate(method='spline', order=3)[5])
def test_spline_interpolation(self):
tm._skip_if_no_scipy()
s = Series(np.arange(10) ** 2)
s[np.random.randint(0, 9, 3)] = np.nan
result1 = s.interpolate(method='spline', order=1)
expected1 = s.interpolate(method='spline', order=1)
assert_series_equal(result1, expected1)
def test_spline_error(self):
# see gh-10633
tm._skip_if_no_scipy()
s = pd.Series(np.arange(10) ** 2)
s[np.random.randint(0, 9, 3)] = np.nan
with pytest.raises(ValueError):
s.interpolate(method='spline')
with pytest.raises(ValueError):
s.interpolate(method='spline', order=0)
def test_interp_timedelta64(self):
# GH 6424
df = Series([1, np.nan, 3],
index=pd.to_timedelta([1, 2, 3]))
result = df.interpolate(method='time')
expected = Series([1., 2., 3.],
index=pd.to_timedelta([1, 2, 3]))
assert_series_equal(result, expected)
# test for non uniform spacing
df = Series([1, np.nan, 3],
index=pd.to_timedelta([1, 2, 4]))
result = df.interpolate(method='time')
expected = Series([1., 1.666667, 3.],
index=pd.to_timedelta([1, 2, 4]))
assert_series_equal(result, expected)
def test_series_interpolate_method_values(self):
# #1646
ts = _simple_ts('1/1/2000', '1/20/2000')
ts[::2] = np.nan
result = ts.interpolate(method='values')
exp = ts.interpolate()
assert_series_equal(result, exp)
def test_series_interpolate_intraday(self):
# #1698
index = pd.date_range('1/1/2012', periods=4, freq='12D')
ts = pd.Series([0, 12, 24, 36], index)
new_index = index.append(index + pd.DateOffset(days=1)).sort_values()
exp = ts.reindex(new_index).interpolate(method='time')
index = pd.date_range('1/1/2012', periods=4, freq='12H')
ts = pd.Series([0, 12, 24, 36], index)
new_index = index.append(index + pd.DateOffset(hours=1)).sort_values()
result = ts.reindex(new_index).interpolate(method='time')
tm.assert_numpy_array_equal(result.values, exp.values)
| mit |
theusual/kaggle-seeclickfix-ensemble | Bryan/data_io.py | 2 | 4219 | """
Functions for data IO
"""
__author__ = 'Bryan Gregory'
__email__ = '[email protected]'
__date__ = '09-06-2013'
#Internal modules
import utils
#Start logger to record all info, warnings, and errors to Logs/logfile.log
log = utils.start_logging(__name__)
#External modules
import json
import csv
import gc
import pandas as pd
import time
import os
from datetime import datetime
from sklearn.externals import joblib
#import JSON data into a dict
def load_json(file_path):
return [json.loads(line) for line in open(file_path)]
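# Note: each line of the file is parsed independently, so the expected input is
# JSON-lines; e.g. a file with '{"id": 1}' and '{"id": 2}' on separate lines
# yields a list of two dicts.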
#import delimited flat file into a list
def load_flatfile(file_path, delimiter=''):
temp_array = []
#if no delimiter is specified, try to use the built-in delimiter detection
if delimiter == '':
csv_reader = csv.reader(open(file_path))
else:
        # the second positional argument of csv.reader is the dialect, so the
        # delimiter has to be passed as a keyword argument
        csv_reader = csv.reader(open(file_path), delimiter=delimiter)
for line in csv_reader:
temp_array += line
return temp_array #[line for line in csv_reader]
#import delimited flat file into a pandas dataframe
def load_flatfile_to_df(file_path, delimiter=''):
#if no delimiter is specified, try to use the built-in delimiter detection
if delimiter == '':
return pd.read_csv(file_path)
else:
        return pd.read_csv(file_path, sep=delimiter)
def save_predictions(df,target,model_name='',directory='Submits/',estimator_class='',note=''):
timestamp = datetime.now().strftime('%m-%d-%y_%H%M')
filename = directory+timestamp+'--'+model_name+'_'+estimator_class+'_'+note+'.csv'
#---Perform any manual predictions cleanup that may be necessary---#
#Save predictions
try:
df[target] = [x[0] for x in df[target]]
except IndexError:
df[target] = [x for x in df[target]]
df.ix[:,['id',target]].to_csv(filename, index=False)
log.info('Submission file saved: %s' % filename)
def save_combined_predictions(df,directory,filename,note=''):
#If previous combined predictions already exist, archive existing ones by renaming to append datetime
try:
modified_date = time.strptime(time.ctime(os.path.getmtime(directory+filename)), '%a %b %d %H:%M:%S %Y')
modified_date = datetime.fromtimestamp(time.mktime(modified_date)).strftime('%m-%d-%y_%H%M')
archived_file = directory+'Archive/'+filename[:len(filename)-4]+'--'+modified_date+'.csv'
os.rename(directory+filename,archived_file)
log.info('File already exists with given filename, archiving old file to: '+ archived_file)
except WindowsError:
pass
#Save predictions
df.to_csv(directory+filename, index=False)
log.info('Predictions saved: %s' % filename)
def save_cached_object(object, filename, directory='Cache/'):
"""Save cached objects in pickel format using joblib compression.
If a previous cached file exists, then get its modified date and append it to filename and archive it
"""
if filename[-4:] != '.pkl':
filename = filename+'.pkl'
try:
modified_date = time.strptime(time.ctime(os.path.getmtime(directory+filename)), '%a %b %d %H:%M:%S %Y')
modified_date = datetime.fromtimestamp(time.mktime(modified_date)).strftime('%m-%d-%y_%H%M')
archived_file = directory+'Archive/'+filename[:len(filename)-4]+'--'+modified_date+'.pkl'
os.rename(directory+filename,archived_file)
log.info('Cached object already exists with given filename, archiving old object to: '+ archived_file)
except WindowsError:
pass
joblib.dump(object, directory+filename, compress=9)
log.info('New object cached to: '+directory+filename)
def load_cached_object(filename, directory='Cache/'):
if filename[-4:] != '.pkl':
filename = filename+'.pkl'
try:
object = joblib.load(directory+filename)
log.info('Successfully loaded object from: '+directory+filename)
except IOError:
        log.info('Cached object does not exist: '+directory+filename)
        object = None  # return None rather than raising a NameError below
return object
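# Illustrative usage (the filename below is a placeholder, not part of the project):
#   save_cached_object(model, 'my_model')    # writes Cache/my_model.pkl, archiving any old copy
#   model = load_cached_object('my_model')   # returns None if nothing has been cached yet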
def save_text_features(output_file, feature_names):
o_f = open( output_file, 'wb' )
feature_names = '\n'.join( feature_names )
o_f.write( feature_names ) | bsd-3-clause |
deepmind/bsuite | bsuite/logging/sqlite_load.py | 1 | 2268 | # python3
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Read functionality for local SQLite-based experiments."""
from typing import List, Optional, Tuple
from bsuite import sweep
from bsuite.logging import logging_utils
import pandas as pd
import sqlite3
def load_one_result_set(
db_path: str,
connection: Optional[sqlite3.Connection] = None) -> pd.DataFrame:
"""Returns a pandas DataFrame of bsuite results.
Args:
db_path: Path to the database file.
connection: Optional connection, for testing purposes. If supplied,
`db_path` will be ignored.
Returns:
A pandas DataFrame containing bsuite results.
"""
if connection is None:
connection = sqlite3.connect(db_path)
# Get a list of all table names in this database.
query = 'select name from sqlite_master where type=\'table\';'
with connection:
table_names = connection.execute(query).fetchall()
dataframes = []
for table_name in table_names:
dataframe = pd.read_sql_query('select * from ' + table_name[0], connection)
dataframe['bsuite_id'] = [
table_name[0] + sweep.SEPARATOR + str(setting_index)
for setting_index in dataframe.setting_index]
dataframes.append(dataframe)
df = pd.concat(dataframes, sort=False)
return logging_utils.join_metadata(df)
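# Illustrative usage (the path below is a placeholder):
#   df = load_one_result_set('/tmp/bsuite_demo.db')
#   # df holds the concatenated per-experiment tables, with a 'bsuite_id'
#   # column of the form '<experiment_name><sweep.SEPARATOR><setting_index>'.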
def load_bsuite(
results_dirs: logging_utils.PathCollection
) -> Tuple[pd.DataFrame, List[str]]:
"""Returns a pandas DataFrame of bsuite results."""
return logging_utils.load_multiple_runs(
path_collection=results_dirs,
single_load_fn=load_one_result_set,
)
| apache-2.0 |
ywcui1990/htmresearch | projects/combined_sequences/object_convergence.py | 3 | 27757 | # Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2016, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
This file plots the behavior of L4-L2-TM network as you train it on objects.
"""
import random
import time
import os
from math import ceil
import numpy
import cPickle
from multiprocessing import Pool, cpu_count
import matplotlib.pyplot as plt
import matplotlib as mpl
mpl.rcParams['pdf.fonttype'] = 42
from matplotlib import rc
rc('font',**{'family':'sans-serif','sans-serif':['Arial']})
from htmresearch.frameworks.layers.combined_sequence_experiment import (
L4TMExperiment
)
from htmresearch.frameworks.layers.object_machine_factory import (
createObjectMachine
)
def locateConvergencePoint(stats, minOverlap, maxOverlap):
"""
Walk backwards through stats until you locate the first point that diverges
from target overlap values. We need this to handle cases where it might get
to target values, diverge, and then get back again. We want the last
convergence point.
"""
for i,v in enumerate(stats[::-1]):
if not (v >= minOverlap and v <= maxOverlap):
return len(stats)-i + 1
# Never differs - converged in one iteration
return 1
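# Illustrative check of locateConvergencePoint (values made up for this sketch):
# with minOverlap=30 and maxOverlap=40,
#
#   locateConvergencePoint([0, 5, 38, 40, 40], 30, 40)  # -> 3
#
# i.e. the trace enters (and stays within) the target band from the third
# sensation onwards.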
def averageConvergencePoint(inferenceStats, prefix, minOverlap, maxOverlap,
settlingTime):
"""
inferenceStats contains activity traces while the system visits each object.
Given the i'th object, inferenceStats[i] contains activity statistics for
each column for each region for the entire sequence of sensations.
For each object, compute the convergence time - the first point when all
L2 columns have converged.
Return the average convergence time across all objects.
Given inference statistics for a bunch of runs, locate all traces with the
given prefix. For each trace locate the iteration where it finally settles
on targetValue. Return the average settling iteration across all runs.
"""
convergenceSum = 0.0
numCorrect = 0.0
inferenceLength = 1000000
# For each object
for stats in inferenceStats:
# For each L2 column locate convergence time
convergencePoint = 0.0
for key in stats.iterkeys():
if prefix in key:
columnConvergence = locateConvergencePoint(
stats[key], minOverlap, maxOverlap)
# Ensure this column has converged by the last iteration
# assert(columnConvergence <= len(stats[key]))
convergencePoint = max(convergencePoint, columnConvergence)
convergenceSum += ceil(float(convergencePoint)/settlingTime)
if ceil(float(convergencePoint)/settlingTime) <= inferenceLength:
numCorrect += 1
return convergenceSum/len(inferenceStats), numCorrect/len(inferenceStats)
def averageSequenceAccuracy(inferenceStats, minOverlap, maxOverlap):
"""
inferenceStats contains activity traces while the system visits each object.
Given the i'th object, inferenceStats[i] contains activity statistics for
each column for each region for the entire sequence of sensations.
For each object, decide whether the TM uniquely classified it by checking that
the number of predictedActive cells are in an acceptable range.
"""
numCorrect = 0.0
numStats = 0.0
prefix = "TM PredictedActive"
# For each object
for stats in inferenceStats:
# Keep running total of how often the number of predictedActive cells are
# in the range.
for key in stats.iterkeys():
if prefix in key:
for numCells in stats[key]:
numStats += 1.0
if numCells in range(minOverlap, maxOverlap+1):
numCorrect += 1.0
return numCorrect / numStats
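# Illustrative sketch of the stats layout this helper expects (key names follow
# the "TM PredictedActive C0" convention used later in this file; numbers are
# made up):
#
#   stats = [{"TM PredictedActive C0": [0, 18, 20, 22]}]
#   averageSequenceAccuracy(stats, 15, 25)  # -> 0.75 (3 of 4 counts in range)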
def runExperiment(args):
"""
  Run a single object convergence experiment with the given parameters.
args is a dict representing the parameters. We do it this way to support
multiprocessing. args contains one or more of the following keys:
@param noiseLevel (float) Noise level to add to the locations and features
during inference. Default: None
@param profile (bool) If True, the network will be profiled after
learning and inference. Default: False
@param numObjects (int) The number of objects we will train.
Default: 10
@param numPoints (int) The number of points on each object.
Default: 10
@param pointRange (int) Creates objects each with points ranging from
[numPoints,...,numPoints+pointRange-1]
A total of numObjects * pointRange objects will be
created.
Default: 1
@param numLocations (int) For each point, the number of locations to choose
from. Default: 10
@param numFeatures (int) For each point, the number of features to choose
from. Default: 10
@param numColumns (int) The total number of cortical columns in network.
Default: 2
@param networkType (string)The type of network to use. Options are:
"MultipleL4L2Columns",
"MultipleL4L2ColumnsWithTopology" and
"MultipleL4L2ColumnsWithRandomTopology".
Default: "MultipleL4L2Columns"
@param settlingTime (int) Number of iterations we wait to let columns
stabilize. Important for multicolumn experiments
with lateral connections.
@param includeRandomLocation (bool) If True, a random location SDR will be
generated during inference for each feature.
  The method returns the args dict updated with additional result keys, including:
convergencePoint (int) The average number of iterations it took
to converge across all objects
objects (pairs) The list of objects we trained on
"""
numObjects = args.get("numObjects", 10)
numLocations = args.get("numLocations", 10)
numFeatures = args.get("numFeatures", 10)
numColumns = args.get("numColumns", 1)
networkType = args.get("networkType", "L4L2TMColumn")
profile = args.get("profile", False)
noiseLevel = args.get("noiseLevel", None) # TODO: implement this?
numPoints = args.get("numPoints", 10)
trialNum = args.get("trialNum", 42)
pointRange = args.get("pointRange", 1)
plotInferenceStats = args.get("plotInferenceStats", True)
settlingTime = args.get("settlingTime", 3)
includeRandomLocation = args.get("includeRandomLocation", False)
inputSize = args.get("inputSize", 512)
numInputBits = args.get("inputBits", 20)
# Create the objects
objects = createObjectMachine(
machineType="simple",
numInputBits=numInputBits,
sensorInputSize=inputSize,
externalInputSize=1024,
numCorticalColumns=numColumns,
numFeatures=numFeatures,
seed=trialNum
)
for p in range(pointRange):
objects.createRandomObjects(numObjects, numPoints=numPoints+p,
numLocations=numLocations,
numFeatures=numFeatures)
r = objects.objectConfusion()
print "Average common pairs=", r[0],
print ", locations=",r[1],
print ", features=",r[2]
# print "Total number of objects created:",len(objects.getObjects())
# print "Objects are:"
# for o in objects:
# pairs = objects[o]
# pairs.sort()
# print str(o) + ": " + str(pairs)
# Setup experiment and train the network. Ensure both TM layers have identical
# parameters.
name = "convergence_O%03d_L%03d_F%03d_T%03d" % (
numObjects, numLocations, numFeatures, trialNum
)
exp = L4TMExperiment(
name=name,
numCorticalColumns=numColumns,
networkType = networkType,
inputSize=inputSize,
numInputBits=numInputBits,
externalInputSize=1024,
numExternalInputBits=numInputBits,
seed=trialNum,
L4Overrides={"initialPermanence": 0.41,
"activationThreshold": 18,
"minThreshold": 18,
"basalPredictedSegmentDecrement": 0.0001},
)
# We want to traverse the features of each object randomly a few times before
  # moving on to the next object. Create the SDRs that we need for this.
objectsToLearn = objects.provideObjectsToLearn()
objectTraversals = {}
for objectId in objectsToLearn:
objectTraversals[objectId] = objects.randomTraversal(
objectsToLearn[objectId], settlingTime)
# Train the network on all the SDRs for all the objects
exp.learnObjects(objectTraversals)
if profile:
exp.printProfile(reset=True)
# For inference, we will check and plot convergence for each object. For each
# object, we create a sequence of random sensations for each column. We will
# present each sensation for settlingTime time steps to let it settle and
# ensure it converges.
for objectId in objects:
obj = objects[objectId]
objectSensations = {}
for c in range(numColumns):
objectSensations[c] = []
assert numColumns == 1
# Create sequence of sensations for this object for one column. The total
# number of sensations is equal to the number of points on the object. No
# point should be visited more than once.
objectCopy = [pair for pair in obj]
random.shuffle(objectCopy)
for pair in objectCopy:
objectSensations[0].append(pair)
inferConfig = {
"object": objectId,
"numSteps": len(objectSensations[0]),
"pairs": objectSensations,
"includeRandomLocation": includeRandomLocation,
}
inferenceSDRs = objects.provideObjectToInfer(inferConfig)
exp.infer(inferenceSDRs, objectName=objectId)
if profile:
exp.printProfile(reset=True)
if plotInferenceStats:
plotOneInferenceRun(
exp.statistics[objectId],
fields=[
("L4 PredictedActive", "Predicted active cells in sensorimotor layer"),
("TM NextPredicted", "Predicted cells in temporal sequence layer"),
("TM PredictedActive", "Predicted active cells in temporal sequence layer"),
],
basename=exp.name,
experimentID=objectId,
plotDir=os.path.join(os.path.dirname(os.path.realpath(__file__)),
"detailed_plots")
)
# Compute overall inference statistics
infStats = exp.getInferenceStats()
convergencePoint, sensorimotorAccuracy = averageConvergencePoint(
infStats,"L2 Representation", 30, 40, settlingTime)
sequenceAccuracy = averageSequenceAccuracy(infStats, 15, 25)
predictedActive = numpy.zeros(len(infStats))
predicted = numpy.zeros(len(infStats))
predictedActiveL4 = numpy.zeros(len(infStats))
predictedL4 = numpy.zeros(len(infStats))
for i,stat in enumerate(infStats):
predictedActive[i] = float(sum(stat["TM PredictedActive C0"][2:])) / len(stat["TM PredictedActive C0"][2:])
predicted[i] = float(sum(stat["TM NextPredicted C0"][2:])) / len(stat["TM NextPredicted C0"][2:])
predictedActiveL4[i] = float(sum(stat["L4 PredictedActive C0"])) / len(stat["L4 PredictedActive C0"])
predictedL4[i] = float(sum(stat["L4 Predicted C0"])) / len(stat["L4 Predicted C0"])
print "# objects {} # features {} # locations {} # columns {} trial # {} network type {}".format(
numObjects, numFeatures, numLocations, numColumns, trialNum, networkType)
print "Average convergence point=",convergencePoint,
print "Accuracy:", sensorimotorAccuracy
print "Sequence accuracy:", sequenceAccuracy
print
# Return our convergence point as well as all the parameters and objects
args.update({"objects": objects.getObjects()})
args.update({"convergencePoint":convergencePoint})
args.update({"sensorimotorAccuracyPct": sensorimotorAccuracy})
args.update({"sequenceAccuracyPct": sequenceAccuracy})
args.update({"averagePredictions": predicted.mean()})
args.update({"averagePredictedActive": predictedActive.mean()})
args.update({"averagePredictionsL4": predictedL4.mean()})
args.update({"averagePredictedActiveL4": predictedActiveL4.mean()})
# Can't pickle experiment so can't return it for batch multiprocessing runs.
# However this is very useful for debugging when running in a single thread.
if plotInferenceStats:
args.update({"experiment": exp})
return args
def runExperimentPool(numObjects,
numLocations,
numFeatures,
numWorkers=7,
nTrials=1,
pointRange=1,
numPoints=10,
includeRandomLocation=False,
resultsName="convergence_results.pkl"):
"""
Allows you to run a number of experiments using multiple processes.
For each parameter except numWorkers, pass in a list containing valid values
for that parameter. The cross product of everything is run, and each
combination is run nTrials times.
Returns a list of dict containing detailed results from each experiment.
Also pickles and saves the results in resultsName for later analysis.
Example:
results = runExperimentPool(
numObjects=[10],
numLocations=[5],
numFeatures=[5],
numColumns=[2,3,4,5,6],
numWorkers=8,
nTrials=5)
"""
# Create function arguments for every possibility
args = []
for o in reversed(numObjects):
for l in numLocations:
for f in numFeatures:
for t in range(nTrials):
args.append(
{"numObjects": o,
"numLocations": l,
"numFeatures": f,
"trialNum": t,
"pointRange": pointRange,
"numPoints": numPoints,
"plotInferenceStats": False,
"includeRandomLocation": includeRandomLocation,
"settlingTime": 3,
}
)
print "{} experiments to run, {} workers".format(len(args), numWorkers)
# Run the pool
if numWorkers > 1:
pool = Pool(processes=numWorkers)
result = pool.map(runExperiment, args)
else:
result = []
for arg in args:
result.append(runExperiment(arg))
# Pickle results for later use
with open(resultsName,"wb") as f:
cPickle.dump(result,f)
return result
def plotConvergenceByObject(results, objectRange, featureRange, numTrials):
"""
Plots the convergence graph: iterations vs number of objects.
Each curve shows the convergence for a given number of unique features.
"""
########################################################################
#
# Accumulate all the results per column in a convergence array.
#
# Convergence[f,o] = how long it took it to converge with f unique features
# and o objects.
convergence = numpy.zeros((max(featureRange), max(objectRange) + 1))
for r in results:
if r["numFeatures"] in featureRange:
convergence[r["numFeatures"] - 1, r["numObjects"]] += r["convergencePoint"]
convergence /= numTrials
########################################################################
#
  # Create the plot. x-axis = number of objects in the training set.
plt.figure()
plotPath = os.path.join(os.path.dirname(os.path.realpath(__file__)),
"plots", "convergence_by_object.pdf")
# Plot each curve
legendList = []
colorList = ['r', 'b', 'g', 'm', 'c', 'k', 'y']
for i in range(len(featureRange)):
f = featureRange[i]
print "features={} objectRange={} convergence={}".format(
f,objectRange, convergence[f-1,objectRange])
legendList.append('Unique features={}'.format(f))
plt.plot(objectRange, convergence[f-1,objectRange],
color=colorList[i])
# format
plt.legend(legendList, loc="lower right", prop={'size':10})
plt.xlabel("Number of objects in training set")
plt.xticks(range(0,max(objectRange)+1,10))
plt.yticks(range(0,int(convergence.max())+2))
plt.ylabel("Average number of touches")
plt.title("Number of touches to recognize one object (single column)")
# save
plt.savefig(plotPath)
plt.close()
def plotPredictionsByObject(results, objectRange, featureRange, numTrials,
key="", title="", yaxis=""):
"""
  Plots the average value stored under `key` as a function of the number of
  objects. Each curve shows the results for a given number of unique features.
"""
########################################################################
#
  # Accumulate all the results per column in a predictions array.
  #
  # predictions[f,o] = average value of `key` with f unique features
  # and o objects.
predictions = numpy.zeros((max(featureRange), max(objectRange) + 1))
for r in results:
if r["numFeatures"] in featureRange:
predictions[r["numFeatures"] - 1, r["numObjects"]] += r[key]
predictions /= numTrials
########################################################################
#
  # Create the plot. x-axis = number of objects in the training set.
plt.figure()
plotPath = os.path.join(os.path.dirname(os.path.realpath(__file__)),
"plots", key+"_by_object.pdf")
# Plot each curve
legendList = []
colorList = ['r', 'b', 'g', 'm', 'c', 'k', 'y']
for i in range(len(featureRange)):
f = featureRange[i]
print "features={} objectRange={} convergence={}".format(
f,objectRange, predictions[f-1,objectRange])
legendList.append('Unique features={}'.format(f))
plt.plot(objectRange, predictions[f-1,objectRange],
color=colorList[i])
# format
plt.legend(legendList, loc="center right", prop={'size':10})
plt.xlabel("Number of objects in training set")
plt.xticks(range(0,max(objectRange)+1,10))
plt.yticks(range(0,int(predictions.max())+2,10))
plt.ylabel(yaxis)
plt.title(title)
# save
plt.savefig(plotPath)
plt.close()
def plotSequenceAccuracy(results, featureRange, objectRange,
title="", yaxis=""):
"""
  Plot sequence-layer accuracy as a function of the number of objects, one curve per feature pool size
"""
########################################################################
#
  # Accumulate all the results in an accuracy array.
#
# accuracy[o,f] = accuracy with o objects in training
# and f unique features.
accuracy = numpy.zeros((max(objectRange)+1, max(featureRange) + 1))
totals = numpy.zeros((max(objectRange)+1, max(featureRange) + 1))
for r in results:
if r["numFeatures"] in featureRange and r["numObjects"] in objectRange:
accuracy[r["numObjects"], r["numFeatures"]] += r["sequenceAccuracyPct"]
totals[r["numObjects"], r["numFeatures"]] += 1
for o in objectRange:
for f in featureRange:
accuracy[o, f] = 100.0 * accuracy[o, f] / totals[o, f]
########################################################################
#
# Create the plot.
plt.figure()
plotPath = os.path.join(os.path.dirname(os.path.realpath(__file__)),
"plots", "sequenceAccuracy_by_object.pdf")
# Plot each curve
legendList = []
colorList = ['r', 'b', 'g', 'm', 'c', 'k', 'y']
for i in range(len(featureRange)):
f = featureRange[i]
print "features={} objectRange={} accuracy={}".format(
f,objectRange, accuracy[objectRange, f])
print "Totals=",totals[objectRange, f]
legendList.append('Sequence layer, feature pool size: {}'.format(f))
plt.plot(objectRange, accuracy[objectRange, f], color=colorList[i])
plt.plot(objectRange, [100] * len(objectRange),
color=colorList[len(featureRange)])
legendList.append('Sensorimotor layer')
# format
plt.legend(legendList, bbox_to_anchor=(0., 0.6, 1., .102), loc="right", prop={'size':10})
plt.xlabel("Number of objects")
# plt.xticks(range(0,max(locationRange)+1,10))
# plt.yticks(range(0,int(accuracy.max())+2,10))
plt.ylim(-10.0, 110.0)
plt.ylabel(yaxis)
plt.title(title)
# save
plt.savefig(plotPath)
plt.close()
def plotSequenceAccuracyBargraph(results, featureRange, objectRange,
title="", yaxis=""):
"""
Plot accuracy vs number of features
"""
########################################################################
#
  # Accumulate all the results in an accuracy array.
#
# accuracy[o,f] = accuracy with o objects in training
# and f unique features.
accuracy = numpy.zeros(max(featureRange) + 1)
totals = numpy.zeros(max(featureRange) + 1)
for r in results:
if r["numFeatures"] in featureRange and r["numObjects"] in objectRange:
accuracy[r["numFeatures"]] += r["sequenceAccuracyPct"]
totals[r["numFeatures"]] += 1
for f in featureRange:
accuracy[f] = 100.0 * accuracy[f] / totals[f]
########################################################################
#
# Create the plot.
plt.figure()
plotPath = os.path.join(os.path.dirname(os.path.realpath(__file__)),
"plots", "sequenceAccuracy_by_object_bar.pdf")
# Plot each curve
legendList = []
colorList = ['r', 'b', 'g', 'm', 'c', 'k', 'y']
ind = numpy.arange(len(featureRange))
width = 0.35
for i in range(len(featureRange)):
f = featureRange[i]
print "features={} objectRange={} accuracy={}".format(
f,objectRange, accuracy[f])
print "Totals=",totals[f]
plt.bar(i, 100.0, width, color='black')
plt.bar(i+width, accuracy[f], width, color='white', edgecolor='black')
legendList.append("Sensorimotor layer")
legendList.append("Sequence layer")
plt.legend(legendList, bbox_to_anchor=(0., 0.87, 1.0, .102), loc="right", prop={'size':10})
plt.xlabel("Number of objects")
plt.xticks(ind + width / 2, featureRange)
plt.ylim(0.0, 119.0)
plt.ylabel(yaxis)
plt.title(title)
#
# # save
plt.savefig(plotPath)
plt.close()
def plotOneInferenceRun(stats,
fields,
basename,
plotDir="plots",
experimentID=0):
"""
Plots individual inference runs.
"""
if not os.path.exists(plotDir):
os.makedirs(plotDir)
plt.figure()
objectName = stats["object"]
# plot request stats
for field in fields:
fieldKey = field[0] + " C0"
plt.plot(stats[fieldKey], marker='+', label=field[1])
# format
plt.legend(loc="upper right")
plt.xlabel("Input number")
plt.xticks(range(stats["numSteps"]))
plt.ylabel("Number of cells")
# plt.ylim(plt.ylim()[0] - 5, plt.ylim()[1] + 5)
plt.ylim(-5, 50)
plt.title("Activity while inferring a single object".format(objectName))
# save
relPath = "{}_exp_{}.pdf".format(basename, experimentID)
path = os.path.join(plotDir, relPath)
plt.savefig(path)
plt.close()
if __name__ == "__main__":
startTime = time.time()
dirName = os.path.dirname(os.path.realpath(__file__))
# This is how you run a specific experiment in single process mode. Useful
# for debugging, profiling, etc.
if False:
results = runExperiment(
{
"numObjects": 50,
"numPoints": 10,
"numLocations": 100,
"numFeatures": 25,
"trialNum": 4,
"pointRange": 1,
"plotInferenceStats": True, # Outputs detailed graphs
"settlingTime": 3,
}
)
# Here we want to check accuracy of the TM network in classifying the
# objects.
if True:
# We run 10 trials for each column number and then analyze results
numTrials = 10
featureRange = [5, 10, 50]
objectRange = [2, 5, 10, 20, 30, 40, 50, 70]
locationRange = [100]
resultsName = os.path.join(dirName, "sequence_accuracy_results.pkl")
# Comment this out if you are re-running analysis on already saved results.
# Very useful for debugging the plots
# runExperimentPool(
# numObjects=objectRange,
# numFeatures=featureRange,
# numLocations=locationRange,
# numPoints=10,
# nTrials=numTrials,
# numWorkers=cpu_count() - 1,
# resultsName=resultsName)
# Analyze results
with open(resultsName,"rb") as f:
results = cPickle.load(f)
plotSequenceAccuracy(results, featureRange, objectRange,
title="Relative performance of layers during sensorimotor inference",
yaxis="Accuracy (%)")
# plotSequenceAccuracyBargraph(results, featureRange, objectRange,
# title="Performance while inferring objects",
# yaxis="Accuracy (%)")
# Here we want to see how the number of objects affects convergence for a
# single column.
# This experiment is run using a process pool
if False:
# We run 10 trials for each column number and then analyze results
numTrials = 10
featureRange = [5, 10, 100, 1000]
objectRange = [2, 5, 10, 20, 30, 50]
locationRange = [10, 100, 500, 1000]
resultsName = os.path.join(dirName, "object_convergence_results.pkl")
# Comment this out if you are re-running analysis on already saved results.
# Very useful for debugging the plots
runExperimentPool(
numObjects=objectRange,
numLocations=locationRange,
numFeatures=featureRange,
numPoints=10,
nTrials=numTrials,
numWorkers=cpu_count() - 1,
resultsName=resultsName)
# Analyze results
with open(resultsName,"rb") as f:
results = cPickle.load(f)
plotConvergenceByObject(results, objectRange, featureRange, numTrials)
plotPredictionsByObject(results, objectRange, featureRange, numTrials,
key="averagePredictions",
title="Predictions in temporal sequence layer",
yaxis="Average number of predicted cells")
plotPredictionsByObject(results, objectRange, featureRange, numTrials,
key="averagePredictedActive",
title="Correct predictions in temporal sequence layer",
yaxis="Average number of correctly predicted cells"
)
plotPredictionsByObject(results, objectRange, featureRange, numTrials,
key="averagePredictedActiveL4",
title="Correct predictions in sensorimotor layer",
yaxis="Average number of correctly predicted cells"
)
plotPredictionsByObject(results, objectRange, featureRange, numTrials,
key="averagePredictionsL4",
title="Predictions in sensorimotor layer",
yaxis="Average number of predicted cells")
print "Actual runtime=",time.time() - startTime
| agpl-3.0 |
wazeerzulfikar/scikit-learn | examples/ensemble/plot_gradient_boosting_regularization.py | 355 | 2843 | """
================================
Gradient Boosting regularization
================================
Illustration of the effect of different regularization strategies
for Gradient Boosting. The example is taken from Hastie et al 2009.
The loss function used is binomial deviance. Regularization via
shrinkage (``learning_rate < 1.0``) improves performance considerably.
In combination with shrinkage, stochastic gradient boosting
(``subsample < 1.0``) can produce more accurate models by reducing the
variance via bagging.
Subsampling without shrinkage usually does poorly.
Another strategy to reduce the variance is by subsampling the features
analogous to the random splits in Random Forests
(via the ``max_features`` parameter).
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", Springer, 2009.
"""
print(__doc__)
# Author: Peter Prettenhofer <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import ensemble
from sklearn import datasets
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X = X.astype(np.float32)
# map labels from {-1, 1} to {0, 1}
labels, y = np.unique(y, return_inverse=True)
X_train, X_test = X[:2000], X[2000:]
y_train, y_test = y[:2000], y[2000:]
original_params = {'n_estimators': 1000, 'max_leaf_nodes': 4, 'max_depth': None, 'random_state': 2,
'min_samples_split': 5}
plt.figure()
for label, color, setting in [('No shrinkage', 'orange',
{'learning_rate': 1.0, 'subsample': 1.0}),
('learning_rate=0.1', 'turquoise',
{'learning_rate': 0.1, 'subsample': 1.0}),
('subsample=0.5', 'blue',
{'learning_rate': 1.0, 'subsample': 0.5}),
('learning_rate=0.1, subsample=0.5', 'gray',
{'learning_rate': 0.1, 'subsample': 0.5}),
('learning_rate=0.1, max_features=2', 'magenta',
{'learning_rate': 0.1, 'max_features': 2})]:
params = dict(original_params)
params.update(setting)
clf = ensemble.GradientBoostingClassifier(**params)
clf.fit(X_train, y_train)
# compute test set deviance
test_deviance = np.zeros((params['n_estimators'],), dtype=np.float64)
for i, y_pred in enumerate(clf.staged_decision_function(X_test)):
# clf.loss_ assumes that y_test[i] in {0, 1}
test_deviance[i] = clf.loss_(y_test, y_pred)
plt.plot((np.arange(test_deviance.shape[0]) + 1)[::5], test_deviance[::5],
'-', color=color, label=label)
plt.legend(loc='upper left')
plt.xlabel('Boosting Iterations')
plt.ylabel('Test Set Deviance')
plt.show()
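# A possible extension (not part of the original example): the in-bag training
# deviance per iteration is available as `clf.train_score_`, so the train/test
# gap can be visualised by also plotting, inside the loop above,
#
#   plt.plot(np.arange(params['n_estimators']) + 1, clf.train_score_,
#            '--', color=color, label=label + ' (train)')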
| bsd-3-clause |
chanceraine/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/colors.py | 69 | 31676 | """
A module for converting numbers or color arguments to *RGB* or *RGBA*
*RGB* and *RGBA* are sequences of, respectively, 3 or 4 floats in the
range 0-1.
This module includes functions and classes for color specification
conversions, and for mapping numbers to colors in a 1-D array of
colors called a colormap. Colormapping typically involves two steps:
a data array is first mapped onto the range 0-1 using an instance
of :class:`Normalize` or of a subclass; then this number in the 0-1
range is mapped to a color using an instance of a subclass of
:class:`Colormap`. Two are provided here:
:class:`LinearSegmentedColormap`, which is used to generate all
the built-in colormap instances, but is also useful for making
custom colormaps, and :class:`ListedColormap`, which is used for
generating a custom colormap from a list of color specifications.
The module also provides a single instance, *colorConverter*, of the
:class:`ColorConverter` class providing methods for converting single
color specifications or sequences of them to *RGB* or *RGBA*.
Commands which take color arguments can use several formats to specify
the colors. For the basic builtin colors, you can use a single letter
- b : blue
- g : green
- r : red
- c : cyan
- m : magenta
- y : yellow
- k : black
- w : white
Gray shades can be given as a string encoding a float in the 0-1
range, e.g.::
color = '0.75'
For a greater range of colors, you have two options. You can specify
the color using an html hex string, as in::
color = '#eeefff'
or you can pass an *R* , *G* , *B* tuple, where each of *R* , *G* , *B*
are in the range [0,1].
Finally, legal html names for colors, like 'red', 'burlywood' and
'chartreuse' are supported.
"""
import re
import numpy as np
from numpy import ma
import matplotlib.cbook as cbook
parts = np.__version__.split('.')
NP_MAJOR, NP_MINOR = map(int, parts[:2])
# true if clip supports the out kwarg
NP_CLIP_OUT = NP_MAJOR>=1 and NP_MINOR>=2
cnames = {
'aliceblue' : '#F0F8FF',
'antiquewhite' : '#FAEBD7',
'aqua' : '#00FFFF',
'aquamarine' : '#7FFFD4',
'azure' : '#F0FFFF',
'beige' : '#F5F5DC',
'bisque' : '#FFE4C4',
'black' : '#000000',
'blanchedalmond' : '#FFEBCD',
'blue' : '#0000FF',
'blueviolet' : '#8A2BE2',
'brown' : '#A52A2A',
'burlywood' : '#DEB887',
'cadetblue' : '#5F9EA0',
'chartreuse' : '#7FFF00',
'chocolate' : '#D2691E',
'coral' : '#FF7F50',
'cornflowerblue' : '#6495ED',
'cornsilk' : '#FFF8DC',
'crimson' : '#DC143C',
'cyan' : '#00FFFF',
'darkblue' : '#00008B',
'darkcyan' : '#008B8B',
'darkgoldenrod' : '#B8860B',
'darkgray' : '#A9A9A9',
'darkgreen' : '#006400',
'darkkhaki' : '#BDB76B',
'darkmagenta' : '#8B008B',
'darkolivegreen' : '#556B2F',
'darkorange' : '#FF8C00',
'darkorchid' : '#9932CC',
'darkred' : '#8B0000',
'darksalmon' : '#E9967A',
'darkseagreen' : '#8FBC8F',
'darkslateblue' : '#483D8B',
'darkslategray' : '#2F4F4F',
'darkturquoise' : '#00CED1',
'darkviolet' : '#9400D3',
'deeppink' : '#FF1493',
'deepskyblue' : '#00BFFF',
'dimgray' : '#696969',
'dodgerblue' : '#1E90FF',
'firebrick' : '#B22222',
'floralwhite' : '#FFFAF0',
'forestgreen' : '#228B22',
'fuchsia' : '#FF00FF',
'gainsboro' : '#DCDCDC',
'ghostwhite' : '#F8F8FF',
'gold' : '#FFD700',
'goldenrod' : '#DAA520',
'gray' : '#808080',
'green' : '#008000',
'greenyellow' : '#ADFF2F',
'honeydew' : '#F0FFF0',
'hotpink' : '#FF69B4',
'indianred' : '#CD5C5C',
'indigo' : '#4B0082',
'ivory' : '#FFFFF0',
'khaki' : '#F0E68C',
'lavender' : '#E6E6FA',
'lavenderblush' : '#FFF0F5',
'lawngreen' : '#7CFC00',
'lemonchiffon' : '#FFFACD',
'lightblue' : '#ADD8E6',
'lightcoral' : '#F08080',
'lightcyan' : '#E0FFFF',
'lightgoldenrodyellow' : '#FAFAD2',
'lightgreen' : '#90EE90',
'lightgrey' : '#D3D3D3',
'lightpink' : '#FFB6C1',
'lightsalmon' : '#FFA07A',
'lightseagreen' : '#20B2AA',
'lightskyblue' : '#87CEFA',
'lightslategray' : '#778899',
'lightsteelblue' : '#B0C4DE',
'lightyellow' : '#FFFFE0',
'lime' : '#00FF00',
'limegreen' : '#32CD32',
'linen' : '#FAF0E6',
'magenta' : '#FF00FF',
'maroon' : '#800000',
'mediumaquamarine' : '#66CDAA',
'mediumblue' : '#0000CD',
'mediumorchid' : '#BA55D3',
'mediumpurple' : '#9370DB',
'mediumseagreen' : '#3CB371',
'mediumslateblue' : '#7B68EE',
'mediumspringgreen' : '#00FA9A',
'mediumturquoise' : '#48D1CC',
'mediumvioletred' : '#C71585',
'midnightblue' : '#191970',
'mintcream' : '#F5FFFA',
'mistyrose' : '#FFE4E1',
'moccasin' : '#FFE4B5',
'navajowhite' : '#FFDEAD',
'navy' : '#000080',
'oldlace' : '#FDF5E6',
'olive' : '#808000',
'olivedrab' : '#6B8E23',
'orange' : '#FFA500',
'orangered' : '#FF4500',
'orchid' : '#DA70D6',
'palegoldenrod' : '#EEE8AA',
'palegreen' : '#98FB98',
    'paleturquoise'        : '#AFEEEE',
    'palevioletred'        : '#DB7093',
'papayawhip' : '#FFEFD5',
'peachpuff' : '#FFDAB9',
'peru' : '#CD853F',
'pink' : '#FFC0CB',
'plum' : '#DDA0DD',
'powderblue' : '#B0E0E6',
'purple' : '#800080',
'red' : '#FF0000',
'rosybrown' : '#BC8F8F',
'royalblue' : '#4169E1',
'saddlebrown' : '#8B4513',
'salmon' : '#FA8072',
'sandybrown' : '#FAA460',
'seagreen' : '#2E8B57',
'seashell' : '#FFF5EE',
'sienna' : '#A0522D',
'silver' : '#C0C0C0',
'skyblue' : '#87CEEB',
'slateblue' : '#6A5ACD',
'slategray' : '#708090',
'snow' : '#FFFAFA',
'springgreen' : '#00FF7F',
'steelblue' : '#4682B4',
'tan' : '#D2B48C',
'teal' : '#008080',
'thistle' : '#D8BFD8',
'tomato' : '#FF6347',
'turquoise' : '#40E0D0',
'violet' : '#EE82EE',
'wheat' : '#F5DEB3',
'white' : '#FFFFFF',
'whitesmoke' : '#F5F5F5',
'yellow' : '#FFFF00',
'yellowgreen' : '#9ACD32',
}
# add british equivs
for k, v in cnames.items():
if k.find('gray')>=0:
k = k.replace('gray', 'grey')
cnames[k] = v
def is_color_like(c):
'Return *True* if *c* can be converted to *RGB*'
try:
colorConverter.to_rgb(c)
return True
except ValueError:
return False
def rgb2hex(rgb):
'Given a len 3 rgb tuple of 0-1 floats, return the hex string'
return '#%02x%02x%02x' % tuple([round(val*255) for val in rgb])
hexColorPattern = re.compile("\A#[a-fA-F0-9]{6}\Z")
def hex2color(s):
"""
Take a hex string *s* and return the corresponding rgb 3-tuple
Example: #efefef -> (0.93725, 0.93725, 0.93725)
"""
if not isinstance(s, basestring):
raise TypeError('hex2color requires a string argument')
if hexColorPattern.match(s) is None:
raise ValueError('invalid hex color string "%s"' % s)
return tuple([int(n, 16)/255.0 for n in (s[1:3], s[3:5], s[5:7])])
class ColorConverter:
"""
Provides methods for converting color specifications to *RGB* or *RGBA*
Caching is used for more efficient conversion upon repeated calls
with the same argument.
Ordinarily only the single instance instantiated in this module,
*colorConverter*, is needed.
"""
colors = {
'b' : (0.0, 0.0, 1.0),
'g' : (0.0, 0.5, 0.0),
'r' : (1.0, 0.0, 0.0),
'c' : (0.0, 0.75, 0.75),
'm' : (0.75, 0, 0.75),
'y' : (0.75, 0.75, 0),
'k' : (0.0, 0.0, 0.0),
'w' : (1.0, 1.0, 1.0),
}
cache = {}
def to_rgb(self, arg):
"""
Returns an *RGB* tuple of three floats from 0-1.
*arg* can be an *RGB* or *RGBA* sequence or a string in any of
several forms:
1) a letter from the set 'rgbcmykw'
2) a hex color string, like '#00FFFF'
3) a standard name, like 'aqua'
4) a float, like '0.4', indicating gray on a 0-1 scale
if *arg* is *RGBA*, the *A* will simply be discarded.
"""
try: return self.cache[arg]
except KeyError: pass
except TypeError: # could be unhashable rgb seq
arg = tuple(arg)
try: return self.cache[arg]
except KeyError: pass
except TypeError:
raise ValueError(
'to_rgb: arg "%s" is unhashable even inside a tuple'
% (str(arg),))
try:
if cbook.is_string_like(arg):
color = self.colors.get(arg, None)
if color is None:
str1 = cnames.get(arg, arg)
if str1.startswith('#'):
color = hex2color(str1)
else:
fl = float(arg)
if fl < 0 or fl > 1:
raise ValueError(
'gray (string) must be in range 0-1')
color = tuple([fl]*3)
elif cbook.iterable(arg):
if len(arg) > 4 or len(arg) < 3:
raise ValueError(
'sequence length is %d; must be 3 or 4'%len(arg))
color = tuple(arg[:3])
if [x for x in color if (float(x) < 0) or (x > 1)]:
# This will raise TypeError if x is not a number.
                    raise ValueError('number in rgb sequence outside 0-1 range')
else:
raise ValueError('cannot convert argument to rgb sequence')
self.cache[arg] = color
except (KeyError, ValueError, TypeError), exc:
raise ValueError('to_rgb: Invalid rgb arg "%s"\n%s' % (str(arg), exc))
# Error messages could be improved by handling TypeError
# separately; but this should be rare and not too hard
# for the user to figure out as-is.
return color
def to_rgba(self, arg, alpha=None):
"""
Returns an *RGBA* tuple of four floats from 0-1.
For acceptable values of *arg*, see :meth:`to_rgb`.
If *arg* is an *RGBA* sequence and *alpha* is not *None*,
*alpha* will replace the original *A*.
"""
try:
if not cbook.is_string_like(arg) and cbook.iterable(arg):
if len(arg) == 4:
if [x for x in arg if (float(x) < 0) or (x > 1)]:
# This will raise TypeError if x is not a number.
                        raise ValueError('number in rgba sequence outside 0-1 range')
if alpha is None:
return tuple(arg)
if alpha < 0.0 or alpha > 1.0:
raise ValueError("alpha must be in range 0-1")
return arg[0], arg[1], arg[2], arg[3] * alpha
r,g,b = arg[:3]
if [x for x in (r,g,b) if (float(x) < 0) or (x > 1)]:
raise ValueError('number in rbg sequence outside 0-1 range')
else:
r,g,b = self.to_rgb(arg)
if alpha is None:
alpha = 1.0
return r,g,b,alpha
except (TypeError, ValueError), exc:
raise ValueError('to_rgba: Invalid rgba arg "%s"\n%s' % (str(arg), exc))
def to_rgba_array(self, c, alpha=None):
"""
Returns a numpy array of *RGBA* tuples.
Accepts a single mpl color spec or a sequence of specs.
Special case to handle "no color": if *c* is "none" (case-insensitive),
then an empty array will be returned. Same for an empty list.
"""
try:
if c.lower() == 'none':
return np.zeros((0,4), dtype=np.float_)
except AttributeError:
pass
if len(c) == 0:
return np.zeros((0,4), dtype=np.float_)
try:
result = np.array([self.to_rgba(c, alpha)], dtype=np.float_)
except ValueError:
if isinstance(c, np.ndarray):
if c.ndim != 2 and c.dtype.kind not in 'SU':
raise ValueError("Color array must be two-dimensional")
result = np.zeros((len(c), 4))
for i, cc in enumerate(c):
result[i] = self.to_rgba(cc, alpha) # change in place
return np.asarray(result, np.float_)
colorConverter = ColorConverter()
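# Illustrative conversions with the shared converter instance (a doctest-style
# sketch; the values follow directly from the tables defined above):
#
#   >>> colorConverter.to_rgb('r')
#   (1.0, 0.0, 0.0)
#   >>> colorConverter.to_rgb('0.5')
#   (0.5, 0.5, 0.5)
#   >>> colorConverter.to_rgba('g', alpha=0.5)
#   (0.0, 0.5, 0.0, 0.5)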
def makeMappingArray(N, data):
"""Create an *N* -element 1-d lookup table
*data* represented by a list of x,y0,y1 mapping correspondences.
Each element in this list represents how a value between 0 and 1
(inclusive) represented by x is mapped to a corresponding value
between 0 and 1 (inclusive). The two values of y are to allow
for discontinuous mapping functions (say as might be found in a
sawtooth) where y0 represents the value of y for values of x
    less than or equal to that given, and y1 is the value to be used for x greater than
that given). The list must start with x=0, end with x=1, and
all values of x must be in increasing order. Values between
the given mapping points are determined by simple linear interpolation.
The function returns an array "result" where ``result[x*(N-1)]``
gives the closest value for values of x between 0 and 1.
"""
try:
adata = np.array(data)
except:
raise TypeError("data must be convertable to an array")
shape = adata.shape
    if len(shape) != 2 or shape[1] != 3:
raise ValueError("data must be nx3 format")
x = adata[:,0]
y0 = adata[:,1]
y1 = adata[:,2]
if x[0] != 0. or x[-1] != 1.0:
raise ValueError(
"data mapping points must start with x=0. and end with x=1")
if np.sometrue(np.sort(x)-x):
raise ValueError(
"data mapping points must have x in increasing order")
# begin generation of lookup table
x = x * (N-1)
lut = np.zeros((N,), np.float)
xind = np.arange(float(N))
ind = np.searchsorted(x, xind)[1:-1]
lut[1:-1] = ( ((xind[1:-1] - x[ind-1]) / (x[ind] - x[ind-1]))
* (y0[ind] - y1[ind-1]) + y1[ind-1])
lut[0] = y1[0]
lut[-1] = y0[-1]
# ensure that the lut is confined to values between 0 and 1 by clipping it
    lut = np.clip(lut, 0.0, 1.0)
#lut = where(lut > 1., 1., lut)
#lut = where(lut < 0., 0., lut)
return lut
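# Worked example for makeMappingArray (a minimal sketch; the input follows the
# (x, y0, y1) row format described in the docstring above):
#
#   makeMappingArray(5, [(0.0, 0.0, 0.0), (1.0, 1.0, 1.0)])
#   # -> array([ 0.  ,  0.25,  0.5 ,  0.75,  1.  ])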
class Colormap:
"""Base class for all scalar to rgb mappings
Important methods:
* :meth:`set_bad`
* :meth:`set_under`
* :meth:`set_over`
"""
def __init__(self, name, N=256):
"""
Public class attributes:
:attr:`N` : number of rgb quantization levels
:attr:`name` : name of colormap
"""
self.name = name
self.N = N
self._rgba_bad = (0.0, 0.0, 0.0, 0.0) # If bad, don't paint anything.
self._rgba_under = None
self._rgba_over = None
self._i_under = N
self._i_over = N+1
self._i_bad = N+2
self._isinit = False
def __call__(self, X, alpha=1.0, bytes=False):
"""
*X* is either a scalar or an array (of any dimension).
If scalar, a tuple of rgba values is returned, otherwise
an array with the new shape = oldshape+(4,). If the X-values
are integers, then they are used as indices into the array.
If they are floating point, then they must be in the
interval (0.0, 1.0).
Alpha must be a scalar.
If bytes is False, the rgba values will be floats on a
0-1 scale; if True, they will be uint8, 0-255.
"""
if not self._isinit: self._init()
alpha = min(alpha, 1.0) # alpha must be between 0 and 1
alpha = max(alpha, 0.0)
self._lut[:-3, -1] = alpha
mask_bad = None
if not cbook.iterable(X):
vtype = 'scalar'
xa = np.array([X])
else:
vtype = 'array'
xma = ma.asarray(X)
xa = xma.filled(0)
mask_bad = ma.getmask(xma)
if xa.dtype.char in np.typecodes['Float']:
np.putmask(xa, xa==1.0, 0.9999999) #Treat 1.0 as slightly less than 1.
# The following clip is fast, and prevents possible
# conversion of large positive values to negative integers.
if NP_CLIP_OUT:
np.clip(xa * self.N, -1, self.N, out=xa)
else:
xa = np.clip(xa * self.N, -1, self.N)
xa = xa.astype(int)
# Set the over-range indices before the under-range;
# otherwise the under-range values get converted to over-range.
np.putmask(xa, xa>self.N-1, self._i_over)
np.putmask(xa, xa<0, self._i_under)
if mask_bad is not None and mask_bad.shape == xa.shape:
np.putmask(xa, mask_bad, self._i_bad)
if bytes:
lut = (self._lut * 255).astype(np.uint8)
else:
lut = self._lut
rgba = np.empty(shape=xa.shape+(4,), dtype=lut.dtype)
lut.take(xa, axis=0, mode='clip', out=rgba)
# twice as fast as lut[xa];
# using the clip or wrap mode and providing an
# output array speeds it up a little more.
if vtype == 'scalar':
rgba = tuple(rgba[0,:])
return rgba
def set_bad(self, color = 'k', alpha = 1.0):
'''Set color to be used for masked values.
'''
self._rgba_bad = colorConverter.to_rgba(color, alpha)
if self._isinit: self._set_extremes()
def set_under(self, color = 'k', alpha = 1.0):
'''Set color to be used for low out-of-range values.
Requires norm.clip = False
'''
self._rgba_under = colorConverter.to_rgba(color, alpha)
if self._isinit: self._set_extremes()
def set_over(self, color = 'k', alpha = 1.0):
'''Set color to be used for high out-of-range values.
Requires norm.clip = False
'''
self._rgba_over = colorConverter.to_rgba(color, alpha)
if self._isinit: self._set_extremes()
def _set_extremes(self):
if self._rgba_under:
self._lut[self._i_under] = self._rgba_under
else:
self._lut[self._i_under] = self._lut[0]
if self._rgba_over:
self._lut[self._i_over] = self._rgba_over
else:
self._lut[self._i_over] = self._lut[self.N-1]
self._lut[self._i_bad] = self._rgba_bad
    def _init(self):
'''Generate the lookup table, self._lut'''
raise NotImplementedError("Abstract class only")
def is_gray(self):
if not self._isinit: self._init()
return (np.alltrue(self._lut[:,0] == self._lut[:,1])
and np.alltrue(self._lut[:,0] == self._lut[:,2]))
class LinearSegmentedColormap(Colormap):
"""Colormap objects based on lookup tables using linear segments.
The lookup table is generated using linear interpolation for each
primary color, with the 0-1 domain divided into any number of
segments.
"""
def __init__(self, name, segmentdata, N=256):
"""Create color map from linear mapping segments
segmentdata argument is a dictionary with a red, green and blue
entries. Each entry should be a list of *x*, *y0*, *y1* tuples,
forming rows in a table.
Example: suppose you want red to increase from 0 to 1 over
the bottom half, green to do the same over the middle half,
and blue over the top half. Then you would use::
cdict = {'red': [(0.0, 0.0, 0.0),
(0.5, 1.0, 1.0),
(1.0, 1.0, 1.0)],
'green': [(0.0, 0.0, 0.0),
(0.25, 0.0, 0.0),
(0.75, 1.0, 1.0),
(1.0, 1.0, 1.0)],
'blue': [(0.0, 0.0, 0.0),
(0.5, 0.0, 0.0),
(1.0, 1.0, 1.0)]}
Each row in the table for a given color is a sequence of
*x*, *y0*, *y1* tuples. In each sequence, *x* must increase
monotonically from 0 to 1. For any input value *z* falling
between *x[i]* and *x[i+1]*, the output value of a given color
will be linearly interpolated between *y1[i]* and *y0[i+1]*::
row i: x y0 y1
/
/
row i+1: x y0 y1
Hence y0 in the first row and y1 in the last row are never used.
.. seealso::
:func:`makeMappingArray`
"""
self.monochrome = False # True only if all colors in map are identical;
# needed for contouring.
Colormap.__init__(self, name, N)
self._segmentdata = segmentdata
def _init(self):
self._lut = np.ones((self.N + 3, 4), np.float)
self._lut[:-3, 0] = makeMappingArray(self.N, self._segmentdata['red'])
self._lut[:-3, 1] = makeMappingArray(self.N, self._segmentdata['green'])
self._lut[:-3, 2] = makeMappingArray(self.N, self._segmentdata['blue'])
self._isinit = True
self._set_extremes()
class ListedColormap(Colormap):
"""Colormap object generated from a list of colors.
This may be most useful when indexing directly into a colormap,
but it can also be used to generate special colormaps for ordinary
mapping.
"""
def __init__(self, colors, name = 'from_list', N = None):
"""
Make a colormap from a list of colors.
*colors*
a list of matplotlib color specifications,
or an equivalent Nx3 floating point array (*N* rgb values)
*name*
a string to identify the colormap
*N*
the number of entries in the map. The default is *None*,
in which case there is one colormap entry for each
element in the list of colors. If::
N < len(colors)
the list will be truncated at *N*. If::
N > len(colors)
the list will be extended by repetition.
"""
self.colors = colors
self.monochrome = False # True only if all colors in map are identical;
# needed for contouring.
if N is None:
N = len(self.colors)
else:
if cbook.is_string_like(self.colors):
self.colors = [self.colors] * N
self.monochrome = True
elif cbook.iterable(self.colors):
self.colors = list(self.colors) # in case it was a tuple
if len(self.colors) == 1:
self.monochrome = True
if len(self.colors) < N:
self.colors = list(self.colors) * N
del(self.colors[N:])
else:
try: gray = float(self.colors)
except TypeError: pass
else: self.colors = [gray] * N
self.monochrome = True
Colormap.__init__(self, name, N)
def _init(self):
rgb = np.array([colorConverter.to_rgb(c)
for c in self.colors], np.float)
self._lut = np.zeros((self.N + 3, 4), np.float)
self._lut[:-3, :-1] = rgb
self._lut[:-3, -1] = 1
self._isinit = True
self._set_extremes()
class Normalize:
"""
Normalize a given value to the 0-1 range
"""
def __init__(self, vmin=None, vmax=None, clip=False):
"""
If *vmin* or *vmax* is not given, they are taken from the input's
minimum and maximum value respectively. If *clip* is *True* and
the given value falls outside the range, the returned value
will be 0 or 1, whichever is closer. Returns 0 if::
vmin==vmax
Works with scalars or arrays, including masked arrays. If
*clip* is *True*, masked values are set to 1; otherwise they
remain masked. Clipping silently defeats the purpose of setting
the over, under, and masked colors in the colormap, so it is
likely to lead to surprises; therefore the default is
*clip* = *False*.
"""
self.vmin = vmin
self.vmax = vmax
self.clip = clip
def __call__(self, value, clip=None):
if clip is None:
clip = self.clip
if cbook.iterable(value):
vtype = 'array'
val = ma.asarray(value).astype(np.float)
else:
vtype = 'scalar'
val = ma.array([value]).astype(np.float)
self.autoscale_None(val)
vmin, vmax = self.vmin, self.vmax
if vmin > vmax:
raise ValueError("minvalue must be less than or equal to maxvalue")
elif vmin==vmax:
return 0.0 * val
else:
if clip:
mask = ma.getmask(val)
val = ma.array(np.clip(val.filled(vmax), vmin, vmax),
mask=mask)
result = (val-vmin) * (1.0/(vmax-vmin))
if vtype == 'scalar':
result = result[0]
return result
def inverse(self, value):
if not self.scaled():
raise ValueError("Not invertible until scaled")
vmin, vmax = self.vmin, self.vmax
if cbook.iterable(value):
val = ma.asarray(value)
return vmin + val * (vmax - vmin)
else:
return vmin + value * (vmax - vmin)
def autoscale(self, A):
'''
Set *vmin*, *vmax* to min, max of *A*.
'''
self.vmin = ma.minimum(A)
self.vmax = ma.maximum(A)
def autoscale_None(self, A):
' autoscale only None-valued vmin or vmax'
if self.vmin is None: self.vmin = ma.minimum(A)
if self.vmax is None: self.vmax = ma.maximum(A)
def scaled(self):
'return true if vmin and vmax set'
return (self.vmin is not None and self.vmax is not None)
class LogNorm(Normalize):
"""
Normalize a given value to the 0-1 range on a log scale
"""
def __call__(self, value, clip=None):
if clip is None:
clip = self.clip
if cbook.iterable(value):
vtype = 'array'
val = ma.asarray(value).astype(np.float)
else:
vtype = 'scalar'
val = ma.array([value]).astype(np.float)
self.autoscale_None(val)
vmin, vmax = self.vmin, self.vmax
if vmin > vmax:
raise ValueError("minvalue must be less than or equal to maxvalue")
elif vmin<=0:
raise ValueError("values must all be positive")
elif vmin==vmax:
return 0.0 * val
else:
if clip:
mask = ma.getmask(val)
val = ma.array(np.clip(val.filled(vmax), vmin, vmax),
mask=mask)
result = (ma.log(val)-np.log(vmin))/(np.log(vmax)-np.log(vmin))
if vtype == 'scalar':
result = result[0]
return result
def inverse(self, value):
if not self.scaled():
raise ValueError("Not invertible until scaled")
vmin, vmax = self.vmin, self.vmax
if cbook.iterable(value):
val = ma.asarray(value)
return vmin * ma.power((vmax/vmin), val)
else:
return vmin * pow((vmax/vmin), value)
class BoundaryNorm(Normalize):
'''
Generate a colormap index based on discrete intervals.
Unlike :class:`Normalize` or :class:`LogNorm`,
:class:`BoundaryNorm` maps values to integers instead of to the
interval 0-1.
Mapping to the 0-1 interval could have been done via
piece-wise linear interpolation, but using integers seems
simpler, and reduces the number of conversions back and forth
between integer and floating point.
'''
def __init__(self, boundaries, ncolors, clip=False):
'''
*boundaries*
a monotonically increasing sequence
*ncolors*
number of colors in the colormap to be used
If::
b[i] <= v < b[i+1]
then v is mapped to color j;
as i varies from 0 to len(boundaries)-2,
j goes from 0 to ncolors-1.
Out-of-range values are mapped to -1 if low and ncolors
if high; these are converted to valid indices by
:meth:`Colormap.__call__` .
'''
self.clip = clip
self.vmin = boundaries[0]
self.vmax = boundaries[-1]
self.boundaries = np.asarray(boundaries)
self.N = len(self.boundaries)
self.Ncmap = ncolors
if self.N-1 == self.Ncmap:
self._interp = False
else:
self._interp = True
def __call__(self, x, clip=None):
if clip is None:
clip = self.clip
x = ma.asarray(x)
mask = ma.getmaskarray(x)
xx = x.filled(self.vmax+1)
if clip:
            xx = np.clip(xx, self.vmin, self.vmax)
iret = np.zeros(x.shape, dtype=np.int16)
for i, b in enumerate(self.boundaries):
iret[xx>=b] = i
if self._interp:
iret = (iret * (float(self.Ncmap-1)/(self.N-2))).astype(np.int16)
iret[xx<self.vmin] = -1
iret[xx>=self.vmax] = self.Ncmap
ret = ma.array(iret, mask=mask)
if ret.shape == () and not mask:
ret = int(ret) # assume python scalar
return ret
def inverse(self, value):
        raise ValueError("BoundaryNorm is not invertible")
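# Illustrative sketch of BoundaryNorm (boundaries chosen here for illustration):
# with boundaries [0, 1, 2, 3] and ncolors=3, each interval maps to one colormap
# index, while out-of-range values map to -1 (low) or ncolors (high):
#
#   bnorm = BoundaryNorm([0, 1, 2, 3], ncolors=3)
#   bnorm([0.5, 1.5, 2.5])  # -> indices [0, 1, 2]
#   bnorm([-0.5, 3.5])      # -> [-1, 3]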
class NoNorm(Normalize):
'''
Dummy replacement for Normalize, for the case where we
want to use indices directly in a
:class:`~matplotlib.cm.ScalarMappable` .
'''
def __call__(self, value, clip=None):
return value
def inverse(self, value):
return value
# compatibility with earlier class names that violated convention:
normalize = Normalize
no_norm = NoNorm
| agpl-3.0 |
wackymaster/QTClock | Libraries/numpy/lib/recfunctions.py | 148 | 35012 | """
Collection of utilities to manipulate structured arrays.
Most of these functions were initially implemented by John Hunter for
matplotlib. They have been rewritten and extended for convenience.
"""
from __future__ import division, absolute_import, print_function
import sys
import itertools
import numpy as np
import numpy.ma as ma
from numpy import ndarray, recarray
from numpy.ma import MaskedArray
from numpy.ma.mrecords import MaskedRecords
from numpy.lib._iotools import _is_string_like
from numpy.compat import basestring
if sys.version_info[0] < 3:
from future_builtins import zip
_check_fill_value = np.ma.core._check_fill_value
__all__ = [
'append_fields', 'drop_fields', 'find_duplicates',
'get_fieldstructure', 'join_by', 'merge_arrays',
'rec_append_fields', 'rec_drop_fields', 'rec_join',
'recursive_fill_fields', 'rename_fields', 'stack_arrays',
]
def recursive_fill_fields(input, output):
"""
Fills fields from output with fields from input,
with support for nested structures.
Parameters
----------
input : ndarray
Input array.
output : ndarray
Output array.
Notes
-----
* `output` should be at least the same size as `input`
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> a = np.array([(1, 10.), (2, 20.)], dtype=[('A', int), ('B', float)])
>>> b = np.zeros((3,), dtype=a.dtype)
>>> rfn.recursive_fill_fields(a, b)
array([(1, 10.0), (2, 20.0), (0, 0.0)],
dtype=[('A', '<i4'), ('B', '<f8')])
"""
newdtype = output.dtype
for field in newdtype.names:
try:
current = input[field]
except ValueError:
continue
if current.dtype.names:
recursive_fill_fields(current, output[field])
else:
output[field][:len(current)] = current
return output
def get_names(adtype):
"""
Returns the field names of the input datatype as a tuple.
Parameters
----------
adtype : dtype
Input datatype
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> rfn.get_names(np.empty((1,), dtype=int)) is None
True
>>> rfn.get_names(np.empty((1,), dtype=[('A',int), ('B', float)]))
('A', 'B')
>>> adtype = np.dtype([('a', int), ('b', [('ba', int), ('bb', int)])])
>>> rfn.get_names(adtype)
('a', ('b', ('ba', 'bb')))
"""
listnames = []
names = adtype.names
for name in names:
current = adtype[name]
if current.names:
listnames.append((name, tuple(get_names(current))))
else:
listnames.append(name)
return tuple(listnames) or None
def get_names_flat(adtype):
"""
    Returns the field names of the input datatype as a tuple. Nested structures
    are flattened beforehand.
Parameters
----------
adtype : dtype
Input datatype
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> rfn.get_names_flat(np.empty((1,), dtype=int)) is None
True
>>> rfn.get_names_flat(np.empty((1,), dtype=[('A',int), ('B', float)]))
('A', 'B')
>>> adtype = np.dtype([('a', int), ('b', [('ba', int), ('bb', int)])])
>>> rfn.get_names_flat(adtype)
('a', 'b', 'ba', 'bb')
"""
listnames = []
names = adtype.names
for name in names:
listnames.append(name)
current = adtype[name]
if current.names:
listnames.extend(get_names_flat(current))
return tuple(listnames) or None
def flatten_descr(ndtype):
"""
Flatten a structured data-type description.
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> ndtype = np.dtype([('a', '<i4'), ('b', [('ba', '<f8'), ('bb', '<i4')])])
>>> rfn.flatten_descr(ndtype)
(('a', dtype('int32')), ('ba', dtype('float64')), ('bb', dtype('int32')))
"""
names = ndtype.names
if names is None:
return ndtype.descr
else:
descr = []
for field in names:
(typ, _) = ndtype.fields[field]
if typ.names:
descr.extend(flatten_descr(typ))
else:
descr.append((field, typ))
return tuple(descr)
def zip_descr(seqarrays, flatten=False):
"""
Combine the dtype description of a series of arrays.
Parameters
----------
seqarrays : sequence of arrays
Sequence of arrays
flatten : {boolean}, optional
Whether to collapse nested descriptions.
"""
newdtype = []
if flatten:
for a in seqarrays:
newdtype.extend(flatten_descr(a.dtype))
else:
for a in seqarrays:
current = a.dtype
names = current.names or ()
if len(names) > 1:
newdtype.append(('', current.descr))
else:
newdtype.extend(current.descr)
return np.dtype(newdtype).descr
def get_fieldstructure(adtype, lastname=None, parents=None,):
"""
Returns a dictionary with fields indexing lists of their parent fields.
This function is used to simplify access to fields nested in other fields.
Parameters
----------
adtype : np.dtype
Input datatype
lastname : optional
Last processed field name (used internally during recursion).
parents : dictionary
        Dictionary of parent fields (used internally during recursion).
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> ndtype = np.dtype([('A', int),
... ('B', [('BA', int),
... ('BB', [('BBA', int), ('BBB', int)])])])
>>> rfn.get_fieldstructure(ndtype)
... # XXX: possible regression, order of BBA and BBB is swapped
{'A': [], 'B': [], 'BA': ['B'], 'BB': ['B'], 'BBA': ['B', 'BB'], 'BBB': ['B', 'BB']}
"""
if parents is None:
parents = {}
names = adtype.names
for name in names:
current = adtype[name]
if current.names:
if lastname:
parents[name] = [lastname, ]
else:
parents[name] = []
parents.update(get_fieldstructure(current, name, parents))
else:
lastparent = [_ for _ in (parents.get(lastname, []) or [])]
if lastparent:
lastparent.append(lastname)
elif lastname:
lastparent = [lastname, ]
parents[name] = lastparent or []
return parents or None
def _izip_fields_flat(iterable):
"""
Returns an iterator of concatenated fields from a sequence of arrays,
collapsing any nested structure.
"""
for element in iterable:
if isinstance(element, np.void):
for f in _izip_fields_flat(tuple(element)):
yield f
else:
yield element
def _izip_fields(iterable):
"""
Returns an iterator of concatenated fields from a sequence of arrays.
"""
for element in iterable:
if (hasattr(element, '__iter__') and
not isinstance(element, basestring)):
for f in _izip_fields(element):
yield f
elif isinstance(element, np.void) and len(tuple(element)) == 1:
for f in _izip_fields(element):
yield f
else:
yield element
def izip_records(seqarrays, fill_value=None, flatten=True):
"""
Returns an iterator of concatenated items from a sequence of arrays.
Parameters
----------
seqarrays : sequence of arrays
Sequence of arrays.
fill_value : {None, integer}
Value used to pad shorter iterables.
flatten : {True, False},
        Whether to flatten the items (collapse nested fields) or not.
"""
# OK, that's a complete ripoff from Python2.6 itertools.izip_longest
def sentinel(counter=([fill_value] * (len(seqarrays) - 1)).pop):
"Yields the fill_value or raises IndexError"
yield counter()
#
fillers = itertools.repeat(fill_value)
iters = [itertools.chain(it, sentinel(), fillers) for it in seqarrays]
# Should we flatten the items, or just use a nested approach
if flatten:
zipfunc = _izip_fields_flat
else:
zipfunc = _izip_fields
#
try:
for tup in zip(*iters):
yield tuple(zipfunc(tup))
except IndexError:
pass
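# Illustrative sketch (not part of the original module): izip_records pads the
# shorter input with `fill_value` and stops once every input is exhausted, so
# for two plain arrays of unequal length one would roughly expect (exact
# scalar reprs may differ between numpy versions):
#
# >>> list(izip_records([np.array([1, 2, 3]), np.array([10., 20.])],
# ...                   fill_value=-1))
# [(1, 10.0), (2, 20.0), (3, -1)]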
def _fix_output(output, usemask=True, asrecarray=False):
"""
Private function: return a recarray, a ndarray, a MaskedArray
or a MaskedRecords depending on the input parameters
"""
if not isinstance(output, MaskedArray):
usemask = False
if usemask:
if asrecarray:
output = output.view(MaskedRecords)
else:
output = ma.filled(output)
if asrecarray:
output = output.view(recarray)
return output
def _fix_defaults(output, defaults=None):
"""
Update the fill_value and masked data of `output`
    from the defaults given in the dictionary `defaults`.
"""
names = output.dtype.names
(data, mask, fill_value) = (output.data, output.mask, output.fill_value)
for (k, v) in (defaults or {}).items():
if k in names:
fill_value[k] = v
data[k][mask[k]] = v
return output
def merge_arrays(seqarrays, fill_value=-1, flatten=False,
usemask=False, asrecarray=False):
"""
Merge arrays field by field.
Parameters
----------
seqarrays : sequence of ndarrays
Sequence of arrays
fill_value : {float}, optional
Filling value used to pad missing data on the shorter arrays.
flatten : {False, True}, optional
Whether to collapse nested fields.
usemask : {False, True}, optional
Whether to return a masked array or not.
asrecarray : {False, True}, optional
Whether to return a recarray (MaskedRecords) or not.
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> rfn.merge_arrays((np.array([1, 2]), np.array([10., 20., 30.])))
masked_array(data = [(1, 10.0) (2, 20.0) (--, 30.0)],
mask = [(False, False) (False, False) (True, False)],
fill_value = (999999, 1e+20),
dtype = [('f0', '<i4'), ('f1', '<f8')])
>>> rfn.merge_arrays((np.array([1, 2]), np.array([10., 20., 30.])),
... usemask=False)
array([(1, 10.0), (2, 20.0), (-1, 30.0)],
dtype=[('f0', '<i4'), ('f1', '<f8')])
>>> rfn.merge_arrays((np.array([1, 2]).view([('a', int)]),
... np.array([10., 20., 30.])),
... usemask=False, asrecarray=True)
rec.array([(1, 10.0), (2, 20.0), (-1, 30.0)],
dtype=[('a', '<i4'), ('f1', '<f8')])
Notes
-----
    * Without a mask, the missing value will be filled with something
      depending on its corresponding type:
-1 for integers
-1.0 for floating point numbers
'-' for characters
'-1' for strings
True for boolean values
* XXX: I just obtained these values empirically
"""
# Only one item in the input sequence ?
if (len(seqarrays) == 1):
seqarrays = np.asanyarray(seqarrays[0])
# Do we have a single ndarray as input ?
if isinstance(seqarrays, (ndarray, np.void)):
seqdtype = seqarrays.dtype
if (not flatten) or \
(zip_descr((seqarrays,), flatten=True) == seqdtype.descr):
            # Minimal processing needed: just make sure everything's a-ok
seqarrays = seqarrays.ravel()
# Make sure we have named fields
if not seqdtype.names:
seqdtype = [('', seqdtype)]
# Find what type of array we must return
if usemask:
if asrecarray:
seqtype = MaskedRecords
else:
seqtype = MaskedArray
elif asrecarray:
seqtype = recarray
else:
seqtype = ndarray
return seqarrays.view(dtype=seqdtype, type=seqtype)
else:
seqarrays = (seqarrays,)
else:
# Make sure we have arrays in the input sequence
seqarrays = [np.asanyarray(_m) for _m in seqarrays]
# Find the sizes of the inputs and their maximum
sizes = tuple(a.size for a in seqarrays)
maxlength = max(sizes)
# Get the dtype of the output (flattening if needed)
newdtype = zip_descr(seqarrays, flatten=flatten)
# Initialize the sequences for data and mask
seqdata = []
seqmask = []
# If we expect some kind of MaskedArray, make a special loop.
if usemask:
for (a, n) in zip(seqarrays, sizes):
nbmissing = (maxlength - n)
# Get the data and mask
data = a.ravel().__array__()
mask = ma.getmaskarray(a).ravel()
# Get the filling value (if needed)
if nbmissing:
fval = _check_fill_value(fill_value, a.dtype)
if isinstance(fval, (ndarray, np.void)):
if len(fval.dtype) == 1:
fval = fval.item()[0]
fmsk = True
else:
fval = np.array(fval, dtype=a.dtype, ndmin=1)
fmsk = np.ones((1,), dtype=mask.dtype)
else:
fval = None
fmsk = True
# Store an iterator padding the input to the expected length
seqdata.append(itertools.chain(data, [fval] * nbmissing))
seqmask.append(itertools.chain(mask, [fmsk] * nbmissing))
# Create an iterator for the data
data = tuple(izip_records(seqdata, flatten=flatten))
output = ma.array(np.fromiter(data, dtype=newdtype, count=maxlength),
mask=list(izip_records(seqmask, flatten=flatten)))
if asrecarray:
output = output.view(MaskedRecords)
else:
# Same as before, without the mask we don't need...
for (a, n) in zip(seqarrays, sizes):
nbmissing = (maxlength - n)
data = a.ravel().__array__()
if nbmissing:
fval = _check_fill_value(fill_value, a.dtype)
if isinstance(fval, (ndarray, np.void)):
if len(fval.dtype) == 1:
fval = fval.item()[0]
else:
fval = np.array(fval, dtype=a.dtype, ndmin=1)
else:
fval = None
seqdata.append(itertools.chain(data, [fval] * nbmissing))
output = np.fromiter(tuple(izip_records(seqdata, flatten=flatten)),
dtype=newdtype, count=maxlength)
if asrecarray:
output = output.view(recarray)
# And we're done...
return output
def drop_fields(base, drop_names, usemask=True, asrecarray=False):
"""
Return a new array with fields in `drop_names` dropped.
Nested fields are supported.
Parameters
----------
base : array
Input array
drop_names : string or sequence
String or sequence of strings corresponding to the names of the
fields to drop.
usemask : {False, True}, optional
Whether to return a masked array or not.
    asrecarray : {False, True}, optional
Whether to return a recarray or a mrecarray (`asrecarray=True`) or
a plain ndarray or masked array with flexible dtype. The default
is False.
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> a = np.array([(1, (2, 3.0)), (4, (5, 6.0))],
... dtype=[('a', int), ('b', [('ba', float), ('bb', int)])])
>>> rfn.drop_fields(a, 'a')
array([((2.0, 3),), ((5.0, 6),)],
dtype=[('b', [('ba', '<f8'), ('bb', '<i4')])])
>>> rfn.drop_fields(a, 'ba')
array([(1, (3,)), (4, (6,))],
dtype=[('a', '<i4'), ('b', [('bb', '<i4')])])
>>> rfn.drop_fields(a, ['ba', 'bb'])
array([(1,), (4,)],
dtype=[('a', '<i4')])
"""
if _is_string_like(drop_names):
drop_names = [drop_names, ]
else:
drop_names = set(drop_names)
def _drop_descr(ndtype, drop_names):
names = ndtype.names
newdtype = []
for name in names:
current = ndtype[name]
if name in drop_names:
continue
if current.names:
descr = _drop_descr(current, drop_names)
if descr:
newdtype.append((name, descr))
else:
newdtype.append((name, current))
return newdtype
newdtype = _drop_descr(base.dtype, drop_names)
if not newdtype:
return None
output = np.empty(base.shape, dtype=newdtype)
output = recursive_fill_fields(base, output)
return _fix_output(output, usemask=usemask, asrecarray=asrecarray)
def rec_drop_fields(base, drop_names):
"""
Returns a new numpy.recarray with fields in `drop_names` dropped.
"""
return drop_fields(base, drop_names, usemask=False, asrecarray=True)
def rename_fields(base, namemapper):
"""
Rename the fields from a flexible-datatype ndarray or recarray.
Nested fields are supported.
Parameters
----------
base : ndarray
Input array whose fields must be modified.
namemapper : dictionary
Dictionary mapping old field names to their new version.
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> a = np.array([(1, (2, [3.0, 30.])), (4, (5, [6.0, 60.]))],
... dtype=[('a', int),('b', [('ba', float), ('bb', (float, 2))])])
>>> rfn.rename_fields(a, {'a':'A', 'bb':'BB'})
array([(1, (2.0, [3.0, 30.0])), (4, (5.0, [6.0, 60.0]))],
dtype=[('A', '<i4'), ('b', [('ba', '<f8'), ('BB', '<f8', 2)])])
"""
def _recursive_rename_fields(ndtype, namemapper):
newdtype = []
for name in ndtype.names:
newname = namemapper.get(name, name)
current = ndtype[name]
if current.names:
newdtype.append(
(newname, _recursive_rename_fields(current, namemapper))
)
else:
newdtype.append((newname, current))
return newdtype
newdtype = _recursive_rename_fields(base.dtype, namemapper)
return base.view(newdtype)
def append_fields(base, names, data, dtypes=None,
fill_value=-1, usemask=True, asrecarray=False):
"""
Add new fields to an existing array.
The names of the fields are given with the `names` arguments,
the corresponding values with the `data` arguments.
If a single field is appended, `names`, `data` and `dtypes` do not have
to be lists but just values.
Parameters
----------
base : array
Input array to extend.
names : string, sequence
String or sequence of strings corresponding to the names
of the new fields.
data : array or sequence of arrays
Array or sequence of arrays storing the fields to add to the base.
dtypes : sequence of datatypes, optional
Datatype or sequence of datatypes.
If None, the datatypes are estimated from the `data`.
fill_value : {float}, optional
Filling value used to pad missing data on the shorter arrays.
usemask : {False, True}, optional
Whether to return a masked array or not.
asrecarray : {False, True}, optional
Whether to return a recarray (MaskedRecords) or not.
"""
# Check the names
if isinstance(names, (tuple, list)):
if len(names) != len(data):
msg = "The number of arrays does not match the number of names"
raise ValueError(msg)
elif isinstance(names, basestring):
names = [names, ]
data = [data, ]
#
if dtypes is None:
data = [np.array(a, copy=False, subok=True) for a in data]
data = [a.view([(name, a.dtype)]) for (name, a) in zip(names, data)]
else:
if not isinstance(dtypes, (tuple, list)):
dtypes = [dtypes, ]
if len(data) != len(dtypes):
if len(dtypes) == 1:
dtypes = dtypes * len(data)
else:
msg = "The dtypes argument must be None, a dtype, or a list."
raise ValueError(msg)
data = [np.array(a, copy=False, subok=True, dtype=d).view([(n, d)])
for (a, n, d) in zip(data, names, dtypes)]
#
base = merge_arrays(base, usemask=usemask, fill_value=fill_value)
if len(data) > 1:
data = merge_arrays(data, flatten=True, usemask=usemask,
fill_value=fill_value)
else:
data = data.pop()
#
output = ma.masked_all(max(len(base), len(data)),
dtype=base.dtype.descr + data.dtype.descr)
output = recursive_fill_fields(base, output)
output = recursive_fill_fields(data, output)
#
return _fix_output(output, usemask=usemask, asrecarray=asrecarray)
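# Illustrative sketch (not part of the original module): appending a single
# field with append_fields.  The integer widths in the dtype are platform
# dependent and the exact repr may differ between numpy versions.
#
# >>> base = np.array([(1, 2.0), (2, 3.0)], dtype=[('a', int), ('b', float)])
# >>> append_fields(base, 'c', data=[10, 20], usemask=False)
# array([(1, 2.0, 10), (2, 3.0, 20)],
#       dtype=[('a', '<i8'), ('b', '<f8'), ('c', '<i8')])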
def rec_append_fields(base, names, data, dtypes=None):
"""
Add new fields to an existing array.
The names of the fields are given with the `names` arguments,
the corresponding values with the `data` arguments.
If a single field is appended, `names`, `data` and `dtypes` do not have
to be lists but just values.
Parameters
----------
base : array
Input array to extend.
names : string, sequence
String or sequence of strings corresponding to the names
of the new fields.
data : array or sequence of arrays
Array or sequence of arrays storing the fields to add to the base.
dtypes : sequence of datatypes, optional
Datatype or sequence of datatypes.
If None, the datatypes are estimated from the `data`.
See Also
--------
append_fields
Returns
-------
appended_array : np.recarray
"""
return append_fields(base, names, data=data, dtypes=dtypes,
asrecarray=True, usemask=False)
def stack_arrays(arrays, defaults=None, usemask=True, asrecarray=False,
autoconvert=False):
"""
    Superposes arrays field by field.
Parameters
----------
arrays : array or sequence
Sequence of input arrays.
defaults : dictionary, optional
Dictionary mapping field names to the corresponding default values.
usemask : {True, False}, optional
        Whether to return a MaskedArray (or MaskedRecords if
        `asrecarray==True`) or a ndarray.
asrecarray : {False, True}, optional
Whether to return a recarray (or MaskedRecords if `usemask==True`)
or just a flexible-type ndarray.
autoconvert : {False, True}, optional
        Whether to automatically cast the type of the field to the maximum.
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> x = np.array([1, 2,])
>>> rfn.stack_arrays(x) is x
True
>>> z = np.array([('A', 1), ('B', 2)], dtype=[('A', '|S3'), ('B', float)])
>>> zz = np.array([('a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)],
... dtype=[('A', '|S3'), ('B', float), ('C', float)])
>>> test = rfn.stack_arrays((z,zz))
>>> test
masked_array(data = [('A', 1.0, --) ('B', 2.0, --) ('a', 10.0, 100.0) ('b', 20.0, 200.0)
('c', 30.0, 300.0)],
mask = [(False, False, True) (False, False, True) (False, False, False)
(False, False, False) (False, False, False)],
fill_value = ('N/A', 1e+20, 1e+20),
dtype = [('A', '|S3'), ('B', '<f8'), ('C', '<f8')])
"""
if isinstance(arrays, ndarray):
return arrays
elif len(arrays) == 1:
return arrays[0]
seqarrays = [np.asanyarray(a).ravel() for a in arrays]
nrecords = [len(a) for a in seqarrays]
ndtype = [a.dtype for a in seqarrays]
fldnames = [d.names for d in ndtype]
#
dtype_l = ndtype[0]
newdescr = dtype_l.descr
names = [_[0] for _ in newdescr]
for dtype_n in ndtype[1:]:
for descr in dtype_n.descr:
name = descr[0] or ''
if name not in names:
newdescr.append(descr)
names.append(name)
else:
nameidx = names.index(name)
current_descr = newdescr[nameidx]
if autoconvert:
if np.dtype(descr[1]) > np.dtype(current_descr[-1]):
current_descr = list(current_descr)
current_descr[-1] = descr[1]
newdescr[nameidx] = tuple(current_descr)
elif descr[1] != current_descr[-1]:
raise TypeError("Incompatible type '%s' <> '%s'" %
(dict(newdescr)[name], descr[1]))
# Only one field: use concatenate
if len(newdescr) == 1:
output = ma.concatenate(seqarrays)
else:
#
output = ma.masked_all((np.sum(nrecords),), newdescr)
offset = np.cumsum(np.r_[0, nrecords])
seen = []
for (a, n, i, j) in zip(seqarrays, fldnames, offset[:-1], offset[1:]):
names = a.dtype.names
if names is None:
output['f%i' % len(seen)][i:j] = a
else:
for name in n:
output[name][i:j] = a[name]
if name not in seen:
seen.append(name)
#
return _fix_output(_fix_defaults(output, defaults),
usemask=usemask, asrecarray=asrecarray)
def find_duplicates(a, key=None, ignoremask=True, return_index=False):
"""
Find the duplicates in a structured array along a given key
Parameters
----------
a : array-like
Input array
key : {string, None}, optional
        Name of the field along which to check for duplicates.
        If None, the search is performed on whole records.
ignoremask : {True, False}, optional
Whether masked data should be discarded or considered as duplicates.
return_index : {False, True}, optional
Whether to return the indices of the duplicated values.
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> ndtype = [('a', int)]
>>> a = np.ma.array([1, 1, 1, 2, 2, 3, 3],
... mask=[0, 0, 1, 0, 0, 0, 1]).view(ndtype)
>>> rfn.find_duplicates(a, ignoremask=True, return_index=True)
... # XXX: judging by the output, the ignoremask flag has no effect
"""
a = np.asanyarray(a).ravel()
# Get a dictionary of fields
fields = get_fieldstructure(a.dtype)
# Get the sorting data (by selecting the corresponding field)
base = a
if key:
for f in fields[key]:
base = base[f]
base = base[key]
# Get the sorting indices and the sorted data
sortidx = base.argsort()
sortedbase = base[sortidx]
sorteddata = sortedbase.filled()
# Compare the sorting data
flag = (sorteddata[:-1] == sorteddata[1:])
# If masked data must be ignored, set the flag to false where needed
if ignoremask:
sortedmask = sortedbase.recordmask
flag[sortedmask[1:]] = False
flag = np.concatenate(([False], flag))
# We need to take the point on the left as well (else we're missing it)
flag[:-1] = flag[:-1] + flag[1:]
duplicates = a[sortidx][flag]
if return_index:
return (duplicates, sortidx[flag])
else:
return duplicates
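# Illustrative sketch (not part of the original module): both occurrences of a
# duplicated key are returned, not only the "extra" ones.  A rough example
# (exact reprs may vary):
#
# >>> a = np.ma.array([1, 1, 2], mask=[0, 0, 0]).view([('a', int)])
# >>> dups, idx = find_duplicates(a, key='a', return_index=True)
# >>> idx
# array([0, 1])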
def join_by(key, r1, r2, jointype='inner', r1postfix='1', r2postfix='2',
defaults=None, usemask=True, asrecarray=False):
"""
Join arrays `r1` and `r2` on key `key`.
The key should be either a string or a sequence of string corresponding
to the fields used to join the array. An exception is raised if the
`key` field cannot be found in the two input arrays. Neither `r1` nor
`r2` should have any duplicates along `key`: the presence of duplicates
will make the output quite unreliable. Note that duplicates are not
looked for by the algorithm.
Parameters
----------
key : {string, sequence}
A string or a sequence of strings corresponding to the fields used
for comparison.
r1, r2 : arrays
Structured arrays.
jointype : {'inner', 'outer', 'leftouter'}, optional
If 'inner', returns the elements common to both r1 and r2.
If 'outer', returns the common elements as well as the elements of
        r1 not in r2 and the elements of r2 not in r1.
If 'leftouter', returns the common elements and the elements of r1
not in r2.
r1postfix : string, optional
String appended to the names of the fields of r1 that are present
        in r2 but absent from the key.
r2postfix : string, optional
String appended to the names of the fields of r2 that are present
        in r1 but absent from the key.
defaults : {dictionary}, optional
Dictionary mapping field names to the corresponding default values.
usemask : {True, False}, optional
        Whether to return a MaskedArray (or MaskedRecords if
        `asrecarray==True`) or a ndarray.
asrecarray : {False, True}, optional
Whether to return a recarray (or MaskedRecords if `usemask==True`)
or just a flexible-type ndarray.
Notes
-----
* The output is sorted along the key.
* A temporary array is formed by dropping the fields not in the key for
the two arrays and concatenating the result. This array is then
sorted, and the common entries selected. The output is constructed by
filling the fields with the selected entries. Matching is not
preserved if there are some duplicates...
"""
# Check jointype
if jointype not in ('inner', 'outer', 'leftouter'):
raise ValueError(
"The 'jointype' argument should be in 'inner', "
"'outer' or 'leftouter' (got '%s' instead)" % jointype
)
# If we have a single key, put it in a tuple
if isinstance(key, basestring):
key = (key,)
# Check the keys
for name in key:
if name not in r1.dtype.names:
raise ValueError('r1 does not have key field %s' % name)
if name not in r2.dtype.names:
raise ValueError('r2 does not have key field %s' % name)
# Make sure we work with ravelled arrays
r1 = r1.ravel()
r2 = r2.ravel()
# Fixme: nb2 below is never used. Commenting out for pyflakes.
# (nb1, nb2) = (len(r1), len(r2))
nb1 = len(r1)
(r1names, r2names) = (r1.dtype.names, r2.dtype.names)
# Check the names for collision
if (set.intersection(set(r1names), set(r2names)).difference(key) and
not (r1postfix or r2postfix)):
msg = "r1 and r2 contain common names, r1postfix and r2postfix "
msg += "can't be empty"
raise ValueError(msg)
# Make temporary arrays of just the keys
r1k = drop_fields(r1, [n for n in r1names if n not in key])
r2k = drop_fields(r2, [n for n in r2names if n not in key])
# Concatenate the two arrays for comparison
aux = ma.concatenate((r1k, r2k))
idx_sort = aux.argsort(order=key)
aux = aux[idx_sort]
#
# Get the common keys
flag_in = ma.concatenate(([False], aux[1:] == aux[:-1]))
flag_in[:-1] = flag_in[1:] + flag_in[:-1]
idx_in = idx_sort[flag_in]
idx_1 = idx_in[(idx_in < nb1)]
idx_2 = idx_in[(idx_in >= nb1)] - nb1
(r1cmn, r2cmn) = (len(idx_1), len(idx_2))
if jointype == 'inner':
(r1spc, r2spc) = (0, 0)
elif jointype == 'outer':
idx_out = idx_sort[~flag_in]
idx_1 = np.concatenate((idx_1, idx_out[(idx_out < nb1)]))
idx_2 = np.concatenate((idx_2, idx_out[(idx_out >= nb1)] - nb1))
(r1spc, r2spc) = (len(idx_1) - r1cmn, len(idx_2) - r2cmn)
elif jointype == 'leftouter':
idx_out = idx_sort[~flag_in]
idx_1 = np.concatenate((idx_1, idx_out[(idx_out < nb1)]))
(r1spc, r2spc) = (len(idx_1) - r1cmn, 0)
# Select the entries from each input
(s1, s2) = (r1[idx_1], r2[idx_2])
#
# Build the new description of the output array .......
# Start with the key fields
ndtype = [list(_) for _ in r1k.dtype.descr]
# Add the other fields
ndtype.extend(list(_) for _ in r1.dtype.descr if _[0] not in key)
# Find the new list of names (it may be different from r1names)
names = list(_[0] for _ in ndtype)
for desc in r2.dtype.descr:
desc = list(desc)
name = desc[0]
# Have we seen the current name already ?
if name in names:
nameidx = ndtype.index(desc)
current = ndtype[nameidx]
# The current field is part of the key: take the largest dtype
if name in key:
current[-1] = max(desc[1], current[-1])
# The current field is not part of the key: add the suffixes
else:
current[0] += r1postfix
desc[0] += r2postfix
ndtype.insert(nameidx + 1, desc)
#... we haven't: just add the description to the current list
else:
names.extend(desc[0])
ndtype.append(desc)
# Revert the elements to tuples
ndtype = [tuple(_) for _ in ndtype]
# Find the largest nb of common fields :
# r1cmn and r2cmn should be equal, but...
cmn = max(r1cmn, r2cmn)
# Construct an empty array
output = ma.masked_all((cmn + r1spc + r2spc,), dtype=ndtype)
names = output.dtype.names
for f in r1names:
selected = s1[f]
if f not in names or (f in r2names and not r2postfix and f not in key):
f += r1postfix
current = output[f]
current[:r1cmn] = selected[:r1cmn]
if jointype in ('outer', 'leftouter'):
current[cmn:cmn + r1spc] = selected[r1cmn:]
for f in r2names:
selected = s2[f]
if f not in names or (f in r1names and not r1postfix and f not in key):
f += r2postfix
current = output[f]
current[:r2cmn] = selected[:r2cmn]
if (jointype == 'outer') and r2spc:
current[-r2spc:] = selected[r2cmn:]
# Sort and finalize the output
output.sort(order=key)
kwargs = dict(usemask=usemask, asrecarray=asrecarray)
return _fix_output(_fix_defaults(output, defaults), **kwargs)
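# Illustrative sketch (not part of the original module): a minimal inner join.
# Field order follows the key fields of r1, then the remaining r1 fields, then
# the r2 fields; integer widths are platform dependent.
#
# >>> r1 = np.array([(1, 10.), (2, 20.)], dtype=[('key', int), ('x', float)])
# >>> r2 = np.array([(2, 200.), (3, 300.)], dtype=[('key', int), ('y', float)])
# >>> join_by('key', r1, r2, jointype='inner', usemask=False)
# array([(2, 20.0, 200.0)],
#       dtype=[('key', '<i8'), ('x', '<f8'), ('y', '<f8')])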
def rec_join(key, r1, r2, jointype='inner', r1postfix='1', r2postfix='2',
defaults=None):
"""
Join arrays `r1` and `r2` on keys.
Alternative to join_by, that always returns a np.recarray.
See Also
--------
join_by : equivalent function
"""
kwargs = dict(jointype=jointype, r1postfix=r1postfix, r2postfix=r2postfix,
defaults=defaults, usemask=False, asrecarray=True)
return join_by(key, r1, r2, **kwargs)
| mit |
jangorecki/h2o-3 | h2o-py/tests/testdir_scikit_grid/pyunit_scal_pca_rf_grid.py | 4 | 1831 | from __future__ import print_function
import sys
sys.path.insert(1,"../../")
import h2o
from tests import pyunit_utils
def scale_pca_rf_pipe():
from h2o.transforms.preprocessing import H2OScaler
from h2o.transforms.decomposition import H2OPCA
from h2o.estimators.random_forest import H2ORandomForestEstimator
from sklearn.pipeline import Pipeline
from sklearn.grid_search import RandomizedSearchCV
from h2o.cross_validation import H2OKFold
from h2o.model.regression import h2o_r2_score
from sklearn.metrics.scorer import make_scorer
from scipy.stats import randint
iris = h2o.import_file(path=pyunit_utils.locate("smalldata/iris/iris_wheader.csv"))
# build transformation pipeline using sklearn's Pipeline and H2O transforms
pipe = Pipeline([("standardize", H2OScaler()),
("pca", H2OPCA()),
("rf", H2ORandomForestEstimator())])
params = {"standardize__center": [True, False], # Parameters to test
"standardize__scale": [True, False],
"pca__k": randint(2, iris[1:].shape[1]),
"rf__ntrees": randint(50,60),
"rf__max_depth": randint(4,8),
"rf__min_rows": randint(5,10),}
custom_cv = H2OKFold(iris, n_folds=5, seed=42)
random_search = RandomizedSearchCV(pipe, params,
n_iter=5,
scoring=make_scorer(h2o_r2_score),
cv=custom_cv,
random_state=42,
n_jobs=1)
random_search.fit(iris[1:],iris[0])
print(random_search.best_estimator_)
if __name__ == "__main__":
pyunit_utils.standalone_test(scale_pca_rf_pipe)
else:
scale_pca_rf_pipe()
| apache-2.0 |
wzbozon/scikit-learn | sklearn/svm/tests/test_bounds.py | 280 | 2541 | import nose
from nose.tools import assert_equal, assert_true
from sklearn.utils.testing import clean_warning_registry
import warnings
import numpy as np
from scipy import sparse as sp
from sklearn.svm.bounds import l1_min_c
from sklearn.svm import LinearSVC
from sklearn.linear_model.logistic import LogisticRegression
dense_X = [[-1, 0], [0, 1], [1, 1], [1, 1]]
sparse_X = sp.csr_matrix(dense_X)
Y1 = [0, 1, 1, 1]
Y2 = [2, 1, 0, 0]
def test_l1_min_c():
losses = ['squared_hinge', 'log']
Xs = {'sparse': sparse_X, 'dense': dense_X}
Ys = {'two-classes': Y1, 'multi-class': Y2}
intercepts = {'no-intercept': {'fit_intercept': False},
'fit-intercept': {'fit_intercept': True,
'intercept_scaling': 10}}
for loss in losses:
for X_label, X in Xs.items():
for Y_label, Y in Ys.items():
for intercept_label, intercept_params in intercepts.items():
check = lambda: check_l1_min_c(X, Y, loss,
**intercept_params)
check.description = ('Test l1_min_c loss=%r %s %s %s' %
(loss, X_label, Y_label,
intercept_label))
yield check
def test_l2_deprecation():
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
assert_equal(l1_min_c(dense_X, Y1, "l2"),
l1_min_c(dense_X, Y1, "squared_hinge"))
assert_equal(w[0].category, DeprecationWarning)
def check_l1_min_c(X, y, loss, fit_intercept=True, intercept_scaling=None):
min_c = l1_min_c(X, y, loss, fit_intercept, intercept_scaling)
clf = {
'log': LogisticRegression(penalty='l1'),
'squared_hinge': LinearSVC(loss='squared_hinge',
penalty='l1', dual=False),
}[loss]
clf.fit_intercept = fit_intercept
clf.intercept_scaling = intercept_scaling
clf.C = min_c
clf.fit(X, y)
assert_true((np.asarray(clf.coef_) == 0).all())
assert_true((np.asarray(clf.intercept_) == 0).all())
clf.C = min_c * 1.01
clf.fit(X, y)
assert_true((np.asarray(clf.coef_) != 0).any() or
(np.asarray(clf.intercept_) != 0).any())
@nose.tools.raises(ValueError)
def test_ill_posed_min_c():
X = [[0, 0], [0, 0]]
y = [0, 1]
l1_min_c(X, y)
@nose.tools.raises(ValueError)
def test_unsupported_loss():
l1_min_c(dense_X, Y1, 'l1')
| bsd-3-clause |
wanggang3333/scikit-learn | examples/svm/plot_svm_kernels.py | 329 | 1971 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
SVM-Kernels
=========================================================
Three different types of SVM-Kernels are displayed below.
The polynomial and RBF are especially useful when the
data-points are not linearly separable.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
# Our dataset and targets
X = np.c_[(.4, -.7),
(-1.5, -1),
(-1.4, -.9),
(-1.3, -1.2),
(-1.1, -.2),
(-1.2, -.4),
(-.5, 1.2),
(-1.5, 2.1),
(1, 1),
# --
(1.3, .8),
(1.2, .5),
(.2, -2),
(.5, -2.4),
(.2, -2.3),
(0, -2.7),
(1.3, 2.1)].T
Y = [0] * 8 + [1] * 8
# figure number
fignum = 1
# fit the model
for kernel in ('linear', 'poly', 'rbf'):
clf = svm.SVC(kernel=kernel, gamma=2)
clf.fit(X, Y)
# plot the line, the points, and the nearest vectors to the plane
plt.figure(fignum, figsize=(4, 3))
plt.clf()
plt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1], s=80,
facecolors='none', zorder=10)
plt.scatter(X[:, 0], X[:, 1], c=Y, zorder=10, cmap=plt.cm.Paired)
plt.axis('tight')
x_min = -3
x_max = 3
y_min = -3
y_max = 3
XX, YY = np.mgrid[x_min:x_max:200j, y_min:y_max:200j]
Z = clf.decision_function(np.c_[XX.ravel(), YY.ravel()])
# Put the result into a color plot
Z = Z.reshape(XX.shape)
plt.figure(fignum, figsize=(4, 3))
plt.pcolormesh(XX, YY, Z > 0, cmap=plt.cm.Paired)
plt.contour(XX, YY, Z, colors=['k', 'k', 'k'], linestyles=['--', '-', '--'],
levels=[-.5, 0, .5])
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())
fignum = fignum + 1
plt.show()
| bsd-3-clause |
akrherz/idep | scripts/gridorder2/strips_dumper.py | 2 | 1653 | """Generate the requested output."""
from pyiem.dep import read_env
from pyiem.util import get_dbconn
from tqdm import tqdm
import pandas as pd
from pandas.io.sql import read_sql
HUCS = (
"070801050307 070801050702 071000070104 102300030203 102400030406 "
"102400030602 102802010203"
).split()
def get_flowpath_lengths(pgconn):
"""Load some metadata."""
df = read_sql(
"SELECT scenario, huc_12, fpath, bulk_slope,"
"ST_LENGTH(geom) as len from flowpaths "
"WHERE scenario = 95 and huc_12 in %s"
"ORDER by scenario ASC, huc_12 ASC",
pgconn,
params=(tuple(HUCS),),
index_col=None,
)
df["useme"] = True
return df
def compute_daily(df):
"""Compute daily 2008-2020 totals."""
envs = []
for _, row in tqdm(df.iterrows(), total=len(df.index)):
envfn = (
f"/i/{row['scenario']}/env/{row['huc_12'][:8]}/"
f"{row['huc_12'][8:]}/{row['huc_12']}_{row['fpath']}.env"
)
envdf = read_env(envfn)
envdf["scenario"] = row["scenario"]
envdf["huc_12"] = row["huc_12"]
envdf["flowpath"] = row["fpath"]
envdf["delivery"] = envdf["sed_del"] / row["len"]
envs.append(envdf)
    # We now have a dataframe with daily flowpath records
envdf = pd.concat(envs)
return envdf
def main():
"""Do great things."""
pgconn = get_dbconn("idep")
df = get_flowpath_lengths(pgconn)
result_full = compute_daily(df)
with pd.ExcelWriter("daily.xlsx") as writer:
result_full.to_excel(writer, "Single Flowpath-daily", index=False)
if __name__ == "__main__":
main()
| mit |
webmasterraj/GaSiProMo | flask/lib/python2.7/site-packages/pandas/tools/pivot.py | 4 | 13460 | # pylint: disable=E1103
import warnings
from pandas import Series, DataFrame
from pandas.core.index import MultiIndex, Index
from pandas.core.groupby import Grouper
from pandas.tools.merge import concat
from pandas.tools.util import cartesian_product
from pandas.compat import range, lrange, zip
from pandas.util.decorators import deprecate_kwarg
from pandas import compat
import pandas.core.common as com
import numpy as np
def pivot_table(data, values=None, index=None, columns=None, aggfunc='mean',
fill_value=None, margins=False, dropna=True):
"""
Create a spreadsheet-style pivot table as a DataFrame. The levels in the
pivot table will be stored in MultiIndex objects (hierarchical indexes) on
the index and columns of the result DataFrame
Parameters
----------
data : DataFrame
values : column to aggregate, optional
index : a column, Grouper, array which has the same length as data, or list of them.
Keys to group by on the pivot table index.
        If an array is passed, it is used in the same manner as column values.
columns : a column, Grouper, array which has the same length as data, or list of them.
Keys to group by on the pivot table column.
        If an array is passed, it is used in the same manner as column values.
aggfunc : function, default numpy.mean, or list of functions
If list of functions passed, the resulting pivot table will have
hierarchical columns whose top level are the function names (inferred
from the function objects themselves)
fill_value : scalar, default None
Value to replace missing values with
margins : boolean, default False
        Add all row / column totals (e.g. for subtotals / grand totals)
dropna : boolean, default True
Do not include columns whose entries are all NaN
Examples
--------
>>> df
A B C D
0 foo one small 1
1 foo one large 2
2 foo one large 2
3 foo two small 3
4 foo two small 3
5 bar one large 4
6 bar one small 5
7 bar two small 6
8 bar two large 7
>>> table = pivot_table(df, values='D', index=['A', 'B'],
... columns=['C'], aggfunc=np.sum)
>>> table
small large
foo one 1 4
two 6 NaN
bar one 5 4
two 6 7
Returns
-------
table : DataFrame
"""
index = _convert_by(index)
columns = _convert_by(columns)
if isinstance(aggfunc, list):
pieces = []
keys = []
for func in aggfunc:
table = pivot_table(data, values=values, index=index, columns=columns,
fill_value=fill_value, aggfunc=func,
margins=margins)
pieces.append(table)
keys.append(func.__name__)
return concat(pieces, keys=keys, axis=1)
keys = index + columns
values_passed = values is not None
if values_passed:
if isinstance(values, (list, tuple)):
values_multi = True
else:
values_multi = False
values = [values]
else:
values = list(data.columns.drop(keys))
if values_passed:
to_filter = []
for x in keys + values:
if isinstance(x, Grouper):
x = x.key
try:
if x in data:
to_filter.append(x)
except TypeError:
pass
if len(to_filter) < len(data.columns):
data = data[to_filter]
grouped = data.groupby(keys)
agged = grouped.agg(aggfunc)
table = agged
if table.index.nlevels > 1:
to_unstack = [agged.index.names[i] or i
for i in range(len(index), len(keys))]
table = agged.unstack(to_unstack)
if not dropna:
try:
m = MultiIndex.from_arrays(cartesian_product(table.index.levels))
table = table.reindex_axis(m, axis=0)
except AttributeError:
pass # it's a single level
try:
m = MultiIndex.from_arrays(cartesian_product(table.columns.levels))
table = table.reindex_axis(m, axis=1)
except AttributeError:
pass # it's a single level or a series
if isinstance(table, DataFrame):
if isinstance(table.columns, MultiIndex):
table = table.sortlevel(axis=1)
else:
table = table.sort_index(axis=1)
if fill_value is not None:
table = table.fillna(value=fill_value, downcast='infer')
if margins:
table = _add_margins(table, data, values, rows=index,
cols=columns, aggfunc=aggfunc)
# discard the top level
if values_passed and not values_multi:
table = table[values[0]]
if len(index) == 0 and len(columns) > 0:
table = table.T
return table
DataFrame.pivot_table = pivot_table
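# Illustrative sketch (not part of the original file): with margins=True an
# extra 'All' row and column of grand totals is appended, e.g., for the
# DataFrame `df` shown in the docstring above:
#
# >>> pivot_table(df, values='D', index=['A'], columns=['C'],
# ...             aggfunc=np.sum, margins=True)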
def _add_margins(table, data, values, rows, cols, aggfunc):
grand_margin = _compute_grand_margin(data, values, aggfunc)
if not values and isinstance(table, Series):
# If there are no values and the table is a series, then there is only
# one column in the data. Compute grand margin and return it.
row_key = ('All',) + ('',) * (len(rows) - 1) if len(rows) > 1 else 'All'
return table.append(Series({row_key: grand_margin['All']}))
if values:
marginal_result_set = _generate_marginal_results(table, data, values, rows, cols, aggfunc, grand_margin)
if not isinstance(marginal_result_set, tuple):
return marginal_result_set
result, margin_keys, row_margin = marginal_result_set
else:
marginal_result_set = _generate_marginal_results_without_values(table, data, rows, cols, aggfunc)
if not isinstance(marginal_result_set, tuple):
return marginal_result_set
result, margin_keys, row_margin = marginal_result_set
key = ('All',) + ('',) * (len(rows) - 1) if len(rows) > 1 else 'All'
row_margin = row_margin.reindex(result.columns)
# populate grand margin
for k in margin_keys:
if isinstance(k, compat.string_types):
row_margin[k] = grand_margin[k]
else:
row_margin[k] = grand_margin[k[0]]
margin_dummy = DataFrame(row_margin, columns=[key]).T
row_names = result.index.names
result = result.append(margin_dummy)
result.index.names = row_names
return result
def _compute_grand_margin(data, values, aggfunc):
if values:
grand_margin = {}
for k, v in data[values].iteritems():
try:
if isinstance(aggfunc, compat.string_types):
grand_margin[k] = getattr(v, aggfunc)()
elif isinstance(aggfunc, dict):
if isinstance(aggfunc[k], compat.string_types):
grand_margin[k] = getattr(v, aggfunc[k])()
else:
grand_margin[k] = aggfunc[k](v)
else:
grand_margin[k] = aggfunc(v)
except TypeError:
pass
return grand_margin
else:
return {'All': aggfunc(data.index)}
def _generate_marginal_results(table, data, values, rows, cols, aggfunc, grand_margin):
if len(cols) > 0:
# need to "interleave" the margins
table_pieces = []
margin_keys = []
def _all_key(key):
return (key, 'All') + ('',) * (len(cols) - 1)
if len(rows) > 0:
margin = data[rows + values].groupby(rows).agg(aggfunc)
cat_axis = 1
for key, piece in table.groupby(level=0, axis=cat_axis):
all_key = _all_key(key)
# we are going to mutate this, so need to copy!
piece = piece.copy()
piece[all_key] = margin[key]
table_pieces.append(piece)
margin_keys.append(all_key)
else:
margin = grand_margin
cat_axis = 0
for key, piece in table.groupby(level=0, axis=cat_axis):
all_key = _all_key(key)
table_pieces.append(piece)
table_pieces.append(Series(margin[key], index=[all_key]))
margin_keys.append(all_key)
result = concat(table_pieces, axis=cat_axis)
if len(rows) == 0:
return result
else:
result = table
margin_keys = table.columns
if len(cols) > 0:
row_margin = data[cols + values].groupby(cols).agg(aggfunc)
row_margin = row_margin.stack()
# slight hack
new_order = [len(cols)] + lrange(len(cols))
row_margin.index = row_margin.index.reorder_levels(new_order)
else:
row_margin = Series(np.nan, index=result.columns)
return result, margin_keys, row_margin
def _generate_marginal_results_without_values(table, data, rows, cols, aggfunc):
if len(cols) > 0:
# need to "interleave" the margins
margin_keys = []
def _all_key():
if len(cols) == 1:
return 'All'
return ('All', ) + ('', ) * (len(cols) - 1)
if len(rows) > 0:
margin = data[rows].groupby(rows).apply(aggfunc)
all_key = _all_key()
table[all_key] = margin
result = table
margin_keys.append(all_key)
else:
margin = data.groupby(level=0, axis=0).apply(aggfunc)
all_key = _all_key()
table[all_key] = margin
result = table
margin_keys.append(all_key)
return result
else:
result = table
margin_keys = table.columns
if len(cols):
row_margin = data[cols].groupby(cols).apply(aggfunc)
else:
row_margin = Series(np.nan, index=result.columns)
return result, margin_keys, row_margin
def _convert_by(by):
if by is None:
by = []
elif (np.isscalar(by) or isinstance(by, (np.ndarray, Index, Series, Grouper))
or hasattr(by, '__call__')):
by = [by]
else:
by = list(by)
return by
def crosstab(index, columns, values=None, rownames=None, colnames=None,
aggfunc=None, margins=False, dropna=True):
"""
Compute a simple cross-tabulation of two (or more) factors. By default
computes a frequency table of the factors unless an array of values and an
aggregation function are passed
Parameters
----------
index : array-like, Series, or list of arrays/Series
Values to group by in the rows
columns : array-like, Series, or list of arrays/Series
Values to group by in the columns
values : array-like, optional
Array of values to aggregate according to the factors
aggfunc : function, optional
If no values array is passed, computes a frequency table
rownames : sequence, default None
If passed, must match number of row arrays passed
colnames : sequence, default None
If passed, must match number of column arrays passed
margins : boolean, default False
Add row/column margins (subtotals)
dropna : boolean, default True
Do not include columns whose entries are all NaN
Notes
-----
Any Series passed will have their name attributes used unless row or column
names for the cross-tabulation are specified
Examples
--------
>>> a
array([foo, foo, foo, foo, bar, bar,
bar, bar, foo, foo, foo], dtype=object)
>>> b
array([one, one, one, two, one, one,
one, two, two, two, one], dtype=object)
>>> c
array([dull, dull, shiny, dull, dull, shiny,
shiny, dull, shiny, shiny, shiny], dtype=object)
>>> crosstab(a, [b, c], rownames=['a'], colnames=['b', 'c'])
b one two
c dull shiny dull shiny
a
bar 1 2 1 0
foo 2 2 1 2
Returns
-------
crosstab : DataFrame
"""
index = com._maybe_make_list(index)
columns = com._maybe_make_list(columns)
rownames = _get_names(index, rownames, prefix='row')
colnames = _get_names(columns, colnames, prefix='col')
data = {}
data.update(zip(rownames, index))
data.update(zip(colnames, columns))
if values is None:
df = DataFrame(data)
df['__dummy__'] = 0
table = df.pivot_table('__dummy__', index=rownames, columns=colnames,
aggfunc=len, margins=margins, dropna=dropna)
return table.fillna(0).astype(np.int64)
else:
data['__dummy__'] = values
df = DataFrame(data)
table = df.pivot_table('__dummy__', index=rownames, columns=colnames,
aggfunc=aggfunc, margins=margins, dropna=dropna)
return table
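# Illustrative sketch (not part of the original file): when `values` and
# `aggfunc` are both supplied, crosstab aggregates instead of counting, e.g.
#
# >>> crosstab(a, b, values=vals, aggfunc=np.sum)
#
# where `a`, `b` and `vals` are equal-length array-likes (hypothetical names).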
def _get_names(arrs, names, prefix='row'):
if names is None:
names = []
for i, arr in enumerate(arrs):
if isinstance(arr, Series) and arr.name is not None:
names.append(arr.name)
else:
names.append('%s_%d' % (prefix, i))
else:
if len(names) != len(arrs):
raise AssertionError('arrays and names must have the same length')
if not isinstance(names, list):
names = list(names)
return names
| gpl-2.0 |
mbayon/TFG-MachineLearning | venv/lib/python3.6/site-packages/pandas/tests/test_strings.py | 6 | 108539 | # -*- coding: utf-8 -*-
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
import pytest
import re
from numpy import nan as NA
import numpy as np
from numpy.random import randint
from pandas.compat import range, u
import pandas.compat as compat
from pandas import (Index, Series, DataFrame, isnull, MultiIndex, notnull)
from pandas.util.testing import assert_series_equal
import pandas.util.testing as tm
import pandas.core.strings as strings
class TestStringMethods(object):
def test_api(self):
# GH 6106, GH 9322
assert Series.str is strings.StringMethods
assert isinstance(Series(['']).str, strings.StringMethods)
# GH 9184
invalid = Series([1])
with tm.assert_raises_regex(AttributeError,
"only use .str accessor"):
invalid.str
assert not hasattr(invalid, 'str')
def test_iter(self):
# GH3638
strs = 'google', 'wikimedia', 'wikipedia', 'wikitravel'
ds = Series(strs)
for s in ds.str:
# iter must yield a Series
assert isinstance(s, Series)
# indices of each yielded Series should be equal to the index of
# the original Series
tm.assert_index_equal(s.index, ds.index)
for el in s:
# each element of the series is either a basestring/str or nan
assert isinstance(el, compat.string_types) or isnull(el)
# desired behavior is to iterate until everything would be nan on the
# next iter so make sure the last element of the iterator was 'l' in
# this case since 'wikitravel' is the longest string
assert s.dropna().values.item() == 'l'
def test_iter_empty(self):
ds = Series([], dtype=object)
i, s = 100, 1
for i, s in enumerate(ds.str):
pass
        # nothing to iterate over, so nothing is defined and the values
        # should remain unchanged
assert i == 100
assert s == 1
def test_iter_single_element(self):
ds = Series(['a'])
for i, s in enumerate(ds.str):
pass
assert not i
assert_series_equal(ds, s)
def test_iter_object_try_string(self):
ds = Series([slice(None, randint(10), randint(10, 20)) for _ in range(
4)])
i, s = 100, 'h'
for i, s in enumerate(ds.str):
pass
assert i == 100
assert s == 'h'
def test_cat(self):
one = np.array(['a', 'a', 'b', 'b', 'c', NA], dtype=np.object_)
two = np.array(['a', NA, 'b', 'd', 'foo', NA], dtype=np.object_)
# single array
result = strings.str_cat(one)
exp = 'aabbc'
assert result == exp
result = strings.str_cat(one, na_rep='NA')
exp = 'aabbcNA'
assert result == exp
result = strings.str_cat(one, na_rep='-')
exp = 'aabbc-'
assert result == exp
result = strings.str_cat(one, sep='_', na_rep='NA')
exp = 'a_a_b_b_c_NA'
assert result == exp
result = strings.str_cat(two, sep='-')
exp = 'a-b-d-foo'
assert result == exp
# Multiple arrays
result = strings.str_cat(one, [two], na_rep='NA')
exp = np.array(['aa', 'aNA', 'bb', 'bd', 'cfoo', 'NANA'],
dtype=np.object_)
tm.assert_numpy_array_equal(result, exp)
result = strings.str_cat(one, two)
exp = np.array(['aa', NA, 'bb', 'bd', 'cfoo', NA], dtype=np.object_)
tm.assert_almost_equal(result, exp)
def test_count(self):
values = np.array(['foo', 'foofoo', NA, 'foooofooofommmfoo'],
dtype=np.object_)
result = strings.str_count(values, 'f[o]+')
exp = np.array([1, 2, NA, 4])
tm.assert_numpy_array_equal(result, exp)
result = Series(values).str.count('f[o]+')
exp = Series([1, 2, NA, 4])
assert isinstance(result, Series)
tm.assert_series_equal(result, exp)
# mixed
mixed = ['a', NA, 'b', True, datetime.today(), 'foo', None, 1, 2.]
rs = strings.str_count(mixed, 'a')
xp = np.array([1, NA, 0, NA, NA, 0, NA, NA, NA])
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.count('a')
xp = Series([1, NA, 0, NA, NA, 0, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = [u('foo'), u('foofoo'), NA, u('foooofooofommmfoo')]
result = strings.str_count(values, 'f[o]+')
exp = np.array([1, 2, NA, 4])
tm.assert_numpy_array_equal(result, exp)
result = Series(values).str.count('f[o]+')
exp = Series([1, 2, NA, 4])
assert isinstance(result, Series)
tm.assert_series_equal(result, exp)
def test_contains(self):
values = np.array(['foo', NA, 'fooommm__foo',
'mmm_', 'foommm[_]+bar'], dtype=np.object_)
pat = 'mmm[_]+'
result = strings.str_contains(values, pat)
expected = np.array([False, NA, True, True, False], dtype=np.object_)
tm.assert_numpy_array_equal(result, expected)
result = strings.str_contains(values, pat, regex=False)
expected = np.array([False, NA, False, False, True], dtype=np.object_)
tm.assert_numpy_array_equal(result, expected)
values = ['foo', 'xyz', 'fooommm__foo', 'mmm_']
result = strings.str_contains(values, pat)
expected = np.array([False, False, True, True])
assert result.dtype == np.bool_
tm.assert_numpy_array_equal(result, expected)
# case insensitive using regex
values = ['Foo', 'xYz', 'fOOomMm__fOo', 'MMM_']
result = strings.str_contains(values, 'FOO|mmm', case=False)
expected = np.array([True, False, True, True])
tm.assert_numpy_array_equal(result, expected)
# case insensitive without regex
result = strings.str_contains(values, 'foo', regex=False, case=False)
expected = np.array([True, False, True, False])
tm.assert_numpy_array_equal(result, expected)
# mixed
mixed = ['a', NA, 'b', True, datetime.today(), 'foo', None, 1, 2.]
rs = strings.str_contains(mixed, 'o')
xp = np.array([False, NA, False, NA, NA, True, NA, NA, NA],
dtype=np.object_)
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.contains('o')
xp = Series([False, NA, False, NA, NA, True, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = np.array([u'foo', NA, u'fooommm__foo', u'mmm_'],
dtype=np.object_)
pat = 'mmm[_]+'
result = strings.str_contains(values, pat)
expected = np.array([False, np.nan, True, True], dtype=np.object_)
tm.assert_numpy_array_equal(result, expected)
result = strings.str_contains(values, pat, na=False)
expected = np.array([False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
values = np.array(['foo', 'xyz', 'fooommm__foo', 'mmm_'],
dtype=np.object_)
result = strings.str_contains(values, pat)
expected = np.array([False, False, True, True])
assert result.dtype == np.bool_
tm.assert_numpy_array_equal(result, expected)
# na
values = Series(['om', 'foo', np.nan])
res = values.str.contains('foo', na="foo")
assert res.loc[2] == "foo"
def test_startswith(self):
values = Series(['om', NA, 'foo_nom', 'nom', 'bar_foo', NA, 'foo'])
result = values.str.startswith('foo')
exp = Series([False, NA, True, False, False, NA, True])
tm.assert_series_equal(result, exp)
# mixed
mixed = np.array(['a', NA, 'b', True, datetime.today(),
'foo', None, 1, 2.], dtype=np.object_)
rs = strings.str_startswith(mixed, 'f')
xp = np.array([False, NA, False, NA, NA, True, NA, NA, NA],
dtype=np.object_)
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.startswith('f')
assert isinstance(rs, Series)
xp = Series([False, NA, False, NA, NA, True, NA, NA, NA])
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('om'), NA, u('foo_nom'), u('nom'), u('bar_foo'), NA,
u('foo')])
result = values.str.startswith('foo')
exp = Series([False, NA, True, False, False, NA, True])
tm.assert_series_equal(result, exp)
result = values.str.startswith('foo', na=True)
tm.assert_series_equal(result, exp.fillna(True).astype(bool))
def test_endswith(self):
values = Series(['om', NA, 'foo_nom', 'nom', 'bar_foo', NA, 'foo'])
result = values.str.endswith('foo')
exp = Series([False, NA, False, False, True, NA, True])
tm.assert_series_equal(result, exp)
# mixed
mixed = ['a', NA, 'b', True, datetime.today(), 'foo', None, 1, 2.]
rs = strings.str_endswith(mixed, 'f')
xp = np.array([False, NA, False, NA, NA, False, NA, NA, NA],
dtype=np.object_)
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.endswith('f')
xp = Series([False, NA, False, NA, NA, False, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('om'), NA, u('foo_nom'), u('nom'), u('bar_foo'), NA,
u('foo')])
result = values.str.endswith('foo')
exp = Series([False, NA, False, False, True, NA, True])
tm.assert_series_equal(result, exp)
result = values.str.endswith('foo', na=False)
tm.assert_series_equal(result, exp.fillna(False).astype(bool))
def test_title(self):
values = Series(["FOO", "BAR", NA, "Blah", "blurg"])
result = values.str.title()
exp = Series(["Foo", "Bar", NA, "Blah", "Blurg"])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(["FOO", NA, "bar", True, datetime.today(), "blah", None,
1, 2.])
mixed = mixed.str.title()
exp = Series(["Foo", NA, "Bar", NA, NA, "Blah", NA, NA, NA])
tm.assert_almost_equal(mixed, exp)
# unicode
values = Series([u("FOO"), NA, u("bar"), u("Blurg")])
results = values.str.title()
exp = Series([u("Foo"), NA, u("Bar"), u("Blurg")])
tm.assert_series_equal(results, exp)
def test_lower_upper(self):
values = Series(['om', NA, 'nom', 'nom'])
result = values.str.upper()
exp = Series(['OM', NA, 'NOM', 'NOM'])
tm.assert_series_equal(result, exp)
result = result.str.lower()
tm.assert_series_equal(result, values)
# mixed
mixed = Series(['a', NA, 'b', True, datetime.today(), 'foo', None, 1,
2.])
mixed = mixed.str.upper()
rs = Series(mixed).str.lower()
xp = Series(['a', NA, 'b', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('om'), NA, u('nom'), u('nom')])
result = values.str.upper()
exp = Series([u('OM'), NA, u('NOM'), u('NOM')])
tm.assert_series_equal(result, exp)
result = result.str.lower()
tm.assert_series_equal(result, values)
def test_capitalize(self):
values = Series(["FOO", "BAR", NA, "Blah", "blurg"])
result = values.str.capitalize()
exp = Series(["Foo", "Bar", NA, "Blah", "Blurg"])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(["FOO", NA, "bar", True, datetime.today(), "blah", None,
1, 2.])
mixed = mixed.str.capitalize()
exp = Series(["Foo", NA, "Bar", NA, NA, "Blah", NA, NA, NA])
tm.assert_almost_equal(mixed, exp)
# unicode
values = Series([u("FOO"), NA, u("bar"), u("Blurg")])
results = values.str.capitalize()
exp = Series([u("Foo"), NA, u("Bar"), u("Blurg")])
tm.assert_series_equal(results, exp)
def test_swapcase(self):
values = Series(["FOO", "BAR", NA, "Blah", "blurg"])
result = values.str.swapcase()
exp = Series(["foo", "bar", NA, "bLAH", "BLURG"])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(["FOO", NA, "bar", True, datetime.today(), "Blah", None,
1, 2.])
mixed = mixed.str.swapcase()
exp = Series(["foo", NA, "BAR", NA, NA, "bLAH", NA, NA, NA])
tm.assert_almost_equal(mixed, exp)
# unicode
values = Series([u("FOO"), NA, u("bar"), u("Blurg")])
results = values.str.swapcase()
exp = Series([u("foo"), NA, u("BAR"), u("bLURG")])
tm.assert_series_equal(results, exp)
def test_casemethods(self):
values = ['aaa', 'bbb', 'CCC', 'Dddd', 'eEEE']
s = Series(values)
assert s.str.lower().tolist() == [v.lower() for v in values]
assert s.str.upper().tolist() == [v.upper() for v in values]
assert s.str.title().tolist() == [v.title() for v in values]
assert s.str.capitalize().tolist() == [v.capitalize() for v in values]
assert s.str.swapcase().tolist() == [v.swapcase() for v in values]
def test_replace(self):
values = Series(['fooBAD__barBAD', NA])
result = values.str.replace('BAD[_]*', '')
exp = Series(['foobar', NA])
tm.assert_series_equal(result, exp)
result = values.str.replace('BAD[_]*', '', n=1)
exp = Series(['foobarBAD', NA])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['aBAD', NA, 'bBAD', True, datetime.today(), 'fooBAD',
None, 1, 2.])
rs = Series(mixed).str.replace('BAD[_]*', '')
xp = Series(['a', NA, 'b', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('fooBAD__barBAD'), NA])
result = values.str.replace('BAD[_]*', '')
exp = Series([u('foobar'), NA])
tm.assert_series_equal(result, exp)
result = values.str.replace('BAD[_]*', '', n=1)
exp = Series([u('foobarBAD'), NA])
tm.assert_series_equal(result, exp)
# flags + unicode
values = Series([b"abcd,\xc3\xa0".decode("utf-8")])
exp = Series([b"abcd, \xc3\xa0".decode("utf-8")])
result = values.str.replace(r"(?<=\w),(?=\w)", ", ", flags=re.UNICODE)
tm.assert_series_equal(result, exp)
# GH 13438
for klass in (Series, Index):
for repl in (None, 3, {'a': 'b'}):
for data in (['a', 'b', None], ['a', 'b', 'c', 'ad']):
values = klass(data)
pytest.raises(TypeError, values.str.replace, 'a', repl)
def test_replace_callable(self):
# GH 15055
values = Series(['fooBAD__barBAD', NA])
# test with callable
repl = lambda m: m.group(0).swapcase()
result = values.str.replace('[a-z][A-Z]{2}', repl, n=2)
exp = Series(['foObaD__baRbaD', NA])
tm.assert_series_equal(result, exp)
# test with wrong number of arguments, raising an error
if compat.PY2:
p_err = r'takes (no|(exactly|at (least|most)) ?\d+) arguments?'
else:
p_err = (r'((takes)|(missing)) (?(2)from \d+ to )?\d+ '
r'(?(3)required )positional arguments?')
repl = lambda: None
with tm.assert_raises_regex(TypeError, p_err):
values.str.replace('a', repl)
repl = lambda m, x: None
with tm.assert_raises_regex(TypeError, p_err):
values.str.replace('a', repl)
repl = lambda m, x, y=None: None
with tm.assert_raises_regex(TypeError, p_err):
values.str.replace('a', repl)
# test regex named groups
values = Series(['Foo Bar Baz', NA])
pat = r"(?P<first>\w+) (?P<middle>\w+) (?P<last>\w+)"
repl = lambda m: m.group('middle').swapcase()
result = values.str.replace(pat, repl)
exp = Series(['bAR', NA])
tm.assert_series_equal(result, exp)
def test_replace_compiled_regex(self):
# GH 15446
values = Series(['fooBAD__barBAD', NA])
# test with compiled regex
pat = re.compile(r'BAD[_]*')
result = values.str.replace(pat, '')
exp = Series(['foobar', NA])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['aBAD', NA, 'bBAD', True, datetime.today(), 'fooBAD',
None, 1, 2.])
rs = Series(mixed).str.replace(pat, '')
xp = Series(['a', NA, 'b', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('fooBAD__barBAD'), NA])
result = values.str.replace(pat, '')
exp = Series([u('foobar'), NA])
tm.assert_series_equal(result, exp)
result = values.str.replace(pat, '', n=1)
exp = Series([u('foobarBAD'), NA])
tm.assert_series_equal(result, exp)
# flags + unicode
values = Series([b"abcd,\xc3\xa0".decode("utf-8")])
exp = Series([b"abcd, \xc3\xa0".decode("utf-8")])
pat = re.compile(r"(?<=\w),(?=\w)", flags=re.UNICODE)
result = values.str.replace(pat, ", ")
tm.assert_series_equal(result, exp)
# case and flags provided to str.replace will have no effect
# and will produce warnings
values = Series(['fooBAD__barBAD__bad', NA])
pat = re.compile(r'BAD[_]*')
with tm.assert_raises_regex(ValueError,
"case and flags cannot be"):
result = values.str.replace(pat, '', flags=re.IGNORECASE)
with tm.assert_raises_regex(ValueError,
"case and flags cannot be"):
result = values.str.replace(pat, '', case=False)
with tm.assert_raises_regex(ValueError,
"case and flags cannot be"):
result = values.str.replace(pat, '', case=True)
# test with callable
values = Series(['fooBAD__barBAD', NA])
repl = lambda m: m.group(0).swapcase()
pat = re.compile('[a-z][A-Z]{2}')
result = values.str.replace(pat, repl, n=2)
exp = Series(['foObaD__baRbaD', NA])
tm.assert_series_equal(result, exp)
def test_repeat(self):
values = Series(['a', 'b', NA, 'c', NA, 'd'])
result = values.str.repeat(3)
exp = Series(['aaa', 'bbb', NA, 'ccc', NA, 'ddd'])
tm.assert_series_equal(result, exp)
result = values.str.repeat([1, 2, 3, 4, 5, 6])
exp = Series(['a', 'bb', NA, 'cccc', NA, 'dddddd'])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['a', NA, 'b', True, datetime.today(), 'foo', None, 1,
2.])
rs = Series(mixed).str.repeat(3)
xp = Series(['aaa', NA, 'bbb', NA, NA, 'foofoofoo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('a'), u('b'), NA, u('c'), NA, u('d')])
result = values.str.repeat(3)
exp = Series([u('aaa'), u('bbb'), NA, u('ccc'), NA, u('ddd')])
tm.assert_series_equal(result, exp)
result = values.str.repeat([1, 2, 3, 4, 5, 6])
exp = Series([u('a'), u('bb'), NA, u('cccc'), NA, u('dddddd')])
tm.assert_series_equal(result, exp)
def test_match(self):
# New match behavior introduced in 0.13
values = Series(['fooBAD__barBAD', NA, 'foo'])
result = values.str.match('.*(BAD[_]+).*(BAD)')
exp = Series([True, NA, False])
tm.assert_series_equal(result, exp)
values = Series(['fooBAD__barBAD', NA, 'foo'])
result = values.str.match('.*BAD[_]+.*BAD')
exp = Series([True, NA, False])
tm.assert_series_equal(result, exp)
# test passing as_indexer still works but is ignored
values = Series(['fooBAD__barBAD', NA, 'foo'])
exp = Series([True, NA, False])
with tm.assert_produces_warning(FutureWarning):
result = values.str.match('.*BAD[_]+.*BAD', as_indexer=True)
tm.assert_series_equal(result, exp)
with tm.assert_produces_warning(FutureWarning):
result = values.str.match('.*BAD[_]+.*BAD', as_indexer=False)
tm.assert_series_equal(result, exp)
with tm.assert_produces_warning(FutureWarning):
result = values.str.match('.*(BAD[_]+).*(BAD)', as_indexer=True)
tm.assert_series_equal(result, exp)
pytest.raises(ValueError, values.str.match, '.*(BAD[_]+).*(BAD)',
as_indexer=False)
# mixed
mixed = Series(['aBAD_BAD', NA, 'BAD_b_BAD', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.match('.*(BAD[_]+).*(BAD)')
xp = Series([True, NA, True, NA, NA, False, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('fooBAD__barBAD'), NA, u('foo')])
result = values.str.match('.*(BAD[_]+).*(BAD)')
exp = Series([True, NA, False])
tm.assert_series_equal(result, exp)
# na GH #6609
res = Series(['a', 0, np.nan]).str.match('a', na=False)
exp = Series([True, False, False])
assert_series_equal(exp, res)
res = Series(['a', 0, np.nan]).str.match('a')
exp = Series([True, np.nan, np.nan])
assert_series_equal(exp, res)
def test_extract_expand_None(self):
values = Series(['fooBAD__barBAD', NA, 'foo'])
with tm.assert_produces_warning(FutureWarning):
values.str.extract('.*(BAD[_]+).*(BAD)', expand=None)
def test_extract_expand_unspecified(self):
values = Series(['fooBAD__barBAD', NA, 'foo'])
with tm.assert_produces_warning(FutureWarning):
values.str.extract('.*(BAD[_]+).*(BAD)')
def test_extract_expand_False(self):
# Contains tests like those in test_match and some others.
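        # With expand=False, a single capture group yields a Series and
        # multiple groups yield a DataFrame; rows without a match become NA.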
values = Series(['fooBAD__barBAD', NA, 'foo'])
er = [NA, NA] # empty row
result = values.str.extract('.*(BAD[_]+).*(BAD)', expand=False)
exp = DataFrame([['BAD__', 'BAD'], er, er])
tm.assert_frame_equal(result, exp)
# mixed
mixed = Series(['aBAD_BAD', NA, 'BAD_b_BAD', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.extract('.*(BAD[_]+).*(BAD)', expand=False)
exp = DataFrame([['BAD_', 'BAD'], er, ['BAD_', 'BAD'], er, er, er, er,
er, er])
tm.assert_frame_equal(rs, exp)
# unicode
values = Series([u('fooBAD__barBAD'), NA, u('foo')])
result = values.str.extract('.*(BAD[_]+).*(BAD)', expand=False)
exp = DataFrame([[u('BAD__'), u('BAD')], er, er])
tm.assert_frame_equal(result, exp)
# GH9980
# Index only works with one regex group since
# multi-group would expand to a frame
idx = Index(['A1', 'A2', 'A3', 'A4', 'B5'])
with tm.assert_raises_regex(ValueError, "supported"):
idx.str.extract('([AB])([123])', expand=False)
# these should work for both Series and Index
for klass in [Series, Index]:
# no groups
s_or_idx = klass(['A1', 'B2', 'C3'])
f = lambda: s_or_idx.str.extract('[ABC][123]', expand=False)
pytest.raises(ValueError, f)
# only non-capturing groups
f = lambda: s_or_idx.str.extract('(?:[AB]).*', expand=False)
pytest.raises(ValueError, f)
# single group renames series/index properly
s_or_idx = klass(['A1', 'A2'])
result = s_or_idx.str.extract(r'(?P<uno>A)\d', expand=False)
assert result.name == 'uno'
exp = klass(['A', 'A'], name='uno')
if klass == Series:
tm.assert_series_equal(result, exp)
else:
tm.assert_index_equal(result, exp)
s = Series(['A1', 'B2', 'C3'])
# one group, no matches
result = s.str.extract('(_)', expand=False)
exp = Series([NA, NA, NA], dtype=object)
tm.assert_series_equal(result, exp)
# two groups, no matches
result = s.str.extract('(_)(_)', expand=False)
exp = DataFrame([[NA, NA], [NA, NA], [NA, NA]], dtype=object)
tm.assert_frame_equal(result, exp)
# one group, some matches
result = s.str.extract('([AB])[123]', expand=False)
exp = Series(['A', 'B', NA])
tm.assert_series_equal(result, exp)
# two groups, some matches
result = s.str.extract('([AB])([123])', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]])
tm.assert_frame_equal(result, exp)
# one named group
result = s.str.extract('(?P<letter>[AB])', expand=False)
exp = Series(['A', 'B', NA], name='letter')
tm.assert_series_equal(result, exp)
# two named groups
result = s.str.extract('(?P<letter>[AB])(?P<number>[123])',
expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]],
columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# mix named and unnamed groups
result = s.str.extract('([AB])(?P<number>[123])', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]],
columns=[0, 'number'])
tm.assert_frame_equal(result, exp)
# one normal group, one non-capturing group
result = s.str.extract('([AB])(?:[123])', expand=False)
exp = Series(['A', 'B', NA])
tm.assert_series_equal(result, exp)
# two normal groups, one non-capturing group
result = Series(['A11', 'B22', 'C33']).str.extract(
'([AB])([123])(?:[123])', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]])
tm.assert_frame_equal(result, exp)
# one optional group followed by one normal group
result = Series(['A1', 'B2', '3']).str.extract(
'(?P<letter>[AB])?(?P<number>[123])', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, '3']],
columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# one normal group followed by one optional group
result = Series(['A1', 'B2', 'C']).str.extract(
'(?P<letter>[ABC])(?P<number>[123])?', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], ['C', NA]],
columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# GH6348
# not passing index to the extractor
def check_index(index):
data = ['A1', 'B2', 'C']
index = index[:len(data)]
s = Series(data, index=index)
result = s.str.extract(r'(\d)', expand=False)
exp = Series(['1', '2', NA], index=index)
tm.assert_series_equal(result, exp)
result = Series(data, index=index).str.extract(
r'(?P<letter>\D)(?P<number>\d)?', expand=False)
e_list = [
['A', '1'],
['B', '2'],
['C', NA]
]
exp = DataFrame(e_list, columns=['letter', 'number'], index=index)
tm.assert_frame_equal(result, exp)
i_funs = [
tm.makeStringIndex, tm.makeUnicodeIndex, tm.makeIntIndex,
tm.makeDateIndex, tm.makePeriodIndex, tm.makeRangeIndex
]
for index in i_funs:
check_index(index())
# single_series_name_is_preserved.
s = Series(['a3', 'b3', 'c2'], name='bob')
r = s.str.extract(r'(?P<sue>[a-z])', expand=False)
e = Series(['a', 'b', 'c'], name='sue')
tm.assert_series_equal(r, e)
assert r.name == e.name
def test_extract_expand_True(self):
# Contains tests like those in test_match and some others.
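        # With expand=True, extract always returns a DataFrame, even when
        # the pattern contains only a single capture group.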
values = Series(['fooBAD__barBAD', NA, 'foo'])
er = [NA, NA] # empty row
result = values.str.extract('.*(BAD[_]+).*(BAD)', expand=True)
exp = DataFrame([['BAD__', 'BAD'], er, er])
tm.assert_frame_equal(result, exp)
# mixed
mixed = Series(['aBAD_BAD', NA, 'BAD_b_BAD', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.extract('.*(BAD[_]+).*(BAD)', expand=True)
exp = DataFrame([['BAD_', 'BAD'], er, ['BAD_', 'BAD'], er, er,
er, er, er, er])
tm.assert_frame_equal(rs, exp)
# unicode
values = Series([u('fooBAD__barBAD'), NA, u('foo')])
result = values.str.extract('.*(BAD[_]+).*(BAD)', expand=True)
exp = DataFrame([[u('BAD__'), u('BAD')], er, er])
tm.assert_frame_equal(result, exp)
# these should work for both Series and Index
for klass in [Series, Index]:
# no groups
s_or_idx = klass(['A1', 'B2', 'C3'])
f = lambda: s_or_idx.str.extract('[ABC][123]', expand=True)
pytest.raises(ValueError, f)
# only non-capturing groups
f = lambda: s_or_idx.str.extract('(?:[AB]).*', expand=True)
pytest.raises(ValueError, f)
# single group renames series/index properly
s_or_idx = klass(['A1', 'A2'])
result_df = s_or_idx.str.extract(r'(?P<uno>A)\d', expand=True)
assert isinstance(result_df, DataFrame)
result_series = result_df['uno']
assert_series_equal(result_series, Series(['A', 'A'], name='uno'))
def test_extract_series(self):
# extract should give the same result whether or not the
# series has a name.
for series_name in None, "series_name":
s = Series(['A1', 'B2', 'C3'], name=series_name)
# one group, no matches
result = s.str.extract('(_)', expand=True)
exp = DataFrame([NA, NA, NA], dtype=object)
tm.assert_frame_equal(result, exp)
# two groups, no matches
result = s.str.extract('(_)(_)', expand=True)
exp = DataFrame([[NA, NA], [NA, NA], [NA, NA]], dtype=object)
tm.assert_frame_equal(result, exp)
# one group, some matches
result = s.str.extract('([AB])[123]', expand=True)
exp = DataFrame(['A', 'B', NA])
tm.assert_frame_equal(result, exp)
# two groups, some matches
result = s.str.extract('([AB])([123])', expand=True)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]])
tm.assert_frame_equal(result, exp)
# one named group
result = s.str.extract('(?P<letter>[AB])', expand=True)
exp = DataFrame({"letter": ['A', 'B', NA]})
tm.assert_frame_equal(result, exp)
# two named groups
result = s.str.extract(
'(?P<letter>[AB])(?P<number>[123])',
expand=True)
e_list = [
['A', '1'],
['B', '2'],
[NA, NA]
]
exp = DataFrame(e_list, columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# mix named and unnamed groups
result = s.str.extract('([AB])(?P<number>[123])', expand=True)
exp = DataFrame(e_list, columns=[0, 'number'])
tm.assert_frame_equal(result, exp)
# one normal group, one non-capturing group
result = s.str.extract('([AB])(?:[123])', expand=True)
exp = DataFrame(['A', 'B', NA])
tm.assert_frame_equal(result, exp)
def test_extract_optional_groups(self):
# two normal groups, one non-capturing group
result = Series(['A11', 'B22', 'C33']).str.extract(
'([AB])([123])(?:[123])', expand=True)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]])
tm.assert_frame_equal(result, exp)
# one optional group followed by one normal group
result = Series(['A1', 'B2', '3']).str.extract(
'(?P<letter>[AB])?(?P<number>[123])', expand=True)
e_list = [
['A', '1'],
['B', '2'],
[NA, '3']
]
exp = DataFrame(e_list, columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# one normal group followed by one optional group
result = Series(['A1', 'B2', 'C']).str.extract(
'(?P<letter>[ABC])(?P<number>[123])?', expand=True)
e_list = [
['A', '1'],
['B', '2'],
['C', NA]
]
exp = DataFrame(e_list, columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# GH6348
# not passing index to the extractor
def check_index(index):
data = ['A1', 'B2', 'C']
index = index[:len(data)]
result = Series(data, index=index).str.extract(
r'(\d)', expand=True)
exp = DataFrame(['1', '2', NA], index=index)
tm.assert_frame_equal(result, exp)
result = Series(data, index=index).str.extract(
r'(?P<letter>\D)(?P<number>\d)?', expand=True)
e_list = [
['A', '1'],
['B', '2'],
['C', NA]
]
exp = DataFrame(e_list, columns=['letter', 'number'], index=index)
tm.assert_frame_equal(result, exp)
i_funs = [
tm.makeStringIndex, tm.makeUnicodeIndex, tm.makeIntIndex,
tm.makeDateIndex, tm.makePeriodIndex, tm.makeRangeIndex
]
for index in i_funs:
check_index(index())
def test_extract_single_group_returns_frame(self):
# GH11386 extract should always return DataFrame, even when
# there is only one group. Prior to v0.18.0, extract returned
# Series when there was only one group in the regex.
s = Series(['a3', 'b3', 'c2'], name='series_name')
r = s.str.extract(r'(?P<letter>[a-z])', expand=True)
e = DataFrame({"letter": ['a', 'b', 'c']})
tm.assert_frame_equal(r, e)
def test_extractall(self):
subject_list = [
'[email protected]',
'[email protected]',
'[email protected]',
'[email protected] some text [email protected]',
'[email protected] some text [email protected] and [email protected]',
np.nan,
"",
]
expected_tuples = [
("dave", "google", "com"),
("tdhock5", "gmail", "com"),
("maudelaperriere", "gmail", "com"),
("rob", "gmail", "com"), ("steve", "gmail", "com"),
("a", "b", "com"), ("c", "d", "com"), ("e", "f", "com"),
]
named_pattern = r"""
(?P<user>[a-z0-9]+)
@
(?P<domain>[a-z]+)
\.
(?P<tld>[a-z]{2,4})
"""
expected_columns = ["user", "domain", "tld"]
S = Series(subject_list)
# extractall should return a DataFrame with one row for each
# match, indexed by the subject from which the match came.
expected_index = MultiIndex.from_tuples([
(0, 0),
(1, 0),
(2, 0),
(3, 0),
(3, 1),
(4, 0),
(4, 1),
(4, 2),
], names=(None, "match"))
expected_df = DataFrame(
expected_tuples, expected_index, expected_columns)
computed_df = S.str.extractall(named_pattern, re.VERBOSE)
tm.assert_frame_equal(computed_df, expected_df)
# The index of the input Series should be used to construct
# the index of the output DataFrame:
series_index = MultiIndex.from_tuples([
("single", "Dave"),
("single", "Toby"),
("single", "Maude"),
("multiple", "robAndSteve"),
("multiple", "abcdef"),
("none", "missing"),
("none", "empty"),
])
Si = Series(subject_list, series_index)
expected_index = MultiIndex.from_tuples([
("single", "Dave", 0),
("single", "Toby", 0),
("single", "Maude", 0),
("multiple", "robAndSteve", 0),
("multiple", "robAndSteve", 1),
("multiple", "abcdef", 0),
("multiple", "abcdef", 1),
("multiple", "abcdef", 2),
], names=(None, None, "match"))
expected_df = DataFrame(
expected_tuples, expected_index, expected_columns)
computed_df = Si.str.extractall(named_pattern, re.VERBOSE)
tm.assert_frame_equal(computed_df, expected_df)
# MultiIndexed subject with names.
Sn = Series(subject_list, series_index)
Sn.index.names = ("matches", "description")
expected_index.names = ("matches", "description", "match")
expected_df = DataFrame(
expected_tuples, expected_index, expected_columns)
computed_df = Sn.str.extractall(named_pattern, re.VERBOSE)
tm.assert_frame_equal(computed_df, expected_df)
# optional groups.
subject_list = ['', 'A1', '32']
named_pattern = '(?P<letter>[AB])?(?P<number>[123])'
computed_df = Series(subject_list).str.extractall(named_pattern)
expected_index = MultiIndex.from_tuples([
(1, 0),
(2, 0),
(2, 1),
], names=(None, "match"))
expected_df = DataFrame([
('A', '1'),
(NA, '3'),
(NA, '2'),
], expected_index, columns=['letter', 'number'])
tm.assert_frame_equal(computed_df, expected_df)
# only one of two groups has a name.
pattern = '([AB])?(?P<number>[123])'
computed_df = Series(subject_list).str.extractall(pattern)
expected_df = DataFrame([
('A', '1'),
(NA, '3'),
(NA, '2'),
], expected_index, columns=[0, 'number'])
tm.assert_frame_equal(computed_df, expected_df)
def test_extractall_single_group(self):
# extractall(one named group) returns DataFrame with one named
# column.
s = Series(['a3', 'b3', 'd4c2'], name='series_name')
r = s.str.extractall(r'(?P<letter>[a-z])')
i = MultiIndex.from_tuples([
(0, 0),
(1, 0),
(2, 0),
(2, 1),
], names=(None, "match"))
e = DataFrame({"letter": ['a', 'b', 'd', 'c']}, i)
tm.assert_frame_equal(r, e)
# extractall(one un-named group) returns DataFrame with one
# un-named column.
r = s.str.extractall(r'([a-z])')
e = DataFrame(['a', 'b', 'd', 'c'], i)
tm.assert_frame_equal(r, e)
def test_extractall_single_group_with_quantifier(self):
# extractall(one un-named group with quantifier) returns
# DataFrame with one un-named column (GH13382).
s = Series(['ab3', 'abc3', 'd4cd2'], name='series_name')
r = s.str.extractall(r'([a-z]+)')
i = MultiIndex.from_tuples([
(0, 0),
(1, 0),
(2, 0),
(2, 1),
], names=(None, "match"))
e = DataFrame(['ab', 'abc', 'd', 'cd'], i)
tm.assert_frame_equal(r, e)
def test_extractall_no_matches(self):
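        # With no matches, extractall still returns an (empty) DataFrame
        # with one column per capture group, using group names where given.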
s = Series(['a3', 'b3', 'd4c2'], name='series_name')
# one un-named group.
r = s.str.extractall('(z)')
e = DataFrame(columns=[0])
tm.assert_frame_equal(r, e)
# two un-named groups.
r = s.str.extractall('(z)(z)')
e = DataFrame(columns=[0, 1])
tm.assert_frame_equal(r, e)
# one named group.
r = s.str.extractall('(?P<first>z)')
e = DataFrame(columns=["first"])
tm.assert_frame_equal(r, e)
# two named groups.
r = s.str.extractall('(?P<first>z)(?P<second>z)')
e = DataFrame(columns=["first", "second"])
tm.assert_frame_equal(r, e)
# one named, one un-named.
r = s.str.extractall('(z)(?P<second>z)')
e = DataFrame(columns=[0,
"second"])
tm.assert_frame_equal(r, e)
def test_extractall_stringindex(self):
s = Series(["a1a2", "b1", "c1"], name='xxx')
res = s.str.extractall(r"[ab](?P<digit>\d)")
exp_idx = MultiIndex.from_tuples([(0, 0), (0, 1), (1, 0)],
names=[None, 'match'])
exp = DataFrame({'digit': ["1", "2", "1"]}, index=exp_idx)
tm.assert_frame_equal(res, exp)
        # an Index input should give the same result as the default-index
        # Series above; the index name does not affect the result
for idx in [Index(["a1a2", "b1", "c1"]),
Index(["a1a2", "b1", "c1"], name='xxx')]:
res = idx.str.extractall(r"[ab](?P<digit>\d)")
tm.assert_frame_equal(res, exp)
s = Series(["a1a2", "b1", "c1"], name='s_name',
index=Index(["XX", "yy", "zz"], name='idx_name'))
res = s.str.extractall(r"[ab](?P<digit>\d)")
exp_idx = MultiIndex.from_tuples([("XX", 0), ("XX", 1), ("yy", 0)],
names=["idx_name", 'match'])
exp = DataFrame({'digit': ["1", "2", "1"]}, index=exp_idx)
tm.assert_frame_equal(res, exp)
def test_extractall_errors(self):
# Does not make sense to use extractall with a regex that has
# no capture groups. (it returns DataFrame with one column for
# each capture group)
s = Series(['a3', 'b3', 'd4c2'], name='series_name')
with tm.assert_raises_regex(ValueError, "no capture groups"):
s.str.extractall(r'[a-z]')
def test_extract_index_one_two_groups(self):
s = Series(['a3', 'b3', 'd4c2'], index=["A3", "B3", "D4"],
name='series_name')
r = s.index.str.extract(r'([A-Z])', expand=True)
e = DataFrame(['A', "B", "D"])
tm.assert_frame_equal(r, e)
# Prior to v0.18.0, index.str.extract(regex with one group)
# returned Index. With more than one group, extract raised an
# error (GH9980). Now extract always returns DataFrame.
r = s.index.str.extract(
r'(?P<letter>[A-Z])(?P<digit>[0-9])', expand=True)
e_list = [
("A", "3"),
("B", "3"),
("D", "4"),
]
e = DataFrame(e_list, columns=["letter", "digit"])
tm.assert_frame_equal(r, e)
def test_extractall_same_as_extract(self):
s = Series(['a3', 'b3', 'c2'], name='series_name')
pattern_two_noname = r'([a-z])([0-9])'
extract_two_noname = s.str.extract(pattern_two_noname, expand=True)
has_multi_index = s.str.extractall(pattern_two_noname)
no_multi_index = has_multi_index.xs(0, level="match")
tm.assert_frame_equal(extract_two_noname, no_multi_index)
pattern_two_named = r'(?P<letter>[a-z])(?P<digit>[0-9])'
extract_two_named = s.str.extract(pattern_two_named, expand=True)
has_multi_index = s.str.extractall(pattern_two_named)
no_multi_index = has_multi_index.xs(0, level="match")
tm.assert_frame_equal(extract_two_named, no_multi_index)
pattern_one_named = r'(?P<group_name>[a-z])'
extract_one_named = s.str.extract(pattern_one_named, expand=True)
has_multi_index = s.str.extractall(pattern_one_named)
no_multi_index = has_multi_index.xs(0, level="match")
tm.assert_frame_equal(extract_one_named, no_multi_index)
pattern_one_noname = r'([a-z])'
extract_one_noname = s.str.extract(pattern_one_noname, expand=True)
has_multi_index = s.str.extractall(pattern_one_noname)
no_multi_index = has_multi_index.xs(0, level="match")
tm.assert_frame_equal(extract_one_noname, no_multi_index)
def test_extractall_same_as_extract_subject_index(self):
# same as above tests, but s has an MultiIndex.
i = MultiIndex.from_tuples([
("A", "first"),
("B", "second"),
("C", "third"),
], names=("capital", "ordinal"))
s = Series(['a3', 'b3', 'c2'], i, name='series_name')
pattern_two_noname = r'([a-z])([0-9])'
extract_two_noname = s.str.extract(pattern_two_noname, expand=True)
has_match_index = s.str.extractall(pattern_two_noname)
no_match_index = has_match_index.xs(0, level="match")
tm.assert_frame_equal(extract_two_noname, no_match_index)
pattern_two_named = r'(?P<letter>[a-z])(?P<digit>[0-9])'
extract_two_named = s.str.extract(pattern_two_named, expand=True)
has_match_index = s.str.extractall(pattern_two_named)
no_match_index = has_match_index.xs(0, level="match")
tm.assert_frame_equal(extract_two_named, no_match_index)
pattern_one_named = r'(?P<group_name>[a-z])'
extract_one_named = s.str.extract(pattern_one_named, expand=True)
has_match_index = s.str.extractall(pattern_one_named)
no_match_index = has_match_index.xs(0, level="match")
tm.assert_frame_equal(extract_one_named, no_match_index)
pattern_one_noname = r'([a-z])'
extract_one_noname = s.str.extract(pattern_one_noname, expand=True)
has_match_index = s.str.extractall(pattern_one_noname)
no_match_index = has_match_index.xs(0, level="match")
tm.assert_frame_equal(extract_one_noname, no_match_index)
def test_empty_str_methods(self):
empty_str = empty = Series(dtype=object)
empty_int = Series(dtype=int)
empty_bool = Series(dtype=bool)
empty_bytes = Series(dtype=object)
# GH7241
# (extract) on empty series
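        # Every str accessor method on an empty Series should return an
        # empty result of the appropriate type/dtype instead of raising.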
tm.assert_series_equal(empty_str, empty.str.cat(empty))
assert '' == empty.str.cat()
tm.assert_series_equal(empty_str, empty.str.title())
tm.assert_series_equal(empty_int, empty.str.count('a'))
tm.assert_series_equal(empty_bool, empty.str.contains('a'))
tm.assert_series_equal(empty_bool, empty.str.startswith('a'))
tm.assert_series_equal(empty_bool, empty.str.endswith('a'))
tm.assert_series_equal(empty_str, empty.str.lower())
tm.assert_series_equal(empty_str, empty.str.upper())
tm.assert_series_equal(empty_str, empty.str.replace('a', 'b'))
tm.assert_series_equal(empty_str, empty.str.repeat(3))
tm.assert_series_equal(empty_bool, empty.str.match('^a'))
tm.assert_frame_equal(
DataFrame(columns=[0], dtype=str),
empty.str.extract('()', expand=True))
tm.assert_frame_equal(
DataFrame(columns=[0, 1], dtype=str),
empty.str.extract('()()', expand=True))
tm.assert_series_equal(
empty_str,
empty.str.extract('()', expand=False))
tm.assert_frame_equal(
DataFrame(columns=[0, 1], dtype=str),
empty.str.extract('()()', expand=False))
tm.assert_frame_equal(DataFrame(dtype=str), empty.str.get_dummies())
tm.assert_series_equal(empty_str, empty_str.str.join(''))
tm.assert_series_equal(empty_int, empty.str.len())
tm.assert_series_equal(empty_str, empty_str.str.findall('a'))
tm.assert_series_equal(empty_int, empty.str.find('a'))
tm.assert_series_equal(empty_int, empty.str.rfind('a'))
tm.assert_series_equal(empty_str, empty.str.pad(42))
tm.assert_series_equal(empty_str, empty.str.center(42))
tm.assert_series_equal(empty_str, empty.str.split('a'))
tm.assert_series_equal(empty_str, empty.str.rsplit('a'))
tm.assert_series_equal(empty_str,
empty.str.partition('a', expand=False))
tm.assert_series_equal(empty_str,
empty.str.rpartition('a', expand=False))
tm.assert_series_equal(empty_str, empty.str.slice(stop=1))
tm.assert_series_equal(empty_str, empty.str.slice(step=1))
tm.assert_series_equal(empty_str, empty.str.strip())
tm.assert_series_equal(empty_str, empty.str.lstrip())
tm.assert_series_equal(empty_str, empty.str.rstrip())
tm.assert_series_equal(empty_str, empty.str.wrap(42))
tm.assert_series_equal(empty_str, empty.str.get(0))
tm.assert_series_equal(empty_str, empty_bytes.str.decode('ascii'))
tm.assert_series_equal(empty_bytes, empty.str.encode('ascii'))
tm.assert_series_equal(empty_str, empty.str.isalnum())
tm.assert_series_equal(empty_str, empty.str.isalpha())
tm.assert_series_equal(empty_str, empty.str.isdigit())
tm.assert_series_equal(empty_str, empty.str.isspace())
tm.assert_series_equal(empty_str, empty.str.islower())
tm.assert_series_equal(empty_str, empty.str.isupper())
tm.assert_series_equal(empty_str, empty.str.istitle())
tm.assert_series_equal(empty_str, empty.str.isnumeric())
tm.assert_series_equal(empty_str, empty.str.isdecimal())
tm.assert_series_equal(empty_str, empty.str.capitalize())
tm.assert_series_equal(empty_str, empty.str.swapcase())
tm.assert_series_equal(empty_str, empty.str.normalize('NFC'))
if compat.PY3:
table = str.maketrans('a', 'b')
else:
import string
table = string.maketrans('a', 'b')
tm.assert_series_equal(empty_str, empty.str.translate(table))
def test_empty_str_methods_to_frame(self):
empty = Series(dtype=str)
empty_df = DataFrame([])
tm.assert_frame_equal(empty_df, empty.str.partition('a'))
tm.assert_frame_equal(empty_df, empty.str.rpartition('a'))
def test_ismethods(self):
values = ['A', 'b', 'Xy', '4', '3A', '', 'TT', '55', '-', ' ']
str_s = Series(values)
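        # The expected lists mirror the corresponding built-in str methods
        # applied element-wise; the tolist() assertions below check this.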
alnum_e = [True, True, True, True, True, False, True, True, False,
False]
alpha_e = [True, True, True, False, False, False, True, False, False,
False]
digit_e = [False, False, False, True, False, False, False, True, False,
False]
# TODO: unused
num_e = [False, False, False, True, False, False, # noqa
False, True, False, False]
space_e = [False, False, False, False, False, False, False, False,
False, True]
lower_e = [False, True, False, False, False, False, False, False,
False, False]
upper_e = [True, False, False, False, True, False, True, False, False,
False]
title_e = [True, False, True, False, True, False, False, False, False,
False]
tm.assert_series_equal(str_s.str.isalnum(), Series(alnum_e))
tm.assert_series_equal(str_s.str.isalpha(), Series(alpha_e))
tm.assert_series_equal(str_s.str.isdigit(), Series(digit_e))
tm.assert_series_equal(str_s.str.isspace(), Series(space_e))
tm.assert_series_equal(str_s.str.islower(), Series(lower_e))
tm.assert_series_equal(str_s.str.isupper(), Series(upper_e))
tm.assert_series_equal(str_s.str.istitle(), Series(title_e))
assert str_s.str.isalnum().tolist() == [v.isalnum() for v in values]
assert str_s.str.isalpha().tolist() == [v.isalpha() for v in values]
assert str_s.str.isdigit().tolist() == [v.isdigit() for v in values]
assert str_s.str.isspace().tolist() == [v.isspace() for v in values]
assert str_s.str.islower().tolist() == [v.islower() for v in values]
assert str_s.str.isupper().tolist() == [v.isupper() for v in values]
assert str_s.str.istitle().tolist() == [v.istitle() for v in values]
def test_isnumeric(self):
        # 0x00bc: ¼ VULGAR FRACTION ONE QUARTER
        # 0x2605: ★ BLACK STAR (not a number)
        # 0x1378: ፸ ETHIOPIC NUMBER SEVENTY
        # 0xFF13: 3 FULLWIDTH DIGIT THREE
values = ['A', '3', u'¼', u'★', u'፸', u'3', 'four']
s = Series(values)
numeric_e = [False, True, True, False, True, True, False]
decimal_e = [False, True, False, False, False, True, False]
tm.assert_series_equal(s.str.isnumeric(), Series(numeric_e))
tm.assert_series_equal(s.str.isdecimal(), Series(decimal_e))
unicodes = [u'A', u'3', u'¼', u'★', u'፸', u'3', u'four']
assert s.str.isnumeric().tolist() == [v.isnumeric() for v in unicodes]
assert s.str.isdecimal().tolist() == [v.isdecimal() for v in unicodes]
values = ['A', np.nan, u'¼', u'★', np.nan, u'3', 'four']
s = Series(values)
numeric_e = [False, np.nan, True, False, np.nan, True, False]
decimal_e = [False, np.nan, False, False, np.nan, True, False]
tm.assert_series_equal(s.str.isnumeric(), Series(numeric_e))
tm.assert_series_equal(s.str.isdecimal(), Series(decimal_e))
def test_get_dummies(self):
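        # get_dummies splits each element on the separator and returns a
        # DataFrame of 0/1 indicator columns (one per distinct token);
        # NaN becomes an all-zero row, and an Index input yields a MultiIndex.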
s = Series(['a|b', 'a|c', np.nan])
result = s.str.get_dummies('|')
expected = DataFrame([[1, 1, 0], [1, 0, 1], [0, 0, 0]],
columns=list('abc'))
tm.assert_frame_equal(result, expected)
s = Series(['a;b', 'a', 7])
result = s.str.get_dummies(';')
expected = DataFrame([[0, 1, 1], [0, 1, 0], [1, 0, 0]],
columns=list('7ab'))
tm.assert_frame_equal(result, expected)
# GH9980, GH8028
idx = Index(['a|b', 'a|c', 'b|c'])
result = idx.str.get_dummies('|')
expected = MultiIndex.from_tuples([(1, 1, 0), (1, 0, 1),
(0, 1, 1)], names=('a', 'b', 'c'))
tm.assert_index_equal(result, expected)
def test_get_dummies_with_name_dummy(self):
# GH 12180
# Dummies named 'name' should work as expected
s = Series(['a', 'b,name', 'b'])
result = s.str.get_dummies(',')
expected = DataFrame([[1, 0, 0], [0, 1, 1], [0, 1, 0]],
columns=['a', 'b', 'name'])
tm.assert_frame_equal(result, expected)
idx = Index(['a|b', 'name|c', 'b|name'])
result = idx.str.get_dummies('|')
expected = MultiIndex.from_tuples([(1, 1, 0, 0), (0, 0, 1, 1),
(0, 1, 0, 1)],
names=('a', 'b', 'c', 'name'))
tm.assert_index_equal(result, expected)
def test_join(self):
values = Series(['a_b_c', 'c_d_e', np.nan, 'f_g_h'])
result = values.str.split('_').str.join('_')
tm.assert_series_equal(values, result)
# mixed
mixed = Series(['a_b', NA, 'asdf_cas_asdf', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.split('_').str.join('_')
xp = Series(['a_b', NA, 'asdf_cas_asdf', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('a_b_c'), u('c_d_e'), np.nan, u('f_g_h')])
result = values.str.split('_').str.join('_')
tm.assert_series_equal(values, result)
def test_len(self):
values = Series(['foo', 'fooo', 'fooooo', np.nan, 'fooooooo'])
result = values.str.len()
exp = values.map(lambda x: len(x) if notnull(x) else NA)
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['a_b', NA, 'asdf_cas_asdf', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.len()
xp = Series([3, NA, 13, NA, NA, 3, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('foo'), u('fooo'), u('fooooo'), np.nan, u(
'fooooooo')])
result = values.str.len()
exp = values.map(lambda x: len(x) if notnull(x) else NA)
tm.assert_series_equal(result, exp)
def test_findall(self):
values = Series(['fooBAD__barBAD', NA, 'foo', 'BAD'])
result = values.str.findall('BAD[_]*')
exp = Series([['BAD__', 'BAD'], NA, [], ['BAD']])
tm.assert_almost_equal(result, exp)
# mixed
mixed = Series(['fooBAD__barBAD', NA, 'foo', True, datetime.today(),
'BAD', None, 1, 2.])
rs = Series(mixed).str.findall('BAD[_]*')
xp = Series([['BAD__', 'BAD'], NA, [], NA, NA, ['BAD'], NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('fooBAD__barBAD'), NA, u('foo'), u('BAD')])
result = values.str.findall('BAD[_]*')
exp = Series([[u('BAD__'), u('BAD')], NA, [], [u('BAD')]])
tm.assert_almost_equal(result, exp)
def test_find(self):
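        # find/rfind mirror str.find/str.rfind: lowest/highest index of the
        # substring within the optional [start, end) slice, or -1 if absent;
        # non-string arguments raise TypeError.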
values = Series(['ABCDEFG', 'BCDEFEF', 'DEFGHIJEF', 'EFGHEF', 'XXXX'])
result = values.str.find('EF')
tm.assert_series_equal(result, Series([4, 3, 1, 0, -1]))
expected = np.array([v.find('EF') for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.rfind('EF')
tm.assert_series_equal(result, Series([4, 5, 7, 4, -1]))
expected = np.array([v.rfind('EF') for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.find('EF', 3)
tm.assert_series_equal(result, Series([4, 3, 7, 4, -1]))
expected = np.array([v.find('EF', 3) for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.rfind('EF', 3)
tm.assert_series_equal(result, Series([4, 5, 7, 4, -1]))
expected = np.array([v.rfind('EF', 3) for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.find('EF', 3, 6)
tm.assert_series_equal(result, Series([4, 3, -1, 4, -1]))
expected = np.array([v.find('EF', 3, 6) for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.rfind('EF', 3, 6)
tm.assert_series_equal(result, Series([4, 3, -1, 4, -1]))
expected = np.array([v.rfind('EF', 3, 6) for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
with tm.assert_raises_regex(TypeError,
"expected a string object, not int"):
result = values.str.find(0)
with tm.assert_raises_regex(TypeError,
"expected a string object, not int"):
result = values.str.rfind(0)
def test_find_nan(self):
values = Series(['ABCDEFG', np.nan, 'DEFGHIJEF', np.nan, 'XXXX'])
result = values.str.find('EF')
tm.assert_series_equal(result, Series([4, np.nan, 1, np.nan, -1]))
result = values.str.rfind('EF')
tm.assert_series_equal(result, Series([4, np.nan, 7, np.nan, -1]))
result = values.str.find('EF', 3)
tm.assert_series_equal(result, Series([4, np.nan, 7, np.nan, -1]))
result = values.str.rfind('EF', 3)
tm.assert_series_equal(result, Series([4, np.nan, 7, np.nan, -1]))
result = values.str.find('EF', 3, 6)
tm.assert_series_equal(result, Series([4, np.nan, -1, np.nan, -1]))
result = values.str.rfind('EF', 3, 6)
tm.assert_series_equal(result, Series([4, np.nan, -1, np.nan, -1]))
def test_index(self):
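        # index/rindex behave like find/rfind but raise ValueError
        # ("substring not found") when the substring is absent.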
def _check(result, expected):
if isinstance(result, Series):
tm.assert_series_equal(result, expected)
else:
tm.assert_index_equal(result, expected)
for klass in [Series, Index]:
s = klass(['ABCDEFG', 'BCDEFEF', 'DEFGHIJEF', 'EFGHEF'])
result = s.str.index('EF')
_check(result, klass([4, 3, 1, 0]))
expected = np.array([v.index('EF') for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = s.str.rindex('EF')
_check(result, klass([4, 5, 7, 4]))
expected = np.array([v.rindex('EF') for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = s.str.index('EF', 3)
_check(result, klass([4, 3, 7, 4]))
expected = np.array([v.index('EF', 3) for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = s.str.rindex('EF', 3)
_check(result, klass([4, 5, 7, 4]))
expected = np.array([v.rindex('EF', 3) for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = s.str.index('E', 4, 8)
_check(result, klass([4, 5, 7, 4]))
expected = np.array([v.index('E', 4, 8) for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = s.str.rindex('E', 0, 5)
_check(result, klass([4, 3, 1, 4]))
expected = np.array([v.rindex('E', 0, 5) for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
with tm.assert_raises_regex(ValueError,
"substring not found"):
result = s.str.index('DE')
with tm.assert_raises_regex(TypeError,
"expected a string "
"object, not int"):
result = s.str.index(0)
# test with nan
s = Series(['abcb', 'ab', 'bcbe', np.nan])
result = s.str.index('b')
tm.assert_series_equal(result, Series([1, 1, 0, np.nan]))
result = s.str.rindex('b')
tm.assert_series_equal(result, Series([3, 1, 2, np.nan]))
def test_pad(self):
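        # pad adds spaces on the requested side(s) up to the given width;
        # values already at least that wide ('eeeeee') are left unchanged.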
values = Series(['a', 'b', NA, 'c', NA, 'eeeeee'])
result = values.str.pad(5, side='left')
exp = Series([' a', ' b', NA, ' c', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='right')
exp = Series(['a ', 'b ', NA, 'c ', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='both')
exp = Series([' a ', ' b ', NA, ' c ', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
# mixed
mixed = Series(['a', NA, 'b', True, datetime.today(), 'ee', None, 1, 2.
])
rs = Series(mixed).str.pad(5, side='left')
xp = Series([' a', NA, ' b', NA, NA, ' ee', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
mixed = Series(['a', NA, 'b', True, datetime.today(), 'ee', None, 1, 2.
])
rs = Series(mixed).str.pad(5, side='right')
xp = Series(['a ', NA, 'b ', NA, NA, 'ee ', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
mixed = Series(['a', NA, 'b', True, datetime.today(), 'ee', None, 1, 2.
])
rs = Series(mixed).str.pad(5, side='both')
xp = Series([' a ', NA, ' b ', NA, NA, ' ee ', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('a'), u('b'), NA, u('c'), NA, u('eeeeee')])
result = values.str.pad(5, side='left')
exp = Series([u(' a'), u(' b'), NA, u(' c'), NA, u('eeeeee')])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='right')
exp = Series([u('a '), u('b '), NA, u('c '), NA, u('eeeeee')])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='both')
exp = Series([u(' a '), u(' b '), NA, u(' c '), NA, u('eeeeee')])
tm.assert_almost_equal(result, exp)
def test_pad_fillchar(self):
values = Series(['a', 'b', NA, 'c', NA, 'eeeeee'])
result = values.str.pad(5, side='left', fillchar='X')
exp = Series(['XXXXa', 'XXXXb', NA, 'XXXXc', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='right', fillchar='X')
exp = Series(['aXXXX', 'bXXXX', NA, 'cXXXX', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='both', fillchar='X')
exp = Series(['XXaXX', 'XXbXX', NA, 'XXcXX', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not str"):
result = values.str.pad(5, fillchar='XY')
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not int"):
result = values.str.pad(5, fillchar=5)
def test_pad_width(self):
# GH 13598
s = Series(['1', '22', 'a', 'bb'])
for f in ['center', 'ljust', 'rjust', 'zfill', 'pad']:
with tm.assert_raises_regex(TypeError,
"width must be of "
"integer type, not*"):
getattr(s.str, f)('f')
def test_translate(self):
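        # translate maps characters through a table built with str.maketrans
        # (Python 3) or string.maketrans (Python 2); the deletechars argument
        # is only accepted on Python 2.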
def _check(result, expected):
if isinstance(result, Series):
tm.assert_series_equal(result, expected)
else:
tm.assert_index_equal(result, expected)
for klass in [Series, Index]:
s = klass(['abcdefg', 'abcc', 'cdddfg', 'cdefggg'])
if not compat.PY3:
import string
table = string.maketrans('abc', 'cde')
else:
table = str.maketrans('abc', 'cde')
result = s.str.translate(table)
expected = klass(['cdedefg', 'cdee', 'edddfg', 'edefggg'])
_check(result, expected)
# use of deletechars is python 2 only
if not compat.PY3:
result = s.str.translate(table, deletechars='fg')
expected = klass(['cdede', 'cdee', 'eddd', 'ede'])
_check(result, expected)
result = s.str.translate(None, deletechars='fg')
expected = klass(['abcde', 'abcc', 'cddd', 'cde'])
_check(result, expected)
else:
with tm.assert_raises_regex(
ValueError, "deletechars is not a valid argument"):
result = s.str.translate(table, deletechars='fg')
# Series with non-string values
s = Series(['a', 'b', 'c', 1.2])
expected = Series(['c', 'd', 'e', np.nan])
result = s.str.translate(table)
tm.assert_series_equal(result, expected)
def test_center_ljust_rjust(self):
values = Series(['a', 'b', NA, 'c', NA, 'eeeeee'])
result = values.str.center(5)
exp = Series([' a ', ' b ', NA, ' c ', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
result = values.str.ljust(5)
exp = Series(['a ', 'b ', NA, 'c ', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
result = values.str.rjust(5)
exp = Series([' a', ' b', NA, ' c', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
# mixed
mixed = Series(['a', NA, 'b', True, datetime.today(), 'c', 'eee', None,
1, 2.])
rs = Series(mixed).str.center(5)
xp = Series([' a ', NA, ' b ', NA, NA, ' c ', ' eee ', NA, NA, NA
])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
rs = Series(mixed).str.ljust(5)
xp = Series(['a ', NA, 'b ', NA, NA, 'c ', 'eee ', NA, NA, NA
])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
rs = Series(mixed).str.rjust(5)
xp = Series([' a', NA, ' b', NA, NA, ' c', ' eee', NA, NA, NA
])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('a'), u('b'), NA, u('c'), NA, u('eeeeee')])
result = values.str.center(5)
exp = Series([u(' a '), u(' b '), NA, u(' c '), NA, u('eeeeee')])
tm.assert_almost_equal(result, exp)
result = values.str.ljust(5)
exp = Series([u('a '), u('b '), NA, u('c '), NA, u('eeeeee')])
tm.assert_almost_equal(result, exp)
result = values.str.rjust(5)
exp = Series([u(' a'), u(' b'), NA, u(' c'), NA, u('eeeeee')])
tm.assert_almost_equal(result, exp)
def test_center_ljust_rjust_fillchar(self):
values = Series(['a', 'bb', 'cccc', 'ddddd', 'eeeeee'])
result = values.str.center(5, fillchar='X')
expected = Series(['XXaXX', 'XXbbX', 'Xcccc', 'ddddd', 'eeeeee'])
tm.assert_series_equal(result, expected)
expected = np.array([v.center(5, 'X') for v in values.values],
dtype=np.object_)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.ljust(5, fillchar='X')
expected = Series(['aXXXX', 'bbXXX', 'ccccX', 'ddddd', 'eeeeee'])
tm.assert_series_equal(result, expected)
expected = np.array([v.ljust(5, 'X') for v in values.values],
dtype=np.object_)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.rjust(5, fillchar='X')
expected = Series(['XXXXa', 'XXXbb', 'Xcccc', 'ddddd', 'eeeeee'])
tm.assert_series_equal(result, expected)
expected = np.array([v.rjust(5, 'X') for v in values.values],
dtype=np.object_)
tm.assert_numpy_array_equal(result.values, expected)
        # If fillchar is not a character, the plain str methods raise TypeError
# 'aaa'.ljust(5, 'XY')
# TypeError: must be char, not str
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not str"):
result = values.str.center(5, fillchar='XY')
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not str"):
result = values.str.ljust(5, fillchar='XY')
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not str"):
result = values.str.rjust(5, fillchar='XY')
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not int"):
result = values.str.center(5, fillchar=1)
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not int"):
result = values.str.ljust(5, fillchar=1)
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not int"):
result = values.str.rjust(5, fillchar=1)
def test_zfill(self):
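        # zfill left-pads with '0' up to the requested width; values already
        # that wide are unchanged and NaN propagates.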
values = Series(['1', '22', 'aaa', '333', '45678'])
result = values.str.zfill(5)
expected = Series(['00001', '00022', '00aaa', '00333', '45678'])
tm.assert_series_equal(result, expected)
expected = np.array([v.zfill(5) for v in values.values],
dtype=np.object_)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.zfill(3)
expected = Series(['001', '022', 'aaa', '333', '45678'])
tm.assert_series_equal(result, expected)
expected = np.array([v.zfill(3) for v in values.values],
dtype=np.object_)
tm.assert_numpy_array_equal(result.values, expected)
values = Series(['1', np.nan, 'aaa', np.nan, '45678'])
result = values.str.zfill(5)
expected = Series(['00001', np.nan, '00aaa', np.nan, '45678'])
tm.assert_series_equal(result, expected)
def test_split(self):
values = Series(['a_b_c', 'c_d_e', NA, 'f_g_h'])
result = values.str.split('_')
exp = Series([['a', 'b', 'c'], ['c', 'd', 'e'], NA, ['f', 'g', 'h']])
tm.assert_series_equal(result, exp)
# more than one char
values = Series(['a__b__c', 'c__d__e', NA, 'f__g__h'])
result = values.str.split('__')
tm.assert_series_equal(result, exp)
result = values.str.split('__', expand=False)
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['a_b_c', NA, 'd_e_f', True, datetime.today(), None, 1,
2.])
result = mixed.str.split('_')
exp = Series([['a', 'b', 'c'], NA, ['d', 'e', 'f'], NA, NA, NA, NA, NA
])
assert isinstance(result, Series)
tm.assert_almost_equal(result, exp)
result = mixed.str.split('_', expand=False)
assert isinstance(result, Series)
tm.assert_almost_equal(result, exp)
# unicode
values = Series([u('a_b_c'), u('c_d_e'), NA, u('f_g_h')])
result = values.str.split('_')
exp = Series([[u('a'), u('b'), u('c')], [u('c'), u('d'), u('e')], NA,
[u('f'), u('g'), u('h')]])
tm.assert_series_equal(result, exp)
result = values.str.split('_', expand=False)
tm.assert_series_equal(result, exp)
# regex split
values = Series([u('a,b_c'), u('c_d,e'), NA, u('f,g,h')])
result = values.str.split('[,_]')
exp = Series([[u('a'), u('b'), u('c')], [u('c'), u('d'), u('e')], NA,
[u('f'), u('g'), u('h')]])
tm.assert_series_equal(result, exp)
def test_rsplit(self):
values = Series(['a_b_c', 'c_d_e', NA, 'f_g_h'])
result = values.str.rsplit('_')
exp = Series([['a', 'b', 'c'], ['c', 'd', 'e'], NA, ['f', 'g', 'h']])
tm.assert_series_equal(result, exp)
# more than one char
values = Series(['a__b__c', 'c__d__e', NA, 'f__g__h'])
result = values.str.rsplit('__')
tm.assert_series_equal(result, exp)
result = values.str.rsplit('__', expand=False)
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['a_b_c', NA, 'd_e_f', True, datetime.today(), None, 1,
2.])
result = mixed.str.rsplit('_')
exp = Series([['a', 'b', 'c'], NA, ['d', 'e', 'f'], NA, NA, NA, NA, NA
])
assert isinstance(result, Series)
tm.assert_almost_equal(result, exp)
result = mixed.str.rsplit('_', expand=False)
assert isinstance(result, Series)
tm.assert_almost_equal(result, exp)
# unicode
values = Series([u('a_b_c'), u('c_d_e'), NA, u('f_g_h')])
result = values.str.rsplit('_')
exp = Series([[u('a'), u('b'), u('c')], [u('c'), u('d'), u('e')], NA,
[u('f'), u('g'), u('h')]])
tm.assert_series_equal(result, exp)
result = values.str.rsplit('_', expand=False)
tm.assert_series_equal(result, exp)
# regex split is not supported by rsplit
values = Series([u('a,b_c'), u('c_d,e'), NA, u('f,g,h')])
result = values.str.rsplit('[,_]')
exp = Series([[u('a,b_c')], [u('c_d,e')], NA, [u('f,g,h')]])
tm.assert_series_equal(result, exp)
# setting max number of splits, make sure it's from reverse
values = Series(['a_b_c', 'c_d_e', NA, 'f_g_h'])
result = values.str.rsplit('_', n=1)
exp = Series([['a_b', 'c'], ['c_d', 'e'], NA, ['f_g', 'h']])
tm.assert_series_equal(result, exp)
def test_split_noargs(self):
# #1859
s = Series(['Wes McKinney', 'Travis Oliphant'])
result = s.str.split()
expected = ['Travis', 'Oliphant']
assert result[1] == expected
result = s.str.rsplit()
assert result[1] == expected
def test_split_maxsplit(self):
        # n=0 and n=-1 both mean "no limit" (re.split uses maxsplit=0,
        # str.split uses maxsplit=-1 for this)
s = Series(['bd asdf jfg', 'kjasdflqw asdfnfk'])
result = s.str.split(n=-1)
xp = s.str.split()
tm.assert_series_equal(result, xp)
result = s.str.split(n=0)
tm.assert_series_equal(result, xp)
xp = s.str.split('asdf')
result = s.str.split('asdf', n=0)
tm.assert_series_equal(result, xp)
result = s.str.split('asdf', n=-1)
tm.assert_series_equal(result, xp)
def test_split_no_pat_with_nonzero_n(self):
s = Series(['split once', 'split once too!'])
result = s.str.split(n=1)
expected = Series({0: ['split', 'once'], 1: ['split', 'once too!']})
tm.assert_series_equal(expected, result, check_index_type=False)
def test_split_to_dataframe(self):
s = Series(['nosplit', 'alsonosplit'])
result = s.str.split('_', expand=True)
exp = DataFrame({0: Series(['nosplit', 'alsonosplit'])})
tm.assert_frame_equal(result, exp)
s = Series(['some_equal_splits', 'with_no_nans'])
result = s.str.split('_', expand=True)
exp = DataFrame({0: ['some', 'with'],
1: ['equal', 'no'],
2: ['splits', 'nans']})
tm.assert_frame_equal(result, exp)
s = Series(['some_unequal_splits', 'one_of_these_things_is_not'])
result = s.str.split('_', expand=True)
exp = DataFrame({0: ['some', 'one'],
1: ['unequal', 'of'],
2: ['splits', 'these'],
3: [NA, 'things'],
4: [NA, 'is'],
5: [NA, 'not']})
tm.assert_frame_equal(result, exp)
s = Series(['some_splits', 'with_index'], index=['preserve', 'me'])
result = s.str.split('_', expand=True)
exp = DataFrame({0: ['some', 'with'], 1: ['splits', 'index']},
index=['preserve', 'me'])
tm.assert_frame_equal(result, exp)
with tm.assert_raises_regex(ValueError, "expand must be"):
s.str.split('_', expand="not_a_boolean")
def test_split_to_multiindex_expand(self):
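        # Splitting an Index with expand=True yields a MultiIndex whose
        # nlevels equals the largest number of pieces; when nothing splits,
        # the original single-level Index is returned.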
idx = Index(['nosplit', 'alsonosplit'])
result = idx.str.split('_', expand=True)
exp = idx
tm.assert_index_equal(result, exp)
assert result.nlevels == 1
idx = Index(['some_equal_splits', 'with_no_nans'])
result = idx.str.split('_', expand=True)
exp = MultiIndex.from_tuples([('some', 'equal', 'splits'), (
'with', 'no', 'nans')])
tm.assert_index_equal(result, exp)
assert result.nlevels == 3
idx = Index(['some_unequal_splits', 'one_of_these_things_is_not'])
result = idx.str.split('_', expand=True)
exp = MultiIndex.from_tuples([('some', 'unequal', 'splits', NA, NA, NA
), ('one', 'of', 'these', 'things',
'is', 'not')])
tm.assert_index_equal(result, exp)
assert result.nlevels == 6
with tm.assert_raises_regex(ValueError, "expand must be"):
idx.str.split('_', expand="not_a_boolean")
def test_rsplit_to_dataframe_expand(self):
s = Series(['nosplit', 'alsonosplit'])
result = s.str.rsplit('_', expand=True)
exp = DataFrame({0: Series(['nosplit', 'alsonosplit'])})
tm.assert_frame_equal(result, exp)
s = Series(['some_equal_splits', 'with_no_nans'])
result = s.str.rsplit('_', expand=True)
exp = DataFrame({0: ['some', 'with'],
1: ['equal', 'no'],
2: ['splits', 'nans']})
tm.assert_frame_equal(result, exp)
result = s.str.rsplit('_', expand=True, n=2)
exp = DataFrame({0: ['some', 'with'],
1: ['equal', 'no'],
2: ['splits', 'nans']})
tm.assert_frame_equal(result, exp)
result = s.str.rsplit('_', expand=True, n=1)
exp = DataFrame({0: ['some_equal', 'with_no'], 1: ['splits', 'nans']})
tm.assert_frame_equal(result, exp)
s = Series(['some_splits', 'with_index'], index=['preserve', 'me'])
result = s.str.rsplit('_', expand=True)
exp = DataFrame({0: ['some', 'with'], 1: ['splits', 'index']},
index=['preserve', 'me'])
tm.assert_frame_equal(result, exp)
def test_rsplit_to_multiindex_expand(self):
idx = Index(['nosplit', 'alsonosplit'])
result = idx.str.rsplit('_', expand=True)
exp = idx
tm.assert_index_equal(result, exp)
assert result.nlevels == 1
idx = Index(['some_equal_splits', 'with_no_nans'])
result = idx.str.rsplit('_', expand=True)
exp = MultiIndex.from_tuples([('some', 'equal', 'splits'), (
'with', 'no', 'nans')])
tm.assert_index_equal(result, exp)
assert result.nlevels == 3
idx = Index(['some_equal_splits', 'with_no_nans'])
result = idx.str.rsplit('_', expand=True, n=1)
exp = MultiIndex.from_tuples([('some_equal', 'splits'),
('with_no', 'nans')])
tm.assert_index_equal(result, exp)
assert result.nlevels == 2
def test_split_with_name(self):
# GH 12617
# should preserve name
s = Series(['a,b', 'c,d'], name='xxx')
res = s.str.split(',')
exp = Series([['a', 'b'], ['c', 'd']], name='xxx')
tm.assert_series_equal(res, exp)
res = s.str.split(',', expand=True)
exp = DataFrame([['a', 'b'], ['c', 'd']])
tm.assert_frame_equal(res, exp)
idx = Index(['a,b', 'c,d'], name='xxx')
res = idx.str.split(',')
exp = Index([['a', 'b'], ['c', 'd']], name='xxx')
        assert res.nlevels == 1
tm.assert_index_equal(res, exp)
res = idx.str.split(',', expand=True)
exp = MultiIndex.from_tuples([('a', 'b'), ('c', 'd')])
        assert res.nlevels == 2
tm.assert_index_equal(res, exp)
def test_partition_series(self):
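        # partition splits at the first occurrence of the separator into a
        # (head, sep, tail) 3-tuple, rpartition at the last occurrence;
        # with expand=False the tuples are kept inside a Series.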
values = Series(['a_b_c', 'c_d_e', NA, 'f_g_h'])
result = values.str.partition('_', expand=False)
exp = Series([('a', '_', 'b_c'), ('c', '_', 'd_e'), NA,
('f', '_', 'g_h')])
tm.assert_series_equal(result, exp)
result = values.str.rpartition('_', expand=False)
exp = Series([('a_b', '_', 'c'), ('c_d', '_', 'e'), NA,
('f_g', '_', 'h')])
tm.assert_series_equal(result, exp)
# more than one char
values = Series(['a__b__c', 'c__d__e', NA, 'f__g__h'])
result = values.str.partition('__', expand=False)
exp = Series([('a', '__', 'b__c'), ('c', '__', 'd__e'), NA,
('f', '__', 'g__h')])
tm.assert_series_equal(result, exp)
result = values.str.rpartition('__', expand=False)
exp = Series([('a__b', '__', 'c'), ('c__d', '__', 'e'), NA,
('f__g', '__', 'h')])
tm.assert_series_equal(result, exp)
        # no separator argument: use the default separator
values = Series(['a b c', 'c d e', NA, 'f g h'])
result = values.str.partition(expand=False)
exp = Series([('a', ' ', 'b c'), ('c', ' ', 'd e'), NA,
('f', ' ', 'g h')])
tm.assert_series_equal(result, exp)
result = values.str.rpartition(expand=False)
exp = Series([('a b', ' ', 'c'), ('c d', ' ', 'e'), NA,
('f g', ' ', 'h')])
tm.assert_series_equal(result, exp)
        # separator not present in the values
values = Series(['abc', 'cde', NA, 'fgh'])
result = values.str.partition('_', expand=False)
exp = Series([('abc', '', ''), ('cde', '', ''), NA, ('fgh', '', '')])
tm.assert_series_equal(result, exp)
result = values.str.rpartition('_', expand=False)
exp = Series([('', '', 'abc'), ('', '', 'cde'), NA, ('', '', 'fgh')])
tm.assert_series_equal(result, exp)
# unicode
values = Series([u'a_b_c', u'c_d_e', NA, u'f_g_h'])
result = values.str.partition('_', expand=False)
exp = Series([(u'a', u'_', u'b_c'), (u'c', u'_', u'd_e'),
NA, (u'f', u'_', u'g_h')])
tm.assert_series_equal(result, exp)
result = values.str.rpartition('_', expand=False)
exp = Series([(u'a_b', u'_', u'c'), (u'c_d', u'_', u'e'),
NA, (u'f_g', u'_', u'h')])
tm.assert_series_equal(result, exp)
# compare to standard lib
values = Series(['A_B_C', 'B_C_D', 'E_F_G', 'EFGHEF'])
result = values.str.partition('_', expand=False).tolist()
assert result == [v.partition('_') for v in values]
result = values.str.rpartition('_', expand=False).tolist()
assert result == [v.rpartition('_') for v in values]
def test_partition_index(self):
values = Index(['a_b_c', 'c_d_e', 'f_g_h'])
result = values.str.partition('_', expand=False)
exp = Index(np.array([('a', '_', 'b_c'), ('c', '_', 'd_e'), ('f', '_',
'g_h')]))
tm.assert_index_equal(result, exp)
assert result.nlevels == 1
result = values.str.rpartition('_', expand=False)
exp = Index(np.array([('a_b', '_', 'c'), ('c_d', '_', 'e'), (
'f_g', '_', 'h')]))
tm.assert_index_equal(result, exp)
assert result.nlevels == 1
result = values.str.partition('_')
exp = Index([('a', '_', 'b_c'), ('c', '_', 'd_e'), ('f', '_', 'g_h')])
tm.assert_index_equal(result, exp)
assert isinstance(result, MultiIndex)
assert result.nlevels == 3
result = values.str.rpartition('_')
exp = Index([('a_b', '_', 'c'), ('c_d', '_', 'e'), ('f_g', '_', 'h')])
tm.assert_index_equal(result, exp)
assert isinstance(result, MultiIndex)
assert result.nlevels == 3
def test_partition_to_dataframe(self):
values = Series(['a_b_c', 'c_d_e', NA, 'f_g_h'])
result = values.str.partition('_')
exp = DataFrame({0: ['a', 'c', np.nan, 'f'],
1: ['_', '_', np.nan, '_'],
2: ['b_c', 'd_e', np.nan, 'g_h']})
tm.assert_frame_equal(result, exp)
result = values.str.rpartition('_')
exp = DataFrame({0: ['a_b', 'c_d', np.nan, 'f_g'],
1: ['_', '_', np.nan, '_'],
2: ['c', 'e', np.nan, 'h']})
tm.assert_frame_equal(result, exp)
values = Series(['a_b_c', 'c_d_e', NA, 'f_g_h'])
result = values.str.partition('_', expand=True)
exp = DataFrame({0: ['a', 'c', np.nan, 'f'],
1: ['_', '_', np.nan, '_'],
2: ['b_c', 'd_e', np.nan, 'g_h']})
tm.assert_frame_equal(result, exp)
result = values.str.rpartition('_', expand=True)
exp = DataFrame({0: ['a_b', 'c_d', np.nan, 'f_g'],
1: ['_', '_', np.nan, '_'],
2: ['c', 'e', np.nan, 'h']})
tm.assert_frame_equal(result, exp)
def test_partition_with_name(self):
# GH 12617
s = Series(['a,b', 'c,d'], name='xxx')
res = s.str.partition(',')
exp = DataFrame({0: ['a', 'c'], 1: [',', ','], 2: ['b', 'd']})
tm.assert_frame_equal(res, exp)
# should preserve name
res = s.str.partition(',', expand=False)
exp = Series([('a', ',', 'b'), ('c', ',', 'd')], name='xxx')
tm.assert_series_equal(res, exp)
idx = Index(['a,b', 'c,d'], name='xxx')
res = idx.str.partition(',')
exp = MultiIndex.from_tuples([('a', ',', 'b'), ('c', ',', 'd')])
        assert res.nlevels == 3
tm.assert_index_equal(res, exp)
# should preserve name
res = idx.str.partition(',', expand=False)
exp = Index(np.array([('a', ',', 'b'), ('c', ',', 'd')]), name='xxx')
        assert res.nlevels == 1
tm.assert_index_equal(res, exp)
def test_pipe_failures(self):
# #2119
s = Series(['A|B|C'])
result = s.str.split('|')
exp = Series([['A', 'B', 'C']])
tm.assert_series_equal(result, exp)
result = s.str.replace('|', ' ')
exp = Series(['A B C'])
tm.assert_series_equal(result, exp)
def test_slice(self):
values = Series(['aafootwo', 'aabartwo', NA, 'aabazqux'])
result = values.str.slice(2, 5)
exp = Series(['foo', 'bar', NA, 'baz'])
tm.assert_series_equal(result, exp)
for start, stop, step in [(0, 3, -1), (None, None, -1), (3, 10, 2),
(3, 0, -1)]:
try:
result = values.str.slice(start, stop, step)
expected = Series([s[start:stop:step] if not isnull(s) else NA
for s in values])
tm.assert_series_equal(result, expected)
            except Exception:
print('failed on %s:%s:%s' % (start, stop, step))
raise
# mixed
mixed = Series(['aafootwo', NA, 'aabartwo', True, datetime.today(),
None, 1, 2.])
rs = Series(mixed).str.slice(2, 5)
xp = Series(['foo', NA, 'bar', NA, NA, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
rs = Series(mixed).str.slice(2, 5, -1)
        xp = Series(['oof', NA, 'rab', NA, NA, NA, NA, NA])
        assert isinstance(rs, Series)
        tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('aafootwo'), u('aabartwo'), NA, u('aabazqux')])
result = values.str.slice(2, 5)
exp = Series([u('foo'), u('bar'), NA, u('baz')])
tm.assert_series_equal(result, exp)
result = values.str.slice(0, -1, 2)
exp = Series([u('afow'), u('abrw'), NA, u('abzu')])
tm.assert_series_equal(result, exp)
def test_slice_replace(self):
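        # slice_replace removes the [start:stop] slice of each string and
        # inserts the replacement (an empty string by default).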
values = Series(['short', 'a bit longer', 'evenlongerthanthat', '', NA
])
exp = Series(['shrt', 'a it longer', 'evnlongerthanthat', '', NA])
result = values.str.slice_replace(2, 3)
tm.assert_series_equal(result, exp)
exp = Series(['shzrt', 'a zit longer', 'evznlongerthanthat', 'z', NA])
result = values.str.slice_replace(2, 3, 'z')
tm.assert_series_equal(result, exp)
exp = Series(['shzort', 'a zbit longer', 'evzenlongerthanthat', 'z', NA
])
result = values.str.slice_replace(2, 2, 'z')
tm.assert_series_equal(result, exp)
exp = Series(['shzort', 'a zbit longer', 'evzenlongerthanthat', 'z', NA
])
result = values.str.slice_replace(2, 1, 'z')
tm.assert_series_equal(result, exp)
exp = Series(['shorz', 'a bit longez', 'evenlongerthanthaz', 'z', NA])
result = values.str.slice_replace(-1, None, 'z')
tm.assert_series_equal(result, exp)
exp = Series(['zrt', 'zer', 'zat', 'z', NA])
result = values.str.slice_replace(None, -2, 'z')
tm.assert_series_equal(result, exp)
exp = Series(['shortz', 'a bit znger', 'evenlozerthanthat', 'z', NA])
result = values.str.slice_replace(6, 8, 'z')
tm.assert_series_equal(result, exp)
exp = Series(['zrt', 'a zit longer', 'evenlongzerthanthat', 'z', NA])
result = values.str.slice_replace(-10, 3, 'z')
tm.assert_series_equal(result, exp)
def test_strip_lstrip_rstrip(self):
values = Series([' aa ', ' bb \n', NA, 'cc '])
result = values.str.strip()
exp = Series(['aa', 'bb', NA, 'cc'])
tm.assert_series_equal(result, exp)
result = values.str.lstrip()
exp = Series(['aa ', 'bb \n', NA, 'cc '])
tm.assert_series_equal(result, exp)
result = values.str.rstrip()
exp = Series([' aa', ' bb', NA, 'cc'])
tm.assert_series_equal(result, exp)
def test_strip_lstrip_rstrip_mixed(self):
# mixed
mixed = Series([' aa ', NA, ' bb \t\n', True, datetime.today(), None,
1, 2.])
rs = Series(mixed).str.strip()
xp = Series(['aa', NA, 'bb', NA, NA, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
rs = Series(mixed).str.lstrip()
xp = Series(['aa ', NA, 'bb \t\n', NA, NA, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
rs = Series(mixed).str.rstrip()
xp = Series([' aa', NA, ' bb', NA, NA, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
def test_strip_lstrip_rstrip_unicode(self):
# unicode
values = Series([u(' aa '), u(' bb \n'), NA, u('cc ')])
result = values.str.strip()
exp = Series([u('aa'), u('bb'), NA, u('cc')])
tm.assert_series_equal(result, exp)
result = values.str.lstrip()
exp = Series([u('aa '), u('bb \n'), NA, u('cc ')])
tm.assert_series_equal(result, exp)
result = values.str.rstrip()
exp = Series([u(' aa'), u(' bb'), NA, u('cc')])
tm.assert_series_equal(result, exp)
def test_strip_lstrip_rstrip_args(self):
values = Series(['xxABCxx', 'xx BNSD', 'LDFJH xx'])
rs = values.str.strip('x')
xp = Series(['ABC', ' BNSD', 'LDFJH '])
assert_series_equal(rs, xp)
rs = values.str.lstrip('x')
xp = Series(['ABCxx', ' BNSD', 'LDFJH xx'])
assert_series_equal(rs, xp)
rs = values.str.rstrip('x')
xp = Series(['xxABC', 'xx BNSD', 'LDFJH '])
assert_series_equal(rs, xp)
def test_strip_lstrip_rstrip_args_unicode(self):
values = Series([u('xxABCxx'), u('xx BNSD'), u('LDFJH xx')])
rs = values.str.strip(u('x'))
xp = Series(['ABC', ' BNSD', 'LDFJH '])
assert_series_equal(rs, xp)
rs = values.str.lstrip(u('x'))
xp = Series(['ABCxx', ' BNSD', 'LDFJH xx'])
assert_series_equal(rs, xp)
rs = values.str.rstrip(u('x'))
xp = Series(['xxABC', 'xx BNSD', 'LDFJH '])
assert_series_equal(rs, xp)
def test_wrap(self):
# test values are: two words less than width, two words equal to width,
# two words greater than width, one word less than width, one word
# equal to width, one word greater than width, multiple tokens with
# trailing whitespace equal to width
values = Series([u('hello world'), u('hello world!'), u(
'hello world!!'), u('abcdefabcde'), u('abcdefabcdef'), u(
'abcdefabcdefa'), u('ab ab ab ab '), u('ab ab ab ab a'), u(
'\t')])
# expected values
xp = Series([u('hello world'), u('hello world!'), u('hello\nworld!!'),
u('abcdefabcde'), u('abcdefabcdef'), u('abcdefabcdef\na'),
u('ab ab ab ab'), u('ab ab ab ab\na'), u('')])
rs = values.str.wrap(12, break_long_words=True)
assert_series_equal(rs, xp)
# test with pre and post whitespace (non-unicode), NaN, and non-ascii
# Unicode
values = Series([' pre ', np.nan, u('\xac\u20ac\U00008000 abadcafe')
])
xp = Series([' pre', NA, u('\xac\u20ac\U00008000 ab\nadcafe')])
rs = values.str.wrap(6)
assert_series_equal(rs, xp)
def test_get(self):
values = Series(['a_b_c', 'c_d_e', np.nan, 'f_g_h'])
result = values.str.split('_').str.get(1)
expected = Series(['b', 'd', np.nan, 'g'])
tm.assert_series_equal(result, expected)
# mixed
mixed = Series(['a_b_c', NA, 'c_d_e', True, datetime.today(), None, 1,
2.])
rs = Series(mixed).str.split('_').str.get(1)
xp = Series(['b', NA, 'd', NA, NA, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('a_b_c'), u('c_d_e'), np.nan, u('f_g_h')])
result = values.str.split('_').str.get(1)
expected = Series([u('b'), u('d'), np.nan, u('g')])
tm.assert_series_equal(result, expected)
def test_more_contains(self):
# PR #1179
s = Series(['A', 'B', 'C', 'Aaba', 'Baca', '', NA,
'CABA', 'dog', 'cat'])
result = s.str.contains('a')
expected = Series([False, False, False, True, True, False, np.nan,
False, False, True])
assert_series_equal(result, expected)
result = s.str.contains('a', case=False)
expected = Series([True, False, False, True, True, False, np.nan, True,
False, True])
assert_series_equal(result, expected)
result = s.str.contains('Aa')
expected = Series([False, False, False, True, False, False, np.nan,
False, False, False])
assert_series_equal(result, expected)
result = s.str.contains('ba')
expected = Series([False, False, False, True, False, False, np.nan,
False, False, False])
assert_series_equal(result, expected)
result = s.str.contains('ba', case=False)
expected = Series([False, False, False, True, True, False, np.nan,
True, False, False])
assert_series_equal(result, expected)
def test_contains_nan(self):
# PR #14171
s = Series([np.nan, np.nan, np.nan], dtype=np.object_)
result = s.str.contains('foo', na=False)
expected = Series([False, False, False], dtype=np.bool_)
assert_series_equal(result, expected)
result = s.str.contains('foo', na=True)
expected = Series([True, True, True], dtype=np.bool_)
assert_series_equal(result, expected)
result = s.str.contains('foo', na="foo")
expected = Series(["foo", "foo", "foo"], dtype=np.object_)
assert_series_equal(result, expected)
result = s.str.contains('foo')
expected = Series([np.nan, np.nan, np.nan], dtype=np.object_)
assert_series_equal(result, expected)
def test_more_replace(self):
# PR #1179
s = Series(['A', 'B', 'C', 'Aaba', 'Baca', '', NA, 'CABA',
'dog', 'cat'])
result = s.str.replace('A', 'YYY')
expected = Series(['YYY', 'B', 'C', 'YYYaba', 'Baca', '', NA,
'CYYYBYYY', 'dog', 'cat'])
assert_series_equal(result, expected)
result = s.str.replace('A', 'YYY', case=False)
expected = Series(['YYY', 'B', 'C', 'YYYYYYbYYY', 'BYYYcYYY', '', NA,
'CYYYBYYY', 'dog', 'cYYYt'])
assert_series_equal(result, expected)
result = s.str.replace('^.a|dog', 'XX-XX ', case=False)
expected = Series(['A', 'B', 'C', 'XX-XX ba', 'XX-XX ca', '', NA,
'XX-XX BA', 'XX-XX ', 'XX-XX t'])
assert_series_equal(result, expected)
def test_string_slice_get_syntax(self):
s = Series(['YYY', 'B', 'C', 'YYYYYYbYYY', 'BYYYcYYY', NA, 'CYYYBYYY',
'dog', 'cYYYt'])
result = s.str[0]
expected = s.str.get(0)
assert_series_equal(result, expected)
result = s.str[:3]
expected = s.str.slice(stop=3)
assert_series_equal(result, expected)
result = s.str[2::-1]
expected = s.str.slice(start=2, step=-1)
assert_series_equal(result, expected)
def test_string_slice_out_of_bounds(self):
s = Series([(1, 2), (1, ), (3, 4, 5)])
result = s.str[1]
expected = Series([2, np.nan, 4])
assert_series_equal(result, expected)
s = Series(['foo', 'b', 'ba'])
result = s.str[1]
expected = Series(['o', np.nan, 'a'])
assert_series_equal(result, expected)
def test_match_findall_flags(self):
data = {'Dave': '[email protected]',
'Steve': '[email protected]',
'Rob': '[email protected]',
'Wes': np.nan}
data = Series(data)
pat = r'([A-Z0-9._%+-]+)@([A-Z0-9.-]+)\.([A-Z]{2,4})'
result = data.str.extract(pat, flags=re.IGNORECASE, expand=True)
assert result.iloc[0].tolist() == ['dave', 'google', 'com']
result = data.str.match(pat, flags=re.IGNORECASE)
assert result[0]
result = data.str.findall(pat, flags=re.IGNORECASE)
assert result[0][0] == ('dave', 'google', 'com')
result = data.str.count(pat, flags=re.IGNORECASE)
assert result[0] == 1
with tm.assert_produces_warning(UserWarning):
result = data.str.contains(pat, flags=re.IGNORECASE)
assert result[0]
def test_encode_decode(self):
base = Series([u('a'), u('b'), u('a\xe4')])
series = base.str.encode('utf-8')
f = lambda x: x.decode('utf-8')
result = series.str.decode('utf-8')
exp = series.map(f)
tm.assert_series_equal(result, exp)
def test_encode_decode_errors(self):
encodeBase = Series([u('a'), u('b'), u('a\x9d')])
pytest.raises(UnicodeEncodeError, encodeBase.str.encode, 'cp1252')
f = lambda x: x.encode('cp1252', 'ignore')
result = encodeBase.str.encode('cp1252', 'ignore')
exp = encodeBase.map(f)
tm.assert_series_equal(result, exp)
decodeBase = Series([b'a', b'b', b'a\x9d'])
pytest.raises(UnicodeDecodeError, decodeBase.str.decode, 'cp1252')
f = lambda x: x.decode('cp1252', 'ignore')
result = decodeBase.str.decode('cp1252', 'ignore')
exp = decodeBase.map(f)
tm.assert_series_equal(result, exp)
def test_normalize(self):
values = ['ABC', u'ABC', u'123', np.nan, u'アイエ']
s = Series(values, index=['a', 'b', 'c', 'd', 'e'])
normed = [u'ABC', u'ABC', u'123', np.nan, u'アイエ']
expected = Series(normed, index=['a', 'b', 'c', 'd', 'e'])
result = s.str.normalize('NFKC')
tm.assert_series_equal(result, expected)
expected = Series([u'ABC', u'ABC', u'123', np.nan, u'アイエ'],
index=['a', 'b', 'c', 'd', 'e'])
result = s.str.normalize('NFC')
tm.assert_series_equal(result, expected)
with tm.assert_raises_regex(ValueError,
"invalid normalization form"):
s.str.normalize('xxx')
s = Index([u'ABC', u'123', u'アイエ'])
expected = Index([u'ABC', u'123', u'アイエ'])
result = s.str.normalize('NFKC')
tm.assert_index_equal(result, expected)
def test_cat_on_filtered_index(self):
df = DataFrame(index=MultiIndex.from_product(
[[2011, 2012], [1, 2, 3]], names=['year', 'month']))
df = df.reset_index()
df = df[df.month > 1]
str_year = df.year.astype('str')
str_month = df.month.astype('str')
str_both = str_year.str.cat(str_month, sep=' ')
assert str_both.loc[1] == '2011 2'
str_multiple = str_year.str.cat([str_month, str_month], sep=' ')
assert str_multiple.loc[1] == '2011 2 2'
def test_str_cat_raises_intuitive_error(self):
# https://github.com/pandas-dev/pandas/issues/11334
s = Series(['a', 'b', 'c', 'd'])
message = "Did you mean to supply a `sep` keyword?"
with tm.assert_raises_regex(ValueError, message):
s.str.cat('|')
with tm.assert_raises_regex(ValueError, message):
s.str.cat(' ')
def test_index_str_accessor_visibility(self):
from pandas.core.strings import StringMethods
if not compat.PY3:
cases = [(['a', 'b'], 'string'), (['a', u('b')], 'mixed'),
([u('a'), u('b')], 'unicode'),
(['a', 'b', 1], 'mixed-integer'),
(['a', 'b', 1.3], 'mixed'),
(['a', 'b', 1.3, 1], 'mixed-integer'),
(['aa', datetime(2011, 1, 1)], 'mixed')]
else:
cases = [(['a', 'b'], 'string'), (['a', u('b')], 'string'),
([u('a'), u('b')], 'string'),
(['a', 'b', 1], 'mixed-integer'),
(['a', 'b', 1.3], 'mixed'),
(['a', 'b', 1.3, 1], 'mixed-integer'),
(['aa', datetime(2011, 1, 1)], 'mixed')]
for values, tp in cases:
idx = Index(values)
assert isinstance(Series(values).str, StringMethods)
assert isinstance(idx.str, StringMethods)
assert idx.inferred_type == tp
for values, tp in cases:
idx = Index(values)
assert isinstance(Series(values).str, StringMethods)
assert isinstance(idx.str, StringMethods)
assert idx.inferred_type == tp
cases = [([1, np.nan], 'floating'),
([datetime(2011, 1, 1)], 'datetime64'),
([timedelta(1)], 'timedelta64')]
for values, tp in cases:
idx = Index(values)
message = 'Can only use .str accessor with string values'
with tm.assert_raises_regex(AttributeError, message):
Series(values).str
with tm.assert_raises_regex(AttributeError, message):
idx.str
assert idx.inferred_type == tp
# MultiIndex has mixed dtype, but not allow to use accessor
idx = MultiIndex.from_tuples([('a', 'b'), ('a', 'b')])
assert idx.inferred_type == 'mixed'
message = 'Can only use .str accessor with Index, not MultiIndex'
with tm.assert_raises_regex(AttributeError, message):
idx.str
def test_str_accessor_no_new_attributes(self):
# https://github.com/pandas-dev/pandas/issues/10673
s = Series(list('aabbcde'))
with tm.assert_raises_regex(AttributeError,
"You cannot add any new attribute"):
s.str.xlabel = "a"
def test_method_on_bytes(self):
lhs = Series(np.array(list('abc'), 'S1').astype(object))
rhs = Series(np.array(list('def'), 'S1').astype(object))
if compat.PY3:
pytest.raises(TypeError, lhs.str.cat, rhs)
else:
result = lhs.str.cat(rhs)
expected = Series(np.array(
['ad', 'be', 'cf'], 'S2').astype(object))
tm.assert_series_equal(result, expected)
| mit |
mhue/scikit-learn | sklearn/feature_extraction/tests/test_dict_vectorizer.py | 276 | 3790 | # Authors: Lars Buitinck <[email protected]>
# Dan Blanchard <[email protected]>
# License: BSD 3 clause
from random import Random
import numpy as np
import scipy.sparse as sp
from numpy.testing import assert_array_equal
from sklearn.utils.testing import (assert_equal, assert_in,
assert_false, assert_true)
from sklearn.feature_extraction import DictVectorizer
from sklearn.feature_selection import SelectKBest, chi2
def test_dictvectorizer():
D = [{"foo": 1, "bar": 3},
{"bar": 4, "baz": 2},
{"bar": 1, "quux": 1, "quuux": 2}]
for sparse in (True, False):
for dtype in (int, np.float32, np.int16):
for sort in (True, False):
for iterable in (True, False):
v = DictVectorizer(sparse=sparse, dtype=dtype, sort=sort)
X = v.fit_transform(iter(D) if iterable else D)
assert_equal(sp.issparse(X), sparse)
assert_equal(X.shape, (3, 5))
assert_equal(X.sum(), 14)
assert_equal(v.inverse_transform(X), D)
if sparse:
# CSR matrices can't be compared for equality
assert_array_equal(X.A, v.transform(iter(D) if iterable
else D).A)
else:
assert_array_equal(X, v.transform(iter(D) if iterable
else D))
if sort:
assert_equal(v.feature_names_,
sorted(v.feature_names_))
def test_feature_selection():
# make two feature dicts with two useful features and a bunch of useless
# ones, in terms of chi2
d1 = dict([("useless%d" % i, 10) for i in range(20)],
useful1=1, useful2=20)
d2 = dict([("useless%d" % i, 10) for i in range(20)],
useful1=20, useful2=1)
for indices in (True, False):
v = DictVectorizer().fit([d1, d2])
X = v.transform([d1, d2])
sel = SelectKBest(chi2, k=2).fit(X, [0, 1])
v.restrict(sel.get_support(indices=indices), indices=indices)
assert_equal(v.get_feature_names(), ["useful1", "useful2"])
def test_one_of_k():
D_in = [{"version": "1", "ham": 2},
{"version": "2", "spam": .3},
{"version=3": True, "spam": -1}]
v = DictVectorizer()
X = v.fit_transform(D_in)
assert_equal(X.shape, (3, 5))
D_out = v.inverse_transform(X)
assert_equal(D_out[0], {"version=1": 1, "ham": 2})
names = v.get_feature_names()
assert_true("version=2" in names)
assert_false("version" in names)
def test_unseen_or_no_features():
D = [{"camelot": 0, "spamalot": 1}]
for sparse in [True, False]:
v = DictVectorizer(sparse=sparse).fit(D)
X = v.transform({"push the pram a lot": 2})
if sparse:
X = X.toarray()
assert_array_equal(X, np.zeros((1, 2)))
X = v.transform({})
if sparse:
X = X.toarray()
assert_array_equal(X, np.zeros((1, 2)))
try:
v.transform([])
except ValueError as e:
assert_in("empty", str(e))
def test_deterministic_vocabulary():
# Generate equal dictionaries with different memory layouts
items = [("%03d" % i, i) for i in range(1000)]
rng = Random(42)
d_sorted = dict(items)
rng.shuffle(items)
d_shuffled = dict(items)
# check that the memory layout does not impact the resulting vocabulary
v_1 = DictVectorizer().fit([d_sorted])
v_2 = DictVectorizer().fit([d_shuffled])
assert_equal(v_1.vocabulary_, v_2.vocabulary_)
| bsd-3-clause |
yunfuliu/matplot-opencv | matplot-opencv/hist.py | 1 | 1328 | """
Demo of the histogram (hist) function with a few features.
In addition to the basic histogram, this demo shows a few optional features:
* Setting the number of data bins
* The ``normed`` flag, which normalizes bin heights so that the integral of
the histogram is 1. The resulting histogram is a probability density.
* Setting the face color of the bars
* Setting the opacity (alpha value).
"""
import numpy as np
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
def runhist(fname,num_bins):
# load data
f = open(fname,'r')
s = f.readline();
s = s.split()
for i in range(0,len(s)):
s[i]=float(s[i])
# data
mu = np.mean(s) # mean of distribution
sigma = np.std(s) # deviation of distribution
# the histogram of the data
n, bins, patches = plt.hist(s, num_bins, normed=1, facecolor='green', alpha=0.5)
# add a 'best fit' line
y = mlab.normpdf(bins, mu, sigma)
# plot
plt.plot(bins, y, 'r--')
plt.xlabel('x axis')
plt.ylabel('y axis')
titles = 'Histogram : $\mu='+str(mu)+'$, $\sigma='+str(sigma)+'$'
plt.title(titles)
# Tweak spacing to prevent clipping of ylabel
plt.subplots_adjust(left=0.15)
plt.show()
if __name__ == '__main__':
runhist('vec.txt',30)
| mit |
tswast/google-cloud-python | bigquery_storage/tests/system/test_reader_dataframe.py | 2 | 3460 | # -*- coding: utf-8 -*-
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""System tests for reading rows with pandas connector."""
import numpy
import pyarrow.types
import pytest
from google.cloud import bigquery_storage_v1beta1
def test_read_rows_to_arrow(client, project_id):
table_ref = bigquery_storage_v1beta1.types.TableReference()
table_ref.project_id = "bigquery-public-data"
table_ref.dataset_id = "new_york_citibike"
table_ref.table_id = "citibike_stations"
read_options = bigquery_storage_v1beta1.types.TableReadOptions()
read_options.selected_fields.append("station_id")
read_options.selected_fields.append("latitude")
read_options.selected_fields.append("longitude")
read_options.selected_fields.append("name")
session = client.create_read_session(
table_ref,
"projects/{}".format(project_id),
format_=bigquery_storage_v1beta1.enums.DataFormat.ARROW,
read_options=read_options,
requested_streams=1,
)
stream_pos = bigquery_storage_v1beta1.types.StreamPosition(
stream=session.streams[0]
)
tbl = client.read_rows(stream_pos).to_arrow(session)
assert tbl.num_columns == 4
schema = tbl.schema
# Use field_by_name because the order doesn't currently match that of
# selected_fields.
assert pyarrow.types.is_int64(schema.field_by_name("station_id").type)
assert pyarrow.types.is_float64(schema.field_by_name("latitude").type)
assert pyarrow.types.is_float64(schema.field_by_name("longitude").type)
assert pyarrow.types.is_string(schema.field_by_name("name").type)
@pytest.mark.parametrize(
"data_format,expected_schema_type",
(
(bigquery_storage_v1beta1.enums.DataFormat.AVRO, "avro_schema"),
(bigquery_storage_v1beta1.enums.DataFormat.ARROW, "arrow_schema"),
),
)
def test_read_rows_to_dataframe(client, project_id, data_format, expected_schema_type):
table_ref = bigquery_storage_v1beta1.types.TableReference()
table_ref.project_id = "bigquery-public-data"
table_ref.dataset_id = "new_york_citibike"
table_ref.table_id = "citibike_stations"
session = client.create_read_session(
table_ref,
"projects/{}".format(project_id),
format_=data_format,
requested_streams=1,
)
schema_type = session.WhichOneof("schema")
assert schema_type == expected_schema_type
stream_pos = bigquery_storage_v1beta1.types.StreamPosition(
stream=session.streams[0]
)
frame = client.read_rows(stream_pos).to_dataframe(
session, dtypes={"latitude": numpy.float16}
)
# Station ID is a required field (no nulls), so the datatype should always
# be integer.
assert frame.station_id.dtype.name == "int64"
assert frame.latitude.dtype.name == "float16"
assert frame.longitude.dtype.name == "float64"
assert frame["name"].str.startswith("Central Park").any()
| apache-2.0 |
cmeresearch/epilepsymodeling | tipp3simulate.py | 1 | 6410 | import csv
import numpy as np
import bisect as b
import random
import networkx as nx
import matplotlib.pyplot as plt
from datetime import datetime
## This file is the main neural network simulation script
## Refractory period will be 5, delta time added on firing will be 3, duration of signal will be 10
## Queue entries are (simulation time, eventID, target neuron, signal)
## Neuron base class
class Neuron:
def __init__(self):
self.refractory = False
self.inputsum = 0
## Main simulation class
class Simulator:
def __init__(self, numneurons):
self.numberofneurons = numneurons
self.timestamp = datetime.now().strftime('%m-%d-%Y - %H.%M.%S')
self.neuralnet = np.zeros((numneurons, numneurons), dtype = int)
self.clock = 0
self.neuronarray = [Neuron() for i in range(numneurons)]
self.queue = []
self.simlog = open('simulationevents ' + self.timestamp + '.txt', 'w')
def fireneuron(self, numneurons, neuronindex):
for i in range(numneurons):
if self.neuralnet[neuronindex][i] != 0:
b.insort_right(self.queue, (self.clock + 3, 1, i, self.neuralnet[neuronindex][i]))
b.insort_right(self.queue, (self.clock + 3 + 10, 2, i, self.neuralnet[neuronindex][i]))
self.simlog.write('%i' % (neuronindex))
self.simlog.write(', at simulation time ')
self.simlog.write('%i' % (self.clock))
self.simlog.write('\n')
self.neuronarray[neuronindex].refractory = True
self.neuronarray[neuronindex].inputsum = 0
b.insort_right(self.queue, (self.clock + 5, 3, neuronindex, 0))
def startsignal(self, neuronindex, signal):
n = self.neuronarray[neuronindex]
n.inputsum = n.inputsum + signal
if (n.refractory == False and n.inputsum > 0):
self.fireneuron(self.numberofneurons, neuronindex)
def endsignal(self, neuronindex, signal):
n = self.neuronarray[neuronindex]
n.inputsum = n.inputsum - signal
if (n.refractory == False and n.inputsum > 0):
self.fireneuron(self.numberofneurons, neuronindex)
def exitrefractory(self, neuronindex):
n = self.neuronarray[neuronindex]
n.refractory = False
if n.inputsum > 0:
self.fireneuron(self.numberofneurons, neuronindex)
## Read the output file from the csvreader script, and create dictionaries for looking up index values and neuron types
matrix_size = 1 ## This will be used to determine the size of the initial matrix of zeros
indexdict = {}
typesdict = {}
print 'Reading neuron names, numbers, and types...'
with open('n_output.csv', 'rb') as neuronfile:
neuron_indices = np.genfromtxt(neuronfile, dtype = None, delimiter = ',', usecols = (0, 1))
indexdict = {k1:v1 for k1,v1 in neuron_indices}
matrix_size = len(neuron_indices) ## Determine size of neural net matrix
with open('n_output.csv', 'rb') as neuronfile:
neuron_types = np.genfromtxt(neuronfile, dtype = None, delimiter = ',', usecols = (1, 2))
typesdict = {k2:v2 for k2,v2 in neuron_types}
print 'Done'
print 'Generating neural network...'
## Create an instance of the Simulator class
sim = Simulator(matrix_size)
## Plug in connection data, using the index and type dictionaries and connectome.csv
with open('connectome.csv', 'rb') as connectomefile:
connectionarray = np.genfromtxt(connectomefile, dtype = None, delimiter = ';', usecols = (0, 1))
for i in range(len(connectionarray)):
presynaptic = connectionarray[i, 0]
presynapticindex = indexdict.get(presynaptic)
postsynaptic = connectionarray[i, 1]
postsynapticindex = indexdict.get(postsynaptic)
sim.neuralnet[presynapticindex, postsynapticindex] = typesdict.get(presynapticindex)
np.savetxt('neuralnet.csv', sim.neuralnet, fmt = '%+1d', delimiter = ', ', newline = '\n')
neurongraph = nx.from_numpy_matrix(sim.neuralnet)
pos = nx.random_layout(neurongraph)
nx.draw(neurongraph, pos, with_labels = True, node_size = 100, node_color = 'blue', width = 0.1, font_size = 6)
plt.savefig('neurongraph.png', dpi = 200)
print 'Done'
## To do: Prompt the user for max simulation duration and how many neurons to fire initially
## Current code should do this in theory, but Atom doesn't accept user input. Running in N++ or IDLE should allow uncommenting these input lines
#maxcount = int(input('Enter maximum simulation cycles: '))
maxcount = 1000000
print 'Maximum sim cycles set to ' + str(maxcount)
#initialneurons = int(input('How many neurons do you want to fire at simulation start? '))
initialneurons = 10
sim.simlog.write('Max sim cycles set to ')
sim.simlog.write('%i' % (maxcount))
sim.simlog.write('\n')
sim.simlog.write('Initializing by firing ')
sim.simlog.write('%i' % (initialneurons))
sim.simlog.write(' neurons')
sim.simlog.write('\n')
## Populate the queue with an initial set of randomized firing events, set the cycle counter to 0, and begin running the simulation
for i in range(initialneurons):
sim.fireneuron(matrix_size, random.randrange(matrix_size))
count = 0
print 'Beginning simulation by firing ' + str(initialneurons) + ' neurons'
## Simulation will stop running when we have either reached the user-specified maximum number of cycles or there are no more events in the queue
while (count < maxcount and len(sim.queue) > 0):
count += 1
currentevent = sim.queue.pop(0)
sim.clock = currentevent[0]
if currentevent[1] == 1:
sim.startsignal(currentevent[2], currentevent[3])
elif currentevent[1] == 2:
sim.endsignal(currentevent[2], currentevent[3])
elif currentevent[1] == 3:
sim.exitrefractory(currentevent[2])
else:
print 'Something has gone wrong during simulation'
if count >= maxcount:
sim.simlog.write('Simulation terminated because maximum number of cycles was reached')
print 'Maximum number of cycles reached'
if len(sim.queue) == 0:
sim.simlog.write('Simulation terminated because the simulation queue was empty')
sim.simlog.write('\n')
sim.simlog.write('Number of cycles run: ')
sim.simlog.write('%i' % (count))
print 'No more events left in the simulation queue'
sim.simlog.close()
print 'Simulation has finished running. Please see the simulationevents output text file for a log of which neurons fired'
| mit |
jmargeta/scikit-learn | sklearn/neighbors/base.py | 2 | 20935 | """Base and mixin classes for nearest neighbors"""
# Authors: Jake Vanderplas <[email protected]>
# Fabian Pedregosa <[email protected]>
# Alexandre Gramfort <[email protected]>
# Sparseness support by Lars Buitinck <[email protected]>
#
# License: BSD, (C) INRIA, University of Amsterdam
import warnings
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import csr_matrix, issparse
from scipy.spatial.ckdtree import cKDTree
from .ball_tree import BallTree
from ..base import BaseEstimator
from ..metrics import pairwise_distances
from ..utils import safe_asarray, atleast2d_or_csr, check_arrays
from ..utils.fixes import unique
from ..externals import six
class NeighborsWarning(UserWarning):
pass
# Make sure that NeighborsWarning are displayed more than once
warnings.simplefilter("always", NeighborsWarning)
def _check_weights(weights):
"""Check to make sure weights are valid"""
if weights in (None, 'uniform', 'distance'):
return weights
elif callable(weights):
return weights
else:
raise ValueError("weights not recognized: should be 'uniform', "
"'distance', or a callable function")
def _get_weights(dist, weights):
"""Get the weights from an array of distances and a parameter ``weights``
Parameters
===========
dist: ndarray
The input distances
weights: {'uniform', 'distance' or a callable}
The kind of weighting used
Returns
========
weights_arr: array of the same shape as ``dist``
if ``weights == 'uniform'``, then returns None
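    For example, if ``dist`` is ``np.array([[1., 2., 4.]])``, then
    ``'uniform'`` yields ``None`` (all neighbors count equally) while
    ``'distance'`` yields ``array([[1., 0.5, 0.25]])``, so closer
    neighbors receive larger weights.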
"""
if weights in (None, 'uniform'):
return None
elif weights == 'distance':
with np.errstate(divide='ignore'):
dist = 1. / dist
return dist
elif callable(weights):
return weights(dist)
else:
raise ValueError("weights not recognized: should be 'uniform', "
"'distance', or a callable function")
class NeighborsBase(six.with_metaclass(ABCMeta, BaseEstimator)):
"""Base class for nearest neighbors estimators."""
@abstractmethod
def __init__(self):
pass
#FIXME: include float parameter p for using different distance metrics.
# this can be passed directly to BallTree and cKDTree. Brute-force will
# rely on soon-to-be-updated functionality in the pairwise module.
def _init_params(self, n_neighbors=None, radius=None,
algorithm='auto', leaf_size=30, p=2):
self.n_neighbors = n_neighbors
self.radius = radius
self.algorithm = algorithm
self.leaf_size = leaf_size
self.p = p
if algorithm not in ['auto', 'brute', 'kd_tree', 'ball_tree']:
raise ValueError("unrecognized algorithm: '%s'" % algorithm)
if p < 1:
raise ValueError("p must be greater than or equal to 1")
self._fit_X = None
self._tree = None
self._fit_method = None
def _fit(self, X):
if isinstance(X, NeighborsBase):
self._fit_X = X._fit_X
self._tree = X._tree
self._fit_method = X._fit_method
return self
elif isinstance(X, BallTree):
self._fit_X = X.data
self._tree = X
self._fit_method = 'ball_tree'
return self
elif isinstance(X, cKDTree):
self._fit_X = X.data
self._tree = X
self._fit_method = 'kd_tree'
return self
X = safe_asarray(X)
if X.ndim != 2:
raise ValueError("data type not understood")
n_samples = X.shape[0]
if n_samples == 0:
raise ValueError("n_samples must be greater than 0")
if issparse(X):
if self.algorithm not in ('auto', 'brute'):
warnings.warn("cannot use tree with sparse input: "
"using brute force")
self._fit_X = X.tocsr()
self._tree = None
self._fit_method = 'brute'
return self
self._fit_method = self.algorithm
self._fit_X = X
if self._fit_method == 'auto':
# BallTree outperforms the others in nearly any circumstance.
if self.n_neighbors is None:
self._fit_method = 'ball_tree'
elif self.n_neighbors < self._fit_X.shape[0] // 2:
self._fit_method = 'ball_tree'
else:
self._fit_method = 'brute'
if self._fit_method == 'kd_tree':
self._tree = cKDTree(X, self.leaf_size)
elif self._fit_method == 'ball_tree':
self._tree = BallTree(X, self.leaf_size, p=self.p)
elif self._fit_method == 'brute':
self._tree = None
else:
raise ValueError("algorithm = '%s' not recognized"
% self.algorithm)
return self
class KNeighborsMixin(object):
"""Mixin for k-neighbors searches"""
def kneighbors(self, X, n_neighbors=None, return_distance=True):
"""Finds the K-neighbors of a point.
Returns distance
Parameters
----------
X : array-like, last dimension same as that of fit data
The new point.
n_neighbors : int
Number of neighbors to get (default is the value
passed to the constructor).
return_distance : boolean, optional. Defaults to True.
If False, distances will not be returned
Returns
-------
dist : array
Array representing the lengths to point, only present if
return_distance=True
ind : array
Indices of the nearest points in the population matrix.
Examples
--------
In the following example, we construct a NeighborsClassifier
class from an array representing our data set and ask who's
the closest point to [1,1,1]
>>> samples = [[0., 0., 0.], [0., .5, 0.], [1., 1., .5]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(n_neighbors=1)
>>> neigh.fit(samples) # doctest: +ELLIPSIS
NearestNeighbors(algorithm='auto', leaf_size=30, ...)
>>> print(neigh.kneighbors([1., 1., 1.])) # doctest: +ELLIPSIS
(array([[ 0.5]]), array([[2]]...))
As you can see, it returns [[0.5]], and [[2]], which means that the
element is at distance 0.5 and is the third element of samples
(indexes start at 0). You can also query for multiple points:
>>> X = [[0., 1., 0.], [1., 0., 1.]]
>>> neigh.kneighbors(X, return_distance=False) # doctest: +ELLIPSIS
array([[1],
[2]]...)
"""
if self._fit_method is None:
raise ValueError("must fit neighbors before querying")
X = atleast2d_or_csr(X)
if n_neighbors is None:
n_neighbors = self.n_neighbors
if self._fit_method == 'brute':
if self.p == 1:
dist = pairwise_distances(X, self._fit_X, 'manhattan')
elif self.p == 2:
dist = pairwise_distances(X, self._fit_X, 'euclidean',
squared=True)
elif self.p == np.inf:
dist = pairwise_distances(X, self._fit_X, 'chebyshev')
else:
dist = pairwise_distances(X, self._fit_X, 'minkowski',
p=self.p)
# XXX: should be implemented with a partial sort
neigh_ind = dist.argsort(axis=1)
neigh_ind = neigh_ind[:, :n_neighbors]
if return_distance:
j = np.arange(neigh_ind.shape[0])[:, None]
if self.p == 2:
return np.sqrt(dist[j, neigh_ind]), neigh_ind
else:
return dist[j, neigh_ind], neigh_ind
else:
return neigh_ind
elif self._fit_method == 'ball_tree':
result = self._tree.query(X, n_neighbors,
return_distance=return_distance)
return result
elif self._fit_method == 'kd_tree':
dist, ind = self._tree.query(X, n_neighbors, p=self.p)
# kd_tree returns a 1D array for n_neighbors = 1
if n_neighbors == 1:
dist = dist[:, None]
ind = ind[:, None]
if return_distance:
return dist, ind
else:
return ind
else:
raise ValueError("internal: _fit_method not recognized")
def kneighbors_graph(self, X, n_neighbors=None,
mode='connectivity'):
"""Computes the (weighted) graph of k-Neighbors for points in X
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Sample data
n_neighbors : int
Number of neighbors for each sample.
(default is value passed to the constructor).
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the
connectivity matrix with ones and zeros, in 'distance' the
edges are Euclidean distance between points.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples_fit]
n_samples_fit is the number of samples in the fitted data
A[i, j] is assigned the weight of edge that connects i to j.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(n_neighbors=2)
>>> neigh.fit(X) # doctest: +ELLIPSIS
NearestNeighbors(algorithm='auto', leaf_size=30, ...)
>>> A = neigh.kneighbors_graph(X)
>>> A.todense()
matrix([[ 1., 0., 1.],
[ 0., 1., 1.],
[ 1., 0., 1.]])
See also
--------
NearestNeighbors.radius_neighbors_graph
"""
X = safe_asarray(X)
if n_neighbors is None:
n_neighbors = self.n_neighbors
n_samples1 = X.shape[0]
n_samples2 = self._fit_X.shape[0]
n_nonzero = n_samples1 * n_neighbors
A_indptr = np.arange(0, n_nonzero + 1, n_neighbors)
# construct CSR matrix representation of the k-NN graph
if mode == 'connectivity':
A_data = np.ones((n_samples1, n_neighbors))
A_ind = self.kneighbors(X, n_neighbors, return_distance=False)
elif mode == 'distance':
data, ind = self.kneighbors(X, n_neighbors + 1,
return_distance=True)
A_data, A_ind = data[:, 1:], ind[:, 1:]
else:
raise ValueError(
'Unsupported mode, must be one of "connectivity" '
'or "distance" but got "%s" instead' % mode)
return csr_matrix((A_data.ravel(), A_ind.ravel(), A_indptr),
shape=(n_samples1, n_samples2))
class RadiusNeighborsMixin(object):
"""Mixin for radius-based neighbors searches"""
def radius_neighbors(self, X, radius=None, return_distance=True):
"""Finds the neighbors within a given radius of a point or points.
Returns indices of and distances to the neighbors of each point.
Parameters
----------
X : array-like, last dimension same as that of fit data
The new point or points
radius : float
Limiting distance of neighbors to return.
(default is the value passed to the constructor).
return_distance : boolean, optional. Defaults to True.
If False, distances will not be returned
Returns
-------
dist : array
Array representing the euclidean distances to each point,
only present if return_distance=True.
ind : array
Indices of the nearest points in the population matrix.
Examples
--------
        In the following example, we construct a NeighborsClassifier
class from an array representing our data set and ask who's
the closest point to [1,1,1]
>>> samples = [[0., 0., 0.], [0., .5, 0.], [1., 1., .5]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(radius=1.6)
>>> neigh.fit(samples) # doctest: +ELLIPSIS
NearestNeighbors(algorithm='auto', leaf_size=30, ...)
>>> print(neigh.radius_neighbors([1., 1., 1.])) # doctest: +ELLIPSIS
(array([[ 1.5, 0.5]]...), array([[1, 2]]...)
The first array returned contains the distances to all points which
are closer than 1.6, while the second array returned contains their
indices. In general, multiple points can be queried at the same time.
Notes
-----
Because the number of neighbors of each point is not necessarily
equal, the results for multiple query points cannot be fit in a
standard data array.
For efficiency, `radius_neighbors` returns arrays of objects, where
each object is a 1D array of indices or distances.
"""
if self._fit_method is None:
raise ValueError("must fit neighbors before querying")
X = atleast2d_or_csr(X)
if radius is None:
radius = self.radius
if self._fit_method == 'brute':
if self.p == 1:
dist = pairwise_distances(X, self._fit_X, 'manhattan')
elif self.p == 2:
dist = pairwise_distances(X, self._fit_X, 'euclidean',
squared=True)
radius *= radius
elif self.p == np.inf:
dist = pairwise_distances(X, self._fit_X, 'chebyshev')
else:
dist = pairwise_distances(X, self._fit_X, 'minkowski',
p=self.p)
neigh_ind = [np.where(d < radius)[0] for d in dist]
# if there are the same number of neighbors for each point,
# we can do a normal array. Otherwise, we return an object
# array with elements that are numpy arrays
try:
neigh_ind = np.asarray(neigh_ind, dtype=int)
dtype_F = float
except ValueError:
neigh_ind = np.asarray(neigh_ind, dtype='object')
dtype_F = object
if return_distance:
if self.p == 2:
dist = np.array([np.sqrt(d[neigh_ind[i]])
for i, d in enumerate(dist)],
dtype=dtype_F)
else:
dist = np.array([d[neigh_ind[i]]
for i, d in enumerate(dist)],
dtype=dtype_F)
return dist, neigh_ind
else:
return neigh_ind
elif self._fit_method == 'ball_tree':
if return_distance:
ind, dist = self._tree.query_radius(X, radius,
return_distance=True)
return dist, ind
else:
ind = self._tree.query_radius(X, radius,
return_distance=False)
return ind
elif self._fit_method == 'kd_tree':
Npts = self._fit_X.shape[0]
dist, ind = self._tree.query(X, Npts,
distance_upper_bound=radius,
p=self.p)
ind = [ind_i[:ind_i.searchsorted(Npts)] for ind_i in ind]
# if there are the same number of neighbors for each point,
# we can do a normal array. Otherwise, we return an object
# array with elements that are numpy arrays
try:
ind = np.asarray(ind, dtype=int)
dtype_F = float
except ValueError:
ind = np.asarray(ind, dtype='object')
dtype_F = object
if return_distance:
dist = np.array([dist_i[:len(ind[i])]
for i, dist_i in enumerate(dist)],
dtype=dtype_F)
return dist, ind
else:
return ind
else:
raise ValueError("internal: _fit_method not recognized")
def radius_neighbors_graph(self, X, radius=None, mode='connectivity'):
"""Computes the (weighted) graph of Neighbors for points in X
        Neighborhoods are restricted to the points at a distance lower than
radius.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Sample data
radius : float
Radius of neighborhoods.
(default is the value passed to the constructor).
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the
connectivity matrix with ones and zeros, in 'distance' the
edges are Euclidean distance between points.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples]
A[i, j] is assigned the weight of edge that connects i to j.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(radius=1.5)
>>> neigh.fit(X) # doctest: +ELLIPSIS
NearestNeighbors(algorithm='auto', leaf_size=30, ...)
>>> A = neigh.radius_neighbors_graph(X)
>>> A.todense()
matrix([[ 1., 0., 1.],
[ 0., 1., 0.],
[ 1., 0., 1.]])
See also
--------
kneighbors_graph
"""
X = safe_asarray(X)
if radius is None:
radius = self.radius
n_samples1 = X.shape[0]
n_samples2 = self._fit_X.shape[0]
# construct CSR matrix representation of the NN graph
if mode == 'connectivity':
A_ind = self.radius_neighbors(X, radius,
return_distance=False)
A_data = None
elif mode == 'distance':
dist, A_ind = self.radius_neighbors(X, radius,
return_distance=True)
A_data = np.concatenate(list(dist))
else:
raise ValueError(
'Unsupported mode, must be one of "connectivity", '
'or "distance" but got %s instead' % mode)
n_neighbors = np.array([len(a) for a in A_ind])
n_nonzero = np.sum(n_neighbors)
if A_data is None:
A_data = np.ones(n_nonzero)
A_ind = np.concatenate(list(A_ind))
A_indptr = np.concatenate((np.zeros(1, dtype=int),
np.cumsum(n_neighbors)))
return csr_matrix((A_data, A_ind, A_indptr),
shape=(n_samples1, n_samples2))
class SupervisedFloatMixin(object):
def fit(self, X, y):
"""Fit the model using X as training data and y as target values
Parameters
----------
X : {array-like, sparse matrix, BallTree, cKDTree}
Training data. If array or matrix, then the shape
is [n_samples, n_features]
y : {array-like, sparse matrix}, shape = [n_samples]
Target values, array of float values.
"""
X, y = check_arrays(X, y, sparse_format="csr")
self._y = y
return self._fit(X)
class SupervisedIntegerMixin(object):
def fit(self, X, y):
"""Fit the model using X as training data and y as target values
Parameters
----------
X : {array-like, sparse matrix, BallTree, cKDTree}
Training data. If array or matrix, then the shape
is [n_samples, n_features]
y : {array-like, sparse matrix}, shape = [n_samples]
Target values, array of integer values.
"""
X, y = check_arrays(X, y, sparse_format="csr")
self.classes_, self._y = unique(y, return_inverse=True)
return self._fit(X)
class UnsupervisedMixin(object):
def fit(self, X, y=None):
"""Fit the model using X as training data
Parameters
----------
X : {array-like, sparse matrix, BallTree, cKDTree}
Training data. If array or matrix, shape = [n_samples, n_features]
"""
return self._fit(X)
| bsd-3-clause |
marcsans/cnn-physics-perception | phy/lib/python2.7/site-packages/sklearn/neighbors/nearest_centroid.py | 37 | 7348 | # -*- coding: utf-8 -*-
"""
Nearest Centroid Classification
"""
# Author: Robert Layton <[email protected]>
# Olivier Grisel <[email protected]>
#
# License: BSD 3 clause
import warnings
import numpy as np
from scipy import sparse as sp
from ..base import BaseEstimator, ClassifierMixin
from ..metrics.pairwise import pairwise_distances
from ..preprocessing import LabelEncoder
from ..utils.validation import check_array, check_X_y, check_is_fitted
from ..utils.sparsefuncs import csc_median_axis_0
from ..utils.multiclass import check_classification_targets
class NearestCentroid(BaseEstimator, ClassifierMixin):
"""Nearest centroid classifier.
Each class is represented by its centroid, with test samples classified to
the class with the nearest centroid.
Read more in the :ref:`User Guide <nearest_centroid_classifier>`.
Parameters
----------
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string or callable, it must be one of
the options allowed by metrics.pairwise.pairwise_distances for its
metric parameter.
The centroids for the samples corresponding to each class is the point
from which the sum of the distances (according to the metric) of all
samples that belong to that particular class are minimized.
If the "manhattan" metric is provided, this centroid is the median and
for all other metrics, the centroid is now set to be the mean.
shrink_threshold : float, optional (default = None)
Threshold for shrinking centroids to remove features.
Attributes
----------
centroids_ : array-like, shape = [n_classes, n_features]
Centroid of each class
Examples
--------
>>> from sklearn.neighbors.nearest_centroid import NearestCentroid
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> y = np.array([1, 1, 1, 2, 2, 2])
>>> clf = NearestCentroid()
>>> clf.fit(X, y)
NearestCentroid(metric='euclidean', shrink_threshold=None)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
sklearn.neighbors.KNeighborsClassifier: nearest neighbors classifier
Notes
-----
When used for text classification with tf-idf vectors, this classifier is
also known as the Rocchio classifier.
References
----------
Tibshirani, R., Hastie, T., Narasimhan, B., & Chu, G. (2002). Diagnosis of
multiple cancer types by shrunken centroids of gene expression. Proceedings
of the National Academy of Sciences of the United States of America,
99(10), 6567-6572. The National Academy of Sciences.
"""
def __init__(self, metric='euclidean', shrink_threshold=None):
self.metric = metric
self.shrink_threshold = shrink_threshold
def fit(self, X, y):
"""
Fit the NearestCentroid model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vector, where n_samples in the number of samples and
n_features is the number of features.
Note that centroid shrinking cannot be used with sparse matrices.
y : array, shape = [n_samples]
Target values (integers)
"""
        # If X is sparse and the metric is "manhattan", storing it in csc
        # format makes it easier to calculate the median.
if self.metric == 'manhattan':
X, y = check_X_y(X, y, ['csc'])
else:
X, y = check_X_y(X, y, ['csr', 'csc'])
is_X_sparse = sp.issparse(X)
if is_X_sparse and self.shrink_threshold:
raise ValueError("threshold shrinking not supported"
" for sparse input")
check_classification_targets(y)
n_samples, n_features = X.shape
le = LabelEncoder()
y_ind = le.fit_transform(y)
self.classes_ = classes = le.classes_
n_classes = classes.size
if n_classes < 2:
raise ValueError('y has less than 2 classes')
# Mask mapping each class to its members.
self.centroids_ = np.empty((n_classes, n_features), dtype=np.float64)
# Number of clusters in each class.
nk = np.zeros(n_classes)
for cur_class in range(n_classes):
center_mask = y_ind == cur_class
nk[cur_class] = np.sum(center_mask)
if is_X_sparse:
center_mask = np.where(center_mask)[0]
# XXX: Update other averaging methods according to the metrics.
if self.metric == "manhattan":
# NumPy does not calculate median of sparse matrices.
if not is_X_sparse:
self.centroids_[cur_class] = np.median(X[center_mask], axis=0)
else:
self.centroids_[cur_class] = csc_median_axis_0(X[center_mask])
else:
if self.metric != 'euclidean':
warnings.warn("Averaging for metrics other than "
"euclidean and manhattan not supported. "
"The average is set to be the mean."
)
self.centroids_[cur_class] = X[center_mask].mean(axis=0)
if self.shrink_threshold:
dataset_centroid_ = np.mean(X, axis=0)
# m parameter for determining deviation
m = np.sqrt((1. / nk) + (1. / n_samples))
# Calculate deviation using the standard deviation of centroids.
variance = (X - self.centroids_[y_ind]) ** 2
variance = variance.sum(axis=0)
s = np.sqrt(variance / (n_samples - n_classes))
s += np.median(s) # To deter outliers from affecting the results.
mm = m.reshape(len(m), 1) # Reshape to allow broadcasting.
ms = mm * s
deviation = ((self.centroids_ - dataset_centroid_) / ms)
# Soft thresholding: if the deviation crosses 0 during shrinking,
# it becomes zero.
signs = np.sign(deviation)
deviation = (np.abs(deviation) - self.shrink_threshold)
deviation[deviation < 0] = 0
deviation *= signs
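            # Worked example (illustrative, not from the original source): with
            # shrink_threshold=0.3, a deviation of 0.5 shrinks to 0.2, -0.5
            # shrinks to -0.2, and 0.2 is zeroed out, i.e.
            # d -> sign(d) * max(|d| - shrink_threshold, 0)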
# Now adjust the centroids using the deviation
msd = ms * deviation
self.centroids_ = dataset_centroid_[np.newaxis, :] + msd
return self
def predict(self, X):
"""Perform classification on an array of test vectors X.
The predicted class C for each sample in X is returned.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = [n_samples]
Notes
-----
If the metric constructor parameter is "precomputed", X is assumed to
be the distance matrix between the data to be predicted and
``self.centroids_``.
"""
check_is_fitted(self, 'centroids_')
X = check_array(X, accept_sparse='csr')
return self.classes_[pairwise_distances(
X, self.centroids_, metric=self.metric).argmin(axis=1)]
| mit |
JLHulme/MhoPerformance | Mho_element_plot_vRf.py | 1 | 4614 | #Required imports
import math
import matplotlib.pyplot as plt
#Create time vector, in terms of samples, for a time period of 30cycles
length = 30 #cycles
sampleRate = 4 # samples per cycle
time = []
#create global definition for a
deg120 = (math.pi/180) * 120
a = math.cos(deg120)+math.sin(deg120)*1j
for x in range(sampleRate*length):
time.append(x/4.0)
#Define function for voltage memory
class V_Mem:
vMemN2 = complex(0+0j)
vMemN1 = complex(0+0j)
vMem = complex(0+0j)
def __init__(self, startVoltage):
self.vMem = startVoltage
self.vMemN2 = startVoltage
self.vMemN1 = startVoltage
def updateVoltage(self, currentVoltage):
self.vMemN2 = self.vMemN1
self.vMemN1 = self.vMem
self.vMem = (1.0/16.0)*currentVoltage + (15.0/16.0)*self.vMemN2 #+ instead of - as we dont update phasor
def getVoltage(self):
return self.vMem
#create a class for to use as a Mho object
class Phase_Mho:
#v1Mem = V_Mem
#Z1L
#mho
def __init__(self, initialV1Mem, lineZ1):
self.v1Mem = V_Mem(initialV1Mem)
self.Z1L = lineZ1
def update(self, V1Fault, IA, IB):
#fault values = [v1Mem, IA, IB]
self.v1Mem.updateVoltage(V1Fault)
#print(faultValues)
currentV1Mem = self.v1Mem.getVoltage()
#print(currentV1Mem)
VA = V1Fault #V1F
VB = (a**2) * V1Fault #V1F@-120
VAB = VA - VB
IAB = IA - IB
VPolA = currentV1Mem
VPolB = currentV1Mem * (a**2)
VPolAB = VPolA - VPolB
#print(VAB)
#print(VA)
#print(VB)
torqNum = VAB * VPolAB.conjugate()
#print(torqNum.real)
torqDen = VPolAB.conjugate() * IAB * (self.Z1L / abs(self.Z1L))
#print(torqDen.real)
self.mho = torqNum.real / torqDen.real
#print(self.mho)
def getMho(self):
return self.mho
#Simulation vlaues
#Prefault voltage
V1 = 230000/math.sqrt(3) #VLN
#Fault values:
IA = 568.2-2673j #2733@-78
IB = -2599+844.5j #2733@162
V1F = 4173-11464j #12200@-70
lineImp = 0.0843+0.817j #[email protected]
#in secondary values
PTR = 2000
CTR = 400
IA = IA / CTR
IB = IB / CTR
V1F = V1F / PTR
V1 = V1 / PTR
lineImp = lineImp * CTR / PTR
#create relay mho obj
rlyMho = Phase_Mho(V1, lineImp)
#simulate
rlyImpedance = []
rlySetting = []
zone2_setting = 0.53
for t in time:
rlyMho.update(V1F, IA, IB)
rlyImpedance.append(rlyMho.getMho())
rlySetting.append(zone2_setting)
###########################################RF=0.0
V1 = 230000/math.sqrt(3) #VLN
#Fault values:
IA = 288.8-2748j #
IB = -2524+1124j #
V1F = 2300-40j #
lineImp = 0.0843+0.817j #
#in secondary values
PTR = 2000
CTR = 400
IA = IA / CTR
IB = IB / CTR
V1F = V1F / PTR
V1 = V1 / PTR
lineImp = lineImp * CTR / PTR
#create relay mho obj
rlyMho = Phase_Mho(V1, lineImp)
#simulate
rlyImpedance_0 = []
for t in time:
rlyMho.update(V1F, IA, IB)
rlyImpedance_0.append(rlyMho.getMho())
###########################################RF=0.35
V1 = 230000/math.sqrt(3) #VLN
#Fault values:
IA = 430-2714j #
IB = -2565+985j #
V1F = 3170-6800j #
lineImp = 0.0843+0.817j #
#in secondary values
PTR = 2000
CTR = 400
IA = IA / CTR
IB = IB / CTR
V1F = V1F / PTR
V1 = V1 / PTR
lineImp = lineImp * CTR / PTR
#create relay mho obj
rlyMho = Phase_Mho(V1, lineImp)
#simulate
rlyImpedance_035 = []
for t in time:
rlyMho.update(V1F, IA, IB)
rlyImpedance_035.append(rlyMho.getMho())
###########################################RF=1
V1 = 230000/math.sqrt(3) #VLN
#Fault values:
IA = 699-2610j #
IB = -2610+699j #
V1F = 6479-18816j #
lineImp = 0.0843+0.817j #
#in secondary values
PTR = 2000
CTR = 400
IA = IA / CTR
IB = IB / CTR
V1F = V1F / PTR
V1 = V1 / PTR
lineImp = lineImp * CTR / PTR
#create relay mho obj
rlyMho = Phase_Mho(V1, lineImp)
#simulate
rlyImpedance_1 = []
for t in time:
rlyMho.update(V1F, IA, IB)
rlyImpedance_1.append(rlyMho.getMho())
#plot
plt.plot(time, rlyImpedance_1, 'k', label="RF=1")
plt.plot(time, rlyImpedance, 'b', label="RF=0.7")
plt.plot(time, rlyImpedance_035, 'r', label="RF=0.35")
plt.plot(time, rlyImpedance_0, 'g', label="RF=0")
plt.plot(time, rlySetting, 'r--', label="Trip Setting")
plt.axvline(3.5, color='k', linestyle='--', label="3.5cy")
plt.axvline(5.5, color='b', linestyle='--', label="5.5cy")
plt.xlabel('Time (cycles)')
plt.ylabel('Measured Impedance (Ohm sec)')
plt.legend(shadow=True, loc=1)
plt.show()
| bsd-2-clause |
yl2695/parserator | parserator/utils.py | 4 | 3758 | #!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import print_function
from sklearn.metrics import f1_score
from sklearn.base import BaseEstimator
from sklearn.grid_search import GridSearchCV
from training import get_data_sklearn_format
import pycrfsuite
import random
def f1_with_flattening(estimator, X, y):
"""
Calculate F1 score by flattening the predictions of the
estimator across all sequences. For example, given the following
address sequences as input
['1 My str', '2 Your blvd'],
the predictions of the model will be flattened like so:
['AddressNumber', 'StreetName', 'StreetNamePostType', 'AddressNumber', 'StreetName', 'StreetNamePostType']
and compared to a similarly flattened gold standard labels. This calculates the overall
quality of the model across all sequences as opposed to how well it does
at any particular sequence.
:param X: list of sequences to tag
:param y: list of gold standard tuples
"""
predicted = estimator.predict(X)
flat_pred, flat_gold = [], []
for a, b in zip(predicted, y):
if len(a) == len(b):
flat_pred.extend(a)
flat_gold.extend(b)
return f1_score(flat_gold, flat_pred)
def get_data_sklearn_format(train_file_list, module):
"""
Parses the specified data files and returns it in sklearn format.
:param path:
:return: tuple of:
1) list of training sequences, each of which is a string
2) list of gold standard labels, each of which is a tuple
of strings, one for each token in the corresponding training
sequence
"""
data = list(readTrainingData(train_file_list, module.GROUP_LABEL))
random.shuffle(data)
x, y = [], []
for raw_string, components in data:
tokens, labels = zip(*components)
x.append(raw_string)
y.append(labels)
return x, y
class SequenceEstimator(BaseEstimator):
"""
A sklearn-compatible wrapper for a parser trainer
"""
def __init__(self, c1=1, c2=1, feature_minfreq=0):
"""
:param c1: L1 regularisation coefficient
:param c2: L2 regularisation coefficient
:param feature_minfreq: minimum feature frequency
:return:
"""
self.c1 = c1
self.c2 = c2
self.feature_minfreq = feature_minfreq
    def fit(self, X, y, model_path='model.crfsuite', **params):
        # default model_path is a placeholder assumption so that fit(X, y)
        # also works when called from GridSearchCV below
        # sklearn requires parameters to be declared as fields of the estimator,
        # and a field name cannot contain a full stop, so dots were replaced
        # with underscores; convert them back here
params = {k.replace('_', '.'): v for k, v in self.__dict__.items()}
trainer = pycrfsuite.Trainer(verbose=False, params=params)
for raw_text, labels in zip(X, y):
tokens = tokenize(raw_text)
trainer.append(tokens2features(tokens), labels)
trainer.train(model_path)
reload(parserator)
def predict(self, X):
reload(parserator) # tagger object is defined at the module level, update now
predictions = []
for sequence in X:
predictions.append([foo[1] for foo in parserator.parse(sequence)])
return predictions
if __name__ == '__main__':
# refer to http://www.chokkan.org/software/crfsuite/manual.html
# for description of parameters
cv = GridSearchCV(SequenceEstimator(), {'c1': [10 ** x for x in range(-2, 2)],
'c2': [10 ** x for x in range(-2, 4)],
'feature_minfreq': [0, 3, 5]},
scoring=f1_with_flattening, verbose=5)
X, y = get_data_sklearn_format()
cv.fit(X, y)
print(cv.best_params_)
for foo in cv.grid_scores_:
print(foo)
| mit |