repo_name (string, 6-112 chars) | path (string, 4-204 chars) | copies (string, 1-3 chars) | size (string, 4-6 chars) | content (string, 714-810k chars) | license (15 classes)
---|---|---|---|---|---|
madjelan/scikit-learn | examples/gaussian_process/gp_diabetes_dataset.py | 223 | 1976 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
========================================================================
Gaussian Processes regression: goodness-of-fit on the 'diabetes' dataset
========================================================================
In this example, we fit a Gaussian Process model onto the diabetes
dataset.
We determine the correlation parameters with maximum likelihood
estimation (MLE). We use an anisotropic absolute exponential
correlation model (matching corr='absolute_exponential' below) with a
constant regression model. We also use a nugget of 1e-2 to account
for the (strong) noise in the targets.
We compute a cross-validation estimate of the coefficient of
determination (R2) without re-running MLE, using the set of correlation
parameters found on the whole dataset.
"""
print(__doc__)
# Author: Vincent Dubourg <[email protected]>
# Licence: BSD 3 clause
from sklearn import datasets
from sklearn.gaussian_process import GaussianProcess
from sklearn.cross_validation import cross_val_score, KFold
# Load the dataset from scikit's data sets
diabetes = datasets.load_diabetes()
X, y = diabetes.data, diabetes.target
# Instantiate a GP model
gp = GaussianProcess(regr='constant', corr='absolute_exponential',
theta0=[1e-4] * 10, thetaL=[1e-12] * 10,
thetaU=[1e-2] * 10, nugget=1e-2, optimizer='Welch')
# Fit the GP model to the data performing maximum likelihood estimation
gp.fit(X, y)
# Deactivate maximum likelihood estimation for the cross-validation loop
gp.theta0 = gp.theta_ # Given correlation parameter = MLE
gp.thetaL, gp.thetaU = None, None # None bounds deactivate MLE
# Perform a cross-validation estimate of the coefficient of determination
# using the cross_validation module (n_jobs=1 runs on a single CPU; set
# n_jobs=-1 to use all CPUs available on the machine)
K = 20 # folds
R2 = cross_val_score(gp, X, y=y, cv=KFold(y.size, K), n_jobs=1).mean()
print("The %d-Folds estimate of the coefficient of determination is R2 = %s"
% (K, R2))
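# ---------------------------------------------------------------------------
# Hedged sketch (not part of the original example): GaussianProcess and the
# sklearn.cross_validation module were removed in later scikit-learn releases.
# Assuming scikit-learn >= 0.18, roughly the same workflow could be written
# with GaussianProcessRegressor as below. This is illustrative only, not the
# original author's code; the kernel choice only approximates the old
# anisotropic correlation model plus nugget setup.
def gp_diabetes_cv_sketch():
    from sklearn import datasets
    from sklearn.gaussian_process import GaussianProcessRegressor
    from sklearn.gaussian_process.kernels import RBF, WhiteKernel
    from sklearn.model_selection import cross_val_score, KFold

    diabetes = datasets.load_diabetes()
    X, y = diabetes.data, diabetes.target

    # Anisotropic RBF (one length scale per feature) plus a white-noise term
    # playing the role of the nugget.
    kernel = RBF(length_scale=[1.0] * X.shape[1]) + WhiteKernel(noise_level=1e-2)
    gpr = GaussianProcessRegressor(kernel=kernel, normalize_y=True)

    # 20-fold cross-validated R2 (kernel hyperparameters are re-fit per fold).
    return cross_val_score(gpr, X, y, cv=KFold(n_splits=20)).mean()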
| bsd-3-clause |
sisl/CustomerSim | src/kdd98_propagate_classifier.py | 1 | 3609 |
# TRAINING KDD1998 CLASSIFIER
from shared_functions import *
from net_designs import *
import os
from copy import deepcopy
import pandas as ps
import numpy as np
import random
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor
from sklearn.metrics import roc_curve, auc
from keras.utils.np_utils import to_categorical
from keras.callbacks import ModelCheckpoint
# seed
RANDOM_SEED = 777
np.random.seed(RANDOM_SEED)
random.seed(RANDOM_SEED)
# cupy.random.seed(RANDOM_SEED)
# LOAD DATA
print('Loading data')
data = ps.read_csv("../kdd98_data/kdd1998tuples.csv", header=None)
data.columns = ['customer','period','r0','f0','m0','ir0','if0','gender','age','income',
'zip_region','zip_la','zip_lo','a','rew','r1','f1','m1','ir1','if1',
'gender1','age1','income1','zip_region1','zip_la1','zip_lo1']
data['rew_ind'] = (data['rew'] > 0) * 1
data.loc[data['age'] == 0, 'age'] = None  # treat age 0 as missing; .loc avoids chained assignment
# Train and validate donation classifier
print('Preprocessing data')
customers = list(set(data['customer']))
train_samples = 100000
val_samples = 50000
test_samples = len(customers) - val_samples - train_samples
np.random.shuffle(customers)
train_customers = customers[0:train_samples]
val_customers = customers[train_samples:(train_samples+val_samples)]
test_customers = customers[(train_samples+val_samples):]
cols = ['r0','f0','m0','ir0','if0','gender','age','income','zip_region','a','rew','rew_ind']
train_data = data[data['customer'].isin(train_customers)][cols].fillna(0)
val_data = data[data['customer'].isin(val_customers)][cols].fillna(0)
test_data = data[data['customer'].isin(test_customers)][cols].fillna(0)
n_train = train_data.shape[0]
n_val = val_data.shape[0]
n_test = test_data.shape[0]
cols_X = ['r0','f0','m0','ir0','if0','gender','age','income','zip_region','a']
cols_Y = ['rew_ind']
x_train = train_data[cols_X].values.astype(np.float32)
y_train = train_data[cols_Y].values.astype(np.int32)
x_val = val_data[cols_X].values.astype(np.float32)
y_val = val_data[cols_Y].values.astype(np.int32)
x_test = test_data[cols_X].values.astype(np.float32)
y_test = test_data[cols_Y].values.astype(np.int32)
# DEFINE NEURAL NET
print('Training KDD98 neural net classifier')
n_epochs = 50
batch_size = 100
file_name = "../results/kdd98_propagation_classifier_best.h5"
# Define the kdd98 classifier model with Keras
model = KDDClassifier()
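# Hedged note: KDDClassifier() comes from net_designs (not shown here). A
# minimal stand-in consistent with the binary_crossentropy loss and the
# 2-column to_categorical targets used below might look roughly like this
# (illustrative only; the real architecture may differ):
#
#   from keras.models import Sequential
#   from keras.layers import Dense
#   def kdd_classifier_sketch(n_features=10):
#       m = Sequential()
#       m.add(Dense(64, input_dim=n_features, activation='relu'))
#       m.add(Dense(2, activation='softmax'))
#       return m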
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
# Callback to save the best model
checkpoint = ModelCheckpoint(file_name, monitor='val_loss', save_best_only=True, save_weights_only=True)
# Fit the model
model.fit(x_train, to_categorical(y_train), batch_size=batch_size, nb_epoch=n_epochs,
verbose=1, callbacks=[checkpoint], validation_data=(x_val, to_categorical(y_val)))
# model.load_weights(file_name)
# model.save_weights(file_name, overwrite=True)
score = model.evaluate(x_test, to_categorical(y_test), verbose=1)
print('Test Loss: '+ str(score[0]) + '; Test Accuracy: ' + str(score[1]))
# VALIDATE CLASSIFIER
print('Validating neural net classifier')
y_score = model.predict_proba(x_test)
roc(to_categorical(y_test),y_score,name="../results/kdd98_propagation_classifier_roc.pdf")
# TRAIN RANDOM FOREST
print('Training random forest classifier')
clf = RandomForestClassifier(n_estimators=100)
clf = clf.fit(x_train, y_train.ravel())
# VALIDATE RANDOM FOREST
print('Validating random forest classifier')
y_score = clf.predict_proba(x_test)
# SAVE ROC CURVE PLOT
roc(to_categorical(y_test),y_score,name="../results/kdd98_propagation_classifier_roc_rf.pdf")
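# ---------------------------------------------------------------------------
# Hedged sketch: roc() above is imported from shared_functions and is not
# shown in this file. A stand-in with the same call signature, built on the
# sklearn.metrics functions already imported above, could look roughly like
# this (the real helper may plot or format things differently):
def roc_sketch(y_true_onehot, y_score, name="roc.pdf"):
    """Plot a ROC curve for the positive class and save it to `name`."""
    import matplotlib.pyplot as plt
    fpr, tpr, _ = roc_curve(y_true_onehot[:, 1], y_score[:, 1])
    roc_auc = auc(fpr, tpr)
    plt.figure()
    plt.plot(fpr, tpr, label='ROC (AUC = %0.3f)' % roc_auc)
    plt.plot([0, 1], [0, 1], linestyle='--', color='grey')
    plt.xlabel('False positive rate')
    plt.ylabel('True positive rate')
    plt.legend(loc='lower right')
    plt.savefig(name)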
| apache-2.0 |
arcyfelix/Machine-Learning-For-Trading | 34_correlation.py | 1 | 2422 | import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
''' Read: http://pandas.pydata.org/pandas-docs/stable/api.html#api-dataframe-stats '''
def symbol_to_path(symbol, base_dir = 'data'):
return os.path.join(base_dir, "{}.csv".format(str(symbol)))
def dates_creator():
start_date = '2009-01-01'
end_date = '2015-12-31'
dates = pd.date_range(start_date, end_date)
return dates
def get_data(symbols, dates):
df = pd.DataFrame(index = dates)
if 'SPY' not in symbols: # adding SPY as the main reference
symbols.insert(0, 'SPY')
for symbol in symbols:
df_temp = pd.read_csv(symbol_to_path(symbol),
index_col = 'Date',
parse_dates = True,
usecols = ['Date', 'Adj Close'],
na_values = ['nan'])
df_temp = df_temp.rename(columns = {'Adj Close': symbol})
df = df.join(df_temp)
if symbol == 'SPY':
df = df.dropna(subset = ['SPY'])
print(df)
return df
def plot(df):
ax = df.plot(title = 'Stock prices', fontsize = 12)
ax.set_xlabel('Date')
ax.set_ylabel('Price')
plt.show()
def get_daily_returns(df):
daily_returns = df.copy()
# Calculating daily returns
daily_returns[1:] = (df / df.shift(1)) - 1
# Setting daily returns for row 0 to 0.
daily_returns.iloc[0, :] = 0  # positional .iloc; the deprecated .ix accessor is removed in newer pandas
return daily_returns
def show_scatter(df, x, y):
df.plot(kind = 'scatter', x= x, y= y)
beta, alpha = calculate_alpha_beta(df, x, y)
# Line -> beta * x + alpha for all values of x
plt.plot(df[x], beta * df[x] + alpha, '-', color = 'r')
plt.show()
print('Beta for', y + ':')
print(beta)
print('Alpha for', y + ':')
print(alpha)
def calculate_alpha_beta(df, x, y):
beta, alpha = np.polyfit(df[x], df[y] , 1) # First order polynomial = 1
return beta, alpha
def calculate_correlation(df):
'''Calculate correlation using the most common method -> Pearson.'''
print(df.corr(method = 'pearson'))
symbols = ['SPY', 'IBM', 'AAPL']
if __name__ == "__main__":
dates = dates_creator()
df = get_data(symbols, dates)
daily_returns = get_daily_returns(df)
plot(df)
plot(daily_returns)
calculate_correlation(daily_returns)
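# ---------------------------------------------------------------------------
# Hedged sketch (not part of the original script): the functions above expect
# CSV files under data/. The same daily-return and correlation computations
# can be sanity-checked on synthetic prices; pct_change() is the pandas
# shortcut for the df / df.shift(1) - 1 formula used in get_daily_returns().
def correlation_sketch():
    prices = pd.DataFrame({'SPY': [100.0, 101.0, 102.5, 101.5],
                           'IBM': [50.0, 50.5, 51.2, 50.9]},
                          index=pd.date_range('2015-01-01', periods=4))
    returns = prices.pct_change().fillna(0)
    return returns.corr(method='pearson')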
| apache-2.0 |
lpryszcz/bin | bed2stats.py | 1 | 2996 | #!/usr/bin/env python
desc="""Parse BED and print stats.
"""
epilog="""Author:
[email protected]
Dublin, 4/09/2012
"""
import os, sys
import numpy as np
#from optparse import OptionParser,OptionGroup
from datetime import datetime
from genome_annotation import get_contig2coverage
import matplotlib.pyplot as plt
def print_stats( lengths,addon="" ):
"""
"""
info = "%.2f kb in %s fragments%s (median: %s bp; mean: %.3f kb +-%.3f )"
print info%(sum(lengths)/10.0**3, len(lengths), addon, \
int(np.median(lengths)), np.mean(lengths)/10.0**3,
np.std(lengths)/10.0**3)
def bed2stats( handle,simple,verbose ):
"""Parse BED and print stats."""
r2l = {}
for l in handle:
ref,start,end = l.split()[:3]
start,end = int(start),int(end)
r2l[ "%s:%s-%s" % (ref,start,end) ] = end-start
#print summary
lengths = [ x for x in r2l.itervalues() ]
if not simple:
print_stats( lengths )
else:
print "%s\t%s\t%s" % ( handle.name,sum(lengths),len(r2l) )
return
longest = ""
r2l_sorted = sorted( r2l.iteritems(),key=lambda x: x[1], reverse=True )
for r,l in r2l_sorted[:20]:
longest += "\t\t%s\t%s\n" % ( l,r )
longest += "\t\t...\n\t\t%s\t%s" % ( r2l_sorted[-1][1],r2l_sorted[-1][0] )
if not simple:
print longest
lengths1kb = []
for x in r2l.itervalues():
if x>=1000:
lengths1kb.append( x )
if not simple:
print_stats( lengths1kb," >=1kb" )
def main():
import argparse
usage = "%(prog)s -v" #usage=usage,
parser = argparse.ArgumentParser(description=desc, epilog=epilog, \
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('--version', action='version', version='1.0b')
parser.add_argument("-v", "--verbose", default=False, action="store_true",
help="verbose")
parser.add_argument("-b", "--bed", nargs="+", default=[sys.stdin], type=file,
help="BED file [stdin]")
#parser.add_option("-c", dest="cov_fract", default=0.75, type=float,
# help="frac of mean coverage [%default]")
parser.add_argument("-s", dest="simple", default=False, action="store_true",
help="simple output [%(default)s]")
o = parser.parse_args()
if o.verbose:
sys.stderr.write("Options: %s\n"%str(o))
if o.simple:
print "#sample\tsum\toccurencies"
for handle in o.bed:
bed2stats(handle, o.simple, o.verbose)
if __name__=='__main__':
t0 = datetime.now()
try:
main()
except KeyboardInterrupt:
sys.stderr.write("\nCtrl-C pressed! \n")
except IOError as e:
sys.stderr.write("I/O error({0}): {1}\n".format(e.errno, e.strerror))
dt = datetime.now()-t0
sys.stderr.write("#Time elapsed: %s\n"%dt)
| gpl-3.0 |
CoderHam/Machine_Learning_Projects | regression/finance_regression.py | 1 | 2099 | #!/usr/bin/python
"""
loads up/formats a modified version of the dataset
draws a little scatterplot of the training/testing data
"""
from time import time
import sys
import pickle
sys.path.append("../tools/")
from feature_format import featureFormat, targetFeatureSplit
dictionary = pickle.load( open("../final_project/final_project_dataset_modified.pkl", "r") )
### the first item in the features list will be the "target" feature
features_list = ["bonus", "salary"]
data = featureFormat( dictionary, features_list, remove_any_zeroes=True)
target, features = targetFeatureSplit( data )
### training-testing split needed in regression, just like classification
from sklearn.cross_validation import train_test_split
feature_train, feature_test, target_train, target_test = train_test_split(features, target, test_size=0.5, random_state=42)
train_color = "b"
test_color = "r"
### the scatterplot, with color-coded training and testing points
import matplotlib.pyplot as plt
for feature, target in zip(feature_test, target_test):
plt.scatter( feature, target, color=test_color )
for feature, target in zip(feature_train, target_train):
plt.scatter( feature, target, color=train_color )
### labels for the legend
plt.scatter(feature_test[0], target_test[0], color=test_color, label="test")
plt.scatter(feature_train[0], target_train[0], color=train_color, label="train")
from sklearn.linear_model import LinearRegression
reg = LinearRegression()
t0 = time()
reg.fit(feature_train, target_train)
print "training time:", round(time()-t0, 3), "s\n"
t0 = time()
print "prediction value: ", reg.predict(feature_test)
print "precition time:", round(time()-t0, 3), "s\n"
print "slope: ", reg.coef_
print "\nintercept: ", reg.intercept_
print "\nR-square score for training data: ", reg.score(feature_train, target_train)
print "\nR-square score for test data: ", reg.score(feature_test, target_test)
### draw the regression line, once it's coded
try:
plt.plot(feature_test, reg.predict(feature_test) )
except NameError:
pass
plt.xlabel(features_list[1])
plt.ylabel(features_list[0])
plt.legend()
plt.show()
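### Hedged sketch (not part of the original exercise): a quick manual check
### that predict() for this one-feature model is just slope * x + intercept,
### using the coef_ and intercept_ attributes printed above. Illustrative
### helper only; it is not called here.
def manual_prediction_check(model, salary_value):
    import numpy as np
    by_formula = model.coef_[0] * salary_value + model.intercept_
    by_predict = model.predict(np.array([[salary_value]]))[0]
    return by_formula, by_predict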
| gpl-2.0 |
JeffAbrahamson/UNA_compta | ebp/budget_comparatif.py | 1 | 7596 | #!/usr/bin/python3
"""Compare two budgets.
"""
import argparse
import datetime
import numpy as np
import pandas as pd
import jinja2
import os
def compare_budget_lists(config_1_column, config_2_column):
"""Compute one side of a budget comparison.
The input is the part of the config that represents a column of
the budget (expenses or income) and a map from account
name to balance.
The output is a list of lists, each of which has either one
element (a title string) or three elements (a label, a budget_1,
and a budget_2).
"""
if len(config_1_column) == 0 or len(config_2_column) == 0:
print("Empty budgets not allowed.")
return [["Empty budgets aren't allowed", 0, 0]]
labels_1 = [x[0] for x in config_1_column]
label_set_1 = set(labels_1)
labels_2 = [x[0] for x in config_2_column]
label_set_2 = set(labels_2)
label_to_amount_1 = {x[0]: x[1] for x in config_1_column if len(x) == 3}
label_to_amount_2 = {x[0]: x[1] for x in config_2_column if len(x) == 3}
# Map config 1 label to config 2 labels that should follow it.
# This only has entries if there are config 2 labels not in config 1
# that should follow the item.
additions = {}
last_label = labels_1[0]
for label in labels_2:
if label not in label_set_1:
additions[last_label] = label
else:
last_label = label # Label is in both configs 1 and 2.
out_labels = []
#for label in labels_1:
for label_index in range(len(labels_1)):
label = labels_1[label_index]
out_labels.append(label)
while label in additions:
out_labels.append(additions[label])
label_index += 1
if label_index < len(labels_1):
label = labels_1[label_index]
else:
label = None # Break out of loop.
return [[label,
label_to_amount_1.get(label, 0),
label_to_amount_2.get(label, 0)]
for label in out_labels]
def compare_budgets(config_filename_1, config_filename_2):
"""Compare two budgets.
The contents of config_filename* are python code. It should be a
list of two lists, each of which contains one or three members:
- The budget line name. If this is the only list element,
then it is a title.
- Budgeted amount
- A list of accounts to aggregate.
We don't actually look at the accounts to aggregate here, only at
the budgeted values. On the other hand, we do want to compare
values between the two budgets.
If we change the label of a line, this program will consider them
to be different between the two budgets.
The returned list contains two lists, the first for expenses, the
second for income. In each list, the elements contain a list
which has one element (a title) or three (a label, a year N
budget, and a year N+1 budget).
"""
with open(config_filename_1, 'r') as config_fp:
config_1 = eval(config_fp.read())
with open(config_filename_2, 'r') as config_fp:
config_2 = eval(config_fp.read())
expenses = compare_budget_lists(config_1[0], config_2[0])
income = compare_budget_lists(config_1[1], config_2[1])
return [expenses, income]
def budget_warnings(budgets):
"""Return a string with any warnings about the budgets.
The input is the computed comparison: two lists (the first for
expenses, the second for income) of [label, N, N+1] entries.
"""
expenses_n = sum([x[1] for x in budgets[0]])
expenses_n1 = sum([x[2] for x in budgets[0]])
income_n = sum([x[1] for x in budgets[1]])
income_n1 = sum([x[2] for x in budgets[1]])
if np.round(income_n1, 2) != np.round(expenses_n1, 2) or \
np.round(income_n, 2) != np.round(expenses_n, 2):
return "À noter : déséquilibre : $N$={n}, $N+1$={n1}".format(
n=income_n - expenses_n,
n1=income_n1 - expenses_n1)
return ""
def render_as_text_one_column(budget_column):
"""Render one column of a balance sheet as text.
"""
total_budget_1 = 0
total_budget_2 = 0
for line in budget_column:
if len(line) == 1:
print('\n', line[0])
else:
total_budget_1 += line[1]
total_budget_2 += line[2]
print('{label:40s} {budget:6.2f} {realised:6.2f}'.format(
label=line[0], budget=line[1], realised=line[2]))
print('{nothing:40s} {budget:6.2f} {realised:6.2f}'.format(
nothing='', budget=total_budget_1, realised=total_budget_2))
def render_as_text(budget):
"""Print the balance sheet to stdout as text in a single column.
The balance sheet is in list of lists format. The first list is
expenses, the second income. Cf comment in
get_budget_as_list() for more.
"""
print('==== Dépenses ====')
render_as_text_one_column(budget[0])
print('\n==== Recettes ====')
render_as_text_one_column(budget[1])
def render_as_latex_one_column(budget_column):
"""Return a string that is the latex for one column.
We're in a tabu environment with three columns. Return a sequence
of "label & budget & balance" lines.
"""
table = ""
total_budget_1 = 0
total_budget_2 = 0
for line in budget_column:
if 0 == line[1] and 0 == line[2]:
table += r'\textbf{{ {label} }}&&\\'.format(label=line[0])
table += '\n'
else:
total_budget_1 += line[1]
total_budget_2 += line[2]
table += r'{label}&{budget:6.0f}&{realised:6.0f}\\[1mm]'.format(
label=line[0], budget=line[1], realised=line[2])
table += '\n'
table += r'\hline' + '\n'
table += r'Total & {budget:6.0f} & {realised:6.0f}\\'.format(
nothing='', budget=total_budget_1, realised=total_budget_2)
table += '\n'
return table
def render_as_latex(budget):
"""Print the balance sheet to budget_<date>.tex as latex.
The balance sheet is in list of lists format. The first list is
expenses, the second income. Cf comment in
get_budget_as_list() for more.
Latex the file to create budget_<date>.pdf.
"""
with open('budget_comparison.tex', 'r') as fp_template:
template_text = fp_template.read()
template = jinja2.Template(template_text)
out_filename = 'budget-comparison_{date}.tex'.format(
date=datetime.date.today().strftime('%Y%m%d'))
with open(out_filename, 'w') as fp_latex:
fp_latex.write(template.render(
expenses=render_as_latex_one_column(budget[0]),
income=render_as_latex_one_column(budget[1]),
warnings=budget_warnings(budget)))
os.system('pdflatex ' + out_filename)
def main():
"""Do what we do."""
parser = argparse.ArgumentParser()
parser.add_argument('--config-N', type=str, required=True,
help='config file mapping accounts to budget lines, year N')
parser.add_argument('--config-N1', type=str, required=True,
help='config file mapping accounts to budget lines, year N+1')
parser.add_argument('--render-as', type=str, required=False,
default='text',
help='One of text or latex')
args = parser.parse_args()
budgets = compare_budgets(args.config_N, args.config_N1)
if 'text' == args.render_as:
render_as_text(budgets)
if 'latex' == args.render_as:
render_as_latex(budgets)
if __name__ == '__main__':
main()
| gpl-3.0 |
AzamYahya/shogun | examples/undocumented/python_modular/graphical/preprocessor_kpca_graphical.py | 26 | 1893 | from numpy import *
import matplotlib.pyplot as p
import os, sys, inspect
path = os.path.abspath(os.path.join(os.path.dirname(__file__), '../tools'))
if not path in sys.path:
sys.path.insert(1, path)
del path
from generate_circle_data import circle_data
cir=circle_data()
number_of_points_for_circle1=42
number_of_points_for_circle2=122
row_vector=2
data=cir.generate_data(number_of_points_for_circle1,number_of_points_for_circle2,row_vector)
d=zeros((row_vector,number_of_points_for_circle1))
d2=zeros((row_vector,number_of_points_for_circle2))
d=[data[i][0:number_of_points_for_circle1] for i in range(0,row_vector)]
d2=[data[i][number_of_points_for_circle1:(number_of_points_for_circle1+number_of_points_for_circle2)] for i in range(0,row_vector)]
p.plot(d[1][:],d[0][:],'x',d2[1][:],d2[0][:],'o')
p.title('input data')
p.show()
parameter_list = [[data,0.01,1.0], [data,0.05,2.0]]
def preprocessor_kernelpca_modular (data, threshold, width):
from modshogun import RealFeatures
from modshogun import KernelPCA
from modshogun import GaussianKernel
features = RealFeatures(data)
kernel=GaussianKernel(features,features,width)
preprocessor=KernelPCA(kernel)
preprocessor.init(features)
preprocessor.set_target_dim(2)
#X=preprocessor.get_transformation_matrix()
X2=preprocessor.apply_to_feature_matrix(features)
lx0=len(X2)
modified_d1=zeros((lx0,number_of_points_for_circle1))
modified_d2=zeros((lx0,number_of_points_for_circle2))
modified_d1=[X2[i][0:number_of_points_for_circle1] for i in range(lx0)]
modified_d2=[X2[i][number_of_points_for_circle1:(number_of_points_for_circle1+number_of_points_for_circle2)] for i in range(lx0)]
p.plot(modified_d1[0][:],modified_d1[1][:],'o',modified_d2[0][:],modified_d2[1][:],'x')
p.title('final data')
p.show()
return features
if __name__=='__main__':
print('KernelPCA')
preprocessor_kernelpca_modular(*parameter_list[0])
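# Hedged sketch (not part of the original Shogun example): a roughly
# equivalent embedding can be computed with scikit-learn's KernelPCA and an
# RBF kernel. Note that sklearn expects samples in rows (the data above is
# features x samples), and the mapping from Shogun's Gaussian kernel width to
# sklearn's gamma depends on Shogun's parametrization, so treat gamma below
# as a tunable parameter rather than an exact translation.
def kernelpca_sklearn_sketch(data, gamma=1.0):
    from sklearn.decomposition import KernelPCA
    kpca = KernelPCA(n_components=2, kernel='rbf', gamma=gamma)
    return kpca.fit_transform(data.T)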
| gpl-3.0 |
nmartensen/pandas | pandas/core/indexing.py | 2 | 74508 | # pylint: disable=W0223
import textwrap
import warnings
import numpy as np
from pandas.compat import range, zip
import pandas.compat as compat
from pandas.core.dtypes.generic import ABCDataFrame, ABCPanel, ABCSeries
from pandas.core.dtypes.common import (
is_integer_dtype,
is_integer, is_float,
is_list_like,
is_sequence,
is_iterator,
is_scalar,
is_sparse,
_is_unorderable_exception,
_ensure_platform_int)
from pandas.core.dtypes.missing import isna, _infer_fill_value
from pandas.core.index import Index, MultiIndex
import pandas.core.common as com
from pandas.core.common import (is_bool_indexer, _asarray_tuplesafe,
is_null_slice, is_full_slice,
_values_from_object)
# the supported indexers
def get_indexers_list():
return [
('ix', _IXIndexer),
('iloc', _iLocIndexer),
('loc', _LocIndexer),
('at', _AtIndexer),
('iat', _iAtIndexer),
]
# "null slice"
_NS = slice(None, None)
# the public IndexSlicerMaker
class _IndexSlice(object):
"""
Create an object to more easily perform multi-index slicing
Examples
--------
>>> midx = pd.MultiIndex.from_product([['A0','A1'], ['B0','B1','B2','B3']])
>>> columns = ['foo', 'bar']
>>> dfmi = pd.DataFrame(np.arange(16).reshape((len(midx), len(columns))),
index=midx, columns=columns)
Using the default slice command:
>>> dfmi.loc[(slice(None), slice('B0', 'B1')), :]
foo bar
A0 B0 0 1
B1 2 3
A1 B0 8 9
B1 10 11
Using the IndexSlice class for a more intuitive command:
>>> idx = pd.IndexSlice
>>> dfmi.loc[idx[:, 'B0':'B1'], :]
foo bar
A0 B0 0 1
B1 2 3
A1 B0 8 9
B1 10 11
"""
def __getitem__(self, arg):
return arg
IndexSlice = _IndexSlice()
class IndexingError(Exception):
pass
class _NDFrameIndexer(object):
_valid_types = None
_exception = KeyError
axis = None
def __init__(self, obj, name):
self.obj = obj
self.ndim = obj.ndim
self.name = name
def __call__(self, axis=None):
# we need to return a copy of ourselves
new_self = self.__class__(self.obj, self.name)
new_self.axis = axis
return new_self
def __iter__(self):
raise NotImplementedError('ix is not iterable')
def __getitem__(self, key):
if type(key) is tuple:
key = tuple(com._apply_if_callable(x, self.obj) for x in key)
try:
values = self.obj.get_value(*key)
if is_scalar(values):
return values
except Exception:
pass
return self._getitem_tuple(key)
else:
key = com._apply_if_callable(key, self.obj)
return self._getitem_axis(key, axis=0)
def _get_label(self, label, axis=0):
if self.ndim == 1:
# for perf reasons we want to try _xs first
# as its basically direct indexing
# but will fail when the index is not present
# see GH5667
try:
return self.obj._xs(label, axis=axis)
except:
return self.obj[label]
elif isinstance(label, tuple) and isinstance(label[axis], slice):
raise IndexingError('no slices here, handle elsewhere')
return self.obj._xs(label, axis=axis)
def _get_loc(self, key, axis=0):
return self.obj._ixs(key, axis=axis)
def _slice(self, obj, axis=0, kind=None):
return self.obj._slice(obj, axis=axis, kind=kind)
def _get_setitem_indexer(self, key):
if self.axis is not None:
return self._convert_tuple(key, is_setter=True)
axis = self.obj._get_axis(0)
if isinstance(axis, MultiIndex) and self.name != 'iloc':
try:
return axis.get_loc(key)
except Exception:
pass
if isinstance(key, tuple):
try:
return self._convert_tuple(key, is_setter=True)
except IndexingError:
pass
if isinstance(key, range):
return self._convert_range(key, is_setter=True)
try:
return self._convert_to_indexer(key, is_setter=True)
except TypeError as e:
# invalid indexer type vs 'other' indexing errors
if 'cannot do' in str(e):
raise
raise IndexingError(key)
def __setitem__(self, key, value):
if isinstance(key, tuple):
key = tuple(com._apply_if_callable(x, self.obj) for x in key)
else:
key = com._apply_if_callable(key, self.obj)
indexer = self._get_setitem_indexer(key)
self._setitem_with_indexer(indexer, value)
def _has_valid_type(self, k, axis):
raise NotImplementedError()
def _has_valid_tuple(self, key):
""" check the key for valid keys across my indexer """
for i, k in enumerate(key):
if i >= self.obj.ndim:
raise IndexingError('Too many indexers')
if not self._has_valid_type(k, i):
raise ValueError("Location based indexing can only have "
"[{types}] types"
.format(types=self._valid_types))
def _should_validate_iterable(self, axis=0):
""" return a boolean whether this axes needs validation for a passed
iterable
"""
ax = self.obj._get_axis(axis)
if isinstance(ax, MultiIndex):
return False
elif ax.is_floating():
return False
return True
def _is_nested_tuple_indexer(self, tup):
if any([isinstance(ax, MultiIndex) for ax in self.obj.axes]):
return any([is_nested_tuple(tup, ax) for ax in self.obj.axes])
return False
def _convert_tuple(self, key, is_setter=False):
keyidx = []
if self.axis is not None:
axis = self.obj._get_axis_number(self.axis)
for i in range(self.ndim):
if i == axis:
keyidx.append(self._convert_to_indexer(
key, axis=axis, is_setter=is_setter))
else:
keyidx.append(slice(None))
else:
for i, k in enumerate(key):
if i >= self.obj.ndim:
raise IndexingError('Too many indexers')
idx = self._convert_to_indexer(k, axis=i, is_setter=is_setter)
keyidx.append(idx)
return tuple(keyidx)
def _convert_range(self, key, is_setter=False):
""" convert a range argument """
return list(key)
def _convert_scalar_indexer(self, key, axis):
# if we are accessing via lowered dim, use the last dim
ax = self.obj._get_axis(min(axis, self.ndim - 1))
# a scalar
return ax._convert_scalar_indexer(key, kind=self.name)
def _convert_slice_indexer(self, key, axis):
# if we are accessing via lowered dim, use the last dim
ax = self.obj._get_axis(min(axis, self.ndim - 1))
return ax._convert_slice_indexer(key, kind=self.name)
def _has_valid_setitem_indexer(self, indexer):
return True
def _has_valid_positional_setitem_indexer(self, indexer):
""" validate that an positional indexer cannot enlarge its target
will raise if needed, does not modify the indexer externally
"""
if isinstance(indexer, dict):
raise IndexError("{0} cannot enlarge its target object"
.format(self.name))
else:
if not isinstance(indexer, tuple):
indexer = self._tuplify(indexer)
for ax, i in zip(self.obj.axes, indexer):
if isinstance(i, slice):
# should check the stop slice?
pass
elif is_list_like_indexer(i):
# should check the elements?
pass
elif is_integer(i):
if i >= len(ax):
raise IndexError("{name} cannot enlarge its target "
"object".format(name=self.name))
elif isinstance(i, dict):
raise IndexError("{name} cannot enlarge its target object"
.format(name=self.name))
return True
def _setitem_with_indexer(self, indexer, value):
self._has_valid_setitem_indexer(indexer)
# also has the side effect of consolidating in-place
# TODO: Panel, DataFrame are not imported, remove?
from pandas import Panel, DataFrame, Series # noqa
info_axis = self.obj._info_axis_number
# maybe partial set
take_split_path = self.obj._is_mixed_type
# if there is only one block/type, still have to take split path
# unless the block is one-dimensional or it can hold the value
if not take_split_path and self.obj._data.blocks:
blk, = self.obj._data.blocks
if 1 < blk.ndim: # in case of dict, keys are indices
val = list(value.values()) if isinstance(value,
dict) else value
take_split_path = not blk._can_hold_element(val)
if isinstance(indexer, tuple) and len(indexer) == len(self.obj.axes):
for i, ax in zip(indexer, self.obj.axes):
# if we have any multi-indexes that have non-trivial slices
# (not null slices) then we must take the split path, xref
# GH 10360
if (isinstance(ax, MultiIndex) and
not (is_integer(i) or is_null_slice(i))):
take_split_path = True
break
if isinstance(indexer, tuple):
nindexer = []
for i, idx in enumerate(indexer):
if isinstance(idx, dict):
# reindex the axis to the new value
# and set inplace
key, _ = convert_missing_indexer(idx)
# if this is the items axes, then take the main missing
# path first
# this correctly sets the dtype and avoids cache issues
# essentially this separates out the block that is needed
# to possibly be modified
if self.ndim > 1 and i == self.obj._info_axis_number:
# add the new item, and set the value
# must have all defined axes if we have a scalar
# or a list-like on the non-info axes if we have a
# list-like
len_non_info_axes = [
len(_ax) for _i, _ax in enumerate(self.obj.axes)
if _i != i
]
if any([not l for l in len_non_info_axes]):
if not is_list_like_indexer(value):
raise ValueError("cannot set a frame with no "
"defined index and a scalar")
self.obj[key] = value
return self.obj
# add a new item with the dtype setup
self.obj[key] = _infer_fill_value(value)
new_indexer = convert_from_missing_indexer_tuple(
indexer, self.obj.axes)
self._setitem_with_indexer(new_indexer, value)
return self.obj
# reindex the axis
# make sure to clear the cache because we are
# just replacing the block manager here
# so the object is the same
index = self.obj._get_axis(i)
labels = index.insert(len(index), key)
self.obj._data = self.obj.reindex_axis(labels, i)._data
self.obj._maybe_update_cacher(clear=True)
self.obj.is_copy = None
nindexer.append(labels.get_loc(key))
else:
nindexer.append(idx)
indexer = tuple(nindexer)
else:
indexer, missing = convert_missing_indexer(indexer)
if missing:
# reindex the axis to the new value
# and set inplace
if self.ndim == 1:
index = self.obj.index
new_index = index.insert(len(index), indexer)
# we have a coerced indexer, e.g. a float
# that matches in an Int64Index, so
# we will not create a duplicate index, rather
# index to that element
# e.g. 0.0 -> 0
# GH12246
if index.is_unique:
new_indexer = index.get_indexer([new_index[-1]])
if (new_indexer != -1).any():
return self._setitem_with_indexer(new_indexer,
value)
# this preserves dtype of the value
new_values = Series([value])._values
if len(self.obj._values):
try:
new_values = np.concatenate([self.obj._values,
new_values])
except TypeError:
new_values = np.concatenate([self.obj.asobject,
new_values])
self.obj._data = self.obj._constructor(
new_values, index=new_index, name=self.obj.name)._data
self.obj._maybe_update_cacher(clear=True)
return self.obj
elif self.ndim == 2:
# no columns and scalar
if not len(self.obj.columns):
raise ValueError("cannot set a frame with no defined "
"columns")
# append a Series
if isinstance(value, Series):
value = value.reindex(index=self.obj.columns,
copy=True)
value.name = indexer
# a list-list
else:
# must have conforming columns
if is_list_like_indexer(value):
if len(value) != len(self.obj.columns):
raise ValueError("cannot set a row with "
"mismatched columns")
value = Series(value, index=self.obj.columns,
name=indexer)
self.obj._data = self.obj.append(value)._data
self.obj._maybe_update_cacher(clear=True)
return self.obj
# set using setitem (Panel and > dims)
elif self.ndim >= 3:
return self.obj.__setitem__(indexer, value)
# set
item_labels = self.obj._get_axis(info_axis)
# align and set the values
if take_split_path:
if not isinstance(indexer, tuple):
indexer = self._tuplify(indexer)
if isinstance(value, ABCSeries):
value = self._align_series(indexer, value)
info_idx = indexer[info_axis]
if is_integer(info_idx):
info_idx = [info_idx]
labels = item_labels[info_idx]
# if we have a partial multiindex, then need to adjust the plane
# indexer here
if (len(labels) == 1 and
isinstance(self.obj[labels[0]].axes[0], MultiIndex)):
item = labels[0]
obj = self.obj[item]
index = obj.index
idx = indexer[:info_axis][0]
plane_indexer = tuple([idx]) + indexer[info_axis + 1:]
lplane_indexer = length_of_indexer(plane_indexer[0], index)
# require that we are setting the right number of values that
# we are indexing
if is_list_like_indexer(value) and np.iterable(
value) and lplane_indexer != len(value):
if len(obj[idx]) != len(value):
raise ValueError("cannot set using a multi-index "
"selection indexer with a different "
"length than the value")
# make sure we have an ndarray
value = getattr(value, 'values', value).ravel()
# we can directly set the series here
# as we select a slice indexer on the mi
idx = index._convert_slice_indexer(idx)
obj._consolidate_inplace()
obj = obj.copy()
obj._data = obj._data.setitem(indexer=tuple([idx]),
value=value)
self.obj[item] = obj
return
# non-mi
else:
plane_indexer = indexer[:info_axis] + indexer[info_axis + 1:]
if info_axis > 0:
plane_axis = self.obj.axes[:info_axis][0]
lplane_indexer = length_of_indexer(plane_indexer[0],
plane_axis)
else:
lplane_indexer = 0
def setter(item, v):
s = self.obj[item]
pi = plane_indexer[0] if lplane_indexer == 1 else plane_indexer
# perform the equivalent of a setitem on the info axis
# as we have a null slice or a slice with full bounds
# which means essentially reassign to the columns of a
# multi-dim object
# GH6149 (null slice), GH10408 (full bounds)
if (isinstance(pi, tuple) and
all(is_null_slice(idx) or
is_full_slice(idx, len(self.obj))
for idx in pi)):
s = v
else:
# set the item, possibly having a dtype change
s._consolidate_inplace()
s = s.copy()
s._data = s._data.setitem(indexer=pi, value=v)
s._maybe_update_cacher(clear=True)
# reset the sliced object if unique
self.obj[item] = s
def can_do_equal_len():
""" return True if we have an equal len settable """
if not len(labels) == 1 or not np.iterable(value):
return False
l = len(value)
item = labels[0]
index = self.obj[item].index
# equal len list/ndarray
if len(index) == l:
return True
elif lplane_indexer == l:
return True
return False
# we need an iterable, with a ndim of at least 1
# eg. don't pass through np.array(0)
if is_list_like_indexer(value) and getattr(value, 'ndim', 1) > 0:
# we have an equal len Frame
if isinstance(value, ABCDataFrame) and value.ndim > 1:
sub_indexer = list(indexer)
multiindex_indexer = isinstance(labels, MultiIndex)
for item in labels:
if item in value:
sub_indexer[info_axis] = item
v = self._align_series(
tuple(sub_indexer), value[item],
multiindex_indexer)
else:
v = np.nan
setter(item, v)
# we have an equal len ndarray/convertible to our labels
elif np.array(value).ndim == 2:
# note that this coerces the dtype if we are mixed
# GH 7551
value = np.array(value, dtype=object)
if len(labels) != value.shape[1]:
raise ValueError('Must have equal len keys and value '
'when setting with an ndarray')
for i, item in enumerate(labels):
# setting with a list, recoerces
setter(item, value[:, i].tolist())
# we have an equal len list/ndarray
elif can_do_equal_len():
setter(labels[0], value)
# per label values
else:
if len(labels) != len(value):
raise ValueError('Must have equal len keys and value '
'when setting with an iterable')
for item, v in zip(labels, value):
setter(item, v)
else:
# scalar
for item in labels:
setter(item, value)
else:
if isinstance(indexer, tuple):
indexer = maybe_convert_ix(*indexer)
# if we are setting on the info axis ONLY
# set using those methods to avoid block-splitting
# logic here
if (len(indexer) > info_axis and
is_integer(indexer[info_axis]) and
all(is_null_slice(idx) for i, idx in enumerate(indexer)
if i != info_axis) and item_labels.is_unique):
self.obj[item_labels[indexer[info_axis]]] = value
return
if isinstance(value, (ABCSeries, dict)):
value = self._align_series(indexer, Series(value))
elif isinstance(value, ABCDataFrame):
value = self._align_frame(indexer, value)
if isinstance(value, ABCPanel):
value = self._align_panel(indexer, value)
# check for chained assignment
self.obj._check_is_chained_assignment_possible()
# actually do the set
self.obj._consolidate_inplace()
self.obj._data = self.obj._data.setitem(indexer=indexer,
value=value)
self.obj._maybe_update_cacher(clear=True)
def _align_series(self, indexer, ser, multiindex_indexer=False):
"""
Parameters
----------
indexer : tuple, slice, scalar
The indexer used to get the locations that will be set to
`ser`
ser : pd.Series
The values to assign to the locations specified by `indexer`
multiindex_indexer : boolean, optional
Defaults to False. Should be set to True if `indexer` was from
a `pd.MultiIndex`, to avoid unnecessary broadcasting.
Returns
--------
`np.array` of `ser` broadcast to the appropriate shape for assignment
to the locations selected by `indexer`
"""
if isinstance(indexer, (slice, np.ndarray, list, Index)):
indexer = tuple([indexer])
if isinstance(indexer, tuple):
# flatten np.ndarray indexers
ravel = lambda i: i.ravel() if isinstance(i, np.ndarray) else i
indexer = tuple(map(ravel, indexer))
aligners = [not is_null_slice(idx) for idx in indexer]
sum_aligners = sum(aligners)
single_aligner = sum_aligners == 1
is_frame = self.obj.ndim == 2
is_panel = self.obj.ndim >= 3
obj = self.obj
# are we a single alignable value on a non-primary
# dim (e.g. panel: 1,2, or frame: 0) ?
# hence need to align to a single axis dimension
# rather that find all valid dims
# frame
if is_frame:
single_aligner = single_aligner and aligners[0]
# panel
elif is_panel:
single_aligner = (single_aligner and
(aligners[1] or aligners[2]))
# we have a frame, with multiple indexers on both axes; and a
# series, so need to broadcast (see GH5206)
if (sum_aligners == self.ndim and
all([is_sequence(_) for _ in indexer])):
ser = ser.reindex(obj.axes[0][indexer[0]], copy=True)._values
# single indexer
if len(indexer) > 1 and not multiindex_indexer:
l = len(indexer[1])
ser = np.tile(ser, l).reshape(l, -1).T
return ser
for i, idx in enumerate(indexer):
ax = obj.axes[i]
# multiple aligners (or null slices)
if is_sequence(idx) or isinstance(idx, slice):
if single_aligner and is_null_slice(idx):
continue
new_ix = ax[idx]
if not is_list_like_indexer(new_ix):
new_ix = Index([new_ix])
else:
new_ix = Index(new_ix)
if ser.index.equals(new_ix) or not len(new_ix):
return ser._values.copy()
return ser.reindex(new_ix)._values
# 2 dims
elif single_aligner and is_frame:
# reindex along index
ax = self.obj.axes[1]
if ser.index.equals(ax) or not len(ax):
return ser._values.copy()
return ser.reindex(ax)._values
# >2 dims
elif single_aligner:
broadcast = []
for n, labels in enumerate(self.obj._get_plane_axes(i)):
# reindex along the matching dimensions
if len(labels & ser.index):
ser = ser.reindex(labels)
else:
broadcast.append((n, len(labels)))
# broadcast along other dims
ser = ser._values.copy()
for (axis, l) in broadcast:
shape = [-1] * (len(broadcast) + 1)
shape[axis] = l
ser = np.tile(ser, l).reshape(shape)
if self.obj.ndim == 3:
ser = ser.T
return ser
elif is_scalar(indexer):
ax = self.obj._get_axis(1)
if ser.index.equals(ax):
return ser._values.copy()
return ser.reindex(ax)._values
raise ValueError('Incompatible indexer with Series')
def _align_frame(self, indexer, df):
is_frame = self.obj.ndim == 2
is_panel = self.obj.ndim >= 3
if isinstance(indexer, tuple):
aligners = [not is_null_slice(idx) for idx in indexer]
sum_aligners = sum(aligners)
# TODO: single_aligner is not used
single_aligner = sum_aligners == 1 # noqa
idx, cols = None, None
sindexers = []
for i, ix in enumerate(indexer):
ax = self.obj.axes[i]
if is_sequence(ix) or isinstance(ix, slice):
if isinstance(ix, np.ndarray):
ix = ix.ravel()
if idx is None:
idx = ax[ix]
elif cols is None:
cols = ax[ix]
else:
break
else:
sindexers.append(i)
# panel
if is_panel:
# need to conform to the convention
# as we are not selecting on the items axis
# and we have a single indexer
# GH 7763
if len(sindexers) == 1 and sindexers[0] != 0:
df = df.T
if idx is None:
idx = df.index
if cols is None:
cols = df.columns
if idx is not None and cols is not None:
if df.index.equals(idx) and df.columns.equals(cols):
val = df.copy()._values
else:
val = df.reindex(idx, columns=cols)._values
return val
elif ((isinstance(indexer, slice) or is_list_like_indexer(indexer)) and
is_frame):
ax = self.obj.index[indexer]
if df.index.equals(ax):
val = df.copy()._values
else:
# we have a multi-index and are trying to align
# with a particular, level GH3738
if (isinstance(ax, MultiIndex) and
isinstance(df.index, MultiIndex) and
ax.nlevels != df.index.nlevels):
raise TypeError("cannot align on a multi-index with out "
"specifying the join levels")
val = df.reindex(index=ax)._values
return val
elif is_scalar(indexer) and is_panel:
idx = self.obj.axes[1]
cols = self.obj.axes[2]
# by definition we are indexing on the 0th axis
# a passed in dataframe which is actually a transpose
# of what is needed
if idx.equals(df.index) and cols.equals(df.columns):
return df.copy()._values
return df.reindex(idx, columns=cols)._values
raise ValueError('Incompatible indexer with DataFrame')
def _align_panel(self, indexer, df):
# TODO: is_frame, is_panel are unused
is_frame = self.obj.ndim == 2 # noqa
is_panel = self.obj.ndim >= 3 # noqa
raise NotImplementedError("cannot set using an indexer with a Panel "
"yet!")
def _getitem_tuple(self, tup):
try:
return self._getitem_lowerdim(tup)
except IndexingError:
pass
# no multi-index, so validate all of the indexers
self._has_valid_tuple(tup)
# ugly hack for GH #836
if self._multi_take_opportunity(tup):
return self._multi_take(tup)
# no shortcut needed
retval = self.obj
for i, key in enumerate(tup):
if i >= self.obj.ndim:
raise IndexingError('Too many indexers')
if is_null_slice(key):
continue
retval = getattr(retval, self.name)._getitem_axis(key, axis=i)
return retval
def _multi_take_opportunity(self, tup):
from pandas.core.generic import NDFrame
# ugly hack for GH #836
if not isinstance(self.obj, NDFrame):
return False
if not all(is_list_like_indexer(x) for x in tup):
return False
# just too complicated
for indexer, ax in zip(tup, self.obj._data.axes):
if isinstance(ax, MultiIndex):
return False
elif is_bool_indexer(indexer):
return False
elif not ax.is_unique:
return False
return True
def _multi_take(self, tup):
""" create the reindex map for our objects, raise the _exception if we
can't create the indexer
"""
try:
o = self.obj
d = dict(
[(a, self._convert_for_reindex(t, axis=o._get_axis_number(a)))
for t, a in zip(tup, o._AXIS_ORDERS)])
return o.reindex(**d)
except(KeyError, IndexingError):
raise self._exception
def _convert_for_reindex(self, key, axis=0):
labels = self.obj._get_axis(axis)
if is_bool_indexer(key):
key = check_bool_indexer(labels, key)
return labels[key]
else:
if isinstance(key, Index):
keyarr = labels._convert_index_indexer(key)
else:
# asarray can be unsafe, NumPy strings are weird
keyarr = _asarray_tuplesafe(key)
if is_integer_dtype(keyarr):
# Cast the indexer to uint64 if possible so
# that the values returned from indexing are
# also uint64.
keyarr = labels._convert_arr_indexer(keyarr)
if not labels.is_integer():
keyarr = _ensure_platform_int(keyarr)
return labels.take(keyarr)
return keyarr
def _handle_lowerdim_multi_index_axis0(self, tup):
# we have an axis0 multi-index, handle or raise
try:
# fast path for series or for tup devoid of slices
return self._get_label(tup, axis=0)
except TypeError:
# slices are unhashable
pass
except Exception as e1:
if isinstance(tup[0], (slice, Index)):
raise IndexingError("Handle elsewhere")
# raise the error if we are not sorted
ax0 = self.obj._get_axis(0)
if not ax0.is_lexsorted_for_tuple(tup):
raise e1
return None
def _getitem_lowerdim(self, tup):
# we can directly get the axis result since the axis is specified
if self.axis is not None:
axis = self.obj._get_axis_number(self.axis)
return self._getitem_axis(tup, axis=axis)
# we may have a nested tuples indexer here
if self._is_nested_tuple_indexer(tup):
return self._getitem_nested_tuple(tup)
# we may be using a tuple to represent multiple dimensions here
ax0 = self.obj._get_axis(0)
# ...but iloc should handle the tuple as simple integer-location
# instead of checking it as multiindex representation (GH 13797)
if isinstance(ax0, MultiIndex) and self.name != 'iloc':
result = self._handle_lowerdim_multi_index_axis0(tup)
if result is not None:
return result
if len(tup) > self.obj.ndim:
raise IndexingError("Too many indexers. handle elsewhere")
# to avoid wasted computation
# df.ix[d1:d2, 0] -> columns first (True)
# df.ix[0, ['C', 'B', 'A']] -> rows first (False)
for i, key in enumerate(tup):
if is_label_like(key) or isinstance(key, tuple):
section = self._getitem_axis(key, axis=i)
# we have yielded a scalar ?
if not is_list_like_indexer(section):
return section
elif section.ndim == self.ndim:
# we're in the middle of slicing through a MultiIndex
# revise the key wrt to `section` by inserting an _NS
new_key = tup[:i] + (_NS,) + tup[i + 1:]
else:
new_key = tup[:i] + tup[i + 1:]
# unfortunately need an odious kludge here because of
# DataFrame transposing convention
if (isinstance(section, ABCDataFrame) and i > 0 and
len(new_key) == 2):
a, b = new_key
new_key = b, a
if len(new_key) == 1:
new_key, = new_key
# Slices should return views, but calling iloc/loc with a null
# slice returns a new object.
if is_null_slice(new_key):
return section
# This is an elided recursive call to iloc/loc/etc.
return getattr(section, self.name)[new_key]
raise IndexingError('not applicable')
def _getitem_nested_tuple(self, tup):
# we have a nested tuple so have at least 1 multi-index level
# we should be able to match up the dimensionality here
# we have too many indexers for our dim, but have at least 1
# multi-index dimension, try to see if we have something like
# a tuple passed to a series with a multi-index
if len(tup) > self.ndim:
result = self._handle_lowerdim_multi_index_axis0(tup)
if result is not None:
return result
# this is a Series with a MultiIndex for which a tuple of
# selectors was specified
return self._getitem_axis(tup, axis=0)
# handle the multi-axis by taking sections and reducing
# this is iterative
obj = self.obj
axis = 0
for i, key in enumerate(tup):
if is_null_slice(key):
axis += 1
continue
current_ndim = obj.ndim
obj = getattr(obj, self.name)._getitem_axis(key, axis=axis)
axis += 1
# if we have a scalar, we are done
if is_scalar(obj) or not hasattr(obj, 'ndim'):
break
# has the dim of the obj changed?
# GH 7199
if obj.ndim < current_ndim:
# GH 7516
# if had a 3 dim and are going to a 2d
# axes are reversed on a DataFrame
if i >= 1 and current_ndim == 3 and obj.ndim == 2:
obj = obj.T
axis -= 1
return obj
def _getitem_axis(self, key, axis=0):
if self._should_validate_iterable(axis):
self._has_valid_type(key, axis)
labels = self.obj._get_axis(axis)
if isinstance(key, slice):
return self._get_slice_axis(key, axis=axis)
elif (is_list_like_indexer(key) and
not (isinstance(key, tuple) and
isinstance(labels, MultiIndex))):
if hasattr(key, 'ndim') and key.ndim > 1:
raise ValueError('Cannot index with multidimensional key')
return self._getitem_iterable(key, axis=axis)
else:
# maybe coerce a float scalar to integer
key = labels._maybe_cast_indexer(key)
if is_integer(key):
if axis == 0 and isinstance(labels, MultiIndex):
try:
return self._get_label(key, axis=axis)
except (KeyError, TypeError):
if self.obj.index.levels[0].is_integer():
raise
# this is the fallback! (for a non-float, non-integer index)
if not labels.is_floating() and not labels.is_integer():
return self._get_loc(key, axis=axis)
return self._get_label(key, axis=axis)
def _getitem_iterable(self, key, axis=0):
if self._should_validate_iterable(axis):
self._has_valid_type(key, axis)
labels = self.obj._get_axis(axis)
if is_bool_indexer(key):
key = check_bool_indexer(labels, key)
inds, = key.nonzero()
return self.obj.take(inds, axis=axis, convert=False)
else:
# Have the index compute an indexer or return None
# if it cannot handle; we only act on all found values
indexer, keyarr = labels._convert_listlike_indexer(
key, kind=self.name)
if indexer is not None and (indexer != -1).all():
return self.obj.take(indexer, axis=axis)
# existing labels are unique and indexer are unique
if labels.is_unique and Index(keyarr).is_unique:
try:
return self.obj.reindex_axis(keyarr, axis=axis)
except AttributeError:
# Series
if axis != 0:
raise AssertionError('axis must be 0')
return self.obj.reindex(keyarr)
# existing labels are non-unique
else:
# reindex with the specified axis
if axis + 1 > self.obj.ndim:
raise AssertionError("invalid indexing error with "
"non-unique index")
new_target, indexer, new_indexer = labels._reindex_non_unique(
keyarr)
if new_indexer is not None:
result = self.obj.take(indexer[indexer != -1], axis=axis,
convert=False)
result = result._reindex_with_indexers(
{axis: [new_target, new_indexer]},
copy=True, allow_dups=True)
else:
result = self.obj.take(indexer, axis=axis, convert=False)
return result
def _convert_to_indexer(self, obj, axis=0, is_setter=False):
"""
Convert indexing key into something we can use to do actual fancy
indexing on an ndarray
Examples
ix[:5] -> slice(0, 5)
ix[[1,2,3]] -> [1,2,3]
ix[['foo', 'bar', 'baz']] -> [i, j, k] (indices of foo, bar, baz)
Going by Zen of Python?
'In the face of ambiguity, refuse the temptation to guess.'
raise AmbiguousIndexError with integer labels?
- No, prefer label-based indexing
"""
labels = self.obj._get_axis(axis)
if isinstance(obj, slice):
return self._convert_slice_indexer(obj, axis)
# try to find out correct indexer, if not type correct raise
try:
obj = self._convert_scalar_indexer(obj, axis)
except TypeError:
# but we will allow setting
if is_setter:
pass
# see if we are positional in nature
is_int_index = labels.is_integer()
is_int_positional = is_integer(obj) and not is_int_index
# if we are a label return me
try:
return labels.get_loc(obj)
except LookupError:
if isinstance(obj, tuple) and isinstance(labels, MultiIndex):
if is_setter and len(obj) == labels.nlevels:
return {'key': obj}
raise
except TypeError:
pass
except (ValueError):
if not is_int_positional:
raise
# a positional
if is_int_positional:
# if we are setting and its not a valid location
# its an insert which fails by definition
if is_setter:
# always valid
if self.name == 'loc':
return {'key': obj}
# a positional
if (obj >= self.obj.shape[axis] and
not isinstance(labels, MultiIndex)):
raise ValueError("cannot set by positional indexing with "
"enlargement")
return obj
if is_nested_tuple(obj, labels):
return labels.get_locs(obj)
elif is_list_like_indexer(obj):
if is_bool_indexer(obj):
obj = check_bool_indexer(labels, obj)
inds, = obj.nonzero()
return inds
else:
# Have the index compute an indexer or return None
# if it cannot handle
indexer, objarr = labels._convert_listlike_indexer(
obj, kind=self.name)
if indexer is not None:
return indexer
# unique index
if labels.is_unique:
indexer = check = labels.get_indexer(objarr)
# non-unique (dups)
else:
(indexer,
missing) = labels.get_indexer_non_unique(objarr)
# 'indexer' has dupes, create 'check' using 'missing'
check = np.zeros_like(objarr)
check[missing] = -1
mask = check == -1
if mask.any():
raise KeyError('{mask} not in index'
.format(mask=objarr[mask]))
return _values_from_object(indexer)
else:
try:
return labels.get_loc(obj)
except LookupError:
# allow a not found key only if we are a setter
if not is_list_like_indexer(obj) and is_setter:
return {'key': obj}
raise
def _tuplify(self, loc):
tup = [slice(None, None) for _ in range(self.ndim)]
tup[0] = loc
return tuple(tup)
def _get_slice_axis(self, slice_obj, axis=0):
obj = self.obj
if not need_slice(slice_obj):
return obj.copy(deep=False)
indexer = self._convert_slice_indexer(slice_obj, axis)
if isinstance(indexer, slice):
return self._slice(indexer, axis=axis, kind='iloc')
else:
return self.obj.take(indexer, axis=axis, convert=False)
class _IXIndexer(_NDFrameIndexer):
"""A primarily label-location based indexer, with integer position
fallback.
``.ix[]`` supports mixed integer and label based access. It is
primarily label based, but will fall back to integer positional
access unless the corresponding axis is of integer type.
``.ix`` is the most general indexer and will support any of the
inputs in ``.loc`` and ``.iloc``. ``.ix`` also supports floating
point label schemes. ``.ix`` is exceptionally useful when dealing
with mixed positional and label based hierarchical indexes.
However, when an axis is integer based, ONLY label based access
and not positional access is supported. Thus, in such cases, it's
usually better to be explicit and use ``.iloc`` or ``.loc``.
See more at :ref:`Advanced Indexing <advanced>`.
"""
def __init__(self, obj, name):
_ix_deprecation_warning = textwrap.dedent("""
.ix is deprecated. Please use
.loc for label based indexing or
.iloc for positional indexing
See the documentation here:
http://pandas.pydata.org/pandas-docs/stable/indexing.html#ix-indexer-is-deprecated""") # noqa
warnings.warn(_ix_deprecation_warning,
DeprecationWarning, stacklevel=3)
super(_IXIndexer, self).__init__(obj, name)
def _has_valid_type(self, key, axis):
if isinstance(key, slice):
return True
elif is_bool_indexer(key):
return True
elif is_list_like_indexer(key):
return True
else:
self._convert_scalar_indexer(key, axis)
return True
class _LocationIndexer(_NDFrameIndexer):
_exception = Exception
def __getitem__(self, key):
if type(key) is tuple:
key = tuple(com._apply_if_callable(x, self.obj) for x in key)
try:
if self._is_scalar_access(key):
return self._getitem_scalar(key)
except (KeyError, IndexError):
pass
return self._getitem_tuple(key)
else:
key = com._apply_if_callable(key, self.obj)
return self._getitem_axis(key, axis=0)
def _is_scalar_access(self, key):
raise NotImplementedError()
def _getitem_scalar(self, key):
raise NotImplementedError()
def _getitem_axis(self, key, axis=0):
raise NotImplementedError()
def _getbool_axis(self, key, axis=0):
labels = self.obj._get_axis(axis)
key = check_bool_indexer(labels, key)
inds, = key.nonzero()
try:
return self.obj.take(inds, axis=axis, convert=False)
except Exception as detail:
raise self._exception(detail)
def _get_slice_axis(self, slice_obj, axis=0):
""" this is pretty simple as we just have to deal with labels """
obj = self.obj
if not need_slice(slice_obj):
return obj.copy(deep=False)
labels = obj._get_axis(axis)
indexer = labels.slice_indexer(slice_obj.start, slice_obj.stop,
slice_obj.step, kind=self.name)
if isinstance(indexer, slice):
return self._slice(indexer, axis=axis, kind='iloc')
else:
return self.obj.take(indexer, axis=axis, convert=False)
class _LocIndexer(_LocationIndexer):
"""Purely label-location based indexer for selection by label.
``.loc[]`` is primarily label based, but may also be used with a
boolean array.
Allowed inputs are:
- A single label, e.g. ``5`` or ``'a'``, (note that ``5`` is
interpreted as a *label* of the index, and **never** as an
integer position along the index).
- A list or array of labels, e.g. ``['a', 'b', 'c']``.
- A slice object with labels, e.g. ``'a':'f'`` (note that contrary
to usual python slices, **both** the start and the stop are included!).
- A boolean array.
- A ``callable`` function with one argument (the calling Series, DataFrame
or Panel) and that returns valid output for indexing (one of the above)
``.loc`` will raise a ``KeyError`` when the items are not found.
See more at :ref:`Selection by Label <indexing.label>`
"""
_valid_types = ("labels (MUST BE IN THE INDEX), slices of labels (BOTH "
"endpoints included! Can be slices of integers if the "
"index is integers), listlike of labels, boolean")
_exception = KeyError
def _has_valid_type(self, key, axis):
ax = self.obj._get_axis(axis)
# valid for a label where all labels are in the index
# slice of labels (where start-end in labels)
# slice of integers (only if in the labels)
# boolean
if isinstance(key, slice):
return True
elif is_bool_indexer(key):
return True
elif is_list_like_indexer(key):
# mi is just a passthru
if isinstance(key, tuple) and isinstance(ax, MultiIndex):
return True
# TODO: don't check the entire key unless necessary
if (not is_iterator(key) and len(key) and
np.all(ax.get_indexer_for(key) < 0)):
raise KeyError(u"None of [{key}] are in the [{axis}]"
.format(key=key,
axis=self.obj._get_axis_name(axis)))
return True
else:
def error():
if isna(key):
raise TypeError("cannot use label indexing with a null "
"key")
raise KeyError(u"the label [{key}] is not in the [{axis}]"
.format(key=key,
axis=self.obj._get_axis_name(axis)))
try:
key = self._convert_scalar_indexer(key, axis)
if not ax.contains(key):
error()
except TypeError as e:
# python 3 type errors should be raised
if _is_unorderable_exception(e):
error()
raise
except:
error()
return True
def _is_scalar_access(self, key):
# this is a shortcut accessor to both .loc and .iloc
# that provide the equivalent access of .at and .iat
# a) avoid getting things via sections and (to minimize dtype changes)
# b) provide a performant path
if not hasattr(key, '__len__'):
return False
if len(key) != self.ndim:
return False
for i, k in enumerate(key):
if not is_scalar(k):
return False
ax = self.obj.axes[i]
if isinstance(ax, MultiIndex):
return False
if not ax.is_unique:
return False
return True
def _getitem_scalar(self, key):
# a fast-path to scalar access
# if not, raise
values = self.obj.get_value(*key)
return values
def _get_partial_string_timestamp_match_key(self, key, labels):
"""Translate any partial string timestamp matches in key, returning the
new key (GH 10331)"""
if isinstance(labels, MultiIndex):
if isinstance(key, compat.string_types) and \
labels.levels[0].is_all_dates:
# Convert key '2016-01-01' to
# ('2016-01-01'[, slice(None, None, None)]+)
key = tuple([key] + [slice(None)] * (len(labels.levels) - 1))
if isinstance(key, tuple):
# Convert (..., '2016-01-01', ...) in tuple to
# (..., slice('2016-01-01', '2016-01-01', None), ...)
new_key = []
for i, component in enumerate(key):
if isinstance(component, compat.string_types) and \
labels.levels[i].is_all_dates:
new_key.append(slice(component, component, None))
else:
new_key.append(component)
key = tuple(new_key)
return key
def _getitem_axis(self, key, axis=0):
labels = self.obj._get_axis(axis)
key = self._get_partial_string_timestamp_match_key(key, labels)
if isinstance(key, slice):
self._has_valid_type(key, axis)
return self._get_slice_axis(key, axis=axis)
elif is_bool_indexer(key):
return self._getbool_axis(key, axis=axis)
elif is_list_like_indexer(key):
# convert various list-like indexers
# to a list of keys
# we will use the *values* of the object
# and NOT the index if its a PandasObject
if isinstance(labels, MultiIndex):
if isinstance(key, (ABCSeries, np.ndarray)) and key.ndim <= 1:
# Series, or 0,1 ndim ndarray
# GH 14730
key = list(key)
elif isinstance(key, ABCDataFrame):
# GH 15438
raise NotImplementedError("Indexing a MultiIndex with a "
"DataFrame key is not "
"implemented")
elif hasattr(key, 'ndim') and key.ndim > 1:
raise NotImplementedError("Indexing a MultiIndex with a "
"multidimensional key is not "
"implemented")
if (not isinstance(key, tuple) and len(key) > 1 and
not isinstance(key[0], tuple)):
key = tuple([key])
# an iterable multi-selection
if not (isinstance(key, tuple) and isinstance(labels, MultiIndex)):
if hasattr(key, 'ndim') and key.ndim > 1:
raise ValueError('Cannot index with multidimensional key')
return self._getitem_iterable(key, axis=axis)
# nested tuple slicing
if is_nested_tuple(key, labels):
locs = labels.get_locs(key)
indexer = [slice(None)] * self.ndim
indexer[axis] = locs
return self.obj.iloc[tuple(indexer)]
# fall thru to straight lookup
self._has_valid_type(key, axis)
return self._get_label(key, axis=axis)
class _iLocIndexer(_LocationIndexer):
"""Purely integer-location based indexing for selection by position.
``.iloc[]`` is primarily integer position based (from ``0`` to
``length-1`` of the axis), but may also be used with a boolean
array.
Allowed inputs are:
- An integer, e.g. ``5``.
- A list or array of integers, e.g. ``[4, 3, 0]``.
- A slice object with ints, e.g. ``1:7``.
- A boolean array.
- A ``callable`` function with one argument (the calling Series, DataFrame
or Panel) and that returns valid output for indexing (one of the above)
``.iloc`` will raise ``IndexError`` if a requested indexer is
out-of-bounds, except *slice* indexers which allow out-of-bounds
indexing (this conforms with python/numpy *slice* semantics).
See more at :ref:`Selection by Position <indexing.integer>`
"""
_valid_types = ("integer, integer slice (START point is INCLUDED, END "
"point is EXCLUDED), listlike of integers, boolean array")
_exception = IndexError
def _has_valid_type(self, key, axis):
if is_bool_indexer(key):
if hasattr(key, 'index') and isinstance(key.index, Index):
if key.index.inferred_type == 'integer':
raise NotImplementedError("iLocation based boolean "
"indexing on an integer type "
"is not available")
raise ValueError("iLocation based boolean indexing cannot use "
"an indexable as a mask")
return True
if isinstance(key, slice):
return True
elif is_integer(key):
return self._is_valid_integer(key, axis)
elif is_list_like_indexer(key):
return self._is_valid_list_like(key, axis)
return False
def _has_valid_setitem_indexer(self, indexer):
self._has_valid_positional_setitem_indexer(indexer)
def _is_scalar_access(self, key):
# this is a shortcut accessor to both .loc and .iloc
# that provide the equivalent access of .at and .iat
# a) avoid getting things via sections and (to minimize dtype changes)
# b) provide a performant path
if not hasattr(key, '__len__'):
return False
if len(key) != self.ndim:
return False
for i, k in enumerate(key):
if not is_integer(k):
return False
ax = self.obj.axes[i]
if not ax.is_unique:
return False
return True
def _getitem_scalar(self, key):
# a fast-path to scalar access
# if not, raise
values = self.obj.get_value(*key, takeable=True)
return values
def _is_valid_integer(self, key, axis):
# return a boolean if we have a valid integer indexer
ax = self.obj._get_axis(axis)
l = len(ax)
if key >= l or key < -l:
raise IndexError("single positional indexer is out-of-bounds")
return True
def _is_valid_list_like(self, key, axis):
# return a boolean if we are a valid list-like (e.g. that we don't
# have out-of-bounds values)
# a tuple should already have been caught by this point
# so don't treat a tuple as a valid indexer
if isinstance(key, tuple):
raise IndexingError('Too many indexers')
        # check that the key does not exceed the bounds of the index
arr = np.array(key)
ax = self.obj._get_axis(axis)
l = len(ax)
if (hasattr(arr, '__len__') and len(arr) and
(arr.max() >= l or arr.min() < -l)):
raise IndexError("positional indexers are out-of-bounds")
return True
def _getitem_tuple(self, tup):
self._has_valid_tuple(tup)
try:
return self._getitem_lowerdim(tup)
except:
pass
retval = self.obj
axis = 0
for i, key in enumerate(tup):
if i >= self.obj.ndim:
raise IndexingError('Too many indexers')
if is_null_slice(key):
axis += 1
continue
retval = getattr(retval, self.name)._getitem_axis(key, axis=axis)
# if the dim was reduced, then pass a lower-dim the next time
if retval.ndim < self.ndim:
axis -= 1
# try to get for the next axis
axis += 1
return retval
def _get_slice_axis(self, slice_obj, axis=0):
obj = self.obj
if not need_slice(slice_obj):
return obj.copy(deep=False)
slice_obj = self._convert_slice_indexer(slice_obj, axis)
if isinstance(slice_obj, slice):
return self._slice(slice_obj, axis=axis, kind='iloc')
else:
return self.obj.take(slice_obj, axis=axis, convert=False)
def _get_list_axis(self, key, axis=0):
"""
Return Series values by list or array of integers
Parameters
----------
key : list-like positional indexer
axis : int (can only be zero)
Returns
-------
Series object
"""
try:
return self.obj.take(key, axis=axis, convert=False)
except IndexError:
# re-raise with different error message
raise IndexError("positional indexers are out-of-bounds")
def _getitem_axis(self, key, axis=0):
if isinstance(key, slice):
self._has_valid_type(key, axis)
return self._get_slice_axis(key, axis=axis)
if isinstance(key, list):
try:
key = np.asarray(key)
except TypeError: # pragma: no cover
pass
if is_bool_indexer(key):
self._has_valid_type(key, axis)
return self._getbool_axis(key, axis=axis)
# a list of integers
elif is_list_like_indexer(key):
return self._get_list_axis(key, axis=axis)
# a single integer
else:
key = self._convert_scalar_indexer(key, axis)
if not is_integer(key):
raise TypeError("Cannot index by location index with a "
"non-integer key")
# validate the location
self._is_valid_integer(key, axis)
return self._get_loc(key, axis=axis)
def _convert_to_indexer(self, obj, axis=0, is_setter=False):
""" much simpler as we only have to deal with our valid types """
        # may need to convert a float key
if isinstance(obj, slice):
return self._convert_slice_indexer(obj, axis)
elif is_float(obj):
return self._convert_scalar_indexer(obj, axis)
elif self._has_valid_type(obj, axis):
return obj
raise ValueError("Can only index by location with a [%s]" %
self._valid_types)
class _ScalarAccessIndexer(_NDFrameIndexer):
""" access scalars quickly """
def _convert_key(self, key, is_setter=False):
return list(key)
def __getitem__(self, key):
if not isinstance(key, tuple):
# we could have a convertible item here (e.g. Timestamp)
if not is_list_like_indexer(key):
key = tuple([key])
else:
raise ValueError('Invalid call for scalar access (getting)!')
key = self._convert_key(key)
return self.obj.get_value(*key, takeable=self._takeable)
def __setitem__(self, key, value):
if isinstance(key, tuple):
key = tuple(com._apply_if_callable(x, self.obj) for x in key)
else:
# scalar callable may return tuple
key = com._apply_if_callable(key, self.obj)
if not isinstance(key, tuple):
key = self._tuplify(key)
if len(key) != self.obj.ndim:
raise ValueError('Not enough indexers for scalar access '
'(setting)!')
key = list(self._convert_key(key, is_setter=True))
key.append(value)
self.obj.set_value(*key, takeable=self._takeable)
class _AtIndexer(_ScalarAccessIndexer):
"""Fast label-based scalar accessor
Similarly to ``loc``, ``at`` provides **label** based scalar lookups.
You can also set using these indexers.
"""
_takeable = False
def _convert_key(self, key, is_setter=False):
""" require they keys to be the same type as the index (so we don't
fallback)
"""
# allow arbitrary setting
if is_setter:
return list(key)
for ax, i in zip(self.obj.axes, key):
if ax.is_integer():
if not is_integer(i):
raise ValueError("At based indexing on an integer index "
"can only have integer indexers")
else:
if is_integer(i):
raise ValueError("At based indexing on an non-integer "
"index can only have non-integer "
"indexers")
return key
class _iAtIndexer(_ScalarAccessIndexer):
"""Fast integer location scalar accessor.
Similarly to ``iloc``, ``iat`` provides **integer** based lookups.
You can also set using these indexers.
"""
_takeable = True
def _has_valid_setitem_indexer(self, indexer):
self._has_valid_positional_setitem_indexer(indexer)
def _convert_key(self, key, is_setter=False):
""" require integer args (and convert to label arguments) """
for a, i in zip(self.obj.axes, key):
if not is_integer(i):
raise ValueError("iAt based indexing can only have integer "
"indexers")
return key
# 32-bit floating point machine epsilon
_eps = 1.1920929e-07
def length_of_indexer(indexer, target=None):
"""return the length of a single non-tuple indexer which could be a slice
"""
if target is not None and isinstance(indexer, slice):
l = len(target)
start = indexer.start
stop = indexer.stop
step = indexer.step
if start is None:
start = 0
elif start < 0:
start += l
if stop is None or stop > l:
stop = l
elif stop < 0:
stop += l
if step is None:
step = 1
elif step < 0:
step = -step
return (stop - start + step - 1) // step
elif isinstance(indexer, (ABCSeries, Index, np.ndarray, list)):
return len(indexer)
elif not is_list_like_indexer(indexer):
return 1
raise AssertionError("cannot find the length of the indexer")
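# Worked example of the slice branch above (the target length is
# hypothetical): slice(2, 10, 3) selects positions 2, 5 and 8 of a length-20
# target, and (10 - 2 + 3 - 1) // 3 == 3 == len(range(2, 10, 3)).
#
# >>> length_of_indexer(slice(2, 10, 3), np.arange(20))
# 3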
def convert_to_index_sliceable(obj, key):
"""if we are index sliceable, then return my slicer, otherwise return None
"""
idx = obj.index
if isinstance(key, slice):
return idx._convert_slice_indexer(key, kind='getitem')
elif isinstance(key, compat.string_types):
# we are an actual column
if obj._data.items.contains(key):
return None
# We might have a datetimelike string that we can translate to a
# slice here via partial string indexing
if idx.is_all_dates:
try:
return idx._get_string_slice(key)
except (KeyError, ValueError, NotImplementedError):
return None
return None
def is_index_slice(obj):
def _is_valid_index(x):
return (is_integer(x) or is_float(x) and
np.allclose(x, int(x), rtol=_eps, atol=0))
def _crit(v):
return v is None or _is_valid_index(v)
both_none = obj.start is None and obj.stop is None
return not both_none and (_crit(obj.start) and _crit(obj.stop))
def check_bool_indexer(ax, key):
# boolean indexing, need to check that the data are aligned, otherwise
# disallowed
# this function assumes that is_bool_indexer(key) == True
result = key
if isinstance(key, ABCSeries) and not key.index.equals(ax):
result = result.reindex(ax)
mask = isna(result._values)
if mask.any():
raise IndexingError('Unalignable boolean Series provided as '
'indexer (index of the boolean Series and of '
                            'the indexed object do not match)')
result = result.astype(bool)._values
elif is_sparse(result):
result = result.to_dense()
result = np.asarray(result, dtype=bool)
else:
# is_bool_indexer has already checked for nulls in the case of an
# object array key, so no check needed here
result = np.asarray(result, dtype=bool)
return result
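# Illustrative sketch of the alignment rule above (``pd`` stands for an
# imported pandas and the labels are hypothetical): a boolean Series whose
# index equals the target axis is simply converted to a plain bool ndarray,
#
# >>> ax = pd.Index(['a', 'b', 'c'])
# >>> check_bool_indexer(ax, pd.Series([True, False, True], index=ax))
# array([True, False, True])
#
# while a mask indexed by different labels picks up NaNs on reindexing and
# raises IndexingError instead of silently misaligning.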
def convert_missing_indexer(indexer):
""" reverse convert a missing indexer, which is a dict
return the scalar indexer and a boolean indicating if we converted
"""
if isinstance(indexer, dict):
# a missing key (but not a tuple indexer)
indexer = indexer['key']
if isinstance(indexer, bool):
raise KeyError("cannot use a single bool to index into setitem")
return indexer, True
return indexer, False
def convert_from_missing_indexer_tuple(indexer, axes):
""" create a filtered indexer that doesn't have any missing indexers """
def get_indexer(_i, _idx):
return (axes[_i].get_loc(_idx['key']) if isinstance(_idx, dict) else
_idx)
return tuple([get_indexer(_i, _idx) for _i, _idx in enumerate(indexer)])
def maybe_convert_indices(indices, n):
"""
Attempt to convert indices into valid, positive indices.
If we have negative indices, translate to positive here.
If we have indices that are out-of-bounds, raise an IndexError.
Parameters
----------
indices : array-like
The array of indices that we are to convert.
n : int
The number of elements in the array that we are indexing.
Returns
-------
valid_indices : array-like
An array-like of positive indices that correspond to the ones
that were passed in initially to this function.
Raises
------
IndexError : one of the converted indices either exceeded the number
of elements (specified by `n`) OR was still negative.
"""
if isinstance(indices, list):
indices = np.array(indices)
if len(indices) == 0:
# If list is empty, np.array will return float and cause indexing
# errors.
return np.empty(0, dtype=np.int_)
mask = indices < 0
if mask.any():
indices[mask] += n
mask = (indices >= n) | (indices < 0)
if mask.any():
raise IndexError("indices are out-of-bounds")
return indices
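# Worked example of the conversion above (the values are hypothetical):
# negative positions are shifted by n, anything still out of range raises.
#
# >>> maybe_convert_indices([0, -1, -2], n=5)
# array([0, 4, 3])
# >>> maybe_convert_indices([0, 5], n=5)
# Traceback (most recent call last):
#     ...
# IndexError: indices are out-of-bounds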
def maybe_convert_ix(*args):
"""
We likely want to take the cross-product
"""
ixify = True
for arg in args:
if not isinstance(arg, (np.ndarray, list, ABCSeries, Index)):
ixify = False
if ixify:
return np.ix_(*args)
else:
return args
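# Illustrative sketch (the values are hypothetical): two list-like positional
# indexers are passed through np.ix_ so that arr[result] selects the full
# cross-product of rows and columns,
#
# >>> maybe_convert_ix([0, 2], [1, 3])   # open-mesh arrays, shapes (2, 1) and (1, 2)
#
# while any non list-like argument (e.g. a slice) leaves the tuple untouched:
#
# >>> maybe_convert_ix(slice(None), [1, 3])
# (slice(None, None, None), [1, 3])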
def is_nested_tuple(tup, labels):
    # check for a compatible nested tuple and a MultiIndex among the axes
if not isinstance(tup, tuple):
return False
    # are we a nested tuple of: tuple, list, slice
for i, k in enumerate(tup):
if isinstance(k, (tuple, list, slice)):
return isinstance(labels, MultiIndex)
return False
def is_list_like_indexer(key):
# allow a list_like, but exclude NamedTuples which can be indexers
return is_list_like(key) and not (isinstance(key, tuple) and
type(key) is not tuple)
def is_label_like(key):
# select a label or row
return not isinstance(key, slice) and not is_list_like_indexer(key)
def need_slice(obj):
return (obj.start is not None or obj.stop is not None or
(obj.step is not None and obj.step != 1))
def maybe_droplevels(index, key):
# drop levels
original_index = index
if isinstance(key, tuple):
for _ in key:
try:
index = index.droplevel(0)
except:
# we have dropped too much, so back out
return original_index
else:
try:
index = index.droplevel(0)
except:
pass
return index
def _non_reducing_slice(slice_):
"""
    Ensure that a slice doesn't reduce to a Series or Scalar.
    Any user-passed `subset` should have this called on it
to make sure we're always working with DataFrames.
"""
# default to column slice, like DataFrame
    # ['A', 'B'] -> IndexSlice[:, ['A', 'B']]
kinds = tuple(list(compat.string_types) + [ABCSeries, np.ndarray, Index,
list])
if isinstance(slice_, kinds):
slice_ = IndexSlice[:, slice_]
def pred(part):
# true when slice does *not* reduce
return isinstance(part, slice) or is_list_like(part)
if not is_list_like(slice_):
if not isinstance(slice_, slice):
# a 1-d slice, like df.loc[1]
slice_ = [[slice_]]
else:
# slice(a, b, c)
slice_ = [slice_] # to tuplize later
else:
slice_ = [part if pred(part) else [part] for part in slice_]
return tuple(slice_)
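# Illustrative sketch of the normalization above (the column labels are
# hypothetical): a bare label or list of labels becomes a full-row column
# slice, so the styler always works on a DataFrame-shaped subset.
#
# >>> _non_reducing_slice('A')
# (slice(None, None, None), ['A'])
# >>> _non_reducing_slice(['A', 'B'])
# (slice(None, None, None), ['A', 'B'])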
def _maybe_numeric_slice(df, slice_, include_bool=False):
"""
    Want nice defaults for background_gradient that don't break
    with non-numeric data, but if slice_ is passed, go with that.
"""
if slice_ is None:
dtypes = [np.number]
if include_bool:
dtypes.append(bool)
slice_ = IndexSlice[:, df.select_dtypes(include=dtypes).columns]
return slice_
| bsd-3-clause |
andreeweb/HeapHeap | heapheap.py | 2 | 11480 | import random
import time
from Heap.BinaryHeapQueue import BinaryHeapQueue
from Heap.BinomialHeapQueue import BinomialHeapQueue
from Utils.Generator import Generator
from Heap.DHeapQueue import DHeapQueue
from Heap.HeapSort import HeapSort
import matplotlib.pyplot as plot
# global log time
log_time = 0
def log(value):
"""
    Append value to the session log file and echo it to the console.
    """
    global log_time
    if log_time is not None:
        log_path = "./logs/log_" + str(int(log_time)) + ".txt"
    else:
        log_path = "./logs/log_" + str(time.time()) + ".txt"
    # use a context manager so the log file handle is always closed
    with open(log_path, "a") as f:
        f.write(value)
    print(value)
def option_insert_delete(rnd_numb):
axis_y = []
axis_x = []
name = []
p = 0
while True:
p = input("\n Please insert the probability\n")
try:
if 0 <= float(p) <= 1:
log("P:" + p + "\n\n")
break
else:
print("Please insert a valid probability! (Probability must be between 0 and 1)")
except ValueError:
print("Please insert a valid probability!")
continue
while True:
select_queue = input(
"\n Please select the data structure with which you want to perform test.\n "
"1 - 2Heap \n"
" 2 - 3Heap \n"
" 3 - 4Heap \n"
" 4 - 6Heap \n"
" 5 - 8Heap \n"
" 6 - 10Heap \n"
" 7 - 16Heap \n"
" 8 - BinaryHeap \n"
" 9 - BinomialHeap \n"
" 10 - Perform test on all queue\n"
" b - Back \n")
try:
if str(select_queue) == 'b':
break
elif int(select_queue) == 1:
insert_delete_heap(rnd_numb, p, 2, DHeapQueue, axis_x, axis_y, name)
plot_insert_delete_option(axis_x, axis_y, name)
elif int(select_queue) == 2:
insert_delete_heap(rnd_numb, p, 3, DHeapQueue, axis_x, axis_y, name)
plot_insert_delete_option(axis_x, axis_y, name)
elif int(select_queue) == 3:
insert_delete_heap(rnd_numb, p, 4, DHeapQueue, axis_x, axis_y, name)
plot_insert_delete_option(axis_x, axis_y, name)
elif int(select_queue) == 4:
insert_delete_heap(rnd_numb, p, 6, DHeapQueue, axis_x, axis_y, name)
plot_insert_delete_option(axis_x, axis_y, name)
elif int(select_queue) == 5:
insert_delete_heap(rnd_numb, p, 8, DHeapQueue, axis_x, axis_y, name)
plot_insert_delete_option(axis_x, axis_y, name)
elif int(select_queue) == 6:
insert_delete_heap(rnd_numb, p, 10, DHeapQueue, axis_x, axis_y, name)
plot_insert_delete_option(axis_x, axis_y, name)
elif int(select_queue) == 7:
insert_delete_heap(rnd_numb, p, 16, DHeapQueue, axis_x, axis_y, name)
plot_insert_delete_option(axis_x, axis_y, name)
elif int(select_queue) == 8:
insert_delete_heap(rnd_numb, p, None, BinaryHeapQueue, axis_x, axis_y, name)
plot_insert_delete_option(axis_x, axis_y, name)
elif int(select_queue) == 9:
insert_delete_heap(rnd_numb, p, 32, BinomialHeapQueue, axis_x, axis_y, name)
plot_insert_delete_option(axis_x, axis_y, name)
elif int(select_queue) == 10:
insert_delete_heap(rnd_numb, p, 2, DHeapQueue, axis_x, axis_y, name)
insert_delete_heap(rnd_numb, p, 3, DHeapQueue, axis_x, axis_y, name)
insert_delete_heap(rnd_numb, p, 4, DHeapQueue, axis_x, axis_y, name)
insert_delete_heap(rnd_numb, p, 6, DHeapQueue, axis_x, axis_y, name)
insert_delete_heap(rnd_numb, p, 8, DHeapQueue, axis_x, axis_y, name)
insert_delete_heap(rnd_numb, p, 10, DHeapQueue, axis_x, axis_y, name)
insert_delete_heap(rnd_numb, p, 16, DHeapQueue, axis_x, axis_y, name)
insert_delete_heap(rnd_numb, p, None, BinaryHeapQueue, axis_x, axis_y, name)
# insert_delete_heap(rnd_numb, p, 32, BinomialHeapQueue, axis_x, axis_y)
plot_insert_delete_option(axis_x, axis_y, name)
else:
print("Please select a valid option!")
except Exception as e:
print("Please select a valid option!" + str(e))
continue
def insert_delete_heap(rnd_numb, p, d, cls, axis_x, axis_y, name):
plot_value_x = []
plot_value_y = []
start_ordering_time = time.time()
# identify the queue
if cls == DHeapQueue:
pq = DHeapQueue(d)
log("\nStart execution on " + str(d) + "Heap data structure" + "\n")
name.append(str(d) + "Heap")
elif cls == BinaryHeapQueue:
pq = BinaryHeapQueue()
log("\nStart execution on BinaryHeap data structure" + "\n")
name.append("BinaryHeap")
else:
pq = BinomialHeapQueue(d)
log("\nStart execution on BinomialHeap data structure" + "\n")
name.append("BinomialHeap")
# how many insert before start with the insert/delete algorithm
count = int(len(rnd_numb)/2)
# perform some insert
for i in range(count):
pq.insert(rnd_numb[i][0], rnd_numb[i][0])
    # insert with probability p (0 < p < 1), delete_min with probability 1 - p
action_time = time.time()
for i in range(count, len(rnd_numb)):
k = random.uniform(0.0, 1.0)
if float(k) < float(p):
pq.insert(rnd_numb[count][0], rnd_numb[count][0])
else:
pq.delete_min()
if i % 1500 == 0:
plot_value_y.append(float((time.time() - action_time)) * 1000)
plot_value_x.append(i)
action_time = time.time()
axis_x.append(plot_value_x)
axis_y.append(plot_value_y)
log("\nX values: " + str(plot_value_x) + "\n")
log("Y values: " + str(plot_value_y) + "\n\n")
log("\nEnd execution in: " + str(time.time() - start_ordering_time) + "\n\n")
def option_order(rnd_numb):
while True:
select_queue = input(
"\n Please select the data structure with which you want to perform test.\n "
"1 - 2Heap \n"
" 2 - 3Heap \n"
" 3 - 4Heap \n"
" 4 - 6Heap \n"
" 5 - 8Heap \n"
" 6 - 10Heap \n"
" 7 - 16Heap \n"
" 8 - BinaryHeap \n"
" 9 - BinomialHeap \n"
" 10 - Perform test on all queue\n"
" b - Back \n")
try:
if str(select_queue) == 'b':
break
elif int(select_queue) == 1:
heapsort(rnd_numb, 2, DHeapQueue)
elif int(select_queue) == 2:
heapsort(rnd_numb, 3, DHeapQueue)
elif int(select_queue) == 3:
heapsort(rnd_numb, 4, DHeapQueue)
elif int(select_queue) == 4:
heapsort(rnd_numb, 6, DHeapQueue)
elif int(select_queue) == 5:
heapsort(rnd_numb, 8, DHeapQueue)
elif int(select_queue) == 6:
heapsort(rnd_numb, 10, DHeapQueue)
elif int(select_queue) == 7:
heapsort(rnd_numb, 16, DHeapQueue)
elif int(select_queue) == 8:
heapsort(rnd_numb, None, BinaryHeapQueue)
elif int(select_queue) == 9:
heapsort(rnd_numb, 32, BinomialHeapQueue)
elif int(select_queue) == 10:
heapsort(rnd_numb, 2, DHeapQueue)
heapsort(rnd_numb, 3, DHeapQueue)
heapsort(rnd_numb, 4, DHeapQueue)
heapsort(rnd_numb, 6, DHeapQueue)
heapsort(rnd_numb, 8, DHeapQueue)
heapsort(rnd_numb, 10, DHeapQueue)
heapsort(rnd_numb, 16, DHeapQueue)
heapsort(rnd_numb, None, BinaryHeapQueue)
# heapsort(rnd_numb, 32, BinomialHeapQueue, axis_x, axis_y)
else:
print("Please select a valid option!")
except ValueError:
print("Please select a valid option!")
continue
def heapsort(rnd_numb, d, cls):
log("\n\n **************************\n")
start_ordering_time = time.time()
# identify the queue
if cls == DHeapQueue:
pq = DHeapQueue(d)
log("Start heapsort with " + str(d) + "Heap data structure" + "\n")
elif cls == BinaryHeapQueue:
pq = BinaryHeapQueue()
log("Start heapsort with BinaryHeap data structure" + "\n")
else:
pq = BinomialHeapQueue(d)
log("Start heapsort with BinomialHeap data structure" + "\n")
# insert input on heap structure
for i in range(len(rnd_numb)):
pq.insert(rnd_numb[i][0], rnd_numb[i][0])
# do heapsort!
result = HeapSort.heapsort_support(pq)
timestamp = str(time.time() - start_ordering_time)
log("End ordering in: " + timestamp + "\n\n")
log("Input ordered \n" + str(result[:35]) + "...\n\n")
def divider():
return "\n ---------------------------------------- \n"
def plot_insert_delete_option(x_values, y_values, name):
# print(x_values)
# print(y_values)
# print(name)
plot.title("Priority Queue")
plot.xlabel("Length of list (number)")
plot.ylabel("Time taken (milliseconds)")
y_min = 0
y_max = 0
for i in range(len(x_values)):
plot.plot(x_values[i], y_values[i], label=str(name[i]))
if max(y_values[i]) > y_max:
y_max = max(y_values[i])
if min(y_values[i]) < y_min:
y_min = min(y_values[i])
plot.axis([x_values[0][0], x_values[0][-1], y_min, y_max + 10])
plot.legend(loc='upper left')
plot.show()
if __name__ == "__main__":
log_time = time.time()
log(divider())
input_size = 0
seed = 0
while True:
input_size = input("Please insert the size of input (Size must be between 5000 and 50000)\n")
try:
if 5000 <= int(input_size) <= 50000:
break
else:
print("Please insert a valid number!")
except ValueError:
print("Please insert a valid number!")
while True:
seed = input("Please insert the seed of input\n")
try:
if int(seed) > 0:
break
else:
print("Please insert a valid seed! (Seed must be > 0)")
except ValueError:
print("Please insert a valid seed!")
continue
random_numbers = Generator.generate(int(seed), int(input_size))
log("Input size: \n" + input_size + "\n\n")
log("Seed:\n" + seed + "\n\n")
log("Input:\n" + str(random_numbers[:35]) + "...\n\n")
while True:
option_selected = input(
'\n Please select one option:\n 1 - Test insert/delete \n 2 - Test Heapsort \n q - Quit \n\n')
try:
if str(option_selected) == 'q':
break
elif int(option_selected) == 1:
option_insert_delete(random_numbers)
elif int(option_selected) == 2:
option_order(random_numbers)
else:
print("Please insert a valid option!")
except ValueError:
print("Please insert a valid option!")
continue
end = time.time()
log(divider())
| mit |
thilinamb/k-means-parallel | src/test.py | 1 | 1932 | from KMeansBase import KMeansBase
from KMeansPP import KMeansPP
from ScalableKMeansPP import ScalableKMeansPP
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
if __name__ == '__main__':
k = 3
data = np.random.randn(100000,2)
#data = np.array([[1.1,2],[1,2],[0.9,1.9],[1,2.1],[4,4],[4,4.1],[4.2,4.3],[4.3,4],[9,9],[8.9,9],[8.7,9.2],[9.1,9]])
kmeans = KMeansPP(data, k)
_, _, centroids, min_location = kmeans.cluster()
# plotting code
plt.figure()
plt.subplot(1,3,1)
colors = iter(cm.rainbow(np.linspace(0, 1, k + 1)))
for col in range (0,k):
plt.scatter(data[min_location[:,col] == True, :][:,0], data[min_location[:,col] == True, :][:,1], color=next(colors))
centroid_leg = plt.scatter(centroids[:,0], centroids[:,1], color=next(colors), marker='x')
plt.legend([centroid_leg], ['Centroids'], scatterpoints=1, loc='best')
kmeans = KMeansBase(data, k)
_, _, centroids, min_location = kmeans.cluster()
plt.subplot(1,3,2)
colors = iter(cm.rainbow(np.linspace(0, 1, k + 1)))
for col in range (0,k):
plt.scatter(data[min_location[:,col] == True, :][:,0], data[min_location[:,col] == True, :][:,1], color=next(colors))
centroid_leg = plt.scatter(centroids[:,0], centroids[:,1], color=next(colors), marker='x')
plt.legend([centroid_leg], ['Centroids'], scatterpoints=1, loc='best')
kmeans = ScalableKMeansPP(data, k, 2, 2)
_, _, centroids, min_location = kmeans.cluster()
plt.subplot(1,3,3)
colors = iter(cm.rainbow(np.linspace(0, 1, k + 1)))
for col in range (0,k):
plt.scatter(data[min_location[:,col] == True, :][:,0], data[min_location[:,col] == True, :][:,1], color=next(colors))
centroid_leg = plt.scatter(centroids[:,0], centroids[:,1], color=next(colors), marker='x')
plt.legend([centroid_leg], ['Centroids'], scatterpoints=1, loc='best')
plt.show()
| apache-2.0 |
akionakamura/scikit-learn | sklearn/linear_model/tests/test_least_angle.py | 57 | 16523 | from nose.tools import assert_equal
import numpy as np
from scipy import linalg
from sklearn.cross_validation import train_test_split
from sklearn.externals import joblib
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_no_warnings, assert_warns
from sklearn.utils.testing import TempMemmap
from sklearn.utils import ConvergenceWarning
from sklearn import linear_model, datasets
from sklearn.linear_model.least_angle import _lars_path_residues
diabetes = datasets.load_diabetes()
X, y = diabetes.data, diabetes.target
# TODO: use another dataset that has multiple drops
def test_simple():
# Principle of Lars is to keep covariances tied and decreasing
# also test verbose output
from sklearn.externals.six.moves import cStringIO as StringIO
import sys
old_stdout = sys.stdout
try:
sys.stdout = StringIO()
alphas_, active, coef_path_ = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar", verbose=10)
sys.stdout = old_stdout
for (i, coef_) in enumerate(coef_path_.T):
res = y - np.dot(X, coef_)
cov = np.dot(X.T, res)
C = np.max(abs(cov))
eps = 1e-3
ocur = len(cov[C - eps < abs(cov)])
if i < X.shape[1]:
assert_true(ocur == i + 1)
else:
# no more than max_pred variables can go into the active set
assert_true(ocur == X.shape[1])
finally:
sys.stdout = old_stdout
def test_simple_precomputed():
# The same, with precomputed Gram matrix
G = np.dot(diabetes.data.T, diabetes.data)
alphas_, active, coef_path_ = linear_model.lars_path(
diabetes.data, diabetes.target, Gram=G, method="lar")
for i, coef_ in enumerate(coef_path_.T):
res = y - np.dot(X, coef_)
cov = np.dot(X.T, res)
C = np.max(abs(cov))
eps = 1e-3
ocur = len(cov[C - eps < abs(cov)])
if i < X.shape[1]:
assert_true(ocur == i + 1)
else:
# no more than max_pred variables can go into the active set
assert_true(ocur == X.shape[1])
def test_all_precomputed():
# Test that lars_path with precomputed Gram and Xy gives the right answer
X, y = diabetes.data, diabetes.target
G = np.dot(X.T, X)
Xy = np.dot(X.T, y)
for method in 'lar', 'lasso':
output = linear_model.lars_path(X, y, method=method)
output_pre = linear_model.lars_path(X, y, Gram=G, Xy=Xy, method=method)
for expected, got in zip(output, output_pre):
assert_array_almost_equal(expected, got)
def test_lars_lstsq():
# Test that Lars gives least square solution at the end
# of the path
X1 = 3 * diabetes.data # use un-normalized dataset
clf = linear_model.LassoLars(alpha=0.)
clf.fit(X1, y)
coef_lstsq = np.linalg.lstsq(X1, y)[0]
assert_array_almost_equal(clf.coef_, coef_lstsq)
def test_lasso_gives_lstsq_solution():
# Test that Lars Lasso gives least square solution at the end
# of the path
alphas_, active, coef_path_ = linear_model.lars_path(X, y, method="lasso")
coef_lstsq = np.linalg.lstsq(X, y)[0]
assert_array_almost_equal(coef_lstsq, coef_path_[:, -1])
def test_collinearity():
# Check that lars_path is robust to collinearity in input
X = np.array([[3., 3., 1.],
[2., 2., 0.],
[1., 1., 0]])
y = np.array([1., 0., 0])
f = ignore_warnings
_, _, coef_path_ = f(linear_model.lars_path)(X, y, alpha_min=0.01)
assert_true(not np.isnan(coef_path_).any())
residual = np.dot(X, coef_path_[:, -1]) - y
assert_less((residual ** 2).sum(), 1.) # just make sure it's bounded
n_samples = 10
X = np.random.rand(n_samples, 5)
y = np.zeros(n_samples)
_, _, coef_path_ = linear_model.lars_path(X, y, Gram='auto', copy_X=False,
copy_Gram=False, alpha_min=0.,
method='lasso', verbose=0,
max_iter=500)
assert_array_almost_equal(coef_path_, np.zeros_like(coef_path_))
def test_no_path():
# Test that the ``return_path=False`` option returns the correct output
alphas_, active_, coef_path_ = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar")
alpha_, active, coef = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar", return_path=False)
assert_array_almost_equal(coef, coef_path_[:, -1])
assert_true(alpha_ == alphas_[-1])
def test_no_path_precomputed():
# Test that the ``return_path=False`` option with Gram remains correct
G = np.dot(diabetes.data.T, diabetes.data)
alphas_, active_, coef_path_ = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar", Gram=G)
alpha_, active, coef = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar", Gram=G,
return_path=False)
assert_array_almost_equal(coef, coef_path_[:, -1])
assert_true(alpha_ == alphas_[-1])
def test_no_path_all_precomputed():
# Test that the ``return_path=False`` option with Gram and Xy remains correct
X, y = 3 * diabetes.data, diabetes.target
G = np.dot(X.T, X)
Xy = np.dot(X.T, y)
alphas_, active_, coef_path_ = linear_model.lars_path(
X, y, method="lasso", Gram=G, Xy=Xy, alpha_min=0.9)
print("---")
alpha_, active, coef = linear_model.lars_path(
X, y, method="lasso", Gram=G, Xy=Xy, alpha_min=0.9, return_path=False)
assert_array_almost_equal(coef, coef_path_[:, -1])
assert_true(alpha_ == alphas_[-1])
def test_singular_matrix():
# Test when input is a singular matrix
X1 = np.array([[1, 1.], [1., 1.]])
y1 = np.array([1, 1])
alphas, active, coef_path = linear_model.lars_path(X1, y1)
assert_array_almost_equal(coef_path.T, [[0, 0], [1, 0]])
def test_rank_deficient_design():
# consistency test that checks that LARS Lasso is handling rank
# deficient input data (with n_features < rank) in the same way
# as coordinate descent Lasso
y = [5, 0, 5]
for X in ([[5, 0],
[0, 5],
[10, 10]],
[[10, 10, 0],
[1e-32, 0, 0],
[0, 0, 1]],
):
# To be able to use the coefs to compute the objective function,
# we need to turn off normalization
lars = linear_model.LassoLars(.1, normalize=False)
coef_lars_ = lars.fit(X, y).coef_
obj_lars = (1. / (2. * 3.)
* linalg.norm(y - np.dot(X, coef_lars_)) ** 2
+ .1 * linalg.norm(coef_lars_, 1))
coord_descent = linear_model.Lasso(.1, tol=1e-6, normalize=False)
coef_cd_ = coord_descent.fit(X, y).coef_
obj_cd = ((1. / (2. * 3.)) * linalg.norm(y - np.dot(X, coef_cd_)) ** 2
+ .1 * linalg.norm(coef_cd_, 1))
assert_less(obj_lars, obj_cd * (1. + 1e-8))
def test_lasso_lars_vs_lasso_cd(verbose=False):
# Test that LassoLars and Lasso using coordinate descent give the
# same results.
X = 3 * diabetes.data
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso')
lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8)
for c, a in zip(lasso_path.T, alphas):
if a == 0:
continue
lasso_cd.alpha = a
lasso_cd.fit(X, y)
error = linalg.norm(c - lasso_cd.coef_)
assert_less(error, 0.01)
# similar test, with the classifiers
for alpha in np.linspace(1e-2, 1 - 1e-2, 20):
clf1 = linear_model.LassoLars(alpha=alpha, normalize=False).fit(X, y)
clf2 = linear_model.Lasso(alpha=alpha, tol=1e-8,
normalize=False).fit(X, y)
err = linalg.norm(clf1.coef_ - clf2.coef_)
assert_less(err, 1e-3)
# same test, with normalized data
X = diabetes.data
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso')
lasso_cd = linear_model.Lasso(fit_intercept=False, normalize=True,
tol=1e-8)
for c, a in zip(lasso_path.T, alphas):
if a == 0:
continue
lasso_cd.alpha = a
lasso_cd.fit(X, y)
error = linalg.norm(c - lasso_cd.coef_)
assert_less(error, 0.01)
def test_lasso_lars_vs_lasso_cd_early_stopping(verbose=False):
# Test that LassoLars and Lasso using coordinate descent give the
# same results when early stopping is used.
# (test : before, in the middle, and in the last part of the path)
alphas_min = [10, 0.9, 1e-4]
for alphas_min in alphas_min:
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',
alpha_min=0.9)
lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8)
lasso_cd.alpha = alphas[-1]
lasso_cd.fit(X, y)
error = linalg.norm(lasso_path[:, -1] - lasso_cd.coef_)
assert_less(error, 0.01)
alphas_min = [10, 0.9, 1e-4]
# same test, with normalization
for alphas_min in alphas_min:
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',
alpha_min=0.9)
lasso_cd = linear_model.Lasso(fit_intercept=True, normalize=True,
tol=1e-8)
lasso_cd.alpha = alphas[-1]
lasso_cd.fit(X, y)
error = linalg.norm(lasso_path[:, -1] - lasso_cd.coef_)
assert_less(error, 0.01)
def test_lasso_lars_path_length():
# Test that the path length of the LassoLars is right
lasso = linear_model.LassoLars()
lasso.fit(X, y)
lasso2 = linear_model.LassoLars(alpha=lasso.alphas_[2])
lasso2.fit(X, y)
assert_array_almost_equal(lasso.alphas_[:3], lasso2.alphas_)
# Also check that the sequence of alphas is always decreasing
assert_true(np.all(np.diff(lasso.alphas_) < 0))
def test_lasso_lars_vs_lasso_cd_ill_conditioned():
# Test lasso lars on a very ill-conditioned design, and check that
# it does not blow up, and stays somewhat close to a solution given
# by the coordinate descent solver
# Also test that lasso_path (using lars_path output style) gives
# the same result as lars_path and previous lasso output style
# under these conditions.
rng = np.random.RandomState(42)
# Generate data
n, m = 70, 100
k = 5
X = rng.randn(n, m)
w = np.zeros((m, 1))
i = np.arange(0, m)
rng.shuffle(i)
supp = i[:k]
w[supp] = np.sign(rng.randn(k, 1)) * (rng.rand(k, 1) + 1)
y = np.dot(X, w)
sigma = 0.2
y += sigma * rng.rand(*y.shape)
y = y.squeeze()
lars_alphas, _, lars_coef = linear_model.lars_path(X, y, method='lasso')
_, lasso_coef2, _ = linear_model.lasso_path(X, y,
alphas=lars_alphas,
tol=1e-6,
fit_intercept=False)
assert_array_almost_equal(lars_coef, lasso_coef2, decimal=1)
def test_lasso_lars_vs_lasso_cd_ill_conditioned2():
# Create an ill-conditioned situation in which the LARS has to go
# far in the path to converge, and check that LARS and coordinate
# descent give the same answers
# Note it used to be the case that Lars had to use the drop for good
# strategy for this but this is no longer the case with the
# equality_tolerance checks
X = [[1e20, 1e20, 0],
[-1e-32, 0, 0],
[1, 1, 1]]
y = [10, 10, 1]
alpha = .0001
def objective_function(coef):
return (1. / (2. * len(X)) * linalg.norm(y - np.dot(X, coef)) ** 2
+ alpha * linalg.norm(coef, 1))
lars = linear_model.LassoLars(alpha=alpha, normalize=False)
assert_warns(ConvergenceWarning, lars.fit, X, y)
lars_coef_ = lars.coef_
lars_obj = objective_function(lars_coef_)
coord_descent = linear_model.Lasso(alpha=alpha, tol=1e-10, normalize=False)
cd_coef_ = coord_descent.fit(X, y).coef_
cd_obj = objective_function(cd_coef_)
assert_less(lars_obj, cd_obj * (1. + 1e-8))
def test_lars_add_features():
# assure that at least some features get added if necessary
# test for 6d2b4c
# Hilbert matrix
n = 5
H = 1. / (np.arange(1, n + 1) + np.arange(n)[:, np.newaxis])
clf = linear_model.Lars(fit_intercept=False).fit(
H, np.arange(n))
assert_true(np.all(np.isfinite(clf.coef_)))
def test_lars_n_nonzero_coefs(verbose=False):
lars = linear_model.Lars(n_nonzero_coefs=6, verbose=verbose)
lars.fit(X, y)
assert_equal(len(lars.coef_.nonzero()[0]), 6)
# The path should be of length 6 + 1 in a Lars going down to 6
# non-zero coefs
assert_equal(len(lars.alphas_), 7)
def test_multitarget():
# Assure that estimators receiving multidimensional y do the right thing
X = diabetes.data
Y = np.vstack([diabetes.target, diabetes.target ** 2]).T
n_targets = Y.shape[1]
for estimator in (linear_model.LassoLars(), linear_model.Lars()):
estimator.fit(X, Y)
Y_pred = estimator.predict(X)
Y_dec = estimator.decision_function(X)
assert_array_almost_equal(Y_pred, Y_dec)
alphas, active, coef, path = (estimator.alphas_, estimator.active_,
estimator.coef_, estimator.coef_path_)
for k in range(n_targets):
estimator.fit(X, Y[:, k])
y_pred = estimator.predict(X)
assert_array_almost_equal(alphas[k], estimator.alphas_)
assert_array_almost_equal(active[k], estimator.active_)
assert_array_almost_equal(coef[k], estimator.coef_)
assert_array_almost_equal(path[k], estimator.coef_path_)
assert_array_almost_equal(Y_pred[:, k], y_pred)
def test_lars_cv():
# Test the LassoLarsCV object by checking that the optimal alpha
# increases as the number of samples increases.
    # This property is not actually guaranteed in general and is just a
# property of the given dataset, with the given steps chosen.
old_alpha = 0
lars_cv = linear_model.LassoLarsCV()
for length in (400, 200, 100):
X = diabetes.data[:length]
y = diabetes.target[:length]
lars_cv.fit(X, y)
np.testing.assert_array_less(old_alpha, lars_cv.alpha_)
old_alpha = lars_cv.alpha_
def test_lasso_lars_ic():
# Test the LassoLarsIC object by checking that
# - some good features are selected.
# - alpha_bic > alpha_aic
# - n_nonzero_bic < n_nonzero_aic
lars_bic = linear_model.LassoLarsIC('bic')
lars_aic = linear_model.LassoLarsIC('aic')
rng = np.random.RandomState(42)
X = diabetes.data
y = diabetes.target
X = np.c_[X, rng.randn(X.shape[0], 4)] # add 4 bad features
lars_bic.fit(X, y)
lars_aic.fit(X, y)
nonzero_bic = np.where(lars_bic.coef_)[0]
nonzero_aic = np.where(lars_aic.coef_)[0]
assert_greater(lars_bic.alpha_, lars_aic.alpha_)
assert_less(len(nonzero_bic), len(nonzero_aic))
assert_less(np.max(nonzero_bic), diabetes.data.shape[1])
# test error on unknown IC
lars_broken = linear_model.LassoLarsIC('<unknown>')
assert_raises(ValueError, lars_broken.fit, X, y)
def test_no_warning_for_zero_mse():
# LassoLarsIC should not warn for log of zero MSE.
y = np.arange(10, dtype=float)
X = y.reshape(-1, 1)
lars = linear_model.LassoLarsIC(normalize=False)
assert_no_warnings(lars.fit, X, y)
assert_true(np.any(np.isinf(lars.criterion_)))
def test_lars_path_readonly_data():
# When using automated memory mapping on large input, the
# fold data is in read-only mode
# This is a non-regression test for:
# https://github.com/scikit-learn/scikit-learn/issues/4597
splitted_data = train_test_split(X, y, random_state=42)
with TempMemmap(splitted_data) as (X_train, X_test, y_train, y_test):
# The following should not fail despite copy=False
_lars_path_residues(X_train, y_train, X_test, y_test, copy=False) | bsd-3-clause |
WindCanDie/spark | python/pyspark/sql/group.py | 24 | 12490 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
from pyspark import since
from pyspark.rdd import ignore_unicode_prefix, PythonEvalType
from pyspark.sql.column import Column, _to_seq
from pyspark.sql.dataframe import DataFrame
from pyspark.sql.types import *
__all__ = ["GroupedData"]
def dfapi(f):
def _api(self):
name = f.__name__
jdf = getattr(self._jgd, name)()
return DataFrame(jdf, self.sql_ctx)
_api.__name__ = f.__name__
_api.__doc__ = f.__doc__
return _api
def df_varargs_api(f):
def _api(self, *cols):
name = f.__name__
jdf = getattr(self._jgd, name)(_to_seq(self.sql_ctx._sc, cols))
return DataFrame(jdf, self.sql_ctx)
_api.__name__ = f.__name__
_api.__doc__ = f.__doc__
return _api
class GroupedData(object):
"""
A set of methods for aggregations on a :class:`DataFrame`,
created by :func:`DataFrame.groupBy`.
.. note:: Experimental
.. versionadded:: 1.3
"""
def __init__(self, jgd, df):
self._jgd = jgd
self._df = df
self.sql_ctx = df.sql_ctx
@ignore_unicode_prefix
@since(1.3)
def agg(self, *exprs):
"""Compute aggregates and returns the result as a :class:`DataFrame`.
The available aggregate functions can be:
1. built-in aggregation functions, such as `avg`, `max`, `min`, `sum`, `count`
2. group aggregate pandas UDFs, created with :func:`pyspark.sql.functions.pandas_udf`
.. note:: There is no partial aggregation with group aggregate UDFs, i.e.,
a full shuffle is required. Also, all the data of a group will be loaded into
memory, so the user should be aware of the potential OOM risk if data is skewed
and certain groups are too large to fit in memory.
.. seealso:: :func:`pyspark.sql.functions.pandas_udf`
If ``exprs`` is a single :class:`dict` mapping from string to string, then the key
is the column to perform aggregation on, and the value is the aggregate function.
Alternatively, ``exprs`` can also be a list of aggregate :class:`Column` expressions.
.. note:: Built-in aggregation functions and group aggregate pandas UDFs cannot be mixed
in a single call to this function.
:param exprs: a dict mapping from column name (string) to aggregate functions (string),
or a list of :class:`Column`.
>>> gdf = df.groupBy(df.name)
>>> sorted(gdf.agg({"*": "count"}).collect())
[Row(name=u'Alice', count(1)=1), Row(name=u'Bob', count(1)=1)]
>>> from pyspark.sql import functions as F
>>> sorted(gdf.agg(F.min(df.age)).collect())
[Row(name=u'Alice', min(age)=2), Row(name=u'Bob', min(age)=5)]
>>> from pyspark.sql.functions import pandas_udf, PandasUDFType
>>> @pandas_udf('int', PandasUDFType.GROUPED_AGG) # doctest: +SKIP
... def min_udf(v):
... return v.min()
>>> sorted(gdf.agg(min_udf(df.age)).collect()) # doctest: +SKIP
[Row(name=u'Alice', min_udf(age)=2), Row(name=u'Bob', min_udf(age)=5)]
"""
assert exprs, "exprs should not be empty"
if len(exprs) == 1 and isinstance(exprs[0], dict):
jdf = self._jgd.agg(exprs[0])
else:
# Columns
assert all(isinstance(c, Column) for c in exprs), "all exprs should be Column"
jdf = self._jgd.agg(exprs[0]._jc,
_to_seq(self.sql_ctx._sc, [c._jc for c in exprs[1:]]))
return DataFrame(jdf, self.sql_ctx)
@dfapi
@since(1.3)
def count(self):
"""Counts the number of records for each group.
>>> sorted(df.groupBy(df.age).count().collect())
[Row(age=2, count=1), Row(age=5, count=1)]
"""
@df_varargs_api
@since(1.3)
def mean(self, *cols):
"""Computes average values for each numeric columns for each group.
:func:`mean` is an alias for :func:`avg`.
:param cols: list of column names (string). Non-numeric columns are ignored.
>>> df.groupBy().mean('age').collect()
[Row(avg(age)=3.5)]
>>> df3.groupBy().mean('age', 'height').collect()
[Row(avg(age)=3.5, avg(height)=82.5)]
"""
@df_varargs_api
@since(1.3)
def avg(self, *cols):
"""Computes average values for each numeric columns for each group.
:func:`mean` is an alias for :func:`avg`.
:param cols: list of column names (string). Non-numeric columns are ignored.
>>> df.groupBy().avg('age').collect()
[Row(avg(age)=3.5)]
>>> df3.groupBy().avg('age', 'height').collect()
[Row(avg(age)=3.5, avg(height)=82.5)]
"""
@df_varargs_api
@since(1.3)
def max(self, *cols):
"""Computes the max value for each numeric columns for each group.
>>> df.groupBy().max('age').collect()
[Row(max(age)=5)]
>>> df3.groupBy().max('age', 'height').collect()
[Row(max(age)=5, max(height)=85)]
"""
@df_varargs_api
@since(1.3)
def min(self, *cols):
"""Computes the min value for each numeric column for each group.
:param cols: list of column names (string). Non-numeric columns are ignored.
>>> df.groupBy().min('age').collect()
[Row(min(age)=2)]
>>> df3.groupBy().min('age', 'height').collect()
[Row(min(age)=2, min(height)=80)]
"""
@df_varargs_api
@since(1.3)
def sum(self, *cols):
"""Compute the sum for each numeric columns for each group.
:param cols: list of column names (string). Non-numeric columns are ignored.
>>> df.groupBy().sum('age').collect()
[Row(sum(age)=7)]
>>> df3.groupBy().sum('age', 'height').collect()
[Row(sum(age)=7, sum(height)=165)]
"""
@since(1.6)
def pivot(self, pivot_col, values=None):
"""
Pivots a column of the current :class:`DataFrame` and perform the specified aggregation.
There are two versions of pivot function: one that requires the caller to specify the list
of distinct values to pivot on, and one that does not. The latter is more concise but less
efficient, because Spark needs to first compute the list of distinct values internally.
:param pivot_col: Name of the column to pivot.
:param values: List of values that will be translated to columns in the output DataFrame.
# Compute the sum of earnings for each year by course with each course as a separate column
>>> df4.groupBy("year").pivot("course", ["dotNET", "Java"]).sum("earnings").collect()
[Row(year=2012, dotNET=15000, Java=20000), Row(year=2013, dotNET=48000, Java=30000)]
# Or without specifying column values (less efficient)
>>> df4.groupBy("year").pivot("course").sum("earnings").collect()
[Row(year=2012, Java=20000, dotNET=15000), Row(year=2013, Java=30000, dotNET=48000)]
>>> df5.groupBy("sales.year").pivot("sales.course").sum("sales.earnings").collect()
[Row(year=2012, Java=20000, dotNET=15000), Row(year=2013, Java=30000, dotNET=48000)]
"""
if values is None:
jgd = self._jgd.pivot(pivot_col)
else:
jgd = self._jgd.pivot(pivot_col, values)
return GroupedData(jgd, self._df)
@since(2.3)
def apply(self, udf):
"""
Maps each group of the current :class:`DataFrame` using a pandas udf and returns the result
as a `DataFrame`.
The user-defined function should take a `pandas.DataFrame` and return another
`pandas.DataFrame`. For each group, all columns are passed together as a `pandas.DataFrame`
to the user-function and the returned `pandas.DataFrame` are combined as a
:class:`DataFrame`.
The returned `pandas.DataFrame` can be of arbitrary length and its schema must match the
returnType of the pandas udf.
.. note:: This function requires a full shuffle. all the data of a group will be loaded
into memory, so the user should be aware of the potential OOM risk if data is skewed
and certain groups are too large to fit in memory.
.. note:: Experimental
:param udf: a grouped map user-defined function returned by
:func:`pyspark.sql.functions.pandas_udf`.
>>> from pyspark.sql.functions import pandas_udf, PandasUDFType
>>> df = spark.createDataFrame(
... [(1, 1.0), (1, 2.0), (2, 3.0), (2, 5.0), (2, 10.0)],
... ("id", "v"))
>>> @pandas_udf("id long, v double", PandasUDFType.GROUPED_MAP) # doctest: +SKIP
... def normalize(pdf):
... v = pdf.v
... return pdf.assign(v=(v - v.mean()) / v.std())
>>> df.groupby("id").apply(normalize).show() # doctest: +SKIP
+---+-------------------+
| id| v|
+---+-------------------+
| 1|-0.7071067811865475|
| 1| 0.7071067811865475|
| 2|-0.8320502943378437|
| 2|-0.2773500981126146|
| 2| 1.1094003924504583|
+---+-------------------+
.. seealso:: :meth:`pyspark.sql.functions.pandas_udf`
"""
# Columns are special because hasattr always return True
if isinstance(udf, Column) or not hasattr(udf, 'func') \
or udf.evalType != PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF:
raise ValueError("Invalid udf: the udf argument must be a pandas_udf of type "
"GROUPED_MAP.")
df = self._df
udf_column = udf(*[df[col] for col in df.columns])
jdf = self._jgd.flatMapGroupsInPandas(udf_column._jc.expr())
return DataFrame(jdf, self.sql_ctx)
def _test():
import doctest
from pyspark.sql import Row, SparkSession
import pyspark.sql.group
globs = pyspark.sql.group.__dict__.copy()
spark = SparkSession.builder\
.master("local[4]")\
.appName("sql.group tests")\
.getOrCreate()
sc = spark.sparkContext
globs['sc'] = sc
globs['spark'] = spark
globs['df'] = sc.parallelize([(2, 'Alice'), (5, 'Bob')]) \
.toDF(StructType([StructField('age', IntegerType()),
StructField('name', StringType())]))
globs['df3'] = sc.parallelize([Row(name='Alice', age=2, height=80),
Row(name='Bob', age=5, height=85)]).toDF()
globs['df4'] = sc.parallelize([Row(course="dotNET", year=2012, earnings=10000),
Row(course="Java", year=2012, earnings=20000),
Row(course="dotNET", year=2012, earnings=5000),
Row(course="dotNET", year=2013, earnings=48000),
Row(course="Java", year=2013, earnings=30000)]).toDF()
globs['df5'] = sc.parallelize([
Row(training="expert", sales=Row(course="dotNET", year=2012, earnings=10000)),
Row(training="junior", sales=Row(course="Java", year=2012, earnings=20000)),
Row(training="expert", sales=Row(course="dotNET", year=2012, earnings=5000)),
Row(training="junior", sales=Row(course="dotNET", year=2013, earnings=48000)),
Row(training="expert", sales=Row(course="Java", year=2013, earnings=30000))]).toDF()
(failure_count, test_count) = doctest.testmod(
pyspark.sql.group, globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE | doctest.REPORT_NDIFF)
spark.stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
| apache-2.0 |
31415us/trajectory | py/env/lib/python2.7/site-packages/shapely/examples/dissolve.py | 24 | 1747 | # dissolve.py
#
# Demonstrate how Shapely can be used to build up a collection of patches by
# dissolving circular regions and how Shapely supports plotting of the results.
from functools import partial
import random
import pylab
from shapely.geometry import Point
from shapely.ops import cascaded_union
# Use a partial function to make 100 points uniformly distributed in a 40x40
# box centered on 0,0.
r = partial(random.uniform, -20.0, 20.0)
points = [Point(r(), r()) for i in range(100)]
# Buffer the points, producing 100 polygon spots
spots = [p.buffer(2.5) for p in points]
# Perform a cascaded union of the polygon spots, dissolving them into a
# collection of polygon patches
patches = cascaded_union(spots)
if __name__ == "__main__":
# Illustrate the results using matplotlib's pylab interface
pylab.figure(num=None, figsize=(4, 4), dpi=180)
for patch in patches.geoms:
assert patch.geom_type in ['Polygon']
assert patch.is_valid
# Fill and outline each patch
x, y = patch.exterior.xy
pylab.fill(x, y, color='#cccccc', aa=True)
pylab.plot(x, y, color='#666666', aa=True, lw=1.0)
# Do the same for the holes of the patch
for hole in patch.interiors:
x, y = hole.xy
pylab.fill(x, y, color='#ffffff', aa=True)
pylab.plot(x, y, color='#999999', aa=True, lw=1.0)
# Plot the original points
pylab.plot([p.x for p in points], [p.y for p in points], 'b,', alpha=0.75)
# Write the number of patches and the total patch area to the figure
pylab.text(-25, 25,
"Patches: %d, total area: %.2f" % (len(patches.geoms), patches.area))
pylab.savefig('dissolve.png')
| mit |
bthirion/scikit-learn | sklearn/neural_network/rbm.py | 26 | 12280 | """Restricted Boltzmann Machine
"""
# Authors: Yann N. Dauphin <[email protected]>
# Vlad Niculae
# Gabriel Synnaeve
# Lars Buitinck
# License: BSD 3 clause
import time
import numpy as np
import scipy.sparse as sp
from scipy.special import expit # logistic function
from ..base import BaseEstimator
from ..base import TransformerMixin
from ..externals.six.moves import xrange
from ..utils import check_array
from ..utils import check_random_state
from ..utils import gen_even_slices
from ..utils import issparse
from ..utils.extmath import safe_sparse_dot
from ..utils.extmath import log_logistic
from ..utils.validation import check_is_fitted
class BernoulliRBM(BaseEstimator, TransformerMixin):
"""Bernoulli Restricted Boltzmann Machine (RBM).
A Restricted Boltzmann Machine with binary visible units and
binary hidden units. Parameters are estimated using Stochastic Maximum
Likelihood (SML), also known as Persistent Contrastive Divergence (PCD)
[2].
The time complexity of this implementation is ``O(d ** 2)`` assuming
d ~ n_features ~ n_components.
Read more in the :ref:`User Guide <rbm>`.
Parameters
----------
n_components : int, optional
Number of binary hidden units.
learning_rate : float, optional
The learning rate for weight updates. It is *highly* recommended
to tune this hyper-parameter. Reasonable values are in the
10**[0., -3.] range.
batch_size : int, optional
Number of examples per minibatch.
n_iter : int, optional
Number of iterations/sweeps over the training dataset to perform
during training.
verbose : int, optional
The verbosity level. The default, zero, means silent mode.
random_state : integer or numpy.RandomState, optional
A random number generator instance to define the state of the
random permutations generator. If an integer is given, it fixes the
seed. Defaults to the global numpy random number generator.
Attributes
----------
intercept_hidden_ : array-like, shape (n_components,)
Biases of the hidden units.
intercept_visible_ : array-like, shape (n_features,)
Biases of the visible units.
components_ : array-like, shape (n_components, n_features)
        Weight matrix, where n_features is the number of
visible units and n_components is the number of hidden units.
Examples
--------
>>> import numpy as np
>>> from sklearn.neural_network import BernoulliRBM
>>> X = np.array([[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 1]])
>>> model = BernoulliRBM(n_components=2)
>>> model.fit(X)
BernoulliRBM(batch_size=10, learning_rate=0.1, n_components=2, n_iter=10,
random_state=None, verbose=0)
References
----------
[1] Hinton, G. E., Osindero, S. and Teh, Y. A fast learning algorithm for
deep belief nets. Neural Computation 18, pp 1527-1554.
http://www.cs.toronto.edu/~hinton/absps/fastnc.pdf
[2] Tieleman, T. Training Restricted Boltzmann Machines using
Approximations to the Likelihood Gradient. International Conference
on Machine Learning (ICML) 2008
"""
def __init__(self, n_components=256, learning_rate=0.1, batch_size=10,
n_iter=10, verbose=0, random_state=None):
self.n_components = n_components
self.learning_rate = learning_rate
self.batch_size = batch_size
self.n_iter = n_iter
self.verbose = verbose
self.random_state = random_state
def transform(self, X):
"""Compute the hidden layer activation probabilities, P(h=1|v=X).
Parameters
----------
X : {array-like, sparse matrix} shape (n_samples, n_features)
The data to be transformed.
Returns
-------
h : array, shape (n_samples, n_components)
Latent representations of the data.
"""
check_is_fitted(self, "components_")
X = check_array(X, accept_sparse='csr', dtype=np.float64)
return self._mean_hiddens(X)
def _mean_hiddens(self, v):
"""Computes the probabilities P(h=1|v).
Parameters
----------
v : array-like, shape (n_samples, n_features)
Values of the visible layer.
Returns
-------
h : array-like, shape (n_samples, n_components)
Corresponding mean field values for the hidden layer.
"""
p = safe_sparse_dot(v, self.components_.T)
p += self.intercept_hidden_
return expit(p, out=p)
def _sample_hiddens(self, v, rng):
"""Sample from the distribution P(h|v).
Parameters
----------
v : array-like, shape (n_samples, n_features)
Values of the visible layer to sample from.
rng : RandomState
Random number generator to use.
Returns
-------
h : array-like, shape (n_samples, n_components)
Values of the hidden layer.
"""
p = self._mean_hiddens(v)
return (rng.random_sample(size=p.shape) < p)
def _sample_visibles(self, h, rng):
"""Sample from the distribution P(v|h).
Parameters
----------
h : array-like, shape (n_samples, n_components)
Values of the hidden layer to sample from.
rng : RandomState
Random number generator to use.
Returns
-------
v : array-like, shape (n_samples, n_features)
Values of the visible layer.
"""
p = np.dot(h, self.components_)
p += self.intercept_visible_
expit(p, out=p)
return (rng.random_sample(size=p.shape) < p)
def _free_energy(self, v):
"""Computes the free energy F(v) = - log sum_h exp(-E(v,h)).
Parameters
----------
v : array-like, shape (n_samples, n_features)
Values of the visible layer.
Returns
-------
free_energy : array-like, shape (n_samples,)
The value of the free energy.
"""
return (- safe_sparse_dot(v, self.intercept_visible_)
- np.logaddexp(0, safe_sparse_dot(v, self.components_.T)
+ self.intercept_hidden_).sum(axis=1))
def gibbs(self, v):
"""Perform one Gibbs sampling step.
Parameters
----------
v : array-like, shape (n_samples, n_features)
Values of the visible layer to start from.
Returns
-------
v_new : array-like, shape (n_samples, n_features)
Values of the visible layer after one Gibbs step.
"""
check_is_fitted(self, "components_")
if not hasattr(self, "random_state_"):
self.random_state_ = check_random_state(self.random_state)
h_ = self._sample_hiddens(v, self.random_state_)
v_ = self._sample_visibles(h_, self.random_state_)
return v_
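    # A minimal usage sketch (not part of the original file): repeated calls to
    # gibbs() walk a Markov chain whose stationary distribution is the model
    # distribution, e.g.
    #
    #     rbm = BernoulliRBM(n_components=2).fit(X)   # X: binary array
    #     v = X[:1].copy()
    #     for _ in range(100):                        # burn-in
    #         v = rbm.gibbs(v)
    #
    # `X` here stands for any (n_samples, n_features) array of 0/1 values.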
def partial_fit(self, X, y=None):
"""Fit the model to the data X which should contain a partial
segment of the data.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
Returns
-------
self : BernoulliRBM
The fitted model.
"""
X = check_array(X, accept_sparse='csr', dtype=np.float64)
if not hasattr(self, 'random_state_'):
self.random_state_ = check_random_state(self.random_state)
if not hasattr(self, 'components_'):
self.components_ = np.asarray(
self.random_state_.normal(
0,
0.01,
(self.n_components, X.shape[1])
),
order='F')
if not hasattr(self, 'intercept_hidden_'):
self.intercept_hidden_ = np.zeros(self.n_components, )
if not hasattr(self, 'intercept_visible_'):
self.intercept_visible_ = np.zeros(X.shape[1], )
if not hasattr(self, 'h_samples_'):
self.h_samples_ = np.zeros((self.batch_size, self.n_components))
self._fit(X, self.random_state_)
def _fit(self, v_pos, rng):
"""Inner fit for one mini-batch.
Adjust the parameters to maximize the likelihood of v using
Stochastic Maximum Likelihood (SML).
Parameters
----------
v_pos : array-like, shape (n_samples, n_features)
The data to use for training.
rng : RandomState
Random number generator to use for sampling.
"""
h_pos = self._mean_hiddens(v_pos)
v_neg = self._sample_visibles(self.h_samples_, rng)
h_neg = self._mean_hiddens(v_neg)
lr = float(self.learning_rate) / v_pos.shape[0]
update = safe_sparse_dot(v_pos.T, h_pos, dense_output=True).T
update -= np.dot(h_neg.T, v_neg)
self.components_ += lr * update
self.intercept_hidden_ += lr * (h_pos.sum(axis=0) - h_neg.sum(axis=0))
self.intercept_visible_ += lr * (np.asarray(
v_pos.sum(axis=0)).squeeze() -
v_neg.sum(axis=0))
h_neg[rng.uniform(size=h_neg.shape) < h_neg] = 1.0 # sample binomial
self.h_samples_ = np.floor(h_neg, h_neg)
def score_samples(self, X):
"""Compute the pseudo-likelihood of X.
Parameters
----------
X : {array-like, sparse matrix} shape (n_samples, n_features)
Values of the visible layer. Must be all-boolean (not checked).
Returns
-------
pseudo_likelihood : array-like, shape (n_samples,)
Value of the pseudo-likelihood (proxy for likelihood).
Notes
-----
This method is not deterministic: it computes a quantity called the
free energy on X, then on a randomly corrupted version of X, and
returns the log of the logistic function of the difference.
"""
check_is_fitted(self, "components_")
v = check_array(X, accept_sparse='csr')
rng = check_random_state(self.random_state)
# Randomly corrupt one feature in each sample in v.
ind = (np.arange(v.shape[0]),
rng.randint(0, v.shape[1], v.shape[0]))
if issparse(v):
data = -2 * v[ind] + 1
v_ = v + sp.csr_matrix((data.A.ravel(), ind), shape=v.shape)
else:
v_ = v.copy()
v_[ind] = 1 - v_[ind]
fe = self._free_energy(v)
fe_ = self._free_energy(v_)
return v.shape[1] * log_logistic(fe_ - fe)
def fit(self, X, y=None):
"""Fit the model to the data X.
Parameters
----------
X : {array-like, sparse matrix} shape (n_samples, n_features)
Training data.
Returns
-------
self : BernoulliRBM
The fitted model.
"""
X = check_array(X, accept_sparse='csr', dtype=np.float64)
n_samples = X.shape[0]
rng = check_random_state(self.random_state)
self.components_ = np.asarray(
rng.normal(0, 0.01, (self.n_components, X.shape[1])),
order='F')
self.intercept_hidden_ = np.zeros(self.n_components, )
self.intercept_visible_ = np.zeros(X.shape[1], )
self.h_samples_ = np.zeros((self.batch_size, self.n_components))
n_batches = int(np.ceil(float(n_samples) / self.batch_size))
batch_slices = list(gen_even_slices(n_batches * self.batch_size,
n_batches, n_samples))
verbose = self.verbose
begin = time.time()
for iteration in xrange(1, self.n_iter + 1):
for batch_slice in batch_slices:
self._fit(X[batch_slice], rng)
if verbose:
end = time.time()
print("[%s] Iteration %d, pseudo-likelihood = %.2f,"
" time = %.2fs"
% (type(self).__name__, iteration,
self.score_samples(X).mean(), end - begin))
begin = end
return self
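if __name__ == "__main__":
    # Illustrative sketch only (not part of the upstream module): fit the RBM on
    # a small random binary dataset and inspect the hidden activations P(h=1|v)
    # and the pseudo-likelihood proxy returned by score_samples.
    rng = np.random.RandomState(0)
    X_demo = (rng.rand(20, 6) > 0.5).astype(np.float64)
    rbm_demo = BernoulliRBM(n_components=3, learning_rate=0.05, n_iter=20,
                            random_state=0)
    hidden = rbm_demo.fit_transform(X_demo)
    print(hidden.shape)                    # (20, 3)
    print(rbm_demo.score_samples(X_demo))  # one pseudo-likelihood value per sample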
| bsd-3-clause |
466152112/scikit-learn | sklearn/feature_extraction/tests/test_text.py | 75 | 34122 | from __future__ import unicode_literals
import warnings
from sklearn.feature_extraction.text import strip_tags
from sklearn.feature_extraction.text import strip_accents_unicode
from sklearn.feature_extraction.text import strip_accents_ascii
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import ENGLISH_STOP_WORDS
from sklearn.cross_validation import train_test_split
from sklearn.cross_validation import cross_val_score
from sklearn.grid_search import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC
from sklearn.base import clone
import numpy as np
from nose import SkipTest
from nose.tools import assert_equal
from nose.tools import assert_false
from nose.tools import assert_not_equal
from nose.tools import assert_true
from nose.tools import assert_almost_equal
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_equal
from numpy.testing import assert_raises
from sklearn.utils.testing import (assert_in, assert_less, assert_greater,
assert_warns_message, assert_raise_message,
clean_warning_registry)
from collections import defaultdict, Mapping
from functools import partial
import pickle
from io import StringIO
JUNK_FOOD_DOCS = (
"the pizza pizza beer copyright",
"the pizza burger beer copyright",
"the the pizza beer beer copyright",
"the burger beer beer copyright",
"the coke burger coke copyright",
"the coke burger burger",
)
NOTJUNK_FOOD_DOCS = (
"the salad celeri copyright",
"the salad salad sparkling water copyright",
"the the celeri celeri copyright",
"the tomato tomato salad water",
"the tomato salad water copyright",
)
ALL_FOOD_DOCS = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
def uppercase(s):
return strip_accents_unicode(s).upper()
def strip_eacute(s):
return s.replace('\xe9', 'e')
def split_tokenize(s):
return s.split()
def lazy_analyze(s):
return ['the_ultimate_feature']
def test_strip_accents():
# check some classical latin accentuated symbols
a = '\xe0\xe1\xe2\xe3\xe4\xe5\xe7\xe8\xe9\xea\xeb'
expected = 'aaaaaaceeee'
assert_equal(strip_accents_unicode(a), expected)
a = '\xec\xed\xee\xef\xf1\xf2\xf3\xf4\xf5\xf6\xf9\xfa\xfb\xfc\xfd'
expected = 'iiiinooooouuuuy'
assert_equal(strip_accents_unicode(a), expected)
# check some arabic
a = '\u0625' # halef with a hamza below
expected = '\u0627' # simple halef
assert_equal(strip_accents_unicode(a), expected)
# mix letters accentuated and not
a = "this is \xe0 test"
expected = 'this is a test'
assert_equal(strip_accents_unicode(a), expected)
def test_to_ascii():
# check some classical latin accentuated symbols
a = '\xe0\xe1\xe2\xe3\xe4\xe5\xe7\xe8\xe9\xea\xeb'
expected = 'aaaaaaceeee'
assert_equal(strip_accents_ascii(a), expected)
a = '\xec\xed\xee\xef\xf1\xf2\xf3\xf4\xf5\xf6\xf9\xfa\xfb\xfc\xfd'
expected = 'iiiinooooouuuuy'
assert_equal(strip_accents_ascii(a), expected)
# check some arabic
a = '\u0625' # halef with a hamza below
expected = '' # halef has no direct ascii match
assert_equal(strip_accents_ascii(a), expected)
# mix letters accentuated and not
a = "this is \xe0 test"
expected = 'this is a test'
assert_equal(strip_accents_ascii(a), expected)
def test_word_analyzer_unigrams():
for Vectorizer in (CountVectorizer, HashingVectorizer):
wa = Vectorizer(strip_accents='ascii').build_analyzer()
text = ("J'ai mang\xe9 du kangourou ce midi, "
"c'\xe9tait pas tr\xeas bon.")
expected = ['ai', 'mange', 'du', 'kangourou', 'ce', 'midi',
'etait', 'pas', 'tres', 'bon']
assert_equal(wa(text), expected)
text = "This is a test, really.\n\n I met Harry yesterday."
expected = ['this', 'is', 'test', 'really', 'met', 'harry',
'yesterday']
assert_equal(wa(text), expected)
wa = Vectorizer(input='file').build_analyzer()
text = StringIO("This is a test with a file-like object!")
expected = ['this', 'is', 'test', 'with', 'file', 'like',
'object']
assert_equal(wa(text), expected)
# with custom preprocessor
wa = Vectorizer(preprocessor=uppercase).build_analyzer()
text = ("J'ai mang\xe9 du kangourou ce midi, "
" c'\xe9tait pas tr\xeas bon.")
expected = ['AI', 'MANGE', 'DU', 'KANGOUROU', 'CE', 'MIDI',
'ETAIT', 'PAS', 'TRES', 'BON']
assert_equal(wa(text), expected)
# with custom tokenizer
wa = Vectorizer(tokenizer=split_tokenize,
strip_accents='ascii').build_analyzer()
text = ("J'ai mang\xe9 du kangourou ce midi, "
"c'\xe9tait pas tr\xeas bon.")
expected = ["j'ai", 'mange', 'du', 'kangourou', 'ce', 'midi,',
"c'etait", 'pas', 'tres', 'bon.']
assert_equal(wa(text), expected)
def test_word_analyzer_unigrams_and_bigrams():
wa = CountVectorizer(analyzer="word", strip_accents='unicode',
ngram_range=(1, 2)).build_analyzer()
text = "J'ai mang\xe9 du kangourou ce midi, c'\xe9tait pas tr\xeas bon."
expected = ['ai', 'mange', 'du', 'kangourou', 'ce', 'midi',
'etait', 'pas', 'tres', 'bon', 'ai mange', 'mange du',
'du kangourou', 'kangourou ce', 'ce midi', 'midi etait',
'etait pas', 'pas tres', 'tres bon']
assert_equal(wa(text), expected)
def test_unicode_decode_error():
# decode_error default to strict, so this should fail
# First, encode (as bytes) a unicode string.
text = "J'ai mang\xe9 du kangourou ce midi, c'\xe9tait pas tr\xeas bon."
text_bytes = text.encode('utf-8')
# Then let the Analyzer try to decode it as ascii. It should fail,
# because we have given it an incorrect encoding.
wa = CountVectorizer(ngram_range=(1, 2), encoding='ascii').build_analyzer()
assert_raises(UnicodeDecodeError, wa, text_bytes)
ca = CountVectorizer(analyzer='char', ngram_range=(3, 6),
encoding='ascii').build_analyzer()
assert_raises(UnicodeDecodeError, ca, text_bytes)
def test_char_ngram_analyzer():
cnga = CountVectorizer(analyzer='char', strip_accents='unicode',
ngram_range=(3, 6)).build_analyzer()
text = "J'ai mang\xe9 du kangourou ce midi, c'\xe9tait pas tr\xeas bon"
expected = ["j'a", "'ai", 'ai ', 'i m', ' ma']
assert_equal(cnga(text)[:5], expected)
expected = ['s tres', ' tres ', 'tres b', 'res bo', 'es bon']
assert_equal(cnga(text)[-5:], expected)
text = "This \n\tis a test, really.\n\n I met Harry yesterday"
expected = ['thi', 'his', 'is ', 's i', ' is']
assert_equal(cnga(text)[:5], expected)
expected = [' yeste', 'yester', 'esterd', 'sterda', 'terday']
assert_equal(cnga(text)[-5:], expected)
cnga = CountVectorizer(input='file', analyzer='char',
ngram_range=(3, 6)).build_analyzer()
text = StringIO("This is a test with a file-like object!")
expected = ['thi', 'his', 'is ', 's i', ' is']
assert_equal(cnga(text)[:5], expected)
def test_char_wb_ngram_analyzer():
cnga = CountVectorizer(analyzer='char_wb', strip_accents='unicode',
ngram_range=(3, 6)).build_analyzer()
text = "This \n\tis a test, really.\n\n I met Harry yesterday"
expected = [' th', 'thi', 'his', 'is ', ' thi']
assert_equal(cnga(text)[:5], expected)
expected = ['yester', 'esterd', 'sterda', 'terday', 'erday ']
assert_equal(cnga(text)[-5:], expected)
cnga = CountVectorizer(input='file', analyzer='char_wb',
ngram_range=(3, 6)).build_analyzer()
text = StringIO("A test with a file-like object!")
expected = [' a ', ' te', 'tes', 'est', 'st ', ' tes']
assert_equal(cnga(text)[:6], expected)
def test_countvectorizer_custom_vocabulary():
vocab = {"pizza": 0, "beer": 1}
terms = set(vocab.keys())
# Try a few of the supported types.
for typ in [dict, list, iter, partial(defaultdict, int)]:
v = typ(vocab)
vect = CountVectorizer(vocabulary=v)
vect.fit(JUNK_FOOD_DOCS)
if isinstance(v, Mapping):
assert_equal(vect.vocabulary_, vocab)
else:
assert_equal(set(vect.vocabulary_), terms)
X = vect.transform(JUNK_FOOD_DOCS)
assert_equal(X.shape[1], len(terms))
def test_countvectorizer_custom_vocabulary_pipeline():
what_we_like = ["pizza", "beer"]
pipe = Pipeline([
('count', CountVectorizer(vocabulary=what_we_like)),
('tfidf', TfidfTransformer())])
X = pipe.fit_transform(ALL_FOOD_DOCS)
assert_equal(set(pipe.named_steps['count'].vocabulary_),
set(what_we_like))
assert_equal(X.shape[1], len(what_we_like))
def test_countvectorizer_custom_vocabulary_repeated_indeces():
vocab = {"pizza": 0, "beer": 0}
try:
CountVectorizer(vocabulary=vocab)
except ValueError as e:
assert_in("vocabulary contains repeated indices", str(e).lower())
def test_countvectorizer_custom_vocabulary_gap_index():
vocab = {"pizza": 1, "beer": 2}
try:
CountVectorizer(vocabulary=vocab)
except ValueError as e:
assert_in("doesn't contain index", str(e).lower())
def test_countvectorizer_stop_words():
cv = CountVectorizer()
cv.set_params(stop_words='english')
assert_equal(cv.get_stop_words(), ENGLISH_STOP_WORDS)
cv.set_params(stop_words='_bad_str_stop_')
assert_raises(ValueError, cv.get_stop_words)
cv.set_params(stop_words='_bad_unicode_stop_')
assert_raises(ValueError, cv.get_stop_words)
stoplist = ['some', 'other', 'words']
cv.set_params(stop_words=stoplist)
assert_equal(cv.get_stop_words(), stoplist)
def test_countvectorizer_empty_vocabulary():
try:
vect = CountVectorizer(vocabulary=[])
vect.fit(["foo"])
assert False, "we shouldn't get here"
except ValueError as e:
assert_in("empty vocabulary", str(e).lower())
try:
v = CountVectorizer(max_df=1.0, stop_words="english")
# fit on stopwords only
v.fit(["to be or not to be", "and me too", "and so do you"])
assert False, "we shouldn't get here"
except ValueError as e:
assert_in("empty vocabulary", str(e).lower())
def test_fit_countvectorizer_twice():
cv = CountVectorizer()
X1 = cv.fit_transform(ALL_FOOD_DOCS[:5])
X2 = cv.fit_transform(ALL_FOOD_DOCS[5:])
assert_not_equal(X1.shape[1], X2.shape[1])
def test_tf_idf_smoothing():
X = [[1, 1, 1],
[1, 1, 0],
[1, 0, 0]]
tr = TfidfTransformer(smooth_idf=True, norm='l2')
tfidf = tr.fit_transform(X).toarray()
assert_true((tfidf >= 0).all())
# check normalization
assert_array_almost_equal((tfidf ** 2).sum(axis=1), [1., 1., 1.])
# this is robust to features with only zeros
X = [[1, 1, 0],
[1, 1, 0],
[1, 0, 0]]
tr = TfidfTransformer(smooth_idf=True, norm='l2')
tfidf = tr.fit_transform(X).toarray()
assert_true((tfidf >= 0).all())
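# (For reference, the smoothed variant tested above computes
#  idf(t) = ln((1 + n_samples) / (1 + df(t))) + 1,
#  which stays finite even when df(t) == 0, whereas the unsmoothed formula
#  ln(n_samples / df(t)) + 1 used below divides by zero for all-zero features,
#  which is exactly the warning test_tfidf_no_smoothing checks for.)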
def test_tfidf_no_smoothing():
X = [[1, 1, 1],
[1, 1, 0],
[1, 0, 0]]
tr = TfidfTransformer(smooth_idf=False, norm='l2')
tfidf = tr.fit_transform(X).toarray()
assert_true((tfidf >= 0).all())
# check normalization
assert_array_almost_equal((tfidf ** 2).sum(axis=1), [1., 1., 1.])
    # the lack of smoothing makes IDF fragile in the presence of features with
# only zeros
X = [[1, 1, 0],
[1, 1, 0],
[1, 0, 0]]
tr = TfidfTransformer(smooth_idf=False, norm='l2')
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
1. / np.array([0.])
numpy_provides_div0_warning = len(w) == 1
in_warning_message = 'divide by zero'
tfidf = assert_warns_message(RuntimeWarning, in_warning_message,
tr.fit_transform, X).toarray()
if not numpy_provides_div0_warning:
raise SkipTest("Numpy does not provide div 0 warnings.")
def test_sublinear_tf():
X = [[1], [2], [3]]
tr = TfidfTransformer(sublinear_tf=True, use_idf=False, norm=None)
tfidf = tr.fit_transform(X).toarray()
assert_equal(tfidf[0], 1)
assert_greater(tfidf[1], tfidf[0])
assert_greater(tfidf[2], tfidf[1])
assert_less(tfidf[1], 2)
assert_less(tfidf[2], 3)
def test_vectorizer():
# raw documents as an iterator
train_data = iter(ALL_FOOD_DOCS[:-1])
test_data = [ALL_FOOD_DOCS[-1]]
n_train = len(ALL_FOOD_DOCS) - 1
# test without vocabulary
v1 = CountVectorizer(max_df=0.5)
counts_train = v1.fit_transform(train_data)
if hasattr(counts_train, 'tocsr'):
counts_train = counts_train.tocsr()
assert_equal(counts_train[0, v1.vocabulary_["pizza"]], 2)
    # build a vectorizer v2 with the same vocabulary as the one fitted by v1
    v2 = CountVectorizer(vocabulary=v1.vocabulary_)
    # compare that the two vectorizers give the same output on the test sample
for v in (v1, v2):
counts_test = v.transform(test_data)
if hasattr(counts_test, 'tocsr'):
counts_test = counts_test.tocsr()
vocabulary = v.vocabulary_
assert_equal(counts_test[0, vocabulary["salad"]], 1)
assert_equal(counts_test[0, vocabulary["tomato"]], 1)
assert_equal(counts_test[0, vocabulary["water"]], 1)
# stop word from the fixed list
assert_false("the" in vocabulary)
# stop word found automatically by the vectorizer DF thresholding
        # words that are highly frequent across the complete corpus are likely
        # to be uninformative (either real stop words or extraction
        # artifacts)
assert_false("copyright" in vocabulary)
# not present in the sample
assert_equal(counts_test[0, vocabulary["coke"]], 0)
assert_equal(counts_test[0, vocabulary["burger"]], 0)
assert_equal(counts_test[0, vocabulary["beer"]], 0)
assert_equal(counts_test[0, vocabulary["pizza"]], 0)
# test tf-idf
t1 = TfidfTransformer(norm='l1')
tfidf = t1.fit(counts_train).transform(counts_train).toarray()
assert_equal(len(t1.idf_), len(v1.vocabulary_))
assert_equal(tfidf.shape, (n_train, len(v1.vocabulary_)))
# test tf-idf with new data
tfidf_test = t1.transform(counts_test).toarray()
assert_equal(tfidf_test.shape, (len(test_data), len(v1.vocabulary_)))
# test tf alone
t2 = TfidfTransformer(norm='l1', use_idf=False)
tf = t2.fit(counts_train).transform(counts_train).toarray()
assert_equal(t2.idf_, None)
# test idf transform with unlearned idf vector
t3 = TfidfTransformer(use_idf=True)
assert_raises(ValueError, t3.transform, counts_train)
# test idf transform with incompatible n_features
X = [[1, 1, 5],
[1, 1, 0]]
t3.fit(X)
X_incompt = [[1, 3],
[1, 3]]
assert_raises(ValueError, t3.transform, X_incompt)
# L1-normalized term frequencies sum to one
assert_array_almost_equal(np.sum(tf, axis=1), [1.0] * n_train)
# test the direct tfidf vectorizer
# (equivalent to term count vectorizer + tfidf transformer)
train_data = iter(ALL_FOOD_DOCS[:-1])
tv = TfidfVectorizer(norm='l1')
tv.max_df = v1.max_df
tfidf2 = tv.fit_transform(train_data).toarray()
assert_false(tv.fixed_vocabulary_)
assert_array_almost_equal(tfidf, tfidf2)
# test the direct tfidf vectorizer with new data
tfidf_test2 = tv.transform(test_data).toarray()
assert_array_almost_equal(tfidf_test, tfidf_test2)
# test transform on unfitted vectorizer with empty vocabulary
v3 = CountVectorizer(vocabulary=None)
assert_raises(ValueError, v3.transform, train_data)
# ascii preprocessor?
v3.set_params(strip_accents='ascii', lowercase=False)
assert_equal(v3.build_preprocessor(), strip_accents_ascii)
# error on bad strip_accents param
v3.set_params(strip_accents='_gabbledegook_', preprocessor=None)
assert_raises(ValueError, v3.build_preprocessor)
# error with bad analyzer type
    v3.set_params(analyzer='_invalid_analyzer_type_')
assert_raises(ValueError, v3.build_analyzer)
def test_tfidf_vectorizer_setters():
tv = TfidfVectorizer(norm='l2', use_idf=False, smooth_idf=False,
sublinear_tf=False)
tv.norm = 'l1'
assert_equal(tv._tfidf.norm, 'l1')
tv.use_idf = True
assert_true(tv._tfidf.use_idf)
tv.smooth_idf = True
assert_true(tv._tfidf.smooth_idf)
tv.sublinear_tf = True
assert_true(tv._tfidf.sublinear_tf)
def test_hashing_vectorizer():
v = HashingVectorizer()
X = v.transform(ALL_FOOD_DOCS)
token_nnz = X.nnz
assert_equal(X.shape, (len(ALL_FOOD_DOCS), v.n_features))
assert_equal(X.dtype, v.dtype)
# By default the hashed values receive a random sign and l2 normalization
# makes the feature values bounded
assert_true(np.min(X.data) > -1)
assert_true(np.min(X.data) < 0)
assert_true(np.max(X.data) > 0)
assert_true(np.max(X.data) < 1)
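    # (The random sign is the usual feature-hashing trick that makes collisions
    # cancel out in expectation, so dot products are roughly preserved; the
    # non_negative=True variant below simply takes absolute values afterwards.)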
# Check that the rows are normalized
for i in range(X.shape[0]):
assert_almost_equal(np.linalg.norm(X[0].data, 2), 1.0)
# Check vectorization with some non-default parameters
v = HashingVectorizer(ngram_range=(1, 2), non_negative=True, norm='l1')
X = v.transform(ALL_FOOD_DOCS)
assert_equal(X.shape, (len(ALL_FOOD_DOCS), v.n_features))
assert_equal(X.dtype, v.dtype)
# ngrams generate more non zeros
ngrams_nnz = X.nnz
assert_true(ngrams_nnz > token_nnz)
assert_true(ngrams_nnz < 2 * token_nnz)
# makes the feature values bounded
assert_true(np.min(X.data) > 0)
assert_true(np.max(X.data) < 1)
# Check that the rows are normalized
for i in range(X.shape[0]):
assert_almost_equal(np.linalg.norm(X[0].data, 1), 1.0)
def test_feature_names():
cv = CountVectorizer(max_df=0.5)
# test for Value error on unfitted/empty vocabulary
assert_raises(ValueError, cv.get_feature_names)
X = cv.fit_transform(ALL_FOOD_DOCS)
n_samples, n_features = X.shape
assert_equal(len(cv.vocabulary_), n_features)
feature_names = cv.get_feature_names()
assert_equal(len(feature_names), n_features)
assert_array_equal(['beer', 'burger', 'celeri', 'coke', 'pizza',
'salad', 'sparkling', 'tomato', 'water'],
feature_names)
for idx, name in enumerate(feature_names):
assert_equal(idx, cv.vocabulary_.get(name))
def test_vectorizer_max_features():
vec_factories = (
CountVectorizer,
TfidfVectorizer,
)
expected_vocabulary = set(['burger', 'beer', 'salad', 'pizza'])
expected_stop_words = set([u'celeri', u'tomato', u'copyright', u'coke',
u'sparkling', u'water', u'the'])
for vec_factory in vec_factories:
# test bounded number of extracted features
vectorizer = vec_factory(max_df=0.6, max_features=4)
vectorizer.fit(ALL_FOOD_DOCS)
assert_equal(set(vectorizer.vocabulary_), expected_vocabulary)
assert_equal(vectorizer.stop_words_, expected_stop_words)
def test_count_vectorizer_max_features():
# Regression test: max_features didn't work correctly in 0.14.
cv_1 = CountVectorizer(max_features=1)
cv_3 = CountVectorizer(max_features=3)
cv_None = CountVectorizer(max_features=None)
counts_1 = cv_1.fit_transform(JUNK_FOOD_DOCS).sum(axis=0)
counts_3 = cv_3.fit_transform(JUNK_FOOD_DOCS).sum(axis=0)
counts_None = cv_None.fit_transform(JUNK_FOOD_DOCS).sum(axis=0)
features_1 = cv_1.get_feature_names()
features_3 = cv_3.get_feature_names()
features_None = cv_None.get_feature_names()
# The most common feature is "the", with frequency 7.
assert_equal(7, counts_1.max())
assert_equal(7, counts_3.max())
assert_equal(7, counts_None.max())
# The most common feature should be the same
assert_equal("the", features_1[np.argmax(counts_1)])
assert_equal("the", features_3[np.argmax(counts_3)])
assert_equal("the", features_None[np.argmax(counts_None)])
def test_vectorizer_max_df():
test_data = ['abc', 'dea', 'eat']
vect = CountVectorizer(analyzer='char', max_df=1.0)
vect.fit(test_data)
assert_true('a' in vect.vocabulary_.keys())
assert_equal(len(vect.vocabulary_.keys()), 6)
assert_equal(len(vect.stop_words_), 0)
vect.max_df = 0.5 # 0.5 * 3 documents -> max_doc_count == 1.5
vect.fit(test_data)
assert_true('a' not in vect.vocabulary_.keys()) # {ae} ignored
assert_equal(len(vect.vocabulary_.keys()), 4) # {bcdt} remain
assert_true('a' in vect.stop_words_)
assert_equal(len(vect.stop_words_), 2)
vect.max_df = 1
vect.fit(test_data)
assert_true('a' not in vect.vocabulary_.keys()) # {ae} ignored
assert_equal(len(vect.vocabulary_.keys()), 4) # {bcdt} remain
assert_true('a' in vect.stop_words_)
assert_equal(len(vect.stop_words_), 2)
def test_vectorizer_min_df():
test_data = ['abc', 'dea', 'eat']
vect = CountVectorizer(analyzer='char', min_df=1)
vect.fit(test_data)
assert_true('a' in vect.vocabulary_.keys())
assert_equal(len(vect.vocabulary_.keys()), 6)
assert_equal(len(vect.stop_words_), 0)
vect.min_df = 2
vect.fit(test_data)
assert_true('c' not in vect.vocabulary_.keys()) # {bcdt} ignored
assert_equal(len(vect.vocabulary_.keys()), 2) # {ae} remain
assert_true('c' in vect.stop_words_)
assert_equal(len(vect.stop_words_), 4)
vect.min_df = 0.8 # 0.8 * 3 documents -> min_doc_count == 2.4
vect.fit(test_data)
assert_true('c' not in vect.vocabulary_.keys()) # {bcdet} ignored
assert_equal(len(vect.vocabulary_.keys()), 1) # {a} remains
assert_true('c' in vect.stop_words_)
assert_equal(len(vect.stop_words_), 5)
def test_count_binary_occurrences():
# by default multiple occurrences are counted as longs
test_data = ['aaabc', 'abbde']
vect = CountVectorizer(analyzer='char', max_df=1.0)
X = vect.fit_transform(test_data).toarray()
assert_array_equal(['a', 'b', 'c', 'd', 'e'], vect.get_feature_names())
assert_array_equal([[3, 1, 1, 0, 0],
[1, 2, 0, 1, 1]], X)
# using boolean features, we can fetch the binary occurrence info
# instead.
vect = CountVectorizer(analyzer='char', max_df=1.0, binary=True)
X = vect.fit_transform(test_data).toarray()
assert_array_equal([[1, 1, 1, 0, 0],
[1, 1, 0, 1, 1]], X)
# check the ability to change the dtype
vect = CountVectorizer(analyzer='char', max_df=1.0,
binary=True, dtype=np.float32)
X_sparse = vect.fit_transform(test_data)
assert_equal(X_sparse.dtype, np.float32)
def test_hashed_binary_occurrences():
# by default multiple occurrences are counted as longs
test_data = ['aaabc', 'abbde']
vect = HashingVectorizer(analyzer='char', non_negative=True,
norm=None)
X = vect.transform(test_data)
assert_equal(np.max(X[0:1].data), 3)
assert_equal(np.max(X[1:2].data), 2)
assert_equal(X.dtype, np.float64)
# using boolean features, we can fetch the binary occurrence info
# instead.
vect = HashingVectorizer(analyzer='char', non_negative=True, binary=True,
norm=None)
X = vect.transform(test_data)
assert_equal(np.max(X.data), 1)
assert_equal(X.dtype, np.float64)
# check the ability to change the dtype
vect = HashingVectorizer(analyzer='char', non_negative=True, binary=True,
norm=None, dtype=np.float64)
X = vect.transform(test_data)
assert_equal(X.dtype, np.float64)
def test_vectorizer_inverse_transform():
# raw documents
data = ALL_FOOD_DOCS
for vectorizer in (TfidfVectorizer(), CountVectorizer()):
transformed_data = vectorizer.fit_transform(data)
inversed_data = vectorizer.inverse_transform(transformed_data)
analyze = vectorizer.build_analyzer()
for doc, inversed_terms in zip(data, inversed_data):
terms = np.sort(np.unique(analyze(doc)))
inversed_terms = np.sort(np.unique(inversed_terms))
assert_array_equal(terms, inversed_terms)
# Test that inverse_transform also works with numpy arrays
transformed_data = transformed_data.toarray()
inversed_data2 = vectorizer.inverse_transform(transformed_data)
for terms, terms2 in zip(inversed_data, inversed_data2):
assert_array_equal(np.sort(terms), np.sort(terms2))
def test_count_vectorizer_pipeline_grid_selection():
# raw documents
data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
# label junk food as -1, the others as +1
target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS)
# split the dataset for model development and final evaluation
train_data, test_data, target_train, target_test = train_test_split(
data, target, test_size=.2, random_state=0)
pipeline = Pipeline([('vect', CountVectorizer()),
('svc', LinearSVC())])
parameters = {
'vect__ngram_range': [(1, 1), (1, 2)],
'svc__loss': ('hinge', 'squared_hinge')
}
# find the best parameters for both the feature extraction and the
# classifier
grid_search = GridSearchCV(pipeline, parameters, n_jobs=1)
# Check that the best model found by grid search is 100% correct on the
# held out evaluation set.
pred = grid_search.fit(train_data, target_train).predict(test_data)
assert_array_equal(pred, target_test)
# on this toy dataset bigram representation which is used in the last of
# the grid_search is considered the best estimator since they all converge
# to 100% accuracy models
assert_equal(grid_search.best_score_, 1.0)
best_vectorizer = grid_search.best_estimator_.named_steps['vect']
assert_equal(best_vectorizer.ngram_range, (1, 1))
def test_vectorizer_pipeline_grid_selection():
# raw documents
data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
# label junk food as -1, the others as +1
target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS)
# split the dataset for model development and final evaluation
train_data, test_data, target_train, target_test = train_test_split(
data, target, test_size=.1, random_state=0)
pipeline = Pipeline([('vect', TfidfVectorizer()),
('svc', LinearSVC())])
parameters = {
'vect__ngram_range': [(1, 1), (1, 2)],
'vect__norm': ('l1', 'l2'),
'svc__loss': ('hinge', 'squared_hinge'),
}
# find the best parameters for both the feature extraction and the
# classifier
grid_search = GridSearchCV(pipeline, parameters, n_jobs=1)
# Check that the best model found by grid search is 100% correct on the
# held out evaluation set.
pred = grid_search.fit(train_data, target_train).predict(test_data)
assert_array_equal(pred, target_test)
# on this toy dataset bigram representation which is used in the last of
# the grid_search is considered the best estimator since they all converge
# to 100% accuracy models
assert_equal(grid_search.best_score_, 1.0)
best_vectorizer = grid_search.best_estimator_.named_steps['vect']
assert_equal(best_vectorizer.ngram_range, (1, 1))
assert_equal(best_vectorizer.norm, 'l2')
assert_false(best_vectorizer.fixed_vocabulary_)
def test_vectorizer_pipeline_cross_validation():
# raw documents
data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
# label junk food as -1, the others as +1
target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS)
pipeline = Pipeline([('vect', TfidfVectorizer()),
('svc', LinearSVC())])
cv_scores = cross_val_score(pipeline, data, target, cv=3)
assert_array_equal(cv_scores, [1., 1., 1.])
def test_vectorizer_unicode():
# tests that the count vectorizer works with cyrillic.
document = (
"\xd0\x9c\xd0\xb0\xd1\x88\xd0\xb8\xd0\xbd\xd0\xbd\xd0\xbe\xd0"
"\xb5 \xd0\xbe\xd0\xb1\xd1\x83\xd1\x87\xd0\xb5\xd0\xbd\xd0\xb8\xd0"
"\xb5 \xe2\x80\x94 \xd0\xbe\xd0\xb1\xd1\x88\xd0\xb8\xd1\x80\xd0\xbd"
"\xd1\x8b\xd0\xb9 \xd0\xbf\xd0\xbe\xd0\xb4\xd1\x80\xd0\xb0\xd0\xb7"
"\xd0\xb4\xd0\xb5\xd0\xbb \xd0\xb8\xd1\x81\xd0\xba\xd1\x83\xd1\x81"
"\xd1\x81\xd1\x82\xd0\xb2\xd0\xb5\xd0\xbd\xd0\xbd\xd0\xbe\xd0\xb3"
"\xd0\xbe \xd0\xb8\xd0\xbd\xd1\x82\xd0\xb5\xd0\xbb\xd0\xbb\xd0"
"\xb5\xd0\xba\xd1\x82\xd0\xb0, \xd0\xb8\xd0\xb7\xd1\x83\xd1\x87"
"\xd0\xb0\xd1\x8e\xd1\x89\xd0\xb8\xd0\xb9 \xd0\xbc\xd0\xb5\xd1\x82"
"\xd0\xbe\xd0\xb4\xd1\x8b \xd0\xbf\xd0\xbe\xd1\x81\xd1\x82\xd1\x80"
"\xd0\xbe\xd0\xb5\xd0\xbd\xd0\xb8\xd1\x8f \xd0\xb0\xd0\xbb\xd0\xb3"
"\xd0\xbe\xd1\x80\xd0\xb8\xd1\x82\xd0\xbc\xd0\xbe\xd0\xb2, \xd1\x81"
"\xd0\xbf\xd0\xbe\xd1\x81\xd0\xbe\xd0\xb1\xd0\xbd\xd1\x8b\xd1\x85 "
"\xd0\xbe\xd0\xb1\xd1\x83\xd1\x87\xd0\xb0\xd1\x82\xd1\x8c\xd1\x81\xd1"
"\x8f.")
vect = CountVectorizer()
X_counted = vect.fit_transform([document])
assert_equal(X_counted.shape, (1, 15))
vect = HashingVectorizer(norm=None, non_negative=True)
X_hashed = vect.transform([document])
assert_equal(X_hashed.shape, (1, 2 ** 20))
# No collisions on such a small dataset
assert_equal(X_counted.nnz, X_hashed.nnz)
# When norm is None and non_negative, the tokens are counted up to
# collisions
assert_array_equal(np.sort(X_counted.data), np.sort(X_hashed.data))
def test_tfidf_vectorizer_with_fixed_vocabulary():
# non regression smoke test for inheritance issues
vocabulary = ['pizza', 'celeri']
vect = TfidfVectorizer(vocabulary=vocabulary)
X_1 = vect.fit_transform(ALL_FOOD_DOCS)
X_2 = vect.transform(ALL_FOOD_DOCS)
assert_array_almost_equal(X_1.toarray(), X_2.toarray())
assert_true(vect.fixed_vocabulary_)
def test_pickling_vectorizer():
instances = [
HashingVectorizer(),
HashingVectorizer(norm='l1'),
HashingVectorizer(binary=True),
HashingVectorizer(ngram_range=(1, 2)),
CountVectorizer(),
CountVectorizer(preprocessor=strip_tags),
CountVectorizer(analyzer=lazy_analyze),
CountVectorizer(preprocessor=strip_tags).fit(JUNK_FOOD_DOCS),
CountVectorizer(strip_accents=strip_eacute).fit(JUNK_FOOD_DOCS),
TfidfVectorizer(),
TfidfVectorizer(analyzer=lazy_analyze),
TfidfVectorizer().fit(JUNK_FOOD_DOCS),
]
for orig in instances:
s = pickle.dumps(orig)
copy = pickle.loads(s)
assert_equal(type(copy), orig.__class__)
assert_equal(copy.get_params(), orig.get_params())
assert_array_equal(
copy.fit_transform(JUNK_FOOD_DOCS).toarray(),
orig.fit_transform(JUNK_FOOD_DOCS).toarray())
def test_stop_words_removal():
# Ensure that deleting the stop_words_ attribute doesn't affect transform
fitted_vectorizers = (
TfidfVectorizer().fit(JUNK_FOOD_DOCS),
CountVectorizer(preprocessor=strip_tags).fit(JUNK_FOOD_DOCS),
CountVectorizer(strip_accents=strip_eacute).fit(JUNK_FOOD_DOCS)
)
for vect in fitted_vectorizers:
vect_transform = vect.transform(JUNK_FOOD_DOCS).toarray()
vect.stop_words_ = None
stop_None_transform = vect.transform(JUNK_FOOD_DOCS).toarray()
delattr(vect, 'stop_words_')
stop_del_transform = vect.transform(JUNK_FOOD_DOCS).toarray()
assert_array_equal(stop_None_transform, vect_transform)
assert_array_equal(stop_del_transform, vect_transform)
def test_pickling_transformer():
X = CountVectorizer().fit_transform(JUNK_FOOD_DOCS)
orig = TfidfTransformer().fit(X)
s = pickle.dumps(orig)
copy = pickle.loads(s)
assert_equal(type(copy), orig.__class__)
assert_array_equal(
copy.fit_transform(X).toarray(),
orig.fit_transform(X).toarray())
def test_non_unique_vocab():
vocab = ['a', 'b', 'c', 'a', 'a']
vect = CountVectorizer(vocabulary=vocab)
assert_raises(ValueError, vect.fit, [])
def test_hashingvectorizer_nan_in_docs():
# np.nan can appear when using pandas to load text fields from a csv file
# with missing values.
message = "np.nan is an invalid document, expected byte or unicode string."
exception = ValueError
def func():
hv = HashingVectorizer()
hv.fit_transform(['hello world', np.nan, 'hello hello'])
assert_raise_message(exception, message, func)
def test_tfidfvectorizer_binary():
# Non-regression test: TfidfVectorizer used to ignore its "binary" param.
v = TfidfVectorizer(binary=True, use_idf=False, norm=None)
assert_true(v.binary)
X = v.fit_transform(['hello world', 'hello hello']).toarray()
assert_array_equal(X.ravel(), [1, 1, 1, 0])
X2 = v.transform(['hello world', 'hello hello']).toarray()
assert_array_equal(X2.ravel(), [1, 1, 1, 0])
def test_tfidfvectorizer_export_idf():
vect = TfidfVectorizer(use_idf=True)
vect.fit(JUNK_FOOD_DOCS)
assert_array_almost_equal(vect.idf_, vect._tfidf.idf_)
def test_vectorizer_vocab_clone():
vect_vocab = TfidfVectorizer(vocabulary=["the"])
vect_vocab_clone = clone(vect_vocab)
vect_vocab.fit(ALL_FOOD_DOCS)
vect_vocab_clone.fit(ALL_FOOD_DOCS)
assert_equal(vect_vocab_clone.vocabulary_, vect_vocab.vocabulary_)
| bsd-3-clause |
trungnt13/scikit-learn | sklearn/gaussian_process/tests/test_gaussian_process.py | 267 | 6813 | """
Testing for Gaussian Process module (sklearn.gaussian_process)
"""
# Author: Vincent Dubourg <[email protected]>
# Licence: BSD 3 clause
from nose.tools import raises
from nose.tools import assert_true
import numpy as np
from sklearn.gaussian_process import GaussianProcess
from sklearn.gaussian_process import regression_models as regression
from sklearn.gaussian_process import correlation_models as correlation
from sklearn.datasets import make_regression
from sklearn.utils.testing import assert_greater
f = lambda x: x * np.sin(x)
X = np.atleast_2d([1., 3., 5., 6., 7., 8.]).T
X2 = np.atleast_2d([2., 4., 5.5, 6.5, 7.5]).T
y = f(X).ravel()
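# f above is the classic x * sin(x) toy target; X holds six training abscissae
# and X2 five held-out points interleaved between them, which is what makes the
# interpolation checks below meaningful.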
def test_1d(regr=regression.constant, corr=correlation.squared_exponential,
random_start=10, beta0=None):
# MLE estimation of a one-dimensional Gaussian Process model.
# Check random start optimization.
# Test the interpolating property.
gp = GaussianProcess(regr=regr, corr=corr, beta0=beta0,
theta0=1e-2, thetaL=1e-4, thetaU=1e-1,
random_start=random_start, verbose=False).fit(X, y)
y_pred, MSE = gp.predict(X, eval_MSE=True)
y2_pred, MSE2 = gp.predict(X2, eval_MSE=True)
assert_true(np.allclose(y_pred, y) and np.allclose(MSE, 0.)
and np.allclose(MSE2, 0., atol=10))
def test_2d(regr=regression.constant, corr=correlation.squared_exponential,
random_start=10, beta0=None):
# MLE estimation of a two-dimensional Gaussian Process model accounting for
# anisotropy. Check random start optimization.
# Test the interpolating property.
b, kappa, e = 5., .5, .1
g = lambda x: b - x[:, 1] - kappa * (x[:, 0] - e) ** 2.
X = np.array([[-4.61611719, -6.00099547],
[4.10469096, 5.32782448],
[0.00000000, -0.50000000],
[-6.17289014, -4.6984743],
[1.3109306, -6.93271427],
[-5.03823144, 3.10584743],
[-2.87600388, 6.74310541],
[5.21301203, 4.26386883]])
y = g(X).ravel()
thetaL = [1e-4] * 2
thetaU = [1e-1] * 2
gp = GaussianProcess(regr=regr, corr=corr, beta0=beta0,
theta0=[1e-2] * 2, thetaL=thetaL,
thetaU=thetaU,
random_start=random_start, verbose=False)
gp.fit(X, y)
y_pred, MSE = gp.predict(X, eval_MSE=True)
assert_true(np.allclose(y_pred, y) and np.allclose(MSE, 0.))
eps = np.finfo(gp.theta_.dtype).eps
assert_true(np.all(gp.theta_ >= thetaL - eps)) # Lower bounds of hyperparameters
assert_true(np.all(gp.theta_ <= thetaU + eps)) # Upper bounds of hyperparameters
def test_2d_2d(regr=regression.constant, corr=correlation.squared_exponential,
random_start=10, beta0=None):
# MLE estimation of a two-dimensional Gaussian Process model accounting for
# anisotropy. Check random start optimization.
# Test the GP interpolation for 2D output
b, kappa, e = 5., .5, .1
g = lambda x: b - x[:, 1] - kappa * (x[:, 0] - e) ** 2.
f = lambda x: np.vstack((g(x), g(x))).T
X = np.array([[-4.61611719, -6.00099547],
[4.10469096, 5.32782448],
[0.00000000, -0.50000000],
[-6.17289014, -4.6984743],
[1.3109306, -6.93271427],
[-5.03823144, 3.10584743],
[-2.87600388, 6.74310541],
[5.21301203, 4.26386883]])
y = f(X)
gp = GaussianProcess(regr=regr, corr=corr, beta0=beta0,
theta0=[1e-2] * 2, thetaL=[1e-4] * 2,
thetaU=[1e-1] * 2,
random_start=random_start, verbose=False)
gp.fit(X, y)
y_pred, MSE = gp.predict(X, eval_MSE=True)
assert_true(np.allclose(y_pred, y) and np.allclose(MSE, 0.))
@raises(ValueError)
def test_wrong_number_of_outputs():
gp = GaussianProcess()
gp.fit([[1, 2, 3], [4, 5, 6]], [1, 2, 3])
def test_more_builtin_correlation_models(random_start=1):
# Repeat test_1d and test_2d for several built-in correlation
# models specified as strings.
all_corr = ['absolute_exponential', 'squared_exponential', 'cubic',
'linear']
for corr in all_corr:
test_1d(regr='constant', corr=corr, random_start=random_start)
test_2d(regr='constant', corr=corr, random_start=random_start)
test_2d_2d(regr='constant', corr=corr, random_start=random_start)
def test_ordinary_kriging():
# Repeat test_1d and test_2d with given regression weights (beta0) for
# different regression models (Ordinary Kriging).
test_1d(regr='linear', beta0=[0., 0.5])
test_1d(regr='quadratic', beta0=[0., 0.5, 0.5])
test_2d(regr='linear', beta0=[0., 0.5, 0.5])
test_2d(regr='quadratic', beta0=[0., 0.5, 0.5, 0.5, 0.5, 0.5])
test_2d_2d(regr='linear', beta0=[0., 0.5, 0.5])
test_2d_2d(regr='quadratic', beta0=[0., 0.5, 0.5, 0.5, 0.5, 0.5])
def test_no_normalize():
gp = GaussianProcess(normalize=False).fit(X, y)
y_pred = gp.predict(X)
assert_true(np.allclose(y_pred, y))
def test_random_starts():
# Test that an increasing number of random-starts of GP fitting only
# increases the reduced likelihood function of the optimal theta.
n_samples, n_features = 50, 3
np.random.seed(0)
rng = np.random.RandomState(0)
X = rng.randn(n_samples, n_features) * 2 - 1
y = np.sin(X).sum(axis=1) + np.sin(3 * X).sum(axis=1)
best_likelihood = -np.inf
for random_start in range(1, 5):
gp = GaussianProcess(regr="constant", corr="squared_exponential",
theta0=[1e-0] * n_features,
thetaL=[1e-4] * n_features,
thetaU=[1e+1] * n_features,
random_start=random_start, random_state=0,
verbose=False).fit(X, y)
rlf = gp.reduced_likelihood_function()[0]
assert_greater(rlf, best_likelihood - np.finfo(np.float32).eps)
best_likelihood = rlf
def test_mse_solving():
# test the MSE estimate to be sane.
# non-regression test for ignoring off-diagonals of feature covariance,
# testing with nugget that renders covariance useless, only
# using the mean function, with low effective rank of data
gp = GaussianProcess(corr='absolute_exponential', theta0=1e-4,
thetaL=1e-12, thetaU=1e-2, nugget=1e-2,
optimizer='Welch', regr="linear", random_state=0)
X, y = make_regression(n_informative=3, n_features=60, noise=50,
random_state=0, effective_rank=1)
gp.fit(X, y)
assert_greater(1000, gp.predict(X, eval_MSE=True)[1].mean())
| bsd-3-clause |
css-lucas/GAT | gat/service/SmartSearch/SVO_SENT_MODULE_spacy.py | 1 | 11227 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Aug 21 22:28:48 2017
@author: ruobingwang
"""
from spacy.en import English
from gat.service import file_io
import spacy
import pandas as pd
from nltk import data
from nltk.sentiment.vader import SentimentIntensityAnalyzer
import datefinder
import re
import textacy
import datetime
from dateparser import parse
import time
from nltk.stem import WordNetLemmatizer
import numpy as np
from gat.dao import dao
class SVOSENT(object):
"""
Class Methods to Extract Subject Verb Object Tuples and sentiments from a Sentence
"""
def __init__(self, language='english'):
"""
Initialize
"""
self.nlp = dao.spacy_load_en()
self.sent_detector = data.load('tokenizers/punkt/english.pickle')
self.analyzer = SentimentIntensityAnalyzer() # for sentiment analysis
self.keyverbs = list(pd.read_csv('KeyVerbs.csv')['key_verbs'])
self.allcities = list(pd.read_csv('Allcities.csv')['City'])
# self.emolexdict=pd.read_csv('emolex_full.csv')
def getTexts(self, directory):
# function by Tye
# Input: Directory
# Output:List of all text files in the directory fully loaded into memory
texts = []
pathnames = file_io.getFilesRecurse(directory, '.txt')
for pathname in pathnames:
texts.append(file_io.openFile(pathname))
return texts
def split_and_clean(self, text):
'''
        Temporary function, only useful for corpus data
'''
textlist = text.split('______________________________________________________')
result = [text[text.find("Title:") + 6:text.find("Publication title")] for text in textlist if len(text) != 0]
return result
def get_svo(self, sentence):
'''
get SVO of single sentence
'''
parsed_phrase = self.nlp(sentence)
names = list(parsed_phrase.ents)
corrected_names = []
persons = []
locations = []
organizations = []
event_date = []
norp = []
facilities = []
events = []
cities = []
for e in names:
            if e.label_ == 'GPE' or e.label_ == 'LOC' or e.label_ == 'PERSON' or e.label_ == 'ORG' or e.label_ == 'NORP' or e.label_ == 'FACILITY' or e.label_ == 'PRODUCT':
                corrected_names.append(e.text)
            if e.label_ == 'GPE' or e.label_ == 'LOC':
                locations.append(e.text)
                # if e.text.lower() in self.allcities: # detect cities, but slows processing down
                #     cities.append(e.text)
            if e.label_ == 'PERSON':
                persons.append(e.text)
            if e.label_ == 'ORG':
                organizations.append(e.text)
            if e.label_ == 'NORP':
                norp.append(e.text)
            if e.label_ == 'FACILITY' or e.label_ == 'PRODUCT':
                facilities.append(e.text)
            if e.label_ == 'EVENT':
                events.append(e.text)
subjects = []
objects = []
verbs = []
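        # Dependency labels used below: "nsubj*" covers (passive) nominal
        # subjects and "conj" picks up coordinated ones; "dobj"/"pobj"/"iobj"
        # cover direct, prepositional and indirect objects.  Verb lemmas are
        # kept only when they appear in the KeyVerbs.csv list loaded in
        # __init__.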
for text in parsed_phrase:
if text.dep_.startswith("nsubj") or text.dep_ in ['conj']:
subject = text.orth_
subjects.append(subject)
if text.dep_ in ["dobj", 'pobj', 'iobj']:
object_ = text.orth_
objects.append(object_)
if text.pos_ == "VERB" and text.lemma_ in self.keyverbs:
verb = text.lemma_
verbs.append(verb)
# event date
try:
            event_date = list(set(sentence.replace('.', '').split()) & {'Monday', 'Tuesday', 'Wednesday', 'Thursday',
'Friday', 'Saturday', 'Sunday', 'Today',
'today', 'Tomorrow', 'tomorrow', 'Yesterday',
'yesterday'})[0]
except:
try:
event_date = list(datefinder.find_dates(sentence))[0]
if str(event_date.year) not in sentence:
event_date = str(event_date.month) + '/' + str(event_date.day)
event_date = str(event_date)
except:
event_date = None
# correct subject and object
corrected_subjects = []
corrected_objects = []
corrected_names_copy = list(corrected_names)
for sub in subjects:
for name in corrected_names_copy:
if sub in name:
corrected_subjects.append(name)
corrected_names_copy.remove(name)
break;
for obj in objects:
for name in corrected_names_copy:
if obj in name:
corrected_objects.append(name)
corrected_names_copy.remove(name)
break;
return {'Sentence': sentence,
'Subjects': corrected_subjects,
'Predicates': verbs,
'Objects': corrected_objects,
'Names': corrected_names,
'Event_date': event_date,
'Persons': persons,
'Locations': locations,
# 'Cities': cities,
'Organizations': organizations,
'NORP': norp,
'Facilities': facilities,
'Events': events}
def get_svo_from_article(self, article):
sentences = self.sentence_split(article)
val = []
for sent in sentences:
svoresult = self.get_svo(sent)
val.append(svoresult)
return pd.DataFrame(val)
def sentence_split(self, text):
sentences = self.sent_detector.tokenize(text)
return sentences
def sentimentAnalysis(self, sentence):
result = self.analyzer.polarity_scores(sentence)
result['Sentence'] = sentence
return result
def get_senti_from_article(self, article):
sentences = self.sentence_split(article)
val = []
for sent in sentences:
result = self.sentimentAnalysis(sent)
val.append(result)
return pd.DataFrame(val)
###############################################
# get both SVO and sent in one dataframe
def svo_senti_from_article(self, article, subject=None):
title = article[0:article.find('(title_end)')]
try:
date = list(datefinder.find_dates(article))[-1]
except:
date = None
sentences = self.sentence_split(article)
val1 = []
val2 = []
for sent in sentences:
val1.append(self.sentimentAnalysis(sent))
val2.append(self.get_svo(sent))
result = pd.merge(pd.DataFrame(val1), pd.DataFrame(val2), on='Sentence')[
['Sentence', 'Names', 'Persons', 'Organizations', 'Facilities', 'Locations', 'Subjects', 'Predicates',
'Objects', 'compound', 'Event_date']]
result.rename(columns={'compound': 'Sentiment'}, inplace=True)
# try:
# result['date']=date
# except:
# result['date']='-----'
result['Article_date'] = date
result['Article_title'] = title
def correctdate(eventdate, articledate):
if eventdate == None:
return None
if articledate == None:
return None
try:
corrected_date = parse(eventdate, settings={'RELATIVE_BASE': articledate})
except:
corrected_date = None
return corrected_date
result['Event_date'] = result['Event_date'].apply(lambda x: correctdate(x, date))
# try:
# result.loc[result['date']> datetime.datetime.today() + datetime.timedelta(days=1),'date']='-----'
# except:
# pass
result = result.drop_duplicates(subset=['Sentence'], keep='first') # remove duplicate rows
'''
###emolex start
def getEmolex(word):
wordlist=re.findall(r'\w+', word)
wordlist=[e.lower() for e in wordlist]
df=pd.DataFrame(columns=list(self.emolexdict['type'].unique()))
dflist=[]
for e in wordlist:
temp=self.emolexdict[self.emolexdict['word']==e]
pivot=temp.pivot(index='word', columns='type', values='Weight').reset_index()
dflist.append(pivot)
result=pd.concat(dflist)
features=list(result)
features.remove('word')
df[features]=result[features]
df['Sentence']=word
final=df.groupby('Sentence').apply(np.mean).reset_index()
return final
emolex_all=[]
for sent in result['Sentence']:
dft=getEmolex(sent)
emolex_all.append(dft)
result_emolex=pd.concat(emolex_all)
result=result.join(result_emolex.set_index('Sentence'),on='Sentence')
###emolex end
'''
if subject == None:
return result
else:
return result[result['Names'].apply(lambda x: subject in x)]
def WriteCSV(self, df, name):
df.to_csv(name + '.csv', index=False)
    def batchProcessArticles(self, articles):  # articles: list of strings, e.g. obtained from split_and_clean
t0 = time.time()
results = []
for i, article in enumerate(articles):
try:
result = self.svo_senti_from_article(article)
results.append(result)
print(i + 1, 'th/', len(articles), 'article is done')
except Exception as e:
print(i, 'th article has error:', e)
t1 = time.time()
results = pd.concat(results, axis=0)
        results = results.drop_duplicates(subset=['Sentence'], keep='first')  # remove duplicate rows
print('time cost', end=':')
print(t1 - t0)
return results
if __name__ == "__main__":
svo_sent = SVOSENT()
article = '''
North Korea threatens to attack South Korea on September 24th. United States and South Korea will have a meeting at Beijing.
'''
result = svo_sent.svo_senti_from_article(article)
print(result)
'''
articles_not=svo_sent.getTexts('corpus4')[-1]
articles=svo_sent.split_and_clean(articles_not)
import time
t0=time.time()
results=[]
for i,article in enumerate(articles):
try:
result=svo_sent.svo_senti_from_article(article)
results.append(result)
print(i,end='th/')
print(len(articles),end='')
print(' article is done')
except:
print(i,' th article is empty!')
#result2=svo_sent.svo_senti_from_article(article,'Robin')
t1=time.time()
results=pd.concat(results, axis=0)
print('time cost',end=':')
print(t1-t0)
#print(results)
svo_sent.WriteCSV(results,'corpus4_full_dataset')
'''
| mit |
thientu/scikit-learn | sklearn/neighbors/tests/test_approximate.py | 71 | 18815 | """
Testing for the approximate neighbor search using
Locality Sensitive Hashing Forest module
(sklearn.neighbors.LSHForest).
"""
# Author: Maheshakya Wijewardena, Joel Nothman
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_array_less
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import ignore_warnings
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.neighbors import LSHForest
from sklearn.neighbors import NearestNeighbors
def test_neighbors_accuracy_with_n_candidates():
# Checks whether accuracy increases as `n_candidates` increases.
n_candidates_values = np.array([.1, 50, 500])
n_samples = 100
n_features = 10
n_iter = 10
n_points = 5
rng = np.random.RandomState(42)
accuracies = np.zeros(n_candidates_values.shape[0], dtype=float)
X = rng.rand(n_samples, n_features)
for i, n_candidates in enumerate(n_candidates_values):
lshf = LSHForest(n_candidates=n_candidates)
lshf.fit(X)
for j in range(n_iter):
query = X[rng.randint(0, n_samples)].reshape(1, -1)
neighbors = lshf.kneighbors(query, n_neighbors=n_points,
return_distance=False)
distances = pairwise_distances(query, X, metric='cosine')
ranks = np.argsort(distances)[0, :n_points]
intersection = np.intersect1d(ranks, neighbors).shape[0]
ratio = intersection / float(n_points)
accuracies[i] = accuracies[i] + ratio
accuracies[i] = accuracies[i] / float(n_iter)
# Sorted accuracies should be equal to original accuracies
assert_true(np.all(np.diff(accuracies) >= 0),
msg="Accuracies are not non-decreasing.")
# Highest accuracy should be strictly greater than the lowest
assert_true(np.ptp(accuracies) > 0,
msg="Highest accuracy is not strictly greater than lowest.")
def test_neighbors_accuracy_with_n_estimators():
# Checks whether accuracy increases as `n_estimators` increases.
n_estimators = np.array([1, 10, 100])
n_samples = 100
n_features = 10
n_iter = 10
n_points = 5
rng = np.random.RandomState(42)
accuracies = np.zeros(n_estimators.shape[0], dtype=float)
X = rng.rand(n_samples, n_features)
for i, t in enumerate(n_estimators):
lshf = LSHForest(n_candidates=500, n_estimators=t)
lshf.fit(X)
for j in range(n_iter):
query = X[rng.randint(0, n_samples)].reshape(1, -1)
neighbors = lshf.kneighbors(query, n_neighbors=n_points,
return_distance=False)
distances = pairwise_distances(query, X, metric='cosine')
ranks = np.argsort(distances)[0, :n_points]
intersection = np.intersect1d(ranks, neighbors).shape[0]
ratio = intersection / float(n_points)
accuracies[i] = accuracies[i] + ratio
accuracies[i] = accuracies[i] / float(n_iter)
# Sorted accuracies should be equal to original accuracies
assert_true(np.all(np.diff(accuracies) >= 0),
msg="Accuracies are not non-decreasing.")
# Highest accuracy should be strictly greater than the lowest
assert_true(np.ptp(accuracies) > 0,
msg="Highest accuracy is not strictly greater than lowest.")
@ignore_warnings
def test_kneighbors():
# Checks whether desired number of neighbors are returned.
# It is guaranteed to return the requested number of neighbors
# if `min_hash_match` is set to 0. Returned distances should be
# in ascending order.
n_samples = 12
n_features = 2
n_iter = 10
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest(min_hash_match=0)
# Test unfitted estimator
assert_raises(ValueError, lshf.kneighbors, X[0])
lshf.fit(X)
for i in range(n_iter):
n_neighbors = rng.randint(0, n_samples)
query = X[rng.randint(0, n_samples)].reshape(1, -1)
neighbors = lshf.kneighbors(query, n_neighbors=n_neighbors,
return_distance=False)
# Desired number of neighbors should be returned.
assert_equal(neighbors.shape[1], n_neighbors)
# Multiple points
n_queries = 5
queries = X[rng.randint(0, n_samples, n_queries)]
distances, neighbors = lshf.kneighbors(queries,
n_neighbors=1,
return_distance=True)
assert_equal(neighbors.shape[0], n_queries)
assert_equal(distances.shape[0], n_queries)
# Test only neighbors
neighbors = lshf.kneighbors(queries, n_neighbors=1,
return_distance=False)
assert_equal(neighbors.shape[0], n_queries)
# Test random point(not in the data set)
query = rng.randn(n_features).reshape(1, -1)
lshf.kneighbors(query, n_neighbors=1,
return_distance=False)
# Test n_neighbors at initialization
neighbors = lshf.kneighbors(query, return_distance=False)
assert_equal(neighbors.shape[1], 5)
# Test `neighbors` has an integer dtype
assert_true(neighbors.dtype.kind == 'i',
msg="neighbors are not in integer dtype.")
def test_radius_neighbors():
# Checks whether Returned distances are less than `radius`
# At least one point should be returned when the `radius` is set
# to mean distance from the considering point to other points in
# the database.
# Moreover, this test compares the radius neighbors of LSHForest
# with the `sklearn.neighbors.NearestNeighbors`.
n_samples = 12
n_features = 2
n_iter = 10
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest()
# Test unfitted estimator
assert_raises(ValueError, lshf.radius_neighbors, X[0])
lshf.fit(X)
for i in range(n_iter):
# Select a random point in the dataset as the query
query = X[rng.randint(0, n_samples)].reshape(1, -1)
# At least one neighbor should be returned when the radius is the
# mean distance from the query to the points of the dataset.
mean_dist = np.mean(pairwise_distances(query, X, metric='cosine'))
neighbors = lshf.radius_neighbors(query, radius=mean_dist,
return_distance=False)
assert_equal(neighbors.shape, (1,))
assert_equal(neighbors.dtype, object)
assert_greater(neighbors[0].shape[0], 0)
# All distances to points in the results of the radius query should
# be less than mean_dist
distances, neighbors = lshf.radius_neighbors(query,
radius=mean_dist,
return_distance=True)
assert_array_less(distances[0], mean_dist)
# Multiple points
n_queries = 5
queries = X[rng.randint(0, n_samples, n_queries)]
distances, neighbors = lshf.radius_neighbors(queries,
return_distance=True)
# dists and inds should not be 1D arrays or arrays of variable lengths
# hence the use of the object dtype.
assert_equal(distances.shape, (n_queries,))
assert_equal(distances.dtype, object)
assert_equal(neighbors.shape, (n_queries,))
assert_equal(neighbors.dtype, object)
# Compare with exact neighbor search
query = X[rng.randint(0, n_samples)].reshape(1, -1)
mean_dist = np.mean(pairwise_distances(query, X, metric='cosine'))
nbrs = NearestNeighbors(algorithm='brute', metric='cosine').fit(X)
distances_exact, _ = nbrs.radius_neighbors(query, radius=mean_dist)
distances_approx, _ = lshf.radius_neighbors(query, radius=mean_dist)
# Radius-based queries do not sort the result points and the order
# depends on the method, the random_state and the dataset order. Therefore
# we need to sort the results ourselves before performing any comparison.
sorted_dists_exact = np.sort(distances_exact[0])
sorted_dists_approx = np.sort(distances_approx[0])
# Distances to exact neighbors are less than or equal to approximate
# counterparts as the approximate radius query might have missed some
# closer neighbors.
assert_true(np.all(np.less_equal(sorted_dists_exact,
sorted_dists_approx)))
def test_radius_neighbors_boundary_handling():
X = [[0.999, 0.001], [0.5, 0.5], [0, 1.], [-1., 0.001]]
n_points = len(X)
# Build an exact nearest neighbors model as reference model to ensure
# consistency between exact and approximate methods
nnbrs = NearestNeighbors(algorithm='brute', metric='cosine').fit(X)
# Build a LSHForest model with hyperparameter values that always guarantee
# exact results on this toy dataset.
lsfh = LSHForest(min_hash_match=0, n_candidates=n_points).fit(X)
# define a query aligned with the first axis
query = [[1., 0.]]
# Compute the exact cosine distances of the query to the four points of
# the dataset
dists = pairwise_distances(query, X, metric='cosine').ravel()
# The first point is almost aligned with the query (very small angle),
# the cosine distance should therefore be almost null:
assert_almost_equal(dists[0], 0, decimal=5)
    # The second point forms an angle of 45 degrees with the query vector
assert_almost_equal(dists[1], 1 - np.cos(np.pi / 4))
    # The third point is orthogonal to the query vector, hence at a distance of
# exactly one:
assert_almost_equal(dists[2], 1)
    # The last point is almost collinear with the query but with opposite sign,
    # therefore it has a cosine 'distance' very close to the maximum possible
    # value of 2.
assert_almost_equal(dists[3], 2, decimal=5)
# If we query with a radius of one, all the samples except the last sample
# should be included in the results. This means that the third sample
# is lying on the boundary of the radius query:
exact_dists, exact_idx = nnbrs.radius_neighbors(query, radius=1)
approx_dists, approx_idx = lsfh.radius_neighbors(query, radius=1)
assert_array_equal(np.sort(exact_idx[0]), [0, 1, 2])
assert_array_equal(np.sort(approx_idx[0]), [0, 1, 2])
assert_array_almost_equal(np.sort(exact_dists[0]), dists[:-1])
assert_array_almost_equal(np.sort(approx_dists[0]), dists[:-1])
    # If we perform the same query with a slightly lower radius, the third
# point of the dataset that lay on the boundary of the previous query
# is now rejected:
eps = np.finfo(np.float64).eps
exact_dists, exact_idx = nnbrs.radius_neighbors(query, radius=1 - eps)
approx_dists, approx_idx = lsfh.radius_neighbors(query, radius=1 - eps)
assert_array_equal(np.sort(exact_idx[0]), [0, 1])
assert_array_equal(np.sort(approx_idx[0]), [0, 1])
assert_array_almost_equal(np.sort(exact_dists[0]), dists[:-2])
assert_array_almost_equal(np.sort(approx_dists[0]), dists[:-2])
def test_distances():
# Checks whether returned neighbors are from closest to farthest.
n_samples = 12
n_features = 2
n_iter = 10
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest()
lshf.fit(X)
for i in range(n_iter):
n_neighbors = rng.randint(0, n_samples)
query = X[rng.randint(0, n_samples)].reshape(1, -1)
distances, neighbors = lshf.kneighbors(query,
n_neighbors=n_neighbors,
return_distance=True)
# Returned neighbors should be from closest to farthest, that is
# increasing distance values.
assert_true(np.all(np.diff(distances[0]) >= 0))
# Note: the radius_neighbors method does not guarantee the order of
# the results.
def test_fit():
# Checks whether `fit` method sets all attribute values correctly.
n_samples = 12
n_features = 2
n_estimators = 5
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest(n_estimators=n_estimators)
lshf.fit(X)
# _input_array = X
assert_array_equal(X, lshf._fit_X)
# A hash function g(p) for each tree
assert_equal(n_estimators, len(lshf.hash_functions_))
# Hash length = 32
assert_equal(32, lshf.hash_functions_[0].components_.shape[0])
# Number of trees_ in the forest
assert_equal(n_estimators, len(lshf.trees_))
# Each tree has entries for every data point
assert_equal(n_samples, len(lshf.trees_[0]))
# Original indices after sorting the hashes
assert_equal(n_estimators, len(lshf.original_indices_))
# Each set of original indices in a tree has entries for every data point
assert_equal(n_samples, len(lshf.original_indices_[0]))
def test_partial_fit():
    # Checks whether the inserted array is consistent with the fitted data.
# `partial_fit` method should set all attribute values correctly.
n_samples = 12
n_samples_partial_fit = 3
n_features = 2
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
X_partial_fit = rng.rand(n_samples_partial_fit, n_features)
lshf = LSHForest()
# Test unfitted estimator
lshf.partial_fit(X)
assert_array_equal(X, lshf._fit_X)
lshf.fit(X)
# Insert wrong dimension
assert_raises(ValueError, lshf.partial_fit,
np.random.randn(n_samples_partial_fit, n_features - 1))
lshf.partial_fit(X_partial_fit)
    # size of _fit_X = n_samples + n_samples_partial_fit after insertion
assert_equal(lshf._fit_X.shape[0],
n_samples + n_samples_partial_fit)
    # size of original_indices_[0] = n_samples + n_samples_partial_fit
assert_equal(len(lshf.original_indices_[0]),
n_samples + n_samples_partial_fit)
    # size of trees_[1] = n_samples + n_samples_partial_fit
assert_equal(len(lshf.trees_[1]),
n_samples + n_samples_partial_fit)
def test_hash_functions():
# Checks randomness of hash functions.
# Variance and mean of each hash function (projection vector)
# should be different from flattened array of hash functions.
# If hash functions are not randomly built (seeded with
# same value), variances and means of all functions are equal.
n_samples = 12
n_features = 2
n_estimators = 5
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest(n_estimators=n_estimators,
random_state=rng.randint(0, np.iinfo(np.int32).max))
lshf.fit(X)
hash_functions = []
for i in range(n_estimators):
hash_functions.append(lshf.hash_functions_[i].components_)
for i in range(n_estimators):
assert_not_equal(np.var(hash_functions),
np.var(lshf.hash_functions_[i].components_))
for i in range(n_estimators):
assert_not_equal(np.mean(hash_functions),
np.mean(lshf.hash_functions_[i].components_))
def test_candidates():
# Checks whether candidates are sufficient.
# This should handle the cases when number of candidates is 0.
# User should be warned when number of candidates is less than
# requested number of neighbors.
X_train = np.array([[5, 5, 2], [21, 5, 5], [1, 1, 1], [8, 9, 1],
[6, 10, 2]], dtype=np.float32)
X_test = np.array([7, 10, 3], dtype=np.float32).reshape(1, -1)
# For zero candidates
lshf = LSHForest(min_hash_match=32)
lshf.fit(X_train)
message = ("Number of candidates is not sufficient to retrieve"
" %i neighbors with"
" min_hash_match = %i. Candidates are filled up"
" uniformly from unselected"
" indices." % (3, 32))
assert_warns_message(UserWarning, message, lshf.kneighbors,
X_test, n_neighbors=3)
distances, neighbors = lshf.kneighbors(X_test, n_neighbors=3)
assert_equal(distances.shape[1], 3)
# For candidates less than n_neighbors
lshf = LSHForest(min_hash_match=31)
lshf.fit(X_train)
message = ("Number of candidates is not sufficient to retrieve"
" %i neighbors with"
" min_hash_match = %i. Candidates are filled up"
" uniformly from unselected"
" indices." % (5, 31))
assert_warns_message(UserWarning, message, lshf.kneighbors,
X_test, n_neighbors=5)
distances, neighbors = lshf.kneighbors(X_test, n_neighbors=5)
assert_equal(distances.shape[1], 5)
def test_graphs():
# Smoke tests for graph methods.
n_samples_sizes = [5, 10, 20]
n_features = 3
rng = np.random.RandomState(42)
for n_samples in n_samples_sizes:
X = rng.rand(n_samples, n_features)
lshf = LSHForest(min_hash_match=0)
lshf.fit(X)
kneighbors_graph = lshf.kneighbors_graph(X)
radius_neighbors_graph = lshf.radius_neighbors_graph(X)
assert_equal(kneighbors_graph.shape[0], n_samples)
assert_equal(kneighbors_graph.shape[1], n_samples)
assert_equal(radius_neighbors_graph.shape[0], n_samples)
assert_equal(radius_neighbors_graph.shape[1], n_samples)
def test_sparse_input():
# note: Fixed random state in sp.rand is not supported in older scipy.
# The test should succeed regardless.
X1 = sp.rand(50, 100)
X2 = sp.rand(10, 100)
forest_sparse = LSHForest(radius=1, random_state=0).fit(X1)
forest_dense = LSHForest(radius=1, random_state=0).fit(X1.A)
d_sparse, i_sparse = forest_sparse.kneighbors(X2, return_distance=True)
d_dense, i_dense = forest_dense.kneighbors(X2.A, return_distance=True)
assert_almost_equal(d_sparse, d_dense)
assert_almost_equal(i_sparse, i_dense)
d_sparse, i_sparse = forest_sparse.radius_neighbors(X2,
return_distance=True)
d_dense, i_dense = forest_dense.radius_neighbors(X2.A,
return_distance=True)
assert_equal(d_sparse.shape, d_dense.shape)
for a, b in zip(d_sparse, d_dense):
assert_almost_equal(a, b)
for a, b in zip(i_sparse, i_dense):
assert_almost_equal(a, b)
| bsd-3-clause |
stinebuu/nest-simulator | pynest/examples/testiaf.py | 7 | 4086 | # -*- coding: utf-8 -*-
#
# testiaf.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""IAF Neuron example
------------------
A DC current is injected into the neuron using a current generator
device. The membrane potential as well as the spiking activity are
recorded by corresponding devices.
It can be observed how the current charges the membrane, a spike
is emitted, the neuron enters its absolute refractory period, and finally
starts to recover.
"""
###############################################################################
# First, we import all necessary modules for simulation and plotting
import nest
import matplotlib.pyplot as plt
###############################################################################
# Second the function ``build_network`` is defined to build the network and
# return the handles of the ``spike_recorder`` and the ``voltmeter``. The
# function takes the simulation resolution as argument
#
# The function first resets the simulation kernel and sets the number of
# threads and the simulation resolution. The ``iaf_psc_alpha`` neuron is
# created and the handle is stored in the variable `neuron`. The status of
# the neuron is changed so it receives an external current. Next a
# ``voltmeter`` and a ``spike_recorder`` are created and their handles stored
# in the variables `vm` and `sr` respectively.
#
# The voltmeter and spike recorder are then connected to the neuron. ``Connect``
# takes the device and neuron handles as input. The voltmeter is connected to the
# neuron and the neuron to the spike recorder because the neuron sends spikes
# to the recorder and the voltmeter 'observes' the neuron.
def build_network(dt):
nest.ResetKernel()
nest.SetKernelStatus({"local_num_threads": 1, "resolution": dt})
neuron = nest.Create('iaf_psc_alpha')
neuron.I_e = 376.0
vm = nest.Create('voltmeter')
sr = nest.Create('spike_recorder')
nest.Connect(vm, neuron)
nest.Connect(neuron, sr)
return vm, sr
###############################################################################
# The neuron is simulated for three different resolutions and then the
# voltage trace is plotted
for dt in [0.1, 0.5, 1.0]:
print("Running simulation with dt=%.2f" % dt)
vm, sr = build_network(dt)
nest.Simulate(1000.0)
###########################################################################
# The network is simulated using ``Simulate``, which takes the desired
# simulation time in milliseconds and advances the network state by this
# amount of time. During simulation, the ``spike_recorder`` counts the
# spikes of the target neuron and the total number is read out at the
# end of the simulation period.
#
# The values of the voltage recorded by the voltmeter are read out and
# the values for the membrane potential are stored in potential and the
# corresponding times in the times array
potentials = vm.get('events', 'V_m')
times = vm.get('events', 'times')
###########################################################################
# Using the matplotlib library the voltage trace is plotted over time
plt.plot(times, potentials, label="dt=%.2f" % dt)
print(" Number of spikes: {0}".format(sr.n_events))
###########################################################################
# Finally the axis are labelled and a legend is generated
plt.legend(loc=3)
plt.xlabel("time (ms)")
plt.ylabel("V_m (mV)")
plt.show()
| gpl-2.0 |
WillisXChen/django-oscar | oscar/lib/python2.7/site-packages/IPython/core/display.py | 7 | 26478 | # -*- coding: utf-8 -*-
"""Top-level display functions for displaying object in different formats.
Authors:
* Brian Granger
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2013 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import print_function
import os
import struct
from IPython.core.formatters import _safe_get_formatter_method
from IPython.utils.py3compat import (string_types, cast_bytes_py2, cast_unicode,
unicode_type)
from IPython.testing.skipdoctest import skip_doctest
from .displaypub import publish_display_data
#-----------------------------------------------------------------------------
# utility functions
#-----------------------------------------------------------------------------
def _safe_exists(path):
"""Check path, but don't let exceptions raise"""
try:
return os.path.exists(path)
except Exception:
return False
def _merge(d1, d2):
"""Like update, but merges sub-dicts instead of clobbering at the top level.
Updates d1 in-place
"""
if not isinstance(d2, dict) or not isinstance(d1, dict):
return d2
for key, value in d2.items():
d1[key] = _merge(d1.get(key), value)
return d1
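# Illustrative sketch (not part of the original module): _merge combines
# nested metadata dicts in place instead of clobbering top-level keys.
# For example (assumed inputs):
#   d1 = {'image/png': {'width': 100}}
#   _merge(d1, {'image/png': {'height': 50}})
#   # d1 is now {'image/png': {'width': 100, 'height': 50}}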
def _display_mimetype(mimetype, objs, raw=False, metadata=None):
"""internal implementation of all display_foo methods
Parameters
----------
mimetype : str
The mimetype to be published (e.g. 'image/png')
objs : tuple of objects
The Python objects to display, or if raw=True raw text data to
display.
raw : bool
Are the data objects raw data or Python objects that need to be
formatted before display? [default: False]
metadata : dict (optional)
Metadata to be associated with the specific mimetype output.
"""
if metadata:
metadata = {mimetype: metadata}
if raw:
# turn list of pngdata into list of { 'image/png': pngdata }
objs = [ {mimetype: obj} for obj in objs ]
display(*objs, raw=raw, metadata=metadata, include=[mimetype])
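# Illustrative sketch (assumed values, not from the original source): every
# display_* helper below delegates to _display_mimetype. With raw data,
#   display_png(b'<png bytes>', raw=True)
# wraps the payload as {'image/png': b'<png bytes>'} and publishes it with
# include=['image/png'], so only that representation is computed and sent.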
#-----------------------------------------------------------------------------
# Main functions
#-----------------------------------------------------------------------------
def display(*objs, **kwargs):
"""Display a Python object in all frontends.
By default all representations will be computed and sent to the frontends.
Frontends can decide which representation is used and how.
Parameters
----------
objs : tuple of objects
The Python objects to display.
raw : bool, optional
Are the objects to be displayed already mimetype-keyed dicts of raw display data,
or Python objects that need to be formatted before display? [default: False]
include : list or tuple, optional
A list of format type strings (MIME types) to include in the
format data dict. If this is set *only* the format types included
in this list will be computed.
exclude : list or tuple, optional
A list of format type strings (MIME types) to exclude in the format
data dict. If this is set all format types will be computed,
except for those included in this argument.
metadata : dict, optional
A dictionary of metadata to associate with the output.
mime-type keys in this dictionary will be associated with the individual
representation formats, if they exist.
"""
raw = kwargs.get('raw', False)
include = kwargs.get('include')
exclude = kwargs.get('exclude')
metadata = kwargs.get('metadata')
from IPython.core.interactiveshell import InteractiveShell
if not raw:
format = InteractiveShell.instance().display_formatter.format
for obj in objs:
# If _ipython_display_ is defined, use that to display this object.
display_method = _safe_get_formatter_method(obj, '_ipython_display_')
if display_method is not None:
try:
display_method(**kwargs)
except NotImplementedError:
pass
else:
continue
if raw:
publish_display_data('display', obj, metadata)
else:
format_dict, md_dict = format(obj, include=include, exclude=exclude)
if metadata:
# kwarg-specified metadata gets precedence
_merge(md_dict, metadata)
publish_display_data('display', format_dict, md_dict)
def display_pretty(*objs, **kwargs):
"""Display the pretty (default) representation of an object.
Parameters
----------
objs : tuple of objects
The Python objects to display, or if raw=True raw text data to
display.
raw : bool
Are the data objects raw data or Python objects that need to be
formatted before display? [default: False]
metadata : dict (optional)
Metadata to be associated with the specific mimetype output.
"""
_display_mimetype('text/plain', objs, **kwargs)
def display_html(*objs, **kwargs):
"""Display the HTML representation of an object.
Parameters
----------
objs : tuple of objects
The Python objects to display, or if raw=True raw HTML data to
display.
raw : bool
Are the data objects raw data or Python objects that need to be
formatted before display? [default: False]
metadata : dict (optional)
Metadata to be associated with the specific mimetype output.
"""
_display_mimetype('text/html', objs, **kwargs)
def display_svg(*objs, **kwargs):
"""Display the SVG representation of an object.
Parameters
----------
objs : tuple of objects
The Python objects to display, or if raw=True raw svg data to
display.
raw : bool
Are the data objects raw data or Python objects that need to be
formatted before display? [default: False]
metadata : dict (optional)
Metadata to be associated with the specific mimetype output.
"""
_display_mimetype('image/svg+xml', objs, **kwargs)
def display_png(*objs, **kwargs):
"""Display the PNG representation of an object.
Parameters
----------
objs : tuple of objects
The Python objects to display, or if raw=True raw png data to
display.
raw : bool
Are the data objects raw data or Python objects that need to be
formatted before display? [default: False]
metadata : dict (optional)
Metadata to be associated with the specific mimetype output.
"""
_display_mimetype('image/png', objs, **kwargs)
def display_jpeg(*objs, **kwargs):
"""Display the JPEG representation of an object.
Parameters
----------
objs : tuple of objects
The Python objects to display, or if raw=True raw JPEG data to
display.
raw : bool
Are the data objects raw data or Python objects that need to be
formatted before display? [default: False]
metadata : dict (optional)
Metadata to be associated with the specific mimetype output.
"""
_display_mimetype('image/jpeg', objs, **kwargs)
def display_latex(*objs, **kwargs):
"""Display the LaTeX representation of an object.
Parameters
----------
objs : tuple of objects
The Python objects to display, or if raw=True raw latex data to
display.
raw : bool
Are the data objects raw data or Python objects that need to be
formatted before display? [default: False]
metadata : dict (optional)
Metadata to be associated with the specific mimetype output.
"""
_display_mimetype('text/latex', objs, **kwargs)
def display_json(*objs, **kwargs):
"""Display the JSON representation of an object.
Note that not many frontends support displaying JSON.
Parameters
----------
objs : tuple of objects
The Python objects to display, or if raw=True raw json data to
display.
raw : bool
Are the data objects raw data or Python objects that need to be
formatted before display? [default: False]
metadata : dict (optional)
Metadata to be associated with the specific mimetype output.
"""
_display_mimetype('application/json', objs, **kwargs)
def display_javascript(*objs, **kwargs):
"""Display the Javascript representation of an object.
Parameters
----------
objs : tuple of objects
The Python objects to display, or if raw=True raw javascript data to
display.
raw : bool
Are the data objects raw data or Python objects that need to be
formatted before display? [default: False]
metadata : dict (optional)
Metadata to be associated with the specific mimetype output.
"""
_display_mimetype('application/javascript', objs, **kwargs)
def display_pdf(*objs, **kwargs):
"""Display the PDF representation of an object.
Parameters
----------
objs : tuple of objects
The Python objects to display, or if raw=True raw javascript data to
display.
raw : bool
Are the data objects raw data or Python objects that need to be
formatted before display? [default: False]
metadata : dict (optional)
Metadata to be associated with the specific mimetype output.
"""
_display_mimetype('application/pdf', objs, **kwargs)
#-----------------------------------------------------------------------------
# Smart classes
#-----------------------------------------------------------------------------
class DisplayObject(object):
"""An object that wraps data to be displayed."""
_read_flags = 'r'
def __init__(self, data=None, url=None, filename=None):
"""Create a display object given raw data.
When this object is returned by an expression or passed to the
display function, it will result in the data being displayed
in the frontend. The MIME type of the data should match the
subclasses used, so the Png subclass should be used for 'image/png'
data. If the data is a URL, the data will first be downloaded
        and then displayed.
Parameters
----------
data : unicode, str or bytes
The raw data or a URL or file to load the data from
url : unicode
A URL to download the data from.
filename : unicode
Path to a local file to load the data from.
"""
if data is not None and isinstance(data, string_types):
if data.startswith('http') and url is None:
url = data
filename = None
data = None
elif _safe_exists(data) and filename is None:
url = None
filename = data
data = None
self.data = data
self.url = url
self.filename = None if filename is None else unicode_type(filename)
self.reload()
self._check_data()
def _check_data(self):
"""Override in subclasses if there's something to check."""
pass
def reload(self):
"""Reload the raw data from file or URL."""
if self.filename is not None:
with open(self.filename, self._read_flags) as f:
self.data = f.read()
elif self.url is not None:
try:
try:
from urllib.request import urlopen # Py3
except ImportError:
from urllib2 import urlopen
response = urlopen(self.url)
self.data = response.read()
# extract encoding from header, if there is one:
encoding = None
for sub in response.headers['content-type'].split(';'):
sub = sub.strip()
if sub.startswith('charset'):
encoding = sub.split('=')[-1].strip()
break
# decode data, if an encoding was specified
if encoding:
self.data = self.data.decode(encoding, 'replace')
except:
self.data = None
class TextDisplayObject(DisplayObject):
"""Validate that display data is text"""
def _check_data(self):
if self.data is not None and not isinstance(self.data, string_types):
raise TypeError("%s expects text, not %r" % (self.__class__.__name__, self.data))
class Pretty(TextDisplayObject):
def _repr_pretty_(self):
return self.data
class HTML(TextDisplayObject):
def _repr_html_(self):
return self.data
def __html__(self):
"""
This method exists to inform other HTML-using modules (e.g. Markupsafe,
htmltag, etc) that this object is HTML and does not need things like
special characters (<>&) escaped.
"""
return self._repr_html_()
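# Illustrative sketch (assumes the third-party markupsafe package is
# installed): because HTML defines __html__, escaping-aware libraries leave
# its markup untouched, e.g.
#   import markupsafe                     # hypothetical usage
#   markupsafe.escape(HTML('<b>hi</b>'))  # -> Markup('<b>hi</b>'), not escaped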
class Math(TextDisplayObject):
def _repr_latex_(self):
s = self.data.strip('$')
return "$$%s$$" % s
class Latex(TextDisplayObject):
def _repr_latex_(self):
return self.data
class SVG(DisplayObject):
# wrap data in a property, which extracts the <svg> tag, discarding
# document headers
_data = None
@property
def data(self):
return self._data
@data.setter
def data(self, svg):
if svg is None:
self._data = None
return
# parse into dom object
from xml.dom import minidom
svg = cast_bytes_py2(svg)
x = minidom.parseString(svg)
# get svg tag (should be 1)
found_svg = x.getElementsByTagName('svg')
if found_svg:
svg = found_svg[0].toxml()
else:
# fallback on the input, trust the user
# but this is probably an error.
pass
svg = cast_unicode(svg)
self._data = svg
def _repr_svg_(self):
return self.data
class JSON(TextDisplayObject):
def _repr_json_(self):
return self.data
css_t = """$("head").append($("<link/>").attr({
rel: "stylesheet",
type: "text/css",
href: "%s"
}));
"""
lib_t1 = """$.getScript("%s", function () {
"""
lib_t2 = """});
"""
class Javascript(TextDisplayObject):
def __init__(self, data=None, url=None, filename=None, lib=None, css=None):
"""Create a Javascript display object given raw data.
When this object is returned by an expression or passed to the
display function, it will result in the data being displayed
in the frontend. If the data is a URL, the data will first be
downloaded and then displayed.
In the Notebook, the containing element will be available as `element`,
and jQuery will be available. The output area starts hidden, so if
the js appends content to `element` that should be visible, then
it must call `container.show()` to unhide the area.
Parameters
----------
data : unicode, str or bytes
The Javascript source code or a URL to download it from.
url : unicode
A URL to download the data from.
filename : unicode
Path to a local file to load the data from.
lib : list or str
A sequence of Javascript library URLs to load asynchronously before
running the source code. The full URLs of the libraries should
be given. A single Javascript library URL can also be given as a
string.
css: : list or str
A sequence of css files to load before running the source code.
The full URLs of the css files should be given. A single css URL
can also be given as a string.
"""
if isinstance(lib, string_types):
lib = [lib]
elif lib is None:
lib = []
if isinstance(css, string_types):
css = [css]
elif css is None:
css = []
if not isinstance(lib, (list,tuple)):
raise TypeError('expected sequence, got: %r' % lib)
if not isinstance(css, (list,tuple)):
raise TypeError('expected sequence, got: %r' % css)
self.lib = lib
self.css = css
super(Javascript, self).__init__(data=data, url=url, filename=filename)
def _repr_javascript_(self):
r = ''
for c in self.css:
r += css_t % c
for l in self.lib:
r += lib_t1 % l
r += self.data
r += lib_t2*len(self.lib)
return r
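# Illustrative sketch (hypothetical URL): for
#   Javascript("element.text('hi');", lib=['https://example.org/d3.js'])
# _repr_javascript_ wraps the source in the lib_t1/lib_t2 templates, roughly
#   $.getScript("https://example.org/d3.js", function () {
#   element.text('hi');});
# i.e. the listed libraries are loaded asynchronously before the user code runs.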
# constants for identifying png/jpeg data
_PNG = b'\x89PNG\r\n\x1a\n'
_JPEG = b'\xff\xd8'
def _pngxy(data):
"""read the (width, height) from a PNG header"""
ihdr = data.index(b'IHDR')
# next 8 bytes are width/height
w4h4 = data[ihdr+4:ihdr+12]
return struct.unpack('>ii', w4h4)
def _jpegxy(data):
"""read the (width, height) from a JPEG header"""
# adapted from http://www.64lines.com/jpeg-width-height
idx = 4
while True:
block_size = struct.unpack('>H', data[idx:idx+2])[0]
idx = idx + block_size
if data[idx:idx+2] == b'\xFF\xC0':
# found Start of Frame
iSOF = idx
break
else:
# read another block
idx += 2
h, w = struct.unpack('>HH', data[iSOF+5:iSOF+9])
return w, h
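# Illustrative sketch (assumed byte strings): both helpers read dimensions
# straight from the embedded image headers, e.g.
#   with open('figure.png', 'rb') as f:   # hypothetical file
#       w, h = _pngxy(f.read())           # width/height from the IHDR chunk
# _jpegxy walks the JPEG segment list until the Start-of-Frame marker
# (0xFFC0) and unpacks height/width from the bytes that follow it.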
class Image(DisplayObject):
_read_flags = 'rb'
_FMT_JPEG = u'jpeg'
_FMT_PNG = u'png'
_ACCEPTABLE_EMBEDDINGS = [_FMT_JPEG, _FMT_PNG]
def __init__(self, data=None, url=None, filename=None, format=u'png', embed=None, width=None, height=None, retina=False):
"""Create a PNG/JPEG image object given raw data.
When this object is returned by an input cell or passed to the
display function, it will result in the image being displayed
in the frontend.
Parameters
----------
data : unicode, str or bytes
The raw image data or a URL or filename to load the data from.
This always results in embedded image data.
url : unicode
A URL to download the data from. If you specify `url=`,
the image data will not be embedded unless you also specify `embed=True`.
filename : unicode
Path to a local file to load the data from.
Images from a file are always embedded.
format : unicode
            The format of the image data (png/jpeg/jpg). If a filename or URL is
            given, the format will be inferred from the filename extension.
embed : bool
Should the image data be embedded using a data URI (True) or be
loaded using an <img> tag. Set this to True if you want the image
to be viewable later with no internet connection in the notebook.
Default is `True`, unless the keyword argument `url` is set, then
default value is `False`.
            Note that QtConsole is not able to display images if `embed` is set to `False`.
width : int
Width to which to constrain the image in html
height : int
Height to which to constrain the image in html
retina : bool
Automatically set the width and height to half of the measured
width and height.
This only works for embedded images because it reads the width/height
from image data.
For non-embedded images, you can just set the desired display width
and height directly.
Examples
--------
# embedded image data, works in qtconsole and notebook
# when passed positionally, the first arg can be any of raw image data,
# a URL, or a filename from which to load image data.
# The result is always embedding image data for inline images.
Image('http://www.google.fr/images/srpr/logo3w.png')
Image('/path/to/image.jpg')
Image(b'RAW_PNG_DATA...')
# Specifying Image(url=...) does not embed the image data,
# it only generates `<img>` tag with a link to the source.
# This will not work in the qtconsole or offline.
Image(url='http://www.google.fr/images/srpr/logo3w.png')
"""
if filename is not None:
ext = self._find_ext(filename)
elif url is not None:
ext = self._find_ext(url)
elif data is None:
raise ValueError("No image data found. Expecting filename, url, or data.")
elif isinstance(data, string_types) and (
data.startswith('http') or _safe_exists(data)
):
ext = self._find_ext(data)
else:
ext = None
if ext is not None:
format = ext.lower()
if ext == u'jpg' or ext == u'jpeg':
format = self._FMT_JPEG
if ext == u'png':
format = self._FMT_PNG
elif isinstance(data, bytes) and format == 'png':
# infer image type from image data header,
# only if format might not have been specified.
if data[:2] == _JPEG:
format = 'jpeg'
self.format = unicode_type(format).lower()
self.embed = embed if embed is not None else (url is None)
if self.embed and self.format not in self._ACCEPTABLE_EMBEDDINGS:
raise ValueError("Cannot embed the '%s' image format" % (self.format))
self.width = width
self.height = height
self.retina = retina
super(Image, self).__init__(data=data, url=url, filename=filename)
if retina:
self._retina_shape()
def _retina_shape(self):
"""load pixel-doubled width and height from image data"""
if not self.embed:
return
if self.format == 'png':
w, h = _pngxy(self.data)
elif self.format == 'jpeg':
w, h = _jpegxy(self.data)
else:
# retina only supports png
return
self.width = w // 2
self.height = h // 2
def reload(self):
"""Reload the raw data from file or URL."""
if self.embed:
super(Image,self).reload()
if self.retina:
self._retina_shape()
def _repr_html_(self):
if not self.embed:
width = height = ''
if self.width:
width = ' width="%d"' % self.width
if self.height:
height = ' height="%d"' % self.height
return u'<img src="%s"%s%s/>' % (self.url, width, height)
def _data_and_metadata(self):
"""shortcut for returning metadata with shape information, if defined"""
md = {}
if self.width:
md['width'] = self.width
if self.height:
md['height'] = self.height
if md:
return self.data, md
else:
return self.data
def _repr_png_(self):
if self.embed and self.format == u'png':
return self._data_and_metadata()
def _repr_jpeg_(self):
if self.embed and (self.format == u'jpeg' or self.format == u'jpg'):
return self._data_and_metadata()
def _find_ext(self, s):
return unicode_type(s.split('.')[-1].lower())
def clear_output(wait=False):
"""Clear the output of the current cell receiving output.
Parameters
----------
wait : bool [default: false]
Wait to clear the output until new output is available to replace it."""
from IPython.core.interactiveshell import InteractiveShell
if InteractiveShell.initialized():
InteractiveShell.instance().display_pub.clear_output(wait)
else:
from IPython.utils import io
print('\033[2K\r', file=io.stdout, end='')
io.stdout.flush()
print('\033[2K\r', file=io.stderr, end='')
io.stderr.flush()
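# Illustrative sketch (assumed usage, not from the original source): a common
# pattern is to overwrite previous output in a loop, e.g.
#   for i in range(10):
#       clear_output(wait=True)   # wait until replacement output arrives
#       print(i)
# Outside an initialized IPython shell this falls back to the ANSI
# 'erase line' escape sequences written above.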
@skip_doctest
def set_matplotlib_formats(*formats, **kwargs):
"""Select figure formats for the inline backend. Optionally pass quality for JPEG.
For example, this enables PNG and JPEG output with a JPEG quality of 90%::
In [1]: set_matplotlib_formats('png', 'jpeg', quality=90)
To set this in your config files use the following::
c.InlineBackend.figure_formats = {'png', 'jpeg'}
c.InlineBackend.print_figure_kwargs.update({'quality' : 90})
Parameters
----------
*formats : strs
One or more figure formats to enable: 'png', 'retina', 'jpeg', 'svg', 'pdf'.
**kwargs :
Keyword args will be relayed to ``figure.canvas.print_figure``.
"""
from IPython.core.interactiveshell import InteractiveShell
from IPython.core.pylabtools import select_figure_formats
from IPython.kernel.zmq.pylab.config import InlineBackend
# build kwargs, starting with InlineBackend config
kw = {}
cfg = InlineBackend.instance()
kw.update(cfg.print_figure_kwargs)
kw.update(**kwargs)
shell = InteractiveShell.instance()
select_figure_formats(shell, formats, **kw)
@skip_doctest
def set_matplotlib_close(close=True):
"""Set whether the inline backend closes all figures automatically or not.
By default, the inline backend used in the IPython Notebook will close all
matplotlib figures automatically after each cell is run. This means that
plots in different cells won't interfere. Sometimes, you may want to make
a plot in one cell and then refine it in later cells. This can be accomplished
by::
In [1]: set_matplotlib_close(False)
To set this in your config files use the following::
c.InlineBackend.close_figures = False
Parameters
----------
close : bool
Should all matplotlib figures be automatically closed after each cell is
run?
"""
from IPython.kernel.zmq.pylab.config import InlineBackend
cfg = InlineBackend.instance()
cfg.close_figures = close
| bsd-3-clause |
SidSachdev/SFrame | oss_src/unity/python/sframe/data_structures/sgraph.py | 9 | 58636 | """
.. warning:: This product is currently in a beta release. The API reference is
subject to change.
This package defines the GraphLab Create SGraph, Vertex, and Edge objects. The SGraph
is a directed graph, consisting of a set of Vertex objects and Edges that
connect pairs of Vertices. The methods in this module are available from the top
level import of the graphlab package.
"""
'''
Copyright (C) 2015 Dato, Inc.
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
'''
from .. import connect as _mt
from ..connect import main as glconnect
from .sframe import SFrame
from .sarray import SArray
from .gframe import GFrame, VERTEX_GFRAME, EDGE_GFRAME
from ..cython.cy_graph import UnityGraphProxy
from ..cython.context import debug_trace as cython_context
from ..util import _make_internal_url
from ..deps import pandas as pd
from ..deps import HAS_PANDAS
import inspect
import copy
## \internal Default column name for vertex id.
_VID_COLUMN = '__id'
## \internal Default column name for source vid.
_SRC_VID_COLUMN = '__src_id'
## \internal Default column name for target vid.
_DST_VID_COLUMN = '__dst_id'
#/**************************************************************************/
#/* */
#/* SGraph Related Classes */
#/* */
#/**************************************************************************/
class Vertex(object):
"""
A vertex object, consisting of a vertex ID and a dictionary of vertex
attributes. The vertex ID can be an integer, string, or float.
Parameters
----------
vid : int or string or float
Vertex ID.
attr : dict, optional
Vertex attributes. A Dictionary of string keys and values with one of
the following types: int, float, string, array of floats.
See Also
--------
Edge, SGraph
Examples
--------
>>> from graphlab import SGraph, Vertex, Edge
>>> g = SGraph()
>>> verts = [Vertex(0, attr={'breed': 'labrador'}),
Vertex(1, attr={'breed': 'labrador'}),
Vertex(2, attr={'breed': 'vizsla'})]
>>> g = g.add_vertices(verts)
"""
__slots__ = ['vid', 'attr']
def __init__(self, vid, attr={}, _series=None):
"""__init__(self, vid, attr={})
Construct a new vertex.
"""
if not _series is None:
self.vid = _series[_VID_COLUMN]
self.attr = _series.to_dict()
self.attr.pop(_VID_COLUMN)
else:
self.vid = vid
self.attr = attr
def __repr__(self):
return "V(" + str(self.vid) + ", " + str(self.attr) + ")"
def __str__(self):
return "V(" + str(self.vid) + ", " + str(self.attr) + ")"
class Edge(object):
"""
A directed edge between two Vertex objects. An Edge object consists of a
source vertex ID, a destination vertex ID, and a dictionary of edge
attributes.
Parameters
----------
src_vid : int or string or float
Source vertex ID.
dst_vid : int or string or float
Target vertex ID.
attr : dict
Edge attributes. A Dictionary of string keys and values with one of the
following types: integer, float, string, array of floats.
See Also
--------
Vertex, SGraph
Examples
--------
>>> from graphlab import SGraph, Vertex, Edge
>>> verts = [Vertex(0, attr={'breed': 'labrador'}),
Vertex(1, attr={'breed': 'vizsla'})]
>>> edges = [Edge(0, 1, attr={'size': 'larger_than'})]
>>> g = SGraph()
>>> g = g.add_vertices(verts).add_edges(edges)
"""
__slots__ = ['src_vid', 'dst_vid', 'attr']
def __init__(self, src_vid, dst_vid, attr={}, _series=None):
"""__init__(self, vid, attr={})
Construct a new edge.
"""
if not _series is None:
self.src_vid = _series[_SRC_VID_COLUMN]
self.dst_vid = _series[_DST_VID_COLUMN]
self.attr = _series.to_dict()
self.attr.pop(_SRC_VID_COLUMN)
self.attr.pop(_DST_VID_COLUMN)
else:
self.src_vid = src_vid
self.dst_vid = dst_vid
self.attr = attr
def __repr__(self):
return ("E(" + str(self.src_vid) + " -> " + str(self.dst_vid) + ", " +
str(self.attr) + ")")
def __str__(self):
return ("E(" + str(self.src_vid) + " -> " + str(self.dst_vid) + ", " +
str(self.attr) + ")")
class SGraph(object):
"""
A scalable graph data structure. The SGraph data structure allows arbitrary
dictionary attributes on vertices and edges, provides flexible vertex and
edge query functions, and seamless transformation to and from
:class:`~graphlab.SFrame`.
There are several ways to create an SGraph. The simplest way is to make an
empty SGraph then add vertices and edges with the :py:func:`add_vertices`
and :py:func:`add_edges` methods. SGraphs can also be created from vertex
and edge lists stored in :class:`~graphlab.SFrames`. Columns of these
SFrames not used as vertex IDs are assumed to be vertex or edge attributes.
Please see the `User Guide
<https://dato.com/learn/userguide/sgraph/sgraph.html>`_
for a more detailed introduction to creating and working with SGraphs.
Parameters
----------
vertices : SFrame, optional
Vertex data. Must include an ID column with the name specified by
`vid_field.` Additional columns are treated as vertex attributes.
edges : SFrame, optional
Edge data. Must include source and destination ID columns as specified
by `src_field` and `dst_field`. Additional columns are treated as edge
attributes.
vid_field : str, optional
The name of vertex ID column in the `vertices` SFrame.
src_field : str, optional
The name of source ID column in the `edges` SFrame.
dst_field : str, optional
The name of destination ID column in the `edges` SFrame.
See Also
--------
SFrame
Notes
-----
- SGraphs are *structurally immutable*. In the example below, the
:func:`~add_vertices` and :func:`~add_edges` commands both return a new
graph; the old graph gets garbage collected.
Examples
--------
>>> from graphlab import SGraph, Vertex, Edge
>>> g = SGraph()
>>> verts = [Vertex(0, attr={'breed': 'labrador'}),
Vertex(1, attr={'breed': 'labrador'}),
Vertex(2, attr={'breed': 'vizsla'})]
>>> g = g.add_vertices(verts)
>>> g = g.add_edges(Edge(1, 2))
"""
__slots__ = ['__proxy__', '_vertices', '_edges']
def __init__(self, vertices=None, edges=None, vid_field='__id',
src_field='__src_id', dst_field='__dst_id', _proxy=None):
"""
__init__(vertices=None, edges=None, vid_field='__id', src_field='__src_id', dst_field='__dst_id')
By default, construct an empty graph when vertices and edges are None.
Otherwise construct an SGraph with given vertices and edges.
Parameters
----------
vertices : SFrame, optional
An SFrame containing vertex id columns and optional vertex data
columns.
edges : SFrame, optional
An SFrame containing source and target id columns and optional edge
data columns.
vid_field : str, optional
The name of vertex id column in the `vertices` SFrame.
src_field : str, optional
The name of source id column in the `edges` SFrame.
dst_field : str, optional
The name of target id column in the `edges` SFrame.
"""
if (_proxy is None):
self.__proxy__ = UnityGraphProxy(glconnect.get_client())
if vertices is not None:
self.__proxy__ = self.add_vertices(vertices, vid_field).__proxy__
if edges is not None:
self.__proxy__ = self.add_edges(edges, src_field, dst_field).__proxy__
else:
self.__proxy__ = _proxy
self._vertices = GFrame(self, VERTEX_GFRAME)
self._edges = GFrame(self, EDGE_GFRAME)
def __str__(self):
"""Returns a readable string representation summarizing the graph."""
return "SGraph(%s)" % str(self.summary())
def __repr__(self):
"""Returns a readable string representation summarizing the graph."""
return "SGraph(%s)\nVertex Fields:%s\nEdge Fields:%s" % \
(str(self.summary()), str(self.get_vertex_fields()), str(self.get_edge_fields()))
def __copy__(self):
return SGraph(_proxy=self.__proxy__)
def copy(self):
"""
Returns a shallow copy of the SGraph.
"""
return self.__copy__()
@property
def vertices(self):
"""
Special vertex SFrame of the SGraph. Modifying the contents of this
SFrame changes the vertex data of the SGraph. To preserve the graph
structure, the ``__id`` column of this SFrame is read-only.
See Also
--------
edges
Examples
--------
>>> from graphlab import SGraph, Vertex
>>> g = SGraph().add_vertices([Vertex('cat', {'fluffy': 1}),
Vertex('dog', {'fluffy': 1, 'woof': 1}),
Vertex('hippo', {})])
Copy the 'woof' vertex attribute into a new 'bark' vertex attribute:
>>> g.vertices['bark'] = g.vertices['woof']
Remove the 'woof' attribute:
>>> del g.vertices['woof']
Create a new field 'likes_fish':
>>> g.vertices['likes_fish'] = g.vertices['__id'] == 'cat'
+-------+--------+------+------------+
| __id | fluffy | bark | likes_fish |
+-------+--------+------+------------+
| dog | 1.0 | 1.0 | 0 |
| cat | 1.0 | nan | 1 |
| hippo | nan | nan | 0 |
+-------+--------+------+------------+
Replace missing values with zeros:
>>> for col in g.vertices.column_names():
... if col != '__id':
... g.vertices.fillna(col, 0)
+-------+--------+------+------------+
| __id | fluffy | bark | likes_fish |
+-------+--------+------+------------+
| dog | 1.0 | 1.0 | 0 |
| cat | 1.0 | 0.0 | 1 |
| hippo | 0.0 | 0.0 | 0 |
+-------+--------+------+------------+
"""
_mt._get_metric_tracker().track('sgraph.vertices')
return self._vertices
@property
def edges(self):
"""
Special edge SFrame of the SGraph. Modifying the contents of this SFrame
changes the edge data of the SGraph. To preserve the graph structure,
the ``__src_id``, and ``__dst_id`` columns of this SFrame are read-only.
See Also
--------
vertices
Examples
--------
>>> from graphlab import SGraph, Vertex, Edge
>>> g = SGraph()
>>> g = g.add_vertices([Vertex(x) for x in ['cat', 'dog', 'fossa']])
>>> g = g.add_edges([Edge('cat', 'dog', attr={'relationship': 'dislikes'}),
Edge('dog', 'cat', attr={'relationship': 'likes'}),
Edge('dog', 'fossa', attr={'relationship': 'likes'})])
>>> g.edges['size'] = ['smaller than', 'larger than', 'equal to']
+----------+----------+--------------+--------------+
| __src_id | __dst_id | relationship | size |
+----------+----------+--------------+--------------+
| cat | dog | dislikes | smaller than |
| dog | cat | likes | larger than |
| dog | fossa | likes | equal to |
+----------+----------+--------------+--------------+
"""
_mt._get_metric_tracker().track('sgraph.edges')
return self._edges
def summary(self):
"""
Return the number of vertices and edges as a dictionary.
Returns
-------
out : dict
A dictionary containing the number of vertices and edges.
See Also
--------
show, vertices, edges
Examples
--------
>>> from graphlab import SGraph, Vertex
>>> g = SGraph().add_vertices([Vertex(i) for i in range(10)])
>>> n_vertex = g.summary()['num_vertices']
10
>>> n_edge = g.summary()['num_edges']
0
"""
_mt._get_metric_tracker().track('sgraph.summary')
ret = self.__proxy__.summary()
return dict(ret.items())
def get_vertices(self, ids=[], fields={}, format='sframe'):
"""
get_vertices(self, ids=list(), fields={}, format='sframe')
Return a collection of vertices and their attributes.
Parameters
----------
ids : list [int | float | str] or SArray
List of vertex IDs to retrieve. Only vertices in this list will be
returned. Also accepts a single vertex id.
fields : dict | pandas.DataFrame
Dictionary specifying equality constraint on field values. For
example ``{'gender': 'M'}``, returns only vertices whose 'gender'
field is 'M'. ``None`` can be used to designate a wild card. For
example, {'relationship': None} will find all vertices with the
field 'relationship' regardless of the value.
format : {'sframe', 'list'}
Output format. The SFrame output (default) contains a column
            ``__id`` with vertex IDs and a column for each vertex attribute.
List output returns a list of Vertex objects.
Returns
-------
out : SFrame or list [Vertex]
An SFrame or list of Vertex objects.
See Also
--------
vertices, get_edges
Examples
--------
Return all vertices in the graph.
>>> from graphlab import SGraph, Vertex
>>> g = SGraph().add_vertices([Vertex(0, attr={'gender': 'M'}),
Vertex(1, attr={'gender': 'F'}),
Vertex(2, attr={'gender': 'F'})])
>>> g.get_vertices()
+------+--------+
| __id | gender |
+------+--------+
| 0 | M |
| 2 | F |
| 1 | F |
+------+--------+
Return vertices 0 and 2.
>>> g.get_vertices(ids=[0, 2])
+------+--------+
| __id | gender |
+------+--------+
| 0 | M |
| 2 | F |
+------+--------+
Return vertices with the vertex attribute "gender" equal to "M".
>>> g.get_vertices(fields={'gender': 'M'})
+------+--------+
| __id | gender |
+------+--------+
| 0 | M |
+------+--------+
"""
_mt._get_metric_tracker().track('sgraph.get_vertices')
if not hasattr(ids, '__iter__'):
ids = [ids]
if type(ids) not in (list, SArray):
raise TypeError('ids must be list or SArray type')
with cython_context():
sf = SFrame(_proxy=self.__proxy__.get_vertices(ids, fields))
if (format == 'sframe'):
return sf
elif (format == 'dataframe'):
assert HAS_PANDAS, 'Cannot use dataframe because Pandas is not available or version is too low.'
if sf.num_rows() == 0:
return pd.DataFrame()
else:
df = sf.head(sf.num_rows()).to_dataframe()
return df.set_index('__id')
elif (format == 'list'):
return _dataframe_to_vertex_list(sf.to_dataframe())
else:
raise ValueError("Invalid format specifier")
def get_edges(self, src_ids=[], dst_ids=[], fields={}, format='sframe'):
"""
get_edges(self, src_ids=list(), dst_ids=list(), fields={}, format='sframe')
Return a collection of edges and their attributes. This function is used
to find edges by vertex IDs, filter on edge attributes, or list in-out
neighbors of vertex sets.
Parameters
----------
src_ids, dst_ids : list or SArray, optional
Parallel arrays of vertex IDs, with each pair corresponding to an
edge to fetch. Only edges in this list are returned. ``None`` can be
used to designate a wild card. For instance, ``src_ids=[1, 2,
None]``, ``dst_ids=[3, None, 5]`` will fetch the edge 1->3, all
outgoing edges of 2 and all incoming edges of 5. src_id and dst_id
may be left empty, which implies an array of all wild cards.
fields : dict, optional
Dictionary specifying equality constraints on field values. For
example, ``{'relationship': 'following'}``, returns only edges whose
'relationship' field equals 'following'. ``None`` can be used as a
value to designate a wild card. e.g. ``{'relationship': None}`` will
find all edges with the field 'relationship' regardless of the
value.
format : {'sframe', 'list'}, optional
Output format. The 'sframe' output (default) contains columns
__src_id and __dst_id with edge vertex IDs and a column for each
edge attribute. List output returns a list of Edge objects.
Returns
-------
out : SFrame | list [Edge]
An SFrame or list of edges.
See Also
--------
edges, get_vertices
Examples
--------
Return all edges in the graph.
>>> from graphlab import SGraph, Edge
>>> g = SGraph().add_edges([Edge(0, 1, attr={'rating': 5}),
Edge(0, 2, attr={'rating': 2}),
Edge(1, 2)])
>>> g.get_edges(src_ids=[None], dst_ids=[None])
+----------+----------+--------+
| __src_id | __dst_id | rating |
+----------+----------+--------+
| 0 | 2 | 2 |
| 0 | 1 | 5 |
| 1 | 2 | None |
+----------+----------+--------+
Return edges with the attribute "rating" of 5.
>>> g.get_edges(fields={'rating': 5})
+----------+----------+--------+
| __src_id | __dst_id | rating |
+----------+----------+--------+
| 0 | 1 | 5 |
+----------+----------+--------+
Return edges 0 --> 1 and 1 --> 2 (if present in the graph).
>>> g.get_edges(src_ids=[0, 1], dst_ids=[1, 2])
+----------+----------+--------+
| __src_id | __dst_id | rating |
+----------+----------+--------+
| 0 | 1 | 5 |
| 1 | 2 | None |
+----------+----------+--------+
"""
_mt._get_metric_tracker().track('sgraph.get_edges')
if not hasattr(src_ids, '__iter__'):
src_ids = [src_ids]
if not hasattr(dst_ids, '__iter__'):
dst_ids = [dst_ids]
if type(src_ids) not in (list, SArray):
raise TypeError('src_ids must be list or SArray type')
if type(dst_ids) not in (list, SArray):
raise TypeError('dst_ids must be list or SArray type')
# implicit Nones
if len(src_ids) == 0 and len(dst_ids) > 0:
src_ids = [None] * len(dst_ids)
# implicit Nones
if len(dst_ids) == 0 and len(src_ids) > 0:
dst_ids = [None] * len(src_ids)
with cython_context():
sf = SFrame(_proxy=self.__proxy__.get_edges(src_ids, dst_ids, fields))
if (format == 'sframe'):
return sf
if (format == 'dataframe'):
assert HAS_PANDAS, 'Cannot use dataframe because Pandas is not available or version is too low.'
if sf.num_rows() == 0:
return pd.DataFrame()
else:
return sf.head(sf.num_rows()).to_dataframe()
elif (format == 'list'):
return _dataframe_to_edge_list(sf.to_dataframe())
else:
raise ValueError("Invalid format specifier")
def add_vertices(self, vertices, vid_field=None):
"""
Add vertices to the SGraph. Vertices should be input as a list of
:class:`~graphlab.Vertex` objects, an :class:`~graphlab.SFrame`, or a
pandas DataFrame. If vertices are specified by SFrame or DataFrame,
``vid_field`` specifies which column contains the vertex ID. Remaining
columns are assumed to hold additional vertex attributes. If these
attributes are not already present in the graph's vertex data, they are
added, with existing vertices acquiring the value ``None``.
Parameters
----------
vertices : Vertex | list [Vertex] | pandas.DataFrame | SFrame
Vertex data. If the vertices are in an SFrame or DataFrame, then
``vid_field`` specifies the column containing the vertex IDs.
Additional columns are treated as vertex attributes.
vid_field : string, optional
Column in the DataFrame or SFrame to use as vertex ID. Required if
vertices is an SFrame. If ``vertices`` is a DataFrame and
``vid_field`` is not specified, the row index is used as vertex ID.
Returns
-------
out : SGraph
A new SGraph with vertices added.
See Also
--------
vertices, SFrame, add_edges
Notes
-----
- If vertices are added with indices that already exist in the graph,
they are overwritten completely. All attributes for these vertices
will conform to the specification in this method.
Examples
--------
>>> from graphlab import SGraph, Vertex, SFrame
>>> g = SGraph()
Add a single vertex.
>>> g = g.add_vertices(Vertex(0, attr={'breed': 'labrador'}))
Add a list of vertices.
>>> verts = [Vertex(0, attr={'breed': 'labrador'}),
Vertex(1, attr={'breed': 'labrador'}),
Vertex(2, attr={'breed': 'vizsla'})]
>>> g = g.add_vertices(verts)
Add vertices from an SFrame.
>>> sf_vert = SFrame({'id': [0, 1, 2], 'breed':['lab', 'lab', 'vizsla']})
>>> g = g.add_vertices(sf_vert, vid_field='id')
"""
_mt._get_metric_tracker().track('sgraph.add_vertices')
sf = _vertex_data_to_sframe(vertices, vid_field)
with cython_context():
proxy = self.__proxy__.add_vertices(sf.__proxy__, _VID_COLUMN)
return SGraph(_proxy=proxy)
def add_edges(self, edges, src_field=None, dst_field=None):
"""
Add edges to the SGraph. Edges should be input as a list of
:class:`~graphlab.Edge` objects, an :class:`~graphlab.SFrame`, or a
Pandas DataFrame. If the new edges are in an SFrame or DataFrame, then
``src_field`` and ``dst_field`` are required to specify the columns that
contain the source and destination vertex IDs; additional columns are
treated as edge attributes. If these attributes are not already present
in the graph's edge data, they are added, with existing edges acquiring
the value ``None``.
Parameters
----------
edges : Edge | list [Edge] | pandas.DataFrame | SFrame
Edge data. If the edges are in an SFrame or DataFrame, then
``src_field`` and ``dst_field`` are required to specify the columns
that contain the source and destination vertex IDs. Additional
columns are treated as edge attributes.
src_field : string, optional
Column in the SFrame or DataFrame to use as source vertex IDs. Not
required if ``edges`` is a list.
dst_field : string, optional
Column in the SFrame or Pandas DataFrame to use as destination
vertex IDs. Not required if ``edges`` is a list.
Returns
-------
out : SGraph
A new SGraph with `edges` added.
See Also
--------
edges, SFrame, add_vertices
Notes
-----
- If an edge is added whose source and destination IDs match edges that
already exist in the graph, a new edge is added to the graph. This
contrasts with :py:func:`add_vertices`, which overwrites existing
vertices.
Examples
--------
>>> from graphlab import SGraph, Vertex, Edge, SFrame
>>> g = SGraph()
>>> verts = [Vertex(0, attr={'breed': 'labrador'}),
Vertex(1, attr={'breed': 'labrador'}),
Vertex(2, attr={'breed': 'vizsla'})]
>>> g = g.add_vertices(verts)
Add a single edge.
>>> g = g.add_edges(Edge(1, 2))
Add a list of edges.
>>> g = g.add_edges([Edge(0, 2), Edge(1, 2)])
Add edges from an SFrame.
>>> sf_edge = SFrame({'source': [0, 1], 'dest': [2, 2]})
>>> g = g.add_edges(sf_edge, src_field='source', dst_field='dest')
"""
_mt._get_metric_tracker().track('sgraph.add_edges')
sf = _edge_data_to_sframe(edges, src_field, dst_field)
with cython_context():
proxy = self.__proxy__.add_edges(sf.__proxy__, _SRC_VID_COLUMN, _DST_VID_COLUMN)
return SGraph(_proxy=proxy)
def get_fields(self):
"""
Return a list of vertex and edge attribute fields in the SGraph. If a
field is common to both vertex and edge attributes, it will show up
twice in the returned list.
Returns
-------
out : list
Names of fields contained in the vertex or edge data.
See Also
--------
get_vertex_fields, get_edge_fields
Examples
--------
>>> from graphlab import SGraph, Vertex, Edge
>>> g = SGraph()
>>> verts = [Vertex(0, attr={'name': 'alex'}),
Vertex(1, attr={'name': 'barbara'})]
>>> g = g.add_vertices(verts)
>>> g = g.add_edges(Edge(0, 1, attr={'frequency': 6}))
>>> fields = g.get_fields()
['__id', 'name', '__src_id', '__dst_id', 'frequency']
"""
_mt._get_metric_tracker().track('sgraph.get_fields')
return self.get_vertex_fields() + self.get_edge_fields()
def get_vertex_fields(self):
"""
Return a list of vertex attribute fields in the SGraph.
Returns
-------
out : list
Names of fields contained in the vertex data.
See Also
--------
get_fields, get_edge_fields
Examples
--------
>>> from graphlab import SGraph, Vertex, Edge
>>> g = SGraph()
>>> verts = [Vertex(0, attr={'name': 'alex'}),
Vertex(1, attr={'name': 'barbara'})]
>>> g = g.add_vertices(verts)
>>> g = g.add_edges(Edge(0, 1, attr={'frequency': 6}))
>>> fields = g.get_vertex_fields()
['__id', 'name']
"""
        _mt._get_metric_tracker().track('sgraph.get_vertex_fields')
with cython_context():
return self.__proxy__.get_vertex_fields()
def get_edge_fields(self):
"""
Return a list of edge attribute fields in the graph.
Returns
-------
out : list
            Names of fields contained in the edge data.
See Also
--------
get_fields, get_vertex_fields
Examples
--------
>>> from graphlab import SGraph, Vertex, Edge
>>> g = SGraph()
>>> verts = [Vertex(0, attr={'name': 'alex'}),
Vertex(1, attr={'name': 'barbara'})]
>>> g = g.add_vertices(verts)
>>> g = g.add_edges(Edge(0, 1, attr={'frequency': 6}))
        >>> fields = g.get_edge_fields()
['__src_id', '__dst_id', 'frequency']
"""
_mt._get_metric_tracker().track('sgraph.get_edge_fields')
with cython_context():
return self.__proxy__.get_edge_fields()
def select_fields(self, fields):
"""
Return a new SGraph with only the selected fields. Other fields are
discarded, while fields that do not exist in the SGraph are ignored.
Parameters
----------
fields : string | list [string]
A single field name or a list of field names to select.
Returns
-------
out : SGraph
A new graph whose vertex and edge data are projected to the selected
fields.
See Also
--------
get_fields, get_vertex_fields, get_edge_fields
Examples
--------
>>> from graphlab import SGraph, Vertex
>>> verts = [Vertex(0, attr={'breed': 'labrador', 'age': 5}),
Vertex(1, attr={'breed': 'labrador', 'age': 3}),
Vertex(2, attr={'breed': 'vizsla', 'age': 8})]
>>> g = SGraph()
>>> g = g.add_vertices(verts)
>>> g2 = g.select_fields(fields=['breed'])
"""
_mt._get_metric_tracker().track('sgraph.select_fields')
if (type(fields) is str):
fields = [fields]
if not isinstance(fields, list) or not all(type(x) is str for x in fields):
raise TypeError('\"fields\" must be a str or list[str]')
vfields = self.__proxy__.get_vertex_fields()
efields = self.__proxy__.get_edge_fields()
selected_vfields = []
selected_efields = []
for f in fields:
found = False
if f in vfields:
selected_vfields.append(f)
found = True
if f in efields:
selected_efields.append(f)
found = True
if not found:
raise ValueError('Field %s not in graph' % f)
with cython_context():
proxy = self.__proxy__
proxy = proxy.select_vertex_fields(selected_vfields)
proxy = proxy.select_edge_fields(selected_efields)
return SGraph(_proxy=proxy)
def triple_apply(self, triple_apply_fn, mutated_fields, input_fields=None):
'''
Apply a transform function to each edge and its associated source and
target vertices in parallel. Each edge is visited once and in parallel.
Modification to vertex data is protected by lock. The effect on the
returned SGraph is equivalent to the following pseudocode:
>>> PARALLEL FOR (source, edge, target) AS triple in G:
... LOCK (triple.source, triple.target)
... (source, edge, target) = triple_apply_fn(triple)
... UNLOCK (triple.source, triple.target)
... END PARALLEL FOR
Parameters
----------
triple_apply_fn : function : (dict, dict, dict) -> (dict, dict, dict)
The function to apply to each triple of (source_vertex, edge,
target_vertex). This function must take as input a tuple of
(source_data, edge_data, target_data) and return a tuple of
            (new_source_data, new_edge_data, new_target_data). All variables in
            both tuples must be of dict type.
This can also be a toolkit extension function which is compiled
as a native shared library using SDK.
mutated_fields : list[str] | str
Fields that ``triple_apply_fn`` will mutate. Note: columns that are
actually mutated by the triple apply function but not specified in
``mutated_fields`` will have undetermined effects.
input_fields : list[str] | str, optional
Fields that ``triple_apply_fn`` will have access to.
The default is ``None``, which grants access to all fields.
``mutated_fields`` will always be included in ``input_fields``.
Returns
-------
out : SGraph
A new SGraph with updated vertex and edge data. Only fields
specified in the ``mutated_fields`` parameter are updated.
Notes
-----
- ``triple_apply`` does not currently support creating new fields in the
lambda function.
Examples
--------
Import graphlab-create and set up the graph.
>>> edges = graphlab.SFrame({'source': range(9), 'dest': range(1, 10)})
>>> g = graphlab.SGraph()
>>> g = g.add_edges(edges, src_field='source', dst_field='dest')
>>> g.vertices['degree'] = 0
Define the function to apply to each (source_node, edge, target_node)
triple.
>>> def degree_count_fn (src, edge, dst):
src['degree'] += 1
dst['degree'] += 1
return (src, edge, dst)
Apply the function to the SGraph.
>>> g = g.triple_apply(degree_count_fn, mutated_fields=['degree'])
Using native toolkit extension function:
.. code-block:: c++
#include <graphlab/sdk/toolkit_function_macros.hpp>
#include <vector>
using namespace graphlab;
std::vector<variant_type> connected_components_parameterized(
std::map<std::string, flexible_type>& src,
std::map<std::string, flexible_type>& edge,
std::map<std::string, flexible_type>& dst,
std::string column) {
if (src[column] < dst[column]) dst[column] = src[column];
else src[column] = dst[column];
return {to_variant(src), to_variant(edge), to_variant(dst)};
}
BEGIN_FUNCTION_REGISTRATION
REGISTER_FUNCTION(connected_components_parameterized, "src", "edge", "dst", "column");
END_FUNCTION_REGISTRATION
compiled into example.so
>>> from example import connected_components_parameterized as cc
>>> e = gl.SFrame({'__src_id':[1,2,3,4,5], '__dst_id':[3,1,2,5,4]})
>>> g = gl.SGraph().add_edges(e)
>>> g.vertices['cid'] = g.vertices['__id']
>>> for i in range(2):
... g = g.triple_apply(lambda src, edge, dst: cc(src, edge, dst, 'cid'), ['cid'], ['cid'])
>>> g.vertices['cid']
dtype: int
Rows: 5
[4, 1, 1, 1, 4]
'''
_mt._get_metric_tracker().track('sgraph.triple_apply')
assert inspect.isfunction(triple_apply_fn), "Input must be a function"
if not (type(mutated_fields) is list or type(mutated_fields) is str):
raise TypeError('mutated_fields must be str or list of str')
if not (input_fields is None or type(input_fields) is list or type(input_fields) is str):
raise TypeError('input_fields must be str or list of str')
if type(mutated_fields) == str:
mutated_fields = [mutated_fields]
        if len(mutated_fields) == 0:
raise ValueError('mutated_fields cannot be empty')
for f in ['__id', '__src_id', '__dst_id']:
if f in mutated_fields:
raise ValueError('mutated_fields cannot contain %s' % f)
all_fields = self.get_fields()
if not set(mutated_fields).issubset(set(all_fields)):
extra_fields = list(set(mutated_fields).difference(set(all_fields)))
raise ValueError('graph does not contain fields: %s' % str(extra_fields))
# select input fields
if input_fields is None:
input_fields = self.get_fields()
elif type(input_fields) is str:
input_fields = [input_fields]
# make input fields a superset of mutated_fields
input_fields_set = set(input_fields + mutated_fields)
input_fields = [x for x in self.get_fields() if x in input_fields_set]
g = self.select_fields(input_fields)
nativefn = None
try:
from .. import extensions
nativefn = extensions._build_native_function_call(triple_apply_fn)
except:
            # failures are fine; we just fall through to the next few phases
pass
if nativefn is not None:
with cython_context():
return SGraph(_proxy=g.__proxy__.lambda_triple_apply_native(nativefn, mutated_fields))
else:
with cython_context():
return SGraph(_proxy=g.__proxy__.lambda_triple_apply(triple_apply_fn, mutated_fields))
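        # Illustrative sketch (not from the GraphLab docs) of a triple_apply that
        # accumulates a weighted degree; 'weight' and 'wdegree' are assumed field names:
        #
        #   g.vertices['wdegree'] = 0.0
        #   def weighted_degree(src, edge, dst):
        #       src['wdegree'] += edge['weight']
        #       dst['wdegree'] += edge['weight']
        #       return (src, edge, dst)
        #   g = g.triple_apply(weighted_degree, mutated_fields=['wdegree'])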
def save(self, filename, format='auto'):
"""
Save the SGraph to disk. If the graph is saved in binary format, the
graph can be re-loaded using the :py:func:`load_sgraph` method.
Alternatively, the SGraph can be saved in JSON format for a
human-readable and portable representation.
Parameters
----------
filename : string
Filename to use when saving the file. It can be either a local or
remote url.
format : {'auto', 'binary', 'json'}, optional
File format. If not specified, the format is detected automatically
based on the filename. Note that JSON format graphs cannot be
re-loaded with :py:func:`load_sgraph`.
See Also
--------
load_sgraph
Examples
--------
>>> g = graphlab.SGraph()
>>> g = g.add_vertices([graphlab.Vertex(i) for i in range(5)])
Save and load in binary format.
>>> g.save('mygraph')
>>> g2 = graphlab.load_graph('mygraph')
Save in JSON format.
>>> g.save('mygraph.json', format='json')
"""
_mt._get_metric_tracker().track('sgraph.save')
        if format == 'auto':
if filename.endswith(('.json', '.json.gz')):
format = 'json'
else:
format = 'binary'
if format not in ['binary', 'json', 'csv']:
raise ValueError('Invalid format: %s. Supported formats are: %s'
% (format, ['binary', 'json', 'csv']))
with cython_context():
self.__proxy__.save_graph(_make_internal_url(filename), format)
def show(self, vlabel=None, vlabel_hover=False, vcolor=[0.522, 0.741, 0.],
highlight={}, highlight_color=[0.69, 0., 0.498], node_size=300,
elabel=None, elabel_hover=False, ecolor=[0.37, 0.33, 0.33],
ewidth=1, v_offset=0.03, h_offset=0., arrows=False,
vertex_positions=None):
"""
show(vlabel=None, vlabel_hover=False, vcolor=[0.522, 0.741, 0.], highlight={}, highlight_color=[0.69, 0., 0.498], node_size=300, elabel=None, elabel_hover=False, ecolor=[0.37, 0.33, 0.33], ewidth=1, v_offset=0.03, h_offset=0., arrows=False, vertex_positions=None)
Visualize the SGraph with GraphLab Create :mod:`~graphlab.canvas`. This
function starts Canvas if it is not already running. If the graph has
already been plotted, this function will update the plot. SGraphs must
have fewer than 1,000 edges and 1,000 vertices to be visualized in
Canvas.
Parameters
----------
vlabel : string, optional
Field name for the label on each vertex. The default is None,
which omits vertex labels. Set to 'id' to use the vertex ID as the
label.
vlabel_hover : bool, optional
If True, vertex labels, if specified, appear only on mouse hover.
            Otherwise (the default), vertex labels, if specified, are always
            visible.
vcolor : list[float], optional
RGB triple for the primary vertex color. Default is green
([0.522, 0.741, 0.]).
highlight : dict or list or SArray, optional
As a dict, mapping of Vertex ID to RGB color triple (list of float,
as in vcolor).
As a list or SArray (DEPRECATED): Vertex IDs to highlight in
a different color.
highlight_color : list[float], optional
RGB triple for the color of highlighted vertices, when the
highlighted parameter is a list or SArray. Default is fuchsia
([0.69, 0.,0.498]). For fine-grained control over vertex coloring,
use the `highlight` parameter with a dictionary of Vertex IDs and
color values.
node_size : int, optional
Size of plotted vertices.
elabel : string, optional
Field name for the label on each edge.
elabel_hover : bool, optional
If True, edge labels, if specified, appear only on mouse hover.
Otherwise (the default), specified edge labels are always visible.
ecolor : string, optional
RGB triple for edge color. Default is gray ([0.37, 0.33, 0.33]).
ewidth : int, optional
Edge width.
v_offset : float, optional
Vertical offset of vertex labels, as a fraction of total plot
height. For example, the default of 0.03 moves the label 3% of the
plot height higher in the canvas.
h_offset : float, optional
Horizontal offset of vertex labels, as a fraction of total plot
width. For example, an offset of 0.03 moves the label 3% of the plot
width to the right. Default is 0.0.
arrows : bool, optional
If True, draw arrows indicating edge direction.
vertex_positions : tuple, optional
If a 2-element tuple of column names in self.vertices is specified,
those two columns will be used as the X and Y coordinates of
vertices in the graph layout. The 2d space represented by the X and
Y coordinates will be translated to a square display coordinate
space, preserving aspect ratio. If you want to fill both dimensions
entirely, normalize the positions to represent a square 2d space.
If vertex_positions is not specified, vertices will be arranged
according to a standard graph layout algorithm without regard to
vertex or edge attributes.
See Also
--------
canvas
Notes
-----
- Graphs with more than 1,000 vertices or 1,000 edges cannot be
displayed as-is. For such graphs, construct a subgraph by selecting
some vertices and edges, then call this method on the result.
- See the `user guide
<https://dato.com/learn/userguide/sframe/visualization.html>`_ for more details and extended examples.
Examples
--------
>>> g = graphlab.SGraph()
        >>> g = g.add_edges([graphlab.Edge(i, i+1) for i in range(5)])
>>> g.show(highlight=[2, 3], vlabel='id', arrows=True)
"""
from ..visualization.show import show
show(self,
vlabel=vlabel,
vlabel_hover=vlabel_hover,
vcolor=vcolor,
highlight=highlight,
highlight_color=highlight_color,
node_size=node_size,
elabel=elabel,
elabel_hover=elabel_hover,
ecolor=ecolor,
ewidth=ewidth,
v_offset=v_offset,
h_offset=h_offset,
arrows=arrows,
vertex_positions=vertex_positions)
def get_neighborhood(self, ids, radius=1, full_subgraph=True):
"""
Retrieve the graph neighborhood around a set of vertices, ignoring edge
directions. Note that setting radius greater than two often results in a
time-consuming query for a very large subgraph.
Parameters
----------
ids : list [int | float | str]
List of target vertex IDs.
radius : int, optional
Radius of the neighborhood. Every vertex in the returned subgraph is
reachable from at least one of the target vertices on a path of
length no longer than ``radius``. Setting radius larger than 2 may
result in a very large subgraph.
full_subgraph : bool, optional
If True, return all edges between vertices in the returned
neighborhood. The result is also known as the subgraph induced by
the target nodes' neighbors, or the egocentric network for the
target nodes. If False, return only edges on paths of length <=
``radius`` from the target node, also known as the reachability
graph.
Returns
-------
out : Graph
The subgraph with the neighborhoods around the target vertices.
See Also
--------
get_edges, get_vertices
References
----------
        - Marsden, P. (2002) `Egocentric and sociocentric measures of network
          centrality <http://www.sciencedirect.com/science/article/pii/S0378873302000163>`_.
- `Wikipedia - Reachability <http://en.wikipedia.org/wiki/Reachability>`_
Examples
--------
>>> sf_edge = graphlab.SFrame({'source': range(9), 'dest': range(1, 10)})
>>> g = graphlab.SGraph()
>>> g = g.add_edges(sf_edge, src_field='source', dst_field='dest')
>>> subgraph = g.get_neighborhood(ids=[1, 7], radius=2,
full_subgraph=True)
"""
_mt._get_metric_tracker().track('sgraph.get_neighborhood')
verts = ids
## find the vertices within radius (and the path edges)
for i in range(radius):
edges_out = self.get_edges(src_ids=verts)
edges_in = self.get_edges(dst_ids=verts)
verts = list(edges_in['__src_id']) + list(edges_in['__dst_id']) + \
list(edges_out['__src_id']) + list(edges_out['__dst_id'])
verts = list(set(verts))
## make a new graph to return and add the vertices
g = SGraph()
g = g.add_vertices(self.get_vertices(verts), vid_field='__id')
## add the requested edge set
if full_subgraph is True:
induced_edge_out = self.get_edges(src_ids=verts)
induced_edge_in = self.get_edges(dst_ids=verts)
df_induced = induced_edge_out.append(induced_edge_in)
df_induced = df_induced.groupby(df_induced.column_names(), {})
verts_sa = SArray(list(verts))
edges = df_induced.filter_by(verts_sa, "__src_id")
edges = edges.filter_by(verts_sa, "__dst_id")
else:
path_edges = edges_out.append(edges_in)
edges = path_edges.groupby(path_edges.column_names(), {})
g = g.add_edges(edges, src_field='__src_id', dst_field='__dst_id')
return g
#/**************************************************************************/
#/* */
#/* Module Function */
#/* */
#/**************************************************************************/
def load_graph(filename, format='binary', delimiter='auto'):
import warnings
warnings.warn("load_graph has been renamed to load_sgraph. This function will be removed in the next release.", PendingDeprecationWarning)
return load_sgraph(filename, format=format)
def load_sgraph(filename, format='binary', delimiter='auto'):
"""
Load SGraph from text file or previously saved SGraph binary.
Parameters
----------
filename : string
Location of the file. Can be a local path or a remote URL.
format : {'binary', 'snap', 'csv', 'tsv'}, optional
        Format of the file to load.
- 'binary': native graph format obtained from `SGraph.save`.
- 'snap': tab or space separated edge list format with comments, used in
the `Stanford Network Analysis Platform <http://snap.stanford.edu/snap/>`_.
- 'csv': comma-separated edge list without header or comments.
- 'tsv': tab-separated edge list without header or comments.
delimiter : str, optional
        Delimiter used in the 'snap', 'csv' or 'tsv' formats. Each format
        has a default delimiter, but sometimes it is useful to override
        the default.
Returns
-------
out : SGraph
Loaded SGraph.
See Also
--------
SGraph, SGraph.save
Examples
--------
>>> g = graphlab.SGraph().add_vertices([graphlab.Vertex(i) for i in range(5)])
Save and load in binary format.
>>> g.save('mygraph')
>>> g2 = graphlab.load_graph('mygraph')
"""
_mt._get_metric_tracker().track('sgraph.load_sgraph')
if not format in ['binary', 'snap', 'csv', 'tsv']:
raise ValueError('Invalid format: %s' % format)
with cython_context():
g = None
        if format == 'binary':
proxy = glconnect.get_unity().load_graph(_make_internal_url(filename))
g = SGraph(_proxy=proxy)
        elif format == 'snap':
if delimiter == 'auto':
delimiter = '\t'
sf = SFrame.read_csv(filename, comment_char='#', delimiter=delimiter,
header=False, column_type_hints=int)
g = SGraph().add_edges(sf, 'X1', 'X2')
        elif format == 'csv':
if delimiter == 'auto':
delimiter = ','
sf = SFrame.read_csv(filename, header=False, delimiter=delimiter)
g = SGraph().add_edges(sf, 'X1', 'X2')
        elif format == 'tsv':
if delimiter == 'auto':
delimiter = '\t'
sf = SFrame.read_csv(filename, header=False, delimiter=delimiter)
g = SGraph().add_edges(sf, 'X1', 'X2')
g.summary() # materialize
return g
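# A minimal usage sketch (illustrative only; 'edges.snap' is an assumed file name):
#
#   g = load_sgraph('edges.snap', format='snap', delimiter=' ')
#   print(g.summary())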
#/**************************************************************************/
#/* */
#/* Helper Function */
#/* */
#/**************************************************************************/
def _vertex_list_to_dataframe(ls, id_column_name):
"""
Convert a list of vertices into dataframe.
"""
assert HAS_PANDAS, 'Cannot use dataframe because Pandas is not available or version is too low.'
cols = reduce(set.union, (set(v.attr.keys()) for v in ls))
df = pd.DataFrame({id_column_name: [v.vid for v in ls]})
for c in cols:
df[c] = [v.attr.get(c) for v in ls]
return df
def _vertex_list_to_sframe(ls, id_column_name):
"""
Convert a list of vertices into an SFrame.
"""
sf = SFrame()
if type(ls) == list:
cols = reduce(set.union, (set(v.attr.keys()) for v in ls))
sf[id_column_name] = [v.vid for v in ls]
for c in cols:
sf[c] = [v.attr.get(c) for v in ls]
elif type(ls) == Vertex:
sf[id_column_name] = [ls.vid]
for col, val in ls.attr.iteritems():
sf[col] = [val]
else:
        raise TypeError('Vertices type {} is not supported.'.format(type(ls)))
return sf
def _edge_list_to_dataframe(ls, src_column_name, dst_column_name):
"""
Convert a list of edges into dataframe.
"""
assert HAS_PANDAS, 'Cannot use dataframe because Pandas is not available or version is too low.'
cols = reduce(set.union, (set(e.attr.keys()) for e in ls))
df = pd.DataFrame({
src_column_name: [e.src_vid for e in ls],
dst_column_name: [e.dst_vid for e in ls]})
for c in cols:
df[c] = [e.attr.get(c) for e in ls]
return df
def _edge_list_to_sframe(ls, src_column_name, dst_column_name):
"""
Convert a list of edges into an SFrame.
"""
sf = SFrame()
if type(ls) == list:
        cols = reduce(set.union, (set(e.attr.keys()) for e in ls))
sf[src_column_name] = [e.src_vid for e in ls]
sf[dst_column_name] = [e.dst_vid for e in ls]
for c in cols:
sf[c] = [e.attr.get(c) for e in ls]
elif type(ls) == Edge:
sf[src_column_name] = [ls.src_vid]
sf[dst_column_name] = [ls.dst_vid]
else:
        raise TypeError('Edges type {} is not supported.'.format(type(ls)))
return sf
def _dataframe_to_vertex_list(df):
"""
Convert dataframe into list of vertices, assuming that vertex ids are stored in _VID_COLUMN.
"""
cols = df.columns
if len(cols):
assert _VID_COLUMN in cols, "Vertex DataFrame must contain column %s" % _VID_COLUMN
df = df[cols].T
ret = [Vertex(None, _series=df[col]) for col in df]
return ret
else:
return []
def _dataframe_to_edge_list(df):
"""
Convert dataframe into list of edges, assuming that source and target ids are stored in _SRC_VID_COLUMN, and _DST_VID_COLUMN respectively.
"""
cols = df.columns
if len(cols):
        assert _SRC_VID_COLUMN in cols, "Edge DataFrame must contain column %s" % _SRC_VID_COLUMN
        assert _DST_VID_COLUMN in cols, "Edge DataFrame must contain column %s" % _DST_VID_COLUMN
df = df[cols].T
ret = [Edge(None, None, _series=df[col]) for col in df]
return ret
else:
return []
def _vertex_data_to_sframe(data, vid_field):
"""
Convert data into a vertex data sframe. Using vid_field to identify the id
column. The returned sframe will have id column name '__id'.
"""
if isinstance(data, SFrame):
# '__id' already in the sframe, and it is ok to not specify vid_field
if vid_field is None and _VID_COLUMN in data.column_names():
return data
if vid_field is None:
raise ValueError("vid_field must be specified for SFrame input")
data_copy = copy.copy(data)
data_copy.rename({vid_field: _VID_COLUMN})
return data_copy
if type(data) == Vertex or type(data) == list:
return _vertex_list_to_sframe(data, '__id')
elif HAS_PANDAS and type(data) == pd.DataFrame:
if vid_field is None:
# using the dataframe index as vertex id
if data.index.is_unique:
if not ("index" in data.columns):
# pandas reset_index() will insert a new column of name "index".
sf = SFrame(data.reset_index()) # "index"
sf.rename({'index': _VID_COLUMN})
return sf
else:
# pandas reset_index() will insert a new column of name "level_0" if there exists a column named "index".
sf = SFrame(data.reset_index()) # "level_0"
sf.rename({'level_0': _VID_COLUMN})
return sf
else:
raise ValueError("Index of the vertices dataframe is not unique, \
try specifying vid_field name to use a column for vertex ids.")
else:
sf = SFrame(data)
if _VID_COLUMN in sf.column_names():
raise ValueError('%s reserved vid column name already exists in the SFrame' % _VID_COLUMN)
sf.rename({vid_field: _VID_COLUMN})
return sf
else:
        raise TypeError('Vertices type %s is not supported.' % str(type(data)))
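# Sketch of the two accepted pandas inputs (column names below are illustrative):
#
#   df_with_ids = pd.DataFrame({'vid': [0, 1], 'breed': ['lab', 'vizsla']})
#   _vertex_data_to_sframe(df_with_ids, vid_field='vid')   # 'vid' is renamed to '__id'
#
#   df_by_index = pd.DataFrame({'breed': ['lab', 'vizsla']})
#   _vertex_data_to_sframe(df_by_index, vid_field=None)    # the row index becomes '__id'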
def _edge_data_to_sframe(data, src_field, dst_field):
"""
Convert data into an edge data sframe. Using src_field and dst_field to
identify the source and target id column. The returned sframe will have id
column name '__src_id', '__dst_id'
"""
if isinstance(data, SFrame):
        # '__src_id' and '__dst_id' already in the sframe, and
# it is ok to not specify src_field and dst_field
if src_field is None and dst_field is None and \
_SRC_VID_COLUMN in data.column_names() and \
_DST_VID_COLUMN in data.column_names():
return data
if src_field is None:
raise ValueError("src_field must be specified for SFrame input")
if dst_field is None:
raise ValueError("dst_field must be specified for SFrame input")
data_copy = copy.copy(data)
if src_field == _DST_VID_COLUMN and dst_field == _SRC_VID_COLUMN:
# special case when src_field = "__dst_id" and dst_field = "__src_id"
# directly renaming will cause name collision
dst_id_column = data_copy[_DST_VID_COLUMN]
del data_copy[_DST_VID_COLUMN]
data_copy.rename({_SRC_VID_COLUMN: _DST_VID_COLUMN})
data_copy[_SRC_VID_COLUMN] = dst_id_column
else:
data_copy.rename({src_field: _SRC_VID_COLUMN, dst_field: _DST_VID_COLUMN})
return data_copy
elif HAS_PANDAS and type(data) == pd.DataFrame:
if src_field is None:
raise ValueError("src_field must be specified for Pandas input")
if dst_field is None:
raise ValueError("dst_field must be specified for Pandas input")
sf = SFrame(data)
if src_field == _DST_VID_COLUMN and dst_field == _SRC_VID_COLUMN:
# special case when src_field = "__dst_id" and dst_field = "__src_id"
# directly renaming will cause name collision
            dst_id_column = sf[_DST_VID_COLUMN]
del sf[_DST_VID_COLUMN]
sf.rename({_SRC_VID_COLUMN: _DST_VID_COLUMN})
sf[_SRC_VID_COLUMN] = dst_id_column
else:
sf.rename({src_field: _SRC_VID_COLUMN, dst_field: _DST_VID_COLUMN})
return sf
elif type(data) == Edge:
return _edge_list_to_sframe([data], _SRC_VID_COLUMN, _DST_VID_COLUMN)
elif type(data) == list:
return _edge_list_to_sframe(data, _SRC_VID_COLUMN, _DST_VID_COLUMN)
else:
        raise TypeError('Edges type %s is not supported.' % str(type(data)))
## Hack: overriding GFrame class name to make it appears as SFrame##
GFrame.__name__ = SFrame.__name__
GFrame.__module__ = SFrame.__module__
| bsd-3-clause |
GraphProcessor/CommunityDetectionCodes | Prensentation/problem_definition/problem_vis.py | 1 | 2793 | import networkx as nx
import matplotlib.pyplot as plt
from networkx.drawing.nx_agraph import graphviz_layout
def vis_input(graph):
pos = graphviz_layout(graph)
nx.draw(graph, with_labels=True, pos=pos, font_size=16, node_size=600, alpha=0.8, width=4,
edge_color='grey', node_color='white')
edge_dict = {}
for edge in graph.edges():
edge_dict[edge] = graph[edge[0]][edge[1]]['w']
nx.draw_networkx_edge_labels(graph, pos, edge_labels=edge_dict, font_size=14, alpha=0.1, font_color='blue')
plt.axis('off')
plt.savefig('./demo_graph.png', bbox_inches='tight', pad_inches=0, transparent=True)
plt.show()
def vis_output(graph, comm, comm_color, idx):
pos = graphviz_layout(graph)
nx.draw(graph, with_labels=True, pos=pos, font_size=16, node_size=600, alpha=0.8, width=4,
edge_color='grey', node_color='white')
nx.draw_networkx_nodes(graph, with_labels=True, pos=pos, font_size=16, node_size=600, alpha=0.8, width=4,
node_color=comm_color, nodelist=comm)
edge_dict = {}
for edge in graph.edges():
edge_dict[edge] = graph[edge[0]][edge[1]]['w']
nx.draw_networkx_edge_labels(graph, pos, edge_labels=edge_dict, font_size=14, alpha=0.1, font_color='blue')
plt.axis('off')
plt.savefig('./output_graph' + str(idx) + '.png', bbox_inches='tight', pad_inches=0, transparent=True)
plt.show()
def vis_post_output(graph, comm_list, color_list):
pos = graphviz_layout(graph)
nx.draw(graph, with_labels=True, pos=pos, font_size=16, node_size=600, alpha=0.8, width=4,
edge_color='grey', node_color='white')
for idx, comm in enumerate(comm_list):
nx.draw_networkx_nodes(graph, with_labels=True, pos=pos, font_size=16, node_size=600, alpha=0.8, width=4,
node_color=color_list[idx], nodelist=comm)
edge_dict = {}
for edge in graph.edges():
edge_dict[edge] = graph[edge[0]][edge[1]]['w']
nx.draw_networkx_edge_labels(graph, pos, edge_labels=edge_dict, font_size=14, alpha=0.1, font_color='blue')
plt.axis('off')
plt.savefig('./output_post_graph' + '.png', bbox_inches='tight', pad_inches=0, transparent=True)
plt.show()
if __name__ == '__main__':
graph = nx.read_edgelist('demo_input_graph.txt', nodetype=int, data=(('w', float),))
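    # 'demo_input_graph.txt' is expected to contain one weighted edge per line,
    # e.g. "0 1 0.5" (two integer node ids followed by a float weight 'w')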
print graph.edges(data=True)
vis_input(graph)
with open('demo_cis_output_result.txt') as ifs:
eval_str = ifs.readline()
comm_list = eval(eval_str)
color_list = ['r', 'b', 'magenta']
for idx, comm in enumerate(comm_list):
print comm, idx
vis_output(graph, comm, color_list[idx], idx)
comm_list.append([1])
vis_post_output(graph, comm_list, color_list)
| gpl-2.0 |
sdiazb/airflow | airflow/hooks/base_hook.py | 23 | 2895 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from builtins import object
import logging
import os
import random
from airflow import settings
from airflow.models import Connection
from airflow.exceptions import AirflowException
CONN_ENV_PREFIX = 'AIRFLOW_CONN_'
class BaseHook(object):
"""
Abstract base class for hooks, hooks are meant as an interface to
interact with external systems. MySqlHook, HiveHook, PigHook return
    objects that can handle the connection and interaction with specific
instances of these systems, and expose consistent methods to interact
with them.
"""
def __init__(self, source):
pass
@classmethod
def _get_connections_from_db(cls, conn_id):
session = settings.Session()
db = (
session.query(Connection)
.filter(Connection.conn_id == conn_id)
.all()
)
session.expunge_all()
session.close()
if not db:
raise AirflowException(
"The conn_id `{0}` isn't defined".format(conn_id))
return db
@classmethod
def _get_connection_from_env(cls, conn_id):
environment_uri = os.environ.get(CONN_ENV_PREFIX + conn_id.upper())
conn = None
if environment_uri:
conn = Connection(conn_id=conn_id, uri=environment_uri)
return conn
@classmethod
def get_connections(cls, conn_id):
conn = cls._get_connection_from_env(conn_id)
if conn:
conns = [conn]
else:
conns = cls._get_connections_from_db(conn_id)
return conns
@classmethod
def get_connection(cls, conn_id):
conn = random.choice(cls.get_connections(conn_id))
if conn.host:
logging.info("Using connection to: " + conn.host)
return conn
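    # Illustrative sketch (not part of Airflow) of resolving a connection that is
    # defined through an environment variable; the URI below is an assumption:
    #
    #   os.environ['AIRFLOW_CONN_MY_DB'] = 'postgres://user:pass@host:5432/db'
    #   conn = BaseHook.get_connection('my_db')  # env connections take precedence over the DB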
@classmethod
def get_hook(cls, conn_id):
connection = cls.get_connection(conn_id)
return connection.get_hook()
def get_conn(self):
raise NotImplementedError()
def get_records(self, sql):
raise NotImplementedError()
def get_pandas_df(self, sql):
raise NotImplementedError()
def run(self, sql):
raise NotImplementedError()
| apache-2.0 |
cc272309126/panda3d | samples/carousel/main.py | 25 | 9571 | #!/usr/bin/env python
# Author: Shao Zhang, Phil Saltzman, and Eddie Canaan
# Last Updated: 2015-03-13
#
# This tutorial will demonstrate some uses for intervals in Panda
# to move objects in your panda world.
# Intervals are tools that change a value of something, like position,
# rotation or anything else, linearly, over a set period of time. They can be
# also be combined to work in sequence or in Parallel
#
# In this lesson, we will simulate a carousel in motion using intervals.
# The carousel will spin using an hprInterval while 4 pandas will represent
# the horses on a traditional carousel. The 4 pandas will rotate with the
# carousel and also move up and down on their poles using a LerpFunc interval.
# Finally there will also be lights on the outer edge of the carousel that
# will turn on and off by switching their texture with intervals in Sequence
# and Parallel
from direct.showbase.ShowBase import ShowBase
from panda3d.core import AmbientLight, DirectionalLight, LightAttrib
from panda3d.core import NodePath
from panda3d.core import LVector3
from direct.interval.IntervalGlobal import * # Needed to use Intervals
from direct.gui.DirectGui import *
# Importing math constants and functions
from math import pi, sin
class CarouselDemo(ShowBase):
def __init__(self):
# Initialize the ShowBase class from which we inherit, which will
# create a window and set up everything we need for rendering into it.
ShowBase.__init__(self)
# This creates the on screen title that is in every tutorial
self.title = OnscreenText(text="Panda3D: Tutorial - Carousel",
parent=base.a2dBottomCenter,
fg=(1, 1, 1, 1), shadow=(0, 0, 0, .5),
pos=(0, .1), scale=.1)
base.disableMouse() # Allow manual positioning of the camera
camera.setPosHpr(0, -8, 2.5, 0, -9, 0) # Set the cameras' position
# and orientation
self.loadModels() # Load and position our models
self.setupLights() # Add some basic lighting
self.startCarousel() # Create the needed intervals and put the
# carousel into motion
def loadModels(self):
# Load the carousel base
self.carousel = loader.loadModel("models/carousel_base")
self.carousel.reparentTo(render) # Attach it to render
# Load the modeled lights that are on the outer rim of the carousel
# (not Panda lights)
# There are 2 groups of lights. At any given time, one group will have
# the "on" texture and the other will have the "off" texture.
self.lights1 = loader.loadModel("models/carousel_lights")
self.lights1.reparentTo(self.carousel)
# Load the 2nd set of lights
self.lights2 = loader.loadModel("models/carousel_lights")
# We need to rotate the 2nd so it doesn't overlap with the 1st set.
self.lights2.setH(36)
self.lights2.reparentTo(self.carousel)
# Load the textures for the lights. One texture is for the "on" state,
# the other is for the "off" state.
self.lightOffTex = loader.loadTexture("models/carousel_lights_off.jpg")
self.lightOnTex = loader.loadTexture("models/carousel_lights_on.jpg")
# Create an list (self.pandas) with filled with 4 dummy nodes attached
# to the carousel.
        # This uses a Python concept called "list comprehensions." Check the
        # Python manual for more information on how they work
self.pandas = [self.carousel.attachNewNode("panda" + str(i))
for i in range(4)]
self.models = [loader.loadModel("models/carousel_panda")
for i in range(4)]
self.moves = [0] * 4
for i in range(4):
# set the position and orientation of the ith panda node we just created
# The Z value of the position will be the base height of the pandas.
# The headings are multiplied by i to put each panda in its own position
# around the carousel
self.pandas[i].setPosHpr(0, 0, 1.3, i * 90, 0, 0)
# Load the actual panda model, and parent it to its dummy node
self.models[i].reparentTo(self.pandas[i])
# Set the distance from the center. This distance is based on the way the
# carousel was modeled in Maya
self.models[i].setY(.85)
# Load the environment (Sky sphere and ground plane)
self.env = loader.loadModel("models/env")
self.env.reparentTo(render)
self.env.setScale(7)
# Panda Lighting
def setupLights(self):
# Create some lights and add them to the scene. By setting the lights on
# render they affect the entire scene
# Check out the lighting tutorial for more information on lights
ambientLight = AmbientLight("ambientLight")
ambientLight.setColor((.4, .4, .35, 1))
directionalLight = DirectionalLight("directionalLight")
directionalLight.setDirection(LVector3(0, 8, -2.5))
directionalLight.setColor((0.9, 0.8, 0.9, 1))
render.setLight(render.attachNewNode(directionalLight))
render.setLight(render.attachNewNode(ambientLight))
# Explicitly set the environment to not be lit
self.env.setLightOff()
def startCarousel(self):
# Here's where we actually create the intervals to move the carousel
# The first type of interval we use is one created directly from a NodePath
# This interval tells the NodePath to vary its orientation (hpr) from its
# current value (0,0,0) to (360,0,0) over 20 seconds. Intervals created from
# NodePaths also exist for position, scale, color, and shear
self.carouselSpin = self.carousel.hprInterval(20, LVector3(360, 0, 0))
# Once an interval is created, we need to tell it to actually move.
# start() will cause an interval to play once. loop() will tell an interval
# to repeat once it finished. To keep the carousel turning, we use
# loop()
self.carouselSpin.loop()
# The next type of interval we use is called a LerpFunc interval. It is
# called that becuase it linearly interpolates (aka Lerp) values passed to
# a function over a given amount of time.
# In this specific case, horses on a carousel don't move contantly up,
# suddenly stop, and then contantly move down again. Instead, they start
# slowly, get fast in the middle, and slow down at the top. This motion is
# close to a sine wave. This LerpFunc calls the function oscillatePanda
# (which we will create below), which changes the height of the panda based
# on the sin of the value passed in. In this way we achieve non-linear
# motion by linearly changing the input to a function
for i in range(4):
self.moves[i] = LerpFunc(
self.oscillatePanda, # function to call
duration=3, # 3 second duration
fromData=0, # starting value (in radians)
toData=2 * pi, # ending value (2pi radians = 360 degrees)
# Additional information to pass to
                # self.oscillatePanda
extraArgs=[self.models[i], pi * (i % 2)]
)
# again, we want these to play continuously so we start them with
# loop()
self.moves[i].loop()
# Finally, we combine Sequence, Parallel, Func, and Wait intervals,
# to schedule texture swapping on the lights to simulate the lights turning
# on and off.
# Sequence intervals play other intervals in a sequence. In other words,
# it waits for the current interval to finish before playing the next
# one.
# Parallel intervals play a group of intervals at the same time
# Wait intervals simply do nothing for a given amount of time
# Func intervals simply make a single function call. This is helpful because
# it allows us to schedule functions to be called in a larger sequence. They
# take virtually no time so they don't cause a Sequence to wait.
self.lightBlink = Sequence(
# For the first step in our sequence we will set the on texture on one
# light and set the off texture on the other light at the same time
Parallel(
Func(self.lights1.setTexture, self.lightOnTex, 1),
Func(self.lights2.setTexture, self.lightOffTex, 1)),
Wait(1), # Then we will wait 1 second
# Then we will switch the textures at the same time
Parallel(
Func(self.lights1.setTexture, self.lightOffTex, 1),
Func(self.lights2.setTexture, self.lightOnTex, 1)),
Wait(1) # Then we will wait another second
)
self.lightBlink.loop() # Loop this sequence continuously
def oscillatePanda(self, rad, panda, offset):
# This is the oscillation function mentioned earlier. It takes in a
# degree value, a NodePath to set the height on, and an offset. The
# offset is there so that the different pandas can move opposite to
# each other. The .2 is the amplitude, so the height of the panda will
# vary from -.2 to .2
panda.setZ(sin(rad + offset) * .2)
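        # For reference: with amplitude 0.2 and offsets of 0 and pi, neighbouring pandas
        # move in antiphase, e.g. at rad = pi/2 one sits at +0.2 while the next sits at -0.2.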
demo = CarouselDemo()
demo.run()
| bsd-3-clause |
bromjiri/Presto | trainer/classifier_test.py | 1 | 5292 | import nltk
import collections
from nltk.classify import NaiveBayesClassifier
from nltk.metrics import precision, recall, f_measure
import datetime
import pickle
from statistics import mode
import trainer.corpora as crp
import trainer.features as ftr
from nltk.classify import ClassifierI
from nltk.classify.scikitlearn import SklearnClassifier
from sklearn.naive_bayes import MultinomialNB, BernoulliNB
from sklearn.linear_model import LogisticRegression
from sklearn.svm import LinearSVC, NuSVC
class VoteClassifier(ClassifierI):
def __init__(self, *classifiers):
self._classifiers = classifiers
def classify(self, features):
votes = []
for c in self._classifiers:
v = c.classify(features)
votes.append(v)
return mode(votes)
def confidence(self, features):
votes = []
for c in self._classifiers:
v = c.classify(features)
votes.append(v)
choice_votes = votes.count(mode(votes))
conf = choice_votes / len(votes)
return conf
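        # Illustrative only: with five underlying classifiers voting
        # ['pos', 'pos', 'neg', 'pos', 'neg'], classify() returns 'pos' and
        # confidence() returns 3/5 = 0.6.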
def train(trainfeats, testfeats, nlt = True, skl = True, most = 0):
# print('train on %d instances, test on %d instances' % (len(trainfeats), len(testfeats)))
nltk_output = "none"
sklearn_output = "none"
if nlt:
my_classifier = NaiveBayesClassifier.train(trainfeats)
refsets = collections.defaultdict(set)
testsets = collections.defaultdict(set)
for i, (feats, label) in enumerate(testfeats):
refsets[label].add(i)
observed = my_classifier.classify(feats)
testsets[observed].add(i)
# precision and recall
accuracy = nltk.classify.util.accuracy(my_classifier, testfeats) * 100
pos_prec = precision(refsets['pos'], testsets['pos']) * 100
pos_rec = recall(refsets['pos'], testsets['pos']) * 100
neg_prec = precision(refsets['neg'], testsets['neg']) * 100
neg_rec = recall(refsets['neg'], testsets['neg']) * 100
# round
accuracy = round(accuracy, 1)
pos_prec = round(pos_prec, 1)
pos_rec = round(pos_rec, 1)
neg_prec = round(neg_prec, 1)
neg_rec = round(neg_rec, 1)
# print('pos F-measure:', f_measure(refsets['pos'], testsets['pos']))
# print('neg F-measure:', f_measure(refsets['neg'], testsets['neg']))
my_classifier.show_most_informative_features(most)
nltk_output = "nlt, " + str(accuracy) + ", " + str(pos_prec) + ", " + str(neg_prec) + ", " + str(
pos_rec) + ", " + str(neg_rec) + "\n"
if skl:
MNB_classifier = SklearnClassifier(MultinomialNB())
MNB_classifier._vectorizer.sort = False
MNB_classifier.train(trainfeats)
mnb = (nltk.classify.accuracy(MNB_classifier, testfeats)) * 100
mnb = round(mnb, 1)
print(mnb)
BernoulliNB_classifier = SklearnClassifier(BernoulliNB())
BernoulliNB_classifier._vectorizer.sort = False
BernoulliNB_classifier.train(trainfeats)
bnb = (nltk.classify.accuracy(BernoulliNB_classifier, testfeats)) * 100
bnb = round(bnb, 1)
print(bnb)
LogisticRegression_classifier = SklearnClassifier(LogisticRegression())
LogisticRegression_classifier._vectorizer.sort = False
LogisticRegression_classifier.train(trainfeats)
lr = (nltk.classify.accuracy(LogisticRegression_classifier, testfeats)) * 100
lr = round(lr, 1)
print(lr)
LinearSVC_classifier = SklearnClassifier(LinearSVC())
LinearSVC_classifier._vectorizer.sort = False
LinearSVC_classifier.train(trainfeats)
lsvc = (nltk.classify.accuracy(LinearSVC_classifier, testfeats)) * 100
lsvc = round(lsvc, 1)
print(lsvc)
NuSVC_classifier = SklearnClassifier(NuSVC())
NuSVC_classifier._vectorizer.sort = False
NuSVC_classifier.train(trainfeats)
nsvc = (nltk.classify.accuracy(NuSVC_classifier, testfeats)) * 100
nsvc = round(nsvc, 1)
print(nsvc)
voted_classifier = VoteClassifier(
NuSVC_classifier,
LinearSVC_classifier,
MNB_classifier,
BernoulliNB_classifier,
LogisticRegression_classifier)
voted = (nltk.classify.accuracy(voted_classifier, testfeats)) * 100
voted = round(voted, 1)
print(voted)
sklearn_output = "skl, " + str(mnb) + ", " + str(bnb) + ", " + str(lr) + ", " + str(lsvc) + ", " + str(nsvc) + ", " + str(voted) + "\n"
return (nltk_output, sklearn_output)
if __name__ == '__main__':
COUNT = 5000
cut = int((COUNT/2)*4/5)
corpora = crp.Corpora("stwits", count=COUNT, shuffle=True)
features = ftr.Features(corpora, total=COUNT, stem="porter", bigram=True, stop=True, inf_count=-1, lower=True)
# features = ftr.Features(corpora, total=COUNT, bigram=True, stem="porter")
posfeats = features.get_features_pos()
negfeats = features.get_fearures_neg()
trainfeats = negfeats[:cut] + posfeats[:cut]
testfeats = negfeats[cut:] + posfeats[cut:]
print('train on %d instances, test on %d instances' % (len(trainfeats), len(testfeats)))
nlt, skl = train(trainfeats, testfeats, skl=False, most=50)
print(nlt, skl) | mit |
kaiserroll14/301finalproject | main/pandas/tseries/util.py | 9 | 2955 | from pandas.compat import range, lrange
import numpy as np
import pandas.core.common as com
from pandas.core.frame import DataFrame
import pandas.core.nanops as nanops
def pivot_annual(series, freq=None):
"""
Group a series by years, taking leap years into account.
The output has as many rows as distinct years in the original series,
and as many columns as the length of a leap year in the units corresponding
to the original frequency (366 for daily frequency, 366*24 for hourly...).
    The first column of the output corresponds to Jan. 1st, 00:00:00,
while the last column corresponds to Dec, 31st, 23:59:59.
Entries corresponding to Feb. 29th are masked for non-leap years.
For example, if the initial series has a daily frequency, the 59th column
of the output always corresponds to Feb. 28th, the 61st column to Mar. 1st,
and the 60th column is masked for non-leap years.
    With an hourly initial frequency, the (59*24)th column of the output always
    corresponds to Feb. 28th 23:00, the (61*24)th column to Mar. 1st, 00:00, and
the 24 columns between (59*24) and (61*24) are masked.
If the original frequency is less than daily, the output is equivalent to
``series.convert('A', func=None)``.
Parameters
----------
series : Series
freq : string or None, default None
Returns
-------
annual : DataFrame
"""
index = series.index
year = index.year
years = nanops.unique1d(year)
if freq is not None:
freq = freq.upper()
else:
freq = series.index.freq
if freq == 'D':
width = 366
offset = index.dayofyear - 1
# adjust for leap year
offset[(~isleapyear(year)) & (offset >= 59)] += 1
columns = lrange(1, 367)
# todo: strings like 1/1, 1/25, etc.?
elif freq in ('M', 'BM'):
width = 12
offset = index.month - 1
columns = lrange(1, 13)
elif freq == 'H':
width = 8784
grouped = series.groupby(series.index.year)
defaulted = grouped.apply(lambda x: x.reset_index(drop=True))
defaulted.index = defaulted.index.droplevel(0)
offset = np.asarray(defaulted.index)
offset[~isleapyear(year) & (offset >= 1416)] += 24
columns = lrange(1, 8785)
else:
raise NotImplementedError(freq)
flat_index = (year - years.min()) * width + offset
flat_index = com._ensure_platform_int(flat_index)
values = np.empty((len(years), width))
values.fill(np.nan)
values.put(flat_index, series.values)
return DataFrame(values, index=years, columns=columns)
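# Minimal usage sketch (illustrative, not part of the module); a daily series covering
# leap year 2000 and non-leap year 2001 pivots into two rows of 366 columns, with
# column 60 (Feb 29) holding NaN for 2001:
#
#   import pandas as pd
#   s = pd.Series(np.arange(731.), index=pd.date_range('2000-01-01', periods=731, freq='D'))
#   annual = pivot_annual(s, freq='D')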
def isleapyear(year):
"""
Returns true if year is a leap year.
Parameters
----------
year : integer / sequence
A given (list of) year(s).
"""
year = np.asarray(year)
return np.logical_or(year % 400 == 0,
np.logical_and(year % 4 == 0, year % 100 > 0))
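    # e.g. isleapyear([1900, 2000, 2012, 2014]) -> array([False, True, True, False])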
| gpl-3.0 |
mapsme/omim | tools/python/booking_hotels_quality.py | 20 | 2632 | #!/usr/bin/env python
# coding: utf8
from __future__ import print_function
from collections import namedtuple, defaultdict
from datetime import datetime
from sklearn import metrics
import argparse
import base64
import json
import logging
import matplotlib.pyplot as plt
import os
import pickle
import time
import urllib2
import re
# init logging
logging.basicConfig(level=logging.DEBUG, format='[%(asctime)s] %(levelname)s: %(message)s')
def load_binary_list(path):
"""Loads reference binary classifier output. """
bits = []
with open(path, 'r') as fd:
for line in fd:
if (not line.strip()) or line.startswith('#'):
continue
bits.append(1 if line.startswith('y') else 0)
return bits
def load_score_list(path):
"""Loads list of matching scores. """
scores = []
with open(path, 'r') as fd:
for line in fd:
if (not line.strip()) or line.startswith('#'):
continue
scores.append(float(re.search(r'result score: (\d*\.\d+)', line).group(1)))
return scores
def process_options():
# TODO(mgsergio): Fix description.
parser = argparse.ArgumentParser(description="Download and process booking hotels.")
parser.add_argument("-v", "--verbose", action="store_true", dest="verbose")
parser.add_argument("-q", "--quiet", action="store_false", dest="verbose")
parser.add_argument("--reference_list", dest="reference_list", help="Path to data files")
parser.add_argument("--sample_list", dest="sample_list", help="Name and destination for output file")
parser.add_argument("--show", dest="show", default=False, action="store_true",
help="Show graph for precision and recall")
options = parser.parse_args()
if not options.reference_list or not options.sample_list:
parser.print_help()
exit()
return options
def main():
options = process_options()
reference = load_binary_list(options.reference_list)
sample = load_score_list(options.sample_list)
precision, recall, threshold = metrics.precision_recall_curve(reference, sample)
aa = zip(precision, recall, threshold)
max_by_hmean = max(aa, key=lambda (p, r, t): p*r/(p+r))
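    # Note: p*r/(p+r) equals F1/2, so this picks the threshold that maximizes the F1 score.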
print("Optimal threshold: {2} for precision: {0} and recall: {1}".format(*max_by_hmean))
print("AUC: {0}".format(metrics.roc_auc_score(reference, sample)))
if options.show:
plt.plot(recall, precision)
plt.title("Precision/Recall")
plt.ylabel("Precision")
plt.xlabel("Recall")
plt.show()
if __name__ == "__main__":
main()
| apache-2.0 |
kif/freesas | freesas/align.py | 1 | 17075 | __author__ = "Guillaume Bonamis"
__license__ = "MIT"
__copyright__ = "2015, ESRF"
import os
import sys
import numpy
import matplotlib
# matplotlib.use('Agg')
import matplotlib.pyplot as plt
from freesas.model import SASModel
import itertools
from scipy.optimize import fmin
import logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("log_freesas")
class InputModels:
def __init__(self):
self.inputfiles = []
self.sasmodels = []
self.rfactors = []
self.rmax = None
self.validmodels = []
    def __repr__(self):
return "Preparation of %s models for alignment" % len(self.inputfiles)
def assign_models(self, molecule=None):
"""
        Create SASModels from the pdb files in self.inputfiles and save them in self.sasmodels.
        Center of mass, inertia tensor and canonical parameters are computed for each SASModel.
        :param molecule: optional 2d array, coordinates of the atoms for the model to create
        :return self.sasmodels: list of SASModel
"""
        if not self.inputfiles and (molecule is None or len(molecule) == 0):
logger.error("No input files")
if self.inputfiles:
for inputpdb in self.inputfiles:
model = SASModel()
model.read(inputpdb)
model.centroid()
model.inertiatensor()
model.canonical_parameters()
self.sasmodels.append(model)
if len(self.inputfiles) != len(self.sasmodels):
logger.error("Problem of assignment\n%s models for %s files" % (len(self.sasmodels), len(self.inputfiles)))
        elif molecule is not None and len(molecule) != 0:
model = SASModel()
model.atoms = molecule
model.centroid()
model.inertiatensor()
model.canonical_parameters()
self.sasmodels.append(model)
return self.sasmodels
def rcalculation(self):
"""
        Calculate the maximal value for the R-factor, which is the mean of all the R-factors of
inputs plus 2 times the standard deviation.
R-factors are saved in the attribute self.rfactors, 1d array, and in percentage.
:return rmax: maximal value for the R-factor
"""
if len(self.sasmodels) == 0:
self.assign_models()
models = self.sasmodels
rfactors = numpy.empty(len(models), dtype="float")
for i in range(len(models)):
rfactors[i] = models[i].rfactor
self.rfactors = 100.0 * rfactors
rmax = self.rfactors.mean() + 2 * self.rfactors.std()
self.rmax = rmax
return rmax
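        # Worked example: R-factors of [5.0, 6.0, 7.0] % give a mean of 6.0 and a standard
        # deviation of ~0.82, so models with an R-factor above ~7.6 % would be discarded.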
def models_selection(self):
"""
        Check if each model respects the limit for the R-factor
:return self.validmodels: 1d array, 0 for a non valid model, else 1
"""
if self.rmax is None:
self.rcalculation()
rmax = self.rmax
validmodels = []
for i in range(len(self.sasmodels)):
rfactor = self.rfactors[i]
if rfactor <= rmax:
validmodels.append(1.0)
else:
validmodels.append(0.0)
self.validmodels = numpy.array(validmodels, dtype="float")
return self.validmodels
def rfactorplot(self, filename=None, save=False):
"""
Create a png file with the table of R factor for each model.
        A threshold is computed to discard models with Rfactor > Rmax.
:param filename: filename for the figure, default to Rfactor.png
:param save: save automatically the figure if True, else show it
:return fig: the wanted figures
"""
if filename is None:
filename = "Rfactor.png"
if len(self.validmodels) == 0:
self.models_selection()
dammif_files = len(self.inputfiles)
R = self.rfactors
Rmax = self.rmax
xticks = 1 + numpy.arange(dammif_files)
fig = plt.figure(figsize=(7.5, 10))
labels = [os.path.splitext(os.path.basename(self.inputfiles[i]))[0] for i in range(dammif_files)]
ax2 = fig.add_subplot(1, 1, 1)
ax2.set_title("Selection of dammif models based on R factor")
ax2.bar(xticks - 0.5, R)
ax2.plot([0.5, dammif_files + 0.5], [Rmax, Rmax], "-r", label="R$_{max}$ = %.3f" % Rmax)
ax2.set_ylabel("R factor in percent")
ax2.set_xticks(xticks)
ax2.set_xticklabels(labels, rotation=90)
ax2.legend(loc=8)
bbox_props = dict(fc="pink", ec="r", lw=1)
for i in range(dammif_files):
if not self.validmodels[i]:
ax2.text(i + 0.95, Rmax / 2, "Discarded", ha="center", va="center", rotation=90, size=10, bbox=bbox_props)
logger.info("model %s discarded, Rfactor > Rmax" % self.inputfiles[i])
if save:
fig.savefig(filename)
else:
fig.show()
return fig
class AlignModels:
"""
Used to align DAM from pdb files
"""
def __init__(self, files, slow=True, enantiomorphs=True):
"""
:param files: list of pdb files to read to create DAM
:param slow: optimized every symmetry if True, else only optimized the best one
:param enantiomorphs: take into account both enantiomorphs if True (i.e. inversion authorized)
"""
self.slow = slow
self.enantiomorphs = enantiomorphs
self.inputfiles = files
self.outputfiles = []
self.models = []
self.arrayNSD = None
self.validmodels = []
self.reference = None
def __repr__(self):
return "alignment process for %s models" % len(self.models)
def assign_models(self):
"""
        Create SASModels from the pdb files in self.inputfiles and save them in self.models.
Center of mass, inertia tensor and canonical parameters are computed for each SASModel.
:return self.models: list of SASModel
"""
for inputpdb in self.inputfiles:
model = SASModel()
model.read(inputpdb)
model.centroid()
model.inertiatensor()
model.canonical_parameters()
self.models.append(model)
if len(self.inputfiles) != len(self.models):
logger.error("Problem of assignment\n%s models for %s files" % (len(self.models), len(self.inputfiles)))
return self.models
def optimize(self, reference, molecule, symmetry):
"""
Use scipy.optimize to optimize transformation parameters to minimize NSD
:param reference: SASmodel
:param molecule: SASmodel
:param symmetry: 3-list of +/-1
:return p: transformation parameters optimized
:return dist: NSD after optimization
"""
p, dist, niter, nfuncalls, warmflag = fmin(reference.dist_after_movement, molecule.can_param, args=(molecule, symmetry), ftol=1e-4, maxiter=200, full_output=True, disp=False)
if niter == 200:
logger.debug("convergence not reached")
else:
logger.debug("convergence reach after %s iterations" % niter)
return p, dist
def alignment_sym(self, reference, molecule):
"""
        Apply the 8 axis-flip combinations to the molecule and select the one which minimizes the distance between it and the reference.
:param reference: SASModel, the one which do not move
:param molecule: SASModel, the one wich has to be aligned
:return combinaison: best symmetry to minimize NSD
:return p: transformation parameters optimized if slow is true, unoptimized else
"""
can_paramref = reference.can_param
can_parammol = molecule.can_param
ref_can = reference.transform(can_paramref, [1, 1, 1])
mol_can = molecule.transform(can_parammol, [1, 1, 1])
if self.slow:
parameters, dist = self.optimize(reference, molecule, [1, 1, 1])
else:
parameters = can_parammol
dist = reference.dist(molecule, ref_can, mol_can)
combinaison = None
for comb in itertools.product((-1, 1), repeat=3):
if comb == (1, 1, 1):
continue
if not self.enantiomorphs and comb[0] * comb[1] * comb[2] == -1:
continue
sym = numpy.diag(comb + (1,))
mol_sym = numpy.dot(sym, mol_can.T).T
if self.slow:
symmetry = [sym[0, 0], sym[1, 1], sym[2, 2]]
p, d = self.optimize(reference, molecule, symmetry)
else:
p = can_parammol
d = reference.dist(molecule, ref_can, mol_sym)
if d < dist:
dist = d
parameters = p
combinaison = comb
if combinaison is not None:
combinaison = list(combinaison)
else:
combinaison = [1, 1, 1]
return combinaison, parameters
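        # Note: itertools.product((-1, 1), repeat=3) enumerates the 8 axis-flip combinations;
        # with enantiomorphs=False the four flips with determinant -1 (mirror images) are
        # skipped, so only proper rotations of the canonical orientation are compared.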
def makeNSDarray(self):
"""
Calculate the NSD correlation table and save it in self.arrayNSD
:return self.arrayNSD: 2d array, NSD correlation table
"""
models = self.models
size = len(models)
valid = self.validmodels
self.arrayNSD = numpy.zeros((size, size), dtype="float")
for i in range(size):
if valid[i] == 1.0:
reference = models[i]
else:
self.arrayNSD[i, :] = 0.00
continue
for j in range(size):
if i == j:
self.arrayNSD[i, j] = 0.00
elif i < j:
if valid[j] == 1.0:
molecule = models[j]
symmetry, p = self.alignment_sym(reference, molecule)
if self.slow:
dist = reference.dist_after_movement(p, molecule, symmetry)
else:
p, dist = self.optimize(reference, molecule, symmetry)
else:
dist = 0.00
self.arrayNSD[i, j] = self.arrayNSD[j, i] = dist
return self.arrayNSD
def plotNSDarray(self, rmax=None, filename=None, save=False):
"""
Create a png file with the table of NSD and the average NSD for each model.
A threshold is computed to separate good models from the ones to exclude.
:param rmax: threshold of R factor for the validity of a model
:param filename: filename for the figure, default to nsd.png
:param save: save automatically the figure if True, else show it
:return fig: the wanted figure
"""
if self.arrayNSD is None:
self.makeNSDarray()
if not self.reference:
self.reference = self.find_reference()
if filename is None:
filename = "nsd.png"
dammif_files = len(self.inputfiles)
valid_models = self.validmodels
labels = [os.path.splitext(os.path.basename(self.outputfiles[i]))[0] for i in range(dammif_files)]
mask2d = (numpy.outer(valid_models, valid_models))
tableNSD = self.arrayNSD * mask2d
maskedNSD = numpy.ma.masked_array(tableNSD, mask=numpy.logical_not(mask2d))
data = valid_models * (tableNSD.sum(axis=-1) / (valid_models.sum() - 1)) # mean for the valid models, excluding itself
fig = plt.figure(figsize=(15, 10))
xticks = 1 + numpy.arange(dammif_files)
ax1 = fig.add_subplot(1, 2, 1)
ax2 = fig.add_subplot(1, 2, 2)
# first subplot : the NSD table
lnsd = []
for i in range(dammif_files):
for j in range(dammif_files):
nsd = maskedNSD[i, j]
if not maskedNSD.mask[i, j]:
ax1.text(i, j, "%.2f" % nsd, ha="center", va="center", size=12 * 8 // dammif_files)
ax1.text(j, i, "%.2f" % nsd, ha="center", va="center", size=12 * 8 // dammif_files)
if i != j:
lnsd.append(nsd)
lnsd = numpy.array(lnsd)
nsd_max = lnsd.mean() + lnsd.std() # threshold for nsd mean
ax1.imshow(maskedNSD, interpolation="nearest", origin="upper", cmap="YlOrRd", norm=matplotlib.colors.Normalize(vmin=min(lnsd)))
ax1.set_title(u"NSD correlation table")
ax1.set_xticks(range(dammif_files))
ax1.set_xticklabels(labels, rotation=90)
ax1.set_xlim(-0.5, dammif_files - 0.5)
ax1.set_ylim(-0.5, dammif_files - 0.5)
ax1.set_yticks(range(dammif_files))
ax1.set_yticklabels(labels)
# second subplot : the NSD mean for each model
ax2.bar(xticks - 0.5, data)
ax2.plot([0.5, dammif_files + 0.5], [nsd_max, nsd_max], "-r", label=u"NSD$_{max}$ = %.2f" % nsd_max)
ax2.set_title(u"NSD between any model and all others")
ax2.set_ylabel("Normalized Spatial Discrepancy")
ax2.set_xticks(xticks)
ax2.set_xticklabels(labels, rotation=90)
bbox_props = dict(fc="cyan", ec="b", lw=1)
ax2.text(self.reference + 0.95, data[self.reference] / 2, "Reference", ha="center", va="center", rotation=90, size=10, bbox=bbox_props)
ax2.legend(loc=8)
bbox_props = dict(fc="pink", ec="r", lw=1)
valid_number = 0
for i in range(dammif_files):
if data[i] > nsd_max:
ax2.text(i + 0.95, data[self.reference] / 2, "Discarded", ha="center", va="center", rotation=90, size=10, bbox=bbox_props)
logger.debug("model %s discarded, nsd > nsd_max" % self.inputfiles[i])
elif not valid_models[i]:
if rmax:
ax2.text(i + 0.95, data[self.reference] / 2, "Discarded, Rfactor = %s > Rmax = %s" % (100.0 * self.models[i].rfactor, rmax), ha="center", va="center", rotation=90, size=10, bbox=bbox_props)
else:
ax2.text(i + 0.95, data[self.reference] / 2, "Discarded", ha="center", va="center", rotation=90, size=10, bbox=bbox_props)
else:
if valid_models[i] == 1.0:
valid_number += 1
logger.debug("%s valid models" % valid_number)
if save:
fig.savefig(filename)
else:
fig.show()
return fig
def find_reference(self):
"""
Find the reference model among the aligned models.
The reference model is the one with the lowest average NSD with respect to the other models.
:return ref_number: position of the reference model in the list self.models
"""
if self.arrayNSD is None:
self.makeNSDarray()
if len(self.validmodels) == 0:
logger.error("Validity of models is not computed")
valid = self.validmodels
valid = valid.astype(bool)
averNSD = numpy.zeros(len(self.models))
averNSD += sys.maxsize
averNSD[valid] = ((self.arrayNSD.sum(axis=-1)) / (valid.sum() - 1))[valid]
self.reference = averNSD.argmin()
return self.reference
def alignment_reference(self, ref_number=None):
"""
Align all models in self.models with the reference one.
The aligned models are saved in pdb files (names in list self.outputfiles)
"""
if self.reference is None and ref_number is None:
self.find_reference()
ref_number = self.reference
models = self.models
reference = models[ref_number]
for i in range(len(models)):
if i == ref_number:
continue
else:
molecule = models[i]
symmetry, p = self.alignment_sym(reference, molecule)
if not self.slow:
p, dist = self.optimize(reference, molecule, symmetry)
molecule.atoms = molecule.transform(p, symmetry) # molecule sent on its canonical position
molecule.atoms = molecule.transform(reference.can_param, [1, 1, 1], reverse=True) # molecule sent on reference position
molecule.save(self.outputfiles[i])
reference.save(self.outputfiles[ref_number])
return 0
def alignment_2models(self, save=True):
"""
Align two models using the first one as reference.
The aligned models are saved in pdb files.
:return dist: NSD after alignment
"""
models = self.models
reference = models[0]
molecule = models[1]
symmetry, p = self.alignment_sym(reference, molecule)
if not self.slow:
p, dist = self.optimize(reference, molecule, symmetry)
molecule.atoms = molecule.transform(p, symmetry)
molecule.atoms = molecule.transform(reference.can_param, [1, 1, 1], reverse=True)
if self.slow:
dist = reference.dist(molecule, reference.atoms, molecule.atoms)
if save:
molecule.save(self.outputfiles)
return dist
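# Illustrative sketch, separate from the class above: the symmetry search in
# alignment_sym() boils down to trying the eight sign flips of the canonical
# axes and keeping the flip that gives the smallest distance. The helper below
# reproduces that loop on a plain numpy point cloud; the names toy_distance and
# best_symmetry are made up here and the metric is a simple RMSD, not the NSD
# computed by the models (numpy and itertools are already imported above).
def best_symmetry(reference, molecule, enantiomorphs=True):
    """Return the sign flip of (x, y, z) that best matches molecule to reference."""
    def toy_distance(a, b):
        return numpy.sqrt(((a - b) ** 2).sum(axis=1).mean())
    best_comb = (1, 1, 1)
    best_d = toy_distance(reference, molecule)
    for comb in itertools.product((-1, 1), repeat=3):
        if comb == (1, 1, 1):
            continue
        if not enantiomorphs and comb[0] * comb[1] * comb[2] == -1:
            continue  # an odd number of flips is an inversion (enantiomorph)
        d = toy_distance(reference, molecule * numpy.array(comb))
        if d < best_d:
            best_comb, best_d = comb, d
    return best_comb, best_d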
| mit |
pkruskal/scikit-learn | sklearn/datasets/__init__.py | 176 | 3671 | """
The :mod:`sklearn.datasets` module includes utilities to load datasets,
including methods to load and fetch popular reference datasets. It also
features some artificial data generators.
"""
from .base import load_diabetes
from .base import load_digits
from .base import load_files
from .base import load_iris
from .base import load_linnerud
from .base import load_boston
from .base import get_data_home
from .base import clear_data_home
from .base import load_sample_images
from .base import load_sample_image
from .covtype import fetch_covtype
from .mlcomp import load_mlcomp
from .lfw import load_lfw_pairs
from .lfw import load_lfw_people
from .lfw import fetch_lfw_pairs
from .lfw import fetch_lfw_people
from .twenty_newsgroups import fetch_20newsgroups
from .twenty_newsgroups import fetch_20newsgroups_vectorized
from .mldata import fetch_mldata, mldata_filename
from .samples_generator import make_classification
from .samples_generator import make_multilabel_classification
from .samples_generator import make_hastie_10_2
from .samples_generator import make_regression
from .samples_generator import make_blobs
from .samples_generator import make_moons
from .samples_generator import make_circles
from .samples_generator import make_friedman1
from .samples_generator import make_friedman2
from .samples_generator import make_friedman3
from .samples_generator import make_low_rank_matrix
from .samples_generator import make_sparse_coded_signal
from .samples_generator import make_sparse_uncorrelated
from .samples_generator import make_spd_matrix
from .samples_generator import make_swiss_roll
from .samples_generator import make_s_curve
from .samples_generator import make_sparse_spd_matrix
from .samples_generator import make_gaussian_quantiles
from .samples_generator import make_biclusters
from .samples_generator import make_checkerboard
from .svmlight_format import load_svmlight_file
from .svmlight_format import load_svmlight_files
from .svmlight_format import dump_svmlight_file
from .olivetti_faces import fetch_olivetti_faces
from .species_distributions import fetch_species_distributions
from .california_housing import fetch_california_housing
from .rcv1 import fetch_rcv1
__all__ = ['clear_data_home',
'dump_svmlight_file',
'fetch_20newsgroups',
'fetch_20newsgroups_vectorized',
'fetch_lfw_pairs',
'fetch_lfw_people',
'fetch_mldata',
'fetch_olivetti_faces',
'fetch_species_distributions',
'fetch_california_housing',
'fetch_covtype',
'fetch_rcv1',
'get_data_home',
'load_boston',
'load_diabetes',
'load_digits',
'load_files',
'load_iris',
'load_lfw_pairs',
'load_lfw_people',
'load_linnerud',
'load_mlcomp',
'load_sample_image',
'load_sample_images',
'load_svmlight_file',
'load_svmlight_files',
'make_biclusters',
'make_blobs',
'make_circles',
'make_classification',
'make_checkerboard',
'make_friedman1',
'make_friedman2',
'make_friedman3',
'make_gaussian_quantiles',
'make_hastie_10_2',
'make_low_rank_matrix',
'make_moons',
'make_multilabel_classification',
'make_regression',
'make_s_curve',
'make_sparse_coded_signal',
'make_sparse_spd_matrix',
'make_sparse_uncorrelated',
'make_spd_matrix',
'make_swiss_roll',
'mldata_filename']
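# Minimal usage sketch (added for illustration; the helper name is made up and it
# is not executed on import): the load_* functions return Bunch objects exposing
# `data` and `target` arrays, while the make_* helpers generate synthetic problems.
def _example_usage():
    iris = load_iris()
    X, y = iris.data, iris.target  # (150, 4) feature matrix and 3-class targets
    Xs, ys = make_classification(n_samples=100, n_features=5, random_state=0)
    return X.shape, y.shape, Xs.shape, ys.shape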
| bsd-3-clause |
ankurankan/scikit-learn | examples/cluster/plot_affinity_propagation.py | 349 | 2304 | """
=================================================
Demo of affinity propagation clustering algorithm
=================================================
Reference:
Brendan J. Frey and Delbert Dueck, "Clustering by Passing Messages
Between Data Points", Science Feb. 2007
"""
print(__doc__)
from sklearn.cluster import AffinityPropagation
from sklearn import metrics
from sklearn.datasets.samples_generator import make_blobs
##############################################################################
# Generate sample data
centers = [[1, 1], [-1, -1], [1, -1]]
X, labels_true = make_blobs(n_samples=300, centers=centers, cluster_std=0.5,
random_state=0)
##############################################################################
# Compute Affinity Propagation
af = AffinityPropagation(preference=-50).fit(X)
cluster_centers_indices = af.cluster_centers_indices_
labels = af.labels_
n_clusters_ = len(cluster_centers_indices)
print('Estimated number of clusters: %d' % n_clusters_)
print("Homogeneity: %0.3f" % metrics.homogeneity_score(labels_true, labels))
print("Completeness: %0.3f" % metrics.completeness_score(labels_true, labels))
print("V-measure: %0.3f" % metrics.v_measure_score(labels_true, labels))
print("Adjusted Rand Index: %0.3f"
% metrics.adjusted_rand_score(labels_true, labels))
print("Adjusted Mutual Information: %0.3f"
% metrics.adjusted_mutual_info_score(labels_true, labels))
print("Silhouette Coefficient: %0.3f"
% metrics.silhouette_score(X, labels, metric='sqeuclidean'))
##############################################################################
# Plot result
import matplotlib.pyplot as plt
from itertools import cycle
plt.close('all')
plt.figure(1)
plt.clf()
colors = cycle('bgrcmykbgrcmykbgrcmykbgrcmyk')
for k, col in zip(range(n_clusters_), colors):
class_members = labels == k
cluster_center = X[cluster_centers_indices[k]]
plt.plot(X[class_members, 0], X[class_members, 1], col + '.')
plt.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=14)
for x in X[class_members]:
plt.plot([cluster_center[0], x[0]], [cluster_center[1], x[1]], col)
plt.title('Estimated number of clusters: %d' % n_clusters_)
plt.show()
| bsd-3-clause |
bdh1011/wau | venv/lib/python2.7/site-packages/IPython/core/display.py | 3 | 33184 | # -*- coding: utf-8 -*-
"""Top-level display functions for displaying object in different formats."""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
from __future__ import print_function
import json
import mimetypes
import os
import struct
import warnings
from IPython.core.formatters import _safe_get_formatter_method
from IPython.utils.py3compat import (string_types, cast_bytes_py2, cast_unicode,
unicode_type)
from IPython.testing.skipdoctest import skip_doctest
__all__ = ['display', 'display_pretty', 'display_html', 'display_markdown',
'display_svg', 'display_png', 'display_jpeg', 'display_latex', 'display_json',
'display_javascript', 'display_pdf', 'DisplayObject', 'TextDisplayObject',
'Pretty', 'HTML', 'Markdown', 'Math', 'Latex', 'SVG', 'JSON', 'Javascript',
'Image', 'clear_output', 'set_matplotlib_formats', 'set_matplotlib_close',
'publish_display_data']
#-----------------------------------------------------------------------------
# utility functions
#-----------------------------------------------------------------------------
def _safe_exists(path):
"""Check path, but don't let exceptions raise"""
try:
return os.path.exists(path)
except Exception:
return False
def _merge(d1, d2):
"""Like update, but merges sub-dicts instead of clobbering at the top level.
Updates d1 in-place
"""
if not isinstance(d2, dict) or not isinstance(d1, dict):
return d2
for key, value in d2.items():
d1[key] = _merge(d1.get(key), value)
return d1
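# Small illustration of _merge (the helper name below is ours): unlike dict.update,
# nested dictionaries are merged key by key instead of being replaced wholesale.
def _merge_example():
    d1 = {'image/png': {'width': 100}, 'text/plain': 'a'}
    d2 = {'image/png': {'height': 50}, 'text/plain': 'b'}
    merged = _merge(d1, d2)
    # merged == {'image/png': {'width': 100, 'height': 50}, 'text/plain': 'b'}
    return merged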
def _display_mimetype(mimetype, objs, raw=False, metadata=None):
"""internal implementation of all display_foo methods
Parameters
----------
mimetype : str
The mimetype to be published (e.g. 'image/png')
objs : tuple of objects
The Python objects to display, or if raw=True raw text data to
display.
raw : bool
Are the data objects raw data or Python objects that need to be
formatted before display? [default: False]
metadata : dict (optional)
Metadata to be associated with the specific mimetype output.
"""
if metadata:
metadata = {mimetype: metadata}
if raw:
# turn list of pngdata into list of { 'image/png': pngdata }
objs = [ {mimetype: obj} for obj in objs ]
display(*objs, raw=raw, metadata=metadata, include=[mimetype])
#-----------------------------------------------------------------------------
# Main functions
#-----------------------------------------------------------------------------
def publish_display_data(data, metadata=None, source=None):
"""Publish data and metadata to all frontends.
See the ``display_data`` message in the messaging documentation for
more details about this message type.
The following MIME types are currently implemented:
* text/plain
* text/html
* text/markdown
* text/latex
* application/json
* application/javascript
* image/png
* image/jpeg
* image/svg+xml
Parameters
----------
data : dict
A dictionary having keys that are valid MIME types (like
'text/plain' or 'image/svg+xml') and values that are the data for
that MIME type. The data itself must be a JSON'able data
structure. Minimally all data should have the 'text/plain' data,
which can be displayed by all frontends. If more than the plain
text is given, it is up to the frontend to decide which
representation to use.
metadata : dict
A dictionary for metadata related to the data. This can contain
arbitrary key, value pairs that frontends can use to interpret
the data. mime-type keys matching those in data can be used
to specify metadata about particular representations.
source : str, deprecated
Unused.
"""
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.instance().display_pub.publish(
data=data,
metadata=metadata,
)
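# Sketch of the expected payload (the helper name is ours): a mime-bundle is just a
# dict keyed by MIME type; frontends pick the richest representation they support.
# Publishing only has a visible effect inside a running IPython frontend, so the
# call itself is left commented out here.
def _mime_bundle_example():
    bundle = {'text/plain': 'x squared',
              'text/latex': '$x^2$',
              'text/html': '<i>x</i><sup>2</sup>'}
    # publish_display_data(bundle)
    return bundle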
def display(*objs, **kwargs):
"""Display a Python object in all frontends.
By default all representations will be computed and sent to the frontends.
Frontends can decide which representation is used and how.
Parameters
----------
objs : tuple of objects
The Python objects to display.
raw : bool, optional
Are the objects to be displayed already mimetype-keyed dicts of raw display data,
or Python objects that need to be formatted before display? [default: False]
include : list or tuple, optional
A list of format type strings (MIME types) to include in the
format data dict. If this is set *only* the format types included
in this list will be computed.
exclude : list or tuple, optional
A list of format type strings (MIME types) to exclude in the format
data dict. If this is set all format types will be computed,
except for those included in this argument.
metadata : dict, optional
A dictionary of metadata to associate with the output.
mime-type keys in this dictionary will be associated with the individual
representation formats, if they exist.
"""
raw = kwargs.get('raw', False)
include = kwargs.get('include')
exclude = kwargs.get('exclude')
metadata = kwargs.get('metadata')
from IPython.core.interactiveshell import InteractiveShell
if not raw:
format = InteractiveShell.instance().display_formatter.format
for obj in objs:
if raw:
publish_display_data(data=obj, metadata=metadata)
else:
format_dict, md_dict = format(obj, include=include, exclude=exclude)
if not format_dict:
# nothing to display (e.g. _ipython_display_ took over)
continue
if metadata:
# kwarg-specified metadata gets precedence
_merge(md_dict, metadata)
publish_display_data(data=format_dict, metadata=md_dict)
def display_pretty(*objs, **kwargs):
"""Display the pretty (default) representation of an object.
Parameters
----------
objs : tuple of objects
The Python objects to display, or if raw=True raw text data to
display.
raw : bool
Are the data objects raw data or Python objects that need to be
formatted before display? [default: False]
metadata : dict (optional)
Metadata to be associated with the specific mimetype output.
"""
_display_mimetype('text/plain', objs, **kwargs)
def display_html(*objs, **kwargs):
"""Display the HTML representation of an object.
Parameters
----------
objs : tuple of objects
The Python objects to display, or if raw=True raw HTML data to
display.
raw : bool
Are the data objects raw data or Python objects that need to be
formatted before display? [default: False]
metadata : dict (optional)
Metadata to be associated with the specific mimetype output.
"""
_display_mimetype('text/html', objs, **kwargs)
def display_markdown(*objs, **kwargs):
"""Displays the Markdown representation of an object.
Parameters
----------
objs : tuple of objects
The Python objects to display, or if raw=True raw markdown data to
display.
raw : bool
Are the data objects raw data or Python objects that need to be
formatted before display? [default: False]
metadata : dict (optional)
Metadata to be associated with the specific mimetype output.
"""
_display_mimetype('text/markdown', objs, **kwargs)
def display_svg(*objs, **kwargs):
"""Display the SVG representation of an object.
Parameters
----------
objs : tuple of objects
The Python objects to display, or if raw=True raw svg data to
display.
raw : bool
Are the data objects raw data or Python objects that need to be
formatted before display? [default: False]
metadata : dict (optional)
Metadata to be associated with the specific mimetype output.
"""
_display_mimetype('image/svg+xml', objs, **kwargs)
def display_png(*objs, **kwargs):
"""Display the PNG representation of an object.
Parameters
----------
objs : tuple of objects
The Python objects to display, or if raw=True raw png data to
display.
raw : bool
Are the data objects raw data or Python objects that need to be
formatted before display? [default: False]
metadata : dict (optional)
Metadata to be associated with the specific mimetype output.
"""
_display_mimetype('image/png', objs, **kwargs)
def display_jpeg(*objs, **kwargs):
"""Display the JPEG representation of an object.
Parameters
----------
objs : tuple of objects
The Python objects to display, or if raw=True raw JPEG data to
display.
raw : bool
Are the data objects raw data or Python objects that need to be
formatted before display? [default: False]
metadata : dict (optional)
Metadata to be associated with the specific mimetype output.
"""
_display_mimetype('image/jpeg', objs, **kwargs)
def display_latex(*objs, **kwargs):
"""Display the LaTeX representation of an object.
Parameters
----------
objs : tuple of objects
The Python objects to display, or if raw=True raw latex data to
display.
raw : bool
Are the data objects raw data or Python objects that need to be
formatted before display? [default: False]
metadata : dict (optional)
Metadata to be associated with the specific mimetype output.
"""
_display_mimetype('text/latex', objs, **kwargs)
def display_json(*objs, **kwargs):
"""Display the JSON representation of an object.
Note that not many frontends support displaying JSON.
Parameters
----------
objs : tuple of objects
The Python objects to display, or if raw=True raw json data to
display.
raw : bool
Are the data objects raw data or Python objects that need to be
formatted before display? [default: False]
metadata : dict (optional)
Metadata to be associated with the specific mimetype output.
"""
_display_mimetype('application/json', objs, **kwargs)
def display_javascript(*objs, **kwargs):
"""Display the Javascript representation of an object.
Parameters
----------
objs : tuple of objects
The Python objects to display, or if raw=True raw javascript data to
display.
raw : bool
Are the data objects raw data or Python objects that need to be
formatted before display? [default: False]
metadata : dict (optional)
Metadata to be associated with the specific mimetype output.
"""
_display_mimetype('application/javascript', objs, **kwargs)
def display_pdf(*objs, **kwargs):
"""Display the PDF representation of an object.
Parameters
----------
objs : tuple of objects
The Python objects to display, or if raw=True raw javascript data to
display.
raw : bool
Are the data objects raw data or Python objects that need to be
formatted before display? [default: False]
metadata : dict (optional)
Metadata to be associated with the specific mimetype output.
"""
_display_mimetype('application/pdf', objs, **kwargs)
#-----------------------------------------------------------------------------
# Smart classes
#-----------------------------------------------------------------------------
class DisplayObject(object):
"""An object that wraps data to be displayed."""
_read_flags = 'r'
_show_mem_addr = False
def __init__(self, data=None, url=None, filename=None):
"""Create a display object given raw data.
When this object is returned by an expression or passed to the
display function, it will result in the data being displayed
in the frontend. The MIME type of the data should match the
subclasses used, so the Png subclass should be used for 'image/png'
data. If the data is a URL, the data will first be downloaded
and then displayed.
Parameters
----------
data : unicode, str or bytes
The raw data or a URL or file to load the data from
url : unicode
A URL to download the data from.
filename : unicode
Path to a local file to load the data from.
"""
if data is not None and isinstance(data, string_types):
if data.startswith('http') and url is None:
url = data
filename = None
data = None
elif _safe_exists(data) and filename is None:
url = None
filename = data
data = None
self.data = data
self.url = url
self.filename = None if filename is None else unicode_type(filename)
self.reload()
self._check_data()
def __repr__(self):
if not self._show_mem_addr:
cls = self.__class__
r = "<%s.%s object>" % (cls.__module__, cls.__name__)
else:
r = super(DisplayObject, self).__repr__()
return r
def _check_data(self):
"""Override in subclasses if there's something to check."""
pass
def reload(self):
"""Reload the raw data from file or URL."""
if self.filename is not None:
with open(self.filename, self._read_flags) as f:
self.data = f.read()
elif self.url is not None:
try:
try:
from urllib.request import urlopen # Py3
except ImportError:
from urllib2 import urlopen
response = urlopen(self.url)
self.data = response.read()
# extract encoding from header, if there is one:
encoding = None
for sub in response.headers['content-type'].split(';'):
sub = sub.strip()
if sub.startswith('charset'):
encoding = sub.split('=')[-1].strip()
break
# decode data, if an encoding was specified
if encoding:
self.data = self.data.decode(encoding, 'replace')
except:
self.data = None
class TextDisplayObject(DisplayObject):
"""Validate that display data is text"""
def _check_data(self):
if self.data is not None and not isinstance(self.data, string_types):
raise TypeError("%s expects text, not %r" % (self.__class__.__name__, self.data))
class Pretty(TextDisplayObject):
def _repr_pretty_(self):
return self.data
class HTML(TextDisplayObject):
def _repr_html_(self):
return self.data
def __html__(self):
"""
This method exists to inform other HTML-using modules (e.g. Markupsafe,
htmltag, etc) that this object is HTML and does not need things like
special characters (<>&) escaped.
"""
return self._repr_html_()
class Markdown(TextDisplayObject):
def _repr_markdown_(self):
return self.data
class Math(TextDisplayObject):
def _repr_latex_(self):
s = self.data.strip('$')
return "$$%s$$" % s
class Latex(TextDisplayObject):
def _repr_latex_(self):
return self.data
class SVG(DisplayObject):
# wrap data in a property, which extracts the <svg> tag, discarding
# document headers
_data = None
@property
def data(self):
return self._data
@data.setter
def data(self, svg):
if svg is None:
self._data = None
return
# parse into dom object
from xml.dom import minidom
svg = cast_bytes_py2(svg)
x = minidom.parseString(svg)
# get svg tag (should be 1)
found_svg = x.getElementsByTagName('svg')
if found_svg:
svg = found_svg[0].toxml()
else:
# fallback on the input, trust the user
# but this is probably an error.
pass
svg = cast_unicode(svg)
self._data = svg
def _repr_svg_(self):
return self.data
class JSON(DisplayObject):
"""JSON expects a JSON-able dict or list
not an already-serialized JSON string.
Scalar types (None, number, string) are not allowed, only dict or list containers.
"""
# wrap data in a property, which warns about passing already-serialized JSON
_data = None
def _check_data(self):
if self.data is not None and not isinstance(self.data, (dict, list)):
raise TypeError("%s expects JSONable dict or list, not %r" % (self.__class__.__name__, self.data))
@property
def data(self):
return self._data
@data.setter
def data(self, data):
if isinstance(data, string_types):
warnings.warn("JSON expects JSONable dict or list, not JSON strings")
data = json.loads(data)
self._data = data
def _repr_json_(self):
return self.data
css_t = """$("head").append($("<link/>").attr({
rel: "stylesheet",
type: "text/css",
href: "%s"
}));
"""
lib_t1 = """$.getScript("%s", function () {
"""
lib_t2 = """});
"""
class Javascript(TextDisplayObject):
def __init__(self, data=None, url=None, filename=None, lib=None, css=None):
"""Create a Javascript display object given raw data.
When this object is returned by an expression or passed to the
display function, it will result in the data being displayed
in the frontend. If the data is a URL, the data will first be
downloaded and then displayed.
In the Notebook, the containing element will be available as `element`,
and jQuery will be available. Content appended to `element` will be
visible in the output area.
Parameters
----------
data : unicode, str or bytes
The Javascript source code or a URL to download it from.
url : unicode
A URL to download the data from.
filename : unicode
Path to a local file to load the data from.
lib : list or str
A sequence of Javascript library URLs to load asynchronously before
running the source code. The full URLs of the libraries should
be given. A single Javascript library URL can also be given as a
string.
css : list or str
A sequence of css files to load before running the source code.
The full URLs of the css files should be given. A single css URL
can also be given as a string.
"""
if isinstance(lib, string_types):
lib = [lib]
elif lib is None:
lib = []
if isinstance(css, string_types):
css = [css]
elif css is None:
css = []
if not isinstance(lib, (list,tuple)):
raise TypeError('expected sequence, got: %r' % lib)
if not isinstance(css, (list,tuple)):
raise TypeError('expected sequence, got: %r' % css)
self.lib = lib
self.css = css
super(Javascript, self).__init__(data=data, url=url, filename=filename)
def _repr_javascript_(self):
r = ''
for c in self.css:
r += css_t % c
for l in self.lib:
r += lib_t1 % l
r += self.data
r += lib_t2*len(self.lib)
return r
# constants for identifying png/jpeg data
_PNG = b'\x89PNG\r\n\x1a\n'
_JPEG = b'\xff\xd8'
def _pngxy(data):
"""read the (width, height) from a PNG header"""
ihdr = data.index(b'IHDR')
# next 8 bytes are width/height
w4h4 = data[ihdr+4:ihdr+12]
return struct.unpack('>ii', w4h4)
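# Quick self-check of _pngxy on a synthetic chunk (the helper name is ours): a real
# IHDR chunk carries more fields, but only the 8 bytes after the tag are read here.
def _pngxy_example():
    fake_png = _PNG + struct.pack('>I', 13) + b'IHDR' + struct.pack('>ii', 640, 480)
    return _pngxy(fake_png)  # -> (640, 480)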
def _jpegxy(data):
"""read the (width, height) from a JPEG header"""
# adapted from http://www.64lines.com/jpeg-width-height
idx = 4
while True:
block_size = struct.unpack('>H', data[idx:idx+2])[0]
idx = idx + block_size
if data[idx:idx+2] == b'\xFF\xC0':
# found Start of Frame
iSOF = idx
break
else:
# read another block
idx += 2
h, w = struct.unpack('>HH', data[iSOF+5:iSOF+9])
return w, h
class Image(DisplayObject):
_read_flags = 'rb'
_FMT_JPEG = u'jpeg'
_FMT_PNG = u'png'
_ACCEPTABLE_EMBEDDINGS = [_FMT_JPEG, _FMT_PNG]
def __init__(self, data=None, url=None, filename=None, format=u'png',
embed=None, width=None, height=None, retina=False,
unconfined=False, metadata=None):
"""Create a PNG/JPEG image object given raw data.
When this object is returned by an input cell or passed to the
display function, it will result in the image being displayed
in the frontend.
Parameters
----------
data : unicode, str or bytes
The raw image data or a URL or filename to load the data from.
This always results in embedded image data.
url : unicode
A URL to download the data from. If you specify `url=`,
the image data will not be embedded unless you also specify `embed=True`.
filename : unicode
Path to a local file to load the data from.
Images from a file are always embedded.
format : unicode
The format of the image data (png/jpeg/jpg). If a filename or URL is given,
the format will be inferred from the filename extension.
embed : bool
Should the image data be embedded using a data URI (True) or be
loaded using an <img> tag. Set this to True if you want the image
to be viewable later with no internet connection in the notebook.
Default is `True`, unless the keyword argument `url` is set, then
default value is `False`.
Note that QtConsole is not able to display images if `embed` is set to `False`
width : int
Width to which to constrain the image in html
height : int
Height to which to constrain the image in html
retina : bool
Automatically set the width and height to half of the measured
width and height.
This only works for embedded images because it reads the width/height
from image data.
For non-embedded images, you can just set the desired display width
and height directly.
unconfined : bool
Set unconfined=True to disable max-width confinement of the image.
metadata : dict
Specify extra metadata to attach to the image.
Examples
--------
# embedded image data, works in qtconsole and notebook
# when passed positionally, the first arg can be any of raw image data,
# a URL, or a filename from which to load image data.
# The result is always embedding image data for inline images.
Image('http://www.google.fr/images/srpr/logo3w.png')
Image('/path/to/image.jpg')
Image(b'RAW_PNG_DATA...')
# Specifying Image(url=...) does not embed the image data,
# it only generates `<img>` tag with a link to the source.
# This will not work in the qtconsole or offline.
Image(url='http://www.google.fr/images/srpr/logo3w.png')
"""
if filename is not None:
ext = self._find_ext(filename)
elif url is not None:
ext = self._find_ext(url)
elif data is None:
raise ValueError("No image data found. Expecting filename, url, or data.")
elif isinstance(data, string_types) and (
data.startswith('http') or _safe_exists(data)
):
ext = self._find_ext(data)
else:
ext = None
if ext is not None:
format = ext.lower()
if ext == u'jpg' or ext == u'jpeg':
format = self._FMT_JPEG
if ext == u'png':
format = self._FMT_PNG
elif isinstance(data, bytes) and format == 'png':
# infer image type from image data header,
# only if format might not have been specified.
if data[:2] == _JPEG:
format = 'jpeg'
self.format = unicode_type(format).lower()
self.embed = embed if embed is not None else (url is None)
if self.embed and self.format not in self._ACCEPTABLE_EMBEDDINGS:
raise ValueError("Cannot embed the '%s' image format" % (self.format))
self.width = width
self.height = height
self.retina = retina
self.unconfined = unconfined
self.metadata = metadata
super(Image, self).__init__(data=data, url=url, filename=filename)
if retina:
self._retina_shape()
def _retina_shape(self):
"""load pixel-doubled width and height from image data"""
if not self.embed:
return
if self.format == 'png':
w, h = _pngxy(self.data)
elif self.format == 'jpeg':
w, h = _jpegxy(self.data)
else:
# retina only supports png
return
self.width = w // 2
self.height = h // 2
def reload(self):
"""Reload the raw data from file or URL."""
if self.embed:
super(Image,self).reload()
if self.retina:
self._retina_shape()
def _repr_html_(self):
if not self.embed:
width = height = klass = ''
if self.width:
width = ' width="%d"' % self.width
if self.height:
height = ' height="%d"' % self.height
if self.unconfined:
klass = ' class="unconfined"'
return u'<img src="{url}"{width}{height}{klass}/>'.format(
url=self.url,
width=width,
height=height,
klass=klass,
)
def _data_and_metadata(self):
"""shortcut for returning metadata with shape information, if defined"""
md = {}
if self.width:
md['width'] = self.width
if self.height:
md['height'] = self.height
if self.unconfined:
md['unconfined'] = self.unconfined
if self.metadata:
md.update(self.metadata)
if md:
return self.data, md
else:
return self.data
def _repr_png_(self):
if self.embed and self.format == u'png':
return self._data_and_metadata()
def _repr_jpeg_(self):
if self.embed and (self.format == u'jpeg' or self.format == u'jpg'):
return self._data_and_metadata()
def _find_ext(self, s):
return unicode_type(s.split('.')[-1].lower())
class Video(DisplayObject):
def __init__(self, data=None, url=None, filename=None, embed=None, mimetype=None):
"""Create a video object given raw data or an URL.
When this object is returned by an input cell or passed to the
display function, it will result in the video being displayed
in the frontend.
Parameters
----------
data : unicode, str or bytes
The raw image data or a URL or filename to load the data from.
This always results in embedded image data.
url : unicode
A URL to download the data from. If you specify `url=`,
the image data will not be embedded unless you also specify `embed=True`.
filename : unicode
Path to a local file to load the data from.
Videos from a file are always embedded.
embed : bool
Should the image data be embedded using a data URI (True) or be
loaded using an <img> tag. Set this to True if you want the image
to be viewable later with no internet connection in the notebook.
Default is `True`, unless the keyword argument `url` is set, then
default value is `False`.
Note that QtConsole is not able to display images if `embed` is set to `False`
mimetype : unicode
Specify the mimetype in case you load in an encoded video.
Examples
--------
Video('https://archive.org/download/Sita_Sings_the_Blues/Sita_Sings_the_Blues_small.mp4')
Video('path/to/video.mp4')
Video('path/to/video.mp4', embed=False)
"""
if url is None and (data.startswith('http') or data.startswith('https')):
url = data
data = None
embed = False
elif os.path.exists(data):
filename = data
data = None
self.mimetype = mimetype
self.embed = embed if embed is not None else (filename is not None)
super(Video, self).__init__(data=data, url=url, filename=filename)
def _repr_html_(self):
# External URLs and potentially local files are not embedded into the
# notebook output.
if not self.embed:
url = self.url if self.url is not None else self.filename
output = """<video src="{0}" controls>
Your browser does not support the <code>video</code> element.
</video>""".format(url)
return output
# Embedded videos uses base64 encoded videos.
if self.filename is not None:
mimetypes.init()
mimetype, encoding = mimetypes.guess_type(self.filename)
video = open(self.filename, 'rb').read()
video_encoded = video.encode('base64')
else:
video_encoded = self.data
mimetype = self.mimetype
output = """<video controls>
<source src="data:{0};base64,{1}" type="{0}">
Your browser does not support the video tag.
</video>""".format(mimetype, video_encoded)
return output
def reload(self):
# TODO
pass
def _repr_png_(self):
# TODO
pass
def _repr_jpeg_(self):
# TODO
pass
def clear_output(wait=False):
"""Clear the output of the current cell receiving output.
Parameters
----------
wait : bool [default: false]
Wait to clear the output until new output is available to replace it."""
from IPython.core.interactiveshell import InteractiveShell
if InteractiveShell.initialized():
InteractiveShell.instance().display_pub.clear_output(wait)
else:
from IPython.utils import io
print('\033[2K\r', file=io.stdout, end='')
io.stdout.flush()
print('\033[2K\r', file=io.stderr, end='')
io.stderr.flush()
@skip_doctest
def set_matplotlib_formats(*formats, **kwargs):
"""Select figure formats for the inline backend. Optionally pass quality for JPEG.
For example, this enables PNG and JPEG output with a JPEG quality of 90%::
In [1]: set_matplotlib_formats('png', 'jpeg', quality=90)
To set this in your config files use the following::
c.InlineBackend.figure_formats = {'png', 'jpeg'}
c.InlineBackend.print_figure_kwargs.update({'quality' : 90})
Parameters
----------
*formats : strs
One or more figure formats to enable: 'png', 'retina', 'jpeg', 'svg', 'pdf'.
**kwargs :
Keyword args will be relayed to ``figure.canvas.print_figure``.
"""
from IPython.core.interactiveshell import InteractiveShell
from IPython.core.pylabtools import select_figure_formats
# build kwargs, starting with InlineBackend config
kw = {}
from ipykernel.pylab.config import InlineBackend
cfg = InlineBackend.instance()
kw.update(cfg.print_figure_kwargs)
kw.update(**kwargs)
shell = InteractiveShell.instance()
select_figure_formats(shell, formats, **kw)
@skip_doctest
def set_matplotlib_close(close=True):
"""Set whether the inline backend closes all figures automatically or not.
By default, the inline backend used in the IPython Notebook will close all
matplotlib figures automatically after each cell is run. This means that
plots in different cells won't interfere. Sometimes, you may want to make
a plot in one cell and then refine it in later cells. This can be accomplished
by::
In [1]: set_matplotlib_close(False)
To set this in your config files use the following::
c.InlineBackend.close_figures = False
Parameters
----------
close : bool
Should all matplotlib figures be automatically closed after each cell is
run?
"""
from ipykernel.pylab.config import InlineBackend
cfg = InlineBackend.instance()
cfg.close_figures = close
| mit |
MadsJensen/CAA | extract_data_numb_trials.py | 1 | 1526 | # -*- coding: utf-8 -*-
"""
@author: mje
"""
from my_settings import *
import mne
import pandas as pd
sides = ["left", "right"]
conditions = ["ctl", "ent"]
rois = ["lh", "rh"]
corr = ["correct", "incorrect"]
phase = ["in_phase", "out_phase"]
columns_keys = ["subject", "condition_type", "condition_side",
"correct", "n"]
df = pd.DataFrame(columns=columns_keys)
for subject in subjects_select:
epochs = mne.read_epochs(epochs_folder + "%s_target-epo.fif" % subject,
preload=False)
for condition in conditions:
for side in sides:
for roi in rois:
for cor in corr:
for p in phase:
row = pd.DataFrame([{"subject": subject,
"condition_type": condition,
"condition_side": side,
"ROI": roi,
"correct": cor,
"phase": p,
"n": len(epochs[condition + "/" +
side + "/" +
cor + "/" +
p])}])
df = df.append(row, ignore_index=True)
df.to_csv(data_path + "alpha_mean_n_data_extracted_phase_target.csv",
index=False)
| bsd-3-clause |
kaichogami/scikit-learn | benchmarks/bench_mnist.py | 44 | 6801 | """
=======================
MNIST dataset benchmark
=======================
Benchmark on the MNIST dataset. The dataset comprises 70,000 samples
and 784 features. Here, we consider the task of predicting
10 classes - digits from 0 to 9 - from their raw images. In contrast to the
covertype dataset, the feature space is homogeneous.
Example of output :
[..]
Classification performance:
===========================
Classifier train-time test-time error-rate
------------------------------------------------------------
MLP_adam 53.46s 0.11s 0.0224
Nystroem-SVM 112.97s 0.92s 0.0228
MultilayerPerceptron 24.33s 0.14s 0.0287
ExtraTrees 42.99s 0.57s 0.0294
RandomForest 42.70s 0.49s 0.0318
SampledRBF-SVM 135.81s 0.56s 0.0486
LinearRegression-SAG 16.67s 0.06s 0.0824
CART 20.69s 0.02s 0.1219
dummy 0.00s 0.01s 0.8973
"""
from __future__ import division, print_function
# Author: Issam H. Laradji
# Arnaud Joly <[email protected]>
# License: BSD 3 clause
import os
from time import time
import argparse
import numpy as np
from sklearn.datasets import fetch_mldata
from sklearn.datasets import get_data_home
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.dummy import DummyClassifier
from sklearn.externals.joblib import Memory
from sklearn.kernel_approximation import Nystroem
from sklearn.kernel_approximation import RBFSampler
from sklearn.metrics import zero_one_loss
from sklearn.pipeline import make_pipeline
from sklearn.svm import LinearSVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.utils import check_array
from sklearn.linear_model import LogisticRegression
from sklearn.neural_network import MLPClassifier
# Memoize the data extraction and memory map the resulting
# train / test splits in readonly mode
memory = Memory(os.path.join(get_data_home(), 'mnist_benchmark_data'),
mmap_mode='r')
@memory.cache
def load_data(dtype=np.float32, order='F'):
"""Load the data, then cache and memmap the train/test split"""
######################################################################
## Load dataset
print("Loading dataset...")
data = fetch_mldata('MNIST original')
X = check_array(data['data'], dtype=dtype, order=order)
y = data["target"]
# Normalize features
X = X / 255
## Create train-test split (as [Joachims, 2006])
print("Creating train-test split...")
n_train = 60000
X_train = X[:n_train]
y_train = y[:n_train]
X_test = X[n_train:]
y_test = y[n_train:]
return X_train, X_test, y_train, y_test
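# Aside (illustrative only; the function name is made up): Memory.cache memoises a
# function on disk keyed by its arguments. Passing cachedir=None disables the
# persistence, which makes it easy to sanity-check the decorator without touching
# the data home used above.
def _memory_cache_example():
    noop_memory = Memory(cachedir=None, verbose=0)
    @noop_memory.cache
    def square(x):
        return x * x
    return square(3)  # recomputed here; with a real cachedir it would be stored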
ESTIMATORS = {
"dummy": DummyClassifier(),
'CART': DecisionTreeClassifier(),
'ExtraTrees': ExtraTreesClassifier(n_estimators=100),
'RandomForest': RandomForestClassifier(n_estimators=100),
'Nystroem-SVM': make_pipeline(
Nystroem(gamma=0.015, n_components=1000), LinearSVC(C=100)),
'SampledRBF-SVM': make_pipeline(
RBFSampler(gamma=0.015, n_components=1000), LinearSVC(C=100)),
'LinearRegression-SAG': LogisticRegression(solver='sag', tol=1e-1, C=1e4),
'MultilayerPerceptron': MLPClassifier(
hidden_layer_sizes=(100, 100), max_iter=400, alpha=1e-4,
algorithm='sgd', learning_rate_init=0.2, momentum=0.9, verbose=1,
tol=1e-4, random_state=1),
'MLP-adam': MLPClassifier(
hidden_layer_sizes=(100, 100), max_iter=400, alpha=1e-4,
algorithm='adam', learning_rate_init=0.001, verbose=1,
tol=1e-4, random_state=1)
}
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--classifiers', nargs="+",
choices=ESTIMATORS, type=str,
default=['ExtraTrees', 'Nystroem-SVM'],
help="list of classifiers to benchmark.")
parser.add_argument('--n-jobs', nargs="?", default=1, type=int,
help="Number of concurrently running workers for "
"models that support parallelism.")
parser.add_argument('--order', nargs="?", default="C", type=str,
choices=["F", "C"],
help="Allow to choose between fortran and C ordered "
"data")
parser.add_argument('--random-seed', nargs="?", default=0, type=int,
help="Common seed used by random number generator.")
args = vars(parser.parse_args())
print(__doc__)
X_train, X_test, y_train, y_test = load_data(order=args["order"])
print("")
print("Dataset statistics:")
print("===================")
print("%s %d" % ("number of features:".ljust(25), X_train.shape[1]))
print("%s %d" % ("number of classes:".ljust(25), np.unique(y_train).size))
print("%s %s" % ("data type:".ljust(25), X_train.dtype))
print("%s %d (size=%dMB)" % ("number of train samples:".ljust(25),
X_train.shape[0], int(X_train.nbytes / 1e6)))
print("%s %d (size=%dMB)" % ("number of test samples:".ljust(25),
X_test.shape[0], int(X_test.nbytes / 1e6)))
print()
print("Training Classifiers")
print("====================")
error, train_time, test_time = {}, {}, {}
for name in sorted(args["classifiers"]):
print("Training %s ... " % name, end="")
estimator = ESTIMATORS[name]
estimator_params = estimator.get_params()
estimator.set_params(**{p: args["random_seed"]
for p in estimator_params
if p.endswith("random_state")})
if "n_jobs" in estimator_params:
estimator.set_params(n_jobs=args["n_jobs"])
time_start = time()
estimator.fit(X_train, y_train)
train_time[name] = time() - time_start
time_start = time()
y_pred = estimator.predict(X_test)
test_time[name] = time() - time_start
error[name] = zero_one_loss(y_test, y_pred)
print("done")
print()
print("Classification performance:")
print("===========================")
print("{0: <24} {1: >10} {2: >11} {3: >12}"
"".format("Classifier ", "train-time", "test-time", "error-rate"))
print("-" * 60)
for name in sorted(args["classifiers"], key=error.get):
print("{0: <23} {1: >10.2f}s {2: >10.2f}s {3: >12.4f}"
"".format(name, train_time[name], test_time[name], error[name]))
print()
| bsd-3-clause |
vighneshbirodkar/scikit-image | doc/examples/features_detection/plot_windowed_histogram.py | 4 | 5150 | """
========================
Sliding window histogram
========================
Histogram matching can be used for object detection in images [1]_. This
example extracts a single coin from the ``skimage.data.coins`` image and uses
histogram matching to attempt to locate it within the original image.
First, a box-shaped region of the image containing the target coin is
extracted and a histogram of its greyscale values is computed.
Next, for each pixel in the test image, a histogram of the greyscale values in
a region of the image surrounding the pixel is computed.
``skimage.filters.rank.windowed_histogram`` is used for this task, as it employs
an efficient sliding window based algorithm that is able to compute these
histograms quickly [2]_. The local histogram for the region surrounding each
pixel in the image is compared to that of the single coin, with a similarity
measure being computed and displayed.
The histogram of the single coin is computed using ``numpy.histogram`` on a box
shaped region surrounding the coin, while the sliding window histograms are
computed using a disc shaped structuring element of a slightly different size.
This is done in aid of demonstrating that the technique still finds similarity
in spite of these differences.
To demonstrate the rotational invariance of the technique, the same test is
performed on a version of the coins image rotated by 45 degrees.
References
----------
.. [1] Porikli, F. "Integral Histogram: A Fast Way to Extract Histograms
in Cartesian Spaces" CVPR, 2005. Vol. 1. IEEE, 2005
.. [2] S.Perreault and P.Hebert. Median filtering in constant time.
Trans. Image Processing, 16(9):2389-2394, 2007.
"""
from __future__ import division
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from skimage import data, transform
from skimage.util import img_as_ubyte
from skimage.morphology import disk
from skimage.filters import rank
matplotlib.rcParams['font.size'] = 9
def windowed_histogram_similarity(image, selem, reference_hist, n_bins):
# Compute normalized windowed histogram feature vector for each pixel
px_histograms = rank.windowed_histogram(image, selem, n_bins=n_bins)
# Reshape coin histogram to (1,1,N) for broadcast when we want to use it in
# arithmetic operations with the windowed histograms from the image
reference_hist = reference_hist.reshape((1, 1) + reference_hist.shape)
# Compute Chi squared distance metric: sum((X-Y)^2 / (X+Y));
# a measure of distance between histograms
X = px_histograms
Y = reference_hist
num = (X - Y) ** 2
denom = X + Y
denom[denom == 0] = np.infty
frac = num / denom
chi_sqr = 0.5 * np.sum(frac, axis=2)
# Generate a similarity measure. It needs to be low when distance is high
# and high when distance is low; taking the reciprocal will do this.
# Chi squared will always be >= 0, add small value to prevent divide by 0.
similarity = 1 / (chi_sqr + 1.0e-4)
return similarity
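# Tiny self-contained check of the chi-squared similarity used above (the helper
# name is ours): identical histograms hit the 1/1e-4 ceiling, and the similarity
# drops as the histograms diverge.
def _chi_squared_similarity(h1, h2):
    h1 = np.asarray(h1, dtype=float)
    h2 = np.asarray(h2, dtype=float)
    denom = h1 + h2
    denom[denom == 0] = np.infty
    chi_sqr = 0.5 * np.sum((h1 - h2) ** 2 / denom)
    return 1 / (chi_sqr + 1.0e-4)
# _chi_squared_similarity([0.5, 0.5], [0.5, 0.5]) -> 10000.0 (identical histograms)
# _chi_squared_similarity([1.0, 0.0], [0.0, 1.0]) -> ~1.0 (disjoint histograms)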
# Load the `skimage.data.coins` image
img = img_as_ubyte(data.coins())
# Quantize to 16 levels of greyscale; this way the output image will have a
# 16-dimensional feature vector per pixel
quantized_img = img // 16
# Select the coin from the 4th column, second row.
# Co-ordinate ordering: [x1,y1,x2,y2]
coin_coords = [184, 100, 228, 148] # 44 x 44 region
coin = quantized_img[coin_coords[1]:coin_coords[3],
coin_coords[0]:coin_coords[2]]
# Compute coin histogram and normalize
coin_hist, _ = np.histogram(coin.flatten(), bins=16, range=(0, 16))
coin_hist = coin_hist.astype(float) / np.sum(coin_hist)
# Compute a disk shaped mask that will define the shape of our sliding window
# Example coin is ~44px across, so make a disk 61px wide (2 * rad + 1) to be
# big enough for other coins too.
selem = disk(30)
# Compute the similarity across the complete image
similarity = windowed_histogram_similarity(quantized_img, selem, coin_hist,
coin_hist.shape[0])
# Now try a rotated image
rotated_img = img_as_ubyte(transform.rotate(img, 45.0, resize=True))
# Quantize to 16 levels as before
quantized_rotated_image = rotated_img // 16
# Similarity on rotated image
rotated_similarity = windowed_histogram_similarity(quantized_rotated_image,
selem, coin_hist,
coin_hist.shape[0])
fig, axes = plt.subplots(nrows=2, ncols=2, figsize=(10, 10))
axes[0, 0].imshow(quantized_img, cmap='gray')
axes[0, 0].set_title('Quantized image')
axes[0, 0].axis('off')
axes[0, 1].imshow(coin, cmap='gray')
axes[0, 1].set_title('Coin from 2nd row, 4th column')
axes[0, 1].axis('off')
axes[1, 0].imshow(img, cmap='gray')
axes[1, 0].imshow(similarity, cmap='hot', alpha=0.5)
axes[1, 0].set_title('Original image with overlaid similarity')
axes[1, 0].axis('off')
axes[1, 1].imshow(rotated_img, cmap='gray')
axes[1, 1].imshow(rotated_similarity, cmap='hot', alpha=0.5)
axes[1, 1].set_title('Rotated image with overlaid similarity')
axes[1, 1].axis('off')
plt.tight_layout()
plt.show()
| bsd-3-clause |
odlgroup/odl | examples/solvers/rosenbrock_minimization.py | 3 | 2620 | # Copyright 2014-2016 The ODL development group
#
# This file is part of ODL.
#
# ODL is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ODL is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ODL. If not, see <http://www.gnu.org/licenses/>.
"""Minimize the Rosenbrock functional.
This example shows how this can be done using a variety of solution methods.
"""
import odl
from matplotlib import pyplot as plt
# Create the solution space
space = odl.rn(2)
# Create objective functional
f = odl.solvers.RosenbrockFunctional(space)
# Define a line search method
line_search = odl.solvers.BacktrackingLineSearch(f)
# Solve problem using steepest descent
callback = odl.solvers.CallbackShowConvergence(f, logx=True, logy=True,
color='b')
x = space.zero()
odl.solvers.steepest_descent(f, x, line_search=line_search,
callback=callback)
legend_artists = [callback.ax.collections[-1], ]
legend_labels = ['SD', ]
# Solve problem using nonlinear conjugate gradient
callback = odl.solvers.CallbackShowConvergence(f, logx=True, logy=True,
color='g')
x = space.zero()
odl.solvers.conjugate_gradient_nonlinear(f, x, line_search=line_search,
callback=callback)
legend_artists.append(callback.ax.collections[-1])
legend_labels.append('CG')
# Solve problem using bfgs
callback = odl.solvers.CallbackShowConvergence(f, logx=True, logy=True,
color='r')
x = space.zero()
odl.solvers.bfgs_method(f, x, line_search=line_search,
callback=callback)
legend_artists.append(callback.ax.collections[-1])
legend_labels.append('BFGS')
# Solve problem using newtons method
callback = odl.solvers.CallbackShowConvergence(f, logx=True, logy=True,
color='k')
x = space.zero()
odl.solvers.newtons_method(f, x, line_search=line_search,
callback=callback)
legend_artists.append(callback.ax.collections[-1])
legend_labels.append('Newton')
plt.legend(legend_artists, legend_labels)
plt.show()
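# Optional cross-check added for illustration, independent of ODL (assumes scipy is
# available, which it is as an ODL dependency): scipy ships the classical Rosenbrock
# function as scipy.optimize.rosen, and a plain BFGS run from the same zero start
# recovers the known minimum at (1, 1).
from scipy.optimize import minimize, rosen, rosen_der
scipy_result = minimize(rosen, x0=[0.0, 0.0], jac=rosen_der, method='BFGS')
print('scipy BFGS reference minimum: %s' % scipy_result.x)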
| mpl-2.0 |
mirjalil/ml-visual-recognition | codes/classify_lower.py | 1 | 2209 | import numpy as np
import pandas
import scipy, scipy.spatial
import sklearn
import sys
import argparse
def get_label(arr):
return y.iloc[arr, 1].values
sys.setrecursionlimit(1500)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('data_train', help='Datafile')
parser.add_argument('label_train', help='The label file for training data')
parser.add_argument('data_cv', help='Datafile for CrossValidation')
parser.add_argument('label_cv', help='Labels for CrossValidation')
parser.add_argument('data_test', help='Test dataset')
parser.add_argument('out', help='Output file')
parser.add_argument('jobid', help='Job ID number')
args = parser.parse_args()
ndim= 900
global y
y = pandas.read_table(args.label_train, sep=' ', header=None, dtype='int')
#ycv = pandas.read_table(args.label_cv, sep=' ', header=None, dtype='int')
print(np.unique(y[1]))
#print(np.unique(ycv[1]))
ntot_train = y.shape[0]
feat_idx = np.random.choice(ndim, size=30, replace=False)
sel_rows = np.random.choice(y.shape[0], int(0.2 * ntot_train))
df = pandas.read_table(args.data_train, usecols=feat_idx, header=None, sep=' ')
df = df.iloc[sel_rows, :]
print(df.shape)
Xcv = pandas.read_table(args.data_cv, usecols=feat_idx, header=None, sep=' ')
print('\n %s %d %d ==> ' %(args.jobid, df.shape[0], Xcv.shape[0]))
print('%6d-%6d '%(df.shape[0], df.shape[1]))
kdt = scipy.spatial.KDTree(df, leafsize=1000)
print('KDTree is built succesfully!!')
qt_dist, qt_idx = kdt.query(Xcv, k=10)
print('Query of XC data finished!!')
pred_cv = np.apply_along_axis(get_label, 0, qt_idx)
np.savetxt('%s.%s.dist_cv'%(args.out, args.jobid), qt_dist, fmt='%.4f')
np.savetxt('%s.%s.pred_cv'%(args.out, args.jobid), pred_cv, fmt='%d')
Xts = pandas.read_table(args.data_test, usecols=feat_idx, header=None, sep=' ')
qt_dist, qt_idx = kdt.query(Xts, k=10)
pred = np.apply_along_axis(get_label, 0, qt_idx)
np.savetxt('%s.%s.dist'%(args.out, args.jobid), qt_dist, fmt='%.4f')
np.savetxt('%s.%s.pred'%(args.out, args.jobid), pred, fmt='%d')
if __name__ == '__main__':
main()
| apache-2.0 |
quheng/scikit-learn | examples/svm/plot_svm_regression.py | 249 | 1451 | """
===================================================================
Support Vector Regression (SVR) using linear and non-linear kernels
===================================================================
Toy example of 1D regression using linear, polynomial and RBF kernels.
"""
print(__doc__)
import numpy as np
from sklearn.svm import SVR
import matplotlib.pyplot as plt
###############################################################################
# Generate sample data
X = np.sort(5 * np.random.rand(40, 1), axis=0)
y = np.sin(X).ravel()
###############################################################################
# Add noise to targets
y[::5] += 3 * (0.5 - np.random.rand(8))
###############################################################################
# Fit regression model
svr_rbf = SVR(kernel='rbf', C=1e3, gamma=0.1)
svr_lin = SVR(kernel='linear', C=1e3)
svr_poly = SVR(kernel='poly', C=1e3, degree=2)
y_rbf = svr_rbf.fit(X, y).predict(X)
y_lin = svr_lin.fit(X, y).predict(X)
y_poly = svr_poly.fit(X, y).predict(X)
###############################################################################
# look at the results
plt.scatter(X, y, c='k', label='data')
plt.hold('on')
plt.plot(X, y_rbf, c='g', label='RBF model')
plt.plot(X, y_lin, c='r', label='Linear model')
plt.plot(X, y_poly, c='b', label='Polynomial model')
plt.xlabel('data')
plt.ylabel('target')
plt.title('Support Vector Regression')
plt.legend()
plt.show()
| bsd-3-clause |
matthiasmengel/sealevel | sealevel/projection.py | 1 | 5508 | # This file is part of SEALEVEL - a tool to estimates future sea-level rise
# constrained by past obervations and long-term sea-level commitment
# Copyright (C) 2016 Matthias Mengel working at PIK Potsdam
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# LICENSE.txt for more details.
import os
import numpy as np
import pandas as pd
import dimarray as da
import sealevel.contributor_functions as cf
reload(cf)
project_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
inputdatadir = os.path.join(project_dir, "data/")
######## parameters that need to be known for calibrations and projection
# add temperature offset to be used with the box & colgan 2013 data to fit past observations
# similar offset is used by e.g. Rahmstorf 2007, Science.
gis_colgan_temperature_offset = 0.5
######## sea level projection using for Monte-Carlo sampling ########
def project(gmt, proj_period, calibdata, temp_anomaly_year, sl_contributor,
sample_number, contrib_name):
"""
Monte Carlo sampling for slr contribution
for a single global mean temperature (gmt) timeseries or
an ensemble of gmt timeseries,
for one random choice of observations obs_choice,
and one random tuple of independent and dependent parameter.
the contributor function (i.e. thermal expansion) is chosen through
contrib_name.
Parameters
----------
gmt : single or ensemble of gmt timeseries
proj_period : time period for which slr projection is done
    calibdata : calibration data for the different observational datasets of the component
temp_anomaly_year: year in which global mean temperature passes zero,
depending on observation.
sl_contributor: function to calculate transient sea level rise.
    sample_number : seed number used to make the random sampling reproducible
Returns
-------
contrib : timeseries of sea level contribution with length proj_period
"""
np.random.seed(sample_number)
try:
gmt_ensemble_size = gmt.shape[1]
gmt_choice = np.random.randint(gmt_ensemble_size)
# print gmt_choice
driving_temperature = gmt[:, gmt_choice]
except IndexError:
# this is the case if single gmt is supplied
gmt_choice = 0
driving_temperature = gmt
# print contrib_name, temp_anomaly_year
# convert index to str to avoid floating point issues
calibdata.index = [str(i) for i in calibdata.index]
    # use one of the observational datasets
obs_choice = np.random.choice(calibdata.index.unique())
params_of_obs = calibdata.loc[obs_choice]
# print params_of_obs
# temp_anomaly_year = params.temp_anomaly_year
if obs_choice == "box_colgan13":
driving_temperature += gis_colgan_temperature_offset
# for dp16, the different ensemble members are interpreted
# as different observations, so selection already happened
# above through obs_choice
if contrib_name == "ant_dp16":
params = params_of_obs
else:
# choose a random parameter set
paramset_choice = np.random.randint(len(params_of_obs.index))
# can be variable number of parameters per each observation
params = params_of_obs.iloc[paramset_choice,:]
# print "pp",params
contributor = sl_contributor(params, temp_anomaly_year.loc[obs_choice][0])
contrib = contributor.calc_contribution(
driving_temperature,proj_period)
# print contrib
return [contrib, gmt_choice, obs_choice, params]
def project_slr(scen, gmt, settings):
projection_data = {}
temp_anomaly_years = pd.read_csv(os.path.join(
settings.calibfolder, "temp_anomaly_years.csv"),index_col=[0,1])
temp_anomaly_years = temp_anomaly_years.where(
pd.notnull(temp_anomaly_years), None)
for i, contrib_name in enumerate(settings.project_these):
        print "contribution", contrib_name
realizations = np.arange(settings.nrealizations)
calibdata = pd.read_csv(
os.path.join(settings.calibfolder, contrib_name+".csv"),
index_col=[0])
temp_anomaly_year = temp_anomaly_years.loc[contrib_name]
sl_contributor = cf.contributor_functions[contrib_name]
proj = np.zeros([len(settings.proj_period), settings.nrealizations])
for n in realizations:
slr, gmt_n, obs_choice, params = project(
gmt, settings.proj_period, calibdata, temp_anomaly_year,
sl_contributor, n, contrib_name)
proj[:, n] = slr
pdata = da.DimArray(proj, axes=[settings.proj_period, realizations],
dims=["time", "runnumber"])
projection_data[contrib_name] = pdata
if not os.path.exists(settings.projected_slr_folder):
os.makedirs(settings.projected_slr_folder)
fname = "projected_slr_"+scen+"_n"+str(settings.nrealizations)+".nc"
da.Dataset(projection_data).write_nc(os.path.join(
settings.projected_slr_folder,fname))
print "Sea level projection data written to"
print settings.projected_slr_folder | gpl-3.0 |
aelaguiz/pyvotune | samples/util/pickler.py | 1 | 1908 | # -*- coding: utf-8 -*-
import pyvotune
import collections
import pyvotune.sklearn
import random
import copy
import sys
try:
import cPickle as pickle
except ImportError:
import pickle
log = pyvotune.log.logger()
def reproduce(offspring_cs, variator, rng, args):
if isinstance(variator, collections.Iterable):
for op in variator:
offspring_cs = op(random=rng, candidates=offspring_cs, args=args)
return offspring_cs
else:
return [variator(random=rng, candidates=offspring_cs, args=args)]
if __name__ == '__main__':
pyvotune.set_debug(True)
# Dummy data
n_features = 28 * 28
rng = random.Random()
#################################
# Initialize PyvoTune Generator #
#################################
gen = pyvotune.Generate(
initial_state={
'sparse': False
},
gene_pool=pyvotune.sklearn.get_classifiers(n_features, rng) +
pyvotune.sklearn.get_decomposers(n_features, rng) +
pyvotune.sklearn.get_image_features(n_features, rng) +
pyvotune.sklearn.get_preprocessors(n_features, rng),
max_length=4,
noop_frequency=0.2,
rng=rng)
args = {
'crossover_rate': 0.5,
'mutation_rate': 0.3,
'pyvotune_generator': gen
}
    # Use PyvoTune variators
variators = [
pyvotune.variators.random_reset_mutation,
pyvotune.variators.param_reset_mutation,
pyvotune.variators.scramble_mutation,
pyvotune.variators.uniform_crossover,
pyvotune.variators.n_point_crossover
]
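    # Illustrative sketch (not executed in this sample; names as defined above):
    # `reproduce` chains these variators, feeding each operator's output into
    # the next one. A hypothetical call could look like:
    #
    #   parents = [gen.generate(max_retries=150) for _ in range(2)]
    #   children = reproduce(parents, variators, rng, args)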
genome = gen.generate(max_retries=150)
print genome
p_genome = pickle.dumps(genome)
print p_genome
u_genome = pickle.loads(p_genome)
print pyvotune.util.side_by_side([genome, u_genome], 50)
if genome == u_genome:
print "EQUAL"
else:
print "NOT EQUAL"
| mit |
Fenrir12/Master_BCG_EEG | Cours/Machine Learning - UDEMY/Machine Learning A-Z/Part 2 - Regression/Section 5 - Multiple Linear Regression/Homework_Solutions/multiple_linear_regression.py | 1 | 1942 | # Multiple Linear Regression
# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Importing the dataset
dataset = pd.read_csv('50_Startups.csv')
X = dataset.iloc[:, :-1].values
y = dataset.iloc[:, 4].values
# Encoding categorical data
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
labelencoder = LabelEncoder()
X[:, 3] = labelencoder.fit_transform(X[:, 3])
onehotencoder = OneHotEncoder(categorical_features = [3])
X = onehotencoder.fit_transform(X).toarray()
# Avoiding the Dummy Variable Trap
X = X[:, 1:]
# Splitting the dataset into the Training set and Test set
from sklearn.cross_validation import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)
# Feature Scaling
"""from sklearn.preprocessing import StandardScaler
sc_X = StandardScaler()
X_train = sc_X.fit_transform(X_train)
X_test = sc_X.transform(X_test)
sc_y = StandardScaler()
y_train = sc_y.fit_transform(y_train)"""
# Fitting Multiple Linear Regression to the Training set
from sklearn.linear_model import LinearRegression
regressor = LinearRegression()
regressor.fit(X_train, y_train)
# Predicting the Test set results
y_pred = regressor.predict(X_test)
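# Optional sanity check (not part of the original course script): a quick R^2
# score on the held-out test set, assuming sklearn.metrics is available.
# from sklearn.metrics import r2_score
# print(r2_score(y_test, y_pred))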
# Building the optimal model using Backward Elimination
import statsmodels.formula.api as sm
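# Note: statsmodels' OLS does not add an intercept term automatically, so a
# column of ones is prepended to X below.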
X = np.append(arr = np.ones((50, 1)).astype(int), values = X, axis = 1)
X_opt = X[:, [0, 1, 2, 3, 4, 5]]
regressor_OLS = sm.OLS(endog = y, exog = X_opt).fit()
regressor_OLS.summary()
X_opt = X[:, [0, 1, 3, 4, 5]]
regressor_OLS = sm.OLS(endog = y, exog = X_opt).fit()
regressor_OLS.summary()
X_opt = X[:, [0, 3, 4, 5]]
regressor_OLS = sm.OLS(endog = y, exog = X_opt).fit()
regressor_OLS.summary()
X_opt = X[:, [0, 3, 5]]
regressor_OLS = sm.OLS(endog = y, exog = X_opt).fit()
regressor_OLS.summary()
X_opt = X[:, [0, 3]]
regressor_OLS = sm.OLS(endog = y, exog = X_opt).fit()
regressor_OLS.summary() | apache-2.0 |
Chaparqanatoos/kaggle-knowledge | src/main/python/wordimputation.py | 4 | 1778 | __author__ = 'namukhtar'
import IPython
import sklearn as sk
import numpy as np
import matplotlib
import nltk
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_olivetti_faces
faces = fetch_olivetti_faces()
print faces.DESCR
print faces.keys()
print faces.images.shape
print faces.data.shape
print faces.target.shape
print np.max(faces.data)
print np.min(faces.data)
print np.mean(faces.data)
def print_faces(images, target, top_n):
# set up the figure size in inches
fig = plt.figure(figsize=(12, 12))
fig.subplots_adjust(left=0, right=1, bottom=0, top=1, hspace=0.05, wspace=0.05)
for i in range(top_n):
# plot the images in a matrix of 20x20
p = fig.add_subplot(20, 20, i + 1, xticks=[], yticks=[])
p.imshow(images[i], cmap=plt.cm.bone)
# label the image with the target value
p.text(0, 14, str(target[i]))
p.text(0, 60, str(i))
print_faces(faces.images, faces.target, 20)
from sklearn.svm import SVC
svc_1 = SVC(kernel='linear')
print svc_1
from sklearn.cross_validation import train_test_split
X_train, X_test, y_train, y_test = train_test_split(
faces.data, faces.target, test_size=0.25, random_state=0)
from sklearn.cross_validation import cross_val_score, KFold
from scipy.stats import sem
def evaluate_cross_validation(clf, X, y, K):
    # create a k-fold cross-validation iterator
cv = KFold(len(y), K, shuffle=True, random_state=0)
# by default the score used is the one returned by score method of the estimator (accuracy)
scores = cross_val_score(clf, X, y, cv=cv)
print scores
print ("Mean score: {0:.3f} (+/-{1:.3f})").format(
np.mean(scores), sem(scores))
evaluate_cross_validation(svc_1, X_train, y_train, 5) | apache-2.0 |
espenhgn/nest-simulator | pynest/examples/gap_junctions_inhibitory_network.py | 5 | 5989 | # -*- coding: utf-8 -*-
#
# gap_junctions_inhibitory_network.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""Gap Junctions: Inhibitory network example
-----------------------------------------------
This script simulates an inhibitory network of 500 Hodgkin-Huxley neurons.
Without the gap junctions (meaning for ``gap_weight = 0.0``) the network shows
an asynchronous irregular state that is caused by the external excitatory
Poissonian drive being balanced by the inhibitory feedback within the
network. With increasing `gap_weight` the network synchronizes:
For a lower gap weight of 0.3 nS the network remains in an asynchronous
state. With a weight of 0.54 nS the network switches randomly between the
asynchronous to the synchronous state, while for a gap weight of 0.7 nS a
stable synchronous state is reached.
This example is also used as test case 2 (see Figure 9 and 10)
in [1]_.
References
~~~~~~~~~~~
.. [1] Hahne et al. (2015) A unified framework for spiking and gap-junction
interactions in distributed neuronal network simulations, Front.
Neuroinform. http://dx.doi.org/10.3389/neuro.11.012.2008
"""
import nest
import matplotlib.pyplot as plt
import numpy
n_neuron = 500
gap_per_neuron = 60
inh_per_neuron = 50
delay = 1.0
j_exc = 300.
j_inh = -50.
threads = 8
stepsize = 0.05
simtime = 501.
gap_weight = 0.3
nest.ResetKernel()
###############################################################################
# First we set the random seed, adjust the kernel settings and create
# ``hh_psc_alpha_gap`` neurons, ``spike_detector`` and ``poisson_generator``.
numpy.random.seed(1)
nest.SetKernelStatus({'resolution': 0.05,
'total_num_virtual_procs': threads,
'print_time': True,
# Settings for waveform relaxation
# 'use_wfr': False uses communication in every step
# instead of an iterative solution
'use_wfr': True,
'wfr_comm_interval': 1.0,
'wfr_tol': 0.0001,
'wfr_max_iterations': 15,
'wfr_interpolation_order': 3})
neurons = nest.Create('hh_psc_alpha_gap', n_neuron)
sd = nest.Create("spike_detector")
pg = nest.Create("poisson_generator", params={'rate': 500.0})
###############################################################################
# Each neuron shall receive ``inh_per_neuron = 50`` inhibitory synaptic inputs
# that are randomly selected from all other neurons, each with synaptic
# weight ``j_inh = -50.0`` pA and a synaptic delay of 1.0 ms. Furthermore each
# neuron shall receive an excitatory external Poissonian input of 500.0 Hz
# with synaptic weight ``j_exc = 300.0`` pA and the same delay.
# The desired connections are created with the following commands:
conn_dict = {'rule': 'fixed_indegree',
'indegree': inh_per_neuron,
'allow_autapses': False,
'allow_multapses': True}
syn_dict = {'synapse_model': 'static_synapse',
'weight': j_inh,
'delay': delay}
nest.Connect(neurons, neurons, conn_dict, syn_dict)
nest.Connect(pg, neurons, 'all_to_all',
syn_spec={'synapse_model': 'static_synapse',
'weight': j_exc,
'delay': delay})
###############################################################################
# Then the neurons are connected to the ``spike_detector`` and the initial
# membrane potential of each neuron is set randomly between -40 and -80 mV.
nest.Connect(neurons, sd)
neurons.V_m = nest.random.uniform(min=-80., max=-40.)
#######################################################################################
# Finally gap junctions are added to the network. :math:`(60*500)/2` ``gap_junction``
# connections are added randomly resulting in an average of 60 gap-junction
# connections per neuron. We must not use the ``fixed_indegree`` or
# ``fixed_outdegree`` functionality of ``nest.Connect()`` to create the
# connections, as ``gap_junction`` connections are bidirectional connections
# and we need to make sure that the same neurons are connected in both ways.
# This is achieved by creating the connections on the Python level with the
# `random` module of the Python Standard Library and connecting the neurons
# using the ``make_symmetric`` flag for ``one_to_one`` connections.
n_connection = int(n_neuron * gap_per_neuron / 2)
neuron_list = neurons.tolist()
connections = numpy.random.choice(neuron_list, [n_connection, 2])
for source_node_id, target_node_id in connections:
nest.Connect(nest.NodeCollection([source_node_id]),
nest.NodeCollection([target_node_id]),
{'rule': 'one_to_one', 'make_symmetric': True},
{'synapse_model': 'gap_junction', 'weight': gap_weight})
###############################################################################
# In the end we start the simulation and plot the spike pattern.
nest.Simulate(simtime)
times = sd.get('events', 'times')
spikes = sd.get('events', 'senders')
n_spikes = sd.n_events
hz_rate = (1000.0 * n_spikes / simtime) / n_neuron
plt.figure(1)
plt.plot(times, spikes, 'o')
plt.title('Average spike rate (Hz): %.2f' % hz_rate)
plt.xlabel('time (ms)')
plt.ylabel('neuron no')
plt.show()
| gpl-2.0 |
raincoatrun/basemap | examples/test_rotpole.py | 3 | 2637 | from __future__ import print_function
from netCDF4 import Dataset
from mpl_toolkits.basemap import Basemap
import numpy as np
import matplotlib.pyplot as plt
nc = Dataset('wm201_Arctic_JJA_1990-2008_moyenneDesMoyennes.nc')
lats = nc.variables['lat'][:]
lons = nc.variables['lon'][:]
rlats = nc.variables['rlat'][:]
rlons = nc.variables['rlon'][:]
rlons, rlats = np.meshgrid(rlons, rlats)
data = nc.variables['air'][0,0,:,:].squeeze()
data = np.ma.masked_values(data,-999.)
rotpole = nc.variables['rotated_pole']
m = Basemap(projection='npstere',lon_0=10,boundinglat=30,resolution='c')
x,y = m(lons,lats)
m.drawcoastlines()
m.contourf(x,y,data,20)
m.drawmeridians(np.arange(-180,180,20))
m.drawparallels(np.arange(20,80,20))
m.colorbar()
plt.title('rotated pole data in polar stere map')
plt.figure()
# o_lon_p, o_lat_p: true lat/lon of pole in rotated coordinate system
# mapping to CF metadata convention:
# grid_north_pole_longitude = normalize180(180 + lon_0), where normalize180
# is a function that maps to interval [-180,180).
# grid_north_pole_latitude = o_lat_p
# north_pole_grid_longitude = o_lon_p (optional, assumed zero if not present)
def normalize180(lon):
    """Normalize lon to the range [-180, 180)"""
lower = -180.; upper = 180.
if lon > upper or lon == lower:
lon = lower + abs(lon + upper) % (abs(lower) + abs(upper))
if lon < lower or lon == upper:
lon = upper - abs(lon - lower) % (abs(lower) + abs(upper))
return lower if lon == upper else lon
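# Illustrative check of the convention above: normalize180(190.) returns -170.0
# and normalize180(180.) returns -180.0, keeping results in [-180, 180).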
lon_0 = normalize180(rotpole.grid_north_pole_longitude-180.)
o_lon_p = rotpole.north_pole_grid_longitude
o_lat_p = rotpole.grid_north_pole_latitude
print( rotpole )
print( 'lon_0,o_lon_p,o_lat_p=',lon_0,o_lon_p,o_lat_p)
m= Basemap(projection='rotpole',lon_0=lon_0,o_lon_p=o_lon_p,o_lat_p=o_lat_p,\
llcrnrlat = lats[0,0], urcrnrlat = lats[-1,-1],\
llcrnrlon = lons[0,0], urcrnrlon = lons[-1,-1],resolution='c')
x,y = m(lons,lats)
m.drawcoastlines()
m.contourf(x,y,data,20)
m.drawmeridians(np.arange(-180,180,20))
m.drawparallels(np.arange(20,80,20))
m.colorbar()
plt.title('rotated pole data in native map using real sphere corner lat/lons' )
plt.figure()
m= Basemap(projection='rotpole',lon_0=lon_0,o_lon_p=o_lon_p,o_lat_p=o_lat_p,\
llcrnry = rlats[0,0], urcrnry = rlats[-1,-1],\
llcrnrx = rlons[0,0], urcrnrx = rlons[-1,-1],resolution='c')
x,y = m(lons,lats)
m.drawcoastlines()
m.contourf(x,y,data,20)
m.drawmeridians(np.arange(-180,180,20))
m.drawparallels(np.arange(20,80,20))
m.colorbar()
plt.title('rotated pole data in native map using rotated sphere corner lat/lons' )
plt.show()
| gpl-2.0 |
droundy/protein | old/show_area_rating.py | 2 | 2047 | import pylab, sys, math
from pylab import *
import matplotlib.pyplot as plt
import numpy as np
import sys
import file_loader as load
dx = load.dx
f_shape = sys.argv[1]
f_param1 = sys.argv[2]
f_param2 = sys.argv[3]
f_param3 = sys.argv[4]
f_param4 = sys.argv[5]
f_param5 = sys.argv[6]
fname = "data/shape-" + str(sys.argv[1]) + "/area_rating-" + str(sys.argv[2]) + "-" + str(sys.argv[3]) + "-" + str(sys.argv[4]) + "-" + str(sys.argv[5]) + "-" + str(sys.argv[6]) + ".dat"
#print fname
A = np.loadtxt(fname, dtype = float)
#print A[:,0]
print "HERE"
width_y = 0
for i in range(len(A[:,0])):
if A[i,1]==0:
width_y +=1
width_z = len(A[:,0])/width_y
print width_y
print width_z
print len(A[:,0])
print "This place"
y = np.zeros(width_y)
z = np.zeros(width_z)
for i in range(width_y):
y[i] = i*.05
for i in range(width_z):
z[i] = i*.05
print "widths of Grid"
print width_y*dx, width_y
print width_z*dx, width_z
print "length of y"
print len(y)
Y,Z = meshgrid(y,z)  # 2D coordinate grids for the contour plot
area_rating = np.zeros_like(Y)
count=0
print "HEEEEEEEEEER"
print len(Z)
for i in range(len(A[:,0])):
#print A[i,3]
count+=1
print count
print "Now here"
def unzip(A):
print len(A[:,0]-1)
print A[2,3]
for i in range(len(A[:,0])-1):
        y = int(round(A[i,1]/dx))
        z = int(round(A[i,2]/dx))
area_rating[y][z] = A[i,3]
#print area_rating[y][z], i
return area_rating
max_rating = 0
min_rating = 5.0
for i in range(len(A[:,0])):
if (A[i,3] > max_rating):
max_rating = A[i,3]
if (A[i,3] < min_rating):# and A[i,3] != 0.0):
min_rating = A[i,3]
print min_rating
area_rating = unzip(A)
mylevel = np.arange(500,800,1)
contourf(Y,Z,area_rating,cmap=plt.cm.jet,levels=mylevel)
colorbar()
savefig('./data/shape-'+f_shape+'/plots/show_area_rating-natp-'+f_shape+'-'+f_param1+'-'+f_param2+'-'+f_param3+'-'+f_param4+'-'+f_param5+'.pdf')
#this only does it for NATP right now. will need to automate for other protein types later.
print "show_area_rating plot generated."
#show()
| mit |
blaze/dask | dask/array/tests/test_rechunk.py | 1 | 27650 | from itertools import product
import warnings
import pytest
np = pytest.importorskip("numpy")
import dask
from dask.utils import funcname
from dask.array.utils import assert_eq
from dask.array.rechunk import intersect_chunks, rechunk, normalize_chunks
from dask.array.rechunk import cumdims_label, _breakpoints, _intersect_1d, _old_to_new
from dask.array.rechunk import plan_rechunk, divide_to_width, merge_to_number
import dask.array as da
def test_rechunk_internals_1():
"""Test the cumdims_label and _breakpoints and
_intersect_1d internal funcs to rechunk."""
new = cumdims_label(((1, 1, 2), (1, 5, 1)), "n")
old = cumdims_label(((4,), (1,) * 5), "o")
breaks = tuple(_breakpoints(o, n) for o, n in zip(old, new))
answer = (("o", 0), ("n", 0), ("n", 1), ("n", 2), ("o", 4), ("n", 4))
assert breaks[0] == answer
answer2 = (
("o", 0),
("n", 0),
("o", 1),
("n", 1),
("o", 2),
("o", 3),
("o", 4),
("o", 5),
("n", 6),
("n", 7),
)
assert breaks[1] == answer2
i1d = [_intersect_1d(b) for b in breaks]
answer3 = [[(0, slice(0, 1))], [(0, slice(1, 2))], [(0, slice(2, 4))]]
assert i1d[0] == answer3
answer4 = [
[(0, slice(0, 1))],
[
(1, slice(0, 1)),
(2, slice(0, 1)),
(3, slice(0, 1)),
(4, slice(0, 1)),
(5, slice(0, 1)),
],
[(5, slice(1, 2))],
]
assert i1d[1] == answer4
def test_intersect_1():
""" Convert 1 D chunks"""
old = ((10, 10, 10, 10, 10),)
new = ((25, 5, 20),)
answer = [
(((0, slice(0, 10)),), ((1, slice(0, 10)),), ((2, slice(0, 5)),)),
(((2, slice(5, 10)),),),
(((3, slice(0, 10)),), ((4, slice(0, 10)),)),
]
cross = list(intersect_chunks(old_chunks=old, new_chunks=new))
assert answer == cross
def test_intersect_2():
""" Convert 1 D chunks"""
old = ((20, 20, 20, 20, 20),)
new = ((58, 4, 20, 18),)
answer = [
(((0, slice(0, 20)),), ((1, slice(0, 20)),), ((2, slice(0, 18)),)),
(((2, slice(18, 20)),), ((3, slice(0, 2)),)),
(((3, slice(2, 20)),), ((4, slice(0, 2)),)),
(((4, slice(2, 20)),),),
]
cross = list(intersect_chunks(old_chunks=old, new_chunks=new))
assert answer == cross
def test_rechunk_1d():
"""Try rechunking a random 1d matrix"""
a = np.random.uniform(0, 1, 30)
x = da.from_array(a, chunks=((10,) * 3,))
new = ((5,) * 6,)
x2 = rechunk(x, chunks=new)
assert x2.chunks == new
assert np.all(x2.compute() == a)
def test_rechunk_2d():
"""Try rechunking a random 2d matrix"""
a = np.random.uniform(0, 1, 300).reshape((10, 30))
x = da.from_array(a, chunks=((1, 2, 3, 4), (5,) * 6))
new = ((5, 5), (15,) * 2)
x2 = rechunk(x, chunks=new)
assert x2.chunks == new
assert np.all(x2.compute() == a)
def test_rechunk_4d():
"""Try rechunking a random 4d matrix"""
old = ((5, 5),) * 4
a = np.random.uniform(0, 1, 10000).reshape((10,) * 4)
x = da.from_array(a, chunks=old)
new = ((10,),) * 4
x2 = rechunk(x, chunks=new)
assert x2.chunks == new
assert np.all(x2.compute() == a)
def test_rechunk_expand():
a = np.random.uniform(0, 1, 100).reshape((10, 10))
x = da.from_array(a, chunks=(5, 5))
y = x.rechunk(chunks=((3, 3, 3, 1), (3, 3, 3, 1)))
assert np.all(y.compute() == a)
def test_rechunk_expand2():
(a, b) = (3, 2)
orig = np.random.uniform(0, 1, a ** b).reshape((a,) * b)
for off, off2 in product(range(1, a - 1), range(1, a - 1)):
old = ((a - off, off),) * b
x = da.from_array(orig, chunks=old)
new = ((a - off2, off2),) * b
assert np.all(x.rechunk(chunks=new).compute() == orig)
if a - off - off2 > 0:
new = ((off, a - off2 - off, off2),) * b
y = x.rechunk(chunks=new).compute()
assert np.all(y == orig)
def test_rechunk_method():
""" Test rechunking can be done as a method of dask array."""
old = ((5, 2, 3),) * 4
new = ((3, 3, 3, 1),) * 4
a = np.random.uniform(0, 1, 10000).reshape((10,) * 4)
x = da.from_array(a, chunks=old)
x2 = x.rechunk(chunks=new)
assert x2.chunks == new
assert np.all(x2.compute() == a)
def test_rechunk_blockshape():
""" Test that blockshape can be used."""
new_shape, new_chunks = (10, 10), (4, 3)
new_blockdims = normalize_chunks(new_chunks, new_shape)
old_chunks = ((4, 4, 2), (3, 3, 3, 1))
a = np.random.uniform(0, 1, 100).reshape((10, 10))
x = da.from_array(a, chunks=old_chunks)
check1 = rechunk(x, chunks=new_chunks)
assert check1.chunks == new_blockdims
assert np.all(check1.compute() == a)
def test_dtype():
x = da.ones(5, chunks=(2,))
assert x.rechunk(chunks=(1,)).dtype == x.dtype
def test_rechunk_with_dict():
x = da.ones((24, 24), chunks=(4, 8))
y = x.rechunk(chunks={0: 12})
assert y.chunks == ((12, 12), (8, 8, 8))
x = da.ones((24, 24), chunks=(4, 8))
y = x.rechunk(chunks={0: (12, 12)})
assert y.chunks == ((12, 12), (8, 8, 8))
x = da.ones((24, 24), chunks=(4, 8))
y = x.rechunk(chunks={0: -1})
assert y.chunks == ((24,), (8, 8, 8))
def test_rechunk_with_empty_input():
x = da.ones((24, 24), chunks=(4, 8))
assert x.rechunk(chunks={}).chunks == x.chunks
pytest.raises(ValueError, lambda: x.rechunk(chunks=()))
def test_rechunk_with_null_dimensions():
x = da.from_array(np.ones((24, 24)), chunks=(4, 8))
assert x.rechunk(chunks=(None, 4)).chunks == da.ones((24, 24), chunks=(4, 4)).chunks
def test_rechunk_with_integer():
x = da.from_array(np.arange(5), chunks=4)
y = x.rechunk(3)
assert y.chunks == ((3, 2),)
assert (x.compute() == y.compute()).all()
def test_rechunk_0d():
a = np.array(42)
x = da.from_array(a, chunks=())
y = x.rechunk(())
assert y.chunks == ()
assert y.compute() == a
@pytest.mark.parametrize(
"arr", [da.array([]), da.array([[], []]), da.array([[[]], [[]]])]
)
def test_rechunk_empty_array(arr):
arr.rechunk()
assert arr.size == 0
def test_rechunk_empty():
x = da.ones((0, 10), chunks=(5, 5))
y = x.rechunk((2, 2))
assert y.chunks == ((0,), (2,) * 5)
assert_eq(x, y)
def test_rechunk_zero_dim_array():
x = da.zeros((4, 0), chunks=3)
y = x.rechunk({0: 4})
assert y.chunks == ((4,), (0,))
assert_eq(x, y)
def test_rechunk_zero_dim_array_II():
x = da.zeros((4, 0, 6, 10), chunks=3)
y = x.rechunk({0: 4, 2: 2})
assert y.chunks == ((4,), (0,), (2, 2, 2), (3, 3, 3, 1))
assert_eq(x, y)
def test_rechunk_same():
x = da.ones((24, 24), chunks=(4, 8))
y = x.rechunk(x.chunks)
assert x is y
def test_rechunk_with_zero_placeholders():
x = da.ones((24, 24), chunks=((12, 12), (24, 0)))
y = da.ones((24, 24), chunks=((12, 12), (12, 12)))
y = y.rechunk(((12, 12), (24, 0)))
assert x.chunks == y.chunks
def test_rechunk_minus_one():
x = da.ones((24, 24), chunks=(4, 8))
y = x.rechunk((-1, 8))
assert y.chunks == ((24,), (8, 8, 8))
assert_eq(x, y)
def test_rechunk_intermediates():
x = da.random.normal(10, 0.1, (10, 10), chunks=(10, 1))
y = x.rechunk((1, 10))
assert len(y.dask) > 30
def test_divide_to_width():
chunks = divide_to_width((8, 9, 10), 10)
assert chunks == (8, 9, 10)
chunks = divide_to_width((8, 2, 9, 10, 11, 12), 4)
# Note how 9 gives (3, 3, 3), not (4, 4, 1) or whatever
assert chunks == (4, 4, 2, 3, 3, 3, 3, 3, 4, 3, 4, 4, 4, 4, 4)
def test_merge_to_number():
chunks = merge_to_number((10,) * 4, 5)
assert chunks == (10, 10, 10, 10)
chunks = merge_to_number((10,) * 4, 4)
assert chunks == (10, 10, 10, 10)
chunks = merge_to_number((10,) * 4, 3)
assert chunks == (20, 10, 10)
chunks = merge_to_number((10,) * 4, 2)
assert chunks == (20, 20)
chunks = merge_to_number((10,) * 4, 1)
assert chunks == (40,)
chunks = merge_to_number((10,) * 10, 2)
assert chunks == (50,) * 2
chunks = merge_to_number((10,) * 10, 3)
assert chunks == (40, 30, 30)
chunks = merge_to_number((5, 1, 1, 15, 10), 4)
assert chunks == (5, 2, 15, 10)
chunks = merge_to_number((5, 1, 1, 15, 10), 3)
assert chunks == (7, 15, 10)
chunks = merge_to_number((5, 1, 1, 15, 10), 2)
assert chunks == (22, 10)
chunks = merge_to_number((5, 1, 1, 15, 10), 1)
assert chunks == (32,)
chunks = merge_to_number((1, 1, 1, 1, 3, 1, 1), 6)
assert chunks == (2, 1, 1, 3, 1, 1)
chunks = merge_to_number((1, 1, 1, 1, 3, 1, 1), 5)
assert chunks == (2, 2, 3, 1, 1)
chunks = merge_to_number((1, 1, 1, 1, 3, 1, 1), 4)
assert chunks == (2, 2, 3, 2)
chunks = merge_to_number((1, 1, 1, 1, 3, 1, 1), 3)
assert chunks == (4, 3, 2)
chunks = merge_to_number((1, 1, 1, 1, 3, 1, 1), 2)
assert chunks == (4, 5)
chunks = merge_to_number((1, 1, 1, 1, 3, 1, 1), 1)
assert chunks == (9,)
def _plan(old_chunks, new_chunks, itemsize=1, block_size_limit=1e7, threshold=4):
return plan_rechunk(
old_chunks,
new_chunks,
itemsize=itemsize,
block_size_limit=block_size_limit,
threshold=threshold,
)
def _assert_steps(steps, expected):
assert len(steps) == len(expected)
assert steps == expected
def test_plan_rechunk():
c = (20,) * 2 # coarse
f = (2,) * 20 # fine
nc = (float("nan"),) * 2 # nan-coarse
nf = (float("nan"),) * 20 # nan-fine
# Trivial cases
steps = _plan((), ())
_assert_steps(steps, [()])
steps = _plan((c, ()), (f, ()))
_assert_steps(steps, [(f, ())])
# No intermediate required
steps = _plan((c,), (f,))
_assert_steps(steps, [(f,)])
steps = _plan((f,), (c,))
_assert_steps(steps, [(c,)])
steps = _plan((c, c), (f, f))
_assert_steps(steps, [(f, f)])
steps = _plan((f, f), (c, c))
_assert_steps(steps, [(c, c)])
steps = _plan((f, c), (c, c))
_assert_steps(steps, [(c, c)])
steps = _plan((c, c, c, c), (c, f, c, c))
_assert_steps(steps, [(c, f, c, c)])
# An intermediate is used to reduce graph size
steps = _plan((f, c), (c, f))
_assert_steps(steps, [(c, c), (c, f)])
steps = _plan((c + c, c + f), (f + f, c + c))
_assert_steps(steps, [(c + c, c + c), (f + f, c + c)])
# Same, with unknown dim
steps = _plan((nc + nf, c + c, c + f), (nc + nf, f + f, c + c))
_assert_steps(steps, steps)
# Regression test for #5908
steps = _plan((c, c), (f, f), threshold=1)
_assert_steps(steps, [(f, f)])
# Just at the memory limit => an intermediate is used
steps = _plan((f, c), (c, f), block_size_limit=400)
_assert_steps(steps, [(c, c), (c, f)])
# Hitting the memory limit => partial merge
m = (10,) * 4 # mid
steps = _plan((f, c), (c, f), block_size_limit=399)
_assert_steps(steps, [(m, c), (c, f)])
steps2 = _plan((f, c), (c, f), block_size_limit=3999, itemsize=10)
_assert_steps(steps2, steps)
# Larger problem size => more intermediates
c = (1000,) * 2 # coarse
f = (2,) * 1000 # fine
steps = _plan((f, c), (c, f), block_size_limit=99999)
assert len(steps) == 3
assert steps[-1] == (c, f)
for i in range(len(steps) - 1):
prev = steps[i]
succ = steps[i + 1]
# Merging on the first dim, splitting on the second dim
assert len(succ[0]) <= len(prev[0]) / 2.0
assert len(succ[1]) >= len(prev[1]) * 2.0
def test_plan_rechunk_5d():
# 5d problem
c = (10,) * 1 # coarse
f = (1,) * 10 # fine
steps = _plan((c, c, c, c, c), (f, f, f, f, f))
_assert_steps(steps, [(f, f, f, f, f)])
steps = _plan((f, f, f, f, c), (c, c, c, f, f))
_assert_steps(steps, [(c, c, c, f, c), (c, c, c, f, f)])
# Only 1 dim can be merged at first
steps = _plan((c, c, f, f, c), (c, c, c, f, f), block_size_limit=2e4)
_assert_steps(steps, [(c, c, c, f, c), (c, c, c, f, f)])
def test_plan_rechunk_heterogeneous():
c = (10,) * 1 # coarse
f = (1,) * 10 # fine
cf = c + f
cc = c + c
ff = f + f
fc = f + c
# No intermediate required
steps = _plan((cc, cf), (ff, ff))
_assert_steps(steps, [(ff, ff)])
steps = _plan((cf, fc), (ff, cf))
_assert_steps(steps, [(ff, cf)])
# An intermediate is used to reduce graph size
steps = _plan((cc, cf), (ff, cc))
_assert_steps(steps, [(cc, cc), (ff, cc)])
steps = _plan((cc, cf, cc), (ff, cc, cf))
_assert_steps(steps, [(cc, cc, cc), (ff, cc, cf)])
# Imposing a memory limit => the first intermediate is constrained:
# * cc -> ff would increase the graph size: no
# * ff -> cf would increase the block size too much: no
# * cf -> cc fits the bill (graph size /= 10, block size neutral)
# * cf -> fc also fits the bill (graph size and block size neutral)
steps = _plan((cc, ff, cf), (ff, cf, cc), block_size_limit=100)
_assert_steps(steps, [(cc, ff, cc), (ff, cf, cc)])
def test_plan_rechunk_asymmetric():
a = ((1,) * 1000, (80000000,))
b = ((1000,), (80000,) * 1000)
steps = plan_rechunk(a, b, itemsize=8)
assert len(steps) > 1
x = da.ones((1000, 80000000), chunks=(1, 80000000))
y = x.rechunk((1000, x.shape[1] // 1000))
assert len(y.dask) < 100000
def test_rechunk_warning():
N = 20
x = da.random.normal(size=(N, N, 100), chunks=(1, N, 100))
with warnings.catch_warnings(record=True) as w:
x = x.rechunk((N, 1, 100))
assert not w
@pytest.mark.parametrize(
"shape,chunks", [[(4,), (2,)], [(4, 4), (2, 2)], [(4, 4), (4, 2)]]
)
def test_dont_concatenate_single_chunks(shape, chunks):
x = da.ones(shape, chunks=shape)
y = x.rechunk(chunks)
dsk = dict(y.dask)
assert not any(
funcname(task[0]).startswith("concat")
for task in dsk.values()
if dask.istask(task)
)
def test_intersect_nan():
old_chunks = ((float("nan"), float("nan")), (8,))
new_chunks = ((float("nan"), float("nan")), (4, 4))
result = list(intersect_chunks(old_chunks, new_chunks))
expected = [
(((0, slice(0, None, None)), (0, slice(0, 4, None))),),
(((0, slice(0, None, None)), (0, slice(4, 8, None))),),
(((1, slice(0, None, None)), (0, slice(0, 4, None))),),
(((1, slice(0, None, None)), (0, slice(4, 8, None))),),
]
assert result == expected
def test_intersect_nan_single():
old_chunks = ((float("nan"),), (10,))
new_chunks = ((float("nan"),), (5, 5))
result = list(intersect_chunks(old_chunks, new_chunks))
expected = [
(((0, slice(0, None, None)), (0, slice(0, 5, None))),),
(((0, slice(0, None, None)), (0, slice(5, 10, None))),),
]
assert result == expected
def test_intersect_nan_long():
old_chunks = (tuple([float("nan")] * 4), (10,))
new_chunks = (tuple([float("nan")] * 4), (5, 5))
result = list(intersect_chunks(old_chunks, new_chunks))
expected = [
(((0, slice(0, None, None)), (0, slice(0, 5, None))),),
(((0, slice(0, None, None)), (0, slice(5, 10, None))),),
(((1, slice(0, None, None)), (0, slice(0, 5, None))),),
(((1, slice(0, None, None)), (0, slice(5, 10, None))),),
(((2, slice(0, None, None)), (0, slice(0, 5, None))),),
(((2, slice(0, None, None)), (0, slice(5, 10, None))),),
(((3, slice(0, None, None)), (0, slice(0, 5, None))),),
(((3, slice(0, None, None)), (0, slice(5, 10, None))),),
]
assert result == expected
def test_rechunk_unknown_from_pandas():
dd = pytest.importorskip("dask.dataframe")
pd = pytest.importorskip("pandas")
arr = np.random.randn(50, 10)
x = dd.from_pandas(pd.DataFrame(arr), 2).values
result = x.rechunk((None, (5, 5)))
assert np.isnan(x.chunks[0]).all()
assert np.isnan(result.chunks[0]).all()
assert result.chunks[1] == (5, 5)
expected = da.from_array(arr, chunks=((25, 25), (10,))).rechunk((None, (5, 5)))
assert_eq(result, expected)
def test_rechunk_unknown_from_array():
dd = pytest.importorskip("dask.dataframe")
# pd = pytest.importorskip('pandas')
x = dd.from_array(da.ones(shape=(4, 4), chunks=(2, 2))).values
# result = x.rechunk({1: 5})
result = x.rechunk((None, 4))
assert np.isnan(x.chunks[0]).all()
assert np.isnan(result.chunks[0]).all()
assert x.chunks[1] == (4,)
assert_eq(x, result)
@pytest.mark.parametrize(
"x, chunks",
[
(da.ones(shape=(50, 10), chunks=(25, 10)), (None, 5)),
(da.ones(shape=(50, 10), chunks=(25, 10)), {1: 5}),
(da.ones(shape=(50, 10), chunks=(25, 10)), (None, (5, 5))),
(da.ones(shape=(1000, 10), chunks=(5, 10)), (None, 5)),
(da.ones(shape=(1000, 10), chunks=(5, 10)), {1: 5}),
(da.ones(shape=(1000, 10), chunks=(5, 10)), (None, (5, 5))),
(da.ones(shape=(10, 10), chunks=(10, 10)), (None, 5)),
(da.ones(shape=(10, 10), chunks=(10, 10)), {1: 5}),
(da.ones(shape=(10, 10), chunks=(10, 10)), (None, (5, 5))),
(da.ones(shape=(10, 10), chunks=(10, 2)), (None, 5)),
(da.ones(shape=(10, 10), chunks=(10, 2)), {1: 5}),
(da.ones(shape=(10, 10), chunks=(10, 2)), (None, (5, 5))),
],
)
def test_rechunk_unknown(x, chunks):
dd = pytest.importorskip("dask.dataframe")
y = dd.from_array(x).values
result = y.rechunk(chunks)
expected = x.rechunk(chunks)
assert_chunks_match(result.chunks, expected.chunks)
assert_eq(result, expected)
def test_rechunk_unknown_explicit():
dd = pytest.importorskip("dask.dataframe")
x = da.ones(shape=(10, 10), chunks=(5, 2))
y = dd.from_array(x).values
result = y.rechunk(((float("nan"), float("nan")), (5, 5)))
expected = x.rechunk((None, (5, 5)))
assert_chunks_match(result.chunks, expected.chunks)
assert_eq(result, expected)
def assert_chunks_match(left, right):
for x, y in zip(left, right):
if np.isnan(x).any():
assert np.isnan(x).all()
else:
assert x == y
def test_rechunk_unknown_raises():
dd = pytest.importorskip("dask.dataframe")
x = dd.from_array(da.ones(shape=(10, 10), chunks=(5, 5))).values
with pytest.raises(ValueError):
x.rechunk((None, (5, 5, 5)))
def test_old_to_new_single():
old = ((float("nan"), float("nan")), (8,))
new = ((float("nan"), float("nan")), (4, 4))
result = _old_to_new(old, new)
expected = [
[[(0, slice(0, None, None))], [(1, slice(0, None, None))]],
[[(0, slice(0, 4, None))], [(0, slice(4, 8, None))]],
]
assert result == expected
def test_old_to_new():
old = ((float("nan"),), (10,))
new = ((float("nan"),), (5, 5))
result = _old_to_new(old, new)
expected = [
[[(0, slice(0, None, None))]],
[[(0, slice(0, 5, None))], [(0, slice(5, 10, None))]],
]
assert result == expected
def test_old_to_new_large():
old = (tuple([float("nan")] * 4), (10,))
new = (tuple([float("nan")] * 4), (5, 5))
result = _old_to_new(old, new)
expected = [
[
[(0, slice(0, None, None))],
[(1, slice(0, None, None))],
[(2, slice(0, None, None))],
[(3, slice(0, None, None))],
],
[[(0, slice(0, 5, None))], [(0, slice(5, 10, None))]],
]
assert result == expected
def test_changing_raises():
nan = float("nan")
with pytest.raises(ValueError) as record:
_old_to_new(((nan, nan), (4, 4)), ((nan, nan, nan), (4, 4)))
assert "unchanging" in str(record.value)
def test_old_to_new_known():
old = ((10, 10, 10, 10, 10),)
new = ((25, 5, 20),)
result = _old_to_new(old, new)
expected = [
[
[(0, slice(0, 10, None)), (1, slice(0, 10, None)), (2, slice(0, 5, None))],
[(2, slice(5, 10, None))],
[(3, slice(0, 10, None)), (4, slice(0, 10, None))],
]
]
assert result == expected
def test_rechunk_zero_dim():
da = pytest.importorskip("dask.array")
x = da.ones((0, 10, 100), chunks=(0, 10, 10)).rechunk((0, 10, 50))
assert len(x.compute()) == 0
def test_rechunk_empty_chunks():
x = da.zeros((7, 24), chunks=((7,), (10, 0, 0, 9, 0, 5)))
y = x.rechunk((2, 3))
assert_eq(x, y)
def test_rechunk_avoid_needless_chunking():
x = da.ones(16, chunks=2)
y = x.rechunk(8)
dsk = y.__dask_graph__()
assert len(dsk) <= 8 + 2
@pytest.mark.parametrize(
"shape,chunks,bs,expected",
[
(100, 1, 10, (10,) * 10),
(100, 50, 10, (10,) * 10),
(100, 100, 10, (10,) * 10),
(20, 7, 10, (7, 7, 6)),
(20, (1, 1, 1, 1, 6, 2, 1, 7), 5, (5, 5, 5, 5)),
],
)
def test_rechunk_auto_1d(shape, chunks, bs, expected):
x = da.ones(shape, chunks=(chunks,))
y = x.rechunk({0: "auto"}, block_size_limit=bs * x.dtype.itemsize)
assert y.chunks == (expected,)
def test_rechunk_auto_2d():
x = da.ones((20, 20), chunks=(2, 2))
y = x.rechunk({0: -1, 1: "auto"}, block_size_limit=20 * x.dtype.itemsize)
assert y.chunks == ((20,), (1,) * 20)
x = da.ones((20, 20), chunks=(2, 2))
y = x.rechunk((-1, "auto"), block_size_limit=80 * x.dtype.itemsize)
assert y.chunks == ((20,), (4,) * 5)
x = da.ones((20, 20), chunks=((2, 2)))
y = x.rechunk({0: "auto"}, block_size_limit=20 * x.dtype.itemsize)
assert y.chunks[1] == x.chunks[1]
assert y.chunks[0] == (10, 10)
x = da.ones((20, 20), chunks=((2,) * 10, (2, 2, 2, 2, 2, 5, 5)))
y = x.rechunk({0: "auto"}, block_size_limit=20 * x.dtype.itemsize)
assert y.chunks[1] == x.chunks[1]
assert y.chunks[0] == (4, 4, 4, 4, 4) # limited by largest
def test_rechunk_auto_3d():
x = da.ones((20, 20, 20), chunks=((2, 2, 2)))
y = x.rechunk({0: "auto", 1: "auto"}, block_size_limit=200 * x.dtype.itemsize)
assert y.chunks[2] == x.chunks[2]
assert y.chunks[0] == (10, 10)
assert y.chunks[1] == (10, 10) # even split
@pytest.mark.parametrize("n", [100, 1000])
def test_rechunk_auto_image_stack(n):
with dask.config.set({"array.chunk-size": "10MiB"}):
x = da.ones((n, 1000, 1000), chunks=(1, 1000, 1000), dtype="uint8")
y = x.rechunk("auto")
assert y.chunks == ((10,) * (n // 10), (1000,), (1000,))
assert y.rechunk("auto").chunks == y.chunks # idempotent
with dask.config.set({"array.chunk-size": "7MiB"}):
z = x.rechunk("auto")
assert z.chunks == ((5,) * (n // 5), (1000,), (1000,))
with dask.config.set({"array.chunk-size": "1MiB"}):
x = da.ones((n, 1000, 1000), chunks=(1, 1000, 1000), dtype="float64")
z = x.rechunk("auto")
assert z.chunks == ((1,) * n, (250,) * 4, (250,) * 4)
def test_rechunk_down():
with dask.config.set({"array.chunk-size": "10MiB"}):
x = da.ones((100, 1000, 1000), chunks=(1, 1000, 1000), dtype="uint8")
y = x.rechunk("auto")
assert y.chunks == ((10,) * 10, (1000,), (1000,))
with dask.config.set({"array.chunk-size": "1MiB"}):
z = y.rechunk("auto")
assert z.chunks == ((5,) * 20, (250,) * 4, (250,) * 4)
with dask.config.set({"array.chunk-size": "1MiB"}):
z = y.rechunk({0: "auto"})
assert z.chunks == ((1,) * 100, (1000,), (1000,))
z = y.rechunk({1: "auto"})
assert z.chunks == ((10,) * 10, (100,) * 10, (1000,))
def test_rechunk_zero():
with dask.config.set({"array.chunk-size": "1B"}):
x = da.ones(10, chunks=(5,))
y = x.rechunk("auto")
assert y.chunks == ((1,) * 10,)
def test_rechunk_bad_keys():
x = da.zeros((2, 3, 4), chunks=1)
assert x.rechunk({-1: 4}).chunks == ((1, 1), (1, 1, 1), (4,))
assert x.rechunk({-x.ndim: 2}).chunks == ((2,), (1, 1, 1), (1, 1, 1, 1))
with pytest.raises(TypeError) as info:
x.rechunk({"blah": 4})
assert "blah" in str(info.value)
with pytest.raises(ValueError) as info:
x.rechunk({100: 4})
assert "100" in str(info.value)
with pytest.raises(ValueError) as info:
x.rechunk({-100: 4})
assert "-100" in str(info.value)
def test_balance_basics():
arr_len = 220
x = da.from_array(np.arange(arr_len), chunks=100)
balanced = x.rechunk(chunks=100, balance=True)
unbalanced = x.rechunk(chunks=100, balance=False)
assert unbalanced.chunks[0] == (100, 100, 20)
assert balanced.chunks[0] == (110, 110)
def test_balance_chunks_unchanged():
arr_len = 220
x = da.from_array(np.arange(arr_len))
balanced = x.rechunk(chunks=100, balance=True)
unbalanced = x.rechunk(chunks=100, balance=False)
assert unbalanced.chunks[0] == (100, 100, 20)
assert balanced.chunks[0] == (110, 110)
def test_balance_small():
arr_len = 13
x = da.from_array(np.arange(arr_len))
balanced = x.rechunk(chunks=4, balance=True)
unbalanced = x.rechunk(chunks=4, balance=False)
assert balanced.chunks[0] == (5, 5, 3)
assert unbalanced.chunks[0] == (4, 4, 4, 1)
arr_len = 7
x = da.from_array(np.arange(arr_len))
balanced = x.rechunk(chunks=3, balance=True)
unbalanced = x.rechunk(chunks=3, balance=False)
assert balanced.chunks[0] == (4, 3)
assert unbalanced.chunks[0] == (3, 3, 1)
def test_balance_n_chunks_size():
arr_len = 100
n_chunks = 8
x = da.from_array(np.arange(arr_len))
balanced = x.rechunk(chunks=arr_len // n_chunks, balance=True)
unbalanced = x.rechunk(chunks=arr_len // n_chunks, balance=False)
assert balanced.chunks[0] == (13,) * 7 + (9,)
assert unbalanced.chunks[0] == (12,) * 8 + (4,)
def test_balance_raises():
arr_len = 100
n_chunks = 11
x = da.from_array(np.arange(arr_len))
with pytest.warns(UserWarning, match="Try increasing the chunk size"):
balanced = x.rechunk(chunks=arr_len // n_chunks, balance=True)
unbalanced = x.rechunk(chunks=arr_len // n_chunks, balance=False)
assert balanced.chunks == unbalanced.chunks
n_chunks = 10
x.rechunk(chunks=arr_len // n_chunks, balance=True)
def test_balance_basics_2d():
N = 210
x = da.from_array(np.random.uniform(size=(N, N)))
balanced = x.rechunk(chunks=(100, 100), balance=True)
unbalanced = x.rechunk(chunks=(100, 100), balance=False)
assert unbalanced.chunks == ((100, 100, 10), (100, 100, 10))
assert balanced.chunks == ((105, 105), (105, 105))
def test_balance_2d_negative_dimension():
N = 210
x = da.from_array(np.random.uniform(size=(N, N)))
balanced = x.rechunk(chunks=(100, -1), balance=True)
unbalanced = x.rechunk(chunks=(100, -1), balance=False)
assert unbalanced.chunks == ((100, 100, 10), (N,))
assert balanced.chunks == ((105, 105), (N,))
def test_balance_different_inputs():
N = 210
x = da.from_array(np.random.uniform(size=(N, N)))
balanced = x.rechunk(chunks=("10MB", -1), balance=True)
unbalanced = x.rechunk(chunks=("10MB", -1), balance=False)
assert balanced.chunks == unbalanced.chunks
assert balanced.chunks[1] == (N,)
def test_balance_split_into_n_chunks():
# Some prime numbers around 1000
array_lens = [
991,
997,
1009,
1013,
1019,
1021,
1031,
1033,
1039,
1049,
1051,
1061,
1063,
1069,
]
for N in array_lens:
for nchunks in range(1, 20):
x = da.from_array(np.random.uniform(size=N))
y = x.rechunk(chunks=len(x) // nchunks, balance=True)
assert len(y.chunks[0]) == nchunks
| bsd-3-clause |
AnasGhrab/scikit-learn | sklearn/feature_extraction/image.py | 263 | 17600 | """
The :mod:`sklearn.feature_extraction.image` submodule gathers utilities to
extract features from images.
"""
# Authors: Emmanuelle Gouillart <[email protected]>
# Gael Varoquaux <[email protected]>
# Olivier Grisel
# Vlad Niculae
# License: BSD 3 clause
from itertools import product
import numbers
import numpy as np
from scipy import sparse
from numpy.lib.stride_tricks import as_strided
from ..utils import check_array, check_random_state
from ..utils.fixes import astype
from ..base import BaseEstimator
__all__ = ['PatchExtractor',
'extract_patches_2d',
'grid_to_graph',
'img_to_graph',
'reconstruct_from_patches_2d']
###############################################################################
# From an image to a graph
def _make_edges_3d(n_x, n_y, n_z=1):
"""Returns a list of edges for a 3D image.
Parameters
===========
n_x: integer
The size of the grid in the x direction.
n_y: integer
The size of the grid in the y direction.
n_z: integer, optional
The size of the grid in the z direction, defaults to 1
"""
vertices = np.arange(n_x * n_y * n_z).reshape((n_x, n_y, n_z))
edges_deep = np.vstack((vertices[:, :, :-1].ravel(),
vertices[:, :, 1:].ravel()))
edges_right = np.vstack((vertices[:, :-1].ravel(),
vertices[:, 1:].ravel()))
edges_down = np.vstack((vertices[:-1].ravel(), vertices[1:].ravel()))
edges = np.hstack((edges_deep, edges_right, edges_down))
return edges
def _compute_gradient_3d(edges, img):
n_x, n_y, n_z = img.shape
gradient = np.abs(img[edges[0] // (n_y * n_z),
(edges[0] % (n_y * n_z)) // n_z,
(edges[0] % (n_y * n_z)) % n_z] -
img[edges[1] // (n_y * n_z),
(edges[1] % (n_y * n_z)) // n_z,
(edges[1] % (n_y * n_z)) % n_z])
return gradient
# XXX: Why mask the image after computing the weights?
def _mask_edges_weights(mask, edges, weights=None):
"""Apply a mask to edges (weighted or not)"""
inds = np.arange(mask.size)
inds = inds[mask.ravel()]
ind_mask = np.logical_and(np.in1d(edges[0], inds),
np.in1d(edges[1], inds))
edges = edges[:, ind_mask]
if weights is not None:
weights = weights[ind_mask]
if len(edges.ravel()):
maxval = edges.max()
else:
maxval = 0
order = np.searchsorted(np.unique(edges.ravel()), np.arange(maxval + 1))
edges = order[edges]
if weights is None:
return edges
else:
return edges, weights
def _to_graph(n_x, n_y, n_z, mask=None, img=None,
return_as=sparse.coo_matrix, dtype=None):
"""Auxiliary function for img_to_graph and grid_to_graph
"""
edges = _make_edges_3d(n_x, n_y, n_z)
if dtype is None:
if img is None:
dtype = np.int
else:
dtype = img.dtype
if img is not None:
img = np.atleast_3d(img)
weights = _compute_gradient_3d(edges, img)
if mask is not None:
edges, weights = _mask_edges_weights(mask, edges, weights)
diag = img.squeeze()[mask]
else:
diag = img.ravel()
n_voxels = diag.size
else:
if mask is not None:
mask = astype(mask, dtype=np.bool, copy=False)
mask = np.asarray(mask, dtype=np.bool)
edges = _mask_edges_weights(mask, edges)
n_voxels = np.sum(mask)
else:
n_voxels = n_x * n_y * n_z
weights = np.ones(edges.shape[1], dtype=dtype)
diag = np.ones(n_voxels, dtype=dtype)
diag_idx = np.arange(n_voxels)
i_idx = np.hstack((edges[0], edges[1]))
j_idx = np.hstack((edges[1], edges[0]))
graph = sparse.coo_matrix((np.hstack((weights, weights, diag)),
(np.hstack((i_idx, diag_idx)),
np.hstack((j_idx, diag_idx)))),
(n_voxels, n_voxels),
dtype=dtype)
if return_as is np.ndarray:
return graph.toarray()
return return_as(graph)
def img_to_graph(img, mask=None, return_as=sparse.coo_matrix, dtype=None):
"""Graph of the pixel-to-pixel gradient connections
Edges are weighted with the gradient values.
Read more in the :ref:`User Guide <image_feature_extraction>`.
Parameters
----------
img : ndarray, 2D or 3D
2D or 3D image
mask : ndarray of booleans, optional
An optional mask of the image, to consider only part of the
pixels.
return_as : np.ndarray or a sparse matrix class, optional
The class to use to build the returned adjacency matrix.
dtype : None or dtype, optional
The data of the returned sparse matrix. By default it is the
dtype of img
Notes
-----
For sklearn versions 0.14.1 and prior, return_as=np.ndarray was handled
by returning a dense np.matrix instance. Going forward, np.ndarray
returns an np.ndarray, as expected.
For compatibility, user code relying on this method should wrap its
calls in ``np.asarray`` to avoid type issues.
"""
img = np.atleast_3d(img)
n_x, n_y, n_z = img.shape
return _to_graph(n_x, n_y, n_z, mask, img, return_as, dtype)
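# Illustrative usage (comment only, not part of the original module): for a
# 2x2 image the returned adjacency matrix is 4x4, with edge weights given by
# the absolute intensity difference between 4-connected neighbouring pixels:
#
#     >>> img = np.arange(4).reshape(2, 2)
#     >>> img_to_graph(img).shape
#     (4, 4)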
def grid_to_graph(n_x, n_y, n_z=1, mask=None, return_as=sparse.coo_matrix,
dtype=np.int):
"""Graph of the pixel-to-pixel connections
Edges exist if 2 voxels are connected.
Parameters
----------
n_x : int
Dimension in x axis
n_y : int
Dimension in y axis
n_z : int, optional, default 1
Dimension in z axis
mask : ndarray of booleans, optional
An optional mask of the image, to consider only part of the
pixels.
return_as : np.ndarray or a sparse matrix class, optional
The class to use to build the returned adjacency matrix.
dtype : dtype, optional, default int
The data of the returned sparse matrix. By default it is int
Notes
-----
For sklearn versions 0.14.1 and prior, return_as=np.ndarray was handled
by returning a dense np.matrix instance. Going forward, np.ndarray
returns an np.ndarray, as expected.
For compatibility, user code relying on this method should wrap its
calls in ``np.asarray`` to avoid type issues.
"""
return _to_graph(n_x, n_y, n_z, mask=mask, return_as=return_as,
dtype=dtype)
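# Illustrative usage (comment only): the connectivity graph of a 3 x 3 pixel
# grid is a 9 x 9 sparse adjacency matrix whose off-diagonal entries link
# 4-connected neighbouring pixels:
#
#     >>> grid_to_graph(3, 3).shape
#     (9, 9)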
###############################################################################
# From an image to a set of small image patches
def _compute_n_patches(i_h, i_w, p_h, p_w, max_patches=None):
"""Compute the number of patches that will be extracted in an image.
Read more in the :ref:`User Guide <image_feature_extraction>`.
Parameters
----------
i_h : int
The image height
i_w : int
        The image width
p_h : int
The height of a patch
p_w : int
The width of a patch
max_patches : integer or float, optional default is None
The maximum number of patches to extract. If max_patches is a float
between 0 and 1, it is taken to be a proportion of the total number
of patches.
"""
n_h = i_h - p_h + 1
n_w = i_w - p_w + 1
all_patches = n_h * n_w
if max_patches:
if (isinstance(max_patches, (numbers.Integral))
and max_patches < all_patches):
return max_patches
elif (isinstance(max_patches, (numbers.Real))
and 0 < max_patches < 1):
return int(max_patches * all_patches)
else:
raise ValueError("Invalid value for max_patches: %r" % max_patches)
else:
return all_patches
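# For example, a 4x4 image with 2x2 patches gives (4 - 2 + 1) ** 2 == 9
# patches when max_patches is None, matching the extract_patches_2d doctest
# further below.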
def extract_patches(arr, patch_shape=8, extraction_step=1):
"""Extracts patches of any n-dimensional array in place using strides.
Given an n-dimensional array it will return a 2n-dimensional array with
the first n dimensions indexing patch position and the last n indexing
the patch content. This operation is immediate (O(1)). A reshape
performed on the first n dimensions will cause numpy to copy data, leading
to a list of extracted patches.
Read more in the :ref:`User Guide <image_feature_extraction>`.
Parameters
----------
arr : ndarray
n-dimensional array of which patches are to be extracted
patch_shape : integer or tuple of length arr.ndim
Indicates the shape of the patches to be extracted. If an
integer is given, the shape will be a hypercube of
sidelength given by its value.
extraction_step : integer or tuple of length arr.ndim
Indicates step size at which extraction shall be performed.
If integer is given, then the step is uniform in all dimensions.
Returns
-------
patches : strided ndarray
2n-dimensional array indexing patches on first n dimensions and
containing patches on the last n dimensions. These dimensions
are fake, but this way no data is copied. A simple reshape invokes
a copying operation to obtain a list of patches:
result.reshape([-1] + list(patch_shape))
"""
arr_ndim = arr.ndim
if isinstance(patch_shape, numbers.Number):
patch_shape = tuple([patch_shape] * arr_ndim)
if isinstance(extraction_step, numbers.Number):
extraction_step = tuple([extraction_step] * arr_ndim)
patch_strides = arr.strides
    slices = tuple(slice(None, None, st) for st in extraction_step)
indexing_strides = arr[slices].strides
patch_indices_shape = ((np.array(arr.shape) - np.array(patch_shape)) //
np.array(extraction_step)) + 1
shape = tuple(list(patch_indices_shape) + list(patch_shape))
strides = tuple(list(indexing_strides) + list(patch_strides))
patches = as_strided(arr, shape=shape, strides=strides)
return patches
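# Illustrative shape arithmetic (comment only): 2x2 patches of a 4x4 array
# with the default extraction_step of 1 form a strided view of shape
# (3, 3, 2, 2) -- 3 x 3 patch positions, each holding one 2 x 2 patch:
#
#     >>> extract_patches(np.arange(16).reshape(4, 4), patch_shape=2).shape
#     (3, 3, 2, 2)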
def extract_patches_2d(image, patch_size, max_patches=None, random_state=None):
"""Reshape a 2D image into a collection of patches
The resulting patches are allocated in a dedicated array.
Read more in the :ref:`User Guide <image_feature_extraction>`.
Parameters
----------
image : array, shape = (image_height, image_width) or
(image_height, image_width, n_channels)
The original image data. For color images, the last dimension specifies
the channel: a RGB image would have `n_channels=3`.
patch_size : tuple of ints (patch_height, patch_width)
the dimensions of one patch
max_patches : integer or float, optional default is None
The maximum number of patches to extract. If max_patches is a float
between 0 and 1, it is taken to be a proportion of the total number
of patches.
random_state : int or RandomState
Pseudo number generator state used for random sampling to use if
`max_patches` is not None.
Returns
-------
patches : array, shape = (n_patches, patch_height, patch_width) or
(n_patches, patch_height, patch_width, n_channels)
The collection of patches extracted from the image, where `n_patches`
is either `max_patches` or the total number of patches that can be
extracted.
Examples
--------
>>> from sklearn.feature_extraction import image
>>> one_image = np.arange(16).reshape((4, 4))
>>> one_image
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
>>> patches = image.extract_patches_2d(one_image, (2, 2))
>>> print(patches.shape)
(9, 2, 2)
>>> patches[0]
array([[0, 1],
[4, 5]])
>>> patches[1]
array([[1, 2],
[5, 6]])
>>> patches[8]
array([[10, 11],
[14, 15]])
"""
i_h, i_w = image.shape[:2]
p_h, p_w = patch_size
if p_h > i_h:
raise ValueError("Height of the patch should be less than the height"
" of the image.")
if p_w > i_w:
raise ValueError("Width of the patch should be less than the width"
" of the image.")
image = check_array(image, allow_nd=True)
image = image.reshape((i_h, i_w, -1))
n_colors = image.shape[-1]
extracted_patches = extract_patches(image,
patch_shape=(p_h, p_w, n_colors),
extraction_step=1)
n_patches = _compute_n_patches(i_h, i_w, p_h, p_w, max_patches)
if max_patches:
rng = check_random_state(random_state)
i_s = rng.randint(i_h - p_h + 1, size=n_patches)
j_s = rng.randint(i_w - p_w + 1, size=n_patches)
patches = extracted_patches[i_s, j_s, 0]
else:
patches = extracted_patches
patches = patches.reshape(-1, p_h, p_w, n_colors)
# remove the color dimension if useless
if patches.shape[-1] == 1:
return patches.reshape((n_patches, p_h, p_w))
else:
return patches
def reconstruct_from_patches_2d(patches, image_size):
"""Reconstruct the image from all of its patches.
Patches are assumed to overlap and the image is constructed by filling in
the patches from left to right, top to bottom, averaging the overlapping
regions.
Read more in the :ref:`User Guide <image_feature_extraction>`.
Parameters
----------
patches : array, shape = (n_patches, patch_height, patch_width) or
(n_patches, patch_height, patch_width, n_channels)
The complete set of patches. If the patches contain colour information,
channels are indexed along the last dimension: RGB patches would
have `n_channels=3`.
image_size : tuple of ints (image_height, image_width) or
(image_height, image_width, n_channels)
the size of the image that will be reconstructed
Returns
-------
image : array, shape = image_size
the reconstructed image
"""
i_h, i_w = image_size[:2]
p_h, p_w = patches.shape[1:3]
img = np.zeros(image_size)
# compute the dimensions of the patches array
n_h = i_h - p_h + 1
n_w = i_w - p_w + 1
for p, (i, j) in zip(patches, product(range(n_h), range(n_w))):
img[i:i + p_h, j:j + p_w] += p
for i in range(i_h):
for j in range(i_w):
# divide by the amount of overlap
# XXX: is this the most efficient way? memory-wise yes, cpu wise?
img[i, j] /= float(min(i + 1, p_h, i_h - i) *
min(j + 1, p_w, i_w - j))
return img
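# Illustrative round trip (comment only): extracting every overlapping patch
# and reconstructing recovers the original image exactly, since each pixel's
# overlapping contributions are averaged:
#
#     >>> img = np.arange(16, dtype=np.float64).reshape(4, 4)
#     >>> patches = extract_patches_2d(img, (2, 2))
#     >>> np.allclose(reconstruct_from_patches_2d(patches, img.shape), img)
#     True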
class PatchExtractor(BaseEstimator):
"""Extracts patches from a collection of images
Read more in the :ref:`User Guide <image_feature_extraction>`.
Parameters
----------
patch_size : tuple of ints (patch_height, patch_width)
the dimensions of one patch
max_patches : integer or float, optional default is None
The maximum number of patches per image to extract. If max_patches is a
float in (0, 1), it is taken to mean a proportion of the total number
of patches.
random_state : int or RandomState
Pseudo number generator state used for random sampling.
"""
def __init__(self, patch_size=None, max_patches=None, random_state=None):
self.patch_size = patch_size
self.max_patches = max_patches
self.random_state = random_state
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
"""
return self
def transform(self, X):
"""Transforms the image samples in X into a matrix of patch data.
Parameters
----------
X : array, shape = (n_samples, image_height, image_width) or
(n_samples, image_height, image_width, n_channels)
Array of images from which to extract patches. For color images,
            the last dimension specifies the channel: an RGB image would have
`n_channels=3`.
Returns
-------
patches: array, shape = (n_patches, patch_height, patch_width) or
(n_patches, patch_height, patch_width, n_channels)
The collection of patches extracted from the images, where
`n_patches` is either `n_samples * max_patches` or the total
number of patches that can be extracted.
"""
self.random_state = check_random_state(self.random_state)
n_images, i_h, i_w = X.shape[:3]
X = np.reshape(X, (n_images, i_h, i_w, -1))
n_channels = X.shape[-1]
if self.patch_size is None:
patch_size = i_h // 10, i_w // 10
else:
patch_size = self.patch_size
# compute the dimensions of the patches array
p_h, p_w = patch_size
n_patches = _compute_n_patches(i_h, i_w, p_h, p_w, self.max_patches)
patches_shape = (n_images * n_patches,) + patch_size
if n_channels > 1:
patches_shape += (n_channels,)
# extract the patches
patches = np.empty(patches_shape)
for ii, image in enumerate(X):
patches[ii * n_patches:(ii + 1) * n_patches] = extract_patches_2d(
image, patch_size, self.max_patches, self.random_state)
return patches
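# Illustrative usage sketch (added for clarity; not part of the original
# module): PatchExtractor has a no-op ``fit``, so it can sit inside a
# Pipeline; here it samples 10 random 8x8 patches from each image of a
# small grayscale stack.
def _example_patch_extractor_usage():
    rng = np.random.RandomState(0)
    images = rng.rand(5, 32, 32)                    # 5 grayscale images
    extractor = PatchExtractor(patch_size=(8, 8), max_patches=10,
                               random_state=0)
    patches = extractor.fit(images).transform(images)
    return patches.shape                            # (50, 8, 8)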
| bsd-3-clause |
alejamp/ardupilot | libraries/AP_Math/tools/geodesic_grid/plot.py | 110 | 2876 | # Copyright (C) 2016 Intel Corporation. All rights reserved.
#
# This file is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This file is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
from mpl_toolkits.mplot3d import Axes3D
from mpl_toolkits.mplot3d.art3d import Poly3DCollection
import icosahedron as ico
import grid
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.set_xlim3d(-2, 2)
ax.set_ylim3d(-2, 2)
ax.set_zlim3d(-2, 2)
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('z')
ax.invert_zaxis()
ax.invert_xaxis()
ax.set_aspect('equal')
added_polygons = set()
added_sections = set()
def polygons(polygons):
for p in polygons:
polygon(p)
def polygon(polygon):
added_polygons.add(polygon)
def section(s):
added_sections.add(s)
def sections(sections):
for s in sections:
section(s)
def show(subtriangles=False):
polygons = []
facecolors = []
triangles_indexes = set()
subtriangle_facecolors = (
'#CCCCCC',
'#CCE5FF',
'#E5FFCC',
'#FFCCCC',
)
if added_sections:
subtriangles = True
for p in added_polygons:
try:
i = ico.triangles.index(p)
except ValueError:
polygons.append(p)
continue
if subtriangles:
sections(range(i * 4, i * 4 + 4))
else:
triangles_indexes.add(i)
polygons.append(p)
facecolors.append('#DDDDDD')
for s in added_sections:
triangles_indexes.add(int(s / 4))
subtriangle_index = s % 4
polygons.append(grid.section_triangle(s))
facecolors.append(subtriangle_facecolors[subtriangle_index])
ax.add_collection3d(Poly3DCollection(
polygons,
facecolors=facecolors,
edgecolors="#777777",
))
for i in triangles_indexes:
t = ico.triangles[i]
mx = my = mz = 0
for x, y, z in t:
mx += x
my += y
mz += z
ax.text(mx / 2.6, my / 2.6, mz / 2.6, i, color='#444444')
if subtriangles:
ax.legend(
handles=tuple(
mpatches.Patch(color=c, label='Sub-triangle #%d' % i)
for i, c in enumerate(subtriangle_facecolors)
),
)
plt.show()
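# Illustrative driver sketch (added for clarity; not part of the original
# script).  It relies on the icosahedron/grid modules imported above and the
# helpers defined in this file: shade icosahedron triangle #0, mark one of
# its grid sections, then render the interactive 3-D view.
def _example_render():
    polygon(ico.triangles[0])    # whole triangle #0, labelled with its index
    section(2)                   # one of triangle #0's four sub-triangles
    show(subtriangles=True)      # includes the sub-triangle colour legend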
| gpl-3.0 |
BigDataforYou/movie_recommendation_workshop_1 | big_data_4_you_demo_1/venv/lib/python2.7/site-packages/pandas/stats/ols.py | 2 | 40430 | """
Ordinary least squares regression
"""
# pylint: disable-msg=W0201
# flake8: noqa
from pandas.compat import zip, range, StringIO
from itertools import starmap
from pandas import compat
import numpy as np
from pandas.core.api import DataFrame, Series, isnull
from pandas.core.base import StringMixin
from pandas.core.common import _ensure_float64
from pandas.core.index import MultiIndex
from pandas.core.panel import Panel
from pandas.util.decorators import cache_readonly
import pandas.stats.common as scom
import pandas.stats.math as math
import pandas.stats.moments as moments
_FP_ERR = 1e-8
class OLS(StringMixin):
"""
Runs a full sample ordinary least squares regression.
Parameters
----------
y : Series
x : Series, DataFrame, dict of Series
intercept : bool
True if you want an intercept.
weights : array-like, optional
1d array of weights. If you supply 1/W then the variables are pre-
multiplied by 1/sqrt(W). If no weights are supplied the default value
        is 1 and WLS results are the same as OLS.
nw_lags : None or int
Number of Newey-West lags.
nw_overlap : boolean, default False
Assume data is overlapping when computing Newey-West estimator
"""
_panel_model = False
def __init__(self, y, x, intercept=True, weights=None, nw_lags=None,
nw_overlap=False):
import warnings
warnings.warn("The pandas.stats.ols module is deprecated and will be "
"removed in a future version. We refer to external packages "
"like statsmodels, see some examples here: http://statsmodels.sourceforge.net/stable/regression.html",
FutureWarning, stacklevel=4)
try:
import statsmodels.api as sm
except ImportError:
import scikits.statsmodels.api as sm
self._x_orig = x
self._y_orig = y
self._weights_orig = weights
self._intercept = intercept
self._nw_lags = nw_lags
self._nw_overlap = nw_overlap
(self._y, self._x, self._weights, self._x_filtered,
self._index, self._time_has_obs) = self._prepare_data()
if self._weights is not None:
self._x_trans = self._x.mul(np.sqrt(self._weights), axis=0)
self._y_trans = self._y * np.sqrt(self._weights)
self.sm_ols = sm.WLS(self._y.get_values(),
self._x.get_values(),
weights=self._weights.values).fit()
else:
self._x_trans = self._x
self._y_trans = self._y
self.sm_ols = sm.OLS(self._y.get_values(),
self._x.get_values()).fit()
def _prepare_data(self):
"""
Cleans the input for single OLS.
Parameters
----------
lhs: Series
Dependent variable in the regression.
rhs: dict, whose values are Series, DataFrame, or dict
Explanatory variables of the regression.
Returns
-------
Series, DataFrame
Cleaned lhs and rhs
"""
(filt_lhs, filt_rhs, filt_weights,
pre_filt_rhs, index, valid) = _filter_data(self._y_orig, self._x_orig,
self._weights_orig)
if self._intercept:
filt_rhs['intercept'] = 1.
pre_filt_rhs['intercept'] = 1.
if hasattr(filt_weights, 'to_dense'):
filt_weights = filt_weights.to_dense()
return (filt_lhs, filt_rhs, filt_weights,
pre_filt_rhs, index, valid)
@property
def nobs(self):
return self._nobs
@property
def _nobs(self):
return len(self._y)
@property
def nw_lags(self):
return self._nw_lags
@property
def x(self):
"""Returns the filtered x used in the regression."""
return self._x
@property
def y(self):
"""Returns the filtered y used in the regression."""
return self._y
@cache_readonly
def _beta_raw(self):
"""Runs the regression and returns the beta."""
return self.sm_ols.params
@cache_readonly
def beta(self):
"""Returns the betas in Series form."""
return Series(self._beta_raw, index=self._x.columns)
@cache_readonly
def _df_raw(self):
"""Returns the degrees of freedom."""
return math.rank(self._x.values)
@cache_readonly
def df(self):
"""Returns the degrees of freedom.
This equals the rank of the X matrix.
"""
return self._df_raw
@cache_readonly
def _df_model_raw(self):
"""Returns the raw model degrees of freedom."""
return self.sm_ols.df_model
@cache_readonly
def df_model(self):
"""Returns the degrees of freedom of the model."""
return self._df_model_raw
@cache_readonly
def _df_resid_raw(self):
"""Returns the raw residual degrees of freedom."""
return self.sm_ols.df_resid
@cache_readonly
def df_resid(self):
"""Returns the degrees of freedom of the residuals."""
return self._df_resid_raw
@cache_readonly
def _f_stat_raw(self):
"""Returns the raw f-stat value."""
from scipy.stats import f
cols = self._x.columns
if self._nw_lags is None:
F = self._r2_raw / (self._r2_raw - self._r2_adj_raw)
q = len(cols)
if 'intercept' in cols:
q -= 1
shape = q, self.df_resid
p_value = 1 - f.cdf(F, shape[0], shape[1])
return F, shape, p_value
k = len(cols)
R = np.eye(k)
r = np.zeros((k, 1))
try:
intercept = cols.get_loc('intercept')
R = np.concatenate((R[0: intercept], R[intercept + 1:]))
r = np.concatenate((r[0: intercept], r[intercept + 1:]))
except KeyError:
# no intercept
pass
return math.calc_F(R, r, self._beta_raw, self._var_beta_raw,
self._nobs, self.df)
@cache_readonly
def f_stat(self):
"""Returns the f-stat value."""
return f_stat_to_dict(self._f_stat_raw)
def f_test(self, hypothesis):
"""Runs the F test, given a joint hypothesis. The hypothesis is
represented by a collection of equations, in the form
A*x_1+B*x_2=C
You must provide the coefficients even if they're 1. No spaces.
The equations can be passed as either a single string or a
list of strings.
Examples
--------
o = ols(...)
o.f_test('1*x1+2*x2=0,1*x3=0')
o.f_test(['1*x1+2*x2=0','1*x3=0'])
"""
x_names = self._x.columns
R = []
r = []
if isinstance(hypothesis, str):
eqs = hypothesis.split(',')
elif isinstance(hypothesis, list):
eqs = hypothesis
else: # pragma: no cover
raise Exception('hypothesis must be either string or list')
for equation in eqs:
row = np.zeros(len(x_names))
lhs, rhs = equation.split('=')
for s in lhs.split('+'):
ss = s.split('*')
coeff = float(ss[0])
x_name = ss[1]
if x_name not in x_names:
raise Exception('no coefficient named %s' % x_name)
idx = x_names.get_loc(x_name)
row[idx] = coeff
rhs = float(rhs)
R.append(row)
r.append(rhs)
R = np.array(R)
q = len(r)
r = np.array(r).reshape(q, 1)
result = math.calc_F(R, r, self._beta_raw, self._var_beta_raw,
self._nobs, self.df)
return f_stat_to_dict(result)
@cache_readonly
def _p_value_raw(self):
"""Returns the raw p values."""
from scipy.stats import t
return 2 * t.sf(np.fabs(self._t_stat_raw),
self._df_resid_raw)
@cache_readonly
def p_value(self):
"""Returns the p values."""
return Series(self._p_value_raw, index=self.beta.index)
@cache_readonly
def _r2_raw(self):
"""Returns the raw r-squared values."""
if self._use_centered_tss:
return 1 - self.sm_ols.ssr / self.sm_ols.centered_tss
else:
return 1 - self.sm_ols.ssr / self.sm_ols.uncentered_tss
@property
def _use_centered_tss(self):
# has_intercept = np.abs(self._resid_raw.sum()) < _FP_ERR
return self._intercept
@cache_readonly
def r2(self):
"""Returns the r-squared values."""
return self._r2_raw
@cache_readonly
def _r2_adj_raw(self):
"""Returns the raw r-squared adjusted values."""
return self.sm_ols.rsquared_adj
@cache_readonly
def r2_adj(self):
"""Returns the r-squared adjusted values."""
return self._r2_adj_raw
@cache_readonly
def _resid_raw(self):
"""Returns the raw residuals."""
return self.sm_ols.resid
@cache_readonly
def resid(self):
"""Returns the residuals."""
return Series(self._resid_raw, index=self._x.index)
@cache_readonly
def _rmse_raw(self):
"""Returns the raw rmse values."""
return np.sqrt(self.sm_ols.mse_resid)
@cache_readonly
def rmse(self):
"""Returns the rmse value."""
return self._rmse_raw
@cache_readonly
def _std_err_raw(self):
"""Returns the raw standard err values."""
return np.sqrt(np.diag(self._var_beta_raw))
@cache_readonly
def std_err(self):
"""Returns the standard err values of the betas."""
return Series(self._std_err_raw, index=self.beta.index)
@cache_readonly
def _t_stat_raw(self):
"""Returns the raw t-stat value."""
return self._beta_raw / self._std_err_raw
@cache_readonly
def t_stat(self):
"""Returns the t-stat values of the betas."""
return Series(self._t_stat_raw, index=self.beta.index)
@cache_readonly
def _var_beta_raw(self):
"""
Returns the raw covariance of beta.
"""
x = self._x.values
y = self._y.values
xx = np.dot(x.T, x)
if self._nw_lags is None:
return math.inv(xx) * (self._rmse_raw ** 2)
else:
resid = y - np.dot(x, self._beta_raw)
m = (x.T * resid).T
xeps = math.newey_west(m, self._nw_lags, self._nobs, self._df_raw,
self._nw_overlap)
xx_inv = math.inv(xx)
return np.dot(xx_inv, np.dot(xeps, xx_inv))
@cache_readonly
def var_beta(self):
"""Returns the variance-covariance matrix of beta."""
return DataFrame(self._var_beta_raw, index=self.beta.index,
columns=self.beta.index)
@cache_readonly
def _y_fitted_raw(self):
"""Returns the raw fitted y values."""
if self._weights is None:
X = self._x_filtered.values
else:
# XXX
return self.sm_ols.fittedvalues
b = self._beta_raw
return np.dot(X, b)
@cache_readonly
def y_fitted(self):
"""Returns the fitted y values. This equals BX."""
if self._weights is None:
index = self._x_filtered.index
orig_index = index
else:
index = self._y.index
orig_index = self._y_orig.index
result = Series(self._y_fitted_raw, index=index)
return result.reindex(orig_index)
@cache_readonly
def _y_predict_raw(self):
"""Returns the raw predicted y values."""
return self._y_fitted_raw
@cache_readonly
def y_predict(self):
"""Returns the predicted y values.
        For in-sample, this is the same as y_fitted."""
return self.y_fitted
def predict(self, beta=None, x=None, fill_value=None,
fill_method=None, axis=0):
"""
Parameters
----------
beta : Series
x : Series or DataFrame
fill_value : scalar or dict, default None
fill_method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None
axis : {0, 1}, default 0
See DataFrame.fillna for more details
Notes
-----
1. If both fill_value and fill_method are None then NaNs are dropped
(this is the default behavior)
2. An intercept will be automatically added to the new_y_values if
the model was fitted using an intercept
Returns
-------
Series of predicted values
"""
if beta is None and x is None:
return self.y_predict
if beta is None:
beta = self.beta
else:
beta = beta.reindex(self.beta.index)
if isnull(beta).any():
raise ValueError('Must supply betas for same variables')
if x is None:
x = self._x
orig_x = x
else:
orig_x = x
if fill_value is None and fill_method is None:
x = x.dropna(how='any')
else:
x = x.fillna(value=fill_value, method=fill_method, axis=axis)
if isinstance(x, Series):
x = DataFrame({'x': x})
if self._intercept:
x['intercept'] = 1.
x = x.reindex(columns=self._x.columns)
rs = np.dot(x.values, beta.values)
return Series(rs, x.index).reindex(orig_x.index)
RESULT_FIELDS = ['r2', 'r2_adj', 'df', 'df_model', 'df_resid', 'rmse',
'f_stat', 'beta', 'std_err', 't_stat', 'p_value', 'nobs']
@cache_readonly
def _results(self):
results = {}
for result in self.RESULT_FIELDS:
results[result] = getattr(self, result)
return results
@cache_readonly
def _coef_table(self):
buf = StringIO()
buf.write('%14s %10s %10s %10s %10s %10s %10s\n' %
('Variable', 'Coef', 'Std Err', 't-stat',
'p-value', 'CI 2.5%', 'CI 97.5%'))
buf.write(scom.banner(''))
coef_template = '\n%14s %10.4f %10.4f %10.2f %10.4f %10.4f %10.4f'
results = self._results
beta = results['beta']
for i, name in enumerate(beta.index):
if i and not (i % 5):
buf.write('\n' + scom.banner(''))
std_err = results['std_err'][name]
CI1 = beta[name] - 1.96 * std_err
CI2 = beta[name] + 1.96 * std_err
t_stat = results['t_stat'][name]
p_value = results['p_value'][name]
line = coef_template % (name,
beta[name], std_err, t_stat, p_value, CI1, CI2)
buf.write(line)
if self.nw_lags is not None:
buf.write('\n')
buf.write('*** The calculations are Newey-West '
'adjusted with lags %5d\n' % self.nw_lags)
return buf.getvalue()
@cache_readonly
def summary_as_matrix(self):
"""Returns the formatted results of the OLS as a DataFrame."""
results = self._results
beta = results['beta']
data = {'beta': results['beta'],
't-stat': results['t_stat'],
'p-value': results['p_value'],
'std err': results['std_err']}
return DataFrame(data, beta.index).T
@cache_readonly
def summary(self):
"""
This returns the formatted result of the OLS computation
"""
template = """
%(bannerTop)s
Formula: Y ~ %(formula)s
Number of Observations: %(nobs)d
Number of Degrees of Freedom: %(df)d
R-squared: %(r2)10.4f
Adj R-squared: %(r2_adj)10.4f
Rmse: %(rmse)10.4f
F-stat %(f_stat_shape)s: %(f_stat)10.4f, p-value: %(f_stat_p_value)10.4f
Degrees of Freedom: model %(df_model)d, resid %(df_resid)d
%(bannerCoef)s
%(coef_table)s
%(bannerEnd)s
"""
coef_table = self._coef_table
results = self._results
f_stat = results['f_stat']
bracketed = ['<%s>' % str(c) for c in results['beta'].index]
formula = StringIO()
formula.write(bracketed[0])
tot = len(bracketed[0])
line = 1
for coef in bracketed[1:]:
tot = tot + len(coef) + 3
if tot // (68 * line):
formula.write('\n' + ' ' * 12)
line += 1
formula.write(' + ' + coef)
params = {
'bannerTop': scom.banner('Summary of Regression Analysis'),
'bannerCoef': scom.banner('Summary of Estimated Coefficients'),
'bannerEnd': scom.banner('End of Summary'),
'formula': formula.getvalue(),
'r2': results['r2'],
'r2_adj': results['r2_adj'],
'nobs': results['nobs'],
'df': results['df'],
'df_model': results['df_model'],
'df_resid': results['df_resid'],
'coef_table': coef_table,
'rmse': results['rmse'],
'f_stat': f_stat['f-stat'],
'f_stat_shape': '(%d, %d)' % (f_stat['DF X'], f_stat['DF Resid']),
'f_stat_p_value': f_stat['p-value'],
}
return template % params
def __unicode__(self):
return self.summary
@cache_readonly
def _time_obs_count(self):
# XXX
return self._time_has_obs.astype(int)
@property
def _total_times(self):
return self._time_has_obs.sum()
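# ---------------------------------------------------------------------------
# Illustrative usage sketch (added for clarity; not part of the original
# module).  It assumes statsmodels is importable, since OLS delegates the
# actual fit to it, and uses only names already imported at the top of this
# module.
def _example_ols_usage():
    """Fit a toy full-sample regression and return its betas (a sketch)."""
    rng = np.random.RandomState(0)
    x = DataFrame({'x1': rng.randn(100), 'x2': rng.randn(100)})
    y = 2.0 * x['x1'] - 1.0 * x['x2'] + 0.5 + 0.1 * rng.randn(100)
    model = OLS(y=y, x=x, intercept=True)
    # model.beta holds 'x1', 'x2' and 'intercept'; r2, t_stat, summary and
    # predict() expose the remaining results described above.
    return model.beta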
class MovingOLS(OLS):
"""
Runs a rolling/expanding simple OLS.
Parameters
----------
y : Series
x : Series, DataFrame, or dict of Series
weights : array-like, optional
1d array of weights. If None, equivalent to an unweighted OLS.
window_type : {'full sample', 'rolling', 'expanding'}
Default expanding
window : int
size of window (for rolling/expanding OLS)
min_periods : int
Threshold of non-null data points to require.
If None, defaults to size of window for window_type='rolling' and 1
otherwise
intercept : bool
True if you want an intercept.
nw_lags : None or int
Number of Newey-West lags.
nw_overlap : boolean, default False
Assume data is overlapping when computing Newey-West estimator
"""
def __init__(self, y, x, weights=None, window_type='expanding',
window=None, min_periods=None, intercept=True,
nw_lags=None, nw_overlap=False):
self._args = dict(intercept=intercept, nw_lags=nw_lags,
nw_overlap=nw_overlap)
OLS.__init__(self, y=y, x=x, weights=weights, **self._args)
self._set_window(window_type, window, min_periods)
def _set_window(self, window_type, window, min_periods):
self._window_type = scom._get_window_type(window_type)
if self._is_rolling:
if window is None:
raise AssertionError("Must specify window.")
if min_periods is None:
min_periods = window
else:
window = len(self._x)
if min_periods is None:
min_periods = 1
self._window = int(window)
self._min_periods = min_periods
#------------------------------------------------------------------------------
# "Public" results
@cache_readonly
def beta(self):
"""Returns the betas in Series/DataFrame form."""
return DataFrame(self._beta_raw,
index=self._result_index,
columns=self._x.columns)
@cache_readonly
def rank(self):
return Series(self._rank_raw, index=self._result_index)
@cache_readonly
def df(self):
"""Returns the degrees of freedom."""
return Series(self._df_raw, index=self._result_index)
@cache_readonly
def df_model(self):
"""Returns the model degrees of freedom."""
return Series(self._df_model_raw, index=self._result_index)
@cache_readonly
def df_resid(self):
"""Returns the residual degrees of freedom."""
return Series(self._df_resid_raw, index=self._result_index)
@cache_readonly
def f_stat(self):
"""Returns the f-stat value."""
f_stat_dicts = dict((date, f_stat_to_dict(f_stat))
for date, f_stat in zip(self.beta.index,
self._f_stat_raw))
return DataFrame(f_stat_dicts).T
def f_test(self, hypothesis):
raise NotImplementedError('must use full sample')
@cache_readonly
def forecast_mean(self):
return Series(self._forecast_mean_raw, index=self._result_index)
@cache_readonly
def forecast_vol(self):
return Series(self._forecast_vol_raw, index=self._result_index)
@cache_readonly
def p_value(self):
"""Returns the p values."""
cols = self.beta.columns
return DataFrame(self._p_value_raw, columns=cols,
index=self._result_index)
@cache_readonly
def r2(self):
"""Returns the r-squared values."""
return Series(self._r2_raw, index=self._result_index)
@cache_readonly
def resid(self):
"""Returns the residuals."""
return Series(self._resid_raw[self._valid_obs_labels],
index=self._result_index)
@cache_readonly
def r2_adj(self):
"""Returns the r-squared adjusted values."""
index = self.r2.index
return Series(self._r2_adj_raw, index=index)
@cache_readonly
def rmse(self):
"""Returns the rmse values."""
return Series(self._rmse_raw, index=self._result_index)
@cache_readonly
def std_err(self):
"""Returns the standard err values."""
return DataFrame(self._std_err_raw, columns=self.beta.columns,
index=self._result_index)
@cache_readonly
def t_stat(self):
"""Returns the t-stat value."""
return DataFrame(self._t_stat_raw, columns=self.beta.columns,
index=self._result_index)
@cache_readonly
def var_beta(self):
"""Returns the covariance of beta."""
result = {}
result_index = self._result_index
for i in range(len(self._var_beta_raw)):
dm = DataFrame(self._var_beta_raw[i], columns=self.beta.columns,
index=self.beta.columns)
result[result_index[i]] = dm
return Panel.from_dict(result, intersect=False)
@cache_readonly
def y_fitted(self):
"""Returns the fitted y values."""
return Series(self._y_fitted_raw[self._valid_obs_labels],
index=self._result_index)
@cache_readonly
def y_predict(self):
"""Returns the predicted y values."""
return Series(self._y_predict_raw[self._valid_obs_labels],
index=self._result_index)
#------------------------------------------------------------------------------
# "raw" attributes, calculations
@property
def _is_rolling(self):
return self._window_type == 'rolling'
@cache_readonly
def _beta_raw(self):
"""Runs the regression and returns the beta."""
beta, indices, mask = self._rolling_ols_call
return beta[indices]
@cache_readonly
def _result_index(self):
return self._index[self._valid_indices]
@property
def _valid_indices(self):
return self._rolling_ols_call[1]
@cache_readonly
def _rolling_ols_call(self):
return self._calc_betas(self._x_trans, self._y_trans)
def _calc_betas(self, x, y):
N = len(self._index)
K = len(self._x.columns)
betas = np.empty((N, K), dtype=float)
betas[:] = np.NaN
valid = self._time_has_obs
enough = self._enough_obs
window = self._window
# Use transformed (demeaned) Y, X variables
cum_xx = self._cum_xx(x)
cum_xy = self._cum_xy(x, y)
for i in range(N):
if not valid[i] or not enough[i]:
continue
xx = cum_xx[i]
xy = cum_xy[i]
if self._is_rolling and i >= window:
xx = xx - cum_xx[i - window]
xy = xy - cum_xy[i - window]
betas[i] = math.solve(xx, xy)
mask = ~np.isnan(betas).any(axis=1)
have_betas = np.arange(N)[mask]
return betas, have_betas, mask
def _rolling_rank(self):
dates = self._index
window = self._window
ranks = np.empty(len(dates), dtype=float)
ranks[:] = np.NaN
for i, date in enumerate(dates):
if self._is_rolling and i >= window:
prior_date = dates[i - window + 1]
else:
prior_date = dates[0]
x_slice = self._x.truncate(before=prior_date, after=date).values
if len(x_slice) == 0:
continue
ranks[i] = math.rank(x_slice)
return ranks
def _cum_xx(self, x):
dates = self._index
K = len(x.columns)
valid = self._time_has_obs
cum_xx = []
slicer = lambda df, dt: df.truncate(dt, dt).values
if not self._panel_model:
_get_index = x.index.get_loc
def slicer(df, dt):
i = _get_index(dt)
return df.values[i:i + 1, :]
last = np.zeros((K, K))
for i, date in enumerate(dates):
if not valid[i]:
cum_xx.append(last)
continue
x_slice = slicer(x, date)
xx = last = last + np.dot(x_slice.T, x_slice)
cum_xx.append(xx)
return cum_xx
def _cum_xy(self, x, y):
dates = self._index
valid = self._time_has_obs
cum_xy = []
x_slicer = lambda df, dt: df.truncate(dt, dt).values
if not self._panel_model:
_get_index = x.index.get_loc
def x_slicer(df, dt):
i = _get_index(dt)
return df.values[i:i + 1]
_y_get_index = y.index.get_loc
_values = y.values
if isinstance(y.index, MultiIndex):
def y_slicer(df, dt):
loc = _y_get_index(dt)
return _values[loc]
else:
def y_slicer(df, dt):
i = _y_get_index(dt)
return _values[i:i + 1]
last = np.zeros(len(x.columns))
for i, date in enumerate(dates):
if not valid[i]:
cum_xy.append(last)
continue
x_slice = x_slicer(x, date)
y_slice = y_slicer(y, date)
xy = last = last + np.dot(x_slice.T, y_slice)
cum_xy.append(xy)
return cum_xy
@cache_readonly
def _rank_raw(self):
rank = self._rolling_rank()
return rank[self._valid_indices]
@cache_readonly
def _df_raw(self):
"""Returns the degrees of freedom."""
return self._rank_raw
@cache_readonly
def _df_model_raw(self):
"""Returns the raw model degrees of freedom."""
return self._df_raw - 1
@cache_readonly
def _df_resid_raw(self):
"""Returns the raw residual degrees of freedom."""
return self._nobs - self._df_raw
@cache_readonly
def _f_stat_raw(self):
"""Returns the raw f-stat value."""
from scipy.stats import f
items = self.beta.columns
nobs = self._nobs
df = self._df_raw
df_resid = nobs - df
# var_beta has not been newey-west adjusted
if self._nw_lags is None:
F = self._r2_raw / (self._r2_raw - self._r2_adj_raw)
q = len(items)
if 'intercept' in items:
q -= 1
def get_result_simple(Fst, d):
return Fst, (q, d), 1 - f.cdf(Fst, q, d)
# Compute the P-value for each pair
result = starmap(get_result_simple, zip(F, df_resid))
return list(result)
K = len(items)
R = np.eye(K)
r = np.zeros((K, 1))
try:
intercept = items.get_loc('intercept')
R = np.concatenate((R[0: intercept], R[intercept + 1:]))
r = np.concatenate((r[0: intercept], r[intercept + 1:]))
except KeyError:
# no intercept
pass
def get_result(beta, vcov, n, d):
return math.calc_F(R, r, beta, vcov, n, d)
results = starmap(get_result,
zip(self._beta_raw, self._var_beta_raw, nobs, df))
return list(results)
@cache_readonly
def _p_value_raw(self):
"""Returns the raw p values."""
from scipy.stats import t
result = [2 * t.sf(a, b)
for a, b in zip(np.fabs(self._t_stat_raw),
self._df_resid_raw)]
return np.array(result)
@cache_readonly
def _resid_stats(self):
uncentered_sst = []
sst = []
sse = []
Yreg = self._y
Y = self._y_trans
X = self._x_trans
weights = self._weights
dates = self._index
window = self._window
for n, index in enumerate(self._valid_indices):
if self._is_rolling and index >= window:
prior_date = dates[index - window + 1]
else:
prior_date = dates[0]
date = dates[index]
beta = self._beta_raw[n]
X_slice = X.truncate(before=prior_date, after=date).values
Y_slice = _y_converter(Y.truncate(before=prior_date, after=date))
resid = Y_slice - np.dot(X_slice, beta)
if weights is not None:
Y_slice = _y_converter(Yreg.truncate(before=prior_date,
after=date))
weights_slice = weights.truncate(prior_date, date)
demeaned = Y_slice - np.average(Y_slice, weights=weights_slice)
SS_total = (weights_slice * demeaned ** 2).sum()
else:
SS_total = ((Y_slice - Y_slice.mean()) ** 2).sum()
SS_err = (resid ** 2).sum()
SST_uncentered = (Y_slice ** 2).sum()
sse.append(SS_err)
sst.append(SS_total)
uncentered_sst.append(SST_uncentered)
return {
'sse': np.array(sse),
'centered_tss': np.array(sst),
'uncentered_tss': np.array(uncentered_sst),
}
@cache_readonly
def _rmse_raw(self):
"""Returns the raw rmse values."""
return np.sqrt(self._resid_stats['sse'] / self._df_resid_raw)
@cache_readonly
def _r2_raw(self):
rs = self._resid_stats
if self._use_centered_tss:
return 1 - rs['sse'] / rs['centered_tss']
else:
return 1 - rs['sse'] / rs['uncentered_tss']
@cache_readonly
def _r2_adj_raw(self):
"""Returns the raw r-squared adjusted values."""
nobs = self._nobs
factors = (nobs - 1) / (nobs - self._df_raw)
return 1 - (1 - self._r2_raw) * factors
@cache_readonly
def _resid_raw(self):
"""Returns the raw residuals."""
return (self._y.values - self._y_fitted_raw)
@cache_readonly
def _std_err_raw(self):
"""Returns the raw standard err values."""
results = []
for i in range(len(self._var_beta_raw)):
results.append(np.sqrt(np.diag(self._var_beta_raw[i])))
return np.array(results)
@cache_readonly
def _t_stat_raw(self):
"""Returns the raw t-stat value."""
return self._beta_raw / self._std_err_raw
@cache_readonly
def _var_beta_raw(self):
"""Returns the raw covariance of beta."""
x = self._x_trans
y = self._y_trans
dates = self._index
nobs = self._nobs
rmse = self._rmse_raw
beta = self._beta_raw
df = self._df_raw
window = self._window
cum_xx = self._cum_xx(self._x)
results = []
for n, i in enumerate(self._valid_indices):
xx = cum_xx[i]
date = dates[i]
if self._is_rolling and i >= window:
xx = xx - cum_xx[i - window]
prior_date = dates[i - window + 1]
else:
prior_date = dates[0]
x_slice = x.truncate(before=prior_date, after=date)
y_slice = y.truncate(before=prior_date, after=date)
xv = x_slice.values
yv = np.asarray(y_slice)
if self._nw_lags is None:
result = math.inv(xx) * (rmse[n] ** 2)
else:
resid = yv - np.dot(xv, beta[n])
m = (xv.T * resid).T
xeps = math.newey_west(m, self._nw_lags, nobs[n], df[n],
self._nw_overlap)
xx_inv = math.inv(xx)
result = np.dot(xx_inv, np.dot(xeps, xx_inv))
results.append(result)
return np.array(results)
@cache_readonly
def _forecast_mean_raw(self):
"""Returns the raw covariance of beta."""
nobs = self._nobs
window = self._window
# x should be ones
dummy = DataFrame(index=self._y.index)
dummy['y'] = 1
cum_xy = self._cum_xy(dummy, self._y)
results = []
for n, i in enumerate(self._valid_indices):
sumy = cum_xy[i]
if self._is_rolling and i >= window:
sumy = sumy - cum_xy[i - window]
results.append(sumy[0] / nobs[n])
return np.array(results)
@cache_readonly
def _forecast_vol_raw(self):
"""Returns the raw covariance of beta."""
beta = self._beta_raw
window = self._window
dates = self._index
x = self._x
results = []
for n, i in enumerate(self._valid_indices):
date = dates[i]
if self._is_rolling and i >= window:
prior_date = dates[i - window + 1]
else:
prior_date = dates[0]
x_slice = x.truncate(prior_date, date).values
x_demeaned = x_slice - x_slice.mean(0)
x_cov = np.dot(x_demeaned.T, x_demeaned) / (len(x_slice) - 1)
B = beta[n]
result = np.dot(B, np.dot(x_cov, B))
results.append(np.sqrt(result))
return np.array(results)
@cache_readonly
def _y_fitted_raw(self):
"""Returns the raw fitted y values."""
return (self._x.values * self._beta_matrix(lag=0)).sum(1)
@cache_readonly
def _y_predict_raw(self):
"""Returns the raw predicted y values."""
return (self._x.values * self._beta_matrix(lag=1)).sum(1)
@cache_readonly
def _results(self):
results = {}
for result in self.RESULT_FIELDS:
value = getattr(self, result)
if isinstance(value, Series):
value = value[self.beta.index[-1]]
elif isinstance(value, DataFrame):
value = value.xs(self.beta.index[-1])
else: # pragma: no cover
raise Exception('Problem retrieving %s' % result)
results[result] = value
return results
@cache_readonly
def _window_time_obs(self):
window_obs = (Series(self._time_obs_count > 0)
.rolling(self._window, min_periods=1)
.sum()
.values
)
window_obs[np.isnan(window_obs)] = 0
return window_obs.astype(int)
@cache_readonly
def _nobs_raw(self):
if self._is_rolling:
window = self._window
else:
# expanding case
window = len(self._index)
result = Series(self._time_obs_count).rolling(
window, min_periods=1).sum().values
return result.astype(int)
def _beta_matrix(self, lag=0):
if lag < 0:
raise AssertionError("'lag' must be greater than or equal to 0, "
"input was {0}".format(lag))
betas = self._beta_raw
labels = np.arange(len(self._y)) - lag
indexer = self._valid_obs_labels.searchsorted(labels, side='left')
indexer[indexer == len(betas)] = len(betas) - 1
beta_matrix = betas[indexer]
beta_matrix[labels < self._valid_obs_labels[0]] = np.NaN
return beta_matrix
@cache_readonly
def _valid_obs_labels(self):
dates = self._index[self._valid_indices]
return self._y.index.searchsorted(dates)
@cache_readonly
def _nobs(self):
return self._nobs_raw[self._valid_indices]
@property
def nobs(self):
return Series(self._nobs, index=self._result_index)
@cache_readonly
def _enough_obs(self):
# XXX: what's the best way to determine where to start?
return self._nobs_raw >= max(self._min_periods,
len(self._x.columns) + 1)
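# Illustrative rolling-regression sketch (added for clarity; not part of the
# original module, and again assuming statsmodels is importable): a
# 30-observation rolling window over toy data, so ``model.beta`` comes back
# as one row of coefficients per window end-point.
def _example_moving_ols_usage():
    """Run a rolling OLS with a 30-observation window (a sketch)."""
    rng = np.random.RandomState(0)
    x = DataFrame({'x1': rng.randn(250)})
    y = 1.5 * x['x1'] + 0.2 * rng.randn(250)
    model = MovingOLS(y=y, x=x, window_type='rolling', window=30,
                      intercept=True)
    # r2, t_stat, std_err and nobs are indexed the same way as beta.
    return model.beta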
def _safe_update(d, other):
"""
Combine dictionaries with non-overlapping keys
"""
for k, v in compat.iteritems(other):
if k in d:
raise Exception('Duplicate regressor: %s' % k)
d[k] = v
def _filter_data(lhs, rhs, weights=None):
"""
Cleans the input for single OLS.
Parameters
----------
lhs : Series
Dependent variable in the regression.
rhs : dict, whose values are Series, DataFrame, or dict
Explanatory variables of the regression.
weights : array-like, optional
1d array of weights. If None, equivalent to an unweighted OLS.
Returns
-------
Series, DataFrame
Cleaned lhs and rhs
"""
if not isinstance(lhs, Series):
if len(lhs) != len(rhs):
raise AssertionError("length of lhs must equal length of rhs")
lhs = Series(lhs, index=rhs.index)
rhs = _combine_rhs(rhs)
lhs = DataFrame({'__y__': lhs}, dtype=float)
pre_filt_rhs = rhs.dropna(how='any')
combined = rhs.join(lhs, how='outer')
if weights is not None:
combined['__weights__'] = weights
valid = (combined.count(1) == len(combined.columns)).values
index = combined.index
combined = combined[valid]
if weights is not None:
filt_weights = combined.pop('__weights__')
else:
filt_weights = None
filt_lhs = combined.pop('__y__')
filt_rhs = combined
if hasattr(filt_weights, 'to_dense'):
filt_weights = filt_weights.to_dense()
return (filt_lhs.to_dense(), filt_rhs.to_dense(), filt_weights,
pre_filt_rhs.to_dense(), index, valid)
def _combine_rhs(rhs):
"""
Glue input X variables together while checking for potential
duplicates
"""
series = {}
if isinstance(rhs, Series):
series['x'] = rhs
elif isinstance(rhs, DataFrame):
series = rhs.copy()
elif isinstance(rhs, dict):
for name, value in compat.iteritems(rhs):
if isinstance(value, Series):
_safe_update(series, {name: value})
elif isinstance(value, (dict, DataFrame)):
_safe_update(series, value)
else: # pragma: no cover
raise Exception('Invalid RHS data type: %s' % type(value))
else: # pragma: no cover
raise Exception('Invalid RHS type: %s' % type(rhs))
if not isinstance(series, DataFrame):
series = DataFrame(series, dtype=float)
return series
# A little kludge so we can use this method for both
# MovingOLS and MovingPanelOLS
def _y_converter(y):
y = y.values.squeeze()
if y.ndim == 0: # pragma: no cover
return np.array([y])
else:
return y
def f_stat_to_dict(result):
f_stat, shape, p_value = result
result = {}
result['f-stat'] = f_stat
result['DF X'] = shape[0]
result['DF Resid'] = shape[1]
result['p-value'] = p_value
return result
| mit |
jayflo/scikit-learn | sklearn/cluster/bicluster.py | 211 | 19443 | """Spectral biclustering algorithms.
Authors : Kemal Eren
License: BSD 3 clause
"""
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import dia_matrix
from scipy.sparse import issparse
from . import KMeans, MiniBatchKMeans
from ..base import BaseEstimator, BiclusterMixin
from ..externals import six
from ..utils.arpack import eigsh, svds
from ..utils.extmath import (make_nonnegative, norm, randomized_svd,
safe_sparse_dot)
from ..utils.validation import assert_all_finite, check_array
__all__ = ['SpectralCoclustering',
'SpectralBiclustering']
def _scale_normalize(X):
"""Normalize ``X`` by scaling rows and columns independently.
Returns the normalized matrix and the row and column scaling
factors.
"""
X = make_nonnegative(X)
row_diag = np.asarray(1.0 / np.sqrt(X.sum(axis=1))).squeeze()
col_diag = np.asarray(1.0 / np.sqrt(X.sum(axis=0))).squeeze()
row_diag = np.where(np.isnan(row_diag), 0, row_diag)
col_diag = np.where(np.isnan(col_diag), 0, col_diag)
if issparse(X):
n_rows, n_cols = X.shape
r = dia_matrix((row_diag, [0]), shape=(n_rows, n_rows))
c = dia_matrix((col_diag, [0]), shape=(n_cols, n_cols))
an = r * X * c
else:
an = row_diag[:, np.newaxis] * X * col_diag
return an, row_diag, col_diag
def _bistochastic_normalize(X, max_iter=1000, tol=1e-5):
"""Normalize rows and columns of ``X`` simultaneously so that all
rows sum to one constant and all columns sum to a different
constant.
"""
# According to paper, this can also be done more efficiently with
# deviation reduction and balancing algorithms.
X = make_nonnegative(X)
X_scaled = X
dist = None
for _ in range(max_iter):
X_new, _, _ = _scale_normalize(X_scaled)
if issparse(X):
            dist = norm(X_scaled.data - X_new.data)
else:
dist = norm(X_scaled - X_new)
X_scaled = X_new
if dist is not None and dist < tol:
break
return X_scaled
def _log_normalize(X):
"""Normalize ``X`` according to Kluger's log-interactions scheme."""
X = make_nonnegative(X, min_value=1)
if issparse(X):
raise ValueError("Cannot compute log of a sparse matrix,"
" because log(x) diverges to -infinity as x"
" goes to 0.")
L = np.log(X)
row_avg = L.mean(axis=1)[:, np.newaxis]
col_avg = L.mean(axis=0)
avg = L.mean()
return L - row_avg - col_avg + avg
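# Illustrative sketch of the normalization helpers (added for clarity; not
# part of the original module).  After bistochastic normalization every row
# of the result sums to one common constant and every column to another,
# which is the property stated in the docstring above.
def _example_normalization():
    rng = np.random.RandomState(0)
    X = rng.rand(4, 6)
    X_bistochastic = _bistochastic_normalize(X)
    # Both of the returned vectors are (numerically) constant.
    return X_bistochastic.sum(axis=1), X_bistochastic.sum(axis=0)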
class BaseSpectral(six.with_metaclass(ABCMeta, BaseEstimator,
BiclusterMixin)):
"""Base class for spectral biclustering."""
@abstractmethod
def __init__(self, n_clusters=3, svd_method="randomized",
n_svd_vecs=None, mini_batch=False, init="k-means++",
n_init=10, n_jobs=1, random_state=None):
self.n_clusters = n_clusters
self.svd_method = svd_method
self.n_svd_vecs = n_svd_vecs
self.mini_batch = mini_batch
self.init = init
self.n_init = n_init
self.n_jobs = n_jobs
self.random_state = random_state
def _check_parameters(self):
legal_svd_methods = ('randomized', 'arpack')
if self.svd_method not in legal_svd_methods:
raise ValueError("Unknown SVD method: '{0}'. svd_method must be"
" one of {1}.".format(self.svd_method,
legal_svd_methods))
def fit(self, X):
"""Creates a biclustering for X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
"""
X = check_array(X, accept_sparse='csr', dtype=np.float64)
self._check_parameters()
self._fit(X)
def _svd(self, array, n_components, n_discard):
"""Returns first `n_components` left and right singular
vectors u and v, discarding the first `n_discard`.
"""
if self.svd_method == 'randomized':
kwargs = {}
if self.n_svd_vecs is not None:
kwargs['n_oversamples'] = self.n_svd_vecs
u, _, vt = randomized_svd(array, n_components,
random_state=self.random_state,
**kwargs)
elif self.svd_method == 'arpack':
u, _, vt = svds(array, k=n_components, ncv=self.n_svd_vecs)
if np.any(np.isnan(vt)):
# some eigenvalues of A * A.T are negative, causing
# sqrt() to be np.nan. This causes some vectors in vt
# to be np.nan.
_, v = eigsh(safe_sparse_dot(array.T, array),
ncv=self.n_svd_vecs)
vt = v.T
if np.any(np.isnan(u)):
_, u = eigsh(safe_sparse_dot(array, array.T),
ncv=self.n_svd_vecs)
assert_all_finite(u)
assert_all_finite(vt)
u = u[:, n_discard:]
vt = vt[n_discard:]
return u, vt.T
def _k_means(self, data, n_clusters):
if self.mini_batch:
model = MiniBatchKMeans(n_clusters,
init=self.init,
n_init=self.n_init,
random_state=self.random_state)
else:
model = KMeans(n_clusters, init=self.init,
n_init=self.n_init, n_jobs=self.n_jobs,
random_state=self.random_state)
model.fit(data)
centroid = model.cluster_centers_
labels = model.labels_
return centroid, labels
class SpectralCoclustering(BaseSpectral):
"""Spectral Co-Clustering algorithm (Dhillon, 2001).
Clusters rows and columns of an array `X` to solve the relaxed
normalized cut of the bipartite graph created from `X` as follows:
the edge between row vertex `i` and column vertex `j` has weight
`X[i, j]`.
The resulting bicluster structure is block-diagonal, since each
row and each column belongs to exactly one bicluster.
Supports sparse matrices, as long as they are nonnegative.
Read more in the :ref:`User Guide <spectral_coclustering>`.
Parameters
----------
n_clusters : integer, optional, default: 3
The number of biclusters to find.
svd_method : string, optional, default: 'randomized'
Selects the algorithm for finding singular vectors. May be
'randomized' or 'arpack'. If 'randomized', use
:func:`sklearn.utils.extmath.randomized_svd`, which may be faster
for large matrices. If 'arpack', use
:func:`sklearn.utils.arpack.svds`, which is more accurate, but
possibly slower in some cases.
n_svd_vecs : int, optional, default: None
Number of vectors to use in calculating the SVD. Corresponds
to `ncv` when `svd_method=arpack` and `n_oversamples` when
        `svd_method` is 'randomized'.
mini_batch : bool, optional, default: False
Whether to use mini-batch k-means, which is faster but may get
different results.
init : {'k-means++', 'random' or an ndarray}
Method for initialization of k-means algorithm; defaults to
'k-means++'.
n_init : int, optional, default: 10
Number of random initializations that are tried with the
k-means algorithm.
If mini-batch k-means is used, the best initialization is
chosen and the algorithm runs once. Otherwise, the algorithm
is run for each initialization and the best solution chosen.
n_jobs : int, optional, default: 1
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
random_state : int seed, RandomState instance, or None (default)
A pseudo random number generator used by the K-Means
initialization.
Attributes
----------
rows_ : array-like, shape (n_row_clusters, n_rows)
Results of the clustering. `rows[i, r]` is True if
cluster `i` contains row `r`. Available only after calling ``fit``.
columns_ : array-like, shape (n_column_clusters, n_columns)
Results of the clustering, like `rows`.
row_labels_ : array-like, shape (n_rows,)
The bicluster label of each row.
column_labels_ : array-like, shape (n_cols,)
The bicluster label of each column.
References
----------
* Dhillon, Inderjit S, 2001. `Co-clustering documents and words using
bipartite spectral graph partitioning
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.140.3011>`__.
"""
def __init__(self, n_clusters=3, svd_method='randomized',
n_svd_vecs=None, mini_batch=False, init='k-means++',
n_init=10, n_jobs=1, random_state=None):
super(SpectralCoclustering, self).__init__(n_clusters,
svd_method,
n_svd_vecs,
mini_batch,
init,
n_init,
n_jobs,
random_state)
def _fit(self, X):
normalized_data, row_diag, col_diag = _scale_normalize(X)
n_sv = 1 + int(np.ceil(np.log2(self.n_clusters)))
u, v = self._svd(normalized_data, n_sv, n_discard=1)
z = np.vstack((row_diag[:, np.newaxis] * u,
col_diag[:, np.newaxis] * v))
_, labels = self._k_means(z, self.n_clusters)
n_rows = X.shape[0]
self.row_labels_ = labels[:n_rows]
self.column_labels_ = labels[n_rows:]
self.rows_ = np.vstack(self.row_labels_ == c
for c in range(self.n_clusters))
self.columns_ = np.vstack(self.column_labels_ == c
for c in range(self.n_clusters))
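# Illustrative usage sketch (added for clarity; not part of the original
# module): shuffle a two-block block-diagonal matrix and recover which rows
# and columns belong together, the situation described in the class
# docstring above.
def _example_coclustering():
    rng = np.random.RandomState(0)
    data = np.kron(np.eye(2), np.ones((10, 10))) + 0.1 * rng.rand(20, 20)
    shuffled = data[rng.permutation(20)][:, rng.permutation(20)]
    model = SpectralCoclustering(n_clusters=2, random_state=0)
    model.fit(shuffled)
    # rows_/columns_ give boolean bicluster membership; the labels below are
    # the per-row and per-column cluster assignments.
    return model.row_labels_, model.column_labels_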
class SpectralBiclustering(BaseSpectral):
"""Spectral biclustering (Kluger, 2003).
Partitions rows and columns under the assumption that the data has
an underlying checkerboard structure. For instance, if there are
two row partitions and three column partitions, each row will
belong to three biclusters, and each column will belong to two
biclusters. The outer product of the corresponding row and column
label vectors gives this checkerboard structure.
Read more in the :ref:`User Guide <spectral_biclustering>`.
Parameters
----------
n_clusters : integer or tuple (n_row_clusters, n_column_clusters)
The number of row and column clusters in the checkerboard
structure.
method : string, optional, default: 'bistochastic'
Method of normalizing and converting singular vectors into
biclusters. May be one of 'scale', 'bistochastic', or 'log'.
The authors recommend using 'log'. If the data is sparse,
however, log normalization will not work, which is why the
default is 'bistochastic'. CAUTION: if `method='log'`, the
data must not be sparse.
n_components : integer, optional, default: 6
Number of singular vectors to check.
n_best : integer, optional, default: 3
Number of best singular vectors to which to project the data
for clustering.
svd_method : string, optional, default: 'randomized'
Selects the algorithm for finding singular vectors. May be
'randomized' or 'arpack'. If 'randomized', uses
`sklearn.utils.extmath.randomized_svd`, which may be faster
for large matrices. If 'arpack', uses
`sklearn.utils.arpack.svds`, which is more accurate, but
possibly slower in some cases.
n_svd_vecs : int, optional, default: None
Number of vectors to use in calculating the SVD. Corresponds
to `ncv` when `svd_method=arpack` and `n_oversamples` when
        `svd_method` is 'randomized'.
mini_batch : bool, optional, default: False
Whether to use mini-batch k-means, which is faster but may get
different results.
init : {'k-means++', 'random' or an ndarray}
Method for initialization of k-means algorithm; defaults to
'k-means++'.
n_init : int, optional, default: 10
Number of random initializations that are tried with the
k-means algorithm.
If mini-batch k-means is used, the best initialization is
chosen and the algorithm runs once. Otherwise, the algorithm
is run for each initialization and the best solution chosen.
n_jobs : int, optional, default: 1
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
random_state : int seed, RandomState instance, or None (default)
A pseudo random number generator used by the K-Means
initialization.
Attributes
----------
rows_ : array-like, shape (n_row_clusters, n_rows)
Results of the clustering. `rows[i, r]` is True if
cluster `i` contains row `r`. Available only after calling ``fit``.
columns_ : array-like, shape (n_column_clusters, n_columns)
Results of the clustering, like `rows`.
row_labels_ : array-like, shape (n_rows,)
Row partition labels.
column_labels_ : array-like, shape (n_cols,)
Column partition labels.
References
----------
* Kluger, Yuval, et. al., 2003. `Spectral biclustering of microarray
data: coclustering genes and conditions
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.135.1608>`__.
"""
def __init__(self, n_clusters=3, method='bistochastic',
n_components=6, n_best=3, svd_method='randomized',
n_svd_vecs=None, mini_batch=False, init='k-means++',
n_init=10, n_jobs=1, random_state=None):
super(SpectralBiclustering, self).__init__(n_clusters,
svd_method,
n_svd_vecs,
mini_batch,
init,
n_init,
n_jobs,
random_state)
self.method = method
self.n_components = n_components
self.n_best = n_best
def _check_parameters(self):
super(SpectralBiclustering, self)._check_parameters()
legal_methods = ('bistochastic', 'scale', 'log')
if self.method not in legal_methods:
raise ValueError("Unknown method: '{0}'. method must be"
" one of {1}.".format(self.method, legal_methods))
try:
int(self.n_clusters)
except TypeError:
try:
r, c = self.n_clusters
int(r)
int(c)
except (ValueError, TypeError):
raise ValueError("Incorrect parameter n_clusters has value:"
" {}. It should either be a single integer"
" or an iterable with two integers:"
" (n_row_clusters, n_column_clusters)")
if self.n_components < 1:
raise ValueError("Parameter n_components must be greater than 0,"
" but its value is {}".format(self.n_components))
if self.n_best < 1:
raise ValueError("Parameter n_best must be greater than 0,"
" but its value is {}".format(self.n_best))
if self.n_best > self.n_components:
raise ValueError("n_best cannot be larger than"
" n_components, but {} > {}"
"".format(self.n_best, self.n_components))
def _fit(self, X):
n_sv = self.n_components
if self.method == 'bistochastic':
normalized_data = _bistochastic_normalize(X)
n_sv += 1
elif self.method == 'scale':
normalized_data, _, _ = _scale_normalize(X)
n_sv += 1
elif self.method == 'log':
normalized_data = _log_normalize(X)
n_discard = 0 if self.method == 'log' else 1
u, v = self._svd(normalized_data, n_sv, n_discard)
ut = u.T
vt = v.T
try:
n_row_clusters, n_col_clusters = self.n_clusters
except TypeError:
n_row_clusters = n_col_clusters = self.n_clusters
best_ut = self._fit_best_piecewise(ut, self.n_best,
n_row_clusters)
best_vt = self._fit_best_piecewise(vt, self.n_best,
n_col_clusters)
self.row_labels_ = self._project_and_cluster(X, best_vt.T,
n_row_clusters)
self.column_labels_ = self._project_and_cluster(X.T, best_ut.T,
n_col_clusters)
self.rows_ = np.vstack(self.row_labels_ == label
for label in range(n_row_clusters)
for _ in range(n_col_clusters))
self.columns_ = np.vstack(self.column_labels_ == label
for _ in range(n_row_clusters)
for label in range(n_col_clusters))
def _fit_best_piecewise(self, vectors, n_best, n_clusters):
"""Find the ``n_best`` vectors that are best approximated by piecewise
constant vectors.
The piecewise vectors are found by k-means; the best is chosen
according to Euclidean distance.
"""
def make_piecewise(v):
centroid, labels = self._k_means(v.reshape(-1, 1), n_clusters)
return centroid[labels].ravel()
piecewise_vectors = np.apply_along_axis(make_piecewise,
axis=1, arr=vectors)
dists = np.apply_along_axis(norm, axis=1,
arr=(vectors - piecewise_vectors))
result = vectors[np.argsort(dists)[:n_best]]
return result
def _project_and_cluster(self, data, vectors, n_clusters):
"""Project ``data`` to ``vectors`` and cluster the result."""
projected = safe_sparse_dot(data, vectors)
_, labels = self._k_means(projected, n_clusters)
return labels
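# Illustrative usage sketch (added for clarity; not part of the original
# module): data built with 2 row clusters and 3 column clusters, i.e. the
# checkerboard structure this estimator is designed to recover.
def _example_biclustering():
    rng = np.random.RandomState(0)
    row_means = np.repeat(np.array([1.0, 5.0]), 15)[:, np.newaxis]
    col_means = np.repeat(np.array([1.0, 3.0, 9.0]), 10)[np.newaxis, :]
    data = row_means * col_means + rng.rand(30, 30)
    model = SpectralBiclustering(n_clusters=(2, 3), method='log',
                                 random_state=0)
    model.fit(data)
    return model.row_labels_, model.column_labels_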
| bsd-3-clause |
CamDavidsonPilon/lifestyles | lifestyles/models/cbc.py | 1 | 1207 | from theano import tensor as tt
import pandas as pd
import pymc3 as pm
"""
Looking at what xlstat does, they make sure that all weights in a level sum to 0
"""
profiles = pd.get_dummies(pd.read_csv("data/lemonade/profiles.tsv", sep="\s+").set_index('Profile'), drop_first=True)
comparisons = pd.read_csv("data/lemonade/comparisons.tsv", sep="\s+").set_index('Comparisons')
selections = pd.read_csv("data/lemonade/selections.tsv", sep="\s+").set_index("Comparisons")
first_choice = profiles.loc[comparisons['Profile1']]
second_choice = profiles.loc[comparisons['Profile2']]
third_choice = profiles.loc[comparisons['Profile3']]
with pm.Model() as hierarchical_model:
weights = pm.Normal("weights", 0, sd=10., shape=(profiles.shape[1], 1))
probs = tt.nnet.softmax(tt.stack([
tt.dot(first_choice, weights),
tt.dot(second_choice, weights),
tt.dot(third_choice, weights)
], axis=0).T)
cs = [pm.Categorical("Obs%d" % i, probs, observed=(selections['Individual%i' % i] - 1).values) for i in xrange(1, 11)]
with hierarchical_model:
hierarchical_trace = pm.sample(40000, pm.Metropolis(), tune=2000)
pm.plots.traceplot(hierarchical_trace)
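# Illustrative follow-up sketch (added for clarity; not part of the original
# script): summarise the sampled part-worths, dropping the first half of the
# Metropolis draws as burn-in.  The slicing assumes the (40000, n_levels, 1)
# trace shape produced by the model above.
part_worths = pd.DataFrame(hierarchical_trace['weights'][20000:, :, 0],
                           columns=profiles.columns)
print(part_worths.mean().sort_values(ascending=False))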
| mit |
zfrenchee/pandas | pandas/tests/io/parser/test_parsers.py | 6 | 5181 | # -*- coding: utf-8 -*-
import os
import pandas.util.testing as tm
from pandas import read_csv, read_table, DataFrame
from pandas.core.common import AbstractMethodError
from pandas._libs.lib import Timestamp
from pandas.compat import StringIO
from .common import ParserTests
from .header import HeaderTests
from .comment import CommentTests
from .dialect import DialectTests
from .quoting import QuotingTests
from .usecols import UsecolsTests
from .skiprows import SkipRowsTests
from .index_col import IndexColTests
from .na_values import NAvaluesTests
from .converters import ConverterTests
from .c_parser_only import CParserTests
from .parse_dates import ParseDatesTests
from .compression import CompressionTests
from .mangle_dupes import DupeColumnTests
from .multithread import MultithreadTests
from .python_parser_only import PythonParserTests
from .dtypes import DtypeTests
class BaseParser(CommentTests, CompressionTests,
ConverterTests, DialectTests,
DtypeTests, DupeColumnTests,
HeaderTests, IndexColTests,
MultithreadTests, NAvaluesTests,
ParseDatesTests, ParserTests,
SkipRowsTests, UsecolsTests,
QuotingTests):
def read_csv(self, *args, **kwargs):
raise NotImplementedError
def read_table(self, *args, **kwargs):
raise NotImplementedError
def float_precision_choices(self):
raise AbstractMethodError(self)
def setup_method(self, method):
self.dirpath = tm.get_data_path()
self.csv1 = os.path.join(self.dirpath, 'test1.csv')
self.csv2 = os.path.join(self.dirpath, 'test2.csv')
self.xls1 = os.path.join(self.dirpath, 'test.xls')
self.csv_shiftjs = os.path.join(self.dirpath, 'sauron.SHIFT_JIS.csv')
class TestCParserHighMemory(BaseParser, CParserTests):
engine = 'c'
low_memory = False
float_precision_choices = [None, 'high', 'round_trip']
def read_csv(self, *args, **kwds):
kwds = kwds.copy()
kwds['engine'] = self.engine
kwds['low_memory'] = self.low_memory
return read_csv(*args, **kwds)
def read_table(self, *args, **kwds):
kwds = kwds.copy()
kwds['engine'] = self.engine
kwds['low_memory'] = self.low_memory
return read_table(*args, **kwds)
class TestCParserLowMemory(BaseParser, CParserTests):
engine = 'c'
low_memory = True
float_precision_choices = [None, 'high', 'round_trip']
def read_csv(self, *args, **kwds):
kwds = kwds.copy()
kwds['engine'] = self.engine
kwds['low_memory'] = self.low_memory
return read_csv(*args, **kwds)
def read_table(self, *args, **kwds):
kwds = kwds.copy()
kwds['engine'] = self.engine
kwds['low_memory'] = True
return read_table(*args, **kwds)
class TestPythonParser(BaseParser, PythonParserTests):
engine = 'python'
float_precision_choices = [None]
def read_csv(self, *args, **kwds):
kwds = kwds.copy()
kwds['engine'] = self.engine
return read_csv(*args, **kwds)
def read_table(self, *args, **kwds):
kwds = kwds.copy()
kwds['engine'] = self.engine
return read_table(*args, **kwds)
class TestUnsortedUsecols(object):
def test_override__set_noconvert_columns(self):
        # GH 17351 - usecols needs to be sorted in _set_noconvert_columns
# based on the test_usecols_with_parse_dates test from usecols.py
from pandas.io.parsers import CParserWrapper, TextFileReader
s = """a,b,c,d,e
0,1,20140101,0900,4
0,1,20140102,1000,4"""
parse_dates = [[1, 2]]
cols = {
'a': [0, 0],
'c_d': [
Timestamp('2014-01-01 09:00:00'),
Timestamp('2014-01-02 10:00:00')
]
}
expected = DataFrame(cols, columns=['c_d', 'a'])
class MyTextFileReader(TextFileReader):
def __init__(self):
self._currow = 0
self.squeeze = False
class MyCParserWrapper(CParserWrapper):
def _set_noconvert_columns(self):
if self.usecols_dtype == 'integer':
# self.usecols is a set, which is documented as unordered
# but in practice, a CPython set of integers is sorted.
# In other implementations this assumption does not hold.
# The following code simulates a different order, which
# before GH 17351 would cause the wrong columns to be
# converted via the parse_dates parameter
self.usecols = list(self.usecols)
self.usecols.reverse()
return CParserWrapper._set_noconvert_columns(self)
parser = MyTextFileReader()
parser.options = {'usecols': [0, 2, 3],
'parse_dates': parse_dates,
'delimiter': ','}
parser._engine = MyCParserWrapper(StringIO(s), **parser.options)
df = parser.read()
tm.assert_frame_equal(df, expected)
| bsd-3-clause |
bibsian/database-development | test/manual_test_tablemodel.py | 1 | 5598 | from PyQt4 import QtCore, QtGui
import pytest
import pytestqt
import numpy as np
import pandas as pd
import os
import sys
from Views import ui_dialog_table_preview as uiprev
rootpath = os.path.dirname(os.path.dirname( __file__ ))
end = os.path.sep
sys.path.append(os.path.realpath(os.path.dirname(
rootpath)))
os.chdir(rootpath)
@pytest.fixture
def df():
return pd.read_csv(
rootpath + end + 'test' + end + 'Datasets_manual_test' + end +
'splitcolumn_data_test.csv'
)
@pytest.fixture
def PandasTableModelEdit():
class PandasTableModelEdit(QtCore.QAbstractTableModel):
log_change = QtCore.pyqtSignal(object)
'''
This class is an abstract table class from Qt to visualize
data in a table format and using the pandas dataframe
as object that supply the data to be visualized.
To Do: Nothing
Last edit: Removed the ability to edit the table
'''
def __init__(self, data, parent=None):
QtCore.QAbstractTableModel.__init__(self, parent)
self.__data = np.array(data.values)
self.__cols = data.columns
self.r, self.c = np.shape(self.__data)
def rowCount(self, parent=None):
return self.r
def columnCount(self, parent=None):
return self.c
def headerData(self, section, orientation, role):
if role == QtCore.Qt.DisplayRole:
if orientation == QtCore.Qt.Horizontal:
return self.__cols[section]
elif orientation == QtCore.Qt.Vertical:
return section
def data(self, index, role):
if role == QtCore.Qt.UserRole:
index = None
return pd.DataFrame(self.__data, columns=self.__cols)
else:
pass
if index.isValid():
if role == QtCore.Qt.DisplayRole:
return self.__data[index.row(), index.column()]
def flags(self, index):
return QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable |\
QtCore.Qt.ItemIsEditable
def setData(self, index, value, role=QtCore.Qt.EditRole):
if role == QtCore.Qt.EditRole:
                og_value = self.data(index, QtCore.Qt.DisplayRole)
self.__data[index.row(), index.column()] = value
self.dataChanged.emit(index, index)
self.log_change.emit(
{'cell_changes':{og_value: value}})
return True
return False
#def removeRows(self, rowstart, rowend, parent=QtCore.QModelIndex()):
# self.beginRemoveRows(
# QtCore.QModelIndex(), rowstart, rowend+1)
# self.__data = np.delete(
# self.__data, np.s_[rowstart:rowend+1], axis=0)
# self.endRemoveRows()
def event(self, event):
if (event.key() == QtCore.Qt.Key_Return):
                print('Pressed Enter')
raise KeyError
            return QtCore.QAbstractTableModel.event(self, event)
return PandasTableModelEdit
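# Hedged sketch (added for illustration; not part of the original manual
# test): a minimal numpy-backed model showing the beginRemoveRows /
# endRemoveRows bookkeeping that the commented-out removeRows above and
# the "delete rows" note further down are aiming for.
class _RowRemovalSketchModel(QtCore.QAbstractTableModel):
    def __init__(self, data, parent=None):
        QtCore.QAbstractTableModel.__init__(self, parent)
        self._data = np.array(data.values)
        self._cols = data.columns

    def rowCount(self, parent=None):
        return self._data.shape[0]

    def columnCount(self, parent=None):
        return self._data.shape[1]

    def data(self, index, role=QtCore.Qt.DisplayRole):
        if index.isValid() and role == QtCore.Qt.DisplayRole:
            return self._data[index.row(), index.column()]

    def removeRows(self, rowstart, rowend, parent=QtCore.QModelIndex()):
        # Qt requires the begin/end pair so any attached view stays in sync.
        self.beginRemoveRows(parent, rowstart, rowend)
        self._data = np.delete(self._data, np.s_[rowstart:rowend + 1], axis=0)
        self.endRemoveRows()
        return True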
@pytest.fixture
def Preview(df, PandasTableModelEdit):
class TablePreview(QtGui.QDialog, uiprev.Ui_Dialog):
def __init__(self, parent=None):
super().__init__(parent)
self.setupUi(self)
self.btnCancel.clicked.connect(self.close)
self.model = PandasTableModelEdit(df)
self.model.log_change.connect(self.write_to_log)
self.tabviewPreview.setModel(self.model)
# Double click header to change column name
self.tabviewPreview.horizontalHeader().sectionDoubleClicked.connect(
self.changeHorizontalHeader)
# Resize column widths to fit words
self.tabviewPreview.resizeColumnsToContents()
# Right click on verticalHeader
#self.tabviewPreview.setContextMenuPolicy(
# QtCore.Qt.CustomContextMenu
#)
#self.tabviewPreview.customContextMenuRequested.connect(
# self.on_context_menu)
# Context menu for delete action
#self.popMenu = QtGui.QMenu(self)
#self.popMenu.addAction(QtGui.QAction('delete', self))
#self.popMenu.addSeparator()
#def on_context_menu(self, point):
# ''' Method to initiate the deltion of rows'''
# # show context menu
# self.popMenu.exec_(
# self.tabviewPreview.mapToGlobal(point))
# indeces = list(
# set([x.row() for x in
# self.tabviewPreview.selectedIndexes()]))
# self.model.removeRows(indeces[0], indeces[-1])
# LEFT OFF HERE. Trying to get the delete rows feature working
@QtCore.pyqtSlot(object)
def write_to_log(self, change):
print(change)
def changeHorizontalHeader(self, index):
print(index)
oldHeader = df.iloc[:,index].name
newHeader, ok = QtGui.QInputDialog.getText(
self, 'Input', 'New Column Label:')
if ok:
df_updated = df.rename(columns={oldHeader:newHeader})
self.tabviewPreview.setModel(
PandasTableModelEdit(df_updated))
self.write_to_log(
{'column_change': {oldHeader:newHeader}})
return TablePreview()
def test_dialog_site(qtbot, Preview):
Preview.show()
qtbot.addWidget(Preview)
qtbot.stopForInteraction()
| mit |
zihua/scikit-learn | examples/cluster/plot_digits_agglomeration.py | 377 | 1694 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Feature agglomeration
=========================================================
These images show how similar features are merged together using
feature agglomeration.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets, cluster
from sklearn.feature_extraction.image import grid_to_graph
digits = datasets.load_digits()
images = digits.images
X = np.reshape(images, (len(images), -1))
connectivity = grid_to_graph(*images[0].shape)
agglo = cluster.FeatureAgglomeration(connectivity=connectivity,
n_clusters=32)
agglo.fit(X)
X_reduced = agglo.transform(X)
X_restored = agglo.inverse_transform(X_reduced)
images_restored = np.reshape(X_restored, images.shape)
plt.figure(1, figsize=(4, 3.5))
plt.clf()
plt.subplots_adjust(left=.01, right=.99, bottom=.01, top=.91)
for i in range(4):
plt.subplot(3, 4, i + 1)
plt.imshow(images[i], cmap=plt.cm.gray, vmax=16, interpolation='nearest')
plt.xticks(())
plt.yticks(())
if i == 1:
plt.title('Original data')
plt.subplot(3, 4, 4 + i + 1)
plt.imshow(images_restored[i], cmap=plt.cm.gray, vmax=16,
interpolation='nearest')
if i == 1:
plt.title('Agglomerated data')
plt.xticks(())
plt.yticks(())
plt.subplot(3, 4, 10)
plt.imshow(np.reshape(agglo.labels_, images[0].shape),
interpolation='nearest', cmap=plt.cm.spectral)
plt.xticks(())
plt.yticks(())
plt.title('Labels')
plt.show()
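# Illustrative aside (added; not part of the original example): the fitted
# `labels_` attribute maps each of the 64 pixels to one of the 32 merged
# features, so the size of every merged group can be inspected directly.
print('pixels per merged feature:', np.bincount(agglo.labels_))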
| bsd-3-clause |
seg/2016-ml-contest | CarlosFuerte/ML_Submission.py | 1 | 7840 | # coding: utf-8
# # Machine Learning Contest
# By: Kris Darnell & David Tang
#
# Test run with a larger sized neural network. Contest is described [here](https://github.com/seg/2016-ml-contest).
# In[1]:
import pandas as pd
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.colors as colors
from mpl_toolkits.axes_grid1 import make_axes_locatable
from pandas import set_option
set_option("display.max_rows", 10)
pd.options.mode.chained_assignment = None
# Loading Data
filename = 'facies_vectors.csv'
training_data = pd.read_csv(filename)
training_data.fillna(training_data.mean(),inplace=True) # Remove NaN with mean value
training_data
# Converts to category
training_data['Well Name'] = training_data['Well Name'].astype('category')
training_data['Formation'] = training_data['Formation'].astype('category')
training_data['Well Name'].unique()
training_data.describe()
# In[2]:
# Plotting stuff
# Hex color codes
facies_colors = ['#F4D03F', '#F5B041','#DC7633','#6E2C00',
'#1B4F72','#2E86C1', '#AED6F1', '#A569BD', '#196F3D']
facies_labels = ['SS', 'CSiS', 'FSiS', 'SiSh', 'MS',
'WS', 'D','PS', 'BS']
#facies_color_map is a dictionary that maps facies labels
#to their respective colors
facies_color_map = {} # Dictionary # enumerate puts out ind=0, label=SS, and loops through the whole thing
for ind, label in enumerate(facies_labels):
facies_color_map[label] = facies_colors[ind]
def label_facies(row, labels):
return labels[ row['Facies'] -1]
training_data.loc[:,'FaciesLabels'] = training_data.apply(lambda row: label_facies(row, facies_labels), axis=1)
correct_facies_labels = training_data['Facies'].values
feature_vectors = training_data.drop(['Well Name','Facies','FaciesLabels'], axis=1)
feature_vectors.describe()
feature_vectors.insert(1,'FormationNum',0)
for ii, formation in enumerate(feature_vectors['Formation'].unique()):
feature_vectors.FormationNum[feature_vectors.Formation == formation] = ii
feature_vectors = feature_vectors.drop(['Formation'], axis = 1)
# ***
# Normalizing and splitting data
# In[3]:
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
scaler = preprocessing.StandardScaler().fit(feature_vectors)
scaled_features = scaler.transform(feature_vectors)
X_train, X_test, y_train, y_test = train_test_split(
scaled_features, correct_facies_labels, test_size=0.2, random_state=42)
#%% Use tpot
from tpot import TPOTClassifier
from sklearn.pipeline import make_pipeline, make_union
from sklearn.ensemble import ExtraTreesClassifier, VotingClassifier
#tpot = TPOTClassifier(generations=5, population_size=20, verbosity=2,
# max_eval_time_mins = 20, max_time_mins=100, scoring='f1_micro')
#tpot.fit(X_train, y_train)
#print(tpot.score(X_test, y_test))
#tpot.export('tpot_mnist_pipeline.py')
# In[4]:
#
#from sklearn.neural_network import MLPClassifier
#
#sizes = (200,100,100)
#clfNN = MLPClassifier(solver='lbfgs', alpha=.015,
# hidden_layer_sizes=sizes, random_state=15)
#clfOne = OneVsRestClassifier(MLPClassifier(solver='lbfgs', alpha=.015,
# hidden_layer_sizes=sizes, random_state=15), n_jobs = -1)
#
#clfNN.fit(X_train,y_train)
#clfOne.fit(X_train,y_train)
#
#predicted_NN = clfNN.predict(X_test)
#predicted_One = clfOne.predict(X_test)
#%% Use TPOT to find best parameters/models
clfExtra = make_pipeline(
ExtraTreesClassifier(criterion="gini", max_features=0.53, n_estimators=500))
clfExtra.fit(X_train, y_train)
predicted = clfExtra.predict(X_test)
#%%
from sklearn.metrics import confusion_matrix
from classification_utilities import display_cm, display_adj_cm
conf = confusion_matrix(y_test,predicted)
display_cm(conf,facies_labels,hide_zeros=True)
def accuracy(conf):
total_correct = 0.
nb_classes = conf.shape[0]
for i in np.arange(0,nb_classes):
total_correct += conf[i][i]
acc = total_correct/sum(sum(conf))
return acc
adjacent_facies = np.array([[1], [0,2], [1], [4], [3,5], [4,6,7], [5,7], [5,6,8], [6,7]])
def accuracy_adjacent(conf, adjacent_facies):
nb_classes = conf.shape[0]
total_correct = 0.
for i in np.arange(0,nb_classes):
total_correct += conf[i][i]
for j in adjacent_facies[i]:
total_correct += conf[i][j]
return total_correct / sum(sum(conf))
from sklearn.metrics import f1_score, make_scorer
fscorer_micro = make_scorer(f1_score, average = 'micro')
fscorer_macro = make_scorer(f1_score, average = 'macro')
print('Facies classification accuracy = %f' % accuracy(conf))
print('Adjacent facies classification accuracy = %f' % accuracy_adjacent(conf, adjacent_facies))
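# Hedged aside (added for illustration): the two scorers defined above are
# not used again below; the same metrics can be reported directly on the
# held-out split with the already-imported f1_score.
print('F1 micro on test split = %f' % f1_score(y_test, predicted, average='micro'))
print('F1 macro on test split = %f' % f1_score(y_test, predicted, average='macro'))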
# ## Load Validation Set
#%% Retrain on whole dataset
clfExtra.fit(scaled_features, correct_facies_labels)
#%%
filename = 'validation_data_nofacies.csv'
validationFull = pd.read_csv(filename)
validationFull.insert(1,'FormationNum',0)
for ii, formation in enumerate(training_data['Formation'].unique()):  # Formation column was dropped from feature_vectors above
validationFull.FormationNum[validationFull.Formation == formation] = ii
validation = validationFull.drop(['Formation', 'Well Name'], axis = 1)
# Normalize data
scaled_validation = scaler.transform(validation)
validation_output = clfExtra.predict(scaled_validation)
#validation_output = clf_final.predict(scaled_validation)
# In[6]:
def make_facies_log_plot(logs, facies_colors):
#make sure logs are sorted by depth
logs = logs.sort_values(by='Depth')
cmap_facies = colors.ListedColormap(
facies_colors[0:len(facies_colors)], 'indexed')
ztop=logs.Depth.min(); zbot=logs.Depth.max()
    cluster=np.repeat(np.expand_dims(logs['Facies'].values,1), 100, 1) # Repeat the facies column 100 times along a new axis so it can be drawn as an image
f, ax = plt.subplots(nrows=1, ncols=6, figsize=(8, 12))
ax[0].plot(logs.GR, logs.Depth, '-g')
ax[1].plot(logs.ILD_log10, logs.Depth, '-')
ax[2].plot(logs.DeltaPHI, logs.Depth, '-', color='0.5')
ax[3].plot(logs.PHIND, logs.Depth, '-', color='r')
ax[4].plot(logs.PE, logs.Depth, '-', color='black')
im=ax[5].imshow(cluster, interpolation='none', aspect='auto',
cmap=cmap_facies,vmin=1,vmax=9)
divider = make_axes_locatable(ax[5])
cax = divider.append_axes("right", size="20%", pad=0.05)
cbar=plt.colorbar(im, cax=cax)
cbar.set_label((17*' ').join([' SS ', 'CSiS', 'FSiS',
'SiSh', ' MS ', ' WS ', ' D ',
' PS ', ' BS ']))
cbar.set_ticks(range(0,1)); cbar.set_ticklabels('')
for i in range(len(ax)-1):
ax[i].set_ylim(ztop,zbot)
ax[i].invert_yaxis()
ax[i].grid()
ax[i].locator_params(axis='x', nbins=3)
ax[0].set_xlabel("GR")
ax[0].set_xlim(logs.GR.min(),logs.GR.max())
ax[1].set_xlabel("ILD_log10")
ax[1].set_xlim(logs.ILD_log10.min(),logs.ILD_log10.max())
ax[2].set_xlabel("DeltaPHI")
ax[2].set_xlim(logs.DeltaPHI.min(),logs.DeltaPHI.max())
ax[3].set_xlabel("PHIND")
ax[3].set_xlim(logs.PHIND.min(),logs.PHIND.max())
ax[4].set_xlabel("PE")
ax[4].set_xlim(logs.PE.min(),logs.PE.max())
ax[5].set_xlabel('Facies')
ax[1].set_yticklabels([]); ax[2].set_yticklabels([]); ax[3].set_yticklabels([])
ax[4].set_yticklabels([]); ax[5].set_yticklabels([])
ax[5].set_xticklabels([])
f.suptitle('Well: %s'%logs.iloc[0]['Well Name'], fontsize=14,y=0.94)
# In[11]:
get_ipython().magic('matplotlib inline')
validationFull['Facies']=validation_output
make_facies_log_plot(
validationFull[validationFull['Well Name']=='STUART'],
facies_colors=facies_colors)
make_facies_log_plot(
validationFull[validationFull['Well Name']=='CRAWFORD'],
facies_colors=facies_colors)
# In[12]:
validationFull.to_csv('TangDarnell.csv')
| apache-2.0 |
qifeigit/scikit-learn | sklearn/neighbors/tests/test_approximate.py | 142 | 18692 | """
Testing for the approximate neighbor search using
Locality Sensitive Hashing Forest module
(sklearn.neighbors.LSHForest).
"""
# Author: Maheshakya Wijewardena, Joel Nothman
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_array_less
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import ignore_warnings
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.neighbors import LSHForest
from sklearn.neighbors import NearestNeighbors
def test_neighbors_accuracy_with_n_candidates():
# Checks whether accuracy increases as `n_candidates` increases.
n_candidates_values = np.array([.1, 50, 500])
n_samples = 100
n_features = 10
n_iter = 10
n_points = 5
rng = np.random.RandomState(42)
accuracies = np.zeros(n_candidates_values.shape[0], dtype=float)
X = rng.rand(n_samples, n_features)
for i, n_candidates in enumerate(n_candidates_values):
lshf = LSHForest(n_candidates=n_candidates)
lshf.fit(X)
for j in range(n_iter):
query = X[rng.randint(0, n_samples)]
neighbors = lshf.kneighbors(query, n_neighbors=n_points,
return_distance=False)
distances = pairwise_distances(query, X, metric='cosine')
ranks = np.argsort(distances)[0, :n_points]
intersection = np.intersect1d(ranks, neighbors).shape[0]
ratio = intersection / float(n_points)
accuracies[i] = accuracies[i] + ratio
accuracies[i] = accuracies[i] / float(n_iter)
# Sorted accuracies should be equal to original accuracies
assert_true(np.all(np.diff(accuracies) >= 0),
msg="Accuracies are not non-decreasing.")
# Highest accuracy should be strictly greater than the lowest
assert_true(np.ptp(accuracies) > 0,
msg="Highest accuracy is not strictly greater than lowest.")
def test_neighbors_accuracy_with_n_estimators():
# Checks whether accuracy increases as `n_estimators` increases.
n_estimators = np.array([1, 10, 100])
n_samples = 100
n_features = 10
n_iter = 10
n_points = 5
rng = np.random.RandomState(42)
accuracies = np.zeros(n_estimators.shape[0], dtype=float)
X = rng.rand(n_samples, n_features)
for i, t in enumerate(n_estimators):
lshf = LSHForest(n_candidates=500, n_estimators=t)
lshf.fit(X)
for j in range(n_iter):
query = X[rng.randint(0, n_samples)]
neighbors = lshf.kneighbors(query, n_neighbors=n_points,
return_distance=False)
distances = pairwise_distances(query, X, metric='cosine')
ranks = np.argsort(distances)[0, :n_points]
intersection = np.intersect1d(ranks, neighbors).shape[0]
ratio = intersection / float(n_points)
accuracies[i] = accuracies[i] + ratio
accuracies[i] = accuracies[i] / float(n_iter)
# Sorted accuracies should be equal to original accuracies
assert_true(np.all(np.diff(accuracies) >= 0),
msg="Accuracies are not non-decreasing.")
# Highest accuracy should be strictly greater than the lowest
assert_true(np.ptp(accuracies) > 0,
msg="Highest accuracy is not strictly greater than lowest.")
@ignore_warnings
def test_kneighbors():
# Checks whether desired number of neighbors are returned.
# It is guaranteed to return the requested number of neighbors
# if `min_hash_match` is set to 0. Returned distances should be
# in ascending order.
n_samples = 12
n_features = 2
n_iter = 10
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest(min_hash_match=0)
# Test unfitted estimator
assert_raises(ValueError, lshf.kneighbors, X[0])
lshf.fit(X)
for i in range(n_iter):
n_neighbors = rng.randint(0, n_samples)
query = X[rng.randint(0, n_samples)]
neighbors = lshf.kneighbors(query, n_neighbors=n_neighbors,
return_distance=False)
# Desired number of neighbors should be returned.
assert_equal(neighbors.shape[1], n_neighbors)
# Multiple points
n_queries = 5
queries = X[rng.randint(0, n_samples, n_queries)]
distances, neighbors = lshf.kneighbors(queries,
n_neighbors=1,
return_distance=True)
assert_equal(neighbors.shape[0], n_queries)
assert_equal(distances.shape[0], n_queries)
# Test only neighbors
neighbors = lshf.kneighbors(queries, n_neighbors=1,
return_distance=False)
assert_equal(neighbors.shape[0], n_queries)
# Test random point(not in the data set)
query = rng.randn(n_features)
lshf.kneighbors(query, n_neighbors=1,
return_distance=False)
# Test n_neighbors at initialization
neighbors = lshf.kneighbors(query, return_distance=False)
assert_equal(neighbors.shape[1], 5)
# Test `neighbors` has an integer dtype
assert_true(neighbors.dtype.kind == 'i',
msg="neighbors are not in integer dtype.")
def test_radius_neighbors():
# Checks whether Returned distances are less than `radius`
# At least one point should be returned when the `radius` is set
# to mean distance from the considering point to other points in
# the database.
# Moreover, this test compares the radius neighbors of LSHForest
# with the `sklearn.neighbors.NearestNeighbors`.
n_samples = 12
n_features = 2
n_iter = 10
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest()
# Test unfitted estimator
assert_raises(ValueError, lshf.radius_neighbors, X[0])
lshf.fit(X)
for i in range(n_iter):
# Select a random point in the dataset as the query
query = X[rng.randint(0, n_samples)]
# At least one neighbor should be returned when the radius is the
# mean distance from the query to the points of the dataset.
mean_dist = np.mean(pairwise_distances(query, X, metric='cosine'))
neighbors = lshf.radius_neighbors(query, radius=mean_dist,
return_distance=False)
assert_equal(neighbors.shape, (1,))
assert_equal(neighbors.dtype, object)
assert_greater(neighbors[0].shape[0], 0)
# All distances to points in the results of the radius query should
# be less than mean_dist
distances, neighbors = lshf.radius_neighbors(query,
radius=mean_dist,
return_distance=True)
assert_array_less(distances[0], mean_dist)
# Multiple points
n_queries = 5
queries = X[rng.randint(0, n_samples, n_queries)]
distances, neighbors = lshf.radius_neighbors(queries,
return_distance=True)
# dists and inds should not be 1D arrays or arrays of variable lengths
# hence the use of the object dtype.
assert_equal(distances.shape, (n_queries,))
assert_equal(distances.dtype, object)
assert_equal(neighbors.shape, (n_queries,))
assert_equal(neighbors.dtype, object)
# Compare with exact neighbor search
query = X[rng.randint(0, n_samples)]
mean_dist = np.mean(pairwise_distances(query, X, metric='cosine'))
nbrs = NearestNeighbors(algorithm='brute', metric='cosine').fit(X)
distances_exact, _ = nbrs.radius_neighbors(query, radius=mean_dist)
distances_approx, _ = lshf.radius_neighbors(query, radius=mean_dist)
# Radius-based queries do not sort the result points and the order
# depends on the method, the random_state and the dataset order. Therefore
# we need to sort the results ourselves before performing any comparison.
sorted_dists_exact = np.sort(distances_exact[0])
sorted_dists_approx = np.sort(distances_approx[0])
# Distances to exact neighbors are less than or equal to approximate
# counterparts as the approximate radius query might have missed some
# closer neighbors.
assert_true(np.all(np.less_equal(sorted_dists_exact,
sorted_dists_approx)))
def test_radius_neighbors_boundary_handling():
X = [[0.999, 0.001], [0.5, 0.5], [0, 1.], [-1., 0.001]]
n_points = len(X)
# Build an exact nearest neighbors model as reference model to ensure
# consistency between exact and approximate methods
nnbrs = NearestNeighbors(algorithm='brute', metric='cosine').fit(X)
# Build a LSHForest model with hyperparameter values that always guarantee
# exact results on this toy dataset.
lsfh = LSHForest(min_hash_match=0, n_candidates=n_points).fit(X)
# define a query aligned with the first axis
query = [1., 0.]
# Compute the exact cosine distances of the query to the four points of
# the dataset
dists = pairwise_distances(query, X, metric='cosine').ravel()
# The first point is almost aligned with the query (very small angle),
# the cosine distance should therefore be almost null:
assert_almost_equal(dists[0], 0, decimal=5)
# The second point form an angle of 45 degrees to the query vector
assert_almost_equal(dists[1], 1 - np.cos(np.pi / 4))
# The third point is orthogonal from the query vector hence at a distance
# exactly one:
assert_almost_equal(dists[2], 1)
# The last point is almost colinear but with opposite sign to the query
# therefore it has a cosine 'distance' very close to the maximum possible
# value of 2.
assert_almost_equal(dists[3], 2, decimal=5)
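    # Worked values for reference (comment added for clarity): with cosine
    # distance = 1 - cos(angle), the four points above sit at roughly 0.0,
    # 1 - cos(pi/4) ~= 0.2929, 1.0 and ~2.0 from the query.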
# If we query with a radius of one, all the samples except the last sample
# should be included in the results. This means that the third sample
# is lying on the boundary of the radius query:
exact_dists, exact_idx = nnbrs.radius_neighbors(query, radius=1)
approx_dists, approx_idx = lsfh.radius_neighbors(query, radius=1)
assert_array_equal(np.sort(exact_idx[0]), [0, 1, 2])
assert_array_equal(np.sort(approx_idx[0]), [0, 1, 2])
assert_array_almost_equal(np.sort(exact_dists[0]), dists[:-1])
assert_array_almost_equal(np.sort(approx_dists[0]), dists[:-1])
# If we perform the same query with a slighltly lower radius, the third
# point of the dataset that lay on the boundary of the previous query
# is now rejected:
eps = np.finfo(np.float64).eps
exact_dists, exact_idx = nnbrs.radius_neighbors(query, radius=1 - eps)
approx_dists, approx_idx = lsfh.radius_neighbors(query, radius=1 - eps)
assert_array_equal(np.sort(exact_idx[0]), [0, 1])
assert_array_equal(np.sort(approx_idx[0]), [0, 1])
assert_array_almost_equal(np.sort(exact_dists[0]), dists[:-2])
assert_array_almost_equal(np.sort(approx_dists[0]), dists[:-2])
def test_distances():
# Checks whether returned neighbors are from closest to farthest.
n_samples = 12
n_features = 2
n_iter = 10
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest()
lshf.fit(X)
for i in range(n_iter):
n_neighbors = rng.randint(0, n_samples)
query = X[rng.randint(0, n_samples)]
distances, neighbors = lshf.kneighbors(query,
n_neighbors=n_neighbors,
return_distance=True)
# Returned neighbors should be from closest to farthest, that is
# increasing distance values.
assert_true(np.all(np.diff(distances[0]) >= 0))
# Note: the radius_neighbors method does not guarantee the order of
# the results.
def test_fit():
# Checks whether `fit` method sets all attribute values correctly.
n_samples = 12
n_features = 2
n_estimators = 5
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest(n_estimators=n_estimators)
lshf.fit(X)
# _input_array = X
assert_array_equal(X, lshf._fit_X)
# A hash function g(p) for each tree
assert_equal(n_estimators, len(lshf.hash_functions_))
# Hash length = 32
assert_equal(32, lshf.hash_functions_[0].components_.shape[0])
# Number of trees_ in the forest
assert_equal(n_estimators, len(lshf.trees_))
# Each tree has entries for every data point
assert_equal(n_samples, len(lshf.trees_[0]))
# Original indices after sorting the hashes
assert_equal(n_estimators, len(lshf.original_indices_))
# Each set of original indices in a tree has entries for every data point
assert_equal(n_samples, len(lshf.original_indices_[0]))
def test_partial_fit():
    # Checks whether inserting array is consistent with fitted data.
# `partial_fit` method should set all attribute values correctly.
n_samples = 12
n_samples_partial_fit = 3
n_features = 2
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
X_partial_fit = rng.rand(n_samples_partial_fit, n_features)
lshf = LSHForest()
# Test unfitted estimator
lshf.partial_fit(X)
assert_array_equal(X, lshf._fit_X)
lshf.fit(X)
# Insert wrong dimension
assert_raises(ValueError, lshf.partial_fit,
np.random.randn(n_samples_partial_fit, n_features - 1))
lshf.partial_fit(X_partial_fit)
# size of _input_array = samples + 1 after insertion
assert_equal(lshf._fit_X.shape[0],
n_samples + n_samples_partial_fit)
# size of original_indices_[1] = samples + 1
assert_equal(len(lshf.original_indices_[0]),
n_samples + n_samples_partial_fit)
# size of trees_[1] = samples + 1
assert_equal(len(lshf.trees_[1]),
n_samples + n_samples_partial_fit)
def test_hash_functions():
# Checks randomness of hash functions.
# Variance and mean of each hash function (projection vector)
# should be different from flattened array of hash functions.
# If hash functions are not randomly built (seeded with
# same value), variances and means of all functions are equal.
n_samples = 12
n_features = 2
n_estimators = 5
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest(n_estimators=n_estimators,
random_state=rng.randint(0, np.iinfo(np.int32).max))
lshf.fit(X)
hash_functions = []
for i in range(n_estimators):
hash_functions.append(lshf.hash_functions_[i].components_)
for i in range(n_estimators):
assert_not_equal(np.var(hash_functions),
np.var(lshf.hash_functions_[i].components_))
for i in range(n_estimators):
assert_not_equal(np.mean(hash_functions),
np.mean(lshf.hash_functions_[i].components_))
def test_candidates():
# Checks whether candidates are sufficient.
# This should handle the cases when number of candidates is 0.
# User should be warned when number of candidates is less than
# requested number of neighbors.
X_train = np.array([[5, 5, 2], [21, 5, 5], [1, 1, 1], [8, 9, 1],
[6, 10, 2]], dtype=np.float32)
X_test = np.array([7, 10, 3], dtype=np.float32)
# For zero candidates
lshf = LSHForest(min_hash_match=32)
lshf.fit(X_train)
message = ("Number of candidates is not sufficient to retrieve"
" %i neighbors with"
" min_hash_match = %i. Candidates are filled up"
" uniformly from unselected"
" indices." % (3, 32))
assert_warns_message(UserWarning, message, lshf.kneighbors,
X_test, n_neighbors=3)
distances, neighbors = lshf.kneighbors(X_test, n_neighbors=3)
assert_equal(distances.shape[1], 3)
# For candidates less than n_neighbors
lshf = LSHForest(min_hash_match=31)
lshf.fit(X_train)
message = ("Number of candidates is not sufficient to retrieve"
" %i neighbors with"
" min_hash_match = %i. Candidates are filled up"
" uniformly from unselected"
" indices." % (5, 31))
assert_warns_message(UserWarning, message, lshf.kneighbors,
X_test, n_neighbors=5)
distances, neighbors = lshf.kneighbors(X_test, n_neighbors=5)
assert_equal(distances.shape[1], 5)
def test_graphs():
# Smoke tests for graph methods.
n_samples_sizes = [5, 10, 20]
n_features = 3
rng = np.random.RandomState(42)
for n_samples in n_samples_sizes:
X = rng.rand(n_samples, n_features)
lshf = LSHForest(min_hash_match=0)
lshf.fit(X)
kneighbors_graph = lshf.kneighbors_graph(X)
radius_neighbors_graph = lshf.radius_neighbors_graph(X)
assert_equal(kneighbors_graph.shape[0], n_samples)
assert_equal(kneighbors_graph.shape[1], n_samples)
assert_equal(radius_neighbors_graph.shape[0], n_samples)
assert_equal(radius_neighbors_graph.shape[1], n_samples)
def test_sparse_input():
# note: Fixed random state in sp.rand is not supported in older scipy.
# The test should succeed regardless.
X1 = sp.rand(50, 100)
X2 = sp.rand(10, 100)
forest_sparse = LSHForest(radius=1, random_state=0).fit(X1)
forest_dense = LSHForest(radius=1, random_state=0).fit(X1.A)
d_sparse, i_sparse = forest_sparse.kneighbors(X2, return_distance=True)
d_dense, i_dense = forest_dense.kneighbors(X2.A, return_distance=True)
assert_almost_equal(d_sparse, d_dense)
assert_almost_equal(i_sparse, i_dense)
d_sparse, i_sparse = forest_sparse.radius_neighbors(X2,
return_distance=True)
d_dense, i_dense = forest_dense.radius_neighbors(X2.A,
return_distance=True)
assert_equal(d_sparse.shape, d_dense.shape)
for a, b in zip(d_sparse, d_dense):
assert_almost_equal(a, b)
for a, b in zip(i_sparse, i_dense):
assert_almost_equal(a, b)
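def test_basic_usage_sketch():
    # Added illustrative sketch (not from the original suite): the minimal
    # fit / kneighbors round trip that the tests above exercise in detail;
    # min_hash_match=0 guarantees the requested number of neighbors.
    rng = np.random.RandomState(42)
    X = rng.rand(20, 5)
    lshf = LSHForest(n_estimators=3, min_hash_match=0, random_state=42)
    lshf.fit(X)
    distances, indices = lshf.kneighbors(X[:2], n_neighbors=3)
    assert_equal(indices.shape, (2, 3))
    assert_equal(distances.shape, (2, 3))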
| bsd-3-clause |
harisbal/pandas | pandas/tseries/frequencies.py | 4 | 16616 | # -*- coding: utf-8 -*-
from datetime import timedelta
from pandas.compat import zip
from pandas import compat
import re
import numpy as np
from pandas.util._decorators import cache_readonly
from pandas.core.dtypes.generic import ABCSeries
from pandas.core.dtypes.common import (
is_period_arraylike,
is_timedelta64_dtype,
is_datetime64_dtype)
from pandas.core.algorithms import unique
from pandas.tseries.offsets import DateOffset
from pandas._libs.tslibs import Timedelta, Timestamp
import pandas._libs.tslibs.frequencies as libfreqs
from pandas._libs.tslibs.frequencies import ( # noqa, semi-public API
get_freq, get_base_alias, get_to_timestamp_base, get_freq_code,
FreqGroup,
is_subperiod, is_superperiod)
from pandas._libs.tslibs.ccalendar import MONTH_ALIASES, int_to_weekday
import pandas._libs.tslibs.resolution as libresolution
from pandas._libs.tslibs.resolution import Resolution
from pandas._libs.tslibs.fields import build_field_sarray
from pandas._libs.tslibs.conversion import tz_convert
from pandas._libs.algos import unique_deltas
from pytz import AmbiguousTimeError
RESO_NS = 0
RESO_US = 1
RESO_MS = 2
RESO_SEC = 3
RESO_MIN = 4
RESO_HR = 5
RESO_DAY = 6
_ONE_MICRO = 1000
_ONE_MILLI = (_ONE_MICRO * 1000)
_ONE_SECOND = (_ONE_MILLI * 1000)
_ONE_MINUTE = (60 * _ONE_SECOND)
_ONE_HOUR = (60 * _ONE_MINUTE)
_ONE_DAY = (24 * _ONE_HOUR)
# ---------------------------------------------------------------------
# Offset names ("time rules") and related functions
from pandas._libs.tslibs.offsets import _offset_to_period_map # noqa:E402
from pandas.tseries.offsets import (Nano, Micro, Milli, Second, # noqa
Minute, Hour,
Day, BDay, CDay, Week, MonthBegin,
MonthEnd, BMonthBegin, BMonthEnd,
QuarterBegin, QuarterEnd, BQuarterBegin,
BQuarterEnd, YearBegin, YearEnd,
BYearBegin, BYearEnd, prefix_mapping)
try:
cday = CDay()
except NotImplementedError:
cday = None
#: cache of previously seen offsets
_offset_map = {}
def get_period_alias(offset_str):
""" alias to closest period strings BQ->Q etc"""
return _offset_to_period_map.get(offset_str, None)
_name_to_offset_map = {'days': Day(1),
'hours': Hour(1),
'minutes': Minute(1),
'seconds': Second(1),
'milliseconds': Milli(1),
'microseconds': Micro(1),
'nanoseconds': Nano(1)}
def to_offset(freq):
"""
Return DateOffset object from string or tuple representation
or datetime.timedelta object
Parameters
----------
freq : str, tuple, datetime.timedelta, DateOffset or None
Returns
-------
delta : DateOffset
None if freq is None
Raises
------
ValueError
If freq is an invalid frequency
See Also
--------
pandas.DateOffset
Examples
--------
>>> to_offset('5min')
<5 * Minutes>
>>> to_offset('1D1H')
<25 * Hours>
>>> to_offset(('W', 2))
<2 * Weeks: weekday=6>
>>> to_offset((2, 'B'))
<2 * BusinessDays>
>>> to_offset(datetime.timedelta(days=1))
<Day>
>>> to_offset(Hour())
<Hour>
"""
if freq is None:
return None
if isinstance(freq, DateOffset):
return freq
if isinstance(freq, tuple):
name = freq[0]
stride = freq[1]
if isinstance(stride, compat.string_types):
name, stride = stride, name
name, _ = libfreqs._base_and_stride(name)
delta = get_offset(name) * stride
elif isinstance(freq, timedelta):
delta = None
freq = Timedelta(freq)
try:
for name in freq.components._fields:
offset = _name_to_offset_map[name]
stride = getattr(freq.components, name)
if stride != 0:
offset = stride * offset
if delta is None:
delta = offset
else:
delta = delta + offset
except Exception:
raise ValueError(libfreqs.INVALID_FREQ_ERR_MSG.format(freq))
else:
delta = None
stride_sign = None
try:
splitted = re.split(libfreqs.opattern, freq)
if splitted[-1] != '' and not splitted[-1].isspace():
# the last element must be blank
raise ValueError('last element must be blank')
for sep, stride, name in zip(splitted[0::4], splitted[1::4],
splitted[2::4]):
if sep != '' and not sep.isspace():
raise ValueError('separator must be spaces')
prefix = libfreqs._lite_rule_alias.get(name) or name
if stride_sign is None:
stride_sign = -1 if stride.startswith('-') else 1
if not stride:
stride = 1
if prefix in Resolution._reso_str_bump_map.keys():
stride, name = Resolution.get_stride_from_decimal(
float(stride), prefix
)
stride = int(stride)
offset = get_offset(name)
offset = offset * int(np.fabs(stride) * stride_sign)
if delta is None:
delta = offset
else:
delta = delta + offset
except Exception:
raise ValueError(libfreqs.INVALID_FREQ_ERR_MSG.format(freq))
if delta is None:
raise ValueError(libfreqs.INVALID_FREQ_ERR_MSG.format(freq))
return delta
def get_offset(name):
"""
Return DateOffset object associated with rule name
Examples
--------
get_offset('EOM') --> BMonthEnd(1)
"""
if name not in libfreqs._dont_uppercase:
name = name.upper()
name = libfreqs._lite_rule_alias.get(name, name)
name = libfreqs._lite_rule_alias.get(name.lower(), name)
else:
name = libfreqs._lite_rule_alias.get(name, name)
if name not in _offset_map:
try:
split = name.split('-')
klass = prefix_mapping[split[0]]
# handles case where there's no suffix (and will TypeError if too
# many '-')
offset = klass._from_name(*split[1:])
except (ValueError, TypeError, KeyError):
# bad prefix or suffix
raise ValueError(libfreqs.INVALID_FREQ_ERR_MSG.format(name))
# cache
_offset_map[name] = offset
return _offset_map[name]
getOffset = get_offset
# ---------------------------------------------------------------------
# Period codes
def infer_freq(index, warn=True):
"""
Infer the most likely frequency given the input index. If the frequency is
uncertain, a warning will be printed.
Parameters
----------
index : DatetimeIndex or TimedeltaIndex
if passed a Series will use the values of the series (NOT THE INDEX)
warn : boolean, default True
Returns
-------
freq : string or None
None if no discernible frequency
TypeError if the index is not datetime-like
ValueError if there are less than three values.
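    Examples
    --------
    Illustrative only (example added to this docstring); assumes a regular
    hourly DatetimeIndex.

    >>> import pandas as pd
    >>> infer_freq(pd.date_range('2018-01-01', periods=5, freq='H'))
    'H'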
"""
import pandas as pd
if isinstance(index, ABCSeries):
values = index._values
if not (is_datetime64_dtype(values) or
is_timedelta64_dtype(values) or
values.dtype == object):
raise TypeError("cannot infer freq from a non-convertible dtype "
"on a Series of {dtype}".format(dtype=index.dtype))
index = values
if is_period_arraylike(index):
raise TypeError("PeriodIndex given. Check the `freq` attribute "
"instead of using infer_freq.")
elif is_timedelta64_dtype(index):
# Allow TimedeltaIndex and TimedeltaArray
inferer = _TimedeltaFrequencyInferer(index, warn=warn)
return inferer.get_freq()
if isinstance(index, pd.Index) and not isinstance(index, pd.DatetimeIndex):
if isinstance(index, (pd.Int64Index, pd.Float64Index)):
raise TypeError("cannot infer freq from a non-convertible index "
"type {type}".format(type=type(index)))
index = index.values
if not isinstance(index, pd.DatetimeIndex):
try:
index = pd.DatetimeIndex(index)
except AmbiguousTimeError:
index = pd.DatetimeIndex(index.asi8)
inferer = _FrequencyInferer(index, warn=warn)
return inferer.get_freq()
class _FrequencyInferer(object):
"""
Not sure if I can avoid the state machine here
"""
def __init__(self, index, warn=True):
self.index = index
self.values = np.asarray(index).view('i8')
# This moves the values, which are implicitly in UTC, to the
# the timezone so they are in local time
if hasattr(index, 'tz'):
if index.tz is not None:
self.values = tz_convert(self.values, 'UTC', index.tz)
self.warn = warn
if len(index) < 3:
raise ValueError('Need at least 3 dates to infer frequency')
self.is_monotonic = (self.index.is_monotonic_increasing or
self.index.is_monotonic_decreasing)
@cache_readonly
def deltas(self):
return unique_deltas(self.values)
@cache_readonly
def deltas_asi8(self):
return unique_deltas(self.index.asi8)
@cache_readonly
def is_unique(self):
return len(self.deltas) == 1
@cache_readonly
def is_unique_asi8(self):
return len(self.deltas_asi8) == 1
def get_freq(self): # noqa:F811
"""
Find the appropriate frequency string to describe the inferred
frequency of self.values
Returns
-------
freqstr : str or None
"""
if not self.is_monotonic or not self.index.is_unique:
return None
delta = self.deltas[0]
if _is_multiple(delta, _ONE_DAY):
return self._infer_daily_rule()
# Business hourly, maybe. 17: one day / 65: one weekend
if self.hour_deltas in ([1, 17], [1, 65], [1, 17, 65]):
return 'BH'
# Possibly intraday frequency. Here we use the
# original .asi8 values as the modified values
# will not work around DST transitions. See #8772
elif not self.is_unique_asi8:
return None
delta = self.deltas_asi8[0]
if _is_multiple(delta, _ONE_HOUR):
# Hours
return _maybe_add_count('H', delta / _ONE_HOUR)
elif _is_multiple(delta, _ONE_MINUTE):
# Minutes
return _maybe_add_count('T', delta / _ONE_MINUTE)
elif _is_multiple(delta, _ONE_SECOND):
# Seconds
return _maybe_add_count('S', delta / _ONE_SECOND)
elif _is_multiple(delta, _ONE_MILLI):
# Milliseconds
return _maybe_add_count('L', delta / _ONE_MILLI)
elif _is_multiple(delta, _ONE_MICRO):
# Microseconds
return _maybe_add_count('U', delta / _ONE_MICRO)
else:
# Nanoseconds
return _maybe_add_count('N', delta)
@cache_readonly
def day_deltas(self):
return [x / _ONE_DAY for x in self.deltas]
@cache_readonly
def hour_deltas(self):
return [x / _ONE_HOUR for x in self.deltas]
@cache_readonly
def fields(self):
return build_field_sarray(self.values)
@cache_readonly
def rep_stamp(self):
return Timestamp(self.values[0])
def month_position_check(self):
return libresolution.month_position_check(self.fields,
self.index.dayofweek)
@cache_readonly
def mdiffs(self):
nmonths = self.fields['Y'] * 12 + self.fields['M']
return unique_deltas(nmonths.astype('i8'))
@cache_readonly
def ydiffs(self):
return unique_deltas(self.fields['Y'].astype('i8'))
def _infer_daily_rule(self):
annual_rule = self._get_annual_rule()
if annual_rule:
nyears = self.ydiffs[0]
month = MONTH_ALIASES[self.rep_stamp.month]
alias = '{prefix}-{month}'.format(prefix=annual_rule, month=month)
return _maybe_add_count(alias, nyears)
quarterly_rule = self._get_quarterly_rule()
if quarterly_rule:
nquarters = self.mdiffs[0] / 3
mod_dict = {0: 12, 2: 11, 1: 10}
month = MONTH_ALIASES[mod_dict[self.rep_stamp.month % 3]]
alias = '{prefix}-{month}'.format(prefix=quarterly_rule,
month=month)
return _maybe_add_count(alias, nquarters)
monthly_rule = self._get_monthly_rule()
if monthly_rule:
return _maybe_add_count(monthly_rule, self.mdiffs[0])
if self.is_unique:
days = self.deltas[0] / _ONE_DAY
if days % 7 == 0:
# Weekly
day = int_to_weekday[self.rep_stamp.weekday()]
return _maybe_add_count(
'W-{day}'.format(day=day), days / 7)
else:
return _maybe_add_count('D', days)
if self._is_business_daily():
return 'B'
wom_rule = self._get_wom_rule()
if wom_rule:
return wom_rule
def _get_annual_rule(self):
if len(self.ydiffs) > 1:
return None
if len(unique(self.fields['M'])) > 1:
return None
pos_check = self.month_position_check()
return {'cs': 'AS', 'bs': 'BAS',
'ce': 'A', 'be': 'BA'}.get(pos_check)
def _get_quarterly_rule(self):
if len(self.mdiffs) > 1:
return None
if not self.mdiffs[0] % 3 == 0:
return None
pos_check = self.month_position_check()
return {'cs': 'QS', 'bs': 'BQS',
'ce': 'Q', 'be': 'BQ'}.get(pos_check)
def _get_monthly_rule(self):
if len(self.mdiffs) > 1:
return None
pos_check = self.month_position_check()
return {'cs': 'MS', 'bs': 'BMS',
'ce': 'M', 'be': 'BM'}.get(pos_check)
def _is_business_daily(self):
# quick check: cannot be business daily
if self.day_deltas != [1, 3]:
return False
# probably business daily, but need to confirm
first_weekday = self.index[0].weekday()
shifts = np.diff(self.index.asi8)
shifts = np.floor_divide(shifts, _ONE_DAY)
weekdays = np.mod(first_weekday + np.cumsum(shifts), 7)
return np.all(((weekdays == 0) & (shifts == 3)) |
((weekdays > 0) & (weekdays <= 4) & (shifts == 1)))
def _get_wom_rule(self):
# wdiffs = unique(np.diff(self.index.week))
# We also need -47, -49, -48 to catch index spanning year boundary
# if not lib.ismember(wdiffs, set([4, 5, -47, -49, -48])).all():
# return None
weekdays = unique(self.index.weekday)
if len(weekdays) > 1:
return None
week_of_months = unique((self.index.day - 1) // 7)
# Only attempt to infer up to WOM-4. See #9425
week_of_months = week_of_months[week_of_months < 4]
if len(week_of_months) == 0 or len(week_of_months) > 1:
return None
# get which week
week = week_of_months[0] + 1
wd = int_to_weekday[weekdays[0]]
return 'WOM-{week}{weekday}'.format(week=week, weekday=wd)
class _TimedeltaFrequencyInferer(_FrequencyInferer):
def _infer_daily_rule(self):
if self.is_unique:
days = self.deltas[0] / _ONE_DAY
if days % 7 == 0:
# Weekly
wd = int_to_weekday[self.rep_stamp.weekday()]
alias = 'W-{weekday}'.format(weekday=wd)
return _maybe_add_count(alias, days / 7)
else:
return _maybe_add_count('D', days)
def _is_multiple(us, mult):
return us % mult == 0
def _maybe_add_count(base, count):
if count != 1:
assert count == int(count)
count = int(count)
return '{count}{base}'.format(count=count, base=base)
else:
return base
| bsd-3-clause |
mjescobar/RF_Estimation | Clustering/helpers/sobel/sobel.py | 2 | 3004 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# sobel.py
#
# Copyright 2014 Carlos "casep" Sepulveda <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
#
from scipy import ndimage
import sys # system lib
import os # operative system lib
import matplotlib.pyplot as plt
import argparse #argument parsing
import scipy.io # input output lib (for save matlab matrix)
import numpy
import matplotlib.cm as cm # plot lib
parser = argparse.ArgumentParser(prog='sobel.py',
    description='Exports STA frames as PNG images',
    formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--sourceFolder',
help='Source folder',
type=str, required=True)
parser.add_argument('--outputFolder',
help='Output folder',
type=str, required=True)
args = parser.parse_args()
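# Example invocation (illustrative only; folder names are placeholders):
#   python sobel.py --sourceFolder ./sta_units/ --outputFolder ./sta_frames/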
#Source folder of the files with the timestamps
sourceFolder = args.sourceFolder
# Check for trailing / on the folder
if sourceFolder[-1] != '/':
sourceFolder+='/'
if not os.path.exists(sourceFolder):
print ''
print 'Source folder does not exists ' + sourceFolder
sys.exit()
#Source folder of the files with the timestamps
outputFolder = args.outputFolder
# Check for trailing / on the folder
if outputFolder[-1] != '/':
outputFolder+='/'
if not os.path.exists(outputFolder):
try:
os.makedirs(outputFolder)
except:
print ''
print 'Unable to create folder ' + outputFolder
sys.exit()
def main():
for unitFile in os.listdir(sourceFolder):
if os.path.isdir(sourceFolder+unitFile):
unitName = unitFile.rsplit('_', 1)[0]
print unitName
staMatrixFile = scipy.io.loadmat(sourceFolder+unitFile+'/stavisual_lin_array_'+unitName+'.mat')
staMatrix = staMatrixFile['STAarray_lin']
xLength = staMatrix.shape[0]
yLength = staMatrix.shape[1]
zLength = staMatrix.shape[2]
for zAxis in range(zLength):
				print 'from disk'
fig = plt.figure()
fig.set_size_inches(1, 1)
data = staMatrix[:,:,zAxis]
#plt.pcolormesh( staMatrix[:,:,zAxis],vmin = 0,vmax = 255, cmap=cm.jet )
ax = plt.Axes(fig, [0., 0., 1., 1.])
ax.set_axis_off()
fig.add_axes(ax)
plt.set_cmap(cm.jet)
ax.imshow(data, aspect = 'auto')
plt.savefig(outputFolder+unitName+str(zAxis)+".png",format='png',dpi=31)
plt.close()
return 0
if __name__ == '__main__':
main()
| gpl-2.0 |
silky/sms-tools | lectures/09-Sound-description/plots-code/features.py | 25 | 2965 | import numpy as np
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
import os, sys
import json
from scipy.cluster.vq import vq, kmeans, whiten
def fetchDataDetails(inputDir, descExt = '.json'):
dataDetails = {}
for path, dname, fnames in os.walk(inputDir):
for fname in fnames:
if descExt in fname.lower():
rname, cname, sname = path.split('/')
if not dataDetails.has_key(cname):
dataDetails[cname]={}
fDict = json.load(open(os.path.join(rname, cname, sname, fname),'r'))
dataDetails[cname][sname]={'file': fname, 'feature':fDict}
return dataDetails
def plotFeatures(inputDir, descInput = ('',''), anotOn =0):
#mfcc descriptors are an special case for us as its a vector not a value
descriptors = ['', '']
mfccInd = [-1 , -1]
if "mfcc" in descInput[0]:
featType, featName, stats, ind = descInput[0].split('.')
descriptors[0] = featType+'.'+featName+'.'+stats
mfccInd[0] = int(ind)
else:
descriptors[0] = descInput[0]
if "mfcc" in descInput[1]:
featType, featName, stats, ind = descInput[1].split('.')
descriptors[1] = featType+'.'+featName+'.'+stats
mfccInd[1] = int(ind)
else:
descriptors[1] = descInput[1]
dataDetails = fetchDataDetails(inputDir)
colors = ['r', 'g', 'c', 'b', 'k', 'm', 'y']
plt.figure(1, figsize=(9.5, 6))
plt.hold(True)
legArray = []
catArray = []
for ii, category in enumerate(dataDetails.keys()):
catArray.append(category)
for soundId in dataDetails[category].keys():
filepath = os.path.join(inputDir, category, soundId, dataDetails[category][soundId]['file'])
descSound = json.load(open(filepath, 'r'))
if not descSound.has_key(descriptors[0]) or not descSound.has_key(descriptors[1]):
print "Please provide descriptors which are extracted and saved before"
return -1
if "mfcc" in descriptors[0]:
x_cord = descSound[descriptors[0]][0][mfccInd[0]]
else:
x_cord = descSound[descriptors[0]][0]
if "mfcc" in descriptors[1]:
y_cord = descSound[descriptors[1]][0][mfccInd[1]]
else:
y_cord = descSound[descriptors[1]][0]
plt.scatter(x_cord,y_cord, c = colors[ii], s=50, hold = True, alpha=0.75)
if anotOn==1:
plt.annotate(soundId, xy=(x_cord, y_cord), xytext=(x_cord, y_cord))
circ = Line2D([0], [0], linestyle="none", marker="o", alpha=0.75, markersize=10, markerfacecolor=colors[ii])
legArray.append(circ)
plt.ylabel(descInput[1], fontsize =16)
plt.xlabel(descInput[0], fontsize =16)
plt.legend(legArray, catArray ,numpoints=1,bbox_to_anchor=(0., 1.02, 1., .102), loc=3, ncol=len(catArray), mode="expand", borderaxespad=0.)
plt.savefig('features.png')
plt.show()
########################
plotFeatures('freesound-sounds', descInput = ('lowlevel.spectral_centroid.mean','lowlevel.mfcc.mean.2'), anotOn =0)
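# Additional illustrative call (added; not in the original script): the same
# scatter built from two mfcc coefficients with sound ids annotated. The
# coefficient indices are arbitrary choices for this sketch.
#plotFeatures('freesound-sounds', descInput = ('lowlevel.mfcc.mean.1','lowlevel.mfcc.mean.2'), anotOn =1)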
| agpl-3.0 |
JrtPec/opengrid | opengrid/recipes/carpetplot.py | 2 | 3681 |
# coding: utf-8
# ## This script shows the visualization of electricity, water and gas consumption using carpet plots
#
# To get started, first run the 'Synchronize data' script
#
# #### Imports and paths
# In[ ]:
import os
import time
import pandas as pd
from opengrid import config
from opengrid.library import plotting
from opengrid.library import houseprint
c=config.Config()
try:
if os.path.exists(c.get('tmpo', 'data')):
        path_to_tmpo_data = c.get('tmpo', 'data')
    else:
        path_to_tmpo_data = None
except:
path_to_tmpo_data = None
# configuration for the plots
DEV = c.get('env', 'type') == 'dev' # DEV is True if we are in development environment, False if on the droplet
print("Environment configured for development: {}".format(DEV))
if not DEV:
# production environment: don't try to display plots
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib.dates import MinuteLocator, HourLocator, DateFormatter, AutoDateLocator, num2date
if DEV:
if c.get('env', 'plots') == 'inline':
get_ipython().magic(u'matplotlib inline')
else:
get_ipython().magic(u'matplotlib qt')
else:
pass # don't try to render plots
plt.rcParams['figure.figsize'] = 16,8
# path to data
path_to_data = c.get('data', 'folder')
if not os.path.exists(path_to_data):
raise IOError("Provide your path to the data in your config.ini file. ")
else:
path_to_fig = os.path.join(path_to_data, 'figures')
if not os.path.isdir(path_to_fig): os.makedirs(path_to_fig)
# In[ ]:
c.get('data','folder')
# ### Loading meta data and user variables
# In[ ]:
# Load houseprint from cache if possible, otherwise build it from source
try:
hp_filename = os.path.join(c.get('data', 'folder'), 'hp_anonymous.pkl')
hp = houseprint.load_houseprint_from_file(hp_filename)
print("Houseprint loaded from {}".format(hp_filename))
except Exception as e:
print(e)
print("Because of this error we try to build the houseprint from source")
hp = houseprint.Houseprint()
end = pd.Timestamp(time.time(), unit='s')
start = end - pd.Timedelta('21 days')
# ### Water sensors
# In[ ]:
water_sensors = hp.get_sensors(sensortype='water')
print("{} water sensors".format(len(water_sensors)))
# In[ ]:
for sensor in water_sensors:
ts = sensor.get_data(head=start, tail=end)
if not ts.dropna().empty:
plotting.carpet(ts, title=' - '.join([sensor.device.key, sensor.description, sensor.key]), zlabel=r'Flow [l/min]')
plt.savefig(os.path.join(path_to_fig, 'carpet_'+sensor.type+'_'+sensor.key), dpi=100)
if not DEV:
plt.close()
# ### Gas sensors
# In[ ]:
gas_sensors = hp.get_sensors(sensortype=('gas'))
print("{} gas sensors".format(len(gas_sensors)))
# In[ ]:
for sensor in gas_sensors:
ts = sensor.get_data(head=start, tail=end)
if not ts.dropna().empty:
plotting.carpet(ts, title=' - '.join([sensor.device.key, sensor.description, sensor.key]), zlabel=r'Gas consumption [W]')
plt.savefig(os.path.join(path_to_fig, 'carpet_'+sensor.type+'_'+sensor.key), dpi=100)
if not DEV:
plt.close()
# ### Electricity sensors
# In[ ]:
elec_sensors = hp.get_sensors(sensortype=('electricity'))
print("{} electricity sensors".format(len(elec_sensors)))
# In[ ]:
for sensor in elec_sensors:
ts = sensor.get_data(head=start, tail=end)
if not ts.dropna().empty:
plotting.carpet(ts, title=' - '.join([sensor.device.key, sensor.description, sensor.key]), zlabel=r'Power [W]')
plt.savefig(os.path.join(path_to_fig, 'carpet_'+sensor.type+'_'+sensor.key), dpi=100)
if not DEV:
plt.close()
# In[ ]:
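# Illustrative refactor (added; not part of the original recipe): the three
# loops above differ only in the sensor list and the z-axis label, so they
# could be driven by a single helper such as this sketch.
def plot_carpets(sensors, zlabel):
    for sensor in sensors:
        ts = sensor.get_data(head=start, tail=end)
        if not ts.dropna().empty:
            plotting.carpet(ts, title=' - '.join([sensor.device.key, sensor.description, sensor.key]), zlabel=zlabel)
            plt.savefig(os.path.join(path_to_fig, 'carpet_'+sensor.type+'_'+sensor.key), dpi=100)
            if not DEV:
                plt.close()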
| apache-2.0 |
handroissuazo/tensorflow | tensorflow/contrib/factorization/python/ops/gmm.py | 2 | 5711 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementation of Gaussian mixture model (GMM) clustering using tf.Learn."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib import framework
from tensorflow.contrib.factorization.python.ops import gmm_ops
from tensorflow.contrib.framework.python.framework import checkpoint_utils
from tensorflow.contrib.framework.python.ops import variables
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.contrib.learn.python.learn.estimators import model_fn as model_fn_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops.control_flow_ops import with_dependencies
def _streaming_sum(scalar_tensor):
"""Create a sum metric and update op."""
sum_metric = framework.local_variable(constant_op.constant(0.0))
sum_update = sum_metric.assign_add(scalar_tensor)
return sum_metric, sum_update
class GMM(estimator.Estimator):
"""An estimator for GMM clustering."""
SCORES = 'scores'
ASSIGNMENTS = 'assignments'
ALL_SCORES = 'all_scores'
def __init__(self,
num_clusters,
model_dir=None,
random_seed=0,
params='wmc',
initial_clusters='random',
covariance_type='full',
config=None):
"""Creates a model for running GMM training and inference.
Args:
num_clusters: number of clusters to train.
model_dir: the directory to save the model results and log files.
random_seed: Python integer. Seed for PRNG used to initialize centers.
params: Controls which parameters are updated in the training process.
Can contain any combination of "w" for weights, "m" for means,
and "c" for covars.
initial_clusters: specifies how to initialize the clusters for training.
See gmm_ops.gmm for the possible values.
covariance_type: one of "full", "diag".
config: See Estimator
"""
self._num_clusters = num_clusters
self._params = params
self._training_initial_clusters = initial_clusters
self._covariance_type = covariance_type
self._training_graph = None
self._random_seed = random_seed
super(GMM, self).__init__(
model_fn=self._model_builder(), model_dir=model_dir, config=config)
def predict_assignments(self, input_fn=None, batch_size=None, outputs=None):
"""See BaseEstimator.predict."""
results = self.predict(input_fn=input_fn,
batch_size=batch_size,
outputs=outputs)
for result in results:
yield result[GMM.ASSIGNMENTS]
def score(self, input_fn=None, batch_size=None, steps=None):
"""Predict total sum of distances to nearest clusters.
Note that this function is different from the corresponding one in sklearn
which returns the negative of the sum of distances.
Args:
input_fn: see predict.
batch_size: see predict.
steps: see predict.
Returns:
Total sum of distances to nearest clusters.
"""
results = self.evaluate(input_fn=input_fn, batch_size=batch_size,
steps=steps)
return np.sum(results[GMM.SCORES])
def clusters(self):
"""Returns cluster centers."""
clusters = checkpoint_utils.load_variable(
self.model_dir, gmm_ops.GmmAlgorithm.CLUSTERS_VARIABLE)
return np.squeeze(clusters, 1)
def covariances(self):
"""Returns the covariances."""
return checkpoint_utils.load_variable(
self.model_dir, gmm_ops.GmmAlgorithm.CLUSTERS_COVS_VARIABLE)
def _parse_tensor_or_dict(self, features):
if isinstance(features, dict):
return array_ops.concat([features[k] for k in sorted(features.keys())],
1)
return features
def _model_builder(self):
"""Creates a model function."""
def _model_fn(features, labels, mode):
"""Model function."""
assert labels is None, labels
(all_scores, model_predictions, losses, training_op) = gmm_ops.gmm(
self._parse_tensor_or_dict(features), self._training_initial_clusters,
self._num_clusters, self._random_seed, self._covariance_type,
self._params)
incr_step = state_ops.assign_add(variables.get_global_step(), 1)
loss = math_ops.reduce_sum(losses)
training_op = with_dependencies([training_op, incr_step], loss)
predictions = {
GMM.ALL_SCORES: all_scores[0],
GMM.ASSIGNMENTS: model_predictions[0][0],
}
eval_metric_ops = {
GMM.SCORES: _streaming_sum(loss),
}
return model_fn_lib.ModelFnOps(mode=mode, predictions=predictions,
eval_metric_ops=eval_metric_ops,
loss=loss, train_op=training_op)
return _model_fn
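# Illustrative usage sketch (added; not part of this module). How the
# estimator above is typically driven; the input_fn, cluster count and
# step count below are assumptions:
#
#   gmm = GMM(num_clusters=3, covariance_type='diag')
#   gmm.fit(input_fn=my_input_fn, steps=100)
#   centers = gmm.clusters()
#   assignments = list(gmm.predict_assignments(input_fn=my_input_fn))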
| apache-2.0 |
LLNL/spack | var/spack/repos/builtin/packages/py-yt/package.py | 5 | 3411 | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyYt(PythonPackage):
"""Volumetric Data Analysis
yt is a python package for analyzing and visualizing
volumetric, multi-resolution data from astrophysical
simulations, radio telescopes, and a burgeoning
interdisciplinary community.
"""
homepage = "http://yt-project.org"
url = "https://github.com/yt-project/yt/archive/yt-3.5.0.tar.gz"
git = "https://github.com/yt-project/yt.git"
version("develop", branch="master")
version("develop-4.0", branch="yt-4.0")
version('3.5.1', sha256='cdc0ecb153e737d74820581f311d1be7b6f1a7ee065ad69706470939db88b041')
version('3.5.0', sha256='548598912adba72b782b7422d40d1d12a8c1a6cd064281a9a537fdb2a5af89fc')
version('3.4.1', sha256='b9a73ade3726a8163fc992999c8c1010ca89473131901fe4d48b820ab2ced486')
version('3.4.0', sha256='2120793a76864cf3165b2b7290ef719e358fa57501ee8721941e7cfc434cfb2b')
version('3.3.5', sha256='2ebe4bbefd9f5367563ce4d7eb87d3f6ef0de1f97ed1c03106d9541e71b7e1ca')
version('3.3.4', sha256='2842bab891cfbf3269a3c4bd8f22fef23c9a15a790ba48c6490730cb51ce9b0e')
version('3.3.3', sha256='7b9244089e92b1d32cef791cd72760bb8c80b391eaec29672a5377c33f932d88')
version('3.3.2', sha256='d323419ad3919e86d2af1738c846021fd7f5b5dc5c06059cdf3a2bc63226466a')
version('3.3.1', sha256='7ac68d5e05e2b57fb3635f1027f3201094f3547d584e72ab55fedbfd3bc09a36')
version('3.3.0', sha256='e6be799c0d9a83a06649f0d77a61ad9c23b94b34f94e16724e2b18f5c7513c33')
version('3.2.3', sha256='96476d17e9ce35f0d4380b2ddb398fe729e39f1f3894602ff07e49844541e5ca')
version('3.2.2', sha256='498ed77b3dae8c54929602d4931f3c3e0a3420a9b500cbd870f50b1e0efea8c3')
variant("astropy", default=True, description="enable astropy support")
variant("h5py", default=True, description="enable h5py support")
variant("scipy", default=True, description="enable scipy support")
variant("rockstar", default=False, description="enable rockstar support")
depends_on("py-astropy", type=('build', 'run'), when="+astropy")
depends_on("py-cython", type=('build', 'run'))
depends_on("py-h5py", type=('build', 'run'), when="+h5py")
depends_on("py-ipython", type=('build', 'run'))
depends_on("py-ipython@:6.99", type=('build', 'run'), when="^python@:2.99")
depends_on("py-matplotlib", type=('build', 'run'))
depends_on("py-numpy", type=('build', 'run'))
depends_on("py-scipy", type=('build', 'run'), when="+scipy")
depends_on("py-setuptools", type=('build', 'run'))
depends_on("py-sympy", type=('build', 'run'))
depends_on("rockstar@yt", type=('build', 'run'), when="+rockstar")
depends_on("[email protected]:2.8,3.4:")
@run_before('install')
def prep_yt(self):
if '+rockstar' in self.spec:
with open('rockstar.cfg', 'w') as rockstar_cfg:
rockstar_cfg.write(self.spec['rockstar'].prefix)
@run_after('install')
@on_package_attributes(run_tests=True)
def check_install(self):
# The Python interpreter path can be too long for this
# yt = Executable(join_path(prefix.bin, "yt"))
# yt("--help")
python(join_path(self.prefix.bin, "yt"), "--help")
| lgpl-2.1 |
mikebenfield/scikit-learn | sklearn/utils/tests/test_random.py | 85 | 7349 | from __future__ import division
import numpy as np
import scipy.sparse as sp
from scipy.misc import comb as combinations
from numpy.testing import assert_array_almost_equal
from sklearn.utils.random import sample_without_replacement
from sklearn.utils.random import random_choice_csc
from sklearn.utils.testing import (
assert_raises,
assert_equal,
assert_true)
###############################################################################
# test custom sampling without replacement algorithm
###############################################################################
def test_invalid_sample_without_replacement_algorithm():
assert_raises(ValueError, sample_without_replacement, 5, 4, "unknown")
def test_sample_without_replacement_algorithms():
methods = ("auto", "tracking_selection", "reservoir_sampling", "pool")
for m in methods:
def sample_without_replacement_method(n_population, n_samples,
random_state=None):
return sample_without_replacement(n_population, n_samples,
method=m,
random_state=random_state)
check_edge_case_of_sample_int(sample_without_replacement_method)
check_sample_int(sample_without_replacement_method)
check_sample_int_distribution(sample_without_replacement_method)
def check_edge_case_of_sample_int(sample_without_replacement):
# n_population < n_sample
assert_raises(ValueError, sample_without_replacement, 0, 1)
assert_raises(ValueError, sample_without_replacement, 1, 2)
# n_population == n_samples
assert_equal(sample_without_replacement(0, 0).shape, (0, ))
assert_equal(sample_without_replacement(1, 1).shape, (1, ))
# n_population >= n_samples
assert_equal(sample_without_replacement(5, 0).shape, (0, ))
assert_equal(sample_without_replacement(5, 1).shape, (1, ))
# n_population < 0 or n_samples < 0
assert_raises(ValueError, sample_without_replacement, -1, 5)
assert_raises(ValueError, sample_without_replacement, 5, -1)
def check_sample_int(sample_without_replacement):
# This test is heavily inspired from test_random.py of python-core.
#
# For the entire allowable range of 0 <= k <= N, validate that
# the sample is of the correct length and contains only unique items
n_population = 100
for n_samples in range(n_population + 1):
s = sample_without_replacement(n_population, n_samples)
assert_equal(len(s), n_samples)
unique = np.unique(s)
assert_equal(np.size(unique), n_samples)
assert_true(np.all(unique < n_population))
# test edge case n_population == n_samples == 0
assert_equal(np.size(sample_without_replacement(0, 0)), 0)
def check_sample_int_distribution(sample_without_replacement):
# This test is heavily inspired from test_random.py of python-core.
#
# For the entire allowable range of 0 <= k <= N, validate that
# sample generates all possible permutations
n_population = 10
# a large number of trials prevents false negatives without slowing normal
# case
n_trials = 10000
for n_samples in range(n_population):
        # Counting the number of combinations is not as good as counting the
        # number of permutations. However, it works with sampling algorithms
        # that do not provide a random permutation of the subset of integers.
n_expected = combinations(n_population, n_samples, exact=True)
output = {}
for i in range(n_trials):
output[frozenset(sample_without_replacement(n_population,
n_samples))] = None
if len(output) == n_expected:
break
else:
raise AssertionError(
"number of combinations != number of expected (%s != %s)" %
(len(output), n_expected))
def test_random_choice_csc(n_samples=10000, random_state=24):
# Explicit class probabilities
classes = [np.array([0, 1]), np.array([0, 1, 2])]
class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
got = random_choice_csc(n_samples, classes, class_probabilites,
random_state)
assert_true(sp.issparse(got))
for k in range(len(classes)):
p = np.bincount(got.getcol(k).toarray().ravel()) / float(n_samples)
assert_array_almost_equal(class_probabilites[k], p, decimal=1)
# Implicit class probabilities
classes = [[0, 1], [1, 2]] # test for array-like support
class_probabilites = [np.array([0.5, 0.5]), np.array([0, 1/2, 1/2])]
got = random_choice_csc(n_samples=n_samples,
classes=classes,
random_state=random_state)
assert_true(sp.issparse(got))
for k in range(len(classes)):
p = np.bincount(got.getcol(k).toarray().ravel()) / float(n_samples)
assert_array_almost_equal(class_probabilites[k], p, decimal=1)
# Edge case probabilities 1.0 and 0.0
classes = [np.array([0, 1]), np.array([0, 1, 2])]
class_probabilites = [np.array([1.0, 0.0]), np.array([0.0, 1.0, 0.0])]
got = random_choice_csc(n_samples, classes, class_probabilites,
random_state)
assert_true(sp.issparse(got))
for k in range(len(classes)):
p = np.bincount(got.getcol(k).toarray().ravel(),
minlength=len(class_probabilites[k])) / n_samples
assert_array_almost_equal(class_probabilites[k], p, decimal=1)
# One class target data
classes = [[1], [0]] # test for array-like support
class_probabilites = [np.array([0.0, 1.0]), np.array([1.0])]
got = random_choice_csc(n_samples=n_samples,
classes=classes,
random_state=random_state)
assert_true(sp.issparse(got))
for k in range(len(classes)):
p = np.bincount(got.getcol(k).toarray().ravel()) / n_samples
assert_array_almost_equal(class_probabilites[k], p, decimal=1)
def test_random_choice_csc_errors():
# the length of an array in classes and class_probabilites is mismatched
classes = [np.array([0, 1]), np.array([0, 1, 2, 3])]
class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
assert_raises(ValueError, random_choice_csc, 4, classes,
class_probabilites, 1)
# the class dtype is not supported
classes = [np.array(["a", "1"]), np.array(["z", "1", "2"])]
class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
assert_raises(ValueError, random_choice_csc, 4, classes,
class_probabilites, 1)
# the class dtype is not supported
classes = [np.array([4.2, 0.1]), np.array([0.1, 0.2, 9.4])]
class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
assert_raises(ValueError, random_choice_csc, 4, classes,
class_probabilites, 1)
# Given probabilities don't sum to 1
classes = [np.array([0, 1]), np.array([0, 1, 2])]
class_probabilites = [np.array([0.5, 0.6]), np.array([0.6, 0.1, 0.3])]
assert_raises(ValueError, random_choice_csc, 4, classes,
class_probabilites, 1)
| bsd-3-clause |
ftuyama/TEEG | mindwave/parser.py | 2 | 8227 | import bluetooth
import struct
import time
import pandas as pd
from datetime import datetime
"""
This interface library is designed to be used from very different contexts.
The general idea is that the Mindwave modules in the headset (and other devices)
talk a common binary protocol, which is entirely one-sided from headset to device/
computer, with one exception (explained later). The means of transport however
does vary. The original MindWave headset had 2.4Ghz wireless connection, using a
proprietary USB dongle/receiver. This receiver is mounted as a serial console in
Linux. It also requires extra commands to connect and disconnect.
The MindWave mobile uses bluetooth, which I would recommend over the 2.4Ghz version.
There have been hacks with arduinos hooked up to the Thinkgear AM modules directly.
Not only are the technical means of data transport different, your application needs
one of several possible means of regularly reading the data.
In the EuroPython 2014 talk "Brainwaves for Hackers" I demonstrated a way to do this
in the IPython Notebook, and that only involved a blocking read from a bluetooth socket at
certain intervals. Pygame works the same way.
There are more sophisticated event loops out there, like in Kivy, Gevent or Tornado.
Those are the reasons why there is a parser module that can be fed a stream of bytes.
You can add recorders to the parser, which take care of analyzing the parsed data.
There is for example one recorder which converts the parsed data into Pandas
Timeseries. But doing that dozens of times per second is too much work for weak
processors, like in the Raspberry Pi, so there you would probably derive your own
parser.
"""
def queue_to_series(a, freq="s"):
t = pd.date_range(end=datetime.now(), freq=freq, periods=len(a))
return pd.Series(a, index=t)
class ThinkGearParser(object):
def __init__(self, recorders=None):
self.recorders = []
if recorders is not None:
self.recorders += recorders
self.input_data = ""
self.parser = self.parse()
self.parser.next()
def feed(self, data):
for c in data:
self.parser.send(ord(c))
for recorder in self.recorders:
recorder.finish_chunk()
self.input_data += data
def dispatch_data(self, key, value):
for recorder in self.recorders:
recorder.dispatch_data(key, value)
def parse(self):
"""
This generator parses one byte at a time.
"""
times = []
while 1:
byte = yield
if byte == 0xaa:
byte = yield # This byte should be "\aa" too
if byte == 0xaa:
# packet synced by 0xaa 0xaa
packet_length = yield
packet_code = yield
if packet_code == 0xd4:
# standing by
self.state = "standby"
elif packet_code == 0xd0:
self.state = "connected"
elif packet_code == 0xd2:
data_len = yield
headset_id = yield
headset_id += yield
self.dongle_state = "disconnected"
else:
self.sending_data = True
left = packet_length - 2
while left > 0:
if packet_code == 0x80: # raw value
row_length = yield
a = yield
b = yield
value = struct.unpack("<h", chr(b)+chr(a))[0]
self.dispatch_data("raw", value)
left -= 2
elif packet_code == 0x02: # Poor signal
                                a = yield
                                self.dispatch_data("poor_signal", a)
                                left -= 1
elif packet_code == 0x04: # Attention (eSense)
a = yield
if a > 0:
v = struct.unpack("b", chr(a))[0]
if 0 < v <= 100:
self.dispatch_data("attention", v)
left -= 1
elif packet_code == 0x05: # Meditation (eSense)
a = yield
if a > 0:
v = struct.unpack("b", chr(a))[0]
if 0 < v <= 100:
self.dispatch_data("meditation", v)
left -= 1
elif packet_code == 0x16: # Blink Strength
self.current_blink_strength = yield
if self.current_blink_strength > 0:
self.dispatch_data("blink", self.current_blink_strength)
left -= 1
elif packet_code == 0x83:
vlength = yield
self.current_vector = []
for row in range(8):
a = yield
b = yield
c = yield
                                    value = a*255*255+b*255+c
                                    self.current_vector.append(value)
                                left -= vlength
self.dispatch_data(
"bands", self.current_vector)
packet_code = yield
else:
pass # sync failed
else:
pass # sync failed
class TimeSeriesRecorder:
def __init__(self, file_name=None):
self.meditation = pd.Series()
self.attention = pd.Series()
self.raw = pd.Series()
self.blink = pd.Series()
self.poor_signal = pd.Series()
self.attention_queue = []
self.meditation_queue = []
self.poor_signal_queue = []
self.blink_queue = []
self.raw_queue = []
if file_name is not None:
self.store = pd.HDFStore(file_name)
else:
self.store = None
def dispatch_data(self, key, value):
if key == "attention":
self.attention_queue.append(value)
# Blink and "poor signal" is only sent when a blink or poor signal is detected
# So fake continuous signal as zeros.
self.blink_queue.append(0)
self.poor_signal_queue.append(0)
elif key == "meditation":
self.meditation_queue.append(value)
elif key == "raw":
self.raw_queue.append(value)
elif key == "blink":
self.blink_queue.append(value)
elif key == "poor_signal":
if len(self.poor_signal_queue) > 0:
                self.poor_signal_queue[-1] = value
def finish_chunk(self):
""" called periodically to update the timeseries """
self.meditation = pd.concat(
[self.meditation, queue_to_series(self.meditation_queue, freq="s")])
self.attention = pd.concat(
[self.attention, queue_to_series(self.attention_queue, freq="s")])
self.blink = pd.concat(
[self.blink, queue_to_series(self.blink_queue, freq="s")])
self.raw = pd.concat(
[self.raw, queue_to_series(self.raw_queue, freq="1953U")])
self.poor_signal = pd.concat(
[self.poor_signal, queue_to_series(self.poor_signal_queue)])
self.attention_queue = []
self.meditation_queue = []
self.poor_signal_queue = []
self.blink_queue = []
self.raw_queue = []
if self.store is not None:
self.store['attention'] = self.attention
self.store['meditation'] = self.meditation
self.store['raw'] = self.raw
| mit |
Richert/BrainNetworks | BasalGanglia/stn_gpe_2pop_sim.py | 1 | 3349 | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from pyrates.utility.grid_search import grid_search
from copy import deepcopy
import seaborn as sns
import matplotlib as mpl
linewidth = 1.2
fontsize1 = 10
fontsize2 = 12
markersize1 = 60
markersize2 = 60
dpi = 200
plt.style.reload_library()
plt.style.use('seaborn-whitegrid')
mpl.rcParams['text.latex.preamble'] = [r'\usepackage{sfmath} \boldmath']
#mpl.rc('text', usetex=True)
mpl.rcParams["font.sans-serif"] = ["Roboto"]
mpl.rcParams["font.size"] = fontsize1
mpl.rcParams["font.weight"] = "bold"
mpl.rcParams['lines.linewidth'] = linewidth
mpl.rcParams['axes.titlesize'] = fontsize2
mpl.rcParams['axes.titleweight'] = 'bold'
mpl.rcParams['axes.labelsize'] = 'large'
mpl.rcParams['axes.labelweight'] = 'bold'
mpl.rcParams['xtick.color'] = 'black'
mpl.rcParams['ytick.color'] = 'black'
mpl.rcParams['ytick.alignment'] = 'center'
mpl.rcParams['legend.fontsize'] = fontsize1
# parameter definitions
#######################
# simulation parameters
dt = 1e-4
dts = 1e-1
T = 2050.0
# model parameters
k = 10.0
param_grid = {
'k_ee': [0.4*k],
'k_pe': [5.0*k],
'k_ep': [1.5*k],
'k_pp': [1.0*k],
'eta_e': [12.0],
'eta_p': [2.0],
'delta_e': [2.0],
'delta_p': [10.0],
'tau_e': [13.0],
'tau_p': [19.0],
'tau_ampa_r': [0.8],
'tau_ampa_d': [3.7],
'tau_gabaa_r': [0.5],
'tau_gabaa_d': [5.0],
'tau_stn': [2.0]
}
param_grid = pd.DataFrame.from_dict(param_grid)
param_map = {
'k_ee': {'vars': ['weight'], 'edges': [('stn', 'stn')]},
'k_pe': {'vars': ['weight'], 'edges': [('stn', 'gpe_p')]},
'k_pp': {'vars': ['weight'], 'edges': [('gpe_p', 'gpe_p')]},
'k_ep': {'vars': ['weight'], 'edges': [('gpe_p', 'stn')]},
'eta_e': {'vars': ['stn_syns_op/eta_e'], 'nodes': ['stn']},
'eta_p': {'vars': ['gpe_proto_syns_op/eta_i'], 'nodes': ['gpe_p']},
'delta_e': {'vars': ['stn_syns_op/delta_e'], 'nodes': ['stn']},
'delta_p': {'vars': ['gpe_proto_syns_op/delta_i'], 'nodes': ['gpe_p']},
'tau_e': {'vars': ['stn_syns_op/tau_e'], 'nodes': ['stn']},
'tau_p': {'vars': ['gpe_proto_syns_op/tau_i'], 'nodes': ['gpe_p']},
'tau_ampa_r': {'vars': ['gpe_proto_syns_op/tau_ampa_r', 'stn_syns_op/tau_ampa_r'], 'nodes': ['gpe_p', 'stn']},
'tau_ampa_d': {'vars': ['gpe_proto_syns_op/tau_ampa_d', 'stn_syns_op/tau_ampa_d'], 'nodes': ['gpe_p', 'stn']},
'tau_gabaa_r': {'vars': ['gpe_proto_syns_op/tau_gabaa_r', 'stn_syns_op/tau_gabaa_r'], 'nodes': ['gpe_p', 'stn']},
'tau_gabaa_d': {'vars': ['gpe_proto_syns_op/tau_gabaa_d', 'stn_syns_op/tau_gabaa_d'], 'nodes': ['gpe_p', 'stn']},
'tau_stn': {'vars': ['stn_syns_op/tau_gabaa'], 'nodes': ['stn']}
}
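# param_map links each column of param_grid to the circuit template: either an
# edge weight (via 'edges') or an operator variable on specific nodes (via
# 'nodes'/'vars'), so grid_search() knows where to apply each sampled value.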
# simulations
#############
results, result_map = grid_search(
circuit_template="config/stn_gpe/stn_gpe_2pop",
param_grid=param_grid,
param_map=param_map,
simulation_time=T,
step_size=dt,
permute=True,
sampling_step_size=dts,
inputs={
#'stn/stn_op/ctx': ctx,
#'str/str_dummy_op/I': stria
},
outputs={'r_e': 'stn/stn_syns_op/R_e', 'r_p': 'gpe_p/gpe_proto_syns_op/R_i'},
init_kwargs={
'backend': 'numpy', 'solver': 'scipy', 'step_size': dt},
method='RK45'
)
results = results*1e3
results.plot()
plt.show()
| apache-2.0 |
thomasdouenne/openfisca-france-indirect-taxation | openfisca_france_indirect_taxation/examples/transports/compute_compare/compute_ticpe_cas_type.py | 4 | 1812 | # -*- coding: utf-8 -*-
"""
Created on Tue Aug 18 11:30:44 2015
@author: thomas.douenne
"""
import datetime
import pandas as pd
from openfisca_france_indirect_taxation.tests import base
from openfisca_france_indirect_taxation.examples.utils_example import graph_builder_line, save_dataframe_to_graph
"""Nous simulons les montants de ticpe payés par un ménage selon le type de véhicule dont il dispose
Nous prenons un ménage dont les dépenses annuelles en carburants s'élèveraient à 1000 euros
C'est en dessous de la moyenne de nos samples (plutôt autour de 1500)"""
index = range(2000, 2014)
columns = ['si une essence et une diesel', 'si seulement vehicules diesel', 'si seulement vehicules essence']
depenses_ticpe_pour_1000_euros_carbu = pd.DataFrame(index = index, columns = columns)
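# For each vehicle configuration, set the diesel/petrol ownership dummies
# (veh_diesel, veh_essence) accordingly, then simulate the total TICPE paid
# for every year from 2000 to 2013.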
for element in columns:
if element == 'si seulement vehicules essence':
dies = 0
else:
dies = 1
if element == 'si seulement vehicules diesel':
ess = 0
else:
ess = 1
    for year in range(2000, 2014):
simulation = base.tax_benefit_system.new_scenario().init_single_entity(
period = year,
personne_de_reference = dict(
birth = datetime.date(year - 40, 1, 1),
),
menage = dict(
depenses_carburants = 1000,
veh_essence = ess,
veh_diesel = dies,
),
).new_simulation(debug = True)
depenses_ticpe_pour_1000_euros_carbu.loc[depenses_ticpe_pour_1000_euros_carbu.index == year, element] = \
simulation.calculate('ticpe_totale')
graph_builder_line(depenses_ticpe_pour_1000_euros_carbu)
save_dataframe_to_graph(depenses_ticpe_pour_1000_euros_carbu, 'cas_type_1000€_carbu.csv')
| agpl-3.0 |
Og192/Python | sklearnLearning/statisticalAndSupervisedLearning/adaboost.py | 2 | 2852 | print(__doc__)
# Author: Noel Dawe <[email protected]>
#
# License: BSD 3 clause
from sklearn.externals.six.moves import zip
import matplotlib.pyplot as plt
from sklearn.datasets import make_gaussian_quantiles
from sklearn.ensemble import AdaBoostClassifier
from sklearn.metrics import accuracy_score
from sklearn.tree import DecisionTreeClassifier
X, y = make_gaussian_quantiles(n_samples=13000, n_features=10,
n_classes=3, random_state=1)
n_split = 3000
X_train, X_test = X[:n_split], X[n_split:]
y_train, y_test = y[:n_split], y[n_split:]
bdt_real = AdaBoostClassifier(
DecisionTreeClassifier(max_depth=2),
n_estimators=600,
learning_rate=1)
bdt_discrete = AdaBoostClassifier(
DecisionTreeClassifier(max_depth=2),
n_estimators=600,
learning_rate=1.5,
algorithm="SAMME")
bdt_real.fit(X_train, y_train)
bdt_discrete.fit(X_train, y_train)
real_test_errors = []
discrete_test_errors = []
for real_test_predict, discrete_test_predict in zip(
        bdt_real.staged_predict(X_test), bdt_discrete.staged_predict(X_test)):
    real_test_errors.append(
        1. - accuracy_score(real_test_predict, y_test))
    discrete_test_errors.append(
        1. - accuracy_score(discrete_test_predict, y_test))
n_trees_discrete = len(bdt_discrete)
n_trees_real = len(bdt_real)
# Boosting might terminate early, but the following arrays are always
# n_estimators long. We crop them to the actual number of trees here:
discrete_estimator_errors = bdt_discrete.estimator_errors_[:n_trees_discrete]
real_estimator_errors = bdt_real.estimator_errors_[:n_trees_real]
discrete_estimator_weights = bdt_discrete.estimator_weights_[:n_trees_discrete]
plt.figure(figsize=(15, 5))
plt.subplot(131)
plt.plot(range(1, n_trees_discrete + 1),
discrete_test_errors, c='black', label='SAMME')
plt.plot(range(1, n_trees_real + 1),
real_test_errors, c='black',
linestyle='dashed', label='SAMME.R')
plt.legend()
plt.ylim(0.18, 0.62)
plt.ylabel('Test Error')
plt.xlabel('Number of Trees')
plt.subplot(132)
plt.plot(range(1, n_trees_discrete + 1), discrete_estimator_errors,
"b", label='SAMME', alpha=.5)
plt.plot(range(1, n_trees_real + 1), real_estimator_errors,
"r", label='SAMME.R', alpha=.5)
plt.legend()
plt.ylabel('Error')
plt.xlabel('Number of Trees')
plt.ylim((.2,
max(real_estimator_errors.max(),
discrete_estimator_errors.max()) * 1.2))
plt.xlim((-20, len(bdt_discrete) + 20))
plt.subplot(133)
plt.plot(range(1, n_trees_discrete + 1), discrete_estimator_weights,
"b", label='SAMME')
plt.legend()
plt.ylabel('Weight')
plt.xlabel('Number of Trees')
plt.ylim((0, discrete_estimator_weights.max() * 1.2))
plt.xlim((-20, n_trees_discrete + 20))
# prevent overlapping y-axis labels
plt.subplots_adjust(wspace=0.25)
plt.show() | gpl-2.0 |
kenshay/ImageScript | ProgramData/SystemFiles/Python/Lib/site-packages/scipy/stats/_binned_statistic.py | 4 | 25912 | from __future__ import division, print_function, absolute_import
import numpy as np
from scipy._lib.six import callable, xrange
from scipy._lib._numpy_compat import suppress_warnings
from collections import namedtuple
__all__ = ['binned_statistic',
'binned_statistic_2d',
'binned_statistic_dd']
BinnedStatisticResult = namedtuple('BinnedStatisticResult',
('statistic', 'bin_edges', 'binnumber'))
def binned_statistic(x, values, statistic='mean',
bins=10, range=None):
"""
Compute a binned statistic for one or more sets of data.
This is a generalization of a histogram function. A histogram divides
the space into bins, and returns the count of the number of points in
each bin. This function allows the computation of the sum, mean, median,
or other statistic of the values (or set of values) within each bin.
Parameters
----------
x : (N,) array_like
A sequence of values to be binned.
values : (N,) array_like or list of (N,) array_like
The data on which the statistic will be computed. This must be
the same shape as `x`, or a set of sequences - each the same shape as
`x`. If `values` is a set of sequences, the statistic will be computed
on each independently.
statistic : string or callable, optional
The statistic to compute (default is 'mean').
The following statistics are available:
* 'mean' : compute the mean of values for points within each bin.
Empty bins will be represented by NaN.
* 'median' : compute the median of values for points within each
bin. Empty bins will be represented by NaN.
* 'count' : compute the count of points within each bin. This is
identical to an unweighted histogram. `values` array is not
referenced.
* 'sum' : compute the sum of values for points within each bin.
This is identical to a weighted histogram.
* 'min' : compute the minimum of values for points within each bin.
Empty bins will be represented by NaN.
        * 'max' : compute the maximum of values for points within each bin.
Empty bins will be represented by NaN.
* function : a user-defined function which takes a 1D array of
values, and outputs a single numerical statistic. This function
will be called on the values in each bin. Empty bins will be
represented by function([]), or NaN if this returns an error.
bins : int or sequence of scalars, optional
If `bins` is an int, it defines the number of equal-width bins in the
given range (10 by default). If `bins` is a sequence, it defines the
bin edges, including the rightmost edge, allowing for non-uniform bin
widths. Values in `x` that are smaller than lowest bin edge are
assigned to bin number 0, values beyond the highest bin are assigned to
``bins[-1]``. If the bin edges are specified, the number of bins will
be, (nx = len(bins)-1).
range : (float, float) or [(float, float)], optional
The lower and upper range of the bins. If not provided, range
is simply ``(x.min(), x.max())``. Values outside the range are
ignored.
Returns
-------
statistic : array
The values of the selected statistic in each bin.
bin_edges : array of dtype float
Return the bin edges ``(length(statistic)+1)``.
binnumber: 1-D ndarray of ints
Indices of the bins (corresponding to `bin_edges`) in which each value
of `x` belongs. Same length as `values`. A binnumber of `i` means the
corresponding value is between (bin_edges[i-1], bin_edges[i]).
See Also
--------
numpy.digitize, numpy.histogram, binned_statistic_2d, binned_statistic_dd
Notes
-----
All but the last (righthand-most) bin is half-open. In other words, if
`bins` is ``[1, 2, 3, 4]``, then the first bin is ``[1, 2)`` (including 1,
but excluding 2) and the second ``[2, 3)``. The last bin, however, is
``[3, 4]``, which *includes* 4.
.. versionadded:: 0.11.0
Examples
--------
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
First some basic examples:
Create two evenly spaced bins in the range of the given sample, and sum the
corresponding values in each of those bins:
>>> values = [1.0, 1.0, 2.0, 1.5, 3.0]
>>> stats.binned_statistic([1, 1, 2, 5, 7], values, 'sum', bins=2)
(array([ 4. , 4.5]), array([ 1., 4., 7.]), array([1, 1, 1, 2, 2]))
Multiple arrays of values can also be passed. The statistic is calculated
on each set independently:
>>> values = [[1.0, 1.0, 2.0, 1.5, 3.0], [2.0, 2.0, 4.0, 3.0, 6.0]]
>>> stats.binned_statistic([1, 1, 2, 5, 7], values, 'sum', bins=2)
(array([[ 4. , 4.5], [ 8. , 9. ]]), array([ 1., 4., 7.]),
array([1, 1, 1, 2, 2]))
>>> stats.binned_statistic([1, 2, 1, 2, 4], np.arange(5), statistic='mean',
... bins=3)
(array([ 1., 2., 4.]), array([ 1., 2., 3., 4.]),
array([1, 2, 1, 2, 3]))
As a second example, we now generate some random data of sailing boat speed
as a function of wind speed, and then determine how fast our boat is for
certain wind speeds:
>>> windspeed = 8 * np.random.rand(500)
>>> boatspeed = .3 * windspeed**.5 + .2 * np.random.rand(500)
>>> bin_means, bin_edges, binnumber = stats.binned_statistic(windspeed,
... boatspeed, statistic='median', bins=[1,2,3,4,5,6,7])
>>> plt.figure()
>>> plt.plot(windspeed, boatspeed, 'b.', label='raw data')
>>> plt.hlines(bin_means, bin_edges[:-1], bin_edges[1:], colors='g', lw=5,
... label='binned statistic of data')
>>> plt.legend()
Now we can use ``binnumber`` to select all datapoints with a windspeed
below 1:
>>> low_boatspeed = boatspeed[binnumber == 0]
As a final example, we will use ``bin_edges`` and ``binnumber`` to make a
plot of a distribution that shows the mean and distribution around that
mean per bin, on top of a regular histogram and the probability
distribution function:
>>> x = np.linspace(0, 5, num=500)
>>> x_pdf = stats.maxwell.pdf(x)
>>> samples = stats.maxwell.rvs(size=10000)
>>> bin_means, bin_edges, binnumber = stats.binned_statistic(x, x_pdf,
... statistic='mean', bins=25)
>>> bin_width = (bin_edges[1] - bin_edges[0])
>>> bin_centers = bin_edges[1:] - bin_width/2
>>> plt.figure()
>>> plt.hist(samples, bins=50, density=True, histtype='stepfilled',
... alpha=0.2, label='histogram of data')
>>> plt.plot(x, x_pdf, 'r-', label='analytical pdf')
>>> plt.hlines(bin_means, bin_edges[:-1], bin_edges[1:], colors='g', lw=2,
... label='binned statistic of data')
>>> plt.plot((binnumber - 0.5) * bin_width, x_pdf, 'g.', alpha=0.5)
>>> plt.legend(fontsize=10)
>>> plt.show()
"""
try:
N = len(bins)
except TypeError:
N = 1
if N != 1:
bins = [np.asarray(bins, float)]
if range is not None:
if len(range) == 2:
range = [range]
medians, edges, binnumbers = binned_statistic_dd(
[x], values, statistic, bins, range)
return BinnedStatisticResult(medians, edges[0], binnumbers)
BinnedStatistic2dResult = namedtuple('BinnedStatistic2dResult',
('statistic', 'x_edge', 'y_edge',
'binnumber'))
def binned_statistic_2d(x, y, values, statistic='mean',
bins=10, range=None, expand_binnumbers=False):
"""
Compute a bidimensional binned statistic for one or more sets of data.
This is a generalization of a histogram2d function. A histogram divides
the space into bins, and returns the count of the number of points in
each bin. This function allows the computation of the sum, mean, median,
or other statistic of the values (or set of values) within each bin.
Parameters
----------
x : (N,) array_like
A sequence of values to be binned along the first dimension.
y : (N,) array_like
A sequence of values to be binned along the second dimension.
values : (N,) array_like or list of (N,) array_like
The data on which the statistic will be computed. This must be
the same shape as `x`, or a list of sequences - each with the same
shape as `x`. If `values` is such a list, the statistic will be
computed on each independently.
statistic : string or callable, optional
The statistic to compute (default is 'mean').
The following statistics are available:
* 'mean' : compute the mean of values for points within each bin.
Empty bins will be represented by NaN.
* 'median' : compute the median of values for points within each
bin. Empty bins will be represented by NaN.
* 'count' : compute the count of points within each bin. This is
identical to an unweighted histogram. `values` array is not
referenced.
* 'sum' : compute the sum of values for points within each bin.
This is identical to a weighted histogram.
* 'min' : compute the minimum of values for points within each bin.
Empty bins will be represented by NaN.
        * 'max' : compute the maximum of values for points within each bin.
Empty bins will be represented by NaN.
* function : a user-defined function which takes a 1D array of
values, and outputs a single numerical statistic. This function
will be called on the values in each bin. Empty bins will be
represented by function([]), or NaN if this returns an error.
bins : int or [int, int] or array_like or [array, array], optional
The bin specification:
* the number of bins for the two dimensions (nx = ny = bins),
* the number of bins in each dimension (nx, ny = bins),
* the bin edges for the two dimensions (x_edge = y_edge = bins),
* the bin edges in each dimension (x_edge, y_edge = bins).
If the bin edges are specified, the number of bins will be,
(nx = len(x_edge)-1, ny = len(y_edge)-1).
range : (2,2) array_like, optional
The leftmost and rightmost edges of the bins along each dimension
(if not specified explicitly in the `bins` parameters):
[[xmin, xmax], [ymin, ymax]]. All values outside of this range will be
considered outliers and not tallied in the histogram.
expand_binnumbers : bool, optional
'False' (default): the returned `binnumber` is a shape (N,) array of
linearized bin indices.
'True': the returned `binnumber` is 'unraveled' into a shape (2,N)
ndarray, where each row gives the bin numbers in the corresponding
dimension.
See the `binnumber` returned value, and the `Examples` section.
.. versionadded:: 0.17.0
Returns
-------
statistic : (nx, ny) ndarray
The values of the selected statistic in each two-dimensional bin.
x_edge : (nx + 1) ndarray
The bin edges along the first dimension.
y_edge : (ny + 1) ndarray
The bin edges along the second dimension.
binnumber : (N,) array of ints or (2,N) ndarray of ints
This assigns to each element of `sample` an integer that represents the
bin in which this observation falls. The representation depends on the
`expand_binnumbers` argument. See `Notes` for details.
See Also
--------
numpy.digitize, numpy.histogram2d, binned_statistic, binned_statistic_dd
Notes
-----
Binedges:
All but the last (righthand-most) bin is half-open. In other words, if
`bins` is ``[1, 2, 3, 4]``, then the first bin is ``[1, 2)`` (including 1,
but excluding 2) and the second ``[2, 3)``. The last bin, however, is
``[3, 4]``, which *includes* 4.
`binnumber`:
This returned argument assigns to each element of `sample` an integer that
represents the bin in which it belongs. The representation depends on the
`expand_binnumbers` argument. If 'False' (default): The returned
`binnumber` is a shape (N,) array of linearized indices mapping each
element of `sample` to its corresponding bin (using row-major ordering).
If 'True': The returned `binnumber` is a shape (2,N) ndarray where
each row indicates bin placements for each dimension respectively. In each
dimension, a binnumber of `i` means the corresponding value is between
(D_edge[i-1], D_edge[i]), where 'D' is either 'x' or 'y'.
.. versionadded:: 0.11.0
Examples
--------
>>> from scipy import stats
Calculate the counts with explicit bin-edges:
>>> x = [0.1, 0.1, 0.1, 0.6]
>>> y = [2.1, 2.6, 2.1, 2.1]
>>> binx = [0.0, 0.5, 1.0]
>>> biny = [2.0, 2.5, 3.0]
>>> ret = stats.binned_statistic_2d(x, y, None, 'count', bins=[binx,biny])
>>> ret.statistic
array([[ 2., 1.],
[ 1., 0.]])
The bin in which each sample is placed is given by the `binnumber`
returned parameter. By default, these are the linearized bin indices:
>>> ret.binnumber
array([5, 6, 5, 9])
The bin indices can also be expanded into separate entries for each
dimension using the `expand_binnumbers` parameter:
>>> ret = stats.binned_statistic_2d(x, y, None, 'count', bins=[binx,biny],
... expand_binnumbers=True)
>>> ret.binnumber
array([[1, 1, 1, 2],
[1, 2, 1, 1]])
Which shows that the first three elements belong in the xbin 1, and the
fourth into xbin 2; and so on for y.
"""
# This code is based on np.histogram2d
try:
N = len(bins)
except TypeError:
N = 1
if N != 1 and N != 2:
xedges = yedges = np.asarray(bins, float)
bins = [xedges, yedges]
medians, edges, binnumbers = binned_statistic_dd(
[x, y], values, statistic, bins, range,
expand_binnumbers=expand_binnumbers)
return BinnedStatistic2dResult(medians, edges[0], edges[1], binnumbers)
BinnedStatisticddResult = namedtuple('BinnedStatisticddResult',
('statistic', 'bin_edges',
'binnumber'))
def binned_statistic_dd(sample, values, statistic='mean',
bins=10, range=None, expand_binnumbers=False):
"""
Compute a multidimensional binned statistic for a set of data.
This is a generalization of a histogramdd function. A histogram divides
the space into bins, and returns the count of the number of points in
each bin. This function allows the computation of the sum, mean, median,
or other statistic of the values within each bin.
Parameters
----------
sample : array_like
Data to histogram passed as a sequence of D arrays of length N, or
as an (N,D) array.
values : (N,) array_like or list of (N,) array_like
The data on which the statistic will be computed. This must be
the same shape as `x`, or a list of sequences - each with the same
shape as `x`. If `values` is such a list, the statistic will be
computed on each independently.
statistic : string or callable, optional
The statistic to compute (default is 'mean').
The following statistics are available:
* 'mean' : compute the mean of values for points within each bin.
Empty bins will be represented by NaN.
* 'median' : compute the median of values for points within each
bin. Empty bins will be represented by NaN.
* 'count' : compute the count of points within each bin. This is
identical to an unweighted histogram. `values` array is not
referenced.
* 'sum' : compute the sum of values for points within each bin.
This is identical to a weighted histogram.
* 'min' : compute the minimum of values for points within each bin.
Empty bins will be represented by NaN.
        * 'max' : compute the maximum of values for points within each bin.
Empty bins will be represented by NaN.
* function : a user-defined function which takes a 1D array of
values, and outputs a single numerical statistic. This function
will be called on the values in each bin. Empty bins will be
represented by function([]), or NaN if this returns an error.
bins : sequence or int, optional
The bin specification must be in one of the following forms:
* A sequence of arrays describing the bin edges along each dimension.
* The number of bins for each dimension (nx, ny, ... = bins).
* The number of bins for all dimensions (nx = ny = ... = bins).
range : sequence, optional
A sequence of lower and upper bin edges to be used if the edges are
not given explicitly in `bins`. Defaults to the minimum and maximum
values along each dimension.
expand_binnumbers : bool, optional
'False' (default): the returned `binnumber` is a shape (N,) array of
linearized bin indices.
'True': the returned `binnumber` is 'unraveled' into a shape (D,N)
ndarray, where each row gives the bin numbers in the corresponding
dimension.
See the `binnumber` returned value, and the `Examples` section of
`binned_statistic_2d`.
.. versionadded:: 0.17.0
Returns
-------
statistic : ndarray, shape(nx1, nx2, nx3,...)
        The values of the selected statistic in each bin.
bin_edges : list of ndarrays
A list of D arrays describing the (nxi + 1) bin edges for each
dimension.
binnumber : (N,) array of ints or (D,N) ndarray of ints
This assigns to each element of `sample` an integer that represents the
bin in which this observation falls. The representation depends on the
`expand_binnumbers` argument. See `Notes` for details.
See Also
--------
numpy.digitize, numpy.histogramdd, binned_statistic, binned_statistic_2d
Notes
-----
Binedges:
All but the last (righthand-most) bin is half-open in each dimension. In
other words, if `bins` is ``[1, 2, 3, 4]``, then the first bin is
``[1, 2)`` (including 1, but excluding 2) and the second ``[2, 3)``. The
last bin, however, is ``[3, 4]``, which *includes* 4.
`binnumber`:
This returned argument assigns to each element of `sample` an integer that
represents the bin in which it belongs. The representation depends on the
`expand_binnumbers` argument. If 'False' (default): The returned
`binnumber` is a shape (N,) array of linearized indices mapping each
element of `sample` to its corresponding bin (using row-major ordering).
If 'True': The returned `binnumber` is a shape (D,N) ndarray where
each row indicates bin placements for each dimension respectively. In each
dimension, a binnumber of `i` means the corresponding value is between
(bin_edges[D][i-1], bin_edges[D][i]), for each dimension 'D'.
.. versionadded:: 0.11.0
"""
    known_stats = ['mean', 'median', 'count', 'sum', 'std', 'min', 'max']
if not callable(statistic) and statistic not in known_stats:
raise ValueError('invalid statistic %r' % (statistic,))
# `Ndim` is the number of dimensions (e.g. `2` for `binned_statistic_2d`)
# `Dlen` is the length of elements along each dimension.
# This code is based on np.histogramdd
try:
# `sample` is an ND-array.
Dlen, Ndim = sample.shape
except (AttributeError, ValueError):
# `sample` is a sequence of 1D arrays.
sample = np.atleast_2d(sample).T
Dlen, Ndim = sample.shape
# Store initial shape of `values` to preserve it in the output
values = np.asarray(values)
input_shape = list(values.shape)
# Make sure that `values` is 2D to iterate over rows
values = np.atleast_2d(values)
Vdim, Vlen = values.shape
# Make sure `values` match `sample`
if(statistic != 'count' and Vlen != Dlen):
raise AttributeError('The number of `values` elements must match the '
'length of each `sample` dimension.')
nbin = np.empty(Ndim, int) # Number of bins in each dimension
edges = Ndim * [None] # Bin edges for each dim (will be 2D array)
dedges = Ndim * [None] # Spacing between edges (will be 2D array)
try:
M = len(bins)
if M != Ndim:
raise AttributeError('The dimension of bins must be equal '
'to the dimension of the sample x.')
except TypeError:
bins = Ndim * [bins]
# Select range for each dimension
# Used only if number of bins is given.
if range is None:
smin = np.atleast_1d(np.array(sample.min(axis=0), float))
smax = np.atleast_1d(np.array(sample.max(axis=0), float))
else:
smin = np.zeros(Ndim)
smax = np.zeros(Ndim)
for i in xrange(Ndim):
smin[i], smax[i] = range[i]
# Make sure the bins have a finite width.
for i in xrange(len(smin)):
if smin[i] == smax[i]:
smin[i] = smin[i] - .5
smax[i] = smax[i] + .5
# Create edge arrays
for i in xrange(Ndim):
if np.isscalar(bins[i]):
nbin[i] = bins[i] + 2 # +2 for outlier bins
edges[i] = np.linspace(smin[i], smax[i], nbin[i] - 1)
else:
edges[i] = np.asarray(bins[i], float)
nbin[i] = len(edges[i]) + 1 # +1 for outlier bins
dedges[i] = np.diff(edges[i])
nbin = np.asarray(nbin)
# Compute the bin number each sample falls into, in each dimension
sampBin = [
np.digitize(sample[:, i], edges[i])
for i in xrange(Ndim)
]
# Using `digitize`, values that fall on an edge are put in the right bin.
# For the rightmost bin, we want values equal to the right
# edge to be counted in the last bin, and not as an outlier.
for i in xrange(Ndim):
# Find the rounding precision
decimal = int(-np.log10(dedges[i].min())) + 6
# Find which points are on the rightmost edge.
on_edge = np.where(np.around(sample[:, i], decimal) ==
np.around(edges[i][-1], decimal))[0]
# Shift these points one bin to the left.
sampBin[i][on_edge] -= 1
# Compute the sample indices in the flattened statistic matrix.
binnumbers = np.ravel_multi_index(sampBin, nbin)
result = np.empty([Vdim, nbin.prod()], float)
if statistic == 'mean':
result.fill(np.nan)
flatcount = np.bincount(binnumbers, None)
a = flatcount.nonzero()
for vv in xrange(Vdim):
flatsum = np.bincount(binnumbers, values[vv])
result[vv, a] = flatsum[a] / flatcount[a]
elif statistic == 'std':
result.fill(0)
flatcount = np.bincount(binnumbers, None)
a = flatcount.nonzero()
for vv in xrange(Vdim):
flatsum = np.bincount(binnumbers, values[vv])
flatsum2 = np.bincount(binnumbers, values[vv] ** 2)
result[vv, a] = np.sqrt(flatsum2[a] / flatcount[a] -
(flatsum[a] / flatcount[a]) ** 2)
elif statistic == 'count':
result.fill(0)
flatcount = np.bincount(binnumbers, None)
a = np.arange(len(flatcount))
result[:, a] = flatcount[np.newaxis, :]
elif statistic == 'sum':
result.fill(0)
for vv in xrange(Vdim):
flatsum = np.bincount(binnumbers, values[vv])
a = np.arange(len(flatsum))
result[vv, a] = flatsum
elif statistic == 'median':
result.fill(np.nan)
for i in np.unique(binnumbers):
for vv in xrange(Vdim):
result[vv, i] = np.median(values[vv, binnumbers == i])
elif statistic == 'min':
result.fill(np.nan)
for i in np.unique(binnumbers):
for vv in xrange(Vdim):
result[vv, i] = np.min(values[vv, binnumbers == i])
elif statistic == 'max':
result.fill(np.nan)
for i in np.unique(binnumbers):
for vv in xrange(Vdim):
result[vv, i] = np.max(values[vv, binnumbers == i])
elif callable(statistic):
with np.errstate(invalid='ignore'), suppress_warnings() as sup:
sup.filter(RuntimeWarning)
try:
null = statistic([])
except:
null = np.nan
result.fill(null)
for i in np.unique(binnumbers):
for vv in xrange(Vdim):
result[vv, i] = statistic(values[vv, binnumbers == i])
# Shape into a proper matrix
result = result.reshape(np.append(Vdim, nbin))
# Remove outliers (indices 0 and -1 for each bin-dimension).
core = [slice(None)] + Ndim * [slice(1, -1)]
result = result[core]
# Unravel binnumbers into an ndarray, each row the bins for each dimension
if(expand_binnumbers and Ndim > 1):
binnumbers = np.asarray(np.unravel_index(binnumbers, nbin))
if np.any(result.shape[1:] != nbin - 2):
raise RuntimeError('Internal Shape Error')
    # Reshape to have output (`result`) match input (`values`) shape
result = result.reshape(input_shape[:-1] + list(nbin-2))
return BinnedStatisticddResult(result, edges, binnumbers)
| gpl-3.0 |
saihttam/kaggle-axa | SPRegressionDriver.py | 1 | 5885 | import numpy as np
from sklearn.ensemble import GradientBoostingRegressor
# from sklearn.preprocessing import PolynomialFeatures, MinMaxScaler
# from sklearn.decomposition import PCA
# from sklearn.pipeline import Pipeline
from random import sample, seed
from sklearn.preprocessing import StandardScaler
from SparseFilter import SparseFilter
class RegressionDriver(object):
"""Class for Regression-based analysis of Driver traces"""
def __init__(self, driver, datadict, numberofrows=200):
"""Initialize by providing a (positive) driver example and a dictionary of (negative) driver references."""
seed(42)
self.driver = driver
self.numfeatures = self.driver.num_features
self.numrawfeatures = self.driver.num_rawfeatures
featurelist = []
rawfeaturelist = []
self.__clf = GradientBoostingRegressor(n_estimators=200)
self.__indexlist = []
for trace in self.driver.traces:
self.__indexlist.append(trace.identifier)
featurelist.append(trace.features)
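            # Pad each trace's raw feature vector with a large negative
            # sentinel (-1e7) so that every row has length num_rawfeatures.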
temp = trace.rawfeatures
for i in range(len(trace.rawfeatures), self.numrawfeatures):
temp.append(-1e7)
rawfeaturelist.append(temp)
# Initialize train and test np arrays
self.__traindata = np.asarray(featurelist)
self.__testdata = np.asarray(featurelist)
self.__rawtraindata = np.asarray(rawfeaturelist)
self.__rawtestdata = np.asarray(rawfeaturelist)
self.__trainlabels = np.ones((self.__traindata.shape[0],))
data = np.empty((0, self.numfeatures), float)
rawdata = np.empty((0, self.numrawfeatures), float)
# print rawdata.shape
# print data.shape
setkeys = datadict.keys()
if driver.identifier in setkeys:
setkeys.remove(driver.identifier)
else:
setkeys = sample(setkeys, len(setkeys) - 1)
for key in setkeys:
if key != driver.identifier:
rand_smpl = [datadict[key]['feat'][i] for i in sorted(sample(xrange(len(datadict[key]['feat'])), numberofrows))]
data = np.append(data, np.asarray(rand_smpl), axis=0)
raw_rand_smpl = [datadict[key]['raw'][i] for i in sorted(sample(xrange(len(datadict[key]['raw'])), numberofrows))]
temp = np.asarray(raw_rand_smpl)
if temp.shape[1] < self.numrawfeatures:
z = np.zeros((temp.shape[0], self.numrawfeatures - temp.shape[1]), dtype=temp.dtype)
temp = np.concatenate((temp, z), axis=1)
rawdata = np.append(rawdata, temp, axis=0)
else:
# print temp.shape
ix = range(temp.shape[0])
iy = range(self.numrawfeatures)
# print self.numrawfeatures
newtemp = temp[:, :self.numrawfeatures]
# print newtemp.shape
# print rawdata.shape
rawdata = np.append(rawdata, newtemp, axis=0)
# print rawdata.shape
# print self.__rawtraindata.shape
self.__rawtraindata = np.append(self.__rawtraindata, rawdata, axis=0)
self.__traindata = np.append(self.__traindata, data, axis=0)
self.__trainlabels = np.append(self.__trainlabels, np.zeros((data.shape[0],)), axis=0)
self.__y = np.ones((self.__testdata.shape[0],))
def classify(self):
"""Perform classification"""
train_X = np.asarray(self.__rawtraindata)
train_y = np.asarray(self.__trainlabels)
test_X = np.asarray(self.__rawtestdata)
train_feat_X = np.asarray(self.__traindata)
test_feat_X = np.asarray(self.__testdata)
# print train_feat_X.shape
# print test_feat_X.shape
scaler = StandardScaler().fit(np.r_[train_X, test_X])
train_X = scaler.transform(train_X)
test_X = scaler.transform(test_X)
## train a sparse filter on both train and test data
sf = SparseFilter(n_features=20, n_iterations=1000)
sf.fit(np.r_[train_X, test_X])
train_sf_X = sf.transform(train_X)
test_sf_X = sf.transform(test_X)
print train_sf_X
print test_sf_X
ss = StandardScaler()
train_combined_X = ss.fit_transform(np.c_[train_sf_X, train_feat_X])
test_combined_X = ss.transform(np.c_[test_sf_X, test_feat_X])
self.__clf.fit(train_combined_X, train_y.ravel())
self.__y = self.__clf.predict(test_combined_X)
feature_importance = self.__clf.feature_importances_
feature_importance = 100.0 * (feature_importance / feature_importance.max())
print feature_importance
def toKaggle(self):
"""Return string in Kaggle submission format"""
returnstring = ""
for i in xrange(len(self.__indexlist) - 1):
returnstring += "%d_%d,%.6f\n" % (self.driver.identifier, self.__indexlist[i], self.__y[i])
returnstring += "%d_%d,%.6f" % (self.driver.identifier, self.__indexlist[len(self.__indexlist)-1], self.__y[len(self.__indexlist)-1])
return returnstring
def validate(self, datadict):
from sklearn.metrics import roc_auc_score
testdata = np.empty((0, self.numfeatures), float)
y_true = np.empty((0,), float)
for key in datadict.keys():
currenttestdata = np.asarray(datadict[key])
testdata = np.append(testdata, currenttestdata, axis=0)
if key != self.driver.identifier:
y_true = np.append(y_true, np.zeros((currenttestdata.shape[0],)), axis=0)
else:
y_true = np.append(y_true, np.ones((currenttestdata.shape[0],)), axis=0)
y_score = self.__clf.predict(testdata)
result = roc_auc_score(y_true, y_score)
return result
| bsd-2-clause |
spookylukey/pandas-highcharts | pandas_highcharts/core.py | 1 | 7572 | # -*- coding: utf-8 -*-
import pandas
import copy
_pd2hc_kind = {
"bar": "column",
"barh": "bar",
"area": "area",
"line": "line",
"pie": "pie"
}
def pd2hc_kind(kind):
if kind not in _pd2hc_kind:
raise ValueError("%(kind)s plots are not yet supported" % locals())
return _pd2hc_kind[kind]
_pd2hc_linestyle = {
"-": "Solid",
"--": "Dash",
"-.": "DashDot",
":": "Dot"
}
def pd2hc_linestyle(linestyle):
if linestyle not in _pd2hc_linestyle:
raise ValueError("%(linestyle)s linestyles are not yet supported" % locals())
return _pd2hc_linestyle[linestyle]
def json_encode(obj):
return pandas.io.json.dumps(obj)
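# serialize() turns a DataFrame into a Highcharts chart definition. Most
# keyword arguments mirror pandas' DataFrame.plot() options (kind, title,
# figsize, grid, legend, logx/logy, secondary_y, ...); output_type selects a
# Python dict, a JSON string or a JavaScript "new Highcharts.Chart(...)"
# snippet, and chart_type="stock" emits a StockChart instead.
# A minimal usage sketch (the column names here are illustrative only):
#
#   df = pandas.DataFrame({"x": [1, 2, 3], "y": [4.0, 5.5, 6.0]})
#   chart_js = serialize(df, render_to="my-chart", x="x", kind="line",
#                        title="Demo")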
def serialize(df, output_type="javascript", chart_type="default", *args, **kwargs):
def serialize_chart(df, output, *args, **kwargs):
output["chart"] = {}
if 'render_to' in kwargs:
output['chart']['renderTo'] = kwargs['render_to']
if "figsize" in kwargs:
output["chart"]["width"] = kwargs["figsize"][0]
output["chart"]["height"] = kwargs["figsize"][1]
if "kind" in kwargs:
output["chart"]["type"] = pd2hc_kind(kwargs["kind"])
if kwargs.get('polar'):
output['chart']['polar'] = True
def serialize_colors(df, output, *args, **kwargs):
pass
def serialize_credits(df, output, *args, **kwargs):
pass
def serialize_data(df, output, *args, **kwargs):
pass
def serialize_drilldown(df, output, *args, **kwargs):
pass
def serialize_exporting(df, output, *args, **kwargs):
pass
def serialize_labels(df, output, *args, **kwargs):
pass
def serialize_legend(df, output, *args, **kwargs):
output["legend"] = {
"enabled": kwargs.get("legend", True)
}
def serialize_loading(df, output, *args, **kwargs):
pass
def serialize_navigation(df, output, *args, **kwargs):
pass
def serialize_noData(df, output, *args, **kwargs):
pass
def serialize_pane(df, output, *args, **kwargs):
pass
def serialize_plotOptions(df, output, *args, **kwargs):
pass
def serialize_series(df, output, *args, **kwargs):
def is_secondary(c, **kwargs):
return c in kwargs.get("secondary_y", [])
if kwargs.get('sort_columns'):
df = df.sort_index()
series = df.to_dict('series')
output["series"] = []
for name, data in series.items():
if df[name].dtype.kind in "biufc":
sec = is_secondary(name, **kwargs)
d = {
"name": name if not sec or not kwargs.get("mark_right", True) else name + " (right)",
"yAxis": int(sec),
"data": list(zip(df.index, data.tolist()))
}
if kwargs.get('polar'):
d['data'] = [v for k, v in d['data']]
if kwargs.get("kind") == "area" and kwargs.get("stacked", True):
d["stacking"] = 'normal'
if kwargs.get("style"):
d["dashStyle"] = pd2hc_linestyle(kwargs["style"].get(name, "-"))
output["series"].append(d)
def serialize_subtitle(df, output, *args, **kwargs):
pass
def serialize_title(df, output, *args, **kwargs):
if "title" in kwargs:
output["title"] = {"text": kwargs["title"]}
def serialize_tooltip(df, output, *args, **kwargs):
if 'tooltip' in kwargs:
output['tooltip'] = kwargs['tooltip']
def serialize_xAxis(df, output, *args, **kwargs):
output["xAxis"] = {}
if df.index.name:
output["xAxis"]["title"] = {"text": df.index.name}
if df.index.dtype.kind in "M":
output["xAxis"]["type"] = "datetime"
if df.index.dtype.kind == 'O':
output['xAxis']['categories'] = sorted(list(df.index)) if kwargs.get('sort_columns') else list(df.index)
if kwargs.get("grid"):
output["xAxis"]["gridLineWidth"] = 1
output["xAxis"]["gridLineDashStyle"] = "Dot"
if kwargs.get("loglog") or kwargs.get("logx"):
output["xAxis"]["type"] = 'logarithmic'
if "xlim" in kwargs:
output["xAxis"]["min"] = kwargs["xlim"][0]
output["xAxis"]["max"] = kwargs["xlim"][1]
if "rot" in kwargs:
output["xAxis"]["labels"] = {"rotation": kwargs["rot"]}
if "fontsize" in kwargs:
output["xAxis"].setdefault("labels", {})["style"] = {"fontSize": kwargs["fontsize"]}
if "xticks" in kwargs:
output["xAxis"]["tickPositions"] = kwargs["xticks"]
def serialize_yAxis(df, output, *args, **kwargs):
yAxis = {}
if kwargs.get("grid"):
yAxis["gridLineWidth"] = 1
yAxis["gridLineDashStyle"] = "Dot"
if kwargs.get("loglog") or kwargs.get("logy"):
yAxis["type"] = 'logarithmic'
if "ylim" in kwargs:
yAxis["min"] = kwargs["ylim"][0]
yAxis["max"] = kwargs["ylim"][1]
if "rot" in kwargs:
yAxis["labels"] = {"rotation": kwargs["rot"]}
if "fontsize" in kwargs:
yAxis.setdefault("labels", {})["style"] = {"fontSize": kwargs["fontsize"]}
if "yticks" in kwargs:
yAxis["tickPositions"] = kwargs["yticks"]
output["yAxis"] = [yAxis]
if kwargs.get("secondary_y"):
yAxis2 = copy.deepcopy(yAxis)
yAxis2["opposite"] = True
output["yAxis"].append(yAxis2)
def serialize_zoom(df, output, *args, **kwargs):
if "zoom" in kwargs:
if kwargs["zoom"] not in ("x", "y", "xy"):
raise ValueError("zoom must be in ('x', 'y', 'xy')")
output["chart"]["zoomType"] = kwargs["zoom"]
output = {}
df_copy = copy.deepcopy(df)
if "x" in kwargs:
df_copy.index = df_copy.pop(kwargs["x"])
if kwargs.get("use_index", True) is False:
df_copy = df_copy.reset_index()
if "y" in kwargs:
df_copy = pandas.DataFrame(df_copy, columns=kwargs["y"])
serialize_chart(df_copy, output, *args, **kwargs)
serialize_colors(df_copy, output, *args, **kwargs)
serialize_credits(df_copy, output, *args, **kwargs)
serialize_data(df_copy, output, *args, **kwargs)
serialize_drilldown(df_copy, output, *args, **kwargs)
serialize_exporting(df_copy, output, *args, **kwargs)
serialize_labels(df_copy, output, *args, **kwargs)
serialize_legend(df_copy, output, *args, **kwargs)
serialize_loading(df_copy, output, *args, **kwargs)
serialize_navigation(df_copy, output, *args, **kwargs)
serialize_noData(df_copy, output, *args, **kwargs)
serialize_pane(df_copy, output, *args, **kwargs)
serialize_plotOptions(df_copy, output, *args, **kwargs)
serialize_series(df_copy, output, *args, **kwargs)
serialize_subtitle(df_copy, output, *args, **kwargs)
serialize_title(df_copy, output, *args, **kwargs)
serialize_tooltip(df_copy, output, *args, **kwargs)
serialize_xAxis(df_copy, output, *args, **kwargs)
serialize_yAxis(df_copy, output, *args, **kwargs)
serialize_zoom(df_copy, output, *args, **kwargs)
if output_type == "dict":
return output
if output_type == "json":
return json_encode(output)
if chart_type == "stock":
return "new Highcharts.StockChart(%s);" % json_encode(output)
return "new Highcharts.Chart(%s);" % json_encode(output)
| mit |
YinongLong/scikit-learn | sklearn/neighbors/tests/test_dist_metrics.py | 38 | 6118 | import itertools
import pickle
import numpy as np
from numpy.testing import assert_array_almost_equal
import scipy
from scipy.spatial.distance import cdist
from sklearn.neighbors.dist_metrics import DistanceMetric
from nose import SkipTest
def dist_func(x1, x2, p):
return np.sum((x1 - x2) ** p) ** (1. / p)
def cmp_version(version1, version2):
version1 = tuple(map(int, version1.split('.')[:2]))
version2 = tuple(map(int, version2.split('.')[:2]))
if version1 < version2:
return -1
elif version1 > version2:
return 1
else:
return 0
class TestMetrics:
def __init__(self, n1=20, n2=25, d=4, zero_frac=0.5,
rseed=0, dtype=np.float64):
np.random.seed(rseed)
self.X1 = np.random.random((n1, d)).astype(dtype)
self.X2 = np.random.random((n2, d)).astype(dtype)
# make boolean arrays: ones and zeros
self.X1_bool = self.X1.round(0)
self.X2_bool = self.X2.round(0)
V = np.random.random((d, d))
VI = np.dot(V, V.T)
self.metrics = {'euclidean': {},
'cityblock': {},
'minkowski': dict(p=(1, 1.5, 2, 3)),
'chebyshev': {},
'seuclidean': dict(V=(np.random.random(d),)),
'wminkowski': dict(p=(1, 1.5, 3),
w=(np.random.random(d),)),
'mahalanobis': dict(VI=(VI,)),
'hamming': {},
'canberra': {},
'braycurtis': {}}
self.bool_metrics = ['matching', 'jaccard', 'dice',
'kulsinski', 'rogerstanimoto', 'russellrao',
'sokalmichener', 'sokalsneath']
def test_cdist(self):
for metric, argdict in self.metrics.items():
keys = argdict.keys()
for vals in itertools.product(*argdict.values()):
kwargs = dict(zip(keys, vals))
D_true = cdist(self.X1, self.X2, metric, **kwargs)
yield self.check_cdist, metric, kwargs, D_true
for metric in self.bool_metrics:
D_true = cdist(self.X1_bool, self.X2_bool, metric)
yield self.check_cdist_bool, metric, D_true
def check_cdist(self, metric, kwargs, D_true):
if metric == 'canberra' and cmp_version(scipy.__version__, '0.9') <= 0:
raise SkipTest("Canberra distance incorrect in scipy < 0.9")
dm = DistanceMetric.get_metric(metric, **kwargs)
D12 = dm.pairwise(self.X1, self.X2)
assert_array_almost_equal(D12, D_true)
def check_cdist_bool(self, metric, D_true):
dm = DistanceMetric.get_metric(metric)
D12 = dm.pairwise(self.X1_bool, self.X2_bool)
assert_array_almost_equal(D12, D_true)
def test_pdist(self):
for metric, argdict in self.metrics.items():
keys = argdict.keys()
for vals in itertools.product(*argdict.values()):
kwargs = dict(zip(keys, vals))
D_true = cdist(self.X1, self.X1, metric, **kwargs)
yield self.check_pdist, metric, kwargs, D_true
for metric in self.bool_metrics:
D_true = cdist(self.X1_bool, self.X1_bool, metric)
yield self.check_pdist_bool, metric, D_true
def check_pdist(self, metric, kwargs, D_true):
if metric == 'canberra' and cmp_version(scipy.__version__, '0.9') <= 0:
raise SkipTest("Canberra distance incorrect in scipy < 0.9")
dm = DistanceMetric.get_metric(metric, **kwargs)
D12 = dm.pairwise(self.X1)
assert_array_almost_equal(D12, D_true)
def check_pdist_bool(self, metric, D_true):
dm = DistanceMetric.get_metric(metric)
D12 = dm.pairwise(self.X1_bool)
assert_array_almost_equal(D12, D_true)
def test_pickle(self):
for metric, argdict in self.metrics.items():
keys = argdict.keys()
for vals in itertools.product(*argdict.values()):
kwargs = dict(zip(keys, vals))
yield self.check_pickle, metric, kwargs
for metric in self.bool_metrics:
yield self.check_pickle_bool, metric
def check_pickle_bool(self, metric):
dm = DistanceMetric.get_metric(metric)
D1 = dm.pairwise(self.X1_bool)
dm2 = pickle.loads(pickle.dumps(dm))
D2 = dm2.pairwise(self.X1_bool)
assert_array_almost_equal(D1, D2)
def check_pickle(self, metric, kwargs):
dm = DistanceMetric.get_metric(metric, **kwargs)
D1 = dm.pairwise(self.X1)
dm2 = pickle.loads(pickle.dumps(dm))
D2 = dm2.pairwise(self.X1)
assert_array_almost_equal(D1, D2)
def test_haversine_metric():
def haversine_slow(x1, x2):
return 2 * np.arcsin(np.sqrt(np.sin(0.5 * (x1[0] - x2[0])) ** 2
+ np.cos(x1[0]) * np.cos(x2[0]) *
np.sin(0.5 * (x1[1] - x2[1])) ** 2))
X = np.random.random((10, 2))
haversine = DistanceMetric.get_metric("haversine")
D1 = haversine.pairwise(X)
D2 = np.zeros_like(D1)
for i, x1 in enumerate(X):
for j, x2 in enumerate(X):
D2[i, j] = haversine_slow(x1, x2)
assert_array_almost_equal(D1, D2)
assert_array_almost_equal(haversine.dist_to_rdist(D1),
np.sin(0.5 * D2) ** 2)
def test_pyfunc_metric():
X = np.random.random((10, 3))
euclidean = DistanceMetric.get_metric("euclidean")
pyfunc = DistanceMetric.get_metric("pyfunc", func=dist_func, p=2)
# Check if both callable metric and predefined metric initialized
# DistanceMetric object is picklable
euclidean_pkl = pickle.loads(pickle.dumps(euclidean))
pyfunc_pkl = pickle.loads(pickle.dumps(pyfunc))
D1 = euclidean.pairwise(X)
D2 = pyfunc.pairwise(X)
D1_pkl = euclidean_pkl.pairwise(X)
D2_pkl = pyfunc_pkl.pairwise(X)
assert_array_almost_equal(D1, D2)
assert_array_almost_equal(D1_pkl, D2_pkl)
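# Minimal usage sketch (not part of the test suite): DistanceMetric.get_metric
# returns an object whose ``pairwise`` method computes the full distance matrix.
# The array shapes below are illustrative assumptions.
def example_pairwise_minkowski():
    rng = np.random.RandomState(0)
    A = rng.random_sample((5, 3))
    B = rng.random_sample((4, 3))
    dm = DistanceMetric.get_metric("minkowski", p=3)
    D = dm.pairwise(A, B)  # distances between every row of A and every row of B
    assert D.shape == (5, 4)
    return D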
| bsd-3-clause |
ithemal/Ithemal | learning/pytorch/loss_reports/plot.py | 1 | 11880 | #!/usr/bin/env python3
from matplotlib import pyplot as plt
from typing import List, NamedTuple, Union, Optional, Tuple
import argparse
import numpy as np
import os
import re
import scipy.ndimage.filters
import subprocess
import time
import matplotlib
matplotlib.rcParams.update({'font.size': 20})
TrainMeasurement = NamedTuple('TrainMeasurement', [
('experiment_name', str),
('epochs', List[int]),
('times', List[float]),
('losses', List[float]),
('trainers', List[int]),
])
TestMeasurement = NamedTuple('TestMeasurement', [
('experiment_name', str),
('times', List[float]),
('losses', List[float]),
])
_DIRNAME = os.path.abspath(os.path.dirname(__file__))
_TRAIN = 'Train'
_TEST = 'Test'
def plot_measurements(train_measurements, test_measurements, has_finished, train_blur, test_blur, plot_trainers, raw_x, save, norm_epoch, min_y, max_y, validation):
# type: (List[TrainMeasurement], List[TestMeasurement], List[bool], float, float, bool, bool, Optional[str], bool, float, float, bool) -> None
def get_times_and_losses(measurement, blur):
# type: (Union[TrainMeasurement, TestMeasurement], float) -> Tuple[np.array, np.array]
times = np.array(measurement.times) / 3600
if blur > 0:
losses = scipy.ndimage.filters.gaussian_filter1d(measurement.losses, blur)
else:
losses = measurement.losses
if raw_x:
return np.arange(len(losses)), losses
else:
return times, losses
fig = plt.figure(1, figsize=(12.8, 9.6), dpi=100)
plt.title('Loss over time')
loss_ax = fig.gca()
if plot_trainers:
trainer_ax = loss_ax.twinx()
trainer_ax.set_ylim([1, 6])
trainer_ax.set_ylabel('Number of running trainers')
else:
trainer_ax = None
if norm_epoch:
loss_ax.set_xlabel('Epochs')
else:
loss_ax.set_xlabel('Time in hours')
loss_ax.set_ylim([min_y, max_y])
loss_ax.set_ylabel('Loss')
for idx, (train_measurement, test_measurement, finished) in enumerate(zip(train_measurements, test_measurements, has_finished)):
color = 'C{}'.format(idx)
name = test_measurement.experiment_name
train_times, train_losses = get_times_and_losses(train_measurement, train_blur)
test_times, test_losses = get_times_and_losses(test_measurement, test_blur)
ep_advance = np.where(np.diff(train_measurement.epochs))[0] + 1
new_test_times = np.empty_like(test_times)
max_tr = train_times.max()
if norm_epoch:
prev = 0
prev_x = 0
for k, idx in enumerate(ep_advance):
x = train_times[idx]
idxs = (test_times >= prev_x) & (test_times < x)
old_tests = test_times[idxs]
new_test_times[idxs] = (old_tests - prev_x) / (x - prev_x) + k
train_times[prev:idx] = np.linspace(k, k+1, idx - prev)
prev = idx
prev_x = x
idxs = (test_times >= prev_x)
old_tests = test_times[idxs]
new_test_times[idxs] = (old_tests - prev_x) / (max_tr - prev_x) + len(ep_advance)
train_times[prev:] = np.linspace(len(ep_advance), len(ep_advance)+1, len(train_times) - prev)
test_times = new_test_times
else:
for idx in ep_advance:
x = train_times[idx]
y = train_losses[idx]
loss_ax.plot([x,x], [y - 0.005, y + 0.005], color=color)
loss_ax.plot(train_times, train_losses, label='{} train loss'.format(name), color=color)
if len(test_times) > 0:
loss_ax.plot(test_times, test_losses, linestyle='--', label='{} {} loss'.format(name, 'validation' if validation else 'test'), color=color)
if finished: # or True:
loss_ax.scatter(train_times[-1:], train_losses[-1:], marker='x', color=color)
if trainer_ax is not None:
trainer_ax.plot(train_times, train_measurement.trainers, label='{} trainers'.format(name), color=color)
loss_ax.legend()
if save:
plt.savefig(save)
else:
plt.show()
def synchronize_experiment_files(experiment_name):
# type: (str) -> Tuple[str, List[str], List[bool]]
match = re.match(r'^(?P<experiment_name>.*?)(?:\+(?P<time_count>\d+))?$', experiment_name)
if match is None:
raise ValueError('Unrecognized format: {}'.format(experiment_name))
experiment_name = match.group('experiment_name')
if match.group('time_count'):
time_count = max(int(match.group('time_count')), 1)
else:
time_count = 1
try:
output = subprocess.check_output(['aws', 's3', 'ls', 's3://ithemal-experiments/{}/'.format(experiment_name)]).strip()
except subprocess.CalledProcessError:
raise ValueError('Unknown experiment {}'.format(experiment_name))
if isinstance(output, bytes):
output = output.decode('utf8') # type: ignore
splits = [line.strip().split() for line in output.split('\n')]
times = [split[1][:-1] for split in splits if split[0] == 'PRE']
experiment_times = sorted(times)[-time_count:]
has_finished = [] # type: List[bool]
for experiment_time in experiment_times:
subprocess.check_call(['aws', 's3', 'sync', 's3://ithemal-experiments/{}/{}'.format(experiment_name, experiment_time),
os.path.join(_DIRNAME, 'data', experiment_name, experiment_time),
'--exclude', '*', '--include', 'loss_report.log'])
subprocess.check_call(['aws', 's3', 'sync', 's3://ithemal-experiments/{}/{}/checkpoint_reports'.format(experiment_name, experiment_time),
os.path.join(_DIRNAME, 'data', experiment_name, experiment_time, 'checkpoint_reports')])
has_validation_results_code = subprocess.call(
['aws', 's3', 'ls', 's3://ithemal-experiments/{}/{}/validation_results.txt'.format(experiment_name, experiment_time)],
stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'),
)
has_finished.append(has_validation_results_code == 0)
return experiment_name, experiment_times, has_finished
def extract_train_measurement(experiment_name, user_provided_name, experiment_time):
# type: (str, str, str) -> TrainMeasurement
fname = os.path.join(_DIRNAME, 'data', experiment_name, experiment_time, 'loss_report.log')
epochs = []
times = []
losses = []
trainers = []
with open(fname) as f:
for line in f.readlines():
split = line.split()
epochs.append(int(split[0]))
times.append(float(split[1]))
losses.append(float(split[2]))
trainers.append(int(split[3]))
return TrainMeasurement(
user_provided_name,
np.array(epochs),
np.array(times),
np.array(losses),
np.array(trainers),
)
def extract_test_measurement(experiment_name, user_provided_name, experiment_time):
# type: (str, str, str) -> TestMeasurement
checkpoint_fname_pat = re.compile(r'(?P<time>\d+\.\d+)\.report')
times = []
losses = []
checkpoint_reports_dir = os.path.join(_DIRNAME, 'data', experiment_name, experiment_time, 'checkpoint_reports')
for checkpoint_report in os.listdir(checkpoint_reports_dir):
checkpoint_report = os.path.basename(checkpoint_report)
match = checkpoint_fname_pat.search(checkpoint_report)
if not match:
raise ValueError('Invalid checkpoint report name {} (in {}/{})'.format(checkpoint_report, experiment_name, experiment_time))
elapsed_time = float(match.group('time'))
with open(os.path.join(checkpoint_reports_dir, checkpoint_report)) as f:
line = f.readlines()[-1]
loss = float(line[1:line.index(']')])
times.append(elapsed_time)
losses.append(loss)
times = np.array(times)
losses = np.array(losses)
sorted_idxs = np.argsort(times)
times = times[sorted_idxs]
losses = losses[sorted_idxs]
return TestMeasurement(user_provided_name, times, losses)
def get_measurements(experiments, names):
# type: (List[str], List[str]) -> Tuple[List[TrainMeasurement], List[TestMeasurement], List[bool]]
train_measurements = [] # type: List[TrainMeasurement]
test_measurements = [] # type: List[TestMeasurement]
has_finished = [] # type: List[bool]
if not names:
names = experiments
assert len(names) == len(experiments)
for experiment_name, user_name in zip(experiments, names):
name, experiment_times, finished = synchronize_experiment_files(experiment_name)
has_finished.extend(finished)
for experiment_time in experiment_times:
train_measurements.append(extract_train_measurement(name, user_name, experiment_time))
test_measurements.append(extract_test_measurement(name, user_name, experiment_time))
return train_measurements, test_measurements, has_finished
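# Hedged usage sketch (hypothetical experiment names 'exp-a'/'exp-b'):
# get_measurements syncs loss logs from S3, so AWS credentials and the named
# experiments must exist; plot_measurements then renders the train/test curves.
def example_plot_two_experiments():
    # type: () -> None
    train, test, finished = get_measurements(['exp-a', 'exp-b'], ['A', 'B'])
    plot_measurements(train, test, finished, train_blur=25, test_blur=0.5,
                      plot_trainers=False, raw_x=False, save='losses.png',
                      norm_epoch=False, min_y=0.0, max_y=0.4, validation=False)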
def main():
# type: () -> None
parser = argparse.ArgumentParser()
parser.add_argument('--train-blur', type=float, default=25)
parser.add_argument('--test-blur', type=float, default=0.5)
parser.add_argument('--min-y', type=float, default=0.0)
parser.add_argument('--max-y', type=float, default=0.4)
parser.add_argument('experiments', nargs='+')
parser.add_argument('--names', nargs='+')
parser.add_argument('--trainers', default=False, action='store_true')
parser.add_argument('--no-test', default=False, action='store_true')
parser.add_argument('--raw-x', default=False, action='store_true')
parser.add_argument('--sort', default=False, action='store_true')
parser.add_argument('--validation', default=False, action='store_true')
parser.add_argument('--norm-epoch', default=False, action='store_true')
parser.add_argument('--shortest-trainer', default=False, action='store_true')
parser.add_argument('--save')
args = parser.parse_args()
train_measurements, test_measurements, has_finished = get_measurements(args.experiments, args.names)
if args.no_test:
test_measurements = list(TestMeasurement(m.experiment_name, [], []) for m in test_measurements)
if args.sort:
idxs = np.argsort([-np.mean(m.losses[len(m.losses)//2:]) for m in train_measurements])
train_measurements = [train_measurements[i] for i in idxs]
test_measurements = [test_measurements[i] for i in idxs]
has_finished = [has_finished[i] for i in idxs]
if args.shortest_trainer:
shortest_epoch = min(measurement.epochs[-1] for measurement in train_measurements)
for tridx, (tr, te) in enumerate(zip(train_measurements, test_measurements)):
try:
cut_idx = next(i for (i, e) in enumerate(tr.epochs) if e > shortest_epoch)
except StopIteration:
continue
train_measurements[tridx] = TrainMeasurement(
tr.experiment_name,
tr.epochs[:cut_idx],
tr.times[:cut_idx],
tr.losses[:cut_idx],
tr.trainers[:cut_idx],
)
cut_time = train_measurements[tridx].times[-1]
try:
cut_idx = next(i for (i, t) in enumerate(te.times) if t > cut_time)
except StopIteration:
continue
test_measurements[tridx] = TestMeasurement(
te.experiment_name,
te.times[:cut_idx],
te.losses[:cut_idx],
)
plot_measurements(train_measurements, test_measurements, has_finished, args.train_blur, args.test_blur, args.trainers, args.raw_x, args.save, args.norm_epoch, args.min_y, args.max_y, args.validation)
if __name__ == '__main__':
main()
| mit |
TiedNets/TiedNets | plot_sim_steps.py | 1 | 3566 | __author__ = 'Agostino Sturaro'
import os
import numpy as np
import networkx as nx
import matplotlib.pyplot as plt
import shared_functions as sf
from PyPDF2 import PdfFileMerger
try:
from configparser import ConfigParser
except ImportError:
from ConfigParser import ConfigParser # ver. < 3.0
config = ConfigParser()
conf_path = os.path.normpath(
'C:/Users/sturaroa/Documents/Simulations/exp_1000n_test_2/rnd_atk/realistic/instance_0/run_0.ini')
config.read(conf_path)
# copypaste
area_size = 5
margin = area_size * 0.02
dist_perc = 0.16
base_graphs_dir = os.path.normpath(config.get('paths', 'netw_dir'))
step_graphs_dir = os.path.normpath(config.get('paths', 'results_dir'))
netw_a_fname = config.get('paths', 'netw_a_fname')
netw_b_fname = config.get('paths', 'netw_b_fname')
netw_inter_fname = config.get('paths', 'netw_inter_fname')
steps_index_fname = config.get('paths', 'run_stats_fname')
times = []
# opened in text-mode; all EOLs are converted to '\n'
steps_index = os.path.normpath(os.path.join(step_graphs_dir, steps_index_fname))
# open file skipping the first line, then read values by column
my_data = np.genfromtxt(steps_index, delimiter='\t', skip_header=1, dtype=None)
times = sf.get_unnamed_numpy_col(my_data, 0)
# read base graphs
original_A = nx.read_graphml(os.path.join(base_graphs_dir, netw_a_fname))
original_B = nx.read_graphml(os.path.join(base_graphs_dir, netw_b_fname))
original_I = nx.read_graphml(os.path.join(base_graphs_dir, netw_inter_fname))
# map used to separate nodes of the 2 networks (e.g. draw A nodes on the left side and B nodes on the right)
pos_shifts_by_netw = {original_A.graph['name']: {'x': 0, 'y': 0},
original_B.graph['name']: {'x': area_size + area_size * dist_perc, 'y': 0}}
print('times ' + str(times)) # debug
# draw graphs for each step
pdf_fpaths = []
for time in times:
print('time ' + str(time))
# copypaste
plt.figure(figsize=(15 + 1.6, 10))
plt.xlim(-margin, area_size * 2 + area_size * dist_perc + margin)
plt.ylim(-margin, area_size + margin)
A = nx.read_graphml(os.path.join(step_graphs_dir, str(time) + '_' + netw_a_fname))
sf.paint_netw_graph(A, original_A, {'power': 'r', 'generator': 'r', 'transmission_substation': 'plum',
'distribution_substation': 'magenta'}, 'r')
B = nx.read_graphml(os.path.join(step_graphs_dir, str(time) + '_' + netw_b_fname))
sf.paint_netw_graph(B, original_B, {'communication': 'b', 'controller': 'c', 'relay': 'b'}, 'b',
pos_shifts_by_netw[B.graph['name']])
I = nx.read_graphml(os.path.join(step_graphs_dir, str(time) + '_' + netw_inter_fname))
edge_col_per_type = {'power': 'r', 'generator': 'r', 'transmission_substation': 'plum',
'distribution_substation': 'magenta', 'communication': 'b', 'controller': 'c', 'relay': 'b'}
# sf.paint_inter_graph(I, original_I, 'orange', pos_shifts_by_netw, edge_col_per_type)
pdf_fpaths.append(os.path.join(step_graphs_dir, str(time) + '_full.pdf'))
plt.savefig(pdf_fpaths[-1]) # get last element in list
# plt.show()
plt.close() # free memory
# merge produced pdf files
merger = PdfFileMerger()
merger.append(open(os.path.join(base_graphs_dir, '_full.pdf'), 'rb'))
for fpath in pdf_fpaths:
merger.append(open(fpath, 'rb'))  # the merger keeps these file objects open until it is closed below
merger.write(os.path.join(step_graphs_dir, '_merge.pdf'))
merger.close() # free memory
# remove partial files
for fpath in pdf_fpaths:
os.remove(fpath)
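# Alternative sketch (not used above): PyPDF2's PdfFileMerger.append also accepts
# a plain path string, which avoids juggling open file handles; 'parts' and
# 'out_path' below are hypothetical arguments.
def merge_pdfs(parts, out_path):
    merger = PdfFileMerger()
    for part in parts:
        merger.append(part)  # path string; PyPDF2 opens and reads the file itself
    merger.write(out_path)
    merger.close()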
| gpl-3.0 |
glennq/scikit-learn | examples/neighbors/plot_kde_1d.py | 347 | 5100 | """
===================================
Simple 1D Kernel Density Estimation
===================================
This example uses the :class:`sklearn.neighbors.KernelDensity` class to
demonstrate the principles of Kernel Density Estimation in one dimension.
The first plot shows one of the problems with using histograms to visualize
the density of points in 1D. Intuitively, a histogram can be thought of as a
scheme in which a unit "block" is stacked above each point on a regular grid.
As the top two panels show, however, the choice of gridding for these blocks
can lead to wildly divergent ideas about the underlying shape of the density
distribution. If we instead center each block on the point it represents, we
get the estimate shown in the bottom left panel. This is a kernel density
estimation with a "top hat" kernel. This idea can be generalized to other
kernel shapes: the bottom-right panel of the first figure shows a Gaussian
kernel density estimate over the same distribution.
Scikit-learn implements efficient kernel density estimation using either
a Ball Tree or KD Tree structure, through the
:class:`sklearn.neighbors.KernelDensity` estimator. The available kernels
are shown in the second figure of this example.
The third figure compares kernel density estimates for a distribution of 100
samples in 1 dimension. Though this example uses 1D distributions, kernel
density estimation is easily and efficiently extensible to higher dimensions
as well.
"""
# Author: Jake Vanderplas <[email protected]>
#
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import norm
from sklearn.neighbors import KernelDensity
#----------------------------------------------------------------------
# Plot the progression of histograms to kernels
np.random.seed(1)
N = 20
X = np.concatenate((np.random.normal(0, 1, int(0.3 * N)),
np.random.normal(5, 1, int(0.7 * N))))[:, np.newaxis]
X_plot = np.linspace(-5, 10, 1000)[:, np.newaxis]
bins = np.linspace(-5, 10, 10)
fig, ax = plt.subplots(2, 2, sharex=True, sharey=True)
fig.subplots_adjust(hspace=0.05, wspace=0.05)
# histogram 1
ax[0, 0].hist(X[:, 0], bins=bins, fc='#AAAAFF', normed=True)
ax[0, 0].text(-3.5, 0.31, "Histogram")
# histogram 2
ax[0, 1].hist(X[:, 0], bins=bins + 0.75, fc='#AAAAFF', normed=True)
ax[0, 1].text(-3.5, 0.31, "Histogram, bins shifted")
# tophat KDE
kde = KernelDensity(kernel='tophat', bandwidth=0.75).fit(X)
log_dens = kde.score_samples(X_plot)
ax[1, 0].fill(X_plot[:, 0], np.exp(log_dens), fc='#AAAAFF')
ax[1, 0].text(-3.5, 0.31, "Tophat Kernel Density")
# Gaussian KDE
kde = KernelDensity(kernel='gaussian', bandwidth=0.75).fit(X)
log_dens = kde.score_samples(X_plot)
ax[1, 1].fill(X_plot[:, 0], np.exp(log_dens), fc='#AAAAFF')
ax[1, 1].text(-3.5, 0.31, "Gaussian Kernel Density")
for axi in ax.ravel():
axi.plot(X[:, 0], np.zeros(X.shape[0]) - 0.01, '+k')
axi.set_xlim(-4, 9)
axi.set_ylim(-0.02, 0.34)
for axi in ax[:, 0]:
axi.set_ylabel('Normalized Density')
for axi in ax[1, :]:
axi.set_xlabel('x')
#----------------------------------------------------------------------
# Plot all available kernels
X_plot = np.linspace(-6, 6, 1000)[:, None]
X_src = np.zeros((1, 1))
fig, ax = plt.subplots(2, 3, sharex=True, sharey=True)
fig.subplots_adjust(left=0.05, right=0.95, hspace=0.05, wspace=0.05)
def format_func(x, loc):
if x == 0:
return '0'
elif x == 1:
return 'h'
elif x == -1:
return '-h'
else:
return '%ih' % x
for i, kernel in enumerate(['gaussian', 'tophat', 'epanechnikov',
'exponential', 'linear', 'cosine']):
axi = ax.ravel()[i]
log_dens = KernelDensity(kernel=kernel).fit(X_src).score_samples(X_plot)
axi.fill(X_plot[:, 0], np.exp(log_dens), '-k', fc='#AAAAFF')
axi.text(-2.6, 0.95, kernel)
axi.xaxis.set_major_formatter(plt.FuncFormatter(format_func))
axi.xaxis.set_major_locator(plt.MultipleLocator(1))
axi.yaxis.set_major_locator(plt.NullLocator())
axi.set_ylim(0, 1.05)
axi.set_xlim(-2.9, 2.9)
ax[0, 1].set_title('Available Kernels')
#----------------------------------------------------------------------
# Plot a 1D density example
N = 100
np.random.seed(1)
X = np.concatenate((np.random.normal(0, 1, int(0.3 * N)),
np.random.normal(5, 1, int(0.7 * N))))[:, np.newaxis]
X_plot = np.linspace(-5, 10, 1000)[:, np.newaxis]
true_dens = (0.3 * norm(0, 1).pdf(X_plot[:, 0])
+ 0.7 * norm(5, 1).pdf(X_plot[:, 0]))
fig, ax = plt.subplots()
ax.fill(X_plot[:, 0], true_dens, fc='black', alpha=0.2,
label='input distribution')
for kernel in ['gaussian', 'tophat', 'epanechnikov']:
kde = KernelDensity(kernel=kernel, bandwidth=0.5).fit(X)
log_dens = kde.score_samples(X_plot)
ax.plot(X_plot[:, 0], np.exp(log_dens), '-',
label="kernel = '{0}'".format(kernel))
ax.text(6, 0.38, "N={0} points".format(N))
ax.legend(loc='upper left')
ax.plot(X[:, 0], -0.005 - 0.01 * np.random.random(X.shape[0]), '+k')
ax.set_xlim(-4, 9)
ax.set_ylim(-0.02, 0.4)
plt.show()
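#----------------------------------------------------------------------
# Follow-up sketch (not part of the original example): select the bandwidth by
# cross-validated grid search over the KDE log-likelihood. Depending on the
# scikit-learn version, GridSearchCV lives in sklearn.model_selection or
# sklearn.grid_search.
try:
    from sklearn.model_selection import GridSearchCV
except ImportError:  # older scikit-learn releases
    from sklearn.grid_search import GridSearchCV
grid = GridSearchCV(KernelDensity(kernel='gaussian'),
                    {'bandwidth': np.linspace(0.1, 1.0, 10)},
                    cv=5)
grid.fit(X)
print("best bandwidth: {0}".format(grid.best_params_['bandwidth']))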
| bsd-3-clause |
raysanders/astrophysics | chartcsv.py | 1 | 1325 | #chartcsv.py
#plots graphs from time series or folded phase data
#see test.csv for format of data file
#Ray Sanders - [email protected]
#
#importing the required libraries
# coding: utf-8
import matplotlib
import pylab
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.figure import Figure
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
# Read data from a CSV file (see test.csv for the expected format).
# IMPORTANT!!!! THE FILE NEEDS TO BE IN X,Y FORMAT!!!
r = mlab.csv2rec('./test.csv')
# Create a figure with size in inches.
# format is width, height
fig = Figure(figsize=(8,4))
# Create a canvas and add the figure to it.
canvas = FigureCanvas(fig)
# Create a subplot.
ax = fig.add_subplot(111)
# Set the title.
#ax.set_title('Test',fontsize=14)
# Set the X Axis label.
ax.set_xlabel('X',fontsize=12)
# Set the Y Axis label.
ax.set_ylabel('Y',fontsize=12)
# Display Grid.
#ax.grid(True,linestyle='-',color='0.75')
# Generate the Scatter Plot.
ax.scatter(r.x,r.y, c='black', marker='o',s=1);
# this sets limits on y axis if needed
ax.set_ylim(5.0, 8.5)
# this inverts the y axis
ax.set_ylim(ax.get_ylim()[::-1])
#
# set x limit if needed
ax.set_xlim(0.0, 2.0)
#
plt.show()
# Save the generated Scatter Plot to a PNG file.
canvas.print_figure('test.png',dpi=300)
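# Optional sketch: matplotlib.mlab.csv2rec has been removed in recent matplotlib
# releases; np.genfromtxt gives an equivalent structured array (assumes the CSV
# has an 'x','y' header row as described above). Access columns as data['x'] and
# data['y'] instead of r.x / r.y.
def load_xy(path='./test.csv'):
    import numpy as np
    return np.genfromtxt(path, delimiter=',', names=True)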
| mit |
xwolf12/scikit-learn | sklearn/ensemble/gradient_boosting.py | 126 | 65552 | """Gradient Boosted Regression Trees
This module contains methods for fitting gradient boosted regression trees for
both classification and regression.
The module structure is the following:
- The ``BaseGradientBoosting`` base class implements a common ``fit`` method
for all the estimators in the module. Regression and classification
only differ in the concrete ``LossFunction`` used.
- ``GradientBoostingClassifier`` implements gradient boosting for
classification problems.
- ``GradientBoostingRegressor`` implements gradient boosting for
regression problems.
"""
# Authors: Peter Prettenhofer, Scott White, Gilles Louppe, Emanuele Olivetti,
# Arnaud Joly
# License: BSD 3 clause
from __future__ import print_function
from __future__ import division
from abc import ABCMeta, abstractmethod
from time import time
import numbers
import numpy as np
from scipy import stats
from .base import BaseEnsemble
from ..base import BaseEstimator
from ..base import ClassifierMixin
from ..base import RegressorMixin
from ..utils import check_random_state, check_array, check_X_y, column_or_1d
from ..utils import check_consistent_length, deprecated
from ..utils.extmath import logsumexp
from ..utils.fixes import expit, bincount
from ..utils.stats import _weighted_percentile
from ..utils.validation import check_is_fitted, NotFittedError
from ..externals import six
from ..feature_selection.from_model import _LearntSelectorMixin
from ..tree.tree import DecisionTreeRegressor
from ..tree._tree import DTYPE, TREE_LEAF
from ..tree._tree import PresortBestSplitter
from ..tree._tree import FriedmanMSE
from ._gradient_boosting import predict_stages
from ._gradient_boosting import predict_stage
from ._gradient_boosting import _random_sample_mask
class QuantileEstimator(BaseEstimator):
"""An estimator predicting the alpha-quantile of the training targets."""
def __init__(self, alpha=0.9):
if not 0 < alpha < 1.0:
raise ValueError("`alpha` must be in (0, 1.0) but was %r" % alpha)
self.alpha = alpha
def fit(self, X, y, sample_weight=None):
if sample_weight is None:
self.quantile = stats.scoreatpercentile(y, self.alpha * 100.0)
else:
self.quantile = _weighted_percentile(y, sample_weight, self.alpha * 100.0)
def predict(self, X):
check_is_fitted(self, 'quantile')
y = np.empty((X.shape[0], 1), dtype=np.float64)
y.fill(self.quantile)
return y
class MeanEstimator(BaseEstimator):
"""An estimator predicting the mean of the training targets."""
def fit(self, X, y, sample_weight=None):
if sample_weight is None:
self.mean = np.mean(y)
else:
self.mean = np.average(y, weights=sample_weight)
def predict(self, X):
check_is_fitted(self, 'mean')
y = np.empty((X.shape[0], 1), dtype=np.float64)
y.fill(self.mean)
return y
class LogOddsEstimator(BaseEstimator):
"""An estimator predicting the log odds ratio."""
scale = 1.0
def fit(self, X, y, sample_weight=None):
# pre-cond: pos, neg are encoded as 1, 0
if sample_weight is None:
pos = np.sum(y)
neg = y.shape[0] - pos
else:
pos = np.sum(sample_weight * y)
neg = np.sum(sample_weight * (1 - y))
if neg == 0 or pos == 0:
raise ValueError('y contains non binary labels.')
self.prior = self.scale * np.log(pos / neg)
def predict(self, X):
check_is_fitted(self, 'prior')
y = np.empty((X.shape[0], 1), dtype=np.float64)
y.fill(self.prior)
return y
class ScaledLogOddsEstimator(LogOddsEstimator):
"""Log odds ratio scaled by 0.5 -- for exponential loss. """
scale = 0.5
class PriorProbabilityEstimator(BaseEstimator):
"""An estimator predicting the probability of each
class in the training data.
"""
def fit(self, X, y, sample_weight=None):
if sample_weight is None:
sample_weight = np.ones_like(y, dtype=np.float64)
class_counts = bincount(y, weights=sample_weight)
self.priors = class_counts / class_counts.sum()
def predict(self, X):
check_is_fitted(self, 'priors')
y = np.empty((X.shape[0], self.priors.shape[0]), dtype=np.float64)
y[:] = self.priors
return y
class ZeroEstimator(BaseEstimator):
"""An estimator that simply predicts zero. """
def fit(self, X, y, sample_weight=None):
if np.issubdtype(y.dtype, int):
# classification
self.n_classes = np.unique(y).shape[0]
if self.n_classes == 2:
self.n_classes = 1
else:
# regression
self.n_classes = 1
def predict(self, X):
check_is_fitted(self, 'n_classes')
y = np.empty((X.shape[0], self.n_classes), dtype=np.float64)
y.fill(0.0)
return y
class LossFunction(six.with_metaclass(ABCMeta, object)):
"""Abstract base class for various loss functions.
Attributes
----------
K : int
The number of regression trees to be induced;
1 for regression and binary classification;
``n_classes`` for multi-class classification.
"""
is_multi_class = False
def __init__(self, n_classes):
self.K = n_classes
def init_estimator(self):
"""Default ``init`` estimator for loss function. """
raise NotImplementedError()
@abstractmethod
def __call__(self, y, pred, sample_weight=None):
"""Compute the loss of prediction ``pred`` and ``y``. """
@abstractmethod
def negative_gradient(self, y, y_pred, **kargs):
"""Compute the negative gradient.
Parameters
---------
y : np.ndarray, shape=(n,)
The target labels.
y_pred : np.ndarray, shape=(n,):
The predictions.
"""
def update_terminal_regions(self, tree, X, y, residual, y_pred,
sample_weight, sample_mask,
learning_rate=1.0, k=0):
"""Update the terminal regions (=leaves) of the given tree and
updates the current predictions of the model. Traverses tree
and invokes template method `_update_terminal_region`.
Parameters
----------
tree : tree.Tree
The tree object.
X : ndarray, shape=(n, m)
The data array.
y : ndarray, shape=(n,)
The target labels.
residual : ndarray, shape=(n,)
The residuals (usually the negative gradient).
y_pred : ndarray, shape=(n,)
The predictions.
sample_weight : ndarray, shape=(n,)
The weight of each sample.
sample_mask : ndarray, shape=(n,)
The sample mask to be used.
learning_rate : float, default=1.0
learning rate shrinks the contribution of each tree by
``learning_rate``.
k : int, default 0
The index of the estimator being updated.
"""
# compute leaf for each sample in ``X``.
terminal_regions = tree.apply(X)
# mask all which are not in sample mask.
masked_terminal_regions = terminal_regions.copy()
masked_terminal_regions[~sample_mask] = -1
# update each leaf (= perform line search)
for leaf in np.where(tree.children_left == TREE_LEAF)[0]:
self._update_terminal_region(tree, masked_terminal_regions,
leaf, X, y, residual,
y_pred[:, k], sample_weight)
# update predictions (both in-bag and out-of-bag)
y_pred[:, k] += (learning_rate
* tree.value[:, 0, 0].take(terminal_regions, axis=0))
@abstractmethod
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
"""Template method for updating terminal regions (=leaves). """
class RegressionLossFunction(six.with_metaclass(ABCMeta, LossFunction)):
"""Base class for regression loss functions. """
def __init__(self, n_classes):
if n_classes != 1:
raise ValueError("``n_classes`` must be 1 for regression but "
"was %r" % n_classes)
super(RegressionLossFunction, self).__init__(n_classes)
class LeastSquaresError(RegressionLossFunction):
"""Loss function for least squares (LS) estimation.
Terminal regions need not to be updated for least squares. """
def init_estimator(self):
return MeanEstimator()
def __call__(self, y, pred, sample_weight=None):
if sample_weight is None:
return np.mean((y - pred.ravel()) ** 2.0)
else:
return (1.0 / sample_weight.sum() *
np.sum(sample_weight * ((y - pred.ravel()) ** 2.0)))
def negative_gradient(self, y, pred, **kargs):
return y - pred.ravel()
def update_terminal_regions(self, tree, X, y, residual, y_pred,
sample_weight, sample_mask,
learning_rate=1.0, k=0):
"""Least squares does not need to update terminal regions.
But it has to update the predictions.
"""
# update predictions
y_pred[:, k] += learning_rate * tree.predict(X).ravel()
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
pass
class LeastAbsoluteError(RegressionLossFunction):
"""Loss function for least absolute deviation (LAD) regression. """
def init_estimator(self):
return QuantileEstimator(alpha=0.5)
def __call__(self, y, pred, sample_weight=None):
if sample_weight is None:
return np.abs(y - pred.ravel()).mean()
else:
return (1.0 / sample_weight.sum() *
np.sum(sample_weight * np.abs(y - pred.ravel())))
def negative_gradient(self, y, pred, **kargs):
"""1.0 if y - pred > 0.0 else -1.0"""
pred = pred.ravel()
return 2.0 * (y - pred > 0.0) - 1.0
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
"""LAD updates terminal regions to median estimates. """
terminal_region = np.where(terminal_regions == leaf)[0]
sample_weight = sample_weight.take(terminal_region, axis=0)
diff = y.take(terminal_region, axis=0) - pred.take(terminal_region, axis=0)
tree.value[leaf, 0, 0] = _weighted_percentile(diff, sample_weight, percentile=50)
class HuberLossFunction(RegressionLossFunction):
"""Huber loss function for robust regression.
M-Regression proposed in Friedman 2001.
References
----------
J. Friedman, Greedy Function Approximation: A Gradient Boosting
Machine, The Annals of Statistics, Vol. 29, No. 5, 2001.
"""
def __init__(self, n_classes, alpha=0.9):
super(HuberLossFunction, self).__init__(n_classes)
self.alpha = alpha
self.gamma = None
def init_estimator(self):
return QuantileEstimator(alpha=0.5)
def __call__(self, y, pred, sample_weight=None):
pred = pred.ravel()
diff = y - pred
gamma = self.gamma
if gamma is None:
if sample_weight is None:
gamma = stats.scoreatpercentile(np.abs(diff), self.alpha * 100)
else:
gamma = _weighted_percentile(np.abs(diff), sample_weight, self.alpha * 100)
gamma_mask = np.abs(diff) <= gamma
if sample_weight is None:
sq_loss = np.sum(0.5 * diff[gamma_mask] ** 2.0)
lin_loss = np.sum(gamma * (np.abs(diff[~gamma_mask]) - gamma / 2.0))
loss = (sq_loss + lin_loss) / y.shape[0]
else:
sq_loss = np.sum(0.5 * sample_weight[gamma_mask] * diff[gamma_mask] ** 2.0)
lin_loss = np.sum(gamma * sample_weight[~gamma_mask] *
(np.abs(diff[~gamma_mask]) - gamma / 2.0))
loss = (sq_loss + lin_loss) / sample_weight.sum()
return loss
def negative_gradient(self, y, pred, sample_weight=None, **kargs):
pred = pred.ravel()
diff = y - pred
if sample_weight is None:
gamma = stats.scoreatpercentile(np.abs(diff), self.alpha * 100)
else:
gamma = _weighted_percentile(np.abs(diff), sample_weight, self.alpha * 100)
gamma_mask = np.abs(diff) <= gamma
residual = np.zeros((y.shape[0],), dtype=np.float64)
residual[gamma_mask] = diff[gamma_mask]
residual[~gamma_mask] = gamma * np.sign(diff[~gamma_mask])
self.gamma = gamma
return residual
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
terminal_region = np.where(terminal_regions == leaf)[0]
sample_weight = sample_weight.take(terminal_region, axis=0)
gamma = self.gamma
diff = (y.take(terminal_region, axis=0)
- pred.take(terminal_region, axis=0))
median = _weighted_percentile(diff, sample_weight, percentile=50)
diff_minus_median = diff - median
tree.value[leaf, 0] = median + np.mean(
np.sign(diff_minus_median) *
np.minimum(np.abs(diff_minus_median), gamma))
class QuantileLossFunction(RegressionLossFunction):
"""Loss function for quantile regression.
Quantile regression allows to estimate the percentiles
of the conditional distribution of the target.
"""
def __init__(self, n_classes, alpha=0.9):
super(QuantileLossFunction, self).__init__(n_classes)
assert 0 < alpha < 1.0
self.alpha = alpha
self.percentile = alpha * 100.0
def init_estimator(self):
return QuantileEstimator(self.alpha)
def __call__(self, y, pred, sample_weight=None):
pred = pred.ravel()
diff = y - pred
alpha = self.alpha
mask = y > pred
if sample_weight is None:
loss = (alpha * diff[mask].sum() +
(1.0 - alpha) * diff[~mask].sum()) / y.shape[0]
else:
loss = ((alpha * np.sum(sample_weight[mask] * diff[mask]) +
(1.0 - alpha) * np.sum(sample_weight[~mask] * diff[~mask])) /
sample_weight.sum())
return loss
def negative_gradient(self, y, pred, **kargs):
alpha = self.alpha
pred = pred.ravel()
mask = y > pred
return (alpha * mask) - ((1.0 - alpha) * ~mask)
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
terminal_region = np.where(terminal_regions == leaf)[0]
diff = (y.take(terminal_region, axis=0)
- pred.take(terminal_region, axis=0))
sample_weight = sample_weight.take(terminal_region, axis=0)
val = _weighted_percentile(diff, sample_weight, self.percentile)
tree.value[leaf, 0] = val
class ClassificationLossFunction(six.with_metaclass(ABCMeta, LossFunction)):
"""Base class for classification loss functions. """
def _score_to_proba(self, score):
"""Template method to convert scores to probabilities.
If the loss does not support probabilities, a TypeError is raised.
"""
raise TypeError('%s does not support predict_proba' % type(self).__name__)
@abstractmethod
def _score_to_decision(self, score):
"""Template method to convert scores to decisions.
Returns int arrays.
"""
class BinomialDeviance(ClassificationLossFunction):
"""Binomial deviance loss function for binary classification.
Binary classification is a special case; here, we only need to
fit one tree instead of ``n_classes`` trees.
"""
def __init__(self, n_classes):
if n_classes != 2:
raise ValueError("{0:s} requires 2 classes.".format(
self.__class__.__name__))
# we only need to fit one tree for binary clf.
super(BinomialDeviance, self).__init__(1)
def init_estimator(self):
return LogOddsEstimator()
def __call__(self, y, pred, sample_weight=None):
"""Compute the deviance (= 2 * negative log-likelihood). """
# logaddexp(0, v) == log(1.0 + exp(v))
pred = pred.ravel()
if sample_weight is None:
return -2.0 * np.mean((y * pred) - np.logaddexp(0.0, pred))
else:
return (-2.0 / sample_weight.sum() *
np.sum(sample_weight * ((y * pred) - np.logaddexp(0.0, pred))))
def negative_gradient(self, y, pred, **kargs):
"""Compute the residual (= negative gradient). """
return y - expit(pred.ravel())
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
"""Make a single Newton-Raphson step.
our node estimate is given by:
sum(w * (y - prob)) / sum(w * prob * (1 - prob))
we take advantage that: y - prob = residual
"""
terminal_region = np.where(terminal_regions == leaf)[0]
residual = residual.take(terminal_region, axis=0)
y = y.take(terminal_region, axis=0)
sample_weight = sample_weight.take(terminal_region, axis=0)
numerator = np.sum(sample_weight * residual)
denominator = np.sum(sample_weight * (y - residual) * (1 - y + residual))
if denominator == 0.0:
tree.value[leaf, 0, 0] = 0.0
else:
tree.value[leaf, 0, 0] = numerator / denominator
def _score_to_proba(self, score):
proba = np.ones((score.shape[0], 2), dtype=np.float64)
proba[:, 1] = expit(score.ravel())
proba[:, 0] -= proba[:, 1]
return proba
def _score_to_decision(self, score):
proba = self._score_to_proba(score)
return np.argmax(proba, axis=1)
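# Illustrative sketch (not used by the library): for binomial deviance the
# negative gradient reduces to the residual ``y - sigmoid(raw_score)``, which is
# exactly what ``BinomialDeviance.negative_gradient`` computes above.
def _binomial_deviance_gradient_example():
    y = np.array([0., 1., 1., 0.])
    raw_score = np.array([-1.5, 0.2, 2.0, 0.0])
    loss = BinomialDeviance(2)
    residual = loss.negative_gradient(y, raw_score)
    assert np.allclose(residual, y - expit(raw_score))
    return residual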
class MultinomialDeviance(ClassificationLossFunction):
"""Multinomial deviance loss function for multi-class classification.
For multi-class classification we need to fit ``n_classes`` trees at
each stage.
"""
is_multi_class = True
def __init__(self, n_classes):
if n_classes < 3:
raise ValueError("{0:s} requires more than 2 classes.".format(
self.__class__.__name__))
super(MultinomialDeviance, self).__init__(n_classes)
def init_estimator(self):
return PriorProbabilityEstimator()
def __call__(self, y, pred, sample_weight=None):
# create one-hot label encoding
Y = np.zeros((y.shape[0], self.K), dtype=np.float64)
for k in range(self.K):
Y[:, k] = y == k
if sample_weight is None:
return np.sum(-1 * (Y * pred).sum(axis=1) +
logsumexp(pred, axis=1))
else:
return np.sum(-1 * sample_weight * (Y * pred).sum(axis=1) +
logsumexp(pred, axis=1))
def negative_gradient(self, y, pred, k=0, **kwargs):
"""Compute negative gradient for the ``k``-th class. """
return y - np.nan_to_num(np.exp(pred[:, k] -
logsumexp(pred, axis=1)))
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
"""Make a single Newton-Raphson step. """
terminal_region = np.where(terminal_regions == leaf)[0]
residual = residual.take(terminal_region, axis=0)
y = y.take(terminal_region, axis=0)
sample_weight = sample_weight.take(terminal_region, axis=0)
numerator = np.sum(sample_weight * residual)
numerator *= (self.K - 1) / self.K
denominator = np.sum(sample_weight * (y - residual) *
(1.0 - y + residual))
if denominator == 0.0:
tree.value[leaf, 0, 0] = 0.0
else:
tree.value[leaf, 0, 0] = numerator / denominator
def _score_to_proba(self, score):
return np.nan_to_num(
np.exp(score - (logsumexp(score, axis=1)[:, np.newaxis])))
def _score_to_decision(self, score):
proba = self._score_to_proba(score)
return np.argmax(proba, axis=1)
class ExponentialLoss(ClassificationLossFunction):
"""Exponential loss function for binary classification.
Same loss as AdaBoost.
References
----------
Greg Ridgeway, Generalized Boosted Models: A guide to the gbm package, 2007
"""
def __init__(self, n_classes):
if n_classes != 2:
raise ValueError("{0:s} requires 2 classes.".format(
self.__class__.__name__))
# we only need to fit one tree for binary clf.
super(ExponentialLoss, self).__init__(1)
def init_estimator(self):
return ScaledLogOddsEstimator()
def __call__(self, y, pred, sample_weight=None):
pred = pred.ravel()
if sample_weight is None:
return np.mean(np.exp(-(2. * y - 1.) * pred))
else:
return (1.0 / sample_weight.sum() *
np.sum(sample_weight * np.exp(-(2 * y - 1) * pred)))
def negative_gradient(self, y, pred, **kargs):
y_ = -(2. * y - 1.)
return y_ * np.exp(y_ * pred.ravel())
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
terminal_region = np.where(terminal_regions == leaf)[0]
pred = pred.take(terminal_region, axis=0)
y = y.take(terminal_region, axis=0)
sample_weight = sample_weight.take(terminal_region, axis=0)
y_ = 2. * y - 1.
numerator = np.sum(y_ * sample_weight * np.exp(-y_ * pred))
denominator = np.sum(sample_weight * np.exp(-y_ * pred))
if denominator == 0.0:
tree.value[leaf, 0, 0] = 0.0
else:
tree.value[leaf, 0, 0] = numerator / denominator
def _score_to_proba(self, score):
proba = np.ones((score.shape[0], 2), dtype=np.float64)
proba[:, 1] = expit(2.0 * score.ravel())
proba[:, 0] -= proba[:, 1]
return proba
def _score_to_decision(self, score):
return (score.ravel() >= 0.0).astype(np.int)
LOSS_FUNCTIONS = {'ls': LeastSquaresError,
'lad': LeastAbsoluteError,
'huber': HuberLossFunction,
'quantile': QuantileLossFunction,
'deviance': None, # for both, multinomial and binomial
'exponential': ExponentialLoss,
}
INIT_ESTIMATORS = {'zero': ZeroEstimator}
class VerboseReporter(object):
"""Reports verbose output to stdout.
If ``verbose==1`` output is printed once in a while (when iteration mod
verbose_mod is zero); if larger than 1 then output is printed for
each update.
"""
def __init__(self, verbose):
self.verbose = verbose
def init(self, est, begin_at_stage=0):
# header fields and line format str
header_fields = ['Iter', 'Train Loss']
verbose_fmt = ['{iter:>10d}', '{train_score:>16.4f}']
# do oob?
if est.subsample < 1:
header_fields.append('OOB Improve')
verbose_fmt.append('{oob_impr:>16.4f}')
header_fields.append('Remaining Time')
verbose_fmt.append('{remaining_time:>16s}')
# print the header line
print(('%10s ' + '%16s ' *
(len(header_fields) - 1)) % tuple(header_fields))
self.verbose_fmt = ' '.join(verbose_fmt)
# plot verbose info each time i % verbose_mod == 0
self.verbose_mod = 1
self.start_time = time()
self.begin_at_stage = begin_at_stage
def update(self, j, est):
"""Update reporter with new iteration. """
do_oob = est.subsample < 1
# we need to take into account if we fit additional estimators.
i = j - self.begin_at_stage # iteration relative to the start iter
if (i + 1) % self.verbose_mod == 0:
oob_impr = est.oob_improvement_[j] if do_oob else 0
remaining_time = ((est.n_estimators - (j + 1)) *
(time() - self.start_time) / float(i + 1))
if remaining_time > 60:
remaining_time = '{0:.2f}m'.format(remaining_time / 60.0)
else:
remaining_time = '{0:.2f}s'.format(remaining_time)
print(self.verbose_fmt.format(iter=j + 1,
train_score=est.train_score_[j],
oob_impr=oob_impr,
remaining_time=remaining_time))
if self.verbose == 1 and ((i + 1) // (self.verbose_mod * 10) > 0):
# adjust verbose frequency (powers of 10)
self.verbose_mod *= 10
class BaseGradientBoosting(six.with_metaclass(ABCMeta, BaseEnsemble,
_LearntSelectorMixin)):
"""Abstract base class for Gradient Boosting. """
@abstractmethod
def __init__(self, loss, learning_rate, n_estimators, min_samples_split,
min_samples_leaf, min_weight_fraction_leaf,
max_depth, init, subsample, max_features,
random_state, alpha=0.9, verbose=0, max_leaf_nodes=None,
warm_start=False):
self.n_estimators = n_estimators
self.learning_rate = learning_rate
self.loss = loss
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.subsample = subsample
self.max_features = max_features
self.max_depth = max_depth
self.init = init
self.random_state = random_state
self.alpha = alpha
self.verbose = verbose
self.max_leaf_nodes = max_leaf_nodes
self.warm_start = warm_start
self.estimators_ = np.empty((0, 0), dtype=np.object)
def _fit_stage(self, i, X, y, y_pred, sample_weight, sample_mask,
criterion, splitter, random_state):
"""Fit another stage of ``n_classes_`` trees to the boosting model. """
assert sample_mask.dtype == np.bool
loss = self.loss_
original_y = y
for k in range(loss.K):
if loss.is_multi_class:
y = np.array(original_y == k, dtype=np.float64)
residual = loss.negative_gradient(y, y_pred, k=k,
sample_weight=sample_weight)
# induce regression tree on residuals
tree = DecisionTreeRegressor(
criterion=criterion,
splitter=splitter,
max_depth=self.max_depth,
min_samples_split=self.min_samples_split,
min_samples_leaf=self.min_samples_leaf,
min_weight_fraction_leaf=self.min_weight_fraction_leaf,
max_features=self.max_features,
max_leaf_nodes=self.max_leaf_nodes,
random_state=random_state)
if self.subsample < 1.0:
# no inplace multiplication!
sample_weight = sample_weight * sample_mask.astype(np.float64)
tree.fit(X, residual, sample_weight=sample_weight,
check_input=False)
# update tree leaves
loss.update_terminal_regions(tree.tree_, X, y, residual, y_pred,
sample_weight, sample_mask,
self.learning_rate, k=k)
# add tree to ensemble
self.estimators_[i, k] = tree
return y_pred
def _check_params(self):
"""Check validity of parameters and raise ValueError if not valid. """
if self.n_estimators <= 0:
raise ValueError("n_estimators must be greater than 0 but "
"was %r" % self.n_estimators)
if self.learning_rate <= 0.0:
raise ValueError("learning_rate must be greater than 0 but "
"was %r" % self.learning_rate)
if (self.loss not in self._SUPPORTED_LOSS
or self.loss not in LOSS_FUNCTIONS):
raise ValueError("Loss '{0:s}' not supported. ".format(self.loss))
if self.loss == 'deviance':
loss_class = (MultinomialDeviance
if len(self.classes_) > 2
else BinomialDeviance)
else:
loss_class = LOSS_FUNCTIONS[self.loss]
if self.loss in ('huber', 'quantile'):
self.loss_ = loss_class(self.n_classes_, self.alpha)
else:
self.loss_ = loss_class(self.n_classes_)
if not (0.0 < self.subsample <= 1.0):
raise ValueError("subsample must be in (0,1] but "
"was %r" % self.subsample)
if self.init is not None:
if isinstance(self.init, six.string_types):
if self.init not in INIT_ESTIMATORS:
raise ValueError('init="%s" is not supported' % self.init)
else:
if (not hasattr(self.init, 'fit')
or not hasattr(self.init, 'predict')):
raise ValueError("init=%r must be valid BaseEstimator "
"and support both fit and "
"predict" % self.init)
if not (0.0 < self.alpha < 1.0):
raise ValueError("alpha must be in (0.0, 1.0) but "
"was %r" % self.alpha)
if isinstance(self.max_features, six.string_types):
if self.max_features == "auto":
# if is_classification
if self.n_classes_ > 1:
max_features = max(1, int(np.sqrt(self.n_features)))
else:
# is regression
max_features = self.n_features
elif self.max_features == "sqrt":
max_features = max(1, int(np.sqrt(self.n_features)))
elif self.max_features == "log2":
max_features = max(1, int(np.log2(self.n_features)))
else:
raise ValueError("Invalid value for max_features: %r. "
"Allowed string values are 'auto', 'sqrt' "
"or 'log2'." % self.max_features)
elif self.max_features is None:
max_features = self.n_features
elif isinstance(self.max_features, (numbers.Integral, np.integer)):
max_features = self.max_features
else: # float
if 0. < self.max_features <= 1.:
max_features = max(int(self.max_features * self.n_features), 1)
else:
raise ValueError("max_features must be in (0, n_features]")
self.max_features_ = max_features
def _init_state(self):
"""Initialize model state and allocate model state data structures. """
if self.init is None:
self.init_ = self.loss_.init_estimator()
elif isinstance(self.init, six.string_types):
self.init_ = INIT_ESTIMATORS[self.init]()
else:
self.init_ = self.init
self.estimators_ = np.empty((self.n_estimators, self.loss_.K),
dtype=np.object)
self.train_score_ = np.zeros((self.n_estimators,), dtype=np.float64)
# do oob?
if self.subsample < 1.0:
self.oob_improvement_ = np.zeros((self.n_estimators),
dtype=np.float64)
def _clear_state(self):
"""Clear the state of the gradient boosting model. """
if hasattr(self, 'estimators_'):
self.estimators_ = np.empty((0, 0), dtype=np.object)
if hasattr(self, 'train_score_'):
del self.train_score_
if hasattr(self, 'oob_improvement_'):
del self.oob_improvement_
if hasattr(self, 'init_'):
del self.init_
def _resize_state(self):
"""Add additional ``n_estimators`` entries to all attributes. """
# self.n_estimators is the number of additional est to fit
total_n_estimators = self.n_estimators
if total_n_estimators < self.estimators_.shape[0]:
raise ValueError('resize with smaller n_estimators %d < %d' %
(total_n_estimators, self.estimators_.shape[0]))
self.estimators_.resize((total_n_estimators, self.loss_.K))
self.train_score_.resize(total_n_estimators)
if (self.subsample < 1 or hasattr(self, 'oob_improvement_')):
# if do oob resize arrays or create new if not available
if hasattr(self, 'oob_improvement_'):
self.oob_improvement_.resize(total_n_estimators)
else:
self.oob_improvement_ = np.zeros((total_n_estimators,),
dtype=np.float64)
def _is_initialized(self):
return len(getattr(self, 'estimators_', [])) > 0
def fit(self, X, y, sample_weight=None, monitor=None):
"""Fit the gradient boosting model.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape = [n_samples]
Target values (integers in classification, real numbers in
regression)
For classification, labels must correspond to classes.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. In the case of
classification, splits are also ignored if they would result in any
single class carrying a negative weight in either child node.
monitor : callable, optional
The monitor is called after each iteration with the current
iteration, a reference to the estimator and the local variables of
``_fit_stages`` as keyword arguments ``callable(i, self,
locals())``. If the callable returns ``True`` the fitting procedure
is stopped. The monitor can be used for various things such as
computing held-out estimates, early stopping, model introspection, and
snapshotting.
Returns
-------
self : object
Returns self.
"""
# if not warmstart - clear the estimator state
if not self.warm_start:
self._clear_state()
# Check input
X, y = check_X_y(X, y, dtype=DTYPE)
n_samples, self.n_features = X.shape
if sample_weight is None:
sample_weight = np.ones(n_samples, dtype=np.float32)
else:
sample_weight = column_or_1d(sample_weight, warn=True)
check_consistent_length(X, y, sample_weight)
y = self._validate_y(y)
random_state = check_random_state(self.random_state)
self._check_params()
if not self._is_initialized():
# init state
self._init_state()
# fit initial model - FIXME make sample_weight optional
self.init_.fit(X, y, sample_weight)
# init predictions
y_pred = self.init_.predict(X)
begin_at_stage = 0
else:
# add more estimators to fitted model
# invariant: warm_start = True
if self.n_estimators < self.estimators_.shape[0]:
raise ValueError('n_estimators=%d must be larger or equal to '
'estimators_.shape[0]=%d when '
'warm_start==True'
% (self.n_estimators,
self.estimators_.shape[0]))
begin_at_stage = self.estimators_.shape[0]
y_pred = self._decision_function(X)
self._resize_state()
# fit the boosting stages
n_stages = self._fit_stages(X, y, y_pred, sample_weight, random_state,
begin_at_stage, monitor)
# change shape of arrays after fit (early-stopping or additional ests)
if n_stages != self.estimators_.shape[0]:
self.estimators_ = self.estimators_[:n_stages]
self.train_score_ = self.train_score_[:n_stages]
if hasattr(self, 'oob_improvement_'):
self.oob_improvement_ = self.oob_improvement_[:n_stages]
return self
def _fit_stages(self, X, y, y_pred, sample_weight, random_state,
begin_at_stage=0, monitor=None):
"""Iteratively fits the stages.
For each stage it computes the progress (OOB, train score)
and delegates to ``_fit_stage``.
Returns the number of stages fit; might differ from ``n_estimators``
due to early stopping.
"""
n_samples = X.shape[0]
do_oob = self.subsample < 1.0
sample_mask = np.ones((n_samples, ), dtype=np.bool)
n_inbag = max(1, int(self.subsample * n_samples))
loss_ = self.loss_
# Set min_weight_leaf from min_weight_fraction_leaf
if self.min_weight_fraction_leaf != 0. and sample_weight is not None:
min_weight_leaf = (self.min_weight_fraction_leaf *
np.sum(sample_weight))
else:
min_weight_leaf = 0.
# init criterion and splitter
criterion = FriedmanMSE(1)
splitter = PresortBestSplitter(criterion,
self.max_features_,
self.min_samples_leaf,
min_weight_leaf,
random_state)
if self.verbose:
verbose_reporter = VerboseReporter(self.verbose)
verbose_reporter.init(self, begin_at_stage)
# perform boosting iterations
i = begin_at_stage
for i in range(begin_at_stage, self.n_estimators):
# subsampling
if do_oob:
sample_mask = _random_sample_mask(n_samples, n_inbag,
random_state)
# OOB score before adding this stage
old_oob_score = loss_(y[~sample_mask],
y_pred[~sample_mask],
sample_weight[~sample_mask])
# fit next stage of trees
y_pred = self._fit_stage(i, X, y, y_pred, sample_weight,
sample_mask, criterion, splitter,
random_state)
# track deviance (= loss)
if do_oob:
self.train_score_[i] = loss_(y[sample_mask],
y_pred[sample_mask],
sample_weight[sample_mask])
self.oob_improvement_[i] = (
old_oob_score - loss_(y[~sample_mask],
y_pred[~sample_mask],
sample_weight[~sample_mask]))
else:
# no need to fancy index w/ no subsampling
self.train_score_[i] = loss_(y, y_pred, sample_weight)
if self.verbose > 0:
verbose_reporter.update(i, self)
if monitor is not None:
early_stopping = monitor(i, self, locals())
if early_stopping:
break
return i + 1
def _make_estimator(self, append=True):
# we don't need _make_estimator
raise NotImplementedError()
def _init_decision_function(self, X):
"""Check input and compute prediction of ``init``. """
if self.estimators_ is None or len(self.estimators_) == 0:
raise NotFittedError("Estimator not fitted, call `fit`"
" before making predictions`.")
if X.shape[1] != self.n_features:
raise ValueError("X.shape[1] should be {0:d}, not {1:d}.".format(
self.n_features, X.shape[1]))
score = self.init_.predict(X).astype(np.float64)
return score
def _decision_function(self, X):
# for use in inner loop, not raveling the output in single-class case,
# not doing input validation.
score = self._init_decision_function(X)
predict_stages(self.estimators_, X, self.learning_rate, score)
return score
@deprecated(" and will be removed in 0.19")
def decision_function(self, X):
"""Compute the decision function of ``X``.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
score : array, shape = [n_samples, n_classes] or [n_samples]
The decision function of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
Regression and binary classification produce an array of shape
[n_samples].
"""
X = check_array(X, dtype=DTYPE, order="C")
score = self._decision_function(X)
if score.shape[1] == 1:
return score.ravel()
return score
def _staged_decision_function(self, X):
"""Compute decision function of ``X`` for each iteration.
This method allows monitoring (i.e. determine error on testing set)
after each stage.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
score : generator of array, shape = [n_samples, k]
The decision function of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
Regression and binary classification are special cases with
``k == 1``, otherwise ``k==n_classes``.
"""
X = check_array(X, dtype=DTYPE, order="C")
score = self._init_decision_function(X)
for i in range(self.estimators_.shape[0]):
predict_stage(self.estimators_, i, X, self.learning_rate, score)
yield score.copy()
@deprecated(" and will be removed in 0.19")
def staged_decision_function(self, X):
"""Compute decision function of ``X`` for each iteration.
This method allows monitoring (i.e. determine error on testing set)
after each stage.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
score : generator of array, shape = [n_samples, k]
The decision function of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
Regression and binary classification are special cases with
``k == 1``, otherwise ``k==n_classes``.
"""
for dec in self._staged_decision_function(X):
# no yield from in Python2.X
yield dec
@property
def feature_importances_(self):
"""Return the feature importances (the higher, the more important the
feature).
Returns
-------
feature_importances_ : array, shape = [n_features]
"""
if self.estimators_ is None or len(self.estimators_) == 0:
raise NotFittedError("Estimator not fitted, call `fit` before"
" `feature_importances_`.")
total_sum = np.zeros((self.n_features, ), dtype=np.float64)
for stage in self.estimators_:
stage_sum = sum(tree.feature_importances_
for tree in stage) / len(stage)
total_sum += stage_sum
importances = total_sum / len(self.estimators_)
return importances
def _validate_y(self, y):
self.n_classes_ = 1
if y.dtype.kind == 'O':
y = y.astype(np.float64)
# Default implementation
return y
class GradientBoostingClassifier(BaseGradientBoosting, ClassifierMixin):
"""Gradient Boosting for classification.
GB builds an additive model in a
forward stage-wise fashion; it allows for the optimization of
arbitrary differentiable loss functions. In each stage ``n_classes_``
regression trees are fit on the negative gradient of the
binomial or multinomial deviance loss function. Binary classification
is a special case where only a single regression tree is induced.
Read more in the :ref:`User Guide <gradient_boosting>`.
Parameters
----------
loss : {'deviance', 'exponential'}, optional (default='deviance')
loss function to be optimized. 'deviance' refers to
deviance (= logistic regression) for classification
with probabilistic outputs. For loss 'exponential' gradient
boosting recovers the AdaBoost algorithm.
learning_rate : float, optional (default=0.1)
learning rate shrinks the contribution of each tree by `learning_rate`.
There is a trade-off between learning_rate and n_estimators.
n_estimators : int (default=100)
The number of boosting stages to perform. Gradient boosting
is fairly robust to over-fitting so a large number usually
results in better performance.
max_depth : integer, optional (default=3)
maximum depth of the individual regression estimators. The maximum
depth limits the number of nodes in the tree. Tune this parameter
for best performance; the best value depends on the interaction
of the input variables.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : integer, optional (default=2)
The minimum number of samples required to split an internal node.
min_samples_leaf : integer, optional (default=1)
The minimum number of samples required to be at a leaf node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
subsample : float, optional (default=1.0)
The fraction of samples to be used for fitting the individual base
learners. If smaller than 1.0 this results in Stochastic Gradient
Boosting. `subsample` interacts with the parameter `n_estimators`.
Choosing `subsample < 1.0` leads to a reduction of variance
and an increase in bias.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Choosing `max_features < n_features` leads to a reduction of variance
and an increase in bias.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
init : BaseEstimator, None, optional (default=None)
An estimator object that is used to compute the initial
predictions. ``init`` has to provide ``fit`` and ``predict``.
If None it uses ``loss.init_estimator``.
verbose : int, default: 0
Enable verbose output. If 1 then it prints progress and performance
once in a while (the more trees the lower the frequency). If greater
than 1 then it prints progress and performance for every tree.
warm_start : bool, default: False
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just erase the
previous solution.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
feature_importances_ : array, shape = [n_features]
The feature importances (the higher, the more important the feature).
oob_improvement_ : array, shape = [n_estimators]
The improvement in loss (= deviance) on the out-of-bag samples
relative to the previous iteration.
``oob_improvement_[0]`` is the improvement in
loss of the first stage over the ``init`` estimator.
train_score_ : array, shape = [n_estimators]
The i-th score ``train_score_[i]`` is the deviance (= loss) of the
model at iteration ``i`` on the in-bag sample.
If ``subsample == 1`` this is the deviance on the training data.
loss_ : LossFunction
The concrete ``LossFunction`` object.
init : BaseEstimator
The estimator that provides the initial predictions.
Set via the ``init`` argument or ``loss.init_estimator``.
estimators_ : ndarray of DecisionTreeRegressor, shape = [n_estimators, loss_.K]
The collection of fitted sub-estimators. ``loss_.K`` is 1 for binary
classification, otherwise n_classes.
See also
--------
sklearn.tree.DecisionTreeClassifier, RandomForestClassifier
AdaBoostClassifier
References
----------
J. Friedman, Greedy Function Approximation: A Gradient Boosting
Machine, The Annals of Statistics, Vol. 29, No. 5, 2001.
J. Friedman, Stochastic Gradient Boosting, 1999
T. Hastie, R. Tibshirani and J. Friedman.
Elements of Statistical Learning Ed. 2, Springer, 2009.
"""
_SUPPORTED_LOSS = ('deviance', 'exponential')
def __init__(self, loss='deviance', learning_rate=0.1, n_estimators=100,
subsample=1.0, min_samples_split=2,
min_samples_leaf=1, min_weight_fraction_leaf=0.,
max_depth=3, init=None, random_state=None,
max_features=None, verbose=0,
max_leaf_nodes=None, warm_start=False):
super(GradientBoostingClassifier, self).__init__(
loss=loss, learning_rate=learning_rate, n_estimators=n_estimators,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_depth=max_depth, init=init, subsample=subsample,
max_features=max_features,
random_state=random_state, verbose=verbose,
max_leaf_nodes=max_leaf_nodes, warm_start=warm_start)
def _validate_y(self, y):
self.classes_, y = np.unique(y, return_inverse=True)
self.n_classes_ = len(self.classes_)
return y
def decision_function(self, X):
"""Compute the decision function of ``X``.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
score : array, shape = [n_samples, n_classes] or [n_samples]
The decision function of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
Regression and binary classification produce an array of shape
[n_samples].
"""
X = check_array(X, dtype=DTYPE, order="C")
score = self._decision_function(X)
if score.shape[1] == 1:
return score.ravel()
return score
def staged_decision_function(self, X):
"""Compute decision function of ``X`` for each iteration.
        This method allows monitoring (i.e. determining error on a test set)
after each stage.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
score : generator of array, shape = [n_samples, k]
The decision function of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
Regression and binary classification are special cases with
``k == 1``, otherwise ``k==n_classes``.
"""
for dec in self._staged_decision_function(X):
# no yield from in Python2.X
yield dec
def predict(self, X):
"""Predict class for X.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
        y : array of shape = [n_samples]
            The predicted classes.
"""
score = self.decision_function(X)
decisions = self.loss_._score_to_decision(score)
return self.classes_.take(decisions, axis=0)
def staged_predict(self, X):
"""Predict class at each stage for X.
        This method allows monitoring (i.e. determining error on a test set)
after each stage.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
y : generator of array of shape = [n_samples]
The predicted value of the input samples.
"""
for score in self._staged_decision_function(X):
decisions = self.loss_._score_to_decision(score)
yield self.classes_.take(decisions, axis=0)
def predict_proba(self, X):
"""Predict class probabilities for X.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Raises
------
AttributeError
If the ``loss`` does not support probabilities.
Returns
-------
        p : array of shape = [n_samples, n_classes]
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
score = self.decision_function(X)
try:
return self.loss_._score_to_proba(score)
except NotFittedError:
raise
except AttributeError:
raise AttributeError('loss=%r does not support predict_proba' %
self.loss)
def predict_log_proba(self, X):
"""Predict class log-probabilities for X.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Raises
------
AttributeError
If the ``loss`` does not support probabilities.
Returns
-------
        p : array of shape = [n_samples, n_classes]
The class log-probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
proba = self.predict_proba(X)
return np.log(proba)
def staged_predict_proba(self, X):
"""Predict class probabilities at each stage for X.
        This method allows monitoring (i.e. determining error on a test set)
after each stage.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
        y : generator of array of shape = [n_samples, n_classes]
            The predicted class probabilities of the input samples.
"""
try:
for score in self._staged_decision_function(X):
yield self.loss_._score_to_proba(score)
except NotFittedError:
raise
except AttributeError:
raise AttributeError('loss=%r does not support predict_proba' %
self.loss)
class GradientBoostingRegressor(BaseGradientBoosting, RegressorMixin):
"""Gradient Boosting for regression.
GB builds an additive model in a forward stage-wise fashion;
it allows for the optimization of arbitrary differentiable loss functions.
In each stage a regression tree is fit on the negative gradient of the
given loss function.
Read more in the :ref:`User Guide <gradient_boosting>`.
Parameters
----------
loss : {'ls', 'lad', 'huber', 'quantile'}, optional (default='ls')
loss function to be optimized. 'ls' refers to least squares
regression. 'lad' (least absolute deviation) is a highly robust
loss function solely based on order information of the input
variables. 'huber' is a combination of the two. 'quantile'
allows quantile regression (use `alpha` to specify the quantile).
learning_rate : float, optional (default=0.1)
learning rate shrinks the contribution of each tree by `learning_rate`.
There is a trade-off between learning_rate and n_estimators.
n_estimators : int (default=100)
The number of boosting stages to perform. Gradient boosting
is fairly robust to over-fitting so a large number usually
results in better performance.
max_depth : integer, optional (default=3)
maximum depth of the individual regression estimators. The maximum
depth limits the number of nodes in the tree. Tune this parameter
for best performance; the best value depends on the interaction
of the input variables.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : integer, optional (default=2)
The minimum number of samples required to split an internal node.
min_samples_leaf : integer, optional (default=1)
The minimum number of samples required to be at a leaf node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
subsample : float, optional (default=1.0)
The fraction of samples to be used for fitting the individual base
learners. If smaller than 1.0 this results in Stochastic Gradient
Boosting. `subsample` interacts with the parameter `n_estimators`.
Choosing `subsample < 1.0` leads to a reduction of variance
and an increase in bias.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Choosing `max_features < n_features` leads to a reduction of variance
and an increase in bias.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
alpha : float (default=0.9)
The alpha-quantile of the huber loss function and the quantile
loss function. Only if ``loss='huber'`` or ``loss='quantile'``.
init : BaseEstimator, None, optional (default=None)
An estimator object that is used to compute the initial
predictions. ``init`` has to provide ``fit`` and ``predict``.
If None it uses ``loss.init_estimator``.
verbose : int, default: 0
Enable verbose output. If 1 then it prints progress and performance
once in a while (the more trees the lower the frequency). If greater
than 1 then it prints progress and performance for every tree.
warm_start : bool, default: False
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just erase the
previous solution.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
feature_importances_ : array, shape = [n_features]
The feature importances (the higher, the more important the feature).
oob_improvement_ : array, shape = [n_estimators]
The improvement in loss (= deviance) on the out-of-bag samples
relative to the previous iteration.
``oob_improvement_[0]`` is the improvement in
loss of the first stage over the ``init`` estimator.
train_score_ : array, shape = [n_estimators]
The i-th score ``train_score_[i]`` is the deviance (= loss) of the
model at iteration ``i`` on the in-bag sample.
If ``subsample == 1`` this is the deviance on the training data.
loss_ : LossFunction
The concrete ``LossFunction`` object.
    init : BaseEstimator
The estimator that provides the initial predictions.
Set via the ``init`` argument or ``loss.init_estimator``.
estimators_ : ndarray of DecisionTreeRegressor, shape = [n_estimators, 1]
The collection of fitted sub-estimators.
See also
--------
DecisionTreeRegressor, RandomForestRegressor
References
----------
J. Friedman, Greedy Function Approximation: A Gradient Boosting
Machine, The Annals of Statistics, Vol. 29, No. 5, 2001.
J. Friedman, Stochastic Gradient Boosting, 1999
T. Hastie, R. Tibshirani and J. Friedman.
Elements of Statistical Learning Ed. 2, Springer, 2009.
"""
_SUPPORTED_LOSS = ('ls', 'lad', 'huber', 'quantile')
def __init__(self, loss='ls', learning_rate=0.1, n_estimators=100,
subsample=1.0, min_samples_split=2,
min_samples_leaf=1, min_weight_fraction_leaf=0.,
max_depth=3, init=None, random_state=None,
max_features=None, alpha=0.9, verbose=0, max_leaf_nodes=None,
warm_start=False):
super(GradientBoostingRegressor, self).__init__(
loss=loss, learning_rate=learning_rate, n_estimators=n_estimators,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_depth=max_depth, init=init, subsample=subsample,
max_features=max_features,
random_state=random_state, alpha=alpha, verbose=verbose,
max_leaf_nodes=max_leaf_nodes, warm_start=warm_start)
def predict(self, X):
"""Predict regression target for X.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
y : array of shape = [n_samples]
The predicted values.
"""
X = check_array(X, dtype=DTYPE, order="C")
return self._decision_function(X).ravel()
def staged_predict(self, X):
"""Predict regression target at each stage for X.
        This method allows monitoring (i.e. determining error on a test set)
after each stage.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
y : generator of array of shape = [n_samples]
The predicted value of the input samples.
"""
for y in self._staged_decision_function(X):
yield y.ravel()
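

# ----------------------------------------------------------------------------
# Illustrative usage sketch (editor's addition, not part of the original
# module): a minimal, hedged example of the public API defined above. It only
# assumes the dataset helpers from ``sklearn.datasets``; everything else uses
# the ``fit`` / ``predict`` / ``predict_proba`` / ``staged_predict`` methods
# implemented in this file.
if __name__ == "__main__":
    from sklearn.datasets import make_classification, make_regression

    # Classification: class probabilities and per-stage predictions.
    X, y = make_classification(n_samples=200, n_features=10, random_state=0)
    clf = GradientBoostingClassifier(n_estimators=50, learning_rate=0.1,
                                     max_depth=3, random_state=0)
    clf.fit(X, y)
    print(clf.predict_proba(X[:3]))                   # shape (3, n_classes)
    print(sum(1 for _ in clf.staged_predict(X[:3])))  # one array per stage

    # Regression: least-squares loss with subsampling (stochastic boosting).
    X_r, y_r = make_regression(n_samples=200, n_features=10, random_state=0)
    reg = GradientBoostingRegressor(loss='ls', n_estimators=50,
                                    subsample=0.8, random_state=0)
    reg.fit(X_r, y_r)
    print(reg.predict(X_r[:3]))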
| bsd-3-clause |
tomlof/scikit-learn | examples/tree/plot_tree_regression_multioutput.py | 73 | 1854 | """
===================================================================
Multi-output Decision Tree Regression
===================================================================
An example to illustrate multi-output regression with a decision tree.
A :ref:`decision tree <tree>`
is used to simultaneously predict the noisy x and y observations of a circle
given a single underlying feature. As a result, it learns local linear
regressions approximating the circle.
We can see that if the maximum depth of the tree (controlled by the
`max_depth` parameter) is set too high, the decision trees learn overly fine
details of the training data and learn from the noise, i.e. they overfit.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.tree import DecisionTreeRegressor
# Create a random dataset
rng = np.random.RandomState(1)
X = np.sort(200 * rng.rand(100, 1) - 100, axis=0)
y = np.array([np.pi * np.sin(X).ravel(), np.pi * np.cos(X).ravel()]).T
y[::5, :] += (0.5 - rng.rand(20, 2))
# Fit regression model
regr_1 = DecisionTreeRegressor(max_depth=2)
regr_2 = DecisionTreeRegressor(max_depth=5)
regr_3 = DecisionTreeRegressor(max_depth=8)
regr_1.fit(X, y)
regr_2.fit(X, y)
regr_3.fit(X, y)
# Predict
X_test = np.arange(-100.0, 100.0, 0.01)[:, np.newaxis]
y_1 = regr_1.predict(X_test)
y_2 = regr_2.predict(X_test)
y_3 = regr_3.predict(X_test)
# Plot the results
plt.figure()
s = 50
plt.scatter(y[:, 0], y[:, 1], c="navy", s=s, label="data")
plt.scatter(y_1[:, 0], y_1[:, 1], c="cornflowerblue", s=s, label="max_depth=2")
plt.scatter(y_2[:, 0], y_2[:, 1], c="c", s=s, label="max_depth=5")
plt.scatter(y_3[:, 0], y_3[:, 1], c="orange", s=s, label="max_depth=8")
plt.xlim([-6, 6])
plt.ylim([-6, 6])
plt.xlabel("target 1")
plt.ylabel("target 2")
plt.title("Multi-output Decision Tree Regression")
plt.legend()
plt.show()
| bsd-3-clause |
zfrenchee/pandas | pandas/io/formats/common.py | 16 | 1094 | # -*- coding: utf-8 -*-
"""
Common helper methods used in different submodules of pandas.io.formats
"""
def get_level_lengths(levels, sentinel=''):
"""For each index in each level the function returns lengths of indexes.
Parameters
----------
levels : list of lists
List of values on for level.
sentinel : string, optional
Value which states that no new index starts on there.
Returns
----------
Returns list of maps. For each level returns map of indexes (key is index
in row and value is length of index).
"""
if len(levels) == 0:
return []
control = [True for x in levels[0]]
result = []
for level in levels:
last_index = 0
lengths = {}
for i, key in enumerate(level):
if control[i] and key == sentinel:
pass
else:
control[i] = False
lengths[last_index] = i - last_index
last_index = i
lengths[last_index] = len(level) - last_index
result.append(lengths)
return result
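

# ----------------------------------------------------------------------------
# Illustrative sketch (editor's addition, not part of the module): how
# ``get_level_lengths`` turns a flattened MultiIndex-style ``levels`` input
# into per-level span lengths. The input below is hypothetical; the sentinel
# '' means "no new index starts here", so a span extends until the next
# non-sentinel key.
if __name__ == "__main__":
    levels = [['foo', '', '', 'bar'],
              ['one', 'two', '', 'three']]
    # Level 0: 'foo' spans rows 0-2, 'bar' spans row 3.
    # Level 1: 'one' spans row 0, 'two' spans rows 1-2, 'three' spans row 3.
    assert get_level_lengths(levels) == [{0: 3, 3: 1}, {0: 1, 1: 2, 3: 1}]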
| bsd-3-clause |
RexValkering/socialforcemodel | research_files/continuous_test.py | 1 | 9950 | import socialforcemodel as sfm
import numpy as np
import matplotlib.pyplot as plt
import os
import csv
import psutil
from pympler import asizeof, tracker
np.seterr(all='raise')
try:
import progressbar
except ImportError, e:
print "Progressbar package not found. Please run 'pip install progressbar'"
exit()
def sensor(world, position, sensor_range):
peds = world.quadtree.get_pedestrians_in_range(position, sensor_range)
actual_peds = set()
range_squared = sensor_range**2
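    # The quadtree lookup above returns nearby candidates; keep only those
    # actually within sensor_range, comparing squared distances to avoid a
    # sqrt per pedestrian.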
for p in peds:
if ((p.position[0] - position[0])**2 +
(p.position[1] - position[1])**2) <= range_squared:
actual_peds.add(p)
results = {}
results['count'] = len(actual_peds)
if len(actual_peds):
average_speed = 0.0
for p in actual_peds:
average_speed += p.speed
results['average_speed'] = average_speed / len(actual_peds)
else:
results['average_speed'] = 0.0
return results
def sensor_far(world):
return sensor(world, [14.0, 5.0], 2.0)
def sensor_near(world):
return sensor(world, [8.0, 5.0], 2.0)
def plot(item, measurements, fig, subplot=111):
ax = fig.add_subplot(subplot)
ax.scatter(range(len(measurements)), measurements)
ax.set_title('average ' + item[2])
def main(args):
barrier_start = 50.0
barrier_points = [[50.0, 1.0], [50.0, 4.0]]
barrier_time = args.barriertime
mean, theta, sigma = 0.0, 0.05, 0.005
measurements = []
for r in range(args.repetitions):
barrier_state = 0
if os.path.exists("hddm/{}_pedestrians_{}.csv".format(args.outfile, r)):
print "Already done, continue..."
continue
with open("hddm/{}_pedestrians_{}.csv".format(args.outfile, r), "w") as ped_outfile:
ped_writer = csv.writer(ped_outfile)
ped_writer.writerow(['p', 'mass', 'radius', 'desired_velocity', 'maximum_velocity'])
with open("hddm/{}_measurements_{}.csv".format(args.outfile, r), "w") as csv_outfile:
csv_writer = csv.writer(csv_outfile)
csv_writer.writerow(['t', 'p', 'pos_x', 'pos_y', 'vel_x', 'vel_y', 'speed', 'local_density', 'local_velocity_variance'])
all_pedestrians = set()
if not os.path.exists("img"):
os.makedirs("img")
if not os.path.exists("img/" + args.outfile):
os.makedirs("img/" + args.outfile)
if not os.path.exists("measurements"):
os.makedirs("measurements")
measurements.append({
't': [],
'count_near': [],
'count_far': [],
'speed_near': [],
'speed_far': []
})
loader = sfm.ParameterLoader(args.file)
world = loader.world
if args.pedestrian_file != '':
with open(args.pedestrian_file) as infile:
import pickle
data = pickle.load(infile)
# exit()
for p in data:
ped = sfm.Pedestrian(group=world.groups[0],
radius=p['radius'],
mass=p['mass'],
desired_velocity=p['desired_velocity'],
maximum_velocity=p['maximum_velocity'],
relaxation_time=p['relaxation_time'],
target_path=p['target_path'],
start=p['position'])
ped.velocity = p['velocity']
ped.next_velocity = p['velocity']
ped.speed = p['speed']
ped.next_speed = p['speed']
world.groups[0].add_pedestrian(ped)
print "Imported {} pedestrians".format(len(world.groups[0].get_pedestrians()))
world.update()
world.groups[0].spawn_max = args.max_pedestrians
# world.groups[0].set_ornstein_uhlenbeck_process(self, 0, 0.05, 1.0):
for group in world.groups:
group.set_ornstein_uhlenbeck_process(mean, theta, sigma)
bar = progressbar.ProgressBar()
for step in bar(range(args.steps)):
if not world.step():
break
world.update()
for group in world.groups:
for p in group.get_pedestrians():
all_pedestrians.add(p)
# if step % 5 == 0:
# figure = world.plot()
# figure.savefig("img/" + args.outfile + "/" + str((step + 1) // 5).zfill(4) + ".png",
# bbox_inches = 'tight',
# pad_inches = 0.1)
# figure.clear()
# plt.close(figure)
# if step % 5 == 0:
# near = sensor_near(world)
# far = sensor_far(world)
# measurements[r]['t'].append(world.time)
# measurements[r]['count_near'].append(near['count'])
# measurements[r]['count_far'].append(far['count'])
# measurements[r]['speed_near'].append(near['average_speed'])
# measurements[r]['speed_far'].append(far['average_speed'])
# print len(all_pedestrians)
# Cleanup to avoid high memory usage.
if step % 200 == 0:
# tr.print_diff()
# process = psutil.Process(os.getpid())
# print "Before:", process.memory_info().rss
# print len(all_pedestrians)
# Get all pedestrians no longer in simulation.
current_pedestrians = set()
for group in world.groups:
current_pedestrians = current_pedestrians.union(group.get_pedestrians())
retired_pedestrians = all_pedestrians - current_pedestrians
# Write all pedestrian data to file.
with open("hddm/{}_pedestrians_{}.csv".format(args.outfile, r), "a") as ped_outfile:
with open("hddm/{}_measurements_{}.csv".format(args.outfile, r), "a") as csv_outfile:
ped_writer = csv.writer(ped_outfile)
csv_writer = csv.writer(csv_outfile)
for p in retired_pedestrians:
m = p.measurements
row = [p.id, "%.4f" % p.mass, "%.4f" % p.radius,
"%.4f" % p.desired_velocity, "%.4f" % p.maximum_velocity]
ped_writer.writerow(row)
for p in all_pedestrians:
m = p.measurements
for arr in m:
s = arr['self']
row = ["%.2f" % s['time'], p.id, "%.4f" % s['position'][0], "%.4f" % s['position'][1],
"%.4f" % s['velocity'][0], "%.4f" % s['velocity'][1], "%.4f" % s['speed'],
"%.4f" % arr['forces']['local_density'], "%.4f" % arr['forces']['local_velocity_variance']]
csv_writer.writerow(row)
# Empty all data.
p.measurements = []
# Remove pedestrians from local variables.
all_pedestrians = current_pedestrians
# process = psutil.Process(os.getpid())
# print "After:", process.memory_info().rss
if barrier_state == 0 and barrier_time != 0 and world.time > barrier_start:
barrier_state = 1
world.add_obstacle(sfm.Obstacle(barrier_points))
elif barrier_state == 1 and world.time > barrier_start + barrier_time:
barrier_state = 2
del world.obstacles[-1]
histogram = None
# Write all pedestrian data to file.
with open("hddm/{}_pedestrians_{}.csv".format(args.outfile, r), "a") as ped_outfile:
with open("hddm/{}_measurements_{}.csv".format(args.outfile, r), "a") as csv_outfile:
ped_writer = csv.writer(ped_outfile)
csv_writer = csv.writer(csv_outfile)
for p in all_pedestrians:
if p.id == 0:
histogram = [m['self']['random'] for m in p.measurements]
m = p.measurements
row = [p.id, "%.4f" % p.mass, "%.4f" % p.radius,
"%.4f" % p.desired_velocity, "%.4f" % p.maximum_velocity]
ped_writer.writerow(row)
m = p.measurements
for arr in m:
s = arr['self']
row = ["%.2f" % s['time'], p.id, "%.4f" % s['position'][0], "%.4f" % s['position'][1],
"%.4f" % s['velocity'][0], "%.4f" % s['velocity'][1], "%.4f" % s['speed'],
"%.4f" % arr['forces']['local_density'], "%.4f" % arr['forces']['local_velocity_variance']]
csv_writer.writerow(row)
# plt.clf()
# plt.hist(histogram)
# plt.show()
if __name__ == '__main__':
import argparse
import sys
parser = argparse.ArgumentParser()
parser.add_argument('file', help='YAML-file')
parser.add_argument('-s', '--steps', help='Number of steps', type=int, default=500)
parser.add_argument('-o', '--outfile', help='File for measurements', default='measurements')
parser.add_argument('-p', '--pedestrian_file', help='Pedestrian file', default='')
parser.add_argument('-m', '--max_pedestrians', help='max pedestrians', type=int, default=100)
parser.add_argument('-r', '--repetitions', default=1, type=int)
parser.add_argument('-b', '--barriertime', default=0, type=int)
args = parser.parse_args(sys.argv[1:])
main(args) | mit |
HyperloopTeam/FullOpenMDAO | lib/python2.7/site-packages/networkx-1.9.1-py2.7.egg/share/doc/networkx-1.9.1/examples/drawing/unix_email.py | 62 | 2683 | #!/usr/bin/env python
"""
Create a directed graph, allowing multiple edges and self loops, from
a unix mailbox. The nodes are email addresses with links
that point from the sender to the receivers. The edge data
is a Python email.Message object which contains all of
the email message data.
This example shows the power of MultiDiGraph to hold edge data
of arbitrary Python objects (in this case email messages).
By default, load the sample unix email mailbox called "unix_email.mbox".
You can load your own mailbox by naming it on the command line, eg
python unix_email.py /var/spool/mail/username
"""
__author__ = """Aric Hagberg ([email protected])"""
# Copyright (C) 2005 by
# Aric Hagberg <[email protected]>
# Dan Schult <[email protected]>
# Pieter Swart <[email protected]>
# All rights reserved.
# BSD license.
import email
from email.utils import getaddresses,parseaddr
import mailbox
import sys
# unix mailbox recipe
# see http://www.python.org/doc/current/lib/module-mailbox.html
def msgfactory(fp):
try:
return email.message_from_file(fp)
except email.Errors.MessageParseError:
# Don't return None since that will stop the mailbox iterator
return ''
if __name__ == '__main__':
import networkx as nx
try:
import matplotlib.pyplot as plt
except:
pass
if len(sys.argv)==1:
filePath = "unix_email.mbox"
else:
filePath = sys.argv[1]
mbox = mailbox.mbox(filePath, msgfactory) # parse unix mailbox
G=nx.MultiDiGraph() # create empty graph
# parse each messages and build graph
for msg in mbox: # msg is python email.Message.Message object
(source_name,source_addr) = parseaddr(msg['From']) # sender
# get all recipients
# see http://www.python.org/doc/current/lib/module-email.Utils.html
tos = msg.get_all('to', [])
ccs = msg.get_all('cc', [])
resent_tos = msg.get_all('resent-to', [])
resent_ccs = msg.get_all('resent-cc', [])
all_recipients = getaddresses(tos + ccs + resent_tos + resent_ccs)
# now add the edges for this mail message
for (target_name,target_addr) in all_recipients:
G.add_edge(source_addr,target_addr,message=msg)
# print edges with message subject
for (u,v,d) in G.edges_iter(data=True):
print("From: %s To: %s Subject: %s"%(u,v,d['message']["Subject"]))
try: # draw
pos=nx.spring_layout(G,iterations=10)
nx.draw(G,pos,node_size=0,alpha=0.4,edge_color='r',font_size=16)
plt.savefig("unix_email.png")
plt.show()
except: # matplotlib not available
pass
| gpl-2.0 |
OshynSong/scikit-learn | sklearn/linear_model/__init__.py | 270 | 3096 | """
The :mod:`sklearn.linear_model` module implements generalized linear models. It
includes Ridge regression, Bayesian Regression, Lasso and Elastic Net
estimators computed with Least Angle Regression and coordinate descent. It also
implements Stochastic Gradient Descent related algorithms.
"""
# See http://scikit-learn.sourceforge.net/modules/sgd.html and
# http://scikit-learn.sourceforge.net/modules/linear_model.html for
# complete documentation.
from .base import LinearRegression
from .bayes import BayesianRidge, ARDRegression
from .least_angle import (Lars, LassoLars, lars_path, LarsCV, LassoLarsCV,
LassoLarsIC)
from .coordinate_descent import (Lasso, ElasticNet, LassoCV, ElasticNetCV,
lasso_path, enet_path, MultiTaskLasso,
MultiTaskElasticNet, MultiTaskElasticNetCV,
MultiTaskLassoCV)
from .sgd_fast import Hinge, Log, ModifiedHuber, SquaredLoss, Huber
from .stochastic_gradient import SGDClassifier, SGDRegressor
from .ridge import (Ridge, RidgeCV, RidgeClassifier, RidgeClassifierCV,
ridge_regression)
from .logistic import (LogisticRegression, LogisticRegressionCV,
logistic_regression_path)
from .omp import (orthogonal_mp, orthogonal_mp_gram, OrthogonalMatchingPursuit,
OrthogonalMatchingPursuitCV)
from .passive_aggressive import PassiveAggressiveClassifier
from .passive_aggressive import PassiveAggressiveRegressor
from .perceptron import Perceptron
from .randomized_l1 import (RandomizedLasso, RandomizedLogisticRegression,
lasso_stability_path)
from .ransac import RANSACRegressor
from .theil_sen import TheilSenRegressor
__all__ = ['ARDRegression',
'BayesianRidge',
'ElasticNet',
'ElasticNetCV',
'Hinge',
'Huber',
'Lars',
'LarsCV',
'Lasso',
'LassoCV',
'LassoLars',
'LassoLarsCV',
'LassoLarsIC',
'LinearRegression',
'Log',
'LogisticRegression',
'LogisticRegressionCV',
'ModifiedHuber',
'MultiTaskElasticNet',
'MultiTaskElasticNetCV',
'MultiTaskLasso',
'MultiTaskLassoCV',
'OrthogonalMatchingPursuit',
'OrthogonalMatchingPursuitCV',
'PassiveAggressiveClassifier',
'PassiveAggressiveRegressor',
'Perceptron',
'RandomizedLasso',
'RandomizedLogisticRegression',
'Ridge',
'RidgeCV',
'RidgeClassifier',
'RidgeClassifierCV',
'SGDClassifier',
'SGDRegressor',
'SquaredLoss',
'TheilSenRegressor',
'enet_path',
'lars_path',
'lasso_path',
'lasso_stability_path',
'logistic_regression_path',
'orthogonal_mp',
'orthogonal_mp_gram',
'ridge_regression',
'RANSACRegressor']
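

# ----------------------------------------------------------------------------
# Illustrative sketch (editor's addition, not part of the package): the
# estimators exported above share the same fit/predict interface; a minimal,
# hedged example with two of them on a tiny hand-made dataset.
if __name__ == "__main__":
    import numpy as np

    X = np.array([[0.0], [1.0], [2.0], [3.0]])
    y = np.array([0.0, 1.1, 1.9, 3.2])

    ols = LinearRegression().fit(X, y)   # ordinary least squares
    ridge = Ridge(alpha=1.0).fit(X, y)   # L2-regularized variant
    print(ols.coef_)
    print(ridge.coef_)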
| bsd-3-clause |
Vimos/scikit-learn | examples/neighbors/plot_nearest_centroid.py | 58 | 1803 | """
===============================
Nearest Centroid Classification
===============================
Sample usage of Nearest Centroid classification.
It will plot the decision boundaries for each class.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn import datasets
from sklearn.neighbors import NearestCentroid
n_neighbors = 15
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
y = iris.target
h = .02 # step size in the mesh
# Create color maps
cmap_light = ListedColormap(['#FFAAAA', '#AAFFAA', '#AAAAFF'])
cmap_bold = ListedColormap(['#FF0000', '#00FF00', '#0000FF'])
for shrinkage in [None, .2]:
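    # shrink_threshold shrinks each class centroid toward the overall data
    # centroid (nearest shrunken centroid); None keeps the plain class means.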
    # we create an instance of NearestCentroid and fit the data.
clf = NearestCentroid(shrink_threshold=shrinkage)
clf.fit(X, y)
y_pred = clf.predict(X)
print(shrinkage, np.mean(y == y_pred))
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure()
plt.pcolormesh(xx, yy, Z, cmap=cmap_light)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=cmap_bold)
plt.title("3-Class classification (shrink_threshold=%r)"
% shrinkage)
plt.axis('tight')
plt.show()
| bsd-3-clause |
tomlof/scikit-learn | sklearn/mixture/tests/test_bayesian_mixture.py | 84 | 17929 | # Author: Wei Xue <[email protected]>
# Thierry Guillemot <[email protected]>
# License: BSD 3 clause
import numpy as np
from scipy.special import gammaln
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_almost_equal
from sklearn.mixture.bayesian_mixture import _log_dirichlet_norm
from sklearn.mixture.bayesian_mixture import _log_wishart_norm
from sklearn.mixture import BayesianGaussianMixture
from sklearn.mixture.tests.test_gaussian_mixture import RandomData
from sklearn.exceptions import ConvergenceWarning
from sklearn.utils.testing import assert_greater_equal, ignore_warnings
COVARIANCE_TYPE = ['full', 'tied', 'diag', 'spherical']
PRIOR_TYPE = ['dirichlet_process', 'dirichlet_distribution']
def test_log_dirichlet_norm():
rng = np.random.RandomState(0)
weight_concentration = rng.rand(2)
expected_norm = (gammaln(np.sum(weight_concentration)) -
np.sum(gammaln(weight_concentration)))
    predicted_norm = _log_dirichlet_norm(weight_concentration)
    assert_almost_equal(expected_norm, predicted_norm)
def test_log_wishart_norm():
rng = np.random.RandomState(0)
n_components, n_features = 5, 2
degrees_of_freedom = np.abs(rng.rand(n_components)) + 1.
log_det_precisions_chol = n_features * np.log(range(2, 2 + n_components))
expected_norm = np.empty(5)
for k, (degrees_of_freedom_k, log_det_k) in enumerate(
zip(degrees_of_freedom, log_det_precisions_chol)):
expected_norm[k] = -(
degrees_of_freedom_k * (log_det_k + .5 * n_features * np.log(2.)) +
np.sum(gammaln(.5 * (degrees_of_freedom_k -
np.arange(0, n_features)[:, np.newaxis])), 0))
    predicted_norm = _log_wishart_norm(degrees_of_freedom,
                                       log_det_precisions_chol, n_features)
    assert_almost_equal(expected_norm, predicted_norm)
def test_bayesian_mixture_covariance_type():
rng = np.random.RandomState(0)
n_samples, n_features = 10, 2
X = rng.rand(n_samples, n_features)
covariance_type = 'bad_covariance_type'
bgmm = BayesianGaussianMixture(covariance_type=covariance_type,
random_state=rng)
assert_raise_message(ValueError,
"Invalid value for 'covariance_type': %s "
"'covariance_type' should be in "
"['spherical', 'tied', 'diag', 'full']"
% covariance_type, bgmm.fit, X)
def test_bayesian_mixture_weight_concentration_prior_type():
rng = np.random.RandomState(0)
n_samples, n_features = 10, 2
X = rng.rand(n_samples, n_features)
bad_prior_type = 'bad_prior_type'
bgmm = BayesianGaussianMixture(
weight_concentration_prior_type=bad_prior_type, random_state=rng)
assert_raise_message(ValueError,
"Invalid value for 'weight_concentration_prior_type':"
" %s 'weight_concentration_prior_type' should be in "
"['dirichlet_process', 'dirichlet_distribution']"
% bad_prior_type, bgmm.fit, X)
def test_bayesian_mixture_weights_prior_initialisation():
rng = np.random.RandomState(0)
n_samples, n_components, n_features = 10, 5, 2
X = rng.rand(n_samples, n_features)
# Check raise message for a bad value of weight_concentration_prior
bad_weight_concentration_prior_ = 0.
bgmm = BayesianGaussianMixture(
weight_concentration_prior=bad_weight_concentration_prior_,
random_state=0)
assert_raise_message(ValueError,
"The parameter 'weight_concentration_prior' "
"should be greater than 0., but got %.3f."
% bad_weight_concentration_prior_,
bgmm.fit, X)
# Check correct init for a given value of weight_concentration_prior
weight_concentration_prior = rng.rand()
bgmm = BayesianGaussianMixture(
weight_concentration_prior=weight_concentration_prior,
random_state=rng).fit(X)
assert_almost_equal(weight_concentration_prior,
bgmm.weight_concentration_prior_)
# Check correct init for the default value of weight_concentration_prior
bgmm = BayesianGaussianMixture(n_components=n_components,
random_state=rng).fit(X)
assert_almost_equal(1. / n_components, bgmm.weight_concentration_prior_)
def test_bayesian_mixture_means_prior_initialisation():
rng = np.random.RandomState(0)
n_samples, n_components, n_features = 10, 3, 2
X = rng.rand(n_samples, n_features)
# Check raise message for a bad value of mean_precision_prior
bad_mean_precision_prior_ = 0.
bgmm = BayesianGaussianMixture(
mean_precision_prior=bad_mean_precision_prior_,
random_state=rng)
assert_raise_message(ValueError,
"The parameter 'mean_precision_prior' should be "
"greater than 0., but got %.3f."
% bad_mean_precision_prior_,
bgmm.fit, X)
# Check correct init for a given value of mean_precision_prior
mean_precision_prior = rng.rand()
bgmm = BayesianGaussianMixture(
mean_precision_prior=mean_precision_prior,
random_state=rng).fit(X)
assert_almost_equal(mean_precision_prior, bgmm.mean_precision_prior_)
# Check correct init for the default value of mean_precision_prior
bgmm = BayesianGaussianMixture(random_state=rng).fit(X)
assert_almost_equal(1., bgmm.mean_precision_prior_)
# Check raise message for a bad shape of mean_prior
mean_prior = rng.rand(n_features + 1)
bgmm = BayesianGaussianMixture(n_components=n_components,
mean_prior=mean_prior,
random_state=rng)
assert_raise_message(ValueError,
"The parameter 'means' should have the shape of ",
bgmm.fit, X)
# Check correct init for a given value of mean_prior
mean_prior = rng.rand(n_features)
bgmm = BayesianGaussianMixture(n_components=n_components,
mean_prior=mean_prior,
random_state=rng).fit(X)
assert_almost_equal(mean_prior, bgmm.mean_prior_)
    # Check correct init for the default value of mean_prior
bgmm = BayesianGaussianMixture(n_components=n_components,
random_state=rng).fit(X)
assert_almost_equal(X.mean(axis=0), bgmm.mean_prior_)
def test_bayesian_mixture_precisions_prior_initialisation():
rng = np.random.RandomState(0)
n_samples, n_features = 10, 2
X = rng.rand(n_samples, n_features)
# Check raise message for a bad value of degrees_of_freedom_prior
bad_degrees_of_freedom_prior_ = n_features - 1.
bgmm = BayesianGaussianMixture(
degrees_of_freedom_prior=bad_degrees_of_freedom_prior_,
random_state=rng)
assert_raise_message(ValueError,
"The parameter 'degrees_of_freedom_prior' should be "
"greater than %d, but got %.3f."
% (n_features - 1, bad_degrees_of_freedom_prior_),
bgmm.fit, X)
# Check correct init for a given value of degrees_of_freedom_prior
degrees_of_freedom_prior = rng.rand() + n_features - 1.
bgmm = BayesianGaussianMixture(
degrees_of_freedom_prior=degrees_of_freedom_prior,
random_state=rng).fit(X)
assert_almost_equal(degrees_of_freedom_prior,
bgmm.degrees_of_freedom_prior_)
# Check correct init for the default value of degrees_of_freedom_prior
degrees_of_freedom_prior_default = n_features
bgmm = BayesianGaussianMixture(
degrees_of_freedom_prior=degrees_of_freedom_prior_default,
random_state=rng).fit(X)
assert_almost_equal(degrees_of_freedom_prior_default,
bgmm.degrees_of_freedom_prior_)
# Check correct init for a given value of covariance_prior
covariance_prior = {
'full': np.cov(X.T, bias=1) + 10,
'tied': np.cov(X.T, bias=1) + 5,
'diag': np.diag(np.atleast_2d(np.cov(X.T, bias=1))) + 3,
'spherical': rng.rand()}
bgmm = BayesianGaussianMixture(random_state=rng)
for cov_type in ['full', 'tied', 'diag', 'spherical']:
bgmm.covariance_type = cov_type
bgmm.covariance_prior = covariance_prior[cov_type]
bgmm.fit(X)
assert_almost_equal(covariance_prior[cov_type],
bgmm.covariance_prior_)
# Check raise message for a bad spherical value of covariance_prior
bad_covariance_prior_ = -1.
bgmm = BayesianGaussianMixture(covariance_type='spherical',
covariance_prior=bad_covariance_prior_,
random_state=rng)
assert_raise_message(ValueError,
"The parameter 'spherical covariance_prior' "
"should be greater than 0., but got %.3f."
% bad_covariance_prior_,
bgmm.fit, X)
# Check correct init for the default value of covariance_prior
covariance_prior_default = {
'full': np.atleast_2d(np.cov(X.T)),
'tied': np.atleast_2d(np.cov(X.T)),
'diag': np.var(X, axis=0, ddof=1),
'spherical': np.var(X, axis=0, ddof=1).mean()}
bgmm = BayesianGaussianMixture(random_state=0)
for cov_type in ['full', 'tied', 'diag', 'spherical']:
bgmm.covariance_type = cov_type
bgmm.fit(X)
assert_almost_equal(covariance_prior_default[cov_type],
bgmm.covariance_prior_)
def test_bayesian_mixture_check_is_fitted():
rng = np.random.RandomState(0)
n_samples, n_features = 10, 2
# Check raise message
bgmm = BayesianGaussianMixture(random_state=rng)
X = rng.rand(n_samples, n_features)
assert_raise_message(ValueError,
'This BayesianGaussianMixture instance is not '
'fitted yet.', bgmm.score, X)
def test_bayesian_mixture_weights():
rng = np.random.RandomState(0)
n_samples, n_features = 10, 2
X = rng.rand(n_samples, n_features)
# Case Dirichlet distribution for the weight concentration prior type
bgmm = BayesianGaussianMixture(
weight_concentration_prior_type="dirichlet_distribution",
n_components=3, random_state=rng).fit(X)
expected_weights = (bgmm.weight_concentration_ /
np.sum(bgmm.weight_concentration_))
assert_almost_equal(expected_weights, bgmm.weights_)
assert_almost_equal(np.sum(bgmm.weights_), 1.0)
# Case Dirichlet process for the weight concentration prior type
dpgmm = BayesianGaussianMixture(
weight_concentration_prior_type="dirichlet_process",
n_components=3, random_state=rng).fit(X)
weight_dirichlet_sum = (dpgmm.weight_concentration_[0] +
dpgmm.weight_concentration_[1])
tmp = dpgmm.weight_concentration_[1] / weight_dirichlet_sum
expected_weights = (dpgmm.weight_concentration_[0] / weight_dirichlet_sum *
np.hstack((1, np.cumprod(tmp[:-1]))))
expected_weights /= np.sum(expected_weights)
assert_almost_equal(expected_weights, dpgmm.weights_)
assert_almost_equal(np.sum(dpgmm.weights_), 1.0)
@ignore_warnings(category=ConvergenceWarning)
def test_monotonic_likelihood():
    # We check that each step of variational inference without regularization
    # monotonically improves the lower bound on the training set
rng = np.random.RandomState(0)
rand_data = RandomData(rng, scale=20)
n_components = rand_data.n_components
for prior_type in PRIOR_TYPE:
for covar_type in COVARIANCE_TYPE:
X = rand_data.X[covar_type]
bgmm = BayesianGaussianMixture(
weight_concentration_prior_type=prior_type,
n_components=2 * n_components, covariance_type=covar_type,
warm_start=True, max_iter=1, random_state=rng, tol=1e-4)
current_lower_bound = -np.infty
# Do one training iteration at a time so we can make sure that the
# training log likelihood increases after each iteration.
for _ in range(600):
prev_lower_bound = current_lower_bound
current_lower_bound = bgmm.fit(X).lower_bound_
assert_greater_equal(current_lower_bound, prev_lower_bound)
if bgmm.converged_:
break
assert(bgmm.converged_)
def test_compare_covar_type():
# We can compare the 'full' precision with the other cov_type if we apply
# 1 iter of the M-step (done during _initialize_parameters).
rng = np.random.RandomState(0)
rand_data = RandomData(rng, scale=7)
X = rand_data.X['full']
n_components = rand_data.n_components
for prior_type in PRIOR_TYPE:
# Computation of the full_covariance
bgmm = BayesianGaussianMixture(
weight_concentration_prior_type=prior_type,
n_components=2 * n_components, covariance_type='full',
max_iter=1, random_state=0, tol=1e-7)
bgmm._check_initial_parameters(X)
bgmm._initialize_parameters(X, np.random.RandomState(0))
full_covariances = (
bgmm.covariances_ *
bgmm.degrees_of_freedom_[:, np.newaxis, np.newaxis])
# Check tied_covariance = mean(full_covariances, 0)
bgmm = BayesianGaussianMixture(
weight_concentration_prior_type=prior_type,
n_components=2 * n_components, covariance_type='tied',
max_iter=1, random_state=0, tol=1e-7)
bgmm._check_initial_parameters(X)
bgmm._initialize_parameters(X, np.random.RandomState(0))
tied_covariance = bgmm.covariances_ * bgmm.degrees_of_freedom_
assert_almost_equal(tied_covariance, np.mean(full_covariances, 0))
# Check diag_covariance = diag(full_covariances)
bgmm = BayesianGaussianMixture(
weight_concentration_prior_type=prior_type,
n_components=2 * n_components, covariance_type='diag',
max_iter=1, random_state=0, tol=1e-7)
bgmm._check_initial_parameters(X)
bgmm._initialize_parameters(X, np.random.RandomState(0))
diag_covariances = (bgmm.covariances_ *
bgmm.degrees_of_freedom_[:, np.newaxis])
assert_almost_equal(diag_covariances,
np.array([np.diag(cov)
for cov in full_covariances]))
# Check spherical_covariance = np.mean(diag_covariances, 0)
bgmm = BayesianGaussianMixture(
weight_concentration_prior_type=prior_type,
n_components=2 * n_components, covariance_type='spherical',
max_iter=1, random_state=0, tol=1e-7)
bgmm._check_initial_parameters(X)
bgmm._initialize_parameters(X, np.random.RandomState(0))
spherical_covariances = bgmm.covariances_ * bgmm.degrees_of_freedom_
assert_almost_equal(
spherical_covariances, np.mean(diag_covariances, 1))
@ignore_warnings(category=ConvergenceWarning)
def test_check_covariance_precision():
# We check that the dot product of the covariance and the precision
# matrices is identity.
rng = np.random.RandomState(0)
rand_data = RandomData(rng, scale=7)
n_components, n_features = 2 * rand_data.n_components, 2
# Computation of the full_covariance
bgmm = BayesianGaussianMixture(n_components=n_components,
max_iter=100, random_state=rng, tol=1e-3,
reg_covar=0)
for covar_type in COVARIANCE_TYPE:
bgmm.covariance_type = covar_type
bgmm.fit(rand_data.X[covar_type])
if covar_type == 'full':
for covar, precision in zip(bgmm.covariances_, bgmm.precisions_):
assert_almost_equal(np.dot(covar, precision),
np.eye(n_features))
elif covar_type == 'tied':
assert_almost_equal(np.dot(bgmm.covariances_, bgmm.precisions_),
np.eye(n_features))
elif covar_type == 'diag':
assert_almost_equal(bgmm.covariances_ * bgmm.precisions_,
np.ones((n_components, n_features)))
else:
assert_almost_equal(bgmm.covariances_ * bgmm.precisions_,
np.ones(n_components))
@ignore_warnings(category=ConvergenceWarning)
def test_invariant_translation():
# We check here that adding a constant in the data change correctly the
# parameters of the mixture
rng = np.random.RandomState(0)
rand_data = RandomData(rng, scale=100)
n_components = 2 * rand_data.n_components
for prior_type in PRIOR_TYPE:
for covar_type in COVARIANCE_TYPE:
X = rand_data.X[covar_type]
bgmm1 = BayesianGaussianMixture(
weight_concentration_prior_type=prior_type,
n_components=n_components, max_iter=100, random_state=0,
tol=1e-3, reg_covar=0).fit(X)
bgmm2 = BayesianGaussianMixture(
weight_concentration_prior_type=prior_type,
n_components=n_components, max_iter=100, random_state=0,
tol=1e-3, reg_covar=0).fit(X + 100)
assert_almost_equal(bgmm1.means_, bgmm2.means_ - 100)
assert_almost_equal(bgmm1.weights_, bgmm2.weights_)
assert_almost_equal(bgmm1.covariances_, bgmm2.covariances_)
| bsd-3-clause |
nhuntwalker/astroML | book_figures/chapter3/fig_cauchy_median_mean.py | 3 | 4370 | r"""
Median and Mean for Cauchy distribution
---------------------------------------
Figure 3.12.
The bottom panel shows a sample of N points drawn from a Cauchy distribution
with :math:`\mu = 0` and :math:`\gamma=2`. The top panel shows the sample
median, sample mean, and two robust estimates of the location parameter
(see text) as a function of the sample size (only points to the left from
a given sample size are used). Note that the sample mean is not a good
estimator of the distribution's location parameter. Though the mean appears
to converge as N increases, this is deceiving: because of the large tails
in the Cauchy distribution, there is always a high likelihood of a far-flung
point affecting the sample mean. This behavior is markedly different from a
Gaussian distribution where the probability of such "outliers" is much smaller.
"""
# Author: Jake VanderPlas
# License: BSD
# The figure produced by this code is published in the textbook
# "Statistics, Data Mining, and Machine Learning in Astronomy" (2013)
# For more information, see http://astroML.github.com
# To report a bug or issue, use the following forum:
# https://groups.google.com/forum/#!forum/astroml-general
import numpy as np
from matplotlib import pyplot as plt
from scipy import optimize
from scipy.stats import cauchy, norm
#----------------------------------------------------------------------
# This function adjusts matplotlib settings for a uniform feel in the textbook.
# Note that with usetex=True, fonts are rendered with LaTeX. This may
# result in an error if LaTeX is not installed on your system. In that case,
# you can set usetex to False.
from astroML.plotting import setup_text_plots
setup_text_plots(fontsize=8, usetex=True)
def robust_mean_mixture(x):
"""Compute the mean via a mixture of two Gaussians
One Gaussian accounts for outliers, and one Gaussian accounts for
the true distribution. This cannot be computed analytically, so
it uses scipy's function optimization
"""
if len(x) == 1:
return x
x = x.ravel()
mu_bg = np.mean(x)
sig_bg = 3 * np.std(x)
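    # Negative log-likelihood of an (unnormalized) two-component model: one
    # Gaussian with free location/width v plus a fixed, wide "background"
    # Gaussian that absorbs the far-flung Cauchy tail points.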
likelihood = lambda v: -np.sum(np.log(norm.pdf(x, v[0], v[1])
+ norm.pdf(x, mu_bg, sig_bg)))
v0 = np.array([0, 30])
v_best = optimize.fmin(likelihood, v0, disp=False)
return v_best[0]
def robust_mean_iterated(x, sigma_cut=3):
"""Compute the robust mean iteratively
After computing the mean, points further than 3 sigma from the mean
are removed and the result is repeated until convergence.
"""
flag = np.ones(x.shape, dtype=bool)
n_to_keep = x.size
while True:
xf = x[flag]
mu = xf.mean()
sig = xf.std()
if len(xf) == 1:
break
x_sig = abs((x - mu) / sig)
too_far = (x_sig > sigma_cut)
flag[too_far] = False
n_flag = flag.sum()
if n_flag == n_to_keep:
break
else:
n_to_keep = n_flag
return mu
#------------------------------------------------------------
# Create the distribution and compute means and medians
np.random.seed(6)
mu = 0
gamma = 2
xi = cauchy(mu, gamma).rvs(100)
Nrange = np.arange(1, len(xi) + 1)
mean = [np.mean(xi[:N]) for N in Nrange]
median = [np.median(xi[:N]) for N in Nrange]
mean_mixture = [robust_mean_mixture(xi[:N]) for N in Nrange]
mean_iter = [robust_mean_iterated(xi[:N]) for N in Nrange]
#------------------------------------------------------------
# Plot the results as a function of number of points
fig = plt.figure(figsize=(5, 3.75))
fig.subplots_adjust(hspace=0.05)
# first plot the mean
ax = fig.add_subplot(211)
ax.plot(Nrange, mean, '-.b', label='mean')
ax.plot(Nrange, median, '-k', label='median')
ax.plot(Nrange, mean_mixture, ':r', label='robust mean (mixture)')
ax.plot(Nrange, mean_iter, '--g', label='robust mean (sigma-clip)')
ax.plot(Nrange, 0 * Nrange, '-', c='gray', lw=0.5)
ax.set_xlim(0, 100)
ax.set_ylim(-7, 7)
ax.legend(loc=4, ncol=2, frameon=False)
ax.set_ylabel('Value')
ax.xaxis.set_major_formatter(plt.NullFormatter())
# now plot the median
ax = fig.add_subplot(212)
ax.scatter(Nrange, xi, lw=0, s=10, c='k')
ax.plot(Nrange, 0 * Nrange, '-', c='gray')
ax.set_xlim(0, 100)
ax.set_ylim(-75, 75)
ax.set_xlabel('Sample Size')
ax.set_ylabel('Value')
plt.show()
| bsd-2-clause |
kdebrab/pandas | pandas/tests/sparse/test_reshape.py | 12 | 1088 | import pytest
import numpy as np
import pandas as pd
import pandas.util.testing as tm
@pytest.fixture
def sparse_df():
return pd.SparseDataFrame({0: {0: 1}, 1: {1: 1}, 2: {2: 1}}) # eye
@pytest.fixture
def multi_index3():
return pd.MultiIndex.from_tuples([(0, 0), (1, 1), (2, 2)])
def test_sparse_frame_stack(sparse_df, multi_index3):
ss = sparse_df.stack()
expected = pd.SparseSeries(np.ones(3), index=multi_index3)
tm.assert_sp_series_equal(ss, expected)
def test_sparse_frame_unstack(sparse_df):
mi = pd.MultiIndex.from_tuples([(0, 0), (1, 0), (1, 2)])
sparse_df.index = mi
arr = np.array([[1, np.nan, np.nan],
[np.nan, 1, np.nan],
[np.nan, np.nan, 1]])
unstacked_df = pd.DataFrame(arr, index=mi).unstack()
unstacked_sdf = sparse_df.unstack()
tm.assert_numpy_array_equal(unstacked_df.values, unstacked_sdf.values)
def test_sparse_series_unstack(sparse_df, multi_index3):
frame = pd.SparseSeries(np.ones(3), index=multi_index3).unstack()
tm.assert_sp_frame_equal(frame, sparse_df)
| bsd-3-clause |
poorboy44/Data-Science-45min-Intros | support-vector-machines-101/svm-example.py | 26 | 2219 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
__author__="Josh Montague"
__license__="MIT License"
import sys
import pandas as pd
import numpy as np
from sklearn.datasets import make_blobs
from sklearn.svm import SVC
import matplotlib.pyplot as plt
try:
import seaborn as sns
except ImportError as e:
sys.stderr.write("seaborn not installed. Using default matplotlib templates.")
# cobbled together from refs:
# http://scikit-learn.org/stable/auto_examples/svm/plot_iris.html
# http://scikit-learn.org/stable/auto_examples/svm/plot_separating_hyperplane.html
if len(sys.argv) > 1:
samples = int( sys.argv[1] )
c_std=2.0
else:
samples = 10
c_std=1.0
X, y = make_blobs(n_samples=samples, cluster_std=c_std, centers=2)
# make a plotting grid
h = .02 # step size in the mesh
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# svm
clf = SVC(kernel='linear').fit(X, y)
# predict all points in grid
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# separating plane and margins
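# The fitted boundary satisfies w[0]*x + w[1]*y + intercept = 0, so solving
# for y gives the line y = -(w[0]/w[1])*x - intercept/w[1] plotted below; the
# dashed margin lines are the parallels through the support vectors.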
w = clf.coef_[0]
a = -w[0] / w[1]
xxx = np.linspace(x_min, x_max)
yyy = a * xxx - (clf.intercept_[0]) / w[1]
# calculate the large margin boundaries defined by the support vectors
b = clf.support_vectors_[0]
yyy_down = a * xxx + (b[1] - a * b[0])
b = clf.support_vectors_[-1]
yyy_up = a * xxx + (b[1] - a * b[0])
# plot margins
plt.figure(figsize=(8,6))
plt.plot(xxx, yyy, 'k-', linewidth=1)
plt.plot(xxx, yyy_down, 'k--', linewidth=1)
plt.plot(xxx, yyy_up, 'k--', linewidth=1)
# plot decision contours
Z = Z.reshape(xx.shape)
#plt.contourf(xx, yy, Z, cmap=plt.cm.Paired, alpha=0.8)
plt.contourf(xx, yy, Z, alpha=0.25)
# plot data
plt.scatter(X[:, 0], X[:, 1],
s=100,
c=y,
alpha=0.8,
cmap=plt.cm.Paired
)
# plot support vectors
plt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1],
s=300,
facecolors='none'
)
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.xlabel('x')
plt.ylabel('y')
# SHOW ALL THE THINGS
plt.show()
| unlicense |
crichardson17/starburst_atlas | Low_resolution_sims/DustFree_LowRes/Geneva_Rot_inst/Geneva_Rot_inst_age0/UV2.py | 33 | 7365 | import csv
import matplotlib.pyplot as plt
from numpy import *
import scipy.interpolate
import math
from pylab import *
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
import matplotlib.patches as patches
from matplotlib.path import Path
import os
# ------------------------------------------------------------------------------------------------------
#inputs
for file in os.listdir('.'):
if file.endswith(".grd"):
inputfile = file
for file in os.listdir('.'):
if file.endswith(".txt"):
inputfile2 = file
# ------------------------------------------------------------------------------------------------------
#Patches data
#for the Kewley and Levesque data
verts = [
(1., 7.97712125471966000000), # left, bottom
(1., 9.57712125471966000000), # left, top
(2., 10.57712125471970000000), # right, top
(2., 8.97712125471966000000), # right, bottom
(0., 0.), # ignored
]
codes = [Path.MOVETO,
Path.LINETO,
Path.LINETO,
Path.LINETO,
Path.CLOSEPOLY,
]
path = Path(verts, codes)
# ------------------------
#for the Kewley 01 data
verts2 = [
(2.4, 9.243038049), # left, bottom
(2.4, 11.0211893), # left, top
(2.6, 11.0211893), # right, top
(2.6, 9.243038049), # right, bottom
(0, 0.), # ignored
]
path = Path(verts, codes)
path2 = Path(verts2, codes)
# -------------------------
#for the Moy et al data
verts3 = [
(1., 6.86712125471966000000), # left, bottom
(1., 10.18712125471970000000), # left, top
(3., 12.18712125471970000000), # right, top
(3., 8.86712125471966000000), # right, bottom
(0., 0.), # ignored
]
path = Path(verts, codes)
path3 = Path(verts3, codes)
# ------------------------------------------------------------------------------------------------------
#the routine to add patches for other people's data onto our plots.
def add_patches(ax):
patch3 = patches.PathPatch(path3, facecolor='yellow', lw=0)
patch2 = patches.PathPatch(path2, facecolor='green', lw=0)
patch = patches.PathPatch(path, facecolor='red', lw=0)
ax1.add_patch(patch3)
ax1.add_patch(patch2)
ax1.add_patch(patch)
# ------------------------------------------------------------------------------------------------------
#the subplot routine
def add_sub_plot(sub_num):
numplots = 16
plt.subplot(numplots/4.,4,sub_num)
rbf = scipy.interpolate.Rbf(x, y, z[:,sub_num-1], function='linear')
zi = rbf(xi, yi)
contour = plt.contour(xi,yi,zi, levels, colors='c', linestyles = 'dashed')
contour2 = plt.contour(xi,yi,zi, levels2, colors='k', linewidths=1.5)
plt.scatter(max_values[line[sub_num-1],2], max_values[line[sub_num-1],3], c ='k',marker = '*')
plt.annotate(headers[line[sub_num-1]], xy=(8,11), xytext=(6,8.5), fontsize = 10)
plt.annotate(max_values[line[sub_num-1],0], xy= (max_values[line[sub_num-1],2], max_values[line[sub_num-1],3]), xytext = (0, -10), textcoords = 'offset points', ha = 'right', va = 'bottom', fontsize=10)
if sub_num == numplots / 2.:
print "half the plots are complete"
#axis limits
yt_min = 8
yt_max = 23
xt_min = 0
xt_max = 12
plt.ylim(yt_min,yt_max)
plt.xlim(xt_min,xt_max)
plt.yticks(arange(yt_min+1,yt_max,1),fontsize=10)
plt.xticks(arange(xt_min+1,xt_max,1), fontsize = 10)
if sub_num in [2,3,4,6,7,8,10,11,12,14,15,16]:
plt.tick_params(labelleft = 'off')
else:
plt.tick_params(labelleft = 'on')
plt.ylabel('Log ($ \phi _{\mathrm{H}} $)')
if sub_num in [1,2,3,4,5,6,7,8,9,10,11,12]:
plt.tick_params(labelbottom = 'off')
else:
plt.tick_params(labelbottom = 'on')
plt.xlabel('Log($n _{\mathrm{H}} $)')
if sub_num == 1:
plt.yticks(arange(yt_min+1,yt_max+1,1),fontsize=10)
if sub_num == 13:
plt.yticks(arange(yt_min,yt_max,1),fontsize=10)
plt.xticks(arange(xt_min,xt_max,1), fontsize = 10)
if sub_num == 16 :
plt.xticks(arange(xt_min+1,xt_max+1,1), fontsize = 10)
# ---------------------------------------------------
#this is where the grid information (phi and hdens) is read in and saved to grid.
grid = [];
with open(inputfile, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
for row in csvReader:
grid.append(row);
grid = asarray(grid)
#here is where the data for each line is read in and saved to dataEmissionlines
dataEmissionlines = [];
with open(inputfile2, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
headers = csvReader.next()
for row in csvReader:
dataEmissionlines.append(row);
dataEmissionlines = asarray(dataEmissionlines)
print "import files complete"
# ---------------------------------------------------
#for grid
phi_values = grid[1:len(dataEmissionlines)+1,6]
hdens_values = grid[1:len(dataEmissionlines)+1,7]
#for lines
headers = headers[1:]
Emissionlines = dataEmissionlines[:, 1:]
concatenated_data = zeros((len(Emissionlines),len(Emissionlines[0])))
max_values = zeros((len(Emissionlines[0]),4))
#select the scaling factor
#for 1215
#incident = Emissionlines[1:,4]
#for 4860
incident = Emissionlines[:,57]
#take the ratio of incident and all the lines and put it all in an array concatenated_data
for i in range(len(Emissionlines)):
for j in range(len(Emissionlines[0])):
if math.log(4860.*(float(Emissionlines[i,j])/float(Emissionlines[i,57])), 10) > 0:
concatenated_data[i,j] = math.log(4860.*(float(Emissionlines[i,j])/float(Emissionlines[i,57])), 10)
else:
concatenated_data[i,j] = 0
# for 1215
#for i in range(len(Emissionlines)):
# for j in range(len(Emissionlines[0])):
# if math.log(1215.*(float(Emissionlines[i,j])/float(Emissionlines[i,4])), 10) > 0:
# concatenated_data[i,j] = math.log(1215.*(float(Emissionlines[i,j])/float(Emissionlines[i,4])), 10)
# else:
# concatenated_data[i,j] == 0
#find the maxima to plot onto the contour plots
for j in range(len(concatenated_data[0])):
max_values[j,0] = max(concatenated_data[:,j])
max_values[j,1] = argmax(concatenated_data[:,j], axis = 0)
max_values[j,2] = hdens_values[max_values[j,1]]
max_values[j,3] = phi_values[max_values[j,1]]
#to round off the maxima
max_values[:,0] = [ '%.1f' % elem for elem in max_values[:,0] ]
print "data arranged"
# ---------------------------------------------------
#Creating the grid to interpolate with for contours.
gridarray = zeros((len(Emissionlines),2))
gridarray[:,0] = hdens_values
gridarray[:,1] = phi_values
x = gridarray[:,0]
y = gridarray[:,1]
#change desired lines here!
line = [18, #1549
19, #1640
20, #1665
21, #1671
23, #1750
24, #1860
25, #1888
26, #1907
27, #2297
28, #2321
29, #2471
30, #2326
31, #2335
32, #2665
33, #2798
34] #2803
#create z array for this plot
z = concatenated_data[:,line[:]]
# ---------------------------------------------------
# Interpolate
print "starting interpolation"
xi, yi = linspace(x.min(), x.max(), 10), linspace(y.min(), y.max(), 10)
xi, yi = meshgrid(xi, yi)
# ---------------------------------------------------
print "interpolatation complete; now plotting"
#plot
plt.subplots_adjust(wspace=0, hspace=0) #remove space between plots
levels = arange(10**-1,10, .2)
levels2 = arange(10**-2,10**2, 1)
plt.suptitle("UV Lines Continued", fontsize=14)
# ---------------------------------------------------
for i in range(16):
add_sub_plot(i+1)
ax1 = plt.subplot(4,4,1)
add_patches(ax1)
print "complete"
plt.savefig('UV_Lines_cntd.pdf')
plt.clf()
| gpl-2.0 |
scrollback/kuma | vendor/packages/ipython/docs/examples/kernel/plotting_frontend.py | 7 | 1263 | """An example of how to use IPython1 for plotting remote parallel data
The two files plotting_frontend.ipy and plotting_backend.py go together.
To run this example, first start the IPython controller and 4
engines::
ipcluster -n 4
Then start ipython in pylab mode::
ipython -pylab
Then a simple "run plotting_frontend.ipy" in IPython will run the
example. When this is done, all the variables (such as number, downx, etc.)
are available in IPython, so for example you can make additional plots.
"""
import numpy as N
from pylab import *
from IPython.kernel import client
# Get an IPython1 client
rc = client.MultiEngineClient()
rc.get_ids()
# Run the simulation on all the engines
rc.run('plotting_backend.py')
# Bring back the data
number = rc.pull('number')
d_number = rc.pull('d_number')
downx = rc.gather('downx')
downy = rc.gather('downy')
downpx = rc.gather('downpx')
downpy = rc.gather('downpy')
print "number: ", sum(number)
print "downsampled number: ", sum(d_number)
# Make a scatter plot of the gathered data
# These calls to matplotlib could be replaced by calls to pygist or
# another plotting package.
figure(1)
scatter(downx, downy)
xlabel('x')
ylabel('y')
figure(2)
scatter(downpx, downpy)
xlabel('px')
ylabel('py')
show() | mpl-2.0 |
almarklein/bokeh | examples/plotting/server/burtin.py | 2 | 4800 | # The plot server must be running
# Go to http://localhost:5006/bokeh to view this plot
from collections import OrderedDict
from math import log, sqrt
import numpy as np
import pandas as pd
from six.moves import cStringIO as StringIO
from bokeh.plotting import *
antibiotics = """
bacteria, penicillin, streptomycin, neomycin, gram
Mycobacterium tuberculosis, 800, 5, 2, negative
Salmonella schottmuelleri, 10, 0.8, 0.09, negative
Proteus vulgaris, 3, 0.1, 0.1, negative
Klebsiella pneumoniae, 850, 1.2, 1, negative
Brucella abortus, 1, 2, 0.02, negative
Pseudomonas aeruginosa, 850, 2, 0.4, negative
Escherichia coli, 100, 0.4, 0.1, negative
Salmonella (Eberthella) typhosa, 1, 0.4, 0.008, negative
Aerobacter aerogenes, 870, 1, 1.6, negative
Brucella antracis, 0.001, 0.01, 0.007, positive
Streptococcus fecalis, 1, 1, 0.1, positive
Staphylococcus aureus, 0.03, 0.03, 0.001, positive
Staphylococcus albus, 0.007, 0.1, 0.001, positive
Streptococcus hemolyticus, 0.001, 14, 10, positive
Streptococcus viridans, 0.005, 10, 40, positive
Diplococcus pneumoniae, 0.005, 11, 10, positive
"""
drug_color = OrderedDict([
("Penicillin", "#0d3362"),
("Streptomycin", "#c64737"),
("Neomycin", "black" ),
])
gram_color = {
"positive" : "#aeaeb8",
"negative" : "#e69584",
}
df = pd.read_csv(StringIO(antibiotics),
skiprows=1,
skipinitialspace=True,
engine='python')
width = 800
height = 800
inner_radius = 90
outer_radius = 300 - 10
minr = sqrt(log(.001 * 1E4))
maxr = sqrt(log(1000 * 1E4))
a = (outer_radius - inner_radius) / (minr - maxr)
b = inner_radius - a * maxr
def rad(mic):
return a * np.sqrt(np.log(mic * 1E4)) + b
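# (Added note) rad() maps a MIC value to a plot radius via r = a*sqrt(log(mic*1E4)) + b.
# The constants a and b above are chosen so the two extremes of the data hit the
# edges of the annulus: rad(0.001) = outer_radius and rad(1000) = inner_radius,
# so smaller (more potent) MIC values are drawn as longer wedges.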
big_angle = 2.0 * np.pi / (len(df) + 1)
small_angle = big_angle / 7
x = np.zeros(len(df))
y = np.zeros(len(df))
output_server("burtin")
p = figure(plot_width=width, plot_height=height, title="",
x_axis_type=None, y_axis_type=None,
x_range=[-420, 420], y_range=[-420, 420],
min_border=0, outline_line_color="black",
background_fill="#f0e1d2", border_fill="#f0e1d2")
p.line(x+1, y+1, alpha=0)
# annular wedges
angles = np.pi/2 - big_angle/2 - df.index.to_series()*big_angle
colors = [gram_color[gram] for gram in df.gram]
p.annular_wedge(
x, y, inner_radius, outer_radius, -big_angle+angles, angles, color=colors,
)
# small wedges
p.annular_wedge(x, y, inner_radius, rad(df.penicillin),
-big_angle+angles+5*small_angle, -big_angle+angles+6*small_angle,
color=drug_color['Penicillin'])
p.annular_wedge(x, y, inner_radius, rad(df.streptomycin),
-big_angle+angles+3*small_angle, -big_angle+angles+4*small_angle,
color=drug_color['Streptomycin'])
p.annular_wedge(x, y, inner_radius, rad(df.neomycin),
-big_angle+angles+1*small_angle, -big_angle+angles+2*small_angle,
color=drug_color['Neomycin'])
# circular axes and lables
labels = np.power(10.0, np.arange(-3, 4))
radii = a * np.sqrt(np.log(labels * 1E4)) + b
p.circle(x, y, radius=radii, fill_color=None, line_color="white")
p.text(x[:-1], radii[:-1], [str(r) for r in labels[:-1]],
text_font_size="8pt", text_align="center", text_baseline="middle")
# radial axes
p.annular_wedge(x, y, inner_radius-10, outer_radius+10,
-big_angle+angles, -big_angle+angles, color="black")
# bacteria labels
xr = radii[0]*np.cos(np.array(-big_angle/2 + angles))
yr = radii[0]*np.sin(np.array(-big_angle/2 + angles))
label_angle=np.array(-big_angle/2+angles)
label_angle[label_angle < -np.pi/2] += np.pi # easier to read labels on the left side
p.text(xr, yr, df.bacteria, angle=label_angle,
text_font_size="9pt", text_align="center", text_baseline="middle")
# OK, these hand drawn legends are pretty clunky, will be improved in future release
p.circle([-40, -40], [-370, -390], color=list(gram_color.values()), radius=5)
p.text([-30, -30], [-370, -390], text=["Gram-" + gr for gr in gram_color.keys()],
text_font_size="7pt", text_align="left", text_baseline="middle")
p.rect([-40, -40, -40], [18, 0, -18], width=30, height=13,
color=list(drug_color.values()))
p.text([-15, -15, -15], [18, 0, -18], text=list(drug_color.keys()),
text_font_size="9pt", text_align="left", text_baseline="middle")
p.xgrid.grid_line_color = None
p.ygrid.grid_line_color = None
show(p)
| bsd-3-clause |
Kate-Willett/Climate_Explorer | PYTHON/MergeAggRegridERA5.py | 1 | 49513 | #!/usr/local/sci/bin/python
# PYTHON3.6.1
#
# Author: Kate Willett
# Created: 18 Mar 2020
# Last update: 18 Mar 2020
# Location: /data/users/hadkw/WORKING_HADISDH/UPDATE2019/PROGS/PYTHON/
# GitHub: https://github.com/Kate-Willett/HadISDH_Build
# -----------------------
# CODE PURPOSE AND OUTPUT
# -----------------------
# THIS CODE DOES MANY THINGS
# NOTE THAT FOR ANY 1BY1 OUTPUT IT REGRIDS TO BE 89.5 to -89.5 rather than 90 - -90 (180 boxes rather than 181!!!)
# AND ROLLS LONGITUDE TO -179.5 to 179.5
#
# AT THE MOMENT THIS ASSUMES COMPLETE FIELDS SO WON'T WORK FOR SST!!!
#
# ANOTHER ISSUE IS LAND / SEA MASKING - TOO MUCH LAND COVER, TOO MUCH SEA COVER - SO THERE WILL BE CONTAMINATION!
# I COMPUTE ANOMALIES AT 1by1 RES BEFORE REGRIDDING TO 5by5 TO MINIMISE THIS.
#
#
# This code reads in the ERA5 months of 1by1 daily or monthly variables
# (e.g., T, Td and Surface Pressure etc) for the full time period
#
# It averages to pentad means, anoms, clims and stdevs and saves to netCDF:
# days since 19790101 (float), 180 lats 89.5 to -89.5, 360 lons -179.5 to 179.5, <var>2m
# It averages to monthly means, anoms, clims and stdevs and saves to netCDF:
# days since 19790101 (float), 180 lats 89.5 to -89.5, 360 lons -179.5 to 179.5, <var>2m
# It regrids to 5by5 monthly means, anoms, clims and stdevs and saves to netCDF
# days since 19790101 (int), 36 lats -87.5 to 87.5, 72 lons -177.5 to 177.5, actuals
# For anomalies it also creates a land only and ocean only set of grids (based on ERA5 land sea mask at 1by1 degree) to save alongside the complete
# fields
# anomalies_land, anomalies_sea
# Climatology is 1981-2010
#
# This can cope with an annual update and just append to existing files
#
# use submit_spice_ERA5download.bash and get_era5.py to download ERA5 months
# This may require a key to be set up in .ecmwfapirc annually - obtained from logging in to ECMWF
# https://confluence.ecmwf.int/display/WEBAPI/How+to+retrieve+ECMWF+Public+Datasets
# It also requires ecmwfapi to be downloaded and in the directory as you are running to code from
#
# Copy previous years of monthly ERA5 data from the previous
# UPDATE<yyyy>/OTHERDATA/<var>2m_daily_1by1_ERA5_data_1979<yyyy>.nc
# UPDATE<yyyy>/OTHERDATA/<var>2m_pentad_1by1_ERA5_data_1979<yyyy>.nc
# UPDATE<yyyy>/OTHERDATA/<var>2m_monthly_1by1_ERA5_data_1979<yyyy>.nc
# UPDATE<yyyy>/OTHERDATA/<var>2m_monthly_5by5_ERA5_data_1979<yyyy>.nc
# to OTHERDATA/
#
# <references to related published material, e.g. that describes data set>
#
# -----------------------
# LIST OF MODULES
# -----------------------
# inbuilt:
# from datetime import datetime
# import matplotlib.pyplot as plt
# import numpy as np
# from matplotlib.dates import date2num,num2date
# import sys, os
# from scipy.optimize import curve_fit,fsolve,leastsq
# from scipy import pi,sqrt,exp
# from scipy.special import erf
# import scipy.stats
# from math import sqrt,pi
# import struct
# from netCDF4 import Dataset
# from netCDF4 import stringtoarr # for putting strings in as netCDF variables
# import pdb
#
# Kates:
# import CalcHums - written by kate Willett to calculate humidity variables
# import TestLeap - written by Kate Willett to identify leap years
# from ReadNetCDF import GetGrid4 - written by Kate Willett to pull out netCDF data
# from ReadNetCDF import GetGrid4Slice - written by Kate Willett to pull out a slice of netCDF data
# from GetNiceTimes import make_days_since
#
#-------------------------------------------------------------------
# DATA
# -----------------------
# ERA5 1by1 daily gridded data for each year and month
# THIS CODE ALWAYS WORKS FROM DAILY 1BY1 TO START
# EITHER TO BUILD ALL OUTPUTS FROM SCRATCH FROM INDIVIDUAL MONTHS:
# ERA5 = /data/users/hadkw/WORKING_HADISDH/UPDATE<yyyy>/OTHERDATA/<yyyy><mm>_daily_<variable>.nc
# OR TO APPEND NEW INDIVIDUAL MONTHS TO EXISTING OUTPUTS
# ERA5 = /data/users/hadkw/WORKING_HADISDH/UPDATE<yyyy>/OTHERDATA/<yyyy><mm>_daily_<variable>.nc
# ERA5OLD = /data/users/hadkw/WORKING_HADISDH/UPDATE<yyyy>/OTHERDATA/<var>2m_daily_1by1_ERA5_1979<yyyy-1>.nc
# ERA5OLD = /data/users/hadkw/WORKING_HADISDH/UPDATE<yyyy>/OTHERDATA/<var>2m_pentad_1by1_ERA5_1979<yyyy-1>.nc
# ERA5OLD = /data/users/hadkw/WORKING_HADISDH/UPDATE<yyyy>/OTHERDATA/<var>2m_monthly_1by1_ERA5_1979<yyyy-1>.nc
# ERA5OLD = /data/users/hadkw/WORKING_HADISDH/UPDATE<yyyy>/OTHERDATA/<var>2m_monthly_5by5_ERA5_1979<yyyy-1>.nc
#
# It also needs a land -sea mask so use the ERA5 one ( created by get_era5_lsm.py)
# LSM = /data/users//hadkw/WORKING_HADISDH/UPDATE<yyyy>/OTHERDATA/197901_hourly_land_sea_mask_ERA5.nc
#
# -----------------------
# HOW TO RUN THE CODE
# -----------------------
# First make sure the new ERA5 data are in the right place.
# Also check all editables in this file are as you wish
# module load scitools/default-current
# python MergeAggRegridERA5.py
#
# -----------------------
# OUTPUT
# -----------------------
# New ERA5 data for 1979 to present
# NewERA<var>:
# /data/users/hadkw/WORKING_HADISDH/UPDATE<yyyy>/OTHERDATA/<var>2m_daily_1by1_ERA5_1979<yyyy>.nc
# /data/users/hadkw/WORKING_HADISDH/UPDATE<yyyy>/OTHERDATA/<var>2m_pentad_1by1_ERA5_1979<yyyy>.nc
# /data/users/hadkw/WORKING_HADISDH/UPDATE<yyyy>/OTHERDATA/<var>2m_monthly_1by1_ERA5_1979<yyyy>.nc
# /data/users/hadkw/WORKING_HADISDH/UPDATE<yyyy>/OTHERDATA/<var>2m_monthly_5by5_ERA5_1979<yyyy>.nc
#
# -----------------------
# VERSION/RELEASE NOTES
# -----------------------
#
# Version 1 (18 Mar 2020)
# ---------
#
# Enhancements
#
# Changes
#
# Bug fixes
#
# -----------------------
# OTHER INFORMATION
# -----------------------
#
#************************************************************************
# START
#************************************************************************
# inbuilt:
import datetime as dt
from datetime import datetime
#import matplotlib.pyplot as plt
import numpy as np
#from matplotlib.dates import date2num,num2date
#import sys, os
#from scipy.optimize import curve_fit,fsolve,leastsq
#from scipy import pi,sqrt,exp
#from scipy.special import erf
#import scipy.stats
#from math import sqrt,pi
#import struct
from netCDF4 import Dataset
from netCDF4 import stringtoarr # for putting strings in as netCDF variables
import pdb
# Kates:
import CalcHums
import TestLeap
from ReadNetCDF import GetGrid4
from ReadNetCDF import GetGrid4Slice
from GetNiceTimes import MakeDaysSince
### START OF EDITABLES ###############################
# Set up initial run choices
# Start and end years
actstyr = 1979 # start year of dataset
styr = 1979 # start year of data to work with (for an update run this is usually the same as edyr)
edyr = 2019
thisedyr = 2019
stmon = 1
edmon = 12
## Set up output variables - for q, e, RH, dpd, Tw we will need to read in multiple input files
#Var = 'q' # this can be 't','td','q','rh','e','dpd','tw','ws','slp','sp','uv','sst'
# Is this a new run or an update?
ThisProg = 'Build'
# Update for updating an existing file ( from new 1by1 dailies and preious outputs daily monthly or pentad)
# Build for building from scratch (from 1by1 dailies)
# THIS AUTOMATICALLY REGRIDS LATS TO BE 180 RATHER THAN 181!!!
# Is this ERA-Interim or ERA5?
ThisRean = 'ERA5' # 'ERA5' or 'ERA-Interim'
# Do you want to create anomalies and if so, what climatology period? We will output absolutes anyway
#MakeAnoms = 1 # 1 for create anomalies (and clim and stdev fields), 0 for do NOT create anomalies
ClimStart = 1981 # any year but generally 1981
ClimEnd = 2010 # any year but generally 2010
### END OF EDITABLES ################
# Set up file paths and other necessary things
#if (MakeAnoms == 1): # set the filename string for anomalies
# AnomsStr = 'anoms'+str(ClimStart)+'-'+str(ClimEnd)+'_'
#else:
# AnomsStr = ''
# Set up file locations
updateyy = str(thisedyr)[2:4]
updateyyyy = str(thisedyr)
workingdir = '/data/users/hadkw/WORKING_HADISDH/UPDATE'+updateyyyy
## RANDOM BIT FOR TESTING
#edyr=1980
# dictionary: chosen and output var, actual var, ERA5 read in name
VarDict = dict([('q',['q','specific_humidity']),
('rh',['RH','relative_humidity']),
('e',['e','vapour_pressure']),
('t',['T','2m_temperature']),
('tw',['Tw','wetbulb_temperature']),
('td',['Td','2m_dewpoint_temperature']),
('dpd',['DPD','dewpoint_depression']),
('p',['P','surface_pressure']),
('slp',['SLP','sea_level_pressure'])])
# Dictionary for looking up variable names for netCDF read in of variables
NameDict = dict([('q','q2m'),
('rh','rh2m'),
('e','e2m'),
('tw','tw2m'),
('t','t2m'),
('td','td2m'),
('dpd','dpd2m'),
('slp','msl'),
('p','p2m'), # does this matter that it is p not sp?
('uv',['u10','v10']), # this one might not work
('ws','si10'),
('sst','sst')])
LandMask = workingdir+'/OTHERDATA/197901_hourly_land_sea_mask_ERA5.nc' # 0 = 100% sea, 1 = 100% land - no islands!, latitude, longitude, land_area_fraction, -87.5 to 87.5, -177.5 to 177.5
# Set up variables
mdi = -1e30
# Required variable names for reading in from ERA-Interim
LatInfo = ['latitude']
LonInfo = ['longitude']
# Dictionary for looking up variable standard (not actually always standard!!!) names for netCDF output of variables
StandardNameDict = dict([('q','specific_humidity'),
('rh','relative_humidity'),
('e','vapour_pressure'),
('tw','wetbulb_temperature'),
('t','drybulb_temperature'),
('td','dewpoint_temperature'),
('dpd','dewpoint_depression'),
('slp','mean_sea_level_pressure'),
('p','surface_pressure'), # does this matter that its p and not sp?
('uv',['10_metre_U_wind_component','10_metre_V_wind_component']), # this one might not work
('ws','10_metre_windspeed'),
('sst','sea_surface_temperature')])
# Dictionary for looking up variable long names for netCDF output of variables
LongNameDict = dict([('q','2m specific humidity from 1by1 hrly T, Td and p '+ThisRean),
('rh','2m relative humidity from 1by1 hrly T, Td and p '+ThisRean),
('e','2m vapour pressure from 1by1 hrly T, Td and p '+ThisRean),
('tw','2m wetbulb temperature from 1by1 hrly T, Td and p '+ThisRean),
('t','2m drybulb temperature from 1by1 hrly T '+ThisRean),
('td','2m dewpoint temperature from 1by1 hrly Td '+ThisRean),
('dpd','2m dewpoint depression from 1by1 hrly T, Td and p '+ThisRean),
('slp','2m mean_sea level pressure from 1by1 hrly msl '+ThisRean),
('p','2m surface pressure from 1by1 hrly sp '+ThisRean), # does this matter that its p not sp
('uv',['10 metre U wind component from 1by1 hrly '+ThisRean,'10 metre V wind component from 1by1 6hrly'+ThisRean]), # this one might not work
('ws','10 metre windspeed from 1by1 hrly'+ThisRean),
('sst','sea surface temperature from 1by1 hrly'+ThisRean)])
# Dictionary for looking up unit of variables
UnitDict = dict([('q','g/kg'),
('rh','%'), # made this % rather than %rh as iris doesn't seem to like it
('e','hPa'),
('tw','deg C'),
('t','deg C'),
('td','deg C'),
('dpd','deg C'),
('slp','hPa'),
('p','hPa'), # does this matter that it is p not sp
('uv','m/s'),
('ws','m/s'),
('sst','deg C')])
nyrs = (edyr+1)-styr
nmons = nyrs*12
npts = nyrs*73
ndays = (dt.date(edyr+1,1,1).toordinal() - dt.date(styr,1,1).toordinal())
# set up nlons and nlats depending on what we are reading in and out
nlonsIn = 360
nlatsIn = 181 # ERA style to have grids over the poles rather than up to the poles
nlonsOut1 = 360
nlatsOut1 = 180 # ERA style to have grids over the poles rather than up to the poles but this will be changed here with Build or Regrid
nlonsOut5 = 72 # assuming this is correct
nlatsOut5 = 36 # assuming this is correct
#************************************************************
# SUBROUTINES
#************************************************************
# RegridField
def RegridField(TheOldData,TheMDI):
'''
This function does a simple regridding of data by averaging over the larger gridboxes
NO COSINE WEIGHTING FOR LATITUDE!!!!
NOTE:
FOR OutputGrid = 5by5 THIS AUTOMATICALLY FLIPS LATITUDE AND ROLLS LONGITUDE TO BE -87.5 to 87.5 and -177.5 to 177.5
FOR OutputGrid = 1by1 THIS JUST REGRIDS LATITUDE FROM 181 boxes 90 to -90 TO 180 boxes 89.5 to -89.5 and rolls longitude to -179.5 to 179.5
Assumes input grid is always 1by1
INPUTS:
TheOldData[:,:,:] - time, lat, long numpy array of complete field in original grid resolution
OUTPUTS:
TheNewData[:,:,:] - time, lat, long numpy array of complete field in new grid resolution
I'm hoping that things set above are seen by the function rather than being passed explicitly
'''
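# (Added note) Each 5by5 output box (ltt, lnn) is the unweighted mean of the 25
# 1by1 input boxes TheOldData[:, 5*ltt:5*ltt+5, 5*lnn:5*lnn+5], skipping values
# at or below TheMDI; writing into row 35-ltt flips latitude so the 89.5 to -89.5
# input becomes the -87.5 to 87.5 (south to north) output convention.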
# Set up the desired output array
TheNewData = np.empty((len(TheOldData[:,0,0]),36,72),dtype = float)
TheNewData.fill(TheMDI)
# Then we know we're reading in my converted ERA5 data which has 180 lats and already has lons rolled 180 degrees.
# flip lats to go south to north
# regrid to 5by5
# Regrid to 5by5 by simple averaging
# Data input here should already be 89.5 to -89.5 lat and -179.5 to 179.5 long!!!
StLt = 0
EdLt = 0
# Loop through the OutputGrid (5by5) lats and lons
for ltt in range(36):
# create pointers to the five lats to average over
StLt = np.copy(EdLt)
EdLt = EdLt + 5
StLn = 0
EdLn = 0
for lnn in range(72):
# create pointers to the five lons to average over
StLn = np.copy(EdLn)
EdLn = EdLn + 5
#print(ltt,lnn,StLt,EdLt,StLn,EdLn)
# Loop over each time point
for mm in range(len(TheNewData[:,0,0])):
# Create a subarr first so that we can deal with missing data
subarr = TheOldData[mm,StLt:EdLt,StLn:EdLn]
gots = np.where(subarr > TheMDI)
if (len(gots[0]) > 0):
# FILL THE LATITUDES BACKWARDS SO THAT THIS REVERSES THEM!!!
TheNewData[mm,35-ltt,lnn] = np.mean(subarr[gots])
#pdb.set_trace()
return TheNewData
#************************************************************
# BuildFieldOLD - this is only done at the daily level
def BuildFieldOLD(BuildType, TheVar, DirStr, InFileStr, TheStYr, TheEdYr,InFileOLD = ''):
''' function for building complete reanalyses files over the period specified
this can be very computationally expensive so do it by year
This requires initial reanalysis data to be read in in chunks of 1 month previously
INPUTS:
BuildType - 'Update' or 'Build'
TheVar - string lower case character of q, rh, t, td, dpd, tw, e, msl, sp, ws +2m or appropriate
InFileStr - string of dir+file string to read in
TheStYr = integer start year of data - assume Jan 1st (0101) start
TheEdYr = integer end year of data - assume Dec 31st (1231) end
InFileOLD = optional string for old data file to be read in
OUTPUTS:
TheNewData[:,:,:] - time, lat, long numpy array of complete field in new time resolution
'''
# Should just be able to read in the data and then append to build complete file
nyrs = (TheEdYr - TheStYr) + 1
if (BuildType == 'Build'):
FuncStYr = TheStYr
NewDataArr = np.array(()) # This will be set up on first read in - this has len() of 0!!!
elif (BuildType == 'Update'):
FuncStYr = TheEdYr
# Now read in the old data to start array to append to
NewDataArr,Latitudes,Longitudes = GetGrid4(InFileOLD,[TheVar],['latitude'],['longitude'])
for y in np.arange(FuncStYr,TheEdYr+1):
# Get actual year we're working on
print('Working Year: ',y)
# Loop through each month or pentad depending on thing
for m in range(12):
## string for file name
mm = '%02i' % (m+1)
# Now read in the old data to start array to append to
TmpDataArr,Latitudes,Longitudes = GetGrid4(DirStr+str(y)+mm+InFileStr,[TheVar],['latitude'],['longitude'])
if (len(NewDataArr) == 0):
# this is the start of the build
NewDataArr = np.copy(TmpDataArr)
else:
NewDataArr = np.append(NewDataArr,np.copy(TmpDataArr),0)
#print('Check built array')
#pdb.set_trace()
return NewDataArr
#************************************************************
# GetDaily - this is only done at the daily level
def GetDaily(TheVar, DirStr, InFileStr, TheYr):
''' function for building complete reanalyses files over the period specified
this can be very computationally expensive so do it by year
This requires initial reanalysis data to be read in in chunks of 1 month previously
INPUTS:
TheVar - string lower case character of q, rh, t, td, dpd, tw, e, msl, sp, ws +2m or appropriate
InFileStr - string of dir+file string to read in
TheYr = integer start year of data - assume Jan 1st (0101) start
OUTPUTS:
TheNewData[:,:,:] - time, lat, long numpy array of complete field in new time resolution
'''
NewDataArr = np.array(()) # This will be set up on first read in - this has len() of 0!!!
# Loop through each month or pentad depending on thing
for m in range(12):
## string for file name
mm = '%02i' % (m+1)
# Now read in the old data to start array to append to
TmpDataArr,Latitudes,Longitudes = GetGrid4(DirStr+str(TheYr)+mm+InFileStr,[TheVar],['latitude'],['longitude'])
if (len(NewDataArr) == 0):
# this is the start of the build
NewDataArr = np.copy(TmpDataArr)
else:
NewDataArr = np.append(NewDataArr,np.copy(TmpDataArr),0)
return NewDataArr
#************************************************************
# MaskLandS
def MaskLandS(TheDataArray,LandMaskFile,TheMDI):
''' This function takes in any field (anoms, clims, actuals etc)
and returns a land-masked (lsm < 0.5 - do not want inland lakes!!!) for sea and a sea-masked (lsm > 0) for land '''
# Read in land mask file
LSMArr,Latitudes,Longitudes = GetGrid4(LandMaskFile,['lsm'],['latitude'],['longitude'])
LSMArr = np.reshape(LSMArr,(1,len(LSMArr[:,0]),len(LSMArr[0,:])))
# loop through each time step and mask
for tt in range(len(TheDataArray[:,0,0])):
# create a temporary array for this time step with an added time dimension of 1
TmpArr = np.reshape(TheDataArray[tt,:,:],(1,len(TheDataArray[0,:,0]),len(TheDataArray[0,0,:])))
# Set up temporary MDI land and sea arrays
LandTmp = np.empty_like(TmpArr,dtype = float)
LandTmp[:,:,:] = TheMDI
SeaTmp = np.empty_like(TmpArr,dtype = float)
SeaTmp[:,:,:] = TheMDI
# Fill the land and sea arrays - trying more inclusive approach
LandTmp[np.where(LSMArr > 0.)] = TmpArr[np.where(LSMArr > 0.)]
SeaTmp[np.where(LSMArr < 0.5)] = TmpArr[np.where(LSMArr < 0.5)]
# LandTmp[np.where(LSMArr >= 0.25)] = TmpArr[np.where(LSMArr >= 0.25)]
# SeaTmp[np.where(LSMArr <= 0.75)] = TmpArr[np.where(LSMArr <= 0.75)]
# Now build complete array
if (tt == 0):
LandData = LandTmp
SeaData = SeaTmp
else:
LandData = np.append(LandData,np.copy(LandTmp),0)
SeaData = np.append(SeaData,np.copy(SeaTmp),0)
#print('Test land sea mask')
#pdb.set_trace()
LSMArr = 0
return LandData, SeaData
#************************************************************
# MakeMonths
def MakeMonths(TheDataArr,TheStYr,TheEdYr):
''' THis function takes a 3D array of daily data and
averages it up to monthly means '''
# Set up date bits and bobs
nyrs = TheEdYr - TheStYr + 1
nmns = nyrs*12
# Set up empty arrays
MonthlyData = np.empty((nmns,len(TheDataArr[0,:,0]),len(TheDataArr[0,0,:])))
mncounter = 0 # counter for months
mnst = 0 # counter for days in months
mned = 0#
# Loop through each year
for y in np.arange(TheStYr, TheEdYr+1):
for m in np.arange(1,13):
if (m < 12):
mndays = (dt.date(y,m+1,1).toordinal() - dt.date(y,m,1).toordinal())
else:
mndays = (dt.date(y+1,1,1).toordinal() - dt.date(y,m,1).toordinal())
mnst = mned
mned = mned + mndays
MonthlyData[mncounter,:,:] = np.mean(TheDataArr[mnst:mned,:,:],0)
mncounter += 1
# if (mncounter == 1):
#
# print('Test Months: ')
# pdb.set_trace()
# # Loop through each gridbox
# for ln in range(len(TheDataArr[0,0,:])):
#
# for lt in range(len(TheDataArr[0,:,0])):
#
# mncounter = 0 # counter for months
# mnst = 0 # counter for days in months
# mned = 0#
#
# # Loop through each year
# for y in np.arange(TheStYr, TheEdYr+1):
#
# for m in np.arange(1,13):
#
# if (m < 12):
#
# mndays = (dt.date(y,m+1,1).toordinal() - dt.date(y,m,1).toordinal())#
#
# else:
#
# mndays = (dt.date(y+1,1,1).toordinal() - dt.date(y,m,1).toordinal())#
#
# mnst = mned
# mned = mned + mndays#
#
# MonthlyData[mncounter,lt,ln] = np.mean(TheDataArr[mnst:mned,lt,ln])
# mncounter += 1
#
## if (mncounter == 1):
##
## print('Test Months: ')
## pdb.set_trace()
return MonthlyData
#************************************************************
# MakePentads
def MakePentads(TheDataArr,TheStYr,TheEdYr):
''' This function takes a 3D array of daily data and
averages it up to pentad means '''
# Set up date bits and bobs
nyrs = TheEdYr - TheStYr + 1
npts = nyrs*73
# Set up empty arrays
PentadData = np.empty((npts,len(TheDataArr[0,:,0]),len(TheDataArr[0,0,:])))
# Loop through each gridbox
for ln in range(len(TheDataArr[0,0,:])):
for lt in range(len(TheDataArr[0,:,0])):
ptst = 0 # counter for pentads
pted = 0
yrst = 0 # pointer for start of year
yred = 0 # pointer for end of year
# Loop through each year
for y in np.arange(TheStYr, TheEdYr+1):
yrdays = (dt.date(y+1,1,1).toordinal() - dt.date(y,1,1).toordinal())
# these don't change when yred changes so do not need to be copies
yrst = yred
yred = yrst + yrdays
ptst = pted
pted = pted + 73
# Easy if its not a leap year
if (yrdays == 365):
tmpdata = np.reshape(TheDataArr[yrst:yred,lt,ln],(73,5))
# get means over each 5 day period
PentadData[ptst:pted,lt,ln] = np.mean(tmpdata,1)
# if (y == TheStYr):
#
# print('Test year pentads: ')
# pdb.set_trace()
elif (yrdays == 366): # a LEAP year
tmpdata = TheDataArr[yrst:yred,lt,ln]
# get means over each 5 day period
PentadData[ptst:ptst+11,lt,ln] = np.mean(np.reshape(tmpdata[0:55],(11,5)),1)
PentadData[ptst+11,lt,ln] = np.mean(tmpdata[55:61])
PentadData[ptst+12:pted,lt,ln] = np.mean(np.reshape(tmpdata[61:366],(61,5)),1)
#print('Test LEAP year pentads: ',y,yrdays)
#pdb.set_trace()
return PentadData
#************************************************************
# CreateAnoms
def CreateAnoms(TheClimSt,TheClimEd,TheStYr,TheEdYr,TheInData,TheMDI):
'''
This function takes any grid, any var, any time resolution and computes climatologies/stdevs over given period and then anomalies
INPUTS:
TheClimSt - interger start year of climatology Always Jan start
TheClimEd - integer end year of climatology Always Dec end
TheStYr - integer start year of data to find climatology
TheEdYr - integer end year of data to find climatology
TheInData[:,:,:] - time, lat, lon array of actual values
OUTPUTS:
AllAnomsArr[:,:,:] - time, lat, lon array of anomalies
ClimsArr[:,:,:] - time, lat, lon array of climatologies
StDevsArr[:,:,:] - time, lat, lon array of stdeviations
'''
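# (Added note) In short, for each gridbox and each calendar step c (month, pentad
# or day of year):
#   clim[c] = mean over ClimSt..ClimEd of x[year,c]
#   stdev[c] = std over ClimSt..ClimEd of x[year,c]
#   anom[year,c] = x[year,c] - clim[c]
# For dailies, Feb 29th is excluded from the climatology and anomalised against
# the Feb 28th climatology, as implemented below.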
# Set up for time
nyrs = TheEdYr - TheStYr + 1
ntims = len(TheInData[:,0,0])
if (ntims/nyrs == 12): # rough check - should be 12 obvs
# Monthly data
nclims = 12
elif (ntims/nyrs == 73):
# Pentad data
nclims = 73
elif (ntims/nyrs > 300): # rough check as leap yrs make it screwy
# Daily data
nclims = 365 # Feb 29th will just be ignored to create a clim, use Feb 28th for anomaly
# first create empty arrays
AllAnomsArr = np.empty_like(TheInData)
AllAnomsArr.fill(mdi)
ClimsArr = np.copy(AllAnomsArr[0:nclims,:,:])
StDevsArr = np.copy(AllAnomsArr[0:nclims,:,:])
# loop through gridboxes
for lt in range(len(TheInData[0,:,0])):
for ln in range(len(TheInData[0,0,:])):
# if monthly or pentad then easy peasy
if (nclims < 300):
# pull out gridbox and reform to years by nclims (months or pentads)
SingleSeries = np.reshape(TheInData[:,lt,ln],(nyrs,nclims)) # nyrs rows, nclims columns
ClimsArr[:,lt,ln] = np.mean(SingleSeries[TheClimSt-TheStYr:(TheClimEd-TheStYr)+1,:],0)
StDevsArr[:,lt,ln] = np.std(SingleSeries[TheClimSt-TheStYr:(TheClimEd-TheStYr)+1,:],0)
SingleSeriesAnoms = SingleSeries - ClimsArr[:,lt,ln]
#print('Test pentad and monthlyanomalies, clims and stdevs')
#pdb.set_trace()
AllAnomsArr[:,lt,ln] = np.copy(np.reshape(SingleSeriesAnoms,ntims))
# dailies more tricky
else:
# pull out gridbox and reform to years by nclims (months or pentads)
TmpSeries = TheInData[:,lt,ln]
# Make an nyrs (rows) by 366 days array
SingleSeries = np.empty((nyrs,366),dtype = float)
SingleSeries.fill(TheMDI)
# Now populate year by year
# Non leap years have a missing 29th Feb
StDy = 0
EdDy = 0
for y in range(nyrs):
# Is this a leap year?
TotalDays = dt.date(y+TheStYr+1,1,1).toordinal() - dt.date(y+TheStYr,1,1).toordinal()
#print('Getting daily clims: ',y+TheStYr, TotalDays)
StDy = EdDy
EdDy = StDy + TotalDays
if (TotalDays == 366):
SingleSeries[y,:] = TmpSeries[StDy:EdDy]
else:
SingleSeries[y,0:59] = TmpSeries[StDy:StDy+59]
SingleSeries[y,60:] = TmpSeries[StDy+59:EdDy]
# Calculate clims and stdevs for each of the 365 days ignoring Feb 29th
ClimsArr[0:59,lt,ln] = np.mean(SingleSeries[TheClimSt-TheStYr:(TheClimEd-TheStYr)+1,0:59],0)
StDevsArr[0:59,lt,ln] = np.std(SingleSeries[TheClimSt-TheStYr:(TheClimEd-TheStYr)+1,0:59],0)
ClimsArr[59:,lt,ln] = np.mean(SingleSeries[TheClimSt-TheStYr:(TheClimEd-TheStYr)+1,60:],0)
StDevsArr[59:,lt,ln] = np.std(SingleSeries[TheClimSt-TheStYr:(TheClimEd-TheStYr)+1,60:],0)
#print('Got the clims, stdevs')
#pdb.set_trace()
# Now repopulate year by year
# Non leap years have a missing 29th Feb
StDy = 0
EdDy = 0
SingleSeriesAnoms = np.empty(ntims,dtype = float)
for y in range(nyrs):
# Is this a leap year?
TotalDays = dt.date(y+TheStYr+1,1,1).toordinal() - dt.date(y+TheStYr,1,1).toordinal()
StDy = EdDy
EdDy = StDy + TotalDays
#print('Getting daily anoms: ',y+TheStYr, TotalDays,StDy,EdDy)
if (TotalDays == 366):
SingleSeriesAnoms[StDy:StDy+59] = SingleSeries[y,0:59] - ClimsArr[0:59,lt,ln]
# Use Feb 28th as climatology for 29th
SingleSeriesAnoms[StDy+59] = SingleSeries[y,59] - ClimsArr[58,lt,ln]
SingleSeriesAnoms[StDy+60:EdDy] = SingleSeries[y,60:] - ClimsArr[59:,lt,ln]
else:
SingleSeriesAnoms[StDy:StDy+59] = SingleSeries[y,0:59] - ClimsArr[0:59,lt,ln]
SingleSeriesAnoms[StDy+59:EdDy] = SingleSeries[y,60:] - ClimsArr[59:,lt,ln]
#print('Test daily anomalies, clims and stdevs',y+TheStYr, TotalDays)
#pdb.set_trace()
AllAnomsArr[:,lt,ln] = np.copy(SingleSeriesAnoms)
return AllAnomsArr, ClimsArr, StDevsArr
#************************************************************
# WriteNetCDF
def WriteNetCDF(Filename,TheOutputTime, TheOutputGrid, TheOutputVar, TheFullArray, TheFullArrayAnoms, TheLandArrayAnoms, TheOceanArrayAnoms, TheClimsArray, TheStDevsArray,
TheStYr, TheEdYr, TheClimStart, TheClimEnd, TheName, TheStandardName, TheLongName, TheUnit, TheMDI):
'''
This function writes out a NetCDF 4 file
NOTE:
All 1by1 outputs will have lats 89.5 to -89.5 and lons -179.5 to 179.5
All 5by5 outputs will have lats -87.5 to 87.5 and lons -177.5 to 177.5
INPUTS:
FileOut - string file name
TheOutputTime - string monthly or pentad or daily
TheOutputGrid - string 1by1 or 5by5
TheOutputVar - string lower case variable name
TheFullArray[:,:,:] - time, lat, lon array of actual values
TheFullArrayAnoms[:,:,:] - time, lat, lon array of anomalies
TheLandArrayAnoms[:,:,:] - time, lat, lon array of land anomalies
TheOceanArrayAnoms[:,:,:] - time, lat, lon array of ocean anomalies
TheClimsArray[:,:,:] - time(12 or 73), lat, lon array of climatology
TheStDevsArray[:,:,:] - time(12 or 73, lat, lon array of st devs
TheStYr - integer start year, assumes Jan start
TheEdYr - integer end year, assumes Dec end
TheClimStart - integer start of clim, Jan start
TheClimEnd - integer end of clim, Dec end
TheName - string short name of var q2m
TheStandardName - string standard name of variable
TheUnit - string unit of variable
TheMDI - missing data indicator
OUTPUTS:
None
'''
# Sort out times in days since 1979-01-01
# Sort out climatology time
if (TheOutputTime == 'monthly'):
nClims = 12
TimPoints = MakeDaysSince(TheStYr,1,TheEdYr,12,'month') # use 'day','month','year'
elif (TheOutputTime == 'pentad'):
nClims = 73
TimPoints = MakeDaysSince(TheStYr,1,TheEdYr,73,'pentad') # use 'day','month','year'
elif (TheOutputTime == 'daily'):
nClims = 365
TimPoints = MakeDaysSince(TheStYr,1,TheEdYr,365,'day') # will work even if it's a leap year
nTims = len(TimPoints)
# Sort out Lats, Lons and LatBounds and LonBounds
if (TheOutputGrid == '1by1'):
LatList = np.flip(np.arange(180)-89.5)
LonList = np.arange(360)-179.5
LatBounds = np.empty((len(LatList),2),dtype='float')
LonBounds = np.empty((len(LonList),2),dtype='float')
LatBounds[:,0] = LatList + ((LatList[0]-LatList[1])/2.)
LatBounds[:,1] = LatList - ((LatList[0]-LatList[1])/2.)
LonBounds[:,0] = LonList - ((LonList[1]-LonList[0])/2.)
LonBounds[:,1] = LonList + ((LonList[1]-LonList[0])/2.)
nlatsOut = 180
nlonsOut = 360
elif (TheOutputGrid == '5by5'):
LatList = (np.arange(36)*5)-87.5
LonList = (np.arange(72)*5)-177.5
LatBounds = np.empty((len(LatList),2),dtype='float')
LonBounds = np.empty((len(LonList),2),dtype='float')
LatBounds[:,0] = LatList - ((LatList[1]-LatList[0])/2.)
LatBounds[:,1] = LatList + ((LatList[1]-LatList[0])/2.)
LonBounds[:,0] = LonList - ((LonList[1]-LonList[0])/2.)
LonBounds[:,1] = LonList + ((LonList[1]-LonList[0])/2.)
nlatsOut = 36
nlonsOut = 72
# No need to convert float data using given scale_factor and add_offset to integers - done within writing program (packV = (V-offset)/scale)
# Not sure what this does to float precision though...
# Create a new netCDF file - have tried zlib=True,least_significant_digit=3 (and 1) - no difference
ncfw = Dataset(Filename,'w',format='NETCDF4_CLASSIC') # need to try NETCDF4 and also play with compression but test this first
# Set up the dimension names and quantities
ncfw.createDimension('time',nTims)
ncfw.createDimension('latitude',nlatsOut)
ncfw.createDimension('longitude',nlonsOut)
# If there are climatologies to be written then also set up clim dimension
if (len(np.shape(TheClimsArray)) > 1):
if (TheOutputTime == 'monthly'):
ncfw.createDimension('month_time',nClims)
elif (TheOutputTime == 'pentad'):
ncfw.createDimension('pentad_time',nClims)
elif (TheOutputTime == 'daily'):
ncfw.createDimension('day_time',nClims)
# Go through each dimension and set up the variable and attributes for that dimension if needed
MyVarT = ncfw.createVariable('time','f4',('time',))
MyVarT.standard_name = 'time'
MyVarT.long_name = 'time'
MyVarT.units = 'days since 1979-1-1 00:00:00'
MyVarT.start_year = str(TheStYr)
MyVarT.end_year = str(TheEdYr)
MyVarT[:] = TimPoints
MyVarLt = ncfw.createVariable('latitude','f4',('latitude',))
MyVarLt.standard_name = 'latitude'
MyVarLt.long_name = 'gridbox centre latitude'
MyVarLt.units = 'degrees_north'
MyVarLt[:] = LatList
MyVarLn = ncfw.createVariable('longitude','f4',('longitude',))
MyVarLn.standard_name = 'longitude'
MyVarLn.long_name = 'gridbox centre longitude'
MyVarLn.units = 'degrees_east'
MyVarLn[:] = LonList
# If there are climatologies to be written then also set up clim dimension
if (len(np.shape(TheClimsArray)) > 1):
if (TheOutputTime == 'monthly'):
MyVarM = ncfw.createVariable('month_time','i4',('month_time',))
MyVarM.long_name = 'months of the year'
MyVarM.units = 'months'
MyVarM[:] = np.arange(nClims)
elif (TheOutputTime == 'pentad'):
MyVarM = ncfw.createVariable('pentad_time','i4',('pentad_time',))
MyVarM.long_name = 'pentads of the year'
MyVarM.units = 'pentads'
MyVarM[:] = np.arange(nClims)
elif (TheOutputTime == 'daily'):
MyVarM = ncfw.createVariable('day_time','i4',('day_time',))
MyVarM.long_name = 'days of the year'
MyVarM.units = 'days'
MyVarM[:] = np.arange(nClims)
# Go through each variable and set up the variable attributes
# I've added zlib=True so that the file is in compressed form
# I've added least_significant_digit=4 because we do not need to store information beyond 4 significant figures.
MyVarD = ncfw.createVariable(TheName,'f4',('time','latitude','longitude',),fill_value = TheMDI,zlib=True,least_significant_digit=4)
MyVarD.standard_name = TheStandardName
MyVarD.long_name = TheLongName
MyVarD.units = TheUnit
MyVarD.valid_min = np.min(TheFullArray)
MyVarD.valid_max = np.max(TheFullArray)
MyVarD.missing_value = TheMDI
# Provide the data to the variable - depending on how many dimensions there are
MyVarD[:,:,:] = TheFullArray[:,:,:]
# If there are climatologies etc to be written then also set them up
if (len(np.shape(TheClimsArray)) > 1):
MyVarA = ncfw.createVariable(TheName+'_anoms','f4',('time','latitude','longitude',),fill_value = TheMDI,zlib=True,least_significant_digit=4)
MyVarA.standard_name = TheStandardName+'_anomalies'
MyVarA.long_name = TheLongName+' anomalies from 1981-2010'
MyVarA.units = TheUnit
MyVarA.valid_min = np.min(TheFullArrayAnoms)
MyVarA.valid_max = np.max(TheFullArrayAnoms)
MyVarA.missing_value = TheMDI
# Provide the data to the variable - depending on how many dimensions there are
MyVarA[:,:,:] = TheFullArrayAnoms[:,:,:]
MyVarAL = ncfw.createVariable(TheName+'_anoms_land','f4',('time','latitude','longitude',),fill_value = TheMDI,zlib=True,least_significant_digit=4)
MyVarAL.standard_name = TheStandardName+'_anomalies'
MyVarAL.long_name = TheLongName+' land anomalies from 1981-2010'
MyVarAL.units = TheUnit
MyVarAL.valid_min = np.min(TheLandArrayAnoms)
MyVarAL.valid_max = np.max(TheLandArrayAnoms)
MyVarAL.missing_value = TheMDI
# Provide the data to the variable - depending on how many dimensions there are
MyVarAL[:,:,:] = TheLandArrayAnoms[:,:,:]
MyVarAO = ncfw.createVariable(TheName+'_anoms_ocean','f4',('time','latitude','longitude',),fill_value = TheMDI,zlib=True,least_significant_digit=4)
MyVarAO.standard_name = TheStandardName+'_anomalies'
MyVarAO.long_name = TheLongName+' ocean anomalies from 1981-2010'
MyVarAO.units = TheUnit
MyVarAO.valid_min = np.min(TheOceanArrayAnoms)
MyVarAO.valid_max = np.max(TheOceanArrayAnoms)
MyVarAO.missing_value = TheMDI
# Provide the data to the variable - depending on how many dimensions there are
MyVarAO[:,:,:] = TheOceanArrayAnoms[:,:,:]
if (TheOutputTime == 'monthly'):
MyVarC = ncfw.createVariable(TheName+'_clims','f4',('month_time','latitude','longitude',),fill_value = TheMDI,zlib=True,least_significant_digit=4)
elif (TheOutputTime == 'pentad'):
MyVarC = ncfw.createVariable(TheName+'_clims','f4',('pentad_time','latitude','longitude',),fill_value = TheMDI,zlib=True,least_significant_digit=4)
elif (TheOutputTime == 'daily'):
MyVarC = ncfw.createVariable(TheName+'_clims','f4',('day_time','latitude','longitude',),fill_value = TheMDI,zlib=True,least_significant_digit=4)
MyVarC.standard_name = TheStandardName+'_climatologies'
MyVarC.long_name = TheLongName+' climatology over 1981-2010'
MyVarC.units = TheUnit
MyVarC.valid_min = np.min(TheClimsArray)
MyVarC.valid_max = np.max(TheClimsArray)
MyVarC.missing_value = TheMDI
# Provide the data to the variable - depending on how many dimensions there are
MyVarC[:,:,:] = TheClimsArray[:,:,:]
if (TheOutputTime == 'monthly'):
MyVarS = ncfw.createVariable(TheName+'_stdevs','f4',('month_time','latitude','longitude',),fill_value = TheMDI,zlib=True,least_significant_digit=4)
elif (TheOutputTime == 'pentad'):
MyVarS = ncfw.createVariable(TheName+'_stdevs','f4',('pentad_time','latitude','longitude',),fill_value = TheMDI,zlib=True,least_significant_digit=4)
elif (TheOutputTime == 'daily'):
MyVarS = ncfw.createVariable(TheName+'_stdevs','f4',('day_time','latitude','longitude',),fill_value = TheMDI,zlib=True,least_significant_digit=4)
MyVarS.standard_name = TheStandardName+'_climatological_standard_deviations'
MyVarS.long_name = TheLongName+' climatological standard deviation over 1981-2010'
MyVarS.units = TheUnit
MyVarS.valid_min = np.min(TheStDevsArray)
MyVarS.valid_max = np.max(TheStDevsArray)
MyVarS.missing_value = TheMDI
# Provide the data to the variable - depending on how many dimensions there are
MyVarS[:,:,:] = TheStDevsArray[:,:,:]
ncfw.close()
return
#************************************************************
# MAIN
#************************************************************
if __name__ == "__main__":
import argparse
# set up keyword arguments
parser = argparse.ArgumentParser()
parser.add_argument('--var', dest='var', action='store', default='q', type=str,
help='Variable [q]')
## Set up output variables - for q, e, RH, dpd, Tw we will need to read in multiple input files
#Var = 'q' # this can be 't','td','q','rh','e','dpd','tw','ws','slp','sp','uv','sst'
parser.add_argument('--freq', dest='freq', action='store', default='monthly', type=str,
help='Time Frequency [monthly]')
args = parser.parse_args()
print(args)
Var = args.var
Freq = args.freq
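# (Added note) Example invocations using the flags defined above - the flag names
# are as defined, the chosen values are just illustrative:
#   python MergeAggRegridERA5.py --var q --freq monthly
#   python MergeAggRegridERA5.py --var rh --freq pentad
# --var should be one of the VarDict keys (q, rh, e, t, tw, td, dpd, p, slp) and
# --freq is either 'monthly' (the default) or 'pentad'.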
InputERA = '_daily_'+VarDict[Var][1]+'.nc'
# only needed if we're updating...
OldERAStrPT = workingdir+'/OTHERDATA/'+NameDict[Var]+'_pentad_1by1_'+ThisRean+'_1979'+str(edyr-1)+'.nc'
OldERAStrMN = workingdir+'/OTHERDATA/'+NameDict[Var]+'_monthly_1by1_'+ThisRean+'_1979'+str(edyr-1)+'.nc'
if (Freq == 'pentad'):
# NewERAStrD1 = workingdir+'/OTHERDATA/'+NameDict[Var]+'_daily_1by1_'+ThisRean+'_1979'+updateyyyy+'.nc'
NewERAStrP1 = workingdir+'/OTHERDATA/'+NameDict[Var]+'_pentad_1by1_'+ThisRean+'_1979'+updateyyyy+'.nc'
else:
NewERAStrM1 = workingdir+'/OTHERDATA/'+NameDict[Var]+'_monthly_1by1_'+ThisRean+'_1979'+updateyyyy+'.nc'
NewERAStrM5 = workingdir+'/OTHERDATA/'+NameDict[Var]+'_monthly_5by5_'+ThisRean+'_1979'+updateyyyy+'.nc'
# What are we working on?
print('Working variable: ',Var)
print('Working frequency: ',Freq)
print('Type of run: ',ThisProg, styr, edyr)
print('Reanalysis: ',ThisRean)
# If its an update read in old pentad fields and old monthly fields to append to,
# then read in year of daily data, process to pentad and monthly and then append
# If its a total build then read in year by year, process to pentads and monthlies and append
if (Freq == 'pentad'):
if (ThisProg == 'Build'):
PentadDataArr = np.array(()) # This will be set up on first read in - this has len() of 0!!!
elif (ThisProg == 'Update'):
# Now read in the old data to start array to append to
PentadDataArr,Latitudes,Longitudes = GetGrid4(OldERAStrPT,[NameDict[Var]],['latitude'],['longitude'])
else:
if (ThisProg == 'Build'):
MonthDataArr = np.array(()) # This will be set up on first read in - this has len() of 0!!!
elif (ThisProg == 'Update'):
# Now read in the old data to start array to append to
MonthDataArr,Latitudes,Longitudes = GetGrid4(OldERAStrMN,[NameDict[Var]],['latitude'],['longitude'])
# Loop through the years
for years in range(styr,edyr+1):
# Get actual year we're working on
print('Working Year: ',years)
# read in a year of data
# First make up the full daily field for one year - build or update
DailyData = GetDaily(NameDict[Var],workingdir+'/OTHERDATA/ERA5/',InputERA,years)
print('Data all read in')
# TOO TIME/MEMORY HUNGRY
# ## Now make daily anoms, clims, stdevs
# DailyAnoms,DailyClims,DailyStdevs = CreateAnoms(ClimStart,ClimEnd,styr,edyr,DailyData,mdi)
#
# print('Created daily anomalies')
#
# # Now get land and sea masked pentad anomalies, actuals, clims and stdevs
# DailyAnomsLand, DailyAnomsSea = MaskLandS(DailyAnoms,LandMask,mdi)
# #DailyLand, DailySea = MaskLandS(DailyData,LandMask,mdi)
# #DailyClimsLand, DailyClimsSea = MaskLandS(DailyClims,LandMask,mdi)
# #DailyStdevsLand, DailyStdevsSea = MaskLandS(DailyStdevs,LandMask,mdi)
#
# print('Created daily land and sea masks')
#
# # Now save to nc
# WriteNetCDF(NewERAStrD1,'daily', '1by1', Var, DailyData, DailyAnoms, DailyAnomsLand, DailyAnomsSea, DailyClims, DailyStdevs,
# styr, edyr, ClimStart, ClimEnd, NameDict[Var], StandardNameDict[Var], LongNameDict[Var], UnitDict[Var],mdi)
# print('Written out dailies')
# # clean up
# DailyAnoms = 0
# DailyAnomsLand = 0
# DailyAnomsSea = 0
# DailyClims = 0
# DailyStdev = 0
if (Freq == 'pentad'):
# Now make pentads
PentadData = MakePentads(DailyData,years,years)
# clean up
DailyData = 0
print('Created pentads')
if (len(PentadDataArr) == 0):
# this is the start of the build
PentadDataArr = np.copy(PentadData)
else:
PentadDataArr = np.append(PentadDataArr,np.copy(PentadData),0)
else:
# Now make monthlies
MonthData = MakeMonths(DailyData,years,years)
# clean up
DailyData = 0
print('Created months')
#pdb.set_trace()
if (len(MonthDataArr) == 0):
# this is the start of the build
MonthDataArr = np.copy(MonthData)
else:
MonthDataArr = np.append(MonthDataArr,np.copy(MonthData),0)
if (Freq == 'pentad'):
# Now make pentad anoms, clims, stdevs
PentadAnoms,PentadClims,PentadStdevs = CreateAnoms(ClimStart,ClimEnd,actstyr,edyr,PentadDataArr,mdi)
print('Created pentad anomalies')
# pdb.set_trace()
# Now get land and sea masked pentad anomalies, actuals, clims and stdevs
PentadAnomsLand, PentadAnomsSea = MaskLandS(PentadAnoms,LandMask,mdi)
#PentadLand, PentadSea = MaskLandS(PentadData,LandMask,mdi)
#PentadClimsLand, PentadClimsSea = MaskLandS(PentadClims,LandMask,mdi)
#PentadStdevsLand, PentadStdevsSea = MaskLandS(PentadStdevs,LandMask,mdi)
print('Created pentad land and sea masks')
# Now save to nc
WriteNetCDF(NewERAStrP1,'pentad', '1by1', Var, PentadDataArr, PentadAnoms, PentadAnomsLand, PentadAnomsSea, PentadClims, PentadStdevs,
actstyr, edyr, ClimStart, ClimEnd, NameDict[Var], StandardNameDict[Var], LongNameDict[Var], UnitDict[Var],mdi)
# clean up
PentadAnoms = 0
PentadAnomsLand = 0
PentadAnomsSea = 0
PentadClims = 0
PentadStdev = 0
print('Written out pentads')
else:
# Now make monthly anoms, clims, stdevs
MonthAnoms,MonthClims,MonthStdevs = CreateAnoms(ClimStart,ClimEnd,actstyr,edyr,MonthDataArr,mdi)
print('Created month anomalies')
# Now get land and sea masked monthly anomalies, actuals, clims and stdevs
MonthAnomsLand, MonthAnomsSea = MaskLandS(MonthAnoms,LandMask,mdi)
#MonthLand, MonthSea = MaskLandS(MonthData,LandMask,mdi)
#MonthClimsLand, MonthClimsSea = MaskLandS(MonthClims,LandMask,mdi)
#MonthStdevsLand, MonthStdevsSea = MaskLandS(MonthStdevs,LandMask,mdi)
print('Created month land and sea masks')
# Now save to nc
WriteNetCDF(NewERAStrM1,'monthly', '1by1', Var, MonthDataArr, MonthAnoms, MonthAnomsLand, MonthAnomsSea, MonthClims, MonthStdevs,
actstyr, edyr, ClimStart, ClimEnd, NameDict[Var], StandardNameDict[Var], LongNameDict[Var], UnitDict[Var],mdi)
print('Written out months')
# Now make monthly 5by5s - regrid
Month5by5Data = RegridField(MonthDataArr,mdi)
MonthDataArr = 0
print('Created month 5by5s')
#pdb.set_trace()
Month5by5Anoms = RegridField(MonthAnoms,mdi)
MonthAnoms = 0
Month5by5Clims = RegridField(MonthClims,mdi)
MonthClims = 0
Month5by5Stdevs = RegridField(MonthStdevs,mdi)
MonthStdevs = 0
#Month5by5Land = RegridField(MonthLand,mdi)
Month5by5AnomsLand = RegridField(MonthAnomsLand,mdi)
MonthAnomsLand = 0
#Month5by5ClimsLand = RegridField(MonthClimsLand,mdi)
#Month5by5StdevsLand = RegridField(MonthStdevsLand,mdi)
#Month5by5Sea = RegridField(MonthDataSea,mdi)
Month5by5AnomsSea = RegridField(MonthAnomsSea,mdi)
MonthAnomsSea = 0
#Month5by5ClimsSea = RegridField(MonthClimsSea,mdi)
#Month5by5StdevsSea = RegridField(MonthStdevsSea,mdi)
# Now save to nc
WriteNetCDF(NewERAStrM5,'monthly', '5by5', Var, Month5by5Data, Month5by5Anoms, Month5by5AnomsLand, Month5by5AnomsSea, Month5by5Clims, Month5by5Stdevs,
actstyr, edyr, ClimStart, ClimEnd, NameDict[Var], StandardNameDict[Var], LongNameDict[Var], UnitDict[Var],mdi)
print('Written out month 5by5s')
print('And we are done!')
| cc0-1.0 |
DavidTingley/ephys-processing-pipeline | installation/klustaviewa-0.3.0/klustaviewa/views/tests/test_featureview.py | 2 | 1719 | """Unit tests for feature view."""
# -----------------------------------------------------------------------------
# Imports
# -----------------------------------------------------------------------------
import os
import numpy as np
import numpy.random as rnd
import pandas as pd
from klustaviewa.views.tests.mock_data import (setup, teardown,
nspikes, nclusters, nsamples, nchannels, fetdim)
from kwiklib.dataio import KlustersLoader
from kwiklib.dataio.selection import select
from kwiklib.dataio.tools import check_dtype, check_shape
from klustaviewa import USERPREF
from klustaviewa.views import FeatureView
from klustaviewa.views.tests.utils import show_view, get_data
# -----------------------------------------------------------------------------
# Tests
# -----------------------------------------------------------------------------
def test_featureview():
keys = ('features,features_background,masks,clusters,clusters_selected,'
'spiketimes,'
'cluster_colors,fetdim,nchannels,nextrafet,duration,freq').split(',')
data = get_data()
kwargs = {k: data[k] for k in keys}
kwargs['operators'] = [
lambda self: self.view.toggle_mask(),
lambda self: self.view.set_wizard_pair((2, 1), (3, 2)),
lambda self: self.view.set_wizard_pair(None, (3, 2)),
lambda self: self.view.set_wizard_pair((3, 2), None),
lambda self: self.view.set_wizard_pair(None, None),
lambda self: (self.close()
if USERPREF['test_auto_close'] != False else None),
]
# Show the view.
show_view(FeatureView, **kwargs)
| gpl-3.0 |
roshantha9/AbstractManycoreSim | src/RunSim_Exp_HEVCTile_Mapping_highCCR.py | 1 | 21862 | import sys, os, csv, pprint, math
import argparse
import numpy as np
import random
import shutil
import time
import json
import datetime, time
## uncomment when running under CLI only version ##
import matplotlib
matplotlib.use('Agg')
from libProcessingElement.LocalScheduler import LocalRRScheduler, \
LocalEDFScheduler, \
LocalMPEG2FrameEDFScheduler, \
LocalMPEG2FrameListScheduler, \
LocalMPEG2FramePriorityScheduler, \
LocalMPEG2FramePriorityScheduler_WithDepCheck, \
LocalHEVCFramePriorityScheduler_WithDepCheck, \
LocalHEVCTilePriorityScheduler_WithDepCheck
from libResourceManager.RMTypes import RMTypes
from libProcessingElement.CPUTypes import CPUTypes
from libResourceManager.Mapper.MapperTypes import MapperTypes
from libTaskDispatcher.TDTypes import TDTypes
from libResourceManager.AdmissionControllerOptions import AdmissionControllerOptions
from libMappingAndScheduling.SemiDynamic.TaskMappingSchemes import TaskMappingSchemes
from libMappingAndScheduling.SemiDynamic.TaskSemiDynamicPrioritySchemes import TaskSemiDynamicPrioritySchemes
from libMappingAndScheduling.SemiDynamic.TaskMappingAndPriAssCombinedSchemes import TaskMappingAndPriAssCombinedSchemes
from libMappingAndScheduling.SemiDynamic.TaskTileMappingAndPriAssCombinedSchemes import TaskTileMappingAndPriAssCombinedSchemes
from libMappingAndScheduling.FullyDynamic.TaskMappingSchemesFullyDyn import TaskMappingSchemesFullyDyn
from libApplicationModel.Task import TaskModel
from util_scripts.gen_res_list import get_res_list
from SimParams import SimParams
import Multicore_MPEG_Model as MMMSim
import libApplicationModel.HEVCWorkloadParams as HEVCWLP
EXP_DATADIR = "experiment_data/hevc_mapping_highccr_test/"
NOC_SIZE = [(3,3), (5,5), (7,7), (9,9), (10,10)]
# name the report filenames
global_tm_fname = "_timeline.png"
global_vs_bs_fname = "_vsbs.js"
global_util_fname = "_util.js"
global_wf_res_fname = "_wfressumm.js"
global_gops_opbuff_fname = "_gopsopbuffsumm.js"
global_rmtbl_dt_fname = "_rmtbldt.js"
global_ibuff_fname = "_ibuff.js"
global_obuff_fname = "_obuff.js"
global_nodetqs_fname = "_nodetqs.js"
global_rmtaskrelease_fname = "_rmtaskrel.js"
global_mappingandpriass_fname = "_mappingandpriass.js"
global_flowscompleted_fname = "_flwcompleted.js"
global_flowscompletedshort_fname = "_flwcompletedshort.js"
global_nodetaskexectime_fname = "_nodetaskexectime.js"
global_schedtestresults_fname = "_schedtestresults.js"
global_utilvsschedresults_fname = "_utilvsschedresults.js"
global_rmtaskmappingtable_fname = "_rmtaskmappingtable.js"
global_rmvolatiletaskmappingtable_fname = "_rmvolatiletaskmappingtable.js"
global_processedctus_fname = "_processedctus.js"
global_taskscompleted_fname = "_taskscompleted.js"
global_mapperexecoverhead_fname = "_mapperexecoverhead.js"
global_smartmmcid_fname = "_smartmmcid.js"
global_jobccrinfo_fname = "_jobccrinfo.js"
global_linkusagereport_fname = "_linkusagereport.js"
global_ppmaperoptstageinfo_fname = "_ppmaperoptstageinfo.js"
# do we use ms signalling or not
def _get_feedback_status(cmbmppri_type):
if cmbmppri_type in [TaskTileMappingAndPriAssCombinedSchemes.TASKTILEMAPPINGANDPRIASSCOMBINED_PRLOWRESFIRST_LOWUTIL_WITHMONITORING_AVGCC_V1,
TaskTileMappingAndPriAssCombinedSchemes.TASKTILEMAPPINGANDPRIASSCOMBINED_PRLOWRESFIRST_MOSTSLACK_WITHMONITORING_AVGCC_V1,
TaskTileMappingAndPriAssCombinedSchemes.TASKTILEMAPPINGANDPRIASSCOMBINED_PRLOWRESFIRST_CLUSTLS_MOSTSLACK_WITHMONITORING_AVGCC]:
return True
else:
return False
###################################################################################################
# SCENARIO based runsim for different types of AC/mappers/CCR/noc size
###################################################################################################
def runSim_TileMapping(
forced_seed = None,
cmbmppri_type=None,
wl_config=None, # we assume 1 vid per wf
):
seed = forced_seed
print "SEED === " + str(seed)
random.seed(seed)
np.random.seed(seed)
# get resolution list
res_list = get_res_list(wl_config)
# fixed params
SimParams.SIM_RUNTIME = 10000
SimParams.HEVC_DUMP_FRAME_DATAFILE = False
SimParams.HEVC_LOAD_FRAME_DATAFILE = False
SimParams.HEVC_FRAME_GENRAND_SEED = seed
SimParams.HEVC_TILELEVEL_SPLITTING_ENABLE = True
SimParams.LOCAL_SCHEDULER_TYPE = LocalHEVCTilePriorityScheduler_WithDepCheck()
SimParams.SIM_ENTITY_RESOURCEMANAGER_CLASS = RMTypes.OPENLOOP
SimParams.SIM_ENTITY_CPUNODE_CLASS = CPUTypes.OPENLOOP_HEVC_TILE_LEVEL
SimParams.TASK_MODEL = TaskModel.TASK_MODEL_HEVC_TILE_LEVEL
SimParams.SIM_ENTITY_MAPPER_CLASS = MapperTypes.OPENLOOP_WITH_HEVCTILE
SimParams.SIM_ENTITY_TASKDISPATCHER_CLASS = TDTypes.OPENLOOP_WITH_HEVCTILE
SimParams.MS_SIGNALLING_NOTIFY_FLOW_COMPLETE_ENABLE = False
SimParams.MS_SIGNALLING_NOTIFY_TASK_COMPLETE_ENABLE = False
SimParams.RESOURCEMANAGER_USE_VOLATILE_TMTBL = True
SimParams.MAPPING_PREMAPPING_ENABLED = True
SimParams.MMC_SMART_NODE_SELECTION_ENABLE = True
SimParams.MMC_SMART_NODE_SELECTION_TYPE = 0 # MMCP-Dist
SimParams.MMC_ENABLE_DATATRANSMISSION_MODELLING = True
SimParams.HEVC_GOPGEN_USEPROBABILISTIC_MODEL = True
SimParams.USE_VIDSTRM_SPECIFIC_FRAMERATE = True
SimParams.NOC_W = 8
SimParams.NOC_H = 8
SimParams.NUM_NODES = (SimParams.NOC_W * SimParams.NOC_H)
# SimParams.DVB_RESOLUTIONS_FIXED = [
# # (3840,2160),
# (2560,1440),
# # (1920,1080),
# # (1280,720),
# # (854,480),
# # (640,360),
# (512,288),
# ]*1
# # #
SimParams.DVB_RESOLUTIONS_FIXED = res_list
SimParams.NUM_WORKFLOWS = len(SimParams.DVB_RESOLUTIONS_FIXED)
SimParams.NUM_INPUTBUFFERS = SimParams.NUM_WORKFLOWS
SimParams.DVB_RESOLUTIONS_SELECTED_RANDOM = False
SimParams.WFGEN_MIN_GOPS_PER_VID = 5
SimParams.WFGEN_MAX_GOPS_PER_VID = 5
SimParams.WFGEN_INITIAL_VID_GAP_MIN = 0.0
SimParams.WFGEN_INITIAL_VID_GAP_MAX = 0.05
CCR_TYPE = "lowcc_normal_mem_considered" # {normal, normal_mem_considered, lowcc_normal_mem_considered}
### for normal CC ###
if CCR_TYPE == "normal":
cc_scale_down = 1.66 # this makes the CCR go High
SimParams.CLSTR_TILE_PARAM_CCR_RANGES_LOW = 0.11 # less than this
SimParams.CLSTR_TILE_PARAM_CCR_RANGES_MED = (0.11, 0.14) # between these values
SimParams.CLSTR_TILE_PARAM_CCR_RANGES_HIGH = 0.14 # higher than this
        SimParams.CLSTR_TILE_PARAM_KAUSHIKS_ALGO_COMMS_SCALEUP_FACTOR = 1.0 # we use a scale factor because the comm. cost is always lower than the comp. cost
SimParams.CLSTR_TILE_PARAM_BGROUP_CCR_RANGES_LOW = 0.11 # less than this
SimParams.CLSTR_TILE_PARAM_BGROUP_CCR_RANGES_MED = (0.11, 0.14) # between these values
SimParams.CLSTR_TILE_PARAM_BGROUP_CCR_RANGES_HIGH = 0.14 # higher than this
SimParams.CLSTR_TILE_PARAM_BGROUP_NGT_HOPS = (4,1,1) # NGT hop count (low, med, high ccr ranges)
### for normal low CC - @0.60 ###
elif CCR_TYPE == "normal_mem_considered":
cc_scale_down = 1.0 # this makes the CCR go High
SimParams.CLSTR_TILE_PARAM_CCR_RANGES_LOW = 0.18 # less than this
SimParams.CLSTR_TILE_PARAM_CCR_RANGES_MED = (0.18, 0.23) # between these values
SimParams.CLSTR_TILE_PARAM_CCR_RANGES_HIGH = 0.23 # higher than this
        SimParams.CLSTR_TILE_PARAM_KAUSHIKS_ALGO_COMMS_SCALEUP_FACTOR = 1.0 # we use a scale factor because the comm. cost is always lower than the comp. cost
SimParams.CLSTR_TILE_PARAM_BGROUP_CCR_RANGES_LOW = 0.18 # less than this
SimParams.CLSTR_TILE_PARAM_BGROUP_CCR_RANGES_MED = (0.18, 0.23) # between these values
SimParams.CLSTR_TILE_PARAM_BGROUP_CCR_RANGES_HIGH = 0.23 # higher than this
SimParams.CLSTR_TILE_PARAM_BGROUP_NGT_HOPS = (4,1,1) # NGT hop count (low, med, high ccr ranges)
### for special very low CC - @0.60*0.1 ###
elif CCR_TYPE == "lowcc_normal_mem_considered":
        cc_scale_down = 0.1 # low values make the CCR go high
SimParams.CLSTR_TILE_PARAM_CCR_RANGES_LOW = 1.8 # less than this
SimParams.CLSTR_TILE_PARAM_CCR_RANGES_MED = (1.8, 2.3) # between these values
SimParams.CLSTR_TILE_PARAM_CCR_RANGES_HIGH = 2.3 # higher than this
        SimParams.CLSTR_TILE_PARAM_KAUSHIKS_ALGO_COMMS_SCALEUP_FACTOR = 1.0 # we use a scale factor because the comm. cost is always lower than the comp. cost
SimParams.CLSTR_TILE_PARAM_BGROUP_CCR_RANGES_LOW = 1.8 # less than this
SimParams.CLSTR_TILE_PARAM_BGROUP_CCR_RANGES_MED = (1.8, 2.3) # between these values
SimParams.CLSTR_TILE_PARAM_BGROUP_CCR_RANGES_HIGH = 2.3 # higher than this
SimParams.CLSTR_TILE_PARAM_BGROUP_NGT_HOPS = (4,1,1) # NGT hop count (low, med, high ccr ranges)
else:
sys.exit("Error - CCR_TYPE: "+ CCR_TYPE)
HEVCWLP.HEVCWLPARAMS_SCALE_FACTOR['ICU'] = (HEVCWLP.HEVCWLPARAMS_SCALE_FACTOR['ICU'][0]*float(cc_scale_down), HEVCWLP.HEVCWLPARAMS_SCALE_FACTOR['ICU'][1]*float(cc_scale_down))
HEVCWLP.HEVCWLPARAMS_SCALE_FACTOR['PCU'] = (HEVCWLP.HEVCWLPARAMS_SCALE_FACTOR['PCU'][0]*float(cc_scale_down), HEVCWLP.HEVCWLPARAMS_SCALE_FACTOR['PCU'][1]*float(cc_scale_down))
HEVCWLP.HEVCWLPARAMS_SCALE_FACTOR['BCU'] = (HEVCWLP.HEVCWLPARAMS_SCALE_FACTOR['BCU'][0]*float(cc_scale_down), HEVCWLP.HEVCWLPARAMS_SCALE_FACTOR['BCU'][1]*float(cc_scale_down))
HEVCWLP.HEVCWLPARAMS_SCALE_FACTOR['SkipCU'] = (HEVCWLP.HEVCWLPARAMS_SCALE_FACTOR['SkipCU'][0]*float(cc_scale_down), HEVCWLP.HEVCWLPARAMS_SCALE_FACTOR['SkipCU'][1]*float(cc_scale_down))
print "HEVCWLP.HEVCWLPARAMS_SCALE_FACTOR:: ---"
pprint.pprint(HEVCWLP.HEVCWLPARAMS_SCALE_FACTOR)
print "-------------"
# -- variable params --
SimParams.COMBINED_MAPPING_AND_PRIASS = cmbmppri_type
SimParams.DYNAMIC_TASK_MAPPING_SCHEME = TaskMappingSchemes.TASKMAPPINGSCHEMES_NONE # this will be overridden
SimParams.DYNAMIC_TASK_PRIASS_SCHEME = TaskSemiDynamicPrioritySchemes.TASKSEMIDYNAMICPRIORITYSCHEMES_NONE
pprint.pprint(SimParams.DVB_RESOLUTIONS_FIXED)
pprint.pprint(SimParams.NUM_WORKFLOWS)
# getting the reporing file name prefix
exp_key = "ac"+str(SimParams.AC_TEST_OPTION) + \
"mp"+str(SimParams.DYNAMIC_TASK_MAPPING_SCHEME)+ \
"pr"+str(SimParams.DYNAMIC_TASK_PRIASS_SCHEME)+ \
"cmb"+str(SimParams.COMBINED_MAPPING_AND_PRIASS) + \
"mmp"+str(SimParams.MMC_SMART_NODE_SELECTION_TYPE)
subdir1 = EXP_DATADIR + wl_config + "/" + exp_key + "/"
subdir2 = subdir1 + "seed_"+str(seed)+"/"
final_subdir = subdir2
fname_prefix = "HEVCTileSplitTest__" + exp_key + "_"
final_fname = fname_prefix+str(SimParams.NOC_H)+"_"+str(SimParams.NOC_W)+"_"
check_fname = _get_fname(final_subdir, final_fname)['taskscompleted_fname']
print "Checking file exists : " + str(check_fname)
if(_check_file_exists(check_fname) == True):
print "Simulation already exists.."
else:
print "----------------------------------------------------------------------------------------------------------------------------"
print subdir2
print "Running HEVCTile_Mapping-runSim_TileMapping-"+ fname_prefix +": num_wf=" + str(SimParams.NUM_WORKFLOWS) + \
", noc_h="+str(SimParams.NOC_H)+","+"noc_w="+str(SimParams.NOC_W) + ", " + \
exp_key + \
", seed="+str(seed)
print "----------------------------------------------------------------------------------------------------------------------------"
print "Start-time(actual): ", datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S')
env, last_scheduled_task_time = MMMSim.runMainSimulation(initial_rand_seed=seed, dump_workload=False)
env.run(until=last_scheduled_task_time+SimParams.SIM_RUNTIME)
print "Simulation Ended at : %.15f" % env.now
print "End-time: ", datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S')
_makeDir(final_subdir)
# get filenames
filenames = _get_fname(final_subdir, final_fname)
# name the report filenames
_dump_captured_data(filenames)
def _get_fname(exp_dir, fname_prefix):
tm_fname = exp_dir + fname_prefix + global_tm_fname
vs_bs_fname = exp_dir + fname_prefix + global_vs_bs_fname
util_fname = exp_dir + fname_prefix + global_util_fname
wf_res_fname = exp_dir + fname_prefix + global_wf_res_fname
gops_opbuff_fname = exp_dir + fname_prefix + global_gops_opbuff_fname
rmtbl_dt_fname = exp_dir + fname_prefix + global_rmtbl_dt_fname
ibuff_fname = exp_dir + fname_prefix + global_ibuff_fname
obuff_fname = exp_dir + fname_prefix + global_obuff_fname
nodetqs_fname = exp_dir + fname_prefix + global_nodetqs_fname
rmtaskrelease_fname = exp_dir + fname_prefix + global_rmtaskrelease_fname
mappingandpriass_fname = exp_dir + fname_prefix + global_mappingandpriass_fname
flowscompleted_fname = exp_dir + fname_prefix + global_flowscompleted_fname
flowscompletedshort_fname = exp_dir + fname_prefix + global_flowscompletedshort_fname
nodetaskexectime_fname = exp_dir + fname_prefix + global_nodetaskexectime_fname
schedtestresults_fname = exp_dir + fname_prefix + global_schedtestresults_fname
utilvsschedresults_fname = exp_dir + fname_prefix + global_utilvsschedresults_fname
rmtaskmappingtable_fname = exp_dir + fname_prefix + global_rmtaskmappingtable_fname
rmvolatiletaskmappingtable_fname = exp_dir + fname_prefix + global_rmvolatiletaskmappingtable_fname
processedctus_fname = exp_dir + fname_prefix + global_processedctus_fname
taskscompleted_fname = exp_dir + fname_prefix + global_taskscompleted_fname
mapperexecoverhead_fname = exp_dir + fname_prefix + global_mapperexecoverhead_fname
smartmmcid_fname = exp_dir + fname_prefix + global_smartmmcid_fname
jobccrinfo_fname = exp_dir + fname_prefix + global_jobccrinfo_fname
linkusagereport_fname = exp_dir + fname_prefix + global_linkusagereport_fname
ppmaperoptstageinfo_fname = exp_dir + fname_prefix + global_ppmaperoptstageinfo_fname
result = {
"tm_fname" : tm_fname,
"vs_bs_fname" : vs_bs_fname,
"util_fname" : util_fname,
"wf_res_fname" : wf_res_fname,
"gops_opbuff_fname" : gops_opbuff_fname,
"rmtbl_dt_fname" : rmtbl_dt_fname,
"ibuff_fname" : ibuff_fname,
"obuff_fname" : obuff_fname,
"nodetqs_fname" : nodetqs_fname,
"rmtaskrelease_fname" : rmtaskrelease_fname,
"mappingandpriass_fname" : mappingandpriass_fname,
"flowscompleted_fname" : flowscompleted_fname,
"flowscompletedshort_fname" : flowscompletedshort_fname,
"nodetaskexectime_fname" : nodetaskexectime_fname,
"schedtestresults_fname" : schedtestresults_fname,
"utilvsschedresults_fname" : utilvsschedresults_fname,
"rmtaskmappingtable_fname" : rmtaskmappingtable_fname,
"rmvolatiletaskmappingtable_fname" : rmvolatiletaskmappingtable_fname,
"processedctus_fname" : processedctus_fname,
"taskscompleted_fname" : taskscompleted_fname,
"mapperexecoverhead_fname" : mapperexecoverhead_fname,
"smartmmcid_fname" : smartmmcid_fname,
"jobccrinfo_fname" : jobccrinfo_fname,
"linkusagereport_fname" : linkusagereport_fname,
"ppmaperoptstageinfo_fname" : ppmaperoptstageinfo_fname,
}
return result
def _dump_captured_data(filenames):
(wf_results_summary, gops_in_outputbuff_summary) = MMMSim.SimMon.report_DecodedWorkflows_Summary(timeline_fname=filenames["tm_fname"],
wf_res_summary_fname = filenames["wf_res_fname"],
gops_opbuff_summary_fname = filenames["gops_opbuff_fname"],
rmtbl_dt_summary_fname = filenames["rmtbl_dt_fname"],
output_format = "json",
task_model_type = TaskModel.TASK_MODEL_HEVC_TILE_LEVEL)
MMMSim.SimMon.report_InstUtilisation(dump_to_file=filenames["util_fname"])
MMMSim.SimMon.report_OutputBufferContents(dump_to_file=filenames["obuff_fname"])
#MMMSim.SimMon.report_FlowsCompleted(dump_to_file=filenames["flowscompleted_fname"])
MMMSim.SimMon.report_FlowsCompleted_short(dump_to_file=filenames["flowscompletedshort_fname"])
MMMSim.SimMon.report_HEVC_NumCTU()
MMMSim.SimMon.report_RMTaskMappingTable(dump_to_file=filenames["rmtaskmappingtable_fname"])
MMMSim.SimMon.report_VerifyFlows_HEVCTileLvl()
#MMMSim.SimMon.report_RMVolatileTaskMappingTable(dump_to_file=filenames["rmvolatiletaskmappingtable_fname"])
MMMSim.SimMon.report_NodeTasksCompleted(dump_to_file=filenames["taskscompleted_fname"])
MMMSim.SimMon.report_MappingExecOverhead(dump_to_file=filenames["mapperexecoverhead_fname"])
MMMSim.SimMon.report_JobCCRInfo(dump_to_file=filenames["jobccrinfo_fname"])
#MMMSim.SimMon.report_LinkUsageInfo(dump_to_file=filenames["linkusagereport_fname"])
MMMSim.SimMon.report_PPTileMapperOptStage_Info(dump_to_file=filenames["ppmaperoptstageinfo_fname"])
def _makeDir(directory):
try:
os.stat(directory)
except:
try:
os.makedirs(directory)
except OSError, e:
print str(e)
pass
def _check_file_exists(fname):
return os.path.exists(fname)
# format : "720x576,544x576,528x576,480x576,426x240,320x240,240x180"
def _reslist_convert(str_res_list):
res_list = []
if(str_res_list.count(',')>0):
res_combos = str_res_list.split(',')
if(len(res_combos)>1):
for each_r in res_combos:
res_h_w = each_r.split('x')
int_res_h = int(res_h_w[0])
int_res_w = int(res_h_w[1])
res_list.append((int_res_h, int_res_w))
else:
sys.exit("_reslist_convert:: Error")
else:
res_h_w = str_res_list.split('x')
int_res_h = int(res_h_w[0])
int_res_w = int(res_h_w[1])
res_list.append((int_res_h, int_res_w))
return res_list
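# Editor's sketch (not part of the original script): _reslist_convert parses the
# comma-separated resolution string passed on the command line into integer tuples,
# e.g.
#     _reslist_convert("720x576,426x240")  # -> [(720, 576), (426, 240)]
#     _reslist_convert("512x288")          # -> [(512, 288)]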
############################################################################
############################################################################
## MAIN SCRIPT SECTION
############################################################################
############################################################################
sys.setrecursionlimit(1500)
# collect command line params
parser = argparse.ArgumentParser(__file__, description="Run specified experiment on abstract simulator")
parser.add_argument("--wl_config", help="workload config", default=None)
parser.add_argument("--cmbmppri_type", help="combined mapping and pri-assignment type", type=int, default=-1)
parser.add_argument("--forced_seed", help="forced seed", type=int, default=-1)
args = parser.parse_args()
pprint.pprint(args)
####################################
## check which experiment to run ##
####################################
# if (args.forced_seed==-1):
# seed=1234
# else:
# seed = args.forced_seed
if (args.wl_config == None) or (args.forced_seed == -1) or (args.cmbmppri_type == -1):
sys.exit("Arguments invalid")
# construct filename
runSim_TileMapping(
forced_seed = args.forced_seed,
cmbmppri_type = args.cmbmppri_type,
wl_config = args.wl_config
)
| gpl-3.0 |
takaakiaoki/PyFoam | PyFoam/Applications/TimelinePlot.py | 3 | 27877 | # ICE Revision: $Id$
"""
Application class that implements pyFoamTimelinePlot.py
"""
import sys
from os import path
from optparse import OptionGroup
from .PyFoamApplication import PyFoamApplication
from PyFoam.RunDictionary.TimelineDirectory import TimelineDirectory
from PyFoam.Basics.SpreadsheetData import WrongDataSize
from PyFoam.ThirdParty.six import print_
from .PlotHelpers import cleanFilename
class TimelinePlot(PyFoamApplication):
def __init__(self,
args=None,
**kwargs):
description="""\
Searches a directory for timelines that were generated by some
functionObject and generates the gnuplot commands to plot them. As an option
the data can be written to a CSV-file.
"""
PyFoamApplication.__init__(self,
args=args,
description=description,
usage="%prog [options] <casedir>",
nr=1,
changeVersion=False,
interspersed=True,
**kwargs)
def addOptions(self):
data=OptionGroup(self.parser,
"Data",
"Select the data to plot")
self.parser.add_option_group(data)
data.add_option("--fields",
action="append",
default=None,
dest="fields",
help="The fields for which timelines should be plotted. All if unset")
data.add_option("--positions",
action="append",
default=None,
dest="positions",
help="The positions for which timelines should be plotted. Either strings or integers (then the corresponding column number will be used). All if unset")
data.add_option("--write-time",
default=None,
dest="writeTime",
help="If more than one time-subdirectory is stored select which one is used")
data.add_option("--directory-name",
action="store",
default="probes",
dest="dirName",
help="Alternate name for the directory with the samples (Default: %default)")
data.add_option("--reference-directory",
action="store",
default=None,
dest="reference",
help="A reference directory. If fitting timeline data is found there it is plotted alongside the regular data")
data.add_option("--reference-case",
action="store",
default=None,
dest="referenceCase",
help="A reference case where a directory with the same name is looked for. Mutual exclusive with --reference-directory")
time=OptionGroup(self.parser,
"Time",
"Select the times to plot")
self.parser.add_option_group(time)
time.add_option("--time",
action="append",
type="float",
default=None,
dest="time",
help="The times that are plotted (can be used more than once). Has to be specified for bars")
time.add_option("--min-time",
action="store",
type="float",
default=None,
dest="minTime",
help="The smallest time that should be used for lines")
time.add_option("--max-time",
action="store",
type="float",
default=None,
dest="maxTime",
help="The biggest time that should be used for lines")
time.add_option("--reference-time",
action="store_true",
default=False,
dest="referenceTime",
help="Use the time of the reference data for scaling instead of the regular data")
plot=OptionGroup(self.parser,
"Plot",
"How data should be plotted")
self.parser.add_option_group(plot)
plot.add_option("--basic-mode",
type="choice",
dest="basicMode",
default=None,
choices=["bars","lines"],
help="Whether 'bars' of the values at selected times or 'lines' over the whole timelines should be plotted")
vModes=["mag","x","y","z"]
plot.add_option("--vector-mode",
type="choice",
dest="vectorMode",
default="mag",
choices=vModes,
help="How vectors should be plotted. By magnitude or as a component. Possible values are "+str(vModes)+" Default: %default")
plot.add_option("--collect-lines-by",
type="choice",
dest="collectLines",
default="fields",
choices=["fields","positions"],
help="Collect lines for lineplotting either by 'fields' or 'positions'. Default: %default")
output=OptionGroup(self.parser,
"Output",
"Where data should be plotted to")
self.parser.add_option_group(output)
output.add_option("--gnuplot-file",
action="store",
dest="gnuplotFile",
default=None,
help="Write the necessary gnuplot commands to this file. Else they are written to the standard output")
output.add_option("--picture-destination",
action="store",
dest="pictureDest",
default=None,
help="Directory the pictures should be stored to")
output.add_option("--name-prefix",
action="store",
dest="namePrefix",
default=None,
help="Prefix to the picture-name")
output.add_option("--clean-filename",
action="store_true",
dest="cleanFilename",
default=False,
help="Clean filenames so that they can be used in HTML or Latex-documents")
output.add_option("--csv-file",
action="store",
dest="csvFile",
default=None,
help="Write the data to a CSV-file instead of the gnuplot-commands")
output.add_option("--excel-file",
action="store",
dest="excelFile",
default=None,
help="Write the data to a Excel-file instead of the gnuplot-commands")
output.add_option("--pandas-data",
action="store_true",
dest="pandasData",
default=False,
help="Pass the raw data in pandas-format")
output.add_option("--numpy-data",
action="store_true",
dest="numpyData",
default=False,
help="Pass the raw data in numpy-format")
output.add_option("--reference-prefix",
action="store",
dest="refprefix",
default="Reference",
help="Prefix that gets added to the reference lines. Default: %default")
data.add_option("--info",
action="store_true",
dest="info",
default=False,
help="Print info about the sampled data and exit")
output.add_option("--resample",
action="store_true",
dest="resample",
default=False,
help="Resample the reference value to the current x-axis (for CSV and Excel-output)")
output.add_option("--extend-data",
action="store_true",
dest="extendData",
default=False,
help="Extend the data range if it differs (for CSV and Excel-files)")
output.add_option("--silent",
action="store_true",
dest="silent",
default=False,
help="Don't write to screen (with the silent and the compare-options)")
numerics=OptionGroup(self.parser,
"Quantify",
"Metrics of the data and numerical comparisons")
self.parser.add_option_group(numerics)
numerics.add_option("--compare",
action="store_true",
dest="compare",
default=None,
help="Compare all data sets that are also in the reference data")
numerics.add_option("--metrics",
action="store_true",
dest="metrics",
default=None,
help="Print the metrics of the data sets")
numerics.add_option("--use-reference-for-comparison",
action="store_false",
dest="compareOnOriginal",
default=True,
help="Use the reference-data as the basis for the numerical comparison. Otherwise the original data will be used")
def setFile(self,fName):
if self.opts.namePrefix:
fName=self.opts.namePrefix+"_"+fName
if self.opts.pictureDest:
fName=path.join(self.opts.pictureDest,fName)
name=fName
if self.opts.cleanFilename:
name=cleanFilename(fName)
return 'set output "%s"\n' % name
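    # Editor's sketch (not part of the original source): with --name-prefix=run1
    # and --picture-destination=pics, a call such as
    #     self.setFile("probes_writeTime_0_Value_T.png")
    # returns roughly 'set output "pics/run1_probes_writeTime_0_Value_T.png"\n';
    # when --clean-filename is set the name is first passed through cleanFilename().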
def run(self):
        # remove trailing slash if present
if self.opts.dirName[-1]==path.sep:
self.opts.dirName=self.opts.dirName[:-1]
usedDirName=self.opts.dirName.replace("/","_")
timelines=TimelineDirectory(self.parser.getArgs()[0],
dirName=self.opts.dirName,
writeTime=self.opts.writeTime)
reference=None
if self.opts.reference and self.opts.referenceCase:
self.error("Options --reference-directory and --reference-case are mutual exclusive")
if (self.opts.csvFile or self.opts.excelFile or self.opts.pandasData or self.opts.numpyData) and (self.opts.compare or self.opts.metrics):
self.error("Options --csv-file/excel-file/--pandas-data/--numpy-data and --compare/--metrics are mutual exclusive")
if self.opts.reference:
reference=TimelineDirectory(self.parser.getArgs()[0],
dirName=self.opts.reference,
writeTime=self.opts.writeTime)
elif self.opts.referenceCase:
reference=TimelineDirectory(self.opts.referenceCase,
dirName=self.opts.dirName,
writeTime=self.opts.writeTime)
if self.opts.info:
self.setData({'writeTimes' : timelines.writeTimes,
'usedTimes' : timelines.usedTime,
'fields' : timelines.values,
'positions' : timelines.positions(),
'timeRange' : timelines.timeRange()})
if not self.opts.silent:
print_("Write Times : ",timelines.writeTimes)
print_("Used Time : ",timelines.usedTime)
print_("Fields : ",timelines.values,end="")
if len(timelines.vectors)>0:
if not self.opts.silent:
print_(" Vectors: ",timelines.vectors)
self.setData({'vectors':timelines.vectors})
else:
if not self.opts.silent:
print_()
if not self.opts.silent:
print_("Positions : ",timelines.positions())
print_("Time range : ",timelines.timeRange())
if reference:
refData={'writeTimes' : reference.writeTimes,
'fields' : reference.values,
'positions' : reference.positions(),
'timeRange' : reference.timeRange()}
if not self.opts.silent:
print_("\nReference Data")
print_("Write Times : ",reference.writeTimes)
print_("Fields : ",reference.values,end="")
if len(reference.vectors)>0:
if not self.opts.silent:
print_(" Vectors: ",reference.vectors)
refData["vectors"]=reference.vectors
else:
if not self.opts.silent:
print_()
if not self.opts.silent:
print_("Positions : ",reference.positions())
print_("Time range : ",reference.timeRange())
self.setData({"reference":refData})
return 0
if self.opts.fields==None:
self.opts.fields=timelines.values
else:
for v in self.opts.fields:
if v not in timelines.values:
self.error("The requested value",v,"not in possible values",timelines.values)
if self.opts.positions==None:
self.opts.positions=timelines.positions()
else:
pos=self.opts.positions
self.opts.positions=[]
for p in pos:
try:
p=int(p)
if p<0 or p>=len(timelines.positions()):
self.error("Time index",p,"out of range for positons",timelines.positions())
else:
self.opts.positions.append(timelines.positions()[p])
except ValueError:
if p not in timelines.positions():
self.error("Position",p,"not in",timelines.positions())
else:
self.opts.positions.append(p)
if len(self.opts.positions)==0:
self.error("No valid positions")
result="set term png nocrop enhanced \n"
if self.opts.basicMode==None:
self.error("No mode selected. Do so with '--basic-mode'")
elif self.opts.basicMode=='bars':
if self.opts.time==None:
self.error("No times specified for bar-plots")
self.opts.time.sort()
if self.opts.referenceTime and reference!=None:
minTime,maxTime=reference.timeRange()
else:
minTime,maxTime=timelines.timeRange()
usedTimes=[]
hasMin=False
for t in self.opts.time:
if t<minTime:
if not hasMin:
usedTimes.append(minTime)
hasMin=True
elif t>maxTime:
usedTimes.append(maxTime)
break
else:
usedTimes.append(t)
data=timelines.getData(usedTimes,
value=self.opts.fields,
position=self.opts.positions,
vectorMode=self.opts.vectorMode)
# print_(data)
result+="set style data histogram\n"
result+="set style histogram cluster gap 1\n"
result+="set style fill solid border -1\n"
result+="set boxwidth 0.9\n"
result+="set xtics border in scale 1,0.5 nomirror rotate by 90 offset character 0, 0, 0\n"
# set xtic rotate by -45\n"
result+="set xtics ("
for i,p in enumerate(self.opts.positions):
if i>0:
result+=" , "
result+='"%s" %d' % (p,i)
result+=")\n"
for tm in usedTimes:
if abs(float(tm))>1e20:
continue
result+=self.setFile("%s_writeTime_%s_Time_%s.png" % (usedDirName,timelines.usedTime,tm))
result+='set title "Directory: %s WriteTime: %s Time: %s"\n' % (self.opts.dirName.replace("_","\\\\_"),timelines.usedTime,tm)
result+= "plot "
first=True
for val in self.opts.fields:
if first:
first=False
else:
result+=", "
result+='"-" title "%s" ' % val.replace("_","\\\\_")
result+="\n"
for v,t,vals in data:
if t==tm:
for v in vals:
result+="%g\n" % v
result+="e\n"
elif self.opts.basicMode=='lines':
# print_(self.opts.positions)
oPlots=timelines.getDataLocation(value=self.opts.fields,
position=self.opts.positions,
vectorMode=self.opts.vectorMode)
plots=oPlots[:]
rPlots=None
if reference:
rPlots=reference.getDataLocation(value=self.opts.fields,
position=self.opts.positions,
vectorMode=self.opts.vectorMode)
for gp,pos,val,comp,tv in rPlots:
plots.append((gp,
pos,
self.opts.refprefix+" "+val,
comp,
tv))
if self.opts.referenceTime and reference!=None:
minTime,maxTime=reference.timeRange()
else:
minTime,maxTime=timelines.timeRange()
if self.opts.minTime:
minTime=self.opts.minTime
if self.opts.maxTime:
maxTime=self.opts.maxTime
result+= "set xrange [%g:%g]\n" % (minTime,maxTime)
if self.opts.collectLines=="fields":
for val in self.opts.fields:
vname=val
if val in timelines.vectors:
vname+="_"+self.opts.vectorMode
result+=self.setFile("%s_writeTime_%s_Value_%s.png" % (usedDirName,timelines.usedTime,vname))
result+='set title "Directory: %s WriteTime: %s Value: %s"\n' % (self.opts.dirName.replace("_","\\\\_"),timelines.usedTime,vname.replace("_","\\\\\\_"))
result+= "plot "
first=True
for f,v,p,i,tl in plots:
if v==val:
if first:
first=False
else:
result+=" , "
if type(i)==int:
result+= ' "%s" using 1:%d title "%s" with lines ' % (f,i+2,p.replace("_","\\\\_"))
else:
result+= ' "%s" using 1:%s title "%s" with lines ' % (f,i,p.replace("_","\\\\_"))
result+="\n"
elif self.opts.collectLines=="positions":
for pos in self.opts.positions:
result+=self.setFile("%s_writeTime_%s_Position_%s.png" % (usedDirName,timelines.usedTime,pos))
result+='set title "Directory: %s WriteTime: %s Position: %s"\n' % (self.opts.dirName.replace("_","\\\\_"),timelines.usedTime,pos.replace("_","\\\\_"))
result+= "plot "
first=True
for f,v,p,i,tl in plots:
if p==pos:
if first:
first=False
else:
result+=" , "
if type(i)==int:
result+= ' "%s" using 1:%d title "%s" with lines ' % (f,i+2,v.replace("_","\\\\_"))
else:
result+= ' "%s" using 1:%s title "%s" with lines ' % (f,i,v.replace("_","\\\\_"))
result+="\n"
else:
self.error("Unimplemented collection of lines:",self.opts.collectLines)
else:
self.error("Not implemented basicMode",self.opts.basicMode)
if self.opts.csvFile or self.opts.excelFile or self.opts.pandasData or self.opts.numpyData:
if self.opts.basicMode!='lines':
self.error("CSV and Excel-files currently only supported for lines-mode (also Pandas and Numpy-data)")
spread=plots[0][-1]()
usedFiles=set([plots[0][0]])
for line in plots[1:]:
if line[0] not in usedFiles:
usedFiles.add(line[0])
sp=line[-1]()
try:
spread+=sp
except WrongDataSize:
if self.opts.resample:
for n in sp.names()[1:]:
data=spread.resample(sp,
n,
extendData=self.opts.extendData)
try:
spread.append(n,data)
except ValueError:
spread.append(self.opts.refprefix+" "+n,data)
else:
self.warning("Try the --resample-option")
raise
if self.opts.csvFile:
spread.writeCSV(self.opts.csvFile)
if self.opts.excelFile:
spread.getData().to_excel(self.opts.excelFile)
if self.opts.pandasData:
self.setData({"series":spread.getSeries(),
"dataFrame":spread.getData()})
if self.opts.numpyData:
self.setData({"data":spread.data.copy()})
elif self.opts.compare or self.opts.metrics:
statData={}
if self.opts.compare:
statData["compare"]={}
if self.opts.metrics:
statData["metrics"]={}
for p in self.opts.positions:
if self.opts.compare:
statData["compare"][p]={}
if self.opts.metrics:
statData["metrics"][p]={}
if self.opts.basicMode!='lines':
self.error("Compare currently only supported for lines-mode")
if self.opts.compare:
if rPlots==None:
self.error("No reference data specified. Can't compare")
elif len(rPlots)!=len(oPlots):
self.error("Number of original data sets",len(oPlots),
"is not equal to the reference data sets",
len(rPlots))
for i,p in enumerate(oPlots):
pth,val,loc,ind,tl=p
if self.opts.compare:
rpth,rval,rloc,rind,rtl=rPlots[i]
if val!=rval or loc!=rloc or ind!=rind:
self.error("Original data",p,"and reference",rPlots[i],
"do not match")
data=tl()
try:
dataIndex=1+ind
if self.opts.metrics:
if not self.opts.silent:
print_("Metrics for",val,"on",loc,"index",ind,"(Path:",pth,")")
result=data.metrics(data.names()[dataIndex],
minTime=self.opts.minTime,
maxTime=self.opts.maxTime)
statData["metrics"][loc][val]=result
if not self.opts.silent:
print_(" Min :",result["min"])
print_(" Max :",result["max"])
print_(" Average :",result["average"])
print_(" Weighted average :",result["wAverage"])
if not self.opts.compare:
print_("Data size:",data.size())
print_(" Time Range :",result["tMin"],result["tMax"])
if self.opts.compare:
if not self.opts.silent:
print_("Comparing",val,"on",loc,"index",ind,"(path:",pth,")",end="")
ref=rtl()
if self.opts.compareOnOriginal:
if not self.opts.silent:
print_("on original data points")
result=data.compare(ref,
data.names()[dataIndex],
minTime=self.opts.minTime,
maxTime=self.opts.maxTime)
else:
if not self.opts.silent:
print_("on reference data points")
result=ref.compare(data,
data.names()[dataIndex],
minTime=self.opts.minTime,
maxTime=self.opts.maxTime)
statData["compare"][loc][val]=result
if not self.opts.silent:
print_(" Max difference :",result["max"])
print_(" Average difference :",result["average"])
print_(" Weighted average :",result["wAverage"])
print_("Data size:",data.size(),"Reference:",ref.size())
if not self.opts.metrics:
print_(" Time Range :",result["tMin"],result["tMax"])
if not self.opts.silent:
print_()
except TypeError:
if self.opts.vectorMode=="mag":
self.error("Vector-mode 'mag' not supported for --compare and --metrics")
else:
raise
self.setData(statData)
else:
dest=sys.stdout
if self.opts.gnuplotFile:
dest=open(self.opts.gnuplotFile,"w")
dest.write(result)
# Should work with Python3 and Python2
| gpl-2.0 |
lancezlin/pylearn2 | pylearn2/utils/datasets.py | 44 | 9068 | """
Several utilities to evaluate an ALC on the dataset, to iterate over
minibatches from a dataset, or to merge three datasets with given proportions
"""
# Standard library imports
import logging
import os
import functools
from itertools import repeat
import warnings
# Third-party imports
import numpy
import scipy
from theano.compat.six.moves import reduce, xrange
import theano
try:
from matplotlib import pyplot
from mpl_toolkits.mplot3d import Axes3D
except ImportError:
warnings.warn("Could not import some dependencies.")
# Local imports
from pylearn2.utils.rng import make_np_rng
logger = logging.getLogger(__name__)
##################################################
# 3D Visualization
##################################################
def do_3d_scatter(x, y, z, figno=None, title=None):
"""
Generate a 3D scatterplot figure and optionally give it a title.
Parameters
----------
x : WRITEME
y : WRITEME
z : WRITEME
figno : WRITEME
title : WRITEME
"""
fig = pyplot.figure(figno)
ax = Axes3D(fig)
ax.scatter(x, y, z)
ax.set_xlabel("X")
ax.set_ylabel("Y")
ax.set_zlabel("Z")
pyplot.suptitle(title)
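# Editor's sketch (not part of the original module): do_3d_scatter only draws the
# scatter plot; showing or saving the figure is left to the caller, as save_plot()
# below does.
#
#     import numpy
#     pts = numpy.random.randn(100, 3)
#     do_3d_scatter(pts[:, 0], pts[:, 1], pts[:, 2], figno=1, title="demo")
#     pyplot.show()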
def save_plot(repr, path, name="figure.pdf", title="features"):
"""
.. todo::
WRITEME
"""
# TODO : Maybe run a PCA if shape[1] > 3
assert repr.get_value(borrow=True).shape[1] == 3
# Take the first 3 columns
x, y, z = repr.get_value(borrow=True).T
do_3d_scatter(x, y, z)
# Save the produces figure
filename = os.path.join(path, name)
pyplot.savefig(filename, format="pdf")
logger.info('... figure saved: {0}'.format(filename))
##################################################
# Features or examples filtering
##################################################
def filter_labels(train, label, classes=None):
"""
Filter examples of train for which we have labels
Parameters
----------
train : WRITEME
label : WRITEME
classes : WRITEME
Returns
-------
WRITEME
"""
if isinstance(train, theano.tensor.sharedvar.SharedVariable):
train = train.get_value(borrow=True)
if isinstance(label, theano.tensor.sharedvar.SharedVariable):
label = label.get_value(borrow=True)
if not (isinstance(train, numpy.ndarray) or scipy.sparse.issparse(train)):
raise TypeError('train must be a numpy array, a scipy sparse matrix,'
' or a theano shared array')
# Examples for which any label is set
if classes is not None:
label = label[:, classes]
# Special case for sparse matrices
if scipy.sparse.issparse(train):
idx = label.sum(axis=1).nonzero()[0]
return (train[idx], label[idx])
# Compress train and label arrays according to condition
condition = label.any(axis=1)
return tuple(var.compress(condition, axis=0) for var in (train, label))
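# Editor's sketch (not part of the original module): keep only the examples that
# have at least one label set, optionally restricted to a subset of label columns.
#
#     import numpy
#     X = numpy.arange(12).reshape(4, 3)
#     y = numpy.array([[0, 1], [0, 0], [1, 0], [0, 0]])
#     X_lab, y_lab = filter_labels(X, y)             # keeps rows 0 and 2
#     X_c0, y_c0 = filter_labels(X, y, classes=[0])  # keeps rows where class 0 is set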
def nonzero_features(data, combine=None):
"""
Get features for which there are nonzero entries in the data.
Parameters
----------
data : list of matrices
List of data matrices, either in sparse format or not.
They must have the same number of features (column number).
combine : function, optional
A function to combine elementwise which features to keep.
Default keeps the intersection of each non-zero columns.
Returns
-------
indices : ndarray object
Indices of the nonzero features.
Notes
-----
I would return a mask (bool array) here, but scipy.sparse doesn't appear to
fully support advanced indexing.
"""
if combine is None:
combine = functools.partial(reduce, numpy.logical_and)
# Assumes all values are >0, which is the case for all sparse datasets.
masks = numpy.asarray([subset.sum(axis=0) for subset in data]).squeeze()
nz_feats = combine(masks).nonzero()[0]
return nz_feats
# TODO: Is this a duplicate?
def filter_nonzero(data, combine=None):
"""
Filter non-zero features of data according to a certain combining function
Parameters
----------
data : list of matrices
List of data matrices, either in sparse format or not.
They must have the same number of features (column number).
combine : function
A function to combine elementwise which features to keep.
Default keeps the intersection of each non-zero columns.
Returns
-------
indices : ndarray object
Indices of the nonzero features.
"""
nz_feats = nonzero_features(data, combine)
return [set[:, nz_feats] for set in data]
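# Editor's sketch (not part of the original module): keep only the feature columns
# that have non-zero entries in every data matrix (the default `combine` takes the
# intersection of each subset's non-zero columns).
#
#     import numpy
#     train = numpy.array([[1, 0, 2], [0, 0, 3]])
#     valid = numpy.array([[4, 0, 0], [5, 0, 6]])
#     train_nz, valid_nz = filter_nonzero([train, valid])  # column 1 is dropped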
##################################################
# Iterator object for minibatches of datasets
##################################################
class BatchIterator(object):
"""
Builds an iterator object that can be used to go through the minibatches
of a dataset, with respect to the given proportions in conf
Parameters
----------
dataset : WRITEME
set_proba : WRITEME
batch_size : WRITEME
seed : WRITEME
"""
def __init__(self, dataset, set_proba, batch_size, seed=300):
# Local shortcuts for array operations
flo = numpy.floor
sub = numpy.subtract
mul = numpy.multiply
div = numpy.divide
mod = numpy.mod
# Record external parameters
self.batch_size = batch_size
if (isinstance(dataset[0], theano.Variable)):
self.dataset = [set.get_value(borrow=True) for set in dataset]
else:
self.dataset = dataset
# Compute maximum number of samples for one loop
set_sizes = [set.shape[0] for set in self.dataset]
set_batch = [float(self.batch_size) for i in xrange(3)]
set_range = div(mul(set_proba, set_sizes), set_batch)
        set_range = list(map(int, numpy.ceil(set_range)))  # list() so it can be indexed below (Python 3 compatible)
# Upper bounds for each minibatch indexes
set_limit = numpy.ceil(numpy.divide(set_sizes, set_batch))
        self.limit = list(map(int, set_limit))  # list() so it can be indexed in __iter__ (Python 3 compatible)
# Number of rows in the resulting union
set_tsign = sub(set_limit, flo(div(set_sizes, set_batch)))
set_tsize = mul(set_tsign, flo(div(set_range, set_limit)))
l_trun = mul(flo(div(set_range, set_limit)), mod(set_sizes, set_batch))
l_full = mul(sub(set_range, set_tsize), set_batch)
self.length = sum(l_full) + sum(l_trun)
# Random number generation using a permutation
index_tab = []
for i in xrange(3):
index_tab.extend(repeat(i, set_range[i]))
# Use a deterministic seed
self.seed = seed
rng = make_np_rng(seed, which_method="permutation")
self.permut = rng.permutation(index_tab)
def __iter__(self):
"""Generator function to iterate through all minibatches"""
counter = [0, 0, 0]
for chosen in self.permut:
# Retrieve minibatch from chosen set
index = counter[chosen]
minibatch = self.dataset[chosen][
index * self.batch_size:(index + 1) * self.batch_size
]
# Increment the related counter
counter[chosen] = (counter[chosen] + 1) % self.limit[chosen]
# Return the computed minibatch
yield minibatch
def __len__(self):
"""Return length of the weighted union"""
return self.length
def by_index(self):
"""Same generator as __iter__, but yield only the chosen indexes"""
counter = [0, 0, 0]
for chosen in self.permut:
index = counter[chosen]
counter[chosen] = (counter[chosen] + 1) % self.limit[chosen]
yield chosen, index
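# Editor's sketch (not part of the original module): BatchIterator expects exactly
# three subsets (e.g. train/valid/test) and yields minibatches drawn from them
# according to the given proportions.
#
#     import numpy
#     sets = [numpy.random.randn(n, 5) for n in (100, 50, 50)]
#     it = BatchIterator(sets, set_proba=[0.7, 0.2, 0.1], batch_size=10)
#     for minibatch in it:
#         pass  # each minibatch holds up to 10 consecutive rows of one subset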
##################################################
# Miscellaneous
##################################################
def minibatch_map(fn, batch_size, input_data, output_data=None,
output_width=None):
"""
Apply a function on input_data, one minibatch at a time.
Storage for the output can be provided. If it is the case,
it should have appropriate size.
If output_data is not provided, then output_width should be specified.
Parameters
----------
fn : WRITEME
batch_size : WRITEME
input_data : WRITEME
output_data : WRITEME
output_width : WRITEME
Returns
-------
WRITEME
"""
if output_width is None:
if output_data is None:
raise ValueError('output_data or output_width should be provided')
output_width = output_data.shape[1]
output_length = input_data.shape[0]
if output_data is None:
output_data = numpy.empty((output_length, output_width))
else:
assert output_data.shape[0] == input_data.shape[0], ('output_data '
'should have the same length as input_data',
output_data.shape[0], input_data.shape[0])
for i in xrange(0, output_length, batch_size):
output_data[i:i+batch_size] = fn(input_data[i:i+batch_size])
return output_data
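# Editor's sketch (not part of the original module): apply a transformation in
# chunks of `batch_size` rows, e.g. to keep memory usage bounded.
#
#     import numpy
#     X = numpy.random.randn(1000, 20)
#     doubled = minibatch_map(lambda chunk: 2 * chunk, 128, X, output_width=20)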
| bsd-3-clause |
jarvis-fga/Projetos | Problema 2/lucas/src/text_classification.py | 1 | 5048 | # Create by Lucas Andrade
# On 11 / 09 / 2017
import pandas as pd
import codecs
import numpy as np
from sklearn import svm
from sklearn.svm import SVC
from sklearn.model_selection import cross_val_score
from sklearn.naive_bayes import MultinomialNB
from sklearn.naive_bayes import GaussianNB
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.linear_model import LogisticRegression
k = 10
file = codecs.open('../dados/comments_min.csv','rU','UTF-8')
my_data = pd.read_csv(file, sep='\t')
comments_ugly = my_data['comentarios']
marks = my_data['status']
comments = comments_ugly.str.lower().str.split(' ')
dictionary = set()
for words in comments:
dictionary.update(words)
tuples = zip(dictionary, xrange(len(dictionary)))
translator = { word:number for word,number in tuples}
def comment_as_vector(comment, translator):
vector = [0] * len(dictionary)
for word in comment:
if word in translator:
position = translator[word]
vector[position] += 1
return vector
def vector_all_comments(comments, translator):
new_comments = [comment_as_vector(comment, translator) for comment in comments]
return new_comments
X = vector_all_comments(comments, translator)
Y = list(marks)
X_training = X[0:2399]
Y_training = Y[0:2399]
X_test = X[2399:2999]
Y_test = Y[2399:2999]
def check_corrects(predicted):
accerts = predicted - Y_test
total_accerts = 0
for accert in accerts:
if accert == 0:
total_accerts+=1
return total_accerts
def calc_percent(predicted, name):
accerts = check_corrects(predicted)
percent = 100.0 * accerts/len(Y_test)
print("{0} {1}\n".format(name, percent))
return percent
# Multinomial NB
print("Multinomial NB is training . . .")
nb_model = MultinomialNB()
nb_model.fit(X_training, Y_training)
nb_result = nb_model.predict(X_test)
calc_percent(nb_result, "Multinomial NB: ")
# Gaussian NB
print("Gaussian NB is training . . .")
gaussian_nb = GaussianNB()
gaussian_nb.fit(X_training, Y_training)
gaussian_nb_result = gaussian_nb.predict(X_test)
calc_percent(gaussian_nb_result, "Gaussian NB: ")
# LogisticRegression
print("Logic Regression is training . . .")
logic_regression = LogisticRegression(random_state=1)
logic_regression.fit(X_training, Y_training)
logic_regression_result = logic_regression.predict(X_test)
calc_percent(logic_regression_result, "Logic Regression: ")
# Gradient Boosting
print("Gradient Boosting is training . . .")
gradient_model = GradientBoostingClassifier(n_estimators=100, learning_rate=1.0,
max_depth=1, random_state=0)
gradient_model.fit(X_training, Y_training)
gradient_result = gradient_model.predict(X_test)
calc_percent(gradient_result, "Gradient Boosting: ")
# Random Forest !!!! Always gives a different result
print("Random Forest is training . . .")
random_f = RandomForestClassifier(random_state=1)
random_f.fit(X_training, Y_training)
random_florest_result = random_f.predict(X_test)
calc_percent(random_florest_result, "Random Forest: ")
# SVC pure
svc_pure = SVC(kernel="linear", C=1.0, random_state=0)
print("SVC Pure is training . . .")
svc_pure.fit(X_training, Y_training)
svcpure_test_result = svc_pure.predict(X_test)
calc_percent(svcpure_test_result, "SVC pure real:")
# Combining the results
def get_results(comment):
    # Tokenize the raw comment string the same way the training comments were prepared
    words = comment.lower().split(' ')
    # predict() expects a 2D array: one row (bag-of-words vector) per sample
    vector_comment = [comment_as_vector(words, translator)]
    nb = nb_model.predict(vector_comment)
    logic = logic_regression.predict(vector_comment)
    svc = svc_pure.predict(vector_comment)
    gradient = gradient_model.predict(vector_comment)
    florest = random_f.predict(vector_comment)
    # Sum of the five binary predictions; used below for the majority vote
    results = nb + logic + svc + gradient + florest
    return results
# Using the results of the five best models (majority vote)
def final_result(results):
i=0
for result in results:
if result < 3:
results[i] = 0
else:
results[i] = 1
i = i + 1
    calc_percent(results, "Final result: ")
all_results = nb_result+logic_regression_result+svcpure_test_result+gradient_result+random_florest_result
final_result(all_results)
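# Editor's note (not part of the original script): each entry of all_results counts
# how many of the five classifiers above predicted class 1 for that test comment;
# final_result turns this into a majority vote (3 or more votes -> 1, else 0) and
# prints the accuracy of the ensemble against Y_test.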
# def new_comment():
# comment = "not null"
# while(comment != ""):
# comment = input("Type here your comment")
# final_result(comment)
#
# new_comment()
# SVC with cross validation k = 10
# svc = svm.SVC(kernel='linear', C=1.0, random_state=0)
# print("SVC with cross validation is training . . .")
# accuracy_svc = cross_val_score(svc, X, Y, cv=k, scoring='accuracy').mean()
# print("SVC with cross val training: ", accuracy_svc)
# print("\n")
#
# # Gradient Boosting
# from sklearn.ensemble import GradientBoostingClassifier
# gradient_boosting = GradientBoostingClassifier(n_estimators=50, learning_rate=1.0, max_depth=1, random_state=0)
# print("Gradient Boosting is training . . .")
# accuracy_boost = cross_val_score(gradient_boosting, X, Y, cv=k, scoring='accuracy').mean()
# print("Boosting training: ", accuracy_boost) | mit |
hail-is/hail | hail/python/hail/table.py | 1 | 126621 | import collections
import itertools
import pandas
import pyspark
from typing import Optional, Dict, Callable
from hail.expr.expressions import Expression, StructExpression, \
BooleanExpression, expr_struct, expr_any, expr_bool, analyze, Indices, \
construct_reference, to_expr, construct_expr, extract_refs_by_indices, \
ExpressionException, TupleExpression, unify_all, NumericExpression, \
StringExpression, CallExpression, CollectionExpression, DictExpression, \
IntervalExpression, LocusExpression, NDArrayExpression, expr_array
from hail.expr.types import hail_type, tstruct, types_match, tarray, tset
from hail.expr.table_type import ttable
import hail.ir as ir
from hail.typecheck import typecheck, typecheck_method, dictof, anytype, \
anyfunc, nullable, sequenceof, oneof, numeric, lazy, enumeration, \
table_key_type, func_spec
from hail.utils.placement_tree import PlacementTree
from hail.utils.java import Env, info, warning
from hail.utils.misc import wrap_to_tuple, storage_level, plural, \
get_nice_field_error, get_nice_attr_error, get_key_by_exprs, check_keys, \
get_select_exprs, check_annotate_exprs, process_joins
import hail as hl
table_type = lazy()
class TableIndexKeyError(Exception):
def __init__(self, key_type, index_expressions):
super().__init__()
self.key_type = key_type
self.index_expressions = index_expressions
class Ascending:
def __init__(self, col):
self.col = col
def __eq__(self, other):
return isinstance(other, Ascending) and self.col == other.col
def __ne__(self, other):
return not self == other
class Descending:
def __init__(self, col):
self.col = col
def __eq__(self, other):
return isinstance(other, Descending) and self.col == other.col
def __ne__(self, other):
return not self == other
@typecheck(col=oneof(Expression, str))
def asc(col):
"""Sort by `col` ascending."""
return Ascending(col)
@typecheck(col=oneof(Expression, str))
def desc(col):
"""Sort by `col` descending."""
return Descending(col)
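# Editor's sketch (not part of the original source): `asc` and `desc` wrap a field
# (or field name) for use with ordering methods such as Table.order_by, e.g.
#
#     sorted_ht = table1.order_by(hl.asc(table1.ID), hl.desc('HT'))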
class ExprContainer:
# this can only grow as big as the object dir, so no need to worry about memory leak
_warned_about = set()
def __init__(self):
self._fields: Dict[str, Expression] = {}
self._fields_inverse: Dict[Expression, str] = {}
self._dir = set(dir(self))
super(ExprContainer, self).__init__()
def _set_field(self, key, value):
assert key not in self._fields_inverse, key
self._fields[key] = value
self._fields_inverse[value] = key
# key is in __dir for methods
# key is in __dict__ for private class fields
if key in self._dir or key in self.__dict__:
if key not in ExprContainer._warned_about:
ExprContainer._warned_about.add(key)
warning(f"Name collision: field {repr(key)} already in object dict. "
f"\n This field must be referenced with __getitem__ syntax: obj[{repr(key)}]")
else:
self.__dict__[key] = value
def _get_field(self, item) -> Expression:
if item in self._fields:
return self._fields[item]
raise LookupError(get_nice_field_error(self, item))
def __iter__(self):
raise TypeError(f"'{self.__class__.__name__}' object is not iterable")
def __delattr__(self, item):
if not item[0] == '_':
raise NotImplementedError(f"'{self.__class__.__name__}' object is not mutable")
def __setattr__(self, key, value):
if not key[0] == '_':
raise NotImplementedError(f"'{self.__class__.__name__}' object is not mutable")
self.__dict__[key] = value
def __getattr__(self, item):
if item in self.__dict__:
return self.__dict__[item]
raise AttributeError(get_nice_attr_error(self, item))
def _copy_fields_from(self, other: 'ExprContainer'):
self._fields = other._fields
self._fields_inverse = other._fields_inverse
class GroupedTable(ExprContainer):
"""Table grouped by row that can be aggregated into a new table.
There are only two operations on a grouped table, :meth:`.GroupedTable.partition_hint`
and :meth:`.GroupedTable.aggregate`.
"""
def __init__(self, parent: 'Table', key_expr):
super(GroupedTable, self).__init__()
self._key_expr = key_expr
self._parent = parent
self._npartitions = None
self._buffer_size = 50
self._copy_fields_from(parent)
def partition_hint(self, n: int) -> 'GroupedTable':
"""Set the target number of partitions for aggregation.
Examples
--------
Use `partition_hint` in a :meth:`.Table.group_by` / :meth:`.GroupedTable.aggregate`
pipeline:
>>> table_result = (table1.group_by(table1.ID)
... .partition_hint(5)
... .aggregate(meanX = hl.agg.mean(table1.X), sumZ = hl.agg.sum(table1.Z)))
Notes
-----
Until Hail's query optimizer is intelligent enough to sample records at all
stages of a pipeline, it can be necessary in some places to provide some
explicit hints.
The default number of partitions for :meth:`.GroupedTable.aggregate` is the
number of partitions in the upstream table. If the aggregation greatly
reduces the size of the table, providing a hint for the target number of
partitions can accelerate downstream operations.
Parameters
----------
n : int
Number of partitions.
Returns
-------
:class:`.GroupedTable`
Same grouped table with a partition hint.
"""
self._npartitions = n
return self
def _set_buffer_size(self, n: int) -> 'GroupedTable':
"""Set the map-side combiner buffer size (in rows).
Parameters
----------
n : int
Buffer size.
Returns
-------
:class:`.GroupedTable`
Same grouped table with a buffer size.
"""
if n <= 0:
raise ValueError(n)
self._buffer_size = n
return self
@typecheck_method(named_exprs=expr_any)
def aggregate(self, **named_exprs) -> 'Table':
"""Aggregate by group, used after :meth:`.Table.group_by`.
Examples
--------
Compute the mean value of `X` and the sum of `Z` per unique `ID`:
>>> table_result = (table1.group_by(table1.ID)
... .aggregate(meanX = hl.agg.mean(table1.X), sumZ = hl.agg.sum(table1.Z)))
Group by a height bin and compute sex ratio per bin:
>>> table_result = (table1.group_by(height_bin = table1.HT // 20)
... .aggregate(fraction_female = hl.agg.fraction(table1.SEX == 'F')))
Notes
-----
The resulting table has a key field for each group and a value field for
each aggregation. The names of the aggregation expressions must be
distinct from the names of the groups.
Parameters
----------
named_exprs : varargs of :class:`.Expression`
Aggregation expressions.
Returns
-------
:class:`.Table`
Aggregated table.
"""
for name, expr in named_exprs.items():
analyze(f'GroupedTable.aggregate: ({repr(name)})', expr, self._parent._global_indices, {self._parent._row_axis})
if not named_exprs.keys().isdisjoint(set(self._key_expr)):
intersection = set(named_exprs.keys()) & set(self._key_expr)
raise ValueError(
                f'GroupedTable.aggregate: Group names and aggregation expression names overlap: {intersection}')
base, _ = self._parent._process_joins(self._key_expr, *named_exprs.values())
key_struct = self._key_expr
return Table(ir.TableKeyByAndAggregate(base._tir,
hl.struct(**named_exprs)._ir,
key_struct._ir,
self._npartitions,
self._buffer_size))
class Table(ExprContainer):
"""Hail's distributed implementation of a dataframe or SQL table.
Use :func:`.read_table` to read a table that was written with
:meth:`.Table.write`. Use :meth:`.to_spark` and :meth:`.Table.from_spark`
to inter-operate with PySpark's
`SQL <https://spark.apache.org/docs/latest/sql-programming-guide.html>`__ and
`machine learning <https://spark.apache.org/docs/latest/ml-guide.html>`__
functionality.
Examples
--------
The examples below use ``table1`` and ``table2``, which are imported
from text files using :func:`.import_table`.
>>> table1 = hl.import_table('data/kt_example1.tsv', impute=True, key='ID')
>>> table1.show()
.. code-block:: text
+-------+-------+-----+-------+-------+-------+-------+-------+
| ID | HT | SEX | X | Z | C1 | C2 | C3 |
+-------+-------+-----+-------+-------+-------+-------+-------+
| int32 | int32 | str | int32 | int32 | int32 | int32 | int32 |
+-------+-------+-----+-------+-------+-------+-------+-------+
| 1 | 65 | M | 5 | 4 | 2 | 50 | 5 |
| 2 | 72 | M | 6 | 3 | 2 | 61 | 1 |
| 3 | 70 | F | 7 | 3 | 10 | 81 | -5 |
| 4 | 60 | F | 8 | 2 | 11 | 90 | -10 |
+-------+-------+-----+-------+-------+-------+-------+-------+
>>> table2 = hl.import_table('data/kt_example2.tsv', impute=True, key='ID')
>>> table2.show()
.. code-block:: text
+-------+-------+--------+
| ID | A | B |
+-------+-------+--------+
| int32 | int32 | str |
+-------+-------+--------+
| 1 | 65 | cat |
| 2 | 72 | dog |
| 3 | 70 | mouse |
| 4 | 60 | rabbit |
+-------+-------+--------+
Define new annotations:
>>> height_mean_m = 68
>>> height_sd_m = 3
>>> height_mean_f = 65
>>> height_sd_f = 2.5
>>>
>>> def get_z(height, sex):
... return hl.cond(sex == 'M',
... (height - height_mean_m) / height_sd_m,
... (height - height_mean_f) / height_sd_f)
>>>
>>> table1 = table1.annotate(height_z = get_z(table1.HT, table1.SEX))
>>> table1 = table1.annotate_globals(global_field_1 = [1, 2, 3])
Filter rows of the table:
>>> table2 = table2.filter(table2.B != 'rabbit')
Compute global aggregation statistics:
>>> t1_stats = table1.aggregate(hl.struct(mean_c1 = hl.agg.mean(table1.C1),
... mean_c2 = hl.agg.mean(table1.C2),
... stats_c3 = hl.agg.stats(table1.C3)))
>>> print(t1_stats)
Group by a field and aggregate to produce a new table:
>>> table3 = (table1.group_by(table1.SEX)
... .aggregate(mean_height_data = hl.agg.mean(table1.HT)))
>>> table3.show()
Join tables together inside an annotation expression:
>>> table2 = table2.key_by('ID')
>>> table1 = table1.annotate(B = table2[table1.ID].B)
>>> table1.show()
"""
@staticmethod
def _from_java(jtir):
return Table(ir.JavaTable(jtir))
def __init__(self, tir):
super(Table, self).__init__()
self._tir = tir
self._type = self._tir.typ
self._row_axis = 'row'
self._global_indices = Indices(axes=set(), source=self)
self._row_indices = Indices(axes={self._row_axis}, source=self)
self._global_type = self._type.global_type
self._row_type = self._type.row_type
self._globals = construct_reference('global', self._global_type, indices=self._global_indices)
self._row = construct_reference('row', self._row_type, indices=self._row_indices)
self._indices_from_ref = {'global': self._global_indices,
'row': self._row_indices}
self._key = hl.struct(
**{k: self._row[k] for k in self._type.row_key})
for k, v in itertools.chain(self._globals.items(),
self._row.items()):
self._set_field(k, v)
@property
def _schema(self) -> ttable:
return ttable(self._global_type, self._row_type, list(self._key))
def __getitem__(self, item):
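        # A string selects a top-level field; any other argument is treated
        # as an index expression, i.e. a join against this table's key.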
if isinstance(item, str):
return self._get_field(item)
try:
return self.index(*wrap_to_tuple(item))
except TypeError as e:
raise TypeError("Table.__getitem__: invalid index argument(s)\n"
" Usage 1: field selection: ht['field']\n"
" Usage 2: Left distinct join: ht[ht2.key] or ht[ht2.field1, ht2.field2]") from e
@property
def key(self) -> StructExpression:
"""Row key struct.
Examples
--------
List of key field names:
>>> list(table1.key)
['ID']
Number of key fields:
>>> len(table1.key)
1
Returns
-------
:class:`.StructExpression`
"""
return self._key
@property
def _value(self) -> 'StructExpression':
return self.row.drop(*self.key)
def n_partitions(self):
"""Returns the number of partitions in the table.
Returns
-------
:obj:`int`
"""
return Env.backend().execute(ir.TableToValueApply(self._tir, {'name': 'NPartitionsTable'}))
def count(self):
"""Count the number of rows in the table.
Examples
--------
>>> table1.count()
4
Returns
-------
:obj:`int`
"""
return Env.backend().execute(ir.TableCount(self._tir))
def _force_count(self):
return Env.backend().execute(ir.TableToValueApply(self._tir, {'name': 'ForceCountTable'}))
@typecheck_method(caller=str,
row=expr_struct())
def _select(self, caller, row) -> 'Table':
analyze(caller, row, self._row_indices)
base, cleanup = self._process_joins(row)
return cleanup(Table(ir.TableMapRows(base._tir, row._ir)))
@typecheck_method(caller=str, s=expr_struct())
def _select_globals(self, caller, s) -> 'Table':
base, cleanup = self._process_joins(s)
analyze(caller, s, self._global_indices)
return cleanup(Table(ir.TableMapGlobals(base._tir, s._ir)))
@classmethod
@typecheck_method(rows=anytype,
schema=nullable(hail_type),
key=table_key_type,
n_partitions=nullable(int))
def parallelize(cls, rows, schema=None, key=None, n_partitions=None) -> 'Table':
"""Parallelize a local array of structs into a distributed table.
Examples
--------
Parallelize a list of dictionaries:
>>> a = [ {'a': 5, 'b': 10}, {'a': 0, 'b': 200} ]
>>> table = hl.Table.parallelize(hl.literal(a, 'array<struct{a: int, b: int}>'))
>>> table.show()
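        A key and a desired number of partitions may also be given at
        construction time (a minimal sketch reusing the rows above):
        >>> table = hl.Table.parallelize(
        ...     hl.literal(a, 'array<struct{a: int, b: int}>'),
        ...     key='a', n_partitions=2)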
Warning
-------
Parallelizing very large local arrays will be slow.
Parameters
----------
rows
List of row values, or expression of type ``array<struct{...}>``.
        schema : str or :class:`.HailType`, optional
Value type.
        key : Union[str, List[str]], optional
Key field(s).
n_partitions : int, optional
Returns
-------
:class:`.Table`
"""
rows = to_expr(rows, dtype=hl.tarray(schema) if schema is not None else None)
if not isinstance(rows.dtype.element_type, tstruct):
raise TypeError("'parallelize' expects an array with element type 'struct', found '{}'"
.format(rows.dtype))
table = Table(ir.TableParallelize(ir.MakeStruct([
('rows', rows._ir),
('global', ir.MakeStruct([]))]), n_partitions))
if key is not None:
table = table.key_by(*key)
return table
@typecheck_method(keys=oneof(str, expr_any),
named_keys=expr_any)
def key_by(self, *keys, **named_keys) -> 'Table':
"""Key table by a new set of fields.
Examples
--------
Assume `table1` is a :class:`.Table` with three fields: `C1`, `C2`
and `C3`.
Changing key fields:
>>> table_result = table1.key_by('C2', 'C3')
This keys the table by 'C2' and 'C3', preserving old keys as value fields.
>>> table_result = table1.key_by(table1.C1)
This keys the table by 'C1', preserving old keys as value fields.
>>> table_result = table1.key_by(C1 = table1.C2, foo = table1.C1)
This keys the table by fields named 'C1' and 'foo', which have values
corresponding to the original 'C2' and 'C1' fields respectively. The original
'C1' field has been overwritten by the new assignment, but the original
'C2' field is preserved as a value field.
Remove key:
>>> table_result = table1.key_by()
Notes
-----
This method is used to specify all the fields of a new row key. The old
key fields may be overwritten by newly-assigned fields, as described in
:meth:`.Table.annotate`. If not overwritten, they are preserved as non-key
fields.
See :meth:`.Table.select` for more information about how to define new
key fields.
Parameters
----------
keys : varargs of type :obj:`str`
Field(s) to key by.
Returns
-------
:class:`.Table`
Table with a new key.
"""
key_fields, computed_keys = get_key_by_exprs("Table.key_by", keys, named_keys, self._row_indices)
if not computed_keys:
return Table(ir.TableKeyBy(self._tir, key_fields))
new_row = self.row.annotate(**computed_keys)
base, cleanup = self._process_joins(new_row)
return cleanup(Table(
ir.TableKeyBy(
ir.TableMapRows(
ir.TableKeyBy(base._tir, []),
new_row._ir),
list(key_fields))))
@typecheck_method(keys=oneof(str, expr_any),
named_keys=expr_any)
def _key_by_assert_sorted(self, *keys, **named_keys) -> 'Table':
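        # Like key_by, but asserts that the rows are already sorted by the new
        # key, so no sort or shuffle is performed (is_sorted=True).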
key_fields, computed_keys = get_key_by_exprs("Table.key_by", keys, named_keys, self._row_indices)
if not computed_keys:
return Table(ir.TableKeyBy(self._tir, key_fields, is_sorted=True))
else:
new_row = self.row.annotate(**computed_keys)
base, cleanup = self._process_joins(new_row)
return cleanup(Table(
ir.TableKeyBy(
ir.TableMapRows(
ir.TableKeyBy(base._tir, []),
new_row._ir),
list(key_fields),
is_sorted=True)))
@typecheck_method(named_exprs=expr_any)
def annotate_globals(self, **named_exprs) -> 'Table':
"""Add new global fields.
Examples
--------
Add a new global field:
>>> table_result = table1.annotate_globals(pops = ['EUR', 'AFR', 'EAS', 'SAS'])
Note
----
This method does not support aggregation.
Parameters
----------
named_exprs : varargs of :class:`.Expression`
Annotation expressions.
Returns
-------
:class:`.Table`
Table with new global field(s).
"""
caller = 'Table.annotate_globals'
check_annotate_exprs(caller, named_exprs, self._global_indices)
return self._select_globals('Table.annotate_globals', self.globals.annotate(**named_exprs))
def select_globals(self, *exprs, **named_exprs) -> 'Table':
"""Select existing global fields or create new fields by name, dropping the rest.
Examples
--------
Select one existing field and compute a new one:
>>> table_result = table1.select_globals(table1.global_field_1,
... another_global=['AFR', 'EUR', 'EAS', 'AMR', 'SAS'])
Notes
-----
This method creates new global fields. If a created field shares its name
with a row-indexed field of the table, the method will fail.
Note
----
See :meth:`.Table.select` for more information about using ``select`` methods.
Note
----
This method does not support aggregation.
Parameters
----------
exprs : variable-length args of :obj:`str` or :class:`.Expression`
Arguments that specify field names or nested field reference expressions.
named_exprs : keyword args of :class:`.Expression`
Field names and the expressions to compute them.
Returns
-------
:class:`.Table`
Table with specified global fields.
"""
caller = 'Table.select_globals'
new_globals = get_select_exprs(caller,
exprs,
named_exprs,
self._global_indices,
self._globals)
return self._select_globals(caller, new_globals)
@typecheck_method(named_exprs=expr_any)
def transmute_globals(self, **named_exprs) -> 'Table':
"""Similar to :meth:`.Table.annotate_globals`, but drops referenced fields.
Notes
-----
This method adds new global fields according to `named_exprs`, and
drops all global fields referenced in those expressions. See
:meth:`.Table.transmute` for full documentation on how transmute
methods work.
See Also
--------
:meth:`.Table.transmute`, :meth:`.Table.select_globals`,
:meth:`.Table.annotate_globals`
Parameters
----------
named_exprs : keyword args of :class:`.Expression`
Annotation expressions.
Returns
-------
:class:`.Table`
"""
caller = 'Table.transmute_globals'
check_annotate_exprs(caller, named_exprs, self._global_indices)
fields_referenced = extract_refs_by_indices(named_exprs.values(), self._global_indices) - set(named_exprs.keys())
return self._select_globals(caller,
self.globals.annotate(**named_exprs).drop(*fields_referenced))
@typecheck_method(named_exprs=expr_any)
def transmute(self, **named_exprs) -> 'Table':
"""Add new fields and drop fields referenced.
Examples
--------
        Consume fields `A` and `E` (referenced via `E.B`) to produce a new field `F`:
>>> table4.show()
+-------+------+---------+-------+-------+-------+-------+-------+
| A | B.B0 | B.B1 | C | D.cat | D.dog | E.A | E.B |
+-------+------+---------+-------+-------+-------+-------+-------+
| int32 | bool | str | bool | int32 | int32 | int32 | int32 |
+-------+------+---------+-------+-------+-------+-------+-------+
| 32 | true | "hello" | false | 5 | 7 | 5 | 7 |
+-------+------+---------+-------+-------+-------+-------+-------+
>>> table_result = table4.transmute(F=table4.A + 2 * table4.E.B)
>>> table_result.show()
+------+---------+-------+-------+-------+-------+
| B.B0 | B.B1 | C | D.cat | D.dog | F |
+------+---------+-------+-------+-------+-------+
| bool | str | bool | int32 | int32 | int32 |
+------+---------+-------+-------+-------+-------+
| true | "hello" | false | 5 | 7 | 46 |
+------+---------+-------+-------+-------+-------+
Notes
-----
This method functions to create new row-indexed fields and consume
fields found in the expressions in `named_exprs`.
All row-indexed top-level fields found in an expression are dropped
after the new fields are created.
Note
----
:meth:`transmute` will not drop key fields.
Warning
-------
References to fields inside a top-level struct will remove the entire
struct, as field `E` was removed in the example above since `E.B` was
referenced.
Note
----
This method does not support aggregation.
Parameters
----------
named_exprs : keyword args of :class:`.Expression`
New field expressions.
Returns
-------
:class:`.Table`
Table with transmuted fields.
"""
caller = "Table.transmute"
check_annotate_exprs(caller, named_exprs, self._row_indices)
fields_referenced = extract_refs_by_indices(named_exprs.values(), self._row_indices) - set(named_exprs.keys())
fields_referenced -= set(self.key)
return self._select(caller, self.row.annotate(**named_exprs).drop(*fields_referenced))
@typecheck_method(named_exprs=expr_any)
def annotate(self, **named_exprs) -> 'Table':
"""Add new fields.
Examples
--------
Add field `Y` by computing the square of `X`:
>>> table_result = table1.annotate(Y = table1.X ** 2)
Add multiple fields simultaneously:
>>> table_result = table1.annotate(A = table1.X / 2,
... B = table1.X + 21)
Parameters
----------
named_exprs : keyword args of :class:`.Expression`
Expressions for new fields.
Returns
-------
:class:`.Table`
Table with new fields.
"""
caller = "Table.annotate"
check_annotate_exprs(caller, named_exprs, self._row_indices)
return self._select(caller, self.row.annotate(**named_exprs))
@typecheck_method(expr=expr_bool,
keep=bool)
def filter(self, expr, keep=True) -> 'Table':
"""Filter rows.
Examples
--------
Keep rows where ``C1`` equals 5:
>>> table_result = table1.filter(table1.C1 == 5)
Remove rows where ``C1`` equals 10:
>>> table_result = table1.filter(table1.C1 == 10, keep=False)
Notes
-----
The expression `expr` will be evaluated for every row of the table. If
`keep` is ``True``, then rows where `expr` evaluates to ``True`` will be
kept (the filter removes the rows where the predicate evaluates to
``False``). If `keep` is ``False``, then rows where `expr` evaluates to
``True`` will be removed (the filter keeps the rows where the predicate
evaluates to ``False``).
Warning
-------
When `expr` evaluates to missing, the row will be removed regardless of `keep`.
Note
----
This method does not support aggregation.
Parameters
----------
expr : bool or :class:`.BooleanExpression`
Filter expression.
keep : bool
Keep rows where `expr` is true.
Returns
-------
:class:`.Table`
Filtered table.
"""
analyze('Table.filter', expr, self._row_indices)
base, cleanup = self._process_joins(expr)
return cleanup(Table(ir.TableFilter(base._tir, ir.filter_predicate_with_keep(expr._ir, keep))))
@typecheck_method(exprs=oneof(Expression, str),
named_exprs=anytype)
def select(self, *exprs, **named_exprs) -> 'Table':
"""Select existing fields or create new fields by name, dropping the rest.
Examples
--------
Select a few old fields and compute a new one:
>>> table_result = table1.select(table1.C1, Y=table1.Z - table1.X)
Notes
-----
This method creates new row-indexed fields. If a created field shares its name
with a global field of the table, the method will fail.
Note
----
**Using select**
Select and its sibling methods (:meth:`.Table.select_globals`,
:meth:`.MatrixTable.select_globals`, :meth:`.MatrixTable.select_rows`,
:meth:`.MatrixTable.select_cols`, and :meth:`.MatrixTable.select_entries`) accept
both variable-length (``f(x, y, z)``) and keyword (``f(a=x, b=y, c=z)``)
arguments.
Select methods will always preserve the key along that axis; e.g. for
        :meth:`.Table.select`, the table key will always be kept. To modify the
key, use :meth:`.key_by`.
Variable-length arguments can be either strings or expressions that reference a
(possibly nested) field of the table. Keyword arguments can be arbitrary
expressions.
**The following three usages are all equivalent**, producing a new table with
fields `C1` and `C2` of `table1`, and the table key `ID`.
First, variable-length string arguments:
>>> table_result = table1.select('C1', 'C2')
Second, field reference variable-length arguments:
>>> table_result = table1.select(table1.C1, table1.C2)
Last, expression keyword arguments:
>>> table_result = table1.select(C1 = table1.C1, C2 = table1.C2)
Additionally, the variable-length argument syntax also permits nested field
references. Given the following struct field `s`:
>>> table3 = table1.annotate(s = hl.struct(x=table1.X, z=table1.Z))
The following two usages are equivalent, producing a table with one field, `x`.:
>>> table3_result = table3.select(table3.s.x)
>>> table3_result = table3.select(x = table3.s.x)
The keyword argument syntax permits arbitrary expressions:
>>> table_result = table1.select(foo=table1.X ** 2 + 1)
These syntaxes can be mixed together, with the stipulation that all keyword arguments
must come at the end due to Python language restrictions.
>>> table_result = table1.select(table1.X, 'Z', bar = [table1.C1, table1.C2])
Note
----
This method does not support aggregation.
Parameters
----------
exprs : variable-length args of :obj:`str` or :class:`.Expression`
Arguments that specify field names or nested field reference expressions.
named_exprs : keyword args of :class:`.Expression`
Field names and the expressions to compute them.
Returns
-------
:class:`.Table`
Table with specified fields.
"""
row = get_select_exprs('Table.select',
exprs,
named_exprs,
self._row_indices,
self._row)
return self._select('Table.select', row)
@typecheck_method(exprs=oneof(str, Expression))
def drop(self, *exprs) -> 'Table':
"""Drop fields from the table.
Examples
--------
Drop fields `C1` and `C2` using strings:
>>> table_result = table1.drop('C1', 'C2')
Drop fields `C1` and `C2` using field references:
>>> table_result = table1.drop(table1.C1, table1.C2)
Drop a list of fields:
>>> fields_to_drop = ['C1', 'C2']
>>> table_result = table1.drop(*fields_to_drop)
Notes
-----
This method can be used to drop global or row-indexed fields. The arguments
can be either strings (``'field'``), or top-level field references
(``table.field`` or ``table['field']``).
Parameters
----------
exprs : varargs of :obj:`str` or :class:`.Expression`
Names of fields to drop or field reference expressions.
Returns
-------
:class:`.Table`
Table without specified fields.
"""
all_field_exprs = {e: k for k, e in self._fields.items()}
fields_to_drop = set()
for e in exprs:
if isinstance(e, Expression):
if e in all_field_exprs:
fields_to_drop.add(all_field_exprs[e])
else:
raise ExpressionException("method 'drop' expects string field names or top-level field expressions"
" (e.g. table['foo'])")
else:
assert isinstance(e, str)
if e not in self._fields:
raise IndexError("table has no field '{}'".format(e))
fields_to_drop.add(e)
table = self
if any(self._fields[field]._indices == self._global_indices for field in fields_to_drop):
# need to drop globals
table = table._select_globals('drop',
self._globals.drop(*[f for f in table.globals if f in fields_to_drop]))
if any(self._fields[field]._indices == self._row_indices for field in fields_to_drop):
# need to drop row fields
protected_key = set(self._row_indices.protected_key)
for f in fields_to_drop:
check_keys('drop', f, protected_key)
row_fields = set(table.row)
to_drop = [f for f in fields_to_drop if f in row_fields]
table = table._select('drop', table.row.drop(*to_drop))
return table
@typecheck_method(output=str,
types_file=nullable(str),
header=bool,
parallel=nullable(ir.ExportType.checker),
delimiter=str)
def export(self, output, types_file=None, header=True, parallel=None, delimiter='\t'):
"""Export to a TSV file.
Examples
--------
Export to a tab-separated file:
>>> table1.export('output/table1.tsv.bgz')
Note
----
It is highly recommended to export large files with a ``.bgz`` extension,
which will use a block gzipped compression codec. These files can be
read natively with any Hail method, as well as with Python's ``gzip.open``
and R's ``read.table``.
Warning
-------
Do not export to a path that is being read from in the same pipeline.
Parameters
----------
output : :obj:`str`
URI at which to write exported file.
types_file : :obj:`str`, optional
URI at which to write file containing field type information.
header : :obj:`bool`
Include a header in the file.
parallel : :obj:`str`, optional
If None, a single file is produced, otherwise a
folder of file shards is produced. If 'separate_header',
the header file is output separately from the file shards. If
            'header_per_shard', each file shard has a header. Note that
            exporting to a single file (``parallel=None``) is slower.
delimiter : :obj:`str`
Field delimiter.
"""
parallel = ir.ExportType.default(parallel)
Env.backend().execute(
ir.TableWrite(self._tir, ir.TableTextWriter(output, types_file, header, parallel, delimiter)))
def group_by(self, *exprs, **named_exprs) -> 'GroupedTable':
"""Group by a new key for use with :meth:`.GroupedTable.aggregate`.
Examples
--------
Compute the mean value of `X` and the sum of `Z` per unique `ID`:
>>> table_result = (table1.group_by(table1.ID)
... .aggregate(meanX = hl.agg.mean(table1.X), sumZ = hl.agg.sum(table1.Z)))
Group by a height bin and compute sex ratio per bin:
>>> table_result = (table1.group_by(height_bin = table1.HT // 20)
... .aggregate(fraction_female = hl.agg.fraction(table1.SEX == 'F')))
Notes
-----
This function is always followed by :meth:`.GroupedTable.aggregate`. Follow the
link for documentation on the aggregation step.
Note
----
**Using group_by**
**group_by** and its sibling methods (:meth:`.MatrixTable.group_rows_by` and
:meth:`.MatrixTable.group_cols_by`) accept both variable-length (``f(x, y, z)``)
and keyword (``f(a=x, b=y, c=z)``) arguments.
Variable-length arguments can be either strings or expressions that reference a
(possibly nested) field of the table. Keyword arguments can be arbitrary
expressions.
**The following three usages are all equivalent**, producing a
:class:`.GroupedTable` grouped by fields `C1` and `C2` of `table1`.
First, variable-length string arguments:
>>> table_result = (table1.group_by('C1', 'C2')
... .aggregate(meanX = hl.agg.mean(table1.X)))
Second, field reference variable-length arguments:
>>> table_result = (table1.group_by(table1.C1, table1.C2)
... .aggregate(meanX = hl.agg.mean(table1.X)))
Last, expression keyword arguments:
>>> table_result = (table1.group_by(C1 = table1.C1, C2 = table1.C2)
... .aggregate(meanX = hl.agg.mean(table1.X)))
Additionally, the variable-length argument syntax also permits nested field
references. Given the following struct field `s`:
>>> table3 = table1.annotate(s = hl.struct(x=table1.X, z=table1.Z))
The following two usages are equivalent, grouping by one field, `x`:
>>> table_result = (table3.group_by(table3.s.x)
... .aggregate(meanX = hl.agg.mean(table3.X)))
>>> table_result = (table3.group_by(x = table3.s.x)
... .aggregate(meanX = hl.agg.mean(table3.X)))
The keyword argument syntax permits arbitrary expressions:
>>> table_result = (table1.group_by(foo=table1.X ** 2 + 1)
... .aggregate(meanZ = hl.agg.mean(table1.Z)))
These syntaxes can be mixed together, with the stipulation that all keyword arguments
must come at the end due to Python language restrictions.
>>> table_result = (table1.group_by(table1.C1, 'C2', height_bin = table1.HT // 20)
... .aggregate(meanX = hl.agg.mean(table1.X)))
Note
----
This method does not support aggregation in key expressions.
Arguments
---------
exprs : varargs of type str or :class:`.Expression`
Field names or field reference expressions.
named_exprs : keyword args of type :class:`.Expression`
Field names and expressions to compute them.
Returns
-------
:class:`.GroupedTable`
Grouped table; use :meth:`.GroupedTable.aggregate` to complete the aggregation.
"""
key, computed_key = get_key_by_exprs('Table.group_by',
exprs,
named_exprs,
self._row_indices,
override_protected_indices={self._global_indices})
return GroupedTable(self, self.row.annotate(**computed_key).select(*key))
@typecheck_method(expr=expr_any, _localize=bool)
def aggregate(self, expr, _localize=True):
"""Aggregate over rows into a local value.
Examples
--------
Aggregate over rows:
>>> table1.aggregate(hl.struct(fraction_male=hl.agg.fraction(table1.SEX == 'M'),
... mean_x=hl.agg.mean(table1.X)))
Struct(fraction_male=0.5, mean_x=6.5)
Note
----
This method supports (and expects!) aggregation over rows.
Parameters
----------
expr : :class:`.Expression`
Aggregation expression.
Returns
-------
any
Aggregated value dependent on `expr`.
"""
expr = to_expr(expr)
base, _ = self._process_joins(expr)
analyze('Table.aggregate', expr, self._global_indices, {self._row_axis})
agg_ir = ir.TableAggregate(base._tir, expr._ir)
if _localize:
return Env.backend().execute(agg_ir)
return construct_expr(ir.LiftMeOut(agg_ir), expr.dtype)
@typecheck_method(output=str,
overwrite=bool,
stage_locally=bool,
_codec_spec=nullable(str),
_read_if_exists=bool,
_intervals=nullable(sequenceof(anytype)),
_filter_intervals=bool)
def checkpoint(self, output: str, overwrite: bool = False, stage_locally: bool = False,
_codec_spec: Optional[str] = None, _read_if_exists: bool = False,
_intervals=None, _filter_intervals=False) -> 'Table':
"""Checkpoint the table to disk by writing and reading.
Parameters
----------
output : str
Path at which to write.
stage_locally: bool
If ``True``, major output will be written to temporary local storage
            before being copied to ``output``.
overwrite : bool
If ``True``, overwrite an existing file at the destination.
Returns
-------
        :class:`.Table`
.. include:: _templates/write_warning.rst
Notes
-----
An alias for :meth:`write` followed by :func:`.read_table`. It is
possible to read the file at this path later with :func:`.read_table`.
Examples
--------
>>> table1 = table1.checkpoint('output/table_checkpoint.ht')
"""
if not _read_if_exists or not hl.hadoop_exists(f'{output}/_SUCCESS'):
self.write(output=output, overwrite=overwrite, stage_locally=stage_locally, _codec_spec=_codec_spec)
return hl.read_table(output, _intervals=_intervals, _filter_intervals=_filter_intervals)
@typecheck_method(output=str,
overwrite=bool,
stage_locally=bool,
_codec_spec=nullable(str))
def write(self, output: str, overwrite=False, stage_locally: bool = False,
_codec_spec: Optional[str] = None):
"""Write to disk.
Examples
--------
>>> table1.write('output/table1.ht')
.. include:: _templates/write_warning.rst
Parameters
----------
output : str
Path at which to write.
stage_locally: bool
If ``True``, major output will be written to temporary local storage
before being copied to ``output``.
overwrite : bool
If ``True``, overwrite an existing file at the destination.
"""
Env.backend().execute(ir.TableWrite(self._tir, ir.TableNativeWriter(output, overwrite, stage_locally, _codec_spec)))
def _show(self, n, width, truncate, types):
return Table._Show(self, n, width, truncate, types)
class _Show:
def __init__(self, table, n, width, truncate, types):
if n is None or width is None:
import shutil
(columns, lines) = shutil.get_terminal_size((80, 10))
width = width or columns
n = n or min(max(10, (lines - 20)), 100)
self.table = table
self.n = n
self.width = max(width, 8)
if truncate:
self.truncate = min(max(truncate, 4), width - 4)
else:
self.truncate = width - 4
self.types = types
self._data = None
def __str__(self):
return self._ascii_str()
def __repr__(self):
return self.__str__()
def data(self):
if self._data is None:
t = self.table.flatten()
row_dtype = t.row.dtype
t = t.select(**{k: hl._showstr(v) for (k, v) in t.row.items()})
rows, has_more = t._take_n(self.n)
self._data = (rows, has_more, row_dtype)
return self._data
def _repr_html_(self):
return self._html_str()
def _ascii_str(self):
truncate = self.truncate
types = self.types
def trunc(s):
if len(s) > truncate:
return s[:truncate - 3] + "..."
return s
rows, has_more, dtype = self.data()
fields = list(dtype)
trunc_fields = [trunc(f) for f in fields]
n_fields = len(fields)
type_strs = [trunc(str(dtype[f])) for f in fields] if types else [''] * len(fields)
right_align = [hl.expr.types.is_numeric(dtype[f]) for f in fields]
rows = [[trunc(row[f]) for f in fields] for row in rows]
def max_value_width(i):
return max(itertools.chain([0], (len(row[i]) for row in rows)))
column_width = [max(len(trunc_fields[i]), len(type_strs[i]), max_value_width(i)) for i in range(n_fields)]
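            # Split the columns into blocks so that each rendered block fits
            # within the requested display width; each block is printed as a
            # separate ASCII table below the previous one.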
column_blocks = []
start = 0
i = 1
w = column_width[0] + 4 if column_width else 0
while i < n_fields:
w = w + column_width[i] + 3
if w > self.width:
column_blocks.append((start, i))
start = i
w = column_width[i] + 4
i = i + 1
column_blocks.append((start, i))
def format_hline(widths):
if not widths:
return "++\n"
return '+-' + '-+-'.join(['-' * w for w in widths]) + '-+\n'
def pad(v, w, ra):
e = w - len(v)
if ra:
return ' ' * e + v
else:
return v + ' ' * e
def format_line(values, widths, right_align):
if not values:
return "||\n"
values = map(pad, values, widths, right_align)
return '| ' + ' | '.join(values) + ' |\n'
s = ''
first = True
for (start, end) in column_blocks:
if first:
first = False
else:
s += '\n'
block_column_width = column_width[start:end]
block_right_align = right_align[start:end]
hline = format_hline(block_column_width)
s += hline
s += format_line(trunc_fields[start:end], block_column_width, block_right_align)
s += hline
if types:
s += format_line(type_strs[start:end], block_column_width, block_right_align)
s += hline
for row in rows:
row = row[start:end]
s += format_line(row, block_column_width, block_right_align)
s += hline
if has_more:
n_rows = len(rows)
s += f"showing top { n_rows } { 'row' if n_rows == 1 else 'rows' }\n"
return s
def _html_str(self):
import html
types = self.types
rows, has_more, dtype = self.data()
fields = list(dtype)
default_td_style = ('white-space: nowrap; '
'max-width: 500px; '
'overflow: hidden; '
'text-overflow: ellipsis; ')
def format_line(values, extra_style=''):
style = default_td_style + extra_style
return (f'<tr><td style="{style}">' + f'</td><td style="{style}">'.join(values) + '</td></tr>\n')
arranged_field_names = PlacementTree.from_named_type('row', self.table.row.dtype)
s = '<table>'
s += '<thead>'
for header_row in arranged_field_names.to_grid():
s += '<tr>'
div_style = 'text-align: left;'
non_empty_div_style = 'border-bottom: solid 2px #000; padding-bottom: 5px'
for header_cell in header_row:
text, width = header_cell
s += f'<td style="{default_td_style}" colspan="{width}">'
if text is not None:
s += f'<div style="{div_style}{non_empty_div_style}">'
s += text
s += '</div>'
else:
s += f'<div style="{div_style}"></div>'
s += '</td>'
s += '</tr>'
if types:
s += format_line([html.escape(str(dtype[f])) for f in fields],
extra_style="text-align: left;")
s += '</thead><tbody>'
for row in rows:
s += format_line([html.escape(row[f]) for f in row])
s += '</tbody></table>'
if has_more:
n_rows = len(rows)
s += '<p style="background: #fdd; padding: 0.4em;">'
s += f"showing top { n_rows } { plural('row', n_rows) }"
s += '</p>\n'
return s
def _take_n(self, n):
if n < 0:
rows = self.collect()
has_more = False
else:
rows = self.take(n + 1)
has_more = len(rows) > n
rows = rows[:n]
return rows, has_more
@staticmethod
def _hl_format(v, truncate):
return hl._showstr(v, truncate)
@typecheck_method(n=nullable(int), width=nullable(int), truncate=nullable(int), types=bool, handler=nullable(anyfunc), n_rows=nullable(int))
def show(self, n=None, width=None, truncate=None, types=True, handler=None, n_rows=None):
"""Print the first few rows of the table to the console.
Examples
--------
Show the first lines of the table:
>>> table1.show()
+-------+-------+-----+-------+-------+-------+-------+-------+
| ID | HT | SEX | X | Z | C1 | C2 | C3 |
+-------+-------+-----+-------+-------+-------+-------+-------+
| int32 | int32 | str | int32 | int32 | int32 | int32 | int32 |
+-------+-------+-----+-------+-------+-------+-------+-------+
| 1 | 65 | "M" | 5 | 4 | 2 | 50 | 5 |
| 2 | 72 | "M" | 6 | 3 | 2 | 61 | 1 |
| 3 | 70 | "F" | 7 | 3 | 10 | 81 | -5 |
| 4 | 60 | "F" | 8 | 2 | 11 | 90 | -10 |
+-------+-------+-----+-------+-------+-------+-------+-------+
Parameters
----------
n or n_rows : :obj:`int`
Maximum number of rows to show, or negative to show all rows.
width : :obj:`int`
Horizontal width at which to break fields.
truncate : :obj:`int`, optional
Truncate each field to the given number of characters. If
``None``, truncate fields to the given `width`.
types : :obj:`bool`
Print an extra header line with the type of each field.
handler : Callable[[str], Any]
Handler function for data string.
"""
if n_rows is not None and n is not None:
raise ValueError(f'specify one of n_rows or n, received {n_rows} and {n}')
if n_rows is not None:
n = n_rows
del n_rows
if handler is None:
handler = hl.utils.default_handler()
handler(self._show(n, width, truncate, types))
def index(self, *exprs, all_matches=False) -> 'Expression':
"""Expose the row values as if looked up in a dictionary, indexing
with `exprs`.
Examples
--------
In the example below, both `table1` and `table2` are keyed by one
field `ID` of type ``int``.
>>> table_result = table1.select(B = table2.index(table1.ID).B)
>>> table_result.B.show()
+-------+----------+
| ID | B |
+-------+----------+
| int32 | str |
+-------+----------+
| 1 | "cat" |
| 2 | "dog" |
| 3 | "mouse" |
| 4 | "rabbit" |
+-------+----------+
Using `key` as the sole index expression is equivalent to passing all
key fields individually:
>>> table_result = table1.select(B = table2.index(table1.key).B)
It is also possible to use non-key fields or expressions as the index
expressions:
>>> table_result = table1.select(B = table2.index(table1.C1 % 4).B)
>>> table_result.show()
+-------+---------+
| ID | B |
+-------+---------+
| int32 | str |
+-------+---------+
| 1 | "dog" |
| 2 | "dog" |
| 3 | "dog" |
| 4 | "mouse" |
+-------+---------+
Notes
-----
:meth:`.Table.index` is used to expose one table's fields for use in
        expressions involving another table or matrix table's fields. The
result of the method call is a struct expression that is usable in the
same scope as `exprs`, just as if `exprs` were used to look up values of
the table in a dictionary.
The type of the struct expression is the same as the indexed table's
:meth:`.row_value` (the key fields are removed, as they are available
in the form of the index expressions).
Note
----
There is a shorthand syntax for :meth:`.Table.index` using square
brackets (the Python ``__getitem__`` syntax). This syntax is preferred.
>>> table_result = table1.select(B = table2[table1.ID].B)
Parameters
----------
exprs : variable-length args of :class:`.Expression`
Index expressions.
all_matches : bool
Experimental. If ``True``, value of expression is array of all matches.
Returns
-------
:class:`.Expression`
"""
try:
return self._index(*exprs, all_matches=all_matches)
except TableIndexKeyError as err:
raise ExpressionException(f"Key type mismatch: cannot index table with given expressions:\n"
f" Table key: {', '.join(str(t) for t in err.key_type.values()) or '<<<empty key>>>'}\n"
f" Index Expressions: {', '.join(str(e.dtype) for e in err.index_expressions)}")
@staticmethod
def _maybe_truncate_for_flexindex(indexer, indexee_dtype):
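        # Coerce `indexer` into something usable against a key of type
        # `indexee_dtype`: wrap bare expressions in a tuple, then look for an
        # exact match, a prefix of the indexer covering the whole key, or a
        # single interval key whose point type matches; return the (possibly
        # truncated) indexer, or None if nothing matches.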
if not len(indexee_dtype) > 0:
raise ValueError('Must have non-empty key to index')
if not isinstance(indexer.dtype, (hl.tstruct, hl.ttuple)):
indexer = hl.tuple([indexer])
matching_prefix = 0
for x, y in zip(indexer.dtype.types, indexee_dtype.types):
if x != y:
break
matching_prefix += 1
prefix_match = matching_prefix == len(indexee_dtype)
direct_match = prefix_match and \
len(indexer) == len(indexee_dtype)
prefix_interval_match = len(indexee_dtype) == 1 and \
isinstance(indexee_dtype[0], hl.tinterval) and \
indexer.dtype[0] == indexee_dtype[0].point_type
direct_interval_match = prefix_interval_match and \
len(indexer) == 1
if direct_match or direct_interval_match:
return indexer
if prefix_match:
return indexer[0:matching_prefix]
if prefix_interval_match:
return indexer[0]
return None
@typecheck_method(indexer=expr_any, all_matches=bool)
def _maybe_flexindex_table_by_expr(self, indexer, all_matches=False):
truncated_indexer = Table._maybe_truncate_for_flexindex(
indexer, self.key.dtype)
if truncated_indexer is not None:
return self.index(truncated_indexer, all_matches=all_matches)
return None
def _index(self, *exprs, all_matches=False) -> 'Expression':
exprs = tuple(exprs)
if not len(exprs) > 0:
raise ValueError('Require at least one expression to index')
non_exprs = list(filter(lambda e: not isinstance(e, Expression), exprs))
if non_exprs:
raise TypeError(f"Index arguments must be expressions, found {non_exprs}")
from hail.matrixtable import MatrixTable
indices, aggregations = unify_all(*exprs)
src = indices.source
if src is None or len(indices.axes) == 0:
# FIXME: this should be OK: table[m.global_index_into_table]
raise ExpressionException('Cannot index with a scalar expression')
is_interval = (len(exprs) == 1
and len(self.key) > 0
and isinstance(self.key[0].dtype, hl.tinterval)
and exprs[0].dtype == self.key[0].dtype.point_type)
if not types_match(list(self.key.values()), list(exprs)):
if (len(exprs) == 1
and isinstance(exprs[0], TupleExpression)):
return self._index(*exprs[0], all_matches=all_matches)
if (len(exprs) == 1
and isinstance(exprs[0], StructExpression)):
return self._index(*exprs[0].values(), all_matches=all_matches)
if not is_interval:
raise TableIndexKeyError(self.key.dtype, exprs)
uid = Env.get_uid()
if all_matches and not is_interval:
return self.collect_by_key(uid).index(*exprs)[uid]
new_schema = self.row_value.dtype
if all_matches:
new_schema = hl.tarray(new_schema)
if isinstance(src, Table):
for e in exprs:
analyze('Table.index', e, src._row_indices)
is_key = len(src.key) >= len(exprs) and all(expr is key_field for expr, key_field in zip(exprs, src.key.values()))
if not is_key:
uids = [Env.get_uid() for i in range(len(exprs))]
all_uids = uids[:]
else:
all_uids = []
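            # The joiner re-keys `left` by temporary fields holding the index
            # expressions when they are not already the key, performs an
            # interval or left-distinct join against this table, and then
            # restores the original key.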
def joiner(left):
if not is_key:
original_key = list(left.key)
left = Table(ir.TableMapRows(left.key_by()._tir,
ir.InsertFields(left._row._ir,
list(zip(uids, [e._ir for e in exprs])),
None))).key_by(*uids)
def rekey_f(t):
return t.key_by(*original_key)
else:
def rekey_f(t):
return t
if is_interval:
left = Table(ir.TableIntervalJoin(left._tir, self._tir, uid, all_matches))
else:
left = Table(ir.TableLeftJoinRightDistinct(left._tir, self._tir, uid))
return rekey_f(left)
all_uids.append(uid)
join_ir = ir.Join(ir.GetField(ir.TopLevelReference('row'), uid),
all_uids,
exprs,
joiner)
return construct_expr(join_ir, new_schema, indices, aggregations)
elif isinstance(src, MatrixTable):
for e in exprs:
analyze('Table.index', e, src._entry_indices)
right = self
# match on indices to determine join type
if indices == src._entry_indices:
raise NotImplementedError('entry-based matrix joins')
elif indices == src._row_indices:
is_subset_row_key = len(exprs) <= len(src.row_key) and all(
expr is key_field for expr, key_field in zip(exprs, src.row_key.values()))
if not (is_subset_row_key or is_interval):
# foreign-key join
foreign_key_annotates = {Env.get_uid(): e for e in exprs}
# contains original key and join key
join_table = src.select_rows(**foreign_key_annotates).rows()
join_table = join_table.key_by(*foreign_key_annotates)
value_uid = Env.get_uid()
join_table = join_table.annotate(**{value_uid: right.index(join_table.key)})
# FIXME: Maybe zip join here?
join_table = join_table.group_by(*src.row_key).aggregate(
**{uid:
hl.dict(hl.agg.collect(hl.tuple([hl.tuple([join_table[f] for f in foreign_key_annotates]),
join_table[value_uid]])))})
def joiner(left: MatrixTable):
return MatrixTable(
ir.MatrixMapRows(
ir.MatrixAnnotateRowsTable(left._mir, join_table._tir, uid),
ir.InsertFields(
ir.Ref('va'),
[(uid, ir.Apply('get', join_table._row_type[uid].value_type,
ir.GetField(ir.GetField(ir.Ref('va'), uid), uid),
ir.MakeTuple([e._ir for e in exprs])))],
None)))
else:
def joiner(left: MatrixTable):
return MatrixTable(ir.MatrixAnnotateRowsTable(left._mir, right._tir, uid, all_matches))
ast = ir.Join(ir.GetField(ir.TopLevelReference('va'), uid),
[uid],
exprs,
joiner)
return construct_expr(ast, new_schema, indices, aggregations)
elif indices == src._col_indices and not (is_interval and all_matches):
all_uids = [uid]
if len(exprs) == len(src.col_key) and all([
exprs[i] is src.col_key[i] for i in range(len(exprs))]):
# key is already correct
def joiner(left):
return MatrixTable(ir.MatrixAnnotateColsTable(left._mir, right._tir, uid))
else:
index_uid = Env.get_uid()
uids = [Env.get_uid() for _ in exprs]
all_uids.append(index_uid)
all_uids.extend(uids)
def joiner(left: MatrixTable):
prev_key = list(src.col_key)
joined = (src
.annotate_cols(**dict(zip(uids, exprs)))
.add_col_index(index_uid)
.key_cols_by(*uids)
.cols()
.select(index_uid)
.join(self, 'inner')
.key_by(index_uid)
.drop(*uids))
result = MatrixTable(ir.MatrixAnnotateColsTable(
(left.add_col_index(index_uid)
.key_cols_by(index_uid)
._mir),
joined._tir,
uid)).key_cols_by(*prev_key)
return result
join_ir = ir.Join(ir.GetField(ir.TopLevelReference('sa'), uid),
all_uids,
exprs,
joiner)
return construct_expr(join_ir, new_schema, indices, aggregations)
else:
raise NotImplementedError()
else:
raise TypeError("Cannot join with expressions derived from '{}'".format(src.__class__))
def index_globals(self) -> 'StructExpression':
"""Return this table's global variables for use in another
expression context.
Examples
--------
>>> table_result = table2.annotate(C = table2.A * table1.index_globals().global_field_1)
Returns
-------
:class:`.StructExpression`
"""
return construct_expr(ir.TableGetGlobals(self._tir), self.globals.dtype)
def _process_joins(self, *exprs) -> 'Table':
return process_joins(self, exprs)
def cache(self) -> 'Table':
"""Persist this table in memory.
Examples
--------
Persist the table in memory:
>>> table = table.cache() # doctest: +SKIP
Notes
-----
This method is an alias for :func:`persist("MEMORY_ONLY") <hail.Table.persist>`.
Returns
-------
:class:`.Table`
Cached table.
"""
return self.persist('MEMORY_ONLY')
@typecheck_method(storage_level=storage_level)
def persist(self, storage_level='MEMORY_AND_DISK') -> 'Table':
"""Persist this table in memory or on disk.
Examples
--------
Persist the table to both memory and disk:
>>> table = table.persist() # doctest: +SKIP
Notes
-----
The :meth:`.Table.persist` and :meth:`.Table.cache` methods store the
current table on disk or in memory temporarily to avoid redundant computation
and improve the performance of Hail pipelines. This method is not a substitution
for :meth:`.Table.write`, which stores a permanent file.
Most users should use the "MEMORY_AND_DISK" storage level. See the `Spark
documentation
<http://spark.apache.org/docs/latest/programming-guide.html#rdd-persistence>`__
for a more in-depth discussion of persisting data.
Parameters
----------
storage_level : str
Storage level. One of: NONE, DISK_ONLY,
DISK_ONLY_2, MEMORY_ONLY, MEMORY_ONLY_2, MEMORY_ONLY_SER,
MEMORY_ONLY_SER_2, MEMORY_AND_DISK, MEMORY_AND_DISK_2,
MEMORY_AND_DISK_SER, MEMORY_AND_DISK_SER_2, OFF_HEAP
Returns
-------
:class:`.Table`
Persisted table.
"""
return Env.backend().persist_table(self, storage_level)
def unpersist(self) -> 'Table':
"""
Unpersists this table from memory/disk.
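        Examples
        --------
        Release the resources held by a previously persisted table (a no-op
        if the table was never persisted):
        >>> table = table.persist() # doctest: +SKIP
        >>> table = table.unpersist() # doctest: +SKIP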
Notes
-----
This function will have no effect on a table that was not previously
persisted.
Returns
-------
:class:`.Table`
Unpersisted table.
"""
return Env.backend().unpersist_table(self)
@typecheck_method(_localize=bool)
def collect(self, _localize=True):
"""Collect the rows of the table into a local list.
Examples
--------
Collect a list of all `X` records:
>>> all_xs = [row['X'] for row in table1.select(table1.X).collect()]
Notes
-----
This method returns a list whose elements are of type :class:`.Struct`. Fields
of these structs can be accessed similarly to fields on a table, using dot
methods (``struct.foo``) or string indexing (``struct['foo']``).
Warning
-------
Using this method can cause out of memory errors. Only collect small tables.
Returns
-------
:obj:`list` of :class:`.Struct`
List of rows.
"""
if len(self.key) > 0:
t = self.order_by(*self.key)
else:
t = self
rows_ir = ir.GetField(ir.TableCollect(t._tir), 'rows')
e = construct_expr(rows_ir, hl.tarray(t.row.dtype))
if _localize:
return Env.backend().execute(e._ir)
else:
return e
def describe(self, handler=print, *, widget=False):
"""Print information about the fields in the table.
Note
----
The `widget` argument is **experimental**.
Parameters
----------
handler : Callable[[str], None]
Handler function for returned string.
widget : bool
Create an interactive IPython widget.
"""
if widget:
from hail.experimental.interact import interact
return interact(self)
def format_type(typ):
return typ.pretty(indent=4).lstrip()
if len(self.globals) == 0:
global_fields = '\n None'
else:
global_fields = ''.join("\n '{name}': {type} ".format(
name=f, type=format_type(t)) for f, t in self.globals.dtype.items())
if len(self.row) == 0:
row_fields = '\n None'
else:
row_fields = ''.join("\n '{name}': {type} ".format(
name=f, type=format_type(t)) for f, t in self.row.dtype.items())
row_key = '[' + ', '.join("'{name}'".format(name=f) for f in self.key) + ']'
s = '----------------------------------------\n' \
'Global fields:{g}\n' \
'----------------------------------------\n' \
'Row fields:{r}\n' \
'----------------------------------------\n' \
'Key: {rk}\n' \
'----------------------------------------'.format(g=global_fields,
rk=row_key,
r=row_fields)
handler(s)
@typecheck_method(name=str)
def add_index(self, name='idx') -> 'Table':
"""Add the integer index of each row as a new row field.
Examples
--------
>>> table_result = table1.add_index()
>>> table_result.show() # doctest: +SKIP_OUTPUT_CHECK
+-------+-------+-----+-------+-------+-------+-------+-------+-------+
| ID | HT | SEX | X | Z | C1 | C2 | C3 | idx |
+-------+-------+-----+-------+-------+-------+-------+-------+-------+
| int32 | int32 | str | int32 | int32 | int32 | int32 | int32 | int64 |
+-------+-------+-----+-------+-------+-------+-------+-------+-------+
| 1 | 65 | M | 5 | 4 | 2 | 50 | 5 | 0 |
| 2 | 72 | M | 6 | 3 | 2 | 61 | 1 | 1 |
| 3 | 70 | F | 7 | 3 | 10 | 81 | -5 | 2 |
| 4 | 60 | F | 8 | 2 | 11 | 90 | -10 | 3 |
+-------+-------+-----+-------+-------+-------+-------+-------+-------+
Notes
-----
This method returns a table with a new field whose name is given by
the `name` parameter, with type :py:data:`.tint64`. The value of this field
is the integer index of each row, starting from 0. Methods that respect
ordering (like :meth:`.Table.take` or :meth:`.Table.export`) will
return rows in order.
This method is also helpful for creating a unique integer index for
rows of a table so that more complex types can be encoded as a simple
number for performance reasons.
Parameters
----------
name : str
Name of index field.
Returns
-------
:class:`.Table`
Table with a new index field.
"""
return self.annotate(**{name: hl.scan.count()})
@typecheck_method(tables=table_type, unify=bool)
def union(self, *tables, unify: bool = False) -> 'Table':
"""Union the rows of multiple tables.
Examples
--------
Take the union of rows from two tables:
>>> union_table = table1.union(other_table)
Notes
-----
If a row appears in more than one table identically, it is duplicated
in the result. All tables must have the same key names and types. They
must also have the same row types, unless the `unify` parameter is
``True``, in which case a field appearing in any table will be included
in the result, with missing values for tables that do not contain the
field. If a field appears in multiple tables with incompatible types,
like arrays and strings, then an error will be raised.
Parameters
----------
tables : varargs of :class:`.Table`
Tables to union.
unify : :obj:`bool`
            Attempt to unify table field types.
Returns
-------
:class:`.Table`
Table with all rows from each component table.
"""
left_key = self.key.dtype
for i, ht, in enumerate(tables):
if left_key != ht.key.dtype:
raise ValueError(f"'union': table {i} has a different key."
f" Expected: {left_key}\n"
f" Table {i}: {ht.key.dtype}")
if not (unify or ht.row.dtype == self.row.dtype):
raise ValueError(f"'union': table {i} has a different row type.\n"
f" Expected: {self.row.dtype}\n"
f" Table {i}: {ht.row.dtype}\n"
f" If the tables have the same fields in different orders, or some\n"
f" common and some unique fields, then the 'unify' parameter may be\n"
f" able to coerce the tables to a common type.")
all_tables = [self]
all_tables.extend(tables)
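        # When `unify` is requested and the row value types differ, compute a
        # unified type for each field across all tables and fill tables that
        # lack a field with missing values of that type.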
if unify and not len(set(ht.row_value.dtype for ht in all_tables)) == 1:
discovered = collections.defaultdict(dict)
for i, ht in enumerate(all_tables):
for field_name in ht.row_value:
discovered[field_name][i] = ht[field_name]
all_fields = [{} for _ in all_tables]
for field_name, expr_dict in discovered.items():
*unified, can_unify = hl.expr.expressions.unify_exprs(*expr_dict.values())
if not can_unify:
raise ValueError(f"cannot unify field {field_name!r}: found fields of types "
f"{[str(t) for t in {e.dtype for e in expr_dict.values()}]}")
unified_map = dict(zip(expr_dict.keys(), unified))
default = hl.null(unified[0].dtype)
for i in range(len(all_tables)):
all_fields[i][field_name] = unified_map.get(i, default)
for i, t in enumerate(all_tables):
all_tables[i] = t.select(**all_fields[i])
return Table(ir.TableUnion([table._tir for table in all_tables]))
@typecheck_method(n=int, _localize=bool)
def take(self, n, _localize=True):
"""Collect the first `n` rows of the table into a local list.
Examples
--------
Take the first three rows:
>>> first3 = table1.take(3)
>>> first3
[Struct(ID=1, HT=65, SEX='M', X=5, Z=4, C1=2, C2=50, C3=5),
Struct(ID=2, HT=72, SEX='M', X=6, Z=3, C1=2, C2=61, C3=1),
Struct(ID=3, HT=70, SEX='F', X=7, Z=3, C1=10, C2=81, C3=-5)]
Notes
-----
This method does not need to look at all the data in the table, and
allows for fast queries of the start of the table.
This method is equivalent to :meth:`.Table.head` followed by
:meth:`.Table.collect`.
Parameters
----------
n : int
Number of rows to take.
Returns
-------
:obj:`list` of :class:`.Struct`
List of row structs.
"""
return self.head(n).collect(_localize)
@typecheck_method(n=int)
def head(self, n) -> 'Table':
"""Subset table to first `n` rows.
Examples
--------
Subset to the first three rows:
>>> table_result = table1.head(3)
>>> table_result.count()
3
Notes
-----
The number of partitions in the new table is equal to the number of
partitions containing the first `n` rows.
Parameters
----------
n : int
Number of rows to include.
Returns
-------
:class:`.Table`
Table including the first `n` rows.
"""
return Table(ir.TableHead(self._tir, n))
@typecheck_method(n=int)
def tail(self, n) -> 'Table':
"""Subset table to last `n` rows.
Examples
--------
Subset to the last three rows:
>>> table_result = table1.tail(3)
>>> table_result.count()
3
Notes
-----
The number of partitions in the new table is equal to the number of
partitions containing the last `n` rows.
Parameters
----------
n : int
Number of rows to include.
Returns
-------
:class:`.Table`
Table including the last `n` rows.
"""
return Table(ir.TableTail(self._tir, n))
@typecheck_method(p=numeric,
seed=nullable(int))
def sample(self, p, seed=None) -> 'Table':
"""Downsample the table by keeping each row with probability ``p``.
Examples
--------
Downsample the table to approximately 1% of its rows.
>>> small_table1 = table1.sample(0.01)
Parameters
----------
p : :obj:`float`
Probability of keeping each row.
seed : :obj:`int`
Random seed.
Returns
-------
:class:`.Table`
Table with approximately ``p * n_rows`` rows.
"""
if not 0 <= p <= 1:
raise ValueError("Requires 'p' in [0,1]. Found p={}".format(p))
return self.filter(hl.rand_bool(p, seed))
@typecheck_method(n=int,
shuffle=bool)
def repartition(self, n, shuffle=True) -> 'Table':
"""Change the number of partitions.
Examples
--------
Repartition to 500 partitions:
>>> table_result = table1.repartition(500)
Notes
-----
Check the current number of partitions with :meth:`.n_partitions`.
The data in a dataset is divided into chunks called partitions, which
may be stored together or across a network, so that each partition may
be read and processed in parallel by available cores. When a table with
:math:`M` rows is first imported, each of the :math:`k` partitions will
contain about :math:`M/k` of the rows. Since each partition has some
computational overhead, decreasing the number of partitions can improve
performance after significant filtering. Since it's recommended to have
at least 2 - 4 partitions per core, increasing the number of partitions
can allow one to take advantage of more cores. Partitions are a core
concept of distributed computation in Spark, see `their documentation
<http://spark.apache.org/docs/latest/programming-guide.html#resilient-distributed-datasets-rdds>`__
for details.
When ``shuffle=True``, Hail does a full shuffle of the data
and creates equal sized partitions. When ``shuffle=False``,
Hail combines existing partitions to avoid a full shuffle.
These algorithms correspond to the `repartition` and
`coalesce` commands in Spark, respectively. In particular,
        when ``shuffle=False``, ``n_partitions`` cannot exceed the current
        number of partitions.
Parameters
----------
n : int
Desired number of partitions.
shuffle : bool
If ``True``, use full shuffle to repartition.
Returns
-------
:class:`.Table`
Repartitioned table.
"""
return Table(ir.TableRepartition(
self._tir, n, ir.RepartitionStrategy.SHUFFLE if shuffle else ir.RepartitionStrategy.COALESCE))
@typecheck_method(max_partitions=int)
def naive_coalesce(self, max_partitions: int) -> 'Table':
"""Naively decrease the number of partitions.
Example
-------
Naively repartition to 10 partitions:
>>> table_result = table1.naive_coalesce(10)
Warning
-------
:meth:`.naive_coalesce` simply combines adjacent partitions to achieve
the desired number. It does not attempt to rebalance, unlike
:meth:`.repartition`, so it can produce a heavily unbalanced dataset. An
unbalanced dataset can be inefficient to operate on because the work is
not evenly distributed across partitions.
Parameters
----------
max_partitions : int
Desired number of partitions. If the current number of partitions is
less than or equal to `max_partitions`, do nothing.
Returns
-------
:class:`.Table`
Table with at most `max_partitions` partitions.
"""
return Table(ir.TableRepartition(
self._tir, max_partitions, ir.RepartitionStrategy.NAIVE_COALESCE))
@typecheck_method(other=table_type)
def semi_join(self, other: 'Table') -> 'Table':
"""Filters the table to rows whose key appears in `other`.
Parameters
----------
other : :class:`.Table`
Table with compatible key field(s).
Returns
-------
:class:`.Table`
Notes
-----
The key type of the table must match the key type of `other`.
This method does not change the schema of the table; it is a method of
filtering the table to keys present in another table.
To discard keys present in `other`, use :meth:`.anti_join`.
Examples
--------
>>> table_result = table1.semi_join(table2)
It may be expensive to key the left-side table by the right-side key.
In this case, it is possible to implement a semi-join using a non-key
field as follows:
>>> table_result = table1.filter(hl.is_defined(table2.index(table1['ID'])))
See Also
--------
:meth:`.anti_join`
"""
return self.filter(hl.is_defined(other.index(self.key)))
@typecheck_method(other=table_type)
def anti_join(self, other: 'Table') -> 'Table':
"""Filters the table to rows whose key does not appear in `other`.
Parameters
----------
other : :class:`.Table`
Table with compatible key field(s).
Returns
-------
:class:`.Table`
Notes
-----
The key type of the table must match the key type of `other`.
This method does not change the schema of the table; it is a method of
filtering the table to keys not present in another table.
To restrict to keys present in `other`, use :meth:`.semi_join`.
Examples
--------
>>> table_result = table1.anti_join(table2)
It may be expensive to key the left-side table by the right-side key.
In this case, it is possible to implement an anti-join using a non-key
field as follows:
>>> table_result = table1.filter(hl.is_missing(table2.index(table1['ID'])))
See Also
--------
:meth:`.semi_join`, :meth:`.filter`
"""
return self.filter(hl.is_missing(other.index(self.key)))
@typecheck_method(right=table_type,
how=enumeration('inner', 'outer', 'left', 'right'),
_mangle=anyfunc)
def join(self,
right: 'Table',
how='inner',
_mangle: Callable[[str, int], str] = lambda s, i: f'{s}_{i}') -> 'Table':
"""Join two tables together.
Examples
--------
Join `table1` to `table2` to produce `table_joined`:
>>> table_joined = table1.key_by('ID').join(table2.key_by('ID'))
Notes
-----
Tables are joined at rows whose key fields have equal values. Missing values never match.
The inclusion of a row with no match in the opposite table depends on the
join type:
- **inner** -- Only rows with a matching key in the opposite table are included
in the resulting table.
- **left** -- All rows from the left table are included in the resulting table.
If a row in the left table has no match in the right table, then the fields
derived from the right table will be missing.
- **right** -- All rows from the right table are included in the resulting table.
If a row in the right table has no match in the left table, then the fields
derived from the left table will be missing.
        - **outer** -- All rows are included in the resulting table. If a row in the right
          table has no match in the left table, then the fields derived from the left
          table will be missing. If a row in the left table has no match in the right
          table, then the fields derived from the right table will be missing.
Both tables must have the same number of keys and the corresponding
types of each key must be the same (order matters), but the key names
can be different. For example, if `table1` is keyed by fields ``['a',
'b']``, both of type ``int32``, and `table2` is keyed by fields ``['c',
'd']``, both of type ``int32``, then the two tables can be joined (their
rows will be joined where ``table1.a == table2.c`` and ``table1.b ==
table2.d``).
The key fields and order from the left table are preserved,
while the key fields from the right table are not present in
the result.
Note
----
These join methods implement a traditional `Cartesian product
<https://en.wikipedia.org/wiki/Cartesian_product>`__ join, and
the number of records in the resulting table can be larger than
the number of records on the left or right if duplicate keys are
present.
Parameters
----------
right : :class:`.Table`
Table to join.
how : :obj:`str`
Join type. One of "inner", "left", "right", "outer"
Returns
-------
:class:`.Table`
Joined table.
"""
left_key_types = list(self.key.dtype.values())
right_key_types = list(right.key.dtype.values())
if not left_key_types == right_key_types:
raise ValueError(f"'join': key mismatch:\n "
f" left: [{', '.join(str(t) for t in left_key_types)}]\n "
f" right: [{', '.join(str(t) for t in right_key_types)}]")
seen = set(self._fields.keys())
renames = {}
for field in right._fields:
if field in seen and field not in right.key:
i = 1
while i < 100:
mod = _mangle(field, i)
if mod not in seen:
renames[field] = mod
seen.add(mod)
break
i += 1
else:
raise RecursionError(f'cannot rename field {repr(field)} after 99'
f' mangling attempts')
else:
seen.add(field)
if renames:
right = right.rename(renames)
info('Table.join: renamed the following fields on the right to avoid name conflicts:'
+ ''.join(f'\n {repr(k)} -> {repr(v)}' for k, v in renames.items()))
return Table(ir.TableJoin(self._tir, right._tir, how, len(self.key)))
@typecheck_method(expr=BooleanExpression)
def all(self, expr):
"""Evaluate whether a boolean expression is true for all rows.
Examples
--------
        Test whether `C1` is equal to 5 in all rows of the table:
>>> if table1.all(table1.C1 == 5):
... print("All rows have C1 equal 5.")
Parameters
----------
expr : :class:`.BooleanExpression`
Expression to test.
Returns
-------
:obj:`bool`
"""
return self.aggregate(hl.agg.all(expr))
@typecheck_method(expr=BooleanExpression)
def any(self, expr):
"""Evaluate whether a Boolean expression is true for at least one row.
Examples
--------
        Test whether `C1` is equal to 5 in any row of the table:
>>> if table1.any(table1.C1 == 5):
... print("At least one row has C1 equal 5.")
Parameters
----------
expr : :class:`.BooleanExpression`
Boolean expression.
Returns
-------
:obj:`bool`
``True`` if the predicate evaluated for ``True`` for any row, otherwise ``False``.
"""
return self.aggregate(hl.agg.any(expr))
@typecheck_method(mapping=dictof(str, str))
def rename(self, mapping) -> 'Table':
"""Rename fields of the table.
Examples
--------
Rename `C1` to `col1` and `C2` to `col2`:
>>> table_result = table1.rename({'C1' : 'col1', 'C2' : 'col2'})
Parameters
----------
mapping : :obj:`dict` of :obj:`str`, :obj:`str`
Mapping from old field names to new field names.
Notes
-----
Any field that does not appear as a key in `mapping` will not be
renamed.
Returns
-------
:class:`.Table`
Table with renamed fields.
"""
seen = {}
row_map = {}
global_map = {}
for k, v in mapping.items():
if v in seen:
raise ValueError(
"Cannot rename two fields to the same name: attempted to rename {} and {} both to {}".format(
repr(seen[v]), repr(k), repr(v)))
if v in self._fields and v not in mapping:
raise ValueError("Cannot rename {} to {}: field already exists.".format(repr(k), repr(v)))
seen[v] = k
if self[k]._indices == self._row_indices:
row_map[k] = v
else:
assert self[k]._indices == self._global_indices
global_map[k] = v
stray = set(mapping.keys()) - set(seen.values())
if stray:
raise ValueError(f"found rename rules for fields not present in table: {list(stray)}")
return Table(ir.TableRename(self._tir, row_map, global_map))
def expand_types(self) -> 'Table':
"""Expand complex types into structs and arrays.
Examples
--------
>>> table_result = table1.expand_types()
Notes
-----
Expands the following types: :class:`.tlocus`, :class:`.tinterval`,
:class:`.tset`, :class:`.tdict`, :class:`.ttuple`.
The only types that will remain after this method are:
:py:data:`.tbool`, :py:data:`.tint32`, :py:data:`.tint64`,
:py:data:`.tfloat64`, :py:data:`.tfloat32`, :class:`.tarray`,
:class:`.tstruct`.
Note, expand_types always returns an unkeyed table.
Returns
-------
:class:`.Table`
Expanded table.
"""
t = self
if len(t.key) > 0:
t = t.order_by(*t.key)
def _expand(e):
if isinstance(e, CollectionExpression) or isinstance(e, DictExpression):
return hl.map(lambda x: _expand(x), hl.array(e))
elif isinstance(e, StructExpression):
return hl.struct(**{k: _expand(v) for (k, v) in e.items()})
elif isinstance(e, TupleExpression):
return hl.struct(**{f'_{i}': x for (i, x) in enumerate(e)})
elif isinstance(e, IntervalExpression):
return hl.struct(start=e.start,
end=e.end,
includesStart=e.includes_start,
includesEnd=e.includes_end)
elif isinstance(e, LocusExpression):
return hl.struct(contig=e.contig,
position=e.position)
elif isinstance(e, CallExpression):
return hl.struct(alleles=hl.map(lambda i: e[i], hl.range(0, e.ploidy)),
phased=e.phased)
elif isinstance(e, NDArrayExpression):
return hl.struct(shape=e.shape, data=_expand(e._data_array()))
else:
assert isinstance(e, (NumericExpression, BooleanExpression, StringExpression))
return e
t = t.select(**_expand(t.row))
t = t.select_globals(**_expand(t.globals))
return t
def flatten(self) -> 'Table':
"""Flatten nested structs.
Examples
--------
Flatten table:
>>> table_result = table1.flatten()
Notes
-----
Consider a table with signature
.. code-block:: text
a: struct{
p: int32,
q: str
},
b: int32,
c: struct{
x: str,
y: array<struct{
y: str,
z: str
}>
}
and key ``a``. The result of flatten is
.. code-block:: text
a.p: int32
a.q: str
b: int32
c.x: str
c.y: array<struct{
y: str,
z: str
}>
with key ``a.p, a.q``.
Note, structures inside collections like arrays or sets will not be
flattened.
Note, the result of flatten is always unkeyed.
Warning
-------
        Flattening a table will produce fields that cannot be referenced using
the ``table.<field>`` syntax, e.g. "a.b". Reference these fields using
square bracket lookups: ``table['a.b']``.
Returns
-------
:class:`.Table`
Table with a flat schema (no struct fields).
"""
# unkey but preserve order
t = self.order_by(*self.key)
t = Table(ir.TableMapRows(t._tir, t.row.flatten()._ir))
return t
@typecheck_method(exprs=oneof(str, Expression, Ascending, Descending))
def order_by(self, *exprs) -> 'Table':
"""Sort by the specified fields, defaulting to ascending order. Will unkey the table if it is keyed.
Examples
--------
Let's assume we have a field called `HT` in our table.
By default, ascending order is used:
>>> sorted_table = table1.order_by(table1.HT)
>>> sorted_table = table1.order_by('HT')
You can sort in ascending order explicitly:
>>> sorted_table = table1.order_by(hl.asc(table1.HT))
>>> sorted_table = table1.order_by(hl.asc('HT'))
Tables can be sorted by field descending order as well:
>>> sorted_table = table1.order_by(hl.desc(table1.HT))
>>> sorted_table = table1.order_by(hl.desc('HT'))
Tables can also be sorted on multiple fields:
>>> sorted_table = table1.order_by(hl.desc('HT'), hl.asc('SEX'))
Notes
-----
Missing values are sorted after non-missing values. When multiple
fields are passed, the table will be sorted first by the first
argument, then the second, etc.
Note
----
This method unkeys the table.
Parameters
----------
exprs : varargs of :class:`.Ascending` or :class:`.Descending` or :class:`.Expression` or :obj:`str`
Fields to sort by.
Returns
-------
:class:`.Table`
Table sorted by the given fields.
"""
lifted_exprs = []
for e in exprs:
sort_type = 'A'
if isinstance(e, Ascending):
e = e.col
elif isinstance(e, Descending):
e = e.col
sort_type = 'D'
if isinstance(e, str):
expr = self[e]
else:
expr = e
lifted_exprs.append((expr, sort_type))
sort_fields = []
complex_exprs = {}
for e, sort_type in lifted_exprs:
if e._indices.source is not self:
if e._indices.source is None:
raise ValueError("Sort fields must be fields of the callee Table, found scalar expression")
else:
raise ValueError(f"Sort fields must be fields of the callee Table,"
f" found field of {e._indices.source}")
elif e._indices != self._row_indices:
raise ValueError("Sort fields must be row-indexed, found global sort expression")
else:
field_name = self._fields_inverse.get(e)
if field_name is None:
field_name = Env.get_uid()
complex_exprs[field_name] = e
sort_fields.append((field_name, sort_type))
t = self
if complex_exprs:
t = t.annotate(**complex_exprs)
t = Table(ir.TableOrderBy(t._tir, sort_fields))
if complex_exprs:
t = t.drop(*complex_exprs.keys())
return t
@typecheck_method(field=oneof(str, Expression),
name=nullable(str))
def explode(self, field, name=None) -> 'Table':
"""Explode rows along a field of type array or set, copying the entire row for each element.
Examples
--------
`people_table` is a :class:`.Table` with three fields: `Name`, `Age`
and `Children`.
>>> people_table.show()
+------------+-------+--------------------------+
| Name | Age | Children |
+------------+-------+--------------------------+
| str | int32 | array<str> |
+------------+-------+--------------------------+
| "Alice" | 34 | ["Dave","Ernie","Frank"] |
| "Bob" | 51 | ["Gaby","Helen"] |
| "Caroline" | 10 | [] |
+------------+-------+--------------------------+
:meth:`.Table.explode` can be used to produce a distinct row for each
element in the `Children` field:
>>> exploded = people_table.explode('Children')
>>> exploded.show() # doctest: +SKIP_OUTPUT_CHECK
+---------+-------+----------+
| Name | Age | Children |
+---------+-------+----------+
| str | int32 | str |
+---------+-------+----------+
| "Alice" | 34 | "Dave" |
| "Alice" | 34 | "Ernie" |
| "Alice" | 34 | "Frank" |
| "Bob" | 51 | "Gaby" |
| "Bob" | 51 | "Helen" |
+---------+-------+----------+
The `name` parameter can be used to produce more appropriate field
names:
>>> exploded = people_table.explode('Children', name='Child')
>>> exploded.show() # doctest: +SKIP_OUTPUT_CHECK
+---------+-------+---------+
| Name | Age | Child |
+---------+-------+---------+
| str | int32 | str |
+---------+-------+---------+
| "Alice" | 34 | "Dave" |
| "Alice" | 34 | "Ernie" |
| "Alice" | 34 | "Frank" |
| "Bob" | 51 | "Gaby" |
| "Bob" | 51 | "Helen" |
+---------+-------+---------+
Notes
-----
Each row is copied for each element of `field`. The explode operation
unpacks the elements in a field of type ``array`` or ``set`` into its
own row. If an empty ``array`` or ``set`` is exploded, the entire row is
removed from the table. In the example above, notice that the name
"Caroline" is not found in the exploded table.
Missing arrays or sets are treated as empty.
Currently, the `name` argument may not be used if `field` is not a
top-level field of the table (e.g. `name` may be used with ``ht.foo``
but not ``ht.foo.bar``).
Parameters
----------
field : :obj:`str` or :class:`.Expression`
Top-level field name or expression.
name : :obj:`str` or None
If not `None`, rename the exploded field to `name`.
Returns
-------
:class:`.Table`
"""
if isinstance(field, str):
if field not in self._fields:
raise KeyError("Table has no field '{}'".format(field))
elif self._fields[field]._indices != self._row_indices:
raise ExpressionException("Method 'explode' expects a field indexed by row, found axes '{}'"
.format(self._fields[field]._indices.axes))
root = [field]
field = self._fields[field]
else:
analyze('Table.explode', field, self._row_indices, set(self._fields.keys()))
if not field._ir.is_nested_field:
raise ExpressionException(
"method 'explode' requires a field or subfield, not a complex expression")
nested = field._ir
root = []
while isinstance(nested, ir.GetField):
root.append(nested.name)
nested = nested.o
root = root[::-1]
if not isinstance(field.dtype, (tarray, tset)):
raise ValueError(f"method 'explode' expects array or set, found: {field.dtype}")
for k in self.key.values():
if k is field:
raise ValueError("method 'explode' cannot explode a key field")
t = Table(ir.TableExplode(self._tir, root))
if name is not None:
if len(root) > 1:
raise ValueError("'Table.explode' does not support the 'name' argument when exploding nested fields")
t = t.rename({root[0]: name})
return t
@typecheck_method(row_key=sequenceof(str),
col_key=sequenceof(str),
row_fields=sequenceof(str),
col_fields=sequenceof(str),
n_partitions=nullable(int))
def to_matrix_table(self, row_key, col_key, row_fields=[], col_fields=[], n_partitions=None) -> 'hl.MatrixTable':
"""Construct a matrix table from a table in coordinate representation.
Examples
--------
Import a coordinate-representation table from disk:
>>> coord_ht = hl.import_table('data/coordinate_matrix.tsv', impute=True)
>>> coord_ht.show()
+---------+---------+----------+
| row_idx | col_idx | x |
+---------+---------+----------+
| int32 | int32 | float64 |
+---------+---------+----------+
| 1 | 1 | 2.50e-01 |
| 1 | 2 | 3.30e-01 |
| 2 | 1 | 1.10e-01 |
| 3 | 1 | 1.00e+00 |
| 3 | 2 | 0.00e+00 |
+---------+---------+----------+
Convert to a matrix table and show:
>>> dense_mt = coord_ht.to_matrix_table(row_key=['row_idx'], col_key=['col_idx'])
>>> dense_mt.show()
+---------+----------+----------+
| row_idx | 1.x | 2.x |
+---------+----------+----------+
| int32 | float64 | float64 |
+---------+----------+----------+
| 1 | 2.50e-01 | 3.30e-01 |
| 2 | 1.10e-01 | NA |
| 3 | 1.00e+00 | 0.00e+00 |
+---------+----------+----------+
Notes
-----
Any row fields in the table that do not appear in one of the arguments
to this method are assumed to be entry fields of the resulting matrix
table.
Parameters
----------
row_key : Sequence[str]
Fields to be used as row key.
col_key : Sequence[str]
Fields to be used as column key.
row_fields : Sequence[str]
Fields to be stored once per row.
col_fields : Sequence[str]
Fields to be stored once per column.
n_partitions : int or None
Number of partitions.
Returns
-------
:class:`.MatrixTable`
"""
all_fields = list(itertools.chain(row_key, col_key, row_fields, col_fields))
c = collections.Counter(all_fields)
row_field_set = set(self.row)
for k, v in c.items():
if k not in row_field_set:
raise ValueError(f"'to_matrix_table': field {repr(k)} is not a row field")
if v > 1:
raise ValueError(f"'to_matrix_table': field {repr(k)} appeared in {v} field groups")
if len(row_key) == 0:
raise ValueError("'to_matrix_table': require at least one row key field")
if len(col_key) == 0:
raise ValueError("'to_matrix_table': require at least one col key field")
ht = self.key_by()
non_entry_fields = set(itertools.chain(row_key, col_key, row_fields, col_fields))
entry_fields = [x for x in ht.row if x not in non_entry_fields]
if not entry_fields:
raise ValueError("'Table.to_matrix_table': no fields remain as entry fields:\n"
" all table fields found in one of 'row_key', 'col_key', 'row_fields', 'col_fields'")
col_data = hl.rbind(
hl.array(
ht.aggregate(
hl.agg.group_by(ht.row.select(*col_key), hl.agg.take(ht.row.select(*col_fields), 1)[0]),
_localize=False)),
lambda data: hl.struct(data=data,
key_to_index=hl.dict(hl.range(0, hl.len(data)).map(lambda i: (data[i][0], i))))
)
col_data_uid = Env.get_uid()
ht = ht.drop(*col_fields)
ht = ht.annotate_globals(**{col_data_uid: col_data})
entries_uid = Env.get_uid()
ht = (ht.group_by(*row_key)
.partition_hint(n_partitions)
# FIXME: should be agg._prev_nonnull https://github.com/hail-is/hail/issues/5345
.aggregate(**{x: hl.agg.take(ht[x], 1)[0] for x in row_fields},
**{entries_uid: hl.rbind(
hl.dict(hl.agg.collect((ht[col_data_uid]['key_to_index'][ht.row.select(*col_key)],
ht.row.select(*entry_fields)))),
lambda entry_dict: hl.range(0, hl.len(ht[col_data_uid]['key_to_index']))
.map(lambda i: entry_dict.get(i)))}))
ht = ht.annotate_globals(
**{col_data_uid: hl.array(ht[col_data_uid]['data'].map(lambda elt: hl.struct(**elt[0], **elt[1])))})
return ht._unlocalize_entries(entries_uid, col_data_uid, col_key)
@typecheck_method(columns=sequenceof(str), entry_field_name=nullable(str), col_field_name=str)
def to_matrix_table_row_major(self, columns, entry_field_name=None, col_field_name='col'):
"""Construct a matrix table from a table in row major representation. Each element in `columns`
is a field that will become an entry field in the matrix table. Fields omitted from `columns` become row
fields. If `columns` are structs, then the matrix table will have the entry fields of those structs. Otherwise,
the matrix table will have one entry field named `entry_field_name` whose values come from the values
of the `columns` fields. The matrix table is column indexed by `col_field_name`.
If you find yourself using this method after :func:`.import_table`,
consider instead using :func:`.import_matrix_table`.
Examples
--------
Convert a table of RNA expression samples to a :class:`.MatrixTable`:
>>> t = hl.import_table('data/rna_expression.tsv', impute=True)
>>> t = t.key_by('gene')
>>> t.show()
+---------+---------+---------+----------+-----------+-----------+-----------+
| gene | lung001 | lung002 | heart001 | muscle001 | muscle002 | muscle003 |
+---------+---------+---------+----------+-----------+-----------+-----------+
| str | int32 | int32 | int32 | int32 | int32 | int32 |
+---------+---------+---------+----------+-----------+-----------+-----------+
| "LD4" | 1 | 2 | 0 | 2 | 1 | 1 |
| "SCN1A" | 2 | 1 | 1 | 0 | 0 | 0 |
| "TITIN" | 3 | 0 | 0 | 1 | 2 | 1 |
+---------+---------+---------+----------+-----------+-----------+-----------+
>>> mt = t.to_matrix_table_row_major(
... columns=['lung001', 'lung002', 'heart001',
... 'muscle001', 'muscle002', 'muscle003'],
... entry_field_name='expression',
... col_field_name='sample')
>>> mt.describe()
----------------------------------------
Global fields:
None
----------------------------------------
Column fields:
'sample': str
----------------------------------------
Row fields:
'gene': str
----------------------------------------
Entry fields:
'expression': int32
----------------------------------------
Column key: ['sample']
Row key: ['gene']
----------------------------------------
>>> mt.show(n_cols=2)
+---------+----------------------+----------------------+
| gene | 'lung001'.expression | 'lung002'.expression |
+---------+----------------------+----------------------+
| str | int32 | int32 |
+---------+----------------------+----------------------+
| "LD4" | 1 | 2 |
| "SCN1A" | 2 | 1 |
| "TITIN" | 3 | 0 |
+---------+----------------------+----------------------+
showing the first 2 of 6 columns
Notes
-----
All fields in `columns` must have the same type.
Parameters
----------
columns : Sequence[str]
Fields to be used as columns.
entry_field_name : :obj:`str` or None
Field name for the entries of the matrix table.
col_field_name : :obj:`str`
Field name for the columns of the matrix table.
Returns
-------
:class:`.MatrixTable`
"""
if len(columns) == 0:
raise ValueError('Columns must be non-empty.')
fields = [self[field] for field in columns]
col_types = set([field.dtype for field in fields])
if len(col_types) != 1:
raise ValueError('All columns must have the same type.')
if all([isinstance(col_typ, hl.tstruct) for col_typ in col_types]):
if entry_field_name is not None:
raise ValueError('Cannot both provide struct columns and an entry field name.')
entries = hl.array(fields)
else:
if entry_field_name is None:
raise ValueError('Must provide an entry field name.')
entries = hl.array([hl.struct(**{entry_field_name: field}) for field in fields])
t = self.transmute(entries=entries)
t = t.annotate_globals(cols=hl.array([hl.struct(**{col_field_name: col}) for col in columns]))
return t._unlocalize_entries('entries', 'cols', [col_field_name])
@property
def globals(self) -> 'StructExpression':
"""Returns a struct expression including all global fields.
Examples
--------
The data type of the globals struct:
>>> table1.globals.dtype
dtype('struct{global_field_1: int32, global_field_2: int32}')
The number of global fields:
>>> len(table1.globals)
2
Returns
-------
:class:`.StructExpression`
Struct of all global fields.
"""
return self._globals
@property
def row(self) -> 'StructExpression':
"""Returns a struct expression of all row-indexed fields, including keys.
Examples
--------
The data type of the row struct:
>>> table1.row.dtype
dtype('struct{ID: int32, HT: int32, SEX: str, X: int32, Z: int32, C1: int32, C2: int32, C3: int32}')
The number of row fields:
>>> len(table1.row)
8
Returns
-------
:class:`.StructExpression`
Struct of all row fields, including key fields.
"""
return self._row
@property
def row_value(self) -> 'StructExpression':
"""Returns a struct expression including all non-key row-indexed fields.
Examples
--------
The data type of the row struct:
>>> table1.row_value.dtype
dtype('struct{HT: int32, SEX: str, X: int32, Z: int32, C1: int32, C2: int32, C3: int32}')
The number of row fields:
>>> len(table1.row_value)
7
Returns
-------
:class:`.StructExpression`
Struct of all non-key row fields.
"""
return self._row.drop(*self.key.keys())
@staticmethod
@typecheck(df=pyspark.sql.DataFrame,
key=table_key_type)
def from_spark(df, key=[]) -> 'Table':
"""Convert PySpark SQL DataFrame to a table.
Examples
--------
>>> t = Table.from_spark(df) # doctest: +SKIP
Notes
-----
Spark SQL data types are converted to Hail types as follows:
.. code-block:: text
BooleanType => :py:data:`.tbool`
IntegerType => :py:data:`.tint32`
LongType => :py:data:`.tint64`
FloatType => :py:data:`.tfloat32`
DoubleType => :py:data:`.tfloat64`
StringType => :py:data:`.tstr`
BinaryType => :class:`.TBinary`
ArrayType => :class:`.tarray`
StructType => :class:`.tstruct`
Unlisted Spark SQL data types are currently unsupported.
Parameters
----------
df : :class:`.pyspark.sql.DataFrame`
PySpark DataFrame.
key : :obj:`str` or :obj:`list` of :obj:`str`
Key fields.
Returns
-------
:class:`.Table`
Table constructed from the Spark SQL DataFrame.
"""
return Env.spark_backend('from_spark').from_spark(df, key)
@typecheck_method(flatten=bool)
def to_spark(self, flatten=True):
"""Converts this table to a Spark DataFrame.
Because Spark cannot represent complex types, types are
expanded before flattening or conversion.
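        Examples
        --------
        A minimal sketch (requires an active Spark backend, so the snippet is
        skipped in doctests):
        >>> df = table1.to_spark()  # doctest: +SKIP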
Parameters
----------
flatten : :obj:`bool`
If ``True``, :meth:`flatten` before converting to Spark DataFrame.
Returns
-------
:class:`.pyspark.sql.DataFrame`
"""
return Env.spark_backend('to_spark').to_spark(self, flatten)
@typecheck_method(flatten=bool)
def to_pandas(self, flatten=True):
"""Converts this table to a Pandas DataFrame.
Because conversion to Pandas is done through Spark, and Spark
cannot represent complex types, types are expanded before
flattening or conversion.
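        Examples
        --------
        A minimal sketch (conversion goes through Spark, so the snippet is
        skipped in doctests):
        >>> pd_df = table1.to_pandas()  # doctest: +SKIP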
Parameters
----------
flatten : :obj:`bool`
If ``True``, :meth:`flatten` before converting to Pandas DataFrame.
Returns
-------
:class:`.pandas.DataFrame`
"""
return Env.spark_backend('to_pandas').to_pandas(self, flatten)
@staticmethod
@typecheck(df=pandas.DataFrame,
key=oneof(str, sequenceof(str)))
def from_pandas(df, key=[]) -> 'Table':
"""Create table from Pandas DataFrame
Examples
--------
>>> t = hl.Table.from_pandas(df) # doctest: +SKIP
Parameters
----------
df : :class:`.pandas.DataFrame`
Pandas DataFrame.
key : :obj:`str` or :obj:`list` of :obj:`str`
Key fields.
Returns
-------
:class:`.Table`
"""
return Env.spark_backend('from_pandas').from_pandas(df, key)
@typecheck_method(other=table_type, tolerance=nullable(numeric), absolute=bool)
def _same(self, other, tolerance=1e-6, absolute=False):
from hail.expr.functions import _values_similar
if self._type != other._type:
print(f'Table._same: types differ: {self._type}, {other._type}')
return False
left_global_value = Env.get_uid()
left_value = Env.get_uid()
left = self
left = left.select_globals(**{left_global_value: left.globals})
left = left.group_by(_key=left.key).aggregate(**{left_value: hl.agg.collect(left.row_value)})
right_global_value = Env.get_uid()
right_value = Env.get_uid()
right = other
right = right.select_globals(**{right_global_value: right.globals})
right = right.group_by(_key=right.key).aggregate(**{right_value: hl.agg.collect(right.row_value)})
t = left.join(right, how='outer')
if not hl.eval(_values_similar(t[left_global_value], t[right_global_value], tolerance, absolute)):
g = hl.eval(t.globals)
print(f'Table._same: globals differ: {g[left_global_value]}, {g[right_global_value]}')
return False
if not t.all(hl.is_defined(t[left_value]) & hl.is_defined(t[right_value])
& _values_similar(t[left_value], t[right_value], tolerance, absolute)):
print('Table._same: rows differ:')
t = t.filter(~ _values_similar(t[left_value], t[right_value], tolerance, absolute))
bad_rows = t.take(10)
for r in bad_rows:
print(f' Row mismatch:\n L: {r[left_value]}\n R: {r[right_value]}')
return False
return True
def collect_by_key(self, name: str = 'values') -> 'Table':
"""Collect values for each unique key into an array.
.. include:: _templates/req_keyed_table.rst
Examples
--------
>>> t1 = hl.Table.parallelize([
... {'t': 'foo', 'x': 4, 'y': 'A'},
... {'t': 'bar', 'x': 2, 'y': 'B'},
... {'t': 'bar', 'x': -3, 'y': 'C'},
... {'t': 'quam', 'x': 0, 'y': 'D'}],
... hl.tstruct(t=hl.tstr, x=hl.tint32, y=hl.tstr),
... key='t')
>>> t1.show()
+--------+-------+-----+
| t | x | y |
+--------+-------+-----+
| str | int32 | str |
+--------+-------+-----+
| "bar" | 2 | "B" |
| "bar" | -3 | "C" |
| "foo" | 4 | "A" |
| "quam" | 0 | "D" |
+--------+-------+-----+
>>> t1.collect_by_key().show()
+--------+---------------------------------+
| t | values |
+--------+---------------------------------+
| str | array<struct{x: int32, y: str}> |
+--------+---------------------------------+
| "bar" | [(2,"B"),(-3,"C")] |
| "foo" | [(4,"A")] |
| "quam" | [(0,"D")] |
+--------+---------------------------------+
Notes
-----
The order of the values array is not guaranteed.
Parameters
----------
name : :obj:`str`
Field name for all values per key.
Returns
-------
:class:`.Table`
"""
import hail.methods.misc as misc
misc.require_key(self, 'collect_by_key')
return Table(ir.TableAggregateByKey(
self._tir,
hl.struct(**{name: hl.agg.collect(self.row_value)})._ir))
def distinct(self) -> 'Table':
"""Deduplicate keys, keeping only one row for each unique key.
.. include:: _templates/req_keyed_table.rst
Examples
--------
>>> t1 = hl.Table.parallelize([
... {'a': 'foo', 'b': 1},
... {'a': 'bar', 'b': 5},
... {'a': 'bar', 'b': 2}],
... hl.tstruct(a=hl.tstr, b=hl.tint32),
... key='a')
>>> t1.show()
+-------+-------+
| a | b |
+-------+-------+
| str | int32 |
+-------+-------+
| "bar" | 5 |
| "bar" | 2 |
| "foo" | 1 |
+-------+-------+
>>> t1.distinct().show()
+-------+-------+
| a | b |
+-------+-------+
| str | int32 |
+-------+-------+
| "bar" | 5 |
| "foo" | 1 |
+-------+-------+
Notes
-----
The row chosen per distinct key is not guaranteed.
Returns
-------
:class:`.Table`
"""
import hail.methods.misc as misc
misc.require_key(self, 'distinct')
return Table(ir.TableDistinct(self._tir))
def summarize(self, handler=None):
"""Compute and print summary information about the fields in the table.
.. include:: _templates/experimental.rst
"""
if handler is None:
handler = hl.utils.default_handler()
handler(self.row._summarize(top=True))
@typecheck_method(parts=sequenceof(int), keep=bool)
def _filter_partitions(self, parts, keep=True) -> 'Table':
return Table(ir.TableToTableApply(self._tir, {'name': 'TableFilterPartitions', 'parts': parts, 'keep': keep}))
@typecheck_method(entries_field_name=str,
cols_field_name=str,
col_key=sequenceof(str))
def _unlocalize_entries(self, entries_field_name, cols_field_name, col_key) -> 'hl.MatrixTable':
return hl.MatrixTable(ir.CastTableToMatrix(
self._tir, entries_field_name, cols_field_name, col_key))
@staticmethod
@typecheck(tables=sequenceof(table_type), data_field_name=str, global_field_name=str)
def multi_way_zip_join(tables, data_field_name, global_field_name) -> 'Table':
"""Combine many tables in a zip join
.. include:: _templates/experimental.rst
Notes
-----
The row type of the returned table is a struct with the key fields, and
one extra field, `data_field_name`, which is an array of structs with
        the non-key fields, one per input. An array element is missing if the
        corresponding input had no row with that key, or possibly if another
        input has more rows with that key than the corresponding input.
The global type of the returned table is an array of structs of the
global type of all of the inputs.
The types for every input must be identical, not merely compatible,
including the keys.
A zip join is similar to an outer join however rows are not duplicated
to create the full Cartesian product of duplicate keys. Instead, there
is exactly one entry in some `data_field_name` array for every row in
the inputs.
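        Examples
        --------
        A minimal sketch (``t_a`` and ``t_b`` stand for hypothetical tables with
        identical row, key, and global types):
        >>> zipped = hl.Table.multi_way_zip_join([t_a, t_b], 'data', 'globals')  # doctest: +SKIP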
Parameters
----------
tables : :obj:`List[Table]`
A list of tables to combine
data_field_name : :obj:`str`
The name of the resulting data field
global_field_name : :obj:`str`
The name of the resulting global field
"""
if not tables:
raise ValueError('multi_way_zip_join must have at least one table as an argument')
head = tables[0]
if any(head.key.dtype != t.key.dtype for t in tables):
raise TypeError('All input tables to multi_way_zip_join must have the same key type')
if any(head.row.dtype != t.row.dtype for t in tables):
raise TypeError('All input tables to multi_way_zip_join must have the same row type')
if any(head.globals.dtype != t.globals.dtype for t in tables):
raise TypeError('All input tables to multi_way_zip_join must have the same global type')
return Table(ir.TableMultiWayZipJoin(
[t._tir for t in tables], data_field_name, global_field_name))
def _group_within_partitions(self, name, n):
return Table(ir.TableGroupWithinPartitions(self._tir, name, n))
@typecheck_method(f=func_spec(1, expr_array(expr_struct())))
def _map_partitions(self, f):
rows_uid = 'tmp_rows_' + Env.get_uid()
globals_uid = 'tmp_globals_' + Env.get_uid()
expr = construct_expr(ir.ToArray(ir.Ref(rows_uid)), hl.tarray(self.row.dtype), self._row_indices)
body = f(expr)
result_t = body.dtype
if any(k not in result_t.element_type for k in self.key):
raise ValueError(f'Table._map_partitions must preserve key fields')
body_ir = ir.Let('global', ir.Ref(globals_uid), ir.ToStream(body._ir))
return Table(ir.TableMapPartitions(self._tir, globals_uid, rows_uid, body_ir))
table_type.set(Table)
| mit |
mbayon/TFG-MachineLearning | vbig/lib/python2.7/site-packages/sklearn/metrics/cluster/unsupervised.py | 30 | 10322 | """Unsupervised evaluation metrics."""
# Authors: Robert Layton <[email protected]>
# Arnaud Fouchet <[email protected]>
# Thierry Guillemot <[email protected]>
# License: BSD 3 clause
import numpy as np
from ...utils import check_random_state
from ...utils import check_X_y
from ..pairwise import pairwise_distances
from ...preprocessing import LabelEncoder
def check_number_of_labels(n_labels, n_samples):
if not 1 < n_labels < n_samples:
raise ValueError("Number of labels is %d. Valid values are 2 "
"to n_samples - 1 (inclusive)" % n_labels)
def silhouette_score(X, labels, metric='euclidean', sample_size=None,
random_state=None, **kwds):
"""Compute the mean Silhouette Coefficient of all samples.
The Silhouette Coefficient is calculated using the mean intra-cluster
distance (``a``) and the mean nearest-cluster distance (``b``) for each
sample. The Silhouette Coefficient for a sample is ``(b - a) / max(a,
b)``. To clarify, ``b`` is the distance between a sample and the nearest
cluster that the sample is not a part of.
Note that Silhouette Coefficient is only defined if number of labels
is 2 <= n_labels <= n_samples - 1.
This function returns the mean Silhouette Coefficient over all samples.
To obtain the values for each sample, use :func:`silhouette_samples`.
The best value is 1 and the worst value is -1. Values near 0 indicate
overlapping clusters. Negative values generally indicate that a sample has
been assigned to the wrong cluster, as a different cluster is more similar.
Read more in the :ref:`User Guide <silhouette_coefficient>`.
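    For example, two tight, well-separated synthetic blobs give a coefficient
    close to 1:
    >>> import numpy as np
    >>> X = np.array([[0., 0.], [0.1, 0.], [5., 5.], [5.1, 5.]])
    >>> labels = np.array([0, 0, 1, 1])
    >>> score = silhouette_score(X, labels)  # close to 1.0 for these two blobs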
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise distances between samples, or a feature array.
labels : array, shape = [n_samples]
Predicted labels for each sample.
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by :func:`metrics.pairwise.pairwise_distances
<sklearn.metrics.pairwise.pairwise_distances>`. If X is the distance
array itself, use ``metric="precomputed"``.
sample_size : int or None
The size of the sample to use when computing the Silhouette Coefficient
on a random subset of the data.
If ``sample_size is None``, no sampling is used.
random_state : int, RandomState instance or None, optional (default=None)
The generator used to randomly select a subset of samples. If int,
random_state is the seed used by the random number generator; If
RandomState instance, random_state is the random number generator; If
None, the random number generator is the RandomState instance used by
`np.random`. Used when ``sample_size is not None``.
**kwds : optional keyword parameters
Any further parameters are passed directly to the distance function.
If using a scipy.spatial.distance metric, the parameters are still
metric dependent. See the scipy docs for usage examples.
Returns
-------
silhouette : float
Mean Silhouette Coefficient for all samples.
References
----------
.. [1] `Peter J. Rousseeuw (1987). "Silhouettes: a Graphical Aid to the
Interpretation and Validation of Cluster Analysis". Computational
and Applied Mathematics 20: 53-65.
<http://www.sciencedirect.com/science/article/pii/0377042787901257>`_
.. [2] `Wikipedia entry on the Silhouette Coefficient
<https://en.wikipedia.org/wiki/Silhouette_(clustering)>`_
"""
if sample_size is not None:
X, labels = check_X_y(X, labels, accept_sparse=['csc', 'csr'])
random_state = check_random_state(random_state)
indices = random_state.permutation(X.shape[0])[:sample_size]
if metric == "precomputed":
X, labels = X[indices].T[indices].T, labels[indices]
else:
X, labels = X[indices], labels[indices]
return np.mean(silhouette_samples(X, labels, metric=metric, **kwds))
def silhouette_samples(X, labels, metric='euclidean', **kwds):
"""Compute the Silhouette Coefficient for each sample.
The Silhouette Coefficient is a measure of how well samples are clustered
with samples that are similar to themselves. Clustering models with a high
Silhouette Coefficient are said to be dense, where samples in the same
cluster are similar to each other, and well separated, where samples in
different clusters are not very similar to each other.
The Silhouette Coefficient is calculated using the mean intra-cluster
distance (``a``) and the mean nearest-cluster distance (``b``) for each
sample. The Silhouette Coefficient for a sample is ``(b - a) / max(a,
b)``.
Note that Silhouette Coefficient is only defined if number of labels
is 2 <= n_labels <= n_samples - 1.
This function returns the Silhouette Coefficient for each sample.
The best value is 1 and the worst value is -1. Values near 0 indicate
overlapping clusters.
Read more in the :ref:`User Guide <silhouette_coefficient>`.
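    For example, given a feature array ``X`` and cluster ``labels`` as in
    :func:`silhouette_score`, the result has one coefficient per sample:
    >>> values = silhouette_samples(X, labels)  # doctest: +SKIP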
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise distances between samples, or a feature array.
labels : array, shape = [n_samples]
label values for each sample
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by :func:`sklearn.metrics.pairwise.pairwise_distances`. If X is
the distance array itself, use "precomputed" as the metric.
**kwds : optional keyword parameters
Any further parameters are passed directly to the distance function.
If using a ``scipy.spatial.distance`` metric, the parameters are still
metric dependent. See the scipy docs for usage examples.
Returns
-------
silhouette : array, shape = [n_samples]
Silhouette Coefficient for each samples.
References
----------
.. [1] `Peter J. Rousseeuw (1987). "Silhouettes: a Graphical Aid to the
Interpretation and Validation of Cluster Analysis". Computational
and Applied Mathematics 20: 53-65.
<http://www.sciencedirect.com/science/article/pii/0377042787901257>`_
.. [2] `Wikipedia entry on the Silhouette Coefficient
<https://en.wikipedia.org/wiki/Silhouette_(clustering)>`_
"""
X, labels = check_X_y(X, labels, accept_sparse=['csc', 'csr'])
le = LabelEncoder()
labels = le.fit_transform(labels)
check_number_of_labels(len(le.classes_), X.shape[0])
distances = pairwise_distances(X, metric=metric, **kwds)
unique_labels = le.classes_
n_samples_per_label = np.bincount(labels, minlength=len(unique_labels))
# For sample i, store the mean distance of the cluster to which
# it belongs in intra_clust_dists[i]
intra_clust_dists = np.zeros(distances.shape[0], dtype=distances.dtype)
# For sample i, store the mean distance of the second closest
# cluster in inter_clust_dists[i]
inter_clust_dists = np.inf + intra_clust_dists
for curr_label in range(len(unique_labels)):
# Find inter_clust_dist for all samples belonging to the same
# label.
mask = labels == curr_label
current_distances = distances[mask]
# Leave out current sample.
n_samples_curr_lab = n_samples_per_label[curr_label] - 1
if n_samples_curr_lab != 0:
intra_clust_dists[mask] = np.sum(
current_distances[:, mask], axis=1) / n_samples_curr_lab
# Now iterate over all other labels, finding the mean
# cluster distance that is closest to every sample.
for other_label in range(len(unique_labels)):
if other_label != curr_label:
other_mask = labels == other_label
other_distances = np.mean(
current_distances[:, other_mask], axis=1)
inter_clust_dists[mask] = np.minimum(
inter_clust_dists[mask], other_distances)
sil_samples = inter_clust_dists - intra_clust_dists
sil_samples /= np.maximum(intra_clust_dists, inter_clust_dists)
# score 0 for clusters of size 1, according to the paper
sil_samples[n_samples_per_label.take(labels) == 1] = 0
return sil_samples
def calinski_harabaz_score(X, labels):
"""Compute the Calinski and Harabaz score.
The score is defined as ratio between the within-cluster dispersion and
the between-cluster dispersion.
Read more in the :ref:`User Guide <calinski_harabaz_index>`.
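    For example, tight, well-separated synthetic blobs give a large score:
    >>> import numpy as np
    >>> X = np.array([[0., 0.], [0.1, 0.], [5., 5.], [5.1, 5.]])
    >>> labels = np.array([0, 0, 1, 1])
    >>> score = calinski_harabaz_score(X, labels)  # large for well-separated blobs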
Parameters
----------
X : array-like, shape (``n_samples``, ``n_features``)
List of ``n_features``-dimensional data points. Each row corresponds
to a single data point.
labels : array-like, shape (``n_samples``,)
Predicted labels for each sample.
Returns
-------
score : float
The resulting Calinski-Harabaz score.
References
----------
.. [1] `T. Calinski and J. Harabasz, 1974. "A dendrite method for cluster
analysis". Communications in Statistics
<http://www.tandfonline.com/doi/abs/10.1080/03610927408827101>`_
"""
X, labels = check_X_y(X, labels)
le = LabelEncoder()
labels = le.fit_transform(labels)
n_samples, _ = X.shape
n_labels = len(le.classes_)
check_number_of_labels(n_labels, n_samples)
extra_disp, intra_disp = 0., 0.
mean = np.mean(X, axis=0)
for k in range(n_labels):
cluster_k = X[labels == k]
mean_k = np.mean(cluster_k, axis=0)
extra_disp += len(cluster_k) * np.sum((mean_k - mean) ** 2)
intra_disp += np.sum((cluster_k - mean_k) ** 2)
return (1. if intra_disp == 0. else
extra_disp * (n_samples - n_labels) /
(intra_disp * (n_labels - 1.)))
| mit |
Adai0808/scikit-learn | sklearn/datasets/tests/test_20news.py | 280 | 3045 | """Test the 20news downloader, if the data is available."""
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import SkipTest
from sklearn import datasets
def test_20news():
try:
data = datasets.fetch_20newsgroups(
subset='all', download_if_missing=False, shuffle=False)
except IOError:
raise SkipTest("Download 20 newsgroups to run this test")
# Extract a reduced dataset
data2cats = datasets.fetch_20newsgroups(
subset='all', categories=data.target_names[-1:-3:-1], shuffle=False)
# Check that the ordering of the target_names is the same
# as the ordering in the full dataset
assert_equal(data2cats.target_names,
data.target_names[-2:])
# Assert that we have only 0 and 1 as labels
assert_equal(np.unique(data2cats.target).tolist(), [0, 1])
# Check that the number of filenames is consistent with data/target
assert_equal(len(data2cats.filenames), len(data2cats.target))
assert_equal(len(data2cats.filenames), len(data2cats.data))
# Check that the first entry of the reduced dataset corresponds to
# the first entry of the corresponding category in the full dataset
entry1 = data2cats.data[0]
category = data2cats.target_names[data2cats.target[0]]
label = data.target_names.index(category)
entry2 = data.data[np.where(data.target == label)[0][0]]
assert_equal(entry1, entry2)
def test_20news_length_consistency():
"""Checks the length consistencies within the bunch
This is a non-regression test for a bug present in 0.16.1.
"""
try:
data = datasets.fetch_20newsgroups(
subset='all', download_if_missing=False, shuffle=False)
except IOError:
raise SkipTest("Download 20 newsgroups to run this test")
# Extract the full dataset
data = datasets.fetch_20newsgroups(subset='all')
assert_equal(len(data['data']), len(data.data))
assert_equal(len(data['target']), len(data.target))
assert_equal(len(data['filenames']), len(data.filenames))
def test_20news_vectorized():
# This test is slow.
raise SkipTest("Test too slow.")
bunch = datasets.fetch_20newsgroups_vectorized(subset="train")
assert_true(sp.isspmatrix_csr(bunch.data))
assert_equal(bunch.data.shape, (11314, 107428))
assert_equal(bunch.target.shape[0], 11314)
assert_equal(bunch.data.dtype, np.float64)
bunch = datasets.fetch_20newsgroups_vectorized(subset="test")
assert_true(sp.isspmatrix_csr(bunch.data))
assert_equal(bunch.data.shape, (7532, 107428))
assert_equal(bunch.target.shape[0], 7532)
assert_equal(bunch.data.dtype, np.float64)
bunch = datasets.fetch_20newsgroups_vectorized(subset="all")
assert_true(sp.isspmatrix_csr(bunch.data))
assert_equal(bunch.data.shape, (11314 + 7532, 107428))
assert_equal(bunch.target.shape[0], 11314 + 7532)
assert_equal(bunch.data.dtype, np.float64)
| bsd-3-clause |
trankmichael/scikit-learn | sklearn/datasets/lfw.py | 50 | 19048 | """Loader for the Labeled Faces in the Wild (LFW) dataset
This dataset is a collection of JPEG pictures of famous people collected
over the internet, all details are available on the official website:
http://vis-www.cs.umass.edu/lfw/
Each picture is centered on a single face. The typical task is called
Face Verification: given a pair of two pictures, a binary classifier
must predict whether the two images are from the same person.
An alternative task, Face Recognition or Face Identification is:
given the picture of the face of an unknown person, identify the name
of the person by referring to a gallery of previously seen pictures of
identified persons.
Both Face Verification and Face Recognition are tasks that are typically
performed on the output of a model trained to perform Face Detection. The
most popular model for Face Detection is called Viola-Johns and is
implemented in the OpenCV library. The LFW faces were extracted by this face
detector from various online websites.
"""
# Copyright (c) 2011 Olivier Grisel <[email protected]>
# License: BSD 3 clause
from os import listdir, makedirs, remove
from os.path import join, exists, isdir
from sklearn.utils import deprecated
import logging
import numpy as np
try:
import urllib.request as urllib # for backwards compatibility
except ImportError:
import urllib
from .base import get_data_home, Bunch
from ..externals.joblib import Memory
from ..externals.six import b
logger = logging.getLogger(__name__)
BASE_URL = "http://vis-www.cs.umass.edu/lfw/"
ARCHIVE_NAME = "lfw.tgz"
FUNNELED_ARCHIVE_NAME = "lfw-funneled.tgz"
TARGET_FILENAMES = [
'pairsDevTrain.txt',
'pairsDevTest.txt',
'pairs.txt',
]
def scale_face(face):
"""Scale back to 0-1 range in case of normalization for plotting"""
scaled = face - face.min()
scaled /= scaled.max()
return scaled
#
# Common private utilities for data fetching from the original LFW website
# local disk caching, and image decoding.
#
def check_fetch_lfw(data_home=None, funneled=True, download_if_missing=True):
"""Helper function to download any missing LFW data"""
data_home = get_data_home(data_home=data_home)
lfw_home = join(data_home, "lfw_home")
if funneled:
archive_path = join(lfw_home, FUNNELED_ARCHIVE_NAME)
data_folder_path = join(lfw_home, "lfw_funneled")
archive_url = BASE_URL + FUNNELED_ARCHIVE_NAME
else:
archive_path = join(lfw_home, ARCHIVE_NAME)
data_folder_path = join(lfw_home, "lfw")
archive_url = BASE_URL + ARCHIVE_NAME
if not exists(lfw_home):
makedirs(lfw_home)
for target_filename in TARGET_FILENAMES:
target_filepath = join(lfw_home, target_filename)
if not exists(target_filepath):
if download_if_missing:
url = BASE_URL + target_filename
logger.warning("Downloading LFW metadata: %s", url)
urllib.urlretrieve(url, target_filepath)
else:
raise IOError("%s is missing" % target_filepath)
if not exists(data_folder_path):
if not exists(archive_path):
if download_if_missing:
logger.warning("Downloading LFW data (~200MB): %s", archive_url)
urllib.urlretrieve(archive_url, archive_path)
else:
                raise IOError("%s is missing" % archive_path)
import tarfile
logger.info("Decompressing the data archive to %s", data_folder_path)
tarfile.open(archive_path, "r:gz").extractall(path=lfw_home)
remove(archive_path)
return lfw_home, data_folder_path
def _load_imgs(file_paths, slice_, color, resize):
"""Internally used to load images"""
# Try to import imread and imresize from PIL. We do this here to prevent
# the whole sklearn.datasets module from depending on PIL.
try:
try:
from scipy.misc import imread
except ImportError:
from scipy.misc.pilutil import imread
from scipy.misc import imresize
except ImportError:
raise ImportError("The Python Imaging Library (PIL)"
" is required to load data from jpeg files")
# compute the portion of the images to load to respect the slice_ parameter
# given by the caller
default_slice = (slice(0, 250), slice(0, 250))
if slice_ is None:
slice_ = default_slice
else:
slice_ = tuple(s or ds for s, ds in zip(slice_, default_slice))
h_slice, w_slice = slice_
h = (h_slice.stop - h_slice.start) // (h_slice.step or 1)
w = (w_slice.stop - w_slice.start) // (w_slice.step or 1)
if resize is not None:
resize = float(resize)
h = int(resize * h)
w = int(resize * w)
# allocate some contiguous memory to host the decoded image slices
n_faces = len(file_paths)
if not color:
faces = np.zeros((n_faces, h, w), dtype=np.float32)
else:
faces = np.zeros((n_faces, h, w, 3), dtype=np.float32)
# iterate over the collected file path to load the jpeg files as numpy
# arrays
for i, file_path in enumerate(file_paths):
if i % 1000 == 0:
logger.info("Loading face #%05d / %05d", i + 1, n_faces)
face = np.asarray(imread(file_path)[slice_], dtype=np.float32)
face /= 255.0 # scale uint8 coded colors to the [0.0, 1.0] floats
if resize is not None:
face = imresize(face, resize)
if not color:
            # average the color channels to compute a gray-level
            # representation
face = face.mean(axis=2)
faces[i, ...] = face
return faces
#
# Task #1: Face Identification on picture with names
#
def _fetch_lfw_people(data_folder_path, slice_=None, color=False, resize=None,
min_faces_per_person=0):
"""Perform the actual data loading for the lfw people dataset
This operation is meant to be cached by a joblib wrapper.
"""
    # scan the data folder content to retain people with at least
# `min_faces_per_person` face pictures
person_names, file_paths = [], []
for person_name in sorted(listdir(data_folder_path)):
folder_path = join(data_folder_path, person_name)
if not isdir(folder_path):
continue
paths = [join(folder_path, f) for f in listdir(folder_path)]
n_pictures = len(paths)
if n_pictures >= min_faces_per_person:
person_name = person_name.replace('_', ' ')
person_names.extend([person_name] * n_pictures)
file_paths.extend(paths)
n_faces = len(file_paths)
if n_faces == 0:
raise ValueError("min_faces_per_person=%d is too restrictive" %
min_faces_per_person)
target_names = np.unique(person_names)
target = np.searchsorted(target_names, person_names)
faces = _load_imgs(file_paths, slice_, color, resize)
# shuffle the faces with a deterministic RNG scheme to avoid having
# all faces of the same person in a row, as it would break some
# cross validation and learning algorithms such as SGD and online
# k-means that make an IID assumption
indices = np.arange(n_faces)
np.random.RandomState(42).shuffle(indices)
faces, target = faces[indices], target[indices]
return faces, target, target_names
def fetch_lfw_people(data_home=None, funneled=True, resize=0.5,
min_faces_per_person=0, color=False,
slice_=(slice(70, 195), slice(78, 172)),
download_if_missing=True):
"""Loader for the Labeled Faces in the Wild (LFW) people dataset
This dataset is a collection of JPEG pictures of famous people
collected on the internet, all details are available on the
official website:
http://vis-www.cs.umass.edu/lfw/
Each picture is centered on a single face. Each pixel of each channel
(color in RGB) is encoded by a float in range 0.0 - 1.0.
The task is called Face Recognition (or Identification): given the
picture of a face, find the name of the person given a training set
(gallery).
The original images are 250 x 250 pixels, but the default slice and resize
    arguments reduce them to 62 x 47.
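    For example (the data, ~200MB, are downloaded from the internet on first
    call, which is why the snippet is marked as skipped):
    >>> lfw_people = fetch_lfw_people(min_faces_per_person=70, resize=0.4)  # doctest: +SKIP
    >>> n_samples, h, w = lfw_people.images.shape  # doctest: +SKIP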
Parameters
----------
data_home : optional, default: None
Specify another download and cache folder for the datasets. By default
all scikit learn data is stored in '~/scikit_learn_data' subfolders.
funneled : boolean, optional, default: True
Download and use the funneled variant of the dataset.
resize : float, optional, default 0.5
        Ratio used to resize each face picture.
    min_faces_per_person : int, optional, default 0
The extracted dataset will only retain pictures of people that have at
least `min_faces_per_person` different pictures.
color : boolean, optional, default False
Keep the 3 RGB channels instead of averaging them to a single
gray level channel. If color is True the shape of the data has
        one more dimension than the shape with color = False.
slice_ : optional
Provide a custom 2D slice (height, width) to extract the
        'interesting' part of the jpeg files and avoid using statistical
correlation from the background
download_if_missing : optional, True by default
        If False, raise an IOError if the data is not locally available
instead of trying to download the data from the source site.
Returns
-------
dataset : dict-like object with the following attributes:
dataset.data : numpy array of shape (13233, 2914)
Each row corresponds to a ravelled face image of original size 62 x 47
pixels. Changing the ``slice_`` or resize parameters will change the shape
of the output.
dataset.images : numpy array of shape (13233, 62, 47)
Each row is a face image corresponding to one of the 5749 people in
the dataset. Changing the ``slice_`` or resize parameters will change the shape
of the output.
dataset.target : numpy array of shape (13233,)
Labels associated to each face image. Those labels range from 0-5748
and correspond to the person IDs.
dataset.DESCR : string
Description of the Labeled Faces in the Wild (LFW) dataset.
"""
lfw_home, data_folder_path = check_fetch_lfw(
data_home=data_home, funneled=funneled,
download_if_missing=download_if_missing)
logger.info('Loading LFW people faces from %s', lfw_home)
# wrap the loader in a memoizing function that will return memmaped data
# arrays for optimal memory usage
m = Memory(cachedir=lfw_home, compress=6, verbose=0)
load_func = m.cache(_fetch_lfw_people)
# load and memoize the pairs as np arrays
faces, target, target_names = load_func(
data_folder_path, resize=resize,
min_faces_per_person=min_faces_per_person, color=color, slice_=slice_)
# pack the results as a Bunch instance
return Bunch(data=faces.reshape(len(faces), -1), images=faces,
target=target, target_names=target_names,
DESCR="LFW faces dataset")
#
# Task #2: Face Verification on pairs of face pictures
#
def _fetch_lfw_pairs(index_file_path, data_folder_path, slice_=None,
color=False, resize=None):
"""Perform the actual data loading for the LFW pairs dataset
This operation is meant to be cached by a joblib wrapper.
"""
# parse the index file to find the number of pairs to be able to allocate
# the right amount of memory before starting to decode the jpeg files
with open(index_file_path, 'rb') as index_file:
split_lines = [ln.strip().split(b('\t')) for ln in index_file]
pair_specs = [sl for sl in split_lines if len(sl) > 2]
n_pairs = len(pair_specs)
    # iterating over the metadata lines for each pair to find the filename to
# decode and load in memory
target = np.zeros(n_pairs, dtype=np.int)
file_paths = list()
for i, components in enumerate(pair_specs):
if len(components) == 3:
target[i] = 1
pair = (
(components[0], int(components[1]) - 1),
(components[0], int(components[2]) - 1),
)
elif len(components) == 4:
target[i] = 0
pair = (
(components[0], int(components[1]) - 1),
(components[2], int(components[3]) - 1),
)
else:
raise ValueError("invalid line %d: %r" % (i + 1, components))
for j, (name, idx) in enumerate(pair):
try:
person_folder = join(data_folder_path, name)
except TypeError:
person_folder = join(data_folder_path, str(name, 'UTF-8'))
filenames = list(sorted(listdir(person_folder)))
file_path = join(person_folder, filenames[idx])
file_paths.append(file_path)
pairs = _load_imgs(file_paths, slice_, color, resize)
shape = list(pairs.shape)
n_faces = shape.pop(0)
shape.insert(0, 2)
shape.insert(0, n_faces // 2)
pairs.shape = shape
return pairs, target, np.array(['Different persons', 'Same person'])
@deprecated("Function 'load_lfw_people' has been deprecated in 0.17 and will be "
"removed in 0.19."
"Use fetch_lfw_people(download_if_missing=False) instead.")
def load_lfw_people(download_if_missing=False, **kwargs):
"""Alias for fetch_lfw_people(download_if_missing=False)
Check fetch_lfw_people.__doc__ for the documentation and parameter list.
"""
return fetch_lfw_people(download_if_missing=download_if_missing, **kwargs)
def fetch_lfw_pairs(subset='train', data_home=None, funneled=True, resize=0.5,
color=False, slice_=(slice(70, 195), slice(78, 172)),
download_if_missing=True):
"""Loader for the Labeled Faces in the Wild (LFW) pairs dataset
This dataset is a collection of JPEG pictures of famous people
collected on the internet, all details are available on the
official website:
http://vis-www.cs.umass.edu/lfw/
Each picture is centered on a single face. Each pixel of each channel
(color in RGB) is encoded by a float in range 0.0 - 1.0.
The task is called Face Verification: given a pair of two pictures,
a binary classifier must predict whether the two images are from
the same person.
In the official `README.txt`_ this task is described as the
"Restricted" task. As I am not sure as to implement the
"Unrestricted" variant correctly, I left it as unsupported for now.
.. _`README.txt`: http://vis-www.cs.umass.edu/lfw/README.txt
The original images are 250 x 250 pixels, but the default slice and resize
    arguments reduce them to 62 x 47.
Read more in the :ref:`User Guide <labeled_faces_in_the_wild>`.
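    For example (the data are downloaded from the internet on first call,
    which is why the snippet is marked as skipped):
    >>> lfw_pairs_train = fetch_lfw_pairs(subset='train')  # doctest: +SKIP
    >>> pairs_shape = lfw_pairs_train.pairs.shape  # doctest: +SKIP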
Parameters
----------
subset : optional, default: 'train'
Select the dataset to load: 'train' for the development training
set, 'test' for the development test set, and '10_folds' for the
official evaluation set that is meant to be used with a 10-folds
cross validation.
data_home : optional, default: None
Specify another download and cache folder for the datasets. By
default all scikit learn data is stored in '~/scikit_learn_data'
subfolders.
funneled : boolean, optional, default: True
Download and use the funneled variant of the dataset.
resize : float, optional, default 0.5
        Ratio used to resize each face picture.
color : boolean, optional, default False
Keep the 3 RGB channels instead of averaging them to a single
gray level channel. If color is True the shape of the data has
        one more dimension than the shape with color = False.
slice_ : optional
Provide a custom 2D slice (height, width) to extract the
        'interesting' part of the jpeg files and avoid using statistical
correlation from the background
download_if_missing : optional, True by default
        If False, raise an IOError if the data is not locally available
instead of trying to download the data from the source site.
Returns
-------
The data is returned as a Bunch object with the following attributes:
data : numpy array of shape (2200, 5828)
Each row corresponds to 2 ravel'd face images of original size 62 x 47
pixels. Changing the ``slice_`` or resize parameters will change the shape
of the output.
pairs : numpy array of shape (2200, 2, 62, 47)
Each row has 2 face images corresponding to same or different person
from the dataset containing 5749 people. Changing the ``slice_`` or resize
parameters will change the shape of the output.
    target : numpy array of shape (2200,)
Labels associated to each pair of images. The two label values being
different persons or the same person.
DESCR : string
Description of the Labeled Faces in the Wild (LFW) dataset.
"""
lfw_home, data_folder_path = check_fetch_lfw(
data_home=data_home, funneled=funneled,
download_if_missing=download_if_missing)
logger.info('Loading %s LFW pairs from %s', subset, lfw_home)
# wrap the loader in a memoizing function that will return memmaped data
# arrays for optimal memory usage
m = Memory(cachedir=lfw_home, compress=6, verbose=0)
load_func = m.cache(_fetch_lfw_pairs)
# select the right metadata file according to the requested subset
label_filenames = {
'train': 'pairsDevTrain.txt',
'test': 'pairsDevTest.txt',
'10_folds': 'pairs.txt',
}
if subset not in label_filenames:
raise ValueError("subset='%s' is invalid: should be one of %r" % (
subset, list(sorted(label_filenames.keys()))))
index_file_path = join(lfw_home, label_filenames[subset])
# load and memoize the pairs as np arrays
pairs, target, target_names = load_func(
index_file_path, data_folder_path, resize=resize, color=color,
slice_=slice_)
# pack the results as a Bunch instance
return Bunch(data=pairs.reshape(len(pairs), -1), pairs=pairs,
target=target, target_names=target_names,
DESCR="'%s' segment of the LFW pairs dataset" % subset)
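# A minimal usage sketch, assuming the LFW archives are already cached under
# ``data_home`` or can be downloaded; with the default ``slice_``/``resize``
# arguments the development training subset yields 2200 pairs of 62 x 47 images:
#
#   >>> pairs_train = fetch_lfw_pairs(subset='train')
#   >>> pairs_train.pairs.shape
#   (2200, 2, 62, 47)
#   >>> pairs_train.target.shape
#   (2200,)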
@deprecated("Function 'load_lfw_pairs' has been deprecated in 0.17 and will be "
            "removed in 0.19. "
            "Use fetch_lfw_pairs(download_if_missing=False) instead.")
def load_lfw_pairs(download_if_missing=False, **kwargs):
"""Alias for fetch_lfw_pairs(download_if_missing=False)
Check fetch_lfw_pairs.__doc__ for the documentation and parameter list.
"""
return fetch_lfw_pairs(download_if_missing=download_if_missing, **kwargs)
| bsd-3-clause |
daniel-severo/dask-ml | dask_ml/datasets.py | 1 | 2577 | import inspect
from textwrap import dedent
import six
import numpy as np
import dask.array as da
from sklearn import datasets as _datasets
__all__ = ['make_counts']
chunks_doc = """
This returns dask.Arrays instead of numpy.ndarrays.
Requires an additional 'chunks' keyword to control the number
of blocks in the arrays."""
def _wrap_maker(func):
def inner(*args, **kwargs):
chunks = kwargs.pop('chunks')
X, y = func(*args, **kwargs)
return (da.from_array(X, chunks=(chunks, X.shape[-1])),
da.from_array(y, chunks=chunks))
__all__.append(func.__name__)
if not six.PY2:
sig = inspect.signature(func)
params = list(sig.parameters.values())
# TODO(py3): Make this keyword-only
params.append(
inspect.Parameter("chunks",
inspect.Parameter.POSITIONAL_OR_KEYWORD,
default=None))
inner.__signature__ = sig.replace(parameters=params)
doc = func.__doc__.split("\n")
doc = [' ' + doc[0], chunks_doc] + doc[1:]
inner.__doc__ = dedent('\n'.join(doc))
inner.__name__ = func.__name__
inner.__module__ = __name__
return inner
def make_counts(n_samples=1000, n_features=100, n_informative=2, scale=1.0,
chunks=100):
"""
Generate a dummy dataset for modeling count data.
Parameters
----------
n_samples : int
number of rows in the output array
n_features : int
number of columns (features) in the output array
n_informative : int
number of features that are correlated with the outcome
scale : float
Scale the true coefficient array by this
chunks : int
Number of rows per dask array block.
Returns
-------
X : dask.array, size ``(n_samples, n_features)``
y : dask.array, size ``(n_samples,)``
array of non-negative integer-valued data
Examples
--------
>>> X, y = make_counts()
"""
X = da.random.normal(0, 1, size=(n_samples, n_features),
chunks=(chunks, n_features))
informative_idx = np.random.choice(n_features, n_informative)
beta = (np.random.random(n_features) - 1) * scale
z0 = X[:, informative_idx].dot(beta[informative_idx])
rate = da.exp(z0)
y = da.random.poisson(rate, size=1, chunks=(chunks,))
return X, y
make_classification = _wrap_maker(_datasets.make_classification)
make_regression = _wrap_maker(_datasets.make_regression)
make_blobs = _wrap_maker(_datasets.make_blobs)
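# A minimal usage sketch for the wrapped makers: each forwards its arguments to
# the corresponding scikit-learn generator and accepts an extra ``chunks``
# keyword giving the number of rows per dask block, for example:
#
#   >>> X, y = make_classification(n_samples=1000, n_features=20, chunks=100)
#   >>> X   # dask.array of shape (1000, 20), split into ten 100-row blocks
#   >>> y   # dask.array of shape (1000,), chunked to match X's rows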
| bsd-3-clause |
manashmndl/scikit-learn | examples/mixture/plot_gmm_pdf.py | 284 | 1528 | """
=============================================
Density Estimation for a mixture of Gaussians
=============================================
Plot the density estimation of a mixture of two Gaussians. Data is
generated from two Gaussians with different centers and covariance
matrices.
"""
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
from sklearn import mixture
n_samples = 300
# generate random sample, two components
np.random.seed(0)
# generate spherical data centered on (20, 20)
shifted_gaussian = np.random.randn(n_samples, 2) + np.array([20, 20])
# generate zero centered stretched Gaussian data
C = np.array([[0., -0.7], [3.5, .7]])
stretched_gaussian = np.dot(np.random.randn(n_samples, 2), C)
# concatenate the two datasets into the final training set
X_train = np.vstack([shifted_gaussian, stretched_gaussian])
# fit a Gaussian Mixture Model with two components
clf = mixture.GMM(n_components=2, covariance_type='full')
clf.fit(X_train)
# display predicted scores by the model as a contour plot
x = np.linspace(-20.0, 30.0)
y = np.linspace(-20.0, 40.0)
X, Y = np.meshgrid(x, y)
XX = np.array([X.ravel(), Y.ravel()]).T
Z = -clf.score_samples(XX)[0]
Z = Z.reshape(X.shape)
CS = plt.contour(X, Y, Z, norm=LogNorm(vmin=1.0, vmax=1000.0),
levels=np.logspace(0, 3, 10))
CB = plt.colorbar(CS, shrink=0.8, extend='both')
plt.scatter(X_train[:, 0], X_train[:, 1], .8)
plt.title('Negative log-likelihood predicted by a GMM')
plt.axis('tight')
plt.show()
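# Note: ``mixture.GMM`` is deprecated in newer scikit-learn releases. A rough
# equivalent with the replacement estimator (assuming scikit-learn >= 0.18):
#
#   clf = mixture.GaussianMixture(n_components=2, covariance_type='full')
#   clf.fit(X_train)
#   Z = -clf.score_samples(XX).reshape(X.shape)  # returns log-likelihoods directly, no tuple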
| bsd-3-clause |